From 7e4c224abfe8e2a00f56a2ef0198e6de3ca1852c Mon Sep 17 00:00:00 2001 From: Jon Hunter Date: Wed, 29 Jun 2016 10:17:53 +0100 Subject: i2c: core: Add support for 'i2c-bus' subnode If the 'i2c-bus' device-tree node is present for an I2C adapter then parse this subnode for I2C slaves. Signed-off-by: Jon Hunter Acked-by: Wolfram Sang Signed-off-by: Thierry Reding --- drivers/i2c/i2c-core.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c index af11b658984d..ed7002dd9401 100644 --- a/drivers/i2c/i2c-core.c +++ b/drivers/i2c/i2c-core.c @@ -1411,7 +1411,7 @@ static struct i2c_client *of_i2c_register_device(struct i2c_adapter *adap, static void of_i2c_register_devices(struct i2c_adapter *adap) { - struct device_node *node; + struct device_node *bus, *node; /* Only register child devices if the adapter has a node pointer set */ if (!adap->dev.of_node) @@ -1419,11 +1419,17 @@ static void of_i2c_register_devices(struct i2c_adapter *adap) dev_dbg(&adap->dev, "of_i2c: walking child nodes\n"); - for_each_available_child_of_node(adap->dev.of_node, node) { + bus = of_get_child_by_name(adap->dev.of_node, "i2c-bus"); + if (!bus) + bus = of_node_get(adap->dev.of_node); + + for_each_available_child_of_node(bus, node) { if (of_node_test_and_set_flag(node, OF_POPULATED)) continue; of_i2c_register_device(adap, node); } + + of_node_put(bus); } static int of_dev_node_match(struct device *dev, void *data) -- cgit v1.2.3-59-g8ed1b From d7372cb6290eedadcf2d191b033388df3c0c39f2 Mon Sep 17 00:00:00 2001 From: Tom Yan Date: Sat, 23 Jul 2016 02:34:08 +0800 Subject: libata-scsi: use u8 array to store mode page copy ata_mselect_*() would initialize a char array for storing a copy of the current mode page. However, char could be signed char. In that case, bytes larger than 127 would be converted to negative number. For example, 0xff from def_control_mpage[] would become -1. This prevented ata_mselect_control() from working at all, since when it did the read-only bits check, there would always be a mismatch. Signed-off-by: Tom Yan Signed-off-by: Tejun Heo --- drivers/ata/libata-scsi.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index e207b33e4ce9..0bc4532bb60a 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -3610,7 +3610,7 @@ static int ata_mselect_caching(struct ata_queued_cmd *qc, { struct ata_taskfile *tf = &qc->tf; struct ata_device *dev = qc->dev; - char mpage[CACHE_MPAGE_LEN]; + u8 mpage[CACHE_MPAGE_LEN]; u8 wce; int i; @@ -3666,7 +3666,7 @@ static int ata_mselect_control(struct ata_queued_cmd *qc, const u8 *buf, int len, u16 *fp) { struct ata_device *dev = qc->dev; - char mpage[CONTROL_MPAGE_LEN]; + u8 mpage[CONTROL_MPAGE_LEN]; u8 d_sense; int i; -- cgit v1.2.3-59-g8ed1b From 535fd072202dbc8afe5a4093178132bdd4ac2424 Mon Sep 17 00:00:00 2001 From: Tom Yan Date: Fri, 22 Jul 2016 02:41:54 +0800 Subject: libata-scsi: fix MODE SELECT translation for Control mode page scsi_done() was called repeatedly and apparently because of that, the kernel would call trace when we touch the Control mode page: Call Trace: [] dump_stack+0x63/0x81 [] __warn+0xcb/0xf0 [] warn_slowpath_null+0x1d/0x20 [] ata_eh_finish+0xe0/0xf0 [libata] [] sata_pmp_error_handler+0x640/0xa50 [libata] [] ahci_error_handler+0x1d/0x70 [libahci] [] ata_scsi_port_error_handler+0x430/0x770 [libata] [] ? 
ata_scsi_cmd_error_handler+0xdd/0x160 [libata] [] ata_scsi_error+0xa7/0xf0 [libata] [] scsi_error_handler+0xaa/0x560 [scsi_mod] [] ? scsi_eh_get_sense+0x180/0x180 [scsi_mod] [] kthread+0xd8/0xf0 [] ret_from_fork+0x1f/0x40 [] ? kthread_worker_fn+0x170/0x170 ---[ end trace 8b7501047e928a17 ]--- Removed the unnecessary code and let ata_scsi_translate() do the job. Also, since ata_mselect_control() has no ATA command to send to the device, ata_scsi_mode_select_xlat() should return 1 for it, so that ata_scsi_translate() will finish early to avoid ata_qc_issue(). Signed-off-by: Tom Yan Signed-off-by: Tejun Heo --- drivers/ata/libata-scsi.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index 0bc4532bb60a..be9c76c938b2 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -3701,8 +3701,6 @@ static int ata_mselect_control(struct ata_queued_cmd *qc, dev->flags |= ATA_DFLAG_D_SENSE; else dev->flags &= ~ATA_DFLAG_D_SENSE; - qc->scsicmd->result = SAM_STAT_GOOD; - qc->scsicmd->scsi_done(qc->scsicmd); return 0; } @@ -3829,6 +3827,8 @@ static unsigned int ata_scsi_mode_select_xlat(struct ata_queued_cmd *qc) if (ata_mselect_control(qc, p, pg_len, &fp) < 0) { fp += hdr_len + bd_len; goto invalid_param; + } else { + goto skip; /* No ATA command to send */ } break; default: /* invalid page code */ -- cgit v1.2.3-59-g8ed1b From 107a077d19341b4d47ae06f0bd24883b94e64628 Mon Sep 17 00:00:00 2001 From: Tang Yuantian Date: Tue, 9 Aug 2016 09:51:21 +0800 Subject: ahci: qoriq: adjust sata parameter The default values for Port Phy2Cfg register and Port Phy3Cfg register are better; there is no need to overwrite them. Signed-off-by: Tang Yuantian Signed-off-by: Tejun Heo --- drivers/ata/ahci_qoriq.c | 6 ------ 1 file changed, 6 deletions(-) (limited to 'drivers') diff --git a/drivers/ata/ahci_qoriq.c b/drivers/ata/ahci_qoriq.c index 7bdee9bd8786..ed357a184ee4 100644 --- a/drivers/ata/ahci_qoriq.c +++ b/drivers/ata/ahci_qoriq.c @@ -44,10 +44,6 @@ #define SATA_ECC_DISABLE 0x00020000 -/* for ls1043a */ -#define LS1043A_PORT_PHY2 0x28184d1f -#define LS1043A_PORT_PHY3 0x0e081509 - enum ahci_qoriq_type { AHCI_LS1021A, AHCI_LS1043A, @@ -166,8 +162,6 @@ static int ahci_qoriq_phy_init(struct ahci_host_priv *hpriv) case AHCI_LS1043A: writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1); - writel(LS1043A_PORT_PHY2, reg_base + PORT_PHY2); - writel(LS1043A_PORT_PHY3, reg_base + PORT_PHY3); writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS); break; -- cgit v1.2.3-59-g8ed1b From 16af080e47785d14919bf94bf8a4097c5c8529fb Mon Sep 17 00:00:00 2001 From: Tang Yuantian Date: Tue, 9 Aug 2016 09:51:22 +0800 Subject: ahci: qoriq: enable snoopable sata read and write By default the SATA IP on the qoriq SoCs does not generate coherent/snoopable transactions. This patch enables it in the SATA AXICC register. In addition, the dma-coherent property must be set on the SATA controller nodes.
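For reference, the dma-coherent property added here is consumed by the generic OF/DMA code rather than by the AHCI driver itself. A minimal sketch of how a driver could check it, assuming the of_dma_is_coherent() helper from the OF code (the wrapper name below is hypothetical and the snippet is illustrative only, not part of this patch):

#include <linux/of.h>
#include <linux/of_address.h>	/* of_dma_is_coherent(); exact header may vary by kernel version */

/* Returns true when the node carries "dma-coherent", i.e. the DMA API
 * may skip explicit cache maintenance for transfers on this device. */
static bool sata_node_is_dma_coherent(struct device_node *np)
{
	return of_dma_is_coherent(np);
}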
Signed-off-by: Tang Yuantian Signed-off-by: Tejun Heo --- arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi | 3 ++- arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi | 2 ++ drivers/ata/ahci_qoriq.c | 6 ++++++ 3 files changed, 10 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi index e669fbd7f9c3..e8e4c3ed1b7e 100644 --- a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi +++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi @@ -495,10 +495,11 @@ }; sata: sata@3200000 { - compatible = "fsl,ls1043a-ahci", "fsl,ls1021a-ahci"; + compatible = "fsl,ls1043a-ahci"; reg = <0x0 0x3200000 0x0 0x10000>; interrupts = <0 69 0x4>; clocks = <&clockgen 4 0>; + dma-coherent; }; msi1: msi-controller1@1571000 { diff --git a/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi index 21023a388c29..5ead17c05bb8 100644 --- a/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi +++ b/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi @@ -679,6 +679,7 @@ reg = <0x0 0x3200000 0x0 0x10000>; interrupts = <0 133 0x4>; /* Level high type */ clocks = <&clockgen 4 3>; + dma-coherent; }; sata1: sata@3210000 { @@ -687,6 +688,7 @@ reg = <0x0 0x3210000 0x0 0x10000>; interrupts = <0 136 0x4>; /* Level high type */ clocks = <&clockgen 4 3>; + dma-coherent; }; usb0: usb3@3100000 { diff --git a/drivers/ata/ahci_qoriq.c b/drivers/ata/ahci_qoriq.c index ed357a184ee4..925c4b6a753b 100644 --- a/drivers/ata/ahci_qoriq.c +++ b/drivers/ata/ahci_qoriq.c @@ -30,17 +30,20 @@ #define PORT_PHY3 0xB0 #define PORT_PHY4 0xB4 #define PORT_PHY5 0xB8 +#define PORT_AXICC 0xBC #define PORT_TRANS 0xC8 /* port register default value */ #define AHCI_PORT_PHY_1_CFG 0xa003fffe #define AHCI_PORT_TRANS_CFG 0x08000029 +#define AHCI_PORT_AXICC_CFG 0x3fffffff /* for ls1021a */ #define LS1021A_PORT_PHY2 0x28183414 #define LS1021A_PORT_PHY3 0x0e080e06 #define LS1021A_PORT_PHY4 0x064a080b #define LS1021A_PORT_PHY5 0x2aa86470 +#define LS1021A_AXICC_ADDR 0xC0 #define SATA_ECC_DISABLE 0x00020000 @@ -158,16 +161,19 @@ static int ahci_qoriq_phy_init(struct ahci_host_priv *hpriv) writel(LS1021A_PORT_PHY4, reg_base + PORT_PHY4); writel(LS1021A_PORT_PHY5, reg_base + PORT_PHY5); writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS); + writel(AHCI_PORT_AXICC_CFG, reg_base + LS1021A_AXICC_ADDR); break; case AHCI_LS1043A: writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1); writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS); + writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC); break; case AHCI_LS2080A: writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1); writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS); + writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC); break; } -- cgit v1.2.3-59-g8ed1b From e7190699ca8c8daa52bfc62c01a71dea6decc9ee Mon Sep 17 00:00:00 2001 From: Patrice Chotard Date: Wed, 10 Aug 2016 17:37:29 +0200 Subject: ahci: st: Add ports-implemented property in support Despite ST AHCI version = 1.3, reading HOST_PORTS_IMPL returns 0. So force_port_map to 1 by using ports-implemented DT property. 
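For context, libahci applies the forced map when it saves the controller's initial configuration, which is why filling hpriv->force_port_map from the device tree works around the zero HOST_PORTS_IMPL reading. Roughly, paraphrased from ahci_save_initial_config() in drivers/ata/libahci.c (see the real source for the exact code):

	if (hpriv->force_port_map && port_map != hpriv->force_port_map) {
		dev_info(dev, "forcing port_map 0x%x -> 0x%x\n",
			 port_map, hpriv->force_port_map);
		port_map = hpriv->force_port_map;
	}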
Signed-off-by: Patrice Chotard Signed-off-by: Tejun Heo --- drivers/ata/ahci_st.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers') diff --git a/drivers/ata/ahci_st.c b/drivers/ata/ahci_st.c index 8ff428fe8e0f..bc345f249555 100644 --- a/drivers/ata/ahci_st.c +++ b/drivers/ata/ahci_st.c @@ -147,6 +147,7 @@ static struct scsi_host_template ahci_platform_sht = { static int st_ahci_probe(struct platform_device *pdev) { + struct device *dev = &pdev->dev; struct st_ahci_drv_data *drv_data; struct ahci_host_priv *hpriv; int err; @@ -170,6 +171,9 @@ static int st_ahci_probe(struct platform_device *pdev) st_ahci_configure_oob(hpriv->mmio); + of_property_read_u32(dev->of_node, + "ports-implemented", &hpriv->force_port_map); + err = ahci_platform_init_host(pdev, hpriv, &st_ahci_port_info, &ahci_platform_sht); if (err) { -- cgit v1.2.3-59-g8ed1b From cd27396e6103a43c3df76f70db1feafa16206471 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 11 Aug 2016 07:26:02 -0700 Subject: ahci: also use a per-port lock for the multi-MSIX case Signed-off-by: Christoph Hellwig Signed-off-by: Tejun Heo --- drivers/ata/libahci.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c index 7461a587b39b..7c01192d3c48 100644 --- a/drivers/ata/libahci.c +++ b/drivers/ata/libahci.c @@ -2378,7 +2378,7 @@ static int ahci_port_start(struct ata_port *ap) /* * Switch to per-port locking in case each port has its own MSI vector. */ - if (hpriv->flags & AHCI_HFLAG_MULTI_MSI) { + if (hpriv->flags & (AHCI_HFLAG_MULTI_MSI | AHCI_HFLAG_MULTI_MSIX)) { spin_lock_init(&pp->lock); ap->lock = &pp->lock; } -- cgit v1.2.3-59-g8ed1b From 9379e6b8e0f995365dc6158a1463c8dab4f2c8da Mon Sep 17 00:00:00 2001 From: Shaun Tancheff Date: Sun, 21 Aug 2016 23:23:18 -0500 Subject: libata: Safely overwrite attached page in WRITE SAME xlat Safely overwriting the attached page to ATA format from the SCSI formatted variant. Signed-off-by: Shaun Tancheff Reviewed-by: Hannes Reinecke Acked-by: Tejun Heo --- drivers/ata/libata-scsi.c | 56 ++++++++++++++++++++++++++++++++++++++++++----- include/linux/ata.h | 26 ---------------------- 2 files changed, 51 insertions(+), 31 deletions(-) (limited to 'drivers') diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index be9c76c938b2..84e7f48ca83e 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -3282,6 +3282,54 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc) return 1; } +/** + * ata_format_dsm_trim_descr() - SATL Write Same to DSM Trim + * @cmd: SCSI command being translated + * @num: Maximum number of entries (nominally 64). + * @sector: Starting sector + * @count: Total Range of request + * + * Rewrite the WRITE SAME descriptor to be a DSM TRIM little-endian formatted + * descriptor. + * + * Upto 64 entries of the format: + * 63:48 Range Length + * 47:0 LBA + * + * Range Length of 0 is ignored. + * LBA's should be sorted order and not overlap. + * + * NOTE: this is the same format as ADD LBA(S) TO NV CACHE PINNED SET + */ +static unsigned int ata_format_dsm_trim_descr(struct scsi_cmnd *cmd, u32 num, + u64 sector, u32 count) +{ + __le64 *buffer; + u32 i = 0, used_bytes; + unsigned long flags; + + BUILD_BUG_ON(512 > ATA_SCSI_RBUF_SIZE); + + spin_lock_irqsave(&ata_scsi_rbuf_lock, flags); + buffer = ((void *)ata_scsi_rbuf); + while (i < num) { + u64 entry = sector | + ((u64)(count > 0xffff ? 
0xffff : count) << 48); + buffer[i++] = __cpu_to_le64(entry); + if (count <= 0xffff) + break; + count -= 0xffff; + sector += 0xffff; + } + + used_bytes = ALIGN(i * 8, 512); + memset(buffer + i, 0, used_bytes - i * 8); + sg_copy_from_buffer(scsi_sglist(cmd), scsi_sg_count(cmd), buffer, 512); + spin_unlock_irqrestore(&ata_scsi_rbuf_lock, flags); + + return used_bytes; +} + static unsigned int ata_scsi_write_same_xlat(struct ata_queued_cmd *qc) { struct ata_taskfile *tf = &qc->tf; @@ -3290,8 +3338,8 @@ static unsigned int ata_scsi_write_same_xlat(struct ata_queued_cmd *qc) const u8 *cdb = scmd->cmnd; u64 block; u32 n_block; + const u32 trmax = ATA_MAX_TRIM_RNUM; u32 size; - void *buf; u16 fp; u8 bp = 0xff; @@ -3319,10 +3367,8 @@ static unsigned int ata_scsi_write_same_xlat(struct ata_queued_cmd *qc) if (!scsi_sg_count(scmd)) goto invalid_param_len; - buf = page_address(sg_page(scsi_sglist(scmd))); - - if (n_block <= 65535 * ATA_MAX_TRIM_RNUM) { - size = ata_set_lba_range_entries(buf, ATA_MAX_TRIM_RNUM, block, n_block); + if (n_block <= 0xffff * trmax) { + size = ata_format_dsm_trim_descr(scmd, trmax, block, n_block); } else { fp = 2; goto invalid_fld; diff --git a/include/linux/ata.h b/include/linux/ata.h index adbc812c009b..45a1d71c55f1 100644 --- a/include/linux/ata.h +++ b/include/linux/ata.h @@ -1071,32 +1071,6 @@ static inline void ata_id_to_hd_driveid(u16 *id) #endif } -/* - * Write LBA Range Entries to the buffer that will cover the extent from - * sector to sector + count. This is used for TRIM and for ADD LBA(S) - * TO NV CACHE PINNED SET. - */ -static inline unsigned ata_set_lba_range_entries(void *_buffer, - unsigned num, u64 sector, unsigned long count) -{ - __le64 *buffer = _buffer; - unsigned i = 0, used_bytes; - - while (i < num) { - u64 entry = sector | - ((u64)(count > 0xffff ? 0xffff : count) << 48); - buffer[i++] = __cpu_to_le64(entry); - if (count <= 0xffff) - break; - count -= 0xffff; - sector += 0xffff; - } - - used_bytes = ALIGN(i * 8, 512); - memset(buffer + i, 0, used_bytes - i * 8); - return used_bytes; -} - static inline bool ata_ok(u8 status) { return ((status & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ | ATA_ERR)) -- cgit v1.2.3-59-g8ed1b From 7b20309428598df00ffeb0b01f5948dea6aaf1f7 Mon Sep 17 00:00:00 2001 From: Shaun Tancheff Date: Sun, 21 Aug 2016 23:23:19 -0500 Subject: libata: Add support for SCT Write Same SATA drives may support write same via SCT. This is useful for setting the drive contents to a specific pattern (0's). Translate a SCSI WRITE SAME 16 command to be either a DSM TRIM command or an SCT Write Same command. Based on the UNMAP flag: - When set translate to DSM TRIM - When not set translate to SCT Write Same Signed-off-by: Shaun Tancheff Reviewed-by: Hannes Reinecke Acked-by: Tejun Heo --- drivers/ata/libata-scsi.c | 199 +++++++++++++++++++++++++++++++++++++++------- include/linux/ata.h | 43 ++++++++++ 2 files changed, 213 insertions(+), 29 deletions(-) (limited to 'drivers') diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index 84e7f48ca83e..08d9c8e731b3 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -1159,8 +1159,6 @@ static void ata_scsi_sdev_config(struct scsi_device *sdev) { sdev->use_10_for_rw = 1; sdev->use_10_for_ms = 1; - sdev->no_report_opcodes = 1; - sdev->no_write_same = 1; /* Schedule policy is determined by ->qc_defer() callback and * it needs to see every deferred qc. 
Set dev_blocked to 1 to @@ -3287,7 +3285,7 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc) * @cmd: SCSI command being translated * @num: Maximum number of entries (nominally 64). * @sector: Starting sector - * @count: Total Range of request + * @count: Total Range of request in logical sectors * * Rewrite the WRITE SAME descriptor to be a DSM TRIM little-endian formatted * descriptor. @@ -3330,6 +3328,45 @@ static unsigned int ata_format_dsm_trim_descr(struct scsi_cmnd *cmd, u32 num, return used_bytes; } +/** + * ata_format_dsm_trim_descr() - SATL Write Same to ATA SCT Write Same + * @cmd: SCSI command being translated + * @lba: Starting sector + * @num: Number of logical sectors to be zero'd. + * + * Rewrite the WRITE SAME descriptor to be an SCT Write Same formatted + * descriptor. + * NOTE: Writes a pattern (0's) in the foreground. + * Large write-same requents can timeout. + */ +static void ata_format_sct_write_same(struct scsi_cmnd *cmd, u64 lba, u64 num) +{ + u16 *sctpg; + unsigned long flags; + + spin_lock_irqsave(&ata_scsi_rbuf_lock, flags); + sctpg = ((void *)ata_scsi_rbuf); + + put_unaligned_le16(0x0002, &sctpg[0]); /* SCT_ACT_WRITE_SAME */ + put_unaligned_le16(0x0101, &sctpg[1]); /* WRITE PTRN FG */ + put_unaligned_le64(lba, &sctpg[2]); + put_unaligned_le64(num, &sctpg[6]); + put_unaligned_le32(0u, &sctpg[10]); + + sg_copy_from_buffer(scsi_sglist(cmd), scsi_sg_count(cmd), sctpg, 512); + spin_unlock_irqrestore(&ata_scsi_rbuf_lock, flags); +} + +/** + * ata_scsi_write_same_xlat() - SATL Write Same to ATA SCT Write Same + * @qc: Command to be translated + * + * Translate a SCSI WRITE SAME command to be either a DSM TRIM command or + * an SCT Write Same command. + * Based on WRITE SAME has the UNMAP flag + * When set translate to DSM TRIM + * When clear translate to SCT Write Same + */ static unsigned int ata_scsi_write_same_xlat(struct ata_queued_cmd *qc) { struct ata_taskfile *tf = &qc->tf; @@ -3342,6 +3379,7 @@ static unsigned int ata_scsi_write_same_xlat(struct ata_queued_cmd *qc) u32 size; u16 fp; u8 bp = 0xff; + u8 unmap = cdb[1] & 0x8; /* we may not issue DMA commands if no DMA mode is set */ if (unlikely(!dev->dma_mode)) @@ -3353,11 +3391,26 @@ static unsigned int ata_scsi_write_same_xlat(struct ata_queued_cmd *qc) } scsi_16_lba_len(cdb, &block, &n_block); - /* for now we only support WRITE SAME with the unmap bit set */ - if (unlikely(!(cdb[1] & 0x8))) { - fp = 1; - bp = 3; - goto invalid_fld; + if (unmap) { + /* If trim is not enabled the cmd is invalid. 
*/ + if ((dev->horkage & ATA_HORKAGE_NOTRIM) || + !ata_id_has_trim(dev->id)) { + fp = 1; + bp = 3; + goto invalid_fld; + } + /* If the request is too large the cmd is invalid */ + if (n_block > 0xffff * trmax) { + fp = 2; + goto invalid_fld; + } + } else { + /* If write same is not available the cmd is invalid */ + if (!ata_id_sct_write_same(dev->id)) { + fp = 1; + bp = 3; + goto invalid_fld; + } } /* @@ -3367,30 +3420,42 @@ static unsigned int ata_scsi_write_same_xlat(struct ata_queued_cmd *qc) if (!scsi_sg_count(scmd)) goto invalid_param_len; - if (n_block <= 0xffff * trmax) { + if (unmap) { size = ata_format_dsm_trim_descr(scmd, trmax, block, n_block); + if (ata_ncq_enabled(dev) && ata_fpdma_dsm_supported(dev)) { + /* Newer devices support queued TRIM commands */ + tf->protocol = ATA_PROT_NCQ; + tf->command = ATA_CMD_FPDMA_SEND; + tf->hob_nsect = ATA_SUBCMD_FPDMA_SEND_DSM & 0x1f; + tf->nsect = qc->tag << 3; + tf->hob_feature = (size / 512) >> 8; + tf->feature = size / 512; + + tf->auxiliary = 1; + } else { + tf->protocol = ATA_PROT_DMA; + tf->hob_feature = 0; + tf->feature = ATA_DSM_TRIM; + tf->hob_nsect = (size / 512) >> 8; + tf->nsect = size / 512; + tf->command = ATA_CMD_DSM; + } } else { - fp = 2; - goto invalid_fld; - } - - if (ata_ncq_enabled(dev) && ata_fpdma_dsm_supported(dev)) { - /* Newer devices support queued TRIM commands */ - tf->protocol = ATA_PROT_NCQ; - tf->command = ATA_CMD_FPDMA_SEND; - tf->hob_nsect = ATA_SUBCMD_FPDMA_SEND_DSM & 0x1f; - tf->nsect = qc->tag << 3; - tf->hob_feature = (size / 512) >> 8; - tf->feature = size / 512; + ata_format_sct_write_same(scmd, block, n_block); - tf->auxiliary = 1; - } else { - tf->protocol = ATA_PROT_DMA; tf->hob_feature = 0; - tf->feature = ATA_DSM_TRIM; - tf->hob_nsect = (size / 512) >> 8; - tf->nsect = size / 512; - tf->command = ATA_CMD_DSM; + tf->feature = 0; + tf->hob_nsect = 0; + tf->nsect = 1; + tf->lbah = 0; + tf->lbam = 0; + tf->lbal = ATA_CMD_STANDBYNOW1; + tf->hob_lbah = 0; + tf->hob_lbam = 0; + tf->hob_lbal = 0; + tf->device = ATA_CMD_STANDBYNOW1; + tf->protocol = ATA_PROT_DMA; + tf->command = ATA_CMD_WRITE_LOG_DMA_EXT; } tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48 | @@ -3413,6 +3478,76 @@ invalid_opcode: return 1; } +/** + * ata_scsiop_maint_in - Simulate a subset of MAINTENANCE_IN + * @args: device MAINTENANCE_IN data / SCSI command of interest. + * @rbuf: Response buffer, to which simulated SCSI cmd output is sent. 
+ * + * Yields a subset to satisfy scsi_report_opcode() + * + * LOCKING: + * spin_lock_irqsave(host lock) + */ +static unsigned int ata_scsiop_maint_in(struct ata_scsi_args *args, u8 *rbuf) +{ + struct ata_device *dev = args->dev; + u8 *cdb = args->cmd->cmnd; + u8 supported = 0; + unsigned int err = 0; + + if (cdb[2] != 1) { + ata_dev_warn(dev, "invalid command format %d\n", cdb[2]); + err = 2; + goto out; + } + switch (cdb[3]) { + case INQUIRY: + case MODE_SENSE: + case MODE_SENSE_10: + case READ_CAPACITY: + case SERVICE_ACTION_IN_16: + case REPORT_LUNS: + case REQUEST_SENSE: + case SYNCHRONIZE_CACHE: + case REZERO_UNIT: + case SEEK_6: + case SEEK_10: + case TEST_UNIT_READY: + case SEND_DIAGNOSTIC: + case MAINTENANCE_IN: + case READ_6: + case READ_10: + case READ_16: + case WRITE_6: + case WRITE_10: + case WRITE_16: + case ATA_12: + case ATA_16: + case VERIFY: + case VERIFY_16: + case MODE_SELECT: + case MODE_SELECT_10: + case START_STOP: + supported = 3; + break; + case WRITE_SAME_16: + if (ata_id_sct_write_same(dev->id)) + supported = 3; + break; + case ZBC_IN: + case ZBC_OUT: + if (ata_id_zoned_cap(dev->id) || + dev->class == ATA_DEV_ZAC) + supported = 3; + break; + default: + break; + } +out: + rbuf[1] = supported; /* supported */ + return err; +} + /** * ata_scsi_report_zones_complete - convert ATA output * @qc: command structure returning the data @@ -4193,6 +4328,13 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd) ata_scsi_invalid_field(dev, cmd, 1); break; + case MAINTENANCE_IN: + if (scsicmd[1] == MI_REPORT_SUPPORTED_OPERATION_CODES) + ata_scsi_rbuf_fill(&args, ata_scsiop_maint_in); + else + ata_scsi_invalid_field(dev, cmd, 1); + break; + /* all other commands */ default: ata_scsi_set_sense(dev, cmd, ILLEGAL_REQUEST, 0x20, 0x0); @@ -4225,7 +4367,6 @@ int ata_scsi_add_hosts(struct ata_host *host, struct scsi_host_template *sht) shost->max_lun = 1; shost->max_channel = 1; shost->max_cmd_len = 16; - shost->no_write_same = 1; /* Schedule policy is determined by ->qc_defer() * callback and it needs to see every deferred qc. diff --git a/include/linux/ata.h b/include/linux/ata.h index 45a1d71c55f1..fdb180367ba1 100644 --- a/include/linux/ata.h +++ b/include/linux/ata.h @@ -105,6 +105,7 @@ enum { ATA_ID_CFA_KEY_MGMT = 162, ATA_ID_CFA_MODES = 163, ATA_ID_DATA_SET_MGMT = 169, + ATA_ID_SCT_CMD_XPORT = 206, ATA_ID_ROT_SPEED = 217, ATA_ID_PIO4 = (1 << 1), @@ -788,6 +789,48 @@ static inline bool ata_id_sense_reporting_enabled(const u16 *id) return id[ATA_ID_COMMAND_SET_4] & (1 << 6); } +/** + * + * Word: 206 - SCT Command Transport + * 15:12 - Vendor Specific + * 11:6 - Reserved + * 5 - SCT Command Transport Data Tables supported + * 4 - SCT Command Transport Features Control supported + * 3 - SCT Command Transport Error Recovery Control supported + * 2 - SCT Command Transport Write Same supported + * 1 - SCT Command Transport Long Sector Access supported + * 0 - SCT Command Transport supported + */ +static inline bool ata_id_sct_data_tables(const u16 *id) +{ + return id[ATA_ID_SCT_CMD_XPORT] & (1 << 5) ? true : false; +} + +static inline bool ata_id_sct_features_ctrl(const u16 *id) +{ + return id[ATA_ID_SCT_CMD_XPORT] & (1 << 4) ? true : false; +} + +static inline bool ata_id_sct_error_recovery_ctrl(const u16 *id) +{ + return id[ATA_ID_SCT_CMD_XPORT] & (1 << 3) ? true : false; +} + +static inline bool ata_id_sct_write_same(const u16 *id) +{ + return id[ATA_ID_SCT_CMD_XPORT] & (1 << 2) ? 
true : false; +} + +static inline bool ata_id_sct_long_sector_access(const u16 *id) +{ + return id[ATA_ID_SCT_CMD_XPORT] & (1 << 1) ? true : false; +} + +static inline bool ata_id_sct_supported(const u16 *id) +{ + return id[ATA_ID_SCT_CMD_XPORT] & (1 << 0) ? true : false; +} + /** * ata_id_major_version - get ATA level of drive * @id: Identify data -- cgit v1.2.3-59-g8ed1b From ef2d7392c4ece5c3cd12a6c7ca9366cd8f189aff Mon Sep 17 00:00:00 2001 From: Shaun Tancheff Date: Wed, 24 Aug 2016 13:08:14 -0500 Subject: libata: SCT Write Same / DSM Trim Correct handling of devices with sector_size other that 512 bytes. In the case of a 4Kn device sector_size it is possible to describe a much larger DSM Trim than the current fixed default of 512 bytes. This patch assumes the minimum descriptor is sector_size and fills out the descriptor accordingly. The ACS-2 specification is quite clear that the DSM command payload is sized as number of 512 byte transfers so a 4Kn device will operate correctly without this patch. Signed-off-by: Shaun Tancheff Acked-by: Tejun Heo --- drivers/ata/libata-scsi.c | 85 +++++++++++++++++++++++++++++++---------------- 1 file changed, 57 insertions(+), 28 deletions(-) (limited to 'drivers') diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index 08d9c8e731b3..35b1f9e475cc 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -3283,7 +3283,7 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc) /** * ata_format_dsm_trim_descr() - SATL Write Same to DSM Trim * @cmd: SCSI command being translated - * @num: Maximum number of entries (nominally 64). + * @trmax: Maximum number of entries that will fit in sector_size bytes. * @sector: Starting sector * @count: Total Range of request in logical sectors * @@ -3298,63 +3298,80 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc) * LBA's should be sorted order and not overlap. * * NOTE: this is the same format as ADD LBA(S) TO NV CACHE PINNED SET + * + * Return: Number of bytes copied into sglist. */ -static unsigned int ata_format_dsm_trim_descr(struct scsi_cmnd *cmd, u32 num, - u64 sector, u32 count) +static size_t ata_format_dsm_trim_descr(struct scsi_cmnd *cmd, u32 trmax, + u64 sector, u32 count) { - __le64 *buffer; - u32 i = 0, used_bytes; + struct scsi_device *sdp = cmd->device; + size_t len = sdp->sector_size; + size_t r; + __le64 *buf; + u32 i = 0; unsigned long flags; - BUILD_BUG_ON(512 > ATA_SCSI_RBUF_SIZE); + WARN_ON(len > ATA_SCSI_RBUF_SIZE); + + if (len > ATA_SCSI_RBUF_SIZE) + len = ATA_SCSI_RBUF_SIZE; spin_lock_irqsave(&ata_scsi_rbuf_lock, flags); - buffer = ((void *)ata_scsi_rbuf); - while (i < num) { + buf = ((void *)ata_scsi_rbuf); + memset(buf, 0, len); + while (i < trmax) { u64 entry = sector | ((u64)(count > 0xffff ? 0xffff : count) << 48); - buffer[i++] = __cpu_to_le64(entry); + buf[i++] = __cpu_to_le64(entry); if (count <= 0xffff) break; count -= 0xffff; sector += 0xffff; } - - used_bytes = ALIGN(i * 8, 512); - memset(buffer + i, 0, used_bytes - i * 8); - sg_copy_from_buffer(scsi_sglist(cmd), scsi_sg_count(cmd), buffer, 512); + r = sg_copy_from_buffer(scsi_sglist(cmd), scsi_sg_count(cmd), buf, len); spin_unlock_irqrestore(&ata_scsi_rbuf_lock, flags); - return used_bytes; + return r; } /** * ata_format_dsm_trim_descr() - SATL Write Same to ATA SCT Write Same * @cmd: SCSI command being translated * @lba: Starting sector - * @num: Number of logical sectors to be zero'd. + * @num: Number of sectors to be zero'd. 
* - * Rewrite the WRITE SAME descriptor to be an SCT Write Same formatted + * Rewrite the WRITE SAME payload to be an SCT Write Same formatted * descriptor. * NOTE: Writes a pattern (0's) in the foreground. - * Large write-same requents can timeout. + * + * Return: Number of bytes copied into sglist. */ -static void ata_format_sct_write_same(struct scsi_cmnd *cmd, u64 lba, u64 num) +static size_t ata_format_sct_write_same(struct scsi_cmnd *cmd, u64 lba, u64 num) { - u16 *sctpg; + struct scsi_device *sdp = cmd->device; + size_t len = sdp->sector_size; + size_t r; + u16 *buf; unsigned long flags; spin_lock_irqsave(&ata_scsi_rbuf_lock, flags); - sctpg = ((void *)ata_scsi_rbuf); + buf = ((void *)ata_scsi_rbuf); + + put_unaligned_le16(0x0002, &buf[0]); /* SCT_ACT_WRITE_SAME */ + put_unaligned_le16(0x0101, &buf[1]); /* WRITE PTRN FG */ + put_unaligned_le64(lba, &buf[2]); + put_unaligned_le64(num, &buf[6]); + put_unaligned_le32(0u, &buf[10]); /* pattern */ + + WARN_ON(len > ATA_SCSI_RBUF_SIZE); - put_unaligned_le16(0x0002, &sctpg[0]); /* SCT_ACT_WRITE_SAME */ - put_unaligned_le16(0x0101, &sctpg[1]); /* WRITE PTRN FG */ - put_unaligned_le64(lba, &sctpg[2]); - put_unaligned_le64(num, &sctpg[6]); - put_unaligned_le32(0u, &sctpg[10]); + if (len > ATA_SCSI_RBUF_SIZE) + len = ATA_SCSI_RBUF_SIZE; - sg_copy_from_buffer(scsi_sglist(cmd), scsi_sg_count(cmd), sctpg, 512); + r = sg_copy_from_buffer(scsi_sglist(cmd), scsi_sg_count(cmd), buf, len); spin_unlock_irqrestore(&ata_scsi_rbuf_lock, flags); + + return r; } /** @@ -3371,11 +3388,13 @@ static unsigned int ata_scsi_write_same_xlat(struct ata_queued_cmd *qc) { struct ata_taskfile *tf = &qc->tf; struct scsi_cmnd *scmd = qc->scsicmd; + struct scsi_device *sdp = scmd->device; + size_t len = sdp->sector_size; struct ata_device *dev = qc->dev; const u8 *cdb = scmd->cmnd; u64 block; u32 n_block; - const u32 trmax = ATA_MAX_TRIM_RNUM; + const u32 trmax = len >> 3; u32 size; u16 fp; u8 bp = 0xff; @@ -3420,8 +3439,16 @@ static unsigned int ata_scsi_write_same_xlat(struct ata_queued_cmd *qc) if (!scsi_sg_count(scmd)) goto invalid_param_len; + /* + * size must match sector size in bytes + * For DATA SET MANAGEMENT TRIM in ACS-2 nsect (aka count) + * is defined as number of 512 byte blocks to be transferred. + */ if (unmap) { size = ata_format_dsm_trim_descr(scmd, trmax, block, n_block); + if (size != len) + goto invalid_param_len; + if (ata_ncq_enabled(dev) && ata_fpdma_dsm_supported(dev)) { /* Newer devices support queued TRIM commands */ tf->protocol = ATA_PROT_NCQ; @@ -3441,7 +3468,9 @@ static unsigned int ata_scsi_write_same_xlat(struct ata_queued_cmd *qc) tf->command = ATA_CMD_DSM; } } else { - ata_format_sct_write_same(scmd, block, n_block); + size = ata_format_sct_write_same(scmd, block, n_block); + if (size != len) + goto invalid_param_len; tf->hob_feature = 0; tf->feature = 0; -- cgit v1.2.3-59-g8ed1b From 7d36dd00ab8d57134cce86ad4febc2f5bbdf3e72 Mon Sep 17 00:00:00 2001 From: Shaun Tancheff Date: Sun, 21 Aug 2016 23:23:21 -0500 Subject: libata: SCT Write Same handle ATA_DFLAG_PIO Use non DMA write log when ATA_DFLAG_PIO is set. 
Signed-off-by: Shaun Tancheff Reviewed-by: Hannes Reinecke Acked-by: Tejun Heo --- drivers/ata/libata-scsi.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers') diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index 35b1f9e475cc..2f5487f02de1 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -3485,6 +3485,8 @@ static unsigned int ata_scsi_write_same_xlat(struct ata_queued_cmd *qc) tf->device = ATA_CMD_STANDBYNOW1; tf->protocol = ATA_PROT_DMA; tf->command = ATA_CMD_WRITE_LOG_DMA_EXT; + if (unlikely(dev->flags & ATA_DFLAG_PIO)) + tf->command = ATA_CMD_WRITE_LOG_EXT; } tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48 | -- cgit v1.2.3-59-g8ed1b From 83ab7dad06b74e390c2ce0e7b5136daf286e1f5e Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Tue, 9 Aug 2016 13:58:27 +0200 Subject: rtc: pcf2123: Add missing error code assignment before test It is likely that checking the result of 'pcf2123_write_reg' is expected here. Also fix a small style issue. The '{' at the beginning of the function is misplaced. Fixes: 809b453b76e15 ("rtc: pcf2123: clean up writes to the rtc chip") Signed-off-by: Christophe JAILLET Signed-off-by: Alexandre Belloni --- drivers/rtc/rtc-pcf2123.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/rtc/rtc-pcf2123.c b/drivers/rtc/rtc-pcf2123.c index b4478cc92b55..8895f77726e8 100644 --- a/drivers/rtc/rtc-pcf2123.c +++ b/drivers/rtc/rtc-pcf2123.c @@ -182,7 +182,8 @@ static ssize_t pcf2123_show(struct device *dev, struct device_attribute *attr, } static ssize_t pcf2123_store(struct device *dev, struct device_attribute *attr, - const char *buffer, size_t count) { + const char *buffer, size_t count) +{ struct pcf2123_sysfs_reg *r; unsigned long reg; unsigned long val; @@ -199,7 +200,7 @@ static ssize_t pcf2123_store(struct device *dev, struct device_attribute *attr, if (ret) return ret; - pcf2123_write_reg(dev, reg, val); + ret = pcf2123_write_reg(dev, reg, val); if (ret < 0) return -EIO; return count; -- cgit v1.2.3-59-g8ed1b From 50d6c0ea8111100a102c03b4410ba1e45fa8b771 Mon Sep 17 00:00:00 2001 From: Alexandre Belloni Date: Wed, 13 Jul 2016 02:26:08 +0200 Subject: rtc: ds1307: fix century bit support Add an option to properly support the century bit of ds1337 and compatibles and ds1340. Because the driver had a bug until now, it is not possible to switch users to the fixed code directly as RTCs in the field will wrongly have the century bit set. Acked-by: Arnaud Ebalard Signed-off-by: Alexandre Belloni --- drivers/rtc/Kconfig | 14 ++++++++++++++ drivers/rtc/rtc-ds1307.c | 48 +++++++++++++++++++++++++++++++++++++++++++----- 2 files changed, 57 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig index 7fc11cdfd27e..6ce3dad73253 100644 --- a/drivers/rtc/Kconfig +++ b/drivers/rtc/Kconfig @@ -234,6 +234,20 @@ config RTC_DRV_DS1307_HWMON Say Y here if you want to expose temperature sensor data on rtc-ds1307 (only DS3231) +config RTC_DRV_DS1307_CENTURY + bool "Century bit support for rtc-ds1307" + depends on RTC_DRV_DS1307 + default n + help + The DS1307 driver suffered from a bug where it was enabling the + century bit inconditionnally but never used it when reading the time. + It made the driver unable to support dates beyond 2099. 
+ Setting this option will add proper support for the century bit but if + the time was previously set using a kernel predating this option, + reading the date will return a date in the next century. + To solve that, you could boot a kernel without this option set, set + the RTC date and then boot a kernel with this option set. + config RTC_DRV_DS1374 tristate "Dallas/Maxim DS1374" help diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c index 8e1c5cb6ece6..712e221d2415 100644 --- a/drivers/rtc/rtc-ds1307.c +++ b/drivers/rtc/rtc-ds1307.c @@ -382,10 +382,25 @@ static int ds1307_get_time(struct device *dev, struct rtc_time *t) t->tm_mday = bcd2bin(ds1307->regs[DS1307_REG_MDAY] & 0x3f); tmp = ds1307->regs[DS1307_REG_MONTH] & 0x1f; t->tm_mon = bcd2bin(tmp) - 1; - - /* assume 20YY not 19YY, and ignore DS1337_BIT_CENTURY */ t->tm_year = bcd2bin(ds1307->regs[DS1307_REG_YEAR]) + 100; +#ifdef CONFIG_RTC_DRV_DS1307_CENTURY + switch (ds1307->type) { + case ds_1337: + case ds_1339: + case ds_3231: + if (ds1307->regs[DS1307_REG_MONTH] & DS1337_BIT_CENTURY) + t->tm_year += 100; + break; + case ds_1340: + if (ds1307->regs[DS1307_REG_HOUR] & DS1340_BIT_CENTURY) + t->tm_year += 100; + break; + default: + break; + } +#endif + dev_dbg(dev, "%s secs=%d, mins=%d, " "hours=%d, mday=%d, mon=%d, year=%d, wday=%d\n", "read", t->tm_sec, t->tm_min, @@ -409,6 +424,27 @@ static int ds1307_set_time(struct device *dev, struct rtc_time *t) t->tm_hour, t->tm_mday, t->tm_mon, t->tm_year, t->tm_wday); +#ifdef CONFIG_RTC_DRV_DS1307_CENTURY + if (t->tm_year < 100) + return -EINVAL; + + switch (ds1307->type) { + case ds_1337: + case ds_1339: + case ds_3231: + case ds_1340: + if (t->tm_year > 299) + return -EINVAL; + default: + if (t->tm_year > 199) + return -EINVAL; + break; + } +#else + if (t->tm_year < 100 || t->tm_year > 199) + return -EINVAL; +#endif + buf[DS1307_REG_SECS] = bin2bcd(t->tm_sec); buf[DS1307_REG_MIN] = bin2bcd(t->tm_min); buf[DS1307_REG_HOUR] = bin2bcd(t->tm_hour); @@ -424,11 +460,13 @@ static int ds1307_set_time(struct device *dev, struct rtc_time *t) case ds_1337: case ds_1339: case ds_3231: - buf[DS1307_REG_MONTH] |= DS1337_BIT_CENTURY; + if (t->tm_year > 199) + buf[DS1307_REG_MONTH] |= DS1337_BIT_CENTURY; break; case ds_1340: - buf[DS1307_REG_HOUR] |= DS1340_BIT_CENTURY_EN - | DS1340_BIT_CENTURY; + buf[DS1307_REG_HOUR] |= DS1340_BIT_CENTURY_EN; + if (t->tm_year > 199) + buf[DS1307_REG_HOUR] |= DS1340_BIT_CENTURY; break; case mcp794xx: /* -- cgit v1.2.3-59-g8ed1b From 78aaa06d7956dc4cd42fff5033f4bf6fafcbc79f Mon Sep 17 00:00:00 2001 From: Alexandre Belloni Date: Wed, 13 Jul 2016 02:36:41 +0200 Subject: rtc: ds1307: add Intersil ISL12057 support Intersil ISL12057 is a drop-in replacement for DS1337. It can be supported by the ds1307 driver. Acked-by: Arnaud Ebalard Signed-off-by: Alexandre Belloni --- drivers/rtc/Kconfig | 8 ++++---- drivers/rtc/rtc-ds1307.c | 6 ++++++ 2 files changed, 10 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig index 6ce3dad73253..83a788416721 100644 --- a/drivers/rtc/Kconfig +++ b/drivers/rtc/Kconfig @@ -208,14 +208,14 @@ config RTC_DRV_AS3722 will be called rtc-as3722. config RTC_DRV_DS1307 - tristate "Dallas/Maxim DS1307/37/38/39/40, ST M41T00, EPSON RX-8025" + tristate "Dallas/Maxim DS1307/37/38/39/40, ST M41T00, EPSON RX-8025, ISL12057" help If you say yes here you get support for various compatible RTC chips (often with battery backup) connected with I2C. 
This driver should handle DS1307, DS1337, DS1338, DS1339, DS1340, ST M41T00, - EPSON RX-8025 and probably other chips. In some cases the RTC - must already have been initialized (by manufacturing or a - bootloader). + EPSON RX-8025, Intersil ISL12057 and probably other chips. In some + cases the RTC must already have been initialized (by manufacturing or + a bootloader). The first seven registers on these chips hold an RTC, and other registers may add features such as NVRAM, a trickle charger for diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c index 712e221d2415..4e31036ee259 100644 --- a/drivers/rtc/rtc-ds1307.c +++ b/drivers/rtc/rtc-ds1307.c @@ -186,6 +186,7 @@ static const struct i2c_device_id ds1307_id[] = { { "mcp7941x", mcp794xx }, { "pt7c4338", ds_1307 }, { "rx8025", rx_8025 }, + { "isl12057", ds_1337 }, { } }; MODULE_DEVICE_TABLE(i2c, ds1307_id); @@ -1333,6 +1334,11 @@ static int ds1307_probe(struct i2c_client *client, if (of_property_read_bool(client->dev.of_node, "wakeup-source")) { ds1307_can_wakeup_device = true; } + /* Intersil ISL12057 DT backward compatibility */ + if (of_property_read_bool(client->dev.of_node, + "isil,irq2-can-wakeup-machine")) { + ds1307_can_wakeup_device = true; + } #endif switch (ds1307->type) { -- cgit v1.2.3-59-g8ed1b From e8aa7dcbf0b1fdd79127b125d2369bca7bdb5b03 Mon Sep 17 00:00:00 2001 From: Alexandre Belloni Date: Wed, 13 Jul 2016 02:39:54 +0200 Subject: rtc: isl12057: remove driver The Intersil isl12057 is now supported by the ds1307 driver. Acked-by: Arnaud Ebalard Signed-off-by: Alexandre Belloni --- drivers/rtc/Kconfig | 10 - drivers/rtc/Makefile | 1 - drivers/rtc/rtc-isl12057.c | 643 --------------------------------------------- 3 files changed, 654 deletions(-) delete mode 100644 drivers/rtc/rtc-isl12057.c (limited to 'drivers') diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig index 83a788416721..23acc3f536b3 100644 --- a/drivers/rtc/Kconfig +++ b/drivers/rtc/Kconfig @@ -388,16 +388,6 @@ config RTC_DRV_ISL12022 This driver can also be built as a module. If so, the module will be called rtc-isl12022. -config RTC_DRV_ISL12057 - select REGMAP_I2C - tristate "Intersil ISL12057" - help - If you say yes here you get support for the Intersil ISL12057 - I2C RTC chip. - - This driver can also be built as a module. If so, the module - will be called rtc-isl12057. - config RTC_DRV_X1205 tristate "Xicor/Intersil X1205" help diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile index 8fb994bacdf7..1ac694a330c8 100644 --- a/drivers/rtc/Makefile +++ b/drivers/rtc/Makefile @@ -72,7 +72,6 @@ obj-$(CONFIG_RTC_DRV_HID_SENSOR_TIME) += rtc-hid-sensor-time.o obj-$(CONFIG_RTC_DRV_HYM8563) += rtc-hym8563.o obj-$(CONFIG_RTC_DRV_IMXDI) += rtc-imxdi.o obj-$(CONFIG_RTC_DRV_ISL12022) += rtc-isl12022.o -obj-$(CONFIG_RTC_DRV_ISL12057) += rtc-isl12057.o obj-$(CONFIG_RTC_DRV_ISL1208) += rtc-isl1208.o obj-$(CONFIG_RTC_DRV_JZ4740) += rtc-jz4740.o obj-$(CONFIG_RTC_DRV_LP8788) += rtc-lp8788.o diff --git a/drivers/rtc/rtc-isl12057.c b/drivers/rtc/rtc-isl12057.c deleted file mode 100644 index 0e7f0f52bfe4..000000000000 --- a/drivers/rtc/rtc-isl12057.c +++ /dev/null @@ -1,643 +0,0 @@ -/* - * rtc-isl12057 - Driver for Intersil ISL12057 I2C Real Time Clock - * - * Copyright (C) 2013, Arnaud EBALARD - * - * This work is largely based on Intersil ISL1208 driver developed by - * Hebert Valerio Riedel . 
- * - * Detailed datasheet on which this development is based is available here: - * - * http://natisbad.org/NAS2/refs/ISL12057.pdf - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#include -#include -#include -#include -#include -#include -#include -#include - -#define DRV_NAME "rtc-isl12057" - -/* RTC section */ -#define ISL12057_REG_RTC_SC 0x00 /* Seconds */ -#define ISL12057_REG_RTC_MN 0x01 /* Minutes */ -#define ISL12057_REG_RTC_HR 0x02 /* Hours */ -#define ISL12057_REG_RTC_HR_PM BIT(5) /* AM/PM bit in 12h format */ -#define ISL12057_REG_RTC_HR_MIL BIT(6) /* 24h/12h format */ -#define ISL12057_REG_RTC_DW 0x03 /* Day of the Week */ -#define ISL12057_REG_RTC_DT 0x04 /* Date */ -#define ISL12057_REG_RTC_MO 0x05 /* Month */ -#define ISL12057_REG_RTC_MO_CEN BIT(7) /* Century bit */ -#define ISL12057_REG_RTC_YR 0x06 /* Year */ -#define ISL12057_RTC_SEC_LEN 7 - -/* Alarm 1 section */ -#define ISL12057_REG_A1_SC 0x07 /* Alarm 1 Seconds */ -#define ISL12057_REG_A1_MN 0x08 /* Alarm 1 Minutes */ -#define ISL12057_REG_A1_HR 0x09 /* Alarm 1 Hours */ -#define ISL12057_REG_A1_HR_PM BIT(5) /* AM/PM bit in 12h format */ -#define ISL12057_REG_A1_HR_MIL BIT(6) /* 24h/12h format */ -#define ISL12057_REG_A1_DWDT 0x0A /* Alarm 1 Date / Day of the week */ -#define ISL12057_REG_A1_DWDT_B BIT(6) /* DW / DT selection bit */ -#define ISL12057_A1_SEC_LEN 4 - -/* Alarm 2 section */ -#define ISL12057_REG_A2_MN 0x0B /* Alarm 2 Minutes */ -#define ISL12057_REG_A2_HR 0x0C /* Alarm 2 Hours */ -#define ISL12057_REG_A2_DWDT 0x0D /* Alarm 2 Date / Day of the week */ -#define ISL12057_A2_SEC_LEN 3 - -/* Control/Status registers */ -#define ISL12057_REG_INT 0x0E -#define ISL12057_REG_INT_A1IE BIT(0) /* Alarm 1 interrupt enable bit */ -#define ISL12057_REG_INT_A2IE BIT(1) /* Alarm 2 interrupt enable bit */ -#define ISL12057_REG_INT_INTCN BIT(2) /* Interrupt control enable bit */ -#define ISL12057_REG_INT_RS1 BIT(3) /* Freq out control bit 1 */ -#define ISL12057_REG_INT_RS2 BIT(4) /* Freq out control bit 2 */ -#define ISL12057_REG_INT_EOSC BIT(7) /* Oscillator enable bit */ - -#define ISL12057_REG_SR 0x0F -#define ISL12057_REG_SR_A1F BIT(0) /* Alarm 1 interrupt bit */ -#define ISL12057_REG_SR_A2F BIT(1) /* Alarm 2 interrupt bit */ -#define ISL12057_REG_SR_OSF BIT(7) /* Oscillator failure bit */ - -/* Register memory map length */ -#define ISL12057_MEM_MAP_LEN 0x10 - -struct isl12057_rtc_data { - struct rtc_device *rtc; - struct regmap *regmap; - struct mutex lock; - int irq; -}; - -static void isl12057_rtc_regs_to_tm(struct rtc_time *tm, u8 *regs) -{ - tm->tm_sec = bcd2bin(regs[ISL12057_REG_RTC_SC]); - tm->tm_min = bcd2bin(regs[ISL12057_REG_RTC_MN]); - - if (regs[ISL12057_REG_RTC_HR] & ISL12057_REG_RTC_HR_MIL) { /* AM/PM */ - tm->tm_hour = bcd2bin(regs[ISL12057_REG_RTC_HR] & 0x1f); - if (regs[ISL12057_REG_RTC_HR] & ISL12057_REG_RTC_HR_PM) - tm->tm_hour += 12; - } else { /* 24 hour mode */ - tm->tm_hour = bcd2bin(regs[ISL12057_REG_RTC_HR] & 0x3f); - } - - tm->tm_mday = bcd2bin(regs[ISL12057_REG_RTC_DT]); - tm->tm_wday = bcd2bin(regs[ISL12057_REG_RTC_DW]) - 1; /* 
starts at 1 */ - tm->tm_mon = bcd2bin(regs[ISL12057_REG_RTC_MO] & 0x1f) - 1; /* ditto */ - tm->tm_year = bcd2bin(regs[ISL12057_REG_RTC_YR]) + 100; - - /* Check if years register has overflown from 99 to 00 */ - if (regs[ISL12057_REG_RTC_MO] & ISL12057_REG_RTC_MO_CEN) - tm->tm_year += 100; -} - -static int isl12057_rtc_tm_to_regs(u8 *regs, struct rtc_time *tm) -{ - u8 century_bit; - - /* - * The clock has an 8 bit wide bcd-coded register for the year. - * It also has a century bit encoded in MO flag which provides - * information about overflow of year register from 99 to 00. - * tm_year is an offset from 1900 and we are interested in the - * 2000-2199 range, so any value less than 100 or larger than - * 299 is invalid. - */ - if (tm->tm_year < 100 || tm->tm_year > 299) - return -EINVAL; - - century_bit = (tm->tm_year > 199) ? ISL12057_REG_RTC_MO_CEN : 0; - - regs[ISL12057_REG_RTC_SC] = bin2bcd(tm->tm_sec); - regs[ISL12057_REG_RTC_MN] = bin2bcd(tm->tm_min); - regs[ISL12057_REG_RTC_HR] = bin2bcd(tm->tm_hour); /* 24-hour format */ - regs[ISL12057_REG_RTC_DT] = bin2bcd(tm->tm_mday); - regs[ISL12057_REG_RTC_MO] = bin2bcd(tm->tm_mon + 1) | century_bit; - regs[ISL12057_REG_RTC_YR] = bin2bcd(tm->tm_year % 100); - regs[ISL12057_REG_RTC_DW] = bin2bcd(tm->tm_wday + 1); - - return 0; -} - -/* - * Try and match register bits w/ fixed null values to see whether we - * are dealing with an ISL12057. Note: this function is called early - * during init and hence does need mutex protection. - */ -static int isl12057_i2c_validate_chip(struct regmap *regmap) -{ - u8 regs[ISL12057_MEM_MAP_LEN]; - static const u8 mask[ISL12057_MEM_MAP_LEN] = { 0x80, 0x80, 0x80, 0xf8, - 0xc0, 0x60, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x60, 0x7c }; - int ret, i; - - ret = regmap_bulk_read(regmap, 0, regs, ISL12057_MEM_MAP_LEN); - if (ret) - return ret; - - for (i = 0; i < ISL12057_MEM_MAP_LEN; ++i) { - if (regs[i] & mask[i]) /* check if bits are cleared */ - return -ENODEV; - } - - return 0; -} - -static int _isl12057_rtc_clear_alarm(struct device *dev) -{ - struct isl12057_rtc_data *data = dev_get_drvdata(dev); - int ret; - - ret = regmap_update_bits(data->regmap, ISL12057_REG_SR, - ISL12057_REG_SR_A1F, 0); - if (ret) - dev_err(dev, "%s: clearing alarm failed (%d)\n", __func__, ret); - - return ret; -} - -static int _isl12057_rtc_update_alarm(struct device *dev, int enable) -{ - struct isl12057_rtc_data *data = dev_get_drvdata(dev); - int ret; - - ret = regmap_update_bits(data->regmap, ISL12057_REG_INT, - ISL12057_REG_INT_A1IE, - enable ? ISL12057_REG_INT_A1IE : 0); - if (ret) - dev_err(dev, "%s: changing alarm interrupt flag failed (%d)\n", - __func__, ret); - - return ret; -} - -/* - * Note: as we only read from device and do not perform any update, there is - * no need for an equivalent function which would try and get driver's main - * lock. Here, it is safe for everyone if we just use regmap internal lock - * on the device when reading. 
- */ -static int _isl12057_rtc_read_time(struct device *dev, struct rtc_time *tm) -{ - struct isl12057_rtc_data *data = dev_get_drvdata(dev); - u8 regs[ISL12057_RTC_SEC_LEN]; - unsigned int sr; - int ret; - - ret = regmap_read(data->regmap, ISL12057_REG_SR, &sr); - if (ret) { - dev_err(dev, "%s: unable to read oscillator status flag (%d)\n", - __func__, ret); - goto out; - } else { - if (sr & ISL12057_REG_SR_OSF) { - ret = -ENODATA; - goto out; - } - } - - ret = regmap_bulk_read(data->regmap, ISL12057_REG_RTC_SC, regs, - ISL12057_RTC_SEC_LEN); - if (ret) - dev_err(dev, "%s: unable to read RTC time section (%d)\n", - __func__, ret); - -out: - if (ret) - return ret; - - isl12057_rtc_regs_to_tm(tm, regs); - - return rtc_valid_tm(tm); -} - -static int isl12057_rtc_update_alarm(struct device *dev, int enable) -{ - struct isl12057_rtc_data *data = dev_get_drvdata(dev); - int ret; - - mutex_lock(&data->lock); - ret = _isl12057_rtc_update_alarm(dev, enable); - mutex_unlock(&data->lock); - - return ret; -} - -static int isl12057_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm) -{ - struct isl12057_rtc_data *data = dev_get_drvdata(dev); - struct rtc_time *alarm_tm = &alarm->time; - u8 regs[ISL12057_A1_SEC_LEN]; - unsigned int ir; - int ret; - - mutex_lock(&data->lock); - ret = regmap_bulk_read(data->regmap, ISL12057_REG_A1_SC, regs, - ISL12057_A1_SEC_LEN); - if (ret) { - dev_err(dev, "%s: reading alarm section failed (%d)\n", - __func__, ret); - goto err_unlock; - } - - alarm_tm->tm_sec = bcd2bin(regs[0] & 0x7f); - alarm_tm->tm_min = bcd2bin(regs[1] & 0x7f); - alarm_tm->tm_hour = bcd2bin(regs[2] & 0x3f); - alarm_tm->tm_mday = bcd2bin(regs[3] & 0x3f); - - ret = regmap_read(data->regmap, ISL12057_REG_INT, &ir); - if (ret) { - dev_err(dev, "%s: reading alarm interrupt flag failed (%d)\n", - __func__, ret); - goto err_unlock; - } - - alarm->enabled = !!(ir & ISL12057_REG_INT_A1IE); - -err_unlock: - mutex_unlock(&data->lock); - - return ret; -} - -static int isl12057_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm) -{ - struct isl12057_rtc_data *data = dev_get_drvdata(dev); - struct rtc_time *alarm_tm = &alarm->time; - unsigned long rtc_secs, alarm_secs; - u8 regs[ISL12057_A1_SEC_LEN]; - struct rtc_time rtc_tm; - int ret, enable = 1; - - mutex_lock(&data->lock); - ret = _isl12057_rtc_read_time(dev, &rtc_tm); - if (ret) - goto err_unlock; - - ret = rtc_tm_to_time(&rtc_tm, &rtc_secs); - if (ret) - goto err_unlock; - - ret = rtc_tm_to_time(alarm_tm, &alarm_secs); - if (ret) - goto err_unlock; - - /* If alarm time is before current time, disable the alarm */ - if (!alarm->enabled || alarm_secs <= rtc_secs) { - enable = 0; - } else { - /* - * Chip only support alarms up to one month in the future. Let's - * return an error if we get something after that limit. - * Comparison is done by incrementing rtc_tm month field by one - * and checking alarm value is still below. 
- */ - if (rtc_tm.tm_mon == 11) { /* handle year wrapping */ - rtc_tm.tm_mon = 0; - rtc_tm.tm_year += 1; - } else { - rtc_tm.tm_mon += 1; - } - - ret = rtc_tm_to_time(&rtc_tm, &rtc_secs); - if (ret) - goto err_unlock; - - if (alarm_secs > rtc_secs) { - dev_err(dev, "%s: max for alarm is one month (%d)\n", - __func__, ret); - ret = -EINVAL; - goto err_unlock; - } - } - - /* Disable the alarm before modifying it */ - ret = _isl12057_rtc_update_alarm(dev, 0); - if (ret < 0) { - dev_err(dev, "%s: unable to disable the alarm (%d)\n", - __func__, ret); - goto err_unlock; - } - - /* Program alarm registers */ - regs[0] = bin2bcd(alarm_tm->tm_sec) & 0x7f; - regs[1] = bin2bcd(alarm_tm->tm_min) & 0x7f; - regs[2] = bin2bcd(alarm_tm->tm_hour) & 0x3f; - regs[3] = bin2bcd(alarm_tm->tm_mday) & 0x3f; - - ret = regmap_bulk_write(data->regmap, ISL12057_REG_A1_SC, regs, - ISL12057_A1_SEC_LEN); - if (ret < 0) { - dev_err(dev, "%s: writing alarm section failed (%d)\n", - __func__, ret); - goto err_unlock; - } - - /* Enable or disable alarm */ - ret = _isl12057_rtc_update_alarm(dev, enable); - -err_unlock: - mutex_unlock(&data->lock); - - return ret; -} - -static int isl12057_rtc_set_time(struct device *dev, struct rtc_time *tm) -{ - struct isl12057_rtc_data *data = dev_get_drvdata(dev); - u8 regs[ISL12057_RTC_SEC_LEN]; - int ret; - - ret = isl12057_rtc_tm_to_regs(regs, tm); - if (ret) - return ret; - - mutex_lock(&data->lock); - ret = regmap_bulk_write(data->regmap, ISL12057_REG_RTC_SC, regs, - ISL12057_RTC_SEC_LEN); - if (ret) { - dev_err(dev, "%s: unable to write RTC time section (%d)\n", - __func__, ret); - goto out; - } - - /* - * Now that RTC time has been updated, let's clear oscillator - * failure flag, if needed. - */ - ret = regmap_update_bits(data->regmap, ISL12057_REG_SR, - ISL12057_REG_SR_OSF, 0); - if (ret < 0) - dev_err(dev, "%s: unable to clear osc. failure bit (%d)\n", - __func__, ret); - -out: - mutex_unlock(&data->lock); - - return ret; -} - -/* - * Check current RTC status and enable/disable what needs to be. Return 0 if - * everything went ok and a negative value upon error. Note: this function - * is called early during init and hence does need mutex protection. - */ -static int isl12057_check_rtc_status(struct device *dev, struct regmap *regmap) -{ - int ret; - - /* Enable oscillator if not already running */ - ret = regmap_update_bits(regmap, ISL12057_REG_INT, - ISL12057_REG_INT_EOSC, 0); - if (ret < 0) { - dev_err(dev, "%s: unable to enable oscillator (%d)\n", - __func__, ret); - return ret; - } - - /* Clear alarm bit if needed */ - ret = regmap_update_bits(regmap, ISL12057_REG_SR, - ISL12057_REG_SR_A1F, 0); - if (ret < 0) { - dev_err(dev, "%s: unable to clear alarm bit (%d)\n", - __func__, ret); - return ret; - } - - return 0; -} - -#ifdef CONFIG_OF -/* - * One would expect the device to be marked as a wakeup source only - * when an IRQ pin of the RTC is routed to an interrupt line of the - * CPU. In practice, such an IRQ pin can be connected to a PMIC and - * this allows the device to be powered up when RTC alarm rings. This - * is for instance the case on ReadyNAS 102, 104 and 2120. On those - * devices with no IRQ driectly connected to the SoC, the RTC chip - * can be forced as a wakeup source by stating that explicitly in - * the device's .dts file using the "wakeup-source" boolean property. - * This will guarantee 'wakealarm' sysfs entry is available on the device. - * - * The function below returns 1, i.e. 
the capability of the chip to - * wakeup the device, based on IRQ availability or if the boolean - * property has been set in the .dts file. Otherwise, it returns 0. - */ - -static bool isl12057_can_wakeup_machine(struct device *dev) -{ - struct isl12057_rtc_data *data = dev_get_drvdata(dev); - - return data->irq || of_property_read_bool(dev->of_node, "wakeup-source") - || of_property_read_bool(dev->of_node, /* legacy */ - "isil,irq2-can-wakeup-machine"); -} -#else -static bool isl12057_can_wakeup_machine(struct device *dev) -{ - struct isl12057_rtc_data *data = dev_get_drvdata(dev); - - return !!data->irq; -} -#endif - -static int isl12057_rtc_alarm_irq_enable(struct device *dev, - unsigned int enable) -{ - struct isl12057_rtc_data *rtc_data = dev_get_drvdata(dev); - int ret = -ENOTTY; - - if (rtc_data->irq) - ret = isl12057_rtc_update_alarm(dev, enable); - - return ret; -} - -static irqreturn_t isl12057_rtc_interrupt(int irq, void *data) -{ - struct i2c_client *client = data; - struct isl12057_rtc_data *rtc_data = dev_get_drvdata(&client->dev); - struct rtc_device *rtc = rtc_data->rtc; - int ret, handled = IRQ_NONE; - unsigned int sr; - - ret = regmap_read(rtc_data->regmap, ISL12057_REG_SR, &sr); - if (!ret && (sr & ISL12057_REG_SR_A1F)) { - dev_dbg(&client->dev, "RTC alarm!\n"); - - rtc_update_irq(rtc, 1, RTC_IRQF | RTC_AF); - - /* Acknowledge and disable the alarm */ - _isl12057_rtc_clear_alarm(&client->dev); - _isl12057_rtc_update_alarm(&client->dev, 0); - - handled = IRQ_HANDLED; - } - - return handled; -} - -static const struct rtc_class_ops rtc_ops = { - .read_time = _isl12057_rtc_read_time, - .set_time = isl12057_rtc_set_time, - .read_alarm = isl12057_rtc_read_alarm, - .set_alarm = isl12057_rtc_set_alarm, - .alarm_irq_enable = isl12057_rtc_alarm_irq_enable, -}; - -static const struct regmap_config isl12057_rtc_regmap_config = { - .reg_bits = 8, - .val_bits = 8, -}; - -static int isl12057_probe(struct i2c_client *client, - const struct i2c_device_id *id) -{ - struct device *dev = &client->dev; - struct isl12057_rtc_data *data; - struct regmap *regmap; - int ret; - - if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C | - I2C_FUNC_SMBUS_BYTE_DATA | - I2C_FUNC_SMBUS_I2C_BLOCK)) - return -ENODEV; - - regmap = devm_regmap_init_i2c(client, &isl12057_rtc_regmap_config); - if (IS_ERR(regmap)) { - ret = PTR_ERR(regmap); - dev_err(dev, "%s: regmap allocation failed (%d)\n", - __func__, ret); - return ret; - } - - ret = isl12057_i2c_validate_chip(regmap); - if (ret) - return ret; - - ret = isl12057_check_rtc_status(dev, regmap); - if (ret) - return ret; - - data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); - if (!data) - return -ENOMEM; - - mutex_init(&data->lock); - data->regmap = regmap; - dev_set_drvdata(dev, data); - - if (client->irq > 0) { - ret = devm_request_threaded_irq(dev, client->irq, NULL, - isl12057_rtc_interrupt, - IRQF_SHARED|IRQF_ONESHOT, - DRV_NAME, client); - if (!ret) - data->irq = client->irq; - else - dev_err(dev, "%s: irq %d unavailable (%d)\n", __func__, - client->irq, ret); - } - - if (isl12057_can_wakeup_machine(dev)) - device_init_wakeup(dev, true); - - data->rtc = devm_rtc_device_register(dev, DRV_NAME, &rtc_ops, - THIS_MODULE); - ret = PTR_ERR_OR_ZERO(data->rtc); - if (ret) { - dev_err(dev, "%s: unable to register RTC device (%d)\n", - __func__, ret); - goto err; - } - - /* We cannot support UIE mode if we do not have an IRQ line */ - if (!data->irq) - data->rtc->uie_unsupported = 1; - -err: - return ret; -} - -static int isl12057_remove(struct 
i2c_client *client) -{ - if (isl12057_can_wakeup_machine(&client->dev)) - device_init_wakeup(&client->dev, false); - - return 0; -} - -#ifdef CONFIG_PM_SLEEP -static int isl12057_rtc_suspend(struct device *dev) -{ - struct isl12057_rtc_data *rtc_data = dev_get_drvdata(dev); - - if (rtc_data->irq && device_may_wakeup(dev)) - return enable_irq_wake(rtc_data->irq); - - return 0; -} - -static int isl12057_rtc_resume(struct device *dev) -{ - struct isl12057_rtc_data *rtc_data = dev_get_drvdata(dev); - - if (rtc_data->irq && device_may_wakeup(dev)) - return disable_irq_wake(rtc_data->irq); - - return 0; -} -#endif - -static SIMPLE_DEV_PM_OPS(isl12057_rtc_pm_ops, isl12057_rtc_suspend, - isl12057_rtc_resume); - -#ifdef CONFIG_OF -static const struct of_device_id isl12057_dt_match[] = { - { .compatible = "isl,isl12057" }, /* for backward compat., don't use */ - { .compatible = "isil,isl12057" }, - { }, -}; -MODULE_DEVICE_TABLE(of, isl12057_dt_match); -#endif - -static const struct i2c_device_id isl12057_id[] = { - { "isl12057", 0 }, - { } -}; -MODULE_DEVICE_TABLE(i2c, isl12057_id); - -static struct i2c_driver isl12057_driver = { - .driver = { - .name = DRV_NAME, - .pm = &isl12057_rtc_pm_ops, - .of_match_table = of_match_ptr(isl12057_dt_match), - }, - .probe = isl12057_probe, - .remove = isl12057_remove, - .id_table = isl12057_id, -}; -module_i2c_driver(isl12057_driver); - -MODULE_AUTHOR("Arnaud EBALARD "); -MODULE_DESCRIPTION("Intersil ISL12057 RTC driver"); -MODULE_LICENSE("GPL"); -- cgit v1.2.3-59-g8ed1b From 84281c2d72a70456a3ba8d1e49548dd469f2e9d8 Mon Sep 17 00:00:00 2001 From: LABBE Corentin Date: Fri, 12 Aug 2016 14:46:14 +0200 Subject: rtc: sysfs: fix a cast removing the const attribute The char pointer buf_ptr is assigned an address from a const char pointer buf (a parameter of wakealarm_store). The data pointed to by buf_ptr is never modified, so casting it to a (char *) is useless. This patch removes the cast and turns buf_ptr into a const char pointer. Signed-off-by: LABBE Corentin Signed-off-by: Alexandre Belloni --- drivers/rtc/rtc-sysfs.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/rtc/rtc-sysfs.c b/drivers/rtc/rtc-sysfs.c index 63b9fb1318c2..1218d5d4224d 100644 --- a/drivers/rtc/rtc-sysfs.c +++ b/drivers/rtc/rtc-sysfs.c @@ -160,7 +160,7 @@ wakealarm_store(struct device *dev, struct device_attribute *attr, unsigned long push = 0; struct rtc_wkalrm alm; struct rtc_device *rtc = to_rtc_device(dev); - char *buf_ptr; + const char *buf_ptr; int adjust = 0; /* Only request alarms that trigger in the future. Disable them @@ -171,7 +171,7 @@ wakealarm_store(struct device *dev, struct device_attribute *attr, return retval; rtc_tm_to_time(&alm.time, &now); - buf_ptr = (char *)buf; + buf_ptr = buf; if (*buf_ptr == '+') { buf_ptr++; if (*buf_ptr == '=') { -- cgit v1.2.3-59-g8ed1b From 6ed1a51e0b8a981ab0540117f0c47c1f11fca5fa Mon Sep 17 00:00:00 2001 From: Jan Östlund Date: Thu, 11 Aug 2016 13:31:43 +0200 Subject: rtc: bq32k: Use correct mask name for 'minutes' register. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The BQ32K_SECONDS_MASK and BQ32K_MINUTES_MASK both have the same value, so there is no functional change.
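For reference, both masks carry the same value in this driver (a sketch of the defines, quoted from memory of the mainline rtc-bq32k.c header block and worth double-checking there), which is why only the name changes:

#define BQ32K_SECONDS_MASK	0x7F	/* bits 6:0 hold the BCD seconds */
#define BQ32K_MINUTES_MASK	0x7F	/* bits 6:0 hold the BCD minutes */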
Signed-off-by: Jan Östlund Signed-off-by: Daniel Romell Signed-off-by: Alexandre Belloni --- drivers/rtc/rtc-bq32k.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/rtc/rtc-bq32k.c b/drivers/rtc/rtc-bq32k.c index 0299988b4f13..5a0c13761023 100644 --- a/drivers/rtc/rtc-bq32k.c +++ b/drivers/rtc/rtc-bq32k.c @@ -94,7 +94,7 @@ static int bq32k_rtc_read_time(struct device *dev, struct rtc_time *tm) return error; tm->tm_sec = bcd2bin(regs.seconds & BQ32K_SECONDS_MASK); - tm->tm_min = bcd2bin(regs.minutes & BQ32K_SECONDS_MASK); + tm->tm_min = bcd2bin(regs.minutes & BQ32K_MINUTES_MASK); tm->tm_hour = bcd2bin(regs.cent_hours & BQ32K_HOURS_MASK); tm->tm_mday = bcd2bin(regs.date); tm->tm_wday = bcd2bin(regs.day) - 1; -- cgit v1.2.3-59-g8ed1b From 421a5ba24b3ef23b8972a6c238bd300abef532da Mon Sep 17 00:00:00 2001 From: Jan Östlund Date: Thu, 11 Aug 2016 13:31:44 +0200 Subject: rtc: bq32k: Fix handling of oscillator failure flag MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit While the oscillator failure flag is set, the RTC registers should be considered invalid. bq32k_rtc_read_time() now returns an error instead of an invalid time. The failure flag is cleared the next time the clock is set. Signed-off-by: Jan Östlund Signed-off-by: Daniel Romell Signed-off-by: Alexandre Belloni --- drivers/rtc/rtc-bq32k.c | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/rtc/rtc-bq32k.c b/drivers/rtc/rtc-bq32k.c index 5a0c13761023..397742446007 100644 --- a/drivers/rtc/rtc-bq32k.c +++ b/drivers/rtc/rtc-bq32k.c @@ -93,6 +93,13 @@ static int bq32k_rtc_read_time(struct device *dev, struct rtc_time *tm) if (error) return error; + /* + * In case of oscillator failure, the register contents should be + * considered invalid. The flag is cleared the next time the RTC is set. + */ + if (regs.minutes & BQ32K_OF) + return -EINVAL; + tm->tm_sec = bcd2bin(regs.seconds & BQ32K_SECONDS_MASK); tm->tm_min = bcd2bin(regs.minutes & BQ32K_MINUTES_MASK); tm->tm_hour = bcd2bin(regs.cent_hours & BQ32K_HOURS_MASK); @@ -204,13 +211,10 @@ static int bq32k_probe(struct i2c_client *client, /* Check Oscillator Failure flag */ error = bq32k_read(dev, ®, BQ32K_MINUTES, 1); - if (!error && (reg & BQ32K_OF)) { - dev_warn(dev, "Oscillator Failure. Check RTC battery.\n"); - reg &= ~BQ32K_OF; - error = bq32k_write(dev, ®, BQ32K_MINUTES, 1); - } if (error) return error; + if (reg & BQ32K_OF) + dev_warn(dev, "Oscillator Failure. Check RTC battery.\n"); if (client->dev.of_node) trickle_charger_of_init(dev, client->dev.of_node); -- cgit v1.2.3-59-g8ed1b From e545b984f0225244505b5e0605070bfc6099f475 Mon Sep 17 00:00:00 2001 From: Markus Elfring Date: Mon, 15 Aug 2016 10:52:47 +0200 Subject: rtc: pic32: Delete owner assignment The field "owner" is set by core. Thus delete an extra initialisation. 
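To illustrate why the assignment is redundant (a simplified sketch; the exact definition lives in include/linux/platform_device.h and may vary slightly between kernel versions), the registration macro already supplies the owning module:

/* platform_driver_register() passes THIS_MODULE on the caller's behalf ... */
#define platform_driver_register(drv) \
	__platform_driver_register(drv, THIS_MODULE)
/* ... and __platform_driver_register() stores it in drv->driver.owner, so an
 * explicit .owner = THIS_MODULE in the driver structure is overwritten anyway. */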
Generated by: scripts/coccinelle/api/platform_no_drv_owner.cocci Signed-off-by: Markus Elfring Signed-off-by: Alexandre Belloni --- drivers/rtc/rtc-pic32.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers') diff --git a/drivers/rtc/rtc-pic32.c b/drivers/rtc/rtc-pic32.c index 64e1e4578492..5cfb6df5c430 100644 --- a/drivers/rtc/rtc-pic32.c +++ b/drivers/rtc/rtc-pic32.c @@ -400,7 +400,6 @@ static struct platform_driver pic32_rtc_driver = { .remove = pic32_rtc_remove, .driver = { .name = "pic32-rtc", - .owner = THIS_MODULE, .of_match_table = of_match_ptr(pic32_rtc_dt_ids), }, }; -- cgit v1.2.3-59-g8ed1b From 1bc40cb1ccbc7e33622a1fd2a8a37f311e64d4fc Mon Sep 17 00:00:00 2001 From: Alexandre Belloni Date: Mon, 15 Aug 2016 17:04:42 +0200 Subject: rtc: rx6110: remove owner assignment .owner is already set by the spi core. Signed-off-by: Alexandre Belloni --- drivers/rtc/rtc-rx6110.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers') diff --git a/drivers/rtc/rtc-rx6110.c b/drivers/rtc/rtc-rx6110.c index bbad00b233bc..591793e2b4ac 100644 --- a/drivers/rtc/rtc-rx6110.c +++ b/drivers/rtc/rtc-rx6110.c @@ -388,7 +388,6 @@ MODULE_DEVICE_TABLE(spi, rx6110_id); static struct spi_driver rx6110_driver = { .driver = { .name = RX6110_DRIVER_NAME, - .owner = THIS_MODULE, }, .probe = rx6110_probe, .remove = rx6110_remove, -- cgit v1.2.3-59-g8ed1b From 637cac7c139f6433506f38aca85c2bf37ff64787 Mon Sep 17 00:00:00 2001 From: Chen-Yu Tsai Date: Fri, 19 Aug 2016 15:42:23 +0800 Subject: rtc: ac100: support clock-output-names in device tree binding The ac100 device tree binding specifies the usage of clock-output-names to specify the names of its 3 clock outputs. This is needed for orphan clock resolution, when the ac100 is probed much later than any clocks that consume any of its outputs. This wasn't supported by the driver. Add support for this. Signed-off-by: Chen-Yu Tsai Signed-off-by: Alexandre Belloni --- drivers/rtc/rtc-ac100.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers') diff --git a/drivers/rtc/rtc-ac100.c b/drivers/rtc/rtc-ac100.c index 70b4fd0f6122..2d0bb02a25d9 100644 --- a/drivers/rtc/rtc-ac100.c +++ b/drivers/rtc/rtc-ac100.c @@ -327,6 +327,8 @@ static int ac100_rtc_register_clks(struct ac100_rtc_dev *chip) .flags = 0, }; + of_property_read_string_index(np, "clock-output-names", + i, &init.name); clk->regmap = chip->regmap; clk->offset = AC100_CLKOUT_CTRL1 + i; clk->hw.init = &init; -- cgit v1.2.3-59-g8ed1b From 34c7b3ac4ccb6a972f81a4102c8ef216ca1fb155 Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Wed, 31 Aug 2016 10:05:25 +0200 Subject: rtc: constify rtc_class_ops structures Check for rtc_class_ops structures that are only passed to devm_rtc_device_register, rtc_device_register, platform_device_register_data, all of which declare the corresponding parameter as const. Declare rtc_class_ops structures that have these properties as const. The semantic patch that makes this change is as follows: (http://coccinelle.lip6.fr/) // @r disable optional_qualifier@ identifier i; position p; @@ static struct rtc_class_ops i@p = { ... }; @ok@ identifier r.i; expression e1,e2,e3,e4; position p; @@ ( devm_rtc_device_register(e1,e2,&i@p,e3) | rtc_device_register(e1,e2,&i@p,e3) | platform_device_register_data(e1,e2,e3,&i@p,e4) ) @bad@ position p != {r.p,ok.p}; identifier r.i; @@ i@p @depends on !bad disable optional_qualifier@ identifier r.i; @@ static +const struct rtc_class_ops i = { ... 
}; // Signed-off-by: Julia Lawall Acked-by: Baruch Siach Acked-by: Hans Ulli Kroll Acked-by: Linus Walleij Acked-by: Thierry Reding Signed-off-by: Alexandre Belloni --- drivers/rtc/rtc-at32ap700x.c | 2 +- drivers/rtc/rtc-coh901331.c | 2 +- drivers/rtc/rtc-davinci.c | 2 +- drivers/rtc/rtc-digicolor.c | 2 +- drivers/rtc/rtc-ds1302.c | 2 +- drivers/rtc/rtc-gemini.c | 2 +- drivers/rtc/rtc-jz4740.c | 2 +- drivers/rtc/rtc-mcp795.c | 2 +- drivers/rtc/rtc-mt6397.c | 2 +- drivers/rtc/rtc-nuc900.c | 2 +- drivers/rtc/rtc-omap.c | 2 +- drivers/rtc/rtc-palmas.c | 2 +- drivers/rtc/rtc-pcf50633.c | 2 +- drivers/rtc/rtc-rx6110.c | 2 +- drivers/rtc/rtc-rx8025.c | 2 +- drivers/rtc/rtc-spear.c | 2 +- drivers/rtc/rtc-stmp3xxx.c | 2 +- drivers/rtc/rtc-tegra.c | 2 +- drivers/rtc/rtc-twl.c | 2 +- 19 files changed, 19 insertions(+), 19 deletions(-) (limited to 'drivers') diff --git a/drivers/rtc/rtc-at32ap700x.c b/drivers/rtc/rtc-at32ap700x.c index 83ac2337c0f7..de8bf56a41e7 100644 --- a/drivers/rtc/rtc-at32ap700x.c +++ b/drivers/rtc/rtc-at32ap700x.c @@ -187,7 +187,7 @@ static irqreturn_t at32_rtc_interrupt(int irq, void *dev_id) return ret; } -static struct rtc_class_ops at32_rtc_ops = { +static const struct rtc_class_ops at32_rtc_ops = { .read_time = at32_rtc_readtime, .set_time = at32_rtc_settime, .read_alarm = at32_rtc_readalarm, diff --git a/drivers/rtc/rtc-coh901331.c b/drivers/rtc/rtc-coh901331.c index 101b7a240e0f..cfc4141d99cd 100644 --- a/drivers/rtc/rtc-coh901331.c +++ b/drivers/rtc/rtc-coh901331.c @@ -140,7 +140,7 @@ static int coh901331_alarm_irq_enable(struct device *dev, unsigned int enabled) return 0; } -static struct rtc_class_ops coh901331_ops = { +static const struct rtc_class_ops coh901331_ops = { .read_time = coh901331_read_time, .set_mmss = coh901331_set_mmss, .read_alarm = coh901331_read_alarm, diff --git a/drivers/rtc/rtc-davinci.c b/drivers/rtc/rtc-davinci.c index dba60c1dfce2..caf35567e14c 100644 --- a/drivers/rtc/rtc-davinci.c +++ b/drivers/rtc/rtc-davinci.c @@ -469,7 +469,7 @@ static int davinci_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm) return 0; } -static struct rtc_class_ops davinci_rtc_ops = { +static const struct rtc_class_ops davinci_rtc_ops = { .ioctl = davinci_rtc_ioctl, .read_time = davinci_rtc_read_time, .set_time = davinci_rtc_set_time, diff --git a/drivers/rtc/rtc-digicolor.c b/drivers/rtc/rtc-digicolor.c index 8d05596a6765..b253bf1b3531 100644 --- a/drivers/rtc/rtc-digicolor.c +++ b/drivers/rtc/rtc-digicolor.c @@ -159,7 +159,7 @@ static int dc_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) return 0; } -static struct rtc_class_ops dc_rtc_ops = { +static const struct rtc_class_ops dc_rtc_ops = { .read_time = dc_rtc_read_time, .set_mmss = dc_rtc_set_mmss, .read_alarm = dc_rtc_read_alarm, diff --git a/drivers/rtc/rtc-ds1302.c b/drivers/rtc/rtc-ds1302.c index f5dd09fe5add..0ec4be62322b 100644 --- a/drivers/rtc/rtc-ds1302.c +++ b/drivers/rtc/rtc-ds1302.c @@ -102,7 +102,7 @@ static int ds1302_rtc_get_time(struct device *dev, struct rtc_time *time) return rtc_valid_tm(time); } -static struct rtc_class_ops ds1302_rtc_ops = { +static const struct rtc_class_ops ds1302_rtc_ops = { .read_time = ds1302_rtc_get_time, .set_time = ds1302_rtc_set_time, }; diff --git a/drivers/rtc/rtc-gemini.c b/drivers/rtc/rtc-gemini.c index b57505efadbc..688debc14348 100644 --- a/drivers/rtc/rtc-gemini.c +++ b/drivers/rtc/rtc-gemini.c @@ -110,7 +110,7 @@ static int gemini_rtc_set_time(struct device *dev, struct rtc_time *tm) return 0; } -static struct rtc_class_ops 
gemini_rtc_ops = { +static const struct rtc_class_ops gemini_rtc_ops = { .read_time = gemini_rtc_read_time, .set_time = gemini_rtc_set_time, }; diff --git a/drivers/rtc/rtc-jz4740.c b/drivers/rtc/rtc-jz4740.c index b2bcfc0bf2e5..5e14651b71a8 100644 --- a/drivers/rtc/rtc-jz4740.c +++ b/drivers/rtc/rtc-jz4740.c @@ -174,7 +174,7 @@ static int jz4740_rtc_alarm_irq_enable(struct device *dev, unsigned int enable) return jz4740_rtc_ctrl_set_bits(rtc, JZ_RTC_CTRL_AF_IRQ, enable); } -static struct rtc_class_ops jz4740_rtc_ops = { +static const struct rtc_class_ops jz4740_rtc_ops = { .read_time = jz4740_rtc_read_time, .set_mmss = jz4740_rtc_set_mmss, .read_alarm = jz4740_rtc_read_alarm, diff --git a/drivers/rtc/rtc-mcp795.c b/drivers/rtc/rtc-mcp795.c index 025bb33b9cd2..4021fd04cb0a 100644 --- a/drivers/rtc/rtc-mcp795.c +++ b/drivers/rtc/rtc-mcp795.c @@ -151,7 +151,7 @@ static int mcp795_read_time(struct device *dev, struct rtc_time *tim) return rtc_valid_tm(tim); } -static struct rtc_class_ops mcp795_rtc_ops = { +static const struct rtc_class_ops mcp795_rtc_ops = { .read_time = mcp795_read_time, .set_time = mcp795_set_time }; diff --git a/drivers/rtc/rtc-mt6397.c b/drivers/rtc/rtc-mt6397.c index 44f622c3e048..1a61fa56f3ad 100644 --- a/drivers/rtc/rtc-mt6397.c +++ b/drivers/rtc/rtc-mt6397.c @@ -301,7 +301,7 @@ exit: return ret; } -static struct rtc_class_ops mtk_rtc_ops = { +static const struct rtc_class_ops mtk_rtc_ops = { .read_time = mtk_rtc_read_time, .set_time = mtk_rtc_set_time, .read_alarm = mtk_rtc_read_alarm, diff --git a/drivers/rtc/rtc-nuc900.c b/drivers/rtc/rtc-nuc900.c index 09fc1c19f0df..b1b6b3041bfb 100644 --- a/drivers/rtc/rtc-nuc900.c +++ b/drivers/rtc/rtc-nuc900.c @@ -214,7 +214,7 @@ static int nuc900_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm) return 0; } -static struct rtc_class_ops nuc900_rtc_ops = { +static const struct rtc_class_ops nuc900_rtc_ops = { .read_time = nuc900_rtc_read_time, .set_time = nuc900_rtc_set_time, .read_alarm = nuc900_rtc_read_alarm, diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c index ec2e9c5fb993..cadac8e2aa6b 100644 --- a/drivers/rtc/rtc-omap.c +++ b/drivers/rtc/rtc-omap.c @@ -469,7 +469,7 @@ static void omap_rtc_power_off(void) mdelay(2500); } -static struct rtc_class_ops omap_rtc_ops = { +static const struct rtc_class_ops omap_rtc_ops = { .read_time = omap_rtc_read_time, .set_time = omap_rtc_set_time, .read_alarm = omap_rtc_read_alarm, diff --git a/drivers/rtc/rtc-palmas.c b/drivers/rtc/rtc-palmas.c index 6080e0edef63..4bcfb88674d3 100644 --- a/drivers/rtc/rtc-palmas.c +++ b/drivers/rtc/rtc-palmas.c @@ -225,7 +225,7 @@ static irqreturn_t palmas_rtc_interrupt(int irq, void *context) return IRQ_HANDLED; } -static struct rtc_class_ops palmas_rtc_ops = { +static const struct rtc_class_ops palmas_rtc_ops = { .read_time = palmas_rtc_read_time, .set_time = palmas_rtc_set_time, .read_alarm = palmas_rtc_read_alarm, diff --git a/drivers/rtc/rtc-pcf50633.c b/drivers/rtc/rtc-pcf50633.c index e6b6911c8e05..00c31c91b245 100644 --- a/drivers/rtc/rtc-pcf50633.c +++ b/drivers/rtc/rtc-pcf50633.c @@ -232,7 +232,7 @@ static int pcf50633_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm) return ret; } -static struct rtc_class_ops pcf50633_rtc_ops = { +static const struct rtc_class_ops pcf50633_rtc_ops = { .read_time = pcf50633_rtc_read_time, .set_time = pcf50633_rtc_set_time, .read_alarm = pcf50633_rtc_read_alarm, diff --git a/drivers/rtc/rtc-rx6110.c b/drivers/rtc/rtc-rx6110.c index 591793e2b4ac..7c9c08eab5e5 100644 --- 
a/drivers/rtc/rtc-rx6110.c +++ b/drivers/rtc/rtc-rx6110.c @@ -317,7 +317,7 @@ static int rx6110_init(struct rx6110_data *rx6110) return ret; } -static struct rtc_class_ops rx6110_rtc_ops = { +static const struct rtc_class_ops rx6110_rtc_ops = { .read_time = rx6110_get_time, .set_time = rx6110_set_time, }; diff --git a/drivers/rtc/rtc-rx8025.c b/drivers/rtc/rtc-rx8025.c index 2b85cc7a24e7..91857d8d2df8 100644 --- a/drivers/rtc/rtc-rx8025.c +++ b/drivers/rtc/rtc-rx8025.c @@ -403,7 +403,7 @@ static int rx8025_alarm_irq_enable(struct device *dev, unsigned int enabled) return 0; } -static struct rtc_class_ops rx8025_rtc_ops = { +static const struct rtc_class_ops rx8025_rtc_ops = { .read_time = rx8025_get_time, .set_time = rx8025_set_time, .read_alarm = rx8025_read_alarm, diff --git a/drivers/rtc/rtc-spear.c b/drivers/rtc/rtc-spear.c index f05ef8568480..e377f42abae7 100644 --- a/drivers/rtc/rtc-spear.c +++ b/drivers/rtc/rtc-spear.c @@ -343,7 +343,7 @@ static int spear_alarm_irq_enable(struct device *dev, unsigned int enabled) return ret; } -static struct rtc_class_ops spear_rtc_ops = { +static const struct rtc_class_ops spear_rtc_ops = { .read_time = spear_rtc_read_time, .set_time = spear_rtc_set_time, .read_alarm = spear_rtc_read_alarm, diff --git a/drivers/rtc/rtc-stmp3xxx.c b/drivers/rtc/rtc-stmp3xxx.c index e6aaaa52e7fe..d578e40d5a50 100644 --- a/drivers/rtc/rtc-stmp3xxx.c +++ b/drivers/rtc/rtc-stmp3xxx.c @@ -231,7 +231,7 @@ static int stmp3xxx_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm) return 0; } -static struct rtc_class_ops stmp3xxx_rtc_ops = { +static const struct rtc_class_ops stmp3xxx_rtc_ops = { .alarm_irq_enable = stmp3xxx_alarm_irq_enable, .read_time = stmp3xxx_rtc_gettime, diff --git a/drivers/rtc/rtc-tegra.c b/drivers/rtc/rtc-tegra.c index 15ac597d54da..3853ba963bb5 100644 --- a/drivers/rtc/rtc-tegra.c +++ b/drivers/rtc/rtc-tegra.c @@ -291,7 +291,7 @@ static irqreturn_t tegra_rtc_irq_handler(int irq, void *data) return IRQ_HANDLED; } -static struct rtc_class_ops tegra_rtc_ops = { +static const struct rtc_class_ops tegra_rtc_ops = { .read_time = tegra_rtc_read_time, .set_time = tegra_rtc_set_time, .read_alarm = tegra_rtc_read_alarm, diff --git a/drivers/rtc/rtc-twl.c b/drivers/rtc/rtc-twl.c index 2dc787dc06c1..176720b7b9e5 100644 --- a/drivers/rtc/rtc-twl.c +++ b/drivers/rtc/rtc-twl.c @@ -462,7 +462,7 @@ out: return ret; } -static struct rtc_class_ops twl_rtc_ops = { +static const struct rtc_class_ops twl_rtc_ops = { .read_time = twl_rtc_read_time, .set_time = twl_rtc_set_time, .read_alarm = twl_rtc_read_alarm, -- cgit v1.2.3-59-g8ed1b From 7f742e8e8ad4c54ae66820c91bbbb6526a461117 Mon Sep 17 00:00:00 2001 From: Alexandre Belloni Date: Wed, 31 Aug 2016 18:15:27 +0200 Subject: rtc: asm9260: allow COMPILE_TEST The rtc-asm9260 driver compiles correctly on other architectures, add COMPILE_TEST to improve code coverage. Signed-off-by: Alexandre Belloni --- drivers/rtc/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig index 23acc3f536b3..896302a0d4b9 100644 --- a/drivers/rtc/Kconfig +++ b/drivers/rtc/Kconfig @@ -1205,7 +1205,7 @@ comment "on-CPU RTC drivers" config RTC_DRV_ASM9260 tristate "Alphascale asm9260 RTC" - depends on MACH_ASM9260 + depends on MACH_ASM9260 || COMPILE_TEST help If you say yes here you get support for the RTC on the Alphascale asm9260 SoC. 
-- cgit v1.2.3-59-g8ed1b From de75ccdd4118ca41ac473c1ab96365280c631cdd Mon Sep 17 00:00:00 2001 From: Alexandre Belloni Date: Wed, 31 Aug 2016 18:15:28 +0200 Subject: rtc: asm9260: rework locking The rtc-asm9260 driver uses a discrete spinlock (wrongly uninitialized). Use the rtc mutex to lock mmio accesses instead. Signed-off-by: Alexandre Belloni --- drivers/rtc/rtc-asm9260.c | 20 +++++--------------- 1 file changed, 5 insertions(+), 15 deletions(-) (limited to 'drivers') diff --git a/drivers/rtc/rtc-asm9260.c b/drivers/rtc/rtc-asm9260.c index 5219916ce11d..18a93d3e3f93 100644 --- a/drivers/rtc/rtc-asm9260.c +++ b/drivers/rtc/rtc-asm9260.c @@ -112,8 +112,6 @@ struct asm9260_rtc_priv { void __iomem *iobase; struct rtc_device *rtc; struct clk *clk; - /* io lock */ - spinlock_t lock; }; static irqreturn_t asm9260_rtc_irq(int irq, void *dev_id) @@ -122,11 +120,15 @@ static irqreturn_t asm9260_rtc_irq(int irq, void *dev_id) u32 isr; unsigned long events = 0; + mutex_lock(&priv->rtc->ops_lock); isr = ioread32(priv->iobase + HW_CIIR); - if (!isr) + if (!isr) { + mutex_unlock(&priv->rtc->ops_lock); return IRQ_NONE; + } iowrite32(0, priv->iobase + HW_CIIR); + mutex_unlock(&priv->rtc->ops_lock); events |= RTC_AF | RTC_IRQF; @@ -139,9 +141,7 @@ static int asm9260_rtc_read_time(struct device *dev, struct rtc_time *tm) { struct asm9260_rtc_priv *priv = dev_get_drvdata(dev); u32 ctime0, ctime1, ctime2; - unsigned long irq_flags; - spin_lock_irqsave(&priv->lock, irq_flags); ctime0 = ioread32(priv->iobase + HW_CTIME0); ctime1 = ioread32(priv->iobase + HW_CTIME1); ctime2 = ioread32(priv->iobase + HW_CTIME2); @@ -155,7 +155,6 @@ static int asm9260_rtc_read_time(struct device *dev, struct rtc_time *tm) ctime1 = ioread32(priv->iobase + HW_CTIME1); ctime2 = ioread32(priv->iobase + HW_CTIME2); } - spin_unlock_irqrestore(&priv->lock, irq_flags); tm->tm_sec = (ctime0 >> BM_CTIME0_SEC_S) & BM_CTIME0_SEC_M; tm->tm_min = (ctime0 >> BM_CTIME0_MIN_S) & BM_CTIME0_MIN_M; @@ -174,9 +173,7 @@ static int asm9260_rtc_read_time(struct device *dev, struct rtc_time *tm) static int asm9260_rtc_set_time(struct device *dev, struct rtc_time *tm) { struct asm9260_rtc_priv *priv = dev_get_drvdata(dev); - unsigned long irq_flags; - spin_lock_irqsave(&priv->lock, irq_flags); /* * make sure SEC counter will not flip other counter on write time, * real value will be written at the enf of sequence. @@ -191,7 +188,6 @@ static int asm9260_rtc_set_time(struct device *dev, struct rtc_time *tm) iowrite32(tm->tm_hour, priv->iobase + HW_HOUR); iowrite32(tm->tm_min, priv->iobase + HW_MIN); iowrite32(tm->tm_sec, priv->iobase + HW_SEC); - spin_unlock_irqrestore(&priv->lock, irq_flags); return 0; } @@ -199,9 +195,7 @@ static int asm9260_rtc_set_time(struct device *dev, struct rtc_time *tm) static int asm9260_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm) { struct asm9260_rtc_priv *priv = dev_get_drvdata(dev); - unsigned long irq_flags; - spin_lock_irqsave(&priv->lock, irq_flags); alrm->time.tm_year = ioread32(priv->iobase + HW_ALYEAR); alrm->time.tm_mon = ioread32(priv->iobase + HW_ALMON); alrm->time.tm_mday = ioread32(priv->iobase + HW_ALDOM); @@ -213,7 +207,6 @@ static int asm9260_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm) alrm->enabled = ioread32(priv->iobase + HW_AMR) ? 1 : 0; alrm->pending = ioread32(priv->iobase + HW_CIIR) ? 
1 : 0; - spin_unlock_irqrestore(&priv->lock, irq_flags); return rtc_valid_tm(&alrm->time); } @@ -221,9 +214,7 @@ static int asm9260_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm) { struct asm9260_rtc_priv *priv = dev_get_drvdata(dev); - unsigned long irq_flags; - spin_lock_irqsave(&priv->lock, irq_flags); iowrite32(alrm->time.tm_year, priv->iobase + HW_ALYEAR); iowrite32(alrm->time.tm_mon, priv->iobase + HW_ALMON); iowrite32(alrm->time.tm_mday, priv->iobase + HW_ALDOM); @@ -234,7 +225,6 @@ static int asm9260_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm) iowrite32(alrm->time.tm_sec, priv->iobase + HW_ALSEC); iowrite32(alrm->enabled ? 0 : BM_AMR_OFF, priv->iobase + HW_AMR); - spin_unlock_irqrestore(&priv->lock, irq_flags); return 0; } -- cgit v1.2.3-59-g8ed1b From 0b9e2988ab2261fd6d4a0039edf81ed1e3662be8 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 5 Sep 2016 17:21:45 +0200 Subject: ahci: use pci_alloc_irq_vectors Use the new pci_alloc_irq_vectors API to allocate MSI-X and MSI vectors. The big advantage over the old code is that we can use the same API for MSI and MSI-X, and that we don't need to store the MSI-X vector mapping in driver-private data structures. This first conversion keeps the probe order as-is: MSI-X multi vector, MSI multi vector, MSI single vector, MSI-X single vector and finally a single legacy interrupt line. There is one small change of behavior: we now check the "MSI Revert to Single Message" flag for MSI-X in addition to MSI. Because the API to find the Linux IRQ number for an MSI/MSI-X vector is PCI specific, but libahci is bus-agnostic, I had to add a get_irq_vector function pointer to struct ahci_host_priv. The alternative would be to move the multi-vector case of ahci_host_activate to ahci.c and just call ata_host_activate directly from the other users of ahci_host_activate. Signed-off-by: Christoph Hellwig Signed-off-by: Tejun Heo --- drivers/ata/ahci.c | 149 +++++++++++--------------------------------- drivers/ata/ahci.h | 24 ++------ drivers/ata/libahci.c | 11 +++- 3 files changed, 45 insertions(+), 139 deletions(-) (limited to 'drivers') diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 90eabaf81215..ba5f11cebee2 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -1400,142 +1400,56 @@ static irqreturn_t ahci_thunderx_irq_handler(int irq, void *dev_instance) } #endif -/* - * ahci_init_msix() - optionally enable per-port MSI-X otherwise defer - * to single msi. - */ -static int ahci_init_msix(struct pci_dev *pdev, unsigned int n_ports, - struct ahci_host_priv *hpriv, unsigned long flags) +static int ahci_get_irq_vector(struct ata_host *host, int port) { - int nvec, i, rc; - - /* Do not init MSI-X if MSI is disabled for the device */ - if (hpriv->flags & AHCI_HFLAG_NO_MSI) - return -ENODEV; - - nvec = pci_msix_vec_count(pdev); - if (nvec < 0) - return nvec; - - /* - * Proper MSI-X implementations will have a vector per-port. - * Barring that, we prefer single-MSI over single-MSIX. If this - * check fails (not enough MSI-X vectors for all ports) we will - * be called again with the flag clear iff ahci_init_msi() - * fails. - */ - if (flags & AHCI_HFLAG_MULTI_MSIX) { - if (nvec < n_ports) - return -ENODEV; - nvec = n_ports; - } else if (nvec) { - nvec = 1; - } else { - /* - * Emit dev_err() since this was the non-legacy irq - * method of last resort.
- */ - rc = -ENODEV; - goto fail; - } - - for (i = 0; i < nvec; i++) - hpriv->msix[i].entry = i; - rc = pci_enable_msix_exact(pdev, hpriv->msix, nvec); - if (rc < 0) - goto fail; - - if (nvec > 1) - hpriv->flags |= AHCI_HFLAG_MULTI_MSIX; - hpriv->irq = hpriv->msix[0].vector; /* for single msi-x */ - - return nvec; -fail: - dev_err(&pdev->dev, - "failed to enable MSI-X with error %d, # of vectors: %d\n", - rc, nvec); - - return rc; + return pci_irq_vector(to_pci_dev(host->dev), port); } static int ahci_init_msi(struct pci_dev *pdev, unsigned int n_ports, struct ahci_host_priv *hpriv) { - int rc, nvec; + int nvec; if (hpriv->flags & AHCI_HFLAG_NO_MSI) return -ENODEV; - nvec = pci_msi_vec_count(pdev); - if (nvec < 0) - return nvec; - /* * If number of MSIs is less than number of ports then Sharing Last * Message mode could be enforced. In this case assume that advantage * of multipe MSIs is negated and use single MSI mode instead. */ - if (nvec < n_ports) - goto single_msi; - - rc = pci_enable_msi_exact(pdev, nvec); - if (rc == -ENOSPC) - goto single_msi; - if (rc < 0) - return rc; + nvec = pci_alloc_irq_vectors(pdev, n_ports, INT_MAX, + PCI_IRQ_MSIX | PCI_IRQ_MSI); + if (nvec > 0) { + if (!(readl(hpriv->mmio + HOST_CTL) & HOST_MRSM)) { + hpriv->get_irq_vector = ahci_get_irq_vector; + hpriv->flags |= AHCI_HFLAG_MULTI_MSI; + return nvec; + } - /* fallback to single MSI mode if the controller enforced MRSM mode */ - if (readl(hpriv->mmio + HOST_CTL) & HOST_MRSM) { - pci_disable_msi(pdev); + /* + * Fallback to single MSI mode if the controller enforced MRSM + * mode. + */ printk(KERN_INFO "ahci: MRSM is on, fallback to single MSI\n"); - goto single_msi; + pci_free_irq_vectors(pdev); } - if (nvec > 1) - hpriv->flags |= AHCI_HFLAG_MULTI_MSI; - - goto out; - -single_msi: - nvec = 1; - - rc = pci_enable_msi(pdev); - if (rc < 0) - return rc; -out: - hpriv->irq = pdev->irq; - - return nvec; -} - -static int ahci_init_interrupts(struct pci_dev *pdev, unsigned int n_ports, - struct ahci_host_priv *hpriv) -{ - int nvec; - /* - * Try to enable per-port MSI-X. If the host is not capable - * fall back to single MSI before finally attempting single - * MSI-X. + * -ENOSPC indicated we don't have enough vectors. Don't bother trying + * a single vectors for any other error: */ - nvec = ahci_init_msix(pdev, n_ports, hpriv, AHCI_HFLAG_MULTI_MSIX); - if (nvec >= 0) + if (nvec < 0 && nvec != -ENOSPC) return nvec; - nvec = ahci_init_msi(pdev, n_ports, hpriv); - if (nvec >= 0) - return nvec; - - /* try single-msix */ - nvec = ahci_init_msix(pdev, n_ports, hpriv, 0); - if (nvec >= 0) + /* + * If the host is not capable of supporting per-port vectors, fall + * back to single MSI before finally attempting single MSI-X. 
+ */ + nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI); + if (nvec == 1) return nvec; - - /* legacy intx interrupts */ - pci_intx(pdev, 1); - hpriv->irq = pdev->irq; - - return 0; + return pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSIX); } static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) @@ -1698,11 +1612,12 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (!host) return -ENOMEM; host->private_data = hpriv; - hpriv->msix = devm_kzalloc(&pdev->dev, - sizeof(struct msix_entry) * n_ports, GFP_KERNEL); - if (!hpriv->msix) - return -ENOMEM; - ahci_init_interrupts(pdev, n_ports, hpriv); + + if (ahci_init_msi(pdev, n_ports, hpriv) < 0) { + /* legacy intx interrupts */ + pci_intx(pdev, 1); + } + hpriv->irq = pdev->irq; if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss) host->flags |= ATA_HOST_PARALLEL_SCAN; diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h index 70b06bcfb7e3..0cc08f892fea 100644 --- a/drivers/ata/ahci.h +++ b/drivers/ata/ahci.h @@ -242,12 +242,10 @@ enum { AHCI_HFLAG_NO_FBS = (1 << 18), /* no FBS */ #ifdef CONFIG_PCI_MSI - AHCI_HFLAG_MULTI_MSI = (1 << 20), /* multiple PCI MSIs */ - AHCI_HFLAG_MULTI_MSIX = (1 << 21), /* per-port MSI-X */ + AHCI_HFLAG_MULTI_MSI = (1 << 20), /* per-port MSI(-X) */ #else /* compile out MSI infrastructure */ AHCI_HFLAG_MULTI_MSI = 0, - AHCI_HFLAG_MULTI_MSIX = 0, #endif AHCI_HFLAG_WAKE_BEFORE_STOP = (1 << 22), /* wake before DMA stop */ @@ -351,7 +349,6 @@ struct ahci_host_priv { * the PHY position in this array. */ struct phy **phys; - struct msix_entry *msix; /* Optional MSI-X support */ unsigned nports; /* Number of ports */ void *plat_data; /* Other platform data */ unsigned int irq; /* interrupt line */ @@ -362,22 +359,11 @@ struct ahci_host_priv { */ void (*start_engine)(struct ata_port *ap); irqreturn_t (*irq_handler)(int irq, void *dev_instance); -}; -#ifdef CONFIG_PCI_MSI -static inline int ahci_irq_vector(struct ahci_host_priv *hpriv, int port) -{ - if (hpriv->flags & AHCI_HFLAG_MULTI_MSIX) - return hpriv->msix[port].vector; - else - return hpriv->irq + port; -} -#else -static inline int ahci_irq_vector(struct ahci_host_priv *hpriv, int port) -{ - return hpriv->irq; -} -#endif + /* only required for per-port MSI(-X) support */ + int (*get_irq_vector)(struct ata_host *host, + int port); +}; extern int ahci_ignore_sss; diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c index 5a1329e31609..0d028ead99e8 100644 --- a/drivers/ata/libahci.c +++ b/drivers/ata/libahci.c @@ -2378,7 +2378,7 @@ static int ahci_port_start(struct ata_port *ap) /* * Switch to per-port locking in case each port has its own MSI vector. 
*/ - if (hpriv->flags & (AHCI_HFLAG_MULTI_MSI | AHCI_HFLAG_MULTI_MSIX)) { + if (hpriv->flags & AHCI_HFLAG_MULTI_MSI) { spin_lock_init(&pp->lock); ap->lock = &pp->lock; } @@ -2520,7 +2520,7 @@ static int ahci_host_activate_multi_irqs(struct ata_host *host, */ for (i = 0; i < host->n_ports; i++) { struct ahci_port_priv *pp = host->ports[i]->private_data; - int irq = ahci_irq_vector(hpriv, i); + int irq = hpriv->get_irq_vector(host, i); /* Do not receive interrupts sent by dummy ports */ if (!pp) { @@ -2556,10 +2556,15 @@ int ahci_host_activate(struct ata_host *host, struct scsi_host_template *sht) int irq = hpriv->irq; int rc; - if (hpriv->flags & (AHCI_HFLAG_MULTI_MSI | AHCI_HFLAG_MULTI_MSIX)) { + if (hpriv->flags & AHCI_HFLAG_MULTI_MSI) { if (hpriv->irq_handler) dev_warn(host->dev, "both AHCI_HFLAG_MULTI_MSI flag set and custom irq handler implemented\n"); + if (!hpriv->get_irq_vector) { + dev_err(host->dev, + "AHCI_HFLAG_MULTI_MSI requires ->get_irq_vector!\n"); + return -EIO; + } rc = ahci_host_activate_multi_irqs(host, sht); } else { -- cgit v1.2.3-59-g8ed1b From 0ce1b18c42a5b1613ad84d3bf93ee4f5bb3f8b33 Mon Sep 17 00:00:00 2001 From: Shaun Tancheff Date: Fri, 9 Sep 2016 11:44:19 -0500 Subject: libata: Some drives failing on SCT Write Same Restrict support SCT Write Same to devices which also support ZAC where support is required. Reported-by: Mike Krinkin Signed-off-by: Shaun Tancheff Signed-off-by: Tejun Heo --- drivers/ata/libata-scsi.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index 2f5487f02de1..9cceb4a875a5 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -3562,9 +3562,9 @@ static unsigned int ata_scsiop_maint_in(struct ata_scsi_args *args, u8 *rbuf) supported = 3; break; case WRITE_SAME_16: - if (ata_id_sct_write_same(dev->id)) - supported = 3; - break; + if (!ata_id_sct_write_same(dev->id)) + break; + /* fallthrough: if SCT ... only enable for ZBC */ case ZBC_IN: case ZBC_OUT: if (ata_id_zoned_cap(dev->id) || -- cgit v1.2.3-59-g8ed1b From 970fc7f4afd52d638d88aeda985ea03ccd33acee Mon Sep 17 00:00:00 2001 From: Pratyush Anand Date: Thu, 15 Sep 2016 09:38:16 +0530 Subject: rtc: cmos: Initialize hpet timer before irq is registered We have observed on few x86 machines with rtc-cmos device that hpet_rtc_interrupt() is called just after irq registration and before cmos_do_probe() could call hpet_rtc_timer_init(). So, neither hpet_default_delta nor hpet_t1_cmp is initialized by the time interrupt is raised in the given situation, and this results in NMI watchdog LOCKUP. It has only been observed sporadically on kdump secondary kernels. See the call trace: ---<-snip->--- [ 27.913194] Kernel panic - not syncing: Watchdog detected hard LOCKUP on cpu 0 [ 27.915371] CPU: 0 PID: 1 Comm: swapper/0 Not tainted 3.10.0-342.el7.x86_64 #1 [ 27.917503] Hardware name: HP ProLiant DL160 Gen8, BIOS J03 02/10/2014 [ 27.919455] ffffffff8186a728 0000000059c82488 ffff880034e05af0 ffffffff81637bd4 [ 27.921870] ffff880034e05b70 ffffffff8163144a 0000000000000010 ffff880034e05b80 [ 27.924257] ffff880034e05b20 0000000059c82488 0000000000000000 0000000000000000 [ 27.926599] Call Trace: [ 27.927352] [] dump_stack+0x19/0x1b [ 27.929080] [] panic+0xd8/0x1e7 [ 27.930588] [] ? 
restart_watchdog_hrtimer+0x50/0x50 [ 27.932502] [] watchdog_overflow_callback+0xc2/0xd0 [ 27.934427] [] __perf_event_overflow+0xa1/0x250 [ 27.936232] [] perf_event_overflow+0x14/0x20 [ 27.937957] [] intel_pmu_handle_irq+0x1e8/0x470 [ 27.939799] [] perf_event_nmi_handler+0x2b/0x50 [ 27.941649] [] nmi_handle.isra.0+0x69/0xb0 [ 27.943348] [] do_nmi+0x169/0x340 [ 27.944802] [] end_repeat_nmi+0x1e/0x2e [ 27.946424] [] ? hpet_rtc_interrupt+0x85/0x380 [ 27.948197] [] ? hpet_rtc_interrupt+0x85/0x380 [ 27.949992] [] ? hpet_rtc_interrupt+0x85/0x380 [ 27.951816] <> [] ? run_timer_softirq+0x43/0x340 [ 27.954114] [] handle_irq_event_percpu+0x3e/0x1e0 [ 27.955962] [] handle_irq_event+0x3d/0x60 [ 27.957635] [] handle_edge_irq+0x77/0x130 [ 27.959332] [] handle_irq+0xbf/0x150 [ 27.960949] [] do_IRQ+0x4f/0xf0 [ 27.962434] [] common_interrupt+0x6d/0x6d [ 27.964101] [] ? _raw_spin_unlock_irqrestore+0x1b/0x40 [ 27.966308] [] __setup_irq+0x2a7/0x570 [ 28.067859] [] ? hpet_cpuhp_notify+0x140/0x140 [ 28.069709] [] request_threaded_irq+0xcc/0x170 [ 28.071585] [] cmos_do_probe+0x1e6/0x450 [ 28.073240] [] ? cmos_do_probe+0x450/0x450 [ 28.074911] [] cmos_pnp_probe+0xbb/0xc0 [ 28.076533] [] pnp_device_probe+0x65/0xd0 [ 28.078198] [] driver_probe_device+0x87/0x390 [ 28.079971] [] __driver_attach+0x93/0xa0 [ 28.081660] [] ? __device_attach+0x40/0x40 [ 28.083662] [] bus_for_each_dev+0x73/0xc0 [ 28.085370] [] driver_attach+0x1e/0x20 [ 28.086974] [] bus_add_driver+0x200/0x2d0 [ 28.088634] [] ? rtc_sysfs_init+0xe/0xe [ 28.090349] [] driver_register+0x64/0xf0 [ 28.091989] [] pnp_register_driver+0x20/0x30 [ 28.093707] [] cmos_init+0x11/0x71 ---<-snip->--- This patch moves hpet_rtc_timer_init() before IRQ registration, so that we can gracefully handle such spurious interrupts. It also masks HPET RTC interrupts, in case IRQ registration fails. We were able to reproduce the problem in maximum 15 trials of kdump secondary kernel boot on an hp-dl160gen8 FCoE host machine without this patch. However, more than 35 trials went fine after applying this patch. Suggested-by: Thomas Gleixner Signed-off-by: Pratyush Anand Acked-by: Thomas Gleixner Signed-off-by: Alexandre Belloni --- drivers/rtc/rtc-cmos.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c index 43745cac0141..fddde655cbd4 100644 --- a/drivers/rtc/rtc-cmos.c +++ b/drivers/rtc/rtc-cmos.c @@ -707,6 +707,8 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq) goto cleanup1; } + hpet_rtc_timer_init(); + if (is_valid_irq(rtc_irq)) { irq_handler_t rtc_cmos_int_handler; @@ -714,6 +716,7 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq) rtc_cmos_int_handler = hpet_rtc_interrupt; retval = hpet_register_irq_handler(cmos_interrupt); if (retval) { + hpet_mask_rtc_irq_bit(RTC_IRQMASK); dev_warn(dev, "hpet_register_irq_handler " " failed in rtc_init()."); goto cleanup1; @@ -729,7 +732,6 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq) goto cleanup1; } } - hpet_rtc_timer_init(); /* export at least the first block of NVRAM */ nvram.size = address_space - NVRAM_OFFSET; -- cgit v1.2.3-59-g8ed1b From 6ec76070f17993aa9ac0344330b971783d10c9c2 Mon Sep 17 00:00:00 2001 From: Harman Kalra Date: Wed, 21 Sep 2016 01:14:40 +0530 Subject: ata: sata_mv: Replacing dma_pool_alloc and memset with a single call dma_pool_zalloc. 
Replacing dma_pool_alloc and memset with a single call to dma_pool_zalloc Signed-off-by: Harman Kalra Signed-off-by: Tejun Heo --- drivers/ata/sata_mv.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c index 745489a1c86a..efc48bf89d51 100644 --- a/drivers/ata/sata_mv.c +++ b/drivers/ata/sata_mv.c @@ -1727,15 +1727,13 @@ static int mv_port_start(struct ata_port *ap) return -ENOMEM; ap->private_data = pp; - pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma); + pp->crqb = dma_pool_zalloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma); if (!pp->crqb) return -ENOMEM; - memset(pp->crqb, 0, MV_CRQB_Q_SZ); - pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma); + pp->crpb = dma_pool_zalloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma); if (!pp->crpb) goto out_port_free_dma_mem; - memset(pp->crpb, 0, MV_CRPB_Q_SZ); /* 6041/6081 Rev. "C0" (and newer) are okay with async notify */ if (hpriv->hp_flags & MV_HP_ERRATA_60X1C0) -- cgit v1.2.3-59-g8ed1b From 97ea1906b3c2201273ea6bb40c43c611c056ddb3 Mon Sep 17 00:00:00 2001 From: Marcin Niestroj Date: Fri, 16 Sep 2016 11:26:28 +0200 Subject: rtc: omap: Support ext_wakeup configuration Support configuration of ext_wakeup sources. This patch makes it possible to enable ext_wakeup and set it's polarity, depending on board configuration. AM335x's dedicated PMIC (tps65217) uses ext_wakeup to notify about power-button presses. Handling power-button presses enables to recover from RTC-only power states correctly. Signed-off-by: Marcin Niestroj Acked-by: Tony Lindgren Acked-by: Rob Herring Signed-off-by: Alexandre Belloni --- Documentation/devicetree/bindings/rtc/rtc-omap.txt | 21 +++ drivers/rtc/Kconfig | 3 + drivers/rtc/rtc-omap.c | 168 ++++++++++++++++++++- 3 files changed, 184 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/Documentation/devicetree/bindings/rtc/rtc-omap.txt b/Documentation/devicetree/bindings/rtc/rtc-omap.txt index bf7d11ae9bea..bee41f97044e 100644 --- a/Documentation/devicetree/bindings/rtc/rtc-omap.txt +++ b/Documentation/devicetree/bindings/rtc/rtc-omap.txt @@ -18,6 +18,18 @@ Optional properties: through pmic_power_en - clocks: Any internal or external clocks feeding in to rtc - clock-names: Corresponding names of the clocks +- pinctrl-0: a phandle pointing to the pin settings for the device +- pinctrl-names: should be "default" + +Optional subnodes: +- generic pinctrl node + +Required pinctrl subnodes properties: +- pins - Names of ext_wakeup pins to configure + +Optional pinctrl subnodes properties: +- input-enable - Enables ext_wakeup +- ti,active-high - Set input active high (by default active low) Example: @@ -30,4 +42,13 @@ rtc@1c23000 { system-power-controller; clocks = <&clk_32k_rtc>, <&clk_32768_ck>; clock-names = "ext-clk", "int-clk"; + + pinctrl-0 = <&ext_wakeup>; + pinctrl-names = "default"; + + ext_wakeup: ext-wakeup { + pins = "ext_wakeup0"; + input-enable; + ti,active-high; + }; }; diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig index 896302a0d4b9..531343a260f8 100644 --- a/drivers/rtc/Kconfig +++ b/drivers/rtc/Kconfig @@ -1245,6 +1245,9 @@ config RTC_DRV_IMXDI config RTC_DRV_OMAP tristate "TI OMAP Real Time Clock" depends on ARCH_OMAP || ARCH_DAVINCI || COMPILE_TEST + depends on OF + depends on PINCTRL + select GENERIC_PINCONF help Say "yes" here to support the on chip real time clock present on TI OMAP1, AM33xx, DA8xx/OMAP-L13x, AM43xx and DRA7xx. 
diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c index cadac8e2aa6b..b04ea9b5ae67 100644 --- a/drivers/rtc/rtc-omap.c +++ b/drivers/rtc/rtc-omap.c @@ -13,19 +13,23 @@ * 2 of the License, or (at your option) any later version. */ -#include +#include +#include +#include +#include #include -#include +#include #include -#include -#include -#include -#include +#include +#include #include #include +#include +#include +#include +#include #include -#include -#include +#include /* * The OMAP RTC is a year/month/day/hours/minutes/seconds BCD clock @@ -115,6 +119,8 @@ /* OMAP_RTC_PMIC bit fields: */ #define OMAP_RTC_PMIC_POWER_EN_EN BIT(16) +#define OMAP_RTC_PMIC_EXT_WKUP_EN(x) BIT(x) +#define OMAP_RTC_PMIC_EXT_WKUP_POL(x) BIT(4 + x) /* OMAP_RTC_KICKER values */ #define KICK0_VALUE 0x83e70b13 @@ -141,6 +147,7 @@ struct omap_rtc { bool is_pmic_controller; bool has_ext_clk; const struct omap_rtc_device_type *type; + struct pinctrl_dev *pctldev; }; static inline u8 rtc_read(struct omap_rtc *rtc, unsigned int reg) @@ -525,6 +532,139 @@ static const struct of_device_id omap_rtc_of_match[] = { }; MODULE_DEVICE_TABLE(of, omap_rtc_of_match); +static const struct pinctrl_pin_desc rtc_pins_desc[] = { + PINCTRL_PIN(0, "ext_wakeup0"), + PINCTRL_PIN(1, "ext_wakeup1"), + PINCTRL_PIN(2, "ext_wakeup2"), + PINCTRL_PIN(3, "ext_wakeup3"), +}; + +static int rtc_pinctrl_get_groups_count(struct pinctrl_dev *pctldev) +{ + return 0; +} + +static const char *rtc_pinctrl_get_group_name(struct pinctrl_dev *pctldev, + unsigned int group) +{ + return NULL; +} + +static const struct pinctrl_ops rtc_pinctrl_ops = { + .get_groups_count = rtc_pinctrl_get_groups_count, + .get_group_name = rtc_pinctrl_get_group_name, + .dt_node_to_map = pinconf_generic_dt_node_to_map_pin, + .dt_free_map = pinconf_generic_dt_free_map, +}; + +enum rtc_pin_config_param { + PIN_CONFIG_ACTIVE_HIGH = PIN_CONFIG_END + 1, +}; + +static const struct pinconf_generic_params rtc_params[] = { + {"ti,active-high", PIN_CONFIG_ACTIVE_HIGH, 0}, +}; + +#ifdef CONFIG_DEBUG_FS +static const struct pin_config_item rtc_conf_items[ARRAY_SIZE(rtc_params)] = { + PCONFDUMP(PIN_CONFIG_ACTIVE_HIGH, "input active high", NULL, false), +}; +#endif + +static int rtc_pinconf_get(struct pinctrl_dev *pctldev, + unsigned int pin, unsigned long *config) +{ + struct omap_rtc *rtc = pinctrl_dev_get_drvdata(pctldev); + unsigned int param = pinconf_to_config_param(*config); + u32 val; + u16 arg = 0; + + rtc->type->unlock(rtc); + val = rtc_readl(rtc, OMAP_RTC_PMIC_REG); + rtc->type->lock(rtc); + + switch (param) { + case PIN_CONFIG_INPUT_ENABLE: + if (!(val & OMAP_RTC_PMIC_EXT_WKUP_EN(pin))) + return -EINVAL; + break; + case PIN_CONFIG_ACTIVE_HIGH: + if (val & OMAP_RTC_PMIC_EXT_WKUP_POL(pin)) + return -EINVAL; + break; + default: + return -ENOTSUPP; + }; + + *config = pinconf_to_config_packed(param, arg); + + return 0; +} + +static int rtc_pinconf_set(struct pinctrl_dev *pctldev, + unsigned int pin, unsigned long *configs, + unsigned int num_configs) +{ + struct omap_rtc *rtc = pinctrl_dev_get_drvdata(pctldev); + u32 val; + unsigned int param; + u16 param_val; + int i; + + rtc->type->unlock(rtc); + val = rtc_readl(rtc, OMAP_RTC_PMIC_REG); + rtc->type->lock(rtc); + + /* active low by default */ + val |= OMAP_RTC_PMIC_EXT_WKUP_POL(pin); + + for (i = 0; i < num_configs; i++) { + param = pinconf_to_config_param(configs[i]); + param_val = pinconf_to_config_argument(configs[i]); + + switch (param) { + case PIN_CONFIG_INPUT_ENABLE: + if (param_val) + val |= 
OMAP_RTC_PMIC_EXT_WKUP_EN(pin); + else + val &= ~OMAP_RTC_PMIC_EXT_WKUP_EN(pin); + break; + case PIN_CONFIG_ACTIVE_HIGH: + val &= ~OMAP_RTC_PMIC_EXT_WKUP_POL(pin); + break; + default: + dev_err(&rtc->rtc->dev, "Property %u not supported\n", + param); + return -ENOTSUPP; + } + } + + rtc->type->unlock(rtc); + rtc_writel(rtc, OMAP_RTC_PMIC_REG, val); + rtc->type->lock(rtc); + + return 0; +} + +static const struct pinconf_ops rtc_pinconf_ops = { + .is_generic = true, + .pin_config_get = rtc_pinconf_get, + .pin_config_set = rtc_pinconf_set, +}; + +static struct pinctrl_desc rtc_pinctrl_desc = { + .pins = rtc_pins_desc, + .npins = ARRAY_SIZE(rtc_pins_desc), + .pctlops = &rtc_pinctrl_ops, + .confops = &rtc_pinconf_ops, + .custom_params = rtc_params, + .num_custom_params = ARRAY_SIZE(rtc_params), +#ifdef CONFIG_DEBUG_FS + .custom_conf_items = rtc_conf_items, +#endif + .owner = THIS_MODULE, +}; + static int omap_rtc_probe(struct platform_device *pdev) { struct omap_rtc *rtc; @@ -681,6 +821,15 @@ static int omap_rtc_probe(struct platform_device *pdev) } } + /* Support ext_wakeup pinconf */ + rtc_pinctrl_desc.name = dev_name(&pdev->dev); + + rtc->pctldev = pinctrl_register(&rtc_pinctrl_desc, &pdev->dev, rtc); + if (IS_ERR(rtc->pctldev)) { + dev_err(&pdev->dev, "Couldn't register pinctrl driver\n"); + return PTR_ERR(rtc->pctldev); + } + return 0; err: @@ -724,6 +873,9 @@ static int __exit omap_rtc_remove(struct platform_device *pdev) pm_runtime_put_sync(&pdev->dev); pm_runtime_disable(&pdev->dev); + /* Remove ext_wakeup pinconf */ + pinctrl_unregister(rtc->pctldev); + return 0; } -- cgit v1.2.3-59-g8ed1b From 983bf1256edb477a376b6ce95adf36e13bc88f9a Mon Sep 17 00:00:00 2001 From: Gabriele Mazzotta Date: Tue, 20 Sep 2016 01:12:43 +0200 Subject: rtc: cmos: Clear ACPI-driven alarms upon resume Currently ACPI-driven alarms are not cleared when they wake the system. As consequence, expired alarms must be manually cleared to program a new alarm. Fix this by correctly handling ACPI-driven alarms. More specifically, the ACPI specification [1] provides for two alternative implementations of the RTC. Depending on the implementation, the driver either clear the alarm from the resume callback or from ACPI interrupt handler: - The platform has the RTC wakeup status fixed in hardware (ACPI_FADT_FIXED_RTC is 0). In this case the driver can determine if the RTC was the reason of the wakeup from the resume callback by reading the RTC status register. - The platform has no fixed hardware feature event bits. In this case a GPE is used to wake the system and the driver clears the alarm from its handler. 
[1] http://www.acpi.info/DOWNLOADS/ACPI_5_Errata%20A.pdf Signed-off-by: Gabriele Mazzotta Signed-off-by: Alexandre Belloni --- drivers/rtc/rtc-cmos.c | 47 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 47 insertions(+) (limited to 'drivers') diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c index fddde655cbd4..e8f3a212d09a 100644 --- a/drivers/rtc/rtc-cmos.c +++ b/drivers/rtc/rtc-cmos.c @@ -899,6 +899,9 @@ static inline int cmos_poweroff(struct device *dev) #ifdef CONFIG_PM_SLEEP +static void cmos_check_acpi_rtc_status(struct device *dev, + unsigned char *rtc_control); + static int cmos_resume(struct device *dev) { struct cmos_rtc *cmos = dev_get_drvdata(dev); @@ -938,6 +941,9 @@ static int cmos_resume(struct device *dev) tmp &= ~RTC_AIE; hpet_mask_rtc_irq_bit(RTC_AIE); } while (mask & RTC_AIE); + + if (tmp & RTC_AIE) + cmos_check_acpi_rtc_status(dev, &tmp); } spin_unlock_irq(&rtc_lock); @@ -975,6 +981,20 @@ static SIMPLE_DEV_PM_OPS(cmos_pm_ops, cmos_suspend, cmos_resume); static u32 rtc_handler(void *context) { struct device *dev = context; + struct cmos_rtc *cmos = dev_get_drvdata(dev); + unsigned char rtc_control = 0; + unsigned char rtc_intr; + + spin_lock_irq(&rtc_lock); + if (cmos_rtc.suspend_ctrl) + rtc_control = CMOS_READ(RTC_CONTROL); + if (rtc_control & RTC_AIE) { + cmos_rtc.suspend_ctrl &= ~RTC_AIE; + CMOS_WRITE(rtc_control, RTC_CONTROL); + rtc_intr = CMOS_READ(RTC_INTR_FLAGS); + rtc_update_irq(cmos->rtc, 1, rtc_intr); + } + spin_unlock_irq(&rtc_lock); pm_wakeup_event(dev, 0); acpi_clear_event(ACPI_EVENT_RTC); @@ -1041,12 +1061,39 @@ static void cmos_wake_setup(struct device *dev) device_init_wakeup(dev, 1); } +static void cmos_check_acpi_rtc_status(struct device *dev, + unsigned char *rtc_control) +{ + struct cmos_rtc *cmos = dev_get_drvdata(dev); + acpi_event_status rtc_status; + acpi_status status; + + if (acpi_gbl_FADT.flags & ACPI_FADT_FIXED_RTC) + return; + + status = acpi_get_event_status(ACPI_EVENT_RTC, &rtc_status); + if (ACPI_FAILURE(status)) { + dev_err(dev, "Could not get RTC status\n"); + } else if (rtc_status & ACPI_EVENT_FLAG_SET) { + unsigned char mask; + *rtc_control &= ~RTC_AIE; + CMOS_WRITE(*rtc_control, RTC_CONTROL); + mask = CMOS_READ(RTC_INTR_FLAGS); + rtc_update_irq(cmos->rtc, 1, mask); + } +} + #else static void cmos_wake_setup(struct device *dev) { } +static void cmos_check_acpi_rtc_status(struct device *dev, + unsigned char *rtc_control) +{ +} + #endif #ifdef CONFIG_PNP -- cgit v1.2.3-59-g8ed1b From 68669d55f7ad31832692254485a07b6e412ae082 Mon Sep 17 00:00:00 2001 From: Gabriele Mazzotta Date: Tue, 20 Sep 2016 01:12:44 +0200 Subject: rtc: cmos: Restore alarm after resume Some platform firmware may interfere with the RTC alarm over suspend, resulting in the kernel and hardware having different ideas about system state but also potentially causing problems with firmware that assumes the OS will clean this case up. This patch restores the RTC alarm on resume to ensure that kernel and hardware are in sync. The case we've seen is Intel Rapid Start, which is a firmware-mediated feature that automatically transitions systems from suspend-to-RAM to suspend-to-disk without OS involvement. It does this by setting the RTC alarm and a flag that indicates that on wake it should perform the transition rather than re-starting the OS. However, if the OS has set a wakeup alarm that would wake the machine earlier, it refuses to overwrite it and allows the system to wake instead. 
This fails in the following situation: 1) User configures Intel Rapid Start to transition after (say) 15 minutes 2) User suspends to RAM. Firmware sets the wakeup alarm for 15 minutes in the future 3) User resumes after 5 minutes. Firmware does not reset the alarm, and as such it is still set for 10 minutes in the future 4) User suspends after 5 minutes. Firmware notices that the alarm is set for 5 minutes in the future, which is less than the 15 minute transition threshold. It therefore assumes that the user wants the machine to wake in 5 minutes 5) System resumes after 5 minutes The worst case scenario here is that the user may have put the system in a bag between (4) and (5), resulting in it running in a confined space and potentially overheating. This seems reasonably important. The Rapid Start support code got added in 3.11, but it can be configured in the firmware regardless of kernel support. Signed-off-by: Gabriele Mazzotta Signed-off-by: Alexandre Belloni --- drivers/rtc/rtc-cmos.c | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) (limited to 'drivers') diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c index e8f3a212d09a..2943a0d58a3a 100644 --- a/drivers/rtc/rtc-cmos.c +++ b/drivers/rtc/rtc-cmos.c @@ -62,6 +62,8 @@ struct cmos_rtc { u8 day_alrm; u8 mon_alrm; u8 century; + + struct rtc_wkalrm saved_wkalrm; }; /* both platform and pnp busses use negative numbers for invalid irqs */ @@ -879,6 +881,8 @@ static int cmos_suspend(struct device *dev) enable_irq_wake(cmos->irq); } + cmos_read_alarm(dev, &cmos->saved_wkalrm); + dev_dbg(dev, "suspend%s, ctrl %02x\n", (tmp & RTC_AIE) ? ", alarm may wake" : "", tmp); @@ -899,6 +903,22 @@ static inline int cmos_poweroff(struct device *dev) #ifdef CONFIG_PM_SLEEP +static void cmos_check_wkalrm(struct device *dev) +{ + struct cmos_rtc *cmos = dev_get_drvdata(dev); + struct rtc_wkalrm current_alarm; + time64_t t_current_expires; + time64_t t_saved_expires; + + cmos_read_alarm(dev, ¤t_alarm); + t_current_expires = rtc_tm_to_time64(¤t_alarm.time); + t_saved_expires = rtc_tm_to_time64(&cmos->saved_wkalrm.time); + if (t_current_expires != t_saved_expires || + cmos->saved_wkalrm.enabled != current_alarm.enabled) { + cmos_set_alarm(dev, &cmos->saved_wkalrm); + } +} + static void cmos_check_acpi_rtc_status(struct device *dev, unsigned char *rtc_control); @@ -915,6 +935,9 @@ static int cmos_resume(struct device *dev) cmos->enabled_wake = 0; } + /* The BIOS might have changed the alarm, restore it */ + cmos_check_wkalrm(dev); + spin_lock_irq(&rtc_lock); tmp = cmos->suspend_ctrl; cmos->suspend_ctrl = 0; -- cgit v1.2.3-59-g8ed1b From ee85bb5bbe4ae690b48625874bc2b6ecc981326e Mon Sep 17 00:00:00 2001 From: Raghavendra Ganiga Date: Wed, 31 Aug 2016 22:56:41 +0530 Subject: rtc: ds1347: changed raw spi calls to register map calls This patch changes calls of spi read write calls to register map read and write calls in rtc ds1347 Signed-off-by: Raghavendra Chandra Ganiga Signed-off-by: Alexandre Belloni --- drivers/rtc/Kconfig | 1 + drivers/rtc/rtc-ds1347.c | 96 ++++++++++++++++++++++++++---------------------- 2 files changed, 54 insertions(+), 43 deletions(-) (limited to 'drivers') diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig index 531343a260f8..4f6bda83e099 100644 --- a/drivers/rtc/Kconfig +++ b/drivers/rtc/Kconfig @@ -665,6 +665,7 @@ config RTC_DRV_DS1343 will be called rtc-ds1343. 
config RTC_DRV_DS1347 + select REGMAP_SPI tristate "Dallas/Maxim DS1347" help If you say yes here you get support for the diff --git a/drivers/rtc/rtc-ds1347.c b/drivers/rtc/rtc-ds1347.c index 641e8e8a0dd7..ccfc9d43eb1e 100644 --- a/drivers/rtc/rtc-ds1347.c +++ b/drivers/rtc/rtc-ds1347.c @@ -18,6 +18,7 @@ #include #include #include +#include /* Registers in ds1347 rtc */ @@ -32,37 +33,28 @@ #define DS1347_STATUS_REG 0x17 #define DS1347_CLOCK_BURST 0x3F -static int ds1347_read_reg(struct device *dev, unsigned char address, - unsigned char *data) -{ - struct spi_device *spi = to_spi_device(dev); - - *data = address | 0x80; - - return spi_write_then_read(spi, data, 1, data, 1); -} - -static int ds1347_write_reg(struct device *dev, unsigned char address, - unsigned char data) -{ - struct spi_device *spi = to_spi_device(dev); - unsigned char buf[2]; - - buf[0] = address & 0x7F; - buf[1] = data; +static const struct regmap_range ds1347_ranges[] = { + { + .range_min = DS1347_SECONDS_REG, + .range_max = DS1347_STATUS_REG, + }, +}; - return spi_write_then_read(spi, buf, 2, NULL, 0); -} +static const struct regmap_access_table ds1347_access_table = { + .yes_ranges = ds1347_ranges, + .n_yes_ranges = ARRAY_SIZE(ds1347_ranges), +}; static int ds1347_read_time(struct device *dev, struct rtc_time *dt) { struct spi_device *spi = to_spi_device(dev); + struct regmap *map; int err; unsigned char buf[8]; - buf[0] = DS1347_CLOCK_BURST | 0x80; + map = spi_get_drvdata(spi); - err = spi_write_then_read(spi, buf, 1, buf, 8); + err = regmap_bulk_read(map, DS1347_CLOCK_BURST, buf, 8); if (err) return err; @@ -80,25 +72,27 @@ static int ds1347_read_time(struct device *dev, struct rtc_time *dt) static int ds1347_set_time(struct device *dev, struct rtc_time *dt) { struct spi_device *spi = to_spi_device(dev); - unsigned char buf[9]; + struct regmap *map; + unsigned char buf[8]; + + map = spi_get_drvdata(spi); - buf[0] = DS1347_CLOCK_BURST & 0x7F; - buf[1] = bin2bcd(dt->tm_sec); - buf[2] = bin2bcd(dt->tm_min); - buf[3] = (bin2bcd(dt->tm_hour) & 0x3F); - buf[4] = bin2bcd(dt->tm_mday); - buf[5] = bin2bcd(dt->tm_mon + 1); - buf[6] = bin2bcd(dt->tm_wday + 1); + buf[0] = bin2bcd(dt->tm_sec); + buf[1] = bin2bcd(dt->tm_min); + buf[2] = (bin2bcd(dt->tm_hour) & 0x3F); + buf[3] = bin2bcd(dt->tm_mday); + buf[4] = bin2bcd(dt->tm_mon + 1); + buf[5] = bin2bcd(dt->tm_wday + 1); /* year in linux is from 1900 i.e in range of 100 in rtc it is from 00 to 99 */ dt->tm_year = dt->tm_year % 100; - buf[7] = bin2bcd(dt->tm_year); - buf[8] = bin2bcd(0x00); + buf[6] = bin2bcd(dt->tm_year); + buf[7] = bin2bcd(0x00); /* write the rtc settings */ - return spi_write_then_read(spi, buf, 9, NULL, 0); + return regmap_bulk_write(map, DS1347_CLOCK_BURST, buf, 8); } static const struct rtc_class_ops ds1347_rtc_ops = { @@ -109,35 +103,53 @@ static const struct rtc_class_ops ds1347_rtc_ops = { static int ds1347_probe(struct spi_device *spi) { struct rtc_device *rtc; - unsigned char data; + struct regmap_config config; + struct regmap *map; + unsigned int data; int res; + memset(&config, 0, sizeof(config)); + config.reg_bits = 8; + config.val_bits = 8; + config.read_flag_mask = 0x80; + config.max_register = 0x3F; + config.wr_table = &ds1347_access_table; + /* spi setup with ds1347 in mode 3 and bits per word as 8 */ spi->mode = SPI_MODE_3; spi->bits_per_word = 8; spi_setup(spi); + map = devm_regmap_init_spi(spi, &config); + + if (IS_ERR(map)) { + dev_err(&spi->dev, "ds1347 regmap init spi failed\n"); + return PTR_ERR(map); + } + + spi_set_drvdata(spi, map); + 
/* RTC Settings */ - res = ds1347_read_reg(&spi->dev, DS1347_SECONDS_REG, &data); + res = regmap_read(map, DS1347_SECONDS_REG, &data); if (res) return res; /* Disable the write protect of rtc */ - ds1347_read_reg(&spi->dev, DS1347_CONTROL_REG, &data); + regmap_read(map, DS1347_CONTROL_REG, &data); data = data & ~(1<<7); - ds1347_write_reg(&spi->dev, DS1347_CONTROL_REG, data); + regmap_write(map, DS1347_CONTROL_REG, data); /* Enable the oscillator , disable the oscillator stop flag, and glitch filter to reduce current consumption */ - ds1347_read_reg(&spi->dev, DS1347_STATUS_REG, &data); + regmap_read(map, DS1347_STATUS_REG, &data); data = data & 0x1B; - ds1347_write_reg(&spi->dev, DS1347_STATUS_REG, data); + regmap_write(map, DS1347_STATUS_REG, data); /* display the settings */ - ds1347_read_reg(&spi->dev, DS1347_CONTROL_REG, &data); + regmap_read(map, DS1347_CONTROL_REG, &data); dev_info(&spi->dev, "DS1347 RTC CTRL Reg = 0x%02x\n", data); - ds1347_read_reg(&spi->dev, DS1347_STATUS_REG, &data); + regmap_read(map, DS1347_STATUS_REG, &data); dev_info(&spi->dev, "DS1347 RTC Status Reg = 0x%02x\n", data); rtc = devm_rtc_device_register(&spi->dev, "ds1347", @@ -146,8 +158,6 @@ static int ds1347_probe(struct spi_device *spi) if (IS_ERR(rtc)) return PTR_ERR(rtc); - spi_set_drvdata(spi, rtc); - return 0; } -- cgit v1.2.3-59-g8ed1b From 473195f80f3940afc8a1461831d74f9a44bba538 Mon Sep 17 00:00:00 2001 From: Axel Lin Date: Fri, 9 Sep 2016 18:03:54 +0800 Subject: rtc: ac100: Add NULL checking for devm_kzalloc call devm_kzalloc can return NULL, add NULL checking to prevent NULL pointer dereference. Signed-off-by: Axel Lin Signed-off-by: Alexandre Belloni --- drivers/rtc/rtc-ac100.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers') diff --git a/drivers/rtc/rtc-ac100.c b/drivers/rtc/rtc-ac100.c index 2d0bb02a25d9..9e336184491c 100644 --- a/drivers/rtc/rtc-ac100.c +++ b/drivers/rtc/rtc-ac100.c @@ -554,6 +554,9 @@ static int ac100_rtc_probe(struct platform_device *pdev) int ret; chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL); + if (!chip) + return -ENOMEM; + platform_set_drvdata(pdev, chip); chip->dev = &pdev->dev; chip->regmap = ac100->regmap; -- cgit v1.2.3-59-g8ed1b From 00f7f90c51dfc2403257e2c7410d453e72bf6a41 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Thu, 22 Sep 2016 11:48:00 +0200 Subject: rtc: cmos: avoid unused function warning A bug fix for the ACPI side of this driver caused a harmless build warning: drivers/rtc/rtc-cmos.c:1115:13: error: 'cmos_check_acpi_rtc_status' defined but not used [-Werror=unused-function] static void cmos_check_acpi_rtc_status(struct device *dev, We can avoid the warning and simplify the driver at the same time by removing the #ifdef for CONFIG_PM and rely on the SIMPLE_DEV_PM_OPS() to set everything up correctly. cmos_resume() has to get marked as __maybe_unused so we don't introduce another warning, and the two variants of cmos_poweroff() can get merged into one using an IS_ENABLED() check. 
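For reference, the pattern this change converges on looks roughly like the following sketch (illustrative only; the foo_* names are placeholders, not the driver's own functions): callbacks that may end up unreferenced are tagged __maybe_unused, SIMPLE_DEV_PM_OPS() always defines the dev_pm_ops, and IS_ENABLED() replaces the preprocessor conditionals inside the code.

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/pm.h>

static int __maybe_unused foo_suspend(struct device *dev)
{
	/* save device state, arm wakeup, ... */
	return 0;
}

static int __maybe_unused foo_resume(struct device *dev)
{
	/* restore device state */
	return 0;
}

static int __maybe_unused foo_poweroff(struct device *dev)
{
	/* single variant; the dead branch is discarded at compile time */
	if (!IS_ENABLED(CONFIG_PM))
		return -ENOSYS;

	return foo_suspend(dev);
}

static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

The compiler still parses and type-checks every branch, but unreachable ones are dropped, which is what allows the #ifdef/#else blocks to be deleted without new warnings.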
Fixes: 983bf1256edb ("rtc: cmos: Clear ACPI-driven alarms upon resume") Signed-off-by: Arnd Bergmann Signed-off-by: Alexandre Belloni --- drivers/rtc/rtc-cmos.c | 21 ++++----------------- 1 file changed, 4 insertions(+), 17 deletions(-) (limited to 'drivers') diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c index 2943a0d58a3a..dd3d59806ffa 100644 --- a/drivers/rtc/rtc-cmos.c +++ b/drivers/rtc/rtc-cmos.c @@ -848,8 +848,6 @@ static int cmos_aie_poweroff(struct device *dev) return retval; } -#ifdef CONFIG_PM - static int cmos_suspend(struct device *dev) { struct cmos_rtc *cmos = dev_get_drvdata(dev); @@ -898,11 +896,12 @@ static int cmos_suspend(struct device *dev) */ static inline int cmos_poweroff(struct device *dev) { + if (!IS_ENABLED(CONFIG_PM)) + return -ENOSYS; + return cmos_suspend(dev); } -#ifdef CONFIG_PM_SLEEP - static void cmos_check_wkalrm(struct device *dev) { struct cmos_rtc *cmos = dev_get_drvdata(dev); @@ -922,7 +921,7 @@ static void cmos_check_wkalrm(struct device *dev) static void cmos_check_acpi_rtc_status(struct device *dev, unsigned char *rtc_control); -static int cmos_resume(struct device *dev) +static int __maybe_unused cmos_resume(struct device *dev) { struct cmos_rtc *cmos = dev_get_drvdata(dev); unsigned char tmp; @@ -975,16 +974,6 @@ static int cmos_resume(struct device *dev) return 0; } -#endif -#else - -static inline int cmos_poweroff(struct device *dev) -{ - return -ENOSYS; -} - -#endif - static SIMPLE_DEV_PM_OPS(cmos_pm_ops, cmos_suspend, cmos_resume); /*----------------------------------------------------------------*/ @@ -1278,9 +1267,7 @@ static struct platform_driver cmos_platform_driver = { .shutdown = cmos_platform_shutdown, .driver = { .name = driver_name, -#ifdef CONFIG_PM .pm = &cmos_pm_ops, -#endif .of_match_table = of_match_ptr(of_cmos_match), } }; -- cgit v1.2.3-59-g8ed1b From 02d9d3cbac53bd59fe18fc32adc7bc7cde1037ae Mon Sep 17 00:00:00 2001 From: Harman Kalra Date: Thu, 22 Sep 2016 01:29:48 +0530 Subject: ata: Replace BUG() with BUG_ON(). Replace BUG() with BUG_ON(). Caught by coccinelle. 
Signed-off-by: Harman Kalra Signed-off-by: Tejun Heo --- drivers/ata/pata_octeon_cf.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c index 27245957eee3..475a00669427 100644 --- a/drivers/ata/pata_octeon_cf.c +++ b/drivers/ata/pata_octeon_cf.c @@ -152,8 +152,7 @@ static void octeon_cf_set_piomode(struct ata_port *ap, struct ata_device *dev) div = 8; T = (int)((1000000000000LL * div) / octeon_get_io_clock_rate()); - if (ata_timing_compute(dev, dev->pio_mode, &timing, T, T)) - BUG(); + BUG_ON(ata_timing_compute(dev, dev->pio_mode, &timing, T, T)); t1 = timing.setup; if (t1) -- cgit v1.2.3-59-g8ed1b From 70a19b7e406da9dc5cea20fedba443374a5cae70 Mon Sep 17 00:00:00 2001 From: Harman Kalra Date: Thu, 22 Sep 2016 01:18:31 +0530 Subject: pata_at91: Use PTR_ERR_OR_ZERO rather than if(IS_ERR(...)) + PTR_ERR Signed-off-by: Harman Kalra Signed-off-by: Tejun Heo --- drivers/ata/pata_at91.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/ata/pata_at91.c b/drivers/ata/pata_at91.c index 9f27b14009f9..1611e0e8d767 100644 --- a/drivers/ata/pata_at91.c +++ b/drivers/ata/pata_at91.c @@ -347,10 +347,8 @@ static int at91sam9_smc_fields_init(struct device *dev) field.reg = AT91SAM9_SMC_MODE(AT91SAM9_SMC_GENERIC); fields.mode = devm_regmap_field_alloc(dev, smc, field); - if (IS_ERR(fields.mode)) - return PTR_ERR(fields.mode); - return 0; + return PTR_ERR_OR_ZERO(fields.mode); } static int pata_at91_probe(struct platform_device *pdev) -- cgit v1.2.3-59-g8ed1b From 40f1ebd4e0997e81ac2eb3b2f64d6133051afe86 Mon Sep 17 00:00:00 2001 From: Vaishali Thakkar Date: Sat, 24 Sep 2016 12:10:06 +0530 Subject: soc/fsl/qe: Use resource_size Use the function resource_size instead of explicit computation. Problem found using Coccinelle. Signed-off-by: Vaishali Thakkar Signed-off-by: Scott Wood --- drivers/soc/fsl/qe/qe_tdm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/soc/fsl/qe/qe_tdm.c b/drivers/soc/fsl/qe/qe_tdm.c index 5e48b1470178..124e222a0d18 100644 --- a/drivers/soc/fsl/qe/qe_tdm.c +++ b/drivers/soc/fsl/qe/qe_tdm.c @@ -167,7 +167,7 @@ int ucc_of_parse_tdm(struct device_node *np, struct ucc_tdm *utdm, } if (siram_init_flag == 0) { - memset_io(utdm->siram, 0, res->end - res->start + 1); + memset_io(utdm->siram, 0, resource_size(res)); siram_init_flag = 1; } -- cgit v1.2.3-59-g8ed1b From 5066943a5170838e58e07163d32ba45b0b360417 Mon Sep 17 00:00:00 2001 From: Zhao Qiang Date: Fri, 23 Sep 2016 10:20:31 +0800 Subject: soc/fsl/qe: Use of_adress_to_resource() in get_qe_base() modify get_qe_base function with of_address_to_resource instead of of_get_property and of_translate_address. 
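For context, the of_address_to_resource() idiom referred to here combines the property lookup and the address translation in a single call. A minimal sketch (with a hypothetical helper name, assuming the node's first "reg" entry is the one wanted):

#include <linux/ioport.h>
#include <linux/of.h>
#include <linux/of_address.h>

/* Hypothetical helper, not part of the patch: return the translated
 * base of np's first "reg" entry, or -1 on failure (mirroring the
 * sentinel value used by get_qe_base()).
 */
static phys_addr_t example_reg_base(struct device_node *np)
{
	struct resource res;

	if (of_address_to_resource(np, 0, &res))
		return (phys_addr_t)-1;

	return res.start;
}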
Signed-off-by: Zhao Qiang Signed-off-by: Scott Wood --- drivers/soc/fsl/qe/qe.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/soc/fsl/qe/qe.c b/drivers/soc/fsl/qe/qe.c index 7026507e6f1d..2707a827261b 100644 --- a/drivers/soc/fsl/qe/qe.c +++ b/drivers/soc/fsl/qe/qe.c @@ -69,8 +69,8 @@ static phys_addr_t qebase = -1; phys_addr_t get_qe_base(void) { struct device_node *qe; - int size; - const u32 *prop; + int ret; + struct resource res; if (qebase != -1) return qebase; @@ -82,9 +82,9 @@ phys_addr_t get_qe_base(void) return qebase; } - prop = of_get_property(qe, "reg", &size); - if (prop && size >= sizeof(*prop)) - qebase = of_translate_address(qe, prop); + ret = of_address_to_resource(qe, 0, &res); + if (!ret) + qebase = res.start; of_node_put(qe); return qebase; -- cgit v1.2.3-59-g8ed1b From 74c269f69cdae83253be7f65840dd1ae589d5e99 Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Fri, 5 Aug 2016 10:56:41 +0200 Subject: fsl/qe: use of_property_read_bool Use of_property_read_bool to check for the existence of a property. The semantic patch that makes this change is as follows: (http://coccinelle.lip6.fr/) // @@ expression e1,e2; statement S2,S1; @@ - if (of_get_property(e1,e2,NULL)) + if (of_property_read_bool(e1,e2)) S1 else S2 // Signed-off-by: Julia Lawall Signed-off-by: Scott Wood --- drivers/soc/fsl/qe/qe_tdm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/soc/fsl/qe/qe_tdm.c b/drivers/soc/fsl/qe/qe_tdm.c index 124e222a0d18..a1048b44e6b9 100644 --- a/drivers/soc/fsl/qe/qe_tdm.c +++ b/drivers/soc/fsl/qe/qe_tdm.c @@ -99,7 +99,7 @@ int ucc_of_parse_tdm(struct device_node *np, struct ucc_tdm *utdm, utdm->tdm_port = val; ut_info->uf_info.tdm_num = utdm->tdm_port; - if (of_get_property(np, "fsl,tdm-internal-loopback", NULL)) + if (of_property_read_bool(np, "fsl,tdm-internal-loopback")) utdm->tdm_mode = TDM_INTERNAL_LOOPBACK; else utdm->tdm_mode = TDM_NORMAL; -- cgit v1.2.3-59-g8ed1b From 4d486e0083796b54d5aeddd7a5794f897fca1008 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Tue, 16 Aug 2016 08:26:20 +0200 Subject: soc/fsl/qe: fix Oops on CPM1 (and likely CPM2) Commit 0e6e01ff694ee ("CPM/QE: use genalloc to manage CPM/QE muram") has changed the way muram is managed. genalloc uses kmalloc(), hence requires the SLAB to be up and running. On powerpc 8xx, cpm_reset() is called early during startup. cpm_reset() then calls cpm_muram_init() before SLAB is available, hence the following Oops. cpm_reset() cannot be called during initcalls because the CPM is needed for console. This patch removes the call to cpm_muram_init() from cpm_reset(). cpm_muram_init() will be called from a new function called cpm_init() which is declared as subsys_initcall, unless cpm_muram_alloc() is called earlier for the serial console in which case cpm_muram_init() will be called from there. The reason for calling it from two places is that some drivers (e.g. i2c-cpm) need some of the initialisations done by cpm_muram_init() but don't call cpm_muram_alloc(). The console driver calls cpm_muram_alloc() but some platforms might not use the CPM serial ports for console. 
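Condensed from the diff further below, the fix takes the following shape; the allocator-side fallback is shown as a comment, and the captured Oops that motivated the change follows.

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/of.h>

/* Sketch only - see the full diff below for the real change. */
static int __init cpm_init(void)
{
	struct device_node *np;

	np = of_find_compatible_node(NULL, NULL, "fsl,cpm1");
	if (!np)
		np = of_find_compatible_node(NULL, NULL, "fsl,cpm2");
	if (!np)
		return -ENODEV;

	cpm_muram_init();	/* SLAB is up by subsys_initcall time */
	of_node_put(np);
	return 0;
}
subsys_initcall(cpm_init);

/*
 * cpm_muram_alloc_common() additionally gains:
 *	if (!muram_pool && cpm_muram_init())
 *		goto out2;
 * so callers that run before the initcall (the serial console)
 * trigger the initialisation themselves.
 */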
[ 0.000000] Unable to handle kernel paging request for data at address 0x00000008 [ 0.000000] Faulting instruction address: 0xc01acce0 [ 0.000000] Oops: Kernel access of bad area, sig: 11 [#1] [ 0.000000] PREEMPT CMPC885 [ 0.000000] CPU: 0 PID: 0 Comm: swapper Not tainted 4.4.14-g0886ed8 #5 [ 0.000000] task: c05183e0 ti: c0536000 task.ti: c0536000 [ 0.000000] NIP: c01acce0 LR: c0011068 CTR: 00000000 [ 0.000000] REGS: c0537e50 TRAP: 0300 Not tainted (4.4.14-s3k-dev-g0886ed8-svn) [ 0.000000] MSR: 00001032 CR: 28044428 XER: 00000000 [ 0.000000] DAR: 00000008 DSISR: c0000000 GPR00: c0011068 c0537f00 c05183e0 00000000 00009000 ffffffff 00000bc0 ffffffff GPR08: ff003000 ff00b000 ff003bbf 00000000 22044422 100d43a8 00000000 07ff94e8 GPR16: 00000000 07bb5d70 00000000 07ff81f4 07ff81f4 07ff81f4 00000000 00000000 GPR24: 07ffb3a0 07fe7628 c0550000 c7ffa190 c0540000 ff003bbf 00000000 00000001 [ 0.000000] NIP [c01acce0] gen_pool_add_virt+0x14/0xdc [ 0.000000] LR [c0011068] cpm_muram_init+0xd4/0x18c [ 0.000000] Call Trace: [ 0.000000] [c0537f00] [00000200] 0x200 (unreliable) [ 0.000000] [c0537f20] [c0011068] cpm_muram_init+0xd4/0x18c [ 0.000000] [c0537f70] [c0494684] cpm_reset+0xb4/0xc8 [ 0.000000] [c0537f90] [c0494c64] cmpc885_setup_arch+0x10/0x30 [ 0.000000] [c0537fa0] [c0493cd4] setup_arch+0x130/0x168 [ 0.000000] [c0537fb0] [c04906bc] start_kernel+0x88/0x380 [ 0.000000] [c0537ff0] [c0002224] start_here+0x38/0x98 [ 0.000000] Instruction dump: [ 0.000000] 91430010 91430014 80010014 83e1000c 7c0803a6 38210010 4e800020 7c0802a6 [ 0.000000] 9421ffe0 bf61000c 90010024 7c7e1b78 <80630008> 7c9c2378 7cc31c30 3863001f [ 0.000000] ---[ end trace dc8fa200cb88537f ]--- fixes: 0e6e01ff694ee ("CPM/QE: use genalloc to manage CPM/QE muram") Cc: stable@vger.linux.org Signed-off-by: Christophe Leroy [scottwood: Removed some string changes unrelated to bugfix] Signed-off-by: Scott Wood --- arch/powerpc/sysdev/cpm1.c | 2 -- arch/powerpc/sysdev/cpm2.c | 4 ---- arch/powerpc/sysdev/cpm_common.c | 15 +++++++++++++++ drivers/soc/fsl/qe/qe_common.c | 8 ++++++++ 4 files changed, 23 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/arch/powerpc/sysdev/cpm1.c b/arch/powerpc/sysdev/cpm1.c index 3c0eb9b25535..986cd111d4df 100644 --- a/arch/powerpc/sysdev/cpm1.c +++ b/arch/powerpc/sysdev/cpm1.c @@ -233,8 +233,6 @@ void __init cpm_reset(void) else out_be32(&siu_conf->sc_sdcr, 1); immr_unmap(siu_conf); - - cpm_muram_init(); } static DEFINE_SPINLOCK(cmd_lock); diff --git a/arch/powerpc/sysdev/cpm2.c b/arch/powerpc/sysdev/cpm2.c index 8dc1e24f3c23..f78ff841652c 100644 --- a/arch/powerpc/sysdev/cpm2.c +++ b/arch/powerpc/sysdev/cpm2.c @@ -66,10 +66,6 @@ void __init cpm2_reset(void) cpm2_immr = ioremap(get_immrbase(), CPM_MAP_SIZE); #endif - /* Reclaim the DP memory for our use. - */ - cpm_muram_init(); - /* Tell everyone where the comm processor resides. 
*/ cpmp = &cpm2_immr->im_cpm; diff --git a/arch/powerpc/sysdev/cpm_common.c b/arch/powerpc/sysdev/cpm_common.c index 947f42007734..51bf749a4f3a 100644 --- a/arch/powerpc/sysdev/cpm_common.c +++ b/arch/powerpc/sysdev/cpm_common.c @@ -37,6 +37,21 @@ #include #endif +static int __init cpm_init(void) +{ + struct device_node *np; + + np = of_find_compatible_node(NULL, NULL, "fsl,cpm1"); + if (!np) + np = of_find_compatible_node(NULL, NULL, "fsl,cpm2"); + if (!np) + return -ENODEV; + cpm_muram_init(); + of_node_put(np); + return 0; +} +subsys_initcall(cpm_init); + #ifdef CONFIG_PPC_EARLY_DEBUG_CPM static u32 __iomem *cpm_udbg_txdesc; static u8 __iomem *cpm_udbg_txbuf; diff --git a/drivers/soc/fsl/qe/qe_common.c b/drivers/soc/fsl/qe/qe_common.c index 41eff805a904..104e68d9b84f 100644 --- a/drivers/soc/fsl/qe/qe_common.c +++ b/drivers/soc/fsl/qe/qe_common.c @@ -70,6 +70,11 @@ int cpm_muram_init(void) } muram_pool = gen_pool_create(0, -1); + if (!muram_pool) { + pr_err("Cannot allocate memory pool for CPM/QE muram"); + ret = -ENOMEM; + goto out_muram; + } muram_pbase = of_translate_address(np, zero); if (muram_pbase == (phys_addr_t)OF_BAD_ADDR) { pr_err("Cannot translate zero through CPM muram node"); @@ -116,6 +121,9 @@ static unsigned long cpm_muram_alloc_common(unsigned long size, struct muram_block *entry; unsigned long start; + if (!muram_pool && cpm_muram_init()) + goto out2; + start = gen_pool_alloc_algo(muram_pool, size, algo, data); if (!start) goto out2; -- cgit v1.2.3-59-g8ed1b From 5dc6f3fedee58efa343e822558fc3e2f0eb2ad1f Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Tue, 6 Sep 2016 00:52:16 +0200 Subject: soc/fsl/qe: fix gpio save_regs functions of_mm_gpiochip_add_data() calls mm_gc->save_regs() before setting the data. Therefore ->save_regs() cannot use gpiochip_get_data() An Oops is encountered without this fix. fixes: 1e714e54b5ca5 ("powerpc: qe_lib-gpio: use gpiochip data pointer") Signed-off-by: Christophe Leroy Cc: Reviewed-by: Linus Walleij Signed-off-by: Scott Wood --- drivers/soc/fsl/qe/gpio.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/soc/fsl/qe/gpio.c b/drivers/soc/fsl/qe/gpio.c index 333eb2215a57..0aaf429f31d5 100644 --- a/drivers/soc/fsl/qe/gpio.c +++ b/drivers/soc/fsl/qe/gpio.c @@ -41,7 +41,8 @@ struct qe_gpio_chip { static void qe_gpio_save_regs(struct of_mm_gpio_chip *mm_gc) { - struct qe_gpio_chip *qe_gc = gpiochip_get_data(&mm_gc->gc); + struct qe_gpio_chip *qe_gc = + container_of(mm_gc, struct qe_gpio_chip, mm_gc); struct qe_pio_regs __iomem *regs = mm_gc->regs; qe_gc->cpdata = in_be32(®s->cpdata); -- cgit v1.2.3-59-g8ed1b From 1f9c0a77278bc9ec7df14bbd799fccfabff1010a Mon Sep 17 00:00:00 2001 From: Claudiu Manoil Date: Thu, 22 Sep 2016 18:04:08 +0300 Subject: soc/fsl: Introduce DPAA 1.x BMan device driver This driver enables the Freescale DPAA 1.x Buffer Manager block. BMan is a hardware accelerator that manages buffer pools. It allows CPUs and other accelerators connected to the SoC datapath to acquire and release buffers during data processing. 
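A rough usage sketch of the pool API this patch exports (bman_new_pool(), bman_release(), bman_acquire(), bman_free_pool()); the bm_buffer accessors come from the new include/soc/fsl/bman.h, and the DMA address passed in is a placeholder that a real caller would obtain from the DMA API:

#include <linux/errno.h>
#include <linux/types.h>
#include <soc/fsl/bman.h>

/* Illustrative only: allocate a pool, seed it with one buffer and pull
 * it back out. In real use releases complete asynchronously through
 * the portal's RCR ring, so an immediate re-acquire is not guaranteed.
 */
static int bman_pool_example(dma_addr_t addr)
{
	struct bman_pool *pool;
	struct bm_buffer bmb;
	int ret;

	pool = bman_new_pool();		/* grabs a free BPID internally */
	if (!pool)
		return -ENODEV;

	bm_buffer_set64(&bmb, addr);	/* BPID is set by bman_release() */
	ret = bman_release(pool, &bmb, 1);
	if (ret)
		goto out;

	ret = bman_acquire(pool, &bmb, 1);	/* returns count acquired */
	if (ret == 1)
		ret = 0;
out:
	bman_free_pool(pool);
	return ret;
}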
Signed-off-by: Roy Pledge Signed-off-by: Claudiu Manoil Signed-off-by: Scott Wood --- drivers/soc/fsl/qbman/Kconfig | 28 ++ drivers/soc/fsl/qbman/Makefile | 2 + drivers/soc/fsl/qbman/bman.c | 797 ++++++++++++++++++++++++++++++++++++ drivers/soc/fsl/qbman/bman_ccsr.c | 263 ++++++++++++ drivers/soc/fsl/qbman/bman_portal.c | 219 ++++++++++ drivers/soc/fsl/qbman/bman_priv.h | 80 ++++ drivers/soc/fsl/qbman/dpaa_sys.h | 103 +++++ include/soc/fsl/bman.h | 129 ++++++ 8 files changed, 1621 insertions(+) create mode 100644 drivers/soc/fsl/qbman/Kconfig create mode 100644 drivers/soc/fsl/qbman/Makefile create mode 100644 drivers/soc/fsl/qbman/bman.c create mode 100644 drivers/soc/fsl/qbman/bman_ccsr.c create mode 100644 drivers/soc/fsl/qbman/bman_portal.c create mode 100644 drivers/soc/fsl/qbman/bman_priv.h create mode 100644 drivers/soc/fsl/qbman/dpaa_sys.h create mode 100644 include/soc/fsl/bman.h (limited to 'drivers') diff --git a/drivers/soc/fsl/qbman/Kconfig b/drivers/soc/fsl/qbman/Kconfig new file mode 100644 index 000000000000..8f2df64224bf --- /dev/null +++ b/drivers/soc/fsl/qbman/Kconfig @@ -0,0 +1,28 @@ +menuconfig FSL_DPAA + bool "Freescale DPAA 1.x support" + depends on FSL_SOC_BOOKE + select GENERIC_ALLOCATOR + help + The Freescale Data Path Acceleration Architecture (DPAA) is a set of + hardware components on specific QorIQ multicore processors. + This architecture provides the infrastructure to support simplified + sharing of networking interfaces and accelerators by multiple CPUs. + The major h/w blocks composing DPAA are BMan and QMan. + + The Buffer Manager (BMan) is a hardware buffer pool management block + that allows software and accelerators on the datapath to acquire and + release buffers in order to build frames. + + The Queue Manager (QMan) is a hardware queue management block + that allows software and accelerators on the datapath to enqueue and + dequeue frames in order to communicate. + +if FSL_DPAA + +config FSL_DPAA_CHECKING + bool "Additional driver checking" + help + Compiles in additional checks, to sanity-check the drivers and + any use of the exported API. Not recommended for performance. + +endif # FSL_DPAA diff --git a/drivers/soc/fsl/qbman/Makefile b/drivers/soc/fsl/qbman/Makefile new file mode 100644 index 000000000000..855c3ac0f89e --- /dev/null +++ b/drivers/soc/fsl/qbman/Makefile @@ -0,0 +1,2 @@ +obj-$(CONFIG_FSL_DPAA) += bman_ccsr.o bman_portal.o \ + bman.o diff --git a/drivers/soc/fsl/qbman/bman.c b/drivers/soc/fsl/qbman/bman.c new file mode 100644 index 000000000000..ffa48fdbb1a9 --- /dev/null +++ b/drivers/soc/fsl/qbman/bman.c @@ -0,0 +1,797 @@ +/* Copyright 2008 - 2016 Freescale Semiconductor, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "bman_priv.h" + +#define IRQNAME "BMan portal %d" +#define MAX_IRQNAME 16 /* big enough for "BMan portal %d" */ + +/* Portal register assists */ + +/* Cache-inhibited register offsets */ +#define BM_REG_RCR_PI_CINH 0x0000 +#define BM_REG_RCR_CI_CINH 0x0004 +#define BM_REG_RCR_ITR 0x0008 +#define BM_REG_CFG 0x0100 +#define BM_REG_SCN(n) (0x0200 + ((n) << 2)) +#define BM_REG_ISR 0x0e00 +#define BM_REG_IER 0x0e04 +#define BM_REG_ISDR 0x0e08 +#define BM_REG_IIR 0x0e0c + +/* Cache-enabled register offsets */ +#define BM_CL_CR 0x0000 +#define BM_CL_RR0 0x0100 +#define BM_CL_RR1 0x0140 +#define BM_CL_RCR 0x1000 +#define BM_CL_RCR_PI_CENA 0x3000 +#define BM_CL_RCR_CI_CENA 0x3100 + +/* + * Portal modes. + * Enum types; + * pmode == production mode + * cmode == consumption mode, + * Enum values use 3 letter codes. 
First letter matches the portal mode, + * remaining two letters indicate; + * ci == cache-inhibited portal register + * ce == cache-enabled portal register + * vb == in-band valid-bit (cache-enabled) + */ +enum bm_rcr_pmode { /* matches BCSP_CFG::RPM */ + bm_rcr_pci = 0, /* PI index, cache-inhibited */ + bm_rcr_pce = 1, /* PI index, cache-enabled */ + bm_rcr_pvb = 2 /* valid-bit */ +}; +enum bm_rcr_cmode { /* s/w-only */ + bm_rcr_cci, /* CI index, cache-inhibited */ + bm_rcr_cce /* CI index, cache-enabled */ +}; + + +/* --- Portal structures --- */ + +#define BM_RCR_SIZE 8 + +/* Release Command */ +struct bm_rcr_entry { + union { + struct { + u8 _ncw_verb; /* writes to this are non-coherent */ + u8 bpid; /* used with BM_RCR_VERB_CMD_BPID_SINGLE */ + u8 __reserved1[62]; + }; + struct bm_buffer bufs[8]; + }; +}; +#define BM_RCR_VERB_VBIT 0x80 +#define BM_RCR_VERB_CMD_MASK 0x70 /* one of two values; */ +#define BM_RCR_VERB_CMD_BPID_SINGLE 0x20 +#define BM_RCR_VERB_CMD_BPID_MULTI 0x30 +#define BM_RCR_VERB_BUFCOUNT_MASK 0x0f /* values 1..8 */ + +struct bm_rcr { + struct bm_rcr_entry *ring, *cursor; + u8 ci, available, ithresh, vbit; +#ifdef CONFIG_FSL_DPAA_CHECKING + u32 busy; + enum bm_rcr_pmode pmode; + enum bm_rcr_cmode cmode; +#endif +}; + +/* MC (Management Command) command */ +struct bm_mc_command { + u8 _ncw_verb; /* writes to this are non-coherent */ + u8 bpid; /* used by acquire command */ + u8 __reserved[62]; +}; +#define BM_MCC_VERB_VBIT 0x80 +#define BM_MCC_VERB_CMD_MASK 0x70 /* where the verb contains; */ +#define BM_MCC_VERB_CMD_ACQUIRE 0x10 +#define BM_MCC_VERB_CMD_QUERY 0x40 +#define BM_MCC_VERB_ACQUIRE_BUFCOUNT 0x0f /* values 1..8 go here */ + +/* MC result, Acquire and Query Response */ +union bm_mc_result { + struct { + u8 verb; + u8 bpid; + u8 __reserved[62]; + }; + struct bm_buffer bufs[8]; +}; +#define BM_MCR_VERB_VBIT 0x80 +#define BM_MCR_VERB_CMD_MASK BM_MCC_VERB_CMD_MASK +#define BM_MCR_VERB_CMD_ACQUIRE BM_MCC_VERB_CMD_ACQUIRE +#define BM_MCR_VERB_CMD_QUERY BM_MCC_VERB_CMD_QUERY +#define BM_MCR_VERB_CMD_ERR_INVALID 0x60 +#define BM_MCR_VERB_CMD_ERR_ECC 0x70 +#define BM_MCR_VERB_ACQUIRE_BUFCOUNT BM_MCC_VERB_ACQUIRE_BUFCOUNT /* 0..8 */ +#define BM_MCR_TIMEOUT 10000 /* us */ + +struct bm_mc { + struct bm_mc_command *cr; + union bm_mc_result *rr; + u8 rridx, vbit; +#ifdef CONFIG_FSL_DPAA_CHECKING + enum { + /* Can only be _mc_start()ed */ + mc_idle, + /* Can only be _mc_commit()ed or _mc_abort()ed */ + mc_user, + /* Can only be _mc_retry()ed */ + mc_hw + } state; +#endif +}; + +struct bm_addr { + void __iomem *ce; /* cache-enabled */ + void __iomem *ci; /* cache-inhibited */ +}; + +struct bm_portal { + struct bm_addr addr; + struct bm_rcr rcr; + struct bm_mc mc; +} ____cacheline_aligned; + +/* Cache-inhibited register access. 
*/ +static inline u32 bm_in(struct bm_portal *p, u32 offset) +{ + return __raw_readl(p->addr.ci + offset); +} + +static inline void bm_out(struct bm_portal *p, u32 offset, u32 val) +{ + __raw_writel(val, p->addr.ci + offset); +} + +/* Cache Enabled Portal Access */ +static inline void bm_cl_invalidate(struct bm_portal *p, u32 offset) +{ + dpaa_invalidate(p->addr.ce + offset); +} + +static inline void bm_cl_touch_ro(struct bm_portal *p, u32 offset) +{ + dpaa_touch_ro(p->addr.ce + offset); +} + +static inline u32 bm_ce_in(struct bm_portal *p, u32 offset) +{ + return __raw_readl(p->addr.ce + offset); +} + +struct bman_portal { + struct bm_portal p; + /* interrupt sources processed by portal_isr(), configurable */ + unsigned long irq_sources; + /* probing time config params for cpu-affine portals */ + const struct bm_portal_config *config; + char irqname[MAX_IRQNAME]; +}; + +static cpumask_t affine_mask; +static DEFINE_SPINLOCK(affine_mask_lock); +static DEFINE_PER_CPU(struct bman_portal, bman_affine_portal); + +static inline struct bman_portal *get_affine_portal(void) +{ + return &get_cpu_var(bman_affine_portal); +} + +static inline void put_affine_portal(void) +{ + put_cpu_var(bman_affine_portal); +} + +/* + * This object type refers to a pool, it isn't *the* pool. There may be + * more than one such object per BMan buffer pool, eg. if different users of the + * pool are operating via different portals. + */ +struct bman_pool { + /* index of the buffer pool to encapsulate (0-63) */ + u32 bpid; + /* Used for hash-table admin when using depletion notifications. */ + struct bman_portal *portal; + struct bman_pool *next; +}; + +static u32 poll_portal_slow(struct bman_portal *p, u32 is); + +static irqreturn_t portal_isr(int irq, void *ptr) +{ + struct bman_portal *p = ptr; + struct bm_portal *portal = &p->p; + u32 clear = p->irq_sources; + u32 is = bm_in(portal, BM_REG_ISR) & p->irq_sources; + + if (unlikely(!is)) + return IRQ_NONE; + + clear |= poll_portal_slow(p, is); + bm_out(portal, BM_REG_ISR, clear); + return IRQ_HANDLED; +} + +/* --- RCR API --- */ + +#define RCR_SHIFT ilog2(sizeof(struct bm_rcr_entry)) +#define RCR_CARRY (uintptr_t)(BM_RCR_SIZE << RCR_SHIFT) + +/* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */ +static struct bm_rcr_entry *rcr_carryclear(struct bm_rcr_entry *p) +{ + uintptr_t addr = (uintptr_t)p; + + addr &= ~RCR_CARRY; + + return (struct bm_rcr_entry *)addr; +} + +#ifdef CONFIG_FSL_DPAA_CHECKING +/* Bit-wise logic to convert a ring pointer to a ring index */ +static int rcr_ptr2idx(struct bm_rcr_entry *e) +{ + return ((uintptr_t)e >> RCR_SHIFT) & (BM_RCR_SIZE - 1); +} +#endif + +/* Increment the 'cursor' ring pointer, taking 'vbit' into account */ +static inline void rcr_inc(struct bm_rcr *rcr) +{ + /* increment to the next RCR pointer and handle overflow and 'vbit' */ + struct bm_rcr_entry *partial = rcr->cursor + 1; + + rcr->cursor = rcr_carryclear(partial); + if (partial != rcr->cursor) + rcr->vbit ^= BM_RCR_VERB_VBIT; +} + +static int bm_rcr_get_avail(struct bm_portal *portal) +{ + struct bm_rcr *rcr = &portal->rcr; + + return rcr->available; +} + +static int bm_rcr_get_fill(struct bm_portal *portal) +{ + struct bm_rcr *rcr = &portal->rcr; + + return BM_RCR_SIZE - 1 - rcr->available; +} + +static void bm_rcr_set_ithresh(struct bm_portal *portal, u8 ithresh) +{ + struct bm_rcr *rcr = &portal->rcr; + + rcr->ithresh = ithresh; + bm_out(portal, BM_REG_RCR_ITR, ithresh); +} + +static void bm_rcr_cce_prefetch(struct bm_portal *portal) +{ + 
__maybe_unused struct bm_rcr *rcr = &portal->rcr; + + DPAA_ASSERT(rcr->cmode == bm_rcr_cce); + bm_cl_touch_ro(portal, BM_CL_RCR_CI_CENA); +} + +static u8 bm_rcr_cce_update(struct bm_portal *portal) +{ + struct bm_rcr *rcr = &portal->rcr; + u8 diff, old_ci = rcr->ci; + + DPAA_ASSERT(rcr->cmode == bm_rcr_cce); + rcr->ci = bm_ce_in(portal, BM_CL_RCR_CI_CENA) & (BM_RCR_SIZE - 1); + bm_cl_invalidate(portal, BM_CL_RCR_CI_CENA); + diff = dpaa_cyc_diff(BM_RCR_SIZE, old_ci, rcr->ci); + rcr->available += diff; + return diff; +} + +static inline struct bm_rcr_entry *bm_rcr_start(struct bm_portal *portal) +{ + struct bm_rcr *rcr = &portal->rcr; + + DPAA_ASSERT(!rcr->busy); + if (!rcr->available) + return NULL; +#ifdef CONFIG_FSL_DPAA_CHECKING + rcr->busy = 1; +#endif + dpaa_zero(rcr->cursor); + return rcr->cursor; +} + +static inline void bm_rcr_pvb_commit(struct bm_portal *portal, u8 myverb) +{ + struct bm_rcr *rcr = &portal->rcr; + struct bm_rcr_entry *rcursor; + + DPAA_ASSERT(rcr->busy); + DPAA_ASSERT(rcr->pmode == bm_rcr_pvb); + DPAA_ASSERT(rcr->available >= 1); + dma_wmb(); + rcursor = rcr->cursor; + rcursor->_ncw_verb = myverb | rcr->vbit; + dpaa_flush(rcursor); + rcr_inc(rcr); + rcr->available--; +#ifdef CONFIG_FSL_DPAA_CHECKING + rcr->busy = 0; +#endif +} + +static int bm_rcr_init(struct bm_portal *portal, enum bm_rcr_pmode pmode, + enum bm_rcr_cmode cmode) +{ + struct bm_rcr *rcr = &portal->rcr; + u32 cfg; + u8 pi; + + rcr->ring = portal->addr.ce + BM_CL_RCR; + rcr->ci = bm_in(portal, BM_REG_RCR_CI_CINH) & (BM_RCR_SIZE - 1); + pi = bm_in(portal, BM_REG_RCR_PI_CINH) & (BM_RCR_SIZE - 1); + rcr->cursor = rcr->ring + pi; + rcr->vbit = (bm_in(portal, BM_REG_RCR_PI_CINH) & BM_RCR_SIZE) ? + BM_RCR_VERB_VBIT : 0; + rcr->available = BM_RCR_SIZE - 1 + - dpaa_cyc_diff(BM_RCR_SIZE, rcr->ci, pi); + rcr->ithresh = bm_in(portal, BM_REG_RCR_ITR); +#ifdef CONFIG_FSL_DPAA_CHECKING + rcr->busy = 0; + rcr->pmode = pmode; + rcr->cmode = cmode; +#endif + cfg = (bm_in(portal, BM_REG_CFG) & 0xffffffe0) + | (pmode & 0x3); /* BCSP_CFG::RPM */ + bm_out(portal, BM_REG_CFG, cfg); + return 0; +} + +static void bm_rcr_finish(struct bm_portal *portal) +{ +#ifdef CONFIG_FSL_DPAA_CHECKING + struct bm_rcr *rcr = &portal->rcr; + int i; + + DPAA_ASSERT(!rcr->busy); + + i = bm_in(portal, BM_REG_RCR_PI_CINH) & (BM_RCR_SIZE - 1); + if (i != rcr_ptr2idx(rcr->cursor)) + pr_crit("losing uncommited RCR entries\n"); + + i = bm_in(portal, BM_REG_RCR_CI_CINH) & (BM_RCR_SIZE - 1); + if (i != rcr->ci) + pr_crit("missing existing RCR completions\n"); + if (rcr->ci != rcr_ptr2idx(rcr->cursor)) + pr_crit("RCR destroyed unquiesced\n"); +#endif +} + +/* --- Management command API --- */ +static int bm_mc_init(struct bm_portal *portal) +{ + struct bm_mc *mc = &portal->mc; + + mc->cr = portal->addr.ce + BM_CL_CR; + mc->rr = portal->addr.ce + BM_CL_RR0; + mc->rridx = (__raw_readb(&mc->cr->_ncw_verb) & BM_MCC_VERB_VBIT) ? + 0 : 1; + mc->vbit = mc->rridx ? 
BM_MCC_VERB_VBIT : 0; +#ifdef CONFIG_FSL_DPAA_CHECKING + mc->state = mc_idle; +#endif + return 0; +} + +static void bm_mc_finish(struct bm_portal *portal) +{ +#ifdef CONFIG_FSL_DPAA_CHECKING + struct bm_mc *mc = &portal->mc; + + DPAA_ASSERT(mc->state == mc_idle); + if (mc->state != mc_idle) + pr_crit("Losing incomplete MC command\n"); +#endif +} + +static inline struct bm_mc_command *bm_mc_start(struct bm_portal *portal) +{ + struct bm_mc *mc = &portal->mc; + + DPAA_ASSERT(mc->state == mc_idle); +#ifdef CONFIG_FSL_DPAA_CHECKING + mc->state = mc_user; +#endif + dpaa_zero(mc->cr); + return mc->cr; +} + +static inline void bm_mc_commit(struct bm_portal *portal, u8 myverb) +{ + struct bm_mc *mc = &portal->mc; + union bm_mc_result *rr = mc->rr + mc->rridx; + + DPAA_ASSERT(mc->state == mc_user); + dma_wmb(); + mc->cr->_ncw_verb = myverb | mc->vbit; + dpaa_flush(mc->cr); + dpaa_invalidate_touch_ro(rr); +#ifdef CONFIG_FSL_DPAA_CHECKING + mc->state = mc_hw; +#endif +} + +static inline union bm_mc_result *bm_mc_result(struct bm_portal *portal) +{ + struct bm_mc *mc = &portal->mc; + union bm_mc_result *rr = mc->rr + mc->rridx; + + DPAA_ASSERT(mc->state == mc_hw); + /* + * The inactive response register's verb byte always returns zero until + * its command is submitted and completed. This includes the valid-bit, + * in case you were wondering... + */ + if (!__raw_readb(&rr->verb)) { + dpaa_invalidate_touch_ro(rr); + return NULL; + } + mc->rridx ^= 1; + mc->vbit ^= BM_MCC_VERB_VBIT; +#ifdef CONFIG_FSL_DPAA_CHECKING + mc->state = mc_idle; +#endif + return rr; +} + +static inline int bm_mc_result_timeout(struct bm_portal *portal, + union bm_mc_result **mcr) +{ + int timeout = BM_MCR_TIMEOUT; + + do { + *mcr = bm_mc_result(portal); + if (*mcr) + break; + udelay(1); + } while (--timeout); + + return timeout; +} + +/* Disable all BSCN interrupts for the portal */ +static void bm_isr_bscn_disable(struct bm_portal *portal) +{ + bm_out(portal, BM_REG_SCN(0), 0); + bm_out(portal, BM_REG_SCN(1), 0); +} + +static int bman_create_portal(struct bman_portal *portal, + const struct bm_portal_config *c) +{ + struct bm_portal *p; + int ret; + + p = &portal->p; + /* + * prep the low-level portal struct with the mapped addresses from the + * config, everything that follows depends on it and "config" is more + * for (de)reference... + */ + p->addr.ce = c->addr_virt[DPAA_PORTAL_CE]; + p->addr.ci = c->addr_virt[DPAA_PORTAL_CI]; + if (bm_rcr_init(p, bm_rcr_pvb, bm_rcr_cce)) { + dev_err(c->dev, "RCR initialisation failed\n"); + goto fail_rcr; + } + if (bm_mc_init(p)) { + dev_err(c->dev, "MC initialisation failed\n"); + goto fail_mc; + } + /* + * Default to all BPIDs disabled, we enable as required at + * run-time. 
+ */ + bm_isr_bscn_disable(p); + + /* Write-to-clear any stale interrupt status bits */ + bm_out(p, BM_REG_ISDR, 0xffffffff); + portal->irq_sources = 0; + bm_out(p, BM_REG_IER, 0); + bm_out(p, BM_REG_ISR, 0xffffffff); + snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, c->cpu); + if (request_irq(c->irq, portal_isr, 0, portal->irqname, portal)) { + dev_err(c->dev, "request_irq() failed\n"); + goto fail_irq; + } + if (c->cpu != -1 && irq_can_set_affinity(c->irq) && + irq_set_affinity(c->irq, cpumask_of(c->cpu))) { + dev_err(c->dev, "irq_set_affinity() failed\n"); + goto fail_affinity; + } + + /* Need RCR to be empty before continuing */ + ret = bm_rcr_get_fill(p); + if (ret) { + dev_err(c->dev, "RCR unclean\n"); + goto fail_rcr_empty; + } + /* Success */ + portal->config = c; + + bm_out(p, BM_REG_ISDR, 0); + bm_out(p, BM_REG_IIR, 0); + + return 0; + +fail_rcr_empty: +fail_affinity: + free_irq(c->irq, portal); +fail_irq: + bm_mc_finish(p); +fail_mc: + bm_rcr_finish(p); +fail_rcr: + return -EIO; +} + +struct bman_portal *bman_create_affine_portal(const struct bm_portal_config *c) +{ + struct bman_portal *portal; + int err; + + portal = &per_cpu(bman_affine_portal, c->cpu); + err = bman_create_portal(portal, c); + if (err) + return NULL; + + spin_lock(&affine_mask_lock); + cpumask_set_cpu(c->cpu, &affine_mask); + spin_unlock(&affine_mask_lock); + + return portal; +} + +static u32 poll_portal_slow(struct bman_portal *p, u32 is) +{ + u32 ret = is; + + if (is & BM_PIRQ_RCRI) { + bm_rcr_cce_update(&p->p); + bm_rcr_set_ithresh(&p->p, 0); + bm_out(&p->p, BM_REG_ISR, BM_PIRQ_RCRI); + is &= ~BM_PIRQ_RCRI; + } + + /* There should be no status register bits left undefined */ + DPAA_ASSERT(!is); + return ret; +} + +int bman_p_irqsource_add(struct bman_portal *p, u32 bits) +{ + unsigned long irqflags; + + local_irq_save(irqflags); + set_bits(bits & BM_PIRQ_VISIBLE, &p->irq_sources); + bm_out(&p->p, BM_REG_IER, p->irq_sources); + local_irq_restore(irqflags); + return 0; +} + +static int bm_shutdown_pool(u32 bpid) +{ + struct bm_mc_command *bm_cmd; + union bm_mc_result *bm_res; + + while (1) { + struct bman_portal *p = get_affine_portal(); + /* Acquire buffers until empty */ + bm_cmd = bm_mc_start(&p->p); + bm_cmd->bpid = bpid; + bm_mc_commit(&p->p, BM_MCC_VERB_CMD_ACQUIRE | 1); + if (!bm_mc_result_timeout(&p->p, &bm_res)) { + put_affine_portal(); + pr_crit("BMan Acquire Command timedout\n"); + return -ETIMEDOUT; + } + if (!(bm_res->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT)) { + put_affine_portal(); + /* Pool is empty */ + return 0; + } + put_affine_portal(); + } + + return 0; +} + +struct gen_pool *bm_bpalloc; + +static int bm_alloc_bpid_range(u32 *result, u32 count) +{ + unsigned long addr; + + addr = gen_pool_alloc(bm_bpalloc, count); + if (!addr) + return -ENOMEM; + + *result = addr & ~DPAA_GENALLOC_OFF; + + return 0; +} + +static int bm_release_bpid(u32 bpid) +{ + int ret; + + ret = bm_shutdown_pool(bpid); + if (ret) { + pr_debug("BPID %d leaked\n", bpid); + return ret; + } + + gen_pool_free(bm_bpalloc, bpid | DPAA_GENALLOC_OFF, 1); + return 0; +} + +struct bman_pool *bman_new_pool(void) +{ + struct bman_pool *pool = NULL; + u32 bpid; + + if (bm_alloc_bpid_range(&bpid, 1)) + return NULL; + + pool = kmalloc(sizeof(*pool), GFP_KERNEL); + if (!pool) + goto err; + + pool->bpid = bpid; + + return pool; +err: + bm_release_bpid(bpid); + kfree(pool); + return NULL; +} +EXPORT_SYMBOL(bman_new_pool); + +void bman_free_pool(struct bman_pool *pool) +{ + bm_release_bpid(pool->bpid); + + kfree(pool); +} 
+EXPORT_SYMBOL(bman_free_pool); + +int bman_get_bpid(const struct bman_pool *pool) +{ + return pool->bpid; +} +EXPORT_SYMBOL(bman_get_bpid); + +static void update_rcr_ci(struct bman_portal *p, int avail) +{ + if (avail) + bm_rcr_cce_prefetch(&p->p); + else + bm_rcr_cce_update(&p->p); +} + +int bman_release(struct bman_pool *pool, const struct bm_buffer *bufs, u8 num) +{ + struct bman_portal *p; + struct bm_rcr_entry *r; + unsigned long irqflags; + int avail, timeout = 1000; /* 1ms */ + int i = num - 1; + + DPAA_ASSERT(num > 0 && num <= 8); + + do { + p = get_affine_portal(); + local_irq_save(irqflags); + avail = bm_rcr_get_avail(&p->p); + if (avail < 2) + update_rcr_ci(p, avail); + r = bm_rcr_start(&p->p); + local_irq_restore(irqflags); + put_affine_portal(); + if (likely(r)) + break; + + udelay(1); + } while (--timeout); + + if (unlikely(!timeout)) + return -ETIMEDOUT; + + p = get_affine_portal(); + local_irq_save(irqflags); + /* + * we can copy all but the first entry, as this can trigger badness + * with the valid-bit + */ + bm_buffer_set64(r->bufs, bm_buffer_get64(bufs)); + bm_buffer_set_bpid(r->bufs, pool->bpid); + if (i) + memcpy(&r->bufs[1], &bufs[1], i * sizeof(bufs[0])); + + bm_rcr_pvb_commit(&p->p, BM_RCR_VERB_CMD_BPID_SINGLE | + (num & BM_RCR_VERB_BUFCOUNT_MASK)); + + local_irq_restore(irqflags); + put_affine_portal(); + return 0; +} +EXPORT_SYMBOL(bman_release); + +int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num) +{ + struct bman_portal *p = get_affine_portal(); + struct bm_mc_command *mcc; + union bm_mc_result *mcr; + int ret; + + DPAA_ASSERT(num > 0 && num <= 8); + + mcc = bm_mc_start(&p->p); + mcc->bpid = pool->bpid; + bm_mc_commit(&p->p, BM_MCC_VERB_CMD_ACQUIRE | + (num & BM_MCC_VERB_ACQUIRE_BUFCOUNT)); + if (!bm_mc_result_timeout(&p->p, &mcr)) { + put_affine_portal(); + pr_crit("BMan Acquire Timeout\n"); + return -ETIMEDOUT; + } + ret = mcr->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT; + if (bufs) + memcpy(&bufs[0], &mcr->bufs[0], num * sizeof(bufs[0])); + + put_affine_portal(); + if (ret != num) + ret = -ENOMEM; + return ret; +} +EXPORT_SYMBOL(bman_acquire); + +const struct bm_portal_config * +bman_get_bm_portal_config(const struct bman_portal *portal) +{ + return portal->config; +} diff --git a/drivers/soc/fsl/qbman/bman_ccsr.c b/drivers/soc/fsl/qbman/bman_ccsr.c new file mode 100644 index 000000000000..9deb0524543f --- /dev/null +++ b/drivers/soc/fsl/qbman/bman_ccsr.c @@ -0,0 +1,263 @@ +/* Copyright (c) 2009 - 2016 Freescale Semiconductor, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. 
+ * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "bman_priv.h" + +u16 bman_ip_rev; +EXPORT_SYMBOL(bman_ip_rev); + +/* Register offsets */ +#define REG_FBPR_FPC 0x0800 +#define REG_ECSR 0x0a00 +#define REG_ECIR 0x0a04 +#define REG_EADR 0x0a08 +#define REG_EDATA(n) (0x0a10 + ((n) * 0x04)) +#define REG_SBEC(n) (0x0a80 + ((n) * 0x04)) +#define REG_IP_REV_1 0x0bf8 +#define REG_IP_REV_2 0x0bfc +#define REG_FBPR_BARE 0x0c00 +#define REG_FBPR_BAR 0x0c04 +#define REG_FBPR_AR 0x0c10 +#define REG_SRCIDR 0x0d04 +#define REG_LIODNR 0x0d08 +#define REG_ERR_ISR 0x0e00 +#define REG_ERR_IER 0x0e04 +#define REG_ERR_ISDR 0x0e08 + +/* Used by all error interrupt registers except 'inhibit' */ +#define BM_EIRQ_IVCI 0x00000010 /* Invalid Command Verb */ +#define BM_EIRQ_FLWI 0x00000008 /* FBPR Low Watermark */ +#define BM_EIRQ_MBEI 0x00000004 /* Multi-bit ECC Error */ +#define BM_EIRQ_SBEI 0x00000002 /* Single-bit ECC Error */ +#define BM_EIRQ_BSCN 0x00000001 /* pool State Change Notification */ + +struct bman_hwerr_txt { + u32 mask; + const char *txt; +}; + +static const struct bman_hwerr_txt bman_hwerr_txts[] = { + { BM_EIRQ_IVCI, "Invalid Command Verb" }, + { BM_EIRQ_FLWI, "FBPR Low Watermark" }, + { BM_EIRQ_MBEI, "Multi-bit ECC Error" }, + { BM_EIRQ_SBEI, "Single-bit ECC Error" }, + { BM_EIRQ_BSCN, "Pool State Change Notification" }, +}; + +/* Only trigger low water mark interrupt once only */ +#define BMAN_ERRS_TO_DISABLE BM_EIRQ_FLWI + +/* Pointer to the start of the BMan's CCSR space */ +static u32 __iomem *bm_ccsr_start; + +static inline u32 bm_ccsr_in(u32 offset) +{ + return ioread32be(bm_ccsr_start + offset/4); +} +static inline void bm_ccsr_out(u32 offset, u32 val) +{ + iowrite32be(val, bm_ccsr_start + offset/4); +} + +static void bm_get_version(u16 *id, u8 *major, u8 *minor) +{ + u32 v = bm_ccsr_in(REG_IP_REV_1); + *id = (v >> 16); + *major = (v >> 8) & 0xff; + *minor = v & 0xff; +} + +/* signal transactions for FBPRs with higher priority */ +#define FBPR_AR_RPRIO_HI BIT(30) + +static void bm_set_memory(u64 ba, u32 size) +{ + u32 exp = ilog2(size); + /* choke if size isn't within range */ + DPAA_ASSERT(size >= 4096 && size <= 1024*1024*1024 && + is_power_of_2(size)); + /* choke if '[e]ba' has lower-alignment than 'size' */ + DPAA_ASSERT(!(ba & (size - 1))); + bm_ccsr_out(REG_FBPR_BARE, upper_32_bits(ba)); + bm_ccsr_out(REG_FBPR_BAR, lower_32_bits(ba)); + bm_ccsr_out(REG_FBPR_AR, exp - 1); +} + +/* + * Location and size of BMan private memory + * + * Ideally we would use the DMA API to turn rmem->base into a DMA address + * (especially if iommu translations ever get involved). Unfortunately, the + * DMA API currently does not allow mapping anything that is not backed with + * a struct page. 
+ */ +static dma_addr_t fbpr_a; +static size_t fbpr_sz; + +static int bman_fbpr(struct reserved_mem *rmem) +{ + fbpr_a = rmem->base; + fbpr_sz = rmem->size; + + WARN_ON(!(fbpr_a && fbpr_sz)); + + return 0; +} +RESERVEDMEM_OF_DECLARE(bman_fbpr, "fsl,bman-fbpr", bman_fbpr); + +static irqreturn_t bman_isr(int irq, void *ptr) +{ + u32 isr_val, ier_val, ecsr_val, isr_mask, i; + struct device *dev = ptr; + + ier_val = bm_ccsr_in(REG_ERR_IER); + isr_val = bm_ccsr_in(REG_ERR_ISR); + ecsr_val = bm_ccsr_in(REG_ECSR); + isr_mask = isr_val & ier_val; + + if (!isr_mask) + return IRQ_NONE; + + for (i = 0; i < ARRAY_SIZE(bman_hwerr_txts); i++) { + if (bman_hwerr_txts[i].mask & isr_mask) { + dev_err_ratelimited(dev, "ErrInt: %s\n", + bman_hwerr_txts[i].txt); + if (bman_hwerr_txts[i].mask & ecsr_val) { + /* Re-arm error capture registers */ + bm_ccsr_out(REG_ECSR, ecsr_val); + } + if (bman_hwerr_txts[i].mask & BMAN_ERRS_TO_DISABLE) { + dev_dbg(dev, "Disabling error 0x%x\n", + bman_hwerr_txts[i].mask); + ier_val &= ~bman_hwerr_txts[i].mask; + bm_ccsr_out(REG_ERR_IER, ier_val); + } + } + } + bm_ccsr_out(REG_ERR_ISR, isr_val); + + return IRQ_HANDLED; +} + +static int fsl_bman_probe(struct platform_device *pdev) +{ + int ret, err_irq; + struct device *dev = &pdev->dev; + struct device_node *node = dev->of_node; + struct resource *res; + u16 id, bm_pool_cnt; + u8 major, minor; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(dev, "Can't get %s property 'IORESOURCE_MEM'\n", + node->full_name); + return -ENXIO; + } + bm_ccsr_start = devm_ioremap(dev, res->start, + res->end - res->start + 1); + if (!bm_ccsr_start) + return -ENXIO; + + bm_get_version(&id, &major, &minor); + if (major == 1 && minor == 0) { + bman_ip_rev = BMAN_REV10; + bm_pool_cnt = BM_POOL_MAX; + } else if (major == 2 && minor == 0) { + bman_ip_rev = BMAN_REV20; + bm_pool_cnt = 8; + } else if (major == 2 && minor == 1) { + bman_ip_rev = BMAN_REV21; + bm_pool_cnt = BM_POOL_MAX; + } else { + dev_err(dev, "Unknown Bman version:%04x,%02x,%02x\n", + id, major, minor); + return -ENODEV; + } + + bm_set_memory(fbpr_a, fbpr_sz); + + err_irq = platform_get_irq(pdev, 0); + if (err_irq <= 0) { + dev_info(dev, "Can't get %s IRQ\n", node->full_name); + return -ENODEV; + } + ret = devm_request_irq(dev, err_irq, bman_isr, IRQF_SHARED, "bman-err", + dev); + if (ret) { + dev_err(dev, "devm_request_irq() failed %d for '%s'\n", + ret, node->full_name); + return ret; + } + /* Disable Buffer Pool State Change */ + bm_ccsr_out(REG_ERR_ISDR, BM_EIRQ_BSCN); + /* + * Write-to-clear any stale bits, (eg. starvation being asserted prior + * to resource allocation during driver init). 
+ */ + bm_ccsr_out(REG_ERR_ISR, 0xffffffff); + /* Enable Error Interrupts */ + bm_ccsr_out(REG_ERR_IER, 0xffffffff); + + bm_bpalloc = devm_gen_pool_create(dev, 0, -1, "bman-bpalloc"); + if (IS_ERR(bm_bpalloc)) { + ret = PTR_ERR(bm_bpalloc); + dev_err(dev, "bman-bpalloc pool init failed (%d)\n", ret); + return ret; + } + + /* seed BMan resource pool */ + ret = gen_pool_add(bm_bpalloc, DPAA_GENALLOC_OFF, bm_pool_cnt, -1); + if (ret) { + dev_err(dev, "Failed to seed BPID range [%d..%d] (%d)\n", + 0, bm_pool_cnt - 1, ret); + return ret; + } + + return 0; +}; + +static const struct of_device_id fsl_bman_ids[] = { + { + .compatible = "fsl,bman", + }, + {} +}; + +static struct platform_driver fsl_bman_driver = { + .driver = { + .name = KBUILD_MODNAME, + .of_match_table = fsl_bman_ids, + .suppress_bind_attrs = true, + }, + .probe = fsl_bman_probe, +}; + +builtin_platform_driver(fsl_bman_driver); diff --git a/drivers/soc/fsl/qbman/bman_portal.c b/drivers/soc/fsl/qbman/bman_portal.c new file mode 100644 index 000000000000..6579cc18811a --- /dev/null +++ b/drivers/soc/fsl/qbman/bman_portal.c @@ -0,0 +1,219 @@ +/* Copyright 2008 - 2016 Freescale Semiconductor, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "bman_priv.h" + +static struct bman_portal *affine_bportals[NR_CPUS]; +static struct cpumask portal_cpus; +/* protect bman global registers and global data shared among portals */ +static DEFINE_SPINLOCK(bman_lock); + +static struct bman_portal *init_pcfg(struct bm_portal_config *pcfg) +{ + struct bman_portal *p = bman_create_affine_portal(pcfg); + + if (!p) { + dev_crit(pcfg->dev, "%s: Portal failure on cpu %d\n", + __func__, pcfg->cpu); + return NULL; + } + + bman_p_irqsource_add(p, BM_PIRQ_RCRI); + affine_bportals[pcfg->cpu] = p; + + dev_info(pcfg->dev, "Portal initialised, cpu %d\n", pcfg->cpu); + + return p; +} + +static void bman_offline_cpu(unsigned int cpu) +{ + struct bman_portal *p = affine_bportals[cpu]; + const struct bm_portal_config *pcfg; + + if (!p) + return; + + pcfg = bman_get_bm_portal_config(p); + if (!pcfg) + return; + + irq_set_affinity(pcfg->irq, cpumask_of(0)); +} + +static void bman_online_cpu(unsigned int cpu) +{ + struct bman_portal *p = affine_bportals[cpu]; + const struct bm_portal_config *pcfg; + + if (!p) + return; + + pcfg = bman_get_bm_portal_config(p); + if (!pcfg) + return; + + irq_set_affinity(pcfg->irq, cpumask_of(cpu)); +} + +static int bman_hotplug_cpu_callback(struct notifier_block *nfb, + unsigned long action, void *hcpu) +{ + unsigned int cpu = (unsigned long)hcpu; + + switch (action) { + case CPU_ONLINE: + case CPU_ONLINE_FROZEN: + bman_online_cpu(cpu); + break; + case CPU_DOWN_PREPARE: + case CPU_DOWN_PREPARE_FROZEN: + bman_offline_cpu(cpu); + } + + return NOTIFY_OK; +} + +static struct notifier_block bman_hotplug_cpu_notifier = { + .notifier_call = bman_hotplug_cpu_callback, +}; + +static int bman_portal_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *node = dev->of_node; + struct bm_portal_config *pcfg; + struct resource *addr_phys[2]; + void __iomem *va; + int irq, cpu; + + pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL); + if (!pcfg) + return -ENOMEM; + + pcfg->dev = dev; + + addr_phys[0] = platform_get_resource(pdev, IORESOURCE_MEM, + DPAA_PORTAL_CE); + if (!addr_phys[0]) { + dev_err(dev, "Can't get %s property 'reg::CE'\n", + node->full_name); + return -ENXIO; + } + + addr_phys[1] = platform_get_resource(pdev, IORESOURCE_MEM, + DPAA_PORTAL_CI); + if (!addr_phys[1]) { + dev_err(dev, "Can't get %s property 'reg::CI'\n", + node->full_name); + return -ENXIO; + } + + pcfg->cpu = -1; + + irq = platform_get_irq(pdev, 0); + if (irq <= 0) { + dev_err(dev, "Can't get %s IRQ'\n", node->full_name); + return -ENXIO; + } + pcfg->irq = irq; + + va = ioremap_prot(addr_phys[0]->start, resource_size(addr_phys[0]), 0); + if (!va) + goto err_ioremap1; + + pcfg->addr_virt[DPAA_PORTAL_CE] = va; + + va = ioremap_prot(addr_phys[1]->start, resource_size(addr_phys[1]), + _PAGE_GUARDED | _PAGE_NO_CACHE); + if (!va) + goto err_ioremap2; + + pcfg->addr_virt[DPAA_PORTAL_CI] = va; + + spin_lock(&bman_lock); + cpu = cpumask_next_zero(-1, &portal_cpus); + if (cpu >= nr_cpu_ids) { + /* unassigned portal, skip init */ + spin_unlock(&bman_lock); + return 0; + } + + cpumask_set_cpu(cpu, &portal_cpus); + spin_unlock(&bman_lock); + pcfg->cpu = cpu; + + if (!init_pcfg(pcfg)) + goto err_ioremap2; + + /* clear irq affinity if assigned cpu is offline */ + if (!cpu_online(cpu)) + bman_offline_cpu(cpu); + + return 0; + +err_ioremap2: + iounmap(pcfg->addr_virt[DPAA_PORTAL_CE]); +err_ioremap1: + dev_err(dev, "ioremap failed\n"); + return -ENXIO; +} + +static const struct of_device_id bman_portal_ids[] = { + { + 
.compatible = "fsl,bman-portal", + }, + {} +}; +MODULE_DEVICE_TABLE(of, bman_portal_ids); + +static struct platform_driver bman_portal_driver = { + .driver = { + .name = KBUILD_MODNAME, + .of_match_table = bman_portal_ids, + }, + .probe = bman_portal_probe, +}; + +static int __init bman_portal_driver_register(struct platform_driver *drv) +{ + int ret; + + ret = platform_driver_register(drv); + if (ret < 0) + return ret; + + register_hotcpu_notifier(&bman_hotplug_cpu_notifier); + + return 0; +} + +module_driver(bman_portal_driver, + bman_portal_driver_register, platform_driver_unregister); diff --git a/drivers/soc/fsl/qbman/bman_priv.h b/drivers/soc/fsl/qbman/bman_priv.h new file mode 100644 index 000000000000..f6896a2f6d90 --- /dev/null +++ b/drivers/soc/fsl/qbman/bman_priv.h @@ -0,0 +1,80 @@ +/* Copyright 2008 - 2016 Freescale Semiconductor, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include "dpaa_sys.h" + +#include + +/* Portal processing (interrupt) sources */ +#define BM_PIRQ_RCRI 0x00000002 /* RCR Ring (below threshold) */ + +/* Revision info (for errata and feature handling) */ +#define BMAN_REV10 0x0100 +#define BMAN_REV20 0x0200 +#define BMAN_REV21 0x0201 +extern u16 bman_ip_rev; /* 0 if uninitialised, otherwise BMAN_REVx */ + +extern struct gen_pool *bm_bpalloc; + +struct bm_portal_config { + /* + * Corenet portal addresses; + * [0]==cache-enabled, [1]==cache-inhibited. 
+ */ + void __iomem *addr_virt[2]; + /* Allow these to be joined in lists */ + struct list_head list; + struct device *dev; + /* User-visible portal configuration settings */ + /* portal is affined to this cpu */ + int cpu; + /* portal interrupt line */ + int irq; +}; + +struct bman_portal *bman_create_affine_portal( + const struct bm_portal_config *config); +/* + * The below bman_p_***() variant might be called in a situation that the cpu + * which the portal affine to is not online yet. + * @bman_portal specifies which portal the API will use. + */ +int bman_p_irqsource_add(struct bman_portal *p, u32 bits); + +/* + * Used by all portal interrupt registers except 'inhibit' + * This mask contains all the "irqsource" bits visible to API users + */ +#define BM_PIRQ_VISIBLE BM_PIRQ_RCRI + +const struct bm_portal_config * +bman_get_bm_portal_config(const struct bman_portal *portal); diff --git a/drivers/soc/fsl/qbman/dpaa_sys.h b/drivers/soc/fsl/qbman/dpaa_sys.h new file mode 100644 index 000000000000..b63fd72295c6 --- /dev/null +++ b/drivers/soc/fsl/qbman/dpaa_sys.h @@ -0,0 +1,103 @@ +/* Copyright 2008 - 2016 Freescale Semiconductor, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef __DPAA_SYS_H +#define __DPAA_SYS_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* For 2-element tables related to cache-inhibited and cache-enabled mappings */ +#define DPAA_PORTAL_CE 0 +#define DPAA_PORTAL_CI 1 + +#if (L1_CACHE_BYTES != 32) && (L1_CACHE_BYTES != 64) +#error "Unsupported Cacheline Size" +#endif + +static inline void dpaa_flush(void *p) +{ +#ifdef CONFIG_PPC + flush_dcache_range((unsigned long)p, (unsigned long)p+64); +#elif defined(CONFIG_ARM32) + __cpuc_flush_dcache_area(p, 64); +#elif defined(CONFIG_ARM64) + __flush_dcache_area(p, 64); +#endif +} + +#define dpaa_invalidate(p) dpaa_flush(p) + +#define dpaa_zero(p) memset(p, 0, 64) + +static inline void dpaa_touch_ro(void *p) +{ +#if (L1_CACHE_BYTES == 32) + prefetch(p+32); +#endif + prefetch(p); +} + +/* Commonly used combo */ +static inline void dpaa_invalidate_touch_ro(void *p) +{ + dpaa_invalidate(p); + dpaa_touch_ro(p); +} + + +#ifdef CONFIG_FSL_DPAA_CHECKING +#define DPAA_ASSERT(x) WARN_ON(!(x)) +#else +#define DPAA_ASSERT(x) +#endif + +/* cyclic helper for rings */ +static inline u8 dpaa_cyc_diff(u8 ringsize, u8 first, u8 last) +{ + /* 'first' is included, 'last' is excluded */ + if (first <= last) + return last - first; + return ringsize + last - first; +} + +/* Offset applied to genalloc pools due to zero being an error return */ +#define DPAA_GENALLOC_OFF 0x80000000 + +#endif /* __DPAA_SYS_H */ diff --git a/include/soc/fsl/bman.h b/include/soc/fsl/bman.h new file mode 100644 index 000000000000..eaaf56df4086 --- /dev/null +++ b/include/soc/fsl/bman.h @@ -0,0 +1,129 @@ +/* Copyright 2008 - 2016 Freescale Semiconductor, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef __FSL_BMAN_H +#define __FSL_BMAN_H + +/* wrapper for 48-bit buffers */ +struct bm_buffer { + union { + struct { + __be16 bpid; /* hi 8-bits reserved */ + __be16 hi; /* High 16-bits of 48-bit address */ + __be32 lo; /* Low 32-bits of 48-bit address */ + }; + __be64 data; + }; +} __aligned(8); +/* + * Restore the 48 bit address previously stored in BMan + * hardware pools as a dma_addr_t + */ +static inline dma_addr_t bm_buf_addr(const struct bm_buffer *buf) +{ + return be64_to_cpu(buf->data) & 0xffffffffffffLLU; +} + +static inline u64 bm_buffer_get64(const struct bm_buffer *buf) +{ + return be64_to_cpu(buf->data) & 0xffffffffffffLLU; +} + +static inline void bm_buffer_set64(struct bm_buffer *buf, u64 addr) +{ + buf->hi = cpu_to_be16(upper_32_bits(addr)); + buf->lo = cpu_to_be32(lower_32_bits(addr)); +} + +static inline u8 bm_buffer_get_bpid(const struct bm_buffer *buf) +{ + return be16_to_cpu(buf->bpid) & 0xff; +} + +static inline void bm_buffer_set_bpid(struct bm_buffer *buf, int bpid) +{ + buf->bpid = cpu_to_be16(bpid & 0xff); +} + +/* Managed portal, high-level i/face */ + +/* Portal and Buffer Pools */ +struct bman_portal; +struct bman_pool; + +#define BM_POOL_MAX 64 /* max # of buffer pools */ + +/** + * bman_new_pool - Allocates a Buffer Pool object + * + * Creates a pool object, and returns a reference to it or NULL on error. + */ +struct bman_pool *bman_new_pool(void); + +/** + * bman_free_pool - Deallocates a Buffer Pool object + * @pool: the pool object to release + */ +void bman_free_pool(struct bman_pool *pool); + +/** + * bman_get_bpid - Returns a pool object's BPID. + * @pool: the pool object + * + * The returned value is the index of the encapsulated buffer pool, + * in the range of [0, @BM_POOL_MAX-1]. + */ +int bman_get_bpid(const struct bman_pool *pool); + +/** + * bman_release - Release buffer(s) to the buffer pool + * @pool: the buffer pool object to release to + * @bufs: an array of buffers to release + * @num: the number of buffers in @bufs (1-8) + * + * Adds the given buffers to RCR entries. If the RCR ring is unresponsive, + * the function will return -ETIMEDOUT. Otherwise, it returns zero. + */ +int bman_release(struct bman_pool *pool, const struct bm_buffer *bufs, u8 num); + +/** + * bman_acquire - Acquire buffer(s) from a buffer pool + * @pool: the buffer pool object to acquire from + * @bufs: array for storing the acquired buffers + * @num: the number of buffers desired (@bufs is at least this big) + * + * Issues an "Acquire" command via the portal's management command interface. + * The return value will be the number of buffers obtained from the pool, or a + * negative error code if a h/w error or pool starvation was encountered. In + * the latter case, the content of @bufs is undefined. + */ +int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num); + +#endif /* __FSL_BMAN_H */ -- cgit v1.2.3-59-g8ed1b From c535e923bb97a4b361e89a6383693482057f8b0c Mon Sep 17 00:00:00 2001 From: Claudiu Manoil Date: Thu, 22 Sep 2016 18:04:09 +0300 Subject: soc/fsl: Introduce DPAA 1.x QMan device driver This driver enables the Freescale DPAA 1.x Queue Manager block. QMan is a hardware accelerator that manages frame queues. It allows CPUs and other accelerators connected to the SoC datapath to enqueue and dequeue ethernet frames, thus providing the infrastructure for data exchange among CPUs and datapath accelerators. 
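For orientation, a consumer typically embeds a struct qman_fq, points its
cb.dqrr hook at a handler, and then creates and schedules the queue. The
sketch below is illustrative only: my_dqrr_cb, my_fq and my_open_fq are
made-up names, and it assumes the qman_cb_dqrr_consume result code from the
new include/soc/fsl/qman.h header; the create/init calls and flags are the
ones added by this patch.

    static enum qman_cb_dqrr_result my_dqrr_cb(struct qman_portal *portal,
                                               struct qman_fq *fq,
                                               const struct qm_dqrr_entry *dq)
    {
            /* process the dequeued frame, then let the driver consume it */
            return qman_cb_dqrr_consume;
    }

    static struct qman_fq my_fq = {
            .cb.dqrr = my_dqrr_cb,
    };

    static int my_open_fq(void)
    {
            /* allocate a dynamic FQID, then schedule it on the local portal */
            int err = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID, &my_fq);

            if (err)
                    return err;
            return qman_init_fq(&my_fq, QMAN_INITFQ_FLAG_SCHED |
                                        QMAN_INITFQ_FLAG_LOCAL, NULL);
    }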
Signed-off-by: Roy Pledge Signed-off-by: Claudiu Manoil Signed-off-by: Scott Wood --- drivers/soc/fsl/qbman/Makefile | 5 +- drivers/soc/fsl/qbman/qman.c | 2881 +++++++++++++++++++++++++++++++++++ drivers/soc/fsl/qbman/qman_ccsr.c | 808 ++++++++++ drivers/soc/fsl/qbman/qman_portal.c | 355 +++++ drivers/soc/fsl/qbman/qman_priv.h | 371 +++++ include/soc/fsl/qman.h | 1074 +++++++++++++ 6 files changed, 5492 insertions(+), 2 deletions(-) create mode 100644 drivers/soc/fsl/qbman/qman.c create mode 100644 drivers/soc/fsl/qbman/qman_ccsr.c create mode 100644 drivers/soc/fsl/qbman/qman_portal.c create mode 100644 drivers/soc/fsl/qbman/qman_priv.h create mode 100644 include/soc/fsl/qman.h (limited to 'drivers') diff --git a/drivers/soc/fsl/qbman/Makefile b/drivers/soc/fsl/qbman/Makefile index 855c3ac0f89e..6e0ee30b2834 100644 --- a/drivers/soc/fsl/qbman/Makefile +++ b/drivers/soc/fsl/qbman/Makefile @@ -1,2 +1,3 @@ -obj-$(CONFIG_FSL_DPAA) += bman_ccsr.o bman_portal.o \ - bman.o +obj-$(CONFIG_FSL_DPAA) += bman_ccsr.o qman_ccsr.o \ + bman_portal.o qman_portal.o \ + bman.o qman.o diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c new file mode 100644 index 000000000000..119054bc922b --- /dev/null +++ b/drivers/soc/fsl/qbman/qman.c @@ -0,0 +1,2881 @@ +/* Copyright 2008 - 2016 Freescale Semiconductor, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "qman_priv.h" + +#define DQRR_MAXFILL 15 +#define EQCR_ITHRESH 4 /* if EQCR congests, interrupt threshold */ +#define IRQNAME "QMan portal %d" +#define MAX_IRQNAME 16 /* big enough for "QMan portal %d" */ +#define QMAN_POLL_LIMIT 32 +#define QMAN_PIRQ_DQRR_ITHRESH 12 +#define QMAN_PIRQ_MR_ITHRESH 4 +#define QMAN_PIRQ_IPERIOD 100 + +/* Portal register assists */ + +/* Cache-inhibited register offsets */ +#define QM_REG_EQCR_PI_CINH 0x0000 +#define QM_REG_EQCR_CI_CINH 0x0004 +#define QM_REG_EQCR_ITR 0x0008 +#define QM_REG_DQRR_PI_CINH 0x0040 +#define QM_REG_DQRR_CI_CINH 0x0044 +#define QM_REG_DQRR_ITR 0x0048 +#define QM_REG_DQRR_DCAP 0x0050 +#define QM_REG_DQRR_SDQCR 0x0054 +#define QM_REG_DQRR_VDQCR 0x0058 +#define QM_REG_DQRR_PDQCR 0x005c +#define QM_REG_MR_PI_CINH 0x0080 +#define QM_REG_MR_CI_CINH 0x0084 +#define QM_REG_MR_ITR 0x0088 +#define QM_REG_CFG 0x0100 +#define QM_REG_ISR 0x0e00 +#define QM_REG_IER 0x0e04 +#define QM_REG_ISDR 0x0e08 +#define QM_REG_IIR 0x0e0c +#define QM_REG_ITPR 0x0e14 + +/* Cache-enabled register offsets */ +#define QM_CL_EQCR 0x0000 +#define QM_CL_DQRR 0x1000 +#define QM_CL_MR 0x2000 +#define QM_CL_EQCR_PI_CENA 0x3000 +#define QM_CL_EQCR_CI_CENA 0x3100 +#define QM_CL_DQRR_PI_CENA 0x3200 +#define QM_CL_DQRR_CI_CENA 0x3300 +#define QM_CL_MR_PI_CENA 0x3400 +#define QM_CL_MR_CI_CENA 0x3500 +#define QM_CL_CR 0x3800 +#define QM_CL_RR0 0x3900 +#define QM_CL_RR1 0x3940 + +/* + * BTW, the drivers (and h/w programming model) already obtain the required + * synchronisation for portal accesses and data-dependencies. Use of barrier()s + * or other order-preserving primitives simply degrade performance. Hence the + * use of the __raw_*() interfaces, which simply ensure that the compiler treats + * the portal registers as volatile + */ + +/* Cache-enabled ring access */ +#define qm_cl(base, idx) ((void *)base + ((idx) << 6)) + +/* + * Portal modes. + * Enum types; + * pmode == production mode + * cmode == consumption mode, + * dmode == h/w dequeue mode. + * Enum values use 3 letter codes. First letter matches the portal mode, + * remaining two letters indicate; + * ci == cache-inhibited portal register + * ce == cache-enabled portal register + * vb == in-band valid-bit (cache-enabled) + * dc == DCA (Discrete Consumption Acknowledgment), DQRR-only + * As for "enum qm_dqrr_dmode", it should be self-explanatory. 
+ */ +enum qm_eqcr_pmode { /* matches QCSP_CFG::EPM */ + qm_eqcr_pci = 0, /* PI index, cache-inhibited */ + qm_eqcr_pce = 1, /* PI index, cache-enabled */ + qm_eqcr_pvb = 2 /* valid-bit */ +}; +enum qm_dqrr_dmode { /* matches QCSP_CFG::DP */ + qm_dqrr_dpush = 0, /* SDQCR + VDQCR */ + qm_dqrr_dpull = 1 /* PDQCR */ +}; +enum qm_dqrr_pmode { /* s/w-only */ + qm_dqrr_pci, /* reads DQRR_PI_CINH */ + qm_dqrr_pce, /* reads DQRR_PI_CENA */ + qm_dqrr_pvb /* reads valid-bit */ +}; +enum qm_dqrr_cmode { /* matches QCSP_CFG::DCM */ + qm_dqrr_cci = 0, /* CI index, cache-inhibited */ + qm_dqrr_cce = 1, /* CI index, cache-enabled */ + qm_dqrr_cdc = 2 /* Discrete Consumption Acknowledgment */ +}; +enum qm_mr_pmode { /* s/w-only */ + qm_mr_pci, /* reads MR_PI_CINH */ + qm_mr_pce, /* reads MR_PI_CENA */ + qm_mr_pvb /* reads valid-bit */ +}; +enum qm_mr_cmode { /* matches QCSP_CFG::MM */ + qm_mr_cci = 0, /* CI index, cache-inhibited */ + qm_mr_cce = 1 /* CI index, cache-enabled */ +}; + +/* --- Portal structures --- */ + +#define QM_EQCR_SIZE 8 +#define QM_DQRR_SIZE 16 +#define QM_MR_SIZE 8 + +/* "Enqueue Command" */ +struct qm_eqcr_entry { + u8 _ncw_verb; /* writes to this are non-coherent */ + u8 dca; + u16 seqnum; + u32 orp; /* 24-bit */ + u32 fqid; /* 24-bit */ + u32 tag; + struct qm_fd fd; + u8 __reserved3[32]; +} __packed; +#define QM_EQCR_VERB_VBIT 0x80 +#define QM_EQCR_VERB_CMD_MASK 0x61 /* but only one value; */ +#define QM_EQCR_VERB_CMD_ENQUEUE 0x01 +#define QM_EQCR_SEQNUM_NESN 0x8000 /* Advance NESN */ +#define QM_EQCR_SEQNUM_NLIS 0x4000 /* More fragments to come */ +#define QM_EQCR_SEQNUM_SEQMASK 0x3fff /* sequence number goes here */ + +struct qm_eqcr { + struct qm_eqcr_entry *ring, *cursor; + u8 ci, available, ithresh, vbit; +#ifdef CONFIG_FSL_DPAA_CHECKING + u32 busy; + enum qm_eqcr_pmode pmode; +#endif +}; + +struct qm_dqrr { + const struct qm_dqrr_entry *ring, *cursor; + u8 pi, ci, fill, ithresh, vbit; +#ifdef CONFIG_FSL_DPAA_CHECKING + enum qm_dqrr_dmode dmode; + enum qm_dqrr_pmode pmode; + enum qm_dqrr_cmode cmode; +#endif +}; + +struct qm_mr { + union qm_mr_entry *ring, *cursor; + u8 pi, ci, fill, ithresh, vbit; +#ifdef CONFIG_FSL_DPAA_CHECKING + enum qm_mr_pmode pmode; + enum qm_mr_cmode cmode; +#endif +}; + +/* MC (Management Command) command */ +/* "Query FQ" */ +struct qm_mcc_queryfq { + u8 _ncw_verb; + u8 __reserved1[3]; + u32 fqid; /* 24-bit */ + u8 __reserved2[56]; +} __packed; +/* "Alter FQ State Commands " */ +struct qm_mcc_alterfq { + u8 _ncw_verb; + u8 __reserved1[3]; + u32 fqid; /* 24-bit */ + u8 __reserved2; + u8 count; /* number of consecutive FQID */ + u8 __reserved3[10]; + u32 context_b; /* frame queue context b */ + u8 __reserved4[40]; +} __packed; + +/* "Query CGR" */ +struct qm_mcc_querycgr { + u8 _ncw_verb; + u8 __reserved1[30]; + u8 cgid; + u8 __reserved2[32]; +}; + +struct qm_mcc_querywq { + u8 _ncw_verb; + u8 __reserved; + /* select channel if verb != QUERYWQ_DEDICATED */ + u16 channel_wq; /* ignores wq (3 lsbits): _res[0-2] */ + u8 __reserved2[60]; +} __packed; + +#define QM_MCC_VERB_VBIT 0x80 +#define QM_MCC_VERB_MASK 0x7f /* where the verb contains; */ +#define QM_MCC_VERB_INITFQ_PARKED 0x40 +#define QM_MCC_VERB_INITFQ_SCHED 0x41 +#define QM_MCC_VERB_QUERYFQ 0x44 +#define QM_MCC_VERB_QUERYFQ_NP 0x45 /* "non-programmable" fields */ +#define QM_MCC_VERB_QUERYWQ 0x46 +#define QM_MCC_VERB_QUERYWQ_DEDICATED 0x47 +#define QM_MCC_VERB_ALTER_SCHED 0x48 /* Schedule FQ */ +#define QM_MCC_VERB_ALTER_FE 0x49 /* Force Eligible FQ */ +#define QM_MCC_VERB_ALTER_RETIRE 0x4a /* 
Retire FQ */ +#define QM_MCC_VERB_ALTER_OOS 0x4b /* Take FQ out of service */ +#define QM_MCC_VERB_ALTER_FQXON 0x4d /* FQ XON */ +#define QM_MCC_VERB_ALTER_FQXOFF 0x4e /* FQ XOFF */ +#define QM_MCC_VERB_INITCGR 0x50 +#define QM_MCC_VERB_MODIFYCGR 0x51 +#define QM_MCC_VERB_CGRTESTWRITE 0x52 +#define QM_MCC_VERB_QUERYCGR 0x58 +#define QM_MCC_VERB_QUERYCONGESTION 0x59 +union qm_mc_command { + struct { + u8 _ncw_verb; /* writes to this are non-coherent */ + u8 __reserved[63]; + }; + struct qm_mcc_initfq initfq; + struct qm_mcc_queryfq queryfq; + struct qm_mcc_alterfq alterfq; + struct qm_mcc_initcgr initcgr; + struct qm_mcc_querycgr querycgr; + struct qm_mcc_querywq querywq; + struct qm_mcc_queryfq_np queryfq_np; +}; + +/* MC (Management Command) result */ +/* "Query FQ" */ +struct qm_mcr_queryfq { + u8 verb; + u8 result; + u8 __reserved1[8]; + struct qm_fqd fqd; /* the FQD fields are here */ + u8 __reserved2[30]; +} __packed; + +/* "Alter FQ State Commands" */ +struct qm_mcr_alterfq { + u8 verb; + u8 result; + u8 fqs; /* Frame Queue Status */ + u8 __reserved1[61]; +}; +#define QM_MCR_VERB_RRID 0x80 +#define QM_MCR_VERB_MASK QM_MCC_VERB_MASK +#define QM_MCR_VERB_INITFQ_PARKED QM_MCC_VERB_INITFQ_PARKED +#define QM_MCR_VERB_INITFQ_SCHED QM_MCC_VERB_INITFQ_SCHED +#define QM_MCR_VERB_QUERYFQ QM_MCC_VERB_QUERYFQ +#define QM_MCR_VERB_QUERYFQ_NP QM_MCC_VERB_QUERYFQ_NP +#define QM_MCR_VERB_QUERYWQ QM_MCC_VERB_QUERYWQ +#define QM_MCR_VERB_QUERYWQ_DEDICATED QM_MCC_VERB_QUERYWQ_DEDICATED +#define QM_MCR_VERB_ALTER_SCHED QM_MCC_VERB_ALTER_SCHED +#define QM_MCR_VERB_ALTER_FE QM_MCC_VERB_ALTER_FE +#define QM_MCR_VERB_ALTER_RETIRE QM_MCC_VERB_ALTER_RETIRE +#define QM_MCR_VERB_ALTER_OOS QM_MCC_VERB_ALTER_OOS +#define QM_MCR_RESULT_NULL 0x00 +#define QM_MCR_RESULT_OK 0xf0 +#define QM_MCR_RESULT_ERR_FQID 0xf1 +#define QM_MCR_RESULT_ERR_FQSTATE 0xf2 +#define QM_MCR_RESULT_ERR_NOTEMPTY 0xf3 /* OOS fails if FQ is !empty */ +#define QM_MCR_RESULT_ERR_BADCHANNEL 0xf4 +#define QM_MCR_RESULT_PENDING 0xf8 +#define QM_MCR_RESULT_ERR_BADCOMMAND 0xff +#define QM_MCR_FQS_ORLPRESENT 0x02 /* ORL fragments to come */ +#define QM_MCR_FQS_NOTEMPTY 0x01 /* FQ has enqueued frames */ +#define QM_MCR_TIMEOUT 10000 /* us */ +union qm_mc_result { + struct { + u8 verb; + u8 result; + u8 __reserved1[62]; + }; + struct qm_mcr_queryfq queryfq; + struct qm_mcr_alterfq alterfq; + struct qm_mcr_querycgr querycgr; + struct qm_mcr_querycongestion querycongestion; + struct qm_mcr_querywq querywq; + struct qm_mcr_queryfq_np queryfq_np; +}; + +struct qm_mc { + union qm_mc_command *cr; + union qm_mc_result *rr; + u8 rridx, vbit; +#ifdef CONFIG_FSL_DPAA_CHECKING + enum { + /* Can be _mc_start()ed */ + qman_mc_idle, + /* Can be _mc_commit()ed or _mc_abort()ed */ + qman_mc_user, + /* Can only be _mc_retry()ed */ + qman_mc_hw + } state; +#endif +}; + +struct qm_addr { + void __iomem *ce; /* cache-enabled */ + void __iomem *ci; /* cache-inhibited */ +}; + +struct qm_portal { + /* + * In the non-CONFIG_FSL_DPAA_CHECKING case, the following stuff up to + * and including 'mc' fits within a cacheline (yay!). The 'config' part + * is setup-only, so isn't a cause for a concern. In other words, don't + * rearrange this structure on a whim, there be dragons ... + */ + struct qm_addr addr; + struct qm_eqcr eqcr; + struct qm_dqrr dqrr; + struct qm_mr mr; + struct qm_mc mc; +} ____cacheline_aligned; + +/* Cache-inhibited register access. 
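+ * qm_in() and qm_out() below are plain 32-bit reads and writes of the
+ * cache-inhibited (CINH) region at the given register offset.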
*/ +static inline u32 qm_in(struct qm_portal *p, u32 offset) +{ + return __raw_readl(p->addr.ci + offset); +} + +static inline void qm_out(struct qm_portal *p, u32 offset, u32 val) +{ + __raw_writel(val, p->addr.ci + offset); +} + +/* Cache Enabled Portal Access */ +static inline void qm_cl_invalidate(struct qm_portal *p, u32 offset) +{ + dpaa_invalidate(p->addr.ce + offset); +} + +static inline void qm_cl_touch_ro(struct qm_portal *p, u32 offset) +{ + dpaa_touch_ro(p->addr.ce + offset); +} + +static inline u32 qm_ce_in(struct qm_portal *p, u32 offset) +{ + return __raw_readl(p->addr.ce + offset); +} + +/* --- EQCR API --- */ + +#define EQCR_SHIFT ilog2(sizeof(struct qm_eqcr_entry)) +#define EQCR_CARRY (uintptr_t)(QM_EQCR_SIZE << EQCR_SHIFT) + +/* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */ +static struct qm_eqcr_entry *eqcr_carryclear(struct qm_eqcr_entry *p) +{ + uintptr_t addr = (uintptr_t)p; + + addr &= ~EQCR_CARRY; + + return (struct qm_eqcr_entry *)addr; +} + +/* Bit-wise logic to convert a ring pointer to a ring index */ +static int eqcr_ptr2idx(struct qm_eqcr_entry *e) +{ + return ((uintptr_t)e >> EQCR_SHIFT) & (QM_EQCR_SIZE - 1); +} + +/* Increment the 'cursor' ring pointer, taking 'vbit' into account */ +static inline void eqcr_inc(struct qm_eqcr *eqcr) +{ + /* increment to the next EQCR pointer and handle overflow and 'vbit' */ + struct qm_eqcr_entry *partial = eqcr->cursor + 1; + + eqcr->cursor = eqcr_carryclear(partial); + if (partial != eqcr->cursor) + eqcr->vbit ^= QM_EQCR_VERB_VBIT; +} + +static inline int qm_eqcr_init(struct qm_portal *portal, + enum qm_eqcr_pmode pmode, + unsigned int eq_stash_thresh, + int eq_stash_prio) +{ + struct qm_eqcr *eqcr = &portal->eqcr; + u32 cfg; + u8 pi; + + eqcr->ring = portal->addr.ce + QM_CL_EQCR; + eqcr->ci = qm_in(portal, QM_REG_EQCR_CI_CINH) & (QM_EQCR_SIZE - 1); + qm_cl_invalidate(portal, QM_CL_EQCR_CI_CENA); + pi = qm_in(portal, QM_REG_EQCR_PI_CINH) & (QM_EQCR_SIZE - 1); + eqcr->cursor = eqcr->ring + pi; + eqcr->vbit = (qm_in(portal, QM_REG_EQCR_PI_CINH) & QM_EQCR_SIZE) ? 
+ QM_EQCR_VERB_VBIT : 0; + eqcr->available = QM_EQCR_SIZE - 1 - + dpaa_cyc_diff(QM_EQCR_SIZE, eqcr->ci, pi); + eqcr->ithresh = qm_in(portal, QM_REG_EQCR_ITR); +#ifdef CONFIG_FSL_DPAA_CHECKING + eqcr->busy = 0; + eqcr->pmode = pmode; +#endif + cfg = (qm_in(portal, QM_REG_CFG) & 0x00ffffff) | + (eq_stash_thresh << 28) | /* QCSP_CFG: EST */ + (eq_stash_prio << 26) | /* QCSP_CFG: EP */ + ((pmode & 0x3) << 24); /* QCSP_CFG::EPM */ + qm_out(portal, QM_REG_CFG, cfg); + return 0; +} + +static inline unsigned int qm_eqcr_get_ci_stashing(struct qm_portal *portal) +{ + return (qm_in(portal, QM_REG_CFG) >> 28) & 0x7; +} + +static inline void qm_eqcr_finish(struct qm_portal *portal) +{ + struct qm_eqcr *eqcr = &portal->eqcr; + u8 pi = qm_in(portal, QM_REG_EQCR_PI_CINH) & (QM_EQCR_SIZE - 1); + u8 ci = qm_in(portal, QM_REG_EQCR_CI_CINH) & (QM_EQCR_SIZE - 1); + + DPAA_ASSERT(!eqcr->busy); + if (pi != eqcr_ptr2idx(eqcr->cursor)) + pr_crit("losing uncommited EQCR entries\n"); + if (ci != eqcr->ci) + pr_crit("missing existing EQCR completions\n"); + if (eqcr->ci != eqcr_ptr2idx(eqcr->cursor)) + pr_crit("EQCR destroyed unquiesced\n"); +} + +static inline struct qm_eqcr_entry *qm_eqcr_start_no_stash(struct qm_portal + *portal) +{ + struct qm_eqcr *eqcr = &portal->eqcr; + + DPAA_ASSERT(!eqcr->busy); + if (!eqcr->available) + return NULL; + +#ifdef CONFIG_FSL_DPAA_CHECKING + eqcr->busy = 1; +#endif + dpaa_zero(eqcr->cursor); + return eqcr->cursor; +} + +static inline struct qm_eqcr_entry *qm_eqcr_start_stash(struct qm_portal + *portal) +{ + struct qm_eqcr *eqcr = &portal->eqcr; + u8 diff, old_ci; + + DPAA_ASSERT(!eqcr->busy); + if (!eqcr->available) { + old_ci = eqcr->ci; + eqcr->ci = qm_ce_in(portal, QM_CL_EQCR_CI_CENA) & + (QM_EQCR_SIZE - 1); + diff = dpaa_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci); + eqcr->available += diff; + if (!diff) + return NULL; + } +#ifdef CONFIG_FSL_DPAA_CHECKING + eqcr->busy = 1; +#endif + dpaa_zero(eqcr->cursor); + return eqcr->cursor; +} + +static inline void eqcr_commit_checks(struct qm_eqcr *eqcr) +{ + DPAA_ASSERT(eqcr->busy); + DPAA_ASSERT(eqcr->cursor->orp == (eqcr->cursor->orp & 0x00ffffff)); + DPAA_ASSERT(eqcr->cursor->fqid == (eqcr->cursor->fqid & 0x00ffffff)); + DPAA_ASSERT(eqcr->available >= 1); +} + +static inline void qm_eqcr_pvb_commit(struct qm_portal *portal, u8 myverb) +{ + struct qm_eqcr *eqcr = &portal->eqcr; + struct qm_eqcr_entry *eqcursor; + + eqcr_commit_checks(eqcr); + DPAA_ASSERT(eqcr->pmode == qm_eqcr_pvb); + dma_wmb(); + eqcursor = eqcr->cursor; + eqcursor->_ncw_verb = myverb | eqcr->vbit; + dpaa_flush(eqcursor); + eqcr_inc(eqcr); + eqcr->available--; +#ifdef CONFIG_FSL_DPAA_CHECKING + eqcr->busy = 0; +#endif +} + +static inline void qm_eqcr_cce_prefetch(struct qm_portal *portal) +{ + qm_cl_touch_ro(portal, QM_CL_EQCR_CI_CENA); +} + +static inline u8 qm_eqcr_cce_update(struct qm_portal *portal) +{ + struct qm_eqcr *eqcr = &portal->eqcr; + u8 diff, old_ci = eqcr->ci; + + eqcr->ci = qm_ce_in(portal, QM_CL_EQCR_CI_CENA) & (QM_EQCR_SIZE - 1); + qm_cl_invalidate(portal, QM_CL_EQCR_CI_CENA); + diff = dpaa_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci); + eqcr->available += diff; + return diff; +} + +static inline void qm_eqcr_set_ithresh(struct qm_portal *portal, u8 ithresh) +{ + struct qm_eqcr *eqcr = &portal->eqcr; + + eqcr->ithresh = ithresh; + qm_out(portal, QM_REG_EQCR_ITR, ithresh); +} + +static inline u8 qm_eqcr_get_avail(struct qm_portal *portal) +{ + struct qm_eqcr *eqcr = &portal->eqcr; + + return eqcr->available; +} + +static inline u8 qm_eqcr_get_fill(struct 
qm_portal *portal) +{ + struct qm_eqcr *eqcr = &portal->eqcr; + + return QM_EQCR_SIZE - 1 - eqcr->available; +} + +/* --- DQRR API --- */ + +#define DQRR_SHIFT ilog2(sizeof(struct qm_dqrr_entry)) +#define DQRR_CARRY (uintptr_t)(QM_DQRR_SIZE << DQRR_SHIFT) + +static const struct qm_dqrr_entry *dqrr_carryclear( + const struct qm_dqrr_entry *p) +{ + uintptr_t addr = (uintptr_t)p; + + addr &= ~DQRR_CARRY; + + return (const struct qm_dqrr_entry *)addr; +} + +static inline int dqrr_ptr2idx(const struct qm_dqrr_entry *e) +{ + return ((uintptr_t)e >> DQRR_SHIFT) & (QM_DQRR_SIZE - 1); +} + +static const struct qm_dqrr_entry *dqrr_inc(const struct qm_dqrr_entry *e) +{ + return dqrr_carryclear(e + 1); +} + +static inline void qm_dqrr_set_maxfill(struct qm_portal *portal, u8 mf) +{ + qm_out(portal, QM_REG_CFG, (qm_in(portal, QM_REG_CFG) & 0xff0fffff) | + ((mf & (QM_DQRR_SIZE - 1)) << 20)); +} + +static inline int qm_dqrr_init(struct qm_portal *portal, + const struct qm_portal_config *config, + enum qm_dqrr_dmode dmode, + enum qm_dqrr_pmode pmode, + enum qm_dqrr_cmode cmode, u8 max_fill) +{ + struct qm_dqrr *dqrr = &portal->dqrr; + u32 cfg; + + /* Make sure the DQRR will be idle when we enable */ + qm_out(portal, QM_REG_DQRR_SDQCR, 0); + qm_out(portal, QM_REG_DQRR_VDQCR, 0); + qm_out(portal, QM_REG_DQRR_PDQCR, 0); + dqrr->ring = portal->addr.ce + QM_CL_DQRR; + dqrr->pi = qm_in(portal, QM_REG_DQRR_PI_CINH) & (QM_DQRR_SIZE - 1); + dqrr->ci = qm_in(portal, QM_REG_DQRR_CI_CINH) & (QM_DQRR_SIZE - 1); + dqrr->cursor = dqrr->ring + dqrr->ci; + dqrr->fill = dpaa_cyc_diff(QM_DQRR_SIZE, dqrr->ci, dqrr->pi); + dqrr->vbit = (qm_in(portal, QM_REG_DQRR_PI_CINH) & QM_DQRR_SIZE) ? + QM_DQRR_VERB_VBIT : 0; + dqrr->ithresh = qm_in(portal, QM_REG_DQRR_ITR); +#ifdef CONFIG_FSL_DPAA_CHECKING + dqrr->dmode = dmode; + dqrr->pmode = pmode; + dqrr->cmode = cmode; +#endif + /* Invalidate every ring entry before beginning */ + for (cfg = 0; cfg < QM_DQRR_SIZE; cfg++) + dpaa_invalidate(qm_cl(dqrr->ring, cfg)); + cfg = (qm_in(portal, QM_REG_CFG) & 0xff000f00) | + ((max_fill & (QM_DQRR_SIZE - 1)) << 20) | /* DQRR_MF */ + ((dmode & 1) << 18) | /* DP */ + ((cmode & 3) << 16) | /* DCM */ + 0xa0 | /* RE+SE */ + (0 ? 0x40 : 0) | /* Ignore RP */ + (0 ? 0x10 : 0); /* Ignore SP */ + qm_out(portal, QM_REG_CFG, cfg); + qm_dqrr_set_maxfill(portal, max_fill); + return 0; +} + +static inline void qm_dqrr_finish(struct qm_portal *portal) +{ +#ifdef CONFIG_FSL_DPAA_CHECKING + struct qm_dqrr *dqrr = &portal->dqrr; + + if (dqrr->cmode != qm_dqrr_cdc && + dqrr->ci != dqrr_ptr2idx(dqrr->cursor)) + pr_crit("Ignoring completed DQRR entries\n"); +#endif +} + +static inline const struct qm_dqrr_entry *qm_dqrr_current( + struct qm_portal *portal) +{ + struct qm_dqrr *dqrr = &portal->dqrr; + + if (!dqrr->fill) + return NULL; + return dqrr->cursor; +} + +static inline u8 qm_dqrr_next(struct qm_portal *portal) +{ + struct qm_dqrr *dqrr = &portal->dqrr; + + DPAA_ASSERT(dqrr->fill); + dqrr->cursor = dqrr_inc(dqrr->cursor); + return --dqrr->fill; +} + +static inline void qm_dqrr_pvb_update(struct qm_portal *portal) +{ + struct qm_dqrr *dqrr = &portal->dqrr; + struct qm_dqrr_entry *res = qm_cl(dqrr->ring, dqrr->pi); + + DPAA_ASSERT(dqrr->pmode == qm_dqrr_pvb); +#ifndef CONFIG_FSL_PAMU + /* + * If PAMU is not available we need to invalidate the cache. 
+ * When PAMU is available the cache is updated by stash + */ + dpaa_invalidate_touch_ro(res); +#endif + /* + * when accessing 'verb', use __raw_readb() to ensure that compiler + * inlining doesn't try to optimise out "excess reads". + */ + if ((__raw_readb(&res->verb) & QM_DQRR_VERB_VBIT) == dqrr->vbit) { + dqrr->pi = (dqrr->pi + 1) & (QM_DQRR_SIZE - 1); + if (!dqrr->pi) + dqrr->vbit ^= QM_DQRR_VERB_VBIT; + dqrr->fill++; + } +} + +static inline void qm_dqrr_cdc_consume_1ptr(struct qm_portal *portal, + const struct qm_dqrr_entry *dq, + int park) +{ + __maybe_unused struct qm_dqrr *dqrr = &portal->dqrr; + int idx = dqrr_ptr2idx(dq); + + DPAA_ASSERT(dqrr->cmode == qm_dqrr_cdc); + DPAA_ASSERT((dqrr->ring + idx) == dq); + DPAA_ASSERT(idx < QM_DQRR_SIZE); + qm_out(portal, QM_REG_DQRR_DCAP, (0 << 8) | /* DQRR_DCAP::S */ + ((park ? 1 : 0) << 6) | /* DQRR_DCAP::PK */ + idx); /* DQRR_DCAP::DCAP_CI */ +} + +static inline void qm_dqrr_cdc_consume_n(struct qm_portal *portal, u32 bitmask) +{ + __maybe_unused struct qm_dqrr *dqrr = &portal->dqrr; + + DPAA_ASSERT(dqrr->cmode == qm_dqrr_cdc); + qm_out(portal, QM_REG_DQRR_DCAP, (1 << 8) | /* DQRR_DCAP::S */ + (bitmask << 16)); /* DQRR_DCAP::DCAP_CI */ +} + +static inline void qm_dqrr_sdqcr_set(struct qm_portal *portal, u32 sdqcr) +{ + qm_out(portal, QM_REG_DQRR_SDQCR, sdqcr); +} + +static inline void qm_dqrr_vdqcr_set(struct qm_portal *portal, u32 vdqcr) +{ + qm_out(portal, QM_REG_DQRR_VDQCR, vdqcr); +} + +static inline void qm_dqrr_set_ithresh(struct qm_portal *portal, u8 ithresh) +{ + qm_out(portal, QM_REG_DQRR_ITR, ithresh); +} + +/* --- MR API --- */ + +#define MR_SHIFT ilog2(sizeof(union qm_mr_entry)) +#define MR_CARRY (uintptr_t)(QM_MR_SIZE << MR_SHIFT) + +static union qm_mr_entry *mr_carryclear(union qm_mr_entry *p) +{ + uintptr_t addr = (uintptr_t)p; + + addr &= ~MR_CARRY; + + return (union qm_mr_entry *)addr; +} + +static inline int mr_ptr2idx(const union qm_mr_entry *e) +{ + return ((uintptr_t)e >> MR_SHIFT) & (QM_MR_SIZE - 1); +} + +static inline union qm_mr_entry *mr_inc(union qm_mr_entry *e) +{ + return mr_carryclear(e + 1); +} + +static inline int qm_mr_init(struct qm_portal *portal, enum qm_mr_pmode pmode, + enum qm_mr_cmode cmode) +{ + struct qm_mr *mr = &portal->mr; + u32 cfg; + + mr->ring = portal->addr.ce + QM_CL_MR; + mr->pi = qm_in(portal, QM_REG_MR_PI_CINH) & (QM_MR_SIZE - 1); + mr->ci = qm_in(portal, QM_REG_MR_CI_CINH) & (QM_MR_SIZE - 1); + mr->cursor = mr->ring + mr->ci; + mr->fill = dpaa_cyc_diff(QM_MR_SIZE, mr->ci, mr->pi); + mr->vbit = (qm_in(portal, QM_REG_MR_PI_CINH) & QM_MR_SIZE) + ? 
QM_MR_VERB_VBIT : 0; + mr->ithresh = qm_in(portal, QM_REG_MR_ITR); +#ifdef CONFIG_FSL_DPAA_CHECKING + mr->pmode = pmode; + mr->cmode = cmode; +#endif + cfg = (qm_in(portal, QM_REG_CFG) & 0xfffff0ff) | + ((cmode & 1) << 8); /* QCSP_CFG:MM */ + qm_out(portal, QM_REG_CFG, cfg); + return 0; +} + +static inline void qm_mr_finish(struct qm_portal *portal) +{ + struct qm_mr *mr = &portal->mr; + + if (mr->ci != mr_ptr2idx(mr->cursor)) + pr_crit("Ignoring completed MR entries\n"); +} + +static inline const union qm_mr_entry *qm_mr_current(struct qm_portal *portal) +{ + struct qm_mr *mr = &portal->mr; + + if (!mr->fill) + return NULL; + return mr->cursor; +} + +static inline int qm_mr_next(struct qm_portal *portal) +{ + struct qm_mr *mr = &portal->mr; + + DPAA_ASSERT(mr->fill); + mr->cursor = mr_inc(mr->cursor); + return --mr->fill; +} + +static inline void qm_mr_pvb_update(struct qm_portal *portal) +{ + struct qm_mr *mr = &portal->mr; + union qm_mr_entry *res = qm_cl(mr->ring, mr->pi); + + DPAA_ASSERT(mr->pmode == qm_mr_pvb); + /* + * when accessing 'verb', use __raw_readb() to ensure that compiler + * inlining doesn't try to optimise out "excess reads". + */ + if ((__raw_readb(&res->verb) & QM_MR_VERB_VBIT) == mr->vbit) { + mr->pi = (mr->pi + 1) & (QM_MR_SIZE - 1); + if (!mr->pi) + mr->vbit ^= QM_MR_VERB_VBIT; + mr->fill++; + res = mr_inc(res); + } + dpaa_invalidate_touch_ro(res); +} + +static inline void qm_mr_cci_consume(struct qm_portal *portal, u8 num) +{ + struct qm_mr *mr = &portal->mr; + + DPAA_ASSERT(mr->cmode == qm_mr_cci); + mr->ci = (mr->ci + num) & (QM_MR_SIZE - 1); + qm_out(portal, QM_REG_MR_CI_CINH, mr->ci); +} + +static inline void qm_mr_cci_consume_to_current(struct qm_portal *portal) +{ + struct qm_mr *mr = &portal->mr; + + DPAA_ASSERT(mr->cmode == qm_mr_cci); + mr->ci = mr_ptr2idx(mr->cursor); + qm_out(portal, QM_REG_MR_CI_CINH, mr->ci); +} + +static inline void qm_mr_set_ithresh(struct qm_portal *portal, u8 ithresh) +{ + qm_out(portal, QM_REG_MR_ITR, ithresh); +} + +/* --- Management command API --- */ + +static inline int qm_mc_init(struct qm_portal *portal) +{ + struct qm_mc *mc = &portal->mc; + + mc->cr = portal->addr.ce + QM_CL_CR; + mc->rr = portal->addr.ce + QM_CL_RR0; + mc->rridx = (__raw_readb(&mc->cr->_ncw_verb) & QM_MCC_VERB_VBIT) + ? 0 : 1; + mc->vbit = mc->rridx ? 
QM_MCC_VERB_VBIT : 0; +#ifdef CONFIG_FSL_DPAA_CHECKING + mc->state = qman_mc_idle; +#endif + return 0; +} + +static inline void qm_mc_finish(struct qm_portal *portal) +{ +#ifdef CONFIG_FSL_DPAA_CHECKING + struct qm_mc *mc = &portal->mc; + + DPAA_ASSERT(mc->state == qman_mc_idle); + if (mc->state != qman_mc_idle) + pr_crit("Losing incomplete MC command\n"); +#endif +} + +static inline union qm_mc_command *qm_mc_start(struct qm_portal *portal) +{ + struct qm_mc *mc = &portal->mc; + + DPAA_ASSERT(mc->state == qman_mc_idle); +#ifdef CONFIG_FSL_DPAA_CHECKING + mc->state = qman_mc_user; +#endif + dpaa_zero(mc->cr); + return mc->cr; +} + +static inline void qm_mc_commit(struct qm_portal *portal, u8 myverb) +{ + struct qm_mc *mc = &portal->mc; + union qm_mc_result *rr = mc->rr + mc->rridx; + + DPAA_ASSERT(mc->state == qman_mc_user); + dma_wmb(); + mc->cr->_ncw_verb = myverb | mc->vbit; + dpaa_flush(mc->cr); + dpaa_invalidate_touch_ro(rr); +#ifdef CONFIG_FSL_DPAA_CHECKING + mc->state = qman_mc_hw; +#endif +} + +static inline union qm_mc_result *qm_mc_result(struct qm_portal *portal) +{ + struct qm_mc *mc = &portal->mc; + union qm_mc_result *rr = mc->rr + mc->rridx; + + DPAA_ASSERT(mc->state == qman_mc_hw); + /* + * The inactive response register's verb byte always returns zero until + * its command is submitted and completed. This includes the valid-bit, + * in case you were wondering... + */ + if (!__raw_readb(&rr->verb)) { + dpaa_invalidate_touch_ro(rr); + return NULL; + } + mc->rridx ^= 1; + mc->vbit ^= QM_MCC_VERB_VBIT; +#ifdef CONFIG_FSL_DPAA_CHECKING + mc->state = qman_mc_idle; +#endif + return rr; +} + +static inline int qm_mc_result_timeout(struct qm_portal *portal, + union qm_mc_result **mcr) +{ + int timeout = QM_MCR_TIMEOUT; + + do { + *mcr = qm_mc_result(portal); + if (*mcr) + break; + udelay(1); + } while (--timeout); + + return timeout; +} + +static inline void fq_set(struct qman_fq *fq, u32 mask) +{ + set_bits(mask, &fq->flags); +} + +static inline void fq_clear(struct qman_fq *fq, u32 mask) +{ + clear_bits(mask, &fq->flags); +} + +static inline int fq_isset(struct qman_fq *fq, u32 mask) +{ + return fq->flags & mask; +} + +static inline int fq_isclear(struct qman_fq *fq, u32 mask) +{ + return !(fq->flags & mask); +} + +struct qman_portal { + struct qm_portal p; + /* PORTAL_BITS_*** - dynamic, strictly internal */ + unsigned long bits; + /* interrupt sources processed by portal_isr(), configurable */ + unsigned long irq_sources; + u32 use_eqcr_ci_stashing; + /* only 1 volatile dequeue at a time */ + struct qman_fq *vdqcr_owned; + u32 sdqcr; + /* probing time config params for cpu-affine portals */ + const struct qm_portal_config *config; + /* needed for providing a non-NULL device to dma_map_***() */ + struct platform_device *pdev; + /* 2-element array. cgrs[0] is mask, cgrs[1] is snapshot. */ + struct qman_cgrs *cgrs; + /* linked-list of CSCN handlers. 
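+ * (Congestion State Change Notifications); qm_congestion_task() walks this
+ * list and invokes each registered qman_cgr callback whose CGR changed state.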
*/ + struct list_head cgr_cbs; + /* list lock */ + spinlock_t cgr_lock; + struct work_struct congestion_work; + struct work_struct mr_work; + char irqname[MAX_IRQNAME]; +}; + +static cpumask_t affine_mask; +static DEFINE_SPINLOCK(affine_mask_lock); +static u16 affine_channels[NR_CPUS]; +static DEFINE_PER_CPU(struct qman_portal, qman_affine_portal); +struct qman_portal *affine_portals[NR_CPUS]; + +static inline struct qman_portal *get_affine_portal(void) +{ + return &get_cpu_var(qman_affine_portal); +} + +static inline void put_affine_portal(void) +{ + put_cpu_var(qman_affine_portal); +} + +static struct workqueue_struct *qm_portal_wq; + +int qman_wq_alloc(void) +{ + qm_portal_wq = alloc_workqueue("qman_portal_wq", 0, 1); + if (!qm_portal_wq) + return -ENOMEM; + return 0; +} + +/* + * This is what everything can wait on, even if it migrates to a different cpu + * to the one whose affine portal it is waiting on. + */ +static DECLARE_WAIT_QUEUE_HEAD(affine_queue); + +static struct qman_fq **fq_table; +static u32 num_fqids; + +int qman_alloc_fq_table(u32 _num_fqids) +{ + num_fqids = _num_fqids; + + fq_table = vzalloc(num_fqids * 2 * sizeof(struct qman_fq *)); + if (!fq_table) + return -ENOMEM; + + pr_debug("Allocated fq lookup table at %p, entry count %u\n", + fq_table, num_fqids * 2); + return 0; +} + +static struct qman_fq *idx_to_fq(u32 idx) +{ + struct qman_fq *fq; + +#ifdef CONFIG_FSL_DPAA_CHECKING + if (WARN_ON(idx >= num_fqids * 2)) + return NULL; +#endif + fq = fq_table[idx]; + DPAA_ASSERT(!fq || idx == fq->idx); + + return fq; +} + +/* + * Only returns full-service fq objects, not enqueue-only + * references (QMAN_FQ_FLAG_NO_MODIFY). + */ +static struct qman_fq *fqid_to_fq(u32 fqid) +{ + return idx_to_fq(fqid * 2); +} + +static struct qman_fq *tag_to_fq(u32 tag) +{ +#if BITS_PER_LONG == 64 + return idx_to_fq(tag); +#else + return (struct qman_fq *)tag; +#endif +} + +static u32 fq_to_tag(struct qman_fq *fq) +{ +#if BITS_PER_LONG == 64 + return fq->idx; +#else + return (u32)fq; +#endif +} + +static u32 __poll_portal_slow(struct qman_portal *p, u32 is); +static inline unsigned int __poll_portal_fast(struct qman_portal *p, + unsigned int poll_limit); +static void qm_congestion_task(struct work_struct *work); +static void qm_mr_process_task(struct work_struct *work); + +static irqreturn_t portal_isr(int irq, void *ptr) +{ + struct qman_portal *p = ptr; + + u32 clear = QM_DQAVAIL_MASK | p->irq_sources; + u32 is = qm_in(&p->p, QM_REG_ISR) & p->irq_sources; + + if (unlikely(!is)) + return IRQ_NONE; + + /* DQRR-handling if it's interrupt-driven */ + if (is & QM_PIRQ_DQRI) + __poll_portal_fast(p, QMAN_POLL_LIMIT); + /* Handling of anything else that's interrupt-driven */ + clear |= __poll_portal_slow(p, is); + qm_out(&p->p, QM_REG_ISR, clear); + return IRQ_HANDLED; +} + +static int drain_mr_fqrni(struct qm_portal *p) +{ + const union qm_mr_entry *msg; +loop: + msg = qm_mr_current(p); + if (!msg) { + /* + * if MR was full and h/w had other FQRNI entries to produce, we + * need to allow it time to produce those entries once the + * existing entries are consumed. A worst-case situation + * (fully-loaded system) means h/w sequencers may have to do 3-4 + * other things before servicing the portal's MR pump, each of + * which (if slow) may take ~50 qman cycles (which is ~200 + * processor cycles). So rounding up and then multiplying this + * worst-case estimate by a factor of 10, just to be + * ultra-paranoid, goes as high as 10,000 cycles. 
NB, we consume + * one entry at a time, so h/w has an opportunity to produce new + * entries well before the ring has been fully consumed, so + * we're being *really* paranoid here. + */ + u64 now, then = jiffies; + + do { + now = jiffies; + } while ((then + 10000) > now); + msg = qm_mr_current(p); + if (!msg) + return 0; + } + if ((msg->verb & QM_MR_VERB_TYPE_MASK) != QM_MR_VERB_FQRNI) { + /* We aren't draining anything but FQRNIs */ + pr_err("Found verb 0x%x in MR\n", msg->verb); + return -1; + } + qm_mr_next(p); + qm_mr_cci_consume(p, 1); + goto loop; +} + +static int qman_create_portal(struct qman_portal *portal, + const struct qm_portal_config *c, + const struct qman_cgrs *cgrs) +{ + struct qm_portal *p; + char buf[16]; + int ret; + u32 isdr; + + p = &portal->p; + +#ifdef CONFIG_FSL_PAMU + /* PAMU is required for stashing */ + portal->use_eqcr_ci_stashing = ((qman_ip_rev >= QMAN_REV30) ? 1 : 0); +#else + portal->use_eqcr_ci_stashing = 0; +#endif + /* + * prep the low-level portal struct with the mapped addresses from the + * config, everything that follows depends on it and "config" is more + * for (de)reference + */ + p->addr.ce = c->addr_virt[DPAA_PORTAL_CE]; + p->addr.ci = c->addr_virt[DPAA_PORTAL_CI]; + /* + * If CI-stashing is used, the current defaults use a threshold of 3, + * and stash with high-than-DQRR priority. + */ + if (qm_eqcr_init(p, qm_eqcr_pvb, + portal->use_eqcr_ci_stashing ? 3 : 0, 1)) { + dev_err(c->dev, "EQCR initialisation failed\n"); + goto fail_eqcr; + } + if (qm_dqrr_init(p, c, qm_dqrr_dpush, qm_dqrr_pvb, + qm_dqrr_cdc, DQRR_MAXFILL)) { + dev_err(c->dev, "DQRR initialisation failed\n"); + goto fail_dqrr; + } + if (qm_mr_init(p, qm_mr_pvb, qm_mr_cci)) { + dev_err(c->dev, "MR initialisation failed\n"); + goto fail_mr; + } + if (qm_mc_init(p)) { + dev_err(c->dev, "MC initialisation failed\n"); + goto fail_mc; + } + /* static interrupt-gating controls */ + qm_dqrr_set_ithresh(p, QMAN_PIRQ_DQRR_ITHRESH); + qm_mr_set_ithresh(p, QMAN_PIRQ_MR_ITHRESH); + qm_out(p, QM_REG_ITPR, QMAN_PIRQ_IPERIOD); + portal->cgrs = kmalloc(2 * sizeof(*cgrs), GFP_KERNEL); + if (!portal->cgrs) + goto fail_cgrs; + /* initial snapshot is no-depletion */ + qman_cgrs_init(&portal->cgrs[1]); + if (cgrs) + portal->cgrs[0] = *cgrs; + else + /* if the given mask is NULL, assume all CGRs can be seen */ + qman_cgrs_fill(&portal->cgrs[0]); + INIT_LIST_HEAD(&portal->cgr_cbs); + spin_lock_init(&portal->cgr_lock); + INIT_WORK(&portal->congestion_work, qm_congestion_task); + INIT_WORK(&portal->mr_work, qm_mr_process_task); + portal->bits = 0; + portal->sdqcr = QM_SDQCR_SOURCE_CHANNELS | QM_SDQCR_COUNT_UPTO3 | + QM_SDQCR_DEDICATED_PRECEDENCE | QM_SDQCR_TYPE_PRIO_QOS | + QM_SDQCR_TOKEN_SET(0xab) | QM_SDQCR_CHANNELS_DEDICATED; + sprintf(buf, "qportal-%d", c->channel); + portal->pdev = platform_device_alloc(buf, -1); + if (!portal->pdev) + goto fail_devalloc; + if (dma_set_mask(&portal->pdev->dev, DMA_BIT_MASK(40))) + goto fail_devadd; + ret = platform_device_add(portal->pdev); + if (ret) + goto fail_devadd; + isdr = 0xffffffff; + qm_out(p, QM_REG_ISDR, isdr); + portal->irq_sources = 0; + qm_out(p, QM_REG_IER, 0); + qm_out(p, QM_REG_ISR, 0xffffffff); + snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, c->cpu); + if (request_irq(c->irq, portal_isr, 0, portal->irqname, portal)) { + dev_err(c->dev, "request_irq() failed\n"); + goto fail_irq; + } + if (c->cpu != -1 && irq_can_set_affinity(c->irq) && + irq_set_affinity(c->irq, cpumask_of(c->cpu))) { + dev_err(c->dev, "irq_set_affinity() failed\n"); + goto 
fail_affinity; + } + + /* Need EQCR to be empty before continuing */ + isdr &= ~QM_PIRQ_EQCI; + qm_out(p, QM_REG_ISDR, isdr); + ret = qm_eqcr_get_fill(p); + if (ret) { + dev_err(c->dev, "EQCR unclean\n"); + goto fail_eqcr_empty; + } + isdr &= ~(QM_PIRQ_DQRI | QM_PIRQ_MRI); + qm_out(p, QM_REG_ISDR, isdr); + if (qm_dqrr_current(p)) { + dev_err(c->dev, "DQRR unclean\n"); + qm_dqrr_cdc_consume_n(p, 0xffff); + } + if (qm_mr_current(p) && drain_mr_fqrni(p)) { + /* special handling, drain just in case it's a few FQRNIs */ + const union qm_mr_entry *e = qm_mr_current(p); + + dev_err(c->dev, "MR dirty, VB 0x%x, rc 0x%x\n, addr 0x%x", + e->verb, e->ern.rc, e->ern.fd.addr_lo); + goto fail_dqrr_mr_empty; + } + /* Success */ + portal->config = c; + qm_out(p, QM_REG_ISDR, 0); + qm_out(p, QM_REG_IIR, 0); + /* Write a sane SDQCR */ + qm_dqrr_sdqcr_set(p, portal->sdqcr); + return 0; + +fail_dqrr_mr_empty: +fail_eqcr_empty: +fail_affinity: + free_irq(c->irq, portal); +fail_irq: + platform_device_del(portal->pdev); +fail_devadd: + platform_device_put(portal->pdev); +fail_devalloc: + kfree(portal->cgrs); +fail_cgrs: + qm_mc_finish(p); +fail_mc: + qm_mr_finish(p); +fail_mr: + qm_dqrr_finish(p); +fail_dqrr: + qm_eqcr_finish(p); +fail_eqcr: + return -EIO; +} + +struct qman_portal *qman_create_affine_portal(const struct qm_portal_config *c, + const struct qman_cgrs *cgrs) +{ + struct qman_portal *portal; + int err; + + portal = &per_cpu(qman_affine_portal, c->cpu); + err = qman_create_portal(portal, c, cgrs); + if (err) + return NULL; + + spin_lock(&affine_mask_lock); + cpumask_set_cpu(c->cpu, &affine_mask); + affine_channels[c->cpu] = c->channel; + affine_portals[c->cpu] = portal; + spin_unlock(&affine_mask_lock); + + return portal; +} + +static void qman_destroy_portal(struct qman_portal *qm) +{ + const struct qm_portal_config *pcfg; + + /* Stop dequeues on the portal */ + qm_dqrr_sdqcr_set(&qm->p, 0); + + /* + * NB we do this to "quiesce" EQCR. If we add enqueue-completions or + * something related to QM_PIRQ_EQCI, this may need fixing. + * Also, due to the prefetching model used for CI updates in the enqueue + * path, this update will only invalidate the CI cacheline *after* + * working on it, so we need to call this twice to ensure a full update + * irrespective of where the enqueue processing was at when the teardown + * began. 
+ */ + qm_eqcr_cce_update(&qm->p); + qm_eqcr_cce_update(&qm->p); + pcfg = qm->config; + + free_irq(pcfg->irq, qm); + + kfree(qm->cgrs); + qm_mc_finish(&qm->p); + qm_mr_finish(&qm->p); + qm_dqrr_finish(&qm->p); + qm_eqcr_finish(&qm->p); + + platform_device_del(qm->pdev); + platform_device_put(qm->pdev); + + qm->config = NULL; +} + +const struct qm_portal_config *qman_destroy_affine_portal(void) +{ + struct qman_portal *qm = get_affine_portal(); + const struct qm_portal_config *pcfg; + int cpu; + + pcfg = qm->config; + cpu = pcfg->cpu; + + qman_destroy_portal(qm); + + spin_lock(&affine_mask_lock); + cpumask_clear_cpu(cpu, &affine_mask); + spin_unlock(&affine_mask_lock); + put_affine_portal(); + return pcfg; +} + +/* Inline helper to reduce nesting in __poll_portal_slow() */ +static inline void fq_state_change(struct qman_portal *p, struct qman_fq *fq, + const union qm_mr_entry *msg, u8 verb) +{ + switch (verb) { + case QM_MR_VERB_FQRL: + DPAA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_ORL)); + fq_clear(fq, QMAN_FQ_STATE_ORL); + break; + case QM_MR_VERB_FQRN: + DPAA_ASSERT(fq->state == qman_fq_state_parked || + fq->state == qman_fq_state_sched); + DPAA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_CHANGING)); + fq_clear(fq, QMAN_FQ_STATE_CHANGING); + if (msg->fq.fqs & QM_MR_FQS_NOTEMPTY) + fq_set(fq, QMAN_FQ_STATE_NE); + if (msg->fq.fqs & QM_MR_FQS_ORLPRESENT) + fq_set(fq, QMAN_FQ_STATE_ORL); + fq->state = qman_fq_state_retired; + break; + case QM_MR_VERB_FQPN: + DPAA_ASSERT(fq->state == qman_fq_state_sched); + DPAA_ASSERT(fq_isclear(fq, QMAN_FQ_STATE_CHANGING)); + fq->state = qman_fq_state_parked; + } +} + +static void qm_congestion_task(struct work_struct *work) +{ + struct qman_portal *p = container_of(work, struct qman_portal, + congestion_work); + struct qman_cgrs rr, c; + union qm_mc_result *mcr; + struct qman_cgr *cgr; + + spin_lock(&p->cgr_lock); + qm_mc_start(&p->p); + qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION); + if (!qm_mc_result_timeout(&p->p, &mcr)) { + spin_unlock(&p->cgr_lock); + dev_crit(p->config->dev, "QUERYCONGESTION timeout\n"); + return; + } + /* mask out the ones I'm not interested in */ + qman_cgrs_and(&rr, (struct qman_cgrs *)&mcr->querycongestion.state, + &p->cgrs[0]); + /* check previous snapshot for delta, enter/exit congestion */ + qman_cgrs_xor(&c, &rr, &p->cgrs[1]); + /* update snapshot */ + qman_cgrs_cp(&p->cgrs[1], &rr); + /* Invoke callback */ + list_for_each_entry(cgr, &p->cgr_cbs, node) + if (cgr->cb && qman_cgrs_get(&c, cgr->cgrid)) + cgr->cb(p, cgr, qman_cgrs_get(&rr, cgr->cgrid)); + spin_unlock(&p->cgr_lock); +} + +static void qm_mr_process_task(struct work_struct *work) +{ + struct qman_portal *p = container_of(work, struct qman_portal, + mr_work); + const union qm_mr_entry *msg; + struct qman_fq *fq; + u8 verb, num = 0; + + preempt_disable(); + + while (1) { + qm_mr_pvb_update(&p->p); + msg = qm_mr_current(&p->p); + if (!msg) + break; + + verb = msg->verb & QM_MR_VERB_TYPE_MASK; + /* The message is a software ERN iff the 0x20 bit is clear */ + if (verb & 0x20) { + switch (verb) { + case QM_MR_VERB_FQRNI: + /* nada, we drop FQRNIs on the floor */ + break; + case QM_MR_VERB_FQRN: + case QM_MR_VERB_FQRL: + /* Lookup in the retirement table */ + fq = fqid_to_fq(msg->fq.fqid); + if (WARN_ON(!fq)) + break; + fq_state_change(p, fq, msg, verb); + if (fq->cb.fqs) + fq->cb.fqs(p, fq, msg); + break; + case QM_MR_VERB_FQPN: + /* Parked */ + fq = tag_to_fq(msg->fq.contextB); + fq_state_change(p, fq, msg, verb); + if (fq->cb.fqs) + fq->cb.fqs(p, fq, msg); + break; + case 
QM_MR_VERB_DC_ERN: + /* DCP ERN */ + pr_crit_once("Leaking DCP ERNs!\n"); + break; + default: + pr_crit("Invalid MR verb 0x%02x\n", verb); + } + } else { + /* Its a software ERN */ + fq = tag_to_fq(msg->ern.tag); + fq->cb.ern(p, fq, msg); + } + num++; + qm_mr_next(&p->p); + } + + qm_mr_cci_consume(&p->p, num); + preempt_enable(); +} + +static u32 __poll_portal_slow(struct qman_portal *p, u32 is) +{ + if (is & QM_PIRQ_CSCI) { + queue_work_on(smp_processor_id(), qm_portal_wq, + &p->congestion_work); + } + + if (is & QM_PIRQ_EQRI) { + qm_eqcr_cce_update(&p->p); + qm_eqcr_set_ithresh(&p->p, 0); + wake_up(&affine_queue); + } + + if (is & QM_PIRQ_MRI) { + queue_work_on(smp_processor_id(), qm_portal_wq, + &p->mr_work); + } + + return is; +} + +/* + * remove some slowish-path stuff from the "fast path" and make sure it isn't + * inlined. + */ +static noinline void clear_vdqcr(struct qman_portal *p, struct qman_fq *fq) +{ + p->vdqcr_owned = NULL; + fq_clear(fq, QMAN_FQ_STATE_VDQCR); + wake_up(&affine_queue); +} + +/* + * The only states that would conflict with other things if they ran at the + * same time on the same cpu are: + * + * (i) setting/clearing vdqcr_owned, and + * (ii) clearing the NE (Not Empty) flag. + * + * Both are safe. Because; + * + * (i) this clearing can only occur after qman_volatile_dequeue() has set the + * vdqcr_owned field (which it does before setting VDQCR), and + * qman_volatile_dequeue() blocks interrupts and preemption while this is + * done so that we can't interfere. + * (ii) the NE flag is only cleared after qman_retire_fq() has set it, and as + * with (i) that API prevents us from interfering until it's safe. + * + * The good thing is that qman_volatile_dequeue() and qman_retire_fq() run far + * less frequently (ie. per-FQ) than __poll_portal_fast() does, so the nett + * advantage comes from this function not having to "lock" anything at all. + * + * Note also that the callbacks are invoked at points which are safe against the + * above potential conflicts, but that this function itself is not re-entrant + * (this is because the function tracks one end of each FIFO in the portal and + * we do *not* want to lock that). So the consequence is that it is safe for + * user callbacks to call into any QMan API. + */ +static inline unsigned int __poll_portal_fast(struct qman_portal *p, + unsigned int poll_limit) +{ + const struct qm_dqrr_entry *dq; + struct qman_fq *fq; + enum qman_cb_dqrr_result res; + unsigned int limit = 0; + + do { + qm_dqrr_pvb_update(&p->p); + dq = qm_dqrr_current(&p->p); + if (!dq) + break; + + if (dq->stat & QM_DQRR_STAT_UNSCHEDULED) { + /* + * VDQCR: don't trust contextB as the FQ may have + * been configured for h/w consumption and we're + * draining it post-retirement. + */ + fq = p->vdqcr_owned; + /* + * We only set QMAN_FQ_STATE_NE when retiring, so we + * only need to check for clearing it when doing + * volatile dequeues. It's one less thing to check + * in the critical path (SDQCR). + */ + if (dq->stat & QM_DQRR_STAT_FQ_EMPTY) + fq_clear(fq, QMAN_FQ_STATE_NE); + /* + * This is duplicated from the SDQCR code, but we + * have stuff to do before *and* after this callback, + * and we don't want multiple if()s in the critical + * path (SDQCR). 
+ */ + res = fq->cb.dqrr(p, fq, dq); + if (res == qman_cb_dqrr_stop) + break; + /* Check for VDQCR completion */ + if (dq->stat & QM_DQRR_STAT_DQCR_EXPIRED) + clear_vdqcr(p, fq); + } else { + /* SDQCR: contextB points to the FQ */ + fq = tag_to_fq(dq->contextB); + /* Now let the callback do its stuff */ + res = fq->cb.dqrr(p, fq, dq); + /* + * The callback can request that we exit without + * consuming this entry nor advancing; + */ + if (res == qman_cb_dqrr_stop) + break; + } + /* Interpret 'dq' from a driver perspective. */ + /* + * Parking isn't possible unless HELDACTIVE was set. NB, + * FORCEELIGIBLE implies HELDACTIVE, so we only need to + * check for HELDACTIVE to cover both. + */ + DPAA_ASSERT((dq->stat & QM_DQRR_STAT_FQ_HELDACTIVE) || + (res != qman_cb_dqrr_park)); + /* just means "skip it, I'll consume it myself later on" */ + if (res != qman_cb_dqrr_defer) + qm_dqrr_cdc_consume_1ptr(&p->p, dq, + res == qman_cb_dqrr_park); + /* Move forward */ + qm_dqrr_next(&p->p); + /* + * Entry processed and consumed, increment our counter. The + * callback can request that we exit after consuming the + * entry, and we also exit if we reach our processing limit, + * so loop back only if neither of these conditions is met. + */ + } while (++limit < poll_limit && res != qman_cb_dqrr_consume_stop); + + return limit; +} + +void qman_p_irqsource_add(struct qman_portal *p, u32 bits) +{ + unsigned long irqflags; + + local_irq_save(irqflags); + set_bits(bits & QM_PIRQ_VISIBLE, &p->irq_sources); + qm_out(&p->p, QM_REG_IER, p->irq_sources); + local_irq_restore(irqflags); +} +EXPORT_SYMBOL(qman_p_irqsource_add); + +void qman_p_irqsource_remove(struct qman_portal *p, u32 bits) +{ + unsigned long irqflags; + u32 ier; + + /* + * Our interrupt handler only processes+clears status register bits that + * are in p->irq_sources. As we're trimming that mask, if one of them + * were to assert in the status register just before we remove it from + * the enable register, there would be an interrupt-storm when we + * release the IRQ lock. So we wait for the enable register update to + * take effect in h/w (by reading it back) and then clear all other bits + * in the status register. Ie. we clear them from ISR once it's certain + * IER won't allow them to reassert. + */ + local_irq_save(irqflags); + bits &= QM_PIRQ_VISIBLE; + clear_bits(bits, &p->irq_sources); + qm_out(&p->p, QM_REG_IER, p->irq_sources); + ier = qm_in(&p->p, QM_REG_IER); + /* + * Using "~ier" (rather than "bits" or "~p->irq_sources") creates a + * data-dependency, ie. to protect against re-ordering. 
+ */ + qm_out(&p->p, QM_REG_ISR, ~ier); + local_irq_restore(irqflags); +} +EXPORT_SYMBOL(qman_p_irqsource_remove); + +const cpumask_t *qman_affine_cpus(void) +{ + return &affine_mask; +} +EXPORT_SYMBOL(qman_affine_cpus); + +u16 qman_affine_channel(int cpu) +{ + if (cpu < 0) { + struct qman_portal *portal = get_affine_portal(); + + cpu = portal->config->cpu; + put_affine_portal(); + } + WARN_ON(!cpumask_test_cpu(cpu, &affine_mask)); + return affine_channels[cpu]; +} +EXPORT_SYMBOL(qman_affine_channel); + +struct qman_portal *qman_get_affine_portal(int cpu) +{ + return affine_portals[cpu]; +} +EXPORT_SYMBOL(qman_get_affine_portal); + +int qman_p_poll_dqrr(struct qman_portal *p, unsigned int limit) +{ + return __poll_portal_fast(p, limit); +} +EXPORT_SYMBOL(qman_p_poll_dqrr); + +void qman_p_static_dequeue_add(struct qman_portal *p, u32 pools) +{ + unsigned long irqflags; + + local_irq_save(irqflags); + pools &= p->config->pools; + p->sdqcr |= pools; + qm_dqrr_sdqcr_set(&p->p, p->sdqcr); + local_irq_restore(irqflags); +} +EXPORT_SYMBOL(qman_p_static_dequeue_add); + +/* Frame queue API */ + +static const char *mcr_result_str(u8 result) +{ + switch (result) { + case QM_MCR_RESULT_NULL: + return "QM_MCR_RESULT_NULL"; + case QM_MCR_RESULT_OK: + return "QM_MCR_RESULT_OK"; + case QM_MCR_RESULT_ERR_FQID: + return "QM_MCR_RESULT_ERR_FQID"; + case QM_MCR_RESULT_ERR_FQSTATE: + return "QM_MCR_RESULT_ERR_FQSTATE"; + case QM_MCR_RESULT_ERR_NOTEMPTY: + return "QM_MCR_RESULT_ERR_NOTEMPTY"; + case QM_MCR_RESULT_PENDING: + return "QM_MCR_RESULT_PENDING"; + case QM_MCR_RESULT_ERR_BADCOMMAND: + return "QM_MCR_RESULT_ERR_BADCOMMAND"; + } + return ""; +} + +int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq) +{ + if (flags & QMAN_FQ_FLAG_DYNAMIC_FQID) { + int ret = qman_alloc_fqid(&fqid); + + if (ret) + return ret; + } + fq->fqid = fqid; + fq->flags = flags; + fq->state = qman_fq_state_oos; + fq->cgr_groupid = 0; + + /* A context_b of 0 is allegedly special, so don't use that fqid */ + if (fqid == 0 || fqid >= num_fqids) { + WARN(1, "bad fqid %d\n", fqid); + return -EINVAL; + } + + fq->idx = fqid * 2; + if (flags & QMAN_FQ_FLAG_NO_MODIFY) + fq->idx++; + + WARN_ON(fq_table[fq->idx]); + fq_table[fq->idx] = fq; + + return 0; +} +EXPORT_SYMBOL(qman_create_fq); + +void qman_destroy_fq(struct qman_fq *fq) +{ + /* + * We don't need to lock the FQ as it is a pre-condition that the FQ be + * quiesced. Instead, run some checks. + */ + switch (fq->state) { + case qman_fq_state_parked: + case qman_fq_state_oos: + if (fq_isset(fq, QMAN_FQ_FLAG_DYNAMIC_FQID)) + qman_release_fqid(fq->fqid); + + DPAA_ASSERT(fq_table[fq->idx]); + fq_table[fq->idx] = NULL; + return; + default: + break; + } + DPAA_ASSERT(NULL == "qman_free_fq() on unquiesced FQ!"); +} +EXPORT_SYMBOL(qman_destroy_fq); + +u32 qman_fq_fqid(struct qman_fq *fq) +{ + return fq->fqid; +} +EXPORT_SYMBOL(qman_fq_fqid); + +int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts) +{ + union qm_mc_command *mcc; + union qm_mc_result *mcr; + struct qman_portal *p; + u8 res, myverb; + int ret = 0; + + myverb = (flags & QMAN_INITFQ_FLAG_SCHED) + ? 
QM_MCC_VERB_INITFQ_SCHED : QM_MCC_VERB_INITFQ_PARKED; + + if (fq->state != qman_fq_state_oos && + fq->state != qman_fq_state_parked) + return -EINVAL; +#ifdef CONFIG_FSL_DPAA_CHECKING + if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)) + return -EINVAL; +#endif + if (opts && (opts->we_mask & QM_INITFQ_WE_OAC)) { + /* And can't be set at the same time as TDTHRESH */ + if (opts->we_mask & QM_INITFQ_WE_TDTHRESH) + return -EINVAL; + } + /* Issue an INITFQ_[PARKED|SCHED] management command */ + p = get_affine_portal(); + if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) || + (fq->state != qman_fq_state_oos && + fq->state != qman_fq_state_parked)) { + ret = -EBUSY; + goto out; + } + mcc = qm_mc_start(&p->p); + if (opts) + mcc->initfq = *opts; + mcc->initfq.fqid = fq->fqid; + mcc->initfq.count = 0; + /* + * If the FQ does *not* have the TO_DCPORTAL flag, contextB is set as a + * demux pointer. Otherwise, the caller-provided value is allowed to + * stand, don't overwrite it. + */ + if (fq_isclear(fq, QMAN_FQ_FLAG_TO_DCPORTAL)) { + dma_addr_t phys_fq; + + mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTB; + mcc->initfq.fqd.context_b = fq_to_tag(fq); + /* + * and the physical address - NB, if the user wasn't trying to + * set CONTEXTA, clear the stashing settings. + */ + if (!(mcc->initfq.we_mask & QM_INITFQ_WE_CONTEXTA)) { + mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTA; + memset(&mcc->initfq.fqd.context_a, 0, + sizeof(mcc->initfq.fqd.context_a)); + } else { + phys_fq = dma_map_single(&p->pdev->dev, fq, sizeof(*fq), + DMA_TO_DEVICE); + qm_fqd_stashing_set64(&mcc->initfq.fqd, phys_fq); + } + } + if (flags & QMAN_INITFQ_FLAG_LOCAL) { + int wq = 0; + + if (!(mcc->initfq.we_mask & QM_INITFQ_WE_DESTWQ)) { + mcc->initfq.we_mask |= QM_INITFQ_WE_DESTWQ; + wq = 4; + } + qm_fqd_set_destwq(&mcc->initfq.fqd, p->config->channel, wq); + } + qm_mc_commit(&p->p, myverb); + if (!qm_mc_result_timeout(&p->p, &mcr)) { + dev_err(p->config->dev, "MCR timeout\n"); + ret = -ETIMEDOUT; + goto out; + } + + DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb); + res = mcr->result; + if (res != QM_MCR_RESULT_OK) { + ret = -EIO; + goto out; + } + if (opts) { + if (opts->we_mask & QM_INITFQ_WE_FQCTRL) { + if (opts->fqd.fq_ctrl & QM_FQCTRL_CGE) + fq_set(fq, QMAN_FQ_STATE_CGR_EN); + else + fq_clear(fq, QMAN_FQ_STATE_CGR_EN); + } + if (opts->we_mask & QM_INITFQ_WE_CGID) + fq->cgr_groupid = opts->fqd.cgid; + } + fq->state = (flags & QMAN_INITFQ_FLAG_SCHED) ? 
+ qman_fq_state_sched : qman_fq_state_parked; + +out: + put_affine_portal(); + return ret; +} +EXPORT_SYMBOL(qman_init_fq); + +int qman_schedule_fq(struct qman_fq *fq) +{ + union qm_mc_command *mcc; + union qm_mc_result *mcr; + struct qman_portal *p; + int ret = 0; + + if (fq->state != qman_fq_state_parked) + return -EINVAL; +#ifdef CONFIG_FSL_DPAA_CHECKING + if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)) + return -EINVAL; +#endif + /* Issue a ALTERFQ_SCHED management command */ + p = get_affine_portal(); + if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) || + fq->state != qman_fq_state_parked) { + ret = -EBUSY; + goto out; + } + mcc = qm_mc_start(&p->p); + mcc->alterfq.fqid = fq->fqid; + qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_SCHED); + if (!qm_mc_result_timeout(&p->p, &mcr)) { + dev_err(p->config->dev, "ALTER_SCHED timeout\n"); + ret = -ETIMEDOUT; + goto out; + } + + DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_SCHED); + if (mcr->result != QM_MCR_RESULT_OK) { + ret = -EIO; + goto out; + } + fq->state = qman_fq_state_sched; +out: + put_affine_portal(); + return ret; +} +EXPORT_SYMBOL(qman_schedule_fq); + +int qman_retire_fq(struct qman_fq *fq, u32 *flags) +{ + union qm_mc_command *mcc; + union qm_mc_result *mcr; + struct qman_portal *p; + int ret; + u8 res; + + if (fq->state != qman_fq_state_parked && + fq->state != qman_fq_state_sched) + return -EINVAL; +#ifdef CONFIG_FSL_DPAA_CHECKING + if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)) + return -EINVAL; +#endif + p = get_affine_portal(); + if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) || + fq->state == qman_fq_state_retired || + fq->state == qman_fq_state_oos) { + ret = -EBUSY; + goto out; + } + mcc = qm_mc_start(&p->p); + mcc->alterfq.fqid = fq->fqid; + qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE); + if (!qm_mc_result_timeout(&p->p, &mcr)) { + dev_crit(p->config->dev, "ALTER_RETIRE timeout\n"); + ret = -ETIMEDOUT; + goto out; + } + + DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_RETIRE); + res = mcr->result; + /* + * "Elegant" would be to treat OK/PENDING the same way; set CHANGING, + * and defer the flags until FQRNI or FQRN (respectively) show up. But + * "Friendly" is to process OK immediately, and not set CHANGING. We do + * friendly, otherwise the caller doesn't necessarily have a fully + * "retired" FQ on return even if the retirement was immediate. However + * this does mean some code duplication between here and + * fq_state_change(). + */ + if (res == QM_MCR_RESULT_OK) { + ret = 0; + /* Process 'fq' right away, we'll ignore FQRNI */ + if (mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY) + fq_set(fq, QMAN_FQ_STATE_NE); + if (mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT) + fq_set(fq, QMAN_FQ_STATE_ORL); + if (flags) + *flags = fq->flags; + fq->state = qman_fq_state_retired; + if (fq->cb.fqs) { + /* + * Another issue with supporting "immediate" retirement + * is that we're forced to drop FQRNIs, because by the + * time they're seen it may already be "too late" (the + * fq may have been OOS'd and free()'d already). But if + * the upper layer wants a callback whether it's + * immediate or not, we have to fake a "MR" entry to + * look like an FQRNI... 
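+ * (The faked entry below carries the real FQRNI verb, the FQ's fqid, the
+ * FQS bits from the retire response and the FQ's tag in contextB.)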
+ */ + union qm_mr_entry msg; + + msg.verb = QM_MR_VERB_FQRNI; + msg.fq.fqs = mcr->alterfq.fqs; + msg.fq.fqid = fq->fqid; + msg.fq.contextB = fq_to_tag(fq); + fq->cb.fqs(p, fq, &msg); + } + } else if (res == QM_MCR_RESULT_PENDING) { + ret = 1; + fq_set(fq, QMAN_FQ_STATE_CHANGING); + } else { + ret = -EIO; + } +out: + put_affine_portal(); + return ret; +} +EXPORT_SYMBOL(qman_retire_fq); + +int qman_oos_fq(struct qman_fq *fq) +{ + union qm_mc_command *mcc; + union qm_mc_result *mcr; + struct qman_portal *p; + int ret = 0; + + if (fq->state != qman_fq_state_retired) + return -EINVAL; +#ifdef CONFIG_FSL_DPAA_CHECKING + if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)) + return -EINVAL; +#endif + p = get_affine_portal(); + if (fq_isset(fq, QMAN_FQ_STATE_BLOCKOOS) || + fq->state != qman_fq_state_retired) { + ret = -EBUSY; + goto out; + } + mcc = qm_mc_start(&p->p); + mcc->alterfq.fqid = fq->fqid; + qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS); + if (!qm_mc_result_timeout(&p->p, &mcr)) { + ret = -ETIMEDOUT; + goto out; + } + DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_OOS); + if (mcr->result != QM_MCR_RESULT_OK) { + ret = -EIO; + goto out; + } + fq->state = qman_fq_state_oos; +out: + put_affine_portal(); + return ret; +} +EXPORT_SYMBOL(qman_oos_fq); + +int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd) +{ + union qm_mc_command *mcc; + union qm_mc_result *mcr; + struct qman_portal *p = get_affine_portal(); + int ret = 0; + + mcc = qm_mc_start(&p->p); + mcc->queryfq.fqid = fq->fqid; + qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ); + if (!qm_mc_result_timeout(&p->p, &mcr)) { + ret = -ETIMEDOUT; + goto out; + } + + DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ); + if (mcr->result == QM_MCR_RESULT_OK) + *fqd = mcr->queryfq.fqd; + else + ret = -EIO; +out: + put_affine_portal(); + return ret; +} + +static int qman_query_fq_np(struct qman_fq *fq, + struct qm_mcr_queryfq_np *np) +{ + union qm_mc_command *mcc; + union qm_mc_result *mcr; + struct qman_portal *p = get_affine_portal(); + int ret = 0; + + mcc = qm_mc_start(&p->p); + mcc->queryfq.fqid = fq->fqid; + qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP); + if (!qm_mc_result_timeout(&p->p, &mcr)) { + ret = -ETIMEDOUT; + goto out; + } + + DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP); + if (mcr->result == QM_MCR_RESULT_OK) + *np = mcr->queryfq_np; + else if (mcr->result == QM_MCR_RESULT_ERR_FQID) + ret = -ERANGE; + else + ret = -EIO; +out: + put_affine_portal(); + return ret; +} + +static int qman_query_cgr(struct qman_cgr *cgr, + struct qm_mcr_querycgr *cgrd) +{ + union qm_mc_command *mcc; + union qm_mc_result *mcr; + struct qman_portal *p = get_affine_portal(); + int ret = 0; + + mcc = qm_mc_start(&p->p); + mcc->querycgr.cgid = cgr->cgrid; + qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCGR); + if (!qm_mc_result_timeout(&p->p, &mcr)) { + ret = -ETIMEDOUT; + goto out; + } + DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYCGR); + if (mcr->result == QM_MCR_RESULT_OK) + *cgrd = mcr->querycgr; + else { + dev_err(p->config->dev, "QUERY_CGR failed: %s\n", + mcr_result_str(mcr->result)); + ret = -EIO; + } +out: + put_affine_portal(); + return ret; +} + +int qman_query_cgr_congested(struct qman_cgr *cgr, bool *result) +{ + struct qm_mcr_querycgr query_cgr; + int err; + + err = qman_query_cgr(cgr, &query_cgr); + if (err) + return err; + + *result = !!query_cgr.cgr.cs; + return 0; +} +EXPORT_SYMBOL(qman_query_cgr_congested); + +/* internal function used as a wait_event() expression */ +static int 
set_p_vdqcr(struct qman_portal *p, struct qman_fq *fq, u32 vdqcr) +{ + unsigned long irqflags; + int ret = -EBUSY; + + local_irq_save(irqflags); + if (p->vdqcr_owned) + goto out; + if (fq_isset(fq, QMAN_FQ_STATE_VDQCR)) + goto out; + + fq_set(fq, QMAN_FQ_STATE_VDQCR); + p->vdqcr_owned = fq; + qm_dqrr_vdqcr_set(&p->p, vdqcr); + ret = 0; +out: + local_irq_restore(irqflags); + return ret; +} + +static int set_vdqcr(struct qman_portal **p, struct qman_fq *fq, u32 vdqcr) +{ + int ret; + + *p = get_affine_portal(); + ret = set_p_vdqcr(*p, fq, vdqcr); + put_affine_portal(); + return ret; +} + +static int wait_vdqcr_start(struct qman_portal **p, struct qman_fq *fq, + u32 vdqcr, u32 flags) +{ + int ret = 0; + + if (flags & QMAN_VOLATILE_FLAG_WAIT_INT) + ret = wait_event_interruptible(affine_queue, + !set_vdqcr(p, fq, vdqcr)); + else + wait_event(affine_queue, !set_vdqcr(p, fq, vdqcr)); + return ret; +} + +int qman_volatile_dequeue(struct qman_fq *fq, u32 flags, u32 vdqcr) +{ + struct qman_portal *p; + int ret; + + if (fq->state != qman_fq_state_parked && + fq->state != qman_fq_state_retired) + return -EINVAL; + if (vdqcr & QM_VDQCR_FQID_MASK) + return -EINVAL; + if (fq_isset(fq, QMAN_FQ_STATE_VDQCR)) + return -EBUSY; + vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | fq->fqid; + if (flags & QMAN_VOLATILE_FLAG_WAIT) + ret = wait_vdqcr_start(&p, fq, vdqcr, flags); + else + ret = set_vdqcr(&p, fq, vdqcr); + if (ret) + return ret; + /* VDQCR is set */ + if (flags & QMAN_VOLATILE_FLAG_FINISH) { + if (flags & QMAN_VOLATILE_FLAG_WAIT_INT) + /* + * NB: don't propagate any error - the caller wouldn't + * know whether the VDQCR was issued or not. A signal + * could arrive after returning anyway, so the caller + * can check signal_pending() if that's an issue. + */ + wait_event_interruptible(affine_queue, + !fq_isset(fq, QMAN_FQ_STATE_VDQCR)); + else + wait_event(affine_queue, + !fq_isset(fq, QMAN_FQ_STATE_VDQCR)); + } + return 0; +} +EXPORT_SYMBOL(qman_volatile_dequeue); + +static void update_eqcr_ci(struct qman_portal *p, u8 avail) +{ + if (avail) + qm_eqcr_cce_prefetch(&p->p); + else + qm_eqcr_cce_update(&p->p); +} + +int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd) +{ + struct qman_portal *p; + struct qm_eqcr_entry *eq; + unsigned long irqflags; + u8 avail; + + p = get_affine_portal(); + local_irq_save(irqflags); + + if (p->use_eqcr_ci_stashing) { + /* + * The stashing case is easy, only update if we need to in + * order to try and liberate ring entries. + */ + eq = qm_eqcr_start_stash(&p->p); + } else { + /* + * The non-stashing case is harder, need to prefetch ahead of + * time. 
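+ * (update_eqcr_ci() above either prefetches the cache-enabled CI, when an
+ * entry still appears free, or does a full CI update when the ring looks
+ * full.)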
+ */ + avail = qm_eqcr_get_avail(&p->p); + if (avail < 2) + update_eqcr_ci(p, avail); + eq = qm_eqcr_start_no_stash(&p->p); + } + + if (unlikely(!eq)) + goto out; + + eq->fqid = fq->fqid; + eq->tag = fq_to_tag(fq); + eq->fd = *fd; + + qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE); +out: + local_irq_restore(irqflags); + put_affine_portal(); + return 0; +} +EXPORT_SYMBOL(qman_enqueue); + +static int qm_modify_cgr(struct qman_cgr *cgr, u32 flags, + struct qm_mcc_initcgr *opts) +{ + union qm_mc_command *mcc; + union qm_mc_result *mcr; + struct qman_portal *p = get_affine_portal(); + u8 verb = QM_MCC_VERB_MODIFYCGR; + int ret = 0; + + mcc = qm_mc_start(&p->p); + if (opts) + mcc->initcgr = *opts; + mcc->initcgr.cgid = cgr->cgrid; + if (flags & QMAN_CGR_FLAG_USE_INIT) + verb = QM_MCC_VERB_INITCGR; + qm_mc_commit(&p->p, verb); + if (!qm_mc_result_timeout(&p->p, &mcr)) { + ret = -ETIMEDOUT; + goto out; + } + + DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == verb); + if (mcr->result != QM_MCR_RESULT_OK) + ret = -EIO; + +out: + put_affine_portal(); + return ret; +} + +#define PORTAL_IDX(n) (n->config->channel - QM_CHANNEL_SWPORTAL0) +#define TARG_MASK(n) (BIT(31) >> PORTAL_IDX(n)) + +static u8 qman_cgr_cpus[CGR_NUM]; + +void qman_init_cgr_all(void) +{ + struct qman_cgr cgr; + int err_cnt = 0; + + for (cgr.cgrid = 0; cgr.cgrid < CGR_NUM; cgr.cgrid++) { + if (qm_modify_cgr(&cgr, QMAN_CGR_FLAG_USE_INIT, NULL)) + err_cnt++; + } + + if (err_cnt) + pr_err("Warning: %d error%s while initialising CGR h/w\n", + err_cnt, (err_cnt > 1) ? "s" : ""); +} + +int qman_create_cgr(struct qman_cgr *cgr, u32 flags, + struct qm_mcc_initcgr *opts) +{ + struct qm_mcr_querycgr cgr_state; + struct qm_mcc_initcgr local_opts = {}; + int ret; + struct qman_portal *p; + + /* + * We have to check that the provided CGRID is within the limits of the + * data-structures, for obvious reasons. However we'll let h/w take + * care of determining whether it's within the limits of what exists on + * the SoC. 
+ */ + if (cgr->cgrid >= CGR_NUM) + return -EINVAL; + + preempt_disable(); + p = get_affine_portal(); + qman_cgr_cpus[cgr->cgrid] = smp_processor_id(); + preempt_enable(); + + cgr->chan = p->config->channel; + spin_lock(&p->cgr_lock); + + if (opts) { + ret = qman_query_cgr(cgr, &cgr_state); + if (ret) + goto out; + if (opts) + local_opts = *opts; + if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) + local_opts.cgr.cscn_targ_upd_ctrl = + QM_CGR_TARG_UDP_CTRL_WRITE_BIT | PORTAL_IDX(p); + else + /* Overwrite TARG */ + local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ | + TARG_MASK(p); + local_opts.we_mask |= QM_CGR_WE_CSCN_TARG; + + /* send init if flags indicate so */ + if (opts && (flags & QMAN_CGR_FLAG_USE_INIT)) + ret = qm_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT, + &local_opts); + else + ret = qm_modify_cgr(cgr, 0, &local_opts); + if (ret) + goto out; + } + + list_add(&cgr->node, &p->cgr_cbs); + + /* Determine if newly added object requires its callback to be called */ + ret = qman_query_cgr(cgr, &cgr_state); + if (ret) { + /* we can't go back, so proceed and return success */ + dev_err(p->config->dev, "CGR HW state partially modified\n"); + ret = 0; + goto out; + } + if (cgr->cb && cgr_state.cgr.cscn_en && + qman_cgrs_get(&p->cgrs[1], cgr->cgrid)) + cgr->cb(p, cgr, 1); +out: + spin_unlock(&p->cgr_lock); + put_affine_portal(); + return ret; +} +EXPORT_SYMBOL(qman_create_cgr); + +int qman_delete_cgr(struct qman_cgr *cgr) +{ + unsigned long irqflags; + struct qm_mcr_querycgr cgr_state; + struct qm_mcc_initcgr local_opts; + int ret = 0; + struct qman_cgr *i; + struct qman_portal *p = get_affine_portal(); + + if (cgr->chan != p->config->channel) { + /* attempt to delete from other portal than creator */ + dev_err(p->config->dev, "CGR not owned by current portal"); + dev_dbg(p->config->dev, " create 0x%x, delete 0x%x\n", + cgr->chan, p->config->channel); + + ret = -EINVAL; + goto put_portal; + } + memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr)); + spin_lock_irqsave(&p->cgr_lock, irqflags); + list_del(&cgr->node); + /* + * If there are no other CGR objects for this CGRID in the list, + * update CSCN_TARG accordingly + */ + list_for_each_entry(i, &p->cgr_cbs, node) + if (i->cgrid == cgr->cgrid && i->cb) + goto release_lock; + ret = qman_query_cgr(cgr, &cgr_state); + if (ret) { + /* add back to the list */ + list_add(&cgr->node, &p->cgr_cbs); + goto release_lock; + } + /* Overwrite TARG */ + local_opts.we_mask = QM_CGR_WE_CSCN_TARG; + if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) + local_opts.cgr.cscn_targ_upd_ctrl = PORTAL_IDX(p); + else + local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ & + ~(TARG_MASK(p)); + ret = qm_modify_cgr(cgr, 0, &local_opts); + if (ret) + /* add back to the list */ + list_add(&cgr->node, &p->cgr_cbs); +release_lock: + spin_unlock_irqrestore(&p->cgr_lock, irqflags); +put_portal: + put_affine_portal(); + return ret; +} +EXPORT_SYMBOL(qman_delete_cgr); + +struct cgr_comp { + struct qman_cgr *cgr; + struct completion completion; +}; + +static int qman_delete_cgr_thread(void *p) +{ + struct cgr_comp *cgr_comp = (struct cgr_comp *)p; + int ret; + + ret = qman_delete_cgr(cgr_comp->cgr); + complete(&cgr_comp->completion); + + return ret; +} + +void qman_delete_cgr_safe(struct qman_cgr *cgr) +{ + struct task_struct *thread; + struct cgr_comp cgr_comp; + + preempt_disable(); + if (qman_cgr_cpus[cgr->cgrid] != smp_processor_id()) { + init_completion(&cgr_comp.completion); + cgr_comp.cgr = cgr; + thread = kthread_create(qman_delete_cgr_thread, &cgr_comp, + "cgr_del"); + + if 
(IS_ERR(thread)) + goto out; + + kthread_bind(thread, qman_cgr_cpus[cgr->cgrid]); + wake_up_process(thread); + wait_for_completion(&cgr_comp.completion); + preempt_enable(); + return; + } +out: + qman_delete_cgr(cgr); + preempt_enable(); +} +EXPORT_SYMBOL(qman_delete_cgr_safe); + +/* Cleanup FQs */ + +static int _qm_mr_consume_and_match_verb(struct qm_portal *p, int v) +{ + const union qm_mr_entry *msg; + int found = 0; + + qm_mr_pvb_update(p); + msg = qm_mr_current(p); + while (msg) { + if ((msg->verb & QM_MR_VERB_TYPE_MASK) == v) + found = 1; + qm_mr_next(p); + qm_mr_cci_consume_to_current(p); + qm_mr_pvb_update(p); + msg = qm_mr_current(p); + } + return found; +} + +static int _qm_dqrr_consume_and_match(struct qm_portal *p, u32 fqid, int s, + bool wait) +{ + const struct qm_dqrr_entry *dqrr; + int found = 0; + + do { + qm_dqrr_pvb_update(p); + dqrr = qm_dqrr_current(p); + if (!dqrr) + cpu_relax(); + } while (wait && !dqrr); + + while (dqrr) { + if (dqrr->fqid == fqid && (dqrr->stat & s)) + found = 1; + qm_dqrr_cdc_consume_1ptr(p, dqrr, 0); + qm_dqrr_pvb_update(p); + qm_dqrr_next(p); + dqrr = qm_dqrr_current(p); + } + return found; +} + +#define qm_mr_drain(p, V) \ + _qm_mr_consume_and_match_verb(p, QM_MR_VERB_##V) + +#define qm_dqrr_drain(p, f, S) \ + _qm_dqrr_consume_and_match(p, f, QM_DQRR_STAT_##S, false) + +#define qm_dqrr_drain_wait(p, f, S) \ + _qm_dqrr_consume_and_match(p, f, QM_DQRR_STAT_##S, true) + +#define qm_dqrr_drain_nomatch(p) \ + _qm_dqrr_consume_and_match(p, 0, 0, false) + +static int qman_shutdown_fq(u32 fqid) +{ + struct qman_portal *p; + struct device *dev; + union qm_mc_command *mcc; + union qm_mc_result *mcr; + int orl_empty, drain = 0, ret = 0; + u32 channel, wq, res; + u8 state; + + p = get_affine_portal(); + dev = p->config->dev; + /* Determine the state of the FQID */ + mcc = qm_mc_start(&p->p); + mcc->queryfq_np.fqid = fqid; + qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP); + if (!qm_mc_result_timeout(&p->p, &mcr)) { + dev_err(dev, "QUERYFQ_NP timeout\n"); + ret = -ETIMEDOUT; + goto out; + } + + DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP); + state = mcr->queryfq_np.state & QM_MCR_NP_STATE_MASK; + if (state == QM_MCR_NP_STATE_OOS) + goto out; /* Already OOS, no need to do anymore checks */ + + /* Query which channel the FQ is using */ + mcc = qm_mc_start(&p->p); + mcc->queryfq.fqid = fqid; + qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ); + if (!qm_mc_result_timeout(&p->p, &mcr)) { + dev_err(dev, "QUERYFQ timeout\n"); + ret = -ETIMEDOUT; + goto out; + } + + DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ); + /* Need to store these since the MCR gets reused */ + channel = qm_fqd_get_chan(&mcr->queryfq.fqd); + wq = qm_fqd_get_wq(&mcr->queryfq.fqd); + + switch (state) { + case QM_MCR_NP_STATE_TEN_SCHED: + case QM_MCR_NP_STATE_TRU_SCHED: + case QM_MCR_NP_STATE_ACTIVE: + case QM_MCR_NP_STATE_PARKED: + orl_empty = 0; + mcc = qm_mc_start(&p->p); + mcc->alterfq.fqid = fqid; + qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE); + if (!qm_mc_result_timeout(&p->p, &mcr)) { + dev_err(dev, "QUERYFQ_NP timeout\n"); + ret = -ETIMEDOUT; + goto out; + } + DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == + QM_MCR_VERB_ALTER_RETIRE); + res = mcr->result; /* Make a copy as we reuse MCR below */ + + if (res == QM_MCR_RESULT_PENDING) { + /* + * Need to wait for the FQRN in the message ring, which + * will only occur once the FQ has been drained. 
In + * order for the FQ to drain the portal needs to be set + * to dequeue from the channel the FQ is scheduled on + */ + int found_fqrn = 0; + u16 dequeue_wq = 0; + + /* Flag that we need to drain FQ */ + drain = 1; + + if (channel >= qm_channel_pool1 && + channel < qm_channel_pool1 + 15) { + /* Pool channel, enable the bit in the portal */ + dequeue_wq = (channel - + qm_channel_pool1 + 1)<<4 | wq; + } else if (channel < qm_channel_pool1) { + /* Dedicated channel */ + dequeue_wq = wq; + } else { + dev_err(dev, "Can't recover FQ 0x%x, ch: 0x%x", + fqid, channel); + ret = -EBUSY; + goto out; + } + /* Set the sdqcr to drain this channel */ + if (channel < qm_channel_pool1) + qm_dqrr_sdqcr_set(&p->p, + QM_SDQCR_TYPE_ACTIVE | + QM_SDQCR_CHANNELS_DEDICATED); + else + qm_dqrr_sdqcr_set(&p->p, + QM_SDQCR_TYPE_ACTIVE | + QM_SDQCR_CHANNELS_POOL_CONV + (channel)); + do { + /* Keep draining DQRR while checking the MR*/ + qm_dqrr_drain_nomatch(&p->p); + /* Process message ring too */ + found_fqrn = qm_mr_drain(&p->p, FQRN); + cpu_relax(); + } while (!found_fqrn); + + } + if (res != QM_MCR_RESULT_OK && + res != QM_MCR_RESULT_PENDING) { + dev_err(dev, "retire_fq failed: FQ 0x%x, res=0x%x\n", + fqid, res); + ret = -EIO; + goto out; + } + if (!(mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)) { + /* + * ORL had no entries, no need to wait until the + * ERNs come in + */ + orl_empty = 1; + } + /* + * Retirement succeeded, check to see if FQ needs + * to be drained + */ + if (drain || mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY) { + /* FQ is Not Empty, drain using volatile DQ commands */ + do { + u32 vdqcr = fqid | QM_VDQCR_NUMFRAMES_SET(3); + + qm_dqrr_vdqcr_set(&p->p, vdqcr); + /* + * Wait for a dequeue and process the dequeues, + * making sure to empty the ring completely + */ + } while (qm_dqrr_drain_wait(&p->p, fqid, FQ_EMPTY)); + } + qm_dqrr_sdqcr_set(&p->p, 0); + + while (!orl_empty) { + /* Wait for the ORL to have been completely drained */ + orl_empty = qm_mr_drain(&p->p, FQRL); + cpu_relax(); + } + mcc = qm_mc_start(&p->p); + mcc->alterfq.fqid = fqid; + qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS); + if (!qm_mc_result_timeout(&p->p, &mcr)) { + ret = -ETIMEDOUT; + goto out; + } + + DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == + QM_MCR_VERB_ALTER_OOS); + if (mcr->result != QM_MCR_RESULT_OK) { + dev_err(dev, "OOS after drain fail: FQ 0x%x (0x%x)\n", + fqid, mcr->result); + ret = -EIO; + goto out; + } + break; + + case QM_MCR_NP_STATE_RETIRED: + /* Send OOS Command */ + mcc = qm_mc_start(&p->p); + mcc->alterfq.fqid = fqid; + qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS); + if (!qm_mc_result_timeout(&p->p, &mcr)) { + ret = -ETIMEDOUT; + goto out; + } + + DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == + QM_MCR_VERB_ALTER_OOS); + if (mcr->result) { + dev_err(dev, "OOS fail: FQ 0x%x (0x%x)\n", + fqid, mcr->result); + ret = -EIO; + goto out; + } + break; + + case QM_MCR_NP_STATE_OOS: + /* Done */ + break; + + default: + ret = -EIO; + } + +out: + put_affine_portal(); + return ret; +} + +const struct qm_portal_config *qman_get_qm_portal_config( + struct qman_portal *portal) +{ + return portal->config; +} + +struct gen_pool *qm_fqalloc; /* FQID allocator */ +struct gen_pool *qm_qpalloc; /* pool-channel allocator */ +struct gen_pool *qm_cgralloc; /* CGR ID allocator */ + +static int qman_alloc_range(struct gen_pool *p, u32 *result, u32 cnt) +{ + unsigned long addr; + + addr = gen_pool_alloc(p, cnt); + if (!addr) + return -ENOMEM; + + *result = addr & ~DPAA_GENALLOC_OFF; + + return 0; +} + +int qman_alloc_fqid_range(u32 
*result, u32 count) +{ + return qman_alloc_range(qm_fqalloc, result, count); +} +EXPORT_SYMBOL(qman_alloc_fqid_range); + +int qman_alloc_pool_range(u32 *result, u32 count) +{ + return qman_alloc_range(qm_qpalloc, result, count); +} +EXPORT_SYMBOL(qman_alloc_pool_range); + +int qman_alloc_cgrid_range(u32 *result, u32 count) +{ + return qman_alloc_range(qm_cgralloc, result, count); +} +EXPORT_SYMBOL(qman_alloc_cgrid_range); + +int qman_release_fqid(u32 fqid) +{ + int ret = qman_shutdown_fq(fqid); + + if (ret) { + pr_debug("FQID %d leaked\n", fqid); + return ret; + } + + gen_pool_free(qm_fqalloc, fqid | DPAA_GENALLOC_OFF, 1); + return 0; +} +EXPORT_SYMBOL(qman_release_fqid); + +static int qpool_cleanup(u32 qp) +{ + /* + * We query all FQDs starting from + * FQID 1 until we get an "invalid FQID" error, looking for non-OOS FQDs + * whose destination channel is the pool-channel being released. + * When a non-OOS FQD is found we attempt to clean it up + */ + struct qman_fq fq = { + .fqid = QM_FQID_RANGE_START + }; + int err; + + do { + struct qm_mcr_queryfq_np np; + + err = qman_query_fq_np(&fq, &np); + if (err) + /* FQID range exceeded, found no problems */ + return 0; + if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) { + struct qm_fqd fqd; + + err = qman_query_fq(&fq, &fqd); + if (WARN_ON(err)) + return 0; + if (qm_fqd_get_chan(&fqd) == qp) { + /* The channel is the FQ's target, clean it */ + err = qman_shutdown_fq(fq.fqid); + if (err) + /* + * Couldn't shut down the FQ + * so the pool must be leaked + */ + return err; + } + } + /* Move to the next FQID */ + fq.fqid++; + } while (1); +} + +int qman_release_pool(u32 qp) +{ + int ret; + + ret = qpool_cleanup(qp); + if (ret) { + pr_debug("CHID %d leaked\n", qp); + return ret; + } + + gen_pool_free(qm_qpalloc, qp | DPAA_GENALLOC_OFF, 1); + return 0; +} +EXPORT_SYMBOL(qman_release_pool); + +static int cgr_cleanup(u32 cgrid) +{ + /* + * query all FQDs starting from FQID 1 until we get an "invalid FQID" + * error, looking for non-OOS FQDs whose CGR is the CGR being released + */ + struct qman_fq fq = { + .fqid = 1 + }; + int err; + + do { + struct qm_mcr_queryfq_np np; + + err = qman_query_fq_np(&fq, &np); + if (err) + /* FQID range exceeded, found no problems */ + return 0; + if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) { + struct qm_fqd fqd; + + err = qman_query_fq(&fq, &fqd); + if (WARN_ON(err)) + return 0; + if ((fqd.fq_ctrl & QM_FQCTRL_CGE) && + fqd.cgid == cgrid) { + pr_err("CRGID 0x%x is being used by FQID 0x%x, CGR will be leaked\n", + cgrid, fq.fqid); + return -EIO; + } + } + /* Move to the next FQID */ + fq.fqid++; + } while (1); +} + +int qman_release_cgrid(u32 cgrid) +{ + int ret; + + ret = cgr_cleanup(cgrid); + if (ret) { + pr_debug("CGRID %d leaked\n", cgrid); + return ret; + } + + gen_pool_free(qm_cgralloc, cgrid | DPAA_GENALLOC_OFF, 1); + return 0; +} +EXPORT_SYMBOL(qman_release_cgrid); diff --git a/drivers/soc/fsl/qbman/qman_ccsr.c b/drivers/soc/fsl/qbman/qman_ccsr.c new file mode 100644 index 000000000000..0cace9e0077e --- /dev/null +++ b/drivers/soc/fsl/qbman/qman_ccsr.c @@ -0,0 +1,808 @@ +/* Copyright 2008 - 2016 Freescale Semiconductor, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "qman_priv.h" + +u16 qman_ip_rev; +EXPORT_SYMBOL(qman_ip_rev); +u16 qm_channel_pool1 = QMAN_CHANNEL_POOL1; +EXPORT_SYMBOL(qm_channel_pool1); + +/* Register offsets */ +#define REG_QCSP_LIO_CFG(n) (0x0000 + ((n) * 0x10)) +#define REG_QCSP_IO_CFG(n) (0x0004 + ((n) * 0x10)) +#define REG_QCSP_DD_CFG(n) (0x000c + ((n) * 0x10)) +#define REG_DD_CFG 0x0200 +#define REG_DCP_CFG(n) (0x0300 + ((n) * 0x10)) +#define REG_DCP_DD_CFG(n) (0x0304 + ((n) * 0x10)) +#define REG_DCP_DLM_AVG(n) (0x030c + ((n) * 0x10)) +#define REG_PFDR_FPC 0x0400 +#define REG_PFDR_FP_HEAD 0x0404 +#define REG_PFDR_FP_TAIL 0x0408 +#define REG_PFDR_FP_LWIT 0x0410 +#define REG_PFDR_CFG 0x0414 +#define REG_SFDR_CFG 0x0500 +#define REG_SFDR_IN_USE 0x0504 +#define REG_WQ_CS_CFG(n) (0x0600 + ((n) * 0x04)) +#define REG_WQ_DEF_ENC_WQID 0x0630 +#define REG_WQ_SC_DD_CFG(n) (0x640 + ((n) * 0x04)) +#define REG_WQ_PC_DD_CFG(n) (0x680 + ((n) * 0x04)) +#define REG_WQ_DC0_DD_CFG(n) (0x6c0 + ((n) * 0x04)) +#define REG_WQ_DC1_DD_CFG(n) (0x700 + ((n) * 0x04)) +#define REG_WQ_DCn_DD_CFG(n) (0x6c0 + ((n) * 0x40)) /* n=2,3 */ +#define REG_CM_CFG 0x0800 +#define REG_ECSR 0x0a00 +#define REG_ECIR 0x0a04 +#define REG_EADR 0x0a08 +#define REG_ECIR2 0x0a0c +#define REG_EDATA(n) (0x0a10 + ((n) * 0x04)) +#define REG_SBEC(n) (0x0a80 + ((n) * 0x04)) +#define REG_MCR 0x0b00 +#define REG_MCP(n) (0x0b04 + ((n) * 0x04)) +#define REG_MISC_CFG 0x0be0 +#define REG_HID_CFG 0x0bf0 +#define REG_IDLE_STAT 0x0bf4 +#define REG_IP_REV_1 0x0bf8 +#define REG_IP_REV_2 0x0bfc +#define REG_FQD_BARE 0x0c00 +#define REG_PFDR_BARE 0x0c20 +#define REG_offset_BAR 0x0004 /* relative to REG_[FQD|PFDR]_BARE */ +#define REG_offset_AR 0x0010 /* relative to REG_[FQD|PFDR]_BARE */ +#define REG_QCSP_BARE 0x0c80 +#define REG_QCSP_BAR 0x0c84 +#define REG_CI_SCHED_CFG 0x0d00 +#define REG_SRCIDR 0x0d04 +#define REG_LIODNR 0x0d08 +#define REG_CI_RLM_AVG 0x0d14 +#define REG_ERR_ISR 0x0e00 +#define REG_ERR_IER 0x0e04 +#define REG_REV3_QCSP_LIO_CFG(n) (0x1000 + ((n) * 0x10)) +#define REG_REV3_QCSP_IO_CFG(n) (0x1004 + ((n) * 0x10)) +#define REG_REV3_QCSP_DD_CFG(n) (0x100c 
+ ((n) * 0x10)) + +/* Assists for QMAN_MCR */ +#define MCR_INIT_PFDR 0x01000000 +#define MCR_get_rslt(v) (u8)((v) >> 24) +#define MCR_rslt_idle(r) (!(r) || ((r) >= 0xf0)) +#define MCR_rslt_ok(r) ((r) == 0xf0) +#define MCR_rslt_eaccess(r) ((r) == 0xf8) +#define MCR_rslt_inval(r) ((r) == 0xff) + +/* + * Corenet initiator settings. Stash request queues are 4-deep to match cores + * ability to snarf. Stash priority is 3, other priorities are 2. + */ +#define QM_CI_SCHED_CFG_SRCCIV 4 +#define QM_CI_SCHED_CFG_SRQ_W 3 +#define QM_CI_SCHED_CFG_RW_W 2 +#define QM_CI_SCHED_CFG_BMAN_W 2 +/* write SRCCIV enable */ +#define QM_CI_SCHED_CFG_SRCCIV_EN BIT(31) + +/* Follows WQ_CS_CFG0-5 */ +enum qm_wq_class { + qm_wq_portal = 0, + qm_wq_pool = 1, + qm_wq_fman0 = 2, + qm_wq_fman1 = 3, + qm_wq_caam = 4, + qm_wq_pme = 5, + qm_wq_first = qm_wq_portal, + qm_wq_last = qm_wq_pme +}; + +/* Follows FQD_[BARE|BAR|AR] and PFDR_[BARE|BAR|AR] */ +enum qm_memory { + qm_memory_fqd, + qm_memory_pfdr +}; + +/* Used by all error interrupt registers except 'inhibit' */ +#define QM_EIRQ_CIDE 0x20000000 /* Corenet Initiator Data Error */ +#define QM_EIRQ_CTDE 0x10000000 /* Corenet Target Data Error */ +#define QM_EIRQ_CITT 0x08000000 /* Corenet Invalid Target Transaction */ +#define QM_EIRQ_PLWI 0x04000000 /* PFDR Low Watermark */ +#define QM_EIRQ_MBEI 0x02000000 /* Multi-bit ECC Error */ +#define QM_EIRQ_SBEI 0x01000000 /* Single-bit ECC Error */ +#define QM_EIRQ_PEBI 0x00800000 /* PFDR Enqueues Blocked Interrupt */ +#define QM_EIRQ_IFSI 0x00020000 /* Invalid FQ Flow Control State */ +#define QM_EIRQ_ICVI 0x00010000 /* Invalid Command Verb */ +#define QM_EIRQ_IDDI 0x00000800 /* Invalid Dequeue (Direct-connect) */ +#define QM_EIRQ_IDFI 0x00000400 /* Invalid Dequeue FQ */ +#define QM_EIRQ_IDSI 0x00000200 /* Invalid Dequeue Source */ +#define QM_EIRQ_IDQI 0x00000100 /* Invalid Dequeue Queue */ +#define QM_EIRQ_IECE 0x00000010 /* Invalid Enqueue Configuration */ +#define QM_EIRQ_IEOI 0x00000008 /* Invalid Enqueue Overflow */ +#define QM_EIRQ_IESI 0x00000004 /* Invalid Enqueue State */ +#define QM_EIRQ_IECI 0x00000002 /* Invalid Enqueue Channel */ +#define QM_EIRQ_IEQI 0x00000001 /* Invalid Enqueue Queue */ + +/* QMAN_ECIR valid error bit */ +#define PORTAL_ECSR_ERR (QM_EIRQ_IEQI | QM_EIRQ_IESI | QM_EIRQ_IEOI | \ + QM_EIRQ_IDQI | QM_EIRQ_IDSI | QM_EIRQ_IDFI | \ + QM_EIRQ_IDDI | QM_EIRQ_ICVI | QM_EIRQ_IFSI) +#define FQID_ECSR_ERR (QM_EIRQ_IEQI | QM_EIRQ_IECI | QM_EIRQ_IESI | \ + QM_EIRQ_IEOI | QM_EIRQ_IDQI | QM_EIRQ_IDFI | \ + QM_EIRQ_IFSI) + +struct qm_ecir { + u32 info; /* res[30-31], ptyp[29], pnum[24-28], fqid[0-23] */ +}; + +static bool qm_ecir_is_dcp(const struct qm_ecir *p) +{ + return p->info & BIT(29); +} + +static int qm_ecir_get_pnum(const struct qm_ecir *p) +{ + return (p->info >> 24) & 0x1f; +} + +static int qm_ecir_get_fqid(const struct qm_ecir *p) +{ + return p->info & (BIT(24) - 1); +} + +struct qm_ecir2 { + u32 info; /* ptyp[31], res[10-30], pnum[0-9] */ +}; + +static bool qm_ecir2_is_dcp(const struct qm_ecir2 *p) +{ + return p->info & BIT(31); +} + +static int qm_ecir2_get_pnum(const struct qm_ecir2 *p) +{ + return p->info & (BIT(10) - 1); +} + +struct qm_eadr { + u32 info; /* memid[24-27], eadr[0-11] */ + /* v3: memid[24-28], eadr[0-15] */ +}; + +static int qm_eadr_get_memid(const struct qm_eadr *p) +{ + return (p->info >> 24) & 0xf; +} + +static int qm_eadr_get_eadr(const struct qm_eadr *p) +{ + return p->info & (BIT(12) - 1); +} + +static int qm_eadr_v3_get_memid(const struct qm_eadr *p) +{ + return (p->info 
>> 24) & 0x1f; +} + +static int qm_eadr_v3_get_eadr(const struct qm_eadr *p) +{ + return p->info & (BIT(16) - 1); +} + +struct qman_hwerr_txt { + u32 mask; + const char *txt; +}; + + +static const struct qman_hwerr_txt qman_hwerr_txts[] = { + { QM_EIRQ_CIDE, "Corenet Initiator Data Error" }, + { QM_EIRQ_CTDE, "Corenet Target Data Error" }, + { QM_EIRQ_CITT, "Corenet Invalid Target Transaction" }, + { QM_EIRQ_PLWI, "PFDR Low Watermark" }, + { QM_EIRQ_MBEI, "Multi-bit ECC Error" }, + { QM_EIRQ_SBEI, "Single-bit ECC Error" }, + { QM_EIRQ_PEBI, "PFDR Enqueues Blocked Interrupt" }, + { QM_EIRQ_ICVI, "Invalid Command Verb" }, + { QM_EIRQ_IFSI, "Invalid Flow Control State" }, + { QM_EIRQ_IDDI, "Invalid Dequeue (Direct-connect)" }, + { QM_EIRQ_IDFI, "Invalid Dequeue FQ" }, + { QM_EIRQ_IDSI, "Invalid Dequeue Source" }, + { QM_EIRQ_IDQI, "Invalid Dequeue Queue" }, + { QM_EIRQ_IECE, "Invalid Enqueue Configuration" }, + { QM_EIRQ_IEOI, "Invalid Enqueue Overflow" }, + { QM_EIRQ_IESI, "Invalid Enqueue State" }, + { QM_EIRQ_IECI, "Invalid Enqueue Channel" }, + { QM_EIRQ_IEQI, "Invalid Enqueue Queue" }, +}; + +struct qman_error_info_mdata { + u16 addr_mask; + u16 bits; + const char *txt; +}; + +static const struct qman_error_info_mdata error_mdata[] = { + { 0x01FF, 24, "FQD cache tag memory 0" }, + { 0x01FF, 24, "FQD cache tag memory 1" }, + { 0x01FF, 24, "FQD cache tag memory 2" }, + { 0x01FF, 24, "FQD cache tag memory 3" }, + { 0x0FFF, 512, "FQD cache memory" }, + { 0x07FF, 128, "SFDR memory" }, + { 0x01FF, 72, "WQ context memory" }, + { 0x00FF, 240, "CGR memory" }, + { 0x00FF, 302, "Internal Order Restoration List memory" }, + { 0x01FF, 256, "SW portal ring memory" }, +}; + +#define QMAN_ERRS_TO_DISABLE (QM_EIRQ_PLWI | QM_EIRQ_PEBI) + +/* + * TODO: unimplemented registers + * + * Keeping a list here of QMan registers I have not yet covered; + * QCSP_DD_IHRSR, QCSP_DD_IHRFR, QCSP_DD_HASR, + * DCP_DD_IHRSR, DCP_DD_IHRFR, DCP_DD_HASR, CM_CFG, + * QMAN_EECC, QMAN_SBET, QMAN_EINJ, QMAN_SBEC0-12 + */ + +/* Pointer to the start of the QMan's CCSR space */ +static u32 __iomem *qm_ccsr_start; +/* A SDQCR mask comprising all the available/visible pool channels */ +static u32 qm_pools_sdqcr; + +static inline u32 qm_ccsr_in(u32 offset) +{ + return ioread32be(qm_ccsr_start + offset/4); +} + +static inline void qm_ccsr_out(u32 offset, u32 val) +{ + iowrite32be(val, qm_ccsr_start + offset/4); +} + +u32 qm_get_pools_sdqcr(void) +{ + return qm_pools_sdqcr; +} + +enum qm_dc_portal { + qm_dc_portal_fman0 = 0, + qm_dc_portal_fman1 = 1 +}; + +static void qm_set_dc(enum qm_dc_portal portal, int ed, u8 sernd) +{ + DPAA_ASSERT(!ed || portal == qm_dc_portal_fman0 || + portal == qm_dc_portal_fman1); + if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) + qm_ccsr_out(REG_DCP_CFG(portal), + (ed ? 0x1000 : 0) | (sernd & 0x3ff)); + else + qm_ccsr_out(REG_DCP_CFG(portal), + (ed ? 
0x100 : 0) | (sernd & 0x1f)); +} + +static void qm_set_wq_scheduling(enum qm_wq_class wq_class, + u8 cs_elev, u8 csw2, u8 csw3, u8 csw4, + u8 csw5, u8 csw6, u8 csw7) +{ + qm_ccsr_out(REG_WQ_CS_CFG(wq_class), ((cs_elev & 0xff) << 24) | + ((csw2 & 0x7) << 20) | ((csw3 & 0x7) << 16) | + ((csw4 & 0x7) << 12) | ((csw5 & 0x7) << 8) | + ((csw6 & 0x7) << 4) | (csw7 & 0x7)); +} + +static void qm_set_hid(void) +{ + qm_ccsr_out(REG_HID_CFG, 0); +} + +static void qm_set_corenet_initiator(void) +{ + qm_ccsr_out(REG_CI_SCHED_CFG, QM_CI_SCHED_CFG_SRCCIV_EN | + (QM_CI_SCHED_CFG_SRCCIV << 24) | + (QM_CI_SCHED_CFG_SRQ_W << 8) | + (QM_CI_SCHED_CFG_RW_W << 4) | + QM_CI_SCHED_CFG_BMAN_W); +} + +static void qm_get_version(u16 *id, u8 *major, u8 *minor) +{ + u32 v = qm_ccsr_in(REG_IP_REV_1); + *id = (v >> 16); + *major = (v >> 8) & 0xff; + *minor = v & 0xff; +} + +#define PFDR_AR_EN BIT(31) +static void qm_set_memory(enum qm_memory memory, u64 ba, u32 size) +{ + u32 offset = (memory == qm_memory_fqd) ? REG_FQD_BARE : REG_PFDR_BARE; + u32 exp = ilog2(size); + + /* choke if size isn't within range */ + DPAA_ASSERT((size >= 4096) && (size <= 1024*1024*1024) && + is_power_of_2(size)); + /* choke if 'ba' has lower-alignment than 'size' */ + DPAA_ASSERT(!(ba & (size - 1))); + qm_ccsr_out(offset, upper_32_bits(ba)); + qm_ccsr_out(offset + REG_offset_BAR, lower_32_bits(ba)); + qm_ccsr_out(offset + REG_offset_AR, PFDR_AR_EN | (exp - 1)); +} + +static void qm_set_pfdr_threshold(u32 th, u8 k) +{ + qm_ccsr_out(REG_PFDR_FP_LWIT, th & 0xffffff); + qm_ccsr_out(REG_PFDR_CFG, k); +} + +static void qm_set_sfdr_threshold(u16 th) +{ + qm_ccsr_out(REG_SFDR_CFG, th & 0x3ff); +} + +static int qm_init_pfdr(struct device *dev, u32 pfdr_start, u32 num) +{ + u8 rslt = MCR_get_rslt(qm_ccsr_in(REG_MCR)); + + DPAA_ASSERT(pfdr_start && !(pfdr_start & 7) && !(num & 7) && num); + /* Make sure the command interface is 'idle' */ + if (!MCR_rslt_idle(rslt)) { + dev_crit(dev, "QMAN_MCR isn't idle"); + WARN_ON(1); + } + + /* Write the MCR command params then the verb */ + qm_ccsr_out(REG_MCP(0), pfdr_start); + /* + * TODO: remove this - it's a workaround for a model bug that is + * corrected in more recent versions. We use the workaround until + * everyone has upgraded. + */ + qm_ccsr_out(REG_MCP(1), pfdr_start + num - 16); + dma_wmb(); + qm_ccsr_out(REG_MCR, MCR_INIT_PFDR); + /* Poll for the result */ + do { + rslt = MCR_get_rslt(qm_ccsr_in(REG_MCR)); + } while (!MCR_rslt_idle(rslt)); + if (MCR_rslt_ok(rslt)) + return 0; + if (MCR_rslt_eaccess(rslt)) + return -EACCES; + if (MCR_rslt_inval(rslt)) + return -EINVAL; + dev_crit(dev, "Unexpected result from MCR_INIT_PFDR: %02x\n", rslt); + return -ENODEV; +} + +/* + * Ideally we would use the DMA API to turn rmem->base into a DMA address + * (especially if iommu translations ever get involved). Unfortunately, the + * DMA API currently does not allow mapping anything that is not backed with + * a struct page. 
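+ * So the FQD and PFDR backing stores are taken as-is from the
+ * reserved-memory regions registered below via RESERVEDMEM_OF_DECLARE()
+ * and programmed into the BAR registers by qm_set_memory() as raw
+ * physical addresses.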
+ */ +static dma_addr_t fqd_a, pfdr_a; +static size_t fqd_sz, pfdr_sz; + +static int qman_fqd(struct reserved_mem *rmem) +{ + fqd_a = rmem->base; + fqd_sz = rmem->size; + + WARN_ON(!(fqd_a && fqd_sz)); + + return 0; +} +RESERVEDMEM_OF_DECLARE(qman_fqd, "fsl,qman-fqd", qman_fqd); + +static int qman_pfdr(struct reserved_mem *rmem) +{ + pfdr_a = rmem->base; + pfdr_sz = rmem->size; + + WARN_ON(!(pfdr_a && pfdr_sz)); + + return 0; +} +RESERVEDMEM_OF_DECLARE(qman_pfdr, "fsl,qman-pfdr", qman_pfdr); + +static unsigned int qm_get_fqid_maxcnt(void) +{ + return fqd_sz / 64; +} + +/* + * Flush this memory range from data cache so that QMAN originated + * transactions for this memory region could be marked non-coherent. + */ +static int zero_priv_mem(struct device *dev, struct device_node *node, + phys_addr_t addr, size_t sz) +{ + /* map as cacheable, non-guarded */ + void __iomem *tmpp = ioremap_prot(addr, sz, 0); + + memset_io(tmpp, 0, sz); + flush_dcache_range((unsigned long)tmpp, + (unsigned long)tmpp + sz); + iounmap(tmpp); + + return 0; +} + +static void log_edata_bits(struct device *dev, u32 bit_count) +{ + u32 i, j, mask = 0xffffffff; + + dev_warn(dev, "ErrInt, EDATA:\n"); + i = bit_count / 32; + if (bit_count % 32) { + i++; + mask = ~(mask << bit_count % 32); + } + j = 16 - i; + dev_warn(dev, " 0x%08x\n", qm_ccsr_in(REG_EDATA(j)) & mask); + j++; + for (; j < 16; j++) + dev_warn(dev, " 0x%08x\n", qm_ccsr_in(REG_EDATA(j))); +} + +static void log_additional_error_info(struct device *dev, u32 isr_val, + u32 ecsr_val) +{ + struct qm_ecir ecir_val; + struct qm_eadr eadr_val; + int memid; + + ecir_val.info = qm_ccsr_in(REG_ECIR); + /* Is portal info valid */ + if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) { + struct qm_ecir2 ecir2_val; + + ecir2_val.info = qm_ccsr_in(REG_ECIR2); + if (ecsr_val & PORTAL_ECSR_ERR) { + dev_warn(dev, "ErrInt: %s id %d\n", + qm_ecir2_is_dcp(&ecir2_val) ? "DCP" : "SWP", + qm_ecir2_get_pnum(&ecir2_val)); + } + if (ecsr_val & (FQID_ECSR_ERR | QM_EIRQ_IECE)) + dev_warn(dev, "ErrInt: ecir.fqid 0x%x\n", + qm_ecir_get_fqid(&ecir_val)); + + if (ecsr_val & (QM_EIRQ_SBEI|QM_EIRQ_MBEI)) { + eadr_val.info = qm_ccsr_in(REG_EADR); + memid = qm_eadr_v3_get_memid(&eadr_val); + dev_warn(dev, "ErrInt: EADR Memory: %s, 0x%x\n", + error_mdata[memid].txt, + error_mdata[memid].addr_mask + & qm_eadr_v3_get_eadr(&eadr_val)); + log_edata_bits(dev, error_mdata[memid].bits); + } + } else { + if (ecsr_val & PORTAL_ECSR_ERR) { + dev_warn(dev, "ErrInt: %s id %d\n", + qm_ecir_is_dcp(&ecir_val) ? 
"DCP" : "SWP", + qm_ecir_get_pnum(&ecir_val)); + } + if (ecsr_val & FQID_ECSR_ERR) + dev_warn(dev, "ErrInt: ecir.fqid 0x%x\n", + qm_ecir_get_fqid(&ecir_val)); + + if (ecsr_val & (QM_EIRQ_SBEI|QM_EIRQ_MBEI)) { + eadr_val.info = qm_ccsr_in(REG_EADR); + memid = qm_eadr_get_memid(&eadr_val); + dev_warn(dev, "ErrInt: EADR Memory: %s, 0x%x\n", + error_mdata[memid].txt, + error_mdata[memid].addr_mask + & qm_eadr_get_eadr(&eadr_val)); + log_edata_bits(dev, error_mdata[memid].bits); + } + } +} + +static irqreturn_t qman_isr(int irq, void *ptr) +{ + u32 isr_val, ier_val, ecsr_val, isr_mask, i; + struct device *dev = ptr; + + ier_val = qm_ccsr_in(REG_ERR_IER); + isr_val = qm_ccsr_in(REG_ERR_ISR); + ecsr_val = qm_ccsr_in(REG_ECSR); + isr_mask = isr_val & ier_val; + + if (!isr_mask) + return IRQ_NONE; + + for (i = 0; i < ARRAY_SIZE(qman_hwerr_txts); i++) { + if (qman_hwerr_txts[i].mask & isr_mask) { + dev_err_ratelimited(dev, "ErrInt: %s\n", + qman_hwerr_txts[i].txt); + if (qman_hwerr_txts[i].mask & ecsr_val) { + log_additional_error_info(dev, isr_mask, + ecsr_val); + /* Re-arm error capture registers */ + qm_ccsr_out(REG_ECSR, ecsr_val); + } + if (qman_hwerr_txts[i].mask & QMAN_ERRS_TO_DISABLE) { + dev_dbg(dev, "Disabling error 0x%x\n", + qman_hwerr_txts[i].mask); + ier_val &= ~qman_hwerr_txts[i].mask; + qm_ccsr_out(REG_ERR_IER, ier_val); + } + } + } + qm_ccsr_out(REG_ERR_ISR, isr_val); + + return IRQ_HANDLED; +} + +static int qman_init_ccsr(struct device *dev) +{ + int i, err; + + /* FQD memory */ + qm_set_memory(qm_memory_fqd, fqd_a, fqd_sz); + /* PFDR memory */ + qm_set_memory(qm_memory_pfdr, pfdr_a, pfdr_sz); + err = qm_init_pfdr(dev, 8, pfdr_sz / 64 - 8); + if (err) + return err; + /* thresholds */ + qm_set_pfdr_threshold(512, 64); + qm_set_sfdr_threshold(128); + /* clear stale PEBI bit from interrupt status register */ + qm_ccsr_out(REG_ERR_ISR, QM_EIRQ_PEBI); + /* corenet initiator settings */ + qm_set_corenet_initiator(); + /* HID settings */ + qm_set_hid(); + /* Set scheduling weights to defaults */ + for (i = qm_wq_first; i <= qm_wq_last; i++) + qm_set_wq_scheduling(i, 0, 0, 0, 0, 0, 0, 0); + /* We are not prepared to accept ERNs for hardware enqueues */ + qm_set_dc(qm_dc_portal_fman0, 1, 0); + qm_set_dc(qm_dc_portal_fman1, 1, 0); + return 0; +} + +#define LIO_CFG_LIODN_MASK 0x0fff0000 +void qman_liodn_fixup(u16 channel) +{ + static int done; + static u32 liodn_offset; + u32 before, after; + int idx = channel - QM_CHANNEL_SWPORTAL0; + + if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) + before = qm_ccsr_in(REG_REV3_QCSP_LIO_CFG(idx)); + else + before = qm_ccsr_in(REG_QCSP_LIO_CFG(idx)); + if (!done) { + liodn_offset = before & LIO_CFG_LIODN_MASK; + done = 1; + return; + } + after = (before & (~LIO_CFG_LIODN_MASK)) | liodn_offset; + if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) + qm_ccsr_out(REG_REV3_QCSP_LIO_CFG(idx), after); + else + qm_ccsr_out(REG_QCSP_LIO_CFG(idx), after); +} + +#define IO_CFG_SDEST_MASK 0x00ff0000 +void qman_set_sdest(u16 channel, unsigned int cpu_idx) +{ + int idx = channel - QM_CHANNEL_SWPORTAL0; + u32 before, after; + + if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) { + before = qm_ccsr_in(REG_REV3_QCSP_IO_CFG(idx)); + /* Each pair of vcpu share the same SRQ(SDEST) */ + cpu_idx /= 2; + after = (before & (~IO_CFG_SDEST_MASK)) | (cpu_idx << 16); + qm_ccsr_out(REG_REV3_QCSP_IO_CFG(idx), after); + } else { + before = qm_ccsr_in(REG_QCSP_IO_CFG(idx)); + after = (before & (~IO_CFG_SDEST_MASK)) | (cpu_idx << 16); + qm_ccsr_out(REG_QCSP_IO_CFG(idx), after); + } +} + +static int 
qman_resource_init(struct device *dev) +{ + int pool_chan_num, cgrid_num; + int ret, i; + + switch (qman_ip_rev >> 8) { + case 1: + pool_chan_num = 15; + cgrid_num = 256; + break; + case 2: + pool_chan_num = 3; + cgrid_num = 64; + break; + case 3: + pool_chan_num = 15; + cgrid_num = 256; + break; + default: + return -ENODEV; + } + + ret = gen_pool_add(qm_qpalloc, qm_channel_pool1 | DPAA_GENALLOC_OFF, + pool_chan_num, -1); + if (ret) { + dev_err(dev, "Failed to seed pool channels (%d)\n", ret); + return ret; + } + + ret = gen_pool_add(qm_cgralloc, DPAA_GENALLOC_OFF, cgrid_num, -1); + if (ret) { + dev_err(dev, "Failed to seed CGRID range (%d)\n", ret); + return ret; + } + + /* parse pool channels into the SDQCR mask */ + for (i = 0; i < cgrid_num; i++) + qm_pools_sdqcr |= QM_SDQCR_CHANNELS_POOL_CONV(i); + + ret = gen_pool_add(qm_fqalloc, QM_FQID_RANGE_START | DPAA_GENALLOC_OFF, + qm_get_fqid_maxcnt() - QM_FQID_RANGE_START, -1); + if (ret) { + dev_err(dev, "Failed to seed FQID range (%d)\n", ret); + return ret; + } + + return 0; +} + +static int fsl_qman_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *node = dev->of_node; + struct resource *res; + int ret, err_irq; + u16 id; + u8 major, minor; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(dev, "Can't get %s property 'IORESOURCE_MEM'\n", + node->full_name); + return -ENXIO; + } + qm_ccsr_start = devm_ioremap(dev, res->start, resource_size(res)); + if (!qm_ccsr_start) + return -ENXIO; + + qm_get_version(&id, &major, &minor); + if (major == 1 && minor == 0) { + dev_err(dev, "Rev1.0 on P4080 rev1 is not supported!\n"); + return -ENODEV; + } else if (major == 1 && minor == 1) + qman_ip_rev = QMAN_REV11; + else if (major == 1 && minor == 2) + qman_ip_rev = QMAN_REV12; + else if (major == 2 && minor == 0) + qman_ip_rev = QMAN_REV20; + else if (major == 3 && minor == 0) + qman_ip_rev = QMAN_REV30; + else if (major == 3 && minor == 1) + qman_ip_rev = QMAN_REV31; + else { + dev_err(dev, "Unknown QMan version\n"); + return -ENODEV; + } + + if ((qman_ip_rev & 0xff00) >= QMAN_REV30) + qm_channel_pool1 = QMAN_CHANNEL_POOL1_REV3; + + ret = zero_priv_mem(dev, node, fqd_a, fqd_sz); + WARN_ON(ret); + if (ret) + return -ENODEV; + + ret = qman_init_ccsr(dev); + if (ret) { + dev_err(dev, "CCSR setup failed\n"); + return ret; + } + + err_irq = platform_get_irq(pdev, 0); + if (err_irq <= 0) { + dev_info(dev, "Can't get %s property 'interrupts'\n", + node->full_name); + return -ENODEV; + } + ret = devm_request_irq(dev, err_irq, qman_isr, IRQF_SHARED, "qman-err", + dev); + if (ret) { + dev_err(dev, "devm_request_irq() failed %d for '%s'\n", + ret, node->full_name); + return ret; + } + + /* + * Write-to-clear any stale bits, (eg. starvation being asserted prior + * to resource allocation during driver init). 
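+ * Both ERR_ISR and ERR_IER below are written with all ones: clear any
+ * pending status first, then enable every error source; qman_isr() drops
+ * the sources in QMAN_ERRS_TO_DISABLE if they ever fire.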
+ */ + qm_ccsr_out(REG_ERR_ISR, 0xffffffff); + /* Enable Error Interrupts */ + qm_ccsr_out(REG_ERR_IER, 0xffffffff); + + qm_fqalloc = devm_gen_pool_create(dev, 0, -1, "qman-fqalloc"); + if (IS_ERR(qm_fqalloc)) { + ret = PTR_ERR(qm_fqalloc); + dev_err(dev, "qman-fqalloc pool init failed (%d)\n", ret); + return ret; + } + + qm_qpalloc = devm_gen_pool_create(dev, 0, -1, "qman-qpalloc"); + if (IS_ERR(qm_qpalloc)) { + ret = PTR_ERR(qm_qpalloc); + dev_err(dev, "qman-qpalloc pool init failed (%d)\n", ret); + return ret; + } + + qm_cgralloc = devm_gen_pool_create(dev, 0, -1, "qman-cgralloc"); + if (IS_ERR(qm_cgralloc)) { + ret = PTR_ERR(qm_cgralloc); + dev_err(dev, "qman-cgralloc pool init failed (%d)\n", ret); + return ret; + } + + ret = qman_resource_init(dev); + if (ret) + return ret; + + ret = qman_alloc_fq_table(qm_get_fqid_maxcnt()); + if (ret) + return ret; + + ret = qman_wq_alloc(); + if (ret) + return ret; + + return 0; +} + +static const struct of_device_id fsl_qman_ids[] = { + { + .compatible = "fsl,qman", + }, + {} +}; + +static struct platform_driver fsl_qman_driver = { + .driver = { + .name = KBUILD_MODNAME, + .of_match_table = fsl_qman_ids, + .suppress_bind_attrs = true, + }, + .probe = fsl_qman_probe, +}; + +builtin_platform_driver(fsl_qman_driver); diff --git a/drivers/soc/fsl/qbman/qman_portal.c b/drivers/soc/fsl/qbman/qman_portal.c new file mode 100644 index 000000000000..148614388fca --- /dev/null +++ b/drivers/soc/fsl/qbman/qman_portal.c @@ -0,0 +1,355 @@ +/* Copyright 2008 - 2016 Freescale Semiconductor, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "qman_priv.h" + +/* Enable portal interupts (as opposed to polling mode) */ +#define CONFIG_FSL_DPA_PIRQ_SLOW 1 +#define CONFIG_FSL_DPA_PIRQ_FAST 1 + +static struct cpumask portal_cpus; +/* protect qman global registers and global data shared among portals */ +static DEFINE_SPINLOCK(qman_lock); + +static void portal_set_cpu(struct qm_portal_config *pcfg, int cpu) +{ +#ifdef CONFIG_FSL_PAMU + struct device *dev = pcfg->dev; + int window_count = 1; + struct iommu_domain_geometry geom_attr; + struct pamu_stash_attribute stash_attr; + int ret; + + pcfg->iommu_domain = iommu_domain_alloc(&platform_bus_type); + if (!pcfg->iommu_domain) { + dev_err(dev, "%s(): iommu_domain_alloc() failed", __func__); + goto no_iommu; + } + geom_attr.aperture_start = 0; + geom_attr.aperture_end = + ((dma_addr_t)1 << min(8 * sizeof(dma_addr_t), (size_t)36)) - 1; + geom_attr.force_aperture = true; + ret = iommu_domain_set_attr(pcfg->iommu_domain, DOMAIN_ATTR_GEOMETRY, + &geom_attr); + if (ret < 0) { + dev_err(dev, "%s(): iommu_domain_set_attr() = %d", __func__, + ret); + goto out_domain_free; + } + ret = iommu_domain_set_attr(pcfg->iommu_domain, DOMAIN_ATTR_WINDOWS, + &window_count); + if (ret < 0) { + dev_err(dev, "%s(): iommu_domain_set_attr() = %d", __func__, + ret); + goto out_domain_free; + } + stash_attr.cpu = cpu; + stash_attr.cache = PAMU_ATTR_CACHE_L1; + ret = iommu_domain_set_attr(pcfg->iommu_domain, + DOMAIN_ATTR_FSL_PAMU_STASH, + &stash_attr); + if (ret < 0) { + dev_err(dev, "%s(): iommu_domain_set_attr() = %d", + __func__, ret); + goto out_domain_free; + } + ret = iommu_domain_window_enable(pcfg->iommu_domain, 0, 0, 1ULL << 36, + IOMMU_READ | IOMMU_WRITE); + if (ret < 0) { + dev_err(dev, "%s(): iommu_domain_window_enable() = %d", + __func__, ret); + goto out_domain_free; + } + ret = iommu_attach_device(pcfg->iommu_domain, dev); + if (ret < 0) { + dev_err(dev, "%s(): iommu_device_attach() = %d", __func__, + ret); + goto out_domain_free; + } + ret = iommu_domain_set_attr(pcfg->iommu_domain, + DOMAIN_ATTR_FSL_PAMU_ENABLE, + &window_count); + if (ret < 0) { + dev_err(dev, "%s(): iommu_domain_set_attr() = %d", __func__, + ret); + goto out_detach_device; + } + +no_iommu: +#endif + qman_set_sdest(pcfg->channel, cpu); + + return; + +#ifdef CONFIG_FSL_PAMU +out_detach_device: + iommu_detach_device(pcfg->iommu_domain, NULL); +out_domain_free: + iommu_domain_free(pcfg->iommu_domain); + pcfg->iommu_domain = NULL; +#endif +} + +static struct qman_portal *init_pcfg(struct qm_portal_config *pcfg) +{ + struct qman_portal *p; + u32 irq_sources = 0; + + /* We need the same LIODN offset for all portals */ + qman_liodn_fixup(pcfg->channel); + + pcfg->iommu_domain = NULL; + portal_set_cpu(pcfg, pcfg->cpu); + + p = qman_create_affine_portal(pcfg, NULL); + if (!p) { + dev_crit(pcfg->dev, "%s: Portal failure on cpu %d\n", + __func__, pcfg->cpu); + return NULL; + } + + /* Determine what should be interrupt-vs-poll driven */ +#ifdef CONFIG_FSL_DPA_PIRQ_SLOW + irq_sources |= QM_PIRQ_EQCI | QM_PIRQ_EQRI | QM_PIRQ_MRI | + QM_PIRQ_CSCI; +#endif +#ifdef CONFIG_FSL_DPA_PIRQ_FAST + irq_sources |= QM_PIRQ_DQRI; +#endif + qman_p_irqsource_add(p, irq_sources); + + spin_lock(&qman_lock); + if (cpumask_equal(&portal_cpus, cpu_possible_mask)) { + /* all assigned portals are initialized now */ + qman_init_cgr_all(); + } + spin_unlock(&qman_lock); + + dev_info(pcfg->dev, "Portal initialised, cpu %d\n", pcfg->cpu); + + return p; +} + +static void qman_portal_update_sdest(const struct qm_portal_config *pcfg, + unsigned int cpu) 
+{ +#ifdef CONFIG_FSL_PAMU /* TODO */ + struct pamu_stash_attribute stash_attr; + int ret; + + if (pcfg->iommu_domain) { + stash_attr.cpu = cpu; + stash_attr.cache = PAMU_ATTR_CACHE_L1; + ret = iommu_domain_set_attr(pcfg->iommu_domain, + DOMAIN_ATTR_FSL_PAMU_STASH, &stash_attr); + if (ret < 0) { + dev_err(pcfg->dev, + "Failed to update pamu stash setting\n"); + return; + } + } +#endif + qman_set_sdest(pcfg->channel, cpu); +} + +static void qman_offline_cpu(unsigned int cpu) +{ + struct qman_portal *p; + const struct qm_portal_config *pcfg; + + p = affine_portals[cpu]; + if (p) { + pcfg = qman_get_qm_portal_config(p); + if (pcfg) { + irq_set_affinity(pcfg->irq, cpumask_of(0)); + qman_portal_update_sdest(pcfg, 0); + } + } +} + +static void qman_online_cpu(unsigned int cpu) +{ + struct qman_portal *p; + const struct qm_portal_config *pcfg; + + p = affine_portals[cpu]; + if (p) { + pcfg = qman_get_qm_portal_config(p); + if (pcfg) { + irq_set_affinity(pcfg->irq, cpumask_of(cpu)); + qman_portal_update_sdest(pcfg, cpu); + } + } +} + +static int qman_hotplug_cpu_callback(struct notifier_block *nfb, + unsigned long action, void *hcpu) +{ + unsigned int cpu = (unsigned long)hcpu; + + switch (action) { + case CPU_ONLINE: + case CPU_ONLINE_FROZEN: + qman_online_cpu(cpu); + break; + case CPU_DOWN_PREPARE: + case CPU_DOWN_PREPARE_FROZEN: + qman_offline_cpu(cpu); + default: + break; + } + return NOTIFY_OK; +} + +static struct notifier_block qman_hotplug_cpu_notifier = { + .notifier_call = qman_hotplug_cpu_callback, +}; + +static int qman_portal_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *node = dev->of_node; + struct qm_portal_config *pcfg; + struct resource *addr_phys[2]; + const u32 *channel; + void __iomem *va; + int irq, len, cpu; + + pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL); + if (!pcfg) + return -ENOMEM; + + pcfg->dev = dev; + + addr_phys[0] = platform_get_resource(pdev, IORESOURCE_MEM, + DPAA_PORTAL_CE); + if (!addr_phys[0]) { + dev_err(dev, "Can't get %s property 'reg::CE'\n", + node->full_name); + return -ENXIO; + } + + addr_phys[1] = platform_get_resource(pdev, IORESOURCE_MEM, + DPAA_PORTAL_CI); + if (!addr_phys[1]) { + dev_err(dev, "Can't get %s property 'reg::CI'\n", + node->full_name); + return -ENXIO; + } + + channel = of_get_property(node, "cell-index", &len); + if (!channel || (len != 4)) { + dev_err(dev, "Can't get %s property 'cell-index'\n", + node->full_name); + return -ENXIO; + } + pcfg->channel = *channel; + pcfg->cpu = -1; + irq = platform_get_irq(pdev, 0); + if (irq <= 0) { + dev_err(dev, "Can't get %s IRQ\n", node->full_name); + return -ENXIO; + } + pcfg->irq = irq; + + va = ioremap_prot(addr_phys[0]->start, resource_size(addr_phys[0]), 0); + if (!va) + goto err_ioremap1; + + pcfg->addr_virt[DPAA_PORTAL_CE] = va; + + va = ioremap_prot(addr_phys[1]->start, resource_size(addr_phys[1]), + _PAGE_GUARDED | _PAGE_NO_CACHE); + if (!va) + goto err_ioremap2; + + pcfg->addr_virt[DPAA_PORTAL_CI] = va; + + pcfg->pools = qm_get_pools_sdqcr(); + + spin_lock(&qman_lock); + cpu = cpumask_next_zero(-1, &portal_cpus); + if (cpu >= nr_cpu_ids) { + /* unassigned portal, skip init */ + spin_unlock(&qman_lock); + return 0; + } + + cpumask_set_cpu(cpu, &portal_cpus); + spin_unlock(&qman_lock); + pcfg->cpu = cpu; + + if (!init_pcfg(pcfg)) + goto err_ioremap2; + + /* clear irq affinity if assigned cpu is offline */ + if (!cpu_online(cpu)) + qman_offline_cpu(cpu); + + return 0; + +err_ioremap2: + iounmap(pcfg->addr_virt[DPAA_PORTAL_CE]); 
+err_ioremap1: + dev_err(dev, "ioremap failed\n"); + return -ENXIO; +} + +static const struct of_device_id qman_portal_ids[] = { + { + .compatible = "fsl,qman-portal", + }, + {} +}; +MODULE_DEVICE_TABLE(of, qman_portal_ids); + +static struct platform_driver qman_portal_driver = { + .driver = { + .name = KBUILD_MODNAME, + .of_match_table = qman_portal_ids, + }, + .probe = qman_portal_probe, +}; + +static int __init qman_portal_driver_register(struct platform_driver *drv) +{ + int ret; + + ret = platform_driver_register(drv); + if (ret < 0) + return ret; + + register_hotcpu_notifier(&qman_hotplug_cpu_notifier); + + return 0; +} + +module_driver(qman_portal_driver, + qman_portal_driver_register, platform_driver_unregister); diff --git a/drivers/soc/fsl/qbman/qman_priv.h b/drivers/soc/fsl/qbman/qman_priv.h new file mode 100644 index 000000000000..5cf821e623a9 --- /dev/null +++ b/drivers/soc/fsl/qbman/qman_priv.h @@ -0,0 +1,371 @@ +/* Copyright 2008 - 2016 Freescale Semiconductor, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include "dpaa_sys.h" + +#include +#include + +#if defined(CONFIG_FSL_PAMU) +#include +#endif + +struct qm_mcr_querywq { + u8 verb; + u8 result; + u16 channel_wq; /* ignores wq (3 lsbits): _res[0-2] */ + u8 __reserved[28]; + u32 wq_len[8]; +} __packed; + +static inline u16 qm_mcr_querywq_get_chan(const struct qm_mcr_querywq *wq) +{ + return wq->channel_wq >> 3; +} + +struct __qm_mcr_querycongestion { + u32 state[8]; +}; + +/* "Query Congestion Group State" */ +struct qm_mcr_querycongestion { + u8 verb; + u8 result; + u8 __reserved[30]; + /* Access this struct using qman_cgrs_get() */ + struct __qm_mcr_querycongestion state; +} __packed; + +/* "Query CGR" */ +struct qm_mcr_querycgr { + u8 verb; + u8 result; + u16 __reserved1; + struct __qm_mc_cgr cgr; /* CGR fields */ + u8 __reserved2[6]; + u8 i_bcnt_hi; /* high 8-bits of 40-bit "Instant" */ + u32 i_bcnt_lo; /* low 32-bits of 40-bit */ + u8 __reserved3[3]; + u8 a_bcnt_hi; /* high 8-bits of 40-bit "Average" */ + u32 a_bcnt_lo; /* low 32-bits of 40-bit */ + u32 cscn_targ_swp[4]; +} __packed; + +static inline u64 qm_mcr_querycgr_i_get64(const struct qm_mcr_querycgr *q) +{ + return ((u64)q->i_bcnt_hi << 32) | (u64)q->i_bcnt_lo; +} +static inline u64 qm_mcr_querycgr_a_get64(const struct qm_mcr_querycgr *q) +{ + return ((u64)q->a_bcnt_hi << 32) | (u64)q->a_bcnt_lo; +} + +/* "Query FQ Non-Programmable Fields" */ +struct qm_mcc_queryfq_np { + u8 _ncw_verb; + u8 __reserved1[3]; + u32 fqid; /* 24-bit */ + u8 __reserved2[56]; +} __packed; + +struct qm_mcr_queryfq_np { + u8 verb; + u8 result; + u8 __reserved1; + u8 state; /* QM_MCR_NP_STATE_*** */ + u32 fqd_link; /* 24-bit, _res2[24-31] */ + u16 odp_seq; /* 14-bit, _res3[14-15] */ + u16 orp_nesn; /* 14-bit, _res4[14-15] */ + u16 orp_ea_hseq; /* 15-bit, _res5[15] */ + u16 orp_ea_tseq; /* 15-bit, _res6[15] */ + u32 orp_ea_hptr; /* 24-bit, _res7[24-31] */ + u32 orp_ea_tptr; /* 24-bit, _res8[24-31] */ + u32 pfdr_hptr; /* 24-bit, _res9[24-31] */ + u32 pfdr_tptr; /* 24-bit, _res10[24-31] */ + u8 __reserved2[5]; + u8 is; /* 1-bit, _res12[1-7] */ + u16 ics_surp; + u32 byte_cnt; + u32 frm_cnt; /* 24-bit, _res13[24-31] */ + u32 __reserved3; + u16 ra1_sfdr; /* QM_MCR_NP_RA1_*** */ + u16 ra2_sfdr; /* QM_MCR_NP_RA2_*** */ + u16 __reserved4; + u16 od1_sfdr; /* QM_MCR_NP_OD1_*** */ + u16 od2_sfdr; /* QM_MCR_NP_OD2_*** */ + u16 od3_sfdr; /* QM_MCR_NP_OD3_*** */ +} __packed; + +#define QM_MCR_NP_STATE_FE 0x10 +#define QM_MCR_NP_STATE_R 0x08 +#define QM_MCR_NP_STATE_MASK 0x07 /* Reads FQD::STATE; */ +#define QM_MCR_NP_STATE_OOS 0x00 +#define QM_MCR_NP_STATE_RETIRED 0x01 +#define QM_MCR_NP_STATE_TEN_SCHED 0x02 +#define QM_MCR_NP_STATE_TRU_SCHED 0x03 +#define QM_MCR_NP_STATE_PARKED 0x04 +#define QM_MCR_NP_STATE_ACTIVE 0x05 +#define QM_MCR_NP_PTR_MASK 0x07ff /* for RA[12] & OD[123] */ +#define QM_MCR_NP_RA1_NRA(v) (((v) >> 14) & 0x3) /* FQD::NRA */ +#define QM_MCR_NP_RA2_IT(v) (((v) >> 14) & 0x1) /* FQD::IT */ +#define QM_MCR_NP_OD1_NOD(v) (((v) >> 14) & 0x3) /* FQD::NOD */ +#define QM_MCR_NP_OD3_NPC(v) (((v) >> 14) & 0x3) /* FQD::NPC */ + +enum qm_mcr_queryfq_np_masks { + qm_mcr_fqd_link_mask = BIT(24)-1, + qm_mcr_odp_seq_mask = BIT(14)-1, + qm_mcr_orp_nesn_mask = BIT(14)-1, + qm_mcr_orp_ea_hseq_mask = BIT(15)-1, + qm_mcr_orp_ea_tseq_mask = BIT(15)-1, + qm_mcr_orp_ea_hptr_mask = BIT(24)-1, + qm_mcr_orp_ea_tptr_mask = BIT(24)-1, + qm_mcr_pfdr_hptr_mask = BIT(24)-1, + qm_mcr_pfdr_tptr_mask = BIT(24)-1, + qm_mcr_is_mask = BIT(1)-1, + qm_mcr_frm_cnt_mask = BIT(24)-1, +}; 
+#define qm_mcr_np_get(np, field) \ + ((np)->field & (qm_mcr_##field##_mask)) + +/* Congestion Groups */ + +/* + * This wrapper represents a bit-array for the state of the 256 QMan congestion + * groups. Is also used as a *mask* for congestion groups, eg. so we ignore + * those that don't concern us. We harness the structure and accessor details + * already used in the management command to query congestion groups. + */ +#define CGR_BITS_PER_WORD 5 +#define CGR_WORD(x) ((x) >> CGR_BITS_PER_WORD) +#define CGR_BIT(x) (BIT(31) >> ((x) & 0x1f)) +#define CGR_NUM (sizeof(struct __qm_mcr_querycongestion) << 3) + +struct qman_cgrs { + struct __qm_mcr_querycongestion q; +}; + +static inline void qman_cgrs_init(struct qman_cgrs *c) +{ + memset(c, 0, sizeof(*c)); +} + +static inline void qman_cgrs_fill(struct qman_cgrs *c) +{ + memset(c, 0xff, sizeof(*c)); +} + +static inline int qman_cgrs_get(struct qman_cgrs *c, u8 cgr) +{ + return c->q.state[CGR_WORD(cgr)] & CGR_BIT(cgr); +} + +static inline void qman_cgrs_cp(struct qman_cgrs *dest, + const struct qman_cgrs *src) +{ + *dest = *src; +} + +static inline void qman_cgrs_and(struct qman_cgrs *dest, + const struct qman_cgrs *a, const struct qman_cgrs *b) +{ + int ret; + u32 *_d = dest->q.state; + const u32 *_a = a->q.state; + const u32 *_b = b->q.state; + + for (ret = 0; ret < 8; ret++) + *_d++ = *_a++ & *_b++; +} + +static inline void qman_cgrs_xor(struct qman_cgrs *dest, + const struct qman_cgrs *a, const struct qman_cgrs *b) +{ + int ret; + u32 *_d = dest->q.state; + const u32 *_a = a->q.state; + const u32 *_b = b->q.state; + + for (ret = 0; ret < 8; ret++) + *_d++ = *_a++ ^ *_b++; +} + +void qman_init_cgr_all(void); + +struct qm_portal_config { + /* + * Corenet portal addresses; + * [0]==cache-enabled, [1]==cache-inhibited. + */ + void __iomem *addr_virt[2]; + struct device *dev; + struct iommu_domain *iommu_domain; + /* Allow these to be joined in lists */ + struct list_head list; + /* User-visible portal configuration settings */ + /* portal is affined to this cpu */ + int cpu; + /* portal interrupt line */ + int irq; + /* + * the portal's dedicated channel id, used initialising + * frame queues to target this portal when scheduled + */ + u16 channel; + /* + * mask of pool channels this portal has dequeue access to + * (using QM_SDQCR_CHANNELS_POOL(n) for the bitmask) + */ + u32 pools; +}; + +/* Revision info (for errata and feature handling) */ +#define QMAN_REV11 0x0101 +#define QMAN_REV12 0x0102 +#define QMAN_REV20 0x0200 +#define QMAN_REV30 0x0300 +#define QMAN_REV31 0x0301 +extern u16 qman_ip_rev; /* 0 if uninitialised, otherwise QMAN_REVx */ + +#define QM_FQID_RANGE_START 1 /* FQID 0 reserved for internal use */ +extern struct gen_pool *qm_fqalloc; /* FQID allocator */ +extern struct gen_pool *qm_qpalloc; /* pool-channel allocator */ +extern struct gen_pool *qm_cgralloc; /* CGR ID allocator */ +u32 qm_get_pools_sdqcr(void); + +int qman_wq_alloc(void); +void qman_liodn_fixup(u16 channel); +void qman_set_sdest(u16 channel, unsigned int cpu_idx); + +struct qman_portal *qman_create_affine_portal( + const struct qm_portal_config *config, + const struct qman_cgrs *cgrs); +const struct qm_portal_config *qman_destroy_affine_portal(void); + +/* + * qman_query_fq - Queries FQD fields (via h/w query command) + * @fq: the frame queue object to be queried + * @fqd: storage for the queried FQD fields + */ +int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd); + +/* + * For qman_volatile_dequeue(); Choose one PRECEDENCE. EXACT is optional. 
Use + * NUMFRAMES(n) (6-bit) or NUMFRAMES_TILLEMPTY to fill in the frame-count. Use + * FQID(n) to fill in the frame queue ID. + */ +#define QM_VDQCR_PRECEDENCE_VDQCR 0x0 +#define QM_VDQCR_PRECEDENCE_SDQCR 0x80000000 +#define QM_VDQCR_EXACT 0x40000000 +#define QM_VDQCR_NUMFRAMES_MASK 0x3f000000 +#define QM_VDQCR_NUMFRAMES_SET(n) (((n) & 0x3f) << 24) +#define QM_VDQCR_NUMFRAMES_GET(n) (((n) >> 24) & 0x3f) +#define QM_VDQCR_NUMFRAMES_TILLEMPTY QM_VDQCR_NUMFRAMES_SET(0) + +#define QMAN_VOLATILE_FLAG_WAIT 0x00000001 /* wait if VDQCR is in use */ +#define QMAN_VOLATILE_FLAG_WAIT_INT 0x00000002 /* if wait, interruptible? */ +#define QMAN_VOLATILE_FLAG_FINISH 0x00000004 /* wait till VDQCR completes */ + +/* + * qman_volatile_dequeue - Issue a volatile dequeue command + * @fq: the frame queue object to dequeue from + * @flags: a bit-mask of QMAN_VOLATILE_FLAG_*** options + * @vdqcr: bit mask of QM_VDQCR_*** options, as per qm_dqrr_vdqcr_set() + * + * Attempts to lock access to the portal's VDQCR volatile dequeue functionality. + * The function will block and sleep if QMAN_VOLATILE_FLAG_WAIT is specified and + * the VDQCR is already in use, otherwise returns non-zero for failure. If + * QMAN_VOLATILE_FLAG_FINISH is specified, the function will only return once + * the VDQCR command has finished executing (ie. once the callback for the last + * DQRR entry resulting from the VDQCR command has been called). If not using + * the FINISH flag, completion can be determined either by detecting the + * presence of the QM_DQRR_STAT_UNSCHEDULED and QM_DQRR_STAT_DQCR_EXPIRED bits + * in the "stat" parameter passed to the FQ's dequeue callback, or by waiting + * for the QMAN_FQ_STATE_VDQCR bit to disappear. + */ +int qman_volatile_dequeue(struct qman_fq *fq, u32 flags, u32 vdqcr); + +int qman_alloc_fq_table(u32 num_fqids); + +/* QMan s/w corenet portal, low-level i/face */ + +/* + * For qm_dqrr_sdqcr_set(); Choose one SOURCE. Choose one COUNT. Choose one + * dequeue TYPE. Choose TOKEN (8-bit). + * If SOURCE == CHANNELS, + * Choose CHANNELS_DEDICATED and/or CHANNELS_POOL(n). + * You can choose DEDICATED_PRECEDENCE if the portal channel should have + * priority. + * If SOURCE == SPECIFICWQ, + * Either select the work-queue ID with SPECIFICWQ_WQ(), or select the + * channel (SPECIFICWQ_DEDICATED or SPECIFICWQ_POOL()) and specify the + * work-queue priority (0-7) with SPECIFICWQ_WQ() - either way, you get the + * same value. 
+ */ +#define QM_SDQCR_SOURCE_CHANNELS 0x0 +#define QM_SDQCR_SOURCE_SPECIFICWQ 0x40000000 +#define QM_SDQCR_COUNT_EXACT1 0x0 +#define QM_SDQCR_COUNT_UPTO3 0x20000000 +#define QM_SDQCR_DEDICATED_PRECEDENCE 0x10000000 +#define QM_SDQCR_TYPE_MASK 0x03000000 +#define QM_SDQCR_TYPE_NULL 0x0 +#define QM_SDQCR_TYPE_PRIO_QOS 0x01000000 +#define QM_SDQCR_TYPE_ACTIVE_QOS 0x02000000 +#define QM_SDQCR_TYPE_ACTIVE 0x03000000 +#define QM_SDQCR_TOKEN_MASK 0x00ff0000 +#define QM_SDQCR_TOKEN_SET(v) (((v) & 0xff) << 16) +#define QM_SDQCR_TOKEN_GET(v) (((v) >> 16) & 0xff) +#define QM_SDQCR_CHANNELS_DEDICATED 0x00008000 +#define QM_SDQCR_SPECIFICWQ_MASK 0x000000f7 +#define QM_SDQCR_SPECIFICWQ_DEDICATED 0x00000000 +#define QM_SDQCR_SPECIFICWQ_POOL(n) ((n) << 4) +#define QM_SDQCR_SPECIFICWQ_WQ(n) (n) + +/* For qm_dqrr_vdqcr_set(): use FQID(n) to fill in the frame queue ID */ +#define QM_VDQCR_FQID_MASK 0x00ffffff +#define QM_VDQCR_FQID(n) ((n) & QM_VDQCR_FQID_MASK) + +/* + * Used by all portal interrupt registers except 'inhibit' + * Channels with frame availability + */ +#define QM_PIRQ_DQAVAIL 0x0000ffff + +/* The DQAVAIL interrupt fields break down into these bits; */ +#define QM_DQAVAIL_PORTAL 0x8000 /* Portal channel */ +#define QM_DQAVAIL_POOL(n) (0x8000 >> (n)) /* Pool channel, n==[1..15] */ +#define QM_DQAVAIL_MASK 0xffff +/* This mask contains all the "irqsource" bits visible to API users */ +#define QM_PIRQ_VISIBLE (QM_PIRQ_SLOW | QM_PIRQ_DQRI) + +extern struct qman_portal *affine_portals[NR_CPUS]; +const struct qm_portal_config *qman_get_qm_portal_config( + struct qman_portal *portal); diff --git a/include/soc/fsl/qman.h b/include/soc/fsl/qman.h new file mode 100644 index 000000000000..37f3eb001a16 --- /dev/null +++ b/include/soc/fsl/qman.h @@ -0,0 +1,1074 @@ +/* Copyright 2008 - 2016 Freescale Semiconductor, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __FSL_QMAN_H +#define __FSL_QMAN_H + +#include + +/* Hardware constants */ +#define QM_CHANNEL_SWPORTAL0 0 +#define QMAN_CHANNEL_POOL1 0x21 +#define QMAN_CHANNEL_POOL1_REV3 0x401 +extern u16 qm_channel_pool1; + +/* Portal processing (interrupt) sources */ +#define QM_PIRQ_CSCI 0x00100000 /* Congestion State Change */ +#define QM_PIRQ_EQCI 0x00080000 /* Enqueue Command Committed */ +#define QM_PIRQ_EQRI 0x00040000 /* EQCR Ring (below threshold) */ +#define QM_PIRQ_DQRI 0x00020000 /* DQRR Ring (non-empty) */ +#define QM_PIRQ_MRI 0x00010000 /* MR Ring (non-empty) */ +/* + * This mask contains all the interrupt sources that need handling except DQRI, + * ie. that if present should trigger slow-path processing. + */ +#define QM_PIRQ_SLOW (QM_PIRQ_CSCI | QM_PIRQ_EQCI | QM_PIRQ_EQRI | \ + QM_PIRQ_MRI) + +/* For qman_static_dequeue_*** APIs */ +#define QM_SDQCR_CHANNELS_POOL_MASK 0x00007fff +/* for n in [1,15] */ +#define QM_SDQCR_CHANNELS_POOL(n) (0x00008000 >> (n)) +/* for conversion from n of qm_channel */ +static inline u32 QM_SDQCR_CHANNELS_POOL_CONV(u16 channel) +{ + return QM_SDQCR_CHANNELS_POOL(channel + 1 - qm_channel_pool1); +} + +/* --- QMan data structures (and associated constants) --- */ + +/* "Frame Descriptor (FD)" */ +struct qm_fd { + union { + struct { + u8 cfg8b_w1; + u8 bpid; /* Buffer Pool ID */ + u8 cfg8b_w3; + u8 addr_hi; /* high 8-bits of 40-bit address */ + __be32 addr_lo; /* low 32-bits of 40-bit address */ + } __packed; + __be64 data; + }; + __be32 cfg; /* format, offset, length / congestion */ + union { + __be32 cmd; + __be32 status; + }; +} __aligned(8); + +#define QM_FD_FORMAT_SG BIT(31) +#define QM_FD_FORMAT_LONG BIT(30) +#define QM_FD_FORMAT_COMPOUND BIT(29) +#define QM_FD_FORMAT_MASK GENMASK(31, 29) +#define QM_FD_OFF_SHIFT 20 +#define QM_FD_OFF_MASK GENMASK(28, 20) +#define QM_FD_LEN_MASK GENMASK(19, 0) +#define QM_FD_LEN_BIG_MASK GENMASK(28, 0) + +enum qm_fd_format { + /* + * 'contig' implies a contiguous buffer, whereas 'sg' implies a + * scatter-gather table. 'big' implies a 29-bit length with no offset + * field, otherwise length is 20-bit and offset is 9-bit. 'compound' + * implies a s/g-like table, where each entry itself represents a frame + * (contiguous or scatter-gather) and the 29-bit "length" is + * interpreted purely for congestion calculations, ie. a "congestion + * weight". 
+ */ + qm_fd_contig = 0, + qm_fd_contig_big = QM_FD_FORMAT_LONG, + qm_fd_sg = QM_FD_FORMAT_SG, + qm_fd_sg_big = QM_FD_FORMAT_SG | QM_FD_FORMAT_LONG, + qm_fd_compound = QM_FD_FORMAT_COMPOUND +}; + +static inline dma_addr_t qm_fd_addr(const struct qm_fd *fd) +{ + return be64_to_cpu(fd->data) & 0xffffffffffLLU; +} + +static inline u64 qm_fd_addr_get64(const struct qm_fd *fd) +{ + return be64_to_cpu(fd->data) & 0xffffffffffLLU; +} + +static inline void qm_fd_addr_set64(struct qm_fd *fd, u64 addr) +{ + fd->addr_hi = upper_32_bits(addr); + fd->addr_lo = cpu_to_be32(lower_32_bits(addr)); +} + +/* + * The 'format' field indicates the interpretation of the remaining + * 29 bits of the 32-bit word. + * If 'format' is _contig or _sg, 20b length and 9b offset. + * If 'format' is _contig_big or _sg_big, 29b length. + * If 'format' is _compound, 29b "congestion weight". + */ +static inline enum qm_fd_format qm_fd_get_format(const struct qm_fd *fd) +{ + return be32_to_cpu(fd->cfg) & QM_FD_FORMAT_MASK; +} + +static inline int qm_fd_get_offset(const struct qm_fd *fd) +{ + return (be32_to_cpu(fd->cfg) & QM_FD_OFF_MASK) >> QM_FD_OFF_SHIFT; +} + +static inline int qm_fd_get_length(const struct qm_fd *fd) +{ + return be32_to_cpu(fd->cfg) & QM_FD_LEN_MASK; +} + +static inline int qm_fd_get_len_big(const struct qm_fd *fd) +{ + return be32_to_cpu(fd->cfg) & QM_FD_LEN_BIG_MASK; +} + +static inline void qm_fd_set_param(struct qm_fd *fd, enum qm_fd_format fmt, + int off, int len) +{ + fd->cfg = cpu_to_be32(fmt | (len & QM_FD_LEN_BIG_MASK) | + ((off << QM_FD_OFF_SHIFT) & QM_FD_OFF_MASK)); +} + +#define qm_fd_set_contig(fd, off, len) \ + qm_fd_set_param(fd, qm_fd_contig, off, len) +#define qm_fd_set_sg(fd, off, len) qm_fd_set_param(fd, qm_fd_sg, off, len) +#define qm_fd_set_contig_big(fd, len) \ + qm_fd_set_param(fd, qm_fd_contig_big, 0, len) +#define qm_fd_set_sg_big(fd, len) qm_fd_set_param(fd, qm_fd_sg_big, 0, len) + +static inline void qm_fd_clear_fd(struct qm_fd *fd) +{ + fd->data = 0; + fd->cfg = 0; + fd->cmd = 0; +} + +/* Scatter/Gather table entry */ +struct qm_sg_entry { + union { + struct { + u8 __reserved1[3]; + u8 addr_hi; /* high 8-bits of 40-bit address */ + __be32 addr_lo; /* low 32-bits of 40-bit address */ + }; + __be64 data; + }; + __be32 cfg; /* E bit, F bit, length */ + u8 __reserved2; + u8 bpid; + __be16 offset; /* 13-bit, _res[13-15]*/ +} __packed; + +#define QM_SG_LEN_MASK GENMASK(29, 0) +#define QM_SG_OFF_MASK GENMASK(12, 0) +#define QM_SG_FIN BIT(30) +#define QM_SG_EXT BIT(31) + +static inline dma_addr_t qm_sg_addr(const struct qm_sg_entry *sg) +{ + return be64_to_cpu(sg->data) & 0xffffffffffLLU; +} + +static inline u64 qm_sg_entry_get64(const struct qm_sg_entry *sg) +{ + return be64_to_cpu(sg->data) & 0xffffffffffLLU; +} + +static inline void qm_sg_entry_set64(struct qm_sg_entry *sg, u64 addr) +{ + sg->addr_hi = upper_32_bits(addr); + sg->addr_lo = cpu_to_be32(lower_32_bits(addr)); +} + +static inline bool qm_sg_entry_is_final(const struct qm_sg_entry *sg) +{ + return be32_to_cpu(sg->cfg) & QM_SG_FIN; +} + +static inline bool qm_sg_entry_is_ext(const struct qm_sg_entry *sg) +{ + return be32_to_cpu(sg->cfg) & QM_SG_EXT; +} + +static inline int qm_sg_entry_get_len(const struct qm_sg_entry *sg) +{ + return be32_to_cpu(sg->cfg) & QM_SG_LEN_MASK; +} + +static inline void qm_sg_entry_set_len(struct qm_sg_entry *sg, int len) +{ + sg->cfg = cpu_to_be32(len & QM_SG_LEN_MASK); +} + +static inline void qm_sg_entry_set_f(struct qm_sg_entry *sg, int len) +{ + sg->cfg = cpu_to_be32(QM_SG_FIN | (len & 
QM_SG_LEN_MASK)); +} + +static inline int qm_sg_entry_get_off(const struct qm_sg_entry *sg) +{ + return be32_to_cpu(sg->offset) & QM_SG_OFF_MASK; +} + +/* "Frame Dequeue Response" */ +struct qm_dqrr_entry { + u8 verb; + u8 stat; + u16 seqnum; /* 15-bit */ + u8 tok; + u8 __reserved2[3]; + u32 fqid; /* 24-bit */ + u32 contextB; + struct qm_fd fd; + u8 __reserved4[32]; +} __packed; +#define QM_DQRR_VERB_VBIT 0x80 +#define QM_DQRR_VERB_MASK 0x7f /* where the verb contains; */ +#define QM_DQRR_VERB_FRAME_DEQUEUE 0x60 /* "this format" */ +#define QM_DQRR_STAT_FQ_EMPTY 0x80 /* FQ empty */ +#define QM_DQRR_STAT_FQ_HELDACTIVE 0x40 /* FQ held active */ +#define QM_DQRR_STAT_FQ_FORCEELIGIBLE 0x20 /* FQ was force-eligible'd */ +#define QM_DQRR_STAT_FD_VALID 0x10 /* has a non-NULL FD */ +#define QM_DQRR_STAT_UNSCHEDULED 0x02 /* Unscheduled dequeue */ +#define QM_DQRR_STAT_DQCR_EXPIRED 0x01 /* VDQCR or PDQCR expired*/ + +/* "ERN Message Response" */ +/* "FQ State Change Notification" */ +union qm_mr_entry { + struct { + u8 verb; + u8 __reserved[63]; + }; + struct { + u8 verb; + u8 dca; + u16 seqnum; + u8 rc; /* Rej Code: 8-bit */ + u8 orp_hi; /* ORP: 24-bit */ + u16 orp_lo; + u32 fqid; /* 24-bit */ + u32 tag; + struct qm_fd fd; + u8 __reserved1[32]; + } __packed ern; + struct { + u8 verb; + u8 fqs; /* Frame Queue Status */ + u8 __reserved1[6]; + u32 fqid; /* 24-bit */ + u32 contextB; + u8 __reserved2[48]; + } __packed fq; /* FQRN/FQRNI/FQRL/FQPN */ +}; +#define QM_MR_VERB_VBIT 0x80 +/* + * ERNs originating from direct-connect portals ("dcern") use 0x20 as a verb + * which would be invalid as a s/w enqueue verb. A s/w ERN can be distinguished + * from the other MR types by noting if the 0x20 bit is unset. + */ +#define QM_MR_VERB_TYPE_MASK 0x27 +#define QM_MR_VERB_DC_ERN 0x20 +#define QM_MR_VERB_FQRN 0x21 +#define QM_MR_VERB_FQRNI 0x22 +#define QM_MR_VERB_FQRL 0x23 +#define QM_MR_VERB_FQPN 0x24 +#define QM_MR_RC_MASK 0xf0 /* contains one of; */ +#define QM_MR_RC_CGR_TAILDROP 0x00 +#define QM_MR_RC_WRED 0x10 +#define QM_MR_RC_ERROR 0x20 +#define QM_MR_RC_ORPWINDOW_EARLY 0x30 +#define QM_MR_RC_ORPWINDOW_LATE 0x40 +#define QM_MR_RC_FQ_TAILDROP 0x50 +#define QM_MR_RC_ORPWINDOW_RETIRED 0x60 +#define QM_MR_RC_ORP_ZERO 0x70 +#define QM_MR_FQS_ORLPRESENT 0x02 /* ORL fragments to come */ +#define QM_MR_FQS_NOTEMPTY 0x01 /* FQ has enqueued frames */ + +/* + * An identical structure of FQD fields is present in the "Init FQ" command and + * the "Query FQ" result, it's suctioned out into the "struct qm_fqd" type. + * Within that, the 'stashing' and 'taildrop' pieces are also factored out, the + * latter has two inlines to assist with converting to/from the mant+exp + * representation. + */ +struct qm_fqd_stashing { + /* See QM_STASHING_EXCL_<...> */ + u8 exclusive; + /* Numbers of cachelines */ + u8 cl; /* _res[6-7], as[4-5], ds[2-3], cs[0-1] */ +}; + +struct qm_fqd_oac { + /* "Overhead Accounting Control", see QM_OAC_<...> */ + u8 oac; /* oac[6-7], _res[0-5] */ + /* Two's-complement value (-128 to +127) */ + s8 oal; /* "Overhead Accounting Length" */ +}; + +struct qm_fqd { + /* _res[6-7], orprws[3-5], oa[2], olws[0-1] */ + u8 orpc; + u8 cgid; + __be16 fq_ctrl; /* See QM_FQCTRL_<...> */ + __be16 dest_wq; /* channel[3-15], wq[0-2] */ + __be16 ics_cred; /* 15-bit */ + /* + * For "Initialize Frame Queue" commands, the write-enable mask + * determines whether 'td' or 'oac_init' is observed. For query + * commands, this field is always 'td', and 'oac_query' (below) reflects + * the Overhead ACcounting values. 
+ */ + union { + __be16 td; /* "Taildrop": _res[13-15], mant[5-12], exp[0-4] */ + struct qm_fqd_oac oac_init; + }; + __be32 context_b; + union { + /* Treat it as 64-bit opaque */ + __be64 opaque; + struct { + __be32 hi; + __be32 lo; + }; + /* Treat it as s/w portal stashing config */ + /* see "FQD Context_A field used for [...]" */ + struct { + struct qm_fqd_stashing stashing; + /* + * 48-bit address of FQ context to + * stash, must be cacheline-aligned + */ + __be16 context_hi; + __be32 context_lo; + } __packed; + } context_a; + struct qm_fqd_oac oac_query; +} __packed; + +#define QM_FQD_CHAN_OFF 3 +#define QM_FQD_WQ_MASK GENMASK(2, 0) +#define QM_FQD_TD_EXP_MASK GENMASK(4, 0) +#define QM_FQD_TD_MANT_OFF 5 +#define QM_FQD_TD_MANT_MASK GENMASK(12, 5) +#define QM_FQD_TD_MAX 0xe0000000 +#define QM_FQD_TD_MANT_MAX 0xff +#define QM_FQD_OAC_OFF 6 +#define QM_FQD_AS_OFF 4 +#define QM_FQD_DS_OFF 2 +#define QM_FQD_XS_MASK 0x3 + +/* 64-bit converters for context_hi/lo */ +static inline u64 qm_fqd_stashing_get64(const struct qm_fqd *fqd) +{ + return be64_to_cpu(fqd->context_a.opaque) & 0xffffffffffffULL; +} + +static inline dma_addr_t qm_fqd_stashing_addr(const struct qm_fqd *fqd) +{ + return be64_to_cpu(fqd->context_a.opaque) & 0xffffffffffffULL; +} + +static inline u64 qm_fqd_context_a_get64(const struct qm_fqd *fqd) +{ + return qm_fqd_stashing_get64(fqd); +} + +static inline void qm_fqd_stashing_set64(struct qm_fqd *fqd, u64 addr) +{ + fqd->context_a.context_hi = upper_32_bits(addr); + fqd->context_a.context_lo = lower_32_bits(addr); +} + +static inline void qm_fqd_context_a_set64(struct qm_fqd *fqd, u64 addr) +{ + fqd->context_a.hi = cpu_to_be16(upper_32_bits(addr)); + fqd->context_a.lo = cpu_to_be32(lower_32_bits(addr)); +} + +/* convert a threshold value into mant+exp representation */ +static inline int qm_fqd_set_taildrop(struct qm_fqd *fqd, u32 val, + int roundup) +{ + u32 e = 0; + int td, oddbit = 0; + + if (val > QM_FQD_TD_MAX) + return -ERANGE; + + while (val > QM_FQD_TD_MANT_MAX) { + oddbit = val & 1; + val >>= 1; + e++; + if (roundup && oddbit) + val++; + } + + td = (val << QM_FQD_TD_MANT_OFF) & QM_FQD_TD_MANT_MASK; + td |= (e & QM_FQD_TD_EXP_MASK); + fqd->td = cpu_to_be16(td); + return 0; +} +/* and the other direction */ +static inline int qm_fqd_get_taildrop(const struct qm_fqd *fqd) +{ + int td = be16_to_cpu(fqd->td); + + return ((td & QM_FQD_TD_MANT_MASK) >> QM_FQD_TD_MANT_OFF) + << (td & QM_FQD_TD_EXP_MASK); +} + +static inline void qm_fqd_set_stashing(struct qm_fqd *fqd, u8 as, u8 ds, u8 cs) +{ + struct qm_fqd_stashing *st = &fqd->context_a.stashing; + + st->cl = ((as & QM_FQD_XS_MASK) << QM_FQD_AS_OFF) | + ((ds & QM_FQD_XS_MASK) << QM_FQD_DS_OFF) | + (cs & QM_FQD_XS_MASK); +} + +static inline u8 qm_fqd_get_stashing(const struct qm_fqd *fqd) +{ + return fqd->context_a.stashing.cl; +} + +static inline void qm_fqd_set_oac(struct qm_fqd *fqd, u8 val) +{ + fqd->oac_init.oac = val << QM_FQD_OAC_OFF; +} + +static inline void qm_fqd_set_oal(struct qm_fqd *fqd, s8 val) +{ + fqd->oac_init.oal = val; +} + +static inline void qm_fqd_set_destwq(struct qm_fqd *fqd, int ch, int wq) +{ + fqd->dest_wq = cpu_to_be16((ch << QM_FQD_CHAN_OFF) | + (wq & QM_FQD_WQ_MASK)); +} + +static inline int qm_fqd_get_chan(const struct qm_fqd *fqd) +{ + return be16_to_cpu(fqd->dest_wq) >> QM_FQD_CHAN_OFF; +} + +static inline int qm_fqd_get_wq(const struct qm_fqd *fqd) +{ + return be16_to_cpu(fqd->dest_wq) & QM_FQD_WQ_MASK; +} + +/* See "Frame Queue Descriptor (FQD)" */ +/* Frame Queue Descriptor (FQD) field 
'fq_ctrl' uses these constants */ +#define QM_FQCTRL_MASK 0x07ff /* 'fq_ctrl' flags; */ +#define QM_FQCTRL_CGE 0x0400 /* Congestion Group Enable */ +#define QM_FQCTRL_TDE 0x0200 /* Tail-Drop Enable */ +#define QM_FQCTRL_CTXASTASHING 0x0080 /* Context-A stashing */ +#define QM_FQCTRL_CPCSTASH 0x0040 /* CPC Stash Enable */ +#define QM_FQCTRL_FORCESFDR 0x0008 /* High-priority SFDRs */ +#define QM_FQCTRL_AVOIDBLOCK 0x0004 /* Don't block active */ +#define QM_FQCTRL_HOLDACTIVE 0x0002 /* Hold active in portal */ +#define QM_FQCTRL_PREFERINCACHE 0x0001 /* Aggressively cache FQD */ +#define QM_FQCTRL_LOCKINCACHE QM_FQCTRL_PREFERINCACHE /* older naming */ + +/* See "FQD Context_A field used for [...] */ +/* Frame Queue Descriptor (FQD) field 'CONTEXT_A' uses these constants */ +#define QM_STASHING_EXCL_ANNOTATION 0x04 +#define QM_STASHING_EXCL_DATA 0x02 +#define QM_STASHING_EXCL_CTX 0x01 + +/* See "Intra Class Scheduling" */ +/* FQD field 'OAC' (Overhead ACcounting) uses these constants */ +#define QM_OAC_ICS 0x2 /* Accounting for Intra-Class Scheduling */ +#define QM_OAC_CG 0x1 /* Accounting for Congestion Groups */ + +/* + * This struct represents the 32-bit "WR_PARM_[GYR]" parameters in CGR fields + * and associated commands/responses. The WRED parameters are calculated from + * these fields as follows; + * MaxTH = MA * (2 ^ Mn) + * Slope = SA / (2 ^ Sn) + * MaxP = 4 * (Pn + 1) + */ +struct qm_cgr_wr_parm { + /* MA[24-31], Mn[19-23], SA[12-18], Sn[6-11], Pn[0-5] */ + u32 word; +}; +/* + * This struct represents the 13-bit "CS_THRES" CGR field. In the corresponding + * management commands, this is padded to a 16-bit structure field, so that's + * how we represent it here. The congestion state threshold is calculated from + * these fields as follows; + * CS threshold = TA * (2 ^ Tn) + */ +struct qm_cgr_cs_thres { + /* _res[13-15], TA[5-12], Tn[0-4] */ + u16 word; +}; +/* + * This identical structure of CGR fields is present in the "Init/Modify CGR" + * commands and the "Query CGR" result. It's suctioned out here into its own + * struct. 
+ */ +struct __qm_mc_cgr { + struct qm_cgr_wr_parm wr_parm_g; + struct qm_cgr_wr_parm wr_parm_y; + struct qm_cgr_wr_parm wr_parm_r; + u8 wr_en_g; /* boolean, use QM_CGR_EN */ + u8 wr_en_y; /* boolean, use QM_CGR_EN */ + u8 wr_en_r; /* boolean, use QM_CGR_EN */ + u8 cscn_en; /* boolean, use QM_CGR_EN */ + union { + struct { + u16 cscn_targ_upd_ctrl; /* use QM_CSCN_TARG_UDP_ */ + u16 cscn_targ_dcp_low; /* CSCN_TARG_DCP low-16bits */ + }; + u32 cscn_targ; /* use QM_CGR_TARG_* */ + }; + u8 cstd_en; /* boolean, use QM_CGR_EN */ + u8 cs; /* boolean, only used in query response */ + struct qm_cgr_cs_thres cs_thres; /* use qm_cgr_cs_thres_set64() */ + u8 mode; /* QMAN_CGR_MODE_FRAME not supported in rev1.0 */ +} __packed; +#define QM_CGR_EN 0x01 /* For wr_en_*, cscn_en, cstd_en */ +#define QM_CGR_TARG_UDP_CTRL_WRITE_BIT 0x8000 /* value written to portal bit*/ +#define QM_CGR_TARG_UDP_CTRL_DCP 0x4000 /* 0: SWP, 1: DCP */ +#define QM_CGR_TARG_PORTAL(n) (0x80000000 >> (n)) /* s/w portal, 0-9 */ +#define QM_CGR_TARG_FMAN0 0x00200000 /* direct-connect portal: fman0 */ +#define QM_CGR_TARG_FMAN1 0x00100000 /* : fman1 */ +/* Convert CGR thresholds to/from "cs_thres" format */ +static inline u64 qm_cgr_cs_thres_get64(const struct qm_cgr_cs_thres *th) +{ + return ((th->word >> 5) & 0xff) << (th->word & 0x1f); +} + +static inline int qm_cgr_cs_thres_set64(struct qm_cgr_cs_thres *th, u64 val, + int roundup) +{ + u32 e = 0; + int oddbit = 0; + + while (val > 0xff) { + oddbit = val & 1; + val >>= 1; + e++; + if (roundup && oddbit) + val++; + } + th->word = ((val & 0xff) << 5) | (e & 0x1f); + return 0; +} + +/* "Initialize FQ" */ +struct qm_mcc_initfq { + u8 __reserved1[2]; + u16 we_mask; /* Write Enable Mask */ + u32 fqid; /* 24-bit */ + u16 count; /* Initialises 'count+1' FQDs */ + struct qm_fqd fqd; /* the FQD fields go here */ + u8 __reserved2[30]; +} __packed; +/* "Initialize/Modify CGR" */ +struct qm_mcc_initcgr { + u8 __reserve1[2]; + u16 we_mask; /* Write Enable Mask */ + struct __qm_mc_cgr cgr; /* CGR fields */ + u8 __reserved2[2]; + u8 cgid; + u8 __reserved3[32]; +} __packed; + +/* INITFQ-specific flags */ +#define QM_INITFQ_WE_MASK 0x01ff /* 'Write Enable' flags; */ +#define QM_INITFQ_WE_OAC 0x0100 +#define QM_INITFQ_WE_ORPC 0x0080 +#define QM_INITFQ_WE_CGID 0x0040 +#define QM_INITFQ_WE_FQCTRL 0x0020 +#define QM_INITFQ_WE_DESTWQ 0x0010 +#define QM_INITFQ_WE_ICSCRED 0x0008 +#define QM_INITFQ_WE_TDTHRESH 0x0004 +#define QM_INITFQ_WE_CONTEXTB 0x0002 +#define QM_INITFQ_WE_CONTEXTA 0x0001 +/* INITCGR/MODIFYCGR-specific flags */ +#define QM_CGR_WE_MASK 0x07ff /* 'Write Enable Mask'; */ +#define QM_CGR_WE_WR_PARM_G 0x0400 +#define QM_CGR_WE_WR_PARM_Y 0x0200 +#define QM_CGR_WE_WR_PARM_R 0x0100 +#define QM_CGR_WE_WR_EN_G 0x0080 +#define QM_CGR_WE_WR_EN_Y 0x0040 +#define QM_CGR_WE_WR_EN_R 0x0020 +#define QM_CGR_WE_CSCN_EN 0x0010 +#define QM_CGR_WE_CSCN_TARG 0x0008 +#define QM_CGR_WE_CSTD_EN 0x0004 +#define QM_CGR_WE_CS_THRES 0x0002 +#define QM_CGR_WE_MODE 0x0001 + +#define QMAN_CGR_FLAG_USE_INIT 0x00000001 + + /* Portal and Frame Queues */ +/* Represents a managed portal */ +struct qman_portal; + +/* + * This object type represents QMan frame queue descriptors (FQD), it is + * cacheline-aligned, and initialised by qman_create_fq(). The structure is + * defined further down. + */ +struct qman_fq; + +/* + * This object type represents a QMan congestion group, it is defined further + * down. 
+ */ +struct qman_cgr; + +/* + * This enum, and the callback type that returns it, are used when handling + * dequeued frames via DQRR. Note that for "null" callbacks registered with the + * portal object (for handling dequeues that do not demux because contextB is + * NULL), the return value *MUST* be qman_cb_dqrr_consume. + */ +enum qman_cb_dqrr_result { + /* DQRR entry can be consumed */ + qman_cb_dqrr_consume, + /* Like _consume, but requests parking - FQ must be held-active */ + qman_cb_dqrr_park, + /* Does not consume, for DCA mode only. */ + qman_cb_dqrr_defer, + /* + * Stop processing without consuming this ring entry. Exits the current + * qman_p_poll_dqrr() or interrupt-handling, as appropriate. If within + * an interrupt handler, the callback would typically call + * qman_irqsource_remove(QM_PIRQ_DQRI) before returning this value, + * otherwise the interrupt will reassert immediately. + */ + qman_cb_dqrr_stop, + /* Like qman_cb_dqrr_stop, but consumes the current entry. */ + qman_cb_dqrr_consume_stop +}; +typedef enum qman_cb_dqrr_result (*qman_cb_dqrr)(struct qman_portal *qm, + struct qman_fq *fq, + const struct qm_dqrr_entry *dqrr); + +/* + * This callback type is used when handling ERNs, FQRNs and FQRLs via MR. They + * are always consumed after the callback returns. + */ +typedef void (*qman_cb_mr)(struct qman_portal *qm, struct qman_fq *fq, + const union qm_mr_entry *msg); + +/* + * s/w-visible states. Ie. tentatively scheduled + truly scheduled + active + + * held-active + held-suspended are just "sched". Things like "retired" will not + * be assumed until it is complete (ie. QMAN_FQ_STATE_CHANGING is set until + * then, to indicate it's completing and to gate attempts to retry the retire + * command). Note, park commands do not set QMAN_FQ_STATE_CHANGING because it's + * technically impossible in the case of enqueue DCAs (which refer to DQRR ring + * index rather than the FQ that ring entry corresponds to), so repeated park + * commands are allowed (if you're silly enough to try) but won't change FQ + * state, and the resulting park notifications move FQs from "sched" to + * "parked". + */ +enum qman_fq_state { + qman_fq_state_oos, + qman_fq_state_parked, + qman_fq_state_sched, + qman_fq_state_retired +}; + +#define QMAN_FQ_STATE_CHANGING 0x80000000 /* 'state' is changing */ +#define QMAN_FQ_STATE_NE 0x40000000 /* retired FQ isn't empty */ +#define QMAN_FQ_STATE_ORL 0x20000000 /* retired FQ has ORL */ +#define QMAN_FQ_STATE_BLOCKOOS 0xe0000000 /* if any are set, no OOS */ +#define QMAN_FQ_STATE_CGR_EN 0x10000000 /* CGR enabled */ +#define QMAN_FQ_STATE_VDQCR 0x08000000 /* being volatile dequeued */ + +/* + * Frame queue objects (struct qman_fq) are stored within memory passed to + * qman_create_fq(), as this allows stashing of caller-provided demux callback + * pointers at no extra cost to stashing of (driver-internal) FQ state. If the + * caller wishes to add per-FQ state and have it benefit from dequeue-stashing, + * they should; + * + * (a) extend the qman_fq structure with their state; eg. + * + * // myfq is allocated and driver_fq callbacks filled in; + * struct my_fq { + * struct qman_fq base; + * int an_extra_field; + * [ ... add other fields to be associated with each FQ ...] + * } *myfq = some_my_fq_allocator(); + * struct qman_fq *fq = qman_create_fq(fqid, flags, &myfq->base); + * + * // in a dequeue callback, access extra fields from 'fq' via a cast; + * struct my_fq *myfq = (struct my_fq *)fq; + * do_something_with(myfq->an_extra_field); + * [...] 
+ * + * (b) when and if configuring the FQ for context stashing, specify how ever + * many cachelines are required to stash 'struct my_fq', to accelerate not + * only the QMan driver but the callback as well. + */ + +struct qman_fq_cb { + qman_cb_dqrr dqrr; /* for dequeued frames */ + qman_cb_mr ern; /* for s/w ERNs */ + qman_cb_mr fqs; /* frame-queue state changes*/ +}; + +struct qman_fq { + /* Caller of qman_create_fq() provides these demux callbacks */ + struct qman_fq_cb cb; + /* + * These are internal to the driver, don't touch. In particular, they + * may change, be removed, or extended (so you shouldn't rely on + * sizeof(qman_fq) being a constant). + */ + u32 fqid, idx; + unsigned long flags; + enum qman_fq_state state; + int cgr_groupid; +}; + +/* + * This callback type is used when handling congestion group entry/exit. + * 'congested' is non-zero on congestion-entry, and zero on congestion-exit. + */ +typedef void (*qman_cb_cgr)(struct qman_portal *qm, + struct qman_cgr *cgr, int congested); + +struct qman_cgr { + /* Set these prior to qman_create_cgr() */ + u32 cgrid; /* 0..255, but u32 to allow specials like -1, 256, etc.*/ + qman_cb_cgr cb; + /* These are private to the driver */ + u16 chan; /* portal channel this object is created on */ + struct list_head node; +}; + +/* Flags to qman_create_fq() */ +#define QMAN_FQ_FLAG_NO_ENQUEUE 0x00000001 /* can't enqueue */ +#define QMAN_FQ_FLAG_NO_MODIFY 0x00000002 /* can only enqueue */ +#define QMAN_FQ_FLAG_TO_DCPORTAL 0x00000004 /* consumed by CAAM/PME/Fman */ +#define QMAN_FQ_FLAG_DYNAMIC_FQID 0x00000020 /* (de)allocate fqid */ + +/* Flags to qman_init_fq() */ +#define QMAN_INITFQ_FLAG_SCHED 0x00000001 /* schedule rather than park */ +#define QMAN_INITFQ_FLAG_LOCAL 0x00000004 /* set dest portal */ + + /* Portal Management */ +/** + * qman_p_irqsource_add - add processing sources to be interrupt-driven + * @bits: bitmask of QM_PIRQ_**I processing sources + * + * Adds processing sources that should be interrupt-driven (rather than + * processed via qman_poll_***() functions). + */ +void qman_p_irqsource_add(struct qman_portal *p, u32 bits); + +/** + * qman_p_irqsource_remove - remove processing sources from being int-driven + * @bits: bitmask of QM_PIRQ_**I processing sources + * + * Removes processing sources from being interrupt-driven, so that they will + * instead be processed via qman_poll_***() functions. + */ +void qman_p_irqsource_remove(struct qman_portal *p, u32 bits); + +/** + * qman_affine_cpus - return a mask of cpus that have affine portals + */ +const cpumask_t *qman_affine_cpus(void); + +/** + * qman_affine_channel - return the channel ID of an portal + * @cpu: the cpu whose affine portal is the subject of the query + * + * If @cpu is -1, the affine portal for the current CPU will be used. It is a + * bug to call this function for any value of @cpu (other than -1) that is not a + * member of the mask returned from qman_affine_cpus(). + */ +u16 qman_affine_channel(int cpu); + +/** + * qman_get_affine_portal - return the portal pointer affine to cpu + * @cpu: the cpu whose affine portal is the subject of the query + */ +struct qman_portal *qman_get_affine_portal(int cpu); + +/** + * qman_p_poll_dqrr - process DQRR (fast-path) entries + * @limit: the maximum number of DQRR entries to process + * + * Use of this function requires that DQRR processing not be interrupt-driven. + * The return value represents the number of DQRR entries processed. 
+ */ +int qman_p_poll_dqrr(struct qman_portal *p, unsigned int limit); + +/** + * qman_p_static_dequeue_add - Add pool channels to the portal SDQCR + * @pools: bit-mask of pool channels, using QM_SDQCR_CHANNELS_POOL(n) + * + * Adds a set of pool channels to the portal's static dequeue command register + * (SDQCR). The requested pools are limited to those the portal has dequeue + * access to. + */ +void qman_p_static_dequeue_add(struct qman_portal *p, u32 pools); + + /* FQ management */ +/** + * qman_create_fq - Allocates a FQ + * @fqid: the index of the FQD to encapsulate, must be "Out of Service" + * @flags: bit-mask of QMAN_FQ_FLAG_*** options + * @fq: memory for storing the 'fq', with callbacks filled in + * + * Creates a frame queue object for the given @fqid, unless the + * QMAN_FQ_FLAG_DYNAMIC_FQID flag is set in @flags, in which case a FQID is + * dynamically allocated (or the function fails if none are available). Once + * created, the caller should not touch the memory at 'fq' except as extended to + * adjacent memory for user-defined fields (see the definition of "struct + * qman_fq" for more info). NO_MODIFY is only intended for enqueuing to + * pre-existing frame-queues that aren't to be otherwise interfered with, it + * prevents all other modifications to the frame queue. The TO_DCPORTAL flag + * causes the driver to honour any contextB modifications requested in the + * qm_init_fq() API, as this indicates the frame queue will be consumed by a + * direct-connect portal (PME, CAAM, or Fman). When frame queues are consumed by + * software portals, the contextB field is controlled by the driver and can't be + * modified by the caller. + */ +int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq); + +/** + * qman_destroy_fq - Deallocates a FQ + * @fq: the frame queue object to release + * + * The memory for this frame queue object ('fq' provided in qman_create_fq()) is + * not deallocated but the caller regains ownership, to do with as desired. The + * FQ must be in the 'out-of-service' or in the 'parked' state. + */ +void qman_destroy_fq(struct qman_fq *fq); + +/** + * qman_fq_fqid - Queries the frame queue ID of a FQ object + * @fq: the frame queue object to query + */ +u32 qman_fq_fqid(struct qman_fq *fq); + +/** + * qman_init_fq - Initialises FQ fields, leaves the FQ "parked" or "scheduled" + * @fq: the frame queue object to modify, must be 'parked' or new. + * @flags: bit-mask of QMAN_INITFQ_FLAG_*** options + * @opts: the FQ-modification settings, as defined in the low-level API + * + * The @opts parameter comes from the low-level portal API. Select + * QMAN_INITFQ_FLAG_SCHED in @flags to cause the frame queue to be scheduled + * rather than parked. NB, @opts can be NULL. + * + * Note that some fields and options within @opts may be ignored or overwritten + * by the driver; + * 1. the 'count' and 'fqid' fields are always ignored (this operation only + * affects one frame queue: @fq). + * 2. the QM_INITFQ_WE_CONTEXTB option of the 'we_mask' field and the associated + * 'fqd' structure's 'context_b' field are sometimes overwritten; + * - if @fq was not created with QMAN_FQ_FLAG_TO_DCPORTAL, then context_b is + * initialised to a value used by the driver for demux. + * - if context_b is initialised for demux, so is context_a in case stashing + * is requested (see item 4). + * (So caller control of context_b is only possible for TO_DCPORTAL frame queue + * objects.) + * 3. 
if @flags contains QMAN_INITFQ_FLAG_LOCAL, the 'fqd' structure's + * 'dest::channel' field will be overwritten to match the portal used to issue + * the command. If the WE_DESTWQ write-enable bit had already been set by the + * caller, the channel workqueue will be left as-is, otherwise the write-enable + * bit is set and the workqueue is set to a default of 4. If the "LOCAL" flag + * isn't set, the destination channel/workqueue fields and the write-enable bit + * are left as-is. + * 4. if the driver overwrites context_a/b for demux, then if + * QM_INITFQ_WE_CONTEXTA is set, the driver will only overwrite + * context_a.address fields and will leave the stashing fields provided by the + * user alone, otherwise it will zero out the context_a.stashing fields. + */ +int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts); + +/** + * qman_schedule_fq - Schedules a FQ + * @fq: the frame queue object to schedule, must be 'parked' + * + * Schedules the frame queue, which must be Parked, which takes it to + * Tentatively-Scheduled or Truly-Scheduled depending on its fill-level. + */ +int qman_schedule_fq(struct qman_fq *fq); + +/** + * qman_retire_fq - Retires a FQ + * @fq: the frame queue object to retire + * @flags: FQ flags (QMAN_FQ_STATE*) if retirement completes immediately + * + * Retires the frame queue. This returns zero if it succeeds immediately, +1 if + * the retirement was started asynchronously, otherwise it returns negative for + * failure. When this function returns zero, @flags is set to indicate whether + * the retired FQ is empty and/or whether it has any ORL fragments (to show up + * as ERNs). Otherwise the corresponding flags will be known when a subsequent + * FQRN message shows up on the portal's message ring. + * + * NB, if the retirement is asynchronous (the FQ was in the Truly Scheduled or + * Active state), the completion will be via the message ring as a FQRN - but + * the corresponding callback may occur before this function returns!! Ie. the + * caller should be prepared to accept the callback as the function is called, + * not only once it has returned. + */ +int qman_retire_fq(struct qman_fq *fq, u32 *flags); + +/** + * qman_oos_fq - Puts a FQ "out of service" + * @fq: the frame queue object to be put out-of-service, must be 'retired' + * + * The frame queue must be retired and empty, and if any order restoration list + * was released as ERNs at the time of retirement, they must all be consumed. + */ +int qman_oos_fq(struct qman_fq *fq); + +/** + * qman_enqueue - Enqueue a frame to a frame queue + * @fq: the frame queue object to enqueue to + * @fd: a descriptor of the frame to be enqueued + * + * Fills an entry in the EQCR of portal @qm to enqueue the frame described by + * @fd. The descriptor details are copied from @fd to the EQCR entry, the 'pid' + * field is ignored. The return value is non-zero on error, such as ring full. + */ +int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd); + +/** + * qman_alloc_fqid_range - Allocate a contiguous range of FQIDs + * @result: is set by the API to the base FQID of the allocated range + * @count: the number of FQIDs required + * + * Returns 0 on success, or a negative error code. 
+ */
+int qman_alloc_fqid_range(u32 *result, u32 count);
+#define qman_alloc_fqid(result) qman_alloc_fqid_range(result, 1)
+
+/**
+ * qman_release_fqid - Release the specified frame queue ID
+ * @fqid: the FQID to be released back to the resource pool
+ *
+ * This function can also be used to seed the allocator with
+ * FQID ranges that it can subsequently allocate from.
+ * Returns 0 on success, or a negative error code.
+ */
+int qman_release_fqid(u32 fqid);
+
+	/* Pool-channel management */
+/**
+ * qman_alloc_pool_range - Allocate a contiguous range of pool-channel IDs
+ * @result: is set by the API to the base pool-channel ID of the allocated range
+ * @count: the number of pool-channel IDs required
+ *
+ * Returns 0 on success, or a negative error code.
+ */
+int qman_alloc_pool_range(u32 *result, u32 count);
+#define qman_alloc_pool(result) qman_alloc_pool_range(result, 1)
+
+/**
+ * qman_release_pool - Release the specified pool-channel ID
+ * @id: the pool-chan ID to be released back to the resource pool
+ *
+ * This function can also be used to seed the allocator with
+ * pool-channel ID ranges that it can subsequently allocate from.
+ * Returns 0 on success, or a negative error code.
+ */
+int qman_release_pool(u32 id);
+
+	/* CGR management */
+/**
+ * qman_create_cgr - Register a congestion group object
+ * @cgr: the 'cgr' object, with fields filled in
+ * @flags: QMAN_CGR_FLAG_* values
+ * @opts: optional state of CGR settings
+ *
+ * Registers this object to receive congestion entry/exit callbacks on the
+ * portal affine to the cpu on which this API is executed. If opts is
+ * NULL then only the callback (cgr->cb) function is registered. If @flags
+ * contains QMAN_CGR_FLAG_USE_INIT, then an init hw command (which will reset
+ * any unspecified parameters) will be used rather than a modify hw command
+ * (which only modifies the specified parameters).
+ */
+int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
+		    struct qm_mcc_initcgr *opts);
+
+/**
+ * qman_delete_cgr - Deregisters a congestion group object
+ * @cgr: the 'cgr' object to deregister
+ *
+ * "Unplugs" this CGR object from the portal affine to the cpu on which this API
+ * is executed. This must be executed on the same affine portal on which it was
+ * created.
+ */
+int qman_delete_cgr(struct qman_cgr *cgr);
+
+/**
+ * qman_delete_cgr_safe - Deregisters a congestion group object from any CPU
+ * @cgr: the 'cgr' object to deregister
+ *
+ * This will select the proper CPU and run qman_delete_cgr() there.
+ */
+void qman_delete_cgr_safe(struct qman_cgr *cgr);
+
+/**
+ * qman_query_cgr_congested - Queries CGR's congestion status
+ * @cgr: the 'cgr' object to query
+ * @result: returns 'cgr's congestion status, 1 (true) if congested
+ */
+int qman_query_cgr_congested(struct qman_cgr *cgr, bool *result);
+
+/**
+ * qman_alloc_cgrid_range - Allocate a contiguous range of CGR IDs
+ * @result: is set by the API to the base CGR ID of the allocated range
+ * @count: the number of CGR IDs required
+ *
+ * Returns 0 on success, or a negative error code.
+ */
+int qman_alloc_cgrid_range(u32 *result, u32 count);
+#define qman_alloc_cgrid(result)	qman_alloc_cgrid_range(result, 1)
+
+/**
+ * qman_release_cgrid - Release the specified CGR ID
+ * @id: the CGR ID to be released back to the resource pool
+ *
+ * This function can also be used to seed the allocator with
+ * CGR ID ranges that it can subsequently allocate from.
+ * Returns 0 on success, or a negative error code.
+ */ +int qman_release_cgrid(u32 id); + +#endif /* __FSL_QMAN_H */ -- cgit v1.2.3-59-g8ed1b From 97e0d385b13998980252ff63123d8ebc4138db0a Mon Sep 17 00:00:00 2001 From: Claudiu Manoil Date: Thu, 22 Sep 2016 18:04:10 +0300 Subject: soc/bman: Add self-test for BMan driver Add a self test for the DPAA 1.x Buffer Manager driver. This test ensures that the driver can properly acquire and release buffers using the BMan portal infrastructure. Signed-off-by: Roy Pledge Signed-off-by: Claudiu Manoil Signed-off-by: Scott Wood --- drivers/soc/fsl/qbman/Kconfig | 16 ++++ drivers/soc/fsl/qbman/Makefile | 4 + drivers/soc/fsl/qbman/bman_test.c | 53 ++++++++++++ drivers/soc/fsl/qbman/bman_test.h | 35 ++++++++ drivers/soc/fsl/qbman/bman_test_api.c | 151 ++++++++++++++++++++++++++++++++++ 5 files changed, 259 insertions(+) create mode 100644 drivers/soc/fsl/qbman/bman_test.c create mode 100644 drivers/soc/fsl/qbman/bman_test.h create mode 100644 drivers/soc/fsl/qbman/bman_test_api.c (limited to 'drivers') diff --git a/drivers/soc/fsl/qbman/Kconfig b/drivers/soc/fsl/qbman/Kconfig index 8f2df64224bf..e6da6a2f396d 100644 --- a/drivers/soc/fsl/qbman/Kconfig +++ b/drivers/soc/fsl/qbman/Kconfig @@ -25,4 +25,20 @@ config FSL_DPAA_CHECKING Compiles in additional checks, to sanity-check the drivers and any use of the exported API. Not recommended for performance. +config FSL_BMAN_TEST + tristate "BMan self-tests" + help + Compile the BMan self-test code. These tests will + exercise the BMan APIs to confirm functionality + of both the software drivers and hardware device. + +config FSL_BMAN_TEST_API + bool "High-level API self-test" + depends on FSL_BMAN_TEST + default y + help + This requires the presence of cpu-affine portals, and performs + high-level API testing with them (whichever portal(s) are affine + to the cpu(s) the test executes on). + endif # FSL_DPAA diff --git a/drivers/soc/fsl/qbman/Makefile b/drivers/soc/fsl/qbman/Makefile index 6e0ee30b2834..714dd97484b3 100644 --- a/drivers/soc/fsl/qbman/Makefile +++ b/drivers/soc/fsl/qbman/Makefile @@ -1,3 +1,7 @@ obj-$(CONFIG_FSL_DPAA) += bman_ccsr.o qman_ccsr.o \ bman_portal.o qman_portal.o \ bman.o qman.o + +obj-$(CONFIG_FSL_BMAN_TEST) += bman-test.o +bman-test-y = bman_test.o +bman-test-$(CONFIG_FSL_BMAN_TEST_API) += bman_test_api.o diff --git a/drivers/soc/fsl/qbman/bman_test.c b/drivers/soc/fsl/qbman/bman_test.c new file mode 100644 index 000000000000..09b1c960b26a --- /dev/null +++ b/drivers/soc/fsl/qbman/bman_test.c @@ -0,0 +1,53 @@ +/* Copyright 2008 - 2016 Freescale Semiconductor, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. 
+ * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "bman_test.h" + +MODULE_AUTHOR("Geoff Thorpe"); +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_DESCRIPTION("BMan testing"); + +static int test_init(void) +{ +#ifdef CONFIG_FSL_BMAN_TEST_API + int loop = 1; + + while (loop--) + bman_test_api(); +#endif + return 0; +} + +static void test_exit(void) +{ +} + +module_init(test_init); +module_exit(test_exit); diff --git a/drivers/soc/fsl/qbman/bman_test.h b/drivers/soc/fsl/qbman/bman_test.h new file mode 100644 index 000000000000..037ed342add4 --- /dev/null +++ b/drivers/soc/fsl/qbman/bman_test.h @@ -0,0 +1,35 @@ +/* Copyright 2008 - 2016 Freescale Semiconductor, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "bman_priv.h" + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +void bman_test_api(void); diff --git a/drivers/soc/fsl/qbman/bman_test_api.c b/drivers/soc/fsl/qbman/bman_test_api.c new file mode 100644 index 000000000000..6f6bdd154fe3 --- /dev/null +++ b/drivers/soc/fsl/qbman/bman_test_api.c @@ -0,0 +1,151 @@ +/* Copyright 2008 - 2016 Freescale Semiconductor, Inc. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "bman_test.h" + +#define NUM_BUFS 93 +#define LOOPS 3 +#define BMAN_TOKEN_MASK 0x00FFFFFFFFFFLLU + +static struct bman_pool *pool; +static struct bm_buffer bufs_in[NUM_BUFS] ____cacheline_aligned; +static struct bm_buffer bufs_out[NUM_BUFS] ____cacheline_aligned; +static int bufs_received; + +static void bufs_init(void) +{ + int i; + + for (i = 0; i < NUM_BUFS; i++) + bm_buffer_set64(&bufs_in[i], 0xfedc01234567LLU * i); + bufs_received = 0; +} + +static inline int bufs_cmp(const struct bm_buffer *a, const struct bm_buffer *b) +{ + if (bman_ip_rev == BMAN_REV20 || bman_ip_rev == BMAN_REV21) { + + /* + * On SoCs with BMan revison 2.0, BMan only respects the 40 + * LS-bits of buffer addresses, masking off the upper 8-bits on + * release commands. The API provides for 48-bit addresses + * because some SoCs support all 48-bits. When generating + * garbage addresses for testing, we either need to zero the + * upper 8-bits when releasing to BMan (otherwise we'll be + * disappointed when the buffers we acquire back from BMan + * don't match), or we need to mask the upper 8-bits off when + * comparing. We do the latter. 
+ */ + if ((bm_buffer_get64(a) & BMAN_TOKEN_MASK) < + (bm_buffer_get64(b) & BMAN_TOKEN_MASK)) + return -1; + if ((bm_buffer_get64(a) & BMAN_TOKEN_MASK) > + (bm_buffer_get64(b) & BMAN_TOKEN_MASK)) + return 1; + } else { + if (bm_buffer_get64(a) < bm_buffer_get64(b)) + return -1; + if (bm_buffer_get64(a) > bm_buffer_get64(b)) + return 1; + } + + return 0; +} + +static void bufs_confirm(void) +{ + int i, j; + + for (i = 0; i < NUM_BUFS; i++) { + int matches = 0; + + for (j = 0; j < NUM_BUFS; j++) + if (!bufs_cmp(&bufs_in[i], &bufs_out[j])) + matches++; + WARN_ON(matches != 1); + } +} + +/* test */ +void bman_test_api(void) +{ + int i, loops = LOOPS; + + bufs_init(); + + pr_info("%s(): Starting\n", __func__); + + pool = bman_new_pool(); + if (!pool) { + pr_crit("bman_new_pool() failed\n"); + goto failed; + } + + /* Release buffers */ +do_loop: + i = 0; + while (i < NUM_BUFS) { + int num = 8; + + if (i + num > NUM_BUFS) + num = NUM_BUFS - i; + if (bman_release(pool, bufs_in + i, num)) { + pr_crit("bman_release() failed\n"); + goto failed; + } + i += num; + } + + /* Acquire buffers */ + while (i > 0) { + int tmp, num = 8; + + if (num > i) + num = i; + tmp = bman_acquire(pool, bufs_out + i - num, num); + WARN_ON(tmp != num); + i -= num; + } + i = bman_acquire(pool, NULL, 1); + WARN_ON(i > 0); + + bufs_confirm(); + + if (--loops) + goto do_loop; + + /* Clean up */ + bman_free_pool(pool); + pr_info("%s(): Finished\n", __func__); + return; + +failed: + WARN_ON(1); +} -- cgit v1.2.3-59-g8ed1b From de7756233994b48cfc4d948904380cd0453a0063 Mon Sep 17 00:00:00 2001 From: Claudiu Manoil Date: Thu, 22 Sep 2016 18:04:11 +0300 Subject: soc/qman: Add self-test for QMan driver Add self tests for the DPAA 1.x Queue Manager driver. The tests ensure that the driver can properly enqueue and dequeue to/from frame queues using the QMan portal infrastructure. Signed-off-by: Roy Pledge Signed-off-by: Claudiu Manoil Signed-off-by: Scott Wood --- drivers/soc/fsl/qbman/Kconfig | 23 ++ drivers/soc/fsl/qbman/Makefile | 5 + drivers/soc/fsl/qbman/qman_test.c | 62 ++++ drivers/soc/fsl/qbman/qman_test.h | 36 ++ drivers/soc/fsl/qbman/qman_test_api.c | 252 +++++++++++++ drivers/soc/fsl/qbman/qman_test_stash.c | 617 ++++++++++++++++++++++++++++++++ 6 files changed, 995 insertions(+) create mode 100644 drivers/soc/fsl/qbman/qman_test.c create mode 100644 drivers/soc/fsl/qbman/qman_test.h create mode 100644 drivers/soc/fsl/qbman/qman_test_api.c create mode 100644 drivers/soc/fsl/qbman/qman_test_stash.c (limited to 'drivers') diff --git a/drivers/soc/fsl/qbman/Kconfig b/drivers/soc/fsl/qbman/Kconfig index e6da6a2f396d..757033c0586c 100644 --- a/drivers/soc/fsl/qbman/Kconfig +++ b/drivers/soc/fsl/qbman/Kconfig @@ -41,4 +41,27 @@ config FSL_BMAN_TEST_API high-level API testing with them (whichever portal(s) are affine to the cpu(s) the test executes on). +config FSL_QMAN_TEST + tristate "QMan self-tests" + help + Compile self-test code for QMan. + +config FSL_QMAN_TEST_API + bool "QMan high-level self-test" + depends on FSL_QMAN_TEST + default y + help + This requires the presence of cpu-affine portals, and performs + high-level API testing with them (whichever portal(s) are affine to + the cpu(s) the test executes on). + +config FSL_QMAN_TEST_STASH + bool "QMan 'hot potato' data-stashing self-test" + depends on FSL_QMAN_TEST + default y + help + This performs a "hot potato" style test enqueuing/dequeuing a frame + across a series of FQs scheduled to different portals (and cpus), with + DQRR, data and context stashing always on. 
+ endif # FSL_DPAA diff --git a/drivers/soc/fsl/qbman/Makefile b/drivers/soc/fsl/qbman/Makefile index 714dd97484b3..7ae199f1664e 100644 --- a/drivers/soc/fsl/qbman/Makefile +++ b/drivers/soc/fsl/qbman/Makefile @@ -5,3 +5,8 @@ obj-$(CONFIG_FSL_DPAA) += bman_ccsr.o qman_ccsr.o \ obj-$(CONFIG_FSL_BMAN_TEST) += bman-test.o bman-test-y = bman_test.o bman-test-$(CONFIG_FSL_BMAN_TEST_API) += bman_test_api.o + +obj-$(CONFIG_FSL_QMAN_TEST) += qman-test.o +qman-test-y = qman_test.o +qman-test-$(CONFIG_FSL_QMAN_TEST_API) += qman_test_api.o +qman-test-$(CONFIG_FSL_QMAN_TEST_STASH) += qman_test_stash.o diff --git a/drivers/soc/fsl/qbman/qman_test.c b/drivers/soc/fsl/qbman/qman_test.c new file mode 100644 index 000000000000..18f7f0202fa7 --- /dev/null +++ b/drivers/soc/fsl/qbman/qman_test.c @@ -0,0 +1,62 @@ +/* Copyright 2008 - 2016 Freescale Semiconductor, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "qman_test.h" + +MODULE_AUTHOR("Geoff Thorpe"); +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_DESCRIPTION("QMan testing"); + +static int test_init(void) +{ + int loop = 1; + int err = 0; + + while (loop--) { +#ifdef CONFIG_FSL_QMAN_TEST_STASH + err = qman_test_stash(); + if (err) + break; +#endif +#ifdef CONFIG_FSL_QMAN_TEST_API + err = qman_test_api(); + if (err) + break; +#endif + } + return err; +} + +static void test_exit(void) +{ +} + +module_init(test_init); +module_exit(test_exit); diff --git a/drivers/soc/fsl/qbman/qman_test.h b/drivers/soc/fsl/qbman/qman_test.h new file mode 100644 index 000000000000..d5f8cb2260dc --- /dev/null +++ b/drivers/soc/fsl/qbman/qman_test.h @@ -0,0 +1,36 @@ +/* Copyright 2008 - 2016 Freescale Semiconductor, Inc. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "qman_priv.h" + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +int qman_test_stash(void); +int qman_test_api(void); diff --git a/drivers/soc/fsl/qbman/qman_test_api.c b/drivers/soc/fsl/qbman/qman_test_api.c new file mode 100644 index 000000000000..6880ff17f45e --- /dev/null +++ b/drivers/soc/fsl/qbman/qman_test_api.c @@ -0,0 +1,252 @@ +/* Copyright 2008 - 2016 Freescale Semiconductor, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "qman_test.h" + +#define CGR_ID 27 +#define POOL_ID 2 +#define FQ_FLAGS QMAN_FQ_FLAG_DYNAMIC_FQID +#define NUM_ENQUEUES 10 +#define NUM_PARTIAL 4 +#define PORTAL_SDQCR (QM_SDQCR_SOURCE_CHANNELS | \ + QM_SDQCR_TYPE_PRIO_QOS | \ + QM_SDQCR_TOKEN_SET(0x98) | \ + QM_SDQCR_CHANNELS_DEDICATED | \ + QM_SDQCR_CHANNELS_POOL(POOL_ID)) +#define PORTAL_OPAQUE ((void *)0xf00dbeef) +#define VDQCR_FLAGS (QMAN_VOLATILE_FLAG_WAIT | QMAN_VOLATILE_FLAG_FINISH) + +static enum qman_cb_dqrr_result cb_dqrr(struct qman_portal *, + struct qman_fq *, + const struct qm_dqrr_entry *); +static void cb_ern(struct qman_portal *, struct qman_fq *, + const union qm_mr_entry *); +static void cb_fqs(struct qman_portal *, struct qman_fq *, + const union qm_mr_entry *); + +static struct qm_fd fd, fd_dq; +static struct qman_fq fq_base = { + .cb.dqrr = cb_dqrr, + .cb.ern = cb_ern, + .cb.fqs = cb_fqs +}; +static DECLARE_WAIT_QUEUE_HEAD(waitqueue); +static int retire_complete, sdqcr_complete; + +/* Helpers for initialising and "incrementing" a frame descriptor */ +static void fd_init(struct qm_fd *fd) +{ + qm_fd_addr_set64(fd, 0xabdeadbeefLLU); + qm_fd_set_contig_big(fd, 0x0000ffff); + fd->cmd = 0xfeedf00d; +} + +static void fd_inc(struct qm_fd *fd) +{ + u64 t = qm_fd_addr_get64(fd); + int z = t >> 40; + unsigned int len, off; + enum qm_fd_format fmt; + + t <<= 1; + if (z) + t |= 1; + qm_fd_addr_set64(fd, t); + + fmt = qm_fd_get_format(fd); + off = qm_fd_get_offset(fd); + len = qm_fd_get_length(fd); + len--; + qm_fd_set_param(fd, fmt, off, len); + + fd->cmd++; +} + +/* The only part of the 'fd' we can't memcmp() is the ppid */ +static int fd_cmp(const struct qm_fd *a, const struct qm_fd *b) +{ + int r = (qm_fd_addr_get64(a) == qm_fd_addr_get64(b)) ? 0 : -1; + + if (!r) { + enum qm_fd_format fmt_a, fmt_b; + + fmt_a = qm_fd_get_format(a); + fmt_b = qm_fd_get_format(b); + r = fmt_a - fmt_b; + } + if (!r) + r = a->cfg - b->cfg; + if (!r) + r = a->cmd - b->cmd; + return r; +} + +/* test */ +static int do_enqueues(struct qman_fq *fq) +{ + unsigned int loop; + int err = 0; + + for (loop = 0; loop < NUM_ENQUEUES; loop++) { + if (qman_enqueue(fq, &fd)) { + pr_crit("qman_enqueue() failed\n"); + err = -EIO; + } + fd_inc(&fd); + } + + return err; +} + +int qman_test_api(void) +{ + unsigned int flags, frmcnt; + int err; + struct qman_fq *fq = &fq_base; + + pr_info("%s(): Starting\n", __func__); + fd_init(&fd); + fd_init(&fd_dq); + + /* Initialise (parked) FQ */ + err = qman_create_fq(0, FQ_FLAGS, fq); + if (err) { + pr_crit("qman_create_fq() failed\n"); + goto failed; + } + err = qman_init_fq(fq, QMAN_INITFQ_FLAG_LOCAL, NULL); + if (err) { + pr_crit("qman_init_fq() failed\n"); + goto failed; + } + /* Do enqueues + VDQCR, twice. 
(Parked FQ) */ + err = do_enqueues(fq); + if (err) + goto failed; + pr_info("VDQCR (till-empty);\n"); + frmcnt = QM_VDQCR_NUMFRAMES_TILLEMPTY; + err = qman_volatile_dequeue(fq, VDQCR_FLAGS, frmcnt); + if (err) { + pr_crit("qman_volatile_dequeue() failed\n"); + goto failed; + } + err = do_enqueues(fq); + if (err) + goto failed; + pr_info("VDQCR (%d of %d);\n", NUM_PARTIAL, NUM_ENQUEUES); + frmcnt = QM_VDQCR_NUMFRAMES_SET(NUM_PARTIAL); + err = qman_volatile_dequeue(fq, VDQCR_FLAGS, frmcnt); + if (err) { + pr_crit("qman_volatile_dequeue() failed\n"); + goto failed; + } + pr_info("VDQCR (%d of %d);\n", NUM_ENQUEUES - NUM_PARTIAL, + NUM_ENQUEUES); + frmcnt = QM_VDQCR_NUMFRAMES_SET(NUM_ENQUEUES - NUM_PARTIAL); + err = qman_volatile_dequeue(fq, VDQCR_FLAGS, frmcnt); + if (err) { + pr_err("qman_volatile_dequeue() failed\n"); + goto failed; + } + + err = do_enqueues(fq); + if (err) + goto failed; + pr_info("scheduled dequeue (till-empty)\n"); + err = qman_schedule_fq(fq); + if (err) { + pr_crit("qman_schedule_fq() failed\n"); + goto failed; + } + wait_event(waitqueue, sdqcr_complete); + + /* Retire and OOS the FQ */ + err = qman_retire_fq(fq, &flags); + if (err < 0) { + pr_crit("qman_retire_fq() failed\n"); + goto failed; + } + wait_event(waitqueue, retire_complete); + if (flags & QMAN_FQ_STATE_BLOCKOOS) { + err = -EIO; + pr_crit("leaking frames\n"); + goto failed; + } + err = qman_oos_fq(fq); + if (err) { + pr_crit("qman_oos_fq() failed\n"); + goto failed; + } + qman_destroy_fq(fq); + pr_info("%s(): Finished\n", __func__); + return 0; + +failed: + WARN_ON(1); + return err; +} + +static enum qman_cb_dqrr_result cb_dqrr(struct qman_portal *p, + struct qman_fq *fq, + const struct qm_dqrr_entry *dq) +{ + if (WARN_ON(fd_cmp(&fd_dq, &dq->fd))) { + pr_err("BADNESS: dequeued frame doesn't match;\n"); + return qman_cb_dqrr_consume; + } + fd_inc(&fd_dq); + if (!(dq->stat & QM_DQRR_STAT_UNSCHEDULED) && !fd_cmp(&fd_dq, &fd)) { + sdqcr_complete = 1; + wake_up(&waitqueue); + } + return qman_cb_dqrr_consume; +} + +static void cb_ern(struct qman_portal *p, struct qman_fq *fq, + const union qm_mr_entry *msg) +{ + pr_crit("cb_ern() unimplemented"); + WARN_ON(1); +} + +static void cb_fqs(struct qman_portal *p, struct qman_fq *fq, + const union qm_mr_entry *msg) +{ + u8 verb = (msg->verb & QM_MR_VERB_TYPE_MASK); + + if ((verb != QM_MR_VERB_FQRN) && (verb != QM_MR_VERB_FQRNI)) { + pr_crit("unexpected FQS message"); + WARN_ON(1); + return; + } + pr_info("Retirement message received\n"); + retire_complete = 1; + wake_up(&waitqueue); +} diff --git a/drivers/soc/fsl/qbman/qman_test_stash.c b/drivers/soc/fsl/qbman/qman_test_stash.c new file mode 100644 index 000000000000..43cf66ba42f5 --- /dev/null +++ b/drivers/soc/fsl/qbman/qman_test_stash.c @@ -0,0 +1,617 @@ +/* Copyright 2009 - 2016 Freescale Semiconductor, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "qman_test.h" + +#include +#include + +/* + * Algorithm: + * + * Each cpu will have HP_PER_CPU "handlers" set up, each of which incorporates + * an rx/tx pair of FQ objects (both of which are stashed on dequeue). The + * organisation of FQIDs is such that the HP_PER_CPU*NUM_CPUS handlers will + * shuttle a "hot potato" frame around them such that every forwarding action + * moves it from one cpu to another. (The use of more than one handler per cpu + * is to allow enough handlers/FQs to truly test the significance of caching - + * ie. when cache-expiries are occurring.) + * + * The "hot potato" frame content will be HP_NUM_WORDS*4 bytes in size, and the + * first and last words of the frame data will undergo a transformation step on + * each forwarding action. To achieve this, each handler will be assigned a + * 32-bit "mixer", that is produced using a 32-bit LFSR. When a frame is + * received by a handler, the mixer of the expected sender is XOR'd into all + * words of the entire frame, which is then validated against the original + * values. Then, before forwarding, the entire frame is XOR'd with the mixer of + * the current handler. Apart from validating that the frame is taking the + * expected path, this also provides some quasi-realistic overheads to each + * forwarding action - dereferencing *all* the frame data, computation, and + * conditional branching. There is a "special" handler designated to act as the + * instigator of the test by creating an enqueuing the "hot potato" frame, and + * to determine when the test has completed by counting HP_LOOPS iterations. + * + * Init phases: + * + * 1. prepare each cpu's 'hp_cpu' struct using on_each_cpu(,,1) and link them + * into 'hp_cpu_list'. Specifically, set processor_id, allocate HP_PER_CPU + * handlers and link-list them (but do no other handler setup). + * + * 2. scan over 'hp_cpu_list' HP_PER_CPU times, the first time sets each + * hp_cpu's 'iterator' to point to its first handler. With each loop, + * allocate rx/tx FQIDs and mixer values to the hp_cpu's iterator handler + * and advance the iterator for the next loop. This includes a final fixup, + * which connects the last handler to the first (and which is why phase 2 + * and 3 are separate). + * + * 3. scan over 'hp_cpu_list' HP_PER_CPU times, the first time sets each + * hp_cpu's 'iterator' to point to its first handler. With each loop, + * initialise FQ objects and advance the iterator for the next loop. 
+ * Moreover, do this initialisation on the cpu it applies to so that Rx FQ + * initialisation targets the correct cpu. + */ + +/* + * helper to run something on all cpus (can't use on_each_cpu(), as that invokes + * the fn from irq context, which is too restrictive). + */ +struct bstrap { + int (*fn)(void); + atomic_t started; +}; +static int bstrap_fn(void *bs) +{ + struct bstrap *bstrap = bs; + int err; + + atomic_inc(&bstrap->started); + err = bstrap->fn(); + if (err) + return err; + while (!kthread_should_stop()) + msleep(20); + return 0; +} +static int on_all_cpus(int (*fn)(void)) +{ + int cpu; + + for_each_cpu(cpu, cpu_online_mask) { + struct bstrap bstrap = { + .fn = fn, + .started = ATOMIC_INIT(0) + }; + struct task_struct *k = kthread_create(bstrap_fn, &bstrap, + "hotpotato%d", cpu); + int ret; + + if (IS_ERR(k)) + return -ENOMEM; + kthread_bind(k, cpu); + wake_up_process(k); + /* + * If we call kthread_stop() before the "wake up" has had an + * effect, then the thread may exit with -EINTR without ever + * running the function. So poll until it's started before + * requesting it to stop. + */ + while (!atomic_read(&bstrap.started)) + msleep(20); + ret = kthread_stop(k); + if (ret) + return ret; + } + return 0; +} + +struct hp_handler { + + /* The following data is stashed when 'rx' is dequeued; */ + /* -------------- */ + /* The Rx FQ, dequeues of which will stash the entire hp_handler */ + struct qman_fq rx; + /* The Tx FQ we should forward to */ + struct qman_fq tx; + /* The value we XOR post-dequeue, prior to validating */ + u32 rx_mixer; + /* The value we XOR pre-enqueue, after validating */ + u32 tx_mixer; + /* what the hotpotato address should be on dequeue */ + dma_addr_t addr; + u32 *frame_ptr; + + /* The following data isn't (necessarily) stashed on dequeue; */ + /* -------------- */ + u32 fqid_rx, fqid_tx; + /* list node for linking us into 'hp_cpu' */ + struct list_head node; + /* Just to check ... */ + unsigned int processor_id; +} ____cacheline_aligned; + +struct hp_cpu { + /* identify the cpu we run on; */ + unsigned int processor_id; + /* root node for the per-cpu list of handlers */ + struct list_head handlers; + /* list node for linking us into 'hp_cpu_list' */ + struct list_head node; + /* + * when repeatedly scanning 'hp_list', each time linking the n'th + * handlers together, this is used as per-cpu iterator state + */ + struct hp_handler *iterator; +}; + +/* Each cpu has one of these */ +static DEFINE_PER_CPU(struct hp_cpu, hp_cpus); + +/* links together the hp_cpu structs, in first-come first-serve order. */ +static LIST_HEAD(hp_cpu_list); +static spinlock_t hp_lock = __SPIN_LOCK_UNLOCKED(hp_lock); + +static unsigned int hp_cpu_list_length; + +/* the "special" handler, that starts and terminates the test. */ +static struct hp_handler *special_handler; +static int loop_counter; + +/* handlers are allocated out of this, so they're properly aligned. 
*/ +static struct kmem_cache *hp_handler_slab; + +/* this is the frame data */ +static void *__frame_ptr; +static u32 *frame_ptr; +static dma_addr_t frame_dma; + +/* the main function waits on this */ +static DECLARE_WAIT_QUEUE_HEAD(queue); + +#define HP_PER_CPU 2 +#define HP_LOOPS 8 +/* 80 bytes, like a small ethernet frame, and bleeds into a second cacheline */ +#define HP_NUM_WORDS 80 +/* First word of the LFSR-based frame data */ +#define HP_FIRST_WORD 0xabbaf00d + +static inline u32 do_lfsr(u32 prev) +{ + return (prev >> 1) ^ (-(prev & 1u) & 0xd0000001u); +} + +static int allocate_frame_data(void) +{ + u32 lfsr = HP_FIRST_WORD; + int loop; + struct platform_device *pdev = platform_device_alloc("foobar", -1); + + if (!pdev) { + pr_crit("platform_device_alloc() failed"); + return -EIO; + } + if (platform_device_add(pdev)) { + pr_crit("platform_device_add() failed"); + return -EIO; + } + __frame_ptr = kmalloc(4 * HP_NUM_WORDS, GFP_KERNEL); + if (!__frame_ptr) + return -ENOMEM; + + frame_ptr = PTR_ALIGN(__frame_ptr, 64); + for (loop = 0; loop < HP_NUM_WORDS; loop++) { + frame_ptr[loop] = lfsr; + lfsr = do_lfsr(lfsr); + } + frame_dma = dma_map_single(&pdev->dev, frame_ptr, 4 * HP_NUM_WORDS, + DMA_BIDIRECTIONAL); + platform_device_del(pdev); + platform_device_put(pdev); + return 0; +} + +static void deallocate_frame_data(void) +{ + kfree(__frame_ptr); +} + +static inline int process_frame_data(struct hp_handler *handler, + const struct qm_fd *fd) +{ + u32 *p = handler->frame_ptr; + u32 lfsr = HP_FIRST_WORD; + int loop; + + if (qm_fd_addr_get64(fd) != handler->addr) { + pr_crit("bad frame address"); + return -EIO; + } + for (loop = 0; loop < HP_NUM_WORDS; loop++, p++) { + *p ^= handler->rx_mixer; + if (*p != lfsr) { + pr_crit("corrupt frame data"); + return -EIO; + } + *p ^= handler->tx_mixer; + lfsr = do_lfsr(lfsr); + } + return 0; +} + +static enum qman_cb_dqrr_result normal_dqrr(struct qman_portal *portal, + struct qman_fq *fq, + const struct qm_dqrr_entry *dqrr) +{ + struct hp_handler *handler = (struct hp_handler *)fq; + + if (process_frame_data(handler, &dqrr->fd)) { + WARN_ON(1); + goto skip; + } + if (qman_enqueue(&handler->tx, &dqrr->fd)) { + pr_crit("qman_enqueue() failed"); + WARN_ON(1); + } +skip: + return qman_cb_dqrr_consume; +} + +static enum qman_cb_dqrr_result special_dqrr(struct qman_portal *portal, + struct qman_fq *fq, + const struct qm_dqrr_entry *dqrr) +{ + struct hp_handler *handler = (struct hp_handler *)fq; + + process_frame_data(handler, &dqrr->fd); + if (++loop_counter < HP_LOOPS) { + if (qman_enqueue(&handler->tx, &dqrr->fd)) { + pr_crit("qman_enqueue() failed"); + WARN_ON(1); + goto skip; + } + } else { + pr_info("Received final (%dth) frame\n", loop_counter); + wake_up(&queue); + } +skip: + return qman_cb_dqrr_consume; +} + +static int create_per_cpu_handlers(void) +{ + struct hp_handler *handler; + int loop; + struct hp_cpu *hp_cpu = this_cpu_ptr(&hp_cpus); + + hp_cpu->processor_id = smp_processor_id(); + spin_lock(&hp_lock); + list_add_tail(&hp_cpu->node, &hp_cpu_list); + hp_cpu_list_length++; + spin_unlock(&hp_lock); + INIT_LIST_HEAD(&hp_cpu->handlers); + for (loop = 0; loop < HP_PER_CPU; loop++) { + handler = kmem_cache_alloc(hp_handler_slab, GFP_KERNEL); + if (!handler) { + pr_crit("kmem_cache_alloc() failed"); + WARN_ON(1); + return -EIO; + } + handler->processor_id = hp_cpu->processor_id; + handler->addr = frame_dma; + handler->frame_ptr = frame_ptr; + list_add_tail(&handler->node, &hp_cpu->handlers); + } + return 0; +} + +static int 
destroy_per_cpu_handlers(void) +{ + struct list_head *loop, *tmp; + struct hp_cpu *hp_cpu = this_cpu_ptr(&hp_cpus); + + spin_lock(&hp_lock); + list_del(&hp_cpu->node); + spin_unlock(&hp_lock); + list_for_each_safe(loop, tmp, &hp_cpu->handlers) { + u32 flags = 0; + struct hp_handler *handler = list_entry(loop, struct hp_handler, + node); + if (qman_retire_fq(&handler->rx, &flags) || + (flags & QMAN_FQ_STATE_BLOCKOOS)) { + pr_crit("qman_retire_fq(rx) failed, flags: %x", flags); + WARN_ON(1); + return -EIO; + } + if (qman_oos_fq(&handler->rx)) { + pr_crit("qman_oos_fq(rx) failed"); + WARN_ON(1); + return -EIO; + } + qman_destroy_fq(&handler->rx); + qman_destroy_fq(&handler->tx); + qman_release_fqid(handler->fqid_rx); + list_del(&handler->node); + kmem_cache_free(hp_handler_slab, handler); + } + return 0; +} + +static inline u8 num_cachelines(u32 offset) +{ + u8 res = (offset + (L1_CACHE_BYTES - 1)) + / (L1_CACHE_BYTES); + if (res > 3) + return 3; + return res; +} +#define STASH_DATA_CL \ + num_cachelines(HP_NUM_WORDS * 4) +#define STASH_CTX_CL \ + num_cachelines(offsetof(struct hp_handler, fqid_rx)) + +static int init_handler(void *h) +{ + struct qm_mcc_initfq opts; + struct hp_handler *handler = h; + int err; + + if (handler->processor_id != smp_processor_id()) { + err = -EIO; + goto failed; + } + /* Set up rx */ + memset(&handler->rx, 0, sizeof(handler->rx)); + if (handler == special_handler) + handler->rx.cb.dqrr = special_dqrr; + else + handler->rx.cb.dqrr = normal_dqrr; + err = qman_create_fq(handler->fqid_rx, 0, &handler->rx); + if (err) { + pr_crit("qman_create_fq(rx) failed"); + goto failed; + } + memset(&opts, 0, sizeof(opts)); + opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_CONTEXTA; + opts.fqd.fq_ctrl = QM_FQCTRL_CTXASTASHING; + qm_fqd_set_stashing(&opts.fqd, 0, STASH_DATA_CL, STASH_CTX_CL); + err = qman_init_fq(&handler->rx, QMAN_INITFQ_FLAG_SCHED | + QMAN_INITFQ_FLAG_LOCAL, &opts); + if (err) { + pr_crit("qman_init_fq(rx) failed"); + goto failed; + } + /* Set up tx */ + memset(&handler->tx, 0, sizeof(handler->tx)); + err = qman_create_fq(handler->fqid_tx, QMAN_FQ_FLAG_NO_MODIFY, + &handler->tx); + if (err) { + pr_crit("qman_create_fq(tx) failed"); + goto failed; + } + + return 0; +failed: + return err; +} + +static void init_handler_cb(void *h) +{ + if (init_handler(h)) + WARN_ON(1); +} + +static int init_phase2(void) +{ + int loop; + u32 fqid = 0; + u32 lfsr = 0xdeadbeef; + struct hp_cpu *hp_cpu; + struct hp_handler *handler; + + for (loop = 0; loop < HP_PER_CPU; loop++) { + list_for_each_entry(hp_cpu, &hp_cpu_list, node) { + int err; + + if (!loop) + hp_cpu->iterator = list_first_entry( + &hp_cpu->handlers, + struct hp_handler, node); + else + hp_cpu->iterator = list_entry( + hp_cpu->iterator->node.next, + struct hp_handler, node); + /* Rx FQID is the previous handler's Tx FQID */ + hp_cpu->iterator->fqid_rx = fqid; + /* Allocate new FQID for Tx */ + err = qman_alloc_fqid(&fqid); + if (err) { + pr_crit("qman_alloc_fqid() failed"); + return err; + } + hp_cpu->iterator->fqid_tx = fqid; + /* Rx mixer is the previous handler's Tx mixer */ + hp_cpu->iterator->rx_mixer = lfsr; + /* Get new mixer for Tx */ + lfsr = do_lfsr(lfsr); + hp_cpu->iterator->tx_mixer = lfsr; + } + } + /* Fix up the first handler (fqid_rx==0, rx_mixer=0xdeadbeef) */ + hp_cpu = list_first_entry(&hp_cpu_list, struct hp_cpu, node); + handler = list_first_entry(&hp_cpu->handlers, struct hp_handler, node); + if (handler->fqid_rx != 0 || handler->rx_mixer != 0xdeadbeef) + return 1; + handler->fqid_rx = fqid; + 
handler->rx_mixer = lfsr; + /* and tag it as our "special" handler */ + special_handler = handler; + return 0; +} + +static int init_phase3(void) +{ + int loop, err; + struct hp_cpu *hp_cpu; + + for (loop = 0; loop < HP_PER_CPU; loop++) { + list_for_each_entry(hp_cpu, &hp_cpu_list, node) { + if (!loop) + hp_cpu->iterator = list_first_entry( + &hp_cpu->handlers, + struct hp_handler, node); + else + hp_cpu->iterator = list_entry( + hp_cpu->iterator->node.next, + struct hp_handler, node); + preempt_disable(); + if (hp_cpu->processor_id == smp_processor_id()) { + err = init_handler(hp_cpu->iterator); + if (err) + return err; + } else { + smp_call_function_single(hp_cpu->processor_id, + init_handler_cb, hp_cpu->iterator, 1); + } + preempt_enable(); + } + } + return 0; +} + +static int send_first_frame(void *ignore) +{ + u32 *p = special_handler->frame_ptr; + u32 lfsr = HP_FIRST_WORD; + int loop, err; + struct qm_fd fd; + + if (special_handler->processor_id != smp_processor_id()) { + err = -EIO; + goto failed; + } + memset(&fd, 0, sizeof(fd)); + qm_fd_addr_set64(&fd, special_handler->addr); + qm_fd_set_contig_big(&fd, HP_NUM_WORDS * 4); + for (loop = 0; loop < HP_NUM_WORDS; loop++, p++) { + if (*p != lfsr) { + err = -EIO; + pr_crit("corrupt frame data"); + goto failed; + } + *p ^= special_handler->tx_mixer; + lfsr = do_lfsr(lfsr); + } + pr_info("Sending first frame\n"); + err = qman_enqueue(&special_handler->tx, &fd); + if (err) { + pr_crit("qman_enqueue() failed"); + goto failed; + } + + return 0; +failed: + return err; +} + +static void send_first_frame_cb(void *ignore) +{ + if (send_first_frame(NULL)) + WARN_ON(1); +} + +int qman_test_stash(void) +{ + int err; + + if (cpumask_weight(cpu_online_mask) < 2) { + pr_info("%s(): skip - only 1 CPU\n", __func__); + return 0; + } + + pr_info("%s(): Starting\n", __func__); + + hp_cpu_list_length = 0; + loop_counter = 0; + hp_handler_slab = kmem_cache_create("hp_handler_slab", + sizeof(struct hp_handler), L1_CACHE_BYTES, + SLAB_HWCACHE_ALIGN, NULL); + if (!hp_handler_slab) { + err = -EIO; + pr_crit("kmem_cache_create() failed"); + goto failed; + } + + err = allocate_frame_data(); + if (err) + goto failed; + + /* Init phase 1 */ + pr_info("Creating %d handlers per cpu...\n", HP_PER_CPU); + if (on_all_cpus(create_per_cpu_handlers)) { + err = -EIO; + pr_crit("on_each_cpu() failed"); + goto failed; + } + pr_info("Number of cpus: %d, total of %d handlers\n", + hp_cpu_list_length, hp_cpu_list_length * HP_PER_CPU); + + err = init_phase2(); + if (err) + goto failed; + + err = init_phase3(); + if (err) + goto failed; + + preempt_disable(); + if (special_handler->processor_id == smp_processor_id()) { + err = send_first_frame(NULL); + if (err) + goto failed; + } else { + smp_call_function_single(special_handler->processor_id, + send_first_frame_cb, NULL, 1); + } + preempt_enable(); + + wait_event(queue, loop_counter == HP_LOOPS); + deallocate_frame_data(); + if (on_all_cpus(destroy_per_cpu_handlers)) { + err = -EIO; + pr_crit("on_each_cpu() failed"); + goto failed; + } + kmem_cache_destroy(hp_handler_slab); + pr_info("%s(): Finished\n", __func__); + + return 0; +failed: + WARN_ON(1); + return err; +} -- cgit v1.2.3-59-g8ed1b From e0b80f00bb96b925995d53980e0c764430bedb42 Mon Sep 17 00:00:00 2001 From: Claudiu Manoil Date: Thu, 22 Sep 2016 18:04:12 +0300 Subject: arch/powerpc: Add CONFIG_FSL_DPAA to corenetXX_smp_defconfig Enable the drivers on the powerpc arch. 
Signed-off-by: Roy Pledge Signed-off-by: Claudiu Manoil Signed-off-by: Scott Wood --- arch/powerpc/Makefile | 4 ++-- arch/powerpc/configs/dpaa.config | 1 + drivers/soc/Kconfig | 1 + drivers/soc/fsl/Makefile | 1 + 4 files changed, 5 insertions(+), 2 deletions(-) create mode 100644 arch/powerpc/configs/dpaa.config (limited to 'drivers') diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile index 50d020ac0f48..617dece67924 100644 --- a/arch/powerpc/Makefile +++ b/arch/powerpc/Makefile @@ -318,12 +318,12 @@ mpc85xx_smp_defconfig: PHONY += corenet32_smp_defconfig corenet32_smp_defconfig: $(call merge_into_defconfig,corenet_basic_defconfig,\ - 85xx-32bit 85xx-smp 85xx-hw fsl-emb-nonhw) + 85xx-32bit 85xx-smp 85xx-hw fsl-emb-nonhw dpaa) PHONY += corenet64_smp_defconfig corenet64_smp_defconfig: $(call merge_into_defconfig,corenet_basic_defconfig,\ - 85xx-64bit 85xx-smp altivec 85xx-hw fsl-emb-nonhw) + 85xx-64bit 85xx-smp altivec 85xx-hw fsl-emb-nonhw dpaa) PHONY += mpc86xx_defconfig mpc86xx_defconfig: diff --git a/arch/powerpc/configs/dpaa.config b/arch/powerpc/configs/dpaa.config new file mode 100644 index 000000000000..efa99c048543 --- /dev/null +++ b/arch/powerpc/configs/dpaa.config @@ -0,0 +1 @@ +CONFIG_FSL_DPAA=y diff --git a/drivers/soc/Kconfig b/drivers/soc/Kconfig index fe42a2fdf351..e6e90e80519a 100644 --- a/drivers/soc/Kconfig +++ b/drivers/soc/Kconfig @@ -1,6 +1,7 @@ menu "SOC (System On Chip) specific Drivers" source "drivers/soc/bcm/Kconfig" +source "drivers/soc/fsl/qbman/Kconfig" source "drivers/soc/fsl/qe/Kconfig" source "drivers/soc/mediatek/Kconfig" source "drivers/soc/qcom/Kconfig" diff --git a/drivers/soc/fsl/Makefile b/drivers/soc/fsl/Makefile index 203307fd92c1..75e1f5334821 100644 --- a/drivers/soc/fsl/Makefile +++ b/drivers/soc/fsl/Makefile @@ -2,5 +2,6 @@ # Makefile for the Linux Kernel SOC fsl specific device drivers # +obj-$(CONFIG_FSL_DPAA) += qbman/ obj-$(CONFIG_QUICC_ENGINE) += qe/ obj-$(CONFIG_CPM) += qe/ -- cgit v1.2.3-59-g8ed1b From 0825b8ee2e58ac6616a9936a745a8ad68b0dd362 Mon Sep 17 00:00:00 2001 From: Baoyou Xie Date: Mon, 26 Sep 2016 20:00:59 +0800 Subject: scsi: be2iscsi: mark symbols static where possible We get 6 warnings when building kernel with W=1: drivers/scsi/be2iscsi/be_main.c:65:1: warning: no previous prototype for 'beiscsi_log_enable_disp' [-Wmissing-prototypes] drivers/scsi/be2iscsi/be_main.c:78:1: warning: no previous prototype for 'beiscsi_log_enable_change' [-Wmissing-prototypes] drivers/scsi/be2iscsi/be_main.c:97:1: warning: no previous prototype for 'beiscsi_log_enable_store' [-Wmissing-prototypes] drivers/scsi/be2iscsi/be_main.c:116:1: warning: no previous prototype for 'beiscsi_log_enable_init' [-Wmissing-prototypes] drivers/scsi/be2iscsi/be_main.c:4587:5: warning: no previous prototype for 'beiscsi_iotask_v2' [-Wmissing-prototypes] drivers/scsi/be2iscsi/be_main.c:4976:6: warning: no previous prototype for 'beiscsi_hba_attrs_init' [-Wmissing-prototypes] In fact, these functions are only used in the file in which they are declared and don't need a declaration, but can be made static. So this patch marks these functions with 'static'. Signed-off-by: Baoyou Xie Reviewed-by: Jitendra Bhivare Signed-off-by: Martin K. 
Petersen --- drivers/scsi/be2iscsi/be_main.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c index 6a6906f847db..68138a647dfc 100644 --- a/drivers/scsi/be2iscsi/be_main.c +++ b/drivers/scsi/be2iscsi/be_main.c @@ -61,7 +61,7 @@ MODULE_PARM_DESC(be_max_phys_size, "memory that can be allocated. Range is 16 - 128"); #define beiscsi_disp_param(_name)\ -ssize_t \ +static ssize_t \ beiscsi_##_name##_disp(struct device *dev,\ struct device_attribute *attrib, char *buf) \ { \ @@ -74,7 +74,7 @@ beiscsi_##_name##_disp(struct device *dev,\ } #define beiscsi_change_param(_name, _minval, _maxval, _defaval)\ -int \ +static int \ beiscsi_##_name##_change(struct beiscsi_hba *phba, uint32_t val)\ {\ if (val >= _minval && val <= _maxval) {\ @@ -93,7 +93,7 @@ beiscsi_##_name##_change(struct beiscsi_hba *phba, uint32_t val)\ } #define beiscsi_store_param(_name) \ -ssize_t \ +static ssize_t \ beiscsi_##_name##_store(struct device *dev,\ struct device_attribute *attr, const char *buf,\ size_t count) \ @@ -112,7 +112,7 @@ beiscsi_##_name##_store(struct device *dev,\ } #define beiscsi_init_param(_name, _minval, _maxval, _defval) \ -int \ +static int \ beiscsi_##_name##_init(struct beiscsi_hba *phba, uint32_t val) \ { \ if (val >= _minval && val <= _maxval) {\ @@ -4584,7 +4584,7 @@ free_hndls: io_task->cmd_bhs = NULL; return -ENOMEM; } -int beiscsi_iotask_v2(struct iscsi_task *task, struct scatterlist *sg, +static int beiscsi_iotask_v2(struct iscsi_task *task, struct scatterlist *sg, unsigned int num_sg, unsigned int xferlen, unsigned int writedir) { @@ -4973,7 +4973,7 @@ static int beiscsi_bsg_request(struct bsg_job *job) return rc; } -void beiscsi_hba_attrs_init(struct beiscsi_hba *phba) +static void beiscsi_hba_attrs_init(struct beiscsi_hba *phba) { /* Set the logging parameter */ beiscsi_log_enable_init(phba, beiscsi_log_enable); -- cgit v1.2.3-59-g8ed1b From 46c1cf706076500cdcde3445be97233793eec7f1 Mon Sep 17 00:00:00 2001 From: Kyuho Choi Date: Mon, 26 Sep 2016 23:58:25 +0900 Subject: scsi: ufs: Enable no vccq quirk for skhynix device This patch enable no vccq quirk for SKHynix devices. SKHynix ufs device don't need vccq vrail for device operation. Signed-off-by: Kyuho Choi Reviewed-by: Subhash Jadavani Signed-off-by: Martin K. Petersen --- drivers/scsi/ufs/ufs_quirks.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers') diff --git a/drivers/scsi/ufs/ufs_quirks.h b/drivers/scsi/ufs/ufs_quirks.h index ee4ab85e2801..22f881e9253a 100644 --- a/drivers/scsi/ufs/ufs_quirks.h +++ b/drivers/scsi/ufs/ufs_quirks.h @@ -25,6 +25,7 @@ #define UFS_VENDOR_TOSHIBA 0x198 #define UFS_VENDOR_SAMSUNG 0x1CE +#define UFS_VENDOR_SKHYNIX 0x1AD /** * ufs_device_info - ufs device details @@ -145,6 +146,7 @@ static struct ufs_dev_fix ufs_fixups[] = { UFS_DEVICE_QUIRK_PA_TACTIVATE), UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9D8KBADG", UFS_DEVICE_QUIRK_PA_TACTIVATE), + UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ), END_FIX }; -- cgit v1.2.3-59-g8ed1b From fe9b34bfcb4e80b809a8a3dccdd49737d073593b Mon Sep 17 00:00:00 2001 From: Varun Prakash Date: Fri, 23 Sep 2016 20:07:40 +0530 Subject: scsi: cxgb4i: Set completion bit in work request If SKCBF_TX_FLAG_COMPL flag is set and unacknowledged credits are >= max credits / 2, set completion bit in work request to reclaim credits. Signed-off-by: Varun Prakash Signed-off-by: Martin K. 
Petersen --- drivers/scsi/cxgbi/cxgb4i/cxgb4i.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'drivers') diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c index e4ba2d2616cd..2487ae6be272 100644 --- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c +++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c @@ -682,6 +682,11 @@ static int push_tx_frames(struct cxgbi_sock *csk, int req_completion) req_completion); csk->snd_nxt += len; cxgbi_skcb_clear_flag(skb, SKCBF_TX_NEED_HDR); + } else if (cxgbi_skcb_test_flag(skb, SKCBF_TX_FLAG_COMPL) && + (csk->wr_una_cred >= (csk->wr_max_cred / 2))) { + struct cpl_close_con_req *req = + (struct cpl_close_con_req *)skb->data; + req->wr.wr_hi |= htonl(FW_WR_COMPL_F); } total_size += skb->truesize; t4_set_arp_err_handler(skb, csk, arp_failure_skb_discard); -- cgit v1.2.3-59-g8ed1b From 54f9c4d0778b3f9ab791b1b7eb1a5d2809f02f50 Mon Sep 17 00:00:00 2001 From: Alistair Popple Date: Tue, 20 Sep 2016 09:01:38 +0200 Subject: ipmi: add an Aspeed BT IPMI BMC driver MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch adds a simple device driver to expose the iBT interface on Aspeed SOCs (AST2400 and AST2500) as a character device. Such SOCs are commonly used as BMCs (BaseBoard Management Controllers) and this driver implements the BMC side of the BT interface. The BT (Block Transfer) interface is used to perform in-band IPMI communication between a host and its BMC. Entire messages are buffered before sending a notification to the other end, host or BMC, that there is data to be read. Usually, the host emits requests and the BMC sends responses, but the specification provides a means for the BMC to send SMS Attention (BMC-to-Host attention or System Management Software attention) messages. For this purpose, the driver introduces a specific ioctl on the device: 'BT_BMC_IOCTL_SMS_ATN' that can be used by the system running on the BMC to signal the host of such an event.
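As a rough sketch of how BMC-side system software might drive this interface (added here for illustration, not part of the original patch), the snippet below reads one buffered request from the character device, writes a minimal response and then raises SMS Attention with the new ioctl. The device node name and BT_BMC_IOCTL_SMS_ATN come from this driver; the "request NetFn + 1" adjustment and the completion-code byte follow the usual IPMI response convention and are assumptions of the sketch.

#include <fcntl.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/bt-bmc.h>	/* BT_BMC_IOCTL_SMS_ATN, installed by this patch */

int main(void)
{
	uint8_t req[256], rsp[5];
	ssize_t n;
	int fd = open("/dev/ipmi-bt-host", O_RDWR);

	if (fd < 0)
		return 1;

	/*
	 * read() blocks until the host raises H2B_ATN and returns one whole
	 * message: req[0] = length, req[1] = NetFn/LUN, req[2] = Seq,
	 * req[3] = Cmd, req[4..] = data (format documented in the driver).
	 */
	n = read(fd, req, sizeof(req));
	if (n < 4) {
		close(fd);
		return 1;
	}

	/*
	 * Minimal response: Length, NetFn/LUN, Seq, Cmd, completion code.
	 * The driver rejects writes shorter than 5 bytes. Treating the
	 * response NetFn as "request NetFn + 1" (bits 7:2 of the byte) is
	 * an IPMI convention assumed here, not something this patch defines.
	 */
	rsp[0] = 4;			/* length, excluding the length byte */
	rsp[1] = req[1] + (1 << 2);
	rsp[2] = req[2];
	rsp[3] = req[3];
	rsp[4] = 0x00;			/* completion code: success */
	if (write(fd, rsp, sizeof(rsp)) != sizeof(rsp)) {
		close(fd);
		return 1;
	}

	/* Ask the driver to raise SMS Attention towards the host. */
	ioctl(fd, BT_BMC_IOCTL_SMS_ATN);

	close(fd);
	return 0;
}

Because the driver buffers entire messages, each read() and write() in the sketch transfers a complete BT frame and no framing beyond the leading length byte is needed.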
The device name defaults to '/dev/ipmi-bt-host' Signed-off-by: Alistair Popple Signed-off-by: Jeremy Kerr Signed-off-by: Joel Stanley [clg: - checkpatch fixes - added a devicetree binding documentation - replace 'bt_host' by 'bt_bmc' to reflect that the driver is the BMC side of the IPMI BT interface - renamed the device to 'ipmi-bt-host' - introduced a temporary buffer to copy_{to,from}_user - used platform_get_irq() - moved the driver under drivers/char/ipmi/ but kept it as a misc device - changed the compatible cell to "aspeed,ast2400-bt-bmc" ] Signed-off-by: Cédric Le Goater Acked-by: Arnd Bergmann [clg: - checkpatch --strict fixes - removed the use of devm_iounmap, devm_kfree in cleanup paths - introduced an atomic-t to limit opens to 1 - introduced a mutex to protect write/read operations] Acked-by: Rob Herring Signed-off-by: Cédric Le Goater Signed-off-by: Corey Minyard --- Documentation/devicetree/bindings/ipmi.txt | 25 - .../bindings/ipmi/aspeed,ast2400-bt-bmc.txt | 23 + .../devicetree/bindings/ipmi/ipmi-smic.txt | 25 + drivers/Makefile | 2 +- drivers/char/ipmi/Kconfig | 7 + drivers/char/ipmi/Makefile | 1 + drivers/char/ipmi/bt-bmc.c | 510 +++++++++++++++++++++ include/uapi/linux/Kbuild | 1 + include/uapi/linux/bt-bmc.h | 18 + 9 files changed, 586 insertions(+), 26 deletions(-) delete mode 100644 Documentation/devicetree/bindings/ipmi.txt create mode 100644 Documentation/devicetree/bindings/ipmi/aspeed,ast2400-bt-bmc.txt create mode 100644 Documentation/devicetree/bindings/ipmi/ipmi-smic.txt create mode 100644 drivers/char/ipmi/bt-bmc.c create mode 100644 include/uapi/linux/bt-bmc.h (limited to 'drivers') diff --git a/Documentation/devicetree/bindings/ipmi.txt b/Documentation/devicetree/bindings/ipmi.txt deleted file mode 100644 index d5f1a877ed3e..000000000000 --- a/Documentation/devicetree/bindings/ipmi.txt +++ /dev/null @@ -1,25 +0,0 @@ -IPMI device - -Required properties: -- compatible: should be one of ipmi-kcs, ipmi-smic, or ipmi-bt -- device_type: should be ipmi -- reg: Address and length of the register set for the device - -Optional properties: -- interrupts: The interrupt for the device. Without this the interface - is polled. -- reg-size - The size of the register. Defaults to 1 -- reg-spacing - The number of bytes between register starts. Defaults to 1 -- reg-shift - The amount to shift the registers to the right to get the data - into bit zero. - -Example: - -smic@fff3a000 { - compatible = "ipmi-smic"; - device_type = "ipmi"; - reg = <0xfff3a000 0x1000>; - interrupts = <0 24 4>; - reg-size = <4>; - reg-spacing = <4>; -}; diff --git a/Documentation/devicetree/bindings/ipmi/aspeed,ast2400-bt-bmc.txt b/Documentation/devicetree/bindings/ipmi/aspeed,ast2400-bt-bmc.txt new file mode 100644 index 000000000000..fbbacd958240 --- /dev/null +++ b/Documentation/devicetree/bindings/ipmi/aspeed,ast2400-bt-bmc.txt @@ -0,0 +1,23 @@ +* Aspeed BT (Block Transfer) IPMI interface + +The Aspeed SOCs (AST2400 and AST2500) are commonly used as BMCs +(BaseBoard Management Controllers) and the BT interface can be used to +perform in-band IPMI communication with their host. + +Required properties: + +- compatible : should be "aspeed,ast2400-bt-bmc" +- reg: physical address and size of the registers + +Optional properties: + +- interrupts: interrupt generated by the BT interface. without an + interrupt, the driver will operate in poll mode. 
+ +Example: + + ibt@1e789140 { + compatible = "aspeed,ast2400-bt-bmc"; + reg = <0x1e789140 0x18>; + interrupts = <8>; + }; diff --git a/Documentation/devicetree/bindings/ipmi/ipmi-smic.txt b/Documentation/devicetree/bindings/ipmi/ipmi-smic.txt new file mode 100644 index 000000000000..d5f1a877ed3e --- /dev/null +++ b/Documentation/devicetree/bindings/ipmi/ipmi-smic.txt @@ -0,0 +1,25 @@ +IPMI device + +Required properties: +- compatible: should be one of ipmi-kcs, ipmi-smic, or ipmi-bt +- device_type: should be ipmi +- reg: Address and length of the register set for the device + +Optional properties: +- interrupts: The interrupt for the device. Without this the interface + is polled. +- reg-size - The size of the register. Defaults to 1 +- reg-spacing - The number of bytes between register starts. Defaults to 1 +- reg-shift - The amount to shift the registers to the right to get the data + into bit zero. + +Example: + +smic@fff3a000 { + compatible = "ipmi-smic"; + device_type = "ipmi"; + reg = <0xfff3a000 0x1000>; + interrupts = <0 24 4>; + reg-size = <4>; + reg-spacing = <4>; +}; diff --git a/drivers/Makefile b/drivers/Makefile index 53abb4a5f736..5a9e7b6b7928 100644 --- a/drivers/Makefile +++ b/drivers/Makefile @@ -21,7 +21,7 @@ obj-y += video/ obj-y += idle/ # IPMI must come before ACPI in order to provide IPMI opregion support -obj-$(CONFIG_IPMI_HANDLER) += char/ipmi/ +obj-y += char/ipmi/ obj-$(CONFIG_ACPI) += acpi/ obj-$(CONFIG_SFI) += sfi/ diff --git a/drivers/char/ipmi/Kconfig b/drivers/char/ipmi/Kconfig index 5a9350b1069a..2c234e3e7513 100644 --- a/drivers/char/ipmi/Kconfig +++ b/drivers/char/ipmi/Kconfig @@ -76,3 +76,10 @@ config IPMI_POWEROFF the IPMI management controller is capable of this. endif # IPMI_HANDLER + +config ASPEED_BT_IPMI_BMC + tristate "BT IPMI bmc driver" + help + Provides a driver for the BT (Block Transfer) IPMI interface + found on Aspeed SOCs (AST2400 and AST2500). The driver + implements the BMC side of the BT interface. diff --git a/drivers/char/ipmi/Makefile b/drivers/char/ipmi/Makefile index f3ffde1f5f1f..0d98cd91def1 100644 --- a/drivers/char/ipmi/Makefile +++ b/drivers/char/ipmi/Makefile @@ -11,3 +11,4 @@ obj-$(CONFIG_IPMI_SSIF) += ipmi_ssif.o obj-$(CONFIG_IPMI_POWERNV) += ipmi_powernv.o obj-$(CONFIG_IPMI_WATCHDOG) += ipmi_watchdog.o obj-$(CONFIG_IPMI_POWEROFF) += ipmi_poweroff.o +obj-$(CONFIG_ASPEED_BT_IPMI_BMC) += bt-bmc.o diff --git a/drivers/char/ipmi/bt-bmc.c b/drivers/char/ipmi/bt-bmc.c new file mode 100644 index 000000000000..2e880bf0be26 --- /dev/null +++ b/drivers/char/ipmi/bt-bmc.c @@ -0,0 +1,510 @@ +/* + * Copyright (c) 2015-2016, IBM Corporation. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * This is a BMC device used to communicate to the host + */ +#define DEVICE_NAME "ipmi-bt-host" + +#define BT_IO_BASE 0xe4 +#define BT_IRQ 10 + +#define BT_CR0 0x0 +#define BT_CR0_IO_BASE 16 +#define BT_CR0_IRQ 12 +#define BT_CR0_EN_CLR_SLV_RDP 0x8 +#define BT_CR0_EN_CLR_SLV_WRP 0x4 +#define BT_CR0_ENABLE_IBT 0x1 +#define BT_CR1 0x4 +#define BT_CR1_IRQ_H2B 0x01 +#define BT_CR1_IRQ_HBUSY 0x40 +#define BT_CR2 0x8 +#define BT_CR2_IRQ_H2B 0x01 +#define BT_CR2_IRQ_HBUSY 0x40 +#define BT_CR3 0xc +#define BT_CTRL 0x10 +#define BT_CTRL_B_BUSY 0x80 +#define BT_CTRL_H_BUSY 0x40 +#define BT_CTRL_OEM0 0x20 +#define BT_CTRL_SMS_ATN 0x10 +#define BT_CTRL_B2H_ATN 0x08 +#define BT_CTRL_H2B_ATN 0x04 +#define BT_CTRL_CLR_RD_PTR 0x02 +#define BT_CTRL_CLR_WR_PTR 0x01 +#define BT_BMC2HOST 0x14 +#define BT_INTMASK 0x18 +#define BT_INTMASK_B2H_IRQEN 0x01 +#define BT_INTMASK_B2H_IRQ 0x02 +#define BT_INTMASK_BMC_HWRST 0x80 + +#define BT_BMC_BUFFER_SIZE 256 + +struct bt_bmc { + struct device dev; + struct miscdevice miscdev; + void __iomem *base; + int irq; + wait_queue_head_t queue; + struct timer_list poll_timer; + struct mutex mutex; +}; + +static atomic_t open_count = ATOMIC_INIT(0); + +static u8 bt_inb(struct bt_bmc *bt_bmc, int reg) +{ + return ioread8(bt_bmc->base + reg); +} + +static void bt_outb(struct bt_bmc *bt_bmc, u8 data, int reg) +{ + iowrite8(data, bt_bmc->base + reg); +} + +static void clr_rd_ptr(struct bt_bmc *bt_bmc) +{ + bt_outb(bt_bmc, BT_CTRL_CLR_RD_PTR, BT_CTRL); +} + +static void clr_wr_ptr(struct bt_bmc *bt_bmc) +{ + bt_outb(bt_bmc, BT_CTRL_CLR_WR_PTR, BT_CTRL); +} + +static void clr_h2b_atn(struct bt_bmc *bt_bmc) +{ + bt_outb(bt_bmc, BT_CTRL_H2B_ATN, BT_CTRL); +} + +static void set_b_busy(struct bt_bmc *bt_bmc) +{ + if (!(bt_inb(bt_bmc, BT_CTRL) & BT_CTRL_B_BUSY)) + bt_outb(bt_bmc, BT_CTRL_B_BUSY, BT_CTRL); +} + +static void clr_b_busy(struct bt_bmc *bt_bmc) +{ + if (bt_inb(bt_bmc, BT_CTRL) & BT_CTRL_B_BUSY) + bt_outb(bt_bmc, BT_CTRL_B_BUSY, BT_CTRL); +} + +static void set_b2h_atn(struct bt_bmc *bt_bmc) +{ + bt_outb(bt_bmc, BT_CTRL_B2H_ATN, BT_CTRL); +} + +static u8 bt_read(struct bt_bmc *bt_bmc) +{ + return bt_inb(bt_bmc, BT_BMC2HOST); +} + +static ssize_t bt_readn(struct bt_bmc *bt_bmc, u8 *buf, size_t n) +{ + int i; + + for (i = 0; i < n; i++) + buf[i] = bt_read(bt_bmc); + return n; +} + +static void bt_write(struct bt_bmc *bt_bmc, u8 c) +{ + bt_outb(bt_bmc, c, BT_BMC2HOST); +} + +static ssize_t bt_writen(struct bt_bmc *bt_bmc, u8 *buf, size_t n) +{ + int i; + + for (i = 0; i < n; i++) + bt_write(bt_bmc, buf[i]); + return n; +} + +static void set_sms_atn(struct bt_bmc *bt_bmc) +{ + bt_outb(bt_bmc, BT_CTRL_SMS_ATN, BT_CTRL); +} + +static struct bt_bmc *file_bt_bmc(struct file *file) +{ + return container_of(file->private_data, struct bt_bmc, miscdev); +} + +static int bt_bmc_open(struct inode *inode, struct file *file) +{ + struct bt_bmc *bt_bmc = file_bt_bmc(file); + + if (atomic_inc_return(&open_count) == 1) { + clr_b_busy(bt_bmc); + return 0; + } + + atomic_dec(&open_count); + return -EBUSY; +} + +/* + * The BT (Block Transfer) interface means that entire messages are + * buffered by the host before a notification is sent to the BMC that + * there is data to be read. The first byte is the length and the + * message data follows. The read operation just tries to capture the + * whole before returning it to userspace. 
+ * + * BT Message format : + * + * Byte 1 Byte 2 Byte 3 Byte 4 Byte 5:N + * Length NetFn/LUN Seq Cmd Data + * + */ +static ssize_t bt_bmc_read(struct file *file, char __user *buf, + size_t count, loff_t *ppos) +{ + struct bt_bmc *bt_bmc = file_bt_bmc(file); + u8 len; + int len_byte = 1; + u8 kbuffer[BT_BMC_BUFFER_SIZE]; + ssize_t ret = 0; + ssize_t nread; + + if (!access_ok(VERIFY_WRITE, buf, count)) + return -EFAULT; + + WARN_ON(*ppos); + + if (wait_event_interruptible(bt_bmc->queue, + bt_inb(bt_bmc, BT_CTRL) & BT_CTRL_H2B_ATN)) + return -ERESTARTSYS; + + mutex_lock(&bt_bmc->mutex); + + if (unlikely(!(bt_inb(bt_bmc, BT_CTRL) & BT_CTRL_H2B_ATN))) { + ret = -EIO; + goto out_unlock; + } + + set_b_busy(bt_bmc); + clr_h2b_atn(bt_bmc); + clr_rd_ptr(bt_bmc); + + /* + * The BT frames start with the message length, which does not + * include the length byte. + */ + kbuffer[0] = bt_read(bt_bmc); + len = kbuffer[0]; + + /* We pass the length back to userspace as well */ + if (len + 1 > count) + len = count - 1; + + while (len) { + nread = min_t(ssize_t, len, sizeof(kbuffer) - len_byte); + + bt_readn(bt_bmc, kbuffer + len_byte, nread); + + if (copy_to_user(buf, kbuffer, nread + len_byte)) { + ret = -EFAULT; + break; + } + len -= nread; + buf += nread + len_byte; + ret += nread + len_byte; + len_byte = 0; + } + + clr_b_busy(bt_bmc); + +out_unlock: + mutex_unlock(&bt_bmc->mutex); + return ret; +} + +/* + * BT Message response format : + * + * Byte 1 Byte 2 Byte 3 Byte 4 Byte 5 Byte 6:N + * Length NetFn/LUN Seq Cmd Code Data + */ +static ssize_t bt_bmc_write(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + struct bt_bmc *bt_bmc = file_bt_bmc(file); + u8 kbuffer[BT_BMC_BUFFER_SIZE]; + ssize_t ret = 0; + ssize_t nwritten; + + /* + * send a minimum response size + */ + if (count < 5) + return -EINVAL; + + if (!access_ok(VERIFY_READ, buf, count)) + return -EFAULT; + + WARN_ON(*ppos); + + /* + * There's no interrupt for clearing bmc busy so we have to + * poll + */ + if (wait_event_interruptible(bt_bmc->queue, + !(bt_inb(bt_bmc, BT_CTRL) & + (BT_CTRL_H_BUSY | BT_CTRL_B2H_ATN)))) + return -ERESTARTSYS; + + mutex_lock(&bt_bmc->mutex); + + if (unlikely(bt_inb(bt_bmc, BT_CTRL) & + (BT_CTRL_H_BUSY | BT_CTRL_B2H_ATN))) { + ret = -EIO; + goto out_unlock; + } + + clr_wr_ptr(bt_bmc); + + while (count) { + nwritten = min_t(ssize_t, count, sizeof(kbuffer)); + if (copy_from_user(&kbuffer, buf, nwritten)) { + ret = -EFAULT; + break; + } + + bt_writen(bt_bmc, kbuffer, nwritten); + + count -= nwritten; + buf += nwritten; + ret += nwritten; + } + + set_b2h_atn(bt_bmc); + +out_unlock: + mutex_unlock(&bt_bmc->mutex); + return ret; +} + +static long bt_bmc_ioctl(struct file *file, unsigned int cmd, + unsigned long param) +{ + struct bt_bmc *bt_bmc = file_bt_bmc(file); + + switch (cmd) { + case BT_BMC_IOCTL_SMS_ATN: + set_sms_atn(bt_bmc); + return 0; + } + return -EINVAL; +} + +static int bt_bmc_release(struct inode *inode, struct file *file) +{ + struct bt_bmc *bt_bmc = file_bt_bmc(file); + + atomic_dec(&open_count); + set_b_busy(bt_bmc); + return 0; +} + +static unsigned int bt_bmc_poll(struct file *file, poll_table *wait) +{ + struct bt_bmc *bt_bmc = file_bt_bmc(file); + unsigned int mask = 0; + u8 ctrl; + + poll_wait(file, &bt_bmc->queue, wait); + + ctrl = bt_inb(bt_bmc, BT_CTRL); + + if (ctrl & BT_CTRL_H2B_ATN) + mask |= POLLIN; + + if (!(ctrl & (BT_CTRL_H_BUSY | BT_CTRL_B2H_ATN))) + mask |= POLLOUT; + + return mask; +} + +static const struct file_operations bt_bmc_fops = { + .owner = 
THIS_MODULE, + .open = bt_bmc_open, + .read = bt_bmc_read, + .write = bt_bmc_write, + .release = bt_bmc_release, + .poll = bt_bmc_poll, + .unlocked_ioctl = bt_bmc_ioctl, +}; + +static void poll_timer(unsigned long data) +{ + struct bt_bmc *bt_bmc = (void *)data; + + bt_bmc->poll_timer.expires += msecs_to_jiffies(500); + wake_up(&bt_bmc->queue); + add_timer(&bt_bmc->poll_timer); +} + +static irqreturn_t bt_bmc_irq(int irq, void *arg) +{ + struct bt_bmc *bt_bmc = arg; + u32 reg; + + reg = ioread32(bt_bmc->base + BT_CR2); + reg &= BT_CR2_IRQ_H2B | BT_CR2_IRQ_HBUSY; + if (!reg) + return IRQ_NONE; + + /* ack pending IRQs */ + iowrite32(reg, bt_bmc->base + BT_CR2); + + wake_up(&bt_bmc->queue); + return IRQ_HANDLED; +} + +static int bt_bmc_config_irq(struct bt_bmc *bt_bmc, + struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + u32 reg; + int rc; + + bt_bmc->irq = platform_get_irq(pdev, 0); + if (!bt_bmc->irq) + return -ENODEV; + + rc = devm_request_irq(dev, bt_bmc->irq, bt_bmc_irq, IRQF_SHARED, + DEVICE_NAME, bt_bmc); + if (rc < 0) { + dev_warn(dev, "Unable to request IRQ %d\n", bt_bmc->irq); + bt_bmc->irq = 0; + return rc; + } + + /* + * Configure IRQs on the bmc clearing the H2B and HBUSY bits; + * H2B will be asserted when the bmc has data for us; HBUSY + * will be cleared (along with B2H) when we can write the next + * message to the BT buffer + */ + reg = ioread32(bt_bmc->base + BT_CR1); + reg |= BT_CR1_IRQ_H2B | BT_CR1_IRQ_HBUSY; + iowrite32(reg, bt_bmc->base + BT_CR1); + + return 0; +} + +static int bt_bmc_probe(struct platform_device *pdev) +{ + struct bt_bmc *bt_bmc; + struct device *dev; + struct resource *res; + int rc; + + if (!pdev || !pdev->dev.of_node) + return -ENODEV; + + dev = &pdev->dev; + dev_info(dev, "Found bt bmc device\n"); + + bt_bmc = devm_kzalloc(dev, sizeof(*bt_bmc), GFP_KERNEL); + if (!bt_bmc) + return -ENOMEM; + + dev_set_drvdata(&pdev->dev, bt_bmc); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(dev, "Unable to find resources\n"); + return -ENXIO; + } + + bt_bmc->base = devm_ioremap_resource(&pdev->dev, res); + if (!bt_bmc->base) + return -ENOMEM; + + mutex_init(&bt_bmc->mutex); + init_waitqueue_head(&bt_bmc->queue); + + bt_bmc->miscdev.minor = MISC_DYNAMIC_MINOR, + bt_bmc->miscdev.name = DEVICE_NAME, + bt_bmc->miscdev.fops = &bt_bmc_fops, + bt_bmc->miscdev.parent = dev; + rc = misc_register(&bt_bmc->miscdev); + if (rc) { + dev_err(dev, "Unable to register misc device\n"); + return rc; + } + + bt_bmc_config_irq(bt_bmc, pdev); + + if (bt_bmc->irq) { + dev_info(dev, "Using IRQ %d\n", bt_bmc->irq); + } else { + dev_info(dev, "No IRQ; using timer\n"); + setup_timer(&bt_bmc->poll_timer, poll_timer, + (unsigned long)bt_bmc); + bt_bmc->poll_timer.expires = jiffies + msecs_to_jiffies(10); + add_timer(&bt_bmc->poll_timer); + } + + iowrite32((BT_IO_BASE << BT_CR0_IO_BASE) | + (BT_IRQ << BT_CR0_IRQ) | + BT_CR0_EN_CLR_SLV_RDP | + BT_CR0_EN_CLR_SLV_WRP | + BT_CR0_ENABLE_IBT, + bt_bmc->base + BT_CR0); + + clr_b_busy(bt_bmc); + + return 0; +} + +static int bt_bmc_remove(struct platform_device *pdev) +{ + struct bt_bmc *bt_bmc = dev_get_drvdata(&pdev->dev); + + misc_deregister(&bt_bmc->miscdev); + if (!bt_bmc->irq) + del_timer_sync(&bt_bmc->poll_timer); + return 0; +} + +static const struct of_device_id bt_bmc_match[] = { + { .compatible = "aspeed,ast2400-bt-bmc" }, + { }, +}; + +static struct platform_driver bt_bmc_driver = { + .driver = { + .name = DEVICE_NAME, + .of_match_table = bt_bmc_match, + }, + .probe = bt_bmc_probe, + 
.remove = bt_bmc_remove, +}; + +module_platform_driver(bt_bmc_driver); + +MODULE_DEVICE_TABLE(of, bt_bmc_match); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Alistair Popple "); +MODULE_DESCRIPTION("Linux device interface to the BT interface"); diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild index 185f8ea2702f..17b12942c67d 100644 --- a/include/uapi/linux/Kbuild +++ b/include/uapi/linux/Kbuild @@ -74,6 +74,7 @@ header-y += bpf_common.h header-y += bpf.h header-y += bpqether.h header-y += bsg.h +header-y += bt-bmc.h header-y += btrfs.h header-y += can.h header-y += capability.h diff --git a/include/uapi/linux/bt-bmc.h b/include/uapi/linux/bt-bmc.h new file mode 100644 index 000000000000..d9ec766a63d0 --- /dev/null +++ b/include/uapi/linux/bt-bmc.h @@ -0,0 +1,18 @@ +/* + * Copyright (c) 2015-2016, IBM Corporation. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#ifndef _UAPI_LINUX_BT_BMC_H +#define _UAPI_LINUX_BT_BMC_H + +#include + +#define __BT_BMC_IOCTL_MAGIC 0xb1 +#define BT_BMC_IOCTL_SMS_ATN _IO(__BT_BMC_IOCTL_MAGIC, 0x00) + +#endif /* _UAPI_LINUX_BT_BMC_H */ -- cgit v1.2.3-59-g8ed1b From 1a377a79211a08c5c8a05c0b6dee6d5b13ef4107 Mon Sep 17 00:00:00 2001 From: Joel Stanley Date: Wed, 21 Sep 2016 19:35:53 +0930 Subject: ipmi: Fix ioremap error handling in bt-bmc MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit devm_ioremap_resource returns ERR_PTR so we can't check for NULL. Signed-off-by: Joel Stanley Acked-by: Cédric Le Goater Signed-off-by: Corey Minyard --- drivers/char/ipmi/bt-bmc.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/char/ipmi/bt-bmc.c b/drivers/char/ipmi/bt-bmc.c index 2e880bf0be26..de64bf1f2f4d 100644 --- a/drivers/char/ipmi/bt-bmc.c +++ b/drivers/char/ipmi/bt-bmc.c @@ -438,8 +438,8 @@ static int bt_bmc_probe(struct platform_device *pdev) } bt_bmc->base = devm_ioremap_resource(&pdev->dev, res); - if (!bt_bmc->base) - return -ENOMEM; + if (IS_ERR(bt_bmc->base)) + return PTR_ERR(bt_bmc->base); mutex_init(&bt_bmc->mutex); init_waitqueue_head(&bt_bmc->queue); -- cgit v1.2.3-59-g8ed1b From a3e6061bad6292f2d5be3c1c4ccf1fa136517dec Mon Sep 17 00:00:00 2001 From: Cédric Le Goater Date: Wed, 21 Sep 2016 12:24:34 +0200 Subject: ipmi/bt-bmc: add a dependency on ARCH_ASPEED MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Cédric Le Goater Signed-off-by: Corey Minyard --- drivers/char/ipmi/Kconfig | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/char/ipmi/Kconfig b/drivers/char/ipmi/Kconfig index 2c234e3e7513..7f816655cbbf 100644 --- a/drivers/char/ipmi/Kconfig +++ b/drivers/char/ipmi/Kconfig @@ -78,6 +78,7 @@ config IPMI_POWEROFF endif # IPMI_HANDLER config ASPEED_BT_IPMI_BMC + depends on ARCH_ASPEED tristate "BT IPMI bmc driver" help Provides a driver for the BT (Block Transfer) IPMI interface -- cgit v1.2.3-59-g8ed1b From 6861285ce8f8a9a55aab4775847de6e48b2dc7f2 Mon Sep 17 00:00:00 2001 From: Zang Leigang Date: Thu, 25 Aug 2016 17:39:19 +0800 Subject: scsi: ufs: Data Segment only needed for WRITE DESCRIPTOR Some devices have problems handling a Query UPIU with Data Segment set. Only set it for WRITE DESCRIPTOR commands. 
[mkp: updated patch description] Signed-off-by: Zang Leigang Reviewed-by: Subhash Jadavani Reviewed-by: Kiwoong Kim Signed-off-by: Martin K. Petersen --- drivers/scsi/ufs/ufshcd.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 37f3c51e9d92..d2ec42512641 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -1266,9 +1266,12 @@ static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba, ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD( 0, query->request.query_func, 0, 0); - /* Data segment length */ - ucd_req_ptr->header.dword_2 = UPIU_HEADER_DWORD( - 0, 0, len >> 8, (u8)len); + /* Data segment length only need for WRITE_DESC */ + if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC) + ucd_req_ptr->header.dword_2 = + UPIU_HEADER_DWORD(0, 0, (len >> 8), (u8)len); + else + ucd_req_ptr->header.dword_2 = 0; /* Copy the Query Request buffer as is */ memcpy(&ucd_req_ptr->qr, &query->request.upiu_req, -- cgit v1.2.3-59-g8ed1b From 73811c942954a1d2470656d1bd9e990e327ac580 Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Wed, 28 Sep 2016 14:49:42 +0000 Subject: scsi: ufs: Fix error return code in ufshcd_init() Fix to return a negative error code from the error handling case instead of 0, as done elsewhere in this function. Signed-off-by: Wei Yongjun Reviewed-by: Subhash Jadavani Reviewed-by: Kiwoong Kim Signed-off-by: Martin K. Petersen --- drivers/scsi/ufs/ufshcd.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index d2ec42512641..05c745663c10 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -6503,6 +6503,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) if (IS_ERR(hba->devfreq)) { dev_err(hba->dev, "Unable to register with devfreq %ld\n", PTR_ERR(hba->devfreq)); + err = PTR_ERR(hba->devfreq); goto out_remove_scsi_host; } /* Suspend devfreq until the UFS device is detected */ -- cgit v1.2.3-59-g8ed1b From c47946c2dda311216ac11e981809b2d70609258f Mon Sep 17 00:00:00 2001 From: Ondrej Zary Date: Tue, 27 Sep 2016 21:00:23 +0200 Subject: scsi: g_NCR5380: Remove deprecated __setup Remove deprecated __setup for parsing command line parameters. g_NCR5380.* parameters could be used instead. This might break existing setups with g_NCR5380 built-in (if there are any). But it has to go in order to remove the overrides[] array. Signed-off-by: Ondrej Zary Acked-by: Finn Thain Signed-off-by: Martin K. Petersen --- Documentation/scsi/g_NCR5380.txt | 10 --- drivers/scsi/g_NCR5380.c | 135 --------------------------------------- 2 files changed, 145 deletions(-) (limited to 'drivers') diff --git a/Documentation/scsi/g_NCR5380.txt b/Documentation/scsi/g_NCR5380.txt index fd880150aeea..843cbae88ebb 100644 --- a/Documentation/scsi/g_NCR5380.txt +++ b/Documentation/scsi/g_NCR5380.txt @@ -21,16 +21,6 @@ NCR53c400 card, the Trantor T130B in its default configuration: The NCR53c400 does not support DMA but it does have Pseudo-DMA which is supported by the driver. -If the default configuration does not work for you, you can use the kernel -command lines (eg using the lilo append command): - ncr5380=addr,irq - ncr53c400=addr,irq - ncr53c400a=addr,irq - dtc3181e=addr,irq - -The driver does not probe for any addresses or ports other than those in -the OVERRIDE or given to the kernel as above. 
- This driver provides some information on what it has detected in /proc/scsi/g_NCR5380/x where x is the scsi card number as detected at boot time. More info to come in the future. diff --git a/drivers/scsi/g_NCR5380.c b/drivers/scsi/g_NCR5380.c index 516bd6c4f442..7e50b44eb36a 100644 --- a/drivers/scsi/g_NCR5380.c +++ b/drivers/scsi/g_NCR5380.c @@ -56,136 +56,6 @@ static struct override { #define NO_OVERRIDES ARRAY_SIZE(overrides) -#ifndef MODULE - -/** - * internal_setup - handle lilo command string override - * @board: BOARD_* identifier for the board - * @str: unused - * @ints: numeric parameters - * - * Do LILO command line initialization of the overrides array. Display - * errors when needed - * - * Locks: none - */ - -static void __init internal_setup(int board, char *str, int *ints) -{ - static int commandline_current; - switch (board) { - case BOARD_NCR5380: - if (ints[0] != 2 && ints[0] != 3) { - printk(KERN_ERR "generic_NCR5380_setup : usage ncr5380=" STRVAL(NCR5380_map_name) ",irq,dma\n"); - return; - } - break; - case BOARD_NCR53C400: - if (ints[0] != 2) { - printk(KERN_ERR "generic_NCR53C400_setup : usage ncr53c400=" STRVAL(NCR5380_map_name) ",irq\n"); - return; - } - break; - case BOARD_NCR53C400A: - if (ints[0] != 2) { - printk(KERN_ERR "generic_NCR53C400A_setup : usage ncr53c400a=" STRVAL(NCR5380_map_name) ",irq\n"); - return; - } - break; - case BOARD_DTC3181E: - if (ints[0] != 2) { - printk("generic_DTC3181E_setup : usage dtc3181e=" STRVAL(NCR5380_map_name) ",irq\n"); - return; - } - break; - } - - if (commandline_current < NO_OVERRIDES) { - overrides[commandline_current].NCR5380_map_name = (NCR5380_map_type) ints[1]; - overrides[commandline_current].irq = ints[2]; - if (ints[0] == 3) - overrides[commandline_current].dma = ints[3]; - else - overrides[commandline_current].dma = DMA_NONE; - overrides[commandline_current].board = board; - ++commandline_current; - } -} - - -/** - * do_NCR53C80_setup - set up entry point - * @str: unused - * - * Setup function invoked at boot to parse the ncr5380= command - * line. - */ - -static int __init do_NCR5380_setup(char *str) -{ - int ints[10]; - - get_options(str, ARRAY_SIZE(ints), ints); - internal_setup(BOARD_NCR5380, str, ints); - return 1; -} - -/** - * do_NCR53C400_setup - set up entry point - * @str: unused - * @ints: integer parameters from kernel setup code - * - * Setup function invoked at boot to parse the ncr53c400= command - * line. - */ - -static int __init do_NCR53C400_setup(char *str) -{ - int ints[10]; - - get_options(str, ARRAY_SIZE(ints), ints); - internal_setup(BOARD_NCR53C400, str, ints); - return 1; -} - -/** - * do_NCR53C400A_setup - set up entry point - * @str: unused - * @ints: integer parameters from kernel setup code - * - * Setup function invoked at boot to parse the ncr53c400a= command - * line. - */ - -static int __init do_NCR53C400A_setup(char *str) -{ - int ints[10]; - - get_options(str, ARRAY_SIZE(ints), ints); - internal_setup(BOARD_NCR53C400A, str, ints); - return 1; -} - -/** - * do_DTC3181E_setup - set up entry point - * @str: unused - * @ints: integer parameters from kernel setup code - * - * Setup function invoked at boot to parse the dtc3181e= command - * line. 
- */ - -static int __init do_DTC3181E_setup(char *str) -{ - int ints[10]; - - get_options(str, ARRAY_SIZE(ints), ints); - internal_setup(BOARD_DTC3181E, str, ints); - return 1; -} - -#endif - #ifndef SCSI_G_NCR5380_MEM /* * Configure I/O address of 53C400A or DTC436 by writing magic numbers @@ -741,8 +611,3 @@ static struct isapnp_device_id id_table[] = { MODULE_DEVICE_TABLE(isapnp, id_table); #endif - -__setup("ncr5380=", do_NCR5380_setup); -__setup("ncr53c400=", do_NCR53C400_setup); -__setup("ncr53c400a=", do_NCR53C400A_setup); -__setup("dtc3181e=", do_DTC3181E_setup); -- cgit v1.2.3-59-g8ed1b From d91f5afe595a3c4bdc1b755225024862c2e917f0 Mon Sep 17 00:00:00 2001 From: Ondrej Zary Date: Tue, 27 Sep 2016 21:00:24 +0200 Subject: scsi: g_NCR5380: Reduce overrides[] from array to struct Remove compile-time card type definition GENERIC_NCR5380_OVERRIDE. Then remove all code iterating the overrides[] array and reduce it to struct card. Signed-off-by: Ondrej Zary Acked-by: Finn Thain Signed-off-by: Martin K. Petersen --- drivers/scsi/g_NCR5380.c | 351 ++++++++++++++++++++++------------------------- 1 file changed, 167 insertions(+), 184 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/g_NCR5380.c b/drivers/scsi/g_NCR5380.c index 7e50b44eb36a..5162de61a92f 100644 --- a/drivers/scsi/g_NCR5380.c +++ b/drivers/scsi/g_NCR5380.c @@ -42,19 +42,12 @@ static int ncr_53c400a; static int dtc_3181e; static int hp_c2502; -static struct override { +static struct card { NCR5380_map_type NCR5380_map_name; int irq; int dma; int board; /* Use NCR53c400, Ricoh, etc. extensions ? */ -} overrides -#ifdef GENERIC_NCR5380_OVERRIDE -[] __initdata = GENERIC_NCR5380_OVERRIDE; -#else -[1] __initdata = { { 0,},}; -#endif - -#define NO_OVERRIDES ARRAY_SIZE(overrides) +} card; #ifndef SCSI_G_NCR5380_MEM /* @@ -85,16 +78,13 @@ static void magic_configure(int idx, u8 irq, u8 magic[]) * @tpnt: the scsi template * * Scan for the present of NCR5380, NCR53C400, NCR53C400A, DTC3181E - * and DTC436(ISAPnP) controllers. If overrides have been set we use - * them. + * and DTC436(ISAPnP) controllers. 
* * Locks: none */ static int __init generic_NCR5380_detect(struct scsi_host_template *tpnt) { - static int current_override; - int count; unsigned int *ports; u8 *magic = NULL; #ifndef SCSI_G_NCR5380_MEM @@ -124,28 +114,25 @@ static int __init generic_NCR5380_detect(struct scsi_host_template *tpnt) #endif if (ncr_irq) - overrides[0].irq = ncr_irq; + card.irq = ncr_irq; if (ncr_dma) - overrides[0].dma = ncr_dma; + card.dma = ncr_dma; if (ncr_addr) - overrides[0].NCR5380_map_name = (NCR5380_map_type) ncr_addr; + card.NCR5380_map_name = (NCR5380_map_type) ncr_addr; if (ncr_5380) - overrides[0].board = BOARD_NCR5380; + card.board = BOARD_NCR5380; else if (ncr_53c400) - overrides[0].board = BOARD_NCR53C400; + card.board = BOARD_NCR53C400; else if (ncr_53c400a) - overrides[0].board = BOARD_NCR53C400A; + card.board = BOARD_NCR53C400A; else if (dtc_3181e) - overrides[0].board = BOARD_DTC3181E; + card.board = BOARD_DTC3181E; else if (hp_c2502) - overrides[0].board = BOARD_HP_C2502; + card.board = BOARD_HP_C2502; #ifndef SCSI_G_NCR5380_MEM - if (!current_override && isapnp_present()) { + if (isapnp_present()) { struct pnp_dev *dev = NULL; - count = 0; while ((dev = pnp_find_dev(NULL, ISAPNP_VENDOR('D', 'T', 'C'), ISAPNP_FUNCTION(0x436e), dev))) { - if (count >= NO_OVERRIDES) - break; if (pnp_device_attach(dev) < 0) continue; if (pnp_activate_dev(dev) < 0) { @@ -159,202 +146,198 @@ static int __init generic_NCR5380_detect(struct scsi_host_template *tpnt) continue; } if (pnp_irq_valid(dev, 0)) - overrides[count].irq = pnp_irq(dev, 0); + card.irq = pnp_irq(dev, 0); else - overrides[count].irq = NO_IRQ; + card.irq = NO_IRQ; if (pnp_dma_valid(dev, 0)) - overrides[count].dma = pnp_dma(dev, 0); + card.dma = pnp_dma(dev, 0); else - overrides[count].dma = DMA_NONE; - overrides[count].NCR5380_map_name = (NCR5380_map_type) pnp_port_start(dev, 0); - overrides[count].board = BOARD_DTC3181E; - count++; + card.dma = DMA_NONE; + card.NCR5380_map_name = (NCR5380_map_type) pnp_port_start(dev, 0); + card.board = BOARD_DTC3181E; + break; } } #endif - for (count = 0; current_override < NO_OVERRIDES; ++current_override) { - if (!(overrides[current_override].NCR5380_map_name)) - continue; + if (!(card.NCR5380_map_name)) + return 0; - ports = NULL; - flags = 0; - switch (overrides[current_override].board) { - case BOARD_NCR5380: - flags = FLAG_NO_PSEUDO_DMA | FLAG_DMA_FIXUP; - break; - case BOARD_NCR53C400A: - ports = ncr_53c400a_ports; - magic = ncr_53c400a_magic; - break; - case BOARD_HP_C2502: - ports = ncr_53c400a_ports; - magic = hp_c2502_magic; - break; - case BOARD_DTC3181E: - ports = dtc_3181e_ports; - magic = ncr_53c400a_magic; - break; - } + ports = NULL; + flags = 0; + switch (card.board) { + case BOARD_NCR5380: + flags = FLAG_NO_PSEUDO_DMA | FLAG_DMA_FIXUP; + break; + case BOARD_NCR53C400A: + ports = ncr_53c400a_ports; + magic = ncr_53c400a_magic; + break; + case BOARD_HP_C2502: + ports = ncr_53c400a_ports; + magic = hp_c2502_magic; + break; + case BOARD_DTC3181E: + ports = dtc_3181e_ports; + magic = ncr_53c400a_magic; + break; + } #ifndef SCSI_G_NCR5380_MEM - if (ports && magic) { - /* wakeup sequence for the NCR53C400A and DTC3181E */ - - /* Disable the adapter and look for a free io port */ - magic_configure(-1, 0, magic); - - region_size = 16; - - if (overrides[current_override].NCR5380_map_name != PORT_AUTO) - for (i = 0; ports[i]; i++) { - if (!request_region(ports[i], region_size, "ncr53c80")) - continue; - if (overrides[current_override].NCR5380_map_name == ports[i]) - break; - release_region(ports[i], 
region_size); - } else - for (i = 0; ports[i]; i++) { - if (!request_region(ports[i], region_size, "ncr53c80")) - continue; - if (inb(ports[i]) == 0xff) - break; - release_region(ports[i], region_size); - } - if (ports[i]) { - /* At this point we have our region reserved */ - magic_configure(i, 0, magic); /* no IRQ yet */ - outb(0xc0, ports[i] + 9); - if (inb(ports[i] + 9) != 0x80) + if (ports && magic) { + /* wakeup sequence for the NCR53C400A and DTC3181E */ + + /* Disable the adapter and look for a free io port */ + magic_configure(-1, 0, magic); + + region_size = 16; + + if (card.NCR5380_map_name != PORT_AUTO) + for (i = 0; ports[i]; i++) { + if (!request_region(ports[i], region_size, "ncr53c80")) continue; - overrides[current_override].NCR5380_map_name = ports[i]; - port_idx = i; - } else - continue; - } - else - { - /* Not a 53C400A style setup - just grab */ - region_size = 8; - if (!request_region(overrides[current_override].NCR5380_map_name, - region_size, "ncr5380")) - continue; - } + if (card.NCR5380_map_name == ports[i]) + break; + release_region(ports[i], region_size); + } else + for (i = 0; ports[i]; i++) { + if (!request_region(ports[i], region_size, "ncr53c80")) + continue; + if (inb(ports[i]) == 0xff) + break; + release_region(ports[i], region_size); + } + if (ports[i]) { + /* At this point we have our region reserved */ + magic_configure(i, 0, magic); /* no IRQ yet */ + outb(0xc0, ports[i] + 9); + if (inb(ports[i] + 9) != 0x80) + return 0; + card.NCR5380_map_name = ports[i]; + port_idx = i; + } else + return 0; + } + else + { + /* Not a 53C400A style setup - just grab */ + region_size = 8; + if (!request_region(card.NCR5380_map_name, + region_size, "ncr5380")) + return 0; + } #else - base = overrides[current_override].NCR5380_map_name; - iomem_size = NCR53C400_region_size; - if (!request_mem_region(base, iomem_size, "ncr5380")) - continue; - iomem = ioremap(base, iomem_size); - if (!iomem) { - release_mem_region(base, iomem_size); - continue; - } + base = card.NCR5380_map_name; + iomem_size = NCR53C400_region_size; + if (!request_mem_region(base, iomem_size, "ncr5380")) + return 0; + iomem = ioremap(base, iomem_size); + if (!iomem) { + release_mem_region(base, iomem_size); + return 0; + } #endif - instance = scsi_register(tpnt, sizeof(struct NCR5380_hostdata)); - if (instance == NULL) - goto out_release; - hostdata = shost_priv(instance); + instance = scsi_register(tpnt, sizeof(struct NCR5380_hostdata)); + if (instance == NULL) + goto out_release; + hostdata = shost_priv(instance); #ifndef SCSI_G_NCR5380_MEM - instance->io_port = overrides[current_override].NCR5380_map_name; - instance->n_io_port = region_size; - hostdata->io_width = 1; /* 8-bit PDMA by default */ - - /* - * On NCR53C400 boards, NCR5380 registers are mapped 8 past - * the base address. - */ - switch (overrides[current_override].board) { - case BOARD_NCR53C400: - instance->io_port += 8; - hostdata->c400_ctl_status = 0; - hostdata->c400_blk_cnt = 1; - hostdata->c400_host_buf = 4; - break; - case BOARD_DTC3181E: - hostdata->io_width = 2; /* 16-bit PDMA */ - /* fall through */ - case BOARD_NCR53C400A: - case BOARD_HP_C2502: - hostdata->c400_ctl_status = 9; - hostdata->c400_blk_cnt = 10; - hostdata->c400_host_buf = 8; - break; - } + instance->io_port = card.NCR5380_map_name; + instance->n_io_port = region_size; + hostdata->io_width = 1; /* 8-bit PDMA by default */ + + /* + * On NCR53C400 boards, NCR5380 registers are mapped 8 past + * the base address. 
+ */ + switch (card.board) { + case BOARD_NCR53C400: + instance->io_port += 8; + hostdata->c400_ctl_status = 0; + hostdata->c400_blk_cnt = 1; + hostdata->c400_host_buf = 4; + break; + case BOARD_DTC3181E: + hostdata->io_width = 2; /* 16-bit PDMA */ + /* fall through */ + case BOARD_NCR53C400A: + case BOARD_HP_C2502: + hostdata->c400_ctl_status = 9; + hostdata->c400_blk_cnt = 10; + hostdata->c400_host_buf = 8; + break; + } #else - instance->base = overrides[current_override].NCR5380_map_name; - hostdata->iomem = iomem; - hostdata->iomem_size = iomem_size; - switch (overrides[current_override].board) { - case BOARD_NCR53C400: - hostdata->c400_ctl_status = 0x100; - hostdata->c400_blk_cnt = 0x101; - hostdata->c400_host_buf = 0x104; - break; - case BOARD_DTC3181E: - case BOARD_NCR53C400A: - case BOARD_HP_C2502: - pr_err(DRV_MODULE_NAME ": unknown register offsets\n"); - goto out_unregister; - } + instance->base = card.NCR5380_map_name; + hostdata->iomem = iomem; + hostdata->iomem_size = iomem_size; + switch (card.board) { + case BOARD_NCR53C400: + hostdata->c400_ctl_status = 0x100; + hostdata->c400_blk_cnt = 0x101; + hostdata->c400_host_buf = 0x104; + break; + case BOARD_DTC3181E: + case BOARD_NCR53C400A: + case BOARD_HP_C2502: + pr_err(DRV_MODULE_NAME ": unknown register offsets\n"); + goto out_unregister; + } #endif - if (NCR5380_init(instance, flags | FLAG_LATE_DMA_SETUP)) - goto out_unregister; + if (NCR5380_init(instance, flags | FLAG_LATE_DMA_SETUP)) + goto out_unregister; - switch (overrides[current_override].board) { - case BOARD_NCR53C400: - case BOARD_DTC3181E: - case BOARD_NCR53C400A: - case BOARD_HP_C2502: - NCR5380_write(hostdata->c400_ctl_status, CSR_BASE); - } + switch (card.board) { + case BOARD_NCR53C400: + case BOARD_DTC3181E: + case BOARD_NCR53C400A: + case BOARD_HP_C2502: + NCR5380_write(hostdata->c400_ctl_status, CSR_BASE); + } - NCR5380_maybe_reset_bus(instance); + NCR5380_maybe_reset_bus(instance); - if (overrides[current_override].irq != IRQ_AUTO) - instance->irq = overrides[current_override].irq; - else - instance->irq = NCR5380_probe_irq(instance, 0xffff); + if (card.irq != IRQ_AUTO) + instance->irq = card.irq; + else + instance->irq = NCR5380_probe_irq(instance, 0xffff); - /* Compatibility with documented NCR5380 kernel parameters */ - if (instance->irq == 255) - instance->irq = NO_IRQ; + /* Compatibility with documented NCR5380 kernel parameters */ + if (instance->irq == 255) + instance->irq = NO_IRQ; - if (instance->irq != NO_IRQ) { + if (instance->irq != NO_IRQ) { #ifndef SCSI_G_NCR5380_MEM - /* set IRQ for HP C2502 */ - if (overrides[current_override].board == BOARD_HP_C2502) - magic_configure(port_idx, instance->irq, magic); + /* set IRQ for HP C2502 */ + if (card.board == BOARD_HP_C2502) + magic_configure(port_idx, instance->irq, magic); #endif - if (request_irq(instance->irq, generic_NCR5380_intr, - 0, "NCR5380", instance)) { - printk(KERN_WARNING "scsi%d : IRQ%d not free, interrupts disabled\n", instance->host_no, instance->irq); - instance->irq = NO_IRQ; - } - } - - if (instance->irq == NO_IRQ) { - printk(KERN_INFO "scsi%d : interrupts not enabled. 
for better interactive performance,\n", instance->host_no); - printk(KERN_INFO "scsi%d : please jumper the board for a free IRQ.\n", instance->host_no); + if (request_irq(instance->irq, generic_NCR5380_intr, + 0, "NCR5380", instance)) { + printk(KERN_WARNING "scsi%d : IRQ%d not free, interrupts disabled\n", instance->host_no, instance->irq); + instance->irq = NO_IRQ; } + } - ++current_override; - ++count; + if (instance->irq == NO_IRQ) { + printk(KERN_INFO "scsi%d : interrupts not enabled. for better interactive performance,\n", instance->host_no); + printk(KERN_INFO "scsi%d : please jumper the board for a free IRQ.\n", instance->host_no); } - return count; + + return 1; out_unregister: scsi_unregister(instance); out_release: #ifndef SCSI_G_NCR5380_MEM - release_region(overrides[current_override].NCR5380_map_name, region_size); + release_region(card.NCR5380_map_name, region_size); #else iounmap(iomem); release_mem_region(base, iomem_size); #endif - return count; + return 0; } /** -- cgit v1.2.3-59-g8ed1b From a8cfbcaec0c1214ddebf1b7a39aa0df545fbbf90 Mon Sep 17 00:00:00 2001 From: Ondrej Zary Date: Tue, 27 Sep 2016 21:00:25 +0200 Subject: scsi: g_NCR5380: Stop using scsi_module.c Convert g_NCR5380 to use scsi_add_host instead of scsi_module.c Use pnp_driver and isa_driver to manage cards. In order to support multiple cards, new module parameter format is introduced. The old parameters are kept for compatibility. Signed-off-by: Ondrej Zary Acked-by: Finn Thain Signed-off-by: Martin K. Petersen --- Documentation/scsi/g_NCR5380.txt | 24 ++- drivers/scsi/g_NCR5380.c | 335 +++++++++++++++++++++++---------------- drivers/scsi/g_NCR5380.h | 8 - 3 files changed, 215 insertions(+), 152 deletions(-) (limited to 'drivers') diff --git a/Documentation/scsi/g_NCR5380.txt b/Documentation/scsi/g_NCR5380.txt index 843cbae88ebb..e2c187947e58 100644 --- a/Documentation/scsi/g_NCR5380.txt +++ b/Documentation/scsi/g_NCR5380.txt @@ -28,6 +28,16 @@ time. More info to come in the future. This driver works as a module. When included as a module, parameters can be passed on the insmod/modprobe command line: + irq=xx[,...] the interrupt(s) + base=xx[,...] the port or base address(es) (for port or memory mapped, resp.) + card=xx[,...] card type(s): + 0 = NCR5380, + 1 = NCR53C400, + 2 = NCR53C400A, + 3 = Domex Technology Corp 3181E (DTC3181E) + 4 = Hewlett Packard C2502 + +These old-style parameters can support only one card: ncr_irq=xx the interrupt ncr_addr=xx the port or base address (for port or memory mapped, resp.) @@ -36,11 +46,19 @@ command line: ncr_53c400a=1 to set up for a NCR53C400A board dtc_3181e=1 to set up for a Domex Technology Corp 3181E board hp_c2502=1 to set up for a Hewlett Packard C2502 board + e.g. -modprobe g_NCR5380 ncr_irq=5 ncr_addr=0x350 ncr_5380=1 +OLD: modprobe g_NCR5380 ncr_irq=5 ncr_addr=0x350 ncr_5380=1 +NEW: modprobe g_NCR5380 irq=5 base=0x350 card=0 for a port mapped NCR5380 board or -modprobe g_NCR5380 ncr_irq=255 ncr_addr=0xc8000 ncr_53c400=1 - for a memory mapped NCR53C400 board with interrupts disabled. + +OLD: modprobe g_NCR5380 ncr_irq=255 ncr_addr=0xc8000 ncr_53c400=1 +NEW: modprobe g_NCR5380 irq=255 base=0xc8000 card=1 + for a memory mapped NCR53C400 board with interrupts disabled or + +NEW: modprobe g_NCR5380 irq=0,7 base=0x240,0x300 card=3,4 + for two cards: DTC3181 (in non-PnP mode) at 0x240 with no IRQ + and HP C2502 at 0x300 with IRQ 7 (255 should be specified for no or DMA interrupt, 254 to autoprobe for an IRQ line if overridden on the command line.) 
diff --git a/drivers/scsi/g_NCR5380.c b/drivers/scsi/g_NCR5380.c index 5162de61a92f..cbf010324c18 100644 --- a/drivers/scsi/g_NCR5380.c +++ b/drivers/scsi/g_NCR5380.c @@ -30,24 +30,41 @@ #include "NCR5380.h" #include #include -#include +#include +#include #include +#define MAX_CARDS 8 + +/* old-style parameters for compatibility */ static int ncr_irq; -static int ncr_dma; static int ncr_addr; static int ncr_5380; static int ncr_53c400; static int ncr_53c400a; static int dtc_3181e; static int hp_c2502; +module_param(ncr_irq, int, 0); +module_param(ncr_addr, int, 0); +module_param(ncr_5380, int, 0); +module_param(ncr_53c400, int, 0); +module_param(ncr_53c400a, int, 0); +module_param(dtc_3181e, int, 0); +module_param(hp_c2502, int, 0); + +static int irq[] = { 0, 0, 0, 0, 0, 0, 0, 0 }; +module_param_array(irq, int, NULL, 0); +MODULE_PARM_DESC(irq, "IRQ number(s)"); -static struct card { - NCR5380_map_type NCR5380_map_name; - int irq; - int dma; - int board; /* Use NCR53c400, Ricoh, etc. extensions ? */ -} card; +static int base[] = { 0, 0, 0, 0, 0, 0, 0, 0 }; +module_param_array(base, int, NULL, 0); +MODULE_PARM_DESC(base, "base address(es)"); + +static int card[] = { -1, -1, -1, -1, -1, -1, -1, -1 }; +module_param_array(card, int, NULL, 0); +MODULE_PARM_DESC(card, "card type (0=NCR5380, 1=NCR53C400, 2=NCR53C400A, 3=DTC3181E, 4=HP C2502)"); + +MODULE_LICENSE("GPL"); #ifndef SCSI_G_NCR5380_MEM /* @@ -73,17 +90,8 @@ static void magic_configure(int idx, u8 irq, u8 magic[]) } #endif -/** - * generic_NCR5380_detect - look for NCR5380 controllers - * @tpnt: the scsi template - * - * Scan for the present of NCR5380, NCR53C400, NCR53C400A, DTC3181E - * and DTC436(ISAPnP) controllers. - * - * Locks: none - */ - -static int __init generic_NCR5380_detect(struct scsi_host_template *tpnt) +static int generic_NCR5380_init_one(struct scsi_host_template *tpnt, + struct device *pdev, int base, int irq, int board) { unsigned int *ports; u8 *magic = NULL; @@ -92,80 +100,29 @@ static int __init generic_NCR5380_detect(struct scsi_host_template *tpnt) int port_idx = -1; unsigned long region_size; #endif - static unsigned int __initdata ncr_53c400a_ports[] = { + static unsigned int ncr_53c400a_ports[] = { 0x280, 0x290, 0x300, 0x310, 0x330, 0x340, 0x348, 0x350, 0 }; - static unsigned int __initdata dtc_3181e_ports[] = { + static unsigned int dtc_3181e_ports[] = { 0x220, 0x240, 0x280, 0x2a0, 0x2c0, 0x300, 0x320, 0x340, 0 }; - static u8 ncr_53c400a_magic[] __initdata = { /* 53C400A & DTC436 */ + static u8 ncr_53c400a_magic[] = { /* 53C400A & DTC436 */ 0x59, 0xb9, 0xc5, 0xae, 0xa6 }; - static u8 hp_c2502_magic[] __initdata = { /* HP C2502 */ + static u8 hp_c2502_magic[] = { /* HP C2502 */ 0x0f, 0x22, 0xf0, 0x20, 0x80 }; - int flags; + int flags, ret; struct Scsi_Host *instance; struct NCR5380_hostdata *hostdata; #ifdef SCSI_G_NCR5380_MEM - unsigned long base; void __iomem *iomem; resource_size_t iomem_size; #endif - if (ncr_irq) - card.irq = ncr_irq; - if (ncr_dma) - card.dma = ncr_dma; - if (ncr_addr) - card.NCR5380_map_name = (NCR5380_map_type) ncr_addr; - if (ncr_5380) - card.board = BOARD_NCR5380; - else if (ncr_53c400) - card.board = BOARD_NCR53C400; - else if (ncr_53c400a) - card.board = BOARD_NCR53C400A; - else if (dtc_3181e) - card.board = BOARD_DTC3181E; - else if (hp_c2502) - card.board = BOARD_HP_C2502; -#ifndef SCSI_G_NCR5380_MEM - if (isapnp_present()) { - struct pnp_dev *dev = NULL; - while ((dev = pnp_find_dev(NULL, ISAPNP_VENDOR('D', 'T', 'C'), ISAPNP_FUNCTION(0x436e), dev))) { - if 
(pnp_device_attach(dev) < 0) - continue; - if (pnp_activate_dev(dev) < 0) { - printk(KERN_ERR "dtc436e probe: activate failed\n"); - pnp_device_detach(dev); - continue; - } - if (!pnp_port_valid(dev, 0)) { - printk(KERN_ERR "dtc436e probe: no valid port\n"); - pnp_device_detach(dev); - continue; - } - if (pnp_irq_valid(dev, 0)) - card.irq = pnp_irq(dev, 0); - else - card.irq = NO_IRQ; - if (pnp_dma_valid(dev, 0)) - card.dma = pnp_dma(dev, 0); - else - card.dma = DMA_NONE; - card.NCR5380_map_name = (NCR5380_map_type) pnp_port_start(dev, 0); - card.board = BOARD_DTC3181E; - break; - } - } -#endif - - if (!(card.NCR5380_map_name)) - return 0; - ports = NULL; flags = 0; - switch (card.board) { + switch (board) { case BOARD_NCR5380: flags = FLAG_NO_PSEUDO_DMA | FLAG_DMA_FIXUP; break; @@ -191,17 +148,20 @@ static int __init generic_NCR5380_detect(struct scsi_host_template *tpnt) magic_configure(-1, 0, magic); region_size = 16; - - if (card.NCR5380_map_name != PORT_AUTO) + if (base) for (i = 0; ports[i]; i++) { - if (!request_region(ports[i], region_size, "ncr53c80")) - continue; - if (card.NCR5380_map_name == ports[i]) + if (base == ports[i]) { /* index found */ + if (!request_region(ports[i], + region_size, + "ncr53c80")) + return -EBUSY; break; - release_region(ports[i], region_size); - } else + } + } + else for (i = 0; ports[i]; i++) { - if (!request_region(ports[i], region_size, "ncr53c80")) + if (!request_region(ports[i], region_size, + "ncr53c80")) continue; if (inb(ports[i]) == 0xff) break; @@ -211,39 +171,41 @@ static int __init generic_NCR5380_detect(struct scsi_host_template *tpnt) /* At this point we have our region reserved */ magic_configure(i, 0, magic); /* no IRQ yet */ outb(0xc0, ports[i] + 9); - if (inb(ports[i] + 9) != 0x80) - return 0; - card.NCR5380_map_name = ports[i]; + if (inb(ports[i] + 9) != 0x80) { + ret = -ENODEV; + goto out_release; + } + base = ports[i]; port_idx = i; } else - return 0; + return -EINVAL; } else { - /* Not a 53C400A style setup - just grab */ + /* NCR5380 - no configuration, just grab */ region_size = 8; - if (!request_region(card.NCR5380_map_name, - region_size, "ncr5380")) - return 0; + if (!base || !request_region(base, region_size, "ncr5380")) + return -EBUSY; } #else - base = card.NCR5380_map_name; iomem_size = NCR53C400_region_size; if (!request_mem_region(base, iomem_size, "ncr5380")) - return 0; + return -EBUSY; iomem = ioremap(base, iomem_size); if (!iomem) { release_mem_region(base, iomem_size); - return 0; + return -ENOMEM; } #endif - instance = scsi_register(tpnt, sizeof(struct NCR5380_hostdata)); - if (instance == NULL) + instance = scsi_host_alloc(tpnt, sizeof(struct NCR5380_hostdata)); + if (instance == NULL) { + ret = -ENOMEM; goto out_release; + } hostdata = shost_priv(instance); #ifndef SCSI_G_NCR5380_MEM - instance->io_port = card.NCR5380_map_name; + instance->io_port = base; instance->n_io_port = region_size; hostdata->io_width = 1; /* 8-bit PDMA by default */ @@ -251,7 +213,7 @@ static int __init generic_NCR5380_detect(struct scsi_host_template *tpnt) * On NCR53C400 boards, NCR5380 registers are mapped 8 past * the base address. 
*/ - switch (card.board) { + switch (board) { case BOARD_NCR53C400: instance->io_port += 8; hostdata->c400_ctl_status = 0; @@ -269,10 +231,10 @@ static int __init generic_NCR5380_detect(struct scsi_host_template *tpnt) break; } #else - instance->base = card.NCR5380_map_name; + instance->base = base; hostdata->iomem = iomem; hostdata->iomem_size = iomem_size; - switch (card.board) { + switch (board) { case BOARD_NCR53C400: hostdata->c400_ctl_status = 0x100; hostdata->c400_blk_cnt = 0x101; @@ -282,14 +244,16 @@ static int __init generic_NCR5380_detect(struct scsi_host_template *tpnt) case BOARD_NCR53C400A: case BOARD_HP_C2502: pr_err(DRV_MODULE_NAME ": unknown register offsets\n"); + ret = -EINVAL; goto out_unregister; } #endif - if (NCR5380_init(instance, flags | FLAG_LATE_DMA_SETUP)) + ret = NCR5380_init(instance, flags | FLAG_LATE_DMA_SETUP); + if (ret) goto out_unregister; - switch (card.board) { + switch (board) { case BOARD_NCR53C400: case BOARD_DTC3181E: case BOARD_NCR53C400A: @@ -299,8 +263,8 @@ static int __init generic_NCR5380_detect(struct scsi_host_template *tpnt) NCR5380_maybe_reset_bus(instance); - if (card.irq != IRQ_AUTO) - instance->irq = card.irq; + if (irq != IRQ_AUTO) + instance->irq = irq; else instance->irq = NCR5380_probe_irq(instance, 0xffff); @@ -311,7 +275,7 @@ static int __init generic_NCR5380_detect(struct scsi_host_template *tpnt) if (instance->irq != NO_IRQ) { #ifndef SCSI_G_NCR5380_MEM /* set IRQ for HP C2502 */ - if (card.board == BOARD_HP_C2502) + if (board == BOARD_HP_C2502) magic_configure(port_idx, instance->irq, magic); #endif if (request_irq(instance->irq, generic_NCR5380_intr, @@ -326,31 +290,32 @@ static int __init generic_NCR5380_detect(struct scsi_host_template *tpnt) printk(KERN_INFO "scsi%d : please jumper the board for a free IRQ.\n", instance->host_no); } - return 1; + ret = scsi_add_host(instance, pdev); + if (ret) + goto out_free_irq; + scsi_scan_host(instance); + dev_set_drvdata(pdev, instance); + return 0; +out_free_irq: + if (instance->irq != NO_IRQ) + free_irq(instance->irq, instance); + NCR5380_exit(instance); out_unregister: - scsi_unregister(instance); + scsi_host_put(instance); out_release: #ifndef SCSI_G_NCR5380_MEM - release_region(card.NCR5380_map_name, region_size); + release_region(base, region_size); #else iounmap(iomem); release_mem_region(base, iomem_size); #endif - return 0; + return ret; } -/** - * generic_NCR5380_release_resources - free resources - * @instance: host adapter to clean up - * - * Free the generic interface resources from this adapter. 
- * - * Locks: none - */ - -static int generic_NCR5380_release_resources(struct Scsi_Host *instance) +static void generic_NCR5380_release_resources(struct Scsi_Host *instance) { + scsi_remove_host(instance); if (instance->irq != NO_IRQ) free_irq(instance->irq, instance); NCR5380_exit(instance); @@ -364,7 +329,7 @@ static int generic_NCR5380_release_resources(struct Scsi_Host *instance) release_mem_region(instance->base, hostdata->iomem_size); } #endif - return 0; + scsi_host_put(instance); } /** @@ -554,10 +519,9 @@ static int generic_NCR5380_dma_xfer_len(struct Scsi_Host *instance, #include "NCR5380.c" static struct scsi_host_template driver_template = { + .module = THIS_MODULE, .proc_name = DRV_MODULE_NAME, .name = "Generic NCR5380/NCR53C400 SCSI", - .detect = generic_NCR5380_detect, - .release = generic_NCR5380_release_resources, .info = generic_NCR5380_info, .queuecommand = generic_NCR5380_queue_command, .eh_abort_handler = generic_NCR5380_abort, @@ -571,26 +535,115 @@ static struct scsi_host_template driver_template = { .max_sectors = 128, }; -#include "scsi_module.c" -module_param(ncr_irq, int, 0); -module_param(ncr_dma, int, 0); -module_param(ncr_addr, int, 0); -module_param(ncr_5380, int, 0); -module_param(ncr_53c400, int, 0); -module_param(ncr_53c400a, int, 0); -module_param(dtc_3181e, int, 0); -module_param(hp_c2502, int, 0); -MODULE_LICENSE("GPL"); +static int generic_NCR5380_isa_match(struct device *pdev, unsigned int ndev) +{ + int ret = generic_NCR5380_init_one(&driver_template, pdev, base[ndev], + irq[ndev], card[ndev]); + if (ret) { + if (base[ndev]) + printk(KERN_WARNING "Card not found at address 0x%03x\n", + base[ndev]); + return 0; + } -#if !defined(SCSI_G_NCR5380_MEM) && defined(MODULE) -static struct isapnp_device_id id_table[] = { - { - ISAPNP_ANY_ID, ISAPNP_ANY_ID, - ISAPNP_VENDOR('D', 'T', 'C'), ISAPNP_FUNCTION(0x436e), - 0}, - {0} + return 1; +} + +static int generic_NCR5380_isa_remove(struct device *pdev, + unsigned int ndev) +{ + generic_NCR5380_release_resources(dev_get_drvdata(pdev)); + dev_set_drvdata(pdev, NULL); + return 0; +} + +static struct isa_driver generic_NCR5380_isa_driver = { + .match = generic_NCR5380_isa_match, + .remove = generic_NCR5380_isa_remove, + .driver = { + .name = DRV_MODULE_NAME + }, +}; + +#if !defined(SCSI_G_NCR5380_MEM) && defined(CONFIG_PNP) +static struct pnp_device_id generic_NCR5380_pnp_ids[] = { + { .id = "DTC436e", .driver_data = BOARD_DTC3181E }, + { .id = "" } +}; +MODULE_DEVICE_TABLE(pnp, generic_NCR5380_pnp_ids); + +static int generic_NCR5380_pnp_probe(struct pnp_dev *pdev, + const struct pnp_device_id *id) +{ + int base, irq; + + if (pnp_activate_dev(pdev) < 0) + return -EBUSY; + + base = pnp_port_start(pdev, 0); + irq = pnp_irq(pdev, 0); + + return generic_NCR5380_init_one(&driver_template, &pdev->dev, base, irq, + id->driver_data); +} + +static void generic_NCR5380_pnp_remove(struct pnp_dev *pdev) +{ + generic_NCR5380_release_resources(pnp_get_drvdata(pdev)); + pnp_set_drvdata(pdev, NULL); +} + +static struct pnp_driver generic_NCR5380_pnp_driver = { + .name = DRV_MODULE_NAME, + .id_table = generic_NCR5380_pnp_ids, + .probe = generic_NCR5380_pnp_probe, + .remove = generic_NCR5380_pnp_remove, }; +#endif /* !defined(SCSI_G_NCR5380_MEM) && defined(CONFIG_PNP) */ -MODULE_DEVICE_TABLE(isapnp, id_table); +static int pnp_registered, isa_registered; + +static int __init generic_NCR5380_init(void) +{ + int ret = 0; + + /* compatibility with old-style parameters */ + if (irq[0] == 0 && base[0] == 0 && card[0] == -1) { + irq[0] = 
ncr_irq; + base[0] = ncr_addr; + if (ncr_5380) + card[0] = BOARD_NCR5380; + if (ncr_53c400) + card[0] = BOARD_NCR53C400; + if (ncr_53c400a) + card[0] = BOARD_NCR53C400A; + if (dtc_3181e) + card[0] = BOARD_DTC3181E; + if (hp_c2502) + card[0] = BOARD_HP_C2502; + } + +#if !defined(SCSI_G_NCR5380_MEM) && defined(CONFIG_PNP) + if (!pnp_register_driver(&generic_NCR5380_pnp_driver)) + pnp_registered = 1; #endif + ret = isa_register_driver(&generic_NCR5380_isa_driver, MAX_CARDS); + if (!ret) + isa_registered = 1; + + return (pnp_registered || isa_registered) ? 0 : ret; +} + +static void __exit generic_NCR5380_exit(void) +{ +#if !defined(SCSI_G_NCR5380_MEM) && defined(CONFIG_PNP) + if (pnp_registered) + pnp_unregister_driver(&generic_NCR5380_pnp_driver); +#endif + if (isa_registered) + isa_unregister_driver(&generic_NCR5380_isa_driver); +} + +module_init(generic_NCR5380_init); +module_exit(generic_NCR5380_exit); diff --git a/drivers/scsi/g_NCR5380.h b/drivers/scsi/g_NCR5380.h index 595177428d76..b175b9234458 100644 --- a/drivers/scsi/g_NCR5380.h +++ b/drivers/scsi/g_NCR5380.h @@ -14,15 +14,9 @@ #ifndef GENERIC_NCR5380_H #define GENERIC_NCR5380_H -#define __STRVAL(x) #x -#define STRVAL(x) __STRVAL(x) - #ifndef SCSI_G_NCR5380_MEM #define DRV_MODULE_NAME "g_NCR5380" -#define NCR5380_map_type int -#define NCR5380_map_name port - #define NCR5380_read(reg) \ inb(instance->io_port + (reg)) #define NCR5380_write(reg, value) \ @@ -38,8 +32,6 @@ /* therefore SCSI_G_NCR5380_MEM */ #define DRV_MODULE_NAME "g_NCR5380_mmio" -#define NCR5380_map_type unsigned long -#define NCR5380_map_name base #define NCR53C400_mem_base 0x3880 #define NCR53C400_host_buffer 0x3900 #define NCR53C400_region_size 0x3a00 -- cgit v1.2.3-59-g8ed1b From 5404fb7cec7473413bc1dd2d114dbe7c291dfb15 Mon Sep 17 00:00:00 2001 From: Joao Pinto Date: Fri, 23 Sep 2016 12:52:52 +0100 Subject: scsi: ufs: Kconfig fix Currently if we have PCI and UFSHCD configured in the kernel, both SCSI_UFS_DWC_TC_PCI and SCSI_UFSHCD_PCI show up, which is not correct. This patch changes the UFS Kconfig to assure hierarchy between the 2 options. Signed-off-by: Joao Pinto Signed-off-by: Martin K. Petersen --- drivers/scsi/ufs/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig index 47966909286d..e27b4d4e6ae2 100644 --- a/drivers/scsi/ufs/Kconfig +++ b/drivers/scsi/ufs/Kconfig @@ -63,7 +63,7 @@ config SCSI_UFSHCD_PCI config SCSI_UFS_DWC_TC_PCI tristate "DesignWare pci support using a G210 Test Chip" - depends on SCSI_UFSHCD && PCI + depends on SCSI_UFSHCD_PCI ---help--- Synopsys Test Chip is a PHY for prototyping purposes. -- cgit v1.2.3-59-g8ed1b From 1ce788d24268a33513d832d9030ceab93f1c2ce2 Mon Sep 17 00:00:00 2001 From: Tang Yuantian Date: Fri, 30 Sep 2016 14:13:13 +0800 Subject: ahci: qoriq: Revert "ahci: qoriq: Disable NCQ on ls2080a SoC" This reverts commit 640847298e2b7f19 ("ahci: qoriq: Disable NCQ on ls2080a SoC") The erratum has been fixed in ls2080a v2.0 and later soc. In reality, customer will not get any ls2080a v1.0 soc. Neither apply to any products. So reverting this commit won't create any side effect. Blacklisting v2.0 could also be a option, but that needs to check the soc version which is not suitable in the driver. 
Signed-off-by: Tang Yuantian Signed-off-by: Tejun Heo --- drivers/ata/ahci_qoriq.c | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/ata/ahci_qoriq.c b/drivers/ata/ahci_qoriq.c index 925c4b6a753b..1eba8dff875e 100644 --- a/drivers/ata/ahci_qoriq.c +++ b/drivers/ata/ahci_qoriq.c @@ -136,7 +136,7 @@ static struct ata_port_operations ahci_qoriq_ops = { .hardreset = ahci_qoriq_hardreset, }; -static struct ata_port_info ahci_qoriq_port_info = { +static const struct ata_port_info ahci_qoriq_port_info = { .flags = AHCI_FLAG_COMMON | ATA_FLAG_NCQ, .pio_mask = ATA_PIO4, .udma_mask = ATA_UDMA6, @@ -221,12 +221,6 @@ static int ahci_qoriq_probe(struct platform_device *pdev) if (rc) goto disable_resources; - /* Workaround for ls2080a */ - if (qoriq_priv->type == AHCI_LS2080A) { - hpriv->flags |= AHCI_HFLAG_NO_NCQ; - ahci_qoriq_port_info.flags &= ~ATA_FLAG_NCQ; - } - rc = ahci_platform_init_host(pdev, hpriv, &ahci_qoriq_port_info, &ahci_qoriq_sht); if (rc) -- cgit v1.2.3-59-g8ed1b From d94655b405ba08838fb3db301dddb02a435ae16c Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Sat, 24 Sep 2016 12:02:54 +0000 Subject: ipmi/bt-bmc: remove redundant return value check of platform_get_resource() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Remove unneeded error handling on the result of a call to platform_get_resource() when the value is passed to devm_ioremap_resource(). Signed-off-by: Wei Yongjun Acked-by: Cédric Le Goater Signed-off-by: Corey Minyard --- drivers/char/ipmi/bt-bmc.c | 5 ----- 1 file changed, 5 deletions(-) (limited to 'drivers') diff --git a/drivers/char/ipmi/bt-bmc.c b/drivers/char/ipmi/bt-bmc.c index de64bf1f2f4d..b49e61320952 100644 --- a/drivers/char/ipmi/bt-bmc.c +++ b/drivers/char/ipmi/bt-bmc.c @@ -432,11 +432,6 @@ static int bt_bmc_probe(struct platform_device *pdev) dev_set_drvdata(&pdev->dev, bt_bmc); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - dev_err(dev, "Unable to find resources\n"); - return -ENXIO; - } - bt_bmc->base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(bt_bmc->base)) return PTR_ERR(bt_bmc->base); -- cgit v1.2.3-59-g8ed1b From 231147ee77f39f4134935686e9d7e415bdf48149 Mon Sep 17 00:00:00 2001 From: sayli karnik Date: Wed, 28 Sep 2016 21:46:51 +0530 Subject: iio: maxim_thermocouple: Align 16 bit big endian value of raw reads Driver was reporting invalid raw read values for MAX6675 on big endian architectures. MAX6675 buffered mode is not affected, nor is the MAX31855. The driver was losing a 2 byte read value when it used a 32 bit integer buffer to store a 16 bit big endian value. Use big endian types to properly align buffers on big endian architectures. 
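As a rough userspace illustration of why the 16-bit value is lost on big-endian hosts (this is not the driver code; ntohs() stands in for be16_to_cpu(), and fake_spi_read() is an invented stand-in for spi_read()):

#include <arpa/inet.h>	/* ntohs() plays the role of be16_to_cpu() */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Invented helper: emulates a 2-byte SPI read that lands at the start of
 * the caller's buffer, most significant byte first. */
static void fake_spi_read(void *buf, size_t len)
{
	static const uint8_t raw[2] = { 0x12, 0x34 };	/* 0x1234 on the wire */

	memcpy(buf, raw, len);
}

int main(void)
{
	uint32_t wide = 0;	/* old code: 32-bit temporary for a 16-bit read */
	uint16_t narrow = 0;	/* fixed code: properly sized 16-bit buffer */

	fake_spi_read(&wide, 2);
	fake_spi_read(&narrow, 2);

	/* Truncating the 32-bit temporary to 16 bits only works by accident
	 * on little-endian hosts; on big-endian hosts the two bytes that were
	 * read sit in the upper half of 'wide', so this prints 0. */
	printf("via u32 temporary: 0x%04x\n", ntohs((uint16_t)wide));
	printf("via u16 temporary: 0x%04x\n", ntohs(narrow));	/* 0x1234 everywhere */

	return 0;
}

The same reasoning applies to the 4-byte MAX31855 reads, which is why the fix uses a __be32 temporary with be32_to_cpu() instead of a plain unsigned int.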
Fixes following sparse endianness warnings: warning: cast to restricted __be16 warning: cast to restricted __be32 Fixes checkpatch issue: CHECK: No space is necessary after a cast Signed-off-by: sayli karnik Fixes: 1f25ca11d84a ("iio: temperature: add support for Maxim thermocouple chips") Signed-off-by: Jonathan Cameron --- drivers/iio/temperature/maxim_thermocouple.c | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/iio/temperature/maxim_thermocouple.c b/drivers/iio/temperature/maxim_thermocouple.c index 39dd2026ccc9..066161a4bccd 100644 --- a/drivers/iio/temperature/maxim_thermocouple.c +++ b/drivers/iio/temperature/maxim_thermocouple.c @@ -123,22 +123,24 @@ static int maxim_thermocouple_read(struct maxim_thermocouple_data *data, { unsigned int storage_bytes = data->chip->read_size; unsigned int shift = chan->scan_type.shift + (chan->address * 8); - unsigned int buf; + __be16 buf16; + __be32 buf32; int ret; - ret = spi_read(data->spi, (void *) &buf, storage_bytes); - if (ret) - return ret; - switch (storage_bytes) { case 2: - *val = be16_to_cpu(buf); + ret = spi_read(data->spi, (void *)&buf16, storage_bytes); + *val = be16_to_cpu(buf16); break; case 4: - *val = be32_to_cpu(buf); + ret = spi_read(data->spi, (void *)&buf32, storage_bytes); + *val = be32_to_cpu(buf32); break; } + if (ret) + return ret; + /* check to be sure this is a valid reading */ if (*val & data->chip->status_bit) return -EINVAL; -- cgit v1.2.3-59-g8ed1b From bd85f4b37ddf2da22ccf5b29d264b2459b6722df Mon Sep 17 00:00:00 2001 From: Xie XiuQi Date: Tue, 27 Sep 2016 15:07:12 +0800 Subject: ipmi: fix crash on reading version from proc after unregisted bmc I meet a crash, which could be reproduce: 1) while true; do cat /proc/ipmi/0/version; done 2) modprobe -rv ipmi_si ipmi_msghandler ipmi_devintf [82761.021137] IPMI BT: req2rsp=5 secs retries=2 [82761.034524] ipmi device interface [82761.222218] ipmi_si ipmi_si.0: Found new BMC (man_id: 0x0007db, prod_id: 0x0001, dev_id: 0x01) [82761.222230] ipmi_si ipmi_si.0: IPMI bt interface initialized [82903.922740] BUG: unable to handle kernel NULL pointer dereference at 00000000000002d4 [82903.930952] IP: [] smi_version_proc_show+0x18/0x40 [ipmi_msghandler] [82903.939220] PGD 86693a067 PUD 865304067 PMD 0 [82903.943893] Thread overran stack, or stack corrupted [82903.949034] Oops: 0000 [#1] SMP [82903.983091] Modules linked in: ipmi_si(-) ipmi_msghandler binfmt_misc ebtable_filter ebtables ip6table_filter ip6_tables iptable_filter ... 
[82904.057285] pps_core scsi_transport_sas dm_mod vfio_iommu_type1 vfio xt_sctp nf_conntrack_proto_sctp nf_nat_proto_sctp nf_nat nf_conntrack sctp libcrc32c [last unloaded: ipmi_devintf] [82904.073169] CPU: 37 PID: 28089 Comm: cat Tainted: GF O ---- ------- 3.10.0-327.28.3.el7.x86_64 #1 [82904.083373] Hardware name: Huawei RH2288H V3/BC11HGSA0, BIOS 3.22 05/16/2016 [82904.090592] task: ffff880101cc2e00 ti: ffff880369c54000 task.ti: ffff880369c54000 [82904.098414] RIP: 0010:[] [] smi_version_proc_show+0x18/0x40 [ipmi_msghandler] [82904.109124] RSP: 0018:ffff880369c57e70 EFLAGS: 00010203 [82904.114608] RAX: 0000000000000000 RBX: 0000000000000000 RCX: 0000000024688470 [82904.121912] RDX: fffffffffffffff4 RSI: ffffffffa0313404 RDI: ffff8808670ce200 [82904.129218] RBP: ffff880369c57e70 R08: 0000000000019720 R09: ffffffff81204a27 [82904.136521] R10: ffff88046f803300 R11: 0000000000000246 R12: ffff880662399700 [82904.143828] R13: 0000000000000001 R14: ffff880369c57f48 R15: ffff8808670ce200 [82904.151128] FS: 00007fb70c9ca740(0000) GS:ffff88086e340000(0000) knlGS:0000000000000000 [82904.159557] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [82904.165473] CR2: 00000000000002d4 CR3: 0000000864c0c000 CR4: 00000000003407e0 [82904.172778] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 [82904.180084] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 [82904.187385] Stack: [82904.189573] ffff880369c57ee0 ffffffff81204f1a 00000000122a2427 0000000001426000 [82904.197392] ffff8808670ce238 0000000000010000 0000000000000000 0000000000000fff [82904.205198] 00000000122a2427 ffff880862079600 0000000001426000 ffff880369c57f48 [82904.212962] Call Trace: [82904.219667] [] seq_read+0xfa/0x3a0 [82904.224893] [] proc_reg_read+0x3d/0x80 [82904.230468] [] vfs_read+0x9c/0x170 [82904.235689] [] SyS_read+0x7f/0xe0 [82904.240816] [] system_call_fastpath+0x16/0x1b [82904.246991] Code: 30 a0 e8 0c 6f ef e0 5b 5d c3 66 0f 1f 84 00 00 00 00 00 0f 1f 44 00 00 48 8b 47 78 55 48 c7 c6 04 34 31 a0 48 89 e5 48 8b 40 50 <0f> b6 90 d4 02 00 00 31 c0 89 d1 83 e2 0f c0 e9 04 0f b6 c9 e8 [82904.267710] RIP [] smi_version_proc_show+0x18/0x40 [ipmi_msghandler] [82904.276079] RSP [82904.279734] CR2: 00000000000002d4 [82904.283731] ---[ end trace a69e4328b49dd7c4 ]--- [82904.328118] Kernel panic - not syncing: Fatal exception Reading versin from /proc need bmc device struct available. So in this patch we move add/remove_proc_entries between ipmi_bmc_register and ipmi_bmc_unregister. 
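Schematically, the invariant the patch establishes is that the proc entries live strictly inside the BMC registration window. A toy model of why the old teardown order could oops (stub code only, none of these are real ipmi_msghandler symbols):

#include <stdio.h>

/* 'bmc' stands for the structure smi_version_proc_show() dereferences. */
struct toy_bmc { int version; };

static struct toy_bmc real_bmc = { .version = 2 };
static struct toy_bmc *bmc;	/* set by register, cleared by unregister */
static int proc_file_exists;

static void toy_bmc_register(void)		{ bmc = &real_bmc; }
static void toy_bmc_unregister(void)		{ bmc = NULL; }
static void toy_add_proc_entries(void)		{ proc_file_exists = 1; }
static void toy_remove_proc_entries(void)	{ proc_file_exists = 0; }

/* What a concurrent "cat /proc/ipmi/0/version" amounts to. */
static void toy_proc_read(void)
{
	if (!proc_file_exists) {
		puts("no proc file, reader simply gets nothing");
		return;
	}
	if (!bmc) {
		/* With the old order (BMC unregistered before the proc
		 * entries were removed) a reader could land here. */
		puts("NULL dereference, i.e. the reported oops");
		return;
	}
	printf("version %d\n", bmc->version);
}

int main(void)
{
	toy_bmc_register();		/* fixed setup: BMC first ...       */
	toy_add_proc_entries();		/* ... then the proc entries        */
	toy_proc_read();

	toy_remove_proc_entries();	/* fixed teardown: proc first ...   */
	toy_proc_read();
	toy_bmc_unregister();		/* ... BMC last                     */

	return 0;
}

With the patched ordering a reader either finds no proc file or finds a fully registered BMC, never the in-between state.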
Cc: Kefeng Wang Signed-off-by: Xie XiuQi Signed-off-by: Corey Minyard --- drivers/char/ipmi/ipmi_msghandler.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c index d8619998cfb5..fcdd886819f5 100644 --- a/drivers/char/ipmi/ipmi_msghandler.c +++ b/drivers/char/ipmi/ipmi_msghandler.c @@ -2891,11 +2891,11 @@ int ipmi_register_smi(const struct ipmi_smi_handlers *handlers, intf->curr_channel = IPMI_MAX_CHANNELS; } + rv = ipmi_bmc_register(intf, i); + if (rv == 0) rv = add_proc_entries(intf, i); - rv = ipmi_bmc_register(intf, i); - out: if (rv) { if (intf->proc_dir) @@ -2982,8 +2982,6 @@ int ipmi_unregister_smi(ipmi_smi_t intf) int intf_num = intf->intf_num; ipmi_user_t user; - ipmi_bmc_unregister(intf); - mutex_lock(&smi_watchers_mutex); mutex_lock(&ipmi_interfaces_mutex); intf->intf_num = -1; @@ -3007,6 +3005,7 @@ int ipmi_unregister_smi(ipmi_smi_t intf) mutex_unlock(&ipmi_interfaces_mutex); remove_proc_entries(intf); + ipmi_bmc_unregister(intf); /* * Call all the watcher interfaces to tell them that -- cgit v1.2.3-59-g8ed1b From 0e97cd4e4632e385b3bf7edcf0c803de0487212a Mon Sep 17 00:00:00 2001 From: lipeng Date: Thu, 29 Sep 2016 18:09:09 +0100 Subject: net: hns: fix port unavailable after hnae_reserve_buffer_map fail When hnae_reserve_buffer_map fail, it will break cycle and some buffer description has no available memory, therefore the port will be unavailable. Signed-off-by: Peng Li Reviewed-by: Yisen Zhuang Signed-off-by: Salil Mehta Signed-off-by: Doug Ledford --- drivers/net/ethernet/hisilicon/hns/hns_enet.c | 23 ++++++++++++++++++----- 1 file changed, 18 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c index d7e1f8c7ae92..32ff27028687 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c @@ -747,6 +747,14 @@ static void hns_nic_rx_up_pro(struct hns_nic_ring_data *ring_data, ndev->last_rx = jiffies; } +static int hns_desc_unused(struct hnae_ring *ring) +{ + int ntc = ring->next_to_clean; + int ntu = ring->next_to_use; + + return ((ntc >= ntu) ? 
0 : ring->desc_num) + ntc - ntu; +} + static int hns_nic_rx_poll_one(struct hns_nic_ring_data *ring_data, int budget, void *v) { @@ -755,17 +763,21 @@ static int hns_nic_rx_poll_one(struct hns_nic_ring_data *ring_data, int num, bnum, ex_num; #define RCB_NOF_ALLOC_RX_BUFF_ONCE 16 int recv_pkts, recv_bds, clean_count, err; + int unused_count = hns_desc_unused(ring); num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM); rmb(); /* make sure num taken effect before the other data is touched */ recv_pkts = 0, recv_bds = 0, clean_count = 0; + num -= unused_count; recv: while (recv_pkts < budget && recv_bds < num) { /* reuse or realloc buffers */ - if (clean_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) { - hns_nic_alloc_rx_buffers(ring_data, clean_count); + if (clean_count + unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) { + hns_nic_alloc_rx_buffers(ring_data, + clean_count + unused_count); clean_count = 0; + unused_count = hns_desc_unused(ring); } /* poll one pkt */ @@ -789,7 +801,7 @@ recv: /* make all data has been write before submit */ if (recv_pkts < budget) { ex_num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM); - + ex_num -= unused_count; if (ex_num > clean_count) { num += ex_num - clean_count; rmb(); /*complete read rx ring bd number*/ @@ -799,8 +811,9 @@ recv: out: /* make all data has been write before submit */ - if (clean_count > 0) - hns_nic_alloc_rx_buffers(ring_data, clean_count); + if (clean_count + unused_count > 0) + hns_nic_alloc_rx_buffers(ring_data, + clean_count + unused_count); return recv_pkts; } -- cgit v1.2.3-59-g8ed1b From 6771cbf9446121cb7b64810395391712a03b899d Mon Sep 17 00:00:00 2001 From: Daode Huang Date: Thu, 29 Sep 2016 18:09:10 +0100 Subject: net: hns: bug fix about setting coalsecs-usecs to 0 When set rx/tx coalesce usecs to 0, the interrupt coalesce will be disabled, but there is a interrupt rate limit which set to 1us, it will cause no interrupt occurs. This patch disable interrupt limit when sets coalsecs usecs to 0, and restores it to 1 in other case. 
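The gap-time update is a masked read-modify-write of a 10-bit field (the PPE_INT_GAPTIME_M/B defines added below indicate a 0x3ff mask at bit 0). The following stand-alone sketch shows that pattern with the MMIO access replaced by a plain variable; the helper name and the sample register value are made up, only the 0-or-1 policy comes from the patch.

/* gap_time_sketch.c - illustrative only; register access is simulated. */
#include <stdio.h>
#include <stdint.h>

#define INT_GAPTIME_SHIFT 0
#define INT_GAPTIME_MASK  (0x3ffu << INT_GAPTIME_SHIFT)   /* 10-bit field */

/* Read-modify-write of one field, leaving all other bits untouched. */
static uint32_t set_field(uint32_t reg, uint32_t mask, int shift, uint32_t val)
{
    return (reg & ~mask) | ((val << shift) & mask);
}

/* timeout == 0 disables the interrupt gap entirely; any other timeout
 * keeps the 1us gap in place. */
static uint32_t apply_coalesce_timeout(uint32_t reg, unsigned int timeout_us)
{
    return set_field(reg, INT_GAPTIME_MASK, INT_GAPTIME_SHIFT,
                     timeout_us ? 1 : 0);
}

int main(void)
{
    uint32_t reg = 0xabcd0001;   /* pretend current register contents */

    printf("timeout 0us  -> 0x%08x\n", apply_coalesce_timeout(reg, 0));
    printf("timeout 50us -> 0x%08x\n", apply_coalesce_timeout(reg, 50));
    return 0;
}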
Signed-off-by: Daode Huang Reviewed-by: Yisen Zhuang Signed-off-by: Salil Mehta Signed-off-by: Doug Ledford --- drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c | 16 ++++++++++++++++ drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h | 4 ++++ 2 files changed, 20 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c index ef1107777c08..f0ed80d6ef9c 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c @@ -543,6 +543,22 @@ int hns_rcb_set_coalesce_usecs( "error: coalesce_usecs setting supports 0~1023us\n"); return -EINVAL; } + + if (!AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver)) { + if (timeout == 0) + /* set timeout to 0, Disable gap time */ + dsaf_set_reg_field(rcb_common->io_base, + RCB_INT_GAP_TIME_REG + port_idx * 4, + PPE_INT_GAPTIME_M, PPE_INT_GAPTIME_B, + 0); + else + /* set timeout non 0, restore gap time to 1 */ + dsaf_set_reg_field(rcb_common->io_base, + RCB_INT_GAP_TIME_REG + port_idx * 4, + PPE_INT_GAPTIME_M, PPE_INT_GAPTIME_B, + 1); + } + hns_rcb_set_port_timeout(rcb_common, port_idx, timeout); return 0; } diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h index 4b8b803822d1..878950a42e6c 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h @@ -417,6 +417,7 @@ #define RCB_CFG_OVERTIME_REG 0x9300 #define RCB_CFG_PKTLINE_INT_NUM_REG 0x9304 #define RCB_CFG_OVERTIME_INT_NUM_REG 0x9308 +#define RCB_INT_GAP_TIME_REG 0x9400 #define RCB_PORT_CFG_OVERTIME_REG 0x9430 #define RCB_RING_RX_RING_BASEADDR_L_REG 0x00000 @@ -898,6 +899,9 @@ #define PPE_CNT_CLR_CE_B 0 #define PPE_CNT_CLR_SNAP_EN_B 1 +#define PPE_INT_GAPTIME_B 0 +#define PPE_INT_GAPTIME_M 0x3ff + #define PPE_COMMON_CNT_CLR_CE_B 0 #define PPE_COMMON_CNT_CLR_SNAP_EN_B 1 #define RCB_COM_TSO_MODE_B 0 -- cgit v1.2.3-59-g8ed1b From cee5add45853ed9e6bbb6ad203602be6c42d3e16 Mon Sep 17 00:00:00 2001 From: Daode Huang Date: Thu, 29 Sep 2016 18:09:11 +0100 Subject: net: hns: add fini_process for v2 napi process This patch adds fini_process for v2, it handles the packets recevied by the hardware in the napi porcess. With this patch, the hardware irq numbers will drop 50% per sec. 
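A stand-alone sketch of the v2 completion step follows; the hardware descriptor counter and the irq/NAPI calls are simulated, but the decision it illustrates is the one in the new hns_nic_rx_fini_pro_v2(): re-arm the interrupt only if no new descriptors arrived while polling was being wound down, otherwise reschedule the poll.

/* napi_fini_sketch.c - illustration of the "recheck after completion"
 * pattern; the hardware read and the irq/NAPI calls are simulated. */
#include <stdio.h>

static int pending_descs;                 /* stands in for RCB_REG_FBDNUM */

static void rearm_irq(void)       { printf("irq re-armed\n");     }
static void reschedule_poll(void) { printf("poll rescheduled\n"); }

/* After the poll loop has used its budget, look once more at the pending
 * counter: only go back to interrupt mode if nothing arrived meanwhile,
 * so late work is never stranded behind a masked interrupt. */
static void rx_fini(void)
{
    if (pending_descs == 0)
        rearm_irq();
    else
        reschedule_poll();
}

int main(void)
{
    pending_descs = 0;
    rx_fini();          /* quiet ring: back to interrupt mode     */

    pending_descs = 4;  /* packets landed while we were finishing */
    rx_fini();          /* stay in polling mode                   */
    return 0;
}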
Signed-off-by: Daode Huang Reviewed-by: Yisen Zhuang Signed-off-by: Salil Mehta Signed-off-by: Doug Ledford --- drivers/net/ethernet/hisilicon/hns/hns_enet.c | 45 ++++++++++++++++++++++----- 1 file changed, 38 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c index 32ff27028687..e6bfc51f8022 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c @@ -823,6 +823,8 @@ static void hns_nic_rx_fini_pro(struct hns_nic_ring_data *ring_data) struct hnae_ring *ring = ring_data->ring; int num = 0; + ring_data->ring->q->handle->dev->ops->toggle_ring_irq(ring, 0); + /* for hardware bug fixed */ num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM); @@ -834,6 +836,20 @@ static void hns_nic_rx_fini_pro(struct hns_nic_ring_data *ring_data) } } +static void hns_nic_rx_fini_pro_v2(struct hns_nic_ring_data *ring_data) +{ + struct hnae_ring *ring = ring_data->ring; + int num = 0; + + num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM); + + if (num == 0) + ring_data->ring->q->handle->dev->ops->toggle_ring_irq( + ring, 0); + else + napi_schedule(&ring_data->napi); +} + static inline void hns_nic_reclaim_one_desc(struct hnae_ring *ring, int *bytes, int *pkts) { @@ -935,7 +951,11 @@ static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data, static void hns_nic_tx_fini_pro(struct hns_nic_ring_data *ring_data) { struct hnae_ring *ring = ring_data->ring; - int head = readl_relaxed(ring->io_base + RCB_REG_HEAD); + int head; + + ring_data->ring->q->handle->dev->ops->toggle_ring_irq(ring, 0); + + head = readl_relaxed(ring->io_base + RCB_REG_HEAD); if (head != ring->next_to_clean) { ring_data->ring->q->handle->dev->ops->toggle_ring_irq( @@ -945,6 +965,18 @@ static void hns_nic_tx_fini_pro(struct hns_nic_ring_data *ring_data) } } +static void hns_nic_tx_fini_pro_v2(struct hns_nic_ring_data *ring_data) +{ + struct hnae_ring *ring = ring_data->ring; + int head = readl_relaxed(ring->io_base + RCB_REG_HEAD); + + if (head == ring->next_to_clean) + ring_data->ring->q->handle->dev->ops->toggle_ring_irq( + ring, 0); + else + napi_schedule(&ring_data->napi); +} + static void hns_nic_tx_clr_all_bufs(struct hns_nic_ring_data *ring_data) { struct hnae_ring *ring = ring_data->ring; @@ -976,10 +1008,7 @@ static int hns_nic_common_poll(struct napi_struct *napi, int budget) if (clean_complete >= 0 && clean_complete < budget) { napi_complete(napi); - ring_data->ring->q->handle->dev->ops->toggle_ring_irq( - ring_data->ring, 0); - if (ring_data->fini_process) - ring_data->fini_process(ring_data); + ring_data->fini_process(ring_data); return 0; } @@ -1755,7 +1784,8 @@ static int hns_nic_init_ring_data(struct hns_nic_priv *priv) rd->queue_index = i; rd->ring = &h->qs[i]->tx_ring; rd->poll_one = hns_nic_tx_poll_one; - rd->fini_process = is_ver1 ? hns_nic_tx_fini_pro : NULL; + rd->fini_process = is_ver1 ? hns_nic_tx_fini_pro : + hns_nic_tx_fini_pro_v2; netif_napi_add(priv->netdev, &rd->napi, hns_nic_common_poll, NIC_TX_CLEAN_MAX_NUM); @@ -1767,7 +1797,8 @@ static int hns_nic_init_ring_data(struct hns_nic_priv *priv) rd->ring = &h->qs[i - h->q_num]->rx_ring; rd->poll_one = hns_nic_rx_poll_one; rd->ex_process = hns_nic_rx_up_pro; - rd->fini_process = is_ver1 ? hns_nic_rx_fini_pro : NULL; + rd->fini_process = is_ver1 ? 
hns_nic_rx_fini_pro : + hns_nic_rx_fini_pro_v2; netif_napi_add(priv->netdev, &rd->napi, hns_nic_common_poll, NIC_RX_CLEAN_MAX_NUM); -- cgit v1.2.3-59-g8ed1b From 34447271dc10302a9aaee273a3ed354aefeaef7c Mon Sep 17 00:00:00 2001 From: Daode Huang Date: Thu, 29 Sep 2016 18:09:12 +0100 Subject: net: hns: delete repeat read fbd num after while Because we handle the received packets after napi, so delete the checking before submitting. It delete the code of read the fbd number register, which reduces the cpu usages while receiving packets Signed-off-by: Daode Huang Reviewed-by: Yisen Zhuang Signed-off-by: Salil Mehta Signed-off-by: Doug Ledford --- drivers/net/ethernet/hisilicon/hns/hns_enet.c | 15 ++------------- 1 file changed, 2 insertions(+), 13 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c index e6bfc51f8022..09ed237f4b4c 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c @@ -760,7 +760,7 @@ static int hns_nic_rx_poll_one(struct hns_nic_ring_data *ring_data, { struct hnae_ring *ring = ring_data->ring; struct sk_buff *skb; - int num, bnum, ex_num; + int num, bnum; #define RCB_NOF_ALLOC_RX_BUFF_ONCE 16 int recv_pkts, recv_bds, clean_count, err; int unused_count = hns_desc_unused(ring); @@ -770,7 +770,7 @@ static int hns_nic_rx_poll_one(struct hns_nic_ring_data *ring_data, recv_pkts = 0, recv_bds = 0, clean_count = 0; num -= unused_count; -recv: + while (recv_pkts < budget && recv_bds < num) { /* reuse or realloc buffers */ if (clean_count + unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) { @@ -798,17 +798,6 @@ recv: recv_pkts++; } - /* make all data has been write before submit */ - if (recv_pkts < budget) { - ex_num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM); - ex_num -= unused_count; - if (ex_num > clean_count) { - num += ex_num - clean_count; - rmb(); /*complete read rx ring bd number*/ - goto recv; - } - } - out: /* make all data has been write before submit */ if (clean_count + unused_count > 0) -- cgit v1.2.3-59-g8ed1b From ea870bf93c5cd3635a07a0570cb7fe61436b0c3d Mon Sep 17 00:00:00 2001 From: Kejian Yan Date: Thu, 29 Sep 2016 18:09:13 +0100 Subject: net: hns: fix port not available after testing loopback After running command "ethtool -t eth0", eth0 can not be connected to network. It is caused by the changing the inner loopback register and this register cannot be changed when hns connected to network. The routine of setting this register needs to be removed and using promisc mode to let the packet looped back pass by dsaf mode. 
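The promiscuous-mode handling that replaces the serdes loopback write can be summarised with the small sketch below; the enum values and the flag are simplified stand-ins, not the driver's types, but the decision mirrors the one added to __lb_setup(): force promiscuous mode while any loopback test runs, and restore the administrator's setting when the test ends.

/* lb_promisc_sketch.c - simplified stand-ins for the loopback/promisc
 * decision; not the driver's data structures. */
#include <stdio.h>
#include <stdbool.h>

enum loop_mode { LOOP_NONE, LOOP_MAC, LOOP_SERDES };

static void set_promisc(bool on)
{
    printf("promisc %s\n", on ? "on" : "off");
}

/* While a loopback mode is active, force promisc so looped-back frames are
 * not dropped by the MAC filter; afterwards restore the configured state. */
static void lb_setup(enum loop_mode loop, bool if_promisc)
{
    if (loop == LOOP_NONE)
        set_promisc(if_promisc);
    else
        set_promisc(true);
}

int main(void)
{
    lb_setup(LOOP_MAC, false);   /* test running: promisc forced on */
    lb_setup(LOOP_NONE, false);  /* test done: back to user setting */
    return 0;
}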
Reported-by: Jun He Signed-off-by: Kejian Yan Reviewed-by: Yisen Zhaung Signed-off-by: Salil Mehta Signed-off-by: Doug Ledford --- drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c | 3 --- drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c | 10 ---------- drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h | 1 - drivers/net/ethernet/hisilicon/hns/hns_ethtool.c | 7 +++++++ 4 files changed, 7 insertions(+), 14 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c index e28d960997af..e0f9cdc54f40 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c @@ -678,9 +678,6 @@ static int hns_ae_config_loopback(struct hnae_handle *handle, ret = -EINVAL; } - if (!ret) - hns_dsaf_set_inner_lb(mac_cb->dsaf_dev, mac_cb->mac_id, en); - return ret; } diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c index 9283bc60bb24..827d8fb72370 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c @@ -768,16 +768,6 @@ void hns_dsaf_set_promisc_mode(struct dsaf_device *dsaf_dev, u32 en) DSAF_CFG_MIX_MODE_S, !!en); } -void hns_dsaf_set_inner_lb(struct dsaf_device *dsaf_dev, u32 mac_id, u32 en) -{ - if (AE_IS_VER1(dsaf_dev->dsaf_ver) || - dsaf_dev->mac_cb[mac_id]->mac_type == HNAE_PORT_DEBUG) - return; - - dsaf_set_dev_bit(dsaf_dev, DSAFV2_SERDES_LBK_0_REG + 4 * mac_id, - DSAFV2_SERDES_LBK_EN_B, !!en); -} - /** * hns_dsaf_tbl_stat_en - tbl * @dsaf_id: dsa fabric id diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h index 35df187e66f1..c494fc52be74 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h @@ -466,6 +466,5 @@ void hns_dsaf_get_rx_mac_pause_en(struct dsaf_device *dsaf_dev, int mac_id, u32 *en); int hns_dsaf_set_rx_mac_pause_en(struct dsaf_device *dsaf_dev, int mac_id, u32 en); -void hns_dsaf_set_inner_lb(struct dsaf_device *dsaf_dev, u32 mac_id, u32 en); #endif /* __HNS_DSAF_MAIN_H__ */ diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c index ab33487a5321..fa91ce32199e 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c @@ -342,6 +342,13 @@ static int __lb_setup(struct net_device *ndev, break; } + if (!ret) { + if (loop == MAC_LOOP_NONE) + h->dev->ops->set_promisc_mode( + h, ndev->flags & IFF_PROMISC); + else + h->dev->ops->set_promisc_mode(h, 1); + } return ret; } -- cgit v1.2.3-59-g8ed1b From 58035fd92dcd54c8954c0813f229346b8febd5c0 Mon Sep 17 00:00:00 2001 From: Daode Huang Date: Thu, 29 Sep 2016 18:09:14 +0100 Subject: net: hns: fix the bug of forwarding table As the sub queue id in the broadcast forwarding table is always set to absolute queue 0 rather than the interface's relative queue 0, this will cause the received broadcast packets loopback to rcb. This patch sets the sub queue id to relative queue 0 of each port. 
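The relative-queue arithmetic used by the fix is easiest to see in isolation. The sketch below reproduces the vm_queue_id calculation from the hunk (vmid * queues-per-VF + VFs-per-port * queues-per-VF * mac id) with made-up example values for the two ratios.

/* vm_queue_sketch.c - the queue-numbering arithmetic from the commit,
 * reproduced as a stand-alone calculation with made-up example values. */
#include <stdio.h>

/* Queues are laid out per MAC, then per VF, with q_per_vf queues each,
 * so "queue 0 of this VF" is generally not absolute queue 0. */
static int vm_queue_id(int vmid, int mac_id, int q_per_vf, int vf_per_port)
{
    return vmid * q_per_vf + vf_per_port * q_per_vf * mac_id;
}

int main(void)
{
    int q_per_vf = 16, vf_per_port = 8;   /* example ratios only */

    /* vf 0 on mac 0 really is absolute queue 0 ... */
    printf("mac0/vf0 -> %d\n", vm_queue_id(0, 0, q_per_vf, vf_per_port));
    /* ... but vf 0 on mac 2 is not, which is why the forwarding table
     * must not hard-code the same base port number for every port. */
    printf("mac2/vf0 -> %d\n", vm_queue_id(0, 2, q_per_vf, vf_per_port));
    printf("mac2/vf3 -> %d\n", vm_queue_id(3, 2, q_per_vf, vf_per_port));
    return 0;
}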
Signed-off-by: Daode Huang Reviewed-by: Yisen Zhuang Signed-off-by: Salil Mehta Signed-off-by: Doug Ledford --- drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c | 8 ++++++-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c | 13 ++++++++++--- drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h | 2 ++ 3 files changed, 18 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c index e0f9cdc54f40..2d0cb609adc3 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c @@ -207,6 +207,7 @@ static int hns_ae_set_multicast_one(struct hnae_handle *handle, void *addr) int ret; char *mac_addr = (char *)addr; struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle); + u8 port_num; assert(mac_cb); @@ -221,8 +222,11 @@ static int hns_ae_set_multicast_one(struct hnae_handle *handle, void *addr) return ret; } - ret = hns_mac_set_multi(mac_cb, DSAF_BASE_INNER_PORT_NUM, - mac_addr, true); + ret = hns_mac_get_inner_port_num(mac_cb, handle->vf_id, &port_num); + if (ret) + return ret; + + ret = hns_mac_set_multi(mac_cb, port_num, mac_addr, true); if (ret) dev_err(handle->owner_dev, "mac add mul_mac:%pM port%d fail, ret = %#x!\n", diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c index a68eef0ee65f..151fd6e107e5 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c @@ -141,9 +141,10 @@ void hns_mac_adjust_link(struct hns_mac_cb *mac_cb, int speed, int duplex) *@port_num:port number * */ -static int hns_mac_get_inner_port_num(struct hns_mac_cb *mac_cb, - u8 vmid, u8 *port_num) +int hns_mac_get_inner_port_num(struct hns_mac_cb *mac_cb, u8 vmid, u8 *port_num) { + int q_num_per_vf, vf_num_per_port; + int vm_queue_id; u8 tmp_port; if (mac_cb->dsaf_dev->dsaf_mode <= DSAF_MODE_ENABLE) { @@ -174,6 +175,12 @@ static int hns_mac_get_inner_port_num(struct hns_mac_cb *mac_cb, return -EINVAL; } + q_num_per_vf = mac_cb->dsaf_dev->rcb_common[0]->max_q_per_vf; + vf_num_per_port = mac_cb->dsaf_dev->rcb_common[0]->max_vfn; + + vm_queue_id = vmid * q_num_per_vf + + vf_num_per_port * q_num_per_vf * mac_cb->mac_id; + switch (mac_cb->dsaf_dev->dsaf_mode) { case DSAF_MODE_ENABLE_FIX: tmp_port = 0; @@ -193,7 +200,7 @@ static int hns_mac_get_inner_port_num(struct hns_mac_cb *mac_cb, case DSAF_MODE_DISABLE_6PORT_2VM: case DSAF_MODE_DISABLE_6PORT_4VM: case DSAF_MODE_DISABLE_6PORT_16VM: - tmp_port = vmid; + tmp_port = vm_queue_id; break; default: dev_err(mac_cb->dev, "dsaf mode invalid,%s mac%d!\n", diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h index 4cbdf14f5c16..d3a1f72ece0e 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h @@ -461,5 +461,7 @@ void hns_set_led_opt(struct hns_mac_cb *mac_cb); int hns_cpld_led_set_id(struct hns_mac_cb *mac_cb, enum hnae_led_state status); void hns_mac_set_promisc(struct hns_mac_cb *mac_cb, u8 en); +int hns_mac_get_inner_port_num(struct hns_mac_cb *mac_cb, + u8 vmid, u8 *port_num); #endif /* _HNS_DSAF_MAC_H */ -- cgit v1.2.3-59-g8ed1b From 2162a4a1ed8b61afed3b76137aff3ea031d9c6a6 Mon Sep 17 00:00:00 2001 From: Daode Huang Date: Thu, 29 Sep 2016 18:09:15 +0100 Subject: net: hns: bug fix about broadcast/multicast packets When the dsaf mode receives a broadcast packet, it will filter 
the packet by comparing the received queue number and destination queue number(get from forwarding table), if they are the same, the packet will be filtered. Otherwise, the packet will be loopback. So this patch select queue 0 to send broadcast and multicast packets. Signed-off-by: Daode Huang Reviewed-by: Yisen Zhuang Signed-off-by: Salil Mehta Signed-off-by: Doug Ledford --- drivers/net/ethernet/hisilicon/hns/hns_enet.c | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c index 09ed237f4b4c..5494e0e3fe8e 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c @@ -1597,6 +1597,21 @@ struct rtnl_link_stats64 *hns_nic_get_stats64(struct net_device *ndev, return stats; } +static u16 +hns_nic_select_queue(struct net_device *ndev, struct sk_buff *skb, + void *accel_priv, select_queue_fallback_t fallback) +{ + struct ethhdr *eth_hdr = (struct ethhdr *)skb->data; + struct hns_nic_priv *priv = netdev_priv(ndev); + + /* fix hardware broadcast/multicast packets queue loopback */ + if (!AE_IS_VER1(priv->enet_ver) && + is_multicast_ether_addr(eth_hdr->h_dest)) + return 0; + else + return fallback(ndev, skb); +} + static const struct net_device_ops hns_nic_netdev_ops = { .ndo_open = hns_nic_net_open, .ndo_stop = hns_nic_net_stop, @@ -1612,6 +1627,7 @@ static const struct net_device_ops hns_nic_netdev_ops = { .ndo_poll_controller = hns_nic_poll_controller, #endif .ndo_set_rx_mode = hns_nic_set_rx_mode, + .ndo_select_queue = hns_nic_select_queue, }; static void hns_nic_update_link_status(struct net_device *netdev) -- cgit v1.2.3-59-g8ed1b From 5d80b3e1cf9900009d4a69a59f1c6b6114b072f4 Mon Sep 17 00:00:00 2001 From: Daode Huang Date: Thu, 29 Sep 2016 18:09:16 +0100 Subject: net: hns: delete redundant broadcast packet filter process The broadcast packets is filtered in the hardware now, so this process is no need in the driver, just delete it. 
Signed-off-by: Daode Huang Reviewed-by: Yisen Zhuang Signed-off-by: Salil Mehta Signed-off-by: Doug Ledford --- drivers/net/ethernet/hisilicon/hns/hns_enet.c | 10 ---------- 1 file changed, 10 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c index 5494e0e3fe8e..a7abe118b5f0 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c @@ -574,7 +574,6 @@ static int hns_nic_poll_rx_skb(struct hns_nic_ring_data *ring_data, struct sk_buff *skb; struct hnae_desc *desc; struct hnae_desc_cb *desc_cb; - struct ethhdr *eh; unsigned char *va; int bnum, length, i; int pull_len; @@ -600,7 +599,6 @@ static int hns_nic_poll_rx_skb(struct hns_nic_ring_data *ring_data, ring->stats.sw_err_cnt++; return -ENOMEM; } - skb_reset_mac_header(skb); prefetchw(skb->data); length = le16_to_cpu(desc->rx.pkt_len); @@ -682,14 +680,6 @@ out_bnum_err: return -EFAULT; } - /* filter out multicast pkt with the same src mac as this port */ - eh = eth_hdr(skb); - if (unlikely(is_multicast_ether_addr(eh->h_dest) && - ether_addr_equal(ndev->dev_addr, eh->h_source))) { - dev_kfree_skb_any(skb); - return -EFAULT; - } - ring->stats.rx_pkts++; ring->stats.rx_bytes += skb->len; -- cgit v1.2.3-59-g8ed1b From 2eefca272275cb2eac048ddfa3f6fb30d9b24d5e Mon Sep 17 00:00:00 2001 From: Lijun Ou Date: Thu, 15 Sep 2016 23:48:06 +0100 Subject: IB/hns: Register HNS RoCE Driver get_netdev() with IB Core This patch adds get_netdev() function to the IB device. This shall be used to fetch netdev corresponding to the port number. This function would be called by IB core(Generic CM Agent) for example, when the RDMA connection is being established. Signed-off-by: Lijun Ou Reviewed-by: Wei Hu Signed-off-by: Salil Mehta Signed-off-by: Doug Ledford --- drivers/infiniband/hw/hns/hns_roce_main.c | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) (limited to 'drivers') diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c index f64f0dde9a88..39e69c31c5f2 100644 --- a/drivers/infiniband/hw/hns/hns_roce_main.c +++ b/drivers/infiniband/hw/hns/hns_roce_main.c @@ -372,6 +372,25 @@ static int hns_roce_query_device(struct ib_device *ib_dev, return 0; } +static struct net_device *hns_roce_get_netdev(struct ib_device *ib_dev, + u8 port_num) +{ + struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev); + struct net_device *ndev; + + if (port_num < 1 || port_num > hr_dev->caps.num_ports) + return NULL; + + rcu_read_lock(); + + ndev = hr_dev->iboe.netdevs[port_num - 1]; + if (ndev) + dev_hold(ndev); + + rcu_read_unlock(); + return ndev; +} + static int hns_roce_query_port(struct ib_device *ib_dev, u8 port_num, struct ib_port_attr *props) { @@ -618,6 +637,7 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev) ib_dev->query_port = hns_roce_query_port; ib_dev->modify_port = hns_roce_modify_port; ib_dev->get_link_layer = hns_roce_get_link_layer; + ib_dev->get_netdev = hns_roce_get_netdev; ib_dev->query_gid = hns_roce_query_gid; ib_dev->query_pkey = hns_roce_query_pkey; ib_dev->alloc_ucontext = hns_roce_alloc_ucontext; -- cgit v1.2.3-59-g8ed1b From 31644665d41140c3961c5213350759edb24d5081 Mon Sep 17 00:00:00 2001 From: Lijun Ou Date: Thu, 15 Sep 2016 23:48:07 +0100 Subject: IB/hns: Add & initialize "node_guid" parameter for RDMA CM According to the Infiniband spec, NodeGUID uniquely identifies a node. This must be initialized to some unique value. 
This patch adds the support to the HNS RoCE driver to fetch the NodeGUID value from DT or ACPI and then use this value to initialize the node_guid parameter of IB device. This value shall be used by RDMA CM. Signed-off-by: Lijun Ou Reviewed-by: Wei Hu Signed-off-by: Salil Mehta Signed-off-by: Doug Ledford --- drivers/infiniband/hw/hns/hns_roce_device.h | 1 + drivers/infiniband/hw/hns/hns_roce_main.c | 9 +++++++++ 2 files changed, 10 insertions(+) (limited to 'drivers') diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h index ea735800eb18..e943b9864aa2 100644 --- a/drivers/infiniband/hw/hns/hns_roce_device.h +++ b/drivers/infiniband/hw/hns/hns_roce_device.h @@ -74,6 +74,7 @@ #define MR_TYPE_DMA 0x03 #define PKEY_ID 0xffff +#define GUID_LEN 8 #define NODE_DESC_SIZE 64 #define SERV_TYPE_RC 0 diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c index 39e69c31c5f2..f0700f4ca2c3 100644 --- a/drivers/infiniband/hw/hns/hns_roce_main.c +++ b/drivers/infiniband/hw/hns/hns_roce_main.c @@ -797,6 +797,15 @@ static int hns_roce_get_cfg(struct hns_roce_dev *hr_dev) if (IS_ERR(hr_dev->reg_base)) return PTR_ERR(hr_dev->reg_base); + /* read the node_guid of IB device from the DT or ACPI */ + ret = device_property_read_u8_array(dev, "node-guid", + (u8 *)&hr_dev->ib_dev.node_guid, + GUID_LEN); + if (ret) { + dev_err(dev, "couldn't get node_guid from DT or ACPI!\n"); + return ret; + } + /* get the RoCE associated ethernet ports or netdevices */ for (i = 0; i < HNS_ROCE_MAX_PORTS; i++) { if (dev_of_node(dev)) { -- cgit v1.2.3-59-g8ed1b From a74aab6c2fdbd9f9501638f67ebedce49d9a8195 Mon Sep 17 00:00:00 2001 From: Lijun Ou Date: Thu, 15 Sep 2016 23:48:08 +0100 Subject: IB/hns: Fix the value of device_cap_flags In the latest IB core version, it has some known issues with memory registration using the local_dma_lkey. Thus RoCE don't expose support for it, and remove device->local_dma_lkey which is introduced to working systems. Signed-off-by: Lijun Ou Reviewed-by: Wei Hu Signed-off-by: Salil Mehta Signed-off-by: Doug Ledford --- drivers/infiniband/hw/hns/hns_roce_main.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c index f0700f4ca2c3..a9960ba70671 100644 --- a/drivers/infiniband/hw/hns/hns_roce_main.c +++ b/drivers/infiniband/hw/hns/hns_roce_main.c @@ -355,8 +355,7 @@ static int hns_roce_query_device(struct ib_device *ib_dev, props->max_qp = hr_dev->caps.num_qps; props->max_qp_wr = hr_dev->caps.max_wqes; props->device_cap_flags = IB_DEVICE_PORT_ACTIVE_EVENT | - IB_DEVICE_RC_RNR_NAK_GEN | - IB_DEVICE_LOCAL_DMA_LKEY; + IB_DEVICE_RC_RNR_NAK_GEN; props->max_sge = hr_dev->caps.max_sq_sg; props->max_sge_rd = 1; props->max_cq = hr_dev->caps.num_cqs; -- cgit v1.2.3-59-g8ed1b From c24bf895c56157e4d0fba7717ed565f2f3fc25aa Mon Sep 17 00:00:00 2001 From: Lijun Ou Date: Thu, 15 Sep 2016 23:48:09 +0100 Subject: IB/hns: Fix two possible bugs for rdma cm Fix the length of wqe that maybe lead to an error and write the end bytes of QP1C into the register. 
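The wqe-length bug is a plain C pointer-arithmetic mistake: adding sizeof(struct ...) to a pointer that has already been cast to that struct type advances by sizeof-squared bytes, because "+" on a typed pointer counts whole elements. A stand-alone illustration with a made-up 16-byte segment follows (the kernel's fixed form, "wqe += sizeof(...)", relies on GNU C's byte-sized void * arithmetic).

/* ptr_arith_sketch.c - why "(T *)p + sizeof(T)" overshoots. */
#include <stdio.h>
#include <stdint.h>

struct ctrl_seg { uint32_t w[4]; };        /* 16-byte stand-in segment */

int main(void)
{
    static struct ctrl_seg ring[32];       /* pretend WQE buffer */
    void *wqe = ring;

    /* Buggy form: on a struct pointer, "+" counts whole structs, so this
     * advances sizeof(struct) * sizeof(struct) = 256 bytes. */
    void *bad = (struct ctrl_seg *)wqe + sizeof(struct ctrl_seg);

    /* Fixed form: advance by bytes, as the patch does with "wqe +=". */
    void *good = (unsigned char *)wqe + sizeof(struct ctrl_seg);

    printf("buggy pointer moved %td bytes\n",
           (unsigned char *)bad - (unsigned char *)ring);
    printf("fixed pointer moved %td bytes\n",
           (unsigned char *)good - (unsigned char *)ring);
    return 0;
}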
Signed-off-by: Lijun Ou Reviewed-by: Wei Hu Signed-off-by: Salil Mehta Signed-off-by: Doug Ledford --- drivers/infiniband/hw/hns/hns_roce_hw_v1.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c index 399f5dedaf2d..aaea95c99f02 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c @@ -205,8 +205,7 @@ int hns_roce_v1_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, (wr->send_flags & IB_SEND_FENCE ? (cpu_to_le32(HNS_ROCE_WQE_FENCE)) : 0); - wqe = (struct hns_roce_wqe_ctrl_seg *)wqe + - sizeof(struct hns_roce_wqe_ctrl_seg); + wqe += sizeof(struct hns_roce_wqe_ctrl_seg); switch (wr->opcode) { case IB_WR_RDMA_READ: @@ -235,8 +234,7 @@ int hns_roce_v1_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, break; } ctrl->flag |= cpu_to_le32(ps_opcode); - wqe = (struct hns_roce_wqe_raddr_seg *)wqe + - sizeof(struct hns_roce_wqe_raddr_seg); + wqe += sizeof(struct hns_roce_wqe_raddr_seg); dseg = wqe; if (wr->send_flags & IB_SEND_INLINE && wr->num_sge) { @@ -253,8 +251,7 @@ int hns_roce_v1_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, memcpy(wqe, ((void *) (uintptr_t) wr->sg_list[i].addr), wr->sg_list[i].length); - wqe = (struct hns_roce_wqe_raddr_seg *) - wqe + wr->sg_list[i].length; + wqe += wr->sg_list[i].length; } ctrl->flag |= HNS_ROCE_WQE_INLINE; } else { @@ -1795,6 +1792,7 @@ static int hns_roce_v1_m_sqp(struct ib_qp *ibqp, const struct ib_qp_attr *attr, writel(context->qp1c_bytes_28, addr + 6); writel(context->qp1c_bytes_32, addr + 7); writel(context->cur_sq_wqe_ba_l, addr + 8); + writel(context->qp1c_bytes_40, addr + 9); } /* Modify QP1C status */ -- cgit v1.2.3-59-g8ed1b From 7716809efe5727d7fbf9cbf5689ef4162d999de5 Mon Sep 17 00:00:00 2001 From: Lijun Ou Date: Thu, 15 Sep 2016 23:48:10 +0100 Subject: IB/hns: Add phy_port for computing GSI/QPN This patch mainly adds phy_port to HNS RoCE QP. This shall be used in calculating the GSI QPN for the port. Initally when RDMA is being established, all IB ports share a QPN which later needs to be re-assigned to a particular GSI/QPN and which is per-port. This also fixes a bug in base driver where iboe port was being used instead of phy_port at some places. This values might not be same always. 
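A toy version of the port mapping may help: the table contents, base constant and port count below are invented, but the formula mirrors the GSI QPN assignment in the hunk (sqp_start + maximum ports + physical port), and the table stands in for iboe.phy_port[], which is why logical and physical indices must not be mixed.

/* gsi_qpn_sketch.c - illustration of the port -> phy_port -> GSI QPN
 * mapping; the table contents and constants are made-up examples. */
#include <stdio.h>

#define MAX_PORTS 6
#define SQP_START 64            /* hypothetical base of the special QPs */

/* Logical IB port (0-based here) to physical port: not necessarily the
 * identity mapping. */
static const int phy_port[MAX_PORTS] = { 0, 2, 3, 4, 5, 1 };

static int gsi_qpn(int ib_port)
{
    return SQP_START + MAX_PORTS + phy_port[ib_port];
}

int main(void)
{
    for (int p = 0; p < MAX_PORTS; p++)
        printf("ib port %d -> phy port %d -> GSI QPN %d\n",
               p, phy_port[p], gsi_qpn(p));
    return 0;
}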
Signed-off-by: Lijun Ou Reviewed-by: Wei Hu Signed-off-by: Salil Mehta Signed-off-by: Doug Ledford --- drivers/infiniband/hw/hns/hns_roce_device.h | 1 + drivers/infiniband/hw/hns/hns_roce_hw_v1.c | 30 +++++++++++++++-------------- drivers/infiniband/hw/hns/hns_roce_qp.c | 13 ++++++------- 3 files changed, 23 insertions(+), 21 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h index e943b9864aa2..833742b62567 100644 --- a/drivers/infiniband/hw/hns/hns_roce_device.h +++ b/drivers/infiniband/hw/hns/hns_roce_device.h @@ -409,6 +409,7 @@ struct hns_roce_qp { u32 buff_size; struct mutex mutex; u8 port; + u8 phy_port; u8 sl; u8 resp_depth; u8 state; diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c index aaea95c99f02..581e54246465 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c @@ -162,7 +162,7 @@ int hns_roce_v1_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, roce_set_field(ud_sq_wqe->u32_36, UD_SEND_WQE_U32_36_SGID_INDEX_M, UD_SEND_WQE_U32_36_SGID_INDEX_S, - hns_get_gid_index(hr_dev, qp->port, + hns_get_gid_index(hr_dev, qp->phy_port, ah->av.gid_index)); roce_set_field(ud_sq_wqe->u32_40, @@ -282,7 +282,7 @@ out: SQ_DOORBELL_U32_4_SQ_HEAD_S, (qp->sq.head & ((qp->sq.wqe_cnt << 1) - 1))); roce_set_field(sq_db.u32_4, SQ_DOORBELL_U32_4_PORT_M, - SQ_DOORBELL_U32_4_PORT_S, qp->port); + SQ_DOORBELL_U32_4_PORT_S, qp->phy_port); roce_set_field(sq_db.u32_8, SQ_DOORBELL_U32_8_QPN_M, SQ_DOORBELL_U32_8_QPN_S, qp->doorbell_qpn); roce_set_bit(sq_db.u32_8, SQ_DOORBELL_HW_SYNC_S, 1); @@ -362,14 +362,14 @@ out: /* SW update GSI rq header */ reg_val = roce_read(to_hr_dev(ibqp->device), ROCEE_QP1C_CFG3_0_REG + - QP1C_CFGN_OFFSET * hr_qp->port); + QP1C_CFGN_OFFSET * hr_qp->phy_port); roce_set_field(reg_val, ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_M, ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_S, hr_qp->rq.head); roce_write(to_hr_dev(ibqp->device), ROCEE_QP1C_CFG3_0_REG + - QP1C_CFGN_OFFSET * hr_qp->port, reg_val); + QP1C_CFGN_OFFSET * hr_qp->phy_port, reg_val); } else { rq_db.u32_4 = 0; rq_db.u32_8 = 0; @@ -1730,7 +1730,7 @@ static int hns_roce_v1_m_sqp(struct ib_qp *ibqp, const struct ib_qp_attr *attr, roce_set_field(context->qp1c_bytes_16, QP1C_BYTES_16_RQ_HEAD_M, QP1C_BYTES_16_RQ_HEAD_S, hr_qp->rq.head); roce_set_field(context->qp1c_bytes_16, QP1C_BYTES_16_PORT_NUM_M, - QP1C_BYTES_16_PORT_NUM_S, hr_qp->port); + QP1C_BYTES_16_PORT_NUM_S, hr_qp->phy_port); roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_SIGNALING_TYPE_S, hr_qp->sq_signal_bits); @@ -1781,7 +1781,7 @@ static int hns_roce_v1_m_sqp(struct ib_qp *ibqp, const struct ib_qp_attr *attr, /* Copy context to QP1C register */ addr = (u32 *)(hr_dev->reg_base + ROCEE_QP1C_CFG0_0_REG + - hr_qp->port * sizeof(*context)); + hr_qp->phy_port * sizeof(*context)); writel(context->qp1c_bytes_4, addr); writel(context->sq_rq_bt_l, addr + 1); @@ -1797,11 +1797,11 @@ static int hns_roce_v1_m_sqp(struct ib_qp *ibqp, const struct ib_qp_attr *attr, /* Modify QP1C status */ reg_val = roce_read(hr_dev, ROCEE_QP1C_CFG0_0_REG + - hr_qp->port * sizeof(*context)); + hr_qp->phy_port * sizeof(*context)); roce_set_field(reg_val, ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_M, ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_S, new_state); roce_write(hr_dev, ROCEE_QP1C_CFG0_0_REG + - hr_qp->port * sizeof(*context), reg_val); + hr_qp->phy_port * sizeof(*context), reg_val); hr_qp->state = new_state; if (new_state == 
IB_QPS_RESET) { @@ -2184,7 +2184,7 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr, roce_set_field(context->qpc_bytes_156, QP_CONTEXT_QPC_BYTES_156_PORT_NUM_M, QP_CONTEXT_QPC_BYTES_156_PORT_NUM_S, - hr_qp->port); + hr_qp->phy_port); roce_set_field(context->qpc_bytes_156, QP_CONTEXT_QPC_BYTES_156_SL_M, QP_CONTEXT_QPC_BYTES_156_SL_S, attr->ah_attr.sl); @@ -2290,7 +2290,7 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr, roce_set_field(context->qpc_bytes_156, QP_CONTEXT_QPC_BYTES_156_PORT_NUM_M, QP_CONTEXT_QPC_BYTES_156_PORT_NUM_S, - hr_qp->port); + hr_qp->phy_port); roce_set_field(context->qpc_bytes_156, QP_CONTEXT_QPC_BYTES_156_SL_M, QP_CONTEXT_QPC_BYTES_156_SL_S, attr->ah_attr.sl); @@ -2398,13 +2398,13 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr, if (hr_qp->ibqp.qp_type == IB_QPT_GSI) { /* SW update GSI rq header */ reg_val = roce_read(hr_dev, ROCEE_QP1C_CFG3_0_REG + - QP1C_CFGN_OFFSET * hr_qp->port); + QP1C_CFGN_OFFSET * hr_qp->phy_port); roce_set_field(reg_val, ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_M, ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_S, hr_qp->rq.head); roce_write(hr_dev, ROCEE_QP1C_CFG3_0_REG + - QP1C_CFGN_OFFSET * hr_qp->port, reg_val); + QP1C_CFGN_OFFSET * hr_qp->phy_port, reg_val); } else { rq_db.u32_4 = 0; rq_db.u32_8 = 0; @@ -2430,8 +2430,10 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr, if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) hr_qp->resp_depth = attr->max_dest_rd_atomic; - if (attr_mask & IB_QP_PORT) - hr_qp->port = (attr->port_num - 1); + if (attr_mask & IB_QP_PORT) { + hr_qp->port = attr->port_num - 1; + hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port]; + } if (new_state == IB_QPS_RESET && !ibqp->uobject) { hns_roce_v1_cq_clean(to_hr_cq(ibqp->recv_cq), hr_qp->qpn, diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c index 645c18d809a5..089da7f39ed0 100644 --- a/drivers/infiniband/hw/hns/hns_roce_qp.c +++ b/drivers/infiniband/hw/hns/hns_roce_qp.c @@ -617,21 +617,20 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd, return ERR_PTR(-ENOMEM); hr_qp = &hr_sqp->hr_qp; + hr_qp->port = init_attr->port_num - 1; + hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port]; + hr_qp->ibqp.qp_num = hr_dev->caps.sqp_start + + HNS_ROCE_MAX_PORTS + + hr_dev->iboe.phy_port[hr_qp->port]; ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata, - hr_dev->caps.sqp_start + - hr_dev->caps.num_ports + - init_attr->port_num - 1, hr_qp); + hr_qp->ibqp.qp_num, hr_qp); if (ret) { dev_err(dev, "Create GSI QP failed!\n"); kfree(hr_sqp); return ERR_PTR(ret); } - hr_qp->port = (init_attr->port_num - 1); - hr_qp->ibqp.qp_num = hr_dev->caps.sqp_start + - hr_dev->caps.num_ports + - init_attr->port_num - 1; break; } default:{ -- cgit v1.2.3-59-g8ed1b From b280db52cc71a0cb135c2ab9adc0592af6440812 Mon Sep 17 00:00:00 2001 From: Lijun Ou Date: Thu, 15 Sep 2016 23:48:11 +0100 Subject: IB/hns: Change the logic for allocating uar registers This patch mainly modifies the logic for allocating uar registers. In HiP06 SoC, HW has 8 group of uar registers for kernel and user space application. 
The uar index is assigned as follows: 0 ------ for kernel 1~7 ------ for user space application Signed-off-by: Lijun Ou Reviewed-by: Wei Hu Signed-off-by: Salil Mehta Signed-off-by: Doug Ledford --- drivers/infiniband/hw/hns/hns_roce_pd.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/infiniband/hw/hns/hns_roce_pd.c b/drivers/infiniband/hw/hns/hns_roce_pd.c index 16271b5bd170..4109f74a4ceb 100644 --- a/drivers/infiniband/hw/hns/hns_roce_pd.c +++ b/drivers/infiniband/hw/hns/hns_roce_pd.c @@ -117,7 +117,9 @@ int hns_roce_uar_alloc(struct hns_roce_dev *hr_dev, struct hns_roce_uar *uar) if (ret == -1) return -ENOMEM; - uar->index = (uar->index - 1) % hr_dev->caps.phy_num_uars + 1; + if (uar->index > 0) + uar->index = (uar->index - 1) % + (hr_dev->caps.phy_num_uars - 1) + 1; res = platform_get_resource(hr_dev->pdev, IORESOURCE_MEM, 0); uar->pfn = ((res->start) >> PAGE_SHIFT) + uar->index; -- cgit v1.2.3-59-g8ed1b From 509bf0c2da99f0fc22c841de1fa3d813e4932a9f Mon Sep 17 00:00:00 2001 From: Lijun Ou Date: Thu, 15 Sep 2016 23:48:12 +0100 Subject: IB/hns: Fix the bug of rdma cm connecting on user mode Fix bug of modify qp from init to init on user mode. Otherwise, it will oops when rmda cm established. Signed-off-by: Lijun Ou Reviewed-by: Wei Hu Signed-off-by: Salil Mehta Signed-off-by: Doug Ledford --- drivers/infiniband/hw/hns/hns_roce_device.h | 1 + drivers/infiniband/hw/hns/hns_roce_hw_v1.c | 43 ++++++++++------------------- drivers/infiniband/hw/hns/hns_roce_qp.c | 1 - 3 files changed, 15 insertions(+), 30 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h index 833742b62567..e8d273408fe1 100644 --- a/drivers/infiniband/hw/hns/hns_roce_device.h +++ b/drivers/infiniband/hw/hns/hns_roce_device.h @@ -76,6 +76,7 @@ #define PKEY_ID 0xffff #define GUID_LEN 8 #define NODE_DESC_SIZE 64 +#define DB_REG_OFFSET 0x1000 #define SERV_TYPE_RC 0 #define SERV_TYPE_RD 1 diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c index 581e54246465..110efd3aa7cb 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c @@ -1834,12 +1834,10 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr, struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); struct device *dev = &hr_dev->pdev->dev; struct hns_roce_qp_context *context; - struct hns_roce_rq_db rq_db; dma_addr_t dma_handle_2 = 0; dma_addr_t dma_handle = 0; uint32_t doorbell[2] = {0}; int rq_pa_start = 0; - u32 reg_val = 0; u64 *mtts_2 = NULL; int ret = -EINVAL; u64 *mtts = NULL; @@ -2395,35 +2393,22 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr, if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) { /* Memory barrier */ wmb(); - if (hr_qp->ibqp.qp_type == IB_QPT_GSI) { - /* SW update GSI rq header */ - reg_val = roce_read(hr_dev, ROCEE_QP1C_CFG3_0_REG + - QP1C_CFGN_OFFSET * hr_qp->phy_port); - roce_set_field(reg_val, - ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_M, - ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_S, - hr_qp->rq.head); - roce_write(hr_dev, ROCEE_QP1C_CFG3_0_REG + - QP1C_CFGN_OFFSET * hr_qp->phy_port, reg_val); - } else { - rq_db.u32_4 = 0; - rq_db.u32_8 = 0; - roce_set_field(rq_db.u32_4, RQ_DOORBELL_U32_4_RQ_HEAD_M, - RQ_DOORBELL_U32_4_RQ_HEAD_S, - hr_qp->rq.head); - roce_set_field(rq_db.u32_8, RQ_DOORBELL_U32_8_QPN_M, - RQ_DOORBELL_U32_8_QPN_S, hr_qp->qpn); - 
roce_set_field(rq_db.u32_8, RQ_DOORBELL_U32_8_CMD_M, - RQ_DOORBELL_U32_8_CMD_S, 1); - roce_set_bit(rq_db.u32_8, RQ_DOORBELL_U32_8_HW_SYNC_S, - 1); - - doorbell[0] = rq_db.u32_4; - doorbell[1] = rq_db.u32_8; - - hns_roce_write64_k(doorbell, hr_qp->rq.db_reg_l); + roce_set_field(doorbell[0], RQ_DOORBELL_U32_4_RQ_HEAD_M, + RQ_DOORBELL_U32_4_RQ_HEAD_S, hr_qp->rq.head); + roce_set_field(doorbell[1], RQ_DOORBELL_U32_8_QPN_M, + RQ_DOORBELL_U32_8_QPN_S, hr_qp->qpn); + roce_set_field(doorbell[1], RQ_DOORBELL_U32_8_CMD_M, + RQ_DOORBELL_U32_8_CMD_S, 1); + roce_set_bit(doorbell[1], RQ_DOORBELL_U32_8_HW_SYNC_S, 1); + + if (ibqp->uobject) { + hr_qp->rq.db_reg_l = hr_dev->reg_base + + ROCEE_DB_OTHERS_L_0_REG + + DB_REG_OFFSET * hr_dev->priv_uar.index; } + + hns_roce_write64_k(doorbell, hr_qp->rq.db_reg_l); } hr_qp->state = new_state; diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c index 089da7f39ed0..dd94e4156a59 100644 --- a/drivers/infiniband/hw/hns/hns_roce_qp.c +++ b/drivers/infiniband/hw/hns/hns_roce_qp.c @@ -38,7 +38,6 @@ #include "hns_roce_hem.h" #include "hns_roce_user.h" -#define DB_REG_OFFSET 0x1000 #define SQP_NUM 12 void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type) -- cgit v1.2.3-59-g8ed1b From ac11125bfd63a87d29838d0a1cd66b92c1997c03 Mon Sep 17 00:00:00 2001 From: Lijun Ou Date: Thu, 15 Sep 2016 23:48:13 +0100 Subject: IB/hns: Fix two bugs for rdma cm connecting This patch mainly modify the value of HNS_ROCE_SL_SHIFT and delete the lines for assigning for the field of local_enable_e2e_credit in QP1C. Signed-off-by: Lijun Ou Reviewed-by: Wei Hu Signed-off-by: Salil Mehta Signed-off-by: Doug Ledford --- drivers/infiniband/hw/hns/hns_roce_device.h | 2 +- drivers/infiniband/hw/hns/hns_roce_hw_v1.c | 3 --- 2 files changed, 1 insertion(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h index e8d273408fe1..4d02bcfdf294 100644 --- a/drivers/infiniband/hw/hns/hns_roce_device.h +++ b/drivers/infiniband/hw/hns/hns_roce_device.h @@ -62,7 +62,7 @@ #define HNS_ROCE_AEQE_OF_VEC_NUM 1 /* 4G/4K = 1M */ -#define HNS_ROCE_SL_SHIFT 29 +#define HNS_ROCE_SL_SHIFT 28 #define HNS_ROCE_TCLASS_SHIFT 20 #define HNS_ROCE_FLOW_LABLE_MASK 0xfffff diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c index 110efd3aa7cb..5ffdf7a3e6e3 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c @@ -1734,9 +1734,6 @@ static int hns_roce_v1_m_sqp(struct ib_qp *ibqp, const struct ib_qp_attr *attr, roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_SIGNALING_TYPE_S, hr_qp->sq_signal_bits); - roce_set_bit(context->qp1c_bytes_16, - QP1C_BYTES_16_LOCAL_ENABLE_E2E_CREDIT_S, - hr_qp->sq_signal_bits); roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_RQ_BA_FLG_S, 1); roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_SQ_BA_FLG_S, -- cgit v1.2.3-59-g8ed1b From a4be892e834e8a5a38279c2ebca747b5c7a68f75 Mon Sep 17 00:00:00 2001 From: Lijun Ou Date: Tue, 20 Sep 2016 17:06:54 +0100 Subject: IB/hns: Remove unused parameters in some functions The parameter named collapsed unused in hns_roce_cq_alloc. Also, parameter named doorbell_lock unsed in hns_roce_v1_cq_set_ci. This patch optimize these parameters. 
Signed-off-by: Lijun Ou Reviewed-by: Wei Hu Signed-off-by: Salil Mehta Signed-off-by: Doug Ledford --- drivers/infiniband/hw/hns/hns_roce_cq.c | 7 +++---- drivers/infiniband/hw/hns/hns_roce_device.h | 1 - drivers/infiniband/hw/hns/hns_roce_hw_v1.c | 13 ++++--------- drivers/infiniband/hw/hns/hns_roce_main.c | 1 - 4 files changed, 7 insertions(+), 15 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c index 875597b0e69c..10fd2093ca18 100644 --- a/drivers/infiniband/hw/hns/hns_roce_cq.c +++ b/drivers/infiniband/hw/hns/hns_roce_cq.c @@ -83,8 +83,7 @@ static int hns_roce_sw2hw_cq(struct hns_roce_dev *dev, static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent, struct hns_roce_mtt *hr_mtt, struct hns_roce_uar *hr_uar, - struct hns_roce_cq *hr_cq, int vector, - int collapsed) + struct hns_roce_cq *hr_cq, int vector) { struct hns_roce_cmd_mailbox *mailbox = NULL; struct hns_roce_cq_table *cq_table = NULL; @@ -338,8 +337,8 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev, } /* Allocate cq index, fill cq_context */ - ret = hns_roce_cq_alloc(hr_dev, cq_entries, &hr_cq->hr_buf.hr_mtt, - uar, hr_cq, vector, 0); + ret = hns_roce_cq_alloc(hr_dev, cq_entries, &hr_cq->hr_buf.hr_mtt, uar, + hr_cq, vector); if (ret) { dev_err(dev, "Creat CQ .Failed to cq_alloc.\n"); goto err_mtt; diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h index 4d02bcfdf294..db974e400818 100644 --- a/drivers/infiniband/hw/hns/hns_roce_device.h +++ b/drivers/infiniband/hw/hns/hns_roce_device.h @@ -536,7 +536,6 @@ struct hns_roce_dev { struct hns_roce_uar priv_uar; const char *irq_names[HNS_ROCE_MAX_IRQ_NUM]; spinlock_t sm_lock; - spinlock_t cq_db_lock; spinlock_t bt_cmd_lock; struct hns_roce_ib_iboe iboe; diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c index 5ffdf7a3e6e3..48c08627faf1 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c @@ -1189,9 +1189,7 @@ static struct hns_roce_cqe *next_cqe_sw(struct hns_roce_cq *hr_cq) return get_sw_cqe(hr_cq, hr_cq->cons_index); } -void hns_roce_v1_cq_set_ci(struct hns_roce_cq *hr_cq, u32 cons_index, - spinlock_t *doorbell_lock) - +void hns_roce_v1_cq_set_ci(struct hns_roce_cq *hr_cq, u32 cons_index) { u32 doorbell[2]; @@ -1251,8 +1249,7 @@ static void __hns_roce_v1_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn, */ wmb(); - hns_roce_v1_cq_set_ci(hr_cq, hr_cq->cons_index, - &to_hr_dev(hr_cq->ib_cq.device)->cq_db_lock); + hns_roce_v1_cq_set_ci(hr_cq, hr_cq->cons_index); } } @@ -1588,10 +1585,8 @@ int hns_roce_v1_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) break; } - if (npolled) { - hns_roce_v1_cq_set_ci(hr_cq, hr_cq->cons_index, - &to_hr_dev(ibcq->device)->cq_db_lock); - } + if (npolled) + hns_roce_v1_cq_set_ci(hr_cq, hr_cq->cons_index); spin_unlock_irqrestore(&hr_cq->lock, flags); diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c index a9960ba70671..4b44998a039e 100644 --- a/drivers/infiniband/hw/hns/hns_roce_main.c +++ b/drivers/infiniband/hw/hns/hns_roce_main.c @@ -951,7 +951,6 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev) struct device *dev = &hr_dev->pdev->dev; spin_lock_init(&hr_dev->sm_lock); - spin_lock_init(&hr_dev->cq_db_lock); spin_lock_init(&hr_dev->bt_cmd_lock); ret = hns_roce_init_uar_table(hr_dev); -- cgit v1.2.3-59-g8ed1b From 
622b85f9f7fbe1255649894d03e9e077b1a97e6c Mon Sep 17 00:00:00 2001 From: Lijun Ou Date: Tue, 20 Sep 2016 17:06:55 +0100 Subject: IB/hns: Remove parameters of resize cq In old version of RoCE, it doesn't support to resize cq. So, we remove parameters related to resize cq. Signed-off-by: Lijun Ou Signed-off-by: Dongdong Huang(Donald) Reviewed-by: Wei Hu Signed-off-by: Salil Mehta Signed-off-by: Doug Ledford --- drivers/infiniband/hw/hns/hns_roce_cq.c | 3 --- drivers/infiniband/hw/hns/hns_roce_device.h | 9 --------- 2 files changed, 12 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c index 10fd2093ca18..a52306f9c1de 100644 --- a/drivers/infiniband/hw/hns/hns_roce_cq.c +++ b/drivers/infiniband/hw/hns/hns_roce_cq.c @@ -299,10 +299,7 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev, cq_entries = roundup_pow_of_two((unsigned int)cq_entries); hr_cq->ib_cq.cqe = cq_entries - 1; - mutex_init(&hr_cq->resize_mutex); spin_lock_init(&hr_cq->lock); - hr_cq->hr_resize_buf = NULL; - hr_cq->resize_umem = NULL; if (context) { if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) { diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h index db974e400818..15bc229041c7 100644 --- a/drivers/infiniband/hw/hns/hns_roce_device.h +++ b/drivers/infiniband/hw/hns/hns_roce_device.h @@ -284,20 +284,11 @@ struct hns_roce_cq_buf { struct hns_roce_mtt hr_mtt; }; -struct hns_roce_cq_resize { - struct hns_roce_cq_buf hr_buf; - int cqe; -}; - struct hns_roce_cq { struct ib_cq ib_cq; struct hns_roce_cq_buf hr_buf; - /* pointer to store information after resize*/ - struct hns_roce_cq_resize *hr_resize_buf; spinlock_t lock; - struct mutex resize_mutex; struct ib_umem *umem; - struct ib_umem *resize_umem; void (*comp)(struct hns_roce_cq *); void (*event)(struct hns_roce_cq *, enum hns_roce_event); -- cgit v1.2.3-59-g8ed1b From ed3e6d01138eb26634abb519bc66e0b0dc473d0e Mon Sep 17 00:00:00 2001 From: Lijun Ou Date: Tue, 20 Sep 2016 17:06:56 +0100 Subject: IB/hns: Fix bug of using uninit refcount and free In current version, it uses uninitialized parameters named refcount and free in hns_roce_cq_event. This patch initializes these parameter in cq alloc and add correspond process in cq free. 
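The refcount-plus-completion scheme being introduced is a common kernel teardown idiom: the object starts with one reference owned by its creator, users take and drop references, and destroy drops the initial reference and then waits until the last user signals completion. Below is a single-threaded, user-space rendition using C11 atomics; the completion is reduced to a boolean and all names are stand-ins.

/* cq_refcount_sketch.c - the "initial reference + completion" teardown
 * pattern, shown single-threaded with C11 atomics. */
#include <stdio.h>
#include <stdatomic.h>
#include <stdbool.h>

struct cq {
    atomic_int refcount;
    bool freed;                 /* stands in for the struct completion */
};

static void cq_get(struct cq *cq) { atomic_fetch_add(&cq->refcount, 1); }

static void cq_put(struct cq *cq)
{
    /* "dec and test": true only for the reference that reaches zero. */
    if (atomic_fetch_sub(&cq->refcount, 1) == 1)
        cq->freed = true;       /* complete(&cq->free) in the driver */
}

int main(void)
{
    struct cq cq = { .refcount = 1, .freed = false };  /* alloc: count = 1 */

    cq_get(&cq);                /* an event handler starts using the CQ */
    cq_put(&cq);                /* ... and finishes                     */

    cq_put(&cq);                /* destroy drops the initial reference  */
    printf("safe to free: %s\n", cq.freed ? "yes" : "not yet");
    return 0;
}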
Signed-off-by: Lijun Ou Signed-off-by: Dongdong Huang(Donald) Reviewed-by: Wei Hu Signed-off-by: Salil Mehta Signed-off-by: Doug Ledford --- drivers/infiniband/hw/hns/hns_roce_cq.c | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'drivers') diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c index a52306f9c1de..3095f06099e6 100644 --- a/drivers/infiniband/hw/hns/hns_roce_cq.c +++ b/drivers/infiniband/hw/hns/hns_roce_cq.c @@ -152,6 +152,9 @@ static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent, hr_cq->cons_index = 0; hr_cq->uar = hr_uar; + atomic_set(&hr_cq->refcount, 1); + init_completion(&hr_cq->free); + return 0; err_radix: @@ -191,6 +194,11 @@ static void hns_roce_free_cq(struct hns_roce_dev *hr_dev, /* Waiting interrupt process procedure carried out */ synchronize_irq(hr_dev->eq_table.eq[hr_cq->vector].irq); + /* wait for all interrupt processed */ + if (atomic_dec_and_test(&hr_cq->refcount)) + complete(&hr_cq->free); + wait_for_completion(&hr_cq->free); + spin_lock_irq(&cq_table->lock); radix_tree_delete(&cq_table->tree, hr_cq->cqn); spin_unlock_irq(&cq_table->lock); -- cgit v1.2.3-59-g8ed1b From a598c6f4c5a838f3141a739aa0f6799c5bf0afa9 Mon Sep 17 00:00:00 2001 From: Lijun Ou Date: Tue, 20 Sep 2016 17:06:57 +0100 Subject: IB/hns: Simplify function of pd alloc and qp alloc Hns_roce_pd_alloc and hns_roce_reserve_range_qp use unnecessary transformation of parameters. This patch simplify these two functions. Signed-off-by: Lijun Ou Signed-off-by: Dongdong Huang(Donald) Reviewed-by: Wei Hu Signed-off-by: Salil Mehta Signed-off-by: Doug Ledford --- drivers/infiniband/hw/hns/hns_roce_pd.c | 14 +------------- drivers/infiniband/hw/hns/hns_roce_qp.c | 10 +--------- 2 files changed, 2 insertions(+), 22 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/hw/hns/hns_roce_pd.c b/drivers/infiniband/hw/hns/hns_roce_pd.c index 4109f74a4ceb..0cd6132b75f0 100644 --- a/drivers/infiniband/hw/hns/hns_roce_pd.c +++ b/drivers/infiniband/hw/hns/hns_roce_pd.c @@ -35,19 +35,7 @@ static int hns_roce_pd_alloc(struct hns_roce_dev *hr_dev, unsigned long *pdn) { - struct device *dev = &hr_dev->pdev->dev; - unsigned long pd_number; - int ret = 0; - - ret = hns_roce_bitmap_alloc(&hr_dev->pd_bitmap, &pd_number); - if (ret == -1) { - dev_err(dev, "alloc pdn from pdbitmap failed\n"); - return -ENOMEM; - } - - *pdn = pd_number; - - return 0; + return hns_roce_bitmap_alloc(&hr_dev->pd_bitmap, pdn); } static void hns_roce_pd_free(struct hns_roce_dev *hr_dev, unsigned long pdn) diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c index dd94e4156a59..73de1d3dbb32 100644 --- a/drivers/infiniband/hw/hns/hns_roce_qp.c +++ b/drivers/infiniband/hw/hns/hns_roce_qp.c @@ -112,16 +112,8 @@ static int hns_roce_reserve_range_qp(struct hns_roce_dev *hr_dev, int cnt, int align, unsigned long *base) { struct hns_roce_qp_table *qp_table = &hr_dev->qp_table; - int ret = 0; - unsigned long qpn; - - ret = hns_roce_bitmap_alloc_range(&qp_table->bitmap, cnt, align, &qpn); - if (ret == -1) - return -ENOMEM; - *base = qpn; - - return 0; + return hns_roce_bitmap_alloc_range(&qp_table->bitmap, cnt, align, base); } enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state) -- cgit v1.2.3-59-g8ed1b From 76445703e54326f9039d6302b78ad6e8dab17777 Mon Sep 17 00:00:00 2001 From: Lijun Ou Date: Tue, 20 Sep 2016 17:06:58 +0100 Subject: IB/hns: Remove unused parameter named qp_type This patch removes the qp_type parameter in 
hns_roce_set_kernel_sq_size(). Signed-off-by: Lijun Ou Signed-off-by: Ping Zhang Reviewed-by: Wei Hu Signed-off-by: Salil Mehta Signed-off-by: Doug Ledford --- drivers/infiniband/hw/hns/hns_roce_qp.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c index 73de1d3dbb32..177f48f081ab 100644 --- a/drivers/infiniband/hw/hns/hns_roce_qp.c +++ b/drivers/infiniband/hw/hns/hns_roce_qp.c @@ -336,12 +336,10 @@ static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev, static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap, - enum ib_qp_type type, struct hns_roce_qp *hr_qp) { struct device *dev = &hr_dev->pdev->dev; u32 max_cnt; - (void)type; if (cap->max_send_wr > hr_dev->caps.max_wqes || cap->max_send_sge > hr_dev->caps.max_sq_sg || @@ -467,7 +465,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, /* Set SQ size */ ret = hns_roce_set_kernel_sq_size(hr_dev, &init_attr->cap, - init_attr->qp_type, hr_qp); + hr_qp); if (ret) { dev_err(dev, "hns_roce_set_kernel_sq_size error!\n"); goto err_out; -- cgit v1.2.3-59-g8ed1b From 97f0e39fa51406e7d73b37e635c04c85829ce9ab Mon Sep 17 00:00:00 2001 From: "Wei Hu (Xavier)" Date: Tue, 20 Sep 2016 17:06:59 +0100 Subject: IB/hns: Fix bug of clear hem In hip06 there is no interface for releasing HEM memory, so the hardware cannot tell whether a HEM chunk has been released. Once every context in a chunk has been freed, the driver releases that memory and it may be reused by others, yet the hardware still treats it as valid. To fix this, the hns roce driver reserves 128K of memory for each HEM type (QPC/CQC/MTPT); when unmapping HEM memory, the driver writes the base address of the reserved area for that type instead.
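The idea behind the reserved buffers can be sketched without any hardware: keep one never-freed buffer per table type and repoint the device at it whenever a real chunk is released, so the device never holds a base address that the kernel has recycled. The simulation below uses an ordinary array in place of the BT registers and a much smaller reserved size than the driver's HNS_ROCE_BT_RSV_BUF_SIZE.

/* clear_hem_sketch.c - illustration of the "park the hardware on a
 * reserved buffer" idea; the hardware table is simulated with an array. */
#include <stdio.h>
#include <stdlib.h>

#define RSV_SIZE 4096            /* stands in for the 128K reserved area */

static void *reserved_qpc;       /* allocated once at init, never freed  */
static void *hw_base[8];         /* simulated per-object base addresses  */

static void map_hem(int obj, size_t size)
{
    hw_base[obj] = malloc(size);          /* give the "hardware" real memory */
}

static void clear_hem(int obj)
{
    /* The real device has no un-map command, so instead of leaving a
     * dangling base address behind, repoint the entry at the reserved
     * buffer before the backing memory is released and reused. */
    void *old = hw_base[obj];
    hw_base[obj] = reserved_qpc;
    free(old);
}

int main(void)
{
    reserved_qpc = malloc(RSV_SIZE);

    map_hem(0, 1 << 17);
    clear_hem(0);
    printf("entry 0 now points at the reserved buffer: %s\n",
           hw_base[0] == reserved_qpc ? "yes" : "no");

    free(reserved_qpc);
    return 0;
}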
Signed-off-by: Wei Hu (Xavier) Signed-off-by: Dongdong Huang(Donald) Signed-off-by: Salil Mehta Signed-off-by: Doug Ledford --- drivers/infiniband/hw/hns/hns_roce_device.h | 2 + drivers/infiniband/hw/hns/hns_roce_hem.c | 76 +-------------- drivers/infiniband/hw/hns/hns_roce_hem.h | 4 + drivers/infiniband/hw/hns/hns_roce_hw_v1.c | 140 ++++++++++++++++++++++++++++ drivers/infiniband/hw/hns/hns_roce_hw_v1.h | 9 ++ 5 files changed, 157 insertions(+), 74 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h index 15bc229041c7..3058599d0404 100644 --- a/drivers/infiniband/hw/hns/hns_roce_device.h +++ b/drivers/infiniband/hw/hns/hns_roce_device.h @@ -506,6 +506,8 @@ struct hns_roce_hw { void (*write_cqc)(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq, void *mb_buf, u64 *mtts, dma_addr_t dma_handle, int nent, u32 vector); + int (*clear_hem)(struct hns_roce_dev *hr_dev, + struct hns_roce_hem_table *table, int obj); int (*query_qp)(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr); int (*modify_qp)(struct ib_qp *ibqp, const struct ib_qp_attr *attr, diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c index d53d64362389..250d8f280390 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hem.c +++ b/drivers/infiniband/hw/hns/hns_roce_hem.c @@ -36,14 +36,10 @@ #include "hns_roce_hem.h" #include "hns_roce_common.h" -#define HW_SYNC_TIMEOUT_MSECS 500 -#define HW_SYNC_SLEEP_TIME_INTERVAL 20 - #define HNS_ROCE_HEM_ALLOC_SIZE (1 << 17) #define HNS_ROCE_TABLE_CHUNK_SIZE (1 << 17) #define DMA_ADDR_T_SHIFT 12 -#define BT_CMD_SYNC_SHIFT 31 #define BT_BA_SHIFT 32 struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev, int npages, @@ -213,74 +209,6 @@ static int hns_roce_set_hem(struct hns_roce_dev *hr_dev, return ret; } -static int hns_roce_clear_hem(struct hns_roce_dev *hr_dev, - struct hns_roce_hem_table *table, - unsigned long obj) -{ - struct device *dev = &hr_dev->pdev->dev; - unsigned long end = 0; - unsigned long flags; - void __iomem *bt_cmd; - uint32_t bt_cmd_val[2]; - u32 bt_cmd_h_val = 0; - int ret = 0; - - switch (table->type) { - case HEM_TYPE_QPC: - roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M, - ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_QPC); - break; - case HEM_TYPE_MTPT: - roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M, - ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, - HEM_TYPE_MTPT); - break; - case HEM_TYPE_CQC: - roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M, - ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_CQC); - break; - case HEM_TYPE_SRQC: - roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M, - ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, - HEM_TYPE_SRQC); - break; - default: - return ret; - } - roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_M, - ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S, obj); - roce_set_bit(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_S, 0); - roce_set_bit(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_HW_SYNS_S, 1); - roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_M, - ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S, 0); - - spin_lock_irqsave(&hr_dev->bt_cmd_lock, flags); - - bt_cmd = hr_dev->reg_base + ROCEE_BT_CMD_H_REG; - - end = msecs_to_jiffies(HW_SYNC_TIMEOUT_MSECS) + jiffies; - while (1) { - if (readl(bt_cmd) >> BT_CMD_SYNC_SHIFT) { - if (!(time_before(jiffies, end))) { - dev_err(dev, "Write bt_cmd err,hw_sync is not 
zero.\n"); - spin_unlock_irqrestore(&hr_dev->bt_cmd_lock, - flags); - return -EBUSY; - } - } else { - break; - } - msleep(HW_SYNC_SLEEP_TIME_INTERVAL); - } - - bt_cmd_val[0] = 0; - bt_cmd_val[1] = bt_cmd_h_val; - hns_roce_write64_k(bt_cmd_val, hr_dev->reg_base + ROCEE_BT_CMD_L_REG); - spin_unlock_irqrestore(&hr_dev->bt_cmd_lock, flags); - - return ret; -} - int hns_roce_table_get(struct hns_roce_dev *hr_dev, struct hns_roce_hem_table *table, unsigned long obj) { @@ -333,7 +261,7 @@ void hns_roce_table_put(struct hns_roce_dev *hr_dev, if (--table->hem[i]->refcount == 0) { /* Clear HEM base address */ - if (hns_roce_clear_hem(hr_dev, table, obj)) + if (hr_dev->hw->clear_hem(hr_dev, table, obj)) dev_warn(dev, "Clear HEM base address failed.\n"); hns_roce_free_hem(hr_dev, table->hem[i]); @@ -456,7 +384,7 @@ void hns_roce_cleanup_hem_table(struct hns_roce_dev *hr_dev, for (i = 0; i < table->num_hem; ++i) if (table->hem[i]) { - if (hns_roce_clear_hem(hr_dev, table, + if (hr_dev->hw->clear_hem(hr_dev, table, i * HNS_ROCE_TABLE_CHUNK_SIZE / table->obj_size)) dev_err(dev, "Clear HEM base address failed.\n"); diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.h b/drivers/infiniband/hw/hns/hns_roce_hem.h index ad6617588fba..435748858252 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hem.h +++ b/drivers/infiniband/hw/hns/hns_roce_hem.h @@ -34,6 +34,10 @@ #ifndef _HNS_ROCE_HEM_H #define _HNS_ROCE_HEM_H +#define HW_SYNC_TIMEOUT_MSECS 500 +#define HW_SYNC_SLEEP_TIME_INTERVAL 20 +#define BT_CMD_SYNC_SHIFT 31 + enum { /* MAP HEM(Hardware Entry Memory) */ HEM_TYPE_QPC = 0, diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c index 48c08627faf1..d767ebebd27e 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c @@ -786,6 +786,66 @@ static void hns_roce_port_enable(struct hns_roce_dev *hr_dev, int enable_flag) } } +static int hns_roce_bt_init(struct hns_roce_dev *hr_dev) +{ + struct device *dev = &hr_dev->pdev->dev; + struct hns_roce_v1_priv *priv; + int ret; + + priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv; + + priv->bt_table.qpc_buf.buf = dma_alloc_coherent(dev, + HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.qpc_buf.map, + GFP_KERNEL); + if (!priv->bt_table.qpc_buf.buf) + return -ENOMEM; + + priv->bt_table.mtpt_buf.buf = dma_alloc_coherent(dev, + HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.mtpt_buf.map, + GFP_KERNEL); + if (!priv->bt_table.mtpt_buf.buf) { + ret = -ENOMEM; + goto err_failed_alloc_mtpt_buf; + } + + priv->bt_table.cqc_buf.buf = dma_alloc_coherent(dev, + HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.cqc_buf.map, + GFP_KERNEL); + if (!priv->bt_table.cqc_buf.buf) { + ret = -ENOMEM; + goto err_failed_alloc_cqc_buf; + } + + return 0; + +err_failed_alloc_cqc_buf: + dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE, + priv->bt_table.mtpt_buf.buf, priv->bt_table.mtpt_buf.map); + +err_failed_alloc_mtpt_buf: + dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE, + priv->bt_table.qpc_buf.buf, priv->bt_table.qpc_buf.map); + + return ret; +} + +static void hns_roce_bt_free(struct hns_roce_dev *hr_dev) +{ + struct device *dev = &hr_dev->pdev->dev; + struct hns_roce_v1_priv *priv; + + priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv; + + dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE, + priv->bt_table.cqc_buf.buf, priv->bt_table.cqc_buf.map); + + dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE, + priv->bt_table.mtpt_buf.buf, priv->bt_table.mtpt_buf.map); + + dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE, + 
priv->bt_table.qpc_buf.buf, priv->bt_table.qpc_buf.map); +} + /** * hns_roce_v1_reset - reset RoCE * @hr_dev: RoCE device struct pointer @@ -941,8 +1001,18 @@ int hns_roce_v1_init(struct hns_roce_dev *hr_dev) hns_roce_port_enable(hr_dev, HNS_ROCE_PORT_UP); + ret = hns_roce_bt_init(hr_dev); + if (ret) { + dev_err(dev, "bt init failed!\n"); + goto error_failed_bt_init; + } + return 0; +error_failed_bt_init: + hns_roce_port_enable(hr_dev, HNS_ROCE_PORT_DOWN); + hns_roce_raq_free(hr_dev); + error_failed_raq_init: hns_roce_db_free(hr_dev); return ret; @@ -950,6 +1020,7 @@ error_failed_raq_init: void hns_roce_v1_exit(struct hns_roce_dev *hr_dev) { + hns_roce_bt_free(hr_dev); hns_roce_port_enable(hr_dev, HNS_ROCE_PORT_DOWN); hns_roce_raq_free(hr_dev); hns_roce_db_free(hr_dev); @@ -1596,6 +1667,74 @@ int hns_roce_v1_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) return ret; } +int hns_roce_v1_clear_hem(struct hns_roce_dev *hr_dev, + struct hns_roce_hem_table *table, int obj) +{ + struct device *dev = &hr_dev->pdev->dev; + struct hns_roce_v1_priv *priv; + unsigned long end = 0, flags = 0; + uint32_t bt_cmd_val[2] = {0}; + void __iomem *bt_cmd; + u64 bt_ba = 0; + + priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv; + + switch (table->type) { + case HEM_TYPE_QPC: + roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M, + ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_QPC); + bt_ba = priv->bt_table.qpc_buf.map >> 12; + break; + case HEM_TYPE_MTPT: + roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M, + ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_MTPT); + bt_ba = priv->bt_table.mtpt_buf.map >> 12; + break; + case HEM_TYPE_CQC: + roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M, + ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_CQC); + bt_ba = priv->bt_table.cqc_buf.map >> 12; + break; + case HEM_TYPE_SRQC: + dev_dbg(dev, "HEM_TYPE_SRQC not support.\n"); + return -EINVAL; + default: + return 0; + } + roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_M, + ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S, obj); + roce_set_bit(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_S, 0); + roce_set_bit(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_HW_SYNS_S, 1); + + spin_lock_irqsave(&hr_dev->bt_cmd_lock, flags); + + bt_cmd = hr_dev->reg_base + ROCEE_BT_CMD_H_REG; + + end = msecs_to_jiffies(HW_SYNC_TIMEOUT_MSECS) + jiffies; + while (1) { + if (readl(bt_cmd) >> BT_CMD_SYNC_SHIFT) { + if (!(time_before(jiffies, end))) { + dev_err(dev, "Write bt_cmd err,hw_sync is not zero.\n"); + spin_unlock_irqrestore(&hr_dev->bt_cmd_lock, + flags); + return -EBUSY; + } + } else { + break; + } + msleep(HW_SYNC_SLEEP_TIME_INTERVAL); + } + + bt_cmd_val[0] = (uint32_t)bt_ba; + roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_M, + ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S, bt_ba >> 32); + hns_roce_write64_k(bt_cmd_val, hr_dev->reg_base + ROCEE_BT_CMD_L_REG); + + spin_unlock_irqrestore(&hr_dev->bt_cmd_lock, flags); + + return 0; +} + static int hns_roce_v1_qp_modify(struct hns_roce_dev *hr_dev, struct hns_roce_mtt *mtt, enum hns_roce_qp_state cur_state, @@ -2766,6 +2905,7 @@ struct hns_roce_hw hns_roce_hw_v1 = { .set_mtu = hns_roce_v1_set_mtu, .write_mtpt = hns_roce_v1_write_mtpt, .write_cqc = hns_roce_v1_write_cqc, + .clear_hem = hns_roce_v1_clear_hem, .modify_qp = hns_roce_v1_modify_qp, .query_qp = hns_roce_v1_query_qp, .destroy_qp = hns_roce_v1_destroy_qp, diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.h b/drivers/infiniband/hw/hns/hns_roce_hw_v1.h index 
316b592b1636..539b0a3b92b0 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.h +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.h @@ -102,6 +102,8 @@ #define HNS_ROCE_V1_EXT_ODB_ALFUL \ (HNS_ROCE_V1_EXT_ODB_DEPTH - HNS_ROCE_V1_DB_RSVD) +#define HNS_ROCE_BT_RSV_BUF_SIZE (1 << 17) + #define HNS_ROCE_ODB_POLL_MODE 0 #define HNS_ROCE_SDB_NORMAL_MODE 0 @@ -971,9 +973,16 @@ struct hns_roce_db_table { struct hns_roce_ext_db *ext_db; }; +struct hns_roce_bt_table { + struct hns_roce_buf_list qpc_buf; + struct hns_roce_buf_list mtpt_buf; + struct hns_roce_buf_list cqc_buf; +}; + struct hns_roce_v1_priv { struct hns_roce_db_table db_table; struct hns_roce_raq_table raq_table; + struct hns_roce_bt_table bt_table; }; int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset); -- cgit v1.2.3-59-g8ed1b From 1ca5b253adbf7da514475f24151fb700062795c0 Mon Sep 17 00:00:00 2001 From: "Wei Hu (Xavier)" Date: Tue, 20 Sep 2016 17:07:00 +0100 Subject: IB/hns: Delete the sqp_start from the structure hns_roce_caps This patch deleted the sqp_start from the structure hns_roce_caps, and modified the calculation of the qp number. Signed-off-by: Wei Hu (Xavier) Signed-off-by: Salil Mehta Signed-off-by: Doug Ledford --- drivers/infiniband/hw/hns/hns_roce_device.h | 1 - drivers/infiniband/hw/hns/hns_roce_hw_v1.c | 1 - drivers/infiniband/hw/hns/hns_roce_qp.c | 10 ++++------ 3 files changed, 4 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h index 3058599d0404..341731553a60 100644 --- a/drivers/infiniband/hw/hns/hns_roce_device.h +++ b/drivers/infiniband/hw/hns/hns_roce_device.h @@ -465,7 +465,6 @@ struct hns_roce_caps { u32 max_rq_desc_sz; /* 64 */ int max_qp_init_rdma; int max_qp_dest_rdma; - int sqp_start; int num_cqs; int max_cqes; int reserved_cqs; diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c index d767ebebd27e..65c3192c2f00 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c @@ -936,7 +936,6 @@ void hns_roce_v1_profile(struct hns_roce_dev *hr_dev) caps->mtt_entry_sz = HNS_ROCE_V1_MTT_ENTRY_SIZE; caps->cq_entry_sz = HNS_ROCE_V1_CQE_ENTRY_SIZE; caps->page_size_cap = HNS_ROCE_V1_PAGE_SIZE_SUPPORT; - caps->sqp_start = 0; caps->reserved_lkey = 0; caps->reserved_pds = 0; caps->reserved_mrws = 1; diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c index 177f48f081ab..dd1b2140d8db 100644 --- a/drivers/infiniband/hw/hns/hns_roce_qp.c +++ b/drivers/infiniband/hw/hns/hns_roce_qp.c @@ -38,7 +38,7 @@ #include "hns_roce_hem.h" #include "hns_roce_user.h" -#define SQP_NUM 12 +#define SQP_NUM (2 * HNS_ROCE_MAX_PORTS) void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type) { @@ -246,7 +246,7 @@ void hns_roce_release_range_qp(struct hns_roce_dev *hr_dev, int base_qpn, { struct hns_roce_qp_table *qp_table = &hr_dev->qp_table; - if (base_qpn < (hr_dev->caps.sqp_start + 2 * hr_dev->caps.num_ports)) + if (base_qpn < SQP_NUM) return; hns_roce_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt); @@ -608,8 +608,7 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd, hr_qp = &hr_sqp->hr_qp; hr_qp->port = init_attr->port_num - 1; hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port]; - hr_qp->ibqp.qp_num = hr_dev->caps.sqp_start + - HNS_ROCE_MAX_PORTS + + hr_qp->ibqp.qp_num = HNS_ROCE_MAX_PORTS + hr_dev->iboe.phy_port[hr_qp->port]; ret = 
hns_roce_create_qp_common(hr_dev, pd, init_attr, udata, @@ -825,8 +824,7 @@ int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev) /* A port include two SQP, six port total 12 */ ret = hns_roce_bitmap_init(&qp_table->bitmap, hr_dev->caps.num_qps, - hr_dev->caps.num_qps - 1, - hr_dev->caps.sqp_start + SQP_NUM, + hr_dev->caps.num_qps - 1, SQP_NUM, reserved_from_top); if (ret) { dev_err(&hr_dev->pdev->dev, "qp bitmap init failed!error=%d\n", -- cgit v1.2.3-59-g8ed1b From c4a193d3a84766ce01d2cfdf3afaf0252db1ddf4 Mon Sep 17 00:00:00 2001 From: "Wei Hu (Xavier)" Date: Tue, 20 Sep 2016 17:07:01 +0100 Subject: IB/hns: Optimize code of aeq and ceq interrupt handle and fix the bug of qpn This patch optimized the codes of aeq and ceq interrupt handle and fixed the bug in the calculation of qpn. For the special qp(GSI or SMI), calculated the qp number according to physical port and the qpn reported in the event of async event queue. Signed-off-by: Wei Hu (Xavier) Signed-off-by: Salil Mehta Signed-off-by: Doug Ledford --- drivers/infiniband/hw/hns/hns_roce_eq.c | 146 ++++++++++++++++---------------- drivers/infiniband/hw/hns/hns_roce_eq.h | 4 + 2 files changed, 75 insertions(+), 75 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/hw/hns/hns_roce_eq.c b/drivers/infiniband/hw/hns/hns_roce_eq.c index 98af7fecf2f1..21e21b03cfb5 100644 --- a/drivers/infiniband/hw/hns/hns_roce_eq.c +++ b/drivers/infiniband/hw/hns/hns_roce_eq.c @@ -66,9 +66,6 @@ static void hns_roce_wq_catas_err_handle(struct hns_roce_dev *hr_dev, { struct device *dev = &hr_dev->pdev->dev; - qpn = roce_get_field(aeqe->event.qp_event.qp, - HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M, - HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S); dev_warn(dev, "Local Work Queue Catastrophic Error.\n"); switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) { @@ -96,13 +93,6 @@ static void hns_roce_wq_catas_err_handle(struct hns_roce_dev *hr_dev, default: break; } - - hns_roce_qp_event(hr_dev, roce_get_field(aeqe->event.qp_event.qp, - HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M, - HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S), - roce_get_field(aeqe->asyn, - HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M, - HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S)); } static void hns_roce_local_wq_access_err_handle(struct hns_roce_dev *hr_dev, @@ -111,9 +101,6 @@ static void hns_roce_local_wq_access_err_handle(struct hns_roce_dev *hr_dev, { struct device *dev = &hr_dev->pdev->dev; - qpn = roce_get_field(aeqe->event.qp_event.qp, - HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M, - HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S); dev_warn(dev, "Local Access Violation Work Queue Error.\n"); switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) { @@ -141,13 +128,69 @@ static void hns_roce_local_wq_access_err_handle(struct hns_roce_dev *hr_dev, default: break; } +} + +static void hns_roce_qp_err_handle(struct hns_roce_dev *hr_dev, + struct hns_roce_aeqe *aeqe, + int event_type) +{ + struct device *dev = &hr_dev->pdev->dev; + int phy_port; + int qpn; + + qpn = roce_get_field(aeqe->event.qp_event.qp, + HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M, + HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S); + phy_port = roce_get_field(aeqe->event.qp_event.qp, + HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_M, + HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_S); + if (qpn <= 1) + qpn = HNS_ROCE_MAX_PORTS * qpn + phy_port; + + switch (event_type) { + case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR: + dev_warn(dev, "Invalid Req Local Work Queue Error.\n" + "QP 
%d, phy_port %d.\n", qpn, phy_port); + break; + case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR: + hns_roce_wq_catas_err_handle(hr_dev, aeqe, qpn); + break; + case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR: + hns_roce_local_wq_access_err_handle(hr_dev, aeqe, qpn); + break; + default: + break; + } + + hns_roce_qp_event(hr_dev, qpn, event_type); +} + +static void hns_roce_cq_err_handle(struct hns_roce_dev *hr_dev, + struct hns_roce_aeqe *aeqe, + int event_type) +{ + struct device *dev = &hr_dev->pdev->dev; + u32 cqn; + + cqn = le32_to_cpu(roce_get_field(aeqe->event.cq_event.cq, + HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M, + HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S)); + + switch (event_type) { + case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR: + dev_warn(dev, "CQ 0x%x access err.\n", cqn); + break; + case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW: + dev_warn(dev, "CQ 0x%x overflow\n", cqn); + break; + case HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID: + dev_warn(dev, "CQ 0x%x ID invalid.\n", cqn); + break; + default: + break; + } - hns_roce_qp_event(hr_dev, roce_get_field(aeqe->event.qp_event.qp, - HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M, - HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S), - roce_get_field(aeqe->asyn, - HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M, - HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S)); + hns_roce_cq_event(hr_dev, cqn, event_type); } static void hns_roce_db_overflow_handle(struct hns_roce_dev *hr_dev, @@ -185,7 +228,7 @@ static int hns_roce_aeq_int(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq) struct device *dev = &hr_dev->pdev->dev; struct hns_roce_aeqe *aeqe; int aeqes_found = 0; - int qpn = 0; + int event_type; while ((aeqe = next_aeqe_sw(eq))) { dev_dbg(dev, "aeqe = %p, aeqe->asyn.event_type = 0x%lx\n", aeqe, @@ -195,9 +238,10 @@ static int hns_roce_aeq_int(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq) /* Memory barrier */ rmb(); - switch (roce_get_field(aeqe->asyn, - HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M, - HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S)) { + event_type = roce_get_field(aeqe->asyn, + HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M, + HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S); + switch (event_type) { case HNS_ROCE_EVENT_TYPE_PATH_MIG: dev_warn(dev, "PATH MIG not supported\n"); break; @@ -211,23 +255,9 @@ static int hns_roce_aeq_int(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq) dev_warn(dev, "PATH MIG failed\n"); break; case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR: - dev_warn(dev, "qpn = 0x%lx\n", - roce_get_field(aeqe->event.qp_event.qp, - HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M, - HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S)); - hns_roce_qp_event(hr_dev, - roce_get_field(aeqe->event.qp_event.qp, - HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M, - HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S), - roce_get_field(aeqe->asyn, - HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M, - HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S)); - break; case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR: - hns_roce_wq_catas_err_handle(hr_dev, aeqe, qpn); - break; case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR: - hns_roce_local_wq_access_err_handle(hr_dev, aeqe, qpn); + hns_roce_qp_err_handle(hr_dev, aeqe, event_type); break; case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH: case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR: @@ -235,40 +265,9 @@ static int hns_roce_aeq_int(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq) dev_warn(dev, "SRQ not support!\n"); break; case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR: - dev_warn(dev, "CQ 0x%lx access err.\n", - roce_get_field(aeqe->event.cq_event.cq, - HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M, - HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S)); - hns_roce_cq_event(hr_dev, - 
le32_to_cpu(roce_get_field(aeqe->event.cq_event.cq, - HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M, - HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S)), - roce_get_field(aeqe->asyn, - HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M, - HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S)); - break; case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW: - dev_warn(dev, "CQ 0x%lx overflow\n", - roce_get_field(aeqe->event.cq_event.cq, - HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M, - HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S)); - hns_roce_cq_event(hr_dev, - le32_to_cpu(roce_get_field(aeqe->event.cq_event.cq, - HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M, - HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S)), - roce_get_field(aeqe->asyn, - HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M, - HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S)); - break; case HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID: - dev_warn(dev, "CQ ID invalid.\n"); - hns_roce_cq_event(hr_dev, - le32_to_cpu(roce_get_field(aeqe->event.cq_event.cq, - HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M, - HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S)), - roce_get_field(aeqe->asyn, - HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M, - HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S)); + hns_roce_cq_err_handle(hr_dev, aeqe, event_type); break; case HNS_ROCE_EVENT_TYPE_PORT_CHANGE: dev_warn(dev, "port change.\n"); @@ -290,11 +289,8 @@ static int hns_roce_aeq_int(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq) HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_S)); break; default: - dev_warn(dev, "Unhandled event 0x%lx on EQ %d at index %u\n", - roce_get_field(aeqe->asyn, - HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M, - HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S), - eq->eqn, eq->cons_index); + dev_warn(dev, "Unhandled event %d on EQ %d at index %u\n", + event_type, eq->eqn, eq->cons_index); break; }; diff --git a/drivers/infiniband/hw/hns/hns_roce_eq.h b/drivers/infiniband/hw/hns/hns_roce_eq.h index fe4388191a3c..c6d212d12e03 100644 --- a/drivers/infiniband/hw/hns/hns_roce_eq.h +++ b/drivers/infiniband/hw/hns/hns_roce_eq.h @@ -107,6 +107,10 @@ struct hns_roce_aeqe { #define HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M \ (((1UL << 24) - 1) << HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S) +#define HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_S 25 +#define HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_M \ + (((1UL << 3) - 1) << HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_S) + #define HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S 0 #define HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M \ (((1UL << 16) - 1) << HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S) -- cgit v1.2.3-59-g8ed1b From 49fdf6bb0a172a8fe631d89a421857336efc4382 Mon Sep 17 00:00:00 2001 From: Lijun Ou Date: Tue, 20 Sep 2016 17:07:02 +0100 Subject: IB/hns: Modify the init of iboe lock This lock will be used in query port interface, and will be called while IB device was registered to OFED framework/IB Core. So, the lock of iboe must be initiated before IB device was registered. 
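The change itself is a one-line move; roughly (identifiers as in the hunk below, unrelated setup elided):

        iboe = &hr_dev->iboe;

        /* query_port() takes iboe->lock and may be called by the IB core as
         * soon as ib_register_device() returns, so initialise the lock before
         * the device is registered (it used to be initialised only afterwards).
         */
        spin_lock_init(&iboe->lock);

        /* ... ib_dev setup, ib_register_device(), netdev notifier follow ... */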
Signed-off-by: Lijun Ou Signed-off-by: Dongdong Huang(Donald) Reviewed-by: Wei Hu (Xavier) Signed-off-by: Salil Mehta Signed-off-by: Doug Ledford --- drivers/infiniband/hw/hns/hns_roce_main.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c index 4b44998a039e..764e35a54457 100644 --- a/drivers/infiniband/hw/hns/hns_roce_main.c +++ b/drivers/infiniband/hw/hns/hns_roce_main.c @@ -602,6 +602,7 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev) struct device *dev = &hr_dev->pdev->dev; iboe = &hr_dev->iboe; + spin_lock_init(&iboe->lock); ib_dev = &hr_dev->ib_dev; strlcpy(ib_dev->name, "hisi_%d", IB_DEVICE_NAME_MAX); @@ -686,8 +687,6 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev) goto error_failed_setup_mtu_gids; } - spin_lock_init(&iboe->lock); - iboe->nb.notifier_call = hns_roce_netdev_event; ret = register_netdevice_notifier(&iboe->nb); if (ret) { -- cgit v1.2.3-59-g8ed1b From 1cd11064da4b0bca992ca0f9594adbf53df25879 Mon Sep 17 00:00:00 2001 From: Lijun Ou Date: Tue, 20 Sep 2016 17:07:03 +0100 Subject: IB/hns: Fix bug of memory leakage for registering user mr While the page size attribute of umem is illegal, we should release umem that get by ib_umem_get interface. Also, we should return a non-zero value while pbl number is wrong. Signed-off-by: Lijun Ou Signed-off-by: Dongdong Huang(Donald) Reviewed-by: Wei Hu (Xavier) Signed-off-by: Salil Mehta Signed-off-by: Doug Ledford --- drivers/infiniband/hw/hns/hns_roce_mr.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers') diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c index 59f5e2be046b..fb87883ead34 100644 --- a/drivers/infiniband/hw/hns/hns_roce_mr.c +++ b/drivers/infiniband/hw/hns/hns_roce_mr.c @@ -564,11 +564,14 @@ struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, if (mr->umem->page_size != HNS_ROCE_HEM_PAGE_SIZE) { dev_err(dev, "Just support 4K page size but is 0x%x now!\n", mr->umem->page_size); + ret = -EINVAL; + goto err_umem; } if (n > HNS_ROCE_MAX_MTPT_PBL_NUM) { dev_err(dev, " MR len %lld err. MR is limited to 4G at most!\n", length); + ret = -EINVAL; goto err_umem; } -- cgit v1.2.3-59-g8ed1b From 07182fa77b3a64962aca12e8c0006646316bd26d Mon Sep 17 00:00:00 2001 From: Lijun Ou Date: Tue, 20 Sep 2016 17:07:04 +0100 Subject: IB/hns: Return bad wr while post send failed While post failed, hns roce should return the wr failed to user. We omitted this while qp type is wrong and fixed it. 
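Condensed from the hunk below (nothing new is introduced here): the QP type check now happens up front, before the SQ lock is taken, so the caller gets an error instead of the request being dropped with only a debug message inside the posting loop.

        if (unlikely(ibqp->qp_type != IB_QPT_GSI &&
                     ibqp->qp_type != IB_QPT_RC)) {
                dev_err(dev, "un-supported QP type\n");
                *bad_wr = NULL;
                return -EOPNOTSUPP;
        }

        spin_lock_irqsave(&qp->sq.lock, flags);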
Signed-off-by: Lijun Ou Signed-off-by: Dongdong Huang(Donald) Reviewed-by: Wei Hu (Xavier) Signed-off-by: Salil Mehta Signed-off-by: Doug Ledford --- drivers/infiniband/hw/hns/hns_roce_hw_v1.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c index 65c3192c2f00..1205d1af4db0 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c @@ -73,8 +73,14 @@ int hns_roce_v1_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, u32 ind = 0; int ret = 0; - spin_lock_irqsave(&qp->sq.lock, flags); + if (unlikely(ibqp->qp_type != IB_QPT_GSI && + ibqp->qp_type != IB_QPT_RC)) { + dev_err(dev, "un-supported QP type\n"); + *bad_wr = NULL; + return -EOPNOTSUPP; + } + spin_lock_irqsave(&qp->sq.lock, flags); ind = qp->sq_next_wqe; for (nreq = 0; wr; ++nreq, wr = wr->next) { if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) { @@ -263,9 +269,6 @@ int hns_roce_v1_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, HNS_ROCE_WQE_SGE_NUM_BIT); } ind++; - } else { - dev_dbg(dev, "unSupported QP type\n"); - break; } } -- cgit v1.2.3-59-g8ed1b From c6c3bfea82be60c3d60144b980ea25f1252a6eb5 Mon Sep 17 00:00:00 2001 From: Lijun Ou Date: Tue, 20 Sep 2016 17:07:05 +0100 Subject: IB/hns: The Ack timeout need a lower limit value The Ack timeout of qpc need a lower limit value,otherwise the read performance will be very lower. Signed-off-by: Lijun Ou Signed-off-by: Dongdong Huang(Donald) Reviewed-by: Wei Hu (Xavier) Signed-off-by: Salil Mehta Signed-off-by: Doug Ledford --- drivers/infiniband/hw/hns/hns_roce_hw_v1.c | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c index 1205d1af4db0..5110a1864284 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c @@ -2410,10 +2410,19 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr, QP_CONTEXT_QPC_BYTES_156_RETRY_COUNT_INIT_M, QP_CONTEXT_QPC_BYTES_156_RETRY_COUNT_INIT_S, attr->retry_cnt); - roce_set_field(context->qpc_bytes_156, - QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M, - QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S, - attr->timeout); + if (attr->timeout < 0x12) { + dev_info(dev, "ack timeout value(0x%x) must bigger than 0x12.\n", + attr->timeout); + roce_set_field(context->qpc_bytes_156, + QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M, + QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S, + 0x12); + } else { + roce_set_field(context->qpc_bytes_156, + QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M, + QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S, + attr->timeout); + } roce_set_field(context->qpc_bytes_156, QP_CONTEXT_QPC_BYTES_156_RNR_RETRY_COUNT_INIT_M, QP_CONTEXT_QPC_BYTES_156_RNR_RETRY_COUNT_INIT_S, -- cgit v1.2.3-59-g8ed1b From 7c7a4ea145a5dc914402678d0299a81c3f5b38b6 Mon Sep 17 00:00:00 2001 From: Lijun Ou Date: Tue, 20 Sep 2016 17:07:06 +0100 Subject: IB/hns: Some items of qpc need to take user param Some items of qpc need to take user param when modified qp state. 
Signed-off-by: Lijun Ou Signed-off-by: Dongdong Huang(Donald) Reviewed-by: Wei Hu (Xavier) Signed-off-by: Salil Mehta Signed-off-by: Doug Ledford --- drivers/infiniband/hw/hns/hns_roce_hw_v1.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c index 5110a1864284..bc06004ae67d 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c @@ -2396,10 +2396,12 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr, QP_CONTEXT_QPC_BYTES_148_CHECK_FLAG_S, 0); roce_set_field(context->qpc_bytes_148, QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_M, - QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_S, 0); + QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_S, + attr->retry_cnt); roce_set_field(context->qpc_bytes_148, QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_M, - QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_S, 0); + QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_S, + attr->rnr_retry); roce_set_field(context->qpc_bytes_148, QP_CONTEXT_QPC_BYTES_148_LSN_M, QP_CONTEXT_QPC_BYTES_148_LSN_S, 0x100); -- cgit v1.2.3-59-g8ed1b From cb814642e7e3e67f6a7c24b14531e95ccdb7c3cc Mon Sep 17 00:00:00 2001 From: Lijun Ou Date: Tue, 20 Sep 2016 17:07:07 +0100 Subject: IB/hns: Validate mtu when modified qp The mtu should be validated when modify qp,so we check it. Signed-off-by: Lijun Ou Signed-off-by: Peter Chen Reviewed-by: Wei Hu (Xavier) Signed-off-by: Salil Mehta Signed-off-by: Doug Ledford --- drivers/infiniband/hw/hns/hns_roce_qp.c | 15 +++++++++++++++ 1 file changed, 15 insertions(+) (limited to 'drivers') diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c index dd1b2140d8db..6a38909929cd 100644 --- a/drivers/infiniband/hw/hns/hns_roce_qp.c +++ b/drivers/infiniband/hw/hns/hns_roce_qp.c @@ -32,6 +32,7 @@ */ #include +#include #include #include "hns_roce_common.h" #include "hns_roce_device.h" @@ -657,6 +658,7 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, struct device *dev = &hr_dev->pdev->dev; int ret = -EINVAL; int p; + enum ib_mtu active_mtu; mutex_lock(&hr_qp->mutex); @@ -687,6 +689,19 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, } } + if (attr_mask & IB_QP_PATH_MTU) { + p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port; + active_mtu = iboe_get_mtu(hr_dev->iboe.netdevs[p]->mtu); + + if (attr->path_mtu > IB_MTU_2048 || + attr->path_mtu < IB_MTU_256 || + attr->path_mtu > active_mtu) { + dev_err(dev, "attr path_mtu(%d)invalid while modify qp", + attr->path_mtu); + goto out; + } + } + if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC && attr->max_rd_atomic > hr_dev->caps.max_qp_init_rdma) { dev_err(dev, "attr max_rd_atomic invalid.attr->max_rd_atomic=%d\n", -- cgit v1.2.3-59-g8ed1b From 24f0c9c0ff0955c5849dfb40403a21c8c11b9d2a Mon Sep 17 00:00:00 2001 From: Lijun Ou Date: Tue, 20 Sep 2016 17:07:08 +0100 Subject: IB/hns: Cq has not been freed Cq has not been freed when fail to ib_copy_to_udata, so need to free it. 
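The unwind path gains one more label so that the CQC allocated just before the copy is released as well; condensed from the hunk below, with all identifiers taken from it:

        if (context) {
                if (ib_copy_to_udata(udata, &hr_cq->cqn, sizeof(u64))) {
                        ret = -EFAULT;
                        goto err_cqc;   /* was: goto err_mtt, leaking the CQC */
                }
        }

        return &hr_cq->ib_cq;

err_cqc:
        hns_roce_free_cq(hr_dev, hr_cq);

err_mtt:
        hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);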
Signed-off-by: Lijun Ou Signed-off-by: Peter Chen Reviewed-by: Wei Hu (Xavier) Signed-off-by: Salil Mehta Signed-off-by: Doug Ledford --- drivers/infiniband/hw/hns/hns_roce_cq.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c index 3095f06099e6..097365932b09 100644 --- a/drivers/infiniband/hw/hns/hns_roce_cq.c +++ b/drivers/infiniband/hw/hns/hns_roce_cq.c @@ -357,12 +357,15 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev, if (context) { if (ib_copy_to_udata(udata, &hr_cq->cqn, sizeof(u64))) { ret = -EFAULT; - goto err_mtt; + goto err_cqc; } } return &hr_cq->ib_cq; +err_cqc: + hns_roce_free_cq(hr_dev, hr_cq); + err_mtt: hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt); if (context) -- cgit v1.2.3-59-g8ed1b From 1fad5fab782c18a90865f61bff1bde3a0dba1678 Mon Sep 17 00:00:00 2001 From: Lijun Ou Date: Tue, 20 Sep 2016 17:07:09 +0100 Subject: IB/hns: Update the rq head when modify qp state The rq head in qpc was zero will miss the rq wqes which have be sent, so here we should take the real value. Signed-off-by: Lijun Ou Reviewed-by: Wei Hu (Xavier) Signed-off-by: Salil Mehta Signed-off-by: Doug Ledford --- drivers/infiniband/hw/hns/hns_roce_hw_v1.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c index bc06004ae67d..2070e9ee0474 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c @@ -2248,7 +2248,8 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr, roce_set_field(context->qpc_bytes_68, QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_M, - QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_S, 0); + QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_S, + hr_qp->rq.head); roce_set_field(context->qpc_bytes_68, QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_M, QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_S, 0); -- cgit v1.2.3-59-g8ed1b From 3e413872b1832e1cf6de037ec01b2c4c992f0136 Mon Sep 17 00:00:00 2001 From: Lijun Ou Date: Tue, 20 Sep 2016 17:07:10 +0100 Subject: IB/hns: Fix the bug when platform_get_resource() exec fail This patch mainly fixes the bug with platform_get_resource(). It should return NULL when platform_get_resource() exec fail. Signed-off-by: Lijun Ou Reviewed-by: Wei Hu (Xavier) Signed-off-by: Salil Mehta Signed-off-by: Doug Ledford --- drivers/infiniband/hw/hns/hns_roce_pd.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers') diff --git a/drivers/infiniband/hw/hns/hns_roce_pd.c b/drivers/infiniband/hw/hns/hns_roce_pd.c index 0cd6132b75f0..05db7d59812a 100644 --- a/drivers/infiniband/hw/hns/hns_roce_pd.c +++ b/drivers/infiniband/hw/hns/hns_roce_pd.c @@ -110,6 +110,10 @@ int hns_roce_uar_alloc(struct hns_roce_dev *hr_dev, struct hns_roce_uar *uar) (hr_dev->caps.phy_num_uars - 1) + 1; res = platform_get_resource(hr_dev->pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(&hr_dev->pdev->dev, "memory resource not found!\n"); + return -EINVAL; + } uar->pfn = ((res->start) >> PAGE_SHIFT) + uar->index; return 0; -- cgit v1.2.3-59-g8ed1b From deb17f6f826708e46af538048b2af508215c3dc5 Mon Sep 17 00:00:00 2001 From: Lijun Ou Date: Tue, 20 Sep 2016 17:07:11 +0100 Subject: IB/hns: Delete the redundant lines in hns_roce_v1_m_qp() It doesn't need to assign for the filed of qp state in qpc separately when qp happen to migrate state which supported in RoCE engine v1. 
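In short, the branch that used to re-write the QP state field for the supported to-RESET/to-ERR migrations now only rejects unsupported ones; schematically (conditions exactly as in the hunk below, the middle pairs elided):

        } else if (!((cur_state == IB_QPS_INIT && new_state == IB_QPS_RESET) ||
                     /* ... the other supported to-RESET / to-ERR pairs ... */
                     (cur_state == IB_QPS_ERR && new_state == IB_QPS_ERR))) {
                dev_err(dev, "not support this status migration\n");
                goto out;
        }
        /* The separate write of QP_CONTEXT_QPC_BYTES_144_QP_STATE is dropped
         * for these transitions, per the changelog above.
         */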
Signed-off-by: Lijun Ou Reviewed-by: Wei Hu (Xavier) Signed-off-by: Salil Mehta Signed-off-by: Doug Ledford --- drivers/infiniband/hw/hns/hns_roce_hw_v1.c | 17 +++-------------- 1 file changed, 3 insertions(+), 14 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c index 2070e9ee0474..d52eeda2ef3c 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c @@ -2387,11 +2387,6 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr, roce_set_bit(context->qpc_bytes_140, QP_CONTEXT_QPC_BYTES_140_RNR_RETRY_FLG_S, 0); - roce_set_field(context->qpc_bytes_144, - QP_CONTEXT_QPC_BYTES_144_QP_STATE_M, - QP_CONTEXT_QPC_BYTES_144_QP_STATE_S, - attr->qp_state); - roce_set_field(context->qpc_bytes_148, QP_CONTEXT_QPC_BYTES_148_CHECK_FLAG_M, QP_CONTEXT_QPC_BYTES_148_CHECK_FLAG_S, 0); @@ -2498,21 +2493,15 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr, QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_M, QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_S, 0); - } else if ((cur_state == IB_QPS_INIT && new_state == IB_QPS_RESET) || + } else if (!((cur_state == IB_QPS_INIT && new_state == IB_QPS_RESET) || (cur_state == IB_QPS_INIT && new_state == IB_QPS_ERR) || (cur_state == IB_QPS_RTR && new_state == IB_QPS_RESET) || (cur_state == IB_QPS_RTR && new_state == IB_QPS_ERR) || (cur_state == IB_QPS_RTS && new_state == IB_QPS_RESET) || (cur_state == IB_QPS_RTS && new_state == IB_QPS_ERR) || (cur_state == IB_QPS_ERR && new_state == IB_QPS_RESET) || - (cur_state == IB_QPS_ERR && new_state == IB_QPS_ERR)) { - roce_set_field(context->qpc_bytes_144, - QP_CONTEXT_QPC_BYTES_144_QP_STATE_M, - QP_CONTEXT_QPC_BYTES_144_QP_STATE_S, - attr->qp_state); - - } else { - dev_err(dev, "not support this modify\n"); + (cur_state == IB_QPS_ERR && new_state == IB_QPS_ERR))) { + dev_err(dev, "not support this status migration\n"); goto out; } -- cgit v1.2.3-59-g8ed1b From 1bdab400af5954932714e68ab3df0187a92916cb Mon Sep 17 00:00:00 2001 From: Salil Date: Tue, 20 Sep 2016 17:07:12 +0100 Subject: IB/hns: Fix for removal of redundant code This patch removes the redundant code lines present in the functions get_send_wqe() and get_recv_wqe(). This also fixes the error in calculating the SQ WQE. 
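The corrected SQ lookup wraps the WQE index reported in the CQE with the queue mask instead of relying on the removed range checks; reformatted from the hunk below for readability:

        sq_wqe = get_send_wqe(*cur_qp,
                              roce_get_field(cqe->cqe_byte_4,
                                             CQE_BYTE_4_WQE_INDEX_M,
                                             CQE_BYTE_4_WQE_INDEX_S) &
                              ((*cur_qp)->sq.wqe_cnt - 1));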
Signed-off-by: Lijun Ou Reviewed-by: Wei Hu (Xavier) Signed-off-by: Salil Mehta Signed-off-by: Doug Ledford --- drivers/infiniband/hw/hns/hns_roce_hw_v1.c | 3 ++- drivers/infiniband/hw/hns/hns_roce_qp.c | 18 ------------------ 2 files changed, 2 insertions(+), 19 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c index d52eeda2ef3c..71232e5fabf6 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c @@ -1552,7 +1552,8 @@ static int hns_roce_v1_poll_one(struct hns_roce_cq *hr_cq, /* SQ conrespond to CQE */ sq_wqe = get_send_wqe(*cur_qp, roce_get_field(cqe->cqe_byte_4, CQE_BYTE_4_WQE_INDEX_M, - CQE_BYTE_4_WQE_INDEX_S)); + CQE_BYTE_4_WQE_INDEX_S)& + ((*cur_qp)->sq.wqe_cnt-1)); switch (sq_wqe->flag & HNS_ROCE_WQE_OPCODE_MASK) { case HNS_ROCE_WQE_OPCODE_SEND: wc->opcode = IB_WC_SEND; diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c index 6a38909929cd..e86dd8d06777 100644 --- a/drivers/infiniband/hw/hns/hns_roce_qp.c +++ b/drivers/infiniband/hw/hns/hns_roce_qp.c @@ -784,29 +784,11 @@ static void *get_wqe(struct hns_roce_qp *hr_qp, int offset) void *get_recv_wqe(struct hns_roce_qp *hr_qp, int n) { - struct ib_qp *ibqp = &hr_qp->ibqp; - struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); - - if ((n < 0) || (n > hr_qp->rq.wqe_cnt)) { - dev_err(&hr_dev->pdev->dev, "rq wqe index:%d,rq wqe cnt:%d\r\n", - n, hr_qp->rq.wqe_cnt); - return NULL; - } - return get_wqe(hr_qp, hr_qp->rq.offset + (n << hr_qp->rq.wqe_shift)); } void *get_send_wqe(struct hns_roce_qp *hr_qp, int n) { - struct ib_qp *ibqp = &hr_qp->ibqp; - struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); - - if ((n < 0) || (n > hr_qp->sq.wqe_cnt)) { - dev_err(&hr_dev->pdev->dev, "sq wqe index:%d,sq wqe cnt:%d\r\n", - n, hr_qp->sq.wqe_cnt); - return NULL; - } - return get_wqe(hr_qp, hr_qp->sq.offset + (n << hr_qp->sq.wqe_shift)); } -- cgit v1.2.3-59-g8ed1b From ad3cce0641161301b3702bf2d69c0109fa92e8ec Mon Sep 17 00:00:00 2001 From: Marcos Paulo de Souza Date: Mon, 3 Oct 2016 16:45:55 -0700 Subject: Input: elantech - fix Lenovo version typo Signed-off-by: Marcos Paulo de Souza Reviewed-by: Ulrik De Bie Signed-off-by: Dmitry Torokhov --- drivers/input/mouse/elantech.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c index 08e252a42480..1b51961f90cb 100644 --- a/drivers/input/mouse/elantech.c +++ b/drivers/input/mouse/elantech.c @@ -1134,7 +1134,7 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse, * System76 Pangolin 0x250f01 ? 2 hw buttons * (*) + 3 trackpoint buttons * (**) + 0 trackpoint buttons - * Note: Lenovo L430 and Lenovo L430 have the same fw_version/caps + * Note: Lenovo L430 and Lenovo L530 have the same fw_version/caps */ static void elantech_set_buttonpad_prop(struct psmouse *psmouse) { -- cgit v1.2.3-59-g8ed1b From f9a703a54d16ba2470391c4b12236ee56591d50c Mon Sep 17 00:00:00 2001 From: Matti Kurkela Date: Mon, 3 Oct 2016 16:48:17 -0700 Subject: Input: elantech - force needed quirks on Fujitsu H760 Just like Fujitsu CELSIUS H730, the H760 also has an Elantech touchpad with the same quirks. Without this patch, the touchpad is useless out-of-the-box as the mouse pointer won't move. This patch makes the driver aware of both the crc_enabled=1 requirement and the middle button, making the touchpad fully functional out-of-the-box. 
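Both quirks are wired up through the driver's existing DMI tables; the same match block (copied from the hunk below) is added once to elantech_dmi_has_middle_button[] and once to elantech_dmi_force_crc_enabled[]:

        {
                /* Fujitsu H760 */
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H760"),
                },
        },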
Signed-off-by: Matti Kurkela Cc: stable@vger.kernel.org Signed-off-by: Dmitry Torokhov --- drivers/input/mouse/elantech.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+) (limited to 'drivers') diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c index 1b51961f90cb..37b52f150664 100644 --- a/drivers/input/mouse/elantech.c +++ b/drivers/input/mouse/elantech.c @@ -1159,6 +1159,13 @@ static const struct dmi_system_id elantech_dmi_has_middle_button[] = { DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H730"), }, }, + { + /* Fujitsu H760 also has a middle button */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), + DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H760"), + }, + }, #endif { } }; @@ -1502,6 +1509,13 @@ static const struct dmi_system_id elantech_dmi_force_crc_enabled[] = { DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H730"), }, }, + { + /* Fujitsu H760 does not work with crc_enabled == 0 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), + DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H760"), + }, + }, { /* Fujitsu LIFEBOOK E554 does not work with crc_enabled == 0 */ .matches = { -- cgit v1.2.3-59-g8ed1b From 39a3b6355329397c30441b62c940bb9cecec0448 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Sun, 11 Sep 2016 10:59:57 +0200 Subject: WATCHDOG: txx9wdt: Add missing clock (un)prepare calls for CCF While the custom minimal TXx9 clock implementation doesn't need or use clock (un)prepare calls (they are dummies if !CONFIG_HAVE_CLK_PREPARE), they are mandatory when using the Common Clock Framework. Hence add them, to prepare for the advent of CCF. Signed-off-by: Geert Uytterhoeven Reviewed-by: Guenter Roeck Cc: Atsushi Nemoto Cc: Wim Van Sebroeck Cc: linux-clk@vger.kernel.org Cc: linux-mips@linux-mips.org Cc: linux-watchdog@vger.kernel.org Patchwork: https://patchwork.linux-mips.org/patch/14238/ Signed-off-by: Ralf Baechle --- drivers/watchdog/txx9wdt.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/watchdog/txx9wdt.c b/drivers/watchdog/txx9wdt.c index c2da880292bc..6f7a9deb27d0 100644 --- a/drivers/watchdog/txx9wdt.c +++ b/drivers/watchdog/txx9wdt.c @@ -112,7 +112,7 @@ static int __init txx9wdt_probe(struct platform_device *dev) txx9_imclk = NULL; goto exit; } - ret = clk_enable(txx9_imclk); + ret = clk_prepare_enable(txx9_imclk); if (ret) { clk_put(txx9_imclk); txx9_imclk = NULL; @@ -144,7 +144,7 @@ static int __init txx9wdt_probe(struct platform_device *dev) return 0; exit: if (txx9_imclk) { - clk_disable(txx9_imclk); + clk_disable_unprepare(txx9_imclk); clk_put(txx9_imclk); } return ret; @@ -153,7 +153,7 @@ exit: static int __exit txx9wdt_remove(struct platform_device *dev) { watchdog_unregister_device(&txx9wdt); - clk_disable(txx9_imclk); + clk_disable_unprepare(txx9_imclk); clk_put(txx9_imclk); return 0; } -- cgit v1.2.3-59-g8ed1b From 72bc8c75eaf72aa0e45652d09f1b80dd5346797e Mon Sep 17 00:00:00 2001 From: Matt Redfearn Date: Wed, 7 Sep 2016 10:45:20 +0100 Subject: cpuidle: cpuidle-cps: Enable use with MIPSr6 CPUs. This patch enables the MIPS CPS driver for MIPSr6 CPUs. Signed-off-by: Matt Redfearn Reviewed-by: Paul Burton Reviewed-by: Daniel Lezcano Cc: Rafael J. 
Wysocki Cc: linux-mips@linux-mips.org Cc: linux-pm@vger.kernel.org Cc: linux-kernel@vger.kernel.org Patchwork: https://patchwork.linux-mips.org/patch/14228/ Signed-off-by: Ralf Baechle --- drivers/cpuidle/Kconfig.mips | 2 +- drivers/cpuidle/cpuidle-cps.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/cpuidle/Kconfig.mips b/drivers/cpuidle/Kconfig.mips index 4102be01d06a..512ee37b374b 100644 --- a/drivers/cpuidle/Kconfig.mips +++ b/drivers/cpuidle/Kconfig.mips @@ -5,7 +5,7 @@ config MIPS_CPS_CPUIDLE bool "CPU Idle driver for MIPS CPS platforms" depends on CPU_IDLE && MIPS_CPS depends on SYS_SUPPORTS_MIPS_CPS - select ARCH_NEEDS_CPU_IDLE_COUPLED if MIPS_MT + select ARCH_NEEDS_CPU_IDLE_COUPLED if MIPS_MT || CPU_MIPSR6 select GENERIC_CLOCKEVENTS_BROADCAST if SMP select MIPS_CPS_PM default y diff --git a/drivers/cpuidle/cpuidle-cps.c b/drivers/cpuidle/cpuidle-cps.c index 1adb6980b707..926ba9871c62 100644 --- a/drivers/cpuidle/cpuidle-cps.c +++ b/drivers/cpuidle/cpuidle-cps.c @@ -163,7 +163,7 @@ static int __init cps_cpuidle_init(void) core = cpu_data[cpu].core; device = &per_cpu(cpuidle_dev, cpu); device->cpu = cpu; -#ifdef CONFIG_MIPS_MT +#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED cpumask_copy(&device->coupled_cpus, &cpu_sibling_map[cpu]); #endif -- cgit v1.2.3-59-g8ed1b From 4777ac220c430173e297237b896932ed5fd8aaf3 Mon Sep 17 00:00:00 2001 From: Ben Gamari Date: Tue, 4 Oct 2016 11:33:25 -0700 Subject: Input: ALPS - add touchstick support for SS5 hardware MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add touchstick support for the so-called SS5 hardware, which uses a variant of the SS4 protocol. Reviewed-by: Pali Rohár Tested-by: Michal Hocko Signed-off-by: Ben Gamari Signed-off-by: Dmitry Torokhov --- drivers/input/mouse/alps.c | 65 ++++++++++++++++++++++++++++++++++++++-------- drivers/input/mouse/alps.h | 2 ++ 2 files changed, 56 insertions(+), 11 deletions(-) (limited to 'drivers') diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c index 936f07a4e35f..8e15783d2932 100644 --- a/drivers/input/mouse/alps.c +++ b/drivers/input/mouse/alps.c @@ -1156,15 +1156,28 @@ static unsigned char alps_get_pkt_id_ss4_v2(unsigned char *byte) { unsigned char pkt_id = SS4_PACKET_ID_IDLE; - if (byte[0] == 0x18 && byte[1] == 0x10 && byte[2] == 0x00 && - (byte[3] & 0x88) == 0x08 && byte[4] == 0x10 && byte[5] == 0x00) { - pkt_id = SS4_PACKET_ID_IDLE; - } else if (!(byte[3] & 0x10)) { - pkt_id = SS4_PACKET_ID_ONE; - } else if (!(byte[3] & 0x20)) { + switch (byte[3] & 0x30) { + case 0x00: + if (byte[0] == 0x18 && byte[1] == 0x10 && byte[2] == 0x00 && + (byte[3] & 0x88) == 0x08 && byte[4] == 0x10 && + byte[5] == 0x00) { + pkt_id = SS4_PACKET_ID_IDLE; + } else { + pkt_id = SS4_PACKET_ID_ONE; + } + break; + case 0x10: + /* two-finger finger positions */ pkt_id = SS4_PACKET_ID_TWO; - } else { + break; + case 0x20: + /* stick pointer */ + pkt_id = SS4_PACKET_ID_STICK; + break; + case 0x30: + /* third and fourth finger positions */ pkt_id = SS4_PACKET_ID_MULTI; + break; } return pkt_id; @@ -1246,16 +1259,38 @@ static int alps_decode_ss4_v2(struct alps_fields *f, } break; + case SS4_PACKET_ID_STICK: + if (!(priv->flags & ALPS_DUALPOINT)) { + psmouse_warn(psmouse, + "Rejected trackstick packet from non DualPoint device"); + } else { + int x = (s8)(((p[0] & 1) << 7) | (p[1] & 0x7f)); + int y = (s8)(((p[3] & 1) << 7) | (p[2] & 0x7f)); + + input_report_rel(priv->dev2, REL_X, x); + input_report_rel(priv->dev2, 
REL_Y, -y); + } + break; + case SS4_PACKET_ID_IDLE: default: memset(f, 0, sizeof(struct alps_fields)); break; } - f->left = !!(SS4_BTN_V2(p) & 0x01); - if (!(priv->flags & ALPS_BUTTONPAD)) { - f->right = !!(SS4_BTN_V2(p) & 0x02); - f->middle = !!(SS4_BTN_V2(p) & 0x04); + /* handle buttons */ + if (pkt_id == SS4_PACKET_ID_STICK) { + f->ts_left = !!(SS4_BTN_V2(p) & 0x01); + if (!(priv->flags & ALPS_BUTTONPAD)) { + f->ts_right = !!(SS4_BTN_V2(p) & 0x02); + f->ts_middle = !!(SS4_BTN_V2(p) & 0x04); + } + } else { + f->left = !!(SS4_BTN_V2(p) & 0x01); + if (!(priv->flags & ALPS_BUTTONPAD)) { + f->right = !!(SS4_BTN_V2(p) & 0x02); + f->middle = !!(SS4_BTN_V2(p) & 0x04); + } } return 0; @@ -1266,6 +1301,7 @@ static void alps_process_packet_ss4_v2(struct psmouse *psmouse) struct alps_data *priv = psmouse->private; unsigned char *packet = psmouse->packet; struct input_dev *dev = psmouse->dev; + struct input_dev *dev2 = priv->dev2; struct alps_fields *f = &priv->f; memset(f, 0, sizeof(struct alps_fields)); @@ -1311,6 +1347,13 @@ static void alps_process_packet_ss4_v2(struct psmouse *psmouse) input_report_abs(dev, ABS_PRESSURE, f->pressure); input_sync(dev); + + if (priv->flags & ALPS_DUALPOINT) { + input_report_key(dev2, BTN_LEFT, f->ts_left); + input_report_key(dev2, BTN_RIGHT, f->ts_right); + input_report_key(dev2, BTN_MIDDLE, f->ts_middle); + input_sync(dev2); + } } static bool alps_is_valid_package_ss4_v2(struct psmouse *psmouse) diff --git a/drivers/input/mouse/alps.h b/drivers/input/mouse/alps.h index d37f814dc447..b9417e2d7ad3 100644 --- a/drivers/input/mouse/alps.h +++ b/drivers/input/mouse/alps.h @@ -37,12 +37,14 @@ * or there's button activities. * SS4_PACKET_ID_TWO: There's two or more fingers on touchpad * SS4_PACKET_ID_MULTI: There's three or more fingers on touchpad + * SS4_PACKET_ID_STICK: A stick pointer packet */ enum SS4_PACKET_ID { SS4_PACKET_ID_IDLE = 0, SS4_PACKET_ID_ONE, SS4_PACKET_ID_TWO, SS4_PACKET_ID_MULTI, + SS4_PACKET_ID_STICK, }; #define SS4_COUNT_PER_ELECTRODE 256 -- cgit v1.2.3-59-g8ed1b From a831776323e7c532ef9c88d0d62512ae15d78415 Mon Sep 17 00:00:00 2001 From: Ben Gamari Date: Tue, 4 Oct 2016 11:40:44 -0700 Subject: Input: ALPS - handle 0-pressure 1F events MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit While a button is held SS5 hardware will give us single-finger packets with x, y, and pressure equal to zero. This causes annoying jumps in pointer position if a touch is released while the button is held. Handle this by claiming zero contacts to ensure that no position events are provided to the user. Reviewed-by: Pali Rohár Tested-by: Michal Hocko Signed-off-by: Ben Gamari Signed-off-by: Dmitry Torokhov --- drivers/input/mouse/alps.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c index 8e15783d2932..a76e5c6fce54 100644 --- a/drivers/input/mouse/alps.c +++ b/drivers/input/mouse/alps.c @@ -1198,7 +1198,13 @@ static int alps_decode_ss4_v2(struct alps_fields *f, f->mt[0].x = SS4_1F_X_V2(p); f->mt[0].y = SS4_1F_Y_V2(p); f->pressure = ((SS4_1F_Z_V2(p)) * 2) & 0x7f; - f->fingers = 1; + /* + * When a button is held the device will give us events + * with x, y, and pressure of 0. This causes annoying jumps + * if a touch is released while the button is held. + * Handle this by claiming zero contacts. + */ + f->fingers = f->pressure > 0 ? 
1 : 0; f->first_mp = 0; f->is_mp = 0; break; -- cgit v1.2.3-59-g8ed1b From 7ad8a1067dfc3bf74ad2daef013ea4f3df1841a7 Mon Sep 17 00:00:00 2001 From: Ben Gamari Date: Tue, 4 Oct 2016 11:41:58 -0700 Subject: Input: ALPS - allow touchsticks to report pressure The SS5 hardware can report this. Tested-by: Michal Hocko Signed-off-by: Ben Gamari Signed-off-by: Dmitry Torokhov --- drivers/input/mouse/alps.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'drivers') diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c index a76e5c6fce54..bb0b7230cf3b 100644 --- a/drivers/input/mouse/alps.c +++ b/drivers/input/mouse/alps.c @@ -103,6 +103,7 @@ static const struct alps_nibble_commands alps_v6_nibble_commands[] = { 6-byte ALPS packet */ #define ALPS_STICK_BITS 0x100 /* separate stick button bits */ #define ALPS_BUTTONPAD 0x200 /* device is a clickpad */ +#define ALPS_DUALPOINT_WITH_PRESSURE 0x400 /* device can report trackpoint pressure */ static const struct alps_model_info alps_model_data[] = { { { 0x32, 0x02, 0x14 }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT } }, /* Toshiba Salellite Pro M10 */ @@ -1272,9 +1273,11 @@ static int alps_decode_ss4_v2(struct alps_fields *f, } else { int x = (s8)(((p[0] & 1) << 7) | (p[1] & 0x7f)); int y = (s8)(((p[3] & 1) << 7) | (p[2] & 0x7f)); + int pressure = (s8)(p[4] & 0x7f); input_report_rel(priv->dev2, REL_X, x); input_report_rel(priv->dev2, REL_Y, -y); + input_report_abs(priv->dev2, ABS_PRESSURE, pressure); } break; @@ -2998,6 +3001,10 @@ int alps_init(struct psmouse *psmouse) input_set_capability(dev2, EV_REL, REL_X); input_set_capability(dev2, EV_REL, REL_Y); + if (priv->flags & ALPS_DUALPOINT_WITH_PRESSURE) { + input_set_capability(dev2, EV_ABS, ABS_PRESSURE); + input_set_abs_params(dev2, ABS_PRESSURE, 0, 127, 0, 0); + } input_set_capability(dev2, EV_KEY, BTN_LEFT); input_set_capability(dev2, EV_KEY, BTN_RIGHT); input_set_capability(dev2, EV_KEY, BTN_MIDDLE); -- cgit v1.2.3-59-g8ed1b From aeaa881f9b17823028b464893b89c42db797b717 Mon Sep 17 00:00:00 2001 From: Ben Gamari Date: Tue, 4 Oct 2016 11:43:30 -0700 Subject: Input: ALPS - set DualPoint flag for 74 03 28 devices Here we introduce logic in alps_identify to set the ALPS_DUALPOINT flag for touchpad hardware responding to E7 report with 73 03 28, as is found in the Dell Latitude E7470. 
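Two hunks cooperate here; condensed below, with every identifier taken from the diff that follows:

        /* alps_identify(): the 73 03 28 E7 signature selects the V8 protocol. */
        } else if (e7[0] == 0x73 && e7[1] == 0x03 &&
                   e7[2] == 0x28 && ec[1] == 0x01) {
                protocol = &alps_v8_protocol_data;
        }

        /* alps_set_protocol(), SS4 branch: this firmware also carries a
         * pressure-reporting trackstick, so mark the device as a DualPoint.
         */
        if (priv->fw_ver[1] == 0x1)
                priv->flags |= ALPS_DUALPOINT | ALPS_DUALPOINT_WITH_PRESSURE;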
Tested-by: Michal Hocko Signed-off-by: Ben Gamari Signed-off-by: Dmitry Torokhov --- drivers/input/mouse/alps.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'drivers') diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c index bb0b7230cf3b..6d7de9bfed9a 100644 --- a/drivers/input/mouse/alps.c +++ b/drivers/input/mouse/alps.c @@ -2747,6 +2747,10 @@ static int alps_set_protocol(struct psmouse *psmouse, if (alps_set_defaults_ss4_v2(psmouse, priv)) return -EIO; + if (priv->fw_ver[1] == 0x1) + priv->flags |= ALPS_DUALPOINT | + ALPS_DUALPOINT_WITH_PRESSURE; + break; } @@ -2819,6 +2823,9 @@ static int alps_identify(struct psmouse *psmouse, struct alps_data *priv) } else if (e7[0] == 0x73 && e7[1] == 0x03 && e7[2] == 0x14 && ec[1] == 0x02) { protocol = &alps_v8_protocol_data; + } else if (e7[0] == 0x73 && e7[1] == 0x03 && + e7[2] == 0x28 && ec[1] == 0x01) { + protocol = &alps_v8_protocol_data; } else { psmouse_dbg(psmouse, "Likely not an ALPS touchpad: E7=%3ph, EC=%3ph\n", e7, ec); -- cgit v1.2.3-59-g8ed1b From bbc2ceeb3220e54c7574f0b5e3a252fd9a62cf8a Mon Sep 17 00:00:00 2001 From: Guenter Roeck Date: Tue, 4 Oct 2016 11:48:55 -0700 Subject: Input: synaptics-rmi4 - fix error handling in SPI transport driver Instantiating the rmi4 SPI transport driver without an interrupt assigned caused the driver to fail to load, but it does not clean up its transport device registration. Result may be a crash at a later time, for example when rebooting the system. Fixes: 8d99758dee31 ("Input: synaptics-rmi4 - add SPI transport driver") Signed-off-by: Guenter Roeck Signed-off-by: Dmitry Torokhov --- drivers/input/rmi4/rmi_spi.c | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) (limited to 'drivers') diff --git a/drivers/input/rmi4/rmi_spi.c b/drivers/input/rmi4/rmi_spi.c index 55bd1b34970c..4ebef607e214 100644 --- a/drivers/input/rmi4/rmi_spi.c +++ b/drivers/input/rmi4/rmi_spi.c @@ -396,6 +396,13 @@ static inline int rmi_spi_of_probe(struct spi_device *spi, } #endif +static void rmi_spi_unregister_transport(void *data) +{ + struct rmi_spi_xport *rmi_spi = data; + + rmi_unregister_transport_device(&rmi_spi->xport); +} + static int rmi_spi_probe(struct spi_device *spi) { struct rmi_spi_xport *rmi_spi; @@ -464,6 +471,11 @@ static int rmi_spi_probe(struct spi_device *spi) dev_err(&spi->dev, "failed to register transport.\n"); return retval; } + retval = devm_add_action_or_reset(&spi->dev, + rmi_spi_unregister_transport, + rmi_spi); + if (retval) + return retval; retval = rmi_spi_init_irq(spi); if (retval < 0) @@ -473,15 +485,6 @@ static int rmi_spi_probe(struct spi_device *spi) return 0; } -static int rmi_spi_remove(struct spi_device *spi) -{ - struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi); - - rmi_unregister_transport_device(&rmi_spi->xport); - - return 0; -} - #ifdef CONFIG_PM_SLEEP static int rmi_spi_suspend(struct device *dev) { @@ -577,7 +580,6 @@ static struct spi_driver rmi_spi_driver = { }, .id_table = rmi_id, .probe = rmi_spi_probe, - .remove = rmi_spi_remove, }; module_spi_driver(rmi_spi_driver); -- cgit v1.2.3-59-g8ed1b From 261d7794c49b9a3bb5115c5ffc452e00f969bf43 Mon Sep 17 00:00:00 2001 From: Guenter Roeck Date: Tue, 4 Oct 2016 11:50:54 -0700 Subject: Input: synaptics-rmi4 - fix error handling in I2C transport driver Instantiating the rmi4 I2C transport driver without interrupts assigned (for example using manual i2c instantiation from the command line) caused the driver to fail to load, but it does not clean up its regulator or transport 
device registrations. Result is a crash at a later time, for example when rebooting the system. Fixes: 946c8432aab0 ("Input: synaptics-rmi4 - support regulator supplies") Cc: Bjorn Andersson Signed-off-by: Guenter Roeck Signed-off-by: Dmitry Torokhov --- drivers/input/rmi4/rmi_i2c.c | 38 ++++++++++++++++++++++++++------------ 1 file changed, 26 insertions(+), 12 deletions(-) (limited to 'drivers') diff --git a/drivers/input/rmi4/rmi_i2c.c b/drivers/input/rmi4/rmi_i2c.c index 6f2e0e4f0296..1ebc2c1debae 100644 --- a/drivers/input/rmi4/rmi_i2c.c +++ b/drivers/input/rmi4/rmi_i2c.c @@ -221,6 +221,21 @@ static const struct of_device_id rmi_i2c_of_match[] = { MODULE_DEVICE_TABLE(of, rmi_i2c_of_match); #endif +static void rmi_i2c_regulator_bulk_disable(void *data) +{ + struct rmi_i2c_xport *rmi_i2c = data; + + regulator_bulk_disable(ARRAY_SIZE(rmi_i2c->supplies), + rmi_i2c->supplies); +} + +static void rmi_i2c_unregister_transport(void *data) +{ + struct rmi_i2c_xport *rmi_i2c = data; + + rmi_unregister_transport_device(&rmi_i2c->xport); +} + static int rmi_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id) { @@ -264,6 +279,12 @@ static int rmi_i2c_probe(struct i2c_client *client, if (retval < 0) return retval; + retval = devm_add_action_or_reset(&client->dev, + rmi_i2c_regulator_bulk_disable, + rmi_i2c); + if (retval) + return retval; + of_property_read_u32(client->dev.of_node, "syna,startup-delay-ms", &rmi_i2c->startup_delay); @@ -294,6 +315,11 @@ static int rmi_i2c_probe(struct i2c_client *client, client->addr); return retval; } + retval = devm_add_action_or_reset(&client->dev, + rmi_i2c_unregister_transport, + rmi_i2c); + if (retval) + return retval; retval = rmi_i2c_init_irq(client); if (retval < 0) @@ -304,17 +330,6 @@ static int rmi_i2c_probe(struct i2c_client *client, return 0; } -static int rmi_i2c_remove(struct i2c_client *client) -{ - struct rmi_i2c_xport *rmi_i2c = i2c_get_clientdata(client); - - rmi_unregister_transport_device(&rmi_i2c->xport); - regulator_bulk_disable(ARRAY_SIZE(rmi_i2c->supplies), - rmi_i2c->supplies); - - return 0; -} - #ifdef CONFIG_PM_SLEEP static int rmi_i2c_suspend(struct device *dev) { @@ -431,7 +446,6 @@ static struct i2c_driver rmi_i2c_driver = { }, .id_table = rmi_id, .probe = rmi_i2c_probe, - .remove = rmi_i2c_remove, }; module_i2c_driver(rmi_i2c_driver); -- cgit v1.2.3-59-g8ed1b From ea908ba8f73446dfbf87ff71f7cadb1994d2c5bb Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 4 Oct 2016 22:19:57 +0100 Subject: drm/armada: fix clock counts The DPMS handling wrt clock enables/disables was incorrect: we could end up decrementing the clock count multiple times if we transition via several low-power DPMS states, resulting in a kernel warning. Fix this by only testing to see whether we are entering or exiting the DPMS off state. 
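The reworked handler only acts when the transition crosses the blanked/unblanked boundary, so the clock refcount moves by at most one per transition; in outline (full body in the hunk below):

        if (dpms_blanked(dcrtc->dpms) != dpms_blanked(dpms)) {
                /* Crossing the boundary: enable the clock before updating the
                 * CRTC when unblanking, disable it afterwards when blanking.
                 * (vblank off/on and clk_prepare_enable()/clk_disable_unprepare()
                 * around armada_drm_crtc_update(), as in the diff below.)
                 */
        } else if (dcrtc->dpms != dpms) {
                /* Same side of the boundary (e.g. between two low-power
                 * states): just record the new DPMS value, leave the clock
                 * alone.
                 */
                dcrtc->dpms = dpms;
        }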
Signed-off-by: Russell King --- drivers/gpu/drm/armada/armada_crtc.c | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c index 2f58e9e2a59c..a51f8cbcfe26 100644 --- a/drivers/gpu/drm/armada/armada_crtc.c +++ b/drivers/gpu/drm/armada/armada_crtc.c @@ -332,17 +332,19 @@ static void armada_drm_crtc_dpms(struct drm_crtc *crtc, int dpms) { struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc); - if (dcrtc->dpms != dpms) { - dcrtc->dpms = dpms; - if (!IS_ERR(dcrtc->clk) && !dpms_blanked(dpms)) - WARN_ON(clk_prepare_enable(dcrtc->clk)); - armada_drm_crtc_update(dcrtc); - if (!IS_ERR(dcrtc->clk) && dpms_blanked(dpms)) - clk_disable_unprepare(dcrtc->clk); + if (dpms_blanked(dcrtc->dpms) != dpms_blanked(dpms)) { if (dpms_blanked(dpms)) armada_drm_vblank_off(dcrtc); - else + else if (!IS_ERR(dcrtc->clk)) + WARN_ON(clk_prepare_enable(dcrtc->clk)); + dcrtc->dpms = dpms; + armada_drm_crtc_update(dcrtc); + if (!dpms_blanked(dpms)) drm_crtc_vblank_on(&dcrtc->crtc); + else if (!IS_ERR(dcrtc->clk)) + clk_disable_unprepare(dcrtc->clk); + } else if (dcrtc->dpms != dpms) { + dcrtc->dpms = dpms; } } -- cgit v1.2.3-59-g8ed1b From 63c8d90ca967c7e87006dbd363a8724ba31f1b57 Mon Sep 17 00:00:00 2001 From: Paul Burton Date: Fri, 26 Aug 2016 15:17:39 +0100 Subject: USB: host: ehci-sead3: Remove SEAD-3 EHCI code The SEAD-3 board is now probing its EHCI controller using the generic EHCI driver & its generic-ehci device tree binding. Remove the unused SEAD-3 specific EHCI code. Signed-off-by: Paul Burton Acked-by: Alan Stern Cc: Greg Kroah-Hartman Cc: linux-mips@linux-mips.org Cc: linux-kernel@vger.kernel.org Cc: linux-usb@vger.kernel.org Patchwork: https://patchwork.linux-mips.org/patch/14052/ Signed-off-by: Ralf Baechle --- drivers/usb/host/ehci-hcd.c | 5 -- drivers/usb/host/ehci-sead3.c | 185 ------------------------------------------ 2 files changed, 190 deletions(-) delete mode 100644 drivers/usb/host/ehci-sead3.c (limited to 'drivers') diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c index 1e5f529d51a2..063064801ceb 100644 --- a/drivers/usb/host/ehci-hcd.c +++ b/drivers/usb/host/ehci-hcd.c @@ -1308,11 +1308,6 @@ MODULE_LICENSE ("GPL"); #define PLATFORM_DRIVER ehci_mv_driver #endif -#ifdef CONFIG_MIPS_SEAD3 -#include "ehci-sead3.c" -#define PLATFORM_DRIVER ehci_hcd_sead3_driver -#endif - static int __init ehci_hcd_init(void) { int retval = 0; diff --git a/drivers/usb/host/ehci-sead3.c b/drivers/usb/host/ehci-sead3.c deleted file mode 100644 index 3d86cc2ffe68..000000000000 --- a/drivers/usb/host/ehci-sead3.c +++ /dev/null @@ -1,185 +0,0 @@ -/* - * MIPS CI13320A EHCI Host Controller driver - * Based on "ehci-au1xxx.c" by K.Boge - * - * Copyright (C) 2012 MIPS Technologies, Inc. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY - * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software Foundation, - * Inc., 675 Mass Ave, Cambridge, MA 02139, USA. - */ - -#include -#include - -static int ehci_sead3_setup(struct usb_hcd *hcd) -{ - int ret; - struct ehci_hcd *ehci = hcd_to_ehci(hcd); - - ehci->caps = hcd->regs + 0x100; - -#ifdef __BIG_ENDIAN - ehci->big_endian_mmio = 1; - ehci->big_endian_desc = 1; -#endif - - ret = ehci_setup(hcd); - if (ret) - return ret; - - ehci->need_io_watchdog = 0; - - /* Set burst length to 16 words. */ - ehci_writel(ehci, 0x1010, &ehci->regs->reserved1[1]); - - return ret; -} - -const struct hc_driver ehci_sead3_hc_driver = { - .description = hcd_name, - .product_desc = "SEAD-3 EHCI", - .hcd_priv_size = sizeof(struct ehci_hcd), - - /* - * generic hardware linkage - */ - .irq = ehci_irq, - .flags = HCD_MEMORY | HCD_USB2 | HCD_BH, - - /* - * basic lifecycle operations - * - */ - .reset = ehci_sead3_setup, - .start = ehci_run, - .stop = ehci_stop, - .shutdown = ehci_shutdown, - - /* - * managing i/o requests and associated device resources - */ - .urb_enqueue = ehci_urb_enqueue, - .urb_dequeue = ehci_urb_dequeue, - .endpoint_disable = ehci_endpoint_disable, - .endpoint_reset = ehci_endpoint_reset, - - /* - * scheduling support - */ - .get_frame_number = ehci_get_frame, - - /* - * root hub support - */ - .hub_status_data = ehci_hub_status_data, - .hub_control = ehci_hub_control, - .bus_suspend = ehci_bus_suspend, - .bus_resume = ehci_bus_resume, - .relinquish_port = ehci_relinquish_port, - .port_handed_over = ehci_port_handed_over, - - .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete, -}; - -static int ehci_hcd_sead3_drv_probe(struct platform_device *pdev) -{ - struct usb_hcd *hcd; - struct resource *res; - int ret; - - if (usb_disabled()) - return -ENODEV; - - if (pdev->resource[1].flags != IORESOURCE_IRQ) { - pr_debug("resource[1] is not IORESOURCE_IRQ"); - return -ENOMEM; - } - hcd = usb_create_hcd(&ehci_sead3_hc_driver, &pdev->dev, "SEAD-3"); - if (!hcd) - return -ENOMEM; - - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - hcd->regs = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(hcd->regs)) { - ret = PTR_ERR(hcd->regs); - goto err1; - } - hcd->rsrc_start = res->start; - hcd->rsrc_len = resource_size(res); - - /* Root hub has integrated TT. 
*/ - hcd->has_tt = 1; - - ret = usb_add_hcd(hcd, pdev->resource[1].start, - IRQF_SHARED); - if (ret == 0) { - platform_set_drvdata(pdev, hcd); - device_wakeup_enable(hcd->self.controller); - return ret; - } - -err1: - usb_put_hcd(hcd); - return ret; -} - -static int ehci_hcd_sead3_drv_remove(struct platform_device *pdev) -{ - struct usb_hcd *hcd = platform_get_drvdata(pdev); - - usb_remove_hcd(hcd); - usb_put_hcd(hcd); - - return 0; -} - -#ifdef CONFIG_PM -static int ehci_hcd_sead3_drv_suspend(struct device *dev) -{ - struct usb_hcd *hcd = dev_get_drvdata(dev); - bool do_wakeup = device_may_wakeup(dev); - - return ehci_suspend(hcd, do_wakeup); -} - -static int ehci_hcd_sead3_drv_resume(struct device *dev) -{ - struct usb_hcd *hcd = dev_get_drvdata(dev); - - ehci_resume(hcd, false); - return 0; -} - -static const struct dev_pm_ops sead3_ehci_pmops = { - .suspend = ehci_hcd_sead3_drv_suspend, - .resume = ehci_hcd_sead3_drv_resume, -}; - -#define SEAD3_EHCI_PMOPS (&sead3_ehci_pmops) - -#else -#define SEAD3_EHCI_PMOPS NULL -#endif - -static struct platform_driver ehci_hcd_sead3_driver = { - .probe = ehci_hcd_sead3_drv_probe, - .remove = ehci_hcd_sead3_drv_remove, - .shutdown = usb_hcd_platform_shutdown, - .driver = { - .name = "sead3-ehci", - .pm = SEAD3_EHCI_PMOPS, - } -}; - -MODULE_ALIAS("platform:sead3-ehci"); -- cgit v1.2.3-59-g8ed1b From f38b7c2547537a8219d273e20eb3b88e6fc6b764 Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Wed, 28 Sep 2016 14:38:36 +0000 Subject: wlcore: sdio: drop kfree for memory allocated with devm_kzalloc It's not necessary to free memory allocated with devm_kzalloc and using kfree leads to a double free. Fixes: d776fc86b82f ("wlcore: sdio: Populate config firmware data") Signed-off-by: Wei Yongjun Acked-by: Tony Lindgren Signed-off-by: Kalle Valo --- drivers/net/wireless/ti/wlcore/sdio.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c index a6e94b1a12cb..47fe7f96a242 100644 --- a/drivers/net/wireless/ti/wlcore/sdio.c +++ b/drivers/net/wireless/ti/wlcore/sdio.c @@ -391,7 +391,6 @@ static void wl1271_remove(struct sdio_func *func) pm_runtime_get_noresume(&func->dev); platform_device_unregister(glue->core); - kfree(glue); } #ifdef CONFIG_PM -- cgit v1.2.3-59-g8ed1b From eb42d760db2ea8ab16f10e320a7a8bfff331307f Mon Sep 17 00:00:00 2001 From: Paul Burton Date: Fri, 26 Aug 2016 15:17:47 +0100 Subject: FBDEV: cobalt_lcdfb: Drop SEAD3 support The SEAD3 board no longer uses the cobalt_lcdfb driver, so remove the SEAD3-specific code from it. Signed-off-by: Paul Burton Acked-by: Tomi Valkeinen Cc: Ondrej Zary Cc: Arnd Bergmann Cc: Robert Jarzmik Cc: Maciej W. 
Rozycki Cc: Ezequiel Garcia Cc: Tomi Valkeinen Cc: Jean-Christophe Plagniol-Villard Cc: Geert Uytterhoeven Cc: Simon Horman Cc: linux-mips@linux-mips.org Cc: linux-fbdev@vger.kernel.org Cc: linux-kernel@vger.kernel.org Patchwork: https://patchwork.linux-mips.org/patch/14060/ Signed-off-by: Ralf Baechle --- drivers/video/fbdev/Kconfig | 2 +- drivers/video/fbdev/cobalt_lcdfb.c | 42 -------------------------------------- 2 files changed, 1 insertion(+), 43 deletions(-) (limited to 'drivers') diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig index 88b008fb8a4e..914bfb2f176b 100644 --- a/drivers/video/fbdev/Kconfig +++ b/drivers/video/fbdev/Kconfig @@ -2183,7 +2183,7 @@ config FB_GOLDFISH config FB_COBALT tristate "Cobalt server LCD frame buffer support" - depends on FB && (MIPS_COBALT || MIPS_SEAD3) + depends on FB && MIPS_COBALT config FB_SH7760 bool "SH7760/SH7763/SH7720/SH7721 LCDC support" diff --git a/drivers/video/fbdev/cobalt_lcdfb.c b/drivers/video/fbdev/cobalt_lcdfb.c index 07675d6f323e..2d3b691f3fc4 100644 --- a/drivers/video/fbdev/cobalt_lcdfb.c +++ b/drivers/video/fbdev/cobalt_lcdfb.c @@ -63,7 +63,6 @@ #define LCD_CUR_POS(x) ((x) & LCD_CUR_POS_MASK) #define LCD_TEXT_POS(x) ((x) | LCD_TEXT_MODE) -#ifdef CONFIG_MIPS_COBALT static inline void lcd_write_control(struct fb_info *info, u8 control) { writel((u32)control << 24, info->screen_base); @@ -83,47 +82,6 @@ static inline u8 lcd_read_data(struct fb_info *info) { return readl(info->screen_base + LCD_DATA_REG_OFFSET) >> 24; } -#else - -#define LCD_CTL 0x00 -#define LCD_DATA 0x08 -#define CPLD_STATUS 0x10 -#define CPLD_DATA 0x18 - -static inline void cpld_wait(struct fb_info *info) -{ - do { - } while (readl(info->screen_base + CPLD_STATUS) & 1); -} - -static inline void lcd_write_control(struct fb_info *info, u8 control) -{ - cpld_wait(info); - writel(control, info->screen_base + LCD_CTL); -} - -static inline u8 lcd_read_control(struct fb_info *info) -{ - cpld_wait(info); - readl(info->screen_base + LCD_CTL); - cpld_wait(info); - return readl(info->screen_base + CPLD_DATA) & 0xff; -} - -static inline void lcd_write_data(struct fb_info *info, u8 data) -{ - cpld_wait(info); - writel(data, info->screen_base + LCD_DATA); -} - -static inline u8 lcd_read_data(struct fb_info *info) -{ - cpld_wait(info); - readl(info->screen_base + LCD_DATA); - cpld_wait(info); - return readl(info->screen_base + CPLD_DATA) & 0xff; -} -#endif static int lcd_busy_wait(struct fb_info *info) { -- cgit v1.2.3-59-g8ed1b From 0cad855fbd083ee5fd0584a47c2aaa7dca936fd4 Mon Sep 17 00:00:00 2001 From: Paul Burton Date: Fri, 26 Aug 2016 15:17:49 +0100 Subject: auxdisplay: img-ascii-lcd: driver for simple ASCII LCD displays Add a driver for simple ASCII LCD displays found on the MIPS Boston, Malta & SEAD3 development boards. The Boston display is an independent memory mapped device with a simple memory mapped 8 byte register space containing the 8 ASCII characters to display. The Malta display is exposed as part of the Malta board registers, and provides 8 registers each of which corresponds to one of the ASCII characters to display. The SEAD3 display is slightly more complex, exposing an interface to an S6A0069 LCD controller via registers provided by the boards CPLD. However although the displays differ in their register interface, we require similar functionality on each board so abstracting away the differences within a single driver allows us to share a significant amount of code & ensure consistent behaviour. 
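The shared-driver idea described above boils down to a small per-model descriptor plus an update() callback; a minimal sketch of that shape is below, with placeholder names (the real structure, img_ascii_lcd_config, appears in the diff that follows).

/* Sketch only: per-board LCD description consumed by one common core. */
struct demo_lcd_cfg {
	unsigned int num_chars;		/* characters the panel can show */
	bool external_regmap;		/* registers live in a syscon, not MMIO */
	void (*update)(void *ctx);	/* push the current text to the hardware */
};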
The driver displays the Linux kernel version as the default message, but allows the message to be changed via a character device. Messages longer then the number of characters that the display can show will scroll. This provides different behaviour to the existing LCD display code for the MIPS Malta or MIPS SEAD3 platforms in the following ways: - The default string to display is not "LINUX ON MALTA" or "LINUX ON SEAD3" but "Linux" followed by the version number of the kernel (UTS_RELEASE). - Since that string tends to be significantly longer it scrolls twice as fast, moving every 500ms rather than every 1s. - The LCD won't be updated until the driver is probed, so it doesn't provide the early "LINUX" string. Signed-off-by: Paul Burton Cc: Mauro Carvalho Chehab Cc: Miguel Ojeda Sandonis Cc: Guenter Roeck Cc: David S. Miller Cc: Greg Kroah-Hartman Cc: Geert Uytterhoeven Cc: Andrew Morton Cc: linux-mips@linux-mips.org Cc: linux-kernel@vger.kernel.org Patchwork: https://patchwork.linux-mips.org/patch/14062/ Signed-off-by: Ralf Baechle --- MAINTAINERS | 1 + drivers/auxdisplay/Kconfig | 9 + drivers/auxdisplay/Makefile | 1 + drivers/auxdisplay/img-ascii-lcd.c | 443 +++++++++++++++++++++++++++++++++++++ 4 files changed, 454 insertions(+) create mode 100644 drivers/auxdisplay/img-ascii-lcd.c (limited to 'drivers') diff --git a/MAINTAINERS b/MAINTAINERS index 7b24f9f30f47..1f1790031dbc 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -5968,6 +5968,7 @@ IMGTEC ASCII LCD DRIVER M: Paul Burton S: Maintained F: Documentation/devicetree/bindings/auxdisplay/img-ascii-lcd.txt +F: drivers/auxdisplay/img-ascii-lcd.c INA209 HARDWARE MONITOR DRIVER M: Guenter Roeck diff --git a/drivers/auxdisplay/Kconfig b/drivers/auxdisplay/Kconfig index c07e725ea93d..10e1b9eee10e 100644 --- a/drivers/auxdisplay/Kconfig +++ b/drivers/auxdisplay/Kconfig @@ -119,4 +119,13 @@ config CFAG12864B_RATE If you compile this as a module, you can still override this value using the module parameters. +config IMG_ASCII_LCD + tristate "Imagination Technologies ASCII LCD Display" + default y if MIPS_MALTA || MIPS_SEAD3 + select SYSCON + help + Enable this to support the simple ASCII LCD displays found on + development boards such as the MIPS Boston, MIPS Malta & MIPS SEAD3 + from Imagination Technologies. + endif # AUXDISPLAY diff --git a/drivers/auxdisplay/Makefile b/drivers/auxdisplay/Makefile index 8a8936a468b9..3127175c89df 100644 --- a/drivers/auxdisplay/Makefile +++ b/drivers/auxdisplay/Makefile @@ -4,3 +4,4 @@ obj-$(CONFIG_KS0108) += ks0108.o obj-$(CONFIG_CFAG12864B) += cfag12864b.o cfag12864bfb.o +obj-$(CONFIG_IMG_ASCII_LCD) += img-ascii-lcd.o diff --git a/drivers/auxdisplay/img-ascii-lcd.c b/drivers/auxdisplay/img-ascii-lcd.c new file mode 100644 index 000000000000..bf43b5d2aafc --- /dev/null +++ b/drivers/auxdisplay/img-ascii-lcd.c @@ -0,0 +1,443 @@ +/* + * Copyright (C) 2016 Imagination Technologies + * Author: Paul Burton + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct img_ascii_lcd_ctx; + +/** + * struct img_ascii_lcd_config - Configuration information about an LCD model + * @num_chars: the number of characters the LCD can display + * @external_regmap: true if registers are in a system controller, else false + * @update: function called to update the LCD + */ +struct img_ascii_lcd_config { + unsigned int num_chars; + bool external_regmap; + void (*update)(struct img_ascii_lcd_ctx *ctx); +}; + +/** + * struct img_ascii_lcd_ctx - Private data structure + * @pdev: the ASCII LCD platform device + * @base: the base address of the LCD registers + * @regmap: the regmap through which LCD registers are accessed + * @offset: the offset within regmap to the start of the LCD registers + * @cfg: pointer to the LCD model configuration + * @message: the full message to display or scroll on the LCD + * @message_len: the length of the @message string + * @scroll_pos: index of the first character of @message currently displayed + * @scroll_rate: scroll interval in jiffies + * @timer: timer used to implement scrolling + * @curr: the string currently displayed on the LCD + */ +struct img_ascii_lcd_ctx { + struct platform_device *pdev; + union { + void __iomem *base; + struct regmap *regmap; + }; + u32 offset; + const struct img_ascii_lcd_config *cfg; + char *message; + unsigned int message_len; + unsigned int scroll_pos; + unsigned int scroll_rate; + struct timer_list timer; + char curr[] __aligned(8); +}; + +/* + * MIPS Boston development board + */ + +static void boston_update(struct img_ascii_lcd_ctx *ctx) +{ + ulong val; + +#if BITS_PER_LONG == 64 + val = *((u64 *)&ctx->curr[0]); + __raw_writeq(val, ctx->base); +#elif BITS_PER_LONG == 32 + val = *((u32 *)&ctx->curr[0]); + __raw_writel(val, ctx->base); + val = *((u32 *)&ctx->curr[4]); + __raw_writel(val, ctx->base + 4); +#else +# error Not 32 or 64 bit +#endif +} + +static struct img_ascii_lcd_config boston_config = { + .num_chars = 8, + .update = boston_update, +}; + +/* + * MIPS Malta development board + */ + +static void malta_update(struct img_ascii_lcd_ctx *ctx) +{ + unsigned int i; + int err; + + for (i = 0; i < ctx->cfg->num_chars; i++) { + err = regmap_write(ctx->regmap, + ctx->offset + (i * 8), ctx->curr[i]); + if (err) + break; + } + + if (unlikely(err)) + pr_err_ratelimited("Failed to update LCD display: %d\n", err); +} + +static struct img_ascii_lcd_config malta_config = { + .num_chars = 8, + .external_regmap = true, + .update = malta_update, +}; + +/* + * MIPS SEAD3 development board + */ + +enum { + SEAD3_REG_LCD_CTRL = 0x00, +#define SEAD3_REG_LCD_CTRL_SETDRAM BIT(7) + SEAD3_REG_LCD_DATA = 0x08, + SEAD3_REG_CPLD_STATUS = 0x10, +#define SEAD3_REG_CPLD_STATUS_BUSY BIT(0) + SEAD3_REG_CPLD_DATA = 0x18, +#define SEAD3_REG_CPLD_DATA_BUSY BIT(7) +}; + +static int sead3_wait_sm_idle(struct img_ascii_lcd_ctx *ctx) +{ + unsigned int status; + int err; + + do { + err = regmap_read(ctx->regmap, + ctx->offset + SEAD3_REG_CPLD_STATUS, + &status); + if (err) + return err; + } while (status & SEAD3_REG_CPLD_STATUS_BUSY); + + return 0; + +} + +static int sead3_wait_lcd_idle(struct img_ascii_lcd_ctx *ctx) +{ + unsigned int cpld_data; + int err; + + err = sead3_wait_sm_idle(ctx); + if (err) + return err; + + do { + err = regmap_read(ctx->regmap, + ctx->offset + SEAD3_REG_LCD_CTRL, + &cpld_data); + if (err) + return err; + + err = sead3_wait_sm_idle(ctx); + if (err) + return err; + + err = 
regmap_read(ctx->regmap, + ctx->offset + SEAD3_REG_CPLD_DATA, + &cpld_data); + if (err) + return err; + } while (cpld_data & SEAD3_REG_CPLD_DATA_BUSY); + + return 0; +} + +static void sead3_update(struct img_ascii_lcd_ctx *ctx) +{ + unsigned int i; + int err; + + for (i = 0; i < ctx->cfg->num_chars; i++) { + err = sead3_wait_lcd_idle(ctx); + if (err) + break; + + err = regmap_write(ctx->regmap, + ctx->offset + SEAD3_REG_LCD_CTRL, + SEAD3_REG_LCD_CTRL_SETDRAM | i); + if (err) + break; + + err = sead3_wait_lcd_idle(ctx); + if (err) + break; + + err = regmap_write(ctx->regmap, + ctx->offset + SEAD3_REG_LCD_DATA, + ctx->curr[i]); + if (err) + break; + } + + if (unlikely(err)) + pr_err_ratelimited("Failed to update LCD display: %d\n", err); +} + +static struct img_ascii_lcd_config sead3_config = { + .num_chars = 16, + .external_regmap = true, + .update = sead3_update, +}; + +static const struct of_device_id img_ascii_lcd_matches[] = { + { .compatible = "img,boston-lcd", .data = &boston_config }, + { .compatible = "mti,malta-lcd", .data = &malta_config }, + { .compatible = "mti,sead3-lcd", .data = &sead3_config }, +}; + +/** + * img_ascii_lcd_scroll() - scroll the display by a character + * @arg: really a pointer to the private data structure + * + * Scroll the current message along the LCD by one character, rearming the + * timer if required. + */ +static void img_ascii_lcd_scroll(unsigned long arg) +{ + struct img_ascii_lcd_ctx *ctx = (struct img_ascii_lcd_ctx *)arg; + unsigned int i, ch = ctx->scroll_pos; + unsigned int num_chars = ctx->cfg->num_chars; + + /* update the current message string */ + for (i = 0; i < num_chars;) { + /* copy as many characters from the string as possible */ + for (; i < num_chars && ch < ctx->message_len; i++, ch++) + ctx->curr[i] = ctx->message[ch]; + + /* wrap around to the start of the string */ + ch = 0; + } + + /* update the LCD */ + ctx->cfg->update(ctx); + + /* move on to the next character */ + ctx->scroll_pos++; + ctx->scroll_pos %= ctx->message_len; + + /* rearm the timer */ + if (ctx->message_len > ctx->cfg->num_chars) + mod_timer(&ctx->timer, jiffies + ctx->scroll_rate); +} + +/** + * img_ascii_lcd_display() - set the message to be displayed + * @ctx: pointer to the private data structure + * @msg: the message to display + * @count: length of msg, or -1 + * + * Display a new message @msg on the LCD. @msg can be longer than the number of + * characters the LCD can display, in which case it will begin scrolling across + * the LCD display. 
+ * + * Return: 0 on success, -ENOMEM on memory allocation failure + */ +static int img_ascii_lcd_display(struct img_ascii_lcd_ctx *ctx, + const char *msg, ssize_t count) +{ + char *new_msg; + + /* stop the scroll timer */ + del_timer_sync(&ctx->timer); + + if (count == -1) + count = strlen(msg); + + /* if the string ends with a newline, trim it */ + if (msg[count - 1] == '\n') + count--; + + new_msg = devm_kmalloc(&ctx->pdev->dev, count + 1, GFP_KERNEL); + if (!new_msg) + return -ENOMEM; + + memcpy(new_msg, msg, count); + new_msg[count] = 0; + + if (ctx->message) + devm_kfree(&ctx->pdev->dev, ctx->message); + + ctx->message = new_msg; + ctx->message_len = count; + ctx->scroll_pos = 0; + + /* update the LCD */ + img_ascii_lcd_scroll((unsigned long)ctx); + + return 0; +} + +/** + * message_show() - read message via sysfs + * @dev: the LCD device + * @attr: the LCD message attribute + * @buf: the buffer to read the message into + * + * Read the current message being displayed or scrolled across the LCD display + * into @buf, for reads from sysfs. + * + * Return: the number of characters written to @buf + */ +static ssize_t message_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct img_ascii_lcd_ctx *ctx = dev_get_drvdata(dev); + + return sprintf(buf, "%s\n", ctx->message); +} + +/** + * message_store() - write a new message via sysfs + * @dev: the LCD device + * @attr: the LCD message attribute + * @buf: the buffer containing the new message + * @count: the size of the message in @buf + * + * Write a new message to display or scroll across the LCD display from sysfs. + * + * Return: the size of the message on success, else -ERRNO + */ +static ssize_t message_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct img_ascii_lcd_ctx *ctx = dev_get_drvdata(dev); + int err; + + err = img_ascii_lcd_display(ctx, buf, count); + return err ?: count; +} + +static DEVICE_ATTR_RW(message); + +/** + * img_ascii_lcd_probe() - probe an LCD display device + * @pdev: the LCD platform device + * + * Probe an LCD display device, ensuring that we have the required resources in + * order to access the LCD & setting up private data as well as sysfs files. 
+ * + * Return: 0 on success, else -ERRNO + */ +static int img_ascii_lcd_probe(struct platform_device *pdev) +{ + const struct of_device_id *match; + const struct img_ascii_lcd_config *cfg; + struct img_ascii_lcd_ctx *ctx; + struct resource *res; + int err; + + match = of_match_device(img_ascii_lcd_matches, &pdev->dev); + if (!match) + return -ENODEV; + + cfg = match->data; + ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx) + cfg->num_chars, + GFP_KERNEL); + if (!ctx) + return -ENOMEM; + + if (cfg->external_regmap) { + ctx->regmap = syscon_node_to_regmap(pdev->dev.parent->of_node); + if (IS_ERR(ctx->regmap)) + return PTR_ERR(ctx->regmap); + + if (of_property_read_u32(pdev->dev.of_node, "offset", + &ctx->offset)) + return -EINVAL; + } else { + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + ctx->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(ctx->base)) + return PTR_ERR(ctx->base); + } + + ctx->pdev = pdev; + ctx->cfg = cfg; + ctx->message = NULL; + ctx->scroll_pos = 0; + ctx->scroll_rate = HZ / 2; + + /* initialise a timer for scrolling the message */ + init_timer(&ctx->timer); + ctx->timer.function = img_ascii_lcd_scroll; + ctx->timer.data = (unsigned long)ctx; + + platform_set_drvdata(pdev, ctx); + + /* display a default message */ + err = img_ascii_lcd_display(ctx, "Linux " UTS_RELEASE " ", -1); + if (err) + goto out_del_timer; + + err = device_create_file(&pdev->dev, &dev_attr_message); + if (err) + goto out_del_timer; + + return 0; +out_del_timer: + del_timer_sync(&ctx->timer); + return err; +} + +/** + * img_ascii_lcd_remove() - remove an LCD display device + * @pdev: the LCD platform device + * + * Remove an LCD display device, freeing private resources & ensuring that the + * driver stops using the LCD display registers. + * + * Return: 0 + */ +static int img_ascii_lcd_remove(struct platform_device *pdev) +{ + struct img_ascii_lcd_ctx *ctx = platform_get_drvdata(pdev); + + device_remove_file(&pdev->dev, &dev_attr_message); + del_timer_sync(&ctx->timer); + return 0; +} + +static struct platform_driver img_ascii_lcd_driver = { + .driver = { + .name = "img-ascii-lcd", + .of_match_table = img_ascii_lcd_matches, + }, + .probe = img_ascii_lcd_probe, + .remove = img_ascii_lcd_remove, +}; +module_platform_driver(img_ascii_lcd_driver); -- cgit v1.2.3-59-g8ed1b From 690803acca14e8aa101ca5204f0a23c24d7ad8c3 Mon Sep 17 00:00:00 2001 From: Paul Burton Date: Mon, 19 Sep 2016 22:21:18 +0100 Subject: irqchip: i8259: Add domain before mapping parent irq Mapping the parent IRQ will use a virq number which may conflict with the hardcoded I8259A_IRQ_BASE..I8259A_IRQ_BASE+15 range that the i8259 driver expects to be free. If this occurs then we'll hit errors when adding the i8259 IRQ domain, since one of its virq numbers will already be in use. Avoid this by adding the i8259 domain before mapping the parent IRQ, such that the i8259 virq numbers become used before the parent interrupt controller gets a chance to use any of them. 
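For readability, this is roughly how the fixed init path reads once the hunks below are applied: the fixed I8259A virq range is claimed by the domain before the parent interrupt is mapped. It leans on the driver's internal helpers (__init_i8259_irqs, i8259_irq_dispatch) exactly as shown in the diff, with no additional error handling invented here.

static int __init demo_i8259_of_init(struct device_node *node,
				     struct device_node *parent)
{
	struct irq_domain *domain;
	unsigned int parent_irq;

	/* Claim the fixed I8259A_IRQ_BASE..+15 virqs first. */
	domain = __init_i8259_irqs(node);

	parent_irq = irq_of_parse_and_map(node, 0);
	if (!parent_irq) {
		pr_err("Failed to map i8259 parent IRQ\n");
		irq_domain_remove(domain);
		return -ENODEV;
	}

	irq_set_chained_handler_and_data(parent_irq, i8259_irq_dispatch,
					 domain);
	return 0;
}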
Signed-off-by: Paul Burton Acked-by: Thomas Gleixner Cc: Marc Zyngier Cc: Jason Cooper Cc: linux-mips@linux-mips.org Cc: linux-kernel@vger.kernel.org Patchwork: https://patchwork.linux-mips.org/patch/14269/ Signed-off-by: Ralf Baechle --- drivers/irqchip/irq-i8259.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/irqchip/irq-i8259.c b/drivers/irqchip/irq-i8259.c index 6b304eb39bd2..85897fdc1527 100644 --- a/drivers/irqchip/irq-i8259.c +++ b/drivers/irqchip/irq-i8259.c @@ -370,13 +370,15 @@ int __init i8259_of_init(struct device_node *node, struct device_node *parent) struct irq_domain *domain; unsigned int parent_irq; + domain = __init_i8259_irqs(node); + parent_irq = irq_of_parse_and_map(node, 0); if (!parent_irq) { pr_err("Failed to map i8259 parent IRQ\n"); + irq_domain_remove(domain); return -ENODEV; } - domain = __init_i8259_irqs(node); irq_set_chained_handler_and_data(parent_irq, i8259_irq_dispatch, domain); return 0; -- cgit v1.2.3-59-g8ed1b From 19afc3d269fe66137ddb030b8ffdb8553066ba4a Mon Sep 17 00:00:00 2001 From: Paul Burton Date: Mon, 19 Sep 2016 22:21:19 +0100 Subject: irqchip: i8259: Allow platforms to override poll function The default i8259 polling function (i8259_irq) is nicely generic but is fairly costly. Platforms often provide an alternative means of polling for an i8259 interrupt, and when using the i8259 without device tree have typically just chained its parent interrupt to their own handler function. In order to allow for platform-specific polling functions to be used in cases where the driver is probed via device tree, provide an i8259_set_poll function that accepts a pointer to an alternative poll function that will override the default. Signed-off-by: Paul Burton Acked-by: Thomas Gleixner Cc: Jason Cooper Cc: Marc Zyngier Cc: linux-mips@linux-mips.org Cc: linux-kernel@vger.kernel.org Patchwork: https://patchwork.linux-mips.org/patch/14270/ Signed-off-by: Ralf Baechle --- arch/mips/include/asm/i8259.h | 11 +++++++++++ drivers/irqchip/irq-i8259.c | 8 +++++++- 2 files changed, 18 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/arch/mips/include/asm/i8259.h b/arch/mips/include/asm/i8259.h index a7fbcd6ed13c..b27fcc4ae474 100644 --- a/arch/mips/include/asm/i8259.h +++ b/arch/mips/include/asm/i8259.h @@ -43,6 +43,17 @@ extern void make_8259A_irq(unsigned int irq); extern void init_i8259_irqs(void); extern int i8259_of_init(struct device_node *node, struct device_node *parent); +/** + * i8159_set_poll() - Override the i8259 polling function + * @poll: pointer to platform-specific polling function + * + * Call this to override the generic i8259 polling function, which directly + * accesses i8259 registers, with a platform specific one which may be faster + * in cases where hardware provides a more optimal means of polling for an + * interrupt. + */ +extern void i8259_set_poll(int (*poll)(void)); + /* * Do the traditional i8259 interrupt polling thing. 
This is for the few * cases where no better interrupt acknowledge method is available and we diff --git a/drivers/irqchip/irq-i8259.c b/drivers/irqchip/irq-i8259.c index 85897fdc1527..1f4a3442342b 100644 --- a/drivers/irqchip/irq-i8259.c +++ b/drivers/irqchip/irq-i8259.c @@ -38,6 +38,7 @@ static void disable_8259A_irq(struct irq_data *d); static void enable_8259A_irq(struct irq_data *d); static void mask_and_ack_8259A(struct irq_data *d); static void init_8259A(int auto_eoi); +static int (*i8259_poll)(void) = i8259_irq; static struct irq_chip i8259A_chip = { .name = "XT-PIC", @@ -51,6 +52,11 @@ static struct irq_chip i8259A_chip = { * 8259A PIC functions to handle ISA devices: */ +void i8259_set_poll(int (*poll)(void)) +{ + i8259_poll = poll; +} + /* * This contains the irq mask for both 8259A irq controllers, */ @@ -355,7 +361,7 @@ void __init init_i8259_irqs(void) static void i8259_irq_dispatch(struct irq_desc *desc) { struct irq_domain *domain = irq_desc_get_handler_data(desc); - int hwirq = i8259_irq(); + int hwirq = i8259_poll(); unsigned int irq; if (hwirq < 0) -- cgit v1.2.3-59-g8ed1b From 5d2949ec86a54b73a6fc2557ef784e76c0d2cba9 Mon Sep 17 00:00:00 2001 From: Paul Burton Date: Mon, 19 Sep 2016 22:21:20 +0100 Subject: irqchip: i8259: Remove unused i8259A_irq_pending The i8259A_irq_pending function is unused. Remove the dead code. Signed-off-by: Paul Burton Acked-by: Thomas Gleixner Cc: Jason Cooper Cc: Marc Zyngier Cc: linux-mips@linux-mips.org Cc: linux-kernel@vger.kernel.org Patchwork: https://patchwork.linux-mips.org/patch/14271/ Signed-off-by: Ralf Baechle --- arch/mips/include/asm/i8259.h | 1 - drivers/irqchip/irq-i8259.c | 18 ------------------ 2 files changed, 19 deletions(-) (limited to 'drivers') diff --git a/arch/mips/include/asm/i8259.h b/arch/mips/include/asm/i8259.h index b27fcc4ae474..32229c77906a 100644 --- a/arch/mips/include/asm/i8259.h +++ b/arch/mips/include/asm/i8259.h @@ -37,7 +37,6 @@ extern raw_spinlock_t i8259A_lock; -extern int i8259A_irq_pending(unsigned int irq); extern void make_8259A_irq(unsigned int irq); extern void init_i8259_irqs(void); diff --git a/drivers/irqchip/irq-i8259.c b/drivers/irqchip/irq-i8259.c index 1f4a3442342b..1aec12c6d9ac 100644 --- a/drivers/irqchip/irq-i8259.c +++ b/drivers/irqchip/irq-i8259.c @@ -95,24 +95,6 @@ static void enable_8259A_irq(struct irq_data *d) raw_spin_unlock_irqrestore(&i8259A_lock, flags); } -int i8259A_irq_pending(unsigned int irq) -{ - unsigned int mask; - unsigned long flags; - int ret; - - irq -= I8259A_IRQ_BASE; - mask = 1 << irq; - raw_spin_lock_irqsave(&i8259A_lock, flags); - if (irq < 8) - ret = inb(PIC_MASTER_CMD) & mask; - else - ret = inb(PIC_SLAVE_CMD) & (mask >> 8); - raw_spin_unlock_irqrestore(&i8259A_lock, flags); - - return ret; -} - void make_8259A_irq(unsigned int irq) { disable_irq_nosync(irq); -- cgit v1.2.3-59-g8ed1b From ecd76eded07d1ba58bba410ec4bcbd8630fb9b16 Mon Sep 17 00:00:00 2001 From: Paul Burton Date: Mon, 19 Sep 2016 22:21:24 +0100 Subject: of/platform: Probe "isa" busses by default Since commit 44a7185c2ae6 ("of/platform: Add common method to populate default bus") platforms calling of_platform_bus_probe from an initcall is either a rather unsafe race with of_platform_default_populate_init or a no-op. The MIPS Malta board needs to probe devices under an ISA bus, which we do support in the of_busses array but until now haven't included in of_default_bus_match_table. 
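To make the gap concrete: a board wanting its "isa" bus children populated has so far had to carry its own match table and populate call, roughly as sketched below (names are illustrative, and of_platform_populate() stands in for whichever probe helper the board used); with "isa" in the default table this boilerplate becomes unnecessary.

#include <linux/of_platform.h>

/* Sketch of the board-specific boilerplate the default table replaces. */
static const struct of_device_id demo_bus_ids[] = {
	{ .compatible = "simple-bus", },
	{ .compatible = "isa", },
	{ },
};

static int __init demo_publish_devices(void)
{
	return of_platform_populate(NULL, demo_bus_ids, NULL, NULL);
}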
Add an "isa" entry to of_default_bus_match_table such that we can just accept use of of_platform_default_populate_init & remove the Malta-specific match table in a later patch. Signed-off-by: Paul Burton Acked-by: Rob Herring Cc: Frank Rowand Cc: linux-mips@linux-mips.org Cc: devicetree@vger.kernel.org Cc: linux-kernel@vger.kernel.org Patchwork: https://patchwork.linux-mips.org/patch/14275/ Signed-off-by: Ralf Baechle --- drivers/of/platform.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/of/platform.c b/drivers/of/platform.c index f39ccd5aa701..d85e4318b099 100644 --- a/drivers/of/platform.c +++ b/drivers/of/platform.c @@ -29,6 +29,7 @@ const struct of_device_id of_default_bus_match_table[] = { { .compatible = "simple-bus", }, { .compatible = "simple-mfd", }, + { .compatible = "isa", }, #ifdef CONFIG_ARM_AMBA { .compatible = "arm,amba-bus", }, #endif /* CONFIG_ARM_AMBA */ -- cgit v1.2.3-59-g8ed1b From 1e54134ccad00f76ddf00f3e77db3dc8fdefbb47 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Thu, 29 Sep 2016 15:40:54 -0400 Subject: rtl8xxxu: Fix memory leak in handling rxdesc16 packets A device running without RX package aggregation could return more data in the USB packet than the actual network packet. In this case we could would clone the skb but then determine that that there was no packet to handle and exit without freeing the cloned skb first. This has so far only been observed with 8188eu devices, but could affect others. Signed-off-by: Jes Sorensen Cc: stable@vger.kernel.org # 4.8+ Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c index b2d7f6e69667..a96ff17759e4 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c @@ -5197,7 +5197,12 @@ int rtl8xxxu_parse_rxdesc16(struct rtl8xxxu_priv *priv, struct sk_buff *skb) pkt_offset = roundup(pkt_len + drvinfo_sz + desc_shift + sizeof(struct rtl8xxxu_rxdesc16), 128); - if (pkt_cnt > 1) + /* + * Only clone the skb if there's enough data at the end to + * at least cover the rx descriptor + */ + if (pkt_cnt > 1 && + urb_len > (pkt_offset + sizeof(struct rtl8xxxu_rxdesc16))) next_skb = skb_clone(skb, GFP_ATOMIC); rx_status = IEEE80211_SKB_RXCB(skb); -- cgit v1.2.3-59-g8ed1b From 8a55698f2f29d227825173420d7b99b9277ca88c Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Thu, 29 Sep 2016 15:40:55 -0400 Subject: rtl8xxxu: Fix big-endian problem reporting mactime The full RX descriptor is converted so converting tsfl again would return it to it's original endian value. 
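A tiny sketch of the double-conversion hazard being fixed: once the receive descriptor has been byte-swapped to CPU endianness in place, its fields are plain u32 and must not go through le32_to_cpu() a second time, which on big-endian would swap them back. The names below are placeholders.

#include <linux/types.h>
#include <asm/byteorder.h>

struct demo_desc {
	u32 tsfl;			/* already CPU-endian after conversion */
};

static u32 demo_read_tsfl(const __le32 *raw, struct demo_desc *d)
{
	d->tsfl = le32_to_cpu(*raw);	/* convert exactly once, at the boundary */
	return d->tsfl;			/* no second le32_to_cpu() on d->tsfl */
}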
Signed-off-by: Jes Sorensen Cc: stable@vger.kernel.org # 4.8+ Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h | 4 ++-- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h index 1016628926d2..08d587a342d3 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h @@ -238,7 +238,7 @@ struct rtl8xxxu_rxdesc16 { u32 pattern1match:1; u32 pattern0match:1; #endif - __le32 tsfl; + u32 tsfl; #if 0 u32 bassn:12; u32 bavld:1; @@ -368,7 +368,7 @@ struct rtl8xxxu_rxdesc24 { u32 ldcp:1; u32 splcp:1; #endif - __le32 tsfl; + u32 tsfl; }; struct rtl8xxxu_txdesc32 { diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c index a96ff17759e4..a5e6ec2152bf 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c @@ -5220,7 +5220,7 @@ int rtl8xxxu_parse_rxdesc16(struct rtl8xxxu_priv *priv, struct sk_buff *skb) rtl8xxxu_rx_parse_phystats(priv, rx_status, phy_stats, rx_desc->rxmcs); - rx_status->mactime = le32_to_cpu(rx_desc->tsfl); + rx_status->mactime = rx_desc->tsfl; rx_status->flag |= RX_FLAG_MACTIME_START; if (!rx_desc->swdec) @@ -5290,7 +5290,7 @@ int rtl8xxxu_parse_rxdesc24(struct rtl8xxxu_priv *priv, struct sk_buff *skb) rtl8xxxu_rx_parse_phystats(priv, rx_status, phy_stats, rx_desc->rxmcs); - rx_status->mactime = le32_to_cpu(rx_desc->tsfl); + rx_status->mactime = rx_desc->tsfl; rx_status->flag |= RX_FLAG_MACTIME_START; if (!rx_desc->swdec) -- cgit v1.2.3-59-g8ed1b From ab05e5ec81c76f3a852919c22984c885edd2414a Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Fri, 30 Sep 2016 19:35:17 -0400 Subject: rtl8xxxu: Fix rtl8723bu driver reload issue The generic disable_rf() function clears bits 22 and 23 in REG_RX_WAIT_CCA, however we did not re-enable them again in rtl8723b_enable_rf() This resolves the problem for me with 8723bu devices not working again after reloading the driver. Signed-off-by: Jes Sorensen Cc: stable@vger.kernel.org # 4.7+ Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c index 6c086b5657e9..02b8ddd98a95 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c @@ -1498,6 +1498,10 @@ static void rtl8723b_enable_rf(struct rtl8xxxu_priv *priv) u32 val32; u8 val8; + val32 = rtl8xxxu_read32(priv, REG_RX_WAIT_CCA); + val32 |= (BIT(22) | BIT(23)); + rtl8xxxu_write32(priv, REG_RX_WAIT_CCA, val32); + /* * No indication anywhere as to what 0x0790 does. The 2 antenna * vendor code preserves bits 6-7 here. -- cgit v1.2.3-59-g8ed1b From 29d5e6fbd65be89dcd32f070fc45ee0e3b2f82b6 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Fri, 30 Sep 2016 19:35:18 -0400 Subject: rtl8xxxu: Fix rtl8192eu driver reload issue The 8192eu suffered from two issues when reloading the driver. The same problems as with the 8723bu where REG_RX_WAIT_CCA bits 22 and 23 didn't get set in rtl8192e_enable_rf(). In addition it also seems prone to issues when setting REG_RF_CTRL to 0 intead of just disabling the RF_ENABLE bit. 
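Condensed from the hunk below, this is the read-modify-write that replaces the blanket write of 0, so only RF_ENABLE is cleared and the other REG_RF_CTRL bits survive a disable/enable cycle (driver-internal accessors exactly as used in the diff):

static void demo_rf_disable(struct rtl8xxxu_priv *priv)
{
	u8 val8;

	val8 = rtl8xxxu_read8(priv, REG_RF_CTRL);
	val8 &= ~RF_ENABLE;			/* clear just the enable bit */
	rtl8xxxu_write8(priv, REG_RF_CTRL, val8);
}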
Similar to what was causing issues with the 8188eu. With this patch I can successfully reload the driver and reassociate to an APi with an 8192eu dongle. Signed-off-by: Jes Sorensen Cc: stable@vger.kernel.org # 4.8+ Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c index df54d27e7851..a793fedc3654 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c @@ -1461,7 +1461,9 @@ static int rtl8192eu_active_to_emu(struct rtl8xxxu_priv *priv) int count, ret = 0; /* Turn off RF */ - rtl8xxxu_write8(priv, REG_RF_CTRL, 0); + val8 = rtl8xxxu_read8(priv, REG_RF_CTRL); + val8 &= ~RF_ENABLE; + rtl8xxxu_write8(priv, REG_RF_CTRL, val8); /* Switch DPDT_SEL_P output from register 0x65[2] */ val8 = rtl8xxxu_read8(priv, REG_LEDCFG2); @@ -1593,6 +1595,10 @@ static void rtl8192e_enable_rf(struct rtl8xxxu_priv *priv) u32 val32; u8 val8; + val32 = rtl8xxxu_read32(priv, REG_RX_WAIT_CCA); + val32 |= (BIT(22) | BIT(23)); + rtl8xxxu_write32(priv, REG_RX_WAIT_CCA, val32); + val8 = rtl8xxxu_read8(priv, REG_GPIO_MUXCFG); val8 |= BIT(5); rtl8xxxu_write8(priv, REG_GPIO_MUXCFG, val8); -- cgit v1.2.3-59-g8ed1b From 62837b3c1a95535d1a287c9c8c6563bbd8d37033 Mon Sep 17 00:00:00 2001 From: Dmitry Torokhov Date: Wed, 5 Oct 2016 22:49:30 -0700 Subject: Input: elantech - add Fujitsu Lifebook E556 to force crc_enabled Another Lifebook machine that needs the same quirk as other similar models to make the driver working. Also let's reorder elantech_dmi_force_crc_enabled list so LIfebook enries are in alphabetical order. 
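The fix below follows the driver's usual DMI-quirk pattern; here is a minimal, self-contained sketch of that pattern with placeholder names (the real table is elantech_dmi_force_crc_enabled in the diff):

#include <linux/dmi.h>
#include <linux/types.h>

static const struct dmi_system_id demo_force_crc[] = {
	{
		/* Match one machine by vendor + product strings. */
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
			DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E556"),
		},
	},
	{ }					/* terminator */
};

static bool demo_needs_crc(void)
{
	return dmi_check_system(demo_force_crc);	/* true if this machine matches */
}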
Reported-by: William Linna Tested-by: William Linna Reviewed-by: Benjamin Tissoires Cc: stable@vger.kernel.org Signed-off-by: Dmitry Torokhov --- drivers/input/mouse/elantech.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c index 37b52f150664..db7d1d666ac1 100644 --- a/drivers/input/mouse/elantech.c +++ b/drivers/input/mouse/elantech.c @@ -1516,6 +1516,13 @@ static const struct dmi_system_id elantech_dmi_force_crc_enabled[] = { DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H760"), }, }, + { + /* Fujitsu LIFEBOOK E544 does not work with crc_enabled == 0 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), + DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E544"), + }, + }, { /* Fujitsu LIFEBOOK E554 does not work with crc_enabled == 0 */ .matches = { @@ -1524,10 +1531,10 @@ static const struct dmi_system_id elantech_dmi_force_crc_enabled[] = { }, }, { - /* Fujitsu LIFEBOOK E544 does not work with crc_enabled == 0 */ + /* Fujitsu LIFEBOOK E556 does not work with crc_enabled == 0 */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), - DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E544"), + DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E556"), }, }, { -- cgit v1.2.3-59-g8ed1b From 2967999fbceffa8520987ab9b3b00a55d6997dba Mon Sep 17 00:00:00 2001 From: Mika Westerberg Date: Wed, 5 Oct 2016 17:46:50 +0300 Subject: iio: adc: ti-adc081c: Select IIO_TRIGGERED_BUFFER to prevent build errors Commit 08e05d1fce5c ("ti-adc081c: Initial triggered buffer support") added triggered buffer support but that also requires CONFIG_IIO_TRIGGERED_BUFFER, otherwise we get errors from linker such as: drivers/built-in.o: In function `adc081c_remove': drivers/iio/adc/ti-adc081c.c:225: undefined reference to `iio_triggered_buffer_cleanup' Fix these by explicitly selecting both CONFIG_IIO_TRIGGERED_BUFFER and CONFIG_IIO_BUFFER in Kconfig for the driver. Signed-off-by: Mika Westerberg Signed-off-by: Jonathan Cameron --- drivers/iio/adc/Kconfig | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers') diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig index 7edcf3238620..99c051490eff 100644 --- a/drivers/iio/adc/Kconfig +++ b/drivers/iio/adc/Kconfig @@ -437,6 +437,8 @@ config STX104 config TI_ADC081C tristate "Texas Instruments ADC081C/ADC101C/ADC121C family" depends on I2C + select IIO_BUFFER + select IIO_TRIGGERED_BUFFER help If you say yes here you get support for Texas Instruments ADC081C, ADC101C and ADC121C ADC chips. -- cgit v1.2.3-59-g8ed1b From 6449e31ddebdce68508cfaf0915d31aad3835f4f Mon Sep 17 00:00:00 2001 From: Alexey Khoroshilov Date: Sun, 25 Sep 2016 00:19:05 +0300 Subject: firewire: nosy: do not ignore errors in ioremap_nocache() There is no check if ioremap_nocache() returns a valid pointer. Potentially it can lead to null pointer dereference. Found by Linux Driver Verification project (linuxtesting.org). 
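A minimal sketch of the check being added, assuming nothing beyond the standard PCI and ioremap APIs: a NULL return from ioremap_nocache() is treated as a hard error so the caller can unwind instead of dereferencing an unmapped pointer later.

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/pci.h>

static int demo_map_registers(struct pci_dev *dev, void __iomem **regs)
{
	/* Map BAR 0; the size here is illustrative. */
	*regs = ioremap_nocache(pci_resource_start(dev, 0), 0x1000);
	if (!*regs) {
		dev_err(&dev->dev, "Failed to map registers\n");
		return -ENOMEM;
	}
	return 0;
}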
Signed-off-by: Alexey Khoroshilov Signed-off-by: Stefan Richter (renamed goto labels) --- drivers/firewire/nosy.c | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/firewire/nosy.c b/drivers/firewire/nosy.c index 631c977b0da5..180f0a96528c 100644 --- a/drivers/firewire/nosy.c +++ b/drivers/firewire/nosy.c @@ -566,6 +566,11 @@ add_card(struct pci_dev *dev, const struct pci_device_id *unused) lynx->registers = ioremap_nocache(pci_resource_start(dev, 0), PCILYNX_MAX_REGISTER); + if (lynx->registers == NULL) { + dev_err(&dev->dev, "Failed to map registers\n"); + ret = -ENOMEM; + goto fail_deallocate_lynx; + } lynx->rcv_start_pcl = pci_alloc_consistent(lynx->pci_device, sizeof(struct pcl), &lynx->rcv_start_pcl_bus); @@ -578,7 +583,7 @@ add_card(struct pci_dev *dev, const struct pci_device_id *unused) lynx->rcv_buffer == NULL) { dev_err(&dev->dev, "Failed to allocate receive buffer\n"); ret = -ENOMEM; - goto fail_deallocate; + goto fail_deallocate_buffers; } lynx->rcv_start_pcl->next = cpu_to_le32(lynx->rcv_pcl_bus); lynx->rcv_pcl->next = cpu_to_le32(PCL_NEXT_INVALID); @@ -641,7 +646,7 @@ add_card(struct pci_dev *dev, const struct pci_device_id *unused) dev_err(&dev->dev, "Failed to allocate shared interrupt %d\n", dev->irq); ret = -EIO; - goto fail_deallocate; + goto fail_deallocate_buffers; } lynx->misc.parent = &dev->dev; @@ -668,7 +673,7 @@ fail_free_irq: reg_write(lynx, PCI_INT_ENABLE, 0); free_irq(lynx->pci_device->irq, lynx); -fail_deallocate: +fail_deallocate_buffers: if (lynx->rcv_start_pcl) pci_free_consistent(lynx->pci_device, sizeof(struct pcl), lynx->rcv_start_pcl, lynx->rcv_start_pcl_bus); @@ -679,6 +684,8 @@ fail_deallocate: pci_free_consistent(lynx->pci_device, PAGE_SIZE, lynx->rcv_buffer, lynx->rcv_buffer_bus); iounmap(lynx->registers); + +fail_deallocate_lynx: kfree(lynx); fail_disable: -- cgit v1.2.3-59-g8ed1b From f9f4872df6e1801572949f8a370c886122d4b6da Mon Sep 17 00:00:00 2001 From: Srinivas Pandruvada Date: Sat, 8 Oct 2016 12:42:38 -0700 Subject: cpufreq: intel_pstate: Fix unsafe HWP MSR access This is a requirement that MSR MSR_PM_ENABLE must be set to 0x01 before reading MSR_HWP_CAPABILITIES on a given CPU. If cpufreq init() is scheduled on a CPU which is not same as policy->cpu or migrates to a different CPU before calling msr read for MSR_HWP_CAPABILITIES, it is possible that MSR_PM_ENABLE was not to set to 0x01 on that CPU. This will cause GP fault. So like other places in this path rdmsrl_on_cpu should be used instead of rdmsrl. Moreover the scope of MSR_HWP_CAPABILITIES is on per thread basis, so it should be read from the same CPU, for which MSR MSR_HWP_REQUEST is getting set. dmesg dump or warning: [ 22.014488] WARNING: CPU: 139 PID: 1 at arch/x86/mm/extable.c:50 ex_handler_rdmsr_unsafe+0x68/0x70 [ 22.014492] unchecked MSR access error: RDMSR from 0x771 [ 22.014493] Modules linked in: [ 22.014507] CPU: 139 PID: 1 Comm: swapper/0 Not tainted 4.7.5+ #1 ... ... [ 22.014516] Call Trace: [ 22.014542] [] dump_stack+0x63/0x82 [ 22.014558] [] __warn+0xcb/0xf0 [ 22.014561] [] warn_slowpath_fmt+0x4f/0x60 [ 22.014563] [] ex_handler_rdmsr_unsafe+0x68/0x70 [ 22.014564] [] fixup_exception+0x39/0x50 [ 22.014604] [] do_general_protection+0x80/0x150 [ 22.014610] [] general_protection+0x28/0x30 [ 22.014635] [] ? get_target_pstate_use_performance+0xb0/0xb0 [ 22.014642] [] ? 
native_read_msr+0x7/0x40 [ 22.014657] [] intel_pstate_hwp_set+0x23/0x130 [ 22.014660] [] intel_pstate_set_policy+0x1b6/0x340 [ 22.014662] [] cpufreq_set_policy+0xeb/0x2c0 [ 22.014664] [] cpufreq_init_policy+0x79/0xe0 [ 22.014666] [] ? cpufreq_update_policy+0x120/0x120 [ 22.014669] [] cpufreq_online+0x406/0x820 [ 22.014671] [] cpufreq_add_dev+0x5f/0x90 [ 22.014717] [] subsys_interface_register+0xb8/0x100 [ 22.014719] [] cpufreq_register_driver+0x14c/0x210 [ 22.014749] [] intel_pstate_init+0x39d/0x4d5 [ 22.014751] [] ? cpufreq_gov_dbs_init+0x12/0x12 Cc: 4.3+ # 4.3+ Signed-off-by: Srinivas Pandruvada Signed-off-by: Rafael J. Wysocki --- drivers/cpufreq/intel_pstate.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 806f2039571e..bc976a3db253 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -562,12 +562,12 @@ static void intel_pstate_hwp_set(const struct cpumask *cpumask) int min, hw_min, max, hw_max, cpu, range, adj_range; u64 value, cap; - rdmsrl(MSR_HWP_CAPABILITIES, cap); - hw_min = HWP_LOWEST_PERF(cap); - hw_max = HWP_HIGHEST_PERF(cap); - range = hw_max - hw_min; - for_each_cpu(cpu, cpumask) { + rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap); + hw_min = HWP_LOWEST_PERF(cap); + hw_max = HWP_HIGHEST_PERF(cap); + range = hw_max - hw_min; + rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value); adj_range = limits->min_perf_pct * range / 100; min = hw_min + adj_range; -- cgit v1.2.3-59-g8ed1b From f00593a4bdd22e0885db89df8ee8afcc6867994f Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Fri, 30 Sep 2016 03:13:34 +0200 Subject: cpufreq: intel_pstate: Clarify comment in get_target_pstate_use_performance() Make the comment explaining the meaning of the perf_scaled variable in get_target_pstate_use_performance() more straightforward. Signed-off-by: Rafael J. Wysocki --- drivers/cpufreq/intel_pstate.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index bc976a3db253..1c7b91c5805b 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -1251,10 +1251,11 @@ static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu) u64 duration_ns; /* - * perf_scaled is the average performance during the last sampling - * period scaled by the ratio of the maximum P-state to the P-state - * requested last time (in percent). That measures the system's - * response to the previous P-state selection. + * perf_scaled is the ratio of the average P-state during the last + * sampling period to the P-state requested last time (in percent). + * + * That measures the system's response to the previous P-state + * selection. */ max_pstate = cpu->pstate.max_pstate_physical; current_pstate = cpu->pstate.current_pstate; -- cgit v1.2.3-59-g8ed1b From 84baf1725dc532d9746df2d216c1921154767109 Mon Sep 17 00:00:00 2001 From: Srinivas Pandruvada Date: Tue, 4 Oct 2016 13:51:40 -0700 Subject: ACPI / fan: Fix error reading cur_state On some platforms with ACPI4 variable speed fan, reading cur_state from cooling device returns "invalid value" error. This confuses user space applications. This issue occurs as the current driver doesn't take account of "FineGrainControl" from _FIF(Fan Information). 
When the "FineGrainControl" is set, _FSL(FSL Set Level) takes argument as a percent, which doesn't have to match from any control value from _FPS(Fan Performance States). It is also possible that the Fan is not actually running at the requested speed returning a lower speed. On some platforms the BIOS is setting fan speed to a level during boot, which will not have an exact match to _FPS control values. The current implementation will treat this level as invalid value. The simple change is to atleast return state corresponding to a maximum control value in the _FPS compared to the current level. Signed-off-by: Srinivas Pandruvada Signed-off-by: Rafael J. Wysocki --- drivers/acpi/fan.c | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c index 384cfc3083e1..6cf4988206f2 100644 --- a/drivers/acpi/fan.c +++ b/drivers/acpi/fan.c @@ -129,8 +129,18 @@ static int fan_get_state_acpi4(struct acpi_device *device, unsigned long *state) control = obj->package.elements[1].integer.value; for (i = 0; i < fan->fps_count; i++) { - if (control == fan->fps[i].control) + /* + * When Fine Grain Control is set, return the state + * corresponding to maximum fan->fps[i].control + * value compared to the current speed. Here the + * fan->fps[] is sorted array with increasing speed. + */ + if (fan->fif.fine_grain_ctrl && control < fan->fps[i].control) { + i = (i > 0) ? i - 1 : 0; break; + } else if (control == fan->fps[i].control) { + break; + } } if (i == fan->fps_count) { dev_dbg(&device->dev, "Invalid control value returned\n"); -- cgit v1.2.3-59-g8ed1b From eab05ec38073f72389386f4a77fb58c06e246a4c Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Wed, 5 Oct 2016 10:33:16 -0700 Subject: ACPI / EC: Fix unused function warning when CONFIG_PM_SLEEP=n Signed-off-by: Eric Biggers Signed-off-by: Rafael J. 
Wysocki --- drivers/acpi/ec.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers') diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index 680531062160..48e19d013170 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -526,6 +526,7 @@ static void acpi_ec_enable_event(struct acpi_ec *ec) acpi_ec_clear(ec); } +#ifdef CONFIG_PM_SLEEP static bool acpi_ec_query_flushed(struct acpi_ec *ec) { bool flushed; @@ -557,6 +558,7 @@ static void acpi_ec_disable_event(struct acpi_ec *ec) spin_unlock_irqrestore(&ec->lock, flags); __acpi_ec_flush_event(ec); } +#endif /* CONFIG_PM_SLEEP */ static bool acpi_ec_guard_event(struct acpi_ec *ec) { -- cgit v1.2.3-59-g8ed1b From 3d1355b3cfad53feba76a73b052c757a7de7f4de Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Mon, 3 Oct 2016 21:21:42 +0200 Subject: HID: hid-led: fix issue with transfer buffer not being dma capable The hid-led driver works fine under 4.8.0, however with the next kernel from today I get this: ------------[ cut here ]------------ WARNING: CPU: 0 PID: 2578 at drivers/usb/core/hcd.c:1584 usb_hcd_map_urb_for_dma+0x373/0x550 [usbcore] transfer buffer not dma capable Modules linked in: hid_led(+) usbhid vfat fat ir_sony_decoder iwlmvm led_class mac80211 snd_hda_codec_realtek snd_hda_codec_generic x86_pkg_temp_thermal iwlwifi crc32c_intel snd_hda_codec_hdmi i2c_i801 i2c_smbus snd_hda_intel cfg80211 snd_hda_codec snd_hda_core snd_pcm r8169 snd_timer mei_me mii snd mei ir_lirc_codec lirc_dev nuvoton_cir rc_core btusb btintel bluetooth rfkill usb_storage efivarfs ipv6 ehci_pci ehci_hcd xhci_pci xhci_hcd usbcore usb_common ext4 jbd2 mbcache ahci libahci libata CPU: 0 PID: 2578 Comm: systemd-udevd Not tainted 4.8.0-rc8-next-20161003 #1 Hardware name: ZOTAC ZBOX-CI321NANO/ZBOX-CI321NANO, BIOS B246P105 06/01/2015 ffffc90003dbb7e0 ffffffff81280425 ffffc90003dbb830 0000000000000000 ffffc90003dbb820 ffffffff8105b086 0000063003dbb800 ffff88006f374480 0000000000000000 0000000000000000 0000000000000001 ffff880079544000 Call Trace: [] dump_stack+0x68/0x93 [] __warn+0xc6/0xe0 [] warn_slowpath_fmt+0x4a/0x50 [] usb_hcd_map_urb_for_dma+0x373/0x550 [usbcore] [] usb_hcd_submit_urb+0x316/0x9c0 [usbcore] [] ? rcu_read_lock_sched_held+0x40/0x80 [] ? module_assert_mutex_or_preempt+0x13/0x50 [] ? __module_address+0x27/0xf0 [] usb_submit_urb+0x2c4/0x520 [usbcore] [] usb_start_wait_urb+0x5a/0xe0 [usbcore] [] usb_control_msg+0xbc/0xf0 [usbcore] [] ? __module_address+0x27/0xf0 [] usbhid_raw_request+0xa4/0x180 [usbhid] [] hidled_recv+0x71/0xe0 [hid_led] [] thingm_init+0x2d/0x50 [hid_led] [] hidled_probe+0xcb/0x24a [hid_led] [] hid_device_probe+0xd2/0x150 [] driver_probe_device+0x1fd/0x2c0 [] __driver_attach+0x9a/0xa0 [] ? driver_probe_device+0x2c0/0x2c0 [] bus_for_each_dev+0x5d/0x90 [] driver_attach+0x19/0x20 [] bus_add_driver+0x11f/0x220 [] ? 0xffffffffa07ac000 [] driver_register+0x5b/0xd0 [] ? 0xffffffffa07ac000 [] __hid_register_driver+0x61/0xa0 [] hidled_driver_init+0x1e/0x20 [hid_led] [] do_one_initcall+0x38/0x150 [] ? rcu_read_lock_sched_held+0x40/0x80 [] ? kmem_cache_alloc_trace+0x1d0/0x230 [] do_init_module+0x5a/0x1cb [] load_module+0x1e42/0x2530 [] ? __symbol_put+0x50/0x50 [] ? show_coresize+0x30/0x30 [] ? kernel_read_file+0x100/0x190 [] ? kernel_read_file_from_fd+0x44/0x70 [] SYSC_finit_module+0xba/0xc0 [] SyS_finit_module+0x9/0x10 [] entry_SYSCALL_64_fastpath+0x18/0xad ---[ end trace c9e6ea27003ecf9e ]--- Fix this by using a kmalloc'ed buffer when calling hid_hw_raw_request. 
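The rule being enforced below, in a minimal standalone form: buffers handed to hid_hw_raw_request() end up in USB URBs, so they must be heap-allocated (DMA-capable), never on the stack or embedded inside another structure. Names are placeholders; the driver itself keeps one devm-allocated buffer instead of duplicating on every call.

#include <linux/hid.h>
#include <linux/slab.h>

static int demo_set_report(struct hid_device *hdev, const u8 *data, size_t len)
{
	u8 *buf;
	int ret;

	buf = kmemdup(data, len, GFP_KERNEL);	/* heap copy, safe for DMA */
	if (!buf)
		return -ENOMEM;

	ret = hid_hw_raw_request(hdev, buf[0], buf, len,
				 HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
	kfree(buf);
	return ret;
}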
Signed-off-by: Heiner Kallweit Reviewed-by: Benjamin Tissoires Signed-off-by: Jiri Kosina --- drivers/hid/hid-led.c | 23 +++++++++++++++++++---- 1 file changed, 19 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/hid/hid-led.c b/drivers/hid/hid-led.c index d8d55f37b4f5..d3e1ab162f7c 100644 --- a/drivers/hid/hid-led.c +++ b/drivers/hid/hid-led.c @@ -100,6 +100,7 @@ struct hidled_device { const struct hidled_config *config; struct hid_device *hdev; struct hidled_rgb *rgb; + u8 *buf; struct mutex lock; }; @@ -118,13 +119,19 @@ static int hidled_send(struct hidled_device *ldev, __u8 *buf) mutex_lock(&ldev->lock); + /* + * buffer provided to hid_hw_raw_request must not be on the stack + * and must not be part of a data structure + */ + memcpy(ldev->buf, buf, ldev->config->report_size); + if (ldev->config->report_type == RAW_REQUEST) - ret = hid_hw_raw_request(ldev->hdev, buf[0], buf, + ret = hid_hw_raw_request(ldev->hdev, buf[0], ldev->buf, ldev->config->report_size, HID_FEATURE_REPORT, HID_REQ_SET_REPORT); else if (ldev->config->report_type == OUTPUT_REPORT) - ret = hid_hw_output_report(ldev->hdev, buf, + ret = hid_hw_output_report(ldev->hdev, ldev->buf, ldev->config->report_size); else ret = -EINVAL; @@ -147,17 +154,21 @@ static int hidled_recv(struct hidled_device *ldev, __u8 *buf) mutex_lock(&ldev->lock); - ret = hid_hw_raw_request(ldev->hdev, buf[0], buf, + memcpy(ldev->buf, buf, ldev->config->report_size); + + ret = hid_hw_raw_request(ldev->hdev, buf[0], ldev->buf, ldev->config->report_size, HID_FEATURE_REPORT, HID_REQ_SET_REPORT); if (ret < 0) goto err; - ret = hid_hw_raw_request(ldev->hdev, buf[0], buf, + ret = hid_hw_raw_request(ldev->hdev, buf[0], ldev->buf, ldev->config->report_size, HID_FEATURE_REPORT, HID_REQ_GET_REPORT); + + memcpy(buf, ldev->buf, ldev->config->report_size); err: mutex_unlock(&ldev->lock); @@ -447,6 +458,10 @@ static int hidled_probe(struct hid_device *hdev, const struct hid_device_id *id) if (!ldev) return -ENOMEM; + ldev->buf = devm_kmalloc(&hdev->dev, MAX_REPORT_SIZE, GFP_KERNEL); + if (!ldev->buf) + return -ENOMEM; + ret = hid_parse(hdev); if (ret) return ret; -- cgit v1.2.3-59-g8ed1b From e15944099870f374ca7efc62f98cf23ba272ef43 Mon Sep 17 00:00:00 2001 From: Ioan-Adrian Ratiu Date: Tue, 27 Sep 2016 21:41:38 +0300 Subject: HID: hid-dr: add input mapping for axis selection Commit 79346d620e9d ("HID: input: force generic axis to be mapped to their user space axis") made mapping generic axes to their userspace equivalents mandatory and some lower end gamepads which were depending on the previous behaviour suffered severe regressions because they were reusing axes and expecting hid-input to multiplex their map to the respective userspace axis by always searching for and using the next available axis. One solution is to add a hid quirk for this type of "previous" behaviour in hid-input to bypass the new axes policy in favour of the old one, but since only one hardware vendor seems to be affected negatively we're better off making and exception and mapping in the driver for now; if more vendors or drivers turn out to experience the problem we should reconsider the quirk solution. 
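For context on the hook used by the fix below: an input_mapping callback returns 1 when the driver has mapped the usage itself (so hid-input skips its generic mapping), 0 to fall back to the generic mapping, and a negative value to drop the usage. A bare-bones sketch with placeholder names:

#include <linux/hid.h>
#include <linux/input.h>

static int demo_input_mapping(struct hid_device *hdev, struct hid_input *hi,
			      struct hid_field *field, struct hid_usage *usage,
			      unsigned long **bit, int *max)
{
	if (usage->hid == HID_GD_X) {
		hid_map_usage(hi, usage, bit, max, EV_ABS, ABS_X);
		return 1;		/* mapped here, skip generic mapping */
	}

	return 0;			/* let hid-input apply its default */
}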
Signed-off-by: Ioan-Adrian Ratiu Reviewed-by: Benjamin Tissoires Signed-off-by: Jiri Kosina --- drivers/hid/hid-dr.c | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) (limited to 'drivers') diff --git a/drivers/hid/hid-dr.c b/drivers/hid/hid-dr.c index 8fd4bf77f264..0ed843939b2c 100644 --- a/drivers/hid/hid-dr.c +++ b/drivers/hid/hid-dr.c @@ -306,6 +306,30 @@ static __u8 *dr_report_fixup(struct hid_device *hdev, __u8 *rdesc, return rdesc; } +#define map_abs(c) hid_map_usage(hi, usage, bit, max, EV_ABS, (c)) +#define map_rel(c) hid_map_usage(hi, usage, bit, max, EV_REL, (c)) + +static int dr_input_mapping(struct hid_device *hdev, struct hid_input *hi, + struct hid_field *field, struct hid_usage *usage, + unsigned long **bit, int *max) +{ + switch (usage->hid) { + /* + * revert to the old hid-input behavior where axes + * can be randomly assigned when hid->usage is reused. + */ + case HID_GD_X: case HID_GD_Y: case HID_GD_Z: + case HID_GD_RX: case HID_GD_RY: case HID_GD_RZ: + if (field->flags & HID_MAIN_ITEM_RELATIVE) + map_rel(usage->hid & 0xf); + else + map_abs(usage->hid & 0xf); + return 1; + } + + return 0; +} + static int dr_probe(struct hid_device *hdev, const struct hid_device_id *id) { int ret; @@ -352,6 +376,7 @@ static struct hid_driver dr_driver = { .id_table = dr_devices, .report_fixup = dr_report_fixup, .probe = dr_probe, + .input_mapping = dr_input_mapping, }; module_hid_driver(dr_driver); -- cgit v1.2.3-59-g8ed1b From 1bcaa05ebee115213e34f1806cc6a4f7a6175a88 Mon Sep 17 00:00:00 2001 From: Ioan-Adrian Ratiu Date: Tue, 27 Sep 2016 21:41:37 +0300 Subject: Revert "HID: dragonrise: fix HID Descriptor for 0x0006 PID" This reverts commit 18339f59c3a6 ("HID: dragonrise: fix HID...") because it breaks certain dragonrise 0079:0006 gamepads. While it may fix a breakage caused by commit 79346d620e9d ("HID: input: force generic axis to be mapped to their user space axis"), it is probable that the manufacturer released different hardware with the same PID so this fix works for only a subset and breaks the other gamepads sharing the PID. What is needed is another more generic solution which fixes 79346d620e9d ("HID: input: force generic axis ...") breakage for this controller: we need to add an exception for this driver to make it keep the old behaviour previous to the initial breakage (this is done in patch 2 of this series). 
Signed-off-by: Ioan-Adrian Ratiu Reviewed-by: Benjamin Tissoires Signed-off-by: Jiri Kosina --- drivers/hid/hid-dr.c | 58 ---------------------------------------------------- 1 file changed, 58 deletions(-) (limited to 'drivers') diff --git a/drivers/hid/hid-dr.c b/drivers/hid/hid-dr.c index 0ed843939b2c..818ea7d93533 100644 --- a/drivers/hid/hid-dr.c +++ b/drivers/hid/hid-dr.c @@ -234,58 +234,6 @@ static __u8 pid0011_rdesc_fixed[] = { 0xC0 /* End Collection */ }; -static __u8 pid0006_rdesc_fixed[] = { - 0x05, 0x01, /* Usage Page (Generic Desktop) */ - 0x09, 0x04, /* Usage (Joystick) */ - 0xA1, 0x01, /* Collection (Application) */ - 0xA1, 0x02, /* Collection (Logical) */ - 0x75, 0x08, /* Report Size (8) */ - 0x95, 0x05, /* Report Count (5) */ - 0x15, 0x00, /* Logical Minimum (0) */ - 0x26, 0xFF, 0x00, /* Logical Maximum (255) */ - 0x35, 0x00, /* Physical Minimum (0) */ - 0x46, 0xFF, 0x00, /* Physical Maximum (255) */ - 0x09, 0x30, /* Usage (X) */ - 0x09, 0x33, /* Usage (Ry) */ - 0x09, 0x32, /* Usage (Z) */ - 0x09, 0x31, /* Usage (Y) */ - 0x09, 0x34, /* Usage (Ry) */ - 0x81, 0x02, /* Input (Variable) */ - 0x75, 0x04, /* Report Size (4) */ - 0x95, 0x01, /* Report Count (1) */ - 0x25, 0x07, /* Logical Maximum (7) */ - 0x46, 0x3B, 0x01, /* Physical Maximum (315) */ - 0x65, 0x14, /* Unit (Centimeter) */ - 0x09, 0x39, /* Usage (Hat switch) */ - 0x81, 0x42, /* Input (Variable) */ - 0x65, 0x00, /* Unit (None) */ - 0x75, 0x01, /* Report Size (1) */ - 0x95, 0x0C, /* Report Count (12) */ - 0x25, 0x01, /* Logical Maximum (1) */ - 0x45, 0x01, /* Physical Maximum (1) */ - 0x05, 0x09, /* Usage Page (Button) */ - 0x19, 0x01, /* Usage Minimum (0x01) */ - 0x29, 0x0C, /* Usage Maximum (0x0C) */ - 0x81, 0x02, /* Input (Variable) */ - 0x06, 0x00, 0xFF, /* Usage Page (Vendor Defined) */ - 0x75, 0x01, /* Report Size (1) */ - 0x95, 0x08, /* Report Count (8) */ - 0x25, 0x01, /* Logical Maximum (1) */ - 0x45, 0x01, /* Physical Maximum (1) */ - 0x09, 0x01, /* Usage (0x01) */ - 0x81, 0x02, /* Input (Variable) */ - 0xC0, /* End Collection */ - 0xA1, 0x02, /* Collection (Logical) */ - 0x75, 0x08, /* Report Size (8) */ - 0x95, 0x07, /* Report Count (7) */ - 0x46, 0xFF, 0x00, /* Physical Maximum (255) */ - 0x26, 0xFF, 0x00, /* Logical Maximum (255) */ - 0x09, 0x02, /* Usage (0x02) */ - 0x91, 0x02, /* Output (Variable) */ - 0xC0, /* End Collection */ - 0xC0 /* End Collection */ -}; - static __u8 *dr_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize) { @@ -296,12 +244,6 @@ static __u8 *dr_report_fixup(struct hid_device *hdev, __u8 *rdesc, *rsize = sizeof(pid0011_rdesc_fixed); } break; - case 0x0006: - if (*rsize == sizeof(pid0006_rdesc_fixed)) { - rdesc = pid0006_rdesc_fixed; - *rsize = sizeof(pid0006_rdesc_fixed); - } - break; } return rdesc; } -- cgit v1.2.3-59-g8ed1b From 4973ca9a01e2354b159acedec1b9b8eb8de02ab7 Mon Sep 17 00:00:00 2001 From: "Steinar H. Gunderson" Date: Sun, 9 Oct 2016 14:21:50 +0200 Subject: HID: add quirk for Akai MIDImix. The Akai MIDImix (09e8:0031) is a MIDI fader controller that speaks regular MIDI and works well with Linux. However, initialization gets delayed due to reports timeout: [3643645.631124] hid-generic 0003:09E8:0031.0020: timeout initializing reports [3643645.632416] hid-generic 0003:09E8:0031.0020: hiddev0: USB HID v1.11 Device [AKAI MIDI Mix] on usb-0000:00:14.0-2/input0 Adding "usbhid.quirks=0x09e8:0x0031:0x20000000" on the kernel command line makes the issues go away. Signed-off-by: Steinar H. 
Gunderson Signed-off-by: Jiri Kosina --- drivers/hid/hid-ids.h | 3 +++ drivers/hid/usbhid/hid-quirks.c | 1 + 2 files changed, 4 insertions(+) (limited to 'drivers') diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index cd59c79eebdd..6cfb5cacc253 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -64,6 +64,9 @@ #define USB_VENDOR_ID_AKAI 0x2011 #define USB_DEVICE_ID_AKAI_MPKMINI2 0x0715 +#define USB_VENDOR_ID_AKAI_09E8 0x09E8 +#define USB_DEVICE_ID_AKAI_09E8_MIDIMIX 0x0031 + #define USB_VENDOR_ID_ALCOR 0x058f #define USB_DEVICE_ID_ALCOR_USBRS232 0x9720 diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c index 0a0eca5da47d..354d49ea36dd 100644 --- a/drivers/hid/usbhid/hid-quirks.c +++ b/drivers/hid/usbhid/hid-quirks.c @@ -56,6 +56,7 @@ static const struct hid_blacklist { { USB_VENDOR_ID_AIREN, USB_DEVICE_ID_AIREN_SLIMPLUS, HID_QUIRK_NOGET }, { USB_VENDOR_ID_AKAI, USB_DEVICE_ID_AKAI_MPKMINI2, HID_QUIRK_NO_INIT_REPORTS }, + { USB_VENDOR_ID_AKAI_09E8, USB_DEVICE_ID_AKAI_09E8_MIDIMIX, HID_QUIRK_NO_INIT_REPORTS }, { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_UC100KM, HID_QUIRK_NOGET }, { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS124U, HID_QUIRK_NOGET }, { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_2PORTKVM, HID_QUIRK_NOGET }, -- cgit v1.2.3-59-g8ed1b From 13d62fd26924b30593ffd970be17c7344149b188 Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Sun, 25 Sep 2016 15:44:03 +0000 Subject: mmc: sdhci-of-arasan: Fix non static symbol warning MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fixes the following sparse warning: drivers/mmc/host/sdhci-of-arasan.c:253:6: warning: symbol 'sdhci_arasan_reset' was not declared. Should it be static? Signed-off-by: Wei Yongjun Acked-by: Sören Brinkmann Acked-by: Adrian Hunter Signed-off-by: Ulf Hansson --- drivers/mmc/host/sdhci-of-arasan.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c index da8e40af6f85..e263671abab8 100644 --- a/drivers/mmc/host/sdhci-of-arasan.c +++ b/drivers/mmc/host/sdhci-of-arasan.c @@ -250,7 +250,7 @@ static void sdhci_arasan_hs400_enhanced_strobe(struct mmc_host *mmc, writel(vendor, host->ioaddr + SDHCI_ARASAN_VENDOR_REGISTER); } -void sdhci_arasan_reset(struct sdhci_host *host, u8 mask) +static void sdhci_arasan_reset(struct sdhci_host *host, u8 mask) { u8 ctrl; struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); -- cgit v1.2.3-59-g8ed1b From 48ab086d262e705c4d042228479aaf0b2daffcf9 Mon Sep 17 00:00:00 2001 From: Baoyou Xie Date: Fri, 30 Sep 2016 09:37:38 +0800 Subject: mmc: block: add missing header dependencies We get 1 warning when building kernel with W=1: drivers/mmc/card/block.c:2147:5: warning: no previous prototype for 'mmc_blk_issue_rq' [-Wmissing-prototypes] In fact, this function is declared in drivers/mmc/card/block.h, so this patch adds missing header dependencies. 
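As a hedged aside (the function and file names below are invented, purely to illustrate the warning class): -Wmissing-prototypes fires when a function with external linkage is defined without a prototype previously in scope, and including the header that declares it both silences the warning and lets the compiler cross-check declaration against definition:

	/* foo.h */
	int foo_handle_rq(void *queue, void *req);

	/* foo.c -- without the #include, a W=1 build warns:
	 *   "no previous prototype for 'foo_handle_rq'"
	 */
	#include "foo.h"

	int foo_handle_rq(void *queue, void *req)
	{
		/* ... */
		return 0;
	}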
Signed-off-by: Baoyou Xie Signed-off-by: Ulf Hansson --- drivers/mmc/card/block.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c index c3335112e68c..0f2cc9f22357 100644 --- a/drivers/mmc/card/block.c +++ b/drivers/mmc/card/block.c @@ -46,6 +46,7 @@ #include #include "queue.h" +#include "block.h" MODULE_ALIAS("mmc:block"); #ifdef MODULE_PARAM_PREFIX -- cgit v1.2.3-59-g8ed1b From 1720d3545b772c49b2975eeb3b8f4d3f56dc2085 Mon Sep 17 00:00:00 2001 From: Shawn Lin Date: Fri, 30 Sep 2016 14:18:58 +0800 Subject: mmc: core: switch to 1V8 or 1V2 for hs400es mode When introducing hs400es, I didn't notice that we haven't switched voltage to 1V2 or 1V8 for it. That happens to work as the first controller claiming to support hs400es, arasan(5.1), which is designed to only support 1V8. So the voltage is fixed to 1V8. But it actually is wrong, and will not fit for other host controllers. Let's fix it. Fixes: commit 81ac2af65793ecf ("mmc: core: implement enhanced strobe support") Cc: Signed-off-by: Shawn Lin Reviewed-by: Douglas Anderson Signed-off-by: Ulf Hansson --- drivers/mmc/core/mmc.c | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'drivers') diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index 3486bc7fbb64..f4ed5accafd0 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c @@ -1263,6 +1263,16 @@ static int mmc_select_hs400es(struct mmc_card *card) goto out_err; } + if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400_1_2V) + err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120); + + if (err && card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400_1_8V) + err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180); + + /* If fails try again during next card power cycle */ + if (err) + goto out_err; + err = mmc_select_bus_width(card); if (err < 0) goto out_err; -- cgit v1.2.3-59-g8ed1b From 4f25580fb84d934d7ecffa3c0aa8f10f7e23af92 Mon Sep 17 00:00:00 2001 From: Shawn Lin Date: Fri, 30 Sep 2016 14:18:59 +0800 Subject: mmc: core: changes frequency to hs_max_dtr when selecting hs400es Per JESD84-B51 P49, Host need to change frequency to <=52MHz after setting HS_TIMING to 0x1, and host may changes frequency to <= 200MHz after setting HS_TIMING to 0x3. That means the card expects the clock rate to increase from the current used f_init (which is less than 400KHz, but still being less than 52MHz) to 52MHz, otherwise we find some eMMC devices significantly report failure when sending status. Reported-by: Xiao Yao Signed-off-by: Shawn Lin Reviewed-by: Douglas Anderson Signed-off-by: Ulf Hansson --- drivers/mmc/core/mmc.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers') diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index f4ed5accafd0..39fc5b2b96c5 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c @@ -1282,6 +1282,8 @@ static int mmc_select_hs400es(struct mmc_card *card) if (err) goto out_err; + mmc_set_clock(host, card->ext_csd.hs_max_dtr); + err = mmc_switch_status(card); if (err) goto out_err; -- cgit v1.2.3-59-g8ed1b From 8a3bee9b13824147dd15efb2993fb1b19c0e5555 Mon Sep 17 00:00:00 2001 From: Shawn Lin Date: Fri, 30 Sep 2016 14:19:00 +0800 Subject: mmc: sdhci-of-arasan: add sdhci_arasan_voltage_switch for arasan, 5.1 Per the vendor's requirement, we shouldn't do any setting for 1.8V Signaling Enable, otherwise the interaction/behaviour between phy and controller will be undefined. Mostly it works fine if we do that, but we still see failures. 
Anyway, let's fix it to meet the vendor's requirement. The error log looks like: [ 93.405085] mmc1: unexpected status 0x800900 after switch [ 93.408474] mmc1: switch to bus width 1 failed [ 93.408482] mmc1: mmc_select_hs200 failed, error -110 [ 93.408492] mmc1: error -110 during resume (card was removed?) [ 93.408705] PM: resume of devices complete after 213.453 msecs Signed-off-by: Shawn Lin Acked-by: Adrian Hunter Reviewed-by: Douglas Anderson Signed-off-by: Ulf Hansson --- drivers/mmc/host/sdhci-of-arasan.c | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) (limited to 'drivers') diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c index e263671abab8..410a55b1c25f 100644 --- a/drivers/mmc/host/sdhci-of-arasan.c +++ b/drivers/mmc/host/sdhci-of-arasan.c @@ -265,6 +265,28 @@ static void sdhci_arasan_reset(struct sdhci_host *host, u8 mask) } } +static int sdhci_arasan_voltage_switch(struct mmc_host *mmc, + struct mmc_ios *ios) +{ + switch (ios->signal_voltage) { + case MMC_SIGNAL_VOLTAGE_180: + /* + * Plese don't switch to 1V8 as arasan,5.1 doesn't + * actually refer to this setting to indicate the + * signal voltage and the state machine will be broken + * actually if we force to enable 1V8. That's something + * like broken quirk but we could work around here. + */ + return 0; + case MMC_SIGNAL_VOLTAGE_330: + case MMC_SIGNAL_VOLTAGE_120: + /* We don't support 3V3 and 1V2 */ + break; + } + + return -EINVAL; +} + static struct sdhci_ops sdhci_arasan_ops = { .set_clock = sdhci_arasan_set_clock, .get_max_clock = sdhci_pltfm_clk_get_max_clock, @@ -661,6 +683,8 @@ static int sdhci_arasan_probe(struct platform_device *pdev) host->mmc_host_ops.hs400_enhanced_strobe = sdhci_arasan_hs400_enhanced_strobe; + host->mmc_host_ops.start_signal_voltage_switch = + sdhci_arasan_voltage_switch; } ret = sdhci_add_host(host); -- cgit v1.2.3-59-g8ed1b From 3f2d26643595973e835e8356ea90c7c15cb1b0f1 Mon Sep 17 00:00:00 2001 From: Jiri Slaby Date: Mon, 3 Oct 2016 10:58:28 +0200 Subject: mmc: core: Annotate cmd_hdr as __le32 Commit f68381a70bb2 (mmc: block: fix packed command header endianness) correctly fixed endianness handling of packed_cmd_hdr in mmc_blk_packed_hdr_wrq_prep. But now, sparse complains about incorrect types: drivers/mmc/card/block.c:1613:27: sparse: incorrect type in assignment (different base types) drivers/mmc/card/block.c:1613:27: expected unsigned int [unsigned] [usertype] drivers/mmc/card/block.c:1613:27: got restricted __le32 [usertype] ... So annotate cmd_hdr properly using __le32 to make everyone happy. 
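A small illustrative sketch of the convention (struct and field names are invented): fields that hold little-endian on-medium data are declared __le32 and converted only at the boundary, which is exactly what sparse endianness checking verifies (typically run as make C=1, with CF=-D__CHECK_ENDIAN__ on kernels where that check is not on by default):

	#include <linux/types.h>
	#include <asm/byteorder.h>

	struct packed_hdr {
		__le32 version;		/* stored little-endian on the medium */
		__le32 nr_entries;
	};

	static void fill_hdr(struct packed_hdr *hdr, u32 version, u32 nr)
	{
		/* explicit conversion; assigning a bare u32 would trip sparse */
		hdr->version = cpu_to_le32(version);
		hdr->nr_entries = cpu_to_le32(nr);
	}

	static u32 hdr_entries(const struct packed_hdr *hdr)
	{
		return le32_to_cpu(hdr->nr_entries);
	}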
Signed-off-by: Jiri Slaby Fixes: f68381a70bb2 (mmc: block: fix packed command header endianness) Cc: stable@vger.kernel.org Signed-off-by: Ulf Hansson --- drivers/mmc/card/block.c | 2 +- drivers/mmc/card/queue.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c index 0f2cc9f22357..709a872ed484 100644 --- a/drivers/mmc/card/block.c +++ b/drivers/mmc/card/block.c @@ -1787,7 +1787,7 @@ static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq, struct mmc_blk_data *md = mq->data; struct mmc_packed *packed = mqrq->packed; bool do_rel_wr, do_data_tag; - u32 *packed_cmd_hdr; + __le32 *packed_cmd_hdr; u8 hdr_blocks; u8 i = 1; diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h index 3c15a75bae86..342f1e3f301e 100644 --- a/drivers/mmc/card/queue.h +++ b/drivers/mmc/card/queue.h @@ -31,7 +31,7 @@ enum mmc_packed_type { struct mmc_packed { struct list_head list; - u32 cmd_hdr[1024]; + __le32 cmd_hdr[1024]; unsigned int blocks; u8 nr_entries; u8 retries; -- cgit v1.2.3-59-g8ed1b From fc605f1d8060133596bb6083fc4b7b306d1d5931 Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Wed, 5 Oct 2016 12:11:21 +0300 Subject: mmc: sdhci: Fix SDHCI_QUIRK2_STOP_WITH_TC Multi-block data transfers can specify the number of blocks either using a Set Block Count command (CMD23) or by sending a STOP command (CMD12) after the required number of blocks has transferred. CMD23 is preferred, but some cards don't support it. CMD12 with R1b response is used for writes, and R1 response for reads. Some SDHCI host controllers give a Transfer Complete (TC) interrupt for the STOP command (CMD12) whether or not a R1b response has been specified. The quirk SDHCI_QUIRK2_STOP_WITH_TC identifies those host controllers, but the implementation only considers the case where the TC interrupt arrives at the same time as the Command Complete (CC) interrupt. However, occasionally TC arrives before CC. That is harmless, but does generate an error message "Got data interrupt 0x00000002 even though no data operation was in progress". A simpler approach is to force R1b response onto all STOP commands, because SDHCI will handle TC before CC in the general case, so do that. 
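Why OR-ing in MMC_RSP_BUSY is sufficient: the MMC response types are plain flag combinations, as defined in include/linux/mmc/core.h, so adding the busy flag to an R1 STOP command turns it into R1b and makes sdhci program the short-response-with-busy command format and wait for busy:

	#define MMC_RSP_R1	(MMC_RSP_PRESENT|MMC_RSP_CRC|MMC_RSP_OPCODE)
	#define MMC_RSP_R1B	(MMC_RSP_PRESENT|MMC_RSP_CRC|MMC_RSP_OPCODE|MMC_RSP_BUSY)

	/*
	 * So after the hunk below does
	 *	cmd->flags |= MMC_RSP_BUSY;
	 * a CMD12 that was prepared with MMC_RSP_R1 carries exactly the
	 * MMC_RSP_R1B flag set, and sdhci_send_command() selects the
	 * SDHCI_CMD_RESP_SHORT_BUSY response format for it.
	 */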
Signed-off-by: Adrian Hunter Cc: Giuseppe Cavallaro Signed-off-by: Ulf Hansson --- drivers/mmc/host/sdhci.c | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index 48055666c655..3711813f5654 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -1077,6 +1077,10 @@ void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd) /* Initially, a command has no error */ cmd->error = 0; + if ((host->quirks2 & SDHCI_QUIRK2_STOP_WITH_TC) && + cmd->opcode == MMC_STOP_TRANSMISSION) + cmd->flags |= MMC_RSP_BUSY; + /* Wait max 10 ms */ timeout = 10; @@ -2409,7 +2413,7 @@ static void sdhci_timeout_data_timer(unsigned long data) * * \*****************************************************************************/ -static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *mask) +static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask) { if (!host->cmd) { /* @@ -2453,11 +2457,6 @@ static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *mask) return; } - if ((host->quirks2 & SDHCI_QUIRK2_STOP_WITH_TC) && - !(host->cmd->flags & MMC_RSP_BUSY) && !host->data && - host->cmd->opcode == MMC_STOP_TRANSMISSION) - *mask &= ~SDHCI_INT_DATA_END; - if (intmask & SDHCI_INT_RESPONSE) sdhci_finish_command(host); } @@ -2680,8 +2679,7 @@ static irqreturn_t sdhci_irq(int irq, void *dev_id) } if (intmask & SDHCI_INT_CMD_MASK) - sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK, - &intmask); + sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK); if (intmask & SDHCI_INT_DATA_MASK) sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK); -- cgit v1.2.3-59-g8ed1b From 606d313124094d87050896a10894200cdd2b0514 Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Wed, 5 Oct 2016 12:11:22 +0300 Subject: mmc: sdhci: Rename sdhci_set_power() to sdhci_set_power_noreg() Unlike other cases, sdhci_set_power() does not reflect the default implementation of the ->set_power() callback. Rename it and create sdhci_set_power() that is the default implementation. 
Signed-off-by: Adrian Hunter Cc: Jisheng Zhang Signed-off-by: Ulf Hansson --- drivers/mmc/host/sdhci-pxav3.c | 2 +- drivers/mmc/host/sdhci.c | 26 +++++++++++++------------- drivers/mmc/host/sdhci.h | 2 ++ 3 files changed, 16 insertions(+), 14 deletions(-) (limited to 'drivers') diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c index dd1938d341f7..d0f5c05fbc19 100644 --- a/drivers/mmc/host/sdhci-pxav3.c +++ b/drivers/mmc/host/sdhci-pxav3.c @@ -315,7 +315,7 @@ static void pxav3_set_power(struct sdhci_host *host, unsigned char mode, struct mmc_host *mmc = host->mmc; u8 pwr = host->pwr; - sdhci_set_power(host, mode, vdd); + sdhci_set_power_noreg(host, mode, vdd); if (host->pwr == pwr) return; diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index 3711813f5654..223a91e039dc 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -1394,8 +1394,8 @@ static void sdhci_set_power_reg(struct sdhci_host *host, unsigned char mode, sdhci_writeb(host, 0, SDHCI_POWER_CONTROL); } -void sdhci_set_power(struct sdhci_host *host, unsigned char mode, - unsigned short vdd) +void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode, + unsigned short vdd) { u8 pwr = 0; @@ -1459,20 +1459,17 @@ void sdhci_set_power(struct sdhci_host *host, unsigned char mode, mdelay(10); } } -EXPORT_SYMBOL_GPL(sdhci_set_power); +EXPORT_SYMBOL_GPL(sdhci_set_power_noreg); -static void __sdhci_set_power(struct sdhci_host *host, unsigned char mode, - unsigned short vdd) +void sdhci_set_power(struct sdhci_host *host, unsigned char mode, + unsigned short vdd) { - struct mmc_host *mmc = host->mmc; - - if (host->ops->set_power) - host->ops->set_power(host, mode, vdd); - else if (!IS_ERR(mmc->supply.vmmc)) - sdhci_set_power_reg(host, mode, vdd); + if (IS_ERR(host->mmc->supply.vmmc)) + sdhci_set_power_noreg(host, mode, vdd); else - sdhci_set_power(host, mode, vdd); + sdhci_set_power_reg(host, mode, vdd); } +EXPORT_SYMBOL_GPL(sdhci_set_power); /*****************************************************************************\ * * @@ -1613,7 +1610,10 @@ static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) } } - __sdhci_set_power(host, ios->power_mode, ios->vdd); + if (host->ops->set_power) + host->ops->set_power(host, ios->power_mode, ios->vdd); + else + sdhci_set_power(host, ios->power_mode, ios->vdd); if (host->ops->platform_send_init_74_clocks) host->ops->platform_send_init_74_clocks(host, ios->power_mode); diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h index c722cd23205c..766df17fb7eb 100644 --- a/drivers/mmc/host/sdhci.h +++ b/drivers/mmc/host/sdhci.h @@ -683,6 +683,8 @@ u16 sdhci_calc_clk(struct sdhci_host *host, unsigned int clock, void sdhci_set_clock(struct sdhci_host *host, unsigned int clock); void sdhci_set_power(struct sdhci_host *host, unsigned char mode, unsigned short vdd); +void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode, + unsigned short vdd); void sdhci_set_bus_width(struct sdhci_host *host, int width); void sdhci_reset(struct sdhci_host *host, u8 mask); void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing); -- cgit v1.2.3-59-g8ed1b From 6bc090631dfc3394da0619e920662e6636dbe89c Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Wed, 5 Oct 2016 12:11:23 +0300 Subject: mmc: sdhci-pci: Let devices define their own sdhci_ops Let devices define their own sdhci_ops so that device-specific variations can be implemented without adding quirks. 
Signed-off-by: Adrian Hunter Signed-off-by: Ulf Hansson --- drivers/mmc/host/sdhci-pci-core.c | 4 +++- drivers/mmc/host/sdhci-pci.h | 2 ++ 2 files changed, 5 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c index 72a1f1f5180a..5e11e0e3be63 100644 --- a/drivers/mmc/host/sdhci-pci-core.c +++ b/drivers/mmc/host/sdhci-pci-core.c @@ -1648,7 +1648,9 @@ static struct sdhci_pci_slot *sdhci_pci_probe_slot( } host->hw_name = "PCI"; - host->ops = &sdhci_pci_ops; + host->ops = chip->fixes && chip->fixes->ops ? + chip->fixes->ops : + &sdhci_pci_ops; host->quirks = chip->quirks; host->quirks2 = chip->quirks2; diff --git a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h index 9c7c08b93223..6bccf56bc5ff 100644 --- a/drivers/mmc/host/sdhci-pci.h +++ b/drivers/mmc/host/sdhci-pci.h @@ -65,6 +65,8 @@ struct sdhci_pci_fixes { int (*suspend) (struct sdhci_pci_chip *); int (*resume) (struct sdhci_pci_chip *); + + const struct sdhci_ops *ops; }; struct sdhci_pci_slot { -- cgit v1.2.3-59-g8ed1b From fee686b74a9c115d3c4c851eb6613d1378ad0e0c Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Wed, 5 Oct 2016 12:11:24 +0300 Subject: mmc: sdhci-pci: Fix bus power failing to enable for some Intel controllers Some Intel controllers (e.g. BXT) might fail to set bus power after a D3 -> D0 transition due to the present state not yet having propagated. Retry for up to 2 milliseconds. Signed-off-by: Adrian Hunter Signed-off-by: Ulf Hansson --- drivers/mmc/host/sdhci-pci-core.c | 50 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 50 insertions(+) (limited to 'drivers') diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c index 5e11e0e3be63..1d9e00a00e9f 100644 --- a/drivers/mmc/host/sdhci-pci-core.c +++ b/drivers/mmc/host/sdhci-pci-core.c @@ -32,6 +32,14 @@ #include "sdhci-pci.h" #include "sdhci-pci-o2micro.h" +static int sdhci_pci_enable_dma(struct sdhci_host *host); +static void sdhci_pci_set_bus_width(struct sdhci_host *host, int width); +static void sdhci_pci_hw_reset(struct sdhci_host *host); +static int sdhci_pci_select_drive_strength(struct sdhci_host *host, + struct mmc_card *card, + unsigned int max_dtr, int host_drv, + int card_drv, int *drv_type); + /*****************************************************************************\ * * * Hardware specific quirk handling * @@ -390,6 +398,45 @@ static int byt_sd_probe_slot(struct sdhci_pci_slot *slot) return 0; } +#define SDHCI_INTEL_PWR_TIMEOUT_CNT 20 +#define SDHCI_INTEL_PWR_TIMEOUT_UDELAY 100 + +static void sdhci_intel_set_power(struct sdhci_host *host, unsigned char mode, + unsigned short vdd) +{ + int cntr; + u8 reg; + + sdhci_set_power(host, mode, vdd); + + if (mode == MMC_POWER_OFF) + return; + + /* + * Bus power might not enable after D3 -> D0 transition due to the + * present state not yet having propagated. Retry for up to 2ms. 
+ */ + for (cntr = 0; cntr < SDHCI_INTEL_PWR_TIMEOUT_CNT; cntr++) { + reg = sdhci_readb(host, SDHCI_POWER_CONTROL); + if (reg & SDHCI_POWER_ON) + break; + udelay(SDHCI_INTEL_PWR_TIMEOUT_UDELAY); + reg |= SDHCI_POWER_ON; + sdhci_writeb(host, reg, SDHCI_POWER_CONTROL); + } +} + +static const struct sdhci_ops sdhci_intel_byt_ops = { + .set_clock = sdhci_set_clock, + .set_power = sdhci_intel_set_power, + .enable_dma = sdhci_pci_enable_dma, + .set_bus_width = sdhci_pci_set_bus_width, + .reset = sdhci_reset, + .set_uhs_signaling = sdhci_set_uhs_signaling, + .hw_reset = sdhci_pci_hw_reset, + .select_drive_strength = sdhci_pci_select_drive_strength, +}; + static const struct sdhci_pci_fixes sdhci_intel_byt_emmc = { .allow_runtime_pm = true, .probe_slot = byt_emmc_probe_slot, @@ -397,6 +444,7 @@ static const struct sdhci_pci_fixes sdhci_intel_byt_emmc = { .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN | SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 | SDHCI_QUIRK2_STOP_WITH_TC, + .ops = &sdhci_intel_byt_ops, }; static const struct sdhci_pci_fixes sdhci_intel_byt_sdio = { @@ -405,6 +453,7 @@ static const struct sdhci_pci_fixes sdhci_intel_byt_sdio = { SDHCI_QUIRK2_PRESET_VALUE_BROKEN, .allow_runtime_pm = true, .probe_slot = byt_sdio_probe_slot, + .ops = &sdhci_intel_byt_ops, }; static const struct sdhci_pci_fixes sdhci_intel_byt_sd = { @@ -415,6 +464,7 @@ static const struct sdhci_pci_fixes sdhci_intel_byt_sd = { .allow_runtime_pm = true, .own_cd_for_runtime_pm = true, .probe_slot = byt_sd_probe_slot, + .ops = &sdhci_intel_byt_ops, }; /* Define Host controllers for Intel Merrifield platform */ -- cgit v1.2.3-59-g8ed1b From 184892925118d924aa9304b466946ae18c029932 Mon Sep 17 00:00:00 2001 From: Shuah Khan Date: Thu, 6 Oct 2016 16:00:50 -0600 Subject: samples: move blackfin gptimers-example from Documentation Move blackfin gptimers-example to samples and remove it from Documentation Makefile. Update samples Kconfig and Makefile to build gptimers-example. blackfin is the last CONFIG_BUILD_DOCSRC target in Documentation/Makefile. Hence this patch also includes changes to remove CONFIG_BUILD_DOCSRC from Makefile and lib/Kconfig.debug and updates VIDEO_PCI_SKELETON dependency on BUILD_DOCSRC. Documentation/Makefile is not deleted to avoid braking make htmldocs and make distclean. 
Acked-by: Michal Marek Acked-by: Jonathan Corbet Reviewed-by: Kees Cook Reported-by: Valentin Rothberg Reported-by: Paul Gortmaker Signed-off-by: Shuah Khan --- Documentation/Makefile | 2 +- Documentation/blackfin/00-INDEX | 4 -- Documentation/blackfin/Makefile | 5 -- Documentation/blackfin/gptimers-example.c | 91 ------------------------------- Makefile | 3 - drivers/media/v4l2-core/Kconfig | 2 +- lib/Kconfig.debug | 9 --- samples/Kconfig | 6 ++ samples/Makefile | 2 +- samples/blackfin/Makefile | 1 + samples/blackfin/gptimers-example.c | 91 +++++++++++++++++++++++++++++++ 11 files changed, 101 insertions(+), 115 deletions(-) delete mode 100644 Documentation/blackfin/Makefile delete mode 100644 Documentation/blackfin/gptimers-example.c create mode 100644 samples/blackfin/Makefile create mode 100644 samples/blackfin/gptimers-example.c (limited to 'drivers') diff --git a/Documentation/Makefile b/Documentation/Makefile index 84359654cfcb..c2a469112c37 100644 --- a/Documentation/Makefile +++ b/Documentation/Makefile @@ -1 +1 @@ -subdir-y := blackfin +subdir-y := diff --git a/Documentation/blackfin/00-INDEX b/Documentation/blackfin/00-INDEX index c54fcdd4ae9f..265a1effebde 100644 --- a/Documentation/blackfin/00-INDEX +++ b/Documentation/blackfin/00-INDEX @@ -1,10 +1,6 @@ 00-INDEX - This file -Makefile - - Makefile for gptimers example file. bfin-gpio-notes.txt - Notes in developing/using bfin-gpio driver. bfin-spi-notes.txt - Notes for using bfin spi bus driver. -gptimers-example.c - - gptimers example diff --git a/Documentation/blackfin/Makefile b/Documentation/blackfin/Makefile deleted file mode 100644 index 6782c58fbc29..000000000000 --- a/Documentation/blackfin/Makefile +++ /dev/null @@ -1,5 +0,0 @@ -ifneq ($(CONFIG_BLACKFIN),) -ifneq ($(CONFIG_BFIN_GPTIMERS),) -obj-m := gptimers-example.o -endif -endif diff --git a/Documentation/blackfin/gptimers-example.c b/Documentation/blackfin/gptimers-example.c deleted file mode 100644 index 283eba993d9d..000000000000 --- a/Documentation/blackfin/gptimers-example.c +++ /dev/null @@ -1,91 +0,0 @@ -/* - * Simple gptimers example - * http://docs.blackfin.uclinux.org/doku.php?id=linux-kernel:drivers:gptimers - * - * Copyright 2007-2009 Analog Devices Inc. - * - * Licensed under the GPL-2 or later. - */ - -#include -#include - -#include -#include - -/* ... random driver includes ... */ - -#define DRIVER_NAME "gptimer_example" - -#ifdef IRQ_TIMER5 -#define SAMPLE_IRQ_TIMER IRQ_TIMER5 -#else -#define SAMPLE_IRQ_TIMER IRQ_TIMER2 -#endif - -struct gptimer_data { - uint32_t period, width; -}; -static struct gptimer_data data; - -/* ... random driver state ... */ - -static irqreturn_t gptimer_example_irq(int irq, void *dev_id) -{ - struct gptimer_data *data = dev_id; - - /* make sure it was our timer which caused the interrupt */ - if (!get_gptimer_intr(TIMER5_id)) - return IRQ_NONE; - - /* read the width/period values that were captured for the waveform */ - data->width = get_gptimer_pwidth(TIMER5_id); - data->period = get_gptimer_period(TIMER5_id); - - /* acknowledge the interrupt */ - clear_gptimer_intr(TIMER5_id); - - /* tell the upper layers we took care of things */ - return IRQ_HANDLED; -} - -/* ... random driver code ... 
*/ - -static int __init gptimer_example_init(void) -{ - int ret; - - /* grab the peripheral pins */ - ret = peripheral_request(P_TMR5, DRIVER_NAME); - if (ret) { - printk(KERN_NOTICE DRIVER_NAME ": peripheral request failed\n"); - return ret; - } - - /* grab the IRQ for the timer */ - ret = request_irq(SAMPLE_IRQ_TIMER, gptimer_example_irq, - IRQF_SHARED, DRIVER_NAME, &data); - if (ret) { - printk(KERN_NOTICE DRIVER_NAME ": IRQ request failed\n"); - peripheral_free(P_TMR5); - return ret; - } - - /* setup the timer and enable it */ - set_gptimer_config(TIMER5_id, - WDTH_CAP | PULSE_HI | PERIOD_CNT | IRQ_ENA); - enable_gptimers(TIMER5bit); - - return 0; -} -module_init(gptimer_example_init); - -static void __exit gptimer_example_exit(void) -{ - disable_gptimers(TIMER5bit); - free_irq(SAMPLE_IRQ_TIMER, &data); - peripheral_free(P_TMR5); -} -module_exit(gptimer_example_exit); - -MODULE_LICENSE("BSD"); diff --git a/Makefile b/Makefile index 70de1448c571..92b38ac151a2 100644 --- a/Makefile +++ b/Makefile @@ -933,9 +933,6 @@ vmlinux_prereq: $(vmlinux-deps) FORCE ifdef CONFIG_HEADERS_CHECK $(Q)$(MAKE) -f $(srctree)/Makefile headers_check endif -ifdef CONFIG_BUILD_DOCSRC - $(Q)$(MAKE) $(build)=Documentation -endif ifdef CONFIG_GDB_SCRIPTS $(Q)ln -fsn `cd $(srctree) && /bin/pwd`/scripts/gdb/vmlinux-gdb.py endif diff --git a/drivers/media/v4l2-core/Kconfig b/drivers/media/v4l2-core/Kconfig index 29b3436d0910..367523a3c774 100644 --- a/drivers/media/v4l2-core/Kconfig +++ b/drivers/media/v4l2-core/Kconfig @@ -27,7 +27,7 @@ config VIDEO_FIXED_MINOR_RANGES config VIDEO_PCI_SKELETON tristate "Skeleton PCI V4L2 driver" - depends on PCI && BUILD_DOCSRC + depends on PCI depends on VIDEO_V4L2 && VIDEOBUF2_CORE depends on VIDEOBUF2_MEMOPS && VIDEOBUF2_DMA_CONTIG ---help--- diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 2307d7c89dac..a6620838afc3 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -1875,15 +1875,6 @@ config PROVIDE_OHCI1394_DMA_INIT See Documentation/debugging-via-ohci1394.txt for more information. -config BUILD_DOCSRC - bool "Build targets in Documentation/ tree" - depends on HEADERS_CHECK - help - This option attempts to build objects from the source files in the - kernel Documentation/ tree. - - Say N if you are unsure. - config DMA_API_DEBUG bool "Enable debugging of DMA-API usage" depends on HAVE_DMA_API_DEBUG diff --git a/samples/Kconfig b/samples/Kconfig index 85c405fcccb0..a6d2a43bbf2e 100644 --- a/samples/Kconfig +++ b/samples/Kconfig @@ -99,4 +99,10 @@ config SAMPLE_SECCOMP Build samples of seccomp filters using various methods of BPF filter construction. +config SAMPLE_BLACKFIN_GPTIMERS + tristate "Build blackfin gptimers sample code -- loadable modules only" + depends on BLACKFIN && BFIN_GPTIMERS && m + help + Build samples of blackfin gptimers sample module. 
+ endif # SAMPLES diff --git a/samples/Makefile b/samples/Makefile index 1a20169d85ac..e17d66d77f09 100644 --- a/samples/Makefile +++ b/samples/Makefile @@ -2,4 +2,4 @@ obj-$(CONFIG_SAMPLES) += kobject/ kprobes/ trace_events/ livepatch/ \ hw_breakpoint/ kfifo/ kdb/ hidraw/ rpmsg/ seccomp/ \ - configfs/ connector/ v4l/ trace_printk/ + configfs/ connector/ v4l/ trace_printk/ blackfin/ diff --git a/samples/blackfin/Makefile b/samples/blackfin/Makefile new file mode 100644 index 000000000000..89b86cfd83a2 --- /dev/null +++ b/samples/blackfin/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_SAMPLE_BLACKFIN_GPTIMERS) += gptimers-example.o diff --git a/samples/blackfin/gptimers-example.c b/samples/blackfin/gptimers-example.c new file mode 100644 index 000000000000..283eba993d9d --- /dev/null +++ b/samples/blackfin/gptimers-example.c @@ -0,0 +1,91 @@ +/* + * Simple gptimers example + * http://docs.blackfin.uclinux.org/doku.php?id=linux-kernel:drivers:gptimers + * + * Copyright 2007-2009 Analog Devices Inc. + * + * Licensed under the GPL-2 or later. + */ + +#include +#include + +#include +#include + +/* ... random driver includes ... */ + +#define DRIVER_NAME "gptimer_example" + +#ifdef IRQ_TIMER5 +#define SAMPLE_IRQ_TIMER IRQ_TIMER5 +#else +#define SAMPLE_IRQ_TIMER IRQ_TIMER2 +#endif + +struct gptimer_data { + uint32_t period, width; +}; +static struct gptimer_data data; + +/* ... random driver state ... */ + +static irqreturn_t gptimer_example_irq(int irq, void *dev_id) +{ + struct gptimer_data *data = dev_id; + + /* make sure it was our timer which caused the interrupt */ + if (!get_gptimer_intr(TIMER5_id)) + return IRQ_NONE; + + /* read the width/period values that were captured for the waveform */ + data->width = get_gptimer_pwidth(TIMER5_id); + data->period = get_gptimer_period(TIMER5_id); + + /* acknowledge the interrupt */ + clear_gptimer_intr(TIMER5_id); + + /* tell the upper layers we took care of things */ + return IRQ_HANDLED; +} + +/* ... random driver code ... */ + +static int __init gptimer_example_init(void) +{ + int ret; + + /* grab the peripheral pins */ + ret = peripheral_request(P_TMR5, DRIVER_NAME); + if (ret) { + printk(KERN_NOTICE DRIVER_NAME ": peripheral request failed\n"); + return ret; + } + + /* grab the IRQ for the timer */ + ret = request_irq(SAMPLE_IRQ_TIMER, gptimer_example_irq, + IRQF_SHARED, DRIVER_NAME, &data); + if (ret) { + printk(KERN_NOTICE DRIVER_NAME ": IRQ request failed\n"); + peripheral_free(P_TMR5); + return ret; + } + + /* setup the timer and enable it */ + set_gptimer_config(TIMER5_id, + WDTH_CAP | PULSE_HI | PERIOD_CNT | IRQ_ENA); + enable_gptimers(TIMER5bit); + + return 0; +} +module_init(gptimer_example_init); + +static void __exit gptimer_example_exit(void) +{ + disable_gptimers(TIMER5bit); + free_irq(SAMPLE_IRQ_TIMER, &data); + peripheral_free(P_TMR5); +} +module_exit(gptimer_example_exit); + +MODULE_LICENSE("BSD"); -- cgit v1.2.3-59-g8ed1b From 8c136b590f79f7f4f60ae4709fc1340885ca2eba Mon Sep 17 00:00:00 2001 From: Lucas Stach Date: Wed, 5 Oct 2016 18:30:43 +0200 Subject: drm/etnaviv: ensure write caches are flushed at end of user cmdstream If the GPU is done with one user command stream the buffers referenced by this command stream may go away and get unmapped from the MMU. If the write caches are still dirty at this point later evictions will run into MMU faults, killing the GPU. Make sure the write caches are flushed before signaling completion of the user command stream. 
Signed-off-by: Lucas Stach --- drivers/gpu/drm/etnaviv/etnaviv_buffer.c | 24 +++++++++++++++++++----- 1 file changed, 19 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c index cb86c7e5495c..d9230132dfbc 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c @@ -329,20 +329,34 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event, /* * Append a LINK to the submitted command buffer to return to * the ring buffer. return_target is the ring target address. - * We need three dwords: event, wait, link. + * We need at most 7 dwords in the return target: 2 cache flush + + * 2 semaphore stall + 1 event + 1 wait + 1 link. */ - return_dwords = 3; + return_dwords = 7; return_target = etnaviv_buffer_reserve(gpu, buffer, return_dwords); CMD_LINK(cmdbuf, return_dwords, return_target); /* - * Append event, wait and link pointing back to the wait - * command to the ring buffer. + * Append a cache flush, stall, event, wait and link pointing back to + * the wait command to the ring buffer. */ + if (gpu->exec_state == ETNA_PIPE_2D) { + CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE, + VIVS_GL_FLUSH_CACHE_PE2D); + } else { + CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE, + VIVS_GL_FLUSH_CACHE_DEPTH | + VIVS_GL_FLUSH_CACHE_COLOR); + CMD_LOAD_STATE(buffer, VIVS_TS_FLUSH_CACHE, + VIVS_TS_FLUSH_CACHE_FLUSH); + } + CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE); + CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE); CMD_LOAD_STATE(buffer, VIVS_GL_EVENT, VIVS_GL_EVENT_EVENT_ID(event) | VIVS_GL_EVENT_FROM_PE); CMD_WAIT(buffer); - CMD_LINK(buffer, 2, return_target + 8); + CMD_LINK(buffer, 2, etnaviv_iommu_get_cmdbuf_va(gpu, buffer) + + buffer->user_size - 4); if (drm_debug & DRM_UT_DRIVER) pr_info("stream link to 0x%08x @ 0x%08x %p\n", -- cgit v1.2.3-59-g8ed1b From 8814d2dce00f77c5eeb7278981ac6fd08835629e Mon Sep 17 00:00:00 2001 From: Lucas Stach Date: Thu, 6 Oct 2016 17:03:28 +0200 Subject: drm/etnaviv: block 64K of address space behind each cmdstream To make sure we don't place anything there which might confuse the FE prefetcher. This gets rid of another case of FE MMU faults when the address space gets crowded before triggering the reaper. Signed-off-by: Lucas Stach --- drivers/gpu/drm/etnaviv/etnaviv_mmu.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c index d3796ed8d8c5..169ac96e8f08 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c @@ -330,7 +330,8 @@ u32 etnaviv_iommu_get_cmdbuf_va(struct etnaviv_gpu *gpu, return (u32)buf->vram_node.start; mutex_lock(&mmu->lock); - ret = etnaviv_iommu_find_iova(mmu, &buf->vram_node, buf->size); + ret = etnaviv_iommu_find_iova(mmu, &buf->vram_node, + buf->size + SZ_64K); if (ret < 0) { mutex_unlock(&mmu->lock); return 0; -- cgit v1.2.3-59-g8ed1b From 2d8e60e8b0742b7a5cddc806fe38bb81ee876c33 Mon Sep 17 00:00:00 2001 From: Baole Ni Date: Tue, 2 Aug 2016 18:50:25 +0800 Subject: drm/vmwgfx: Replace numeric parameter like 0444 with macro I find that the developers often just specified the numeric value when calling a macro which is defined with a parameter for access permission. 
As we know, these numeric value for access permission have had the corresponding macro, and that using macro can improve the robustness and readability of the code, thus, I suggest replacing the numeric parameter with the macro. Signed-off-by: Chuansheng Liu Signed-off-by: Baole Ni Reviewed-by: Sinclair Yeh Signed-off-by: Sinclair Yeh --- drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index e8ae3dc476d1..18061a4bc2f2 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -241,15 +241,15 @@ static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val, void *ptr); MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev"); -module_param_named(enable_fbdev, enable_fbdev, int, 0600); +module_param_named(enable_fbdev, enable_fbdev, int, S_IRUSR | S_IWUSR); MODULE_PARM_DESC(force_dma_api, "Force using the DMA API for TTM pages"); -module_param_named(force_dma_api, vmw_force_iommu, int, 0600); +module_param_named(force_dma_api, vmw_force_iommu, int, S_IRUSR | S_IWUSR); MODULE_PARM_DESC(restrict_iommu, "Try to limit IOMMU usage for TTM pages"); -module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600); +module_param_named(restrict_iommu, vmw_restrict_iommu, int, S_IRUSR | S_IWUSR); MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages"); -module_param_named(force_coherent, vmw_force_coherent, int, 0600); +module_param_named(force_coherent, vmw_force_coherent, int, S_IRUSR | S_IWUSR); MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU"); -module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600); +module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, S_IRUSR | S_IWUSR); MODULE_PARM_DESC(assume_16bpp, "Assume 16-bpp when filtering modes"); module_param_named(assume_16bpp, vmw_assume_16bpp, int, 0600); -- cgit v1.2.3-59-g8ed1b From 07028959bcc674dc8ca143323aeab05849a83742 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 29 Aug 2016 08:08:28 +0100 Subject: drm/vmwgfx: Remove call to reservation_object_test_signaled_rcu before wait Since fence_wait_timeout_reservation_object_wait_timeout_rcu() with a timeout of 0 becomes reservation_object_test_signaled_rcu(), we do not need to handle such conversion in the caller. The only challenge are those callers that wish to differentiate the error code between the nonblocking busy check and potentially blocking wait. Signed-off-by: Chris Wilson Cc: Sinclair Yeh Cc: Thomas Hellstrom Reviewed-by: Sinclair Yeh Signed-off-by: Sinclair Yeh --- drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c index 6a328d507a28..52ca1c9d070e 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c @@ -574,10 +574,8 @@ static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo, bool nonblock = !!(flags & drm_vmw_synccpu_dontblock); long lret; - if (nonblock) - return reservation_object_test_signaled_rcu(bo->resv, true) ? 0 : -EBUSY; - - lret = reservation_object_wait_timeout_rcu(bo->resv, true, true, MAX_SCHEDULE_TIMEOUT); + lret = reservation_object_wait_timeout_rcu(bo->resv, true, true, + nonblock ? 
0 : MAX_SCHEDULE_TIMEOUT); if (!lret) return -EBUSY; else if (lret < 0) -- cgit v1.2.3-59-g8ed1b From 1f982e4e390b31d6c44fb8bf03e3462ab33b8244 Mon Sep 17 00:00:00 2001 From: Charmaine Lee Date: Mon, 10 Oct 2016 10:37:03 -0700 Subject: drm/vmwgfx: Enable SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command And bump VMWGFX_DRIVER_MINOR to 11 Signed-off-by: Charmaine Lee Reviewed-by: Sinclair Yeh Reviewed-by: Brian Paul Signed-off-by: Sinclair Yeh --- drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 2 +- drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 32 ++++++++++++++++++++++++++++++++ 2 files changed, 33 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index 74304b03f9d4..7d01c183079d 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -43,7 +43,7 @@ #define VMWGFX_DRIVER_DATE "20160210" #define VMWGFX_DRIVER_MAJOR 2 -#define VMWGFX_DRIVER_MINOR 10 +#define VMWGFX_DRIVER_MINOR 11 #define VMWGFX_DRIVER_PATCHLEVEL 0 #define VMWGFX_FILE_PAGE_OFFSET 0x00100000 #define VMWGFX_FIFO_STATIC_SIZE (1024*1024) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index dc5beff2b4aa..0243acc8845f 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c @@ -3029,6 +3029,35 @@ static int vmw_cmd_dx_genmips(struct vmw_private *dev_priv, cmd->body.shaderResourceViewId); } +/** + * vmw_cmd_dx_transfer_from_buffer - + * Validate an SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command + * + * @dev_priv: Pointer to a device private struct. + * @sw_context: The software context being used for this batch. + * @header: Pointer to the command header in the command stream. + */ +static int vmw_cmd_dx_transfer_from_buffer(struct vmw_private *dev_priv, + struct vmw_sw_context *sw_context, + SVGA3dCmdHeader *header) +{ + struct { + SVGA3dCmdHeader header; + SVGA3dCmdDXTransferFromBuffer body; + } *cmd = container_of(header, typeof(*cmd), header); + int ret; + + ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, + user_surface_converter, + &cmd->body.srcSid, NULL); + if (ret != 0) + return ret; + + return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, + user_surface_converter, + &cmd->body.destSid, NULL); +} + static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, void *buf, uint32_t *size) @@ -3379,6 +3408,9 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = { &vmw_cmd_buffer_copy_check, true, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY_REGION, &vmw_cmd_pred_copy_check, true, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER, + &vmw_cmd_dx_transfer_from_buffer, + true, false, true), }; static int vmw_cmd_check(struct vmw_private *dev_priv, -- cgit v1.2.3-59-g8ed1b From e7a45284ba1abcea591f7c01b05227b6698b596c Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Mon, 10 Oct 2016 10:44:00 -0700 Subject: drm/vmwgfx: Allow resource relocations on byte boundaries So far, resource allocations have only been allowed on 4-byte boundaries. As commands get packed tighter, allow them on byte boundaries. 
Signed-off-by: Thomas Hellstrom Reviewed-by: Sinclair Yeh Signed-off-by: Sinclair Yeh --- drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 41 +++++++++++++++++++++------------ 1 file changed, 26 insertions(+), 15 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index 0243acc8845f..b915f621187f 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c @@ -39,7 +39,7 @@ * * @head: List head for the software context's relocation list. * @res: Non-ref-counted pointer to the resource. - * @offset: Offset of 4 byte entries into the command buffer where the + * @offset: Offset of single byte entries into the command buffer where the * id that needs fixup is located. */ struct vmw_resource_relocation { @@ -109,7 +109,18 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context, struct vmw_dma_buffer *vbo, bool validate_as_mob, uint32_t *p_val_node); - +/** + * vmw_ptr_diff - Compute the offset from a to b in bytes + * + * @a: A starting pointer. + * @b: A pointer offset in the same address space. + * + * Returns: The offset in bytes between the two pointers. + */ +static size_t vmw_ptr_diff(void *a, void *b) +{ + return (unsigned long) b - (unsigned long) a; +} /** * vmw_resources_unreserve - unreserve resources previously reserved for @@ -409,7 +420,7 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv, * @list: Pointer to head of relocation list. * @res: The resource. * @offset: Offset into the command buffer currently being parsed where the - * id that needs fixup is located. Granularity is 4 bytes. + * id that needs fixup is located. Granularity is one byte. */ static int vmw_resource_relocation_add(struct list_head *list, const struct vmw_resource *res, @@ -460,10 +471,11 @@ static void vmw_resource_relocations_apply(uint32_t *cb, struct vmw_resource_relocation *rel; list_for_each_entry(rel, list, head) { + u32 *addr = (u32 *)((unsigned long) cb + rel->offset); if (likely(rel->res != NULL)) - cb[rel->offset] = rel->res->id; + *addr = rel->res->id; else - cb[rel->offset] = SVGA_3D_CMD_NOP; + *addr = SVGA_3D_CMD_NOP; } } @@ -655,7 +667,8 @@ static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv, *p_val = NULL; ret = vmw_resource_relocation_add(&sw_context->res_relocations, res, - id_loc - sw_context->buf_start); + vmw_ptr_diff(sw_context->buf_start, + id_loc)); if (unlikely(ret != 0)) return ret; @@ -721,7 +734,7 @@ vmw_cmd_res_check(struct vmw_private *dev_priv, return vmw_resource_relocation_add (&sw_context->res_relocations, res, - id_loc - sw_context->buf_start); + vmw_ptr_diff(sw_context->buf_start, id_loc)); } ret = vmw_user_resource_lookup_handle(dev_priv, @@ -2143,10 +2156,9 @@ static int vmw_cmd_shader_define(struct vmw_private *dev_priv, return ret; return vmw_resource_relocation_add(&sw_context->res_relocations, - NULL, &cmd->header.id - - sw_context->buf_start); - - return 0; + NULL, + vmw_ptr_diff(sw_context->buf_start, + &cmd->header.id)); } /** @@ -2188,10 +2200,9 @@ static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv, return ret; return vmw_resource_relocation_add(&sw_context->res_relocations, - NULL, &cmd->header.id - - sw_context->buf_start); - - return 0; + NULL, + vmw_ptr_diff(sw_context->buf_start, + &cmd->header.id)); } /** -- cgit v1.2.3-59-g8ed1b From 728c3b53995f71e4b175d5939b8ba3211b6bc34d Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Mon, 10 Oct 2016 10:45:55 -0700 Subject: drm/vmwgfx: Remove a 
leftover debug printout Remove a leftover debug printout Signed-off-by: Thomas Hellstrom Reviewed-by: Brian Paul Reviewed-by: Sinclair Yeh Signed-off-by: Sinclair Yeh --- drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index b915f621187f..ddd5e8a59723 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c @@ -4275,9 +4275,6 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv, ttm_bo_unref(&query_val.bo); ttm_bo_unref(&pinned_val.bo); vmw_dmabuf_unreference(&dev_priv->pinned_bo); - DRM_INFO("Dummy query bo pin count: %d\n", - dev_priv->dummy_query_bo->pin_count); - out_unlock: return; -- cgit v1.2.3-59-g8ed1b From 51ab70bed997f64f091a639dbe22b629725a7faf Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Mon, 10 Oct 2016 10:51:24 -0700 Subject: drm/vmwgfx: Limit the user-space command buffer size With older hardware versions, the user could specify arbitrarily large command buffer sizes, causing a vmalloc / vmap space exhaustion. Signed-off-by: Thomas Hellstrom Reviewed-by: Brian Paul Reviewed-by: Sinclair Yeh Signed-off-by: Sinclair Yeh Cc: stable@vger.kernel.org --- drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index ddd5e8a59723..d1f4a48dee0f 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c @@ -3891,14 +3891,14 @@ static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv, int ret; *header = NULL; - if (!dev_priv->cman || kernel_commands) - return kernel_commands; - if (command_size > SVGA_CB_MAX_SIZE) { DRM_ERROR("Command buffer is too large.\n"); return ERR_PTR(-EINVAL); } + if (!dev_priv->cman || kernel_commands) + return kernel_commands; + /* If possible, add a little space for fencing. */ cmdbuf_size = command_size + 512; cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE); -- cgit v1.2.3-59-g8ed1b From a19440304db2d97aed5cee9bfa5017c98d2348bf Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Mon, 10 Oct 2016 11:06:45 -0700 Subject: drm/vmwgfx: Avoid validating views on view destruction When a view destruction command was present in the command stream, the view was validated to avoid a device error. That caused excessive and unnecessary validations of views, surfaces and mobs on view destruction. Replace this with a new relocation type that patches the view destruction command to a NOP if the view is not present in the device after the execbuf validation sequence. Also add checks for the member size of the vmw_res_relocation struct. Fixes sporadic command submission errors on google-earth exit. 
Reported-by: Brian Paul Signed-off-by: Thomas Hellstrom Reviewed-by: Brian Paul Reviewed-by: Sinclair Yeh Signed-off-by: Sinclair Yeh Cc: stable@vger.kernel.org --- drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 71 +++++++++++++++++++++++++++------ 1 file changed, 58 insertions(+), 13 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index d1f4a48dee0f..c7b53d987f06 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c @@ -34,6 +34,24 @@ #define VMW_RES_HT_ORDER 12 +/** + * enum vmw_resource_relocation_type - Relocation type for resources + * + * @vmw_res_rel_normal: Traditional relocation. The resource id in the + * command stream is replaced with the actual id after validation. + * @vmw_res_rel_nop: NOP relocation. The command is unconditionally replaced + * with a NOP. + * @vmw_res_rel_cond_nop: Conditional NOP relocation. If the resource id + * after validation is -1, the command is replaced with a NOP. Otherwise no + * action. + */ +enum vmw_resource_relocation_type { + vmw_res_rel_normal, + vmw_res_rel_nop, + vmw_res_rel_cond_nop, + vmw_res_rel_max +}; + /** * struct vmw_resource_relocation - Relocation info for resources * @@ -41,11 +59,13 @@ * @res: Non-ref-counted pointer to the resource. * @offset: Offset of single byte entries into the command buffer where the * id that needs fixup is located. + * @rel_type: Type of relocation. */ struct vmw_resource_relocation { struct list_head head; const struct vmw_resource *res; - unsigned long offset; + u32 offset:29; + enum vmw_resource_relocation_type rel_type:3; }; /** @@ -421,10 +441,13 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv, * @res: The resource. * @offset: Offset into the command buffer currently being parsed where the * id that needs fixup is located. Granularity is one byte. + * @rel_type: Relocation type. 
*/ static int vmw_resource_relocation_add(struct list_head *list, const struct vmw_resource *res, - unsigned long offset) + unsigned long offset, + enum vmw_resource_relocation_type + rel_type) { struct vmw_resource_relocation *rel; @@ -436,6 +459,7 @@ static int vmw_resource_relocation_add(struct list_head *list, rel->res = res; rel->offset = offset; + rel->rel_type = rel_type; list_add_tail(&rel->head, list); return 0; @@ -470,12 +494,24 @@ static void vmw_resource_relocations_apply(uint32_t *cb, { struct vmw_resource_relocation *rel; + /* Validate the struct vmw_resource_relocation member size */ + BUILD_BUG_ON(SVGA_CB_MAX_SIZE >= (1 << 29)); + BUILD_BUG_ON(vmw_res_rel_max >= (1 << 3)); + list_for_each_entry(rel, list, head) { u32 *addr = (u32 *)((unsigned long) cb + rel->offset); - if (likely(rel->res != NULL)) + switch (rel->rel_type) { + case vmw_res_rel_normal: *addr = rel->res->id; - else + break; + case vmw_res_rel_nop: *addr = SVGA_3D_CMD_NOP; + break; + default: + if (rel->res->id == -1) + *addr = SVGA_3D_CMD_NOP; + break; + } } } @@ -668,7 +704,8 @@ static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv, ret = vmw_resource_relocation_add(&sw_context->res_relocations, res, vmw_ptr_diff(sw_context->buf_start, - id_loc)); + id_loc), + vmw_res_rel_normal); if (unlikely(ret != 0)) return ret; @@ -734,7 +771,8 @@ vmw_cmd_res_check(struct vmw_private *dev_priv, return vmw_resource_relocation_add (&sw_context->res_relocations, res, - vmw_ptr_diff(sw_context->buf_start, id_loc)); + vmw_ptr_diff(sw_context->buf_start, id_loc), + vmw_res_rel_normal); } ret = vmw_user_resource_lookup_handle(dev_priv, @@ -2158,7 +2196,8 @@ static int vmw_cmd_shader_define(struct vmw_private *dev_priv, return vmw_resource_relocation_add(&sw_context->res_relocations, NULL, vmw_ptr_diff(sw_context->buf_start, - &cmd->header.id)); + &cmd->header.id), + vmw_res_rel_nop); } /** @@ -2202,7 +2241,8 @@ static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv, return vmw_resource_relocation_add(&sw_context->res_relocations, NULL, vmw_ptr_diff(sw_context->buf_start, - &cmd->header.id)); + &cmd->header.id), + vmw_res_rel_nop); } /** @@ -2859,8 +2899,7 @@ static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv, * @header: Pointer to the command header in the command stream. * * Check that the view exists, and if it was not created using this - * command batch, make sure it's validated (present in the device) so that - * the remove command will not confuse the device. + * command batch, conditionally make this command a NOP. */ static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, @@ -2888,10 +2927,16 @@ static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv, return ret; /* - * Add view to the validate list iff it was not created using this - * command batch. + * If the view wasn't created during this command batch, it might + * have been removed due to a context swapout, so add a + * relocation to conditionally make this command a NOP to avoid + * device errors. 
*/ - return vmw_view_res_val_add(sw_context, view); + return vmw_resource_relocation_add(&sw_context->res_relocations, + view, + vmw_ptr_diff(sw_context->buf_start, + &cmd->header.id), + vmw_res_rel_cond_nop); } /** -- cgit v1.2.3-59-g8ed1b From 7ed3b3943281e9da32b52e2aac77bdb2c42c5117 Mon Sep 17 00:00:00 2001 From: Markus Elfring Date: Thu, 22 Sep 2016 21:54:33 +0200 Subject: drm/vmwgfx: Use kmalloc_array() in vmw_surface_define_ioctl() Multiplications for the size determination of memory allocations indicated that array data structures should be processed. Thus use the corresponding function "kmalloc_array". This issue was detected by using the Coccinelle software. Signed-off-by: Markus Elfring Reviewed-by: Sinclair Yeh Signed-off-by: Sinclair Yeh --- drivers/gpu/drm/vmwgfx/vmwgfx_surface.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c index c2a721a8cef9..f55754936d90 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c @@ -763,14 +763,16 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels)); srf->num_sizes = num_sizes; user_srf->size = size; - - srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL); + srf->sizes = kmalloc_array(srf->num_sizes, + sizeof(*srf->sizes), + GFP_KERNEL); if (unlikely(srf->sizes == NULL)) { ret = -ENOMEM; goto out_no_sizes; } - srf->offsets = kmalloc(srf->num_sizes * sizeof(*srf->offsets), - GFP_KERNEL); + srf->offsets = kmalloc_array(srf->num_sizes, + sizeof(*srf->offsets), + GFP_KERNEL); if (unlikely(srf->offsets == NULL)) { ret = -ENOMEM; goto out_no_offsets; -- cgit v1.2.3-59-g8ed1b From c138d03f1bf3b9c7bfd449e890cc003658b5c45a Mon Sep 17 00:00:00 2001 From: Markus Elfring Date: Fri, 23 Sep 2016 17:26:02 +0200 Subject: drm/vmwgfx: Use memdup_user() rather than duplicating its implementation * Reuse existing functionality from memdup_user() instead of keeping duplicate source code. * Try this copy operation before allocating memory for the data structure member "offsets". * Delete the local variable "user_sizes" which became unnecessary with this refactoring. 
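To illustrate the refactoring, the general pattern being applied is roughly the following (a simplified sketch, not the literal driver hunk; "user_ptr" and "count" are placeholder names):

        /*
         * memdup_user() performs the allocation and the copy_from_user()
         * in one step and returns an ERR_PTR() on failure, so the caller
         * needs a single IS_ERR() check instead of two error paths.
         */
        buf = memdup_user(user_ptr, count * sizeof(*buf));
        if (IS_ERR(buf))
                return PTR_ERR(buf);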
Signed-off-by: Markus Elfring Reviewed-by: Sinclair Yeh Signed-off-by: Sinclair Yeh --- drivers/gpu/drm/vmwgfx/vmwgfx_surface.c | 21 +++++---------------- 1 file changed, 5 insertions(+), 16 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c index f55754936d90..15504c6ca3e0 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c @@ -700,7 +700,6 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, struct drm_vmw_surface_create_req *req = &arg->req; struct drm_vmw_surface_arg *rep = &arg->rep; struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; - struct drm_vmw_size __user *user_sizes; int ret; int i, j; uint32_t cur_bo_offset; @@ -763,11 +762,11 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels)); srf->num_sizes = num_sizes; user_srf->size = size; - srf->sizes = kmalloc_array(srf->num_sizes, - sizeof(*srf->sizes), - GFP_KERNEL); - if (unlikely(srf->sizes == NULL)) { - ret = -ENOMEM; + srf->sizes = memdup_user((struct drm_vmw_size __user *)(unsigned long) + req->size_addr, + sizeof(*srf->sizes) * srf->num_sizes); + if (IS_ERR(srf->sizes)) { + ret = PTR_ERR(srf->sizes); goto out_no_sizes; } srf->offsets = kmalloc_array(srf->num_sizes, @@ -778,16 +777,6 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, goto out_no_offsets; } - user_sizes = (struct drm_vmw_size __user *)(unsigned long) - req->size_addr; - - ret = copy_from_user(srf->sizes, user_sizes, - srf->num_sizes * sizeof(*srf->sizes)); - if (unlikely(ret != 0)) { - ret = -EFAULT; - goto out_no_copy; - } - srf->base_size = *srf->sizes; srf->autogen_filter = SVGA3D_TEX_FILTER_NONE; srf->multisample_count = 0; -- cgit v1.2.3-59-g8ed1b From 862f6157d176c9db5a7ed423245108d9bb3d7038 Mon Sep 17 00:00:00 2001 From: Markus Elfring Date: Fri, 23 Sep 2016 17:53:49 +0200 Subject: drm/vmwgfx: Adjust checks for null pointers in 13 functions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The script "checkpatch.pl" can point information out like the following. Comparison to NULL could be written !… Thus fix the affected source code places. 
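A minimal example of the transformation checkpatch suggests (abridged; the real hunks below also keep their DRM_ERROR() messages):

        /* before */
        cmd = vmw_fifo_reserve(dev_priv, submit_size);
        if (unlikely(cmd == NULL))
                return -ENOMEM;

        /* after */
        cmd = vmw_fifo_reserve(dev_priv, submit_size);
        if (unlikely(!cmd))
                return -ENOMEM;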
Signed-off-by: Markus Elfring Reviewed-by: Sinclair Yeh Signed-off-by: Sinclair Yeh --- drivers/gpu/drm/vmwgfx/vmwgfx_surface.c | 31 +++++++++++++++---------------- 1 file changed, 15 insertions(+), 16 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c index 15504c6ca3e0..b445ce9b9757 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c @@ -324,7 +324,7 @@ static void vmw_hw_surface_destroy(struct vmw_resource *res) if (res->id != -1) { cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size()); - if (unlikely(cmd == NULL)) { + if (unlikely(!cmd)) { DRM_ERROR("Failed reserving FIFO space for surface " "destruction.\n"); return; @@ -397,7 +397,7 @@ static int vmw_legacy_srf_create(struct vmw_resource *res) submit_size = vmw_surface_define_size(srf); cmd = vmw_fifo_reserve(dev_priv, submit_size); - if (unlikely(cmd == NULL)) { + if (unlikely(!cmd)) { DRM_ERROR("Failed reserving FIFO space for surface " "creation.\n"); ret = -ENOMEM; @@ -446,11 +446,10 @@ static int vmw_legacy_srf_dma(struct vmw_resource *res, uint8_t *cmd; struct vmw_private *dev_priv = res->dev_priv; - BUG_ON(val_buf->bo == NULL); - + BUG_ON(!val_buf->bo); submit_size = vmw_surface_dma_size(srf); cmd = vmw_fifo_reserve(dev_priv, submit_size); - if (unlikely(cmd == NULL)) { + if (unlikely(!cmd)) { DRM_ERROR("Failed reserving FIFO space for surface " "DMA.\n"); return -ENOMEM; @@ -538,7 +537,7 @@ static int vmw_legacy_srf_destroy(struct vmw_resource *res) submit_size = vmw_surface_destroy_size(); cmd = vmw_fifo_reserve(dev_priv, submit_size); - if (unlikely(cmd == NULL)) { + if (unlikely(!cmd)) { DRM_ERROR("Failed reserving FIFO space for surface " "eviction.\n"); return -ENOMEM; @@ -578,7 +577,7 @@ static int vmw_surface_init(struct vmw_private *dev_priv, int ret; struct vmw_resource *res = &srf->res; - BUG_ON(res_free == NULL); + BUG_ON(!res_free); if (!dev_priv->has_mob) vmw_fifo_resource_inc(dev_priv); ret = vmw_resource_init(dev_priv, res, true, res_free, @@ -747,7 +746,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, } user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL); - if (unlikely(user_srf == NULL)) { + if (unlikely(!user_srf)) { ret = -ENOMEM; goto out_no_user_srf; } @@ -772,7 +771,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, srf->offsets = kmalloc_array(srf->num_sizes, sizeof(*srf->offsets), GFP_KERNEL); - if (unlikely(srf->offsets == NULL)) { + if (unlikely(!srf->offsets)) { ret = -ENOMEM; goto out_no_offsets; } @@ -914,7 +913,7 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv, ret = -EINVAL; base = ttm_base_object_lookup_for_ref(dev_priv->tdev, handle); - if (unlikely(base == NULL)) { + if (unlikely(!base)) { DRM_ERROR("Could not find surface to reference.\n"); goto out_no_lookup; } @@ -1060,7 +1059,7 @@ static int vmw_gb_surface_create(struct vmw_resource *res) cmd = vmw_fifo_reserve(dev_priv, submit_len); cmd2 = (typeof(cmd2))cmd; - if (unlikely(cmd == NULL)) { + if (unlikely(!cmd)) { DRM_ERROR("Failed reserving FIFO space for surface " "creation.\n"); ret = -ENOMEM; @@ -1126,7 +1125,7 @@ static int vmw_gb_surface_bind(struct vmw_resource *res, submit_size = sizeof(*cmd1) + (res->backup_dirty ? 
sizeof(*cmd2) : 0); cmd1 = vmw_fifo_reserve(dev_priv, submit_size); - if (unlikely(cmd1 == NULL)) { + if (unlikely(!cmd1)) { DRM_ERROR("Failed reserving FIFO space for surface " "binding.\n"); return -ENOMEM; @@ -1176,7 +1175,7 @@ static int vmw_gb_surface_unbind(struct vmw_resource *res, submit_size = sizeof(*cmd3) + (readback ? sizeof(*cmd1) : sizeof(*cmd2)); cmd = vmw_fifo_reserve(dev_priv, submit_size); - if (unlikely(cmd == NULL)) { + if (unlikely(!cmd)) { DRM_ERROR("Failed reserving FIFO space for surface " "unbinding.\n"); return -ENOMEM; @@ -1235,7 +1234,7 @@ static int vmw_gb_surface_destroy(struct vmw_resource *res) vmw_binding_res_list_scrub(&res->binding_head); cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); - if (unlikely(cmd == NULL)) { + if (unlikely(!cmd)) { DRM_ERROR("Failed reserving FIFO space for surface " "destruction.\n"); mutex_unlock(&dev_priv->binding_mutex); @@ -1401,7 +1400,7 @@ int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data, user_srf = container_of(base, struct vmw_user_surface, prime.base); srf = &user_srf->srf; - if (srf->res.backup == NULL) { + if (!srf->res.backup) { DRM_ERROR("Shared GB surface is missing a backup buffer.\n"); goto out_bad_resource; } @@ -1515,7 +1514,7 @@ int vmw_surface_gb_priv_define(struct drm_device *dev, } user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL); - if (unlikely(user_srf == NULL)) { + if (unlikely(!user_srf)) { ret = -ENOMEM; goto out_no_user_srf; } -- cgit v1.2.3-59-g8ed1b From 0766f788eb727e2e330d55d30545db65bcf2623f Mon Sep 17 00:00:00 2001 From: Emese Revfy Date: Mon, 20 Jun 2016 20:42:34 +0200 Subject: latent_entropy: Mark functions with __latent_entropy The __latent_entropy gcc attribute can be used only on functions and variables. If it is on a function then the plugin will instrument it for gathering control-flow entropy. If the attribute is on a variable then the plugin will initialize it with random contents. The variable must be an integer, an integer array type or a structure with integer fields. These specific functions have been selected because they are init functions (to help gather boot-time entropy), are called at unpredictable times, or they have variable loops, each of which provide some level of latent entropy. Signed-off-by: Emese Revfy [kees: expanded commit message] Signed-off-by: Kees Cook --- block/blk-softirq.c | 2 +- drivers/char/random.c | 4 ++-- fs/namespace.c | 1 + include/linux/compiler-gcc.h | 7 +++++++ include/linux/compiler.h | 4 ++++ include/linux/fdtable.h | 2 +- include/linux/genhd.h | 2 +- include/linux/init.h | 5 +++-- include/linux/random.h | 4 ++-- kernel/fork.c | 6 ++++-- kernel/rcu/tiny.c | 2 +- kernel/rcu/tree.c | 2 +- kernel/sched/fair.c | 2 +- kernel/softirq.c | 4 ++-- kernel/time/timer.c | 2 +- lib/irq_poll.c | 2 +- lib/random32.c | 2 +- mm/page_alloc.c | 2 +- net/core/dev.c | 4 ++-- 19 files changed, 37 insertions(+), 22 deletions(-) (limited to 'drivers') diff --git a/block/blk-softirq.c b/block/blk-softirq.c index 53b1737e978d..489eab825a8a 100644 --- a/block/blk-softirq.c +++ b/block/blk-softirq.c @@ -18,7 +18,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done); * Softirq action handler - move entries to local list and loop over them * while passing them to the queue registered handler. 
*/ -static void blk_done_softirq(struct softirq_action *h) +static __latent_entropy void blk_done_softirq(struct softirq_action *h) { struct list_head *cpu_list, local_list; diff --git a/drivers/char/random.c b/drivers/char/random.c index 3efb3bf0ab83..7274ae89ddb3 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -479,8 +479,8 @@ static ssize_t _extract_entropy(struct entropy_store *r, void *buf, static void crng_reseed(struct crng_state *crng, struct entropy_store *r); static void push_to_pool(struct work_struct *work); -static __u32 input_pool_data[INPUT_POOL_WORDS]; -static __u32 blocking_pool_data[OUTPUT_POOL_WORDS]; +static __u32 input_pool_data[INPUT_POOL_WORDS] __latent_entropy; +static __u32 blocking_pool_data[OUTPUT_POOL_WORDS] __latent_entropy; static struct entropy_store input_pool = { .poolinfo = &poolinfo_table[0], diff --git a/fs/namespace.c b/fs/namespace.c index 7bb2cda3bfef..4a9568b81138 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -2759,6 +2759,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns) return new_ns; } +__latent_entropy struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns, struct user_namespace *user_ns, struct fs_struct *new_fs) { diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index 573c5a18908f..432f5c97e18f 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h @@ -188,6 +188,13 @@ #endif /* GCC_VERSION >= 40300 */ #if GCC_VERSION >= 40500 + +#ifndef __CHECKER__ +#ifdef LATENT_ENTROPY_PLUGIN +#define __latent_entropy __attribute__((latent_entropy)) +#endif +#endif + /* * Mark a position in code as unreachable. This can be used to * suppress control flow warnings after asm blocks that transfer diff --git a/include/linux/compiler.h b/include/linux/compiler.h index 668569844d37..ceaddaf76ff1 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -406,6 +406,10 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s # define __attribute_const__ /* unimplemented */ #endif +#ifndef __latent_entropy +# define __latent_entropy +#endif + /* * Tell gcc if a function is cold. The compiler will assume any path * directly leading to the call is unlikely. 
diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h index 5295535b60c6..9852c7e33466 100644 --- a/include/linux/fdtable.h +++ b/include/linux/fdtable.h @@ -105,7 +105,7 @@ struct files_struct *get_files_struct(struct task_struct *); void put_files_struct(struct files_struct *fs); void reset_files_struct(struct files_struct *); int unshare_files(struct files_struct **); -struct files_struct *dup_fd(struct files_struct *, int *); +struct files_struct *dup_fd(struct files_struct *, int *) __latent_entropy; void do_close_on_exec(struct files_struct *); int iterate_fd(struct files_struct *, unsigned, int (*)(const void *, struct file *, unsigned), diff --git a/include/linux/genhd.h b/include/linux/genhd.h index 1dbf52f9c24b..e0341af6950e 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h @@ -437,7 +437,7 @@ extern void disk_flush_events(struct gendisk *disk, unsigned int mask); extern unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask); /* drivers/char/random.c */ -extern void add_disk_randomness(struct gendisk *disk); +extern void add_disk_randomness(struct gendisk *disk) __latent_entropy; extern void rand_initialize_disk(struct gendisk *disk); static inline sector_t get_start_sect(struct block_device *bdev) diff --git a/include/linux/init.h b/include/linux/init.h index 6935d02474aa..1e5c131d5c9a 100644 --- a/include/linux/init.h +++ b/include/linux/init.h @@ -39,7 +39,7 @@ /* These are for everybody (although not all archs will actually discard it in modules) */ -#define __init __section(.init.text) __cold notrace +#define __init __section(.init.text) __cold notrace __latent_entropy #define __initdata __section(.init.data) #define __initconst __constsection(.init.rodata) #define __exitdata __section(.exit.data) @@ -86,7 +86,8 @@ #define __exit __section(.exit.text) __exitused __cold notrace /* Used for MEMORY_HOTPLUG */ -#define __meminit __section(.meminit.text) __cold notrace +#define __meminit __section(.meminit.text) __cold notrace \ + __latent_entropy #define __meminitdata __section(.meminit.data) #define __meminitconst __constsection(.meminit.rodata) #define __memexit __section(.memexit.text) __exitused __cold notrace diff --git a/include/linux/random.h b/include/linux/random.h index a59c74cdb1eb..d80a4388a4fd 100644 --- a/include/linux/random.h +++ b/include/linux/random.h @@ -30,8 +30,8 @@ static inline void add_latent_entropy(void) {} #endif extern void add_input_randomness(unsigned int type, unsigned int code, - unsigned int value); -extern void add_interrupt_randomness(int irq, int irq_flags); + unsigned int value) __latent_entropy; +extern void add_interrupt_randomness(int irq, int irq_flags) __latent_entropy; extern void get_random_bytes(void *buf, int nbytes); extern int add_random_ready_callback(struct random_ready_callback *rdy); diff --git a/kernel/fork.c b/kernel/fork.c index 001b18473a07..05393881ef39 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -404,7 +404,8 @@ free_tsk: } #ifdef CONFIG_MMU -static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) +static __latent_entropy int dup_mmap(struct mm_struct *mm, + struct mm_struct *oldmm) { struct vm_area_struct *mpnt, *tmp, *prev, **pprev; struct rb_node **rb_link, *rb_parent; @@ -1296,7 +1297,8 @@ init_task_pid(struct task_struct *task, enum pid_type type, struct pid *pid) * parts of the process environment (as per the clone * flags). The actual kick-off is left to the caller. 
*/ -static struct task_struct *copy_process(unsigned long clone_flags, +static __latent_entropy struct task_struct *copy_process( + unsigned long clone_flags, unsigned long stack_start, unsigned long stack_size, int __user *child_tidptr, diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c index 944b1b491ed8..1898559e6b60 100644 --- a/kernel/rcu/tiny.c +++ b/kernel/rcu/tiny.c @@ -170,7 +170,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp) false)); } -static void rcu_process_callbacks(struct softirq_action *unused) +static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused) { __rcu_process_callbacks(&rcu_sched_ctrlblk); __rcu_process_callbacks(&rcu_bh_ctrlblk); diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 5d80925e7fc8..e5164deb51e1 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -3013,7 +3013,7 @@ __rcu_process_callbacks(struct rcu_state *rsp) /* * Do RCU core processing for the current CPU. */ -static void rcu_process_callbacks(struct softirq_action *unused) +static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused) { struct rcu_state *rsp; diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 039de34f1521..004996df2f10 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -8283,7 +8283,7 @@ static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) { } * run_rebalance_domains is triggered when needed from the scheduler tick. * Also triggered for nohz idle balancing (with nohz_balancing_kick set). */ -static void run_rebalance_domains(struct softirq_action *h) +static __latent_entropy void run_rebalance_domains(struct softirq_action *h) { struct rq *this_rq = this_rq(); enum cpu_idle_type idle = this_rq->idle_balance ? diff --git a/kernel/softirq.c b/kernel/softirq.c index 17caf4b63342..34033fd09c8c 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c @@ -482,7 +482,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t) } EXPORT_SYMBOL(__tasklet_hi_schedule_first); -static void tasklet_action(struct softirq_action *a) +static __latent_entropy void tasklet_action(struct softirq_action *a) { struct tasklet_struct *list; @@ -518,7 +518,7 @@ static void tasklet_action(struct softirq_action *a) } } -static void tasklet_hi_action(struct softirq_action *a) +static __latent_entropy void tasklet_hi_action(struct softirq_action *a) { struct tasklet_struct *list; diff --git a/kernel/time/timer.c b/kernel/time/timer.c index 32bf6f75a8fe..2d47980a1bc4 100644 --- a/kernel/time/timer.c +++ b/kernel/time/timer.c @@ -1633,7 +1633,7 @@ static inline void __run_timers(struct timer_base *base) /* * This function runs timers and the timer-tq in bottom half context. 
*/ -static void run_timer_softirq(struct softirq_action *h) +static __latent_entropy void run_timer_softirq(struct softirq_action *h) { struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]); diff --git a/lib/irq_poll.c b/lib/irq_poll.c index 836f7db4e548..63be7495dbb2 100644 --- a/lib/irq_poll.c +++ b/lib/irq_poll.c @@ -74,7 +74,7 @@ void irq_poll_complete(struct irq_poll *iop) } EXPORT_SYMBOL(irq_poll_complete); -static void irq_poll_softirq(struct softirq_action *h) +static void __latent_entropy irq_poll_softirq(struct softirq_action *h) { struct list_head *list = this_cpu_ptr(&blk_cpu_iopoll); int rearm = 0, budget = irq_poll_budget; diff --git a/lib/random32.c b/lib/random32.c index 69ed593aab07..a30923573676 100644 --- a/lib/random32.c +++ b/lib/random32.c @@ -47,7 +47,7 @@ static inline void prandom_state_selftest(void) } #endif -static DEFINE_PER_CPU(struct rnd_state, net_rand_state); +static DEFINE_PER_CPU(struct rnd_state, net_rand_state) __latent_entropy; /** * prandom_u32_state - seeded pseudo-random number generator. diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 248851d1fc86..901121af4e54 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -92,7 +92,7 @@ int _node_numa_mem_[MAX_NUMNODES]; #endif #ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY -volatile u64 latent_entropy; +volatile u64 latent_entropy __latent_entropy; EXPORT_SYMBOL(latent_entropy); #endif diff --git a/net/core/dev.c b/net/core/dev.c index ea6312057a71..ee076c2791f9 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -3855,7 +3855,7 @@ int netif_rx_ni(struct sk_buff *skb) } EXPORT_SYMBOL(netif_rx_ni); -static void net_tx_action(struct softirq_action *h) +static __latent_entropy void net_tx_action(struct softirq_action *h) { struct softnet_data *sd = this_cpu_ptr(&softnet_data); @@ -5187,7 +5187,7 @@ out_unlock: return work; } -static void net_rx_action(struct softirq_action *h) +static __latent_entropy void net_rx_action(struct softirq_action *h) { struct softnet_data *sd = this_cpu_ptr(&softnet_data); unsigned long time_limit = jiffies + 2; -- cgit v1.2.3-59-g8ed1b From 69e67a0626e503676ea2933b448f7aca8dd544f5 Mon Sep 17 00:00:00 2001 From: Axel Lin Date: Sun, 25 Sep 2016 20:13:57 +0800 Subject: PM / devfreq: exynos-nocp: Select REGMAP_MMIO This driver uses devm_regmap_init_mmio(), so select REGMAP_MMIO to avoid build failure. Signed-off-by: Axel Lin Acked-by: Chanwoo Choi Signed-off-by: Rafael J. Wysocki --- drivers/devfreq/event/Kconfig | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/devfreq/event/Kconfig b/drivers/devfreq/event/Kconfig index 0fdae8608961..cd949800eed9 100644 --- a/drivers/devfreq/event/Kconfig +++ b/drivers/devfreq/event/Kconfig @@ -17,6 +17,7 @@ config DEVFREQ_EVENT_EXYNOS_NOCP tristate "EXYNOS NoC (Network On Chip) Probe DEVFREQ event Driver" depends on ARCH_EXYNOS || COMPILE_TEST select PM_OPP + select REGMAP_MMIO help This add the devfreq-event driver for Exynos SoC. It provides NoC (Network on Chip) Probe counters to measure the bandwidth of AXI bus. -- cgit v1.2.3-59-g8ed1b From 3b91f4b361daad1f3e1c60ca5a7861e8b36f9728 Mon Sep 17 00:00:00 2001 From: Axel Lin Date: Sun, 25 Sep 2016 20:13:58 +0800 Subject: PM / devfreq: exynos-nocp: Remove redundant code load_count/total_count are reset by devfreq_event_get_event(), so remove the redundant code in exynos_nocp_get_event(). Signed-off-by: Axel Lin Acked-by: Chanwoo Choi [ rjw: Subject/changelog ] Signed-off-by: Rafael J. 
Wysocki --- drivers/devfreq/event/exynos-nocp.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'drivers') diff --git a/drivers/devfreq/event/exynos-nocp.c b/drivers/devfreq/event/exynos-nocp.c index a5841403bde8..49e712aca0c1 100644 --- a/drivers/devfreq/event/exynos-nocp.c +++ b/drivers/devfreq/event/exynos-nocp.c @@ -176,9 +176,6 @@ static int exynos_nocp_get_event(struct devfreq_event_dev *edev, return 0; out: - edata->load_count = 0; - edata->total_count = 0; - dev_err(nocp->dev, "Failed to read the counter of NoC probe device\n"); return ret; -- cgit v1.2.3-59-g8ed1b From 0f376c9cd86c23f37312d37748b233660ef9d9af Mon Sep 17 00:00:00 2001 From: Axel Lin Date: Thu, 29 Sep 2016 10:13:28 +0800 Subject: PM / devfreq: Add proper locking around list_del() Use devfreq_list_lock around list_del() to prevent list corruption. Signed-off-by: Axel Lin Acked-by: MyungJoo Ham Signed-off-by: Rafael J. Wysocki --- drivers/devfreq/devfreq.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c index 478006b7764a..66d3c7184a0f 100644 --- a/drivers/devfreq/devfreq.c +++ b/drivers/devfreq/devfreq.c @@ -594,17 +594,19 @@ struct devfreq *devfreq_add_device(struct device *dev, if (devfreq->governor) err = devfreq->governor->event_handler(devfreq, DEVFREQ_GOV_START, NULL); - mutex_unlock(&devfreq_list_lock); if (err) { dev_err(dev, "%s: Unable to start governor for the device\n", __func__); goto err_init; } + mutex_unlock(&devfreq_list_lock); return devfreq; err_init: list_del(&devfreq->node); + mutex_unlock(&devfreq_list_lock); + device_unregister(&devfreq->dev); err_out: return ERR_PTR(err); -- cgit v1.2.3-59-g8ed1b From d0563a039c9d35305f4cb2405665130de596d86a Mon Sep 17 00:00:00 2001 From: Tobias Jakobi Date: Thu, 29 Sep 2016 14:36:36 +0200 Subject: PM / devfreq: Skip status update on uninitialized previous_freq In case devfreq->previous_freq is still uninitialized in devfreq_update_status(), i.e. it has value '0', the lookups in that function fail, eventually leading to some error message: [ 3.041292] devfreq bus_dmc: Couldn't update frequency transition information. Just skip the statup update in this situation. Signed-off-by: Tobias Jakobi Acked-by: MyungJoo Ham Signed-off-by: Rafael J. Wysocki --- drivers/devfreq/devfreq.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers') diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c index 66d3c7184a0f..bf3ea7603a58 100644 --- a/drivers/devfreq/devfreq.c +++ b/drivers/devfreq/devfreq.c @@ -137,6 +137,10 @@ static int devfreq_update_status(struct devfreq *devfreq, unsigned long freq) cur_time = jiffies; + /* Immediately exit if previous_freq is not initialized yet. */ + if (!devfreq->previous_freq) + goto out; + prev_lev = devfreq_get_freq_level(devfreq, devfreq->previous_freq); if (prev_lev < 0) { ret = prev_lev; -- cgit v1.2.3-59-g8ed1b From 5c33677c87cbe44ae04df69c4a29c1750a9ec4e5 Mon Sep 17 00:00:00 2001 From: Andy Whitcroft Date: Tue, 11 Oct 2016 15:16:57 +0100 Subject: dm raid: fix compat_features validation In ecbfb9f118bce4 ("dm raid: add raid level takeover support") a new compatible feature flag was added. Validation for these compat_features was added but this only passes for new raid mappings with this feature flag. This causes previously created raid mappings to be failed at import. Check compat_features for the only valid combination. 
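In other words, a superblock is accepted when compat_features is either empty (metadata created before the takeover support) or exactly the single known flag; a simplified sketch of the intended check (the real change is in the hunk below):

        u32 compat = le32_to_cpu(sb->compat_features);

        /* Accept 0 (older metadata) or the only valid flag combination. */
        if (compat && compat != FEATURE_FLAG_SUPPORTS_V190)
                return -EINVAL;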
Fixes: ecbfb9f118bce4 ("dm raid: add raid level takeover support") Cc: stable@vger.kernel.org # v4.8 Signed-off-by: Andy Whitcroft Signed-off-by: Heinz Mauelshagen Signed-off-by: Mike Snitzer --- drivers/md/dm-raid.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index 8abde6b8cedc..2a3970097991 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c @@ -2258,7 +2258,8 @@ static int super_validate(struct raid_set *rs, struct md_rdev *rdev) if (!mddev->events && super_init_validation(rs, rdev)) return -EINVAL; - if (le32_to_cpu(sb->compat_features) != FEATURE_FLAG_SUPPORTS_V190) { + if (le32_to_cpu(sb->compat_features) && + le32_to_cpu(sb->compat_features) != FEATURE_FLAG_SUPPORTS_V190) { rs->ti->error = "Unable to assemble array: Unknown flag(s) in compatible feature flags"; return -EINVAL; } -- cgit v1.2.3-59-g8ed1b From b60e4ea4a400bde8a4811f94b84a9bb65f81b677 Mon Sep 17 00:00:00 2001 From: Mika Westerberg Date: Thu, 29 Sep 2016 16:39:41 +0300 Subject: ACPI / property: Allow holes in reference properties DT allows holes or empty phandles for references. This is used for example in SPI subsystem where some chip selects are native and others are regular GPIOs. In ACPI _DSD we currently do not support this but instead the preceding reference consumes all following integer arguments. For example we would like to support something like the below ASL fragment for SPI: Package () { "cs-gpios", Package () { ^GPIO, 19, 0, 0, // GPIO CS0 0, // Native CS ^GPIO, 20, 0, 0, // GPIO CS1 } } The zero in the middle means "no entry" or NULL reference. To support this we change acpi_data_get_property_reference() to take firmware node and num_args as argument and rename it to __acpi_node_get_property_reference(). The function returns -ENOENT if the given index resolves to "no entry" reference and -ENODATA when there are no more entries in the property. We then add static inline wrapper acpi_node_get_property_reference() that passes MAX_ACPI_REFERENCE_ARGS as num_args to support the existing behaviour which some drivers have been relying on. Signed-off-by: Mika Westerberg Reviewed-by: Andy Shevchenko Signed-off-by: Rafael J. Wysocki --- drivers/acpi/property.c | 117 ++++++++++++++++++++++++++++-------------------- include/linux/acpi.h | 22 +++++++-- 2 files changed, 87 insertions(+), 52 deletions(-) (limited to 'drivers') diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c index f2fd3fee588a..03f5ec11ab31 100644 --- a/drivers/acpi/property.c +++ b/drivers/acpi/property.c @@ -468,10 +468,11 @@ static int acpi_data_get_property_array(struct acpi_device_data *data, } /** - * acpi_data_get_property_reference - returns handle to the referenced object - * @data: ACPI device data object containing the property + * __acpi_node_get_property_reference - returns handle to the referenced object + * @fwnode: Firmware node to get the property from * @propname: Name of the property * @index: Index of the reference to return + * @num_args: Maximum number of arguments after each reference * @args: Location to store the returned reference with optional arguments * * Find property with @name, verifify that it is a package containing at least @@ -482,17 +483,40 @@ static int acpi_data_get_property_array(struct acpi_device_data *data, * If there's more than one reference in the property value package, @index is * used to select the one to return. 
* + * It is possible to leave holes in the property value set like in the + * example below: + * + * Package () { + * "cs-gpios", + * Package () { + * ^GPIO, 19, 0, 0, + * ^GPIO, 20, 0, 0, + * 0, + * ^GPIO, 21, 0, 0, + * } + * } + * + * Calling this function with index %2 return %-ENOENT and with index %3 + * returns the last entry. If the property does not contain any more values + * %-ENODATA is returned. The NULL entry must be single integer and + * preferably contain value %0. + * * Return: %0 on success, negative error code on failure. */ -static int acpi_data_get_property_reference(struct acpi_device_data *data, - const char *propname, size_t index, - struct acpi_reference_args *args) +int __acpi_node_get_property_reference(struct fwnode_handle *fwnode, + const char *propname, size_t index, size_t num_args, + struct acpi_reference_args *args) { const union acpi_object *element, *end; const union acpi_object *obj; + struct acpi_device_data *data; struct acpi_device *device; int ret, idx = 0; + data = acpi_device_data_of_node(fwnode); + if (!data) + return -EINVAL; + ret = acpi_data_get_property(data, propname, ACPI_TYPE_ANY, &obj); if (ret) return ret; @@ -532,59 +556,54 @@ static int acpi_data_get_property_reference(struct acpi_device_data *data, while (element < end) { u32 nargs, i; - if (element->type != ACPI_TYPE_LOCAL_REFERENCE) - return -EPROTO; - - ret = acpi_bus_get_device(element->reference.handle, &device); - if (ret) - return -ENODEV; - - element++; - nargs = 0; - - /* assume following integer elements are all args */ - for (i = 0; element + i < end; i++) { - int type = element[i].type; + if (element->type == ACPI_TYPE_LOCAL_REFERENCE) { + ret = acpi_bus_get_device(element->reference.handle, + &device); + if (ret) + return -ENODEV; + + nargs = 0; + element++; + + /* assume following integer elements are all args */ + for (i = 0; element + i < end && i < num_args; i++) { + int type = element[i].type; + + if (type == ACPI_TYPE_INTEGER) + nargs++; + else if (type == ACPI_TYPE_LOCAL_REFERENCE) + break; + else + return -EPROTO; + } - if (type == ACPI_TYPE_INTEGER) - nargs++; - else if (type == ACPI_TYPE_LOCAL_REFERENCE) - break; - else + if (nargs > MAX_ACPI_REFERENCE_ARGS) return -EPROTO; - } - if (idx++ == index) { - args->adev = device; - args->nargs = nargs; - for (i = 0; i < nargs; i++) - args->args[i] = element[i].integer.value; + if (idx == index) { + args->adev = device; + args->nargs = nargs; + for (i = 0; i < nargs; i++) + args->args[i] = element[i].integer.value; - return 0; + return 0; + } + + element += nargs; + } else if (element->type == ACPI_TYPE_INTEGER) { + if (idx == index) + return -ENOENT; + element++; + } else { + return -EPROTO; } - element += nargs; + idx++; } - return -EPROTO; -} - -/** - * acpi_node_get_property_reference - get a handle to the referenced object. - * @fwnode: Firmware node to get the property from. - * @propname: Name of the property. - * @index: Index of the reference to return. - * @args: Location to store the returned reference with optional arguments. - */ -int acpi_node_get_property_reference(struct fwnode_handle *fwnode, - const char *name, size_t index, - struct acpi_reference_args *args) -{ - struct acpi_device_data *data = acpi_device_data_of_node(fwnode); - - return data ? 
acpi_data_get_property_reference(data, name, index, args) : -EINVAL; + return -ENODATA; } -EXPORT_SYMBOL_GPL(acpi_node_get_property_reference); +EXPORT_SYMBOL_GPL(__acpi_node_get_property_reference); static int acpi_data_prop_read_single(struct acpi_device_data *data, const char *propname, diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 4d8452c2384b..632ec16a855e 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -927,9 +927,17 @@ struct acpi_reference_args { #ifdef CONFIG_ACPI int acpi_dev_get_property(struct acpi_device *adev, const char *name, acpi_object_type type, const union acpi_object **obj); -int acpi_node_get_property_reference(struct fwnode_handle *fwnode, - const char *name, size_t index, - struct acpi_reference_args *args); +int __acpi_node_get_property_reference(struct fwnode_handle *fwnode, + const char *name, size_t index, size_t num_args, + struct acpi_reference_args *args); + +static inline int acpi_node_get_property_reference(struct fwnode_handle *fwnode, + const char *name, size_t index, + struct acpi_reference_args *args) +{ + return __acpi_node_get_property_reference(fwnode, name, index, + MAX_ACPI_REFERENCE_ARGS, args); +} int acpi_node_prop_get(struct fwnode_handle *fwnode, const char *propname, void **valptr); @@ -1005,6 +1013,14 @@ static inline int acpi_dev_get_property(struct acpi_device *adev, return -ENXIO; } +static inline int +__acpi_node_get_property_reference(struct fwnode_handle *fwnode, + const char *name, size_t index, size_t num_args, + struct acpi_reference_args *args) +{ + return -ENXIO; +} + static inline int acpi_node_get_property_reference(struct fwnode_handle *fwnode, const char *name, size_t index, struct acpi_reference_args *args) -- cgit v1.2.3-59-g8ed1b From 74f47f07e53daeca1d16812fa7c9bf3145bd8216 Mon Sep 17 00:00:00 2001 From: Bartosz Golaszewski Date: Mon, 26 Sep 2016 11:54:15 +0200 Subject: gpio: pca953x: add a comment explaining the need for a lockdep subclass This is a follow-up to commit 559b46990e76 ("gpio: pca953x: fix an incorrect lockdep warning"). The reason for calling lockdep_set_subclass() in pca953x_probe() is not explained in the code. Add a comment describing the problem, partial solution and required future extensions. Signed-off-by: Bartosz Golaszewski Acked-by: Linus Walleij Signed-off-by: Wolfram Sang --- drivers/gpio/gpio-pca953x.c | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) (limited to 'drivers') diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c index 45c8817d068c..e422568e14ad 100644 --- a/drivers/gpio/gpio-pca953x.c +++ b/drivers/gpio/gpio-pca953x.c @@ -794,6 +794,22 @@ static int pca953x_probe(struct i2c_client *client, } mutex_init(&chip->i2c_lock); + /* + * In case we have an i2c-mux controlled by a GPIO provided by an + * expander using the same driver higher on the device tree, read the + * i2c adapter nesting depth and use the retrieved value as lockdep + * subclass for chip->i2c_lock. + * + * REVISIT: This solution is not complete. It protects us from lockdep + * false positives when the expander controlling the i2c-mux is on + * a different level on the device tree, but not when it's on the same + * level on a different branch (in which case the subclass number + * would be the same). + * + * TODO: Once a correct solution is developed, a similar fix should be + * applied to all other i2c-controlled GPIO expanders (and potentially + * regmap-i2c). 
+ */ lockdep_set_subclass(&chip->i2c_lock, i2c_adapter_depth(client->adapter)); -- cgit v1.2.3-59-g8ed1b From 0ba43a81ef7b78ddf404f7709a2257be59436411 Mon Sep 17 00:00:00 2001 From: Xose Vazquez Perez Date: Fri, 7 Oct 2016 18:19:57 +0200 Subject: scsi: Replace wrong device handler name for CLARiiON arrays At drivers/scsi/device_handler/scsi_dh_emc.c it was defined as: Cc: Christoph Hellwig Cc: Hannes Reinecke Cc: Christophe Varoqui Cc: James E.J. Bottomley Cc: Martin K. Petersen Cc: SCSI ML Cc: device-mapper development Signed-off-by: Xose Vazquez Perez Reviewed-by: Hannes Reinecke Signed-off-by: Martin K. Petersen --- drivers/scsi/scsi_dh.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/scsi_dh.c b/drivers/scsi/scsi_dh.c index 54d446c9f56e..b8d3b97b217a 100644 --- a/drivers/scsi/scsi_dh.c +++ b/drivers/scsi/scsi_dh.c @@ -36,9 +36,9 @@ struct scsi_dh_blist { }; static const struct scsi_dh_blist scsi_dh_blist[] = { - {"DGC", "RAID", "clariion" }, - {"DGC", "DISK", "clariion" }, - {"DGC", "VRAID", "clariion" }, + {"DGC", "RAID", "emc" }, + {"DGC", "DISK", "emc" }, + {"DGC", "VRAID", "emc" }, {"COMPAQ", "MSA1000 VOLUME", "hp_sw" }, {"COMPAQ", "HSV110", "hp_sw" }, -- cgit v1.2.3-59-g8ed1b From bcd8f2e94808fcddf6ef3af5f060a36820dcc432 Mon Sep 17 00:00:00 2001 From: Ming Lei Date: Sun, 9 Oct 2016 13:23:27 +0800 Subject: scsi: Fix use-after-free This patch fixes one use-after-free report[1] by KASAN. In __scsi_scan_target(), when a type 31 device is probed, SCSI_SCAN_TARGET_PRESENT is returned and the target will be scanned again. Inside the following scsi_report_lun_scan(), one new scsi_device instance is allocated, and scsi_probe_and_add_lun() is called again to probe the target and still see type 31 device, finally __scsi_remove_device() is called to remove & free the device at the end of scsi_probe_and_add_lun(), so cause use-after-free in scsi_report_lun_scan(). And the following SCSI log can be observed: scsi 0:0:2:0: scsi scan: INQUIRY pass 1 length 36 scsi 0:0:2:0: scsi scan: INQUIRY successful with code 0x0 scsi 0:0:2:0: scsi scan: peripheral device type of 31, no device added scsi 0:0:2:0: scsi scan: Sending REPORT LUNS to (try 0) scsi 0:0:2:0: scsi scan: REPORT LUNS successful (try 0) result 0x0 scsi 0:0:2:0: scsi scan: REPORT LUN scan scsi 0:0:2:0: scsi scan: INQUIRY pass 1 length 36 scsi 0:0:2:0: scsi scan: INQUIRY successful with code 0x0 scsi 0:0:2:0: scsi scan: peripheral device type of 31, no device added BUG: KASAN: use-after-free in __scsi_scan_target+0xbf8/0xe40 at addr ffff88007b44a104 This patch fixes the issue by moving the putting reference at the end of scsi_report_lun_scan(). 
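The ordering problem, in simplified form (sketch only; the actual change is the hunk at the end of this message):

        /* before: the put may drop the last reference, freeing sdev,
         * which is then dereferenced by the created/remove path */
        scsi_device_put(sdev);
        if (scsi_device_created(sdev))
                __scsi_remove_device(sdev);

        /* after: drop the reference only once sdev is no longer used */
        if (scsi_device_created(sdev))
                __scsi_remove_device(sdev);
        scsi_device_put(sdev);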
[1] KASAN report ================================================================== [ 3.274597] PM: Adding info for serio:serio1 [ 3.275127] BUG: KASAN: use-after-free in __scsi_scan_target+0xd87/0xdf0 at addr ffff880254d8c304 [ 3.275653] Read of size 4 by task kworker/u10:0/27 [ 3.275903] CPU: 3 PID: 27 Comm: kworker/u10:0 Not tainted 4.8.0 #2121 [ 3.276258] Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS Ubuntu-1.8.2-1ubuntu1 04/01/2014 [ 3.276797] Workqueue: events_unbound async_run_entry_fn [ 3.277083] ffff880254d8c380 ffff880259a37870 ffffffff94bbc6c1 ffff880078402d80 [ 3.277532] ffff880254d8bb80 ffff880259a37898 ffffffff9459fec1 ffff880259a37930 [ 3.277989] ffff880254d8bb80 ffff880078402d80 ffff880259a37920 ffffffff945a0165 [ 3.278436] Call Trace: [ 3.278528] [] dump_stack+0x65/0x84 [ 3.278797] [] kasan_object_err+0x21/0x70 [ 3.279063] device: 'psaux': device_add [ 3.279616] [] kasan_report_error+0x205/0x500 [ 3.279651] PM: Adding info for No Bus:psaux [ 3.280202] [] ? kfree_const+0x22/0x30 [ 3.280486] [] ? kobject_release+0x119/0x370 [ 3.280805] [] __asan_report_load4_noabort+0x43/0x50 [ 3.281170] [] ? __scsi_scan_target+0xd87/0xdf0 [ 3.281506] [] __scsi_scan_target+0xd87/0xdf0 [ 3.281848] [] ? scsi_add_device+0x30/0x30 [ 3.282156] [] ? pm_runtime_autosuspend_expiration+0x60/0x60 [ 3.282570] [] ? _raw_spin_lock+0x17/0x40 [ 3.282880] [] scsi_scan_channel+0x105/0x160 [ 3.283200] [] scsi_scan_host_selected+0x212/0x2f0 [ 3.283563] [] do_scsi_scan_host+0x1bc/0x250 [ 3.283882] [] do_scan_async+0x41/0x450 [ 3.284173] [] async_run_entry_fn+0xfe/0x610 [ 3.284492] [] ? pwq_dec_nr_in_flight+0x124/0x2a0 [ 3.284876] [] ? preempt_count_add+0x130/0x160 [ 3.285207] [] process_one_work+0x544/0x12d0 [ 3.285526] [] worker_thread+0xd9/0x12f0 [ 3.285844] [] ? process_one_work+0x12d0/0x12d0 [ 3.286182] [] kthread+0x1c5/0x260 [ 3.286443] [] ? __switch_to+0x88d/0x1430 [ 3.286745] [] ? kthread_worker_fn+0x5a0/0x5a0 [ 3.287085] [] ret_from_fork+0x1f/0x40 [ 3.287368] [] ? 
kthread_worker_fn+0x5a0/0x5a0 [ 3.287697] Object at ffff880254d8bb80, in cache kmalloc-2048 size: 2048 [ 3.288064] Allocated: [ 3.288147] PID = 27 [ 3.288218] [] save_stack_trace+0x2b/0x50 [ 3.288531] [] save_stack+0x46/0xd0 [ 3.288806] [] kasan_kmalloc+0xad/0xe0 [ 3.289098] [] __kmalloc+0x13e/0x250 [ 3.289378] [] scsi_alloc_sdev+0xea/0xcf0 [ 3.289701] [] __scsi_scan_target+0xa06/0xdf0 [ 3.290034] [] scsi_scan_channel+0x105/0x160 [ 3.290362] [] scsi_scan_host_selected+0x212/0x2f0 [ 3.290724] [] do_scsi_scan_host+0x1bc/0x250 [ 3.291055] [] do_scan_async+0x41/0x450 [ 3.291354] [] async_run_entry_fn+0xfe/0x610 [ 3.291695] [] process_one_work+0x544/0x12d0 [ 3.292022] [] worker_thread+0xd9/0x12f0 [ 3.292325] [] kthread+0x1c5/0x260 [ 3.292594] [] ret_from_fork+0x1f/0x40 [ 3.292886] Freed: [ 3.292945] PID = 27 [ 3.293016] [] save_stack_trace+0x2b/0x50 [ 3.293327] [] save_stack+0x46/0xd0 [ 3.293600] [] kasan_slab_free+0x71/0xb0 [ 3.293916] [] kfree+0xa2/0x1f0 [ 3.294168] [] scsi_device_dev_release_usercontext+0x50a/0x730 [ 3.294598] [] execute_in_process_context+0xda/0x130 [ 3.294974] [] scsi_device_dev_release+0x1c/0x20 [ 3.295322] [] device_release+0x76/0x1e0 [ 3.295626] [] kobject_release+0x107/0x370 [ 3.295942] [] kobject_put+0x4e/0xa0 [ 3.296222] [] put_device+0x17/0x20 [ 3.296497] [] scsi_device_put+0x7c/0xa0 [ 3.296801] [] __scsi_scan_target+0xd4c/0xdf0 [ 3.297132] [] scsi_scan_channel+0x105/0x160 [ 3.297458] [] scsi_scan_host_selected+0x212/0x2f0 [ 3.297829] [] do_scsi_scan_host+0x1bc/0x250 [ 3.298156] [] do_scan_async+0x41/0x450 [ 3.298453] [] async_run_entry_fn+0xfe/0x610 [ 3.298777] [] process_one_work+0x544/0x12d0 [ 3.299105] [] worker_thread+0xd9/0x12f0 [ 3.299408] [] kthread+0x1c5/0x260 [ 3.299676] [] ret_from_fork+0x1f/0x40 [ 3.299967] Memory state around the buggy address: [ 3.300209] ffff880254d8c200: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb [ 3.300608] ffff880254d8c280: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb [ 3.300986] >ffff880254d8c300: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb [ 3.301408] ^ [ 3.301550] ffff880254d8c380: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc [ 3.301987] ffff880254d8c400: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 [ 3.302396] ================================================================== Cc: Christoph Hellwig Cc: stable@vger.kernel.org Signed-off-by: Ming Lei Reviewed-by: Christoph Hellwig Signed-off-by: Martin K. Petersen --- drivers/scsi/scsi_scan.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index 212e98d940bc..bb9b58e21d95 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c @@ -1470,12 +1470,12 @@ retry: out_err: kfree(lun_data); out: - scsi_device_put(sdev); if (scsi_device_created(sdev)) /* * the sdev we used didn't appear in the report luns scan */ __scsi_remove_device(sdev); + scsi_device_put(sdev); return ret; } -- cgit v1.2.3-59-g8ed1b From 03eb6b8d314e89e94d4f79ee3d3e6596a75bc857 Mon Sep 17 00:00:00 2001 From: Ming Lei Date: Mon, 10 Oct 2016 23:25:33 +0800 Subject: scsi: Remove one useless stack variable The local variable of 'devname' in scsi_report_lun_scan() isn't used any more, so remove it. Cc: Christoph Hellwig Signed-off-by: Ming Lei Reviewed-by: Christoph Hellwig Signed-off-by: Martin K. 
Petersen --- drivers/scsi/scsi_scan.c | 4 ---- 1 file changed, 4 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index bb9b58e21d95..6f7128f49c30 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c @@ -1307,7 +1307,6 @@ static void scsi_sequential_lun_scan(struct scsi_target *starget, static int scsi_report_lun_scan(struct scsi_target *starget, int bflags, enum scsi_scan_mode rescan) { - char devname[64]; unsigned char scsi_cmd[MAX_COMMAND_SIZE]; unsigned int length; u64 lun; @@ -1349,9 +1348,6 @@ static int scsi_report_lun_scan(struct scsi_target *starget, int bflags, } } - sprintf(devname, "host %d channel %d id %d", - shost->host_no, sdev->channel, sdev->id); - /* * Allocate enough to hold the header (the same size as one scsi_lun) * plus the number of luns we are requesting. 511 was the default -- cgit v1.2.3-59-g8ed1b From 4e1bff07d7e7bc9fc3fc62100dcb8f708e1e96d2 Mon Sep 17 00:00:00 2001 From: Sangwon Jee Date: Fri, 30 Sep 2016 15:38:47 -0700 Subject: Input: melfas_mip4 - add ic_name sysfs attribute Add ic_name sysfs attribute for retrieving IC model name. Signed-off-by: Sangwon Jee Signed-off-by: Dmitry Torokhov --- drivers/input/touchscreen/melfas_mip4.c | 38 +++++++++++++++++++++++++++++++-- 1 file changed, 36 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/input/touchscreen/melfas_mip4.c b/drivers/input/touchscreen/melfas_mip4.c index fb5fb9140ca9..552a3773f79d 100644 --- a/drivers/input/touchscreen/melfas_mip4.c +++ b/drivers/input/touchscreen/melfas_mip4.c @@ -157,6 +157,7 @@ struct mip4_ts { char phys[32]; char product_name[16]; + char ic_name[4]; unsigned int max_x; unsigned int max_y; @@ -263,6 +264,18 @@ static int mip4_query_device(struct mip4_ts *ts) dev_dbg(&ts->client->dev, "product name: %.*s\n", (int)sizeof(ts->product_name), ts->product_name); + /* IC name */ + cmd[0] = MIP4_R0_INFO; + cmd[1] = MIP4_R1_INFO_IC_NAME; + error = mip4_i2c_xfer(ts, cmd, sizeof(cmd), + ts->ic_name, sizeof(ts->ic_name)); + if (error) + dev_warn(&ts->client->dev, + "Failed to retrieve IC name: %d\n", error); + else + dev_dbg(&ts->client->dev, "IC name: %.*s\n", + (int)sizeof(ts->ic_name), ts->ic_name); + /* Firmware version */ error = mip4_get_fw_version(ts); if (error) @@ -1326,7 +1339,7 @@ static ssize_t mip4_sysfs_read_hw_version(struct device *dev, * paired with current firmware in the chip. 
*/ count = snprintf(buf, PAGE_SIZE, "%.*s\n", - (int)sizeof(ts->product_name), ts->product_name); + (int)sizeof(ts->product_name), ts->product_name); mutex_unlock(&ts->input->mutex); @@ -1335,9 +1348,30 @@ static ssize_t mip4_sysfs_read_hw_version(struct device *dev, static DEVICE_ATTR(hw_version, S_IRUGO, mip4_sysfs_read_hw_version, NULL); +static ssize_t mip4_sysfs_read_ic_name(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + struct mip4_ts *ts = i2c_get_clientdata(client); + size_t count; + + mutex_lock(&ts->input->mutex); + + count = snprintf(buf, PAGE_SIZE, "%.*s\n", + (int)sizeof(ts->ic_name), ts->ic_name); + + mutex_unlock(&ts->input->mutex); + + return count; +} + +static DEVICE_ATTR(ic_name, S_IRUGO, mip4_sysfs_read_ic_name, NULL); + static struct attribute *mip4_attrs[] = { &dev_attr_fw_version.attr, &dev_attr_hw_version.attr, + &dev_attr_ic_name.attr, &dev_attr_update_fw.attr, NULL, }; @@ -1538,6 +1572,6 @@ static struct i2c_driver mip4_driver = { module_i2c_driver(mip4_driver); MODULE_DESCRIPTION("MELFAS MIP4 Touchscreen"); -MODULE_VERSION("2016.03.12"); +MODULE_VERSION("2016.09.28"); MODULE_AUTHOR("Sangwon Jee "); MODULE_LICENSE("GPL"); -- cgit v1.2.3-59-g8ed1b From 930e19248e9b61da36c967687ca79c4d5f977919 Mon Sep 17 00:00:00 2001 From: Marcos Paulo de Souza Date: Sat, 1 Oct 2016 12:07:35 -0700 Subject: Input: i8042 - skip selftest on ASUS laptops On suspend/resume cycle, selftest is executed to reset i8042 controller. But when this is done in Asus devices, subsequent calls to detect/init functions to elantech driver fails. Skipping selftest fixes this problem. An easier step to reproduce this problem is adding i8042.reset=1 as a kernel parameter. On Asus laptops, it'll make the system to start with the touchpad already stuck, since psmouse_probe forcibly calls the selftest function. This patch was inspired by John Hiesey's change[1], but, since this problem affects a lot of models of Asus, let's avoid running selftests on them. All models affected by this problem: A455LD K401LB K501LB K501LX R409L V502LX X302LA X450LCP X450LD X455LAB X455LDB X455LF Z450LA [1]: https://marc.info/?l=linux-input&m=144312209020616&w=2 Fixes: "ETPS/2 Elantech Touchpad dies after resume from suspend" (https://bugzilla.kernel.org/show_bug.cgi?id=107971) Signed-off-by: Marcos Paulo de Souza Cc: stable@vger.kernel.org Signed-off-by: Dmitry Torokhov --- Documentation/kernel-parameters.txt | 9 +++- drivers/input/serio/i8042-io.h | 2 +- drivers/input/serio/i8042-ip22io.h | 2 +- drivers/input/serio/i8042-ppcio.h | 2 +- drivers/input/serio/i8042-sparcio.h | 2 +- drivers/input/serio/i8042-unicore32io.h | 2 +- drivers/input/serio/i8042-x86ia64io.h | 96 +++++++++++++++++++++++++++++++-- drivers/input/serio/i8042.c | 55 +++++++++++++++---- 8 files changed, 150 insertions(+), 20 deletions(-) (limited to 'drivers') diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index 0b3de80ec8f6..3475b3297064 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -1409,7 +1409,14 @@ bytes respectively. Such letter suffixes can also be entirely omitted. 
i8042.nopnp [HW] Don't use ACPIPnP / PnPBIOS to discover KBD/AUX controllers i8042.notimeout [HW] Ignore timeout condition signalled by controller - i8042.reset [HW] Reset the controller during init and cleanup + i8042.reset [HW] Reset the controller during init, cleanup and + suspend-to-ram transitions, only during s2r + transitions, or never reset + Format: { 1 | Y | y | 0 | N | n } + 1, Y, y: always reset controller + 0, N, n: don't ever reset controller + Default: only on s2r transitions on x86; most other + architectures force reset to be always executed i8042.unlock [HW] Unlock (ignore) the keylock i8042.kbdreset [HW] Reset device connected to KBD port diff --git a/drivers/input/serio/i8042-io.h b/drivers/input/serio/i8042-io.h index a5eed2ade53d..34da81c006b6 100644 --- a/drivers/input/serio/i8042-io.h +++ b/drivers/input/serio/i8042-io.h @@ -81,7 +81,7 @@ static inline int i8042_platform_init(void) return -EBUSY; #endif - i8042_reset = 1; + i8042_reset = I8042_RESET_ALWAYS; return 0; } diff --git a/drivers/input/serio/i8042-ip22io.h b/drivers/input/serio/i8042-ip22io.h index ee1ad27d6ed0..08a1c10a1448 100644 --- a/drivers/input/serio/i8042-ip22io.h +++ b/drivers/input/serio/i8042-ip22io.h @@ -61,7 +61,7 @@ static inline int i8042_platform_init(void) return -EBUSY; #endif - i8042_reset = 1; + i8042_reset = I8042_RESET_ALWAYS; return 0; } diff --git a/drivers/input/serio/i8042-ppcio.h b/drivers/input/serio/i8042-ppcio.h index f708c75d16f1..1aabea43329e 100644 --- a/drivers/input/serio/i8042-ppcio.h +++ b/drivers/input/serio/i8042-ppcio.h @@ -44,7 +44,7 @@ static inline void i8042_write_command(int val) static inline int i8042_platform_init(void) { - i8042_reset = 1; + i8042_reset = I8042_RESET_ALWAYS; return 0; } diff --git a/drivers/input/serio/i8042-sparcio.h b/drivers/input/serio/i8042-sparcio.h index afcd1c1a05b2..6231d63860ee 100644 --- a/drivers/input/serio/i8042-sparcio.h +++ b/drivers/input/serio/i8042-sparcio.h @@ -130,7 +130,7 @@ static int __init i8042_platform_init(void) } } - i8042_reset = 1; + i8042_reset = I8042_RESET_ALWAYS; return 0; } diff --git a/drivers/input/serio/i8042-unicore32io.h b/drivers/input/serio/i8042-unicore32io.h index 73f5cc124a36..455747552f85 100644 --- a/drivers/input/serio/i8042-unicore32io.h +++ b/drivers/input/serio/i8042-unicore32io.h @@ -61,7 +61,7 @@ static inline int i8042_platform_init(void) if (!request_mem_region(I8042_REGION_START, I8042_REGION_SIZE, "i8042")) return -EBUSY; - i8042_reset = 1; + i8042_reset = I8042_RESET_ALWAYS; return 0; } diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h index 68f5f4a0f1e7..f4bfb4b2d50a 100644 --- a/drivers/input/serio/i8042-x86ia64io.h +++ b/drivers/input/serio/i8042-x86ia64io.h @@ -510,6 +510,90 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = { { } }; +/* + * On some Asus laptops, just running self tests cause problems. 
+ */ +static const struct dmi_system_id i8042_dmi_noselftest_table[] = { + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "A455LD"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "K401LB"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "K501LB"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "K501LX"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "R409L"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "V502LX"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "X302LA"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "X450LCP"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "X450LD"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "X455LAB"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "X455LDB"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "X455LF"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "Z450LA"), + }, + }, + { } +}; static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = { { /* MSI Wind U-100 */ @@ -1072,12 +1156,18 @@ static int __init i8042_platform_init(void) return retval; #if defined(__ia64__) - i8042_reset = true; + i8042_reset = I8042_RESET_ALWAYS; #endif #ifdef CONFIG_X86 - if (dmi_check_system(i8042_dmi_reset_table)) - i8042_reset = true; + /* Honor module parameter when value is not default */ + if (i8042_reset == I8042_RESET_DEFAULT) { + if (dmi_check_system(i8042_dmi_reset_table)) + i8042_reset = I8042_RESET_ALWAYS; + + if (dmi_check_system(i8042_dmi_noselftest_table)) + i8042_reset = I8042_RESET_NEVER; + } if (dmi_check_system(i8042_dmi_noloop_table)) i8042_noloop = true; diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c index b4d34086e73f..674a760f5221 100644 --- a/drivers/input/serio/i8042.c +++ b/drivers/input/serio/i8042.c @@ -48,9 +48,39 @@ static bool i8042_unlock; module_param_named(unlock, i8042_unlock, bool, 0); MODULE_PARM_DESC(unlock, "Ignore keyboard lock."); -static bool i8042_reset; -module_param_named(reset, i8042_reset, bool, 0); -MODULE_PARM_DESC(reset, "Reset controller during init and cleanup."); +enum i8042_controller_reset_mode { + I8042_RESET_NEVER, + I8042_RESET_ALWAYS, + I8042_RESET_ON_S2RAM, +#define I8042_RESET_DEFAULT I8042_RESET_ON_S2RAM +}; +static enum i8042_controller_reset_mode i8042_reset = I8042_RESET_DEFAULT; +static int i8042_set_reset(const char *val, const struct kernel_param *kp) +{ + enum i8042_controller_reset_mode *arg = kp->arg; + int error; + bool reset; + + if (val) { + error = kstrtobool(val, &reset); + if (error) + return error; + } else { + reset = true; + } + + *arg = reset ? 
I8042_RESET_ALWAYS : I8042_RESET_NEVER; + return 0; +} + +static const struct kernel_param_ops param_ops_reset_param = { + .flags = KERNEL_PARAM_OPS_FL_NOARG, + .set = i8042_set_reset, +}; +#define param_check_reset_param(name, p) \ + __param_check(name, p, enum i8042_controller_reset_mode) +module_param_named(reset, i8042_reset, reset_param, 0); +MODULE_PARM_DESC(reset, "Reset controller on resume, cleanup or both"); static bool i8042_direct; module_param_named(direct, i8042_direct, bool, 0); @@ -1019,7 +1049,7 @@ static int i8042_controller_init(void) * Reset the controller and reset CRT to the original value set by BIOS. */ -static void i8042_controller_reset(bool force_reset) +static void i8042_controller_reset(bool s2r_wants_reset) { i8042_flush(); @@ -1044,8 +1074,10 @@ static void i8042_controller_reset(bool force_reset) * Reset the controller if requested. */ - if (i8042_reset || force_reset) + if (i8042_reset == I8042_RESET_ALWAYS || + (i8042_reset == I8042_RESET_ON_S2RAM && s2r_wants_reset)) { i8042_controller_selftest(); + } /* * Restore the original control register setting. @@ -1110,7 +1142,7 @@ static void i8042_dritek_enable(void) * before suspending. */ -static int i8042_controller_resume(bool force_reset) +static int i8042_controller_resume(bool s2r_wants_reset) { int error; @@ -1118,7 +1150,8 @@ static int i8042_controller_resume(bool force_reset) if (error) return error; - if (i8042_reset || force_reset) { + if (i8042_reset == I8042_RESET_ALWAYS || + (i8042_reset == I8042_RESET_ON_S2RAM && s2r_wants_reset)) { error = i8042_controller_selftest(); if (error) return error; @@ -1195,7 +1228,7 @@ static int i8042_pm_resume_noirq(struct device *dev) static int i8042_pm_resume(struct device *dev) { - bool force_reset; + bool want_reset; int i; for (i = 0; i < I8042_NUM_PORTS; i++) { @@ -1218,9 +1251,9 @@ static int i8042_pm_resume(struct device *dev) * off control to the platform firmware, otherwise we can simply restore * the mode. */ - force_reset = pm_resume_via_firmware(); + want_reset = pm_resume_via_firmware(); - return i8042_controller_resume(force_reset); + return i8042_controller_resume(want_reset); } static int i8042_pm_thaw(struct device *dev) @@ -1481,7 +1514,7 @@ static int __init i8042_probe(struct platform_device *dev) i8042_platform_device = dev; - if (i8042_reset) { + if (i8042_reset == I8042_RESET_ALWAYS) { error = i8042_controller_selftest(); if (error) return error; -- cgit v1.2.3-59-g8ed1b From 7f6990c830f3e8d703f13d7963acf51936c52ad2 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Wed, 5 Oct 2016 15:29:49 +0200 Subject: cfg80211: let ieee80211_amsdu_to_8023s() take only header-less SKB There's only a single case where has_80211_header is passed as true, which is in mac80211. Given that there's only simple code that needs to be done before calling it, export that function from cfg80211 instead and let mac80211 call it itself. 
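The resulting caller pattern in mac80211 looks roughly like this (abridged from the rx.c hunk below, with the local variable names shortened):

        struct ethhdr ethhdr;

        /* strip the 802.11 header first ... */
        if (ieee80211_data_to_8023_exthdr(skb, &ethhdr,
                                          sdata->vif.addr, sdata->vif.type))
                return RX_DROP_UNUSABLE;

        /* ... then decode the now header-less A-MSDU */
        ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr,
                                 sdata->vif.type, extra_headroom);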
Signed-off-by: Johannes Berg --- .../net/wireless/marvell/mwifiex/11n_rxreorder.c | 2 +- include/net/cfg80211.h | 31 +++++++++++++++------- net/mac80211/rx.c | 8 +++++- net/wireless/util.c | 24 ++++------------- 4 files changed, 35 insertions(+), 30 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c index 94480123efa3..e9f462e3730b 100644 --- a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c +++ b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c @@ -45,7 +45,7 @@ static int mwifiex_11n_dispatch_amsdu_pkt(struct mwifiex_private *priv, skb_trim(skb, le16_to_cpu(local_rx_pd->rx_pkt_length)); ieee80211_amsdu_to_8023s(skb, &list, priv->curr_addr, - priv->wdev.iftype, 0, false); + priv->wdev.iftype, 0); while (!skb_queue_empty(&list)) { struct rx_packet_hdr *rx_hdr; diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index fe78f02a242e..f372eadd1963 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -4039,6 +4039,18 @@ unsigned int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr); * that do not do the 802.11/802.3 conversion on the device. */ +/** + * ieee80211_data_to_8023_exthdr - convert an 802.11 data frame to 802.3 + * @skb: the 802.11 data frame + * @ehdr: pointer to a &struct ethhdr that will get the header, instead + * of it being pushed into the SKB + * @addr: the device MAC address + * @iftype: the virtual interface type + * Return: 0 on success. Non-zero on error. + */ +int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr, + const u8 *addr, enum nl80211_iftype iftype); + /** * ieee80211_data_to_8023 - convert an 802.11 data frame to 802.3 * @skb: the 802.11 data frame @@ -4046,8 +4058,11 @@ unsigned int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr); * @iftype: the virtual interface type * Return: 0 on success. Non-zero on error. */ -int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr, - enum nl80211_iftype iftype); +static inline int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr, + enum nl80211_iftype iftype) +{ + return ieee80211_data_to_8023_exthdr(skb, NULL, addr, iftype); +} /** * ieee80211_data_from_8023 - convert an 802.3 frame to 802.11 @@ -4065,22 +4080,20 @@ int ieee80211_data_from_8023(struct sk_buff *skb, const u8 *addr, /** * ieee80211_amsdu_to_8023s - decode an IEEE 802.11n A-MSDU frame * - * Decode an IEEE 802.11n A-MSDU frame and convert it to a list of - * 802.3 frames. The @list will be empty if the decode fails. The - * @skb is consumed after the function returns. + * Decode an IEEE 802.11 A-MSDU and convert it to a list of 802.3 frames. + * The @list will be empty if the decode fails. The @skb must be fully + * header-less before being passed in here; it is freed in this function. * - * @skb: The input IEEE 802.11n A-MSDU frame. + * @skb: The input A-MSDU frame without any headers. * @list: The output list of 802.3 frames. It must be allocated and * initialized by by the caller. * @addr: The device MAC address. * @iftype: The device interface type. * @extra_headroom: The hardware extra headroom for SKBs in the @list. - * @has_80211_header: Set it true if SKB is with IEEE 802.11 header. 
*/ void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list, const u8 *addr, enum nl80211_iftype iftype, - const unsigned int extra_headroom, - bool has_80211_header); + const unsigned int extra_headroom); /** * cfg80211_classify8021d - determine the 802.1p/1d tag for a data frame diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 3ac2f1cba317..de2d75fa5c51 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -2298,6 +2298,7 @@ ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx) __le16 fc = hdr->frame_control; struct sk_buff_head frame_list; struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); + struct ethhdr ethhdr; if (unlikely(!ieee80211_is_data(fc))) return RX_CONTINUE; @@ -2329,9 +2330,14 @@ ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx) skb->dev = dev; __skb_queue_head_init(&frame_list); + if (ieee80211_data_to_8023_exthdr(skb, ðhdr, + rx->sdata->vif.addr, + rx->sdata->vif.type)) + return RX_DROP_UNUSABLE; + ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr, rx->sdata->vif.type, - rx->local->hw.extra_tx_headroom, true); + rx->local->hw.extra_tx_headroom); while (!skb_queue_empty(&frame_list)) { rx->skb = __skb_dequeue(&frame_list); diff --git a/net/wireless/util.c b/net/wireless/util.c index 8edce22d1b93..e36ede840b88 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c @@ -420,8 +420,8 @@ unsigned int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr) } EXPORT_SYMBOL(ieee80211_get_mesh_hdrlen); -static int __ieee80211_data_to_8023(struct sk_buff *skb, struct ethhdr *ehdr, - const u8 *addr, enum nl80211_iftype iftype) +int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr, + const u8 *addr, enum nl80211_iftype iftype) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; struct { @@ -525,13 +525,7 @@ static int __ieee80211_data_to_8023(struct sk_buff *skb, struct ethhdr *ehdr, return 0; } - -int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr, - enum nl80211_iftype iftype) -{ - return __ieee80211_data_to_8023(skb, NULL, addr, iftype); -} -EXPORT_SYMBOL(ieee80211_data_to_8023); +EXPORT_SYMBOL(ieee80211_data_to_8023_exthdr); int ieee80211_data_from_8023(struct sk_buff *skb, const u8 *addr, enum nl80211_iftype iftype, @@ -745,25 +739,18 @@ __ieee80211_amsdu_copy(struct sk_buff *skb, unsigned int hlen, void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list, const u8 *addr, enum nl80211_iftype iftype, - const unsigned int extra_headroom, - bool has_80211_header) + const unsigned int extra_headroom) { unsigned int hlen = ALIGN(extra_headroom, 4); struct sk_buff *frame = NULL; u16 ethertype; u8 *payload; - int offset = 0, remaining, err; + int offset = 0, remaining; struct ethhdr eth; bool reuse_frag = skb->head_frag && !skb_has_frag_list(skb); bool reuse_skb = false; bool last = false; - if (has_80211_header) { - err = __ieee80211_data_to_8023(skb, ð, addr, iftype); - if (err) - goto out; - } - while (!last) { unsigned int subframe_len; int len; @@ -819,7 +806,6 @@ void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list, purge: __skb_queue_purge(list); - out: dev_kfree_skb(skb); } EXPORT_SYMBOL(ieee80211_amsdu_to_8023s); -- cgit v1.2.3-59-g8ed1b From 8b935ee2ea17db720d70f6420f77f594c0c93f75 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Wed, 5 Oct 2016 16:17:01 +0200 Subject: cfg80211: add ability to check DA/SA in A-MSDU decapsulation We should not accept arbitrary DA/SA inside A-MSDUs, it could be used to circumvent protections, 
like allowing a station to send frames and make them seem to come from somewhere else. Add the necessary infrastructure in cfg80211 to allow such checks, in further patches we'll start using them. Signed-off-by: Johannes Berg --- drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c | 2 +- include/net/cfg80211.h | 5 ++++- net/mac80211/rx.c | 3 ++- net/wireless/util.c | 14 ++++++++++++-- 4 files changed, 19 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c index e9f462e3730b..274dd5a1574a 100644 --- a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c +++ b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c @@ -45,7 +45,7 @@ static int mwifiex_11n_dispatch_amsdu_pkt(struct mwifiex_private *priv, skb_trim(skb, le16_to_cpu(local_rx_pd->rx_pkt_length)); ieee80211_amsdu_to_8023s(skb, &list, priv->curr_addr, - priv->wdev.iftype, 0); + priv->wdev.iftype, 0, NULL, NULL); while (!skb_queue_empty(&list)) { struct rx_packet_hdr *rx_hdr; diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index f372eadd1963..7df600c463eb 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -4090,10 +4090,13 @@ int ieee80211_data_from_8023(struct sk_buff *skb, const u8 *addr, * @addr: The device MAC address. * @iftype: The device interface type. * @extra_headroom: The hardware extra headroom for SKBs in the @list. + * @check_da: DA to check in the inner ethernet header, or NULL + * @check_sa: SA to check in the inner ethernet header, or NULL */ void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list, const u8 *addr, enum nl80211_iftype iftype, - const unsigned int extra_headroom); + const unsigned int extra_headroom, + const u8 *check_da, const u8 *check_sa); /** * cfg80211_classify8021d - determine the 802.1p/1d tag for a data frame diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index de2d75fa5c51..cf53fe1a0aa2 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -2337,7 +2337,8 @@ ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx) ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr, rx->sdata->vif.type, - rx->local->hw.extra_tx_headroom); + rx->local->hw.extra_tx_headroom, + NULL, NULL); while (!skb_queue_empty(&frame_list)) { rx->skb = __skb_dequeue(&frame_list); diff --git a/net/wireless/util.c b/net/wireless/util.c index e36ede840b88..5ea12afc7706 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c @@ -739,7 +739,8 @@ __ieee80211_amsdu_copy(struct sk_buff *skb, unsigned int hlen, void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list, const u8 *addr, enum nl80211_iftype iftype, - const unsigned int extra_headroom) + const unsigned int extra_headroom, + const u8 *check_da, const u8 *check_sa) { unsigned int hlen = ALIGN(extra_headroom, 4); struct sk_buff *frame = NULL; @@ -767,8 +768,17 @@ void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list, goto purge; offset += sizeof(struct ethhdr); - /* reuse skb for the last subframe */ last = remaining <= subframe_len + padding; + + /* FIXME: should we really accept multicast DA? 
*/ + if ((check_da && !is_multicast_ether_addr(eth.h_dest) && + !ether_addr_equal(check_da, eth.h_dest)) || + (check_sa && !ether_addr_equal(check_sa, eth.h_source))) { + offset += len + padding; + continue; + } + + /* reuse skb for the last subframe */ if (!skb_is_nonlinear(skb) && !reuse_frag && last) { skb_pull(skb, offset); frame = skb; -- cgit v1.2.3-59-g8ed1b From 40c30bbf3377babc4d6bb16b699184236a8bfa27 Mon Sep 17 00:00:00 2001 From: Brian Masney Date: Tue, 11 Oct 2016 19:28:02 -0400 Subject: platform/x86: ideapad-laptop: Add Lenovo Yoga 910-13IKB to no_hw_rfkill dmi list The Lenovo Yoga 910-13IKB does not have a hw rfkill switch, and trying to read the hw rfkill switch through the ideapad module causes it to always report as blocked. This commit adds the Lenovo Yoga 910-13IKB to the no_hw_rfkill dmi list, fixing the WiFi breakage. Signed-off-by: Brian Masney Signed-off-by: Darren Hart --- drivers/platform/x86/ideapad-laptop.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'drivers') diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c index d1a091b93192..a2323941e677 100644 --- a/drivers/platform/x86/ideapad-laptop.c +++ b/drivers/platform/x86/ideapad-laptop.c @@ -933,6 +933,13 @@ static const struct dmi_system_id no_hw_rfkill_list[] = { DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 900"), }, }, + { + .ident = "Lenovo YOGA 910-13IKB", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 910-13IKB"), + }, + }, {} }; -- cgit v1.2.3-59-g8ed1b From 1cd713762e4cd5378a488fd6eaa7507bf030a832 Mon Sep 17 00:00:00 2001 From: Oleksij Rempel Date: Wed, 29 Jun 2016 16:40:01 +0200 Subject: rtc: rv8803: set VDETOFF and SWOFF via device tree There might be designs where the power supply circuitry requires VDETOFF and SWOFF to be set. Otherwise the RTC detects a power loss. Add a device tree interface for this.
Signed-off-by: Carsten Resch Signed-off-by: Dirk Behme Signed-off-by: Oleksij Rempel Signed-off-by: Alexandre Belloni --- drivers/rtc/rtc-rv8803.c | 50 ++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 48 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/rtc/rtc-rv8803.c b/drivers/rtc/rtc-rv8803.c index 9a2f6a95d5a7..f9277e536f7e 100644 --- a/drivers/rtc/rtc-rv8803.c +++ b/drivers/rtc/rtc-rv8803.c @@ -52,11 +52,21 @@ #define RV8803_CTRL_TIE BIT(4) #define RV8803_CTRL_UIE BIT(5) +#define RX8900_BACKUP_CTRL 0x18 +#define RX8900_FLAG_SWOFF BIT(2) +#define RX8900_FLAG_VDETOFF BIT(3) + +enum rv8803_type { + rv_8803, + rx_8900 +}; + struct rv8803_data { struct i2c_client *client; struct rtc_device *rtc; struct mutex flags_lock; u8 ctrl; + enum rv8803_type type; }; static int rv8803_read_reg(const struct i2c_client *client, u8 reg) @@ -497,6 +507,35 @@ static struct rtc_class_ops rv8803_rtc_ops = { .ioctl = rv8803_ioctl, }; +static int rx8900_trickle_charger_init(struct rv8803_data *rv8803) +{ + struct i2c_client *client = rv8803->client; + struct device_node *node = client->dev.of_node; + int err; + u8 flags; + + if (!node) + return 0; + + if (rv8803->type != rx_8900) + return 0; + + err = i2c_smbus_read_byte_data(rv8803->client, RX8900_BACKUP_CTRL); + if (err < 0) + return err; + + flags = ~(RX8900_FLAG_VDETOFF | RX8900_FLAG_SWOFF) & (u8)err; + + if (of_property_read_bool(node, "epson,vdet-disable")) + flags |= RX8900_FLAG_VDETOFF; + + if (of_property_read_bool(node, "trickle-diode-disable")) + flags |= RX8900_FLAG_SWOFF; + + return i2c_smbus_write_byte_data(rv8803->client, RX8900_BACKUP_CTRL, + flags); +} + static int rv8803_probe(struct i2c_client *client, const struct i2c_device_id *id) { @@ -517,6 +556,7 @@ static int rv8803_probe(struct i2c_client *client, mutex_init(&rv8803->flags_lock); rv8803->client = client; + rv8803->type = id->driver_data; i2c_set_clientdata(client, rv8803); flags = rv8803_read_reg(client, RV8803_FLAG); @@ -558,6 +598,12 @@ static int rv8803_probe(struct i2c_client *client, if (err) return err; + err = rx8900_trickle_charger_init(rv8803); + if (err) { + dev_err(&client->dev, "failed to init charger\n"); + return err; + } + err = device_create_bin_file(&client->dev, &rv8803_nvram_attr); if (err) return err; @@ -575,8 +621,8 @@ static int rv8803_remove(struct i2c_client *client) } static const struct i2c_device_id rv8803_id[] = { - { "rv8803", 0 }, - { "rx8900", 0 }, + { "rv8803", rv_8803 }, + { "rx8900", rx_8900 }, { } }; MODULE_DEVICE_TABLE(i2c, rv8803_id); -- cgit v1.2.3-59-g8ed1b From 0df1e4f5e0e831670f43bd198623b303ba09cbc0 Mon Sep 17 00:00:00 2001 From: Keith Busch Date: Tue, 11 Oct 2016 13:31:58 -0400 Subject: nvme: Stop probing a removed device There is no reason the nvme controller can ever return all 1's from reading the CSTS register. This patch returns an error if we observe that status. Without this, we may incorrectly proceed with controller initialization and unnecessarily rely on error handling to clean this up.
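A hedged illustration of the underlying idea, not additional code on top of the patch: config and MMIO reads from a surprise-removed PCIe function return all ones, so a register that can never legitimately read back as all ones doubles as a removal check.

/* Illustrative only: ~0 from CSTS means the controller is gone. */
u32 csts = readl(dev->bar + NVME_REG_CSTS);

if (csts == ~0)
        return -ENODEV;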
Signed-off-by: Keith Busch Signed-off-by: Jens Axboe --- drivers/nvme/host/core.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers') diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 329381a28edf..2a57f5ede386 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -1086,6 +1086,8 @@ static int nvme_wait_ready(struct nvme_ctrl *ctrl, u64 cap, bool enabled) int ret; while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) { + if (csts == ~0) + return -ENODEV; if ((csts & NVME_CSTS_RDY) == bit) break; -- cgit v1.2.3-59-g8ed1b From 7065906096273b39b90a512a7170a6697ed94b23 Mon Sep 17 00:00:00 2001 From: Keith Busch Date: Wed, 12 Oct 2016 09:22:16 -0600 Subject: nvme: Delete created IO queues on reset The driver was decrementing the online_queues prior to attempting to delete those IO queues, so the driver ended up not requesting the controller delete any. This patch saves the online_queues prior to suspending them, and adds that parameter for deleting io queues. Fixes: c21377f8 ("nvme: Suspend all queues before deletion") Signed-off-by: Keith Busch Reviewed-by: Christoph Hellwig Signed-off-by: Jens Axboe --- drivers/nvme/host/pci.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 68ef1875e8a8..94da3a47775c 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -1510,9 +1510,9 @@ static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode) return 0; } -static void nvme_disable_io_queues(struct nvme_dev *dev) +static void nvme_disable_io_queues(struct nvme_dev *dev, int queues) { - int pass, queues = dev->online_queues - 1; + int pass; unsigned long timeout; u8 opcode = nvme_admin_delete_sq; @@ -1648,7 +1648,7 @@ static void nvme_pci_disable(struct nvme_dev *dev) static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown) { - int i; + int i, queues; u32 csts = -1; del_timer_sync(&dev->watchdog_timer); @@ -1659,6 +1659,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown) csts = readl(dev->bar + NVME_REG_CSTS); } + queues = dev->online_queues - 1; for (i = dev->queue_count - 1; i > 0; i--) nvme_suspend_queue(dev->queues[i]); @@ -1670,7 +1671,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown) if (dev->queue_count) nvme_suspend_queue(dev->queues[0]); } else { - nvme_disable_io_queues(dev); + nvme_disable_io_queues(dev, queues); nvme_disable_admin_queue(dev, shutdown); } nvme_pci_disable(dev); -- cgit v1.2.3-59-g8ed1b From c5f6ce97c12104668784ee17fb927c52a944d3d8 Mon Sep 17 00:00:00 2001 From: Keith Busch Date: Wed, 5 Oct 2016 16:32:45 -0400 Subject: nvme: don't schedule multiple resets The queue_work only fails if the work is pending, but not yet running. If the work is running, the work item would get requeued, triggering a double reset. If the first reset fails for any reason, the second reset triggers: WARN_ON(dev->ctrl.state == NVME_CTRL_RESETTING) Hitting that schedules controller deletion for a second time, which potentially takes a reference on the device that is being deleted. If the reset occurs at the same time as a hot removal event, this causes a double-free. This patch has the reset helper function check if the work is busy prior to queueing, and changes all places that schedule resets to use this function. Since most users don't want to sync with that work, the "flush_work" is moved to the only caller that wants to sync. 
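For readability, here is the resulting pattern assembled in one place from the hunks below; it is a sketch of what the patch ends up with, not extra code.

/* Queue a reset only if one is not already pending or running. */
static int nvme_reset(struct nvme_dev *dev)
{
        if (!dev->ctrl.admin_q || blk_queue_dying(dev->ctrl.admin_q))
                return -ENODEV;
        if (work_busy(&dev->reset_work))
                return -ENODEV;
        if (!queue_work(nvme_workq, &dev->reset_work))
                return -EBUSY;
        return 0;
}

/* The only caller that needs synchronous behaviour flushes the work. */
static int nvme_pci_reset_ctrl(struct nvme_ctrl *ctrl)
{
        struct nvme_dev *dev = to_nvme_dev(ctrl);
        int ret = nvme_reset(dev);

        if (!ret)
                flush_work(&dev->reset_work);
        return ret;
}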
Signed-off-by: Keith Busch Reviewed-by: Sagi Grimberg Reviewed-by: Christoph Hellwig Signed-off-by: Jens Axboe --- drivers/nvme/host/pci.c | 22 +++++++++++++--------- 1 file changed, 13 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 94da3a47775c..12357d616eeb 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -892,7 +892,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved) "I/O %d QID %d timeout, reset controller\n", req->tag, nvmeq->qid); nvme_dev_disable(dev, false); - queue_work(nvme_workq, &dev->reset_work); + nvme_reset(dev); /* * Mark the request as handled, since the inline shutdown @@ -1290,7 +1290,7 @@ static void nvme_watchdog_timer(unsigned long data) /* Skip controllers under certain specific conditions. */ if (nvme_should_reset(dev, csts)) { - if (queue_work(nvme_workq, &dev->reset_work)) + if (!nvme_reset(dev)) dev_warn(dev->dev, "Failed status: 0x%x, reset controller.\n", csts); @@ -1818,11 +1818,10 @@ static int nvme_reset(struct nvme_dev *dev) { if (!dev->ctrl.admin_q || blk_queue_dying(dev->ctrl.admin_q)) return -ENODEV; - + if (work_busy(&dev->reset_work)) + return -ENODEV; if (!queue_work(nvme_workq, &dev->reset_work)) return -EBUSY; - - flush_work(&dev->reset_work); return 0; } @@ -1846,7 +1845,12 @@ static int nvme_pci_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val) static int nvme_pci_reset_ctrl(struct nvme_ctrl *ctrl) { - return nvme_reset(to_nvme_dev(ctrl)); + struct nvme_dev *dev = to_nvme_dev(ctrl); + int ret = nvme_reset(dev); + + if (!ret) + flush_work(&dev->reset_work); + return ret; } static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = { @@ -1940,7 +1944,7 @@ static void nvme_reset_notify(struct pci_dev *pdev, bool prepare) if (prepare) nvme_dev_disable(dev, false); else - queue_work(nvme_workq, &dev->reset_work); + nvme_reset(dev); } static void nvme_shutdown(struct pci_dev *pdev) @@ -2009,7 +2013,7 @@ static int nvme_resume(struct device *dev) struct pci_dev *pdev = to_pci_dev(dev); struct nvme_dev *ndev = pci_get_drvdata(pdev); - queue_work(nvme_workq, &ndev->reset_work); + nvme_reset(ndev); return 0; } #endif @@ -2048,7 +2052,7 @@ static pci_ers_result_t nvme_slot_reset(struct pci_dev *pdev) dev_info(dev->ctrl.device, "restart after slot reset\n"); pci_restore_state(pdev); - queue_work(nvme_workq, &dev->reset_work); + nvme_reset(dev); return PCI_ERS_RESULT_RECOVERED; } -- cgit v1.2.3-59-g8ed1b From 202021c1a63c6ed69b3260e0fe10530c51f1e53e Mon Sep 17 00:00:00 2001 From: Stephen Bates Date: Wed, 5 Oct 2016 20:01:12 -0600 Subject: nvme : Add sysfs entry for NVMe CMBs when appropriate Add a sysfs attribute that contains salient information about the NVMe Controller Memory Buffer when one is present. For now, just display the information about the CMB available from the control registers. We attach the CMB attribute file to the existing nvme_ctrl sysfs group so it can handle the sysfs teardown. 
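A short note on the mechanism, sketched from the hunks below rather than adding anything new: passing NULL as the group name to sysfs_add_file_to_group() places the file under the controller's existing device kobject, so the attribute disappears automatically when that kobject is torn down and no explicit removal call is needed.

/* Illustrative only: attach the attribute to the existing (unnamed)
 * device group; its teardown removes the file for us. */
if (dev->cmbsz) {
        if (sysfs_add_file_to_group(&dev->ctrl.device->kobj,
                                    &dev_attr_cmb.attr, NULL))
                dev_warn(dev->dev,
                         "failed to add sysfs attribute for CMB\n");
}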
Reviewed-by: Sagi Grimberg Reviewed-by: Jay Freyensee Signed-off-by: Stephen Bates Acked-by Jon Derrick: Signed-off-by: Jens Axboe --- drivers/nvme/host/pci.c | 44 +++++++++++++++++++++++++++++++++++--------- 1 file changed, 35 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 12357d616eeb..a7c6e9d74943 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -99,6 +99,7 @@ struct nvme_dev { dma_addr_t cmb_dma_addr; u64 cmb_size; u32 cmbsz; + u32 cmbloc; struct nvme_ctrl ctrl; struct completion ioq_wait; }; @@ -1330,28 +1331,37 @@ static int nvme_create_io_queues(struct nvme_dev *dev) return ret >= 0 ? 0 : ret; } +static ssize_t nvme_cmb_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev)); + + return snprintf(buf, PAGE_SIZE, "cmbloc : x%08x\ncmbsz : x%08x\n", + ndev->cmbloc, ndev->cmbsz); +} +static DEVICE_ATTR(cmb, S_IRUGO, nvme_cmb_show, NULL); + static void __iomem *nvme_map_cmb(struct nvme_dev *dev) { u64 szu, size, offset; - u32 cmbloc; resource_size_t bar_size; struct pci_dev *pdev = to_pci_dev(dev->dev); void __iomem *cmb; dma_addr_t dma_addr; - if (!use_cmb_sqes) - return NULL; - dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ); if (!(NVME_CMB_SZ(dev->cmbsz))) return NULL; + dev->cmbloc = readl(dev->bar + NVME_REG_CMBLOC); - cmbloc = readl(dev->bar + NVME_REG_CMBLOC); + if (!use_cmb_sqes) + return NULL; szu = (u64)1 << (12 + 4 * NVME_CMB_SZU(dev->cmbsz)); size = szu * NVME_CMB_SZ(dev->cmbsz); - offset = szu * NVME_CMB_OFST(cmbloc); - bar_size = pci_resource_len(pdev, NVME_CMB_BIR(cmbloc)); + offset = szu * NVME_CMB_OFST(dev->cmbloc); + bar_size = pci_resource_len(pdev, NVME_CMB_BIR(dev->cmbloc)); if (offset > bar_size) return NULL; @@ -1364,7 +1374,7 @@ static void __iomem *nvme_map_cmb(struct nvme_dev *dev) if (size > bar_size - offset) size = bar_size - offset; - dma_addr = pci_resource_start(pdev, NVME_CMB_BIR(cmbloc)) + offset; + dma_addr = pci_resource_start(pdev, NVME_CMB_BIR(dev->cmbloc)) + offset; cmb = ioremap_wc(dma_addr, size); if (!cmb) return NULL; @@ -1615,9 +1625,25 @@ static int nvme_pci_enable(struct nvme_dev *dev) dev->q_depth); } - if (readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 2)) + /* + * CMBs can currently only exist on >=1.2 PCIe devices. We only + * populate sysfs if a CMB is implemented. Note that we add the + * CMB attribute to the nvme_ctrl kobj which removes the need to remove + * it on exit. Since nvme_dev_attrs_group has no name we can pass + * NULL as final argument to sysfs_add_file_to_group. + */ + + if (readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 2)) { dev->cmb = nvme_map_cmb(dev); + if (dev->cmbsz) { + if (sysfs_add_file_to_group(&dev->ctrl.device->kobj, + &dev_attr_cmb.attr, NULL)) + dev_warn(dev->dev, + "failed to add sysfs attribute for CMB\n"); + } + } + pci_enable_pcie_error_reporting(pdev); pci_save_state(pdev); return 0; -- cgit v1.2.3-59-g8ed1b From 0843e83c1a4aa67e08f424430526c948d591d5f1 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Thu, 6 Oct 2016 14:07:51 +0200 Subject: cpufreq: intel_pstate: Proportional algorithm for Atom The PID algorithm used by the intel_pstate driver tends to drive performance to the minimum for workloads with utilization below the setpoint, which is undesirable, so replace it with a modified "proportional" algorithm on Atom. 
The new algorithm will set the new P-state to be 1.25 times the available maximum times the (frequency-invariant) utilization during the previous sampling period except when the target P-state computed this way is lower than the average P-state during the previous sampling period. In the latter case, it will increase the target by 50% of the difference between it and the average P-state to prevent performance from dropping down too fast in some cases. Signed-off-by: Rafael J. Wysocki Tested-by: Srinivas Pandruvada --- drivers/cpufreq/intel_pstate.c | 22 +++++++++++++++++++++- 1 file changed, 21 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 1c7b91c5805b..6c8c897d0a2d 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -1232,6 +1232,7 @@ static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu) { struct sample *sample = &cpu->sample; int32_t busy_frac, boost; + int target, avg_pstate; busy_frac = div_fp(sample->mperf, sample->tsc); @@ -1242,7 +1243,26 @@ static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu) busy_frac = boost; sample->busy_scaled = busy_frac * 100; - return get_avg_pstate(cpu) - pid_calc(&cpu->pid, sample->busy_scaled); + + target = limits->no_turbo || limits->turbo_disabled ? + cpu->pstate.max_pstate : cpu->pstate.turbo_pstate; + target += target >> 2; + target = mul_fp(target, busy_frac); + if (target < cpu->pstate.min_pstate) + target = cpu->pstate.min_pstate; + + /* + * If the average P-state during the previous cycle was higher than the + * current target, add 50% of the difference to the target to reduce + * possible performance oscillations and offset possible performance + * loss related to moving the workload from one CPU to another within + * a package/module. + */ + avg_pstate = get_avg_pstate(cpu); + if (avg_pstate > target) + target += (avg_pstate - target) >> 1; + + return target; } static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu) -- cgit v1.2.3-59-g8ed1b From 3954517e2f083c8aeb52bfd467b7b2c164232ffc Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Tue, 11 Oct 2016 23:07:38 +0200 Subject: cpufreq: intel_pstate: Fix struct pstate_adjust_policy kerneldoc It looks like the name of struct pstate_adjust_policy was updated without updating its kerneldoc comment accordingly, so fix that mistake. Signed-off-by: Rafael J. Wysocki --- drivers/cpufreq/intel_pstate.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 6c8c897d0a2d..f535f8123258 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -225,7 +225,7 @@ struct cpudata { static struct cpudata **all_cpu_data; /** - * struct pid_adjust_policy - Stores static PID configuration data + * struct pstate_adjust_policy - Stores static PID configuration data * @sample_rate_ms: PID calculation sample rate in ms * @sample_rate_ns: Sample rate calculation in ns * @deadband: PID deadband -- cgit v1.2.3-59-g8ed1b From e311404f7925f6879817ebf471651c0bb5935604 Mon Sep 17 00:00:00 2001 From: Juergen Gross Date: Wed, 12 Oct 2016 13:11:45 +0200 Subject: ACPI / PAD: don't register acpi_pad driver if running as Xen dom0 When running as Xen dom0 a special processor_aggregator driver is needed. Don't register the standard driver in this case. 
Without that check an error message: "Error: Driver 'processor_aggregator' is already registered, aborting..." will be displayed. Signed-off-by: Juergen Gross [ rjw: Minor fixups ] Signed-off-by: Rafael J. Wysocki --- drivers/acpi/acpi_pad.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'drivers') diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c index 8ea8211b2d58..eb76a4c10dbf 100644 --- a/drivers/acpi/acpi_pad.c +++ b/drivers/acpi/acpi_pad.c @@ -26,6 +26,7 @@ #include #include #include +#include #define ACPI_PROCESSOR_AGGREGATOR_CLASS "acpi_pad" #define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator" @@ -477,6 +478,10 @@ static struct acpi_driver acpi_pad_driver = { static int __init acpi_pad_init(void) { + /* Xen ACPI PAD is used when running as Xen Dom0. */ + if (xen_initial_domain()) + return -ENODEV; + power_saving_mwait_init(); if (power_saving_mwait_eax == 0) return -EINVAL; -- cgit v1.2.3-59-g8ed1b From 40492f60794aaf32576cb42d9af86eed785a6e63 Mon Sep 17 00:00:00 2001 From: Grazvydas Ignotas Date: Sun, 9 Oct 2016 20:28:19 +0300 Subject: drm/amdgpu: use .early_unregister hook to remove DP AUX i2c When DisplayPort AUX channel i2c adapter is registered, drm_connector's kdev member is used as a parent, so we get sysfs structure like: /drm/card1/card1-DP-2/i2c-12 Because of that, there is a problem when drm core (and not the driver) calls drm_connector_unregister(), it removes parent sysfs entries ('card1-DP-2' in our example) while the i2c adapter is still registered. Later we get a WARN when we try to unregister the i2c adapter: WARNING: CPU: 3 PID: 1374 at fs/sysfs/group.c:243 sysfs_remove_group+0x14c/0x150 sysfs group ffffffff82911e40 not found for kobject 'i2c-12' To fix it, we can use the .early_unregister hook to unregister the i2c adapter before drm_connector's sysfs is torn down. 
Signed-off-by: Grazvydas Ignotas Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c index 2e3a0543760d..e3281d4e3e41 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c @@ -765,7 +765,7 @@ amdgpu_connector_lvds_detect(struct drm_connector *connector, bool force) return ret; } -static void amdgpu_connector_destroy(struct drm_connector *connector) +static void amdgpu_connector_unregister(struct drm_connector *connector) { struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); @@ -773,6 +773,12 @@ static void amdgpu_connector_destroy(struct drm_connector *connector) drm_dp_aux_unregister(&amdgpu_connector->ddc_bus->aux); amdgpu_connector->ddc_bus->has_aux = false; } +} + +static void amdgpu_connector_destroy(struct drm_connector *connector) +{ + struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); + amdgpu_connector_free_edid(connector); kfree(amdgpu_connector->con_priv); drm_connector_unregister(connector); @@ -826,6 +832,7 @@ static const struct drm_connector_funcs amdgpu_connector_lvds_funcs = { .dpms = drm_helper_connector_dpms, .detect = amdgpu_connector_lvds_detect, .fill_modes = drm_helper_probe_single_connector_modes, + .early_unregister = amdgpu_connector_unregister, .destroy = amdgpu_connector_destroy, .set_property = amdgpu_connector_set_lcd_property, }; @@ -936,6 +943,7 @@ static const struct drm_connector_funcs amdgpu_connector_vga_funcs = { .dpms = drm_helper_connector_dpms, .detect = amdgpu_connector_vga_detect, .fill_modes = drm_helper_probe_single_connector_modes, + .early_unregister = amdgpu_connector_unregister, .destroy = amdgpu_connector_destroy, .set_property = amdgpu_connector_set_property, }; @@ -1203,6 +1211,7 @@ static const struct drm_connector_funcs amdgpu_connector_dvi_funcs = { .detect = amdgpu_connector_dvi_detect, .fill_modes = drm_helper_probe_single_connector_modes, .set_property = amdgpu_connector_set_property, + .early_unregister = amdgpu_connector_unregister, .destroy = amdgpu_connector_destroy, .force = amdgpu_connector_dvi_force, }; @@ -1493,6 +1502,7 @@ static const struct drm_connector_funcs amdgpu_connector_dp_funcs = { .detect = amdgpu_connector_dp_detect, .fill_modes = drm_helper_probe_single_connector_modes, .set_property = amdgpu_connector_set_property, + .early_unregister = amdgpu_connector_unregister, .destroy = amdgpu_connector_destroy, .force = amdgpu_connector_dvi_force, }; @@ -1502,6 +1512,7 @@ static const struct drm_connector_funcs amdgpu_connector_edp_funcs = { .detect = amdgpu_connector_dp_detect, .fill_modes = drm_helper_probe_single_connector_modes, .set_property = amdgpu_connector_set_lcd_property, + .early_unregister = amdgpu_connector_unregister, .destroy = amdgpu_connector_destroy, .force = amdgpu_connector_dvi_force, }; -- cgit v1.2.3-59-g8ed1b From b0c80bd5d2e317f7596fe2badc1a3379fb3211e5 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 11 Oct 2016 10:44:24 -0400 Subject: drm/radeon: fix up dp aux tear down (v2) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Port the amdgpu fixes from Grazvydas to radeon. v2: drop unrelated whitespace change. 
bug: https://bugs.freedesktop.org/show_bug.cgi?id=98200 Reviewed-and-Tested-by: Michel Dänzer Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/radeon_connectors.c | 17 +++++++++++++++++ drivers/gpu/drm/radeon/radeon_i2c.c | 3 +-- 2 files changed, 18 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 50e96d2c593d..e18839d52e3e 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c @@ -927,6 +927,16 @@ radeon_lvds_detect(struct drm_connector *connector, bool force) return ret; } +static void radeon_connector_unregister(struct drm_connector *connector) +{ + struct radeon_connector *radeon_connector = to_radeon_connector(connector); + + if (radeon_connector->ddc_bus->has_aux) { + drm_dp_aux_unregister(&radeon_connector->ddc_bus->aux); + radeon_connector->ddc_bus->has_aux = false; + } +} + static void radeon_connector_destroy(struct drm_connector *connector) { struct radeon_connector *radeon_connector = to_radeon_connector(connector); @@ -984,6 +994,7 @@ static const struct drm_connector_funcs radeon_lvds_connector_funcs = { .dpms = drm_helper_connector_dpms, .detect = radeon_lvds_detect, .fill_modes = drm_helper_probe_single_connector_modes, + .early_unregister = radeon_connector_unregister, .destroy = radeon_connector_destroy, .set_property = radeon_lvds_set_property, }; @@ -1111,6 +1122,7 @@ static const struct drm_connector_funcs radeon_vga_connector_funcs = { .dpms = drm_helper_connector_dpms, .detect = radeon_vga_detect, .fill_modes = drm_helper_probe_single_connector_modes, + .early_unregister = radeon_connector_unregister, .destroy = radeon_connector_destroy, .set_property = radeon_connector_set_property, }; @@ -1188,6 +1200,7 @@ static const struct drm_connector_funcs radeon_tv_connector_funcs = { .dpms = drm_helper_connector_dpms, .detect = radeon_tv_detect, .fill_modes = drm_helper_probe_single_connector_modes, + .early_unregister = radeon_connector_unregister, .destroy = radeon_connector_destroy, .set_property = radeon_connector_set_property, }; @@ -1519,6 +1532,7 @@ static const struct drm_connector_funcs radeon_dvi_connector_funcs = { .detect = radeon_dvi_detect, .fill_modes = drm_helper_probe_single_connector_modes, .set_property = radeon_connector_set_property, + .early_unregister = radeon_connector_unregister, .destroy = radeon_connector_destroy, .force = radeon_dvi_force, }; @@ -1832,6 +1846,7 @@ static const struct drm_connector_funcs radeon_dp_connector_funcs = { .detect = radeon_dp_detect, .fill_modes = drm_helper_probe_single_connector_modes, .set_property = radeon_connector_set_property, + .early_unregister = radeon_connector_unregister, .destroy = radeon_connector_destroy, .force = radeon_dvi_force, }; @@ -1841,6 +1856,7 @@ static const struct drm_connector_funcs radeon_edp_connector_funcs = { .detect = radeon_dp_detect, .fill_modes = drm_helper_probe_single_connector_modes, .set_property = radeon_lvds_set_property, + .early_unregister = radeon_connector_unregister, .destroy = radeon_connector_destroy, .force = radeon_dvi_force, }; @@ -1850,6 +1866,7 @@ static const struct drm_connector_funcs radeon_lvds_bridge_connector_funcs = { .detect = radeon_dp_detect, .fill_modes = drm_helper_probe_single_connector_modes, .set_property = radeon_lvds_set_property, + .early_unregister = radeon_connector_unregister, .destroy = radeon_connector_destroy, .force = radeon_dvi_force, }; diff --git 
a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c index 021aa005623f..29f7817af821 100644 --- a/drivers/gpu/drm/radeon/radeon_i2c.c +++ b/drivers/gpu/drm/radeon/radeon_i2c.c @@ -982,9 +982,8 @@ void radeon_i2c_destroy(struct radeon_i2c_chan *i2c) { if (!i2c) return; + WARN_ON(i2c->has_aux); i2c_del_adapter(&i2c->adapter); - if (i2c->has_aux) - drm_dp_aux_unregister(&i2c->aux); kfree(i2c); } -- cgit v1.2.3-59-g8ed1b From 9305ee6fe52035f63d70d023235b792ba22107f0 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 11 Oct 2016 10:57:39 -0400 Subject: drm/radeon: fix modeset tear down code MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The ordering caused problems. bug: https://bugs.freedesktop.org/show_bug.cgi?id=98200 Reviewed-and-Tested-by: Michel Dänzer Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/radeon_display.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index b8ab30a7dd6d..cdb8cb568c15 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -1675,20 +1675,20 @@ int radeon_modeset_init(struct radeon_device *rdev) void radeon_modeset_fini(struct radeon_device *rdev) { - radeon_fbdev_fini(rdev); - kfree(rdev->mode_info.bios_hardcoded_edid); - - /* free i2c buses */ - radeon_i2c_fini(rdev); - if (rdev->mode_info.mode_config_initialized) { - radeon_afmt_fini(rdev); drm_kms_helper_poll_fini(rdev->ddev); radeon_hpd_fini(rdev); drm_crtc_force_disable_all(rdev->ddev); + radeon_fbdev_fini(rdev); + radeon_afmt_fini(rdev); drm_mode_config_cleanup(rdev->ddev); rdev->mode_info.mode_config_initialized = false; } + + kfree(rdev->mode_info.bios_hardcoded_edid); + + /* free i2c buses */ + radeon_i2c_fini(rdev); } static bool is_hdtv_mode(const struct drm_display_mode *mode) -- cgit v1.2.3-59-g8ed1b From 6ae81452f9278ba1accdd4152d75061a8349a0f3 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Fri, 7 Oct 2016 16:00:47 -0400 Subject: drm/amdgpu/gfx8: fix CGCG_CGLS handling When setting up the RLC, only disable the CGCG and CGLS bits rather than clearing the entire register to avoid losing the golden settings. 
Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org --- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 6c6ff57b1c95..5b289186dc92 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -4087,14 +4087,21 @@ static int gfx_v8_0_rlc_load_microcode(struct amdgpu_device *adev) static int gfx_v8_0_rlc_resume(struct amdgpu_device *adev) { int r; + u32 tmp; gfx_v8_0_rlc_stop(adev); /* disable CG */ - WREG32(mmRLC_CGCG_CGLS_CTRL, 0); + tmp = RREG32(mmRLC_CGCG_CGLS_CTRL); + tmp &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | + RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK); + WREG32(mmRLC_CGCG_CGLS_CTRL, tmp); if (adev->asic_type == CHIP_POLARIS11 || - adev->asic_type == CHIP_POLARIS10) - WREG32(mmRLC_CGCG_CGLS_CTRL_3D, 0); + adev->asic_type == CHIP_POLARIS10) { + tmp = RREG32(mmRLC_CGCG_CGLS_CTRL_3D); + tmp &= ~0x3; + WREG32(mmRLC_CGCG_CGLS_CTRL_3D, tmp); + } /* disable PG */ WREG32(mmRLC_PG_CNTL, 0); -- cgit v1.2.3-59-g8ed1b From ce199ad690bd0a6ac6bf9e4df2c87b59d76f84da Mon Sep 17 00:00:00 2001 From: Nicolai Hähnle Date: Tue, 4 Oct 2016 09:43:30 +0200 Subject: drm/amdgpu: initialize the context reset_counter in amdgpu_ctx_init MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Ensure that we really only report a GPU reset if one has happened since the creation of the context. Signed-off-by: Nicolai Hähnle Reviewed-by: Christian König Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c index e203e5561107..a5e2fcbef0f0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c @@ -43,6 +43,9 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, struct amdgpu_ctx *ctx) ctx->rings[i].sequence = 1; ctx->rings[i].fences = &ctx->fences[amdgpu_sched_jobs * i]; } + + ctx->reset_counter = atomic_read(&adev->gpu_reset_counter); + /* create context entity for each ring */ for (i = 0; i < adev->num_rings; i++) { struct amdgpu_ring *ring = adev->rings[i]; -- cgit v1.2.3-59-g8ed1b From 113d0f9db7be5a3038d9800ea1dddfb373c2b5a6 Mon Sep 17 00:00:00 2001 From: Marek Olšák Date: Mon, 10 Oct 2016 13:23:25 +0200 Subject: drm/radeon: allow TA_CS_BC_BASE_ADDR on SI MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Required for border colors in compute shaders. 
Reviewed-by: Edward O'Callaghan Reviewed-by: Alex Deucher Reviewed-by: Christian König Signed-off-by: Marek Olšák Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/radeon_drv.c | 3 ++- drivers/gpu/drm/radeon/si.c | 1 + drivers/gpu/drm/radeon/sid.h | 1 + 3 files changed, 4 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 91c8f4339566..00ea0002b539 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -96,9 +96,10 @@ * 2.45.0 - Allow setting shader registers using DMA/COPY packet3 on SI * 2.46.0 - Add PFP_SYNC_ME support on evergreen * 2.47.0 - Add UVD_NO_OP register support + * 2.48.0 - TA_CS_BC_BASE_ADDR allowed on SI */ #define KMS_DRIVER_MAJOR 2 -#define KMS_DRIVER_MINOR 47 +#define KMS_DRIVER_MINOR 48 #define KMS_DRIVER_PATCHLEVEL 0 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); int radeon_driver_unload_kms(struct drm_device *dev); diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index 7ee9aafbdf74..e402be8821c4 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -4431,6 +4431,7 @@ static bool si_vm_reg_valid(u32 reg) case SPI_CONFIG_CNTL: case SPI_CONFIG_CNTL_1: case TA_CNTL_AUX: + case TA_CS_BC_BASE_ADDR: return true; default: DRM_ERROR("Invalid register 0x%x in CS\n", reg); diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h index eb220eecba78..65a911ddd509 100644 --- a/drivers/gpu/drm/radeon/sid.h +++ b/drivers/gpu/drm/radeon/sid.h @@ -1145,6 +1145,7 @@ #define SPI_LB_CU_MASK 0x9354 #define TA_CNTL_AUX 0x9508 +#define TA_CS_BC_BASE_ADDR 0x950C #define CC_RB_BACKEND_DISABLE 0x98F4 #define BACKEND_DISABLE(x) ((x) << 16) -- cgit v1.2.3-59-g8ed1b From b0b00ff16f2715562b7ea0dfa3a9c5b33328c8cb Mon Sep 17 00:00:00 2001 From: Arindam Nath Date: Fri, 7 Oct 2016 19:01:37 +0530 Subject: drm/amd/amdgpu: enable clockgating only after late init MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Sometimes during multiple reboots, the system hangs during bootup. The issue is very random and happens once in around 50 reboots or so. It seems if clockgating is enabled before late init, the GFX engine sometimes does not respond. This patch changes the ordering a little so that both powergating and clockgating are enabled only after late init calls. 
Reviewed-by: Christian König Signed-off-by: Arindam Nath Tested-by: Sunil Uttarwar Reviewed-by: Tom St Denis Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index a58513f271e3..928774f5a7e0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1411,13 +1411,6 @@ static int amdgpu_late_init(struct amdgpu_device *adev) if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_UVD || adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_VCE) continue; - /* enable clockgating to save power */ - r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev, - AMD_CG_STATE_GATE); - if (r) { - DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r); - return r; - } if (adev->ip_blocks[i].funcs->late_init) { r = adev->ip_blocks[i].funcs->late_init((void *)adev); if (r) { @@ -1426,6 +1419,13 @@ static int amdgpu_late_init(struct amdgpu_device *adev) } adev->ip_block_status[i].late_initialized = true; } + /* enable clockgating to save power */ + r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev, + AMD_CG_STATE_GATE); + if (r) { + DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r); + return r; + } } return 0; -- cgit v1.2.3-59-g8ed1b From 4a446d55843fb82fc5bc6c72d27bfc20b6c294c3 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Fri, 7 Oct 2016 14:48:18 -0400 Subject: drm/amdgpu: clarify UVD/VCE special handling for CG MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit UVD and VCE CG are handled specially, however the previous fix for this skipped late init for those blocks rather than just CG. Just protect the CG function call. No functional change since UVD and VCE don't currently utilize a late_init function. 
Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 928774f5a7e0..15afe22ca890 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1408,9 +1408,6 @@ static int amdgpu_late_init(struct amdgpu_device *adev) for (i = 0; i < adev->num_ip_blocks; i++) { if (!adev->ip_block_status[i].valid) continue; - if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_UVD || - adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_VCE) - continue; if (adev->ip_blocks[i].funcs->late_init) { r = adev->ip_blocks[i].funcs->late_init((void *)adev); if (r) { @@ -1419,12 +1416,17 @@ static int amdgpu_late_init(struct amdgpu_device *adev) } adev->ip_block_status[i].late_initialized = true; } - /* enable clockgating to save power */ - r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev, - AMD_CG_STATE_GATE); - if (r) { - DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r); - return r; + /* skip CG for VCE/UVD, it's handled specially */ + if (adev->ip_blocks[i].type != AMD_IP_BLOCK_TYPE_UVD && + adev->ip_blocks[i].type != AMD_IP_BLOCK_TYPE_VCE) { + /* enable clockgating to save power */ + r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev, + AMD_CG_STATE_GATE); + if (r) { + DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n", + adev->ip_blocks[i].funcs->name, r); + return r; + } } } -- cgit v1.2.3-59-g8ed1b From abc4b9a53ea8153e0e028762b22cb213685c52e3 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Wed, 12 Oct 2016 11:50:34 -0700 Subject: acpi_os_vprintf: Use printk_get_level() to avoid unnecessary KERN_CONT acpi_os_vprintf currently always uses a KERN_CONT prefix which may be followed immediately by a proper KERN_. Check if the buffer already has a KERN_ at the start of the buffer and avoid the unnecessary KERN_CONT. Signed-off-by: Joe Perches Signed-off-by: Rafael J. 
Wysocki --- drivers/acpi/osl.c | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c index 4305ee9db4b2..416953a42510 100644 --- a/drivers/acpi/osl.c +++ b/drivers/acpi/osl.c @@ -162,11 +162,18 @@ void acpi_os_vprintf(const char *fmt, va_list args) if (acpi_in_debugger) { kdb_printf("%s", buffer); } else { - printk(KERN_CONT "%s", buffer); + if (printk_get_level(buffer)) + printk("%s", buffer); + else + printk(KERN_CONT "%s", buffer); } #else - if (acpi_debugger_write_log(buffer) < 0) - printk(KERN_CONT "%s", buffer); + if (acpi_debugger_write_log(buffer) < 0) { + if (printk_get_level(buffer)) + printk("%s", buffer); + else + printk(KERN_CONT "%s", buffer); + } #endif } -- cgit v1.2.3-59-g8ed1b From ea893695ec1131a5fed0523ff8094bc6e8723bbe Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Wed, 12 Oct 2016 21:31:40 +0200 Subject: platform/x86: asus-wmi: add SERIO_I8042 dependency A recent bugfix added a call to i8042_install_filter but did not add the dependency, leading to possible link errors: drivers/platform/built-in.o: In function `asus_nb_wmi_quirks': asus-nb-wmi.c:(.text+0x23af): undefined reference to `i8042_install_filter' This adds a dependency on SERIO_I8042||SERIO_I8042=n to indicate that we can build the driver when the i8042 driver is disabled, but it cannot be built-in when that is a loadable module. Fixes: b5643539b825 ("platform/x86: asus-wmi: Filter buggy scan codes on ASUS Q500A") Signed-off-by: Arnd Bergmann Signed-off-by: Darren Hart --- drivers/platform/x86/Kconfig | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index 81b8dcca8891..b8a21d7b25d4 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig @@ -576,6 +576,7 @@ config ASUS_WMI config ASUS_NB_WMI tristate "Asus Notebook WMI Driver" depends on ASUS_WMI + depends on SERIO_I8042 || SERIO_I8042 = n ---help--- This is a driver for newer Asus notebooks. It adds extra features like wireless radio and bluetooth control, leds, hotkeys, backlight... -- cgit v1.2.3-59-g8ed1b From 77da3da0b22a67508eb1cf2b241a1fe852a6cb1a Mon Sep 17 00:00:00 2001 From: Aaron Brice Date: Mon, 10 Oct 2016 11:39:52 -0700 Subject: mmc: sdhci-esdhc-imx: Correct two register accesses - The DMA error interrupt bit is in a different position as compared to the sdhci standard. This is accounted for in many cases, but not handled in the case of clearing the INT_STATUS register by writing a 1 to that location. - The HOST_CONTROL register is very different as compared to the sdhci standard. This is accounted for in the write case, but not when read back out (which it is in the sdhci code). 
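A hedged illustration of why the missing read path matters; the snippet is a typical caller in generic sdhci code, assumed here for illustration rather than taken from this patch. A read-modify-write of HOST_CONTROL only works if the value read back is already translated to the standard SDHCI bit layout, which is what the new read_b hook provides.

/* Illustrative only: generic code flips standard-layout bits; the eSDHC
 * accessors translate on read (new in this patch) and again on write. */
u8 ctrl;

ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
ctrl &= ~SDHCI_CTRL_4BITBUS;
ctrl |= SDHCI_CTRL_8BITBUS;
sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);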
Signed-off-by: Dave Russell Signed-off-by: Aaron Brice Acked-by: Dong Aisheng Acked-by: Adrian Hunter Signed-off-by: Ulf Hansson --- drivers/mmc/host/sdhci-esdhc-imx.c | 23 ++++++++++++++++++++++- 1 file changed, 22 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c index 1f54fd8755c8..7123ef96ed18 100644 --- a/drivers/mmc/host/sdhci-esdhc-imx.c +++ b/drivers/mmc/host/sdhci-esdhc-imx.c @@ -346,7 +346,8 @@ static void esdhc_writel_le(struct sdhci_host *host, u32 val, int reg) struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host); u32 data; - if (unlikely(reg == SDHCI_INT_ENABLE || reg == SDHCI_SIGNAL_ENABLE)) { + if (unlikely(reg == SDHCI_INT_ENABLE || reg == SDHCI_SIGNAL_ENABLE || + reg == SDHCI_INT_STATUS)) { if ((val & SDHCI_INT_CARD_INT) && !esdhc_is_usdhc(imx_data)) { /* * Clear and then set D3CD bit to avoid missing the @@ -555,6 +556,25 @@ static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg) esdhc_clrset_le(host, 0xffff, val, reg); } +static u8 esdhc_readb_le(struct sdhci_host *host, int reg) +{ + u8 ret; + u32 val; + + switch (reg) { + case SDHCI_HOST_CONTROL: + val = readl(host->ioaddr + reg); + + ret = val & SDHCI_CTRL_LED; + ret |= (val >> 5) & SDHCI_CTRL_DMA_MASK; + ret |= (val & ESDHC_CTRL_4BITBUS); + ret |= (val & ESDHC_CTRL_8BITBUS) << 3; + return ret; + } + + return readb(host->ioaddr + reg); +} + static void esdhc_writeb_le(struct sdhci_host *host, u8 val, int reg) { struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); @@ -947,6 +967,7 @@ static void esdhc_set_timeout(struct sdhci_host *host, struct mmc_command *cmd) static struct sdhci_ops sdhci_esdhc_ops = { .read_l = esdhc_readl_le, .read_w = esdhc_readw_le, + .read_b = esdhc_readb_le, .write_l = esdhc_writel_le, .write_w = esdhc_writew_le, .write_b = esdhc_writeb_le, -- cgit v1.2.3-59-g8ed1b From 5e2bd93b8fcac8c0cf83f189d996831fb21f2db3 Mon Sep 17 00:00:00 2001 From: Jérôme de Bretagne Date: Sun, 9 Oct 2016 15:51:05 +0200 Subject: Bluetooth: hci_bcm: Fix autosuspend PM for Lenovo ThinkPad 8 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The ACPI table for the BCM2E55 on the Lenovo ThinkPad 8 is not correct. Set the IRQ polarity for this device correctly, fixing the issue of Bluetooth never resuming after PM autosuspend. Signed-off-by: Jérôme de Bretagne Signed-off-by: Marcel Holtmann --- drivers/bluetooth/hci_bcm.c | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'drivers') diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c index 5ccb90ef0146..8f6c23c20c52 100644 --- a/drivers/bluetooth/hci_bcm.c +++ b/drivers/bluetooth/hci_bcm.c @@ -643,6 +643,14 @@ static const struct dmi_system_id bcm_wrong_irq_dmi_table[] = { }, .driver_data = &acpi_active_low, }, + { /* Handle ThinkPad 8 tablets with BCM2E55 chipset ACPI ID */ + .ident = "Lenovo ThinkPad 8", + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "ThinkPad 8"), + }, + .driver_data = &acpi_active_low, + }, { } }; -- cgit v1.2.3-59-g8ed1b From cf4747d7535a936105f0abe8d8109d3fe339162b Mon Sep 17 00:00:00 2001 From: Larry Finger Date: Wed, 12 Oct 2016 13:54:04 -0500 Subject: rtlwifi: Fix regression caused by commit d86e64768859 In commit d86e64768859 ("rtlwifi: rtl818x: constify local structures"), the configuration struct for most of the drivers was changed to be constant.
The problem is that five of the modified drivers need to be able to update the firmware name based on the exact model of the card. As the file names were stored in one of the members of that struct, these drivers would fail with a kernel BUG splat when they tried to update the firmware name. Rather than reverting the previous commit, I used a suggestion by Johannes Berg and made the firmware file name pointers be local to the routines that update the software variables. The configuration struct of rtl8192cu, which was not touched in the previous patch, is now constantfied. Fixes: d86e64768859 ("rtlwifi: rtl818x: constify local structures") Suggested-by: Johannes Berg Signed-off-by: Larry Finger Cc: Stable # 4.8 Cc: Julia Lawall Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtlwifi/core.c | 2 +- drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c | 8 ++++---- drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c | 13 +++++-------- drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c | 12 ++++++------ drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c | 6 +++--- drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c | 8 ++++---- drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c | 9 +++++---- drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c | 12 +++++------- drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c | 6 +++--- drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c | 18 +++++++++--------- drivers/net/wireless/realtek/rtlwifi/wifi.h | 2 -- 11 files changed, 45 insertions(+), 51 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c index f95760c13c56..8e7f23c11680 100644 --- a/drivers/net/wireless/realtek/rtlwifi/core.c +++ b/drivers/net/wireless/realtek/rtlwifi/core.c @@ -111,7 +111,7 @@ static void rtl_fw_do_work(const struct firmware *firmware, void *context, if (!err) goto found_alt; } - pr_err("Firmware %s not available\n", rtlpriv->cfg->fw_name); + pr_err("Selected firmware is not available\n"); rtlpriv->max_fw_size = 0; return; } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c index e7b11b40e68d..f361808def47 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c @@ -86,6 +86,7 @@ int rtl88e_init_sw_vars(struct ieee80211_hw *hw) struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); u8 tid; + char *fw_name; rtl8188ee_bt_reg_init(hw); rtlpriv->dm.dm_initialgain_enable = 1; @@ -169,10 +170,10 @@ int rtl88e_init_sw_vars(struct ieee80211_hw *hw) return 1; } - rtlpriv->cfg->fw_name = "rtlwifi/rtl8188efw.bin"; + fw_name = "rtlwifi/rtl8188efw.bin"; rtlpriv->max_fw_size = 0x8000; - pr_info("Using firmware %s\n", rtlpriv->cfg->fw_name); - err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name, + pr_info("Using firmware %s\n", fw_name); + err = request_firmware_nowait(THIS_MODULE, 1, fw_name, rtlpriv->io.dev, GFP_KERNEL, hw, rtl_fw_cb); if (err) { @@ -284,7 +285,6 @@ static const struct rtl_hal_cfg rtl88ee_hal_cfg = { .bar_id = 2, .write_readback = true, .name = "rtl88e_pci", - .fw_name = "rtlwifi/rtl8188efw.bin", .ops = &rtl8188ee_hal_ops, .mod_params = &rtl88ee_mod_params, diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c index 87aa209ae325..8b6e37ce3f66 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c +++ 
b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c @@ -96,6 +96,7 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw) struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + char *fw_name = "rtlwifi/rtl8192cfwU.bin"; rtl8192ce_bt_reg_init(hw); @@ -167,15 +168,12 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw) } /* request fw */ - if (IS_VENDOR_UMC_A_CUT(rtlhal->version) && - !IS_92C_SERIAL(rtlhal->version)) - rtlpriv->cfg->fw_name = "rtlwifi/rtl8192cfwU.bin"; - else if (IS_81XXC_VENDOR_UMC_B_CUT(rtlhal->version)) - rtlpriv->cfg->fw_name = "rtlwifi/rtl8192cfwU_B.bin"; + if (IS_81XXC_VENDOR_UMC_B_CUT(rtlhal->version)) + fw_name = "rtlwifi/rtl8192cfwU_B.bin"; rtlpriv->max_fw_size = 0x4000; - pr_info("Using firmware %s\n", rtlpriv->cfg->fw_name); - err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name, + pr_info("Using firmware %s\n", fw_name); + err = request_firmware_nowait(THIS_MODULE, 1, fw_name, rtlpriv->io.dev, GFP_KERNEL, hw, rtl_fw_cb); if (err) { @@ -262,7 +260,6 @@ static const struct rtl_hal_cfg rtl92ce_hal_cfg = { .bar_id = 2, .write_readback = true, .name = "rtl92c_pci", - .fw_name = "rtlwifi/rtl8192cfw.bin", .ops = &rtl8192ce_hal_ops, .mod_params = &rtl92ce_mod_params, diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c index 7c6f7f0d18c6..f953320f0e23 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c @@ -59,6 +59,7 @@ static int rtl92cu_init_sw_vars(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); int err; + char *fw_name; rtlpriv->dm.dm_initialgain_enable = true; rtlpriv->dm.dm_flag = 0; @@ -77,18 +78,18 @@ static int rtl92cu_init_sw_vars(struct ieee80211_hw *hw) } if (IS_VENDOR_UMC_A_CUT(rtlpriv->rtlhal.version) && !IS_92C_SERIAL(rtlpriv->rtlhal.version)) { - rtlpriv->cfg->fw_name = "rtlwifi/rtl8192cufw_A.bin"; + fw_name = "rtlwifi/rtl8192cufw_A.bin"; } else if (IS_81XXC_VENDOR_UMC_B_CUT(rtlpriv->rtlhal.version)) { - rtlpriv->cfg->fw_name = "rtlwifi/rtl8192cufw_B.bin"; + fw_name = "rtlwifi/rtl8192cufw_B.bin"; } else { - rtlpriv->cfg->fw_name = "rtlwifi/rtl8192cufw_TMSC.bin"; + fw_name = "rtlwifi/rtl8192cufw_TMSC.bin"; } /* provide name of alternative file */ rtlpriv->cfg->alt_fw_name = "rtlwifi/rtl8192cufw.bin"; - pr_info("Loading firmware %s\n", rtlpriv->cfg->fw_name); + pr_info("Loading firmware %s\n", fw_name); rtlpriv->max_fw_size = 0x4000; err = request_firmware_nowait(THIS_MODULE, 1, - rtlpriv->cfg->fw_name, rtlpriv->io.dev, + fw_name, rtlpriv->io.dev, GFP_KERNEL, hw, rtl_fw_cb); return err; } @@ -187,7 +188,6 @@ static struct rtl_hal_usbint_cfg rtl92cu_interface_cfg = { static struct rtl_hal_cfg rtl92cu_hal_cfg = { .name = "rtl92c_usb", - .fw_name = "rtlwifi/rtl8192cufw.bin", .ops = &rtl8192cu_hal_ops, .mod_params = &rtl92cu_mod_params, .usb_interface_cfg = &rtl92cu_interface_cfg, diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c index 0538a4d09568..1ebfee18882f 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c @@ -92,6 +92,7 @@ static int rtl92d_init_sw_vars(struct ieee80211_hw *hw) u8 tid; struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + char *fw_name = "rtlwifi/rtl8192defw.bin"; rtlpriv->dm.dm_initialgain_enable = 
true; rtlpriv->dm.dm_flag = 0; @@ -181,10 +182,10 @@ static int rtl92d_init_sw_vars(struct ieee80211_hw *hw) rtlpriv->max_fw_size = 0x8000; pr_info("Driver for Realtek RTL8192DE WLAN interface\n"); - pr_info("Loading firmware file %s\n", rtlpriv->cfg->fw_name); + pr_info("Loading firmware file %s\n", fw_name); /* request fw */ - err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name, + err = request_firmware_nowait(THIS_MODULE, 1, fw_name, rtlpriv->io.dev, GFP_KERNEL, hw, rtl_fw_cb); if (err) { @@ -266,7 +267,6 @@ static const struct rtl_hal_cfg rtl92de_hal_cfg = { .bar_id = 2, .write_readback = true, .name = "rtl8192de", - .fw_name = "rtlwifi/rtl8192defw.bin", .ops = &rtl8192de_hal_ops, .mod_params = &rtl92de_mod_params, diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c index ac299cbe59b0..46b605de36e7 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c @@ -91,6 +91,7 @@ int rtl92ee_init_sw_vars(struct ieee80211_hw *hw) struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); int err = 0; + char *fw_name; rtl92ee_bt_reg_init(hw); rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support; @@ -170,11 +171,11 @@ int rtl92ee_init_sw_vars(struct ieee80211_hw *hw) } /* request fw */ - rtlpriv->cfg->fw_name = "rtlwifi/rtl8192eefw.bin"; + fw_name = "rtlwifi/rtl8192eefw.bin"; rtlpriv->max_fw_size = 0x8000; - pr_info("Using firmware %s\n", rtlpriv->cfg->fw_name); - err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name, + pr_info("Using firmware %s\n", fw_name); + err = request_firmware_nowait(THIS_MODULE, 1, fw_name, rtlpriv->io.dev, GFP_KERNEL, hw, rtl_fw_cb); if (err) { @@ -266,7 +267,6 @@ static const struct rtl_hal_cfg rtl92ee_hal_cfg = { .bar_id = 2, .write_readback = true, .name = "rtl92ee_pci", - .fw_name = "rtlwifi/rtl8192eefw.bin", .ops = &rtl8192ee_hal_ops, .mod_params = &rtl92ee_mod_params, diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c index 5e8e02d5de8a..3e1eaeac4fdc 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c @@ -89,12 +89,13 @@ static void rtl92se_fw_cb(const struct firmware *firmware, void *context) struct ieee80211_hw *hw = context; struct rtl_priv *rtlpriv = rtl_priv(hw); struct rt_firmware *pfirmware = NULL; + char *fw_name = "rtlwifi/rtl8192sefw.bin"; RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD, "Firmware callback routine entered!\n"); complete(&rtlpriv->firmware_loading_complete); if (!firmware) { - pr_err("Firmware %s not available\n", rtlpriv->cfg->fw_name); + pr_err("Firmware %s not available\n", fw_name); rtlpriv->max_fw_size = 0; return; } @@ -117,6 +118,7 @@ static int rtl92s_init_sw_vars(struct ieee80211_hw *hw) struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); int err = 0; u16 earlyrxthreshold = 7; + char *fw_name = "rtlwifi/rtl8192sefw.bin"; rtlpriv->dm.dm_initialgain_enable = true; rtlpriv->dm.dm_flag = 0; @@ -214,9 +216,9 @@ static int rtl92s_init_sw_vars(struct ieee80211_hw *hw) rtlpriv->max_fw_size = RTL8190_MAX_FIRMWARE_CODE_SIZE*2 + sizeof(struct fw_hdr); pr_info("Driver for Realtek RTL8192SE/RTL8191SE\n" - "Loading firmware %s\n", rtlpriv->cfg->fw_name); + "Loading firmware %s\n", fw_name); /* request fw */ - err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name, + err = 
request_firmware_nowait(THIS_MODULE, 1, fw_name, rtlpriv->io.dev, GFP_KERNEL, hw, rtl92se_fw_cb); if (err) { @@ -310,7 +312,6 @@ static const struct rtl_hal_cfg rtl92se_hal_cfg = { .bar_id = 1, .write_readback = false, .name = "rtl92s_pci", - .fw_name = "rtlwifi/rtl8192sefw.bin", .ops = &rtl8192se_hal_ops, .mod_params = &rtl92se_mod_params, diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c index 89c828ad89f4..c51a9e8234e9 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c @@ -94,6 +94,7 @@ int rtl8723e_init_sw_vars(struct ieee80211_hw *hw) struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); int err = 0; + char *fw_name = "rtlwifi/rtl8723fw.bin"; rtl8723e_bt_reg_init(hw); @@ -176,14 +177,12 @@ int rtl8723e_init_sw_vars(struct ieee80211_hw *hw) return 1; } - if (IS_VENDOR_8723_A_CUT(rtlhal->version)) - rtlpriv->cfg->fw_name = "rtlwifi/rtl8723fw.bin"; - else if (IS_81xxC_VENDOR_UMC_B_CUT(rtlhal->version)) - rtlpriv->cfg->fw_name = "rtlwifi/rtl8723fw_B.bin"; + if (IS_81xxC_VENDOR_UMC_B_CUT(rtlhal->version)) + fw_name = "rtlwifi/rtl8723fw_B.bin"; rtlpriv->max_fw_size = 0x6000; - pr_info("Using firmware %s\n", rtlpriv->cfg->fw_name); - err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name, + pr_info("Using firmware %s\n", fw_name); + err = request_firmware_nowait(THIS_MODULE, 1, fw_name, rtlpriv->io.dev, GFP_KERNEL, hw, rtl_fw_cb); if (err) { @@ -280,7 +279,6 @@ static const struct rtl_hal_cfg rtl8723e_hal_cfg = { .bar_id = 2, .write_readback = true, .name = "rtl8723e_pci", - .fw_name = "rtlwifi/rtl8723efw.bin", .ops = &rtl8723e_hal_ops, .mod_params = &rtl8723e_mod_params, .maps[SYS_ISO_CTRL] = REG_SYS_ISO_CTRL, diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c index 20b53f035483..847644d1f5f5 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c @@ -91,6 +91,7 @@ int rtl8723be_init_sw_vars(struct ieee80211_hw *hw) struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + char *fw_name = "rtlwifi/rtl8723befw.bin"; rtl8723be_bt_reg_init(hw); rtlpriv->btcoexist.btc_ops = rtl_btc_get_ops_pointer(); @@ -184,8 +185,8 @@ int rtl8723be_init_sw_vars(struct ieee80211_hw *hw) } rtlpriv->max_fw_size = 0x8000; - pr_info("Using firmware %s\n", rtlpriv->cfg->fw_name); - err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name, + pr_info("Using firmware %s\n", fw_name); + err = request_firmware_nowait(THIS_MODULE, 1, fw_name, rtlpriv->io.dev, GFP_KERNEL, hw, rtl_fw_cb); if (err) { @@ -280,7 +281,6 @@ static const struct rtl_hal_cfg rtl8723be_hal_cfg = { .bar_id = 2, .write_readback = true, .name = "rtl8723be_pci", - .fw_name = "rtlwifi/rtl8723befw.bin", .ops = &rtl8723be_hal_ops, .mod_params = &rtl8723be_mod_params, .maps[SYS_ISO_CTRL] = REG_SYS_ISO_CTRL, diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c index 22f687b1f133..297938e0effd 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c @@ -93,6 +93,7 @@ int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw) struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); struct rtl_mac *mac = 
rtl_mac(rtl_priv(hw)); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + char *fw_name, *wowlan_fw_name; rtl8821ae_bt_reg_init(hw); rtlpriv->btcoexist.btc_ops = rtl_btc_get_ops_pointer(); @@ -203,17 +204,17 @@ int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw) } if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) { - rtlpriv->cfg->fw_name = "rtlwifi/rtl8812aefw.bin"; - rtlpriv->cfg->wowlan_fw_name = "rtlwifi/rtl8812aefw_wowlan.bin"; + fw_name = "rtlwifi/rtl8812aefw.bin"; + wowlan_fw_name = "rtlwifi/rtl8812aefw_wowlan.bin"; } else { - rtlpriv->cfg->fw_name = "rtlwifi/rtl8821aefw.bin"; - rtlpriv->cfg->wowlan_fw_name = "rtlwifi/rtl8821aefw_wowlan.bin"; + fw_name = "rtlwifi/rtl8821aefw.bin"; + wowlan_fw_name = "rtlwifi/rtl8821aefw_wowlan.bin"; } rtlpriv->max_fw_size = 0x8000; /*load normal firmware*/ - pr_info("Using firmware %s\n", rtlpriv->cfg->fw_name); - err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name, + pr_info("Using firmware %s\n", fw_name); + err = request_firmware_nowait(THIS_MODULE, 1, fw_name, rtlpriv->io.dev, GFP_KERNEL, hw, rtl_fw_cb); if (err) { @@ -222,9 +223,9 @@ int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw) return 1; } /*load wowlan firmware*/ - pr_info("Using firmware %s\n", rtlpriv->cfg->wowlan_fw_name); + pr_info("Using firmware %s\n", wowlan_fw_name); err = request_firmware_nowait(THIS_MODULE, 1, - rtlpriv->cfg->wowlan_fw_name, + wowlan_fw_name, rtlpriv->io.dev, GFP_KERNEL, hw, rtl_wowlan_fw_cb); if (err) { @@ -320,7 +321,6 @@ static const struct rtl_hal_cfg rtl8821ae_hal_cfg = { .bar_id = 2, .write_readback = true, .name = "rtl8821ae_pci", - .fw_name = "rtlwifi/rtl8821aefw.bin", .ops = &rtl8821ae_hal_ops, .mod_params = &rtl8821ae_mod_params, .maps[SYS_ISO_CTRL] = REG_SYS_ISO_CTRL, diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h index 595f7d5d091a..dafe486f8448 100644 --- a/drivers/net/wireless/realtek/rtlwifi/wifi.h +++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h @@ -2278,9 +2278,7 @@ struct rtl_hal_cfg { u8 bar_id; bool write_readback; char *name; - char *fw_name; char *alt_fw_name; - char *wowlan_fw_name; struct rtl_hal_ops *ops; struct rtl_mod_params *mod_params; struct rtl_hal_usbint_cfg *usb_interface_cfg; -- cgit v1.2.3-59-g8ed1b From f67b107d4ceddcf7aa65b706aaaf50d68edb52a6 Mon Sep 17 00:00:00 2001 From: Marty Faltesek Date: Mon, 10 Oct 2016 19:00:04 +0300 Subject: ath10k: cache calibration data when the core is stopped Commit 0b8e3c4ca29f ("ath10k: move cal data len to hw_params") broke retrieving the calibration data from cal_data debugfs file. The length of file was always zero. The reason is: static ssize_t ath10k_debug_cal_data_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath10k *ar = file->private_data; void *buf = file->private_data; This is obviously bogus, private_data cannot contain both struct ath10k and the buffer. Fix it by caching calibration data to ar->debug.cal_data. This also allows it to be accessed when the device is not active (interface is down). The cal_data buffer is fixed size because during the first firmware probe we don't yet know what will be the lenght of the calibration data. It was simplest just to use a fixed length. There's a WARN_ON() in ath10k_debug_cal_data_fetch() if the buffer is too small. Tested with qca988x and firmware 10.2.4.70.56. 
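
Illustrative aside (not part of the patch): a minimal sketch of the debugfs idiom the fix moves to — the device owns one fixed-size snapshot buffer, open() only stashes the device pointer, and read() serves the buffer through simple_read_from_buffer() under the device lock. The names my_dev, my_snap_open, my_snap_read and MY_SNAP_LEN are invented for the sketch.

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/mutex.h>

#define MY_SNAP_LEN 12064               /* fixed upper bound, as above */

struct my_dev {
        struct mutex lock;
        void *snap;                     /* vzalloc()'ed once at create time */
};

static int my_snap_open(struct inode *inode, struct file *file)
{
        /* inode->i_private carries the device; private_data has one job */
        file->private_data = inode->i_private;
        return 0;
}

static ssize_t my_snap_read(struct file *file, char __user *ubuf,
                            size_t count, loff_t *ppos)
{
        struct my_dev *md = file->private_data;
        ssize_t ret;

        mutex_lock(&md->lock);
        ret = simple_read_from_buffer(ubuf, count, ppos, md->snap,
                                      MY_SNAP_LEN);
        mutex_unlock(&md->lock);

        return ret;
}
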
Reported-by: Nikolay Martynov Fixes: 0b8e3c4ca29f ("ath10k: move cal data len to hw_params") Cc: stable@vger.kernel.org # 4.7+ Signed-off-by: Marty Faltesek [kvalo@qca.qualcomm.com: improve commit log and minor other changes] Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/core.h | 1 + drivers/net/wireless/ath/ath10k/debug.c | 75 +++++++++++++++++---------------- 2 files changed, 40 insertions(+), 36 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h index dda49af1eb74..521f1c55c19e 100644 --- a/drivers/net/wireless/ath/ath10k/core.h +++ b/drivers/net/wireless/ath/ath10k/core.h @@ -450,6 +450,7 @@ struct ath10k_debug { u32 pktlog_filter; u32 reg_addr; u32 nf_cal_period; + void *cal_data; struct ath10k_fw_crash_data *fw_crash_data; }; diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c index 832da6ed9f13..82a4c67f3672 100644 --- a/drivers/net/wireless/ath/ath10k/debug.c +++ b/drivers/net/wireless/ath/ath10k/debug.c @@ -30,6 +30,8 @@ /* ms */ #define ATH10K_DEBUG_HTT_STATS_INTERVAL 1000 +#define ATH10K_DEBUG_CAL_DATA_LEN 12064 + #define ATH10K_FW_CRASH_DUMP_VERSION 1 /** @@ -1451,56 +1453,51 @@ static const struct file_operations fops_fw_dbglog = { .llseek = default_llseek, }; -static int ath10k_debug_cal_data_open(struct inode *inode, struct file *file) +static int ath10k_debug_cal_data_fetch(struct ath10k *ar) { - struct ath10k *ar = inode->i_private; - void *buf; u32 hi_addr; __le32 addr; int ret; - mutex_lock(&ar->conf_mutex); - - if (ar->state != ATH10K_STATE_ON && - ar->state != ATH10K_STATE_UTF) { - ret = -ENETDOWN; - goto err; - } + lockdep_assert_held(&ar->conf_mutex); - buf = vmalloc(ar->hw_params.cal_data_len); - if (!buf) { - ret = -ENOMEM; - goto err; - } + if (WARN_ON(ar->hw_params.cal_data_len > ATH10K_DEBUG_CAL_DATA_LEN)) + return -EINVAL; hi_addr = host_interest_item_address(HI_ITEM(hi_board_data)); ret = ath10k_hif_diag_read(ar, hi_addr, &addr, sizeof(addr)); if (ret) { - ath10k_warn(ar, "failed to read hi_board_data address: %d\n", ret); - goto err_vfree; + ath10k_warn(ar, "failed to read hi_board_data address: %d\n", + ret); + return ret; } - ret = ath10k_hif_diag_read(ar, le32_to_cpu(addr), buf, + ret = ath10k_hif_diag_read(ar, le32_to_cpu(addr), ar->debug.cal_data, ar->hw_params.cal_data_len); if (ret) { ath10k_warn(ar, "failed to read calibration data: %d\n", ret); - goto err_vfree; + return ret; } - file->private_data = buf; + return 0; +} - mutex_unlock(&ar->conf_mutex); +static int ath10k_debug_cal_data_open(struct inode *inode, struct file *file) +{ + struct ath10k *ar = inode->i_private; - return 0; + mutex_lock(&ar->conf_mutex); -err_vfree: - vfree(buf); + if (ar->state == ATH10K_STATE_ON || + ar->state == ATH10K_STATE_UTF) { + ath10k_debug_cal_data_fetch(ar); + } -err: + file->private_data = ar; mutex_unlock(&ar->conf_mutex); - return ret; + return 0; } static ssize_t ath10k_debug_cal_data_read(struct file *file, @@ -1508,18 +1505,16 @@ static ssize_t ath10k_debug_cal_data_read(struct file *file, size_t count, loff_t *ppos) { struct ath10k *ar = file->private_data; - void *buf = file->private_data; - return simple_read_from_buffer(user_buf, count, ppos, - buf, ar->hw_params.cal_data_len); -} + mutex_lock(&ar->conf_mutex); -static int ath10k_debug_cal_data_release(struct inode *inode, - struct file *file) -{ - vfree(file->private_data); + count = simple_read_from_buffer(user_buf, count, ppos, + ar->debug.cal_data, + 
ar->hw_params.cal_data_len); - return 0; + mutex_unlock(&ar->conf_mutex); + + return count; } static ssize_t ath10k_write_ani_enable(struct file *file, @@ -1580,7 +1575,6 @@ static const struct file_operations fops_ani_enable = { static const struct file_operations fops_cal_data = { .open = ath10k_debug_cal_data_open, .read = ath10k_debug_cal_data_read, - .release = ath10k_debug_cal_data_release, .owner = THIS_MODULE, .llseek = default_llseek, }; @@ -1932,6 +1926,8 @@ void ath10k_debug_stop(struct ath10k *ar) { lockdep_assert_held(&ar->conf_mutex); + ath10k_debug_cal_data_fetch(ar); + /* Must not use _sync to avoid deadlock, we do that in * ath10k_debug_destroy(). The check for htt_stats_mask is to avoid * warning from del_timer(). */ @@ -2344,6 +2340,10 @@ int ath10k_debug_create(struct ath10k *ar) if (!ar->debug.fw_crash_data) return -ENOMEM; + ar->debug.cal_data = vzalloc(ATH10K_DEBUG_CAL_DATA_LEN); + if (!ar->debug.cal_data) + return -ENOMEM; + INIT_LIST_HEAD(&ar->debug.fw_stats.pdevs); INIT_LIST_HEAD(&ar->debug.fw_stats.vdevs); INIT_LIST_HEAD(&ar->debug.fw_stats.peers); @@ -2357,6 +2357,9 @@ void ath10k_debug_destroy(struct ath10k *ar) vfree(ar->debug.fw_crash_data); ar->debug.fw_crash_data = NULL; + vfree(ar->debug.cal_data); + ar->debug.cal_data = NULL; + ath10k_debug_fw_stats_reset(ar); kfree(ar->debug.tpc_stats); -- cgit v1.2.3-59-g8ed1b From 304e5ac118cc351eb047b6c433a89e13ea7259cf Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Tue, 11 Oct 2016 19:46:49 +0200 Subject: Revert "ath9k_hw: implement temperature compensation support for AR9003+" This reverts commit 171f6402e4aa ("ath9k_hw: implement temperature compensation support for AR9003+"). Some users report that this commit causes a regression in performance under some conditions. Fixes: 171f6402e4aa ("ath9k_hw: implement temperature compensation support for AR9003+") Cc: # 4.8 Signed-off-by: Felix Fietkau [kvalo@qca.qualcomm.com: improve commit log] Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath9k/ar9003_calib.c | 25 +++---------------------- drivers/net/wireless/ath/ath9k/hw.h | 1 - 2 files changed, 3 insertions(+), 23 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c index b6f064a8d264..7e27a06e5df1 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c @@ -33,7 +33,6 @@ struct coeff { enum ar9003_cal_types { IQ_MISMATCH_CAL = BIT(0), - TEMP_COMP_CAL = BIT(1), }; static void ar9003_hw_setup_calibration(struct ath_hw *ah, @@ -59,12 +58,6 @@ static void ar9003_hw_setup_calibration(struct ath_hw *ah, /* Kick-off cal */ REG_SET_BIT(ah, AR_PHY_TIMING4, AR_PHY_TIMING4_DO_CAL); break; - case TEMP_COMP_CAL: - ath_dbg(common, CALIBRATE, - "starting Temperature Compensation Calibration\n"); - REG_SET_BIT(ah, AR_CH0_THERM, AR_CH0_THERM_LOCAL); - REG_SET_BIT(ah, AR_CH0_THERM, AR_CH0_THERM_START); - break; default: ath_err(common, "Invalid calibration type\n"); break; @@ -93,8 +86,7 @@ static bool ar9003_hw_per_calibration(struct ath_hw *ah, /* * Accumulate cal measures for active chains */ - if (cur_caldata->calCollect) - cur_caldata->calCollect(ah); + cur_caldata->calCollect(ah); ah->cal_samples++; if (ah->cal_samples >= cur_caldata->calNumSamples) { @@ -107,8 +99,7 @@ static bool ar9003_hw_per_calibration(struct ath_hw *ah, /* * Process accumulated data */ - if (cur_caldata->calPostProc) - cur_caldata->calPostProc(ah, numChains); + cur_caldata->calPostProc(ah, numChains); 
/* Calibration has finished. */ caldata->CalValid |= cur_caldata->calType; @@ -323,16 +314,9 @@ static const struct ath9k_percal_data iq_cal_single_sample = { ar9003_hw_iqcalibrate }; -static const struct ath9k_percal_data temp_cal_single_sample = { - TEMP_COMP_CAL, - MIN_CAL_SAMPLES, - PER_MAX_LOG_COUNT, -}; - static void ar9003_hw_init_cal_settings(struct ath_hw *ah) { ah->iq_caldata.calData = &iq_cal_single_sample; - ah->temp_caldata.calData = &temp_cal_single_sample; if (AR_SREV_9300_20_OR_LATER(ah)) { ah->enabled_cals |= TX_IQ_CAL; @@ -340,7 +324,7 @@ static void ar9003_hw_init_cal_settings(struct ath_hw *ah) ah->enabled_cals |= TX_IQ_ON_AGC_CAL; } - ah->supp_cals = IQ_MISMATCH_CAL | TEMP_COMP_CAL; + ah->supp_cals = IQ_MISMATCH_CAL; } #define OFF_UPPER_LT 24 @@ -1399,9 +1383,6 @@ static void ar9003_hw_init_cal_common(struct ath_hw *ah) INIT_CAL(&ah->iq_caldata); INSERT_CAL(ah, &ah->iq_caldata); - INIT_CAL(&ah->temp_caldata); - INSERT_CAL(ah, &ah->temp_caldata); - /* Initialize current pointer to first element in list */ ah->cal_list_curr = ah->cal_list; diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h index 2a5d3ad1169c..9cbca1229bac 100644 --- a/drivers/net/wireless/ath/ath9k/hw.h +++ b/drivers/net/wireless/ath/ath9k/hw.h @@ -830,7 +830,6 @@ struct ath_hw { /* Calibration */ u32 supp_cals; struct ath9k_cal_list iq_caldata; - struct ath9k_cal_list temp_caldata; struct ath9k_cal_list adcgain_caldata; struct ath9k_cal_list adcdc_caldata; struct ath9k_cal_list *cal_list; -- cgit v1.2.3-59-g8ed1b From 1ea2643961b0d1b8d0e4a11af5aa69b0f92d0533 Mon Sep 17 00:00:00 2001 From: Adam Williamson Date: Wed, 12 Oct 2016 19:08:40 +0100 Subject: ath6kl: add Dell OEM SDIO I/O for the Venue 8 Pro SDIO ID 0271:0418 Signed-off-by: Alan Cox Bugzilla-ID: https://bugzilla.kernel.org/show_bug.cgi?id=67921 Reviewed-by: Steve deRosier Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath6kl/sdio.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath6kl/sdio.c b/drivers/net/wireless/ath/ath6kl/sdio.c index eab0ab976af2..76eb33679d4b 100644 --- a/drivers/net/wireless/ath/ath6kl/sdio.c +++ b/drivers/net/wireless/ath/ath6kl/sdio.c @@ -1401,6 +1401,7 @@ static const struct sdio_device_id ath6kl_sdio_devices[] = { {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x0))}, {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x1))}, {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x2))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x18))}, {}, }; -- cgit v1.2.3-59-g8ed1b From abb6627910a1e783c8e034b35b7c80e5e7f98f41 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Wed, 12 Oct 2016 21:47:03 +0200 Subject: cpufreq: conservative: Fix next frequency selection Commit d352cf47d93e (cpufreq: conservative: Do not use transition notifications) overlooked the case when the "frequency step" used by the conservative governor is small relative to the distances between the available frequencies and broke the algorithm by using policy->cur instead of the previously requested frequency when computing the next one. As a result, the governor may not be able to go outside of a narrow range between two consecutive available frequencies. Fix the problem by making the governor save the previously requested frequency and select the next one relative that value (unless it is out of range, in which case policy->cur will be used instead). 
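
Illustrative aside (not part of the patch): the stall is easy to see in a standalone sketch. With a small step, adding it to the resolved hardware frequency keeps snapping back to the same table entry, while accumulating on top of the previously requested value eventually crosses into the next entry. The frequency table and step size below are invented.

#include <stdio.h>

static const unsigned int freqs[] = { 800000, 1600000, 2400000 };      /* kHz */

/* CPUFREQ_RELATION_H-like resolution: highest table entry <= request */
static unsigned int resolve(unsigned int req)
{
        unsigned int best = freqs[0];

        for (size_t i = 0; i < sizeof(freqs) / sizeof(freqs[0]); i++)
                if (freqs[i] <= req)
                        best = freqs[i];
        return best;
}

int main(void)
{
        const unsigned int step = 48000;        /* a small freq_step */
        unsigned int cur, requested;
        int i;

        /* Broken behaviour: step from the resolved hardware frequency. */
        cur = resolve(800000);
        for (i = 0; i < 20; i++)
                cur = resolve(cur + step);      /* 848000 resolves back to 800000 */
        printf("stepping from policy->cur: %u kHz\n", cur);

        /* Fixed behaviour: step from the previously requested frequency. */
        cur = resolve(800000);
        requested = cur;
        for (i = 0; i < 20; i++) {
                requested += step;
                cur = resolve(requested);
        }
        printf("stepping from requested  : %u kHz\n", cur);
        return 0;
}
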
Fixes: d352cf47d93e (cpufreq: conservative: Do not use transition notifications) Link: https://bugzilla.kernel.org/show_bug.cgi?id=177171 Reported-and-tested-by: Aleksey Rybalkin Acked-by: Viresh Kumar Cc: 4.8+ # 4.8+ Signed-off-by: Rafael J. Wysocki --- drivers/cpufreq/cpufreq_conservative.c | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c index 18da4f8051d3..13475890d792 100644 --- a/drivers/cpufreq/cpufreq_conservative.c +++ b/drivers/cpufreq/cpufreq_conservative.c @@ -17,6 +17,7 @@ struct cs_policy_dbs_info { struct policy_dbs_info policy_dbs; unsigned int down_skip; + unsigned int requested_freq; }; static inline struct cs_policy_dbs_info *to_dbs_info(struct policy_dbs_info *policy_dbs) @@ -61,6 +62,7 @@ static unsigned int cs_dbs_timer(struct cpufreq_policy *policy) { struct policy_dbs_info *policy_dbs = policy->governor_data; struct cs_policy_dbs_info *dbs_info = to_dbs_info(policy_dbs); + unsigned int requested_freq = dbs_info->requested_freq; struct dbs_data *dbs_data = policy_dbs->dbs_data; struct cs_dbs_tuners *cs_tuners = dbs_data->tuners; unsigned int load = dbs_update(policy); @@ -72,10 +74,16 @@ static unsigned int cs_dbs_timer(struct cpufreq_policy *policy) if (cs_tuners->freq_step == 0) goto out; + /* + * If requested_freq is out of range, it is likely that the limits + * changed in the meantime, so fall back to current frequency in that + * case. + */ + if (requested_freq > policy->max || requested_freq < policy->min) + requested_freq = policy->cur; + /* Check for frequency increase */ if (load > dbs_data->up_threshold) { - unsigned int requested_freq = policy->cur; - dbs_info->down_skip = 0; /* if we are already at full speed then break out early */ @@ -83,8 +91,11 @@ static unsigned int cs_dbs_timer(struct cpufreq_policy *policy) goto out; requested_freq += get_freq_target(cs_tuners, policy); + if (requested_freq > policy->max) + requested_freq = policy->max; __cpufreq_driver_target(policy, requested_freq, CPUFREQ_RELATION_H); + dbs_info->requested_freq = requested_freq; goto out; } @@ -95,7 +106,7 @@ static unsigned int cs_dbs_timer(struct cpufreq_policy *policy) /* Check for frequency decrease */ if (load < cs_tuners->down_threshold) { - unsigned int freq_target, requested_freq = policy->cur; + unsigned int freq_target; /* * if we cannot reduce the frequency anymore, break out early */ @@ -109,6 +120,7 @@ static unsigned int cs_dbs_timer(struct cpufreq_policy *policy) requested_freq = policy->min; __cpufreq_driver_target(policy, requested_freq, CPUFREQ_RELATION_L); + dbs_info->requested_freq = requested_freq; } out: @@ -287,6 +299,7 @@ static void cs_start(struct cpufreq_policy *policy) struct cs_policy_dbs_info *dbs_info = to_dbs_info(policy->governor_data); dbs_info->down_skip = 0; + dbs_info->requested_freq = policy->cur; } static struct dbs_governor cs_governor = { -- cgit v1.2.3-59-g8ed1b From c197d758036d8c77923ae3f88571cf198283107e Mon Sep 17 00:00:00 2001 From: Hoan Tran Date: Thu, 13 Oct 2016 10:33:35 -0700 Subject: cpufreq: CPPC: Correct desired_perf calculation The desired_perf is an abstract performance number. Its value should be in the range of [lowest perf, highest perf] of CPPC. The correct calculation is desired_perf = freq * cppc_highest_perf / cppc_dmi_max_khz And cppc_cpufreq_set_target() returns if desired_perf is exactly the same with the old perf. 
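
Illustrative aside (not part of the patch): a worked example of the scaling, with invented numbers — 2.5 GHz reported as the DMI maximum and a highest CPPC perf of 300. Asking for 1.25 GHz should yield desired_perf = 150; scaling by policy->max in kHz, as the old code did, produces a kHz-sized number far outside the [lowest perf, highest perf] range.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t cppc_dmi_max_khz = 2500000;    /* DMI max frequency, kHz */
        uint64_t highest_perf     = 300;        /* abstract CPPC perf units */
        uint64_t policy_max       = 2500000;    /* kHz, what the old code scaled by */
        uint64_t target_freq      = 1250000;    /* requested frequency, kHz */

        uint64_t old_desired = target_freq * policy_max   / cppc_dmi_max_khz;
        uint64_t new_desired = target_freq * highest_perf / cppc_dmi_max_khz;

        /* old: 1250000 -- a kHz value, far outside [lowest, highest] perf
         * new: 150     -- half of highest_perf, as intended */
        printf("old=%llu new=%llu\n",
               (unsigned long long)old_desired,
               (unsigned long long)new_desired);
        return 0;
}
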
Signed-off-by: Hoan Tran Reviewed-by: Prashanth Prakash Signed-off-by: Rafael J. Wysocki --- drivers/cpufreq/cppc_cpufreq.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c index 99db4227ae38..f7e98fc077c1 100644 --- a/drivers/cpufreq/cppc_cpufreq.c +++ b/drivers/cpufreq/cppc_cpufreq.c @@ -80,11 +80,17 @@ static int cppc_cpufreq_set_target(struct cpufreq_policy *policy, { struct cpudata *cpu; struct cpufreq_freqs freqs; + u32 desired_perf; int ret = 0; cpu = all_cpu_data[policy->cpu]; - cpu->perf_ctrls.desired_perf = (u64)target_freq * policy->max / cppc_dmi_max_khz; + desired_perf = (u64)target_freq * cpu->perf_caps.highest_perf / cppc_dmi_max_khz; + /* Return if it is exactly the same perf */ + if (desired_perf == cpu->perf_ctrls.desired_perf) + return ret; + + cpu->perf_ctrls.desired_perf = desired_perf; freqs.old = policy->cur; freqs.new = target_freq; -- cgit v1.2.3-59-g8ed1b From dc8184aa8621ee8048652496884d9f40d4bb407f Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Mon, 10 Oct 2016 15:57:21 +0800 Subject: drm/amdgpu: change vblank_time's calculation method to reduce computational error. Signed-off-by: Rex Zhu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org --- drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c index fe36caf1b7d7..14f57d9915e3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c @@ -113,24 +113,26 @@ void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev, printk("\n"); } + u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev) { struct drm_device *dev = adev->ddev; struct drm_crtc *crtc; struct amdgpu_crtc *amdgpu_crtc; - u32 line_time_us, vblank_lines; + u32 vblank_in_pixels; u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */ if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) { list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { amdgpu_crtc = to_amdgpu_crtc(crtc); if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) { - line_time_us = (amdgpu_crtc->hw_mode.crtc_htotal * 1000) / - amdgpu_crtc->hw_mode.clock; - vblank_lines = amdgpu_crtc->hw_mode.crtc_vblank_end - + vblank_in_pixels = + amdgpu_crtc->hw_mode.crtc_htotal * + (amdgpu_crtc->hw_mode.crtc_vblank_end - amdgpu_crtc->hw_mode.crtc_vdisplay + - (amdgpu_crtc->v_border * 2); - vblank_time_us = vblank_lines * line_time_us; + (amdgpu_crtc->v_border * 2)); + + vblank_time_us = vblank_in_pixels * 1000 / amdgpu_crtc->hw_mode.clock; break; } } -- cgit v1.2.3-59-g8ed1b From 02cfb5fccb0f9f968f0e208d89d9769aa16267bc Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Wed, 12 Oct 2016 15:28:55 -0400 Subject: drm/radeon: change vblank_time's calculation method to reduce computational error. Ported from Rex's amdgpu change. 
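
Illustrative aside (not part of either vblank patch): the same arithmetic with typical 1080p timings shows the size of the truncation error. Dividing per line first loses almost a microsecond per line, and that loss is then multiplied by the number of blanking lines; multiplying first and dividing once keeps the result within a microsecond of the exact value. The timing numbers are chosen for illustration only.

#include <stdio.h>

int main(void)
{
        /* Typical 1080p timings: 148.5 MHz pixel clock, htotal 2200,
         * 45 lines of vertical blanking. */
        unsigned int htotal = 2200, clock_khz = 148500, vblank_lines = 45;

        /* Old: truncate the per-line time first, then scale the error. */
        unsigned int line_time_us = (htotal * 1000) / clock_khz;        /* 14 */
        unsigned int old_us = line_time_us * vblank_lines;              /* 630 */

        /* New: stay in pixels and divide once at the end. */
        unsigned int vblank_in_pixels = htotal * vblank_lines;          /* 99000 */
        unsigned int new_us = vblank_in_pixels * 1000 / clock_khz;      /* 666 */

        printf("old=%u us, new=%u us (exact is ~666.67 us)\n", old_us, new_us);
        return 0;
}
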
Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org --- drivers/gpu/drm/radeon/r600_dpm.c | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c index 6a4b020dd0b4..5a26eb4545aa 100644 --- a/drivers/gpu/drm/radeon/r600_dpm.c +++ b/drivers/gpu/drm/radeon/r600_dpm.c @@ -156,19 +156,20 @@ u32 r600_dpm_get_vblank_time(struct radeon_device *rdev) struct drm_device *dev = rdev->ddev; struct drm_crtc *crtc; struct radeon_crtc *radeon_crtc; - u32 line_time_us, vblank_lines; + u32 vblank_in_pixels; u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */ if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) { list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { radeon_crtc = to_radeon_crtc(crtc); if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) { - line_time_us = (radeon_crtc->hw_mode.crtc_htotal * 1000) / - radeon_crtc->hw_mode.clock; - vblank_lines = radeon_crtc->hw_mode.crtc_vblank_end - - radeon_crtc->hw_mode.crtc_vdisplay + - (radeon_crtc->v_border * 2); - vblank_time_us = vblank_lines * line_time_us; + vblank_in_pixels = + radeon_crtc->hw_mode.crtc_htotal * + (radeon_crtc->hw_mode.crtc_vblank_end - + radeon_crtc->hw_mode.crtc_vdisplay + + (radeon_crtc->v_border * 2)); + + vblank_time_us = vblank_in_pixels * 1000 / radeon_crtc->hw_mode.clock; break; } } -- cgit v1.2.3-59-g8ed1b From e07053241b0264e7b107b2b3f4d899635985d353 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Tue, 11 Oct 2016 18:44:46 +0800 Subject: drm/amd/powerplay: fix static checker warnings in iceland_smc.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit add array length check to avoid buffer overflow. 
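
Illustrative aside (not part of the patch): the shape of the guard being added — validate the running index against the array size before every write into a fixed-size register table, not only via an assertion after the loop. The names and the table size below are invented.

#include <stdio.h>

#define REG_TABLE_SIZE 16                       /* invented bound */

struct reg_entry {
        unsigned short s0, s1;
};

/* Refuse to write past the end of the fixed table. */
static int append_entry(struct reg_entry *tbl, unsigned int *j,
                        unsigned short s0, unsigned short s1)
{
        if (*j >= REG_TABLE_SIZE)
                return -1;
        tbl[*j].s0 = s0;
        tbl[*j].s1 = s1;
        (*j)++;
        return 0;
}

int main(void)
{
        struct reg_entry table[REG_TABLE_SIZE];
        unsigned int j = 0;

        for (int i = 0; i < 32; i++)            /* more input than space */
                if (append_entry(table, &j, i, i) < 0)
                        break;

        printf("stored %u of 32 entries\n", j); /* capped at 16 */
        return 0;
}
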
Signed-off-by: Rex Zhu Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c index eda802bc63c8..8c889caba420 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c @@ -2458,7 +2458,7 @@ static int iceland_set_mc_special_registers(struct pp_hwmgr *hwmgr, PP_ASSERT_WITH_CODE((j <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE), "Invalid VramInfo table.", return -EINVAL); - if (!data->is_memory_gddr5) { + if (!data->is_memory_gddr5 && j < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE) { table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD; table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD; for (k = 0; k < table->num_entries; k++) { -- cgit v1.2.3-59-g8ed1b From 9faa6b0277fab4ab91db4d69bc47566fdfbae48b Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Tue, 11 Oct 2016 18:51:16 +0800 Subject: drm/amd/powerplay: fix static checker warnings in smu7_hwmgr.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit variable dereferenced before check it Signed-off-by: Rex Zhu Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index 508245d49d33..7c67a5a91182 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c @@ -3802,13 +3802,15 @@ static inline bool smu7_are_power_levels_equal(const struct smu7_performance_lev int smu7_check_states_equal(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *pstate1, const struct pp_hw_power_state *pstate2, bool *equal) { - const struct smu7_power_state *psa = cast_const_phw_smu7_power_state(pstate1); - const struct smu7_power_state *psb = cast_const_phw_smu7_power_state(pstate2); + const struct smu7_power_state *psa; + const struct smu7_power_state *psb; int i; if (pstate1 == NULL || pstate2 == NULL || equal == NULL) return -EINVAL; + psa = cast_const_phw_smu7_power_state(pstate1); + psb = cast_const_phw_smu7_power_state(pstate2); /* If the two states don't even have the same number of performance levels they cannot be the same state. */ if (psa->performance_level_count != psb->performance_level_count) { *equal = false; -- cgit v1.2.3-59-g8ed1b From eeb2fa0c97ba661f8b7fb210a1de10928b67a47b Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Wed, 12 Oct 2016 09:17:30 +0300 Subject: drm/amdgpu: potential NULL dereference in debugfs code MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit debugfs_create_file() returns NULL on error, it only returns error pointers if debugfs isn't enabled in the config and we checked for that earlier so it can't happen. 
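
Illustrative aside (not part of the patch): the return-value convention the fix relies on, as stated above — at this point debugfs_create_file() signals failure with NULL, and only the CONFIG_DEBUG_FS=n stub returns an ERR_PTR — so the error path must test for NULL. The function and file names below are invented.

#include <linux/debugfs.h>
#include <linux/errno.h>

static int my_ring_debugfs_init(struct dentry *root, void *ring,
                                const struct file_operations *fops)
{
        struct dentry *ent;

        ent = debugfs_create_file("my_ring", 0444, root, ring, fops);
        if (!ent)               /* NULL, not an ERR_PTR, means failure here */
                return -ENOMEM;

        return 0;
}
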
Fixes: 4f4824b55650 ('drm/amd/amdgpu: Convert ring debugfs entries to binary') Reviewed-by: Christian König Signed-off-by: Dan Carpenter Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index e1fa8731d1e2..3cb5e903cd62 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c @@ -345,8 +345,8 @@ static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev, ent = debugfs_create_file(name, S_IFREG | S_IRUGO, root, ring, &amdgpu_debugfs_ring_fops); - if (IS_ERR(ent)) - return PTR_ERR(ent); + if (!ent) + return -ENOMEM; i_size_write(ent->d_inode, ring->ring_size + 12); ring->ent = ent; -- cgit v1.2.3-59-g8ed1b From 24e8df6a6837d6cff182e84b838dc1d6971251fc Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Thu, 13 Oct 2016 15:32:04 +0800 Subject: drm/amd/powerplay: fix static checker warnings in smu7_hwmgr.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Rex Zhu Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index 7c67a5a91182..e7fb8e95436b 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c @@ -1452,8 +1452,10 @@ static int smu7_get_evv_voltages(struct pp_hwmgr *hwmgr) struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = NULL; - if (table_info != NULL) - sclk_table = table_info->vdd_dep_on_sclk; + if (table_info == NULL) + return -EINVAL; + + sclk_table = table_info->vdd_dep_on_sclk; for (i = 0; i < SMU7_MAX_LEAKAGE_COUNT; i++) { vv_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i; -- cgit v1.2.3-59-g8ed1b From aee3960a0ca34f8bf7dbcdb330c4f37a0f94dd8a Mon Sep 17 00:00:00 2001 From: Tom St Denis Date: Thu, 13 Oct 2016 17:46:45 -0400 Subject: drm/amdgpu/si_dpm: Limit clocks on HD86xx part Limit clocks on a specific HD86xx part to avoid crashes (while awaiting an appropriate PP fix). Signed-off-by: Tom St Denis Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/si_dpm.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c index 8bd08925b370..3de7bca5854b 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c @@ -3499,6 +3499,12 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev, max_sclk = 75000; max_mclk = 80000; } + /* Limit clocks for some HD8600 parts */ + if (adev->pdev->device == 0x6660 && + adev->pdev->revision == 0x83) { + max_sclk = 75000; + max_mclk = 80000; + } if (rps->vce_active) { rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk; -- cgit v1.2.3-59-g8ed1b From c0ca8df717061ae3d2ea624024033103c64210ae Mon Sep 17 00:00:00 2001 From: Noam Camus Date: Thu, 13 Oct 2016 16:15:32 +0300 Subject: irqchip/eznps: Acknowledge NPS_IPI before calling the handler IPI_IRQ (also TIMER0_IRQ) should be acked before the action->handler is called in handle_percpu_devid_irq. The IPI irq is edge sensitive and we might miss an IPI interrupt if it is triggered again while the handler runs. 
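
Illustrative aside (not part of the patch): a standalone sketch of why an edge-latched IPI must be acknowledged before the handler runs. If the pending latch is cleared only after the handler finishes, an edge that arrived while the handler was running is wiped out; clearing it first lets that edge re-latch and be serviced on the next pass.

#include <stdio.h>
#include <stdbool.h>

static bool latch;                              /* hardware "IPI pending" bit */

static void edge(void) { latch = true; }        /* device raises an edge */
static void ack(void)  { latch = false; }       /* clear the pending bit */

static void dispatch(bool ack_first)
{
        if (ack_first)
                ack();          /* .irq_ack style: clear, then handle */
        edge();                 /* a second IPI arrives mid-handler */
        if (!ack_first)
                ack();          /* .irq_eoi style: clear after handling */
}

int main(void)
{
        edge();
        dispatch(false);
        printf("eoi after handler : pending=%d (second IPI lost)\n", latch);

        edge();
        dispatch(true);
        printf("ack before handler: pending=%d (second IPI kept)\n", latch);
        return 0;
}
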
Fixes: 44df427c894a ("irqchip: add nps Internal and external irqchips") Signed-off-by: Noam Camus Cc: marc.zyngier@arm.com Cc: jason@lakedaemon.net Cc: stable@vger.kernel.org Link: http://lkml.kernel.org/r/1476364532-12634-1-git-send-email-noamca@mellanox.com Signed-off-by: Thomas Gleixner --- drivers/irqchip/irq-eznps.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/irqchip/irq-eznps.c b/drivers/irqchip/irq-eznps.c index efbf0e4304b7..ebc2b0b15f67 100644 --- a/drivers/irqchip/irq-eznps.c +++ b/drivers/irqchip/irq-eznps.c @@ -85,7 +85,7 @@ static void nps400_irq_eoi_global(struct irq_data *irqd) nps_ack_gic(); } -static void nps400_irq_eoi(struct irq_data *irqd) +static void nps400_irq_ack(struct irq_data *irqd) { unsigned int __maybe_unused irq = irqd_to_hwirq(irqd); @@ -103,7 +103,7 @@ static struct irq_chip nps400_irq_chip_percpu = { .name = "NPS400 IC", .irq_mask = nps400_irq_mask, .irq_unmask = nps400_irq_unmask, - .irq_eoi = nps400_irq_eoi, + .irq_ack = nps400_irq_ack, }; static int nps400_irq_map(struct irq_domain *d, unsigned int virq, -- cgit v1.2.3-59-g8ed1b From c024f06b3de372cd67f86b142992ac88fc3a7d18 Mon Sep 17 00:00:00 2001 From: Rich Felker Date: Thu, 13 Oct 2016 20:35:30 +0000 Subject: irqchip/jcore: Fix lost per-cpu interrupts The J-Core AIC does not have separate interrupt numbers reserved for cpu-local vs global interrupts. Instead, the driver requesting the irq is expected to know whether its device uses per-cpu interrupts or not. Previously it was assumed that handle_simple_irq could work for both cases, but it intentionally drops interrupts for an irq number that already has a handler running. This resulted in the timer interrupt for one cpu being lost when multiple cpus' timers were set for approximately the same expiration time, leading to stalls. In theory the same could also happen with IPIs. To solve the problem, instead of registering handle_simple_irq as the handler, register a wrapper function which checks whether the irq to be handled was requested as per-cpu or not, and passes it to handle_simple_irq or handle_percpu_irq accordingly. Fixes: 981b58f66cfc ("irqchip/jcore-aic: Add J-Core AIC driver") Signed-off-by: Rich Felker Cc: Marc Zyngier Cc: Jason Cooper Cc: linux-sh@vger.kernel.org Link: http://lkml.kernel.org/r/f18cec30bc17e3f52e478dd9f6714bfab02f227f.1476390724.git.dalias@libc.org Signed-off-by: Thomas Gleixner --- drivers/irqchip/irq-jcore-aic.c | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/irqchip/irq-jcore-aic.c b/drivers/irqchip/irq-jcore-aic.c index 84b01dec277d..033bccb41455 100644 --- a/drivers/irqchip/irq-jcore-aic.c +++ b/drivers/irqchip/irq-jcore-aic.c @@ -25,12 +25,30 @@ static struct irq_chip jcore_aic; +/* + * The J-Core AIC1 and AIC2 are cpu-local interrupt controllers and do + * not distinguish or use distinct irq number ranges for per-cpu event + * interrupts (timer, IPI). Since information to determine whether a + * particular irq number should be treated as per-cpu is not available + * at mapping time, we use a wrapper handler function which chooses + * the right handler at runtime based on whether IRQF_PERCPU was used + * when requesting the irq. 
+ */ + +static void handle_jcore_irq(struct irq_desc *desc) +{ + if (irqd_is_per_cpu(irq_desc_get_irq_data(desc))) + handle_percpu_irq(desc); + else + handle_simple_irq(desc); +} + static int jcore_aic_irqdomain_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hwirq) { struct irq_chip *aic = d->host_data; - irq_set_chip_and_handler(irq, aic, handle_simple_irq); + irq_set_chip_and_handler(irq, aic, handle_jcore_irq); return 0; } -- cgit v1.2.3-59-g8ed1b From d102eb5c1ac5e6743b1c6d145c06a25d98ad1375 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Fri, 14 Oct 2016 10:26:21 +0300 Subject: irqchip/gicv3: Handle loop timeout proper The timeout loop terminates when the loop count is zero, but the decrement of the count variable is post check. So count is -1 when we check for the timeout and therefor the error message is supressed. Change it to predecrement, so the error message is emitted. [ tglx: Massaged changelog ] Fixes: a2c225101234 ("irqchip: gic-v3: Refactor gic_enable_redist to support both enabling and disabling") Signed-off-by: Dan Carpenter Acked-by: Sudeep Holla Cc: Marc Zyngier Cc: kernel-janitors@vger.kernel.org Cc: Jason Cooper Cc: stable@vger.kernel.org Link: http://lkml.kernel.org/r/20161014072534.GA15168@mwanda Signed-off-by: Thomas Gleixner --- drivers/irqchip/irq-gic-v3.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index 9b81bd8b929c..19d642eae096 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -153,7 +153,7 @@ static void gic_enable_redist(bool enable) return; /* No PM support in this redistributor */ } - while (count--) { + while (--count) { val = readl_relaxed(rbase + GICR_WAKER); if (enable ^ (bool)(val & GICR_WAKER_ChildrenAsleep)) break; -- cgit v1.2.3-59-g8ed1b From 558c5eb58a5c029745b29558782d528b05aba8a5 Mon Sep 17 00:00:00 2001 From: Javier Martinez Canillas Date: Wed, 12 Oct 2016 15:55:40 -0300 Subject: net: wan: slic_ds26522: add SPI device ID table to fix module autoload If the driver is built as a module, module alias information isn't filled so the module won't be autoloaded. Add a SPI device ID table and use the MODULE_DEVICE_TABLE() macro so the information is exported in the module. Before this patch: $ modinfo drivers/net/wan/slic_ds26522.ko | grep alias $ After this patch: $ modinfo drivers/net/wan/slic_ds26522.ko | grep alias alias: spi:ds26522 Signed-off-by: Javier Martinez Canillas Signed-off-by: David S. 
Miller --- drivers/net/wan/slic_ds26522.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wan/slic_ds26522.c b/drivers/net/wan/slic_ds26522.c index d06a887a2352..53366a2232f0 100644 --- a/drivers/net/wan/slic_ds26522.c +++ b/drivers/net/wan/slic_ds26522.c @@ -223,6 +223,12 @@ static int slic_ds26522_probe(struct spi_device *spi) return ret; } +static const struct spi_device_id slic_ds26522_id[] = { + { .name = "ds26522" }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(spi, slic_ds26522_id); + static const struct of_device_id slic_ds26522_match[] = { { .compatible = "maxim,ds26522", @@ -239,6 +245,7 @@ static struct spi_driver slic_ds26522_driver = { }, .probe = slic_ds26522_probe, .remove = slic_ds26522_remove, + .id_table = slic_ds26522_id, }; static int __init slic_ds26522_init(void) -- cgit v1.2.3-59-g8ed1b From 485c9d43382172c1b2ecf42deb4476b5e37e1264 Mon Sep 17 00:00:00 2001 From: Javier Martinez Canillas Date: Wed, 12 Oct 2016 15:55:41 -0300 Subject: net: wan: slic_ds26522: Export OF module alias information When the device is registered via OF, the OF table is used to match the driver instead of the SPI device ID table, but the entries in the later are used as aliasses to load the module if the driver was not built-in. This is because the SPI core always reports an SPI module alias instead of an OF one, but that could change so it's better to always export it. Signed-off-by: Javier Martinez Canillas Signed-off-by: David S. Miller --- drivers/net/wan/slic_ds26522.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/net/wan/slic_ds26522.c b/drivers/net/wan/slic_ds26522.c index 53366a2232f0..b776a0ab106c 100644 --- a/drivers/net/wan/slic_ds26522.c +++ b/drivers/net/wan/slic_ds26522.c @@ -235,6 +235,7 @@ static const struct of_device_id slic_ds26522_match[] = { }, {}, }; +MODULE_DEVICE_TABLE(of, slic_ds26522_match); static struct spi_driver slic_ds26522_driver = { .driver = { -- cgit v1.2.3-59-g8ed1b From 059f01410822b9a37fac0f4da8c2202c2d39e7a5 Mon Sep 17 00:00:00 2001 From: Javier Martinez Canillas Date: Wed, 12 Oct 2016 16:05:59 -0300 Subject: net: wan: slic_ds26522: Allow driver to built if COMPILE_TEST is enabled The driver only has runtime but no build time dependency with FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE. So it can be built for testing purposes if the COMPILE_TEST option is enabled. This is useful to have more build coverage and make sure that the driver is not affected by changes that could cause build regressions. Signed-off-by: Javier Martinez Canillas Signed-off-by: David S. Miller --- drivers/net/wan/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig index 33ab3345d333..4e9fe75d7067 100644 --- a/drivers/net/wan/Kconfig +++ b/drivers/net/wan/Kconfig @@ -294,7 +294,7 @@ config FSL_UCC_HDLC config SLIC_DS26522 tristate "Slic Maxim ds26522 card support" depends on SPI - depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE + depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE || COMPILE_TEST help This module initializes and configures the slic maxim card in T1 or E1 mode. -- cgit v1.2.3-59-g8ed1b From fc791b6335152c5278dc4a4991bcb2d329f806f9 Mon Sep 17 00:00:00 2001 From: Paolo Abeni Date: Thu, 13 Oct 2016 18:26:56 +0200 Subject: IB/ipoib: move back IB LL address into the hard header After the commit 9207f9d45b0a ("net: preserve IP control block during GSO segmentation"), the GSO CB and the IPoIB CB conflict. 
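
Illustrative aside (not part of the patch): skb->cb is a single fixed scratch area that each layer overlays with its own structure, so two layers using it for the same packet at the same time clobber each other, as the standalone sketch below shows. The struct names, field names and sizes are simplified for illustration.

#include <stdio.h>
#include <string.h>

struct fake_skb {
        _Alignas(8) char cb[48];        /* one scratch area, like skb->cb */
};

struct fake_gso_cb   { unsigned int mac_offset; /* ... */ };
struct fake_ipoib_cb { unsigned char hwaddr[20]; };

int main(void)
{
        struct fake_skb skb;

        /* IPoIB stashes the 20-byte link-layer address in the cb... */
        struct fake_ipoib_cb *icb = (struct fake_ipoib_cb *)skb.cb;
        memset(icb->hwaddr, 0xaa, sizeof(icb->hwaddr));

        /* ...then GSO segmentation overlays its own state on the same bytes. */
        struct fake_gso_cb *gcb = (struct fake_gso_cb *)skb.cb;
        gcb->mac_offset = 0;

        printf("hwaddr[0] is now 0x%02x\n", icb->hwaddr[0]);    /* no longer 0xaa */
        return 0;
}
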
That destroy the IPoIB address information cached there, causing a severe performance regression, as better described here: http://marc.info/?l=linux-kernel&m=146787279825501&w=2 This change moves the data cached by the IPoIB driver from the skb control lock into the IPoIB hard header, as done before the commit 936d7de3d736 ("IPoIB: Stop lying about hard_header_len and use skb->cb to stash LL addresses"). In order to avoid GRO issue, on packet reception, the IPoIB driver stash into the skb a dummy pseudo header, so that the received packets have actually a hard header matching the declared length. To avoid changing the connected mode maximum mtu, the allocated head buffer size is increased by the pseudo header length. After this commit, IPoIB performances are back to pre-regression value. v2 -> v3: rebased v1 -> v2: avoid changing the max mtu, increasing the head buf size Fixes: 9207f9d45b0a ("net: preserve IP control block during GSO segmentation") Signed-off-by: Paolo Abeni Signed-off-by: David S. Miller --- drivers/infiniband/ulp/ipoib/ipoib.h | 20 +++++++--- drivers/infiniband/ulp/ipoib/ipoib_cm.c | 15 +++---- drivers/infiniband/ulp/ipoib/ipoib_ib.c | 12 +++--- drivers/infiniband/ulp/ipoib/ipoib_main.c | 54 ++++++++++++++++---------- drivers/infiniband/ulp/ipoib/ipoib_multicast.c | 6 ++- 5 files changed, 64 insertions(+), 43 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h index 7b8d2d9e2263..da12717a3eb7 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib.h +++ b/drivers/infiniband/ulp/ipoib/ipoib.h @@ -63,6 +63,8 @@ enum ipoib_flush_level { enum { IPOIB_ENCAP_LEN = 4, + IPOIB_PSEUDO_LEN = 20, + IPOIB_HARD_LEN = IPOIB_ENCAP_LEN + IPOIB_PSEUDO_LEN, IPOIB_UD_HEAD_SIZE = IB_GRH_BYTES + IPOIB_ENCAP_LEN, IPOIB_UD_RX_SG = 2, /* max buffer needed for 4K mtu */ @@ -134,15 +136,21 @@ struct ipoib_header { u16 reserved; }; -struct ipoib_cb { - struct qdisc_skb_cb qdisc_cb; - u8 hwaddr[INFINIBAND_ALEN]; +struct ipoib_pseudo_header { + u8 hwaddr[INFINIBAND_ALEN]; }; -static inline struct ipoib_cb *ipoib_skb_cb(const struct sk_buff *skb) +static inline void skb_add_pseudo_hdr(struct sk_buff *skb) { - BUILD_BUG_ON(sizeof(skb->cb) < sizeof(struct ipoib_cb)); - return (struct ipoib_cb *)skb->cb; + char *data = skb_push(skb, IPOIB_PSEUDO_LEN); + + /* + * only the ipoib header is present now, make room for a dummy + * pseudo header and set skb field accordingly + */ + memset(data, 0, IPOIB_PSEUDO_LEN); + skb_reset_mac_header(skb); + skb_pull(skb, IPOIB_HARD_LEN); } /* Used for all multicast joins (broadcast, IPv4 mcast and IPv6 mcast) */ diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c index 4ad297d3de89..339a1eecdfe3 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c @@ -63,6 +63,8 @@ MODULE_PARM_DESC(cm_data_debug_level, #define IPOIB_CM_RX_DELAY (3 * 256 * HZ) #define IPOIB_CM_RX_UPDATE_MASK (0x3) +#define IPOIB_CM_RX_RESERVE (ALIGN(IPOIB_HARD_LEN, 16) - IPOIB_ENCAP_LEN) + static struct ib_qp_attr ipoib_cm_err_attr = { .qp_state = IB_QPS_ERR }; @@ -146,15 +148,15 @@ static struct sk_buff *ipoib_cm_alloc_rx_skb(struct net_device *dev, struct sk_buff *skb; int i; - skb = dev_alloc_skb(IPOIB_CM_HEAD_SIZE + 12); + skb = dev_alloc_skb(ALIGN(IPOIB_CM_HEAD_SIZE + IPOIB_PSEUDO_LEN, 16)); if (unlikely(!skb)) return NULL; /* - * IPoIB adds a 4 byte header. 
So we need 12 more bytes to align the + * IPoIB adds a IPOIB_ENCAP_LEN byte header, this will align the * IP header to a multiple of 16. */ - skb_reserve(skb, 12); + skb_reserve(skb, IPOIB_CM_RX_RESERVE); mapping[0] = ib_dma_map_single(priv->ca, skb->data, IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE); @@ -624,9 +626,9 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc) if (wc->byte_len < IPOIB_CM_COPYBREAK) { int dlen = wc->byte_len; - small_skb = dev_alloc_skb(dlen + 12); + small_skb = dev_alloc_skb(dlen + IPOIB_CM_RX_RESERVE); if (small_skb) { - skb_reserve(small_skb, 12); + skb_reserve(small_skb, IPOIB_CM_RX_RESERVE); ib_dma_sync_single_for_cpu(priv->ca, rx_ring[wr_id].mapping[0], dlen, DMA_FROM_DEVICE); skb_copy_from_linear_data(skb, small_skb->data, dlen); @@ -663,8 +665,7 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc) copied: skb->protocol = ((struct ipoib_header *) skb->data)->proto; - skb_reset_mac_header(skb); - skb_pull(skb, IPOIB_ENCAP_LEN); + skb_add_pseudo_hdr(skb); ++dev->stats.rx_packets; dev->stats.rx_bytes += skb->len; diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c index be11d5d5b8c1..830fecb6934c 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c @@ -128,16 +128,15 @@ static struct sk_buff *ipoib_alloc_rx_skb(struct net_device *dev, int id) buf_size = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu); - skb = dev_alloc_skb(buf_size + IPOIB_ENCAP_LEN); + skb = dev_alloc_skb(buf_size + IPOIB_HARD_LEN); if (unlikely(!skb)) return NULL; /* - * IB will leave a 40 byte gap for a GRH and IPoIB adds a 4 byte - * header. So we need 4 more bytes to get to 48 and align the - * IP header to a multiple of 16. + * the IP header will be at IPOIP_HARD_LEN + IB_GRH_BYTES, that is + * 64 bytes aligned */ - skb_reserve(skb, 4); + skb_reserve(skb, sizeof(struct ipoib_pseudo_header)); mapping = priv->rx_ring[id].mapping; mapping[0] = ib_dma_map_single(priv->ca, skb->data, buf_size, @@ -253,8 +252,7 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc) skb_pull(skb, IB_GRH_BYTES); skb->protocol = ((struct ipoib_header *) skb->data)->proto; - skb_reset_mac_header(skb); - skb_pull(skb, IPOIB_ENCAP_LEN); + skb_add_pseudo_hdr(skb); ++dev->stats.rx_packets; dev->stats.rx_bytes += skb->len; diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index 5636fc3da6b8..b58d9dca5c93 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -925,9 +925,12 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr, ipoib_neigh_free(neigh); goto err_drop; } - if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) + if (skb_queue_len(&neigh->queue) < + IPOIB_MAX_PATH_REC_QUEUE) { + /* put pseudoheader back on for next time */ + skb_push(skb, IPOIB_PSEUDO_LEN); __skb_queue_tail(&neigh->queue, skb); - else { + } else { ipoib_warn(priv, "queue length limit %d. 
Packet drop.\n", skb_queue_len(&neigh->queue)); goto err_drop; @@ -964,7 +967,7 @@ err_drop: } static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev, - struct ipoib_cb *cb) + struct ipoib_pseudo_header *phdr) { struct ipoib_dev_priv *priv = netdev_priv(dev); struct ipoib_path *path; @@ -972,16 +975,18 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev, spin_lock_irqsave(&priv->lock, flags); - path = __path_find(dev, cb->hwaddr + 4); + path = __path_find(dev, phdr->hwaddr + 4); if (!path || !path->valid) { int new_path = 0; if (!path) { - path = path_rec_create(dev, cb->hwaddr + 4); + path = path_rec_create(dev, phdr->hwaddr + 4); new_path = 1; } if (path) { if (skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) { + /* put pseudoheader back on for next time */ + skb_push(skb, IPOIB_PSEUDO_LEN); __skb_queue_tail(&path->queue, skb); } else { ++dev->stats.tx_dropped; @@ -1009,10 +1014,12 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev, be16_to_cpu(path->pathrec.dlid)); spin_unlock_irqrestore(&priv->lock, flags); - ipoib_send(dev, skb, path->ah, IPOIB_QPN(cb->hwaddr)); + ipoib_send(dev, skb, path->ah, IPOIB_QPN(phdr->hwaddr)); return; } else if ((path->query || !path_rec_start(dev, path)) && skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) { + /* put pseudoheader back on for next time */ + skb_push(skb, IPOIB_PSEUDO_LEN); __skb_queue_tail(&path->queue, skb); } else { ++dev->stats.tx_dropped; @@ -1026,13 +1033,15 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct ipoib_dev_priv *priv = netdev_priv(dev); struct ipoib_neigh *neigh; - struct ipoib_cb *cb = ipoib_skb_cb(skb); + struct ipoib_pseudo_header *phdr; struct ipoib_header *header; unsigned long flags; + phdr = (struct ipoib_pseudo_header *) skb->data; + skb_pull(skb, sizeof(*phdr)); header = (struct ipoib_header *) skb->data; - if (unlikely(cb->hwaddr[4] == 0xff)) { + if (unlikely(phdr->hwaddr[4] == 0xff)) { /* multicast, arrange "if" according to probability */ if ((header->proto != htons(ETH_P_IP)) && (header->proto != htons(ETH_P_IPV6)) && @@ -1045,13 +1054,13 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_OK; } /* Add in the P_Key for multicast*/ - cb->hwaddr[8] = (priv->pkey >> 8) & 0xff; - cb->hwaddr[9] = priv->pkey & 0xff; + phdr->hwaddr[8] = (priv->pkey >> 8) & 0xff; + phdr->hwaddr[9] = priv->pkey & 0xff; - neigh = ipoib_neigh_get(dev, cb->hwaddr); + neigh = ipoib_neigh_get(dev, phdr->hwaddr); if (likely(neigh)) goto send_using_neigh; - ipoib_mcast_send(dev, cb->hwaddr, skb); + ipoib_mcast_send(dev, phdr->hwaddr, skb); return NETDEV_TX_OK; } @@ -1060,16 +1069,16 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev) case htons(ETH_P_IP): case htons(ETH_P_IPV6): case htons(ETH_P_TIPC): - neigh = ipoib_neigh_get(dev, cb->hwaddr); + neigh = ipoib_neigh_get(dev, phdr->hwaddr); if (unlikely(!neigh)) { - neigh_add_path(skb, cb->hwaddr, dev); + neigh_add_path(skb, phdr->hwaddr, dev); return NETDEV_TX_OK; } break; case htons(ETH_P_ARP): case htons(ETH_P_RARP): /* for unicast ARP and RARP should always perform path find */ - unicast_arp_send(skb, dev, cb); + unicast_arp_send(skb, dev, phdr); return NETDEV_TX_OK; default: /* ethertype not supported by IPoIB */ @@ -1086,11 +1095,13 @@ send_using_neigh: goto unref; } } else if (neigh->ah) { - ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(cb->hwaddr)); + ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(phdr->hwaddr)); goto 
unref; } if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) { + /* put pseudoheader back on for next time */ + skb_push(skb, sizeof(*phdr)); spin_lock_irqsave(&priv->lock, flags); __skb_queue_tail(&neigh->queue, skb); spin_unlock_irqrestore(&priv->lock, flags); @@ -1122,8 +1133,8 @@ static int ipoib_hard_header(struct sk_buff *skb, unsigned short type, const void *daddr, const void *saddr, unsigned len) { + struct ipoib_pseudo_header *phdr; struct ipoib_header *header; - struct ipoib_cb *cb = ipoib_skb_cb(skb); header = (struct ipoib_header *) skb_push(skb, sizeof *header); @@ -1132,12 +1143,13 @@ static int ipoib_hard_header(struct sk_buff *skb, /* * we don't rely on dst_entry structure, always stuff the - * destination address into skb->cb so we can figure out where + * destination address into skb hard header so we can figure out where * to send the packet later. */ - memcpy(cb->hwaddr, daddr, INFINIBAND_ALEN); + phdr = (struct ipoib_pseudo_header *) skb_push(skb, sizeof(*phdr)); + memcpy(phdr->hwaddr, daddr, INFINIBAND_ALEN); - return sizeof *header; + return IPOIB_HARD_LEN; } static void ipoib_set_mcast_list(struct net_device *dev) @@ -1759,7 +1771,7 @@ void ipoib_setup(struct net_device *dev) dev->flags |= IFF_BROADCAST | IFF_MULTICAST; - dev->hard_header_len = IPOIB_ENCAP_LEN; + dev->hard_header_len = IPOIB_HARD_LEN; dev->addr_len = INFINIBAND_ALEN; dev->type = ARPHRD_INFINIBAND; dev->tx_queue_len = ipoib_sendq_size * 2; diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c index d3394b6add24..1909dd252c94 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c @@ -796,9 +796,11 @@ void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb) __ipoib_mcast_add(dev, mcast); list_add_tail(&mcast->list, &priv->multicast_list); } - if (skb_queue_len(&mcast->pkt_queue) < IPOIB_MAX_MCAST_QUEUE) + if (skb_queue_len(&mcast->pkt_queue) < IPOIB_MAX_MCAST_QUEUE) { + /* put pseudoheader back on for next time */ + skb_push(skb, sizeof(struct ipoib_pseudo_header)); skb_queue_tail(&mcast->pkt_queue, skb); - else { + } else { ++dev->stats.tx_dropped; dev_kfree_skb_any(skb); } -- cgit v1.2.3-59-g8ed1b From ce6b04ee8b112cc9d5ef41ba697a3ffabc630f42 Mon Sep 17 00:00:00 2001 From: Yuval Mintz Date: Thu, 13 Oct 2016 22:57:01 +0300 Subject: qed: Fix static checker warning. Smatch compains about qed_roce_ll2_tx() dereference of the 'cdev' variable while testing its validity later. As the validation checking is an over-kill [variable would always be set], simply remove it. Reported-by: Dan Carpenter Fixes: abd49676c707 ("qed: Add RoCE ll2 & GSI support") Signed-off-by: Yuval Mintz Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qed/qed_roce.c | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c index 76831a398bed..c73638ceb1ad 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_roce.c +++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c @@ -2809,11 +2809,6 @@ static int qed_roce_ll2_stop(struct qed_dev *cdev) struct qed_roce_ll2_info *roce_ll2 = hwfn->ll2; int rc; - if (!cdev) { - DP_ERR(cdev, "qed roce ll2 stop: invalid cdev\n"); - return -EINVAL; - } - if (roce_ll2->handle == QED_LL2_UNUSED_HANDLE) { DP_ERR(cdev, "qed roce ll2 stop: cannot stop an unused LL2\n"); return -EINVAL; @@ -2850,7 +2845,7 @@ static int qed_roce_ll2_tx(struct qed_dev *cdev, int rc; int i; - if (!cdev || !pkt || !params) { + if (!pkt || !params) { DP_ERR(cdev, "roce ll2 tx: failed tx because one of the following is NULL - drv=%p, pkt=%p, params=%p\n", cdev, pkt, params); -- cgit v1.2.3-59-g8ed1b From 0189efb8f4f830b9ac7a7c56c0c6e260859e950d Mon Sep 17 00:00:00 2001 From: Yuval Mintz Date: Thu, 13 Oct 2016 22:57:02 +0300 Subject: qed*: Fix Kconfig dependencies with INFINIBAND_QEDR The qedr driver would require a tristate Kconfig option [to allow it to compile as a module], and toward that end we've added the INFINIBAND_QEDR option. But as we've made the compilation of the qed/qede infrastructure required for RoCE dependent on the option we'd be facing linking difficulties in case that QED=y or QEDE=y, and INFINIBAND_QEDR=m. To resolve this, we seperate between the INFINIBAND_QEDR option and the infrastructure support in qed/qede by introducing a new QED_RDMA option which would be selected by INFINIBAND_QEDR but would be a boolean instead of a tristate; Following that, the qed/qede is fixed based on this new option so that all config combinations would be supported. Fixes: cee9fbd8e2e9 ("qede: add qedr framework") Reported-by: Arnd Bergmann Signed-off-by: Yuval Mintz Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/Kconfig | 4 ++ drivers/net/ethernet/qlogic/qed/Makefile | 2 +- drivers/net/ethernet/qlogic/qed/qed_cxt.c | 7 +-- drivers/net/ethernet/qlogic/qed/qed_dev.c | 14 ++--- drivers/net/ethernet/qlogic/qed/qed_ll2.c | 1 + drivers/net/ethernet/qlogic/qed/qed_ll2.h | 20 ------- drivers/net/ethernet/qlogic/qed/qed_main.c | 28 +++++---- drivers/net/ethernet/qlogic/qed/qed_roce.c | 91 +++++++++++++++--------------- drivers/net/ethernet/qlogic/qed/qed_roce.h | 75 +++++++++++++++--------- drivers/net/ethernet/qlogic/qed/qed_spq.c | 4 -- drivers/net/ethernet/qlogic/qede/Makefile | 2 +- include/linux/qed/qede_roce.h | 2 +- 12 files changed, 120 insertions(+), 130 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig index 0df1391f9663..77567727528a 100644 --- a/drivers/net/ethernet/qlogic/Kconfig +++ b/drivers/net/ethernet/qlogic/Kconfig @@ -107,10 +107,14 @@ config QEDE ---help--- This enables the support for ... 
+config QED_RDMA + bool + config INFINIBAND_QEDR tristate "QLogic qede RoCE sources [debug]" depends on QEDE && 64BIT select QED_LL2 + select QED_RDMA default n ---help--- This provides a temporary node that allows the compilation diff --git a/drivers/net/ethernet/qlogic/qed/Makefile b/drivers/net/ethernet/qlogic/qed/Makefile index cda0af7fbc20..967acf322c09 100644 --- a/drivers/net/ethernet/qlogic/qed/Makefile +++ b/drivers/net/ethernet/qlogic/qed/Makefile @@ -5,4 +5,4 @@ qed-y := qed_cxt.o qed_dev.o qed_hw.o qed_init_fw_funcs.o qed_init_ops.o \ qed_selftest.o qed_dcbx.o qed_debug.o qed-$(CONFIG_QED_SRIOV) += qed_sriov.o qed_vf.o qed-$(CONFIG_QED_LL2) += qed_ll2.o -qed-$(CONFIG_INFINIBAND_QEDR) += qed_roce.o +qed-$(CONFIG_QED_RDMA) += qed_roce.o diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c index 82370a1a59ad..277db7831f7b 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c @@ -47,13 +47,8 @@ #define TM_ALIGN BIT(TM_SHIFT) #define TM_ELEM_SIZE 4 -/* ILT constants */ -#if IS_ENABLED(CONFIG_INFINIBAND_QEDR) /* For RoCE we configure to 64K to cover for RoCE max tasks 256K purpose. */ -#define ILT_DEFAULT_HW_P_SIZE 4 -#else -#define ILT_DEFAULT_HW_P_SIZE 3 -#endif +#define ILT_DEFAULT_HW_P_SIZE (IS_ENABLED(CONFIG_QED_RDMA) ? 4 : 3) #define ILT_PAGE_IN_BYTES(hw_p_size) (1U << ((hw_p_size) + 12)) #define ILT_CFG_REG(cli, reg) PSWRQ2_REG_ ## cli ## _ ## reg ## _RT_OFFSET diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 754f6a908858..21adf5208320 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -1422,19 +1422,19 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn) u32 *feat_num = p_hwfn->hw_info.feat_num; int num_features = 1; -#if IS_ENABLED(CONFIG_INFINIBAND_QEDR) - /* Roce CNQ each requires: 1 status block + 1 CNQ. We divide the - * status blocks equally between L2 / RoCE but with consideration as - * to how many l2 queues / cnqs we have - */ - if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) { + if (IS_ENABLED(CONFIG_QED_RDMA) && + p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) { + /* Roce CNQ each requires: 1 status block + 1 CNQ. We divide + * the status blocks equally between L2 / RoCE but with + * consideration as to how many l2 queues / cnqs we have. 
+ */ num_features++; feat_num[QED_RDMA_CNQ] = min_t(u32, RESC_NUM(p_hwfn, QED_SB) / num_features, RESC_NUM(p_hwfn, QED_RDMA_CNQ_RAM)); } -#endif + feat_num[QED_PF_L2_QUE] = min_t(u32, RESC_NUM(p_hwfn, QED_SB) / num_features, RESC_NUM(p_hwfn, QED_L2_QUEUE)); diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c index 02a8be2faed7..7856e5241b52 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c @@ -38,6 +38,7 @@ #include "qed_mcp.h" #include "qed_reg_addr.h" #include "qed_sp.h" +#include "qed_roce.h" #define QED_LL2_RX_REGISTERED(ll2) ((ll2)->rx_queue.b_cb_registred) #define QED_LL2_TX_REGISTERED(ll2) ((ll2)->tx_queue.b_cb_registred) diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.h b/drivers/net/ethernet/qlogic/qed/qed_ll2.h index 80a5dc2d652d..4e3d62a16cab 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.h +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.h @@ -293,24 +293,4 @@ void qed_ll2_setup(struct qed_hwfn *p_hwfn, */ void qed_ll2_free(struct qed_hwfn *p_hwfn, struct qed_ll2_info *p_ll2_connections); -void qed_ll2b_complete_rx_gsi_packet(struct qed_hwfn *p_hwfn, - u8 connection_handle, - void *cookie, - dma_addr_t rx_buf_addr, - u16 data_length, - u8 data_length_error, - u16 parse_flags, - u16 vlan, - u32 src_mac_addr_hi, - u16 src_mac_addr_lo, bool b_last_packet); -void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn, - u8 connection_handle, - void *cookie, - dma_addr_t first_frag_addr, - bool b_last_fragment, bool b_last_packet); -void qed_ll2b_release_tx_gsi_packet(struct qed_hwfn *p_hwfn, - u8 connection_handle, - void *cookie, - dma_addr_t first_frag_addr, - bool b_last_fragment, bool b_last_packet); #endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 4ee3151e80c2..6eb2401b9e22 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -33,10 +33,8 @@ #include "qed_hw.h" #include "qed_selftest.h" -#if IS_ENABLED(CONFIG_INFINIBAND_QEDR) #define QED_ROCE_QPS (8192) #define QED_ROCE_DPIS (8) -#endif static char version[] = "QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n"; @@ -682,9 +680,7 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev, enum qed_int_mode int_mode) { struct qed_sb_cnt_info sb_cnt_info; -#if IS_ENABLED(CONFIG_INFINIBAND_QEDR) - int num_l2_queues; -#endif + int num_l2_queues = 0; int rc; int i; @@ -715,8 +711,9 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev, cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors - cdev->num_hwfns; -#if IS_ENABLED(CONFIG_INFINIBAND_QEDR) - num_l2_queues = 0; + if (!IS_ENABLED(CONFIG_QED_RDMA)) + return 0; + for_each_hwfn(cdev, i) num_l2_queues += FEAT_NUM(&cdev->hwfns[i], QED_PF_L2_QUE); @@ -738,7 +735,6 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev, DP_VERBOSE(cdev, QED_MSG_RDMA, "roce_msix_cnt=%d roce_msix_base=%d\n", cdev->int_params.rdma_msix_cnt, cdev->int_params.rdma_msix_base); -#endif return 0; } @@ -843,18 +839,20 @@ static void qed_update_pf_params(struct qed_dev *cdev, { int i; -#if IS_ENABLED(CONFIG_INFINIBAND_QEDR) - params->rdma_pf_params.num_qps = QED_ROCE_QPS; - params->rdma_pf_params.min_dpis = QED_ROCE_DPIS; - /* divide by 3 the MRs to avoid MF ILT overflow */ - params->rdma_pf_params.num_mrs = RDMA_MAX_TIDS; - params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX; -#endif for (i = 0; i < cdev->num_hwfns; i++) { struct qed_hwfn *p_hwfn = 
&cdev->hwfns[i]; p_hwfn->pf_params = *params; } + + if (!IS_ENABLED(CONFIG_QED_RDMA)) + return; + + params->rdma_pf_params.num_qps = QED_ROCE_QPS; + params->rdma_pf_params.min_dpis = QED_ROCE_DPIS; + /* divide by 3 the MRs to avoid MF ILT overflow */ + params->rdma_pf_params.num_mrs = RDMA_MAX_TIDS; + params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX; } static int qed_slowpath_start(struct qed_dev *cdev, diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c index c73638ceb1ad..187df38542f7 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_roce.c +++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c @@ -129,17 +129,12 @@ static void qed_bmap_release_id(struct qed_hwfn *p_hwfn, } } -u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id) +static u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id) { /* First sb id for RoCE is after all the l2 sb */ return FEAT_NUM((struct qed_hwfn *)p_hwfn, QED_PF_L2_QUE) + rel_sb_id; } -u32 qed_rdma_query_cau_timer_res(void *rdma_cxt) -{ - return QED_CAU_DEF_RX_TIMER_RES; -} - static int qed_rdma_alloc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_rdma_start_in_params *params) @@ -275,7 +270,7 @@ free_rdma_info: return rc; } -void qed_rdma_resc_free(struct qed_hwfn *p_hwfn) +static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn) { struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info; @@ -527,6 +522,26 @@ static int qed_rdma_start_fw(struct qed_hwfn *p_hwfn, return qed_spq_post(p_hwfn, p_ent, NULL); } +static int qed_rdma_alloc_tid(void *rdma_cxt, u32 *itid) +{ + struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; + int rc; + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID\n"); + + spin_lock_bh(&p_hwfn->p_rdma_info->lock); + rc = qed_rdma_bmap_alloc_id(p_hwfn, + &p_hwfn->p_rdma_info->tid_map, itid); + spin_unlock_bh(&p_hwfn->p_rdma_info->lock); + if (rc) + goto out; + + rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_TASK, *itid); +out: + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID - done, rc = %d\n", rc); + return rc; +} + static int qed_rdma_reserve_lkey(struct qed_hwfn *p_hwfn) { struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev; @@ -573,7 +588,7 @@ static int qed_rdma_setup(struct qed_hwfn *p_hwfn, return qed_rdma_start_fw(p_hwfn, params, p_ptt); } -int qed_rdma_stop(void *rdma_cxt) +static int qed_rdma_stop(void *rdma_cxt) { struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; struct rdma_close_func_ramrod_data *p_ramrod; @@ -629,8 +644,8 @@ out: return rc; } -int qed_rdma_add_user(void *rdma_cxt, - struct qed_rdma_add_user_out_params *out_params) +static int qed_rdma_add_user(void *rdma_cxt, + struct qed_rdma_add_user_out_params *out_params) { struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; u32 dpi_start_offset; @@ -664,7 +679,7 @@ int qed_rdma_add_user(void *rdma_cxt, return rc; } -struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt) +static struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt) { struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; struct qed_rdma_port *p_port = p_hwfn->p_rdma_info->port; @@ -680,7 +695,7 @@ struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt) return p_port; } -struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt) +static struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt) { struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; @@ -690,7 +705,7 @@ struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt) return p_hwfn->p_rdma_info->dev; } -void qed_rdma_free_tid(void *rdma_cxt, u32 itid) +static void 
qed_rdma_free_tid(void *rdma_cxt, u32 itid) { struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; @@ -701,27 +716,7 @@ void qed_rdma_free_tid(void *rdma_cxt, u32 itid) spin_unlock_bh(&p_hwfn->p_rdma_info->lock); } -int qed_rdma_alloc_tid(void *rdma_cxt, u32 *itid) -{ - struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; - int rc; - - DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID\n"); - - spin_lock_bh(&p_hwfn->p_rdma_info->lock); - rc = qed_rdma_bmap_alloc_id(p_hwfn, - &p_hwfn->p_rdma_info->tid_map, itid); - spin_unlock_bh(&p_hwfn->p_rdma_info->lock); - if (rc) - goto out; - - rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_TASK, *itid); -out: - DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID - done, rc = %d\n", rc); - return rc; -} - -void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod) +static void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod) { struct qed_hwfn *p_hwfn; u16 qz_num; @@ -816,7 +811,7 @@ static int qed_rdma_get_int(struct qed_dev *cdev, struct qed_int_info *info) return 0; } -int qed_rdma_alloc_pd(void *rdma_cxt, u16 *pd) +static int qed_rdma_alloc_pd(void *rdma_cxt, u16 *pd) { struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; u32 returned_id; @@ -1985,9 +1980,9 @@ int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp) return 0; } -int qed_rdma_query_qp(void *rdma_cxt, - struct qed_rdma_qp *qp, - struct qed_rdma_query_qp_out_params *out_params) +static int qed_rdma_query_qp(void *rdma_cxt, + struct qed_rdma_qp *qp, + struct qed_rdma_query_qp_out_params *out_params) { struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; int rc; @@ -2022,7 +2017,7 @@ int qed_rdma_query_qp(void *rdma_cxt, return rc; } -int qed_rdma_destroy_qp(void *rdma_cxt, struct qed_rdma_qp *qp) +static int qed_rdma_destroy_qp(void *rdma_cxt, struct qed_rdma_qp *qp) { struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; int rc = 0; @@ -2215,9 +2210,9 @@ static int qed_roce_modify_qp(struct qed_hwfn *p_hwfn, return rc; } -int qed_rdma_modify_qp(void *rdma_cxt, - struct qed_rdma_qp *qp, - struct qed_rdma_modify_qp_in_params *params) +static int qed_rdma_modify_qp(void *rdma_cxt, + struct qed_rdma_qp *qp, + struct qed_rdma_modify_qp_in_params *params) { struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; enum qed_roce_qp_state prev_state; @@ -2312,8 +2307,9 @@ int qed_rdma_modify_qp(void *rdma_cxt, return rc; } -int qed_rdma_register_tid(void *rdma_cxt, - struct qed_rdma_register_tid_in_params *params) +static int +qed_rdma_register_tid(void *rdma_cxt, + struct qed_rdma_register_tid_in_params *params) { struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; struct rdma_register_tid_ramrod_data *p_ramrod; @@ -2450,7 +2446,7 @@ int qed_rdma_register_tid(void *rdma_cxt, return rc; } -int qed_rdma_deregister_tid(void *rdma_cxt, u32 itid) +static int qed_rdma_deregister_tid(void *rdma_cxt, u32 itid) { struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; struct rdma_deregister_tid_ramrod_data *p_ramrod; @@ -2561,7 +2557,8 @@ void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) qed_rdma_dpm_conf(p_hwfn, p_ptt); } -int qed_rdma_start(void *rdma_cxt, struct qed_rdma_start_in_params *params) +static int qed_rdma_start(void *rdma_cxt, + struct qed_rdma_start_in_params *params) { struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; struct qed_ptt *p_ptt; @@ -2601,7 +2598,7 @@ static int qed_rdma_init(struct qed_dev *cdev, return qed_rdma_start(QED_LEADING_HWFN(cdev), params); } -void qed_rdma_remove_user(void *rdma_cxt, u16 dpi) 
+static void qed_rdma_remove_user(void *rdma_cxt, u16 dpi) { struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.h b/drivers/net/ethernet/qlogic/qed/qed_roce.h index 2f091e8a0f40..691413176734 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_roce.h +++ b/drivers/net/ethernet/qlogic/qed/qed_roce.h @@ -181,36 +181,55 @@ struct qed_rdma_qp { dma_addr_t shared_queue_phys_addr; }; -int -qed_rdma_add_user(void *rdma_cxt, - struct qed_rdma_add_user_out_params *out_params); -int qed_rdma_alloc_pd(void *rdma_cxt, u16 *pd); -int qed_rdma_alloc_tid(void *rdma_cxt, u32 *tid); -int qed_rdma_deregister_tid(void *rdma_cxt, u32 tid); -void qed_rdma_free_tid(void *rdma_cxt, u32 tid); -struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt); -struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt); -int -qed_rdma_register_tid(void *rdma_cxt, - struct qed_rdma_register_tid_in_params *params); -void qed_rdma_remove_user(void *rdma_cxt, u16 dpi); -int qed_rdma_start(void *p_hwfn, struct qed_rdma_start_in_params *params); -int qed_rdma_stop(void *rdma_cxt); -u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id); -u32 qed_rdma_query_cau_timer_res(void *p_hwfn); -void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 cnq_index, u16 prod); -void qed_rdma_resc_free(struct qed_hwfn *p_hwfn); +#if IS_ENABLED(CONFIG_QED_RDMA) +void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); void qed_async_roce_event(struct qed_hwfn *p_hwfn, struct event_ring_entry *p_eqe); -int qed_rdma_destroy_qp(void *rdma_cxt, struct qed_rdma_qp *qp); -int qed_rdma_modify_qp(void *rdma_cxt, struct qed_rdma_qp *qp, - struct qed_rdma_modify_qp_in_params *params); -int qed_rdma_query_qp(void *rdma_cxt, struct qed_rdma_qp *qp, - struct qed_rdma_query_qp_out_params *out_params); - -#if IS_ENABLED(CONFIG_INFINIBAND_QEDR) -void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); +void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn, + u8 connection_handle, + void *cookie, + dma_addr_t first_frag_addr, + bool b_last_fragment, bool b_last_packet); +void qed_ll2b_release_tx_gsi_packet(struct qed_hwfn *p_hwfn, + u8 connection_handle, + void *cookie, + dma_addr_t first_frag_addr, + bool b_last_fragment, bool b_last_packet); +void qed_ll2b_complete_rx_gsi_packet(struct qed_hwfn *p_hwfn, + u8 connection_handle, + void *cookie, + dma_addr_t rx_buf_addr, + u16 data_length, + u8 data_length_error, + u16 parse_flags, + u16 vlan, + u32 src_mac_addr_hi, + u16 src_mac_addr_lo, bool b_last_packet); #else -void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) {} +static inline void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) {} +static inline void qed_async_roce_event(struct qed_hwfn *p_hwfn, struct event_ring_entry *p_eqe) {} +static inline void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn, + u8 connection_handle, + void *cookie, + dma_addr_t first_frag_addr, + bool b_last_fragment, + bool b_last_packet) {} +static inline void qed_ll2b_release_tx_gsi_packet(struct qed_hwfn *p_hwfn, + u8 connection_handle, + void *cookie, + dma_addr_t first_frag_addr, + bool b_last_fragment, + bool b_last_packet) {} +static inline void qed_ll2b_complete_rx_gsi_packet(struct qed_hwfn *p_hwfn, + u8 connection_handle, + void *cookie, + dma_addr_t rx_buf_addr, + u16 data_length, + u8 data_length_error, + u16 parse_flags, + u16 vlan, + u32 src_mac_addr_hi, + u16 src_mac_addr_lo, + bool b_last_packet) {} #endif #endif diff --git 
a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c index caff41544898..9fbaf9429fd0 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_spq.c +++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c @@ -28,9 +28,7 @@ #include "qed_reg_addr.h" #include "qed_sp.h" #include "qed_sriov.h" -#if IS_ENABLED(CONFIG_INFINIBAND_QEDR) #include "qed_roce.h" -#endif /*************************************************************************** * Structures & Definitions @@ -240,11 +238,9 @@ qed_async_event_completion(struct qed_hwfn *p_hwfn, struct event_ring_entry *p_eqe) { switch (p_eqe->protocol_id) { -#if IS_ENABLED(CONFIG_INFINIBAND_QEDR) case PROTOCOLID_ROCE: qed_async_roce_event(p_hwfn, p_eqe); return 0; -#endif case PROTOCOLID_COMMON: return qed_sriov_eqe_event(p_hwfn, p_eqe->opcode, diff --git a/drivers/net/ethernet/qlogic/qede/Makefile b/drivers/net/ethernet/qlogic/qede/Makefile index 28dc58919c85..048a230c3ce0 100644 --- a/drivers/net/ethernet/qlogic/qede/Makefile +++ b/drivers/net/ethernet/qlogic/qede/Makefile @@ -2,4 +2,4 @@ obj-$(CONFIG_QEDE) := qede.o qede-y := qede_main.o qede_ethtool.o qede-$(CONFIG_DCB) += qede_dcbnl.o -qede-$(CONFIG_INFINIBAND_QEDR) += qede_roce.o +qede-$(CONFIG_QED_RDMA) += qede_roce.o diff --git a/include/linux/qed/qede_roce.h b/include/linux/qed/qede_roce.h index 99fbe6d55acb..f48d64b0e2fb 100644 --- a/include/linux/qed/qede_roce.h +++ b/include/linux/qed/qede_roce.h @@ -68,7 +68,7 @@ void qede_roce_unregister_driver(struct qedr_driver *drv); bool qede_roce_supported(struct qede_dev *dev); -#if IS_ENABLED(CONFIG_INFINIBAND_QEDR) +#if IS_ENABLED(CONFIG_QED_RDMA) int qede_roce_dev_add(struct qede_dev *dev); void qede_roce_dev_event_open(struct qede_dev *dev); void qede_roce_dev_event_close(struct qede_dev *dev); -- cgit v1.2.3-59-g8ed1b From 8c93beaf5714b9ddfa4a0b4bcf89725d2021e903 Mon Sep 17 00:00:00 2001 From: Yuval Mintz Date: Thu, 13 Oct 2016 22:57:03 +0300 Subject: qed: Additional work toward cleaning C=1 This cleans many of the warnings that would arise in qed as a result of compilations with C=1; Most of those are the addition of missing 'static' to functions, although there are several other fixes as well. Signed-off-by: Yuval Mintz Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qed/qed_cxt.c | 8 +- drivers/net/ethernet/qlogic/qed/qed_debug.c | 53 +++++++------ drivers/net/ethernet/qlogic/qed/qed_dev.c | 5 +- drivers/net/ethernet/qlogic/qed/qed_ll2.c | 15 ++-- drivers/net/ethernet/qlogic/qed/qed_main.c | 2 +- drivers/net/ethernet/qlogic/qed/qed_roce.c | 118 ++++------------------------ drivers/net/ethernet/qlogic/qed/qed_roce.h | 20 ----- drivers/net/ethernet/qlogic/qed/qed_sp.h | 1 - 8 files changed, 58 insertions(+), 164 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c index 277db7831f7b..0c42c240b5cf 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c @@ -344,14 +344,14 @@ static struct qed_tid_seg *qed_cxt_tid_seg_info(struct qed_hwfn *p_hwfn, return NULL; } -void qed_cxt_set_srq_count(struct qed_hwfn *p_hwfn, u32 num_srqs) +static void qed_cxt_set_srq_count(struct qed_hwfn *p_hwfn, u32 num_srqs) { struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr; p_mgr->srq_count = num_srqs; } -u32 qed_cxt_get_srq_count(struct qed_hwfn *p_hwfn) +static u32 qed_cxt_get_srq_count(struct qed_hwfn *p_hwfn) { struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr; @@ -1799,8 +1799,8 @@ int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn, struct qed_cxt_info *p_info) return 0; } -void qed_rdma_set_pf_params(struct qed_hwfn *p_hwfn, - struct qed_rdma_pf_params *p_params) +static void qed_rdma_set_pf_params(struct qed_hwfn *p_hwfn, + struct qed_rdma_pf_params *p_params) { u32 num_cons, num_tasks, num_qps, num_mrs, num_srqs; enum protocol_type proto; diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c index 88e7d5bef909..68f19ca57f96 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_debug.c +++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c @@ -405,7 +405,7 @@ struct phy_defs { /***************************** Constant Arrays *******************************/ /* Debug arrays */ -static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {0} }; +static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {NULL} }; /* Chip constant definitions array */ static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = { @@ -4028,10 +4028,10 @@ static enum dbg_status qed_mcp_trace_read_meta(struct qed_hwfn *p_hwfn, } /* Dump MCP Trace */ -enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u32 *dump_buf, - bool dump, u32 *num_dumped_dwords) +static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *dump_buf, + bool dump, u32 *num_dumped_dwords) { u32 trace_data_grc_addr, trace_data_size_bytes, trace_data_size_dwords; u32 trace_meta_size_dwords, running_bundle_id, offset = 0; @@ -4130,10 +4130,10 @@ enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn, } /* Dump GRC FIFO */ -enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u32 *dump_buf, - bool dump, u32 *num_dumped_dwords) +static enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *dump_buf, + bool dump, u32 *num_dumped_dwords) { u32 offset = 0, dwords_read, size_param_offset; bool fifo_has_data; @@ -4192,10 +4192,10 @@ enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn, } /* Dump IGU FIFO */ -enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u32 *dump_buf, - bool dump, u32 *num_dumped_dwords) +static enum dbg_status 
qed_igu_fifo_dump(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *dump_buf, + bool dump, u32 *num_dumped_dwords) { u32 offset = 0, dwords_read, size_param_offset; bool fifo_has_data; @@ -4255,10 +4255,11 @@ enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn, } /* Protection Override dump */ -enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u32 *dump_buf, - bool dump, u32 *num_dumped_dwords) +static enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *dump_buf, + bool dump, + u32 *num_dumped_dwords) { u32 offset = 0, size_param_offset, override_window_dwords; @@ -6339,10 +6340,11 @@ enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn, } /* Wrapper for unifying the idle_chk and mcp_trace api */ -enum dbg_status qed_print_idle_chk_results_wrapper(struct qed_hwfn *p_hwfn, - u32 *dump_buf, - u32 num_dumped_dwords, - char *results_buf) +static enum dbg_status +qed_print_idle_chk_results_wrapper(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + u32 num_dumped_dwords, + char *results_buf) { u32 num_errors, num_warnnings; @@ -6413,8 +6415,8 @@ static void qed_dbg_print_feature(u8 *p_text_buf, u32 text_size) #define QED_RESULTS_BUF_MIN_SIZE 16 /* Generic function for decoding debug feature info */ -enum dbg_status format_feature(struct qed_hwfn *p_hwfn, - enum qed_dbg_features feature_idx) +static enum dbg_status format_feature(struct qed_hwfn *p_hwfn, + enum qed_dbg_features feature_idx) { struct qed_dbg_feature *feature = &p_hwfn->cdev->dbg_params.features[feature_idx]; @@ -6480,8 +6482,9 @@ enum dbg_status format_feature(struct qed_hwfn *p_hwfn, } /* Generic function for performing the dump of a debug feature. */ -enum dbg_status qed_dbg_dump(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, - enum qed_dbg_features feature_idx) +static enum dbg_status qed_dbg_dump(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + enum qed_dbg_features feature_idx) { struct qed_dbg_feature *feature = &p_hwfn->cdev->dbg_params.features[feature_idx]; diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 21adf5208320..edae5fc5fccd 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -497,12 +497,13 @@ int qed_resc_alloc(struct qed_dev *cdev) if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) { num_cons = qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ROCE, - 0) * 2; + NULL) * 2; n_eqes += num_cons + 2 * MAX_NUM_VFS_BB; } else if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) { num_cons = qed_cxt_get_proto_cid_count(p_hwfn, - PROTOCOLID_ISCSI, 0); + PROTOCOLID_ISCSI, + NULL); n_eqes += 2 * num_cons; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c index 7856e5241b52..ce171a80bdaf 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c @@ -141,11 +141,11 @@ static void qed_ll2_kill_buffers(struct qed_dev *cdev) qed_ll2_dealloc_buffer(cdev, buffer); } -void qed_ll2b_complete_rx_packet(struct qed_hwfn *p_hwfn, - u8 connection_handle, - struct qed_ll2_rx_packet *p_pkt, - struct core_rx_fast_path_cqe *p_cqe, - bool b_last_packet) +static void qed_ll2b_complete_rx_packet(struct qed_hwfn *p_hwfn, + u8 connection_handle, + struct qed_ll2_rx_packet *p_pkt, + struct core_rx_fast_path_cqe *p_cqe, + bool b_last_packet) { u16 packet_length = le16_to_cpu(p_cqe->packet_length); struct qed_ll2_buffer *buffer = 
p_pkt->cookie; @@ -516,7 +516,7 @@ static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie) return rc; } -void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle) +static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle) { struct qed_ll2_info *p_ll2_conn = NULL; struct qed_ll2_rx_packet *p_pkt = NULL; @@ -1124,9 +1124,6 @@ static void qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn, DMA_REGPAIR_LE(start_bd->addr, first_frag); start_bd->nbytes = cpu_to_le16(first_frag_len); - SET_FIELD(start_bd->bd_flags.as_bitfield, CORE_TX_BD_FLAGS_ROCE_FLAV, - type); - DP_VERBOSE(p_hwfn, (NETIF_MSG_TX_QUEUED | QED_MSG_LL2), "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Tx Producer at [0x%04x] - set with a %04x bytes %02x BDs buffer at %08x:%08x\n", diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 6eb2401b9e22..8dc3f4670f64 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -1430,7 +1430,7 @@ static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode) return status; } -struct qed_selftest_ops qed_selftest_ops_pass = { +static struct qed_selftest_ops qed_selftest_ops_pass = { .selftest_memory = &qed_selftest_memory, .selftest_interrupt = &qed_selftest_interrupt, .selftest_register = &qed_selftest_register, diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c index 187df38542f7..f3a825a8f8d5 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_roce.c +++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c @@ -157,7 +157,8 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn, p_hwfn->p_rdma_info = p_rdma_info; p_rdma_info->proto = PROTOCOLID_ROCE; - num_cons = qed_cxt_get_proto_cid_count(p_hwfn, p_rdma_info->proto, 0); + num_cons = qed_cxt_get_proto_cid_count(p_hwfn, p_rdma_info->proto, + NULL); p_rdma_info->num_qps = num_cons / 2; @@ -831,7 +832,7 @@ static int qed_rdma_alloc_pd(void *rdma_cxt, u16 *pd) return rc; } -void qed_rdma_free_pd(void *rdma_cxt, u16 pd) +static void qed_rdma_free_pd(void *rdma_cxt, u16 pd) { struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; @@ -868,8 +869,9 @@ qed_rdma_toggle_bit_create_resize_cq(struct qed_hwfn *p_hwfn, u16 icid) return toggle_bit; } -int qed_rdma_create_cq(void *rdma_cxt, - struct qed_rdma_create_cq_in_params *params, u16 *icid) +static int qed_rdma_create_cq(void *rdma_cxt, + struct qed_rdma_create_cq_in_params *params, + u16 *icid) { struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; struct qed_rdma_info *p_info = p_hwfn->p_rdma_info; @@ -952,98 +954,10 @@ err: return rc; } -int qed_rdma_resize_cq(void *rdma_cxt, - struct qed_rdma_resize_cq_in_params *in_params, - struct qed_rdma_resize_cq_out_params *out_params) -{ - struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; - struct rdma_resize_cq_output_params *p_ramrod_res; - struct rdma_resize_cq_ramrod_data *p_ramrod; - enum qed_rdma_toggle_bit toggle_bit; - struct qed_sp_init_data init_data; - struct qed_spq_entry *p_ent; - dma_addr_t ramrod_res_phys; - u8 fw_return_code; - int rc = -ENOMEM; - - DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", in_params->icid); - - p_ramrod_res = - (struct rdma_resize_cq_output_params *) - dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, - sizeof(struct rdma_resize_cq_output_params), - &ramrod_res_phys, GFP_KERNEL); - if (!p_ramrod_res) { - DP_NOTICE(p_hwfn, - "qed resize cq failed: cannot allocate memory (ramrod)\n"); - return rc; - } - - /* Get SPQ entry */ - 
memset(&init_data, 0, sizeof(init_data)); - init_data.cid = in_params->icid; - init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; - init_data.comp_mode = QED_SPQ_MODE_EBLOCK; - - rc = qed_sp_init_request(p_hwfn, &p_ent, - RDMA_RAMROD_RESIZE_CQ, - p_hwfn->p_rdma_info->proto, &init_data); - if (rc) - goto err; - - p_ramrod = &p_ent->ramrod.rdma_resize_cq; - - p_ramrod->flags = 0; - - /* toggle the bit for every resize or create cq for a given icid */ - toggle_bit = qed_rdma_toggle_bit_create_resize_cq(p_hwfn, - in_params->icid); - - SET_FIELD(p_ramrod->flags, - RDMA_RESIZE_CQ_RAMROD_DATA_TOGGLE_BIT, toggle_bit); - - SET_FIELD(p_ramrod->flags, - RDMA_RESIZE_CQ_RAMROD_DATA_IS_TWO_LEVEL_PBL, - in_params->pbl_two_level); - - p_ramrod->pbl_log_page_size = in_params->pbl_page_size_log - 12; - p_ramrod->pbl_num_pages = cpu_to_le16(in_params->pbl_num_pages); - p_ramrod->max_cqes = cpu_to_le32(in_params->cq_size); - DMA_REGPAIR_LE(p_ramrod->pbl_addr, in_params->pbl_ptr); - DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys); - - rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code); - if (rc) - goto err; - - if (fw_return_code != RDMA_RETURN_OK) { - DP_NOTICE(p_hwfn, "fw_return_code = %d\n", fw_return_code); - rc = -EINVAL; - goto err; - } - - out_params->prod = le32_to_cpu(p_ramrod_res->old_cq_prod); - out_params->cons = le32_to_cpu(p_ramrod_res->old_cq_cons); - - dma_free_coherent(&p_hwfn->cdev->pdev->dev, - sizeof(struct rdma_resize_cq_output_params), - p_ramrod_res, ramrod_res_phys); - - DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Resized CQ, rc = %d\n", rc); - - return rc; - -err: dma_free_coherent(&p_hwfn->cdev->pdev->dev, - sizeof(struct rdma_resize_cq_output_params), - p_ramrod_res, ramrod_res_phys); - DP_NOTICE(p_hwfn, "Resized CQ, Failed - rc = %d\n", rc); - - return rc; -} - -int qed_rdma_destroy_cq(void *rdma_cxt, - struct qed_rdma_destroy_cq_in_params *in_params, - struct qed_rdma_destroy_cq_out_params *out_params) +static int +qed_rdma_destroy_cq(void *rdma_cxt, + struct qed_rdma_destroy_cq_in_params *in_params, + struct qed_rdma_destroy_cq_out_params *out_params) { struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; struct rdma_destroy_cq_output_params *p_ramrod_res; @@ -1164,7 +1078,7 @@ static enum roce_flavor qed_roce_mode_to_flavor(enum roce_mode roce_mode) return flavor; } -int qed_roce_alloc_cid(struct qed_hwfn *p_hwfn, u16 *cid) +static int qed_roce_alloc_cid(struct qed_hwfn *p_hwfn, u16 *cid) { struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info; u32 responder_icid; @@ -1788,9 +1702,9 @@ err: return rc; } -int qed_roce_query_qp(struct qed_hwfn *p_hwfn, - struct qed_rdma_qp *qp, - struct qed_rdma_query_qp_out_params *out_params) +static int qed_roce_query_qp(struct qed_hwfn *p_hwfn, + struct qed_rdma_qp *qp, + struct qed_rdma_query_qp_out_params *out_params) { struct roce_query_qp_resp_output_params *p_resp_ramrod_res; struct roce_query_qp_req_output_params *p_req_ramrod_res; @@ -1931,7 +1845,7 @@ err_resp: return rc; } -int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp) +static int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp) { u32 num_invalidated_mw = 0; u32 num_bound_mw = 0; @@ -2033,7 +1947,7 @@ static int qed_rdma_destroy_qp(void *rdma_cxt, struct qed_rdma_qp *qp) return rc; } -struct qed_rdma_qp * +static struct qed_rdma_qp * qed_rdma_create_qp(void *rdma_cxt, struct qed_rdma_create_qp_in_params *in_params, struct qed_rdma_create_qp_out_params *out_params) diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.h 
b/drivers/net/ethernet/qlogic/qed/qed_roce.h index 691413176734..279f342af8db 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_roce.h +++ b/drivers/net/ethernet/qlogic/qed/qed_roce.h @@ -95,26 +95,6 @@ struct qed_rdma_info { enum protocol_type proto; }; -struct qed_rdma_resize_cq_in_params { - u16 icid; - u32 cq_size; - bool pbl_two_level; - u64 pbl_ptr; - u16 pbl_num_pages; - u8 pbl_page_size_log; -}; - -struct qed_rdma_resize_cq_out_params { - u32 prod; - u32 cons; -}; - -struct qed_rdma_resize_cnq_in_params { - u32 cnq_id; - u32 pbl_page_size_log; - u64 pbl_ptr; -}; - struct qed_rdma_qp { struct regpair qp_handle; struct regpair qp_handle_async; diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h index 652c90819758..b2c08e4d2a9b 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h @@ -80,7 +80,6 @@ union ramrod_data { struct roce_destroy_qp_resp_ramrod_data roce_destroy_qp_resp; struct roce_destroy_qp_req_ramrod_data roce_destroy_qp_req; struct rdma_create_cq_ramrod_data rdma_create_cq; - struct rdma_resize_cq_ramrod_data rdma_resize_cq; struct rdma_destroy_cq_ramrod_data rdma_destroy_cq; struct rdma_srq_create_ramrod_data rdma_create_srq; struct rdma_srq_destroy_ramrod_data rdma_destroy_srq; -- cgit v1.2.3-59-g8ed1b From 958b3d396d7f80755e2c2e6a8f873a669f38de10 Mon Sep 17 00:00:00 2001 From: Brenden Blanco Date: Thu, 13 Oct 2016 13:13:11 -0700 Subject: net/mlx4_en: fixup xdp tx irq to match rx In cases where the number of tx rings is not a multiple of the number of rx rings, the tx completion event will be handled on a different core from the transmit and population of the ring. Races on the ring will lead to a double-free of the page, and possibly other corruption. The rings are initialized by default with a valid multiple of rings, based on the number of cpus, therefore an invalid configuration requires ethtool to change the ring layout. For instance 'ethtool -L eth0 rx 9 tx 8' will cause packets received on rx0, and XDP_TX'd to tx48, to be completed on cpu3 (48 % 9 == 3). Resolve this discrepancy by shifting the irq for the xdp tx queues to start again from 0, modulo rx_ring_num. Fixes: 9ecc2d86171a ("net/mlx4_en: add xdp forwarding and data write support") Reported-by: Jesper Dangaard Brouer Signed-off-by: Brenden Blanco Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx4/en_cq.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c index 132cea655920..e3be7e44ff51 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c @@ -127,7 +127,15 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq, /* For TX we use the same irq per ring we assigned for the RX */ struct mlx4_en_cq *rx_cq; - + int xdp_index; + + /* The xdp tx irq must align with the rx ring that forwards to + * it, so reindex these from 0. This should only happen when + * tx_ring_num is not a multiple of rx_ring_num. 
+ */ + xdp_index = (priv->xdp_ring_num - priv->tx_ring_num) + cq_idx; + if (xdp_index >= 0) + cq_idx = xdp_index; cq_idx = cq_idx % priv->rx_ring_num; rx_cq = priv->rx_cq[cq_idx]; cq->vector = rx_cq->vector; -- cgit v1.2.3-59-g8ed1b From da146d3b5262c1866c868b9dec1bd0f834d6ded6 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 13 Oct 2016 16:07:03 -0400 Subject: drm/amdgpu: fix amdgpu_need_full_reset (v2) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit IP types are not an index. Each asic may have number and type of IPs. Properly check the the type rather than using the type id as an index. v2: fix all the IPs to not use IP type as an idx as well. Reviewed-by: Chunming Zhou Reviewed-by: Christian König Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 23 ++++++++++++++++------- drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 12 ++---------- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 17 +++++++++-------- drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 13 ++++++------- drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | 14 ++++++-------- drivers/gpu/drm/amd/amdgpu/tonga_ih.c | 14 ++++++-------- drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 14 +++++++------- drivers/gpu/drm/amd/amdgpu/vce_v3_0.c | 15 +++++++-------- drivers/gpu/drm/amd/include/amd_shared.h | 2 +- 9 files changed, 60 insertions(+), 64 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 15afe22ca890..fda0e57c5e0f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -2075,7 +2075,8 @@ static bool amdgpu_check_soft_reset(struct amdgpu_device *adev) if (!adev->ip_block_status[i].valid) continue; if (adev->ip_blocks[i].funcs->check_soft_reset) - adev->ip_blocks[i].funcs->check_soft_reset(adev); + adev->ip_block_status[i].hang = + adev->ip_blocks[i].funcs->check_soft_reset(adev); if (adev->ip_block_status[i].hang) { DRM_INFO("IP block:%d is hang!\n", i); asic_hang = true; @@ -2104,12 +2105,20 @@ static int amdgpu_pre_soft_reset(struct amdgpu_device *adev) static bool amdgpu_need_full_reset(struct amdgpu_device *adev) { - if (adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang || - adev->ip_block_status[AMD_IP_BLOCK_TYPE_SMC].hang || - adev->ip_block_status[AMD_IP_BLOCK_TYPE_ACP].hang || - adev->ip_block_status[AMD_IP_BLOCK_TYPE_DCE].hang) { - DRM_INFO("Some block need full reset!\n"); - return true; + int i; + + for (i = 0; i < adev->num_ip_blocks; i++) { + if (!adev->ip_block_status[i].valid) + continue; + if ((adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) || + (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_SMC) || + (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_ACP) || + (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_DCE)) { + if (adev->ip_block_status[i].hang) { + DRM_INFO("Some block need full reset!\n"); + return true; + } + } } return false; } diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c index 613ebb7ed50f..4108c686aa7c 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c @@ -3188,16 +3188,11 @@ static int dce_v10_0_wait_for_idle(void *handle) return 0; } -static int dce_v10_0_check_soft_reset(void *handle) +static bool dce_v10_0_check_soft_reset(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if (dce_v10_0_is_display_hung(adev)) - adev->ip_block_status[AMD_IP_BLOCK_TYPE_DCE].hang = true; - else - 
adev->ip_block_status[AMD_IP_BLOCK_TYPE_DCE].hang = false; - - return 0; + return dce_v10_0_is_display_hung(adev); } static int dce_v10_0_soft_reset(void *handle) @@ -3205,9 +3200,6 @@ static int dce_v10_0_soft_reset(void *handle) u32 srbm_soft_reset = 0, tmp; struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_DCE].hang) - return 0; - if (dce_v10_0_is_display_hung(adev)) srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 5b289186dc92..ee6a48a09214 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -5144,7 +5144,7 @@ static int gfx_v8_0_wait_for_idle(void *handle) return -ETIMEDOUT; } -static int gfx_v8_0_check_soft_reset(void *handle) +static bool gfx_v8_0_check_soft_reset(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; u32 grbm_soft_reset = 0, srbm_soft_reset = 0; @@ -5196,16 +5196,14 @@ static int gfx_v8_0_check_soft_reset(void *handle) SRBM_SOFT_RESET, SOFT_RESET_SEM, 1); if (grbm_soft_reset || srbm_soft_reset) { - adev->ip_block_status[AMD_IP_BLOCK_TYPE_GFX].hang = true; adev->gfx.grbm_soft_reset = grbm_soft_reset; adev->gfx.srbm_soft_reset = srbm_soft_reset; + return true; } else { - adev->ip_block_status[AMD_IP_BLOCK_TYPE_GFX].hang = false; adev->gfx.grbm_soft_reset = 0; adev->gfx.srbm_soft_reset = 0; + return false; } - - return 0; } static void gfx_v8_0_inactive_hqd(struct amdgpu_device *adev, @@ -5233,7 +5231,8 @@ static int gfx_v8_0_pre_soft_reset(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; u32 grbm_soft_reset = 0, srbm_soft_reset = 0; - if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GFX].hang) + if ((!adev->gfx.grbm_soft_reset) && + (!adev->gfx.srbm_soft_reset)) return 0; grbm_soft_reset = adev->gfx.grbm_soft_reset; @@ -5271,7 +5270,8 @@ static int gfx_v8_0_soft_reset(void *handle) u32 grbm_soft_reset = 0, srbm_soft_reset = 0; u32 tmp; - if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GFX].hang) + if ((!adev->gfx.grbm_soft_reset) && + (!adev->gfx.srbm_soft_reset)) return 0; grbm_soft_reset = adev->gfx.grbm_soft_reset; @@ -5341,7 +5341,8 @@ static int gfx_v8_0_post_soft_reset(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; u32 grbm_soft_reset = 0, srbm_soft_reset = 0; - if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GFX].hang) + if ((!adev->gfx.grbm_soft_reset) && + (!adev->gfx.srbm_soft_reset)) return 0; grbm_soft_reset = adev->gfx.grbm_soft_reset; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index 1b319f5bc696..c22ef140a542 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -1099,7 +1099,7 @@ static int gmc_v8_0_wait_for_idle(void *handle) } -static int gmc_v8_0_check_soft_reset(void *handle) +static bool gmc_v8_0_check_soft_reset(void *handle) { u32 srbm_soft_reset = 0; struct amdgpu_device *adev = (struct amdgpu_device *)handle; @@ -1116,20 +1116,19 @@ static int gmc_v8_0_check_soft_reset(void *handle) SRBM_SOFT_RESET, SOFT_RESET_MC, 1); } if (srbm_soft_reset) { - adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang = true; adev->mc.srbm_soft_reset = srbm_soft_reset; + return true; } else { - adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang = false; adev->mc.srbm_soft_reset = 0; + return false; } - return 0; } static int gmc_v8_0_pre_soft_reset(void *handle) { struct amdgpu_device *adev = (struct 
amdgpu_device *)handle; - if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang) + if (!adev->mc.srbm_soft_reset) return 0; gmc_v8_0_mc_stop(adev, &adev->mc.save); @@ -1145,7 +1144,7 @@ static int gmc_v8_0_soft_reset(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; u32 srbm_soft_reset; - if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang) + if (!adev->mc.srbm_soft_reset) return 0; srbm_soft_reset = adev->mc.srbm_soft_reset; @@ -1175,7 +1174,7 @@ static int gmc_v8_0_post_soft_reset(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang) + if (!adev->mc.srbm_soft_reset) return 0; gmc_v8_0_mc_resume(adev, &adev->mc.save); diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c index f325fd86430b..a9d10941fb53 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c @@ -1268,7 +1268,7 @@ static int sdma_v3_0_wait_for_idle(void *handle) return -ETIMEDOUT; } -static int sdma_v3_0_check_soft_reset(void *handle) +static bool sdma_v3_0_check_soft_reset(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; u32 srbm_soft_reset = 0; @@ -1281,14 +1281,12 @@ static int sdma_v3_0_check_soft_reset(void *handle) } if (srbm_soft_reset) { - adev->ip_block_status[AMD_IP_BLOCK_TYPE_SDMA].hang = true; adev->sdma.srbm_soft_reset = srbm_soft_reset; + return true; } else { - adev->ip_block_status[AMD_IP_BLOCK_TYPE_SDMA].hang = false; adev->sdma.srbm_soft_reset = 0; + return false; } - - return 0; } static int sdma_v3_0_pre_soft_reset(void *handle) @@ -1296,7 +1294,7 @@ static int sdma_v3_0_pre_soft_reset(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; u32 srbm_soft_reset = 0; - if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_SDMA].hang) + if (!adev->sdma.srbm_soft_reset) return 0; srbm_soft_reset = adev->sdma.srbm_soft_reset; @@ -1315,7 +1313,7 @@ static int sdma_v3_0_post_soft_reset(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; u32 srbm_soft_reset = 0; - if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_SDMA].hang) + if (!adev->sdma.srbm_soft_reset) return 0; srbm_soft_reset = adev->sdma.srbm_soft_reset; @@ -1335,7 +1333,7 @@ static int sdma_v3_0_soft_reset(void *handle) u32 srbm_soft_reset = 0; u32 tmp; - if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_SDMA].hang) + if (!adev->sdma.srbm_soft_reset) return 0; srbm_soft_reset = adev->sdma.srbm_soft_reset; diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c index d127d59f953a..b4ea229bb449 100644 --- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c @@ -373,7 +373,7 @@ static int tonga_ih_wait_for_idle(void *handle) return -ETIMEDOUT; } -static int tonga_ih_check_soft_reset(void *handle) +static bool tonga_ih_check_soft_reset(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; u32 srbm_soft_reset = 0; @@ -384,21 +384,19 @@ static int tonga_ih_check_soft_reset(void *handle) SOFT_RESET_IH, 1); if (srbm_soft_reset) { - adev->ip_block_status[AMD_IP_BLOCK_TYPE_IH].hang = true; adev->irq.srbm_soft_reset = srbm_soft_reset; + return true; } else { - adev->ip_block_status[AMD_IP_BLOCK_TYPE_IH].hang = false; adev->irq.srbm_soft_reset = 0; + return false; } - - return 0; } static int tonga_ih_pre_soft_reset(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if 
(!adev->ip_block_status[AMD_IP_BLOCK_TYPE_IH].hang) + if (!adev->irq.srbm_soft_reset) return 0; return tonga_ih_hw_fini(adev); @@ -408,7 +406,7 @@ static int tonga_ih_post_soft_reset(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_IH].hang) + if (!adev->irq.srbm_soft_reset) return 0; return tonga_ih_hw_init(adev); @@ -419,7 +417,7 @@ static int tonga_ih_soft_reset(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; u32 srbm_soft_reset; - if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_IH].hang) + if (!adev->irq.srbm_soft_reset) return 0; srbm_soft_reset = adev->irq.srbm_soft_reset; diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c index e0fd9f21ed95..ab3df6d75656 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c @@ -770,7 +770,7 @@ static int uvd_v6_0_wait_for_idle(void *handle) } #define AMDGPU_UVD_STATUS_BUSY_MASK 0xfd -static int uvd_v6_0_check_soft_reset(void *handle) +static bool uvd_v6_0_check_soft_reset(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; u32 srbm_soft_reset = 0; @@ -782,19 +782,19 @@ static int uvd_v6_0_check_soft_reset(void *handle) srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_UVD, 1); if (srbm_soft_reset) { - adev->ip_block_status[AMD_IP_BLOCK_TYPE_UVD].hang = true; adev->uvd.srbm_soft_reset = srbm_soft_reset; + return true; } else { - adev->ip_block_status[AMD_IP_BLOCK_TYPE_UVD].hang = false; adev->uvd.srbm_soft_reset = 0; + return false; } - return 0; } + static int uvd_v6_0_pre_soft_reset(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_UVD].hang) + if (!adev->uvd.srbm_soft_reset) return 0; uvd_v6_0_stop(adev); @@ -806,7 +806,7 @@ static int uvd_v6_0_soft_reset(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; u32 srbm_soft_reset; - if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_UVD].hang) + if (!adev->uvd.srbm_soft_reset) return 0; srbm_soft_reset = adev->uvd.srbm_soft_reset; @@ -836,7 +836,7 @@ static int uvd_v6_0_post_soft_reset(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_UVD].hang) + if (!adev->uvd.srbm_soft_reset) return 0; mdelay(5); diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c index 3f6db4ec0102..8533269ec160 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c @@ -561,7 +561,7 @@ static int vce_v3_0_wait_for_idle(void *handle) #define AMDGPU_VCE_STATUS_BUSY_MASK (VCE_STATUS_VCPU_REPORT_AUTO_BUSY_MASK | \ VCE_STATUS_VCPU_REPORT_RB0_BUSY_MASK) -static int vce_v3_0_check_soft_reset(void *handle) +static bool vce_v3_0_check_soft_reset(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; u32 srbm_soft_reset = 0; @@ -591,16 +591,15 @@ static int vce_v3_0_check_soft_reset(void *handle) srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1); } WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0); + mutex_unlock(&adev->grbm_idx_mutex); if (srbm_soft_reset) { - adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang = true; adev->vce.srbm_soft_reset = srbm_soft_reset; + return true; } else { - adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang = false; adev->vce.srbm_soft_reset = 0; + return false; } - 
mutex_unlock(&adev->grbm_idx_mutex); - return 0; } static int vce_v3_0_soft_reset(void *handle) @@ -608,7 +607,7 @@ static int vce_v3_0_soft_reset(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; u32 srbm_soft_reset; - if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang) + if (!adev->vce.srbm_soft_reset) return 0; srbm_soft_reset = adev->vce.srbm_soft_reset; @@ -638,7 +637,7 @@ static int vce_v3_0_pre_soft_reset(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang) + if (!adev->vce.srbm_soft_reset) return 0; mdelay(5); @@ -651,7 +650,7 @@ static int vce_v3_0_post_soft_reset(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang) + if (!adev->vce.srbm_soft_reset) return 0; mdelay(5); diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h index c934b78c9e2f..bec8125bceb0 100644 --- a/drivers/gpu/drm/amd/include/amd_shared.h +++ b/drivers/gpu/drm/amd/include/amd_shared.h @@ -165,7 +165,7 @@ struct amd_ip_funcs { /* poll for idle */ int (*wait_for_idle)(void *handle); /* check soft reset the IP block */ - int (*check_soft_reset)(void *handle); + bool (*check_soft_reset)(void *handle); /* pre soft reset the IP block */ int (*pre_soft_reset)(void *handle); /* soft reset the IP block */ -- cgit v1.2.3-59-g8ed1b From 3e96dbfd5899c562e08c7ff27e5d5b21bb218e8a Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 13 Oct 2016 11:22:17 -0400 Subject: drm/amdgpu: disable smu hw first on tear down Otherwise, you can't disable dpm. Tested-by and Reviewed-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index fda0e57c5e0f..c5e6fc580662 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1437,6 +1437,30 @@ static int amdgpu_fini(struct amdgpu_device *adev) { int i, r; + /* need to disable SMC first */ + for (i = 0; i < adev->num_ip_blocks; i++) { + if (!adev->ip_block_status[i].hw) + continue; + if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_SMC) { + /* ungate blocks before hw fini so that we can shutdown the blocks safely */ + r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev, + AMD_CG_STATE_UNGATE); + if (r) { + DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n", + adev->ip_blocks[i].funcs->name, r); + return r; + } + r = adev->ip_blocks[i].funcs->hw_fini((void *)adev); + /* XXX handle errors */ + if (r) { + DRM_DEBUG("hw_fini of IP block <%s> failed %d\n", + adev->ip_blocks[i].funcs->name, r); + } + adev->ip_block_status[i].hw = false; + break; + } + } + for (i = adev->num_ip_blocks - 1; i >= 0; i--) { if (!adev->ip_block_status[i].hw) continue; -- cgit v1.2.3-59-g8ed1b From ca3d28de6268e75739f0c5d40ec6010b1d84f03a Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 13 Oct 2016 10:08:00 -0400 Subject: drm/amdgpu/powerplay: implement thermal sensor for CZ/ST Add missing functionality. 
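As a rough illustration of the decode being added (a minimal sketch, not the driver code itself: cz_decode_temp() and the raw value 776 are hypothetical, and it assumes the usual PP_TEMPERATURE_UNITS_PER_CENTIGRADES scale of 1000, i.e. millidegrees Celsius):

    /* CUR_TEMP is reported in 1/8 degree C steps; when CUR_TEMP_RANGE_SEL
     * is set the reading carries a 49 degree C offset, mirroring the
     * conversion in the diff below.
     */
    static int cz_decode_temp(unsigned int cur_temp, bool range_sel)
    {
            int degrees = cur_temp / 8;

            if (range_sel)
                    degrees -= 49;

            return degrees * 1000;  /* millidegrees C */
    }

    /* Example: cur_temp = 776 with range_sel set -> (97 - 49) * 1000 = 48000 */
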
Reviewed-by: Tom St Denis Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c index 7e4fcbbbe086..960424913496 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c @@ -1785,6 +1785,21 @@ static int cz_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_c return 0; } +static int cz_thermal_get_temperature(struct pp_hwmgr *hwmgr) +{ + int actual_temp = 0; + uint32_t val = cgs_read_ind_register(hwmgr->device, + CGS_IND_REG__SMC, ixTHM_TCON_CUR_TMP); + uint32_t temp = PHM_GET_FIELD(val, THM_TCON_CUR_TMP, CUR_TEMP); + + if (PHM_GET_FIELD(val, THM_TCON_CUR_TMP, CUR_TEMP_RANGE_SEL)) + actual_temp = ((temp / 8) - 49) * PP_TEMPERATURE_UNITS_PER_CENTIGRADES; + else + actual_temp = (temp / 8) * PP_TEMPERATURE_UNITS_PER_CENTIGRADES; + + return actual_temp; +} + static int cz_read_sensor(struct pp_hwmgr *hwmgr, int idx, int32_t *value) { struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); @@ -1881,6 +1896,9 @@ static int cz_read_sensor(struct pp_hwmgr *hwmgr, int idx, int32_t *value) case AMDGPU_PP_SENSOR_VCE_POWER: *value = cz_hwmgr->vce_power_gated ? 0 : 1; return 0; + case AMDGPU_PP_SENSOR_GPU_TEMP: + *value = cz_thermal_get_temperature(hwmgr); + return 0; default: return -EINVAL; } -- cgit v1.2.3-59-g8ed1b From 154061db8853035deb258b685b8a791cf97f023f Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 13 Oct 2016 09:55:47 -0400 Subject: drm/amdgpu/dpm: implement thermal sensor for CZ/ST Previous code was just a copy/paste from KV. Reviewed-by: Tom St Denis Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/cz_dpm.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c index f80a0834e889..3c082e143730 100644 --- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c @@ -1514,14 +1514,16 @@ static int cz_dpm_set_powergating_state(void *handle, return 0; } -/* borrowed from KV, need future unify */ static int cz_dpm_get_temperature(struct amdgpu_device *adev) { int actual_temp = 0; - uint32_t temp = RREG32_SMC(0xC0300E0C); + uint32_t val = RREG32_SMC(ixTHM_TCON_CUR_TMP); + uint32_t temp = REG_GET_FIELD(val, THM_TCON_CUR_TMP, CUR_TEMP); - if (temp) + if (REG_GET_FIELD(val, THM_TCON_CUR_TMP, CUR_TEMP_RANGE_SEL)) actual_temp = 1000 * ((temp / 8) - 49); + else + actual_temp = 1000 * (temp / 8); return actual_temp; } -- cgit v1.2.3-59-g8ed1b From dcb2ff56417362c31f6b430c3c531a84581e8721 Mon Sep 17 00:00:00 2001 From: Heinz Mauelshagen Date: Mon, 10 Oct 2016 17:58:32 +0200 Subject: dm mirror: fix read error on recovery after default leg failure If a default leg has failed, any read will cause a new operational default leg to be selected and the read is resubmitted. But until now the read will return failure even though it was successful due to resubmission. The reason for this is bio->bi_error was not being cleared before resubmitting the bio. Fix by clearing bio->bi_error before resubmission. 
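The effect is easiest to see in a tiny standalone C model (illustration only; struct fake_bio and resubmit_read() are made-up stand-ins for struct bio and the dm-raid1 requeue path):

#include <stdio.h>

/* Minimal stand-in for the only field that matters here. */
struct fake_bio {
	int bi_error;
};

/* Model of resubmitting a failed read to another mirror leg. */
static void resubmit_read(struct fake_bio *bio, int clear_error_first)
{
	if (clear_error_first)
		bio->bi_error = 0;	/* what the patch adds before queue_bio() */
	/* the retried read succeeds and, on success, never writes bi_error */
}

int main(void)
{
	struct fake_bio bio = { .bi_error = -5 };	/* -EIO left by the failed leg */

	resubmit_read(&bio, 0);
	printf("without reset: completion still sees %d\n", bio.bi_error);

	bio.bi_error = -5;
	resubmit_read(&bio, 1);
	printf("with reset:    completion sees %d\n", bio.bi_error);

	return 0;
}
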
Fixes: 4246a0b63bd8 ("block: add a bi_error field to struct bio") Cc: stable@vger.kernel.org # 4.3+ Signed-off-by: Heinz Mauelshagen Signed-off-by: Mike Snitzer --- drivers/md/dm-raid1.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c index bdf1606f67bc..7a6254d54baf 100644 --- a/drivers/md/dm-raid1.c +++ b/drivers/md/dm-raid1.c @@ -1292,6 +1292,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error) dm_bio_restore(bd, bio); bio_record->details.bi_bdev = NULL; + bio->bi_error = 0; queue_bio(ms, bio, rw); return DM_ENDIO_INCOMPLETE; -- cgit v1.2.3-59-g8ed1b From 12a7cf5ba6c776a2621d8972c7d42e8d3d959d20 Mon Sep 17 00:00:00 2001 From: Heinz Mauelshagen Date: Mon, 10 Oct 2016 18:48:06 +0200 Subject: dm mirror: use all available legs on multiple failures When any leg(s) have failed, any read will cause a new operational default leg to be selected and the read is resubmitted to it. If that new default leg fails the read too, no other still accessible legs are used to resubmit the read again -- thus failing the io. Fix by allowing the read to get resubmitted until all operational legs have been exhausted. Also, remove any details.bi_dev use as a flag. Signed-off-by: Heinz Mauelshagen Signed-off-by: Mike Snitzer --- drivers/md/dm-raid1.c | 21 ++------------------- 1 file changed, 2 insertions(+), 19 deletions(-) (limited to 'drivers') diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c index 7a6254d54baf..9a8b71067c6e 100644 --- a/drivers/md/dm-raid1.c +++ b/drivers/md/dm-raid1.c @@ -145,7 +145,6 @@ static void dispatch_bios(void *context, struct bio_list *bio_list) struct dm_raid1_bio_record { struct mirror *m; - /* if details->bi_bdev == NULL, details were not saved */ struct dm_bio_details details; region_t write_region; }; @@ -1200,8 +1199,6 @@ static int mirror_map(struct dm_target *ti, struct bio *bio) struct dm_raid1_bio_record *bio_record = dm_per_bio_data(bio, sizeof(struct dm_raid1_bio_record)); - bio_record->details.bi_bdev = NULL; - if (rw == WRITE) { /* Save region for mirror_end_io() handler */ bio_record->write_region = dm_rh_bio_to_region(ms->rh, bio); @@ -1260,22 +1257,12 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error) } if (error == -EOPNOTSUPP) - goto out; + return error; if ((error == -EWOULDBLOCK) && (bio->bi_opf & REQ_RAHEAD)) - goto out; + return error; if (unlikely(error)) { - if (!bio_record->details.bi_bdev) { - /* - * There wasn't enough memory to record necessary - * information for a retry or there was no other - * mirror in-sync. - */ - DMERR_LIMIT("Mirror read failed."); - return -EIO; - } - m = bio_record->m; DMERR("Mirror read failed from %s. Trying alternative device.", @@ -1291,7 +1278,6 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error) bd = &bio_record->details; dm_bio_restore(bd, bio); - bio_record->details.bi_bdev = NULL; bio->bi_error = 0; queue_bio(ms, bio, rw); @@ -1300,9 +1286,6 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error) DMERR("All replicated volumes dead, failing I/O"); } -out: - bio_record->details.bi_bdev = NULL; - return error; } -- cgit v1.2.3-59-g8ed1b From 36c285c5335931d2c67b79e0d0f4e873c858c50d Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Thu, 13 Oct 2016 15:25:09 +0800 Subject: drm/amd/powerplay: notify smu no display by default. 
Signed-off-by: Rex Zhu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index e7fb8e95436b..1c73ac7c24ba 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c @@ -1226,7 +1226,7 @@ int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to enable VR hot GPIO interrupt!", result = tmp_result); - smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)PPSMC_HasDisplay); + smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)PPSMC_NoDisplay); tmp_result = smu7_enable_sclk_control(hwmgr); PP_ASSERT_WITH_CODE((0 == tmp_result), -- cgit v1.2.3-59-g8ed1b From f28a9b65c9e3697ba8d2ab371fae4fea15638676 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Thu, 13 Oct 2016 15:24:12 +0800 Subject: drm/amd/powerplay: fix bug stop dpm can't work on Vi. Signed-off-by: Rex Zhu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- .../drm/amd/powerplay/eventmgr/eventactionchains.c | 1 + drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 39 +++++++++++++--------- 2 files changed, 25 insertions(+), 15 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c index 92b117843875..8cee4e0f9fde 100644 --- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c +++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c @@ -49,6 +49,7 @@ static const pem_event_action * const uninitialize_event[] = { uninitialize_display_phy_access_tasks, disable_gfx_voltage_island_power_gating_tasks, disable_gfx_clock_gating_tasks, + uninitialize_thermal_controller_tasks, set_boot_state_tasks, adjust_power_state_tasks, disable_dynamic_state_management_tasks, diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index 1c73ac7c24ba..609996c84ad5 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c @@ -1030,20 +1030,19 @@ static int smu7_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); /* disable SCLK dpm */ - if (!data->sclk_dpm_key_disabled) - PP_ASSERT_WITH_CODE( - (smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_DPM_Disable) == 0), - "Failed to disable SCLK DPM!", - return -EINVAL); + if (!data->sclk_dpm_key_disabled) { + PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr), + "Trying to disable SCLK DPM when DPM is disabled", + return 0); + smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DPM_Disable); + } /* disable MCLK dpm */ if (!data->mclk_dpm_key_disabled) { - PP_ASSERT_WITH_CODE( - (smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_MCLKDPM_Disable) == 0), - "Failed to disable MCLK DPM!", - return -EINVAL); + PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr), + "Trying to disable MCLK DPM when DPM is disabled", + return 0); + smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_MCLKDPM_Disable); } return 0; @@ -1069,10 +1068,13 @@ static int smu7_stop_dpm(struct pp_hwmgr *hwmgr) return -EINVAL); } - if (smu7_disable_sclk_mclk_dpm(hwmgr)) { - printk(KERN_ERR "Failed to disable Sclk DPM and Mclk DPM!"); - return -EINVAL; - } + smu7_disable_sclk_mclk_dpm(hwmgr); + + PP_ASSERT_WITH_CODE(true == 
smum_is_dpm_running(hwmgr), + "Trying to disable voltage DPM when DPM is disabled", + return 0); + + smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Voltage_Cntl_Disable); return 0; } @@ -1306,6 +1308,12 @@ int smu7_disable_dpm_tasks(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE((tmp_result == 0), "Failed to disable thermal auto throttle!", result = tmp_result); + if (1 == PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON)) { + PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DisableAvfs)), + "Failed to disable AVFS!", + return -EINVAL); + } + tmp_result = smu7_stop_dpm(hwmgr); PP_ASSERT_WITH_CODE((tmp_result == 0), "Failed to stop DPM!", result = tmp_result); @@ -4328,6 +4336,7 @@ static const struct pp_hwmgr_func smu7_hwmgr_funcs = { .set_mclk_od = smu7_set_mclk_od, .get_clock_by_type = smu7_get_clock_by_type, .read_sensor = smu7_read_sensor, + .dynamic_state_management_disable = smu7_disable_dpm_tasks, }; uint8_t smu7_get_sleep_divider_id_from_clock(uint32_t clock, -- cgit v1.2.3-59-g8ed1b From 2e0cbc4dd077aea4f1693583fd68eaed4d60464b Mon Sep 17 00:00:00 2001 From: Ram Amrani Date: Mon, 10 Oct 2016 13:15:30 +0300 Subject: qedr: Add RoCE driver framework Adds a skeletal implementation of the qed* RoCE driver - basically the ability to communicate with the qede driver and receive notifications from it regarding various init/exit events. Signed-off-by: Rajesh Borundia Signed-off-by: Ram Amrani Signed-off-by: Doug Ledford --- drivers/infiniband/Kconfig | 2 + drivers/infiniband/hw/Makefile | 1 + drivers/infiniband/hw/qedr/Kconfig | 7 + drivers/infiniband/hw/qedr/Makefile | 3 + drivers/infiniband/hw/qedr/main.c | 254 ++++++++++++++++++++++++++++++++++++ drivers/infiniband/hw/qedr/qedr.h | 61 +++++++++ drivers/net/ethernet/qlogic/Kconfig | 11 -- include/uapi/linux/pci_regs.h | 3 + 8 files changed, 331 insertions(+), 11 deletions(-) create mode 100644 drivers/infiniband/hw/qedr/Kconfig create mode 100644 drivers/infiniband/hw/qedr/Makefile create mode 100644 drivers/infiniband/hw/qedr/main.c create mode 100644 drivers/infiniband/hw/qedr/qedr.h (limited to 'drivers') diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig index 19a418a1b631..fb3fb89640e5 100644 --- a/drivers/infiniband/Kconfig +++ b/drivers/infiniband/Kconfig @@ -89,4 +89,6 @@ source "drivers/infiniband/sw/rxe/Kconfig" source "drivers/infiniband/hw/hfi1/Kconfig" +source "drivers/infiniband/hw/qedr/Kconfig" + endif # INFINIBAND diff --git a/drivers/infiniband/hw/Makefile b/drivers/infiniband/hw/Makefile index 21fe401ff178..e7a5ed9f6f3f 100644 --- a/drivers/infiniband/hw/Makefile +++ b/drivers/infiniband/hw/Makefile @@ -10,3 +10,4 @@ obj-$(CONFIG_INFINIBAND_OCRDMA) += ocrdma/ obj-$(CONFIG_INFINIBAND_USNIC) += usnic/ obj-$(CONFIG_INFINIBAND_HFI1) += hfi1/ obj-$(CONFIG_INFINIBAND_HNS) += hns/ +obj-$(CONFIG_INFINIBAND_QEDR) += qedr/ diff --git a/drivers/infiniband/hw/qedr/Kconfig b/drivers/infiniband/hw/qedr/Kconfig new file mode 100644 index 000000000000..7c06d85568d4 --- /dev/null +++ b/drivers/infiniband/hw/qedr/Kconfig @@ -0,0 +1,7 @@ +config INFINIBAND_QEDR + tristate "QLogic RoCE driver" + depends on 64BIT && QEDE + select QED_LL2 + ---help--- + This driver provides low-level InfiniBand over Ethernet + support for QLogic QED host channel adapters (HCAs). 
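Before the individual files, a condensed view of the qede <-> qedr hand-off that this skeleton is built around; this is a trimmed excerpt of the main.c code added further down, with editorial comments, not additional code:

/* The RoCE driver registers a set of callbacks with the qede NIC driver. */
static struct qedr_driver qedr_drv = {
	.name	= "qedr_driver",
	.add	= qedr_add,	/* a qede NIC appeared: allocate and register the ib_device */
	.remove	= qedr_remove,	/* NIC going away: remove sysfs files, free the ib_device */
	.notify	= qedr_notify,	/* QEDE_UP/DOWN/CLOSE/CHANGE_ADDR notifications */
};

static int __init qedr_init_module(void)
{
	return qede_roce_register_driver(&qedr_drv);
}
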
diff --git a/drivers/infiniband/hw/qedr/Makefile b/drivers/infiniband/hw/qedr/Makefile new file mode 100644 index 000000000000..3a5b7a288281 --- /dev/null +++ b/drivers/infiniband/hw/qedr/Makefile @@ -0,0 +1,3 @@ +obj-$(CONFIG_INFINIBAND_QEDR) := qedr.o + +qedr-y := main.o diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c new file mode 100644 index 000000000000..7387d029a35b --- /dev/null +++ b/drivers/infiniband/hw/qedr/main.c @@ -0,0 +1,254 @@ +/* QLogic qedr NIC Driver + * Copyright (c) 2015-2016 QLogic Corporation + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include "qedr.h" + +MODULE_DESCRIPTION("QLogic 40G/100G ROCE Driver"); +MODULE_AUTHOR("QLogic Corporation"); +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_VERSION(QEDR_MODULE_VERSION); + +void qedr_ib_dispatch_event(struct qedr_dev *dev, u8 port_num, + enum ib_event_type type) +{ + struct ib_event ibev; + + ibev.device = &dev->ibdev; + ibev.element.port_num = port_num; + ibev.event = type; + + ib_dispatch_event(&ibev); +} + +static enum rdma_link_layer qedr_link_layer(struct ib_device *device, + u8 port_num) +{ + return IB_LINK_LAYER_ETHERNET; +} + +static int qedr_register_device(struct qedr_dev *dev) +{ + strlcpy(dev->ibdev.name, "qedr%d", IB_DEVICE_NAME_MAX); + + memcpy(dev->ibdev.node_desc, QEDR_NODE_DESC, sizeof(QEDR_NODE_DESC)); + dev->ibdev.owner = THIS_MODULE; + + dev->ibdev.get_link_layer = qedr_link_layer; + + return 0; +} + +/* QEDR sysfs interface */ +static ssize_t show_rev(struct device *device, struct device_attribute *attr, + char *buf) +{ + struct qedr_dev *dev = dev_get_drvdata(device); + + return scnprintf(buf, PAGE_SIZE, "0x%x\n", dev->pdev->vendor); +} + +static ssize_t show_hca_type(struct device *device, + struct device_attribute *attr, char *buf) +{ + return scnprintf(buf, PAGE_SIZE, "%s\n", "HCA_TYPE_TO_SET"); +} + +static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL); +static DEVICE_ATTR(hca_type, S_IRUGO, show_hca_type, NULL); + +static struct device_attribute *qedr_attributes[] = { + &dev_attr_hw_rev, + &dev_attr_hca_type +}; + +static void qedr_remove_sysfiles(struct qedr_dev *dev) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(qedr_attributes); i++) + device_remove_file(&dev->ibdev.dev, qedr_attributes[i]); +} + +static void qedr_pci_set_atomic(struct qedr_dev *dev, struct pci_dev *pdev) +{ + struct pci_dev *bridge; + u32 val; + + dev->atomic_cap = IB_ATOMIC_NONE; + + bridge = pdev->bus->self; + if (!bridge) + return; + + /* Check whether we are connected directly or via a switch */ + while (bridge && bridge->bus->parent) { + DP_DEBUG(dev, QEDR_MSG_INIT, + "Device is not connected directly to root. bridge->bus->number=%d primary=%d\n", + bridge->bus->number, bridge->bus->primary); + /* Need to check Atomic Op Routing Supported all the way to + * root complex. 
+ */ + pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &val); + if (!(val & PCI_EXP_DEVCAP2_ATOMIC_ROUTE)) { + pcie_capability_clear_word(pdev, + PCI_EXP_DEVCTL2, + PCI_EXP_DEVCTL2_ATOMIC_REQ); + return; + } + bridge = bridge->bus->parent->self; + } + bridge = pdev->bus->self; + + /* according to bridge capability */ + pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &val); + if (val & PCI_EXP_DEVCAP2_ATOMIC_COMP64) { + pcie_capability_set_word(pdev, PCI_EXP_DEVCTL2, + PCI_EXP_DEVCTL2_ATOMIC_REQ); + dev->atomic_cap = IB_ATOMIC_GLOB; + } else { + pcie_capability_clear_word(pdev, PCI_EXP_DEVCTL2, + PCI_EXP_DEVCTL2_ATOMIC_REQ); + } +} + +static struct qedr_dev *qedr_add(struct qed_dev *cdev, struct pci_dev *pdev, + struct net_device *ndev) +{ + struct qedr_dev *dev; + int rc = 0, i; + + dev = (struct qedr_dev *)ib_alloc_device(sizeof(*dev)); + if (!dev) { + pr_err("Unable to allocate ib device\n"); + return NULL; + } + + DP_DEBUG(dev, QEDR_MSG_INIT, "qedr add device called\n"); + + dev->pdev = pdev; + dev->ndev = ndev; + dev->cdev = cdev; + + qedr_pci_set_atomic(dev, pdev); + + rc = qedr_register_device(dev); + if (rc) { + DP_ERR(dev, "Unable to allocate register device\n"); + goto init_err; + } + + for (i = 0; i < ARRAY_SIZE(qedr_attributes); i++) + if (device_create_file(&dev->ibdev.dev, qedr_attributes[i])) + goto init_err; + + DP_DEBUG(dev, QEDR_MSG_INIT, "qedr driver loaded successfully\n"); + return dev; + +init_err: + ib_dealloc_device(&dev->ibdev); + DP_ERR(dev, "qedr driver load failed rc=%d\n", rc); + + return NULL; +} + +static void qedr_remove(struct qedr_dev *dev) +{ + /* First unregister with stack to stop all the active traffic + * of the registered clients. + */ + qedr_remove_sysfiles(dev); + + ib_dealloc_device(&dev->ibdev); +} + +static int qedr_close(struct qedr_dev *dev) +{ + qedr_ib_dispatch_event(dev, 1, IB_EVENT_PORT_ERR); + + return 0; +} + +static void qedr_shutdown(struct qedr_dev *dev) +{ + qedr_close(dev); + qedr_remove(dev); +} + +/* event handling via NIC driver ensures that all the NIC specific + * initialization done before RoCE driver notifies + * event to stack. + */ +static void qedr_notify(struct qedr_dev *dev, enum qede_roce_event event) +{ + switch (event) { + case QEDE_UP: + qedr_ib_dispatch_event(dev, 1, IB_EVENT_PORT_ACTIVE); + break; + case QEDE_DOWN: + qedr_close(dev); + break; + case QEDE_CLOSE: + qedr_shutdown(dev); + break; + case QEDE_CHANGE_ADDR: + qedr_ib_dispatch_event(dev, 1, IB_EVENT_GID_CHANGE); + break; + default: + pr_err("Event not supported\n"); + } +} + +static struct qedr_driver qedr_drv = { + .name = "qedr_driver", + .add = qedr_add, + .remove = qedr_remove, + .notify = qedr_notify, +}; + +static int __init qedr_init_module(void) +{ + return qede_roce_register_driver(&qedr_drv); +} + +static void __exit qedr_exit_module(void) +{ + qede_roce_unregister_driver(&qedr_drv); +} + +module_init(qedr_init_module); +module_exit(qedr_exit_module); diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h new file mode 100644 index 000000000000..801417080f11 --- /dev/null +++ b/drivers/infiniband/hw/qedr/qedr.h @@ -0,0 +1,61 @@ +/* QLogic qedr NIC Driver + * Copyright (c) 2015-2016 QLogic Corporation + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef __QEDR_H__ +#define __QEDR_H__ + +#include +#include +#include +#include + +#define QEDR_MODULE_VERSION "8.10.10.0" +#define QEDR_NODE_DESC "QLogic 579xx RoCE HCA" +#define DP_NAME(dev) ((dev)->ibdev.name) + +#define DP_DEBUG(dev, module, fmt, ...) \ + pr_debug("(%s) " module ": " fmt, \ + DP_NAME(dev) ? DP_NAME(dev) : "", ## __VA_ARGS__) + +#define QEDR_MSG_INIT "INIT" + +struct qedr_dev { + struct ib_device ibdev; + struct qed_dev *cdev; + struct pci_dev *pdev; + struct net_device *ndev; + + enum ib_atomic_cap atomic_cap; + + u32 dp_module; + u8 dp_level; +}; +#endif diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig index 0df1391f9663..1e8339a67f6e 100644 --- a/drivers/net/ethernet/qlogic/Kconfig +++ b/drivers/net/ethernet/qlogic/Kconfig @@ -107,15 +107,4 @@ config QEDE ---help--- This enables the support for ... -config INFINIBAND_QEDR - tristate "QLogic qede RoCE sources [debug]" - depends on QEDE && 64BIT - select QED_LL2 - default n - ---help--- - This provides a temporary node that allows the compilation - and logical testing of the InfiniBand over Ethernet support - for QLogic QED. This would be replaced by the 'real' option - once the QEDR driver is added [+relocated]. 
- endif # NET_VENDOR_QLOGIC diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h index d812172d1d7b..e5a2e68b2236 100644 --- a/include/uapi/linux/pci_regs.h +++ b/include/uapi/linux/pci_regs.h @@ -612,6 +612,8 @@ */ #define PCI_EXP_DEVCAP2 36 /* Device Capabilities 2 */ #define PCI_EXP_DEVCAP2_ARI 0x00000020 /* Alternative Routing-ID */ +#define PCI_EXP_DEVCAP2_ATOMIC_ROUTE 0x00000040 /* Atomic Op routing */ +#define PCI_EXP_DEVCAP2_ATOMIC_COMP64 0x00000100 /* Atomic 64-bit compare */ #define PCI_EXP_DEVCAP2_LTR 0x00000800 /* Latency tolerance reporting */ #define PCI_EXP_DEVCAP2_OBFF_MASK 0x000c0000 /* OBFF support mechanism */ #define PCI_EXP_DEVCAP2_OBFF_MSG 0x00040000 /* New message signaling */ @@ -619,6 +621,7 @@ #define PCI_EXP_DEVCTL2 40 /* Device Control 2 */ #define PCI_EXP_DEVCTL2_COMP_TIMEOUT 0x000f /* Completion Timeout Value */ #define PCI_EXP_DEVCTL2_ARI 0x0020 /* Alternative Routing-ID */ +#define PCI_EXP_DEVCTL2_ATOMIC_REQ 0x0040 /* Set Atomic requests */ #define PCI_EXP_DEVCTL2_IDO_REQ_EN 0x0100 /* Allow IDO for requests */ #define PCI_EXP_DEVCTL2_IDO_CMP_EN 0x0200 /* Allow IDO for completions */ #define PCI_EXP_DEVCTL2_LTR_EN 0x0400 /* Enable LTR mechanism */ -- cgit v1.2.3-59-g8ed1b From ec72fce401c6dc6fc89c49f30dc2c67920c4d5bf Mon Sep 17 00:00:00 2001 From: Ram Amrani Date: Mon, 10 Oct 2016 13:15:31 +0300 Subject: qedr: Add support for RoCE HW init Allocate and setup RoCE resources, interrupts and completion queues. Adds device attributes. Signed-off-by: Rajesh Borundia Signed-off-by: Ram Amrani Signed-off-by: Doug Ledford --- drivers/infiniband/hw/qedr/main.c | 418 ++++++++++++++++++++++++++++- drivers/infiniband/hw/qedr/qedr.h | 123 +++++++++ drivers/infiniband/hw/qedr/qedr_hsi.h | 56 ++++ drivers/infiniband/hw/qedr/qedr_hsi_rdma.h | 96 +++++++ 4 files changed, 691 insertions(+), 2 deletions(-) create mode 100644 drivers/infiniband/hw/qedr/qedr_hsi.h create mode 100644 drivers/infiniband/hw/qedr/qedr_hsi_rdma.h (limited to 'drivers') diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c index 7387d029a35b..67805941d138 100644 --- a/drivers/infiniband/hw/qedr/main.c +++ b/drivers/infiniband/hw/qedr/main.c @@ -36,6 +36,8 @@ #include #include #include +#include +#include #include "qedr.h" MODULE_DESCRIPTION("QLogic 40G/100G ROCE Driver"); @@ -61,6 +63,17 @@ static enum rdma_link_layer qedr_link_layer(struct ib_device *device, return IB_LINK_LAYER_ETHERNET; } +static void qedr_get_dev_fw_str(struct ib_device *ibdev, char *str, + size_t str_len) +{ + struct qedr_dev *qedr = get_qedr_dev(ibdev); + u32 fw_ver = (u32)qedr->attr.fw_ver; + + snprintf(str, str_len, "%d. %d. %d. 
%d", + (fw_ver >> 24) & 0xFF, (fw_ver >> 16) & 0xFF, + (fw_ver >> 8) & 0xFF, fw_ver & 0xFF); +} + static int qedr_register_device(struct qedr_dev *dev) { strlcpy(dev->ibdev.name, "qedr%d", IB_DEVICE_NAME_MAX); @@ -69,10 +82,139 @@ static int qedr_register_device(struct qedr_dev *dev) dev->ibdev.owner = THIS_MODULE; dev->ibdev.get_link_layer = qedr_link_layer; + dev->ibdev.get_dev_fw_str = qedr_get_dev_fw_str; return 0; } +/* This function allocates fast-path status block memory */ +static int qedr_alloc_mem_sb(struct qedr_dev *dev, + struct qed_sb_info *sb_info, u16 sb_id) +{ + struct status_block *sb_virt; + dma_addr_t sb_phys; + int rc; + + sb_virt = dma_alloc_coherent(&dev->pdev->dev, + sizeof(*sb_virt), &sb_phys, GFP_KERNEL); + if (!sb_virt) + return -ENOMEM; + + rc = dev->ops->common->sb_init(dev->cdev, sb_info, + sb_virt, sb_phys, sb_id, + QED_SB_TYPE_CNQ); + if (rc) { + pr_err("Status block initialization failed\n"); + dma_free_coherent(&dev->pdev->dev, sizeof(*sb_virt), + sb_virt, sb_phys); + return rc; + } + + return 0; +} + +static void qedr_free_mem_sb(struct qedr_dev *dev, + struct qed_sb_info *sb_info, int sb_id) +{ + if (sb_info->sb_virt) { + dev->ops->common->sb_release(dev->cdev, sb_info, sb_id); + dma_free_coherent(&dev->pdev->dev, sizeof(*sb_info->sb_virt), + (void *)sb_info->sb_virt, sb_info->sb_phys); + } +} + +static void qedr_free_resources(struct qedr_dev *dev) +{ + int i; + + for (i = 0; i < dev->num_cnq; i++) { + qedr_free_mem_sb(dev, &dev->sb_array[i], dev->sb_start + i); + dev->ops->common->chain_free(dev->cdev, &dev->cnq_array[i].pbl); + } + + kfree(dev->cnq_array); + kfree(dev->sb_array); + kfree(dev->sgid_tbl); +} + +static int qedr_alloc_resources(struct qedr_dev *dev) +{ + struct qedr_cnq *cnq; + __le16 *cons_pi; + u16 n_entries; + int i, rc; + + dev->sgid_tbl = kzalloc(sizeof(union ib_gid) * + QEDR_MAX_SGID, GFP_KERNEL); + if (!dev->sgid_tbl) + return -ENOMEM; + + spin_lock_init(&dev->sgid_lock); + + /* Allocate Status blocks for CNQ */ + dev->sb_array = kcalloc(dev->num_cnq, sizeof(*dev->sb_array), + GFP_KERNEL); + if (!dev->sb_array) { + rc = -ENOMEM; + goto err1; + } + + dev->cnq_array = kcalloc(dev->num_cnq, + sizeof(*dev->cnq_array), GFP_KERNEL); + if (!dev->cnq_array) { + rc = -ENOMEM; + goto err2; + } + + dev->sb_start = dev->ops->rdma_get_start_sb(dev->cdev); + + /* Allocate CNQ PBLs */ + n_entries = min_t(u32, QED_RDMA_MAX_CNQ_SIZE, QEDR_ROCE_MAX_CNQ_SIZE); + for (i = 0; i < dev->num_cnq; i++) { + cnq = &dev->cnq_array[i]; + + rc = qedr_alloc_mem_sb(dev, &dev->sb_array[i], + dev->sb_start + i); + if (rc) + goto err3; + + rc = dev->ops->common->chain_alloc(dev->cdev, + QED_CHAIN_USE_TO_CONSUME, + QED_CHAIN_MODE_PBL, + QED_CHAIN_CNT_TYPE_U16, + n_entries, + sizeof(struct regpair *), + &cnq->pbl); + if (rc) + goto err4; + + cnq->dev = dev; + cnq->sb = &dev->sb_array[i]; + cons_pi = dev->sb_array[i].sb_virt->pi_array; + cnq->hw_cons_ptr = &cons_pi[QED_ROCE_PROTOCOL_INDEX]; + cnq->index = i; + sprintf(cnq->name, "qedr%d@pci:%s", i, pci_name(dev->pdev)); + + DP_DEBUG(dev, QEDR_MSG_INIT, "cnq[%d].cons=%d\n", + i, qed_chain_get_cons_idx(&cnq->pbl)); + } + + return 0; +err4: + qedr_free_mem_sb(dev, &dev->sb_array[i], dev->sb_start + i); +err3: + for (--i; i >= 0; i--) { + dev->ops->common->chain_free(dev->cdev, &dev->cnq_array[i].pbl); + qedr_free_mem_sb(dev, &dev->sb_array[i], dev->sb_start + i); + } + kfree(dev->cnq_array); +err2: + kfree(dev->sb_array); +err1: + kfree(dev->sgid_tbl); + return rc; +} + /* QEDR sysfs interface */ static ssize_t 
show_rev(struct device *device, struct device_attribute *attr, char *buf) @@ -146,9 +288,240 @@ static void qedr_pci_set_atomic(struct qedr_dev *dev, struct pci_dev *pdev) } } +static const struct qed_rdma_ops *qed_ops; + +#define HILO_U64(hi, lo) ((((u64)(hi)) << 32) + (lo)) + +static irqreturn_t qedr_irq_handler(int irq, void *handle) +{ + u16 hw_comp_cons, sw_comp_cons; + struct qedr_cnq *cnq = handle; + + qed_sb_ack(cnq->sb, IGU_INT_DISABLE, 0); + + qed_sb_update_sb_idx(cnq->sb); + + hw_comp_cons = le16_to_cpu(*cnq->hw_cons_ptr); + sw_comp_cons = qed_chain_get_cons_idx(&cnq->pbl); + + /* Align protocol-index and chain reads */ + rmb(); + + while (sw_comp_cons != hw_comp_cons) { + sw_comp_cons = qed_chain_get_cons_idx(&cnq->pbl); + cnq->n_comp++; + } + + qed_ops->rdma_cnq_prod_update(cnq->dev->rdma_ctx, cnq->index, + sw_comp_cons); + + qed_sb_ack(cnq->sb, IGU_INT_ENABLE, 1); + + return IRQ_HANDLED; +} + +static void qedr_sync_free_irqs(struct qedr_dev *dev) +{ + u32 vector; + int i; + + for (i = 0; i < dev->int_info.used_cnt; i++) { + if (dev->int_info.msix_cnt) { + vector = dev->int_info.msix[i * dev->num_hwfns].vector; + synchronize_irq(vector); + free_irq(vector, &dev->cnq_array[i]); + } + } + + dev->int_info.used_cnt = 0; +} + +static int qedr_req_msix_irqs(struct qedr_dev *dev) +{ + int i, rc = 0; + + if (dev->num_cnq > dev->int_info.msix_cnt) { + DP_ERR(dev, + "Interrupt mismatch: %d CNQ queues > %d MSI-x vectors\n", + dev->num_cnq, dev->int_info.msix_cnt); + return -EINVAL; + } + + for (i = 0; i < dev->num_cnq; i++) { + rc = request_irq(dev->int_info.msix[i * dev->num_hwfns].vector, + qedr_irq_handler, 0, dev->cnq_array[i].name, + &dev->cnq_array[i]); + if (rc) { + DP_ERR(dev, "Request cnq %d irq failed\n", i); + qedr_sync_free_irqs(dev); + } else { + DP_DEBUG(dev, QEDR_MSG_INIT, + "Requested cnq irq for %s [entry %d]. 
Cookie is at %p\n", + dev->cnq_array[i].name, i, + &dev->cnq_array[i]); + dev->int_info.used_cnt++; + } + } + + return rc; +} + +static int qedr_setup_irqs(struct qedr_dev *dev) +{ + int rc; + + DP_DEBUG(dev, QEDR_MSG_INIT, "qedr_setup_irqs\n"); + + /* Learn Interrupt configuration */ + rc = dev->ops->rdma_set_rdma_int(dev->cdev, dev->num_cnq); + if (rc < 0) + return rc; + + rc = dev->ops->rdma_get_rdma_int(dev->cdev, &dev->int_info); + if (rc) { + DP_DEBUG(dev, QEDR_MSG_INIT, "get_rdma_int failed\n"); + return rc; + } + + if (dev->int_info.msix_cnt) { + DP_DEBUG(dev, QEDR_MSG_INIT, "rdma msix_cnt = %d\n", + dev->int_info.msix_cnt); + rc = qedr_req_msix_irqs(dev); + if (rc) + return rc; + } + + DP_DEBUG(dev, QEDR_MSG_INIT, "qedr_setup_irqs succeeded\n"); + + return 0; +} + +static int qedr_set_device_attr(struct qedr_dev *dev) +{ + struct qed_rdma_device *qed_attr; + struct qedr_device_attr *attr; + u32 page_size; + + /* Part 1 - query core capabilities */ + qed_attr = dev->ops->rdma_query_device(dev->rdma_ctx); + + /* Part 2 - check capabilities */ + page_size = ~dev->attr.page_size_caps + 1; + if (page_size > PAGE_SIZE) { + DP_ERR(dev, + "Kernel PAGE_SIZE is %ld which is smaller than minimum page size (%d) required by qedr\n", + PAGE_SIZE, page_size); + return -ENODEV; + } + + /* Part 3 - copy and update capabilities */ + attr = &dev->attr; + attr->vendor_id = qed_attr->vendor_id; + attr->vendor_part_id = qed_attr->vendor_part_id; + attr->hw_ver = qed_attr->hw_ver; + attr->fw_ver = qed_attr->fw_ver; + attr->node_guid = qed_attr->node_guid; + attr->sys_image_guid = qed_attr->sys_image_guid; + attr->max_cnq = qed_attr->max_cnq; + attr->max_sge = qed_attr->max_sge; + attr->max_inline = qed_attr->max_inline; + attr->max_sqe = min_t(u32, qed_attr->max_wqe, QEDR_MAX_SQE); + attr->max_rqe = min_t(u32, qed_attr->max_wqe, QEDR_MAX_RQE); + attr->max_qp_resp_rd_atomic_resc = qed_attr->max_qp_resp_rd_atomic_resc; + attr->max_qp_req_rd_atomic_resc = qed_attr->max_qp_req_rd_atomic_resc; + attr->max_dev_resp_rd_atomic_resc = + qed_attr->max_dev_resp_rd_atomic_resc; + attr->max_cq = qed_attr->max_cq; + attr->max_qp = qed_attr->max_qp; + attr->max_mr = qed_attr->max_mr; + attr->max_mr_size = qed_attr->max_mr_size; + attr->max_cqe = min_t(u64, qed_attr->max_cqe, QEDR_MAX_CQES); + attr->max_mw = qed_attr->max_mw; + attr->max_fmr = qed_attr->max_fmr; + attr->max_mr_mw_fmr_pbl = qed_attr->max_mr_mw_fmr_pbl; + attr->max_mr_mw_fmr_size = qed_attr->max_mr_mw_fmr_size; + attr->max_pd = qed_attr->max_pd; + attr->max_ah = qed_attr->max_ah; + attr->max_pkey = qed_attr->max_pkey; + attr->max_srq = qed_attr->max_srq; + attr->max_srq_wr = qed_attr->max_srq_wr; + attr->dev_caps = qed_attr->dev_caps; + attr->page_size_caps = qed_attr->page_size_caps; + attr->dev_ack_delay = qed_attr->dev_ack_delay; + attr->reserved_lkey = qed_attr->reserved_lkey; + attr->bad_pkey_counter = qed_attr->bad_pkey_counter; + attr->max_stats_queues = qed_attr->max_stats_queues; + + return 0; +} + +static int qedr_init_hw(struct qedr_dev *dev) +{ + struct qed_rdma_add_user_out_params out_params; + struct qed_rdma_start_in_params *in_params; + struct qed_rdma_cnq_params *cur_pbl; + struct qed_rdma_events events; + dma_addr_t p_phys_table; + u32 page_cnt; + int rc = 0; + int i; + + in_params = kzalloc(sizeof(*in_params), GFP_KERNEL); + if (!in_params) { + rc = -ENOMEM; + goto out; + } + + in_params->desired_cnq = dev->num_cnq; + for (i = 0; i < dev->num_cnq; i++) { + cur_pbl = &in_params->cnq_pbl_list[i]; + + page_cnt = 
qed_chain_get_page_cnt(&dev->cnq_array[i].pbl); + cur_pbl->num_pbl_pages = page_cnt; + + p_phys_table = qed_chain_get_pbl_phys(&dev->cnq_array[i].pbl); + cur_pbl->pbl_ptr = (u64)p_phys_table; + } + + events.context = dev; + + in_params->events = &events; + in_params->cq_mode = QED_RDMA_CQ_MODE_32_BITS; + in_params->max_mtu = dev->ndev->mtu; + ether_addr_copy(&in_params->mac_addr[0], dev->ndev->dev_addr); + + rc = dev->ops->rdma_init(dev->cdev, in_params); + if (rc) + goto out; + + rc = dev->ops->rdma_add_user(dev->rdma_ctx, &out_params); + if (rc) + goto out; + + dev->db_addr = (void *)(uintptr_t)out_params.dpi_addr; + dev->db_phys_addr = out_params.dpi_phys_addr; + dev->db_size = out_params.dpi_size; + dev->dpi = out_params.dpi; + + rc = qedr_set_device_attr(dev); +out: + kfree(in_params); + if (rc) + DP_ERR(dev, "Init HW Failed rc = %d\n", rc); + + return rc; +} + +void qedr_stop_hw(struct qedr_dev *dev) +{ + dev->ops->rdma_remove_user(dev->rdma_ctx, dev->dpi); + dev->ops->rdma_stop(dev->rdma_ctx); +} + static struct qedr_dev *qedr_add(struct qed_dev *cdev, struct pci_dev *pdev, struct net_device *ndev) { + struct qed_dev_rdma_info dev_info; struct qedr_dev *dev; int rc = 0, i; @@ -164,21 +537,59 @@ static struct qedr_dev *qedr_add(struct qed_dev *cdev, struct pci_dev *pdev, dev->ndev = ndev; dev->cdev = cdev; + qed_ops = qed_get_rdma_ops(); + if (!qed_ops) { + DP_ERR(dev, "Failed to get qed roce operations\n"); + goto init_err; + } + + dev->ops = qed_ops; + rc = qed_ops->fill_dev_info(cdev, &dev_info); + if (rc) + goto init_err; + + dev->num_hwfns = dev_info.common.num_hwfns; + dev->rdma_ctx = dev->ops->rdma_get_rdma_ctx(cdev); + + dev->num_cnq = dev->ops->rdma_get_min_cnq_msix(cdev); + if (!dev->num_cnq) { + DP_ERR(dev, "not enough CNQ resources.\n"); + goto init_err; + } + qedr_pci_set_atomic(dev, pdev); + rc = qedr_alloc_resources(dev); + if (rc) + goto init_err; + + rc = qedr_init_hw(dev); + if (rc) + goto alloc_err; + + rc = qedr_setup_irqs(dev); + if (rc) + goto irq_err; + rc = qedr_register_device(dev); if (rc) { DP_ERR(dev, "Unable to allocate register device\n"); - goto init_err; + goto reg_err; } for (i = 0; i < ARRAY_SIZE(qedr_attributes); i++) if (device_create_file(&dev->ibdev.dev, qedr_attributes[i])) - goto init_err; + goto reg_err; DP_DEBUG(dev, QEDR_MSG_INIT, "qedr driver loaded successfully\n"); return dev; +reg_err: + qedr_sync_free_irqs(dev); +irq_err: + qedr_stop_hw(dev); +alloc_err: + qedr_free_resources(dev); init_err: ib_dealloc_device(&dev->ibdev); DP_ERR(dev, "qedr driver load failed rc=%d\n", rc); @@ -193,6 +604,9 @@ static void qedr_remove(struct qedr_dev *dev) */ qedr_remove_sysfiles(dev); + qedr_stop_hw(dev); + qedr_sync_free_irqs(dev); + qedr_free_resources(dev); ib_dealloc_device(&dev->ibdev); } diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h index 801417080f11..1adbebb887eb 100644 --- a/drivers/infiniband/hw/qedr/qedr.h +++ b/drivers/infiniband/hw/qedr/qedr.h @@ -35,7 +35,10 @@ #include #include #include +#include +#include #include +#include "qedr_hsi.h" #define QEDR_MODULE_VERSION "8.10.10.0" #define QEDR_NODE_DESC "QLogic 579xx RoCE HCA" @@ -47,6 +50,60 @@ #define QEDR_MSG_INIT "INIT" +struct qedr_dev; + +struct qedr_cnq { + struct qedr_dev *dev; + struct qed_chain pbl; + struct qed_sb_info *sb; + char name[32]; + u64 n_comp; + __le16 *hw_cons_ptr; + u8 index; +}; + +#define QEDR_MAX_SGID 128 + +struct qedr_device_attr { + u32 vendor_id; + u32 vendor_part_id; + u32 hw_ver; + u64 fw_ver; + u64 node_guid; + u64 
sys_image_guid; + u8 max_cnq; + u8 max_sge; + u16 max_inline; + u32 max_sqe; + u32 max_rqe; + u8 max_qp_resp_rd_atomic_resc; + u8 max_qp_req_rd_atomic_resc; + u64 max_dev_resp_rd_atomic_resc; + u32 max_cq; + u32 max_qp; + u32 max_mr; + u64 max_mr_size; + u32 max_cqe; + u32 max_mw; + u32 max_fmr; + u32 max_mr_mw_fmr_pbl; + u64 max_mr_mw_fmr_size; + u32 max_pd; + u32 max_ah; + u8 max_pkey; + u32 max_srq; + u32 max_srq_wr; + u8 max_srq_sge; + u8 max_stats_queues; + u32 dev_caps; + + u64 page_size_caps; + u8 dev_ack_delay; + u32 reserved_lkey; + u32 bad_pkey_counter; + struct qed_rdma_events events; +}; + struct qedr_dev { struct ib_device ibdev; struct qed_dev *cdev; @@ -55,7 +112,73 @@ struct qedr_dev { enum ib_atomic_cap atomic_cap; + void *rdma_ctx; + struct qedr_device_attr attr; + + const struct qed_rdma_ops *ops; + struct qed_int_info int_info; + + struct qed_sb_info *sb_array; + struct qedr_cnq *cnq_array; + int num_cnq; + int sb_start; + + void __iomem *db_addr; + u64 db_phys_addr; + u32 db_size; + u16 dpi; + + union ib_gid *sgid_tbl; + + /* Lock for sgid table */ + spinlock_t sgid_lock; + + u64 guid; + u32 dp_module; u8 dp_level; + u8 num_hwfns; }; + +#define QEDR_MAX_SQ_PBL (0x8000) +#define QEDR_MAX_SQ_PBL_ENTRIES (0x10000 / sizeof(void *)) +#define QEDR_SQE_ELEMENT_SIZE (sizeof(struct rdma_sq_sge)) +#define QEDR_MAX_SQE_ELEMENTS_PER_SQE (ROCE_REQ_MAX_SINGLE_SQ_WQE_SIZE / \ + QEDR_SQE_ELEMENT_SIZE) +#define QEDR_MAX_SQE_ELEMENTS_PER_PAGE ((RDMA_RING_PAGE_SIZE) / \ + QEDR_SQE_ELEMENT_SIZE) +#define QEDR_MAX_SQE ((QEDR_MAX_SQ_PBL_ENTRIES) *\ + (RDMA_RING_PAGE_SIZE) / \ + (QEDR_SQE_ELEMENT_SIZE) /\ + (QEDR_MAX_SQE_ELEMENTS_PER_SQE)) +/* RQ */ +#define QEDR_MAX_RQ_PBL (0x2000) +#define QEDR_MAX_RQ_PBL_ENTRIES (0x10000 / sizeof(void *)) +#define QEDR_RQE_ELEMENT_SIZE (sizeof(struct rdma_rq_sge)) +#define QEDR_MAX_RQE_ELEMENTS_PER_RQE (RDMA_MAX_SGE_PER_RQ_WQE) +#define QEDR_MAX_RQE_ELEMENTS_PER_PAGE ((RDMA_RING_PAGE_SIZE) / \ + QEDR_RQE_ELEMENT_SIZE) +#define QEDR_MAX_RQE ((QEDR_MAX_RQ_PBL_ENTRIES) *\ + (RDMA_RING_PAGE_SIZE) / \ + (QEDR_RQE_ELEMENT_SIZE) /\ + (QEDR_MAX_RQE_ELEMENTS_PER_RQE)) + +#define QEDR_CQE_SIZE (sizeof(union rdma_cqe)) +#define QEDR_MAX_CQE_PBL_SIZE (512 * 1024) +#define QEDR_MAX_CQE_PBL_ENTRIES (((QEDR_MAX_CQE_PBL_SIZE) / \ + sizeof(u64)) - 1) +#define QEDR_MAX_CQES ((u32)((QEDR_MAX_CQE_PBL_ENTRIES) * \ + (QED_CHAIN_PAGE_SIZE) / QEDR_CQE_SIZE)) + +#define QEDR_ROCE_MAX_CNQ_SIZE (0x4000) + +#define QEDR_MAX_PORT (1) + +#define QEDR_UVERBS(CMD_NAME) (1ull << IB_USER_VERBS_CMD_##CMD_NAME) + +static inline struct qedr_dev *get_qedr_dev(struct ib_device *ibdev) +{ + return container_of(ibdev, struct qedr_dev, ibdev); +} + #endif diff --git a/drivers/infiniband/hw/qedr/qedr_hsi.h b/drivers/infiniband/hw/qedr/qedr_hsi.h new file mode 100644 index 000000000000..66d27521373f --- /dev/null +++ b/drivers/infiniband/hw/qedr/qedr_hsi.h @@ -0,0 +1,56 @@ +/* QLogic qedr NIC Driver + * Copyright (c) 2015-2016 QLogic Corporation + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef __QED_HSI_ROCE__ +#define __QED_HSI_ROCE__ + +#include +#include +#include "qedr_hsi_rdma.h" + +/* Affiliated asynchronous events / errors enumeration */ +enum roce_async_events_type { + ROCE_ASYNC_EVENT_NONE = 0, + ROCE_ASYNC_EVENT_COMM_EST = 1, + ROCE_ASYNC_EVENT_SQ_DRAINED, + ROCE_ASYNC_EVENT_SRQ_LIMIT, + ROCE_ASYNC_EVENT_LAST_WQE_REACHED, + ROCE_ASYNC_EVENT_CQ_ERR, + ROCE_ASYNC_EVENT_LOCAL_INVALID_REQUEST_ERR, + ROCE_ASYNC_EVENT_LOCAL_CATASTROPHIC_ERR, + ROCE_ASYNC_EVENT_LOCAL_ACCESS_ERR, + ROCE_ASYNC_EVENT_QP_CATASTROPHIC_ERR, + ROCE_ASYNC_EVENT_CQ_OVERFLOW_ERR, + ROCE_ASYNC_EVENT_SRQ_EMPTY, + MAX_ROCE_ASYNC_EVENTS_TYPE +}; + +#endif /* __QED_HSI_ROCE__ */ diff --git a/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h b/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h new file mode 100644 index 000000000000..3e508fbd5e78 --- /dev/null +++ b/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h @@ -0,0 +1,96 @@ +/* QLogic qedr NIC Driver + * Copyright (c) 2015-2016 QLogic Corporation + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef __QED_HSI_RDMA__ +#define __QED_HSI_RDMA__ + +#include + +/* rdma completion notification queue element */ +struct rdma_cnqe { + struct regpair cq_handle; +}; + +struct rdma_cqe_responder { + struct regpair srq_wr_id; + struct regpair qp_handle; + __le32 imm_data_or_inv_r_Key; + __le32 length; + __le32 imm_data_hi; + __le16 rq_cons; + u8 flags; +}; + +struct rdma_cqe_requester { + __le16 sq_cons; + __le16 reserved0; + __le32 reserved1; + struct regpair qp_handle; + struct regpair reserved2; + __le32 reserved3; + __le16 reserved4; + u8 flags; + u8 status; +}; + +struct rdma_cqe_common { + struct regpair reserved0; + struct regpair qp_handle; + __le16 reserved1[7]; + u8 flags; + u8 status; +}; + +/* rdma completion queue element */ +union rdma_cqe { + struct rdma_cqe_responder resp; + struct rdma_cqe_requester req; + struct rdma_cqe_common cmn; +}; + +struct rdma_sq_sge { + __le32 length; + struct regpair addr; + __le32 l_key; +}; + +struct rdma_rq_sge { + struct regpair addr; + __le32 length; + __le32 flags; +}; + +struct rdma_srq_sge { + struct regpair addr; + __le32 length; + __le32 l_key; +}; +#endif /* __QED_HSI_RDMA__ */ -- cgit v1.2.3-59-g8ed1b From ac1b36e55a5137e2f146e60be36d0cc81069feb6 Mon Sep 17 00:00:00 2001 From: Ram Amrani Date: Mon, 10 Oct 2016 13:15:32 +0300 Subject: qedr: Add support for user context verbs Add support for ucontext, query port, add and del gid verbs. Signed-off-by: Rajesh Borundia Signed-off-by: Ram Amrani Signed-off-by: Doug Ledford --- drivers/infiniband/hw/qedr/Makefile | 2 +- drivers/infiniband/hw/qedr/main.c | 26 ++ drivers/infiniband/hw/qedr/qedr.h | 34 +++ drivers/infiniband/hw/qedr/verbs.c | 456 ++++++++++++++++++++++++++++++++++++ drivers/infiniband/hw/qedr/verbs.h | 52 ++++ include/uapi/rdma/qedr-abi.h | 53 +++++ 6 files changed, 622 insertions(+), 1 deletion(-) create mode 100644 drivers/infiniband/hw/qedr/verbs.c create mode 100644 drivers/infiniband/hw/qedr/verbs.h create mode 100644 include/uapi/rdma/qedr-abi.h (limited to 'drivers') diff --git a/drivers/infiniband/hw/qedr/Makefile b/drivers/infiniband/hw/qedr/Makefile index 3a5b7a288281..b10f2b17ada5 100644 --- a/drivers/infiniband/hw/qedr/Makefile +++ b/drivers/infiniband/hw/qedr/Makefile @@ -1,3 +1,3 @@ obj-$(CONFIG_INFINIBAND_QEDR) := qedr.o -qedr-y := main.o +qedr-y := main.o verbs.o diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c index 67805941d138..55c4f565c214 100644 --- a/drivers/infiniband/hw/qedr/main.c +++ b/drivers/infiniband/hw/qedr/main.c @@ -32,6 +32,7 @@ #include #include #include +#include #include #include #include @@ -39,6 +40,8 @@ #include #include #include "qedr.h" +#include "verbs.h" +#include MODULE_DESCRIPTION("QLogic 40G/100G ROCE Driver"); MODULE_AUTHOR("QLogic Corporation"); @@ -80,6 +83,29 @@ static int qedr_register_device(struct qedr_dev *dev) memcpy(dev->ibdev.node_desc, QEDR_NODE_DESC, sizeof(QEDR_NODE_DESC)); dev->ibdev.owner = THIS_MODULE; + dev->ibdev.uverbs_abi_ver = QEDR_ABI_VERSION; + + dev->ibdev.uverbs_cmd_mask = QEDR_UVERBS(GET_CONTEXT) | + QEDR_UVERBS(QUERY_DEVICE) | + QEDR_UVERBS(QUERY_PORT); + + dev->ibdev.phys_port_cnt = 1; + dev->ibdev.num_comp_vectors = dev->num_cnq; + dev->ibdev.node_type = RDMA_NODE_IB_CA; + + 
dev->ibdev.query_device = qedr_query_device; + dev->ibdev.query_port = qedr_query_port; + dev->ibdev.modify_port = qedr_modify_port; + + dev->ibdev.query_gid = qedr_query_gid; + dev->ibdev.add_gid = qedr_add_gid; + dev->ibdev.del_gid = qedr_del_gid; + + dev->ibdev.alloc_ucontext = qedr_alloc_ucontext; + dev->ibdev.dealloc_ucontext = qedr_dealloc_ucontext; + dev->ibdev.mmap = qedr_mmap; + + dev->ibdev.dma_device = &dev->pdev->dev; dev->ibdev.get_link_layer = qedr_link_layer; dev->ibdev.get_dev_fw_str = qedr_get_dev_fw_str; diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h index 1adbebb887eb..2091c0d6e67d 100644 --- a/drivers/infiniband/hw/qedr/qedr.h +++ b/drivers/infiniband/hw/qedr/qedr.h @@ -49,6 +49,7 @@ DP_NAME(dev) ? DP_NAME(dev) : "", ## __VA_ARGS__) #define QEDR_MSG_INIT "INIT" +#define QEDR_MSG_MISC "MISC" struct qedr_dev; @@ -176,6 +177,39 @@ struct qedr_dev { #define QEDR_UVERBS(CMD_NAME) (1ull << IB_USER_VERBS_CMD_##CMD_NAME) +#define QEDR_ROCE_PKEY_MAX 1 +#define QEDR_ROCE_PKEY_TABLE_LEN 1 +#define QEDR_ROCE_PKEY_DEFAULT 0xffff + +struct qedr_ucontext { + struct ib_ucontext ibucontext; + struct qedr_dev *dev; + struct qedr_pd *pd; + u64 dpi_addr; + u64 dpi_phys_addr; + u32 dpi_size; + u16 dpi; + + struct list_head mm_head; + + /* Lock to protect mm list */ + struct mutex mm_list_lock; +}; + +struct qedr_mm { + struct { + u64 phy_addr; + unsigned long len; + } key; + struct list_head entry; +}; + +static inline +struct qedr_ucontext *get_qedr_ucontext(struct ib_ucontext *ibucontext) +{ + return container_of(ibucontext, struct qedr_ucontext, ibucontext); +} + static inline struct qedr_dev *get_qedr_dev(struct ib_device *ibdev) { return container_of(ibdev, struct qedr_dev, ibdev); diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c new file mode 100644 index 000000000000..b9dcade1cb9f --- /dev/null +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -0,0 +1,456 @@ +/* QLogic qedr NIC Driver + * Copyright (c) 2015-2016 QLogic Corporation + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "qedr_hsi.h" +#include +#include "qedr.h" +#include "verbs.h" +#include + +int qedr_query_gid(struct ib_device *ibdev, u8 port, int index, + union ib_gid *sgid) +{ + struct qedr_dev *dev = get_qedr_dev(ibdev); + int rc = 0; + + if (!rdma_cap_roce_gid_table(ibdev, port)) + return -ENODEV; + + rc = ib_get_cached_gid(ibdev, port, index, sgid, NULL); + if (rc == -EAGAIN) { + memcpy(sgid, &zgid, sizeof(*sgid)); + return 0; + } + + DP_DEBUG(dev, QEDR_MSG_INIT, "query gid: index=%d %llx:%llx\n", index, + sgid->global.interface_id, sgid->global.subnet_prefix); + + return rc; +} + +int qedr_add_gid(struct ib_device *device, u8 port_num, + unsigned int index, const union ib_gid *gid, + const struct ib_gid_attr *attr, void **context) +{ + if (!rdma_cap_roce_gid_table(device, port_num)) + return -EINVAL; + + if (port_num > QEDR_MAX_PORT) + return -EINVAL; + + if (!context) + return -EINVAL; + + return 0; +} + +int qedr_del_gid(struct ib_device *device, u8 port_num, + unsigned int index, void **context) +{ + if (!rdma_cap_roce_gid_table(device, port_num)) + return -EINVAL; + + if (port_num > QEDR_MAX_PORT) + return -EINVAL; + + if (!context) + return -EINVAL; + + return 0; +} + +int qedr_query_device(struct ib_device *ibdev, + struct ib_device_attr *attr, struct ib_udata *udata) +{ + struct qedr_dev *dev = get_qedr_dev(ibdev); + struct qedr_device_attr *qattr = &dev->attr; + + if (!dev->rdma_ctx) { + DP_ERR(dev, + "qedr_query_device called with invalid params rdma_ctx=%p\n", + dev->rdma_ctx); + return -EINVAL; + } + + memset(attr, 0, sizeof(*attr)); + + attr->fw_ver = qattr->fw_ver; + attr->sys_image_guid = qattr->sys_image_guid; + attr->max_mr_size = qattr->max_mr_size; + attr->page_size_cap = qattr->page_size_caps; + attr->vendor_id = qattr->vendor_id; + attr->vendor_part_id = qattr->vendor_part_id; + attr->hw_ver = qattr->hw_ver; + attr->max_qp = qattr->max_qp; + attr->max_qp_wr = max_t(u32, qattr->max_sqe, qattr->max_rqe); + attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD | + IB_DEVICE_RC_RNR_NAK_GEN | + IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_MGT_EXTENSIONS; + + attr->max_sge = qattr->max_sge; + attr->max_sge_rd = qattr->max_sge; + attr->max_cq = qattr->max_cq; + attr->max_cqe = qattr->max_cqe; + attr->max_mr = qattr->max_mr; + attr->max_mw = qattr->max_mw; + attr->max_pd = qattr->max_pd; + attr->atomic_cap = dev->atomic_cap; + attr->max_fmr = qattr->max_fmr; + attr->max_map_per_fmr = 16; + attr->max_qp_init_rd_atom = + 1 << (fls(qattr->max_qp_req_rd_atomic_resc) - 1); + attr->max_qp_rd_atom = + min(1 << (fls(qattr->max_qp_resp_rd_atomic_resc) - 1), + attr->max_qp_init_rd_atom); + + attr->max_srq = qattr->max_srq; + attr->max_srq_sge = qattr->max_srq_sge; + attr->max_srq_wr = qattr->max_srq_wr; + + attr->local_ca_ack_delay = qattr->dev_ack_delay; + attr->max_fast_reg_page_list_len = qattr->max_mr / 8; + attr->max_pkeys = QEDR_ROCE_PKEY_MAX; + attr->max_ah = qattr->max_ah; + + return 0; +} + +#define QEDR_SPEED_SDR (1) +#define QEDR_SPEED_DDR (2) +#define QEDR_SPEED_QDR (4) +#define QEDR_SPEED_FDR10 (8) +#define QEDR_SPEED_FDR (16) +#define QEDR_SPEED_EDR (32) + +static inline void get_link_speed_and_width(int speed, u8 *ib_speed, + u8 *ib_width) +{ + switch (speed) { + case 1000: + *ib_speed = QEDR_SPEED_SDR; + *ib_width = IB_WIDTH_1X; + break; + case 10000: + *ib_speed = QEDR_SPEED_QDR; + *ib_width = IB_WIDTH_1X; + break; + + case 20000: + *ib_speed = 
QEDR_SPEED_DDR; + *ib_width = IB_WIDTH_4X; + break; + + case 25000: + *ib_speed = QEDR_SPEED_EDR; + *ib_width = IB_WIDTH_1X; + break; + + case 40000: + *ib_speed = QEDR_SPEED_QDR; + *ib_width = IB_WIDTH_4X; + break; + + case 50000: + *ib_speed = QEDR_SPEED_QDR; + *ib_width = IB_WIDTH_4X; + break; + + case 100000: + *ib_speed = QEDR_SPEED_EDR; + *ib_width = IB_WIDTH_4X; + break; + + default: + /* Unsupported */ + *ib_speed = QEDR_SPEED_SDR; + *ib_width = IB_WIDTH_1X; + } +} + +int qedr_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *attr) +{ + struct qedr_dev *dev; + struct qed_rdma_port *rdma_port; + + dev = get_qedr_dev(ibdev); + if (port > 1) { + DP_ERR(dev, "invalid_port=0x%x\n", port); + return -EINVAL; + } + + if (!dev->rdma_ctx) { + DP_ERR(dev, "rdma_ctx is NULL\n"); + return -EINVAL; + } + + rdma_port = dev->ops->rdma_query_port(dev->rdma_ctx); + memset(attr, 0, sizeof(*attr)); + + if (rdma_port->port_state == QED_RDMA_PORT_UP) { + attr->state = IB_PORT_ACTIVE; + attr->phys_state = 5; + } else { + attr->state = IB_PORT_DOWN; + attr->phys_state = 3; + } + attr->max_mtu = IB_MTU_4096; + attr->active_mtu = iboe_get_mtu(dev->ndev->mtu); + attr->lid = 0; + attr->lmc = 0; + attr->sm_lid = 0; + attr->sm_sl = 0; + attr->port_cap_flags = IB_PORT_IP_BASED_GIDS; + attr->gid_tbl_len = QEDR_MAX_SGID; + attr->pkey_tbl_len = QEDR_ROCE_PKEY_TABLE_LEN; + attr->bad_pkey_cntr = rdma_port->pkey_bad_counter; + attr->qkey_viol_cntr = 0; + get_link_speed_and_width(rdma_port->link_speed, + &attr->active_speed, &attr->active_width); + attr->max_msg_sz = rdma_port->max_msg_size; + attr->max_vl_num = 4; + + return 0; +} + +int qedr_modify_port(struct ib_device *ibdev, u8 port, int mask, + struct ib_port_modify *props) +{ + struct qedr_dev *dev; + + dev = get_qedr_dev(ibdev); + if (port > 1) { + DP_ERR(dev, "invalid_port=0x%x\n", port); + return -EINVAL; + } + + return 0; +} + +static int qedr_add_mmap(struct qedr_ucontext *uctx, u64 phy_addr, + unsigned long len) +{ + struct qedr_mm *mm; + + mm = kzalloc(sizeof(*mm), GFP_KERNEL); + if (!mm) + return -ENOMEM; + + mm->key.phy_addr = phy_addr; + /* This function might be called with a length which is not a multiple + * of PAGE_SIZE, while the mapping is PAGE_SIZE grained and the kernel + * forces this granularity by increasing the requested size if needed. + * When qedr_mmap is called, it will search the list with the updated + * length as a key. To prevent search failures, the length is rounded up + * in advance to PAGE_SIZE. 
+ */ + mm->key.len = roundup(len, PAGE_SIZE); + INIT_LIST_HEAD(&mm->entry); + + mutex_lock(&uctx->mm_list_lock); + list_add(&mm->entry, &uctx->mm_head); + mutex_unlock(&uctx->mm_list_lock); + + DP_DEBUG(uctx->dev, QEDR_MSG_MISC, + "added (addr=0x%llx,len=0x%lx) for ctx=%p\n", + (unsigned long long)mm->key.phy_addr, + (unsigned long)mm->key.len, uctx); + + return 0; +} + +static bool qedr_search_mmap(struct qedr_ucontext *uctx, u64 phy_addr, + unsigned long len) +{ + bool found = false; + struct qedr_mm *mm; + + mutex_lock(&uctx->mm_list_lock); + list_for_each_entry(mm, &uctx->mm_head, entry) { + if (len != mm->key.len || phy_addr != mm->key.phy_addr) + continue; + + found = true; + break; + } + mutex_unlock(&uctx->mm_list_lock); + DP_DEBUG(uctx->dev, QEDR_MSG_MISC, + "searched for (addr=0x%llx,len=0x%lx) for ctx=%p, result=%d\n", + mm->key.phy_addr, mm->key.len, uctx, found); + + return found; +} + +struct ib_ucontext *qedr_alloc_ucontext(struct ib_device *ibdev, + struct ib_udata *udata) +{ + int rc; + struct qedr_ucontext *ctx; + struct qedr_alloc_ucontext_resp uresp; + struct qedr_dev *dev = get_qedr_dev(ibdev); + struct qed_rdma_add_user_out_params oparams; + + if (!udata) + return ERR_PTR(-EFAULT); + + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return ERR_PTR(-ENOMEM); + + rc = dev->ops->rdma_add_user(dev->rdma_ctx, &oparams); + if (rc) { + DP_ERR(dev, + "failed to allocate a DPI for a new RoCE application, rc=%d. To overcome this consider to increase the number of DPIs, increase the doorbell BAR size or just close unnecessary RoCE applications. In order to increase the number of DPIs consult the qedr readme\n", + rc); + goto err; + } + + ctx->dpi = oparams.dpi; + ctx->dpi_addr = oparams.dpi_addr; + ctx->dpi_phys_addr = oparams.dpi_phys_addr; + ctx->dpi_size = oparams.dpi_size; + INIT_LIST_HEAD(&ctx->mm_head); + mutex_init(&ctx->mm_list_lock); + + memset(&uresp, 0, sizeof(uresp)); + + uresp.db_pa = ctx->dpi_phys_addr; + uresp.db_size = ctx->dpi_size; + uresp.max_send_wr = dev->attr.max_sqe; + uresp.max_recv_wr = dev->attr.max_rqe; + uresp.max_srq_wr = dev->attr.max_srq_wr; + uresp.sges_per_send_wr = QEDR_MAX_SQE_ELEMENTS_PER_SQE; + uresp.sges_per_recv_wr = QEDR_MAX_RQE_ELEMENTS_PER_RQE; + uresp.sges_per_srq_wr = dev->attr.max_srq_sge; + uresp.max_cqes = QEDR_MAX_CQES; + + rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp)); + if (rc) + goto err; + + ctx->dev = dev; + + rc = qedr_add_mmap(ctx, ctx->dpi_phys_addr, ctx->dpi_size); + if (rc) + goto err; + + DP_DEBUG(dev, QEDR_MSG_INIT, "Allocating user context %p\n", + &ctx->ibucontext); + return &ctx->ibucontext; + +err: + kfree(ctx); + return ERR_PTR(rc); +} + +int qedr_dealloc_ucontext(struct ib_ucontext *ibctx) +{ + struct qedr_ucontext *uctx = get_qedr_ucontext(ibctx); + struct qedr_mm *mm, *tmp; + int status = 0; + + DP_DEBUG(uctx->dev, QEDR_MSG_INIT, "Deallocating user context %p\n", + uctx); + uctx->dev->ops->rdma_remove_user(uctx->dev->rdma_ctx, uctx->dpi); + + list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) { + DP_DEBUG(uctx->dev, QEDR_MSG_MISC, + "deleted (addr=0x%llx,len=0x%lx) for ctx=%p\n", + mm->key.phy_addr, mm->key.len, uctx); + list_del(&mm->entry); + kfree(mm); + } + + kfree(uctx); + return status; +} + +int qedr_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) +{ + struct qedr_ucontext *ucontext = get_qedr_ucontext(context); + struct qedr_dev *dev = get_qedr_dev(context->device); + unsigned long vm_page = vma->vm_pgoff << PAGE_SHIFT; + u64 unmapped_db = dev->db_phys_addr; + unsigned 
long len = (vma->vm_end - vma->vm_start); + int rc = 0; + bool found; + + DP_DEBUG(dev, QEDR_MSG_INIT, + "qedr_mmap called vm_page=0x%lx vm_pgoff=0x%lx unmapped_db=0x%llx db_size=%x, len=%lx\n", + vm_page, vma->vm_pgoff, unmapped_db, dev->db_size, len); + if (vma->vm_start & (PAGE_SIZE - 1)) { + DP_ERR(dev, "Vma_start not page aligned = %ld\n", + vma->vm_start); + return -EINVAL; + } + + found = qedr_search_mmap(ucontext, vm_page, len); + if (!found) { + DP_ERR(dev, "Vma_pgoff not found in mapped array = %ld\n", + vma->vm_pgoff); + return -EINVAL; + } + + DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping doorbell bar\n"); + + if ((vm_page >= unmapped_db) && (vm_page <= (unmapped_db + + dev->db_size))) { + DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping doorbell bar\n"); + if (vma->vm_flags & VM_READ) { + DP_ERR(dev, "Trying to map doorbell bar for read\n"); + return -EPERM; + } + + vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); + + rc = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, + PAGE_SIZE, vma->vm_page_prot); + } else { + DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping chains\n"); + rc = remap_pfn_range(vma, vma->vm_start, + vma->vm_pgoff, len, vma->vm_page_prot); + } + DP_DEBUG(dev, QEDR_MSG_INIT, "qedr_mmap return code: %d\n", rc); + return rc; +} diff --git a/drivers/infiniband/hw/qedr/verbs.h b/drivers/infiniband/hw/qedr/verbs.h new file mode 100644 index 000000000000..9472044dd587 --- /dev/null +++ b/drivers/infiniband/hw/qedr/verbs.h @@ -0,0 +1,52 @@ +/* QLogic qedr NIC Driver + * Copyright (c) 2015-2016 QLogic Corporation + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#ifndef __QEDR_VERBS_H__ +#define __QEDR_VERBS_H__ + +int qedr_query_device(struct ib_device *ibdev, + struct ib_device_attr *attr, struct ib_udata *udata); +int qedr_query_port(struct ib_device *, u8 port, struct ib_port_attr *props); +int qedr_modify_port(struct ib_device *, u8 port, int mask, + struct ib_port_modify *props); + +int qedr_query_gid(struct ib_device *, u8 port, int index, union ib_gid *gid); + +struct ib_ucontext *qedr_alloc_ucontext(struct ib_device *, struct ib_udata *); +int qedr_dealloc_ucontext(struct ib_ucontext *); + +int qedr_mmap(struct ib_ucontext *, struct vm_area_struct *vma); +int qedr_del_gid(struct ib_device *device, u8 port_num, + unsigned int index, void **context); +int qedr_add_gid(struct ib_device *device, u8 port_num, + unsigned int index, const union ib_gid *gid, + const struct ib_gid_attr *attr, void **context); +#endif diff --git a/include/uapi/rdma/qedr-abi.h b/include/uapi/rdma/qedr-abi.h new file mode 100644 index 000000000000..f7c7fff7a877 --- /dev/null +++ b/include/uapi/rdma/qedr-abi.h @@ -0,0 +1,53 @@ +/* QLogic qedr NIC Driver + * Copyright (c) 2015-2016 QLogic Corporation + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef __QEDR_USER_H__ +#define __QEDR_USER_H__ + +#include + +#define QEDR_ABI_VERSION (8) + +/* user kernel communication data structures. */ + +struct qedr_alloc_ucontext_resp { + __u64 db_pa; + __u32 db_size; + + __u32 max_send_wr; + __u32 max_recv_wr; + __u32 max_srq_wr; + __u32 sges_per_send_wr; + __u32 sges_per_recv_wr; + __u32 sges_per_srq_wr; + __u32 max_cqes; +}; +#endif /* __QEDR_USER_H__ */ -- cgit v1.2.3-59-g8ed1b From a7efd7773e31b60f695816c27393fc717a9df127 Mon Sep 17 00:00:00 2001 From: Ram Amrani Date: Mon, 10 Oct 2016 13:15:33 +0300 Subject: qedr: Add support for PD,PKEY and CQ verbs Add support for protection domain and completion queue verbs. 
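For reference, the user and kernel CQ buffers added by this patch are described to the firmware through page buffer lists (PBLs) sized by qedr_prepare_pbl_tbl(): a queue whose page-table entries fit in a single 64 KiB PBL page stays single layered, and a larger queue (when the caller allows it) switches to a two-layer layout whose first page points at the remaining PBL pages. The standalone sketch below only mirrors that sizing arithmetic so it can be compiled and run outside the kernel; the constants are copied from the patch, while pbl_layout() and its output format are invented for illustration and are not driver code.

  /* Illustration only; mirrors the sizing logic of qedr_prepare_pbl_tbl(),
   * assuming the caller permits a two-layer table. Not driver code. */
  #include <stdio.h>
  #include <stdint.h>

  #define MIN_FW_PBL_PAGE_SIZE (4 * 1024)
  #define MAX_FW_PBL_PAGE_SIZE (64 * 1024)
  #define NUM_PBES_ON_PAGE(sz) ((sz) / sizeof(uint64_t))

  static void pbl_layout(uint32_t num_pbes)
  {
          uint32_t pbl_size = MIN_FW_PBL_PAGE_SIZE;
          uint32_t num_pbls;

          if (num_pbes <= NUM_PBES_ON_PAGE(MAX_FW_PBL_PAGE_SIZE)) {
                  /* Single layer: grow one PBL page until every PBE fits. */
                  while (NUM_PBES_ON_PAGE(pbl_size) < num_pbes)
                          pbl_size *= 2;
                  num_pbls = 1;
          } else {
                  /* Two layers: grow until layer0 * layer1 covers num_pbes. */
                  uint64_t capacity = NUM_PBES_ON_PAGE(pbl_size) *
                                      NUM_PBES_ON_PAGE(pbl_size);

                  while (capacity < num_pbes) {
                          pbl_size *= 2;
                          capacity = NUM_PBES_ON_PAGE(pbl_size) *
                                     NUM_PBES_ON_PAGE(pbl_size);
                  }
                  /* One data page per NUM_PBES_ON_PAGE() PBEs, plus layer 0. */
                  num_pbls = (num_pbes + NUM_PBES_ON_PAGE(pbl_size) - 1) /
                             NUM_PBES_ON_PAGE(pbl_size) + 1;
          }

          printf("num_pbes=%u -> num_pbls=%u, pbl_size=%u\n",
                 num_pbes, num_pbls, pbl_size);
  }

  int main(void)
  {
          pbl_layout(256);    /* small CQ: one 4 KiB PBL page */
          pbl_layout(8192);   /* largest single-layer case: one 64 KiB page */
          pbl_layout(20000);  /* beyond 8192 PBEs: two-layer layout */
          return 0;
  }
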
Signed-off-by: Rajesh Borundia Signed-off-by: Ram Amrani Signed-off-by: Doug Ledford --- drivers/infiniband/hw/qedr/main.c | 49 ++- drivers/infiniband/hw/qedr/qedr.h | 78 +++++ drivers/infiniband/hw/qedr/qedr_hsi_rdma.h | 79 +++++ drivers/infiniband/hw/qedr/verbs.c | 539 +++++++++++++++++++++++++++++ drivers/infiniband/hw/qedr/verbs.h | 14 + include/uapi/rdma/qedr-abi.h | 19 + 6 files changed, 777 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c index 55c4f565c214..35928abb6b63 100644 --- a/drivers/infiniband/hw/qedr/main.c +++ b/drivers/infiniband/hw/qedr/main.c @@ -87,7 +87,14 @@ static int qedr_register_device(struct qedr_dev *dev) dev->ibdev.uverbs_cmd_mask = QEDR_UVERBS(GET_CONTEXT) | QEDR_UVERBS(QUERY_DEVICE) | - QEDR_UVERBS(QUERY_PORT); + QEDR_UVERBS(QUERY_PORT) | + QEDR_UVERBS(ALLOC_PD) | + QEDR_UVERBS(DEALLOC_PD) | + QEDR_UVERBS(CREATE_COMP_CHANNEL) | + QEDR_UVERBS(CREATE_CQ) | + QEDR_UVERBS(RESIZE_CQ) | + QEDR_UVERBS(DESTROY_CQ) | + QEDR_UVERBS(REQ_NOTIFY_CQ); dev->ibdev.phys_port_cnt = 1; dev->ibdev.num_comp_vectors = dev->num_cnq; @@ -105,6 +112,16 @@ static int qedr_register_device(struct qedr_dev *dev) dev->ibdev.dealloc_ucontext = qedr_dealloc_ucontext; dev->ibdev.mmap = qedr_mmap; + dev->ibdev.alloc_pd = qedr_alloc_pd; + dev->ibdev.dealloc_pd = qedr_dealloc_pd; + + dev->ibdev.create_cq = qedr_create_cq; + dev->ibdev.destroy_cq = qedr_destroy_cq; + dev->ibdev.resize_cq = qedr_resize_cq; + dev->ibdev.req_notify_cq = qedr_arm_cq; + + dev->ibdev.query_pkey = qedr_query_pkey; + dev->ibdev.dma_device = &dev->pdev->dev; dev->ibdev.get_link_layer = qedr_link_layer; @@ -322,6 +339,8 @@ static irqreturn_t qedr_irq_handler(int irq, void *handle) { u16 hw_comp_cons, sw_comp_cons; struct qedr_cnq *cnq = handle; + struct regpair *cq_handle; + struct qedr_cq *cq; qed_sb_ack(cnq->sb, IGU_INT_DISABLE, 0); @@ -334,8 +353,36 @@ static irqreturn_t qedr_irq_handler(int irq, void *handle) rmb(); while (sw_comp_cons != hw_comp_cons) { + cq_handle = (struct regpair *)qed_chain_consume(&cnq->pbl); + cq = (struct qedr_cq *)(uintptr_t)HILO_U64(cq_handle->hi, + cq_handle->lo); + + if (cq == NULL) { + DP_ERR(cnq->dev, + "Received NULL CQ cq_handle->hi=%d cq_handle->lo=%d sw_comp_cons=%d hw_comp_cons=%d\n", + cq_handle->hi, cq_handle->lo, sw_comp_cons, + hw_comp_cons); + + break; + } + + if (cq->sig != QEDR_CQ_MAGIC_NUMBER) { + DP_ERR(cnq->dev, + "Problem with cq signature, cq_handle->hi=%d ch_handle->lo=%d cq=%p\n", + cq_handle->hi, cq_handle->lo, cq); + break; + } + + cq->arm_flags = 0; + + if (cq->ibcq.comp_handler) + (*cq->ibcq.comp_handler) + (&cq->ibcq, cq->ibcq.cq_context); + sw_comp_cons = qed_chain_get_cons_idx(&cnq->pbl); + cnq->n_comp++; + } qed_ops->rdma_cnq_prod_update(cnq->dev->rdma_ctx, cnq->index, diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h index 2091c0d6e67d..9e2846a599ce 100644 --- a/drivers/infiniband/hw/qedr/qedr.h +++ b/drivers/infiniband/hw/qedr/qedr.h @@ -50,6 +50,10 @@ #define QEDR_MSG_INIT "INIT" #define QEDR_MSG_MISC "MISC" +#define QEDR_MSG_CQ " CQ" +#define QEDR_MSG_MR " MR" + +#define QEDR_CQ_MAGIC_NUMBER (0x11223344) struct qedr_dev; @@ -181,6 +185,12 @@ struct qedr_dev { #define QEDR_ROCE_PKEY_TABLE_LEN 1 #define QEDR_ROCE_PKEY_DEFAULT 0xffff +struct qedr_pbl { + struct list_head list_entry; + void *va; + dma_addr_t pa; +}; + struct qedr_ucontext { struct ib_ucontext ibucontext; struct qedr_dev *dev; @@ -196,6 +206,64 @@ struct qedr_ucontext { struct 
mutex mm_list_lock; }; +union db_prod64 { + struct rdma_pwm_val32_data data; + u64 raw; +}; + +enum qedr_cq_type { + QEDR_CQ_TYPE_GSI, + QEDR_CQ_TYPE_KERNEL, + QEDR_CQ_TYPE_USER, +}; + +struct qedr_pbl_info { + u32 num_pbls; + u32 num_pbes; + u32 pbl_size; + u32 pbe_size; + bool two_layered; +}; + +struct qedr_userq { + struct ib_umem *umem; + struct qedr_pbl_info pbl_info; + struct qedr_pbl *pbl_tbl; + u64 buf_addr; + size_t buf_len; +}; + +struct qedr_cq { + struct ib_cq ibcq; + + enum qedr_cq_type cq_type; + u32 sig; + + u16 icid; + + /* Lock to protect multiplem CQ's */ + spinlock_t cq_lock; + u8 arm_flags; + struct qed_chain pbl; + + void __iomem *db_addr; + union db_prod64 db; + + u8 pbl_toggle; + union rdma_cqe *latest_cqe; + union rdma_cqe *toggle_cqe; + + u32 cq_cons; + + struct qedr_userq q; +}; + +struct qedr_pd { + struct ib_pd ibpd; + u32 pd_id; + struct qedr_ucontext *uctx; +}; + struct qedr_mm { struct { u64 phy_addr; @@ -215,4 +283,14 @@ static inline struct qedr_dev *get_qedr_dev(struct ib_device *ibdev) return container_of(ibdev, struct qedr_dev, ibdev); } +static inline struct qedr_pd *get_qedr_pd(struct ib_pd *ibpd) +{ + return container_of(ibpd, struct qedr_pd, ibpd); +} + +static inline struct qedr_cq *get_qedr_cq(struct ib_cq *ibcq) +{ + return container_of(ibcq, struct qedr_cq, ibcq); +} + #endif diff --git a/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h b/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h index 3e508fbd5e78..84f6520107cc 100644 --- a/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h +++ b/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h @@ -47,6 +47,19 @@ struct rdma_cqe_responder { __le32 imm_data_hi; __le16 rq_cons; u8 flags; +#define RDMA_CQE_RESPONDER_TOGGLE_BIT_MASK 0x1 +#define RDMA_CQE_RESPONDER_TOGGLE_BIT_SHIFT 0 +#define RDMA_CQE_RESPONDER_TYPE_MASK 0x3 +#define RDMA_CQE_RESPONDER_TYPE_SHIFT 1 +#define RDMA_CQE_RESPONDER_INV_FLG_MASK 0x1 +#define RDMA_CQE_RESPONDER_INV_FLG_SHIFT 3 +#define RDMA_CQE_RESPONDER_IMM_FLG_MASK 0x1 +#define RDMA_CQE_RESPONDER_IMM_FLG_SHIFT 4 +#define RDMA_CQE_RESPONDER_RDMA_FLG_MASK 0x1 +#define RDMA_CQE_RESPONDER_RDMA_FLG_SHIFT 5 +#define RDMA_CQE_RESPONDER_RESERVED2_MASK 0x3 +#define RDMA_CQE_RESPONDER_RESERVED2_SHIFT 6 + u8 status; }; struct rdma_cqe_requester { @@ -58,6 +71,12 @@ struct rdma_cqe_requester { __le32 reserved3; __le16 reserved4; u8 flags; +#define RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK 0x1 +#define RDMA_CQE_REQUESTER_TOGGLE_BIT_SHIFT 0 +#define RDMA_CQE_REQUESTER_TYPE_MASK 0x3 +#define RDMA_CQE_REQUESTER_TYPE_SHIFT 1 +#define RDMA_CQE_REQUESTER_RESERVED5_MASK 0x1F +#define RDMA_CQE_REQUESTER_RESERVED5_SHIFT 3 u8 status; }; @@ -66,6 +85,12 @@ struct rdma_cqe_common { struct regpair qp_handle; __le16 reserved1[7]; u8 flags; +#define RDMA_CQE_COMMON_TOGGLE_BIT_MASK 0x1 +#define RDMA_CQE_COMMON_TOGGLE_BIT_SHIFT 0 +#define RDMA_CQE_COMMON_TYPE_MASK 0x3 +#define RDMA_CQE_COMMON_TYPE_SHIFT 1 +#define RDMA_CQE_COMMON_RESERVED2_MASK 0x1F +#define RDMA_CQE_COMMON_RESERVED2_SHIFT 3 u8 status; }; @@ -76,6 +101,45 @@ union rdma_cqe { struct rdma_cqe_common cmn; }; +/* * CQE requester status enumeration */ +enum rdma_cqe_requester_status_enum { + RDMA_CQE_REQ_STS_OK, + RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR, + RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR, + RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR, + RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR, + RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR, + RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR, + RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR, + RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR, + RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR, + 
RDMA_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR, + RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR, + MAX_RDMA_CQE_REQUESTER_STATUS_ENUM +}; + +/* CQE responder status enumeration */ +enum rdma_cqe_responder_status_enum { + RDMA_CQE_RESP_STS_OK, + RDMA_CQE_RESP_STS_LOCAL_ACCESS_ERR, + RDMA_CQE_RESP_STS_LOCAL_LENGTH_ERR, + RDMA_CQE_RESP_STS_LOCAL_QP_OPERATION_ERR, + RDMA_CQE_RESP_STS_LOCAL_PROTECTION_ERR, + RDMA_CQE_RESP_STS_MEMORY_MGT_OPERATION_ERR, + RDMA_CQE_RESP_STS_REMOTE_INVALID_REQUEST_ERR, + RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR, + MAX_RDMA_CQE_RESPONDER_STATUS_ENUM +}; + +/* CQE type enumeration */ +enum rdma_cqe_type { + RDMA_CQE_TYPE_REQUESTER, + RDMA_CQE_TYPE_RESPONDER_RQ, + RDMA_CQE_TYPE_RESPONDER_SRQ, + RDMA_CQE_TYPE_INVALID, + MAX_RDMA_CQE_TYPE +}; + struct rdma_sq_sge { __le32 length; struct regpair addr; @@ -93,4 +157,19 @@ struct rdma_srq_sge { __le32 length; __le32 l_key; }; + +/* Rdma doorbell data for CQ */ +struct rdma_pwm_val32_data { + __le16 icid; + u8 agg_flags; + u8 params; +#define RDMA_PWM_VAL32_DATA_AGG_CMD_MASK 0x3 +#define RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT 0 +#define RDMA_PWM_VAL32_DATA_BYPASS_EN_MASK 0x1 +#define RDMA_PWM_VAL32_DATA_BYPASS_EN_SHIFT 2 +#define RDMA_PWM_VAL32_DATA_RESERVED_MASK 0x1F +#define RDMA_PWM_VAL32_DATA_RESERVED_SHIFT 3 + __le32 value; +}; + #endif /* __QED_HSI_RDMA__ */ diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index b9dcade1cb9f..b525c6cf1df0 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -49,6 +49,17 @@ #include "verbs.h" #include +#define DB_ADDR_SHIFT(addr) ((addr) << DB_PWM_ADDR_OFFSET_SHIFT) + +int qedr_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey) +{ + if (index > QEDR_ROCE_PKEY_TABLE_LEN) + return -EINVAL; + + *pkey = QEDR_ROCE_PKEY_DEFAULT; + return 0; +} + int qedr_query_gid(struct ib_device *ibdev, u8 port, int index, union ib_gid *sgid) { @@ -454,3 +465,531 @@ int qedr_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) DP_DEBUG(dev, QEDR_MSG_INIT, "qedr_mmap return code: %d\n", rc); return rc; } + +struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev, + struct ib_ucontext *context, struct ib_udata *udata) +{ + struct qedr_dev *dev = get_qedr_dev(ibdev); + struct qedr_ucontext *uctx = NULL; + struct qedr_alloc_pd_uresp uresp; + struct qedr_pd *pd; + u16 pd_id; + int rc; + + DP_DEBUG(dev, QEDR_MSG_INIT, "Function called from: %s\n", + (udata && context) ? 
"User Lib" : "Kernel"); + + if (!dev->rdma_ctx) { + DP_ERR(dev, "invlaid RDMA context\n"); + return ERR_PTR(-EINVAL); + } + + pd = kzalloc(sizeof(*pd), GFP_KERNEL); + if (!pd) + return ERR_PTR(-ENOMEM); + + dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id); + + uresp.pd_id = pd_id; + pd->pd_id = pd_id; + + if (udata && context) { + rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp)); + if (rc) + DP_ERR(dev, "copy error pd_id=0x%x.\n", pd_id); + uctx = get_qedr_ucontext(context); + uctx->pd = pd; + pd->uctx = uctx; + } + + return &pd->ibpd; +} + +int qedr_dealloc_pd(struct ib_pd *ibpd) +{ + struct qedr_dev *dev = get_qedr_dev(ibpd->device); + struct qedr_pd *pd = get_qedr_pd(ibpd); + + if (!pd) + pr_err("Invalid PD received in dealloc_pd\n"); + + DP_DEBUG(dev, QEDR_MSG_INIT, "Deallocating PD %d\n", pd->pd_id); + dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd->pd_id); + + kfree(pd); + + return 0; +} + +static void qedr_free_pbl(struct qedr_dev *dev, + struct qedr_pbl_info *pbl_info, struct qedr_pbl *pbl) +{ + struct pci_dev *pdev = dev->pdev; + int i; + + for (i = 0; i < pbl_info->num_pbls; i++) { + if (!pbl[i].va) + continue; + dma_free_coherent(&pdev->dev, pbl_info->pbl_size, + pbl[i].va, pbl[i].pa); + } + + kfree(pbl); +} + +#define MIN_FW_PBL_PAGE_SIZE (4 * 1024) +#define MAX_FW_PBL_PAGE_SIZE (64 * 1024) + +#define NUM_PBES_ON_PAGE(_page_size) (_page_size / sizeof(u64)) +#define MAX_PBES_ON_PAGE NUM_PBES_ON_PAGE(MAX_FW_PBL_PAGE_SIZE) +#define MAX_PBES_TWO_LAYER (MAX_PBES_ON_PAGE * MAX_PBES_ON_PAGE) + +static struct qedr_pbl *qedr_alloc_pbl_tbl(struct qedr_dev *dev, + struct qedr_pbl_info *pbl_info, + gfp_t flags) +{ + struct pci_dev *pdev = dev->pdev; + struct qedr_pbl *pbl_table; + dma_addr_t *pbl_main_tbl; + dma_addr_t pa; + void *va; + int i; + + pbl_table = kcalloc(pbl_info->num_pbls, sizeof(*pbl_table), flags); + if (!pbl_table) + return ERR_PTR(-ENOMEM); + + for (i = 0; i < pbl_info->num_pbls; i++) { + va = dma_alloc_coherent(&pdev->dev, pbl_info->pbl_size, + &pa, flags); + if (!va) + goto err; + + memset(va, 0, pbl_info->pbl_size); + pbl_table[i].va = va; + pbl_table[i].pa = pa; + } + + /* Two-Layer PBLs, if we have more than one pbl we need to initialize + * the first one with physical pointers to all of the rest + */ + pbl_main_tbl = (dma_addr_t *)pbl_table[0].va; + for (i = 0; i < pbl_info->num_pbls - 1; i++) + pbl_main_tbl[i] = pbl_table[i + 1].pa; + + return pbl_table; + +err: + for (i--; i >= 0; i--) + dma_free_coherent(&pdev->dev, pbl_info->pbl_size, + pbl_table[i].va, pbl_table[i].pa); + + qedr_free_pbl(dev, pbl_info, pbl_table); + + return ERR_PTR(-ENOMEM); +} + +static int qedr_prepare_pbl_tbl(struct qedr_dev *dev, + struct qedr_pbl_info *pbl_info, + u32 num_pbes, int two_layer_capable) +{ + u32 pbl_capacity; + u32 pbl_size; + u32 num_pbls; + + if ((num_pbes > MAX_PBES_ON_PAGE) && two_layer_capable) { + if (num_pbes > MAX_PBES_TWO_LAYER) { + DP_ERR(dev, "prepare pbl table: too many pages %d\n", + num_pbes); + return -EINVAL; + } + + /* calculate required pbl page size */ + pbl_size = MIN_FW_PBL_PAGE_SIZE; + pbl_capacity = NUM_PBES_ON_PAGE(pbl_size) * + NUM_PBES_ON_PAGE(pbl_size); + + while (pbl_capacity < num_pbes) { + pbl_size *= 2; + pbl_capacity = pbl_size / sizeof(u64); + pbl_capacity = pbl_capacity * pbl_capacity; + } + + num_pbls = DIV_ROUND_UP(num_pbes, NUM_PBES_ON_PAGE(pbl_size)); + num_pbls++; /* One for the layer0 ( points to the pbls) */ + pbl_info->two_layered = true; + } else { + /* One layered PBL */ + num_pbls = 1; + pbl_size = max_t(u32, MIN_FW_PBL_PAGE_SIZE, + 
roundup_pow_of_two((num_pbes * sizeof(u64)))); + pbl_info->two_layered = false; + } + + pbl_info->num_pbls = num_pbls; + pbl_info->pbl_size = pbl_size; + pbl_info->num_pbes = num_pbes; + + DP_DEBUG(dev, QEDR_MSG_MR, + "prepare pbl table: num_pbes=%d, num_pbls=%d, pbl_size=%d\n", + pbl_info->num_pbes, pbl_info->num_pbls, pbl_info->pbl_size); + + return 0; +} + +static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem, + struct qedr_pbl *pbl, + struct qedr_pbl_info *pbl_info) +{ + int shift, pg_cnt, pages, pbe_cnt, total_num_pbes = 0; + struct qedr_pbl *pbl_tbl; + struct scatterlist *sg; + struct regpair *pbe; + int entry; + u32 addr; + + if (!pbl_info->num_pbes) + return; + + /* If we have a two layered pbl, the first pbl points to the rest + * of the pbls and the first entry lays on the second pbl in the table + */ + if (pbl_info->two_layered) + pbl_tbl = &pbl[1]; + else + pbl_tbl = pbl; + + pbe = (struct regpair *)pbl_tbl->va; + if (!pbe) { + DP_ERR(dev, "cannot populate PBL due to a NULL PBE\n"); + return; + } + + pbe_cnt = 0; + + shift = ilog2(umem->page_size); + + for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) { + pages = sg_dma_len(sg) >> shift; + for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) { + /* store the page address in pbe */ + pbe->lo = cpu_to_le32(sg_dma_address(sg) + + umem->page_size * pg_cnt); + addr = upper_32_bits(sg_dma_address(sg) + + umem->page_size * pg_cnt); + pbe->hi = cpu_to_le32(addr); + pbe_cnt++; + total_num_pbes++; + pbe++; + + if (total_num_pbes == pbl_info->num_pbes) + return; + + /* If the given pbl is full storing the pbes, + * move to next pbl. + */ + if (pbe_cnt == (pbl_info->pbl_size / sizeof(u64))) { + pbl_tbl++; + pbe = (struct regpair *)pbl_tbl->va; + pbe_cnt = 0; + } + } + } +} + +static int qedr_copy_cq_uresp(struct qedr_dev *dev, + struct qedr_cq *cq, struct ib_udata *udata) +{ + struct qedr_create_cq_uresp uresp; + int rc; + + memset(&uresp, 0, sizeof(uresp)); + + uresp.db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT); + uresp.icid = cq->icid; + + rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp)); + if (rc) + DP_ERR(dev, "copy error cqid=0x%x.\n", cq->icid); + + return rc; +} + +static void consume_cqe(struct qedr_cq *cq) +{ + if (cq->latest_cqe == cq->toggle_cqe) + cq->pbl_toggle ^= RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK; + + cq->latest_cqe = qed_chain_consume(&cq->pbl); +} + +static inline int qedr_align_cq_entries(int entries) +{ + u64 size, aligned_size; + + /* We allocate an extra entry that we don't report to the FW. 
*/ + size = (entries + 1) * QEDR_CQE_SIZE; + aligned_size = ALIGN(size, PAGE_SIZE); + + return aligned_size / QEDR_CQE_SIZE; +} + +static inline int qedr_init_user_queue(struct ib_ucontext *ib_ctx, + struct qedr_dev *dev, + struct qedr_userq *q, + u64 buf_addr, size_t buf_len, + int access, int dmasync) +{ + int page_cnt; + int rc; + + q->buf_addr = buf_addr; + q->buf_len = buf_len; + q->umem = ib_umem_get(ib_ctx, q->buf_addr, q->buf_len, access, dmasync); + if (IS_ERR(q->umem)) { + DP_ERR(dev, "create user queue: failed ib_umem_get, got %ld\n", + PTR_ERR(q->umem)); + return PTR_ERR(q->umem); + } + + page_cnt = ib_umem_page_count(q->umem); + rc = qedr_prepare_pbl_tbl(dev, &q->pbl_info, page_cnt, 0); + if (rc) + goto err0; + + q->pbl_tbl = qedr_alloc_pbl_tbl(dev, &q->pbl_info, GFP_KERNEL); + if (IS_ERR_OR_NULL(q->pbl_tbl)) + goto err0; + + qedr_populate_pbls(dev, q->umem, q->pbl_tbl, &q->pbl_info); + + return 0; + +err0: + ib_umem_release(q->umem); + + return rc; +} + +static inline void qedr_init_cq_params(struct qedr_cq *cq, + struct qedr_ucontext *ctx, + struct qedr_dev *dev, int vector, + int chain_entries, int page_cnt, + u64 pbl_ptr, + struct qed_rdma_create_cq_in_params + *params) +{ + memset(params, 0, sizeof(*params)); + params->cq_handle_hi = upper_32_bits((uintptr_t)cq); + params->cq_handle_lo = lower_32_bits((uintptr_t)cq); + params->cnq_id = vector; + params->cq_size = chain_entries - 1; + params->dpi = (ctx) ? ctx->dpi : dev->dpi; + params->pbl_num_pages = page_cnt; + params->pbl_ptr = pbl_ptr; + params->pbl_two_level = 0; +} + +static void doorbell_cq(struct qedr_cq *cq, u32 cons, u8 flags) +{ + /* Flush data before signalling doorbell */ + wmb(); + cq->db.data.agg_flags = flags; + cq->db.data.value = cpu_to_le32(cons); + writeq(cq->db.raw, cq->db_addr); + + /* Make sure write would stick */ + mmiowb(); +} + +int qedr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags) +{ + struct qedr_cq *cq = get_qedr_cq(ibcq); + unsigned long sflags; + + if (cq->cq_type == QEDR_CQ_TYPE_GSI) + return 0; + + spin_lock_irqsave(&cq->cq_lock, sflags); + + cq->arm_flags = 0; + + if (flags & IB_CQ_SOLICITED) + cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_SE_CF_CMD; + + if (flags & IB_CQ_NEXT_COMP) + cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_CF_CMD; + + doorbell_cq(cq, cq->cq_cons - 1, cq->arm_flags); + + spin_unlock_irqrestore(&cq->cq_lock, sflags); + + return 0; +} + +struct ib_cq *qedr_create_cq(struct ib_device *ibdev, + const struct ib_cq_init_attr *attr, + struct ib_ucontext *ib_ctx, struct ib_udata *udata) +{ + struct qedr_ucontext *ctx = get_qedr_ucontext(ib_ctx); + struct qed_rdma_destroy_cq_out_params destroy_oparams; + struct qed_rdma_destroy_cq_in_params destroy_iparams; + struct qedr_dev *dev = get_qedr_dev(ibdev); + struct qed_rdma_create_cq_in_params params; + struct qedr_create_cq_ureq ureq; + int vector = attr->comp_vector; + int entries = attr->cqe; + struct qedr_cq *cq; + int chain_entries; + int page_cnt; + u64 pbl_ptr; + u16 icid; + int rc; + + DP_DEBUG(dev, QEDR_MSG_INIT, + "create_cq: called from %s. entries=%d, vector=%d\n", + udata ? "User Lib" : "Kernel", entries, vector); + + if (entries > QEDR_MAX_CQES) { + DP_ERR(dev, + "create cq: the number of entries %d is too high. 
Must be equal or below %d.\n", + entries, QEDR_MAX_CQES); + return ERR_PTR(-EINVAL); + } + + chain_entries = qedr_align_cq_entries(entries); + chain_entries = min_t(int, chain_entries, QEDR_MAX_CQES); + + cq = kzalloc(sizeof(*cq), GFP_KERNEL); + if (!cq) + return ERR_PTR(-ENOMEM); + + if (udata) { + memset(&ureq, 0, sizeof(ureq)); + if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) { + DP_ERR(dev, + "create cq: problem copying data from user space\n"); + goto err0; + } + + if (!ureq.len) { + DP_ERR(dev, + "create cq: cannot create a cq with 0 entries\n"); + goto err0; + } + + cq->cq_type = QEDR_CQ_TYPE_USER; + + rc = qedr_init_user_queue(ib_ctx, dev, &cq->q, ureq.addr, + ureq.len, IB_ACCESS_LOCAL_WRITE, 1); + if (rc) + goto err0; + + pbl_ptr = cq->q.pbl_tbl->pa; + page_cnt = cq->q.pbl_info.num_pbes; + } else { + cq->cq_type = QEDR_CQ_TYPE_KERNEL; + + rc = dev->ops->common->chain_alloc(dev->cdev, + QED_CHAIN_USE_TO_CONSUME, + QED_CHAIN_MODE_PBL, + QED_CHAIN_CNT_TYPE_U32, + chain_entries, + sizeof(union rdma_cqe), + &cq->pbl); + if (rc) + goto err1; + + page_cnt = qed_chain_get_page_cnt(&cq->pbl); + pbl_ptr = qed_chain_get_pbl_phys(&cq->pbl); + } + + qedr_init_cq_params(cq, ctx, dev, vector, chain_entries, page_cnt, + pbl_ptr, ¶ms); + + rc = dev->ops->rdma_create_cq(dev->rdma_ctx, ¶ms, &icid); + if (rc) + goto err2; + + cq->icid = icid; + cq->sig = QEDR_CQ_MAGIC_NUMBER; + spin_lock_init(&cq->cq_lock); + + if (ib_ctx) { + rc = qedr_copy_cq_uresp(dev, cq, udata); + if (rc) + goto err3; + } else { + /* Generate doorbell address. */ + cq->db_addr = dev->db_addr + + DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT); + cq->db.data.icid = cq->icid; + cq->db.data.params = DB_AGG_CMD_SET << + RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT; + + /* point to the very last element, passing it we will toggle */ + cq->toggle_cqe = qed_chain_get_last_elem(&cq->pbl); + cq->pbl_toggle = RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK; + cq->latest_cqe = NULL; + consume_cqe(cq); + cq->cq_cons = qed_chain_get_cons_idx_u32(&cq->pbl); + } + + DP_DEBUG(dev, QEDR_MSG_CQ, + "create cq: icid=0x%0x, addr=%p, size(entries)=0x%0x\n", + cq->icid, cq, params.cq_size); + + return &cq->ibcq; + +err3: + destroy_iparams.icid = cq->icid; + dev->ops->rdma_destroy_cq(dev->rdma_ctx, &destroy_iparams, + &destroy_oparams); +err2: + if (udata) + qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl); + else + dev->ops->common->chain_free(dev->cdev, &cq->pbl); +err1: + if (udata) + ib_umem_release(cq->q.umem); +err0: + kfree(cq); + return ERR_PTR(-EINVAL); +} + +int qedr_resize_cq(struct ib_cq *ibcq, int new_cnt, struct ib_udata *udata) +{ + struct qedr_dev *dev = get_qedr_dev(ibcq->device); + struct qedr_cq *cq = get_qedr_cq(ibcq); + + DP_ERR(dev, "cq %p RESIZE NOT SUPPORTED\n", cq); + + return 0; +} + +int qedr_destroy_cq(struct ib_cq *ibcq) +{ + struct qedr_dev *dev = get_qedr_dev(ibcq->device); + struct qed_rdma_destroy_cq_out_params oparams; + struct qed_rdma_destroy_cq_in_params iparams; + struct qedr_cq *cq = get_qedr_cq(ibcq); + + DP_DEBUG(dev, QEDR_MSG_CQ, "destroy cq: cq_id %d", cq->icid); + + /* GSIs CQs are handled by driver, so they don't exist in the FW */ + if (cq->cq_type != QEDR_CQ_TYPE_GSI) { + iparams.icid = cq->icid; + dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams); + dev->ops->common->chain_free(dev->cdev, &cq->pbl); + } + + if (ibcq->uobject && ibcq->uobject->context) { + qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl); + ib_umem_release(cq->q.umem); + } + + kfree(cq); + + return 0; +} diff --git 
a/drivers/infiniband/hw/qedr/verbs.h b/drivers/infiniband/hw/qedr/verbs.h index 9472044dd587..36c8a692f740 100644 --- a/drivers/infiniband/hw/qedr/verbs.h +++ b/drivers/infiniband/hw/qedr/verbs.h @@ -40,6 +40,8 @@ int qedr_modify_port(struct ib_device *, u8 port, int mask, int qedr_query_gid(struct ib_device *, u8 port, int index, union ib_gid *gid); +int qedr_query_pkey(struct ib_device *, u8 port, u16 index, u16 *pkey); + struct ib_ucontext *qedr_alloc_ucontext(struct ib_device *, struct ib_udata *); int qedr_dealloc_ucontext(struct ib_ucontext *); @@ -49,4 +51,16 @@ int qedr_del_gid(struct ib_device *device, u8 port_num, int qedr_add_gid(struct ib_device *device, u8 port_num, unsigned int index, const union ib_gid *gid, const struct ib_gid_attr *attr, void **context); +struct ib_pd *qedr_alloc_pd(struct ib_device *, + struct ib_ucontext *, struct ib_udata *); +int qedr_dealloc_pd(struct ib_pd *pd); + +struct ib_cq *qedr_create_cq(struct ib_device *ibdev, + const struct ib_cq_init_attr *attr, + struct ib_ucontext *ib_ctx, + struct ib_udata *udata); +int qedr_resize_cq(struct ib_cq *, int cqe, struct ib_udata *); +int qedr_destroy_cq(struct ib_cq *); +int qedr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags); + #endif diff --git a/include/uapi/rdma/qedr-abi.h b/include/uapi/rdma/qedr-abi.h index f7c7fff7a877..b0fc5f2125e0 100644 --- a/include/uapi/rdma/qedr-abi.h +++ b/include/uapi/rdma/qedr-abi.h @@ -50,4 +50,23 @@ struct qedr_alloc_ucontext_resp { __u32 sges_per_srq_wr; __u32 max_cqes; }; + +struct qedr_alloc_pd_ureq { + __u64 rsvd1; +}; + +struct qedr_alloc_pd_uresp { + __u32 pd_id; +}; + +struct qedr_create_cq_ureq { + __u64 addr; + __u64 len; +}; + +struct qedr_create_cq_uresp { + __u32 db_offset; + __u16 icid; +}; + #endif /* __QEDR_USER_H__ */ -- cgit v1.2.3-59-g8ed1b From cecbcddf6461a11ce229e80bb3981415220c9763 Mon Sep 17 00:00:00 2001 From: Ram Amrani Date: Mon, 10 Oct 2016 13:15:34 +0300 Subject: qedr: Add support for QP verbs Add support for Queue Pair verbs which adds, deletes, modifies and queries Queue Pairs. 
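Among the helpers added below, qedr_update_qp_state() validates RoCE QP state transitions and returns -EINVAL for ones the driver does not support. The standalone program below is only a sketch of that accept/reject table (the same-state case, which the driver short-circuits, and the SQE state are left out); the enum and helper names are invented for the illustration and are not part of the driver.

  /* Illustration only; mirrors the transition table checked by
   * qedr_update_qp_state(). Names are made up, not driver code. */
  #include <stdio.h>
  #include <stdbool.h>

  enum sketch_qp_state { S_RESET, S_INIT, S_RTR, S_RTS, S_SQD, S_ERR };

  static bool transition_allowed(enum sketch_qp_state cur,
                                 enum sketch_qp_state next)
  {
          switch (cur) {
          case S_RESET: return next == S_INIT;
          case S_INIT:  return next == S_RTR || next == S_ERR;
          case S_RTR:   return next == S_RTS || next == S_ERR;
          case S_RTS:   return next == S_SQD || next == S_ERR;
          case S_SQD:   return next == S_RTS || next == S_ERR;
          case S_ERR:   return next == S_RESET;
          default:      return false;
          }
  }

  int main(void)
  {
          printf("RESET->INIT: %s\n", transition_allowed(S_RESET, S_INIT) ? "ok" : "rejected");
          printf("INIT->RTR:   %s\n", transition_allowed(S_INIT, S_RTR) ? "ok" : "rejected");
          printf("RTR->RTS:    %s\n", transition_allowed(S_RTR, S_RTS) ? "ok" : "rejected");
          printf("RESET->RTS:  %s\n", transition_allowed(S_RESET, S_RTS) ? "ok" : "rejected");
          return 0;
  }

In the driver itself, the INIT->RTR arm additionally rings the RQ doorbell so that receives posted before the transition become visible to the hardware.
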
Signed-off-by: Rajesh Borundia Signed-off-by: Ram Amrani Signed-off-by: Doug Ledford --- drivers/infiniband/hw/qedr/main.c | 15 +- drivers/infiniband/hw/qedr/qedr.h | 125 ++++ drivers/infiniband/hw/qedr/qedr_cm.h | 40 + drivers/infiniband/hw/qedr/qedr_hsi_rdma.h | 11 + drivers/infiniband/hw/qedr/verbs.c | 1089 ++++++++++++++++++++++++++++ drivers/infiniband/hw/qedr/verbs.h | 7 + include/uapi/rdma/qedr-abi.h | 34 + 7 files changed, 1320 insertions(+), 1 deletion(-) create mode 100644 drivers/infiniband/hw/qedr/qedr_cm.h (limited to 'drivers') diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c index 35928abb6b63..13ba47b7b99f 100644 --- a/drivers/infiniband/hw/qedr/main.c +++ b/drivers/infiniband/hw/qedr/main.c @@ -48,6 +48,8 @@ MODULE_AUTHOR("QLogic Corporation"); MODULE_LICENSE("Dual BSD/GPL"); MODULE_VERSION(QEDR_MODULE_VERSION); +#define QEDR_WQ_MULTIPLIER_DFT (3) + void qedr_ib_dispatch_event(struct qedr_dev *dev, u8 port_num, enum ib_event_type type) { @@ -94,7 +96,11 @@ static int qedr_register_device(struct qedr_dev *dev) QEDR_UVERBS(CREATE_CQ) | QEDR_UVERBS(RESIZE_CQ) | QEDR_UVERBS(DESTROY_CQ) | - QEDR_UVERBS(REQ_NOTIFY_CQ); + QEDR_UVERBS(REQ_NOTIFY_CQ) | + QEDR_UVERBS(CREATE_QP) | + QEDR_UVERBS(MODIFY_QP) | + QEDR_UVERBS(QUERY_QP) | + QEDR_UVERBS(DESTROY_QP); dev->ibdev.phys_port_cnt = 1; dev->ibdev.num_comp_vectors = dev->num_cnq; @@ -120,6 +126,11 @@ static int qedr_register_device(struct qedr_dev *dev) dev->ibdev.resize_cq = qedr_resize_cq; dev->ibdev.req_notify_cq = qedr_arm_cq; + dev->ibdev.create_qp = qedr_create_qp; + dev->ibdev.modify_qp = qedr_modify_qp; + dev->ibdev.query_qp = qedr_query_qp; + dev->ibdev.destroy_qp = qedr_destroy_qp; + dev->ibdev.query_pkey = qedr_query_pkey; dev->ibdev.dma_device = &dev->pdev->dev; @@ -630,6 +641,8 @@ static struct qedr_dev *qedr_add(struct qed_dev *cdev, struct pci_dev *pdev, goto init_err; } + dev->wq_multiplier = QEDR_WQ_MULTIPLIER_DFT; + qedr_pci_set_atomic(dev, pdev); rc = qedr_alloc_resources(dev); diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h index 9e2846a599ce..e9fe941c48ad 100644 --- a/drivers/infiniband/hw/qedr/qedr.h +++ b/drivers/infiniband/hw/qedr/qedr.h @@ -52,6 +52,9 @@ #define QEDR_MSG_MISC "MISC" #define QEDR_MSG_CQ " CQ" #define QEDR_MSG_MR " MR" +#define QEDR_MSG_RQ " RQ" +#define QEDR_MSG_SQ " SQ" +#define QEDR_MSG_QP " QP" #define QEDR_CQ_MAGIC_NUMBER (0x11223344) @@ -143,6 +146,8 @@ struct qedr_dev { u32 dp_module; u8 dp_level; u8 num_hwfns; + uint wq_multiplier; + }; #define QEDR_MAX_SQ_PBL (0x8000) @@ -272,6 +277,122 @@ struct qedr_mm { struct list_head entry; }; +union db_prod32 { + struct rdma_pwm_val16_data data; + u32 raw; +}; + +struct qedr_qp_hwq_info { + /* WQE Elements */ + struct qed_chain pbl; + u64 p_phys_addr_tbl; + u32 max_sges; + + /* WQE */ + u16 prod; + u16 cons; + u16 wqe_cons; + u16 max_wr; + + /* DB */ + void __iomem *db; + union db_prod32 db_data; +}; + +#define QEDR_INC_SW_IDX(p_info, index) \ + do { \ + p_info->index = (p_info->index + 1) & \ + qed_chain_get_capacity(p_info->pbl) \ + } while (0) + +enum qedr_qp_err_bitmap { + QEDR_QP_ERR_SQ_FULL = 1, + QEDR_QP_ERR_RQ_FULL = 2, + QEDR_QP_ERR_BAD_SR = 4, + QEDR_QP_ERR_BAD_RR = 8, + QEDR_QP_ERR_SQ_PBL_FULL = 16, + QEDR_QP_ERR_RQ_PBL_FULL = 32, +}; + +struct qedr_qp { + struct ib_qp ibqp; /* must be first */ + struct qedr_dev *dev; + + struct qedr_qp_hwq_info sq; + struct qedr_qp_hwq_info rq; + + u32 max_inline_data; + + /* Lock for QP's */ + spinlock_t q_lock; + struct qedr_cq 
*sq_cq; + struct qedr_cq *rq_cq; + struct qedr_srq *srq; + enum qed_roce_qp_state state; + u32 id; + struct qedr_pd *pd; + enum ib_qp_type qp_type; + struct qed_rdma_qp *qed_qp; + u32 qp_id; + u16 icid; + u16 mtu; + int sgid_idx; + u32 rq_psn; + u32 sq_psn; + u32 qkey; + u32 dest_qp_num; + + /* Relevant to qps created from kernel space only (ULPs) */ + u8 prev_wqe_size; + u16 wqe_cons; + u32 err_bitmap; + bool signaled; + + /* SQ shadow */ + struct { + u64 wr_id; + enum ib_wc_opcode opcode; + u32 bytes_len; + u8 wqe_size; + bool signaled; + dma_addr_t icrc_mapping; + u32 *icrc; + struct qedr_mr *mr; + } *wqe_wr_id; + + /* RQ shadow */ + struct { + u64 wr_id; + struct ib_sge sg_list[RDMA_MAX_SGE_PER_RQ_WQE]; + u8 wqe_size; + + u16 vlan_id; + int rc; + } *rqe_wr_id; + + /* Relevant to qps created from user space only (applications) */ + struct qedr_userq usq; + struct qedr_userq urq; +}; + +static inline int qedr_get_dmac(struct qedr_dev *dev, + struct ib_ah_attr *ah_attr, u8 *mac_addr) +{ + union ib_gid zero_sgid = { { 0 } }; + struct in6_addr in6; + + if (!memcmp(&ah_attr->grh.dgid, &zero_sgid, sizeof(union ib_gid))) { + DP_ERR(dev, "Local port GID not supported\n"); + eth_zero_addr(mac_addr); + return -EINVAL; + } + + memcpy(&in6, ah_attr->grh.dgid.raw, sizeof(in6)); + ether_addr_copy(mac_addr, ah_attr->dmac); + + return 0; +} + static inline struct qedr_ucontext *get_qedr_ucontext(struct ib_ucontext *ibucontext) { @@ -293,4 +414,8 @@ static inline struct qedr_cq *get_qedr_cq(struct ib_cq *ibcq) return container_of(ibcq, struct qedr_cq, ibcq); } +static inline struct qedr_qp *get_qedr_qp(struct ib_qp *ibqp) +{ + return container_of(ibqp, struct qedr_qp, ibqp); +} #endif diff --git a/drivers/infiniband/hw/qedr/qedr_cm.h b/drivers/infiniband/hw/qedr/qedr_cm.h new file mode 100644 index 000000000000..b8a8b76d77b8 --- /dev/null +++ b/drivers/infiniband/hw/qedr/qedr_cm.h @@ -0,0 +1,40 @@ +/* QLogic qedr NIC Driver + * Copyright (c) 2015-2016 QLogic Corporation + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#ifndef LINUX_QEDR_CM_H_ +#define LINUX_QEDR_CM_H_ + +static inline u32 qedr_get_ipv4_from_gid(u8 *gid) +{ + return *(u32 *)(void *)&gid[12]; +} + +#endif diff --git a/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h b/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h index 84f6520107cc..47705598eec6 100644 --- a/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h +++ b/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h @@ -158,6 +158,17 @@ struct rdma_srq_sge { __le32 l_key; }; +/* Rdma doorbell data for SQ and RQ */ +struct rdma_pwm_val16_data { + __le16 icid; + __le16 value; +}; + +union rdma_pwm_val16_data_union { + struct rdma_pwm_val16_data as_struct; + __le32 as_dword; +}; + /* Rdma doorbell data for CQ */ struct rdma_pwm_val32_data { __le16 icid; diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index b525c6cf1df0..a0d1c5fffb63 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -48,6 +48,7 @@ #include "qedr.h" #include "verbs.h" #include +#include "qedr_cm.h" #define DB_ADDR_SHIFT(addr) ((addr) << DB_PWM_ADDR_OFFSET_SHIFT) @@ -993,3 +994,1091 @@ int qedr_destroy_cq(struct ib_cq *ibcq) return 0; } + +static inline int get_gid_info_from_table(struct ib_qp *ibqp, + struct ib_qp_attr *attr, + int attr_mask, + struct qed_rdma_modify_qp_in_params + *qp_params) +{ + enum rdma_network_type nw_type; + struct ib_gid_attr gid_attr; + union ib_gid gid; + u32 ipv4_addr; + int rc = 0; + int i; + + rc = ib_get_cached_gid(ibqp->device, attr->ah_attr.port_num, + attr->ah_attr.grh.sgid_index, &gid, &gid_attr); + if (rc) + return rc; + + if (!memcmp(&gid, &zgid, sizeof(gid))) + return -ENOENT; + + if (gid_attr.ndev) { + qp_params->vlan_id = rdma_vlan_dev_vlan_id(gid_attr.ndev); + + dev_put(gid_attr.ndev); + nw_type = ib_gid_to_network_type(gid_attr.gid_type, &gid); + switch (nw_type) { + case RDMA_NETWORK_IPV6: + memcpy(&qp_params->sgid.bytes[0], &gid.raw[0], + sizeof(qp_params->sgid)); + memcpy(&qp_params->dgid.bytes[0], + &attr->ah_attr.grh.dgid, + sizeof(qp_params->dgid)); + qp_params->roce_mode = ROCE_V2_IPV6; + SET_FIELD(qp_params->modify_flags, + QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1); + break; + case RDMA_NETWORK_IB: + memcpy(&qp_params->sgid.bytes[0], &gid.raw[0], + sizeof(qp_params->sgid)); + memcpy(&qp_params->dgid.bytes[0], + &attr->ah_attr.grh.dgid, + sizeof(qp_params->dgid)); + qp_params->roce_mode = ROCE_V1; + break; + case RDMA_NETWORK_IPV4: + memset(&qp_params->sgid, 0, sizeof(qp_params->sgid)); + memset(&qp_params->dgid, 0, sizeof(qp_params->dgid)); + ipv4_addr = qedr_get_ipv4_from_gid(gid.raw); + qp_params->sgid.ipv4_addr = ipv4_addr; + ipv4_addr = + qedr_get_ipv4_from_gid(attr->ah_attr.grh.dgid.raw); + qp_params->dgid.ipv4_addr = ipv4_addr; + SET_FIELD(qp_params->modify_flags, + QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1); + qp_params->roce_mode = ROCE_V2_IPV4; + break; + } + } + + for (i = 0; i < 4; i++) { + qp_params->sgid.dwords[i] = ntohl(qp_params->sgid.dwords[i]); + qp_params->dgid.dwords[i] = ntohl(qp_params->dgid.dwords[i]); + } + + if (qp_params->vlan_id >= VLAN_CFI_MASK) + qp_params->vlan_id = 0; + + return 0; +} + +static void qedr_cleanup_user_sq(struct qedr_dev *dev, struct qedr_qp *qp) +{ + qedr_free_pbl(dev, &qp->usq.pbl_info, qp->usq.pbl_tbl); + ib_umem_release(qp->usq.umem); +} + +static void qedr_cleanup_user_rq(struct qedr_dev *dev, struct qedr_qp *qp) +{ + qedr_free_pbl(dev, &qp->urq.pbl_info, qp->urq.pbl_tbl); + ib_umem_release(qp->urq.umem); +} + +static void qedr_cleanup_kernel_sq(struct qedr_dev *dev, struct 
qedr_qp *qp) +{ + dev->ops->common->chain_free(dev->cdev, &qp->sq.pbl); + kfree(qp->wqe_wr_id); +} + +static void qedr_cleanup_kernel_rq(struct qedr_dev *dev, struct qedr_qp *qp) +{ + dev->ops->common->chain_free(dev->cdev, &qp->rq.pbl); + kfree(qp->rqe_wr_id); +} + +static int qedr_check_qp_attrs(struct ib_pd *ibpd, struct qedr_dev *dev, + struct ib_qp_init_attr *attrs) +{ + struct qedr_device_attr *qattr = &dev->attr; + + /* QP0... attrs->qp_type == IB_QPT_GSI */ + if (attrs->qp_type != IB_QPT_RC && attrs->qp_type != IB_QPT_GSI) { + DP_DEBUG(dev, QEDR_MSG_QP, + "create qp: unsupported qp type=0x%x requested\n", + attrs->qp_type); + return -EINVAL; + } + + if (attrs->cap.max_send_wr > qattr->max_sqe) { + DP_ERR(dev, + "create qp: cannot create a SQ with %d elements (max_send_wr=0x%x)\n", + attrs->cap.max_send_wr, qattr->max_sqe); + return -EINVAL; + } + + if (attrs->cap.max_inline_data > qattr->max_inline) { + DP_ERR(dev, + "create qp: unsupported inline data size=0x%x requested (max_inline=0x%x)\n", + attrs->cap.max_inline_data, qattr->max_inline); + return -EINVAL; + } + + if (attrs->cap.max_send_sge > qattr->max_sge) { + DP_ERR(dev, + "create qp: unsupported send_sge=0x%x requested (max_send_sge=0x%x)\n", + attrs->cap.max_send_sge, qattr->max_sge); + return -EINVAL; + } + + if (attrs->cap.max_recv_sge > qattr->max_sge) { + DP_ERR(dev, + "create qp: unsupported recv_sge=0x%x requested (max_recv_sge=0x%x)\n", + attrs->cap.max_recv_sge, qattr->max_sge); + return -EINVAL; + } + + /* Unprivileged user space cannot create special QP */ + if (ibpd->uobject && attrs->qp_type == IB_QPT_GSI) { + DP_ERR(dev, + "create qp: userspace can't create special QPs of type=0x%x\n", + attrs->qp_type); + return -EINVAL; + } + + return 0; +} + +static void qedr_copy_rq_uresp(struct qedr_create_qp_uresp *uresp, + struct qedr_qp *qp) +{ + uresp->rq_db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD); + uresp->rq_icid = qp->icid; +} + +static void qedr_copy_sq_uresp(struct qedr_create_qp_uresp *uresp, + struct qedr_qp *qp) +{ + uresp->sq_db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD); + uresp->sq_icid = qp->icid + 1; +} + +static int qedr_copy_qp_uresp(struct qedr_dev *dev, + struct qedr_qp *qp, struct ib_udata *udata) +{ + struct qedr_create_qp_uresp uresp; + int rc; + + memset(&uresp, 0, sizeof(uresp)); + qedr_copy_sq_uresp(&uresp, qp); + qedr_copy_rq_uresp(&uresp, qp); + + uresp.atomic_supported = dev->atomic_cap != IB_ATOMIC_NONE; + uresp.qp_id = qp->qp_id; + + rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp)); + if (rc) + DP_ERR(dev, + "create qp: failed a copy to user space with qp icid=0x%x.\n", + qp->icid); + + return rc; +} + +static void qedr_set_qp_init_params(struct qedr_dev *dev, + struct qedr_qp *qp, + struct qedr_pd *pd, + struct ib_qp_init_attr *attrs) +{ + qp->pd = pd; + + spin_lock_init(&qp->q_lock); + + qp->qp_type = attrs->qp_type; + qp->max_inline_data = attrs->cap.max_inline_data; + qp->sq.max_sges = attrs->cap.max_send_sge; + qp->state = QED_ROCE_QP_STATE_RESET; + qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR) ? true : false; + qp->sq_cq = get_qedr_cq(attrs->send_cq); + qp->rq_cq = get_qedr_cq(attrs->recv_cq); + qp->dev = dev; + + DP_DEBUG(dev, QEDR_MSG_QP, + "QP params:\tpd = %d, qp_type = %d, max_inline_data = %d, state = %d, signaled = %d, use_srq=%d\n", + pd->pd_id, qp->qp_type, qp->max_inline_data, + qp->state, qp->signaled, (attrs->srq) ? 
1 : 0); + DP_DEBUG(dev, QEDR_MSG_QP, + "SQ params:\tsq_max_sges = %d, sq_cq_id = %d\n", + qp->sq.max_sges, qp->sq_cq->icid); + qp->rq.max_sges = attrs->cap.max_recv_sge; + DP_DEBUG(dev, QEDR_MSG_QP, + "RQ params:\trq_max_sges = %d, rq_cq_id = %d\n", + qp->rq.max_sges, qp->rq_cq->icid); +} + +static inline void +qedr_init_qp_user_params(struct qed_rdma_create_qp_in_params *params, + struct qedr_create_qp_ureq *ureq) +{ + /* QP handle to be written in CQE */ + params->qp_handle_lo = ureq->qp_handle_lo; + params->qp_handle_hi = ureq->qp_handle_hi; +} + +static inline void +qedr_init_qp_kernel_doorbell_sq(struct qedr_dev *dev, struct qedr_qp *qp) +{ + qp->sq.db = dev->db_addr + + DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD); + qp->sq.db_data.data.icid = qp->icid + 1; +} + +static inline void +qedr_init_qp_kernel_doorbell_rq(struct qedr_dev *dev, struct qedr_qp *qp) +{ + qp->rq.db = dev->db_addr + + DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD); + qp->rq.db_data.data.icid = qp->icid; +} + +static inline int +qedr_init_qp_kernel_params_rq(struct qedr_dev *dev, + struct qedr_qp *qp, struct ib_qp_init_attr *attrs) +{ + /* Allocate driver internal RQ array */ + qp->rqe_wr_id = kcalloc(qp->rq.max_wr, sizeof(*qp->rqe_wr_id), + GFP_KERNEL); + if (!qp->rqe_wr_id) + return -ENOMEM; + + DP_DEBUG(dev, QEDR_MSG_QP, "RQ max_wr set to %d.\n", qp->rq.max_wr); + + return 0; +} + +static inline int +qedr_init_qp_kernel_params_sq(struct qedr_dev *dev, + struct qedr_qp *qp, + struct ib_qp_init_attr *attrs, + struct qed_rdma_create_qp_in_params *params) +{ + u32 temp_max_wr; + + /* Allocate driver internal SQ array */ + temp_max_wr = attrs->cap.max_send_wr * dev->wq_multiplier; + temp_max_wr = min_t(u32, temp_max_wr, dev->attr.max_sqe); + + /* temp_max_wr < attr->max_sqe < u16 so the casting is safe */ + qp->sq.max_wr = (u16)temp_max_wr; + qp->wqe_wr_id = kcalloc(qp->sq.max_wr, sizeof(*qp->wqe_wr_id), + GFP_KERNEL); + if (!qp->wqe_wr_id) + return -ENOMEM; + + DP_DEBUG(dev, QEDR_MSG_QP, "SQ max_wr set to %d.\n", qp->sq.max_wr); + + /* QP handle to be written in CQE */ + params->qp_handle_lo = lower_32_bits((uintptr_t)qp); + params->qp_handle_hi = upper_32_bits((uintptr_t)qp); + + return 0; +} + +static inline int qedr_init_qp_kernel_sq(struct qedr_dev *dev, + struct qedr_qp *qp, + struct ib_qp_init_attr *attrs) +{ + u32 n_sq_elems, n_sq_entries; + int rc; + + /* A single work request may take up to QEDR_MAX_SQ_WQE_SIZE elements in + * the ring. The ring should allow at least a single WR, even if the + * user requested none, due to allocation issues. + */ + n_sq_entries = attrs->cap.max_send_wr; + n_sq_entries = min_t(u32, n_sq_entries, dev->attr.max_sqe); + n_sq_entries = max_t(u32, n_sq_entries, 1); + n_sq_elems = n_sq_entries * QEDR_MAX_SQE_ELEMENTS_PER_SQE; + rc = dev->ops->common->chain_alloc(dev->cdev, + QED_CHAIN_USE_TO_PRODUCE, + QED_CHAIN_MODE_PBL, + QED_CHAIN_CNT_TYPE_U32, + n_sq_elems, + QEDR_SQE_ELEMENT_SIZE, + &qp->sq.pbl); + if (rc) { + DP_ERR(dev, "failed to allocate QP %p SQ\n", qp); + return rc; + } + + DP_DEBUG(dev, QEDR_MSG_SQ, + "SQ Pbl base addr = %llx max_send_wr=%d max_wr=%d capacity=%d, rc=%d\n", + qed_chain_get_pbl_phys(&qp->sq.pbl), attrs->cap.max_send_wr, + n_sq_entries, qed_chain_get_capacity(&qp->sq.pbl), rc); + return 0; +} + +static inline int qedr_init_qp_kernel_rq(struct qedr_dev *dev, + struct qedr_qp *qp, + struct ib_qp_init_attr *attrs) +{ + u32 n_rq_elems, n_rq_entries; + int rc; + + /* A single work request may take up to QEDR_MAX_RQ_WQE_SIZE elements in + * the ring. 
There ring should allow at least a single WR, even if the + * user requested none, due to allocation issues. + */ + n_rq_entries = max_t(u32, attrs->cap.max_recv_wr, 1); + n_rq_elems = n_rq_entries * QEDR_MAX_RQE_ELEMENTS_PER_RQE; + rc = dev->ops->common->chain_alloc(dev->cdev, + QED_CHAIN_USE_TO_CONSUME_PRODUCE, + QED_CHAIN_MODE_PBL, + QED_CHAIN_CNT_TYPE_U32, + n_rq_elems, + QEDR_RQE_ELEMENT_SIZE, + &qp->rq.pbl); + + if (rc) { + DP_ERR(dev, "failed to allocate memory for QP %p RQ\n", qp); + return -ENOMEM; + } + + DP_DEBUG(dev, QEDR_MSG_RQ, + "RQ Pbl base addr = %llx max_recv_wr=%d max_wr=%d capacity=%d, rc=%d\n", + qed_chain_get_pbl_phys(&qp->rq.pbl), attrs->cap.max_recv_wr, + n_rq_entries, qed_chain_get_capacity(&qp->rq.pbl), rc); + + /* n_rq_entries < u16 so the casting is safe */ + qp->rq.max_wr = (u16)n_rq_entries; + + return 0; +} + +static inline void +qedr_init_qp_in_params_sq(struct qedr_dev *dev, + struct qedr_pd *pd, + struct qedr_qp *qp, + struct ib_qp_init_attr *attrs, + struct ib_udata *udata, + struct qed_rdma_create_qp_in_params *params) +{ + /* QP handle to be written in an async event */ + params->qp_handle_async_lo = lower_32_bits((uintptr_t)qp); + params->qp_handle_async_hi = upper_32_bits((uintptr_t)qp); + + params->signal_all = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR); + params->fmr_and_reserved_lkey = !udata; + params->pd = pd->pd_id; + params->dpi = pd->uctx ? pd->uctx->dpi : dev->dpi; + params->sq_cq_id = get_qedr_cq(attrs->send_cq)->icid; + params->max_sq_sges = 0; + params->stats_queue = 0; + + if (udata) { + params->sq_num_pages = qp->usq.pbl_info.num_pbes; + params->sq_pbl_ptr = qp->usq.pbl_tbl->pa; + } else { + params->sq_num_pages = qed_chain_get_page_cnt(&qp->sq.pbl); + params->sq_pbl_ptr = qed_chain_get_pbl_phys(&qp->sq.pbl); + } +} + +static inline void +qedr_init_qp_in_params_rq(struct qedr_qp *qp, + struct ib_qp_init_attr *attrs, + struct ib_udata *udata, + struct qed_rdma_create_qp_in_params *params) +{ + params->rq_cq_id = get_qedr_cq(attrs->recv_cq)->icid; + params->srq_id = 0; + params->use_srq = false; + + if (udata) { + params->rq_num_pages = qp->urq.pbl_info.num_pbes; + params->rq_pbl_ptr = qp->urq.pbl_tbl->pa; + } else { + params->rq_num_pages = qed_chain_get_page_cnt(&qp->rq.pbl); + params->rq_pbl_ptr = qed_chain_get_pbl_phys(&qp->rq.pbl); + } +} + +static inline void qedr_qp_user_print(struct qedr_dev *dev, struct qedr_qp *qp) +{ + DP_DEBUG(dev, QEDR_MSG_QP, + "create qp: successfully created user QP. 
qp=%p, sq_addr=0x%llx, sq_len=%zd, rq_addr=0x%llx, rq_len=%zd\n", + qp, qp->usq.buf_addr, qp->usq.buf_len, qp->urq.buf_addr, + qp->urq.buf_len); +} + +static inline int qedr_init_user_qp(struct ib_ucontext *ib_ctx, + struct qedr_dev *dev, + struct qedr_qp *qp, + struct qedr_create_qp_ureq *ureq) +{ + int rc; + + /* SQ - read access only (0), dma sync not required (0) */ + rc = qedr_init_user_queue(ib_ctx, dev, &qp->usq, ureq->sq_addr, + ureq->sq_len, 0, 0); + if (rc) + return rc; + + /* RQ - read access only (0), dma sync not required (0) */ + rc = qedr_init_user_queue(ib_ctx, dev, &qp->urq, ureq->rq_addr, + ureq->rq_len, 0, 0); + + if (rc) + qedr_cleanup_user_sq(dev, qp); + return rc; +} + +static inline int +qedr_init_kernel_qp(struct qedr_dev *dev, + struct qedr_qp *qp, + struct ib_qp_init_attr *attrs, + struct qed_rdma_create_qp_in_params *params) +{ + int rc; + + rc = qedr_init_qp_kernel_sq(dev, qp, attrs); + if (rc) { + DP_ERR(dev, "failed to init kernel QP %p SQ\n", qp); + return rc; + } + + rc = qedr_init_qp_kernel_params_sq(dev, qp, attrs, params); + if (rc) { + dev->ops->common->chain_free(dev->cdev, &qp->sq.pbl); + DP_ERR(dev, "failed to init kernel QP %p SQ params\n", qp); + return rc; + } + + rc = qedr_init_qp_kernel_rq(dev, qp, attrs); + if (rc) { + qedr_cleanup_kernel_sq(dev, qp); + DP_ERR(dev, "failed to init kernel QP %p RQ\n", qp); + return rc; + } + + rc = qedr_init_qp_kernel_params_rq(dev, qp, attrs); + if (rc) { + DP_ERR(dev, "failed to init kernel QP %p RQ params\n", qp); + qedr_cleanup_kernel_sq(dev, qp); + dev->ops->common->chain_free(dev->cdev, &qp->rq.pbl); + return rc; + } + + return rc; +} + +struct ib_qp *qedr_create_qp(struct ib_pd *ibpd, + struct ib_qp_init_attr *attrs, + struct ib_udata *udata) +{ + struct qedr_dev *dev = get_qedr_dev(ibpd->device); + struct qed_rdma_create_qp_out_params out_params; + struct qed_rdma_create_qp_in_params in_params; + struct qedr_pd *pd = get_qedr_pd(ibpd); + struct ib_ucontext *ib_ctx = NULL; + struct qedr_ucontext *ctx = NULL; + struct qedr_create_qp_ureq ureq; + struct qedr_qp *qp; + int rc = 0; + + DP_DEBUG(dev, QEDR_MSG_QP, "create qp: called from %s, pd=%p\n", + udata ? 
"user library" : "kernel", pd); + + rc = qedr_check_qp_attrs(ibpd, dev, attrs); + if (rc) + return ERR_PTR(rc); + + qp = kzalloc(sizeof(*qp), GFP_KERNEL); + if (!qp) + return ERR_PTR(-ENOMEM); + + if (attrs->srq) + return ERR_PTR(-EINVAL); + + DP_DEBUG(dev, QEDR_MSG_QP, + "create qp: sq_cq=%p, sq_icid=%d, rq_cq=%p, rq_icid=%d\n", + get_qedr_cq(attrs->send_cq), + get_qedr_cq(attrs->send_cq)->icid, + get_qedr_cq(attrs->recv_cq), + get_qedr_cq(attrs->recv_cq)->icid); + + qedr_set_qp_init_params(dev, qp, pd, attrs); + + memset(&in_params, 0, sizeof(in_params)); + + if (udata) { + if (!(udata && ibpd->uobject && ibpd->uobject->context)) + goto err0; + + ib_ctx = ibpd->uobject->context; + ctx = get_qedr_ucontext(ib_ctx); + + memset(&ureq, 0, sizeof(ureq)); + if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) { + DP_ERR(dev, + "create qp: problem copying data from user space\n"); + goto err0; + } + + rc = qedr_init_user_qp(ib_ctx, dev, qp, &ureq); + if (rc) + goto err0; + + qedr_init_qp_user_params(&in_params, &ureq); + } else { + rc = qedr_init_kernel_qp(dev, qp, attrs, &in_params); + if (rc) + goto err0; + } + + qedr_init_qp_in_params_sq(dev, pd, qp, attrs, udata, &in_params); + qedr_init_qp_in_params_rq(qp, attrs, udata, &in_params); + + qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx, + &in_params, &out_params); + + if (!qp->qed_qp) + goto err1; + + qp->qp_id = out_params.qp_id; + qp->icid = out_params.icid; + qp->ibqp.qp_num = qp->qp_id; + + if (udata) { + rc = qedr_copy_qp_uresp(dev, qp, udata); + if (rc) + goto err2; + + qedr_qp_user_print(dev, qp); + } else { + qedr_init_qp_kernel_doorbell_sq(dev, qp); + qedr_init_qp_kernel_doorbell_rq(dev, qp); + } + + DP_DEBUG(dev, QEDR_MSG_QP, "created %s space QP %p\n", + udata ? "user" : "kernel", qp); + + return &qp->ibqp; + +err2: + rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp); + if (rc) + DP_ERR(dev, "create qp: fatal fault. 
rc=%d", rc); +err1: + if (udata) { + qedr_cleanup_user_sq(dev, qp); + qedr_cleanup_user_rq(dev, qp); + } else { + qedr_cleanup_kernel_sq(dev, qp); + qedr_cleanup_kernel_rq(dev, qp); + } + +err0: + kfree(qp); + + return ERR_PTR(-EFAULT); +} + +enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state) +{ + switch (qp_state) { + case QED_ROCE_QP_STATE_RESET: + return IB_QPS_RESET; + case QED_ROCE_QP_STATE_INIT: + return IB_QPS_INIT; + case QED_ROCE_QP_STATE_RTR: + return IB_QPS_RTR; + case QED_ROCE_QP_STATE_RTS: + return IB_QPS_RTS; + case QED_ROCE_QP_STATE_SQD: + return IB_QPS_SQD; + case QED_ROCE_QP_STATE_ERR: + return IB_QPS_ERR; + case QED_ROCE_QP_STATE_SQE: + return IB_QPS_SQE; + } + return IB_QPS_ERR; +} + +enum qed_roce_qp_state qedr_get_state_from_ibqp(enum ib_qp_state qp_state) +{ + switch (qp_state) { + case IB_QPS_RESET: + return QED_ROCE_QP_STATE_RESET; + case IB_QPS_INIT: + return QED_ROCE_QP_STATE_INIT; + case IB_QPS_RTR: + return QED_ROCE_QP_STATE_RTR; + case IB_QPS_RTS: + return QED_ROCE_QP_STATE_RTS; + case IB_QPS_SQD: + return QED_ROCE_QP_STATE_SQD; + case IB_QPS_ERR: + return QED_ROCE_QP_STATE_ERR; + default: + return QED_ROCE_QP_STATE_ERR; + } +} + +static void qedr_reset_qp_hwq_info(struct qedr_qp_hwq_info *qph) +{ + qed_chain_reset(&qph->pbl); + qph->prod = 0; + qph->cons = 0; + qph->wqe_cons = 0; + qph->db_data.data.value = cpu_to_le16(0); +} + +static int qedr_update_qp_state(struct qedr_dev *dev, + struct qedr_qp *qp, + enum qed_roce_qp_state new_state) +{ + int status = 0; + + if (new_state == qp->state) + return 1; + + switch (qp->state) { + case QED_ROCE_QP_STATE_RESET: + switch (new_state) { + case QED_ROCE_QP_STATE_INIT: + qp->prev_wqe_size = 0; + qedr_reset_qp_hwq_info(&qp->sq); + qedr_reset_qp_hwq_info(&qp->rq); + break; + default: + status = -EINVAL; + break; + }; + break; + case QED_ROCE_QP_STATE_INIT: + switch (new_state) { + case QED_ROCE_QP_STATE_RTR: + /* Update doorbell (in case post_recv was + * done before move to RTR) + */ + wmb(); + writel(qp->rq.db_data.raw, qp->rq.db); + /* Make sure write takes effect */ + mmiowb(); + break; + case QED_ROCE_QP_STATE_ERR: + break; + default: + /* Invalid state change. */ + status = -EINVAL; + break; + }; + break; + case QED_ROCE_QP_STATE_RTR: + /* RTR->XXX */ + switch (new_state) { + case QED_ROCE_QP_STATE_RTS: + break; + case QED_ROCE_QP_STATE_ERR: + break; + default: + /* Invalid state change. */ + status = -EINVAL; + break; + }; + break; + case QED_ROCE_QP_STATE_RTS: + /* RTS->XXX */ + switch (new_state) { + case QED_ROCE_QP_STATE_SQD: + break; + case QED_ROCE_QP_STATE_ERR: + break; + default: + /* Invalid state change. */ + status = -EINVAL; + break; + }; + break; + case QED_ROCE_QP_STATE_SQD: + /* SQD->XXX */ + switch (new_state) { + case QED_ROCE_QP_STATE_RTS: + case QED_ROCE_QP_STATE_ERR: + break; + default: + /* Invalid state change. 
*/ + status = -EINVAL; + break; + }; + break; + case QED_ROCE_QP_STATE_ERR: + /* ERR->XXX */ + switch (new_state) { + case QED_ROCE_QP_STATE_RESET: + break; + default: + status = -EINVAL; + break; + }; + break; + default: + status = -EINVAL; + break; + }; + + return status; +} + +int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, + int attr_mask, struct ib_udata *udata) +{ + struct qedr_qp *qp = get_qedr_qp(ibqp); + struct qed_rdma_modify_qp_in_params qp_params = { 0 }; + struct qedr_dev *dev = get_qedr_dev(&qp->dev->ibdev); + enum ib_qp_state old_qp_state, new_qp_state; + int rc = 0; + + DP_DEBUG(dev, QEDR_MSG_QP, + "modify qp: qp %p attr_mask=0x%x, state=%d", qp, attr_mask, + attr->qp_state); + + old_qp_state = qedr_get_ibqp_state(qp->state); + if (attr_mask & IB_QP_STATE) + new_qp_state = attr->qp_state; + else + new_qp_state = old_qp_state; + + if (!ib_modify_qp_is_ok + (old_qp_state, new_qp_state, ibqp->qp_type, attr_mask, + IB_LINK_LAYER_ETHERNET)) { + DP_ERR(dev, + "modify qp: invalid attribute mask=0x%x specified for\n" + "qpn=0x%x of type=0x%x old_qp_state=0x%x, new_qp_state=0x%x\n", + attr_mask, qp->qp_id, ibqp->qp_type, old_qp_state, + new_qp_state); + rc = -EINVAL; + goto err; + } + + /* Translate the masks... */ + if (attr_mask & IB_QP_STATE) { + SET_FIELD(qp_params.modify_flags, + QED_RDMA_MODIFY_QP_VALID_NEW_STATE, 1); + qp_params.new_state = qedr_get_state_from_ibqp(attr->qp_state); + } + + if (attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) + qp_params.sqd_async = true; + + if (attr_mask & IB_QP_PKEY_INDEX) { + SET_FIELD(qp_params.modify_flags, + QED_ROCE_MODIFY_QP_VALID_PKEY, 1); + if (attr->pkey_index >= QEDR_ROCE_PKEY_TABLE_LEN) { + rc = -EINVAL; + goto err; + } + + qp_params.pkey = QEDR_ROCE_PKEY_DEFAULT; + } + + if (attr_mask & IB_QP_QKEY) + qp->qkey = attr->qkey; + + if (attr_mask & IB_QP_ACCESS_FLAGS) { + SET_FIELD(qp_params.modify_flags, + QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN, 1); + qp_params.incoming_rdma_read_en = attr->qp_access_flags & + IB_ACCESS_REMOTE_READ; + qp_params.incoming_rdma_write_en = attr->qp_access_flags & + IB_ACCESS_REMOTE_WRITE; + qp_params.incoming_atomic_en = attr->qp_access_flags & + IB_ACCESS_REMOTE_ATOMIC; + } + + if (attr_mask & (IB_QP_AV | IB_QP_PATH_MTU)) { + if (attr_mask & IB_QP_PATH_MTU) { + if (attr->path_mtu < IB_MTU_256 || + attr->path_mtu > IB_MTU_4096) { + pr_err("error: Only MTU sizes of 256, 512, 1024, 2048 and 4096 are supported by RoCE\n"); + rc = -EINVAL; + goto err; + } + qp->mtu = min(ib_mtu_enum_to_int(attr->path_mtu), + ib_mtu_enum_to_int(iboe_get_mtu + (dev->ndev->mtu))); + } + + if (!qp->mtu) { + qp->mtu = + ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu)); + pr_err("Fixing zeroed MTU to qp->mtu = %d\n", qp->mtu); + } + + SET_FIELD(qp_params.modify_flags, + QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR, 1); + + qp_params.traffic_class_tos = attr->ah_attr.grh.traffic_class; + qp_params.flow_label = attr->ah_attr.grh.flow_label; + qp_params.hop_limit_ttl = attr->ah_attr.grh.hop_limit; + + qp->sgid_idx = attr->ah_attr.grh.sgid_index; + + rc = get_gid_info_from_table(ibqp, attr, attr_mask, &qp_params); + if (rc) { + DP_ERR(dev, + "modify qp: problems with GID index %d (rc=%d)\n", + attr->ah_attr.grh.sgid_index, rc); + return rc; + } + + rc = qedr_get_dmac(dev, &attr->ah_attr, + qp_params.remote_mac_addr); + if (rc) + return rc; + + qp_params.use_local_mac = true; + ether_addr_copy(qp_params.local_mac_addr, dev->ndev->dev_addr); + + DP_DEBUG(dev, QEDR_MSG_QP, "dgid=%x:%x:%x:%x\n", + qp_params.dgid.dwords[0], 
qp_params.dgid.dwords[1], + qp_params.dgid.dwords[2], qp_params.dgid.dwords[3]); + DP_DEBUG(dev, QEDR_MSG_QP, "sgid=%x:%x:%x:%x\n", + qp_params.sgid.dwords[0], qp_params.sgid.dwords[1], + qp_params.sgid.dwords[2], qp_params.sgid.dwords[3]); + DP_DEBUG(dev, QEDR_MSG_QP, "remote_mac=[%pM]\n", + qp_params.remote_mac_addr); +; + + qp_params.mtu = qp->mtu; + qp_params.lb_indication = false; + } + + if (!qp_params.mtu) { + /* Stay with current MTU */ + if (qp->mtu) + qp_params.mtu = qp->mtu; + else + qp_params.mtu = + ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu)); + } + + if (attr_mask & IB_QP_TIMEOUT) { + SET_FIELD(qp_params.modify_flags, + QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT, 1); + + qp_params.ack_timeout = attr->timeout; + if (attr->timeout) { + u32 temp; + + temp = 4096 * (1UL << attr->timeout) / 1000 / 1000; + /* FW requires [msec] */ + qp_params.ack_timeout = temp; + } else { + /* Infinite */ + qp_params.ack_timeout = 0; + } + } + if (attr_mask & IB_QP_RETRY_CNT) { + SET_FIELD(qp_params.modify_flags, + QED_ROCE_MODIFY_QP_VALID_RETRY_CNT, 1); + qp_params.retry_cnt = attr->retry_cnt; + } + + if (attr_mask & IB_QP_RNR_RETRY) { + SET_FIELD(qp_params.modify_flags, + QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT, 1); + qp_params.rnr_retry_cnt = attr->rnr_retry; + } + + if (attr_mask & IB_QP_RQ_PSN) { + SET_FIELD(qp_params.modify_flags, + QED_ROCE_MODIFY_QP_VALID_RQ_PSN, 1); + qp_params.rq_psn = attr->rq_psn; + qp->rq_psn = attr->rq_psn; + } + + if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) { + if (attr->max_rd_atomic > dev->attr.max_qp_req_rd_atomic_resc) { + rc = -EINVAL; + DP_ERR(dev, + "unsupported max_rd_atomic=%d, supported=%d\n", + attr->max_rd_atomic, + dev->attr.max_qp_req_rd_atomic_resc); + goto err; + } + + SET_FIELD(qp_params.modify_flags, + QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ, 1); + qp_params.max_rd_atomic_req = attr->max_rd_atomic; + } + + if (attr_mask & IB_QP_MIN_RNR_TIMER) { + SET_FIELD(qp_params.modify_flags, + QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER, 1); + qp_params.min_rnr_nak_timer = attr->min_rnr_timer; + } + + if (attr_mask & IB_QP_SQ_PSN) { + SET_FIELD(qp_params.modify_flags, + QED_ROCE_MODIFY_QP_VALID_SQ_PSN, 1); + qp_params.sq_psn = attr->sq_psn; + qp->sq_psn = attr->sq_psn; + } + + if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) { + if (attr->max_dest_rd_atomic > + dev->attr.max_qp_resp_rd_atomic_resc) { + DP_ERR(dev, + "unsupported max_dest_rd_atomic=%d, supported=%d\n", + attr->max_dest_rd_atomic, + dev->attr.max_qp_resp_rd_atomic_resc); + + rc = -EINVAL; + goto err; + } + + SET_FIELD(qp_params.modify_flags, + QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP, 1); + qp_params.max_rd_atomic_resp = attr->max_dest_rd_atomic; + } + + if (attr_mask & IB_QP_DEST_QPN) { + SET_FIELD(qp_params.modify_flags, + QED_ROCE_MODIFY_QP_VALID_DEST_QP, 1); + + qp_params.dest_qp = attr->dest_qp_num; + qp->dest_qp_num = attr->dest_qp_num; + } + + if (qp->qp_type != IB_QPT_GSI) + rc = dev->ops->rdma_modify_qp(dev->rdma_ctx, + qp->qed_qp, &qp_params); + + if (attr_mask & IB_QP_STATE) { + if ((qp->qp_type != IB_QPT_GSI) && (!udata)) + qedr_update_qp_state(dev, qp, qp_params.new_state); + qp->state = qp_params.new_state; + } + +err: + return rc; +} + +static int qedr_to_ib_qp_acc_flags(struct qed_rdma_query_qp_out_params *params) +{ + int ib_qp_acc_flags = 0; + + if (params->incoming_rdma_write_en) + ib_qp_acc_flags |= IB_ACCESS_REMOTE_WRITE; + if (params->incoming_rdma_read_en) + ib_qp_acc_flags |= IB_ACCESS_REMOTE_READ; + if (params->incoming_atomic_en) + ib_qp_acc_flags |= IB_ACCESS_REMOTE_ATOMIC; + 
ib_qp_acc_flags |= IB_ACCESS_LOCAL_WRITE; + return ib_qp_acc_flags; +} + +int qedr_query_qp(struct ib_qp *ibqp, + struct ib_qp_attr *qp_attr, + int attr_mask, struct ib_qp_init_attr *qp_init_attr) +{ + struct qed_rdma_query_qp_out_params params; + struct qedr_qp *qp = get_qedr_qp(ibqp); + struct qedr_dev *dev = qp->dev; + int rc = 0; + + memset(¶ms, 0, sizeof(params)); + + rc = dev->ops->rdma_query_qp(dev->rdma_ctx, qp->qed_qp, ¶ms); + if (rc) + goto err; + + memset(qp_attr, 0, sizeof(*qp_attr)); + memset(qp_init_attr, 0, sizeof(*qp_init_attr)); + + qp_attr->qp_state = qedr_get_ibqp_state(params.state); + qp_attr->cur_qp_state = qedr_get_ibqp_state(params.state); + qp_attr->path_mtu = iboe_get_mtu(params.mtu); + qp_attr->path_mig_state = IB_MIG_MIGRATED; + qp_attr->rq_psn = params.rq_psn; + qp_attr->sq_psn = params.sq_psn; + qp_attr->dest_qp_num = params.dest_qp; + + qp_attr->qp_access_flags = qedr_to_ib_qp_acc_flags(¶ms); + + qp_attr->cap.max_send_wr = qp->sq.max_wr; + qp_attr->cap.max_recv_wr = qp->rq.max_wr; + qp_attr->cap.max_send_sge = qp->sq.max_sges; + qp_attr->cap.max_recv_sge = qp->rq.max_sges; + qp_attr->cap.max_inline_data = qp->max_inline_data; + qp_init_attr->cap = qp_attr->cap; + + memcpy(&qp_attr->ah_attr.grh.dgid.raw[0], ¶ms.dgid.bytes[0], + sizeof(qp_attr->ah_attr.grh.dgid.raw)); + + qp_attr->ah_attr.grh.flow_label = params.flow_label; + qp_attr->ah_attr.grh.sgid_index = qp->sgid_idx; + qp_attr->ah_attr.grh.hop_limit = params.hop_limit_ttl; + qp_attr->ah_attr.grh.traffic_class = params.traffic_class_tos; + + qp_attr->ah_attr.ah_flags = IB_AH_GRH; + qp_attr->ah_attr.port_num = 1; + qp_attr->ah_attr.sl = 0; + qp_attr->timeout = params.timeout; + qp_attr->rnr_retry = params.rnr_retry; + qp_attr->retry_cnt = params.retry_cnt; + qp_attr->min_rnr_timer = params.min_rnr_nak_timer; + qp_attr->pkey_index = params.pkey_index; + qp_attr->port_num = 1; + qp_attr->ah_attr.src_path_bits = 0; + qp_attr->ah_attr.static_rate = 0; + qp_attr->alt_pkey_index = 0; + qp_attr->alt_port_num = 0; + qp_attr->alt_timeout = 0; + memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr)); + + qp_attr->sq_draining = (params.state == QED_ROCE_QP_STATE_SQD) ? 1 : 0; + qp_attr->max_dest_rd_atomic = params.max_dest_rd_atomic; + qp_attr->max_rd_atomic = params.max_rd_atomic; + qp_attr->en_sqd_async_notify = (params.sqd_async) ? 
1 : 0; + + DP_DEBUG(dev, QEDR_MSG_QP, "QEDR_QUERY_QP: max_inline_data=%d\n", + qp_attr->cap.max_inline_data); + +err: + return rc; +} + +int qedr_destroy_qp(struct ib_qp *ibqp) +{ + struct qedr_qp *qp = get_qedr_qp(ibqp); + struct qedr_dev *dev = qp->dev; + struct ib_qp_attr attr; + int attr_mask = 0; + int rc = 0; + + DP_DEBUG(dev, QEDR_MSG_QP, "destroy qp: destroying %p, qp type=%d\n", + qp, qp->qp_type); + + if (qp->state != (QED_ROCE_QP_STATE_RESET | QED_ROCE_QP_STATE_ERR | + QED_ROCE_QP_STATE_INIT)) { + attr.qp_state = IB_QPS_ERR; + attr_mask |= IB_QP_STATE; + + /* Change the QP state to ERROR */ + qedr_modify_qp(ibqp, &attr, attr_mask, NULL); + } + + if (qp->qp_type != IB_QPT_GSI) { + rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp); + if (rc) + return rc; + } + + if (ibqp->uobject && ibqp->uobject->context) { + qedr_cleanup_user_sq(dev, qp); + qedr_cleanup_user_rq(dev, qp); + } else { + qedr_cleanup_kernel_sq(dev, qp); + qedr_cleanup_kernel_rq(dev, qp); + } + + kfree(qp); + + return rc; +} diff --git a/drivers/infiniband/hw/qedr/verbs.h b/drivers/infiniband/hw/qedr/verbs.h index 36c8a692f740..056d6cb31fa2 100644 --- a/drivers/infiniband/hw/qedr/verbs.h +++ b/drivers/infiniband/hw/qedr/verbs.h @@ -62,5 +62,12 @@ struct ib_cq *qedr_create_cq(struct ib_device *ibdev, int qedr_resize_cq(struct ib_cq *, int cqe, struct ib_udata *); int qedr_destroy_cq(struct ib_cq *); int qedr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags); +struct ib_qp *qedr_create_qp(struct ib_pd *, struct ib_qp_init_attr *attrs, + struct ib_udata *); +int qedr_modify_qp(struct ib_qp *, struct ib_qp_attr *attr, + int attr_mask, struct ib_udata *udata); +int qedr_query_qp(struct ib_qp *, struct ib_qp_attr *qp_attr, + int qp_attr_mask, struct ib_qp_init_attr *); +int qedr_destroy_qp(struct ib_qp *ibqp); #endif diff --git a/include/uapi/rdma/qedr-abi.h b/include/uapi/rdma/qedr-abi.h index b0fc5f2125e0..75c270d839c8 100644 --- a/include/uapi/rdma/qedr-abi.h +++ b/include/uapi/rdma/qedr-abi.h @@ -69,4 +69,38 @@ struct qedr_create_cq_uresp { __u16 icid; }; +struct qedr_create_qp_ureq { + __u32 qp_handle_hi; + __u32 qp_handle_lo; + + /* SQ */ + /* user space virtual address of SQ buffer */ + __u64 sq_addr; + + /* length of SQ buffer */ + __u64 sq_len; + + /* RQ */ + /* user space virtual address of RQ buffer */ + __u64 rq_addr; + + /* length of RQ buffer */ + __u64 rq_len; +}; + +struct qedr_create_qp_uresp { + __u32 qp_id; + __u32 atomic_supported; + + /* SQ */ + __u32 sq_db_offset; + __u16 sq_icid; + + /* RQ */ + __u32 rq_db_offset; + __u16 rq_icid; + + __u32 rq_db2_offset; +}; + #endif /* __QEDR_USER_H__ */ -- cgit v1.2.3-59-g8ed1b From e0290cce6ac02f8e5ec501f25f6f6900f384550c Mon Sep 17 00:00:00 2001 From: Ram Amrani Date: Mon, 10 Oct 2016 13:15:35 +0300 Subject: qedr: Add support for memory registeration verbs Add support for user, dma and memory regions registration. 
Signed-off-by: Rajesh Borundia Signed-off-by: Ram Amrani Signed-off-by: Doug Ledford --- drivers/infiniband/hw/qedr/main.c | 10 +- drivers/infiniband/hw/qedr/qedr.h | 40 ++++ drivers/infiniband/hw/qedr/verbs.c | 365 +++++++++++++++++++++++++++++++++++++ drivers/infiniband/hw/qedr/verbs.h | 11 ++ 4 files changed, 425 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c index 13ba47b7b99f..bfc287c7d72e 100644 --- a/drivers/infiniband/hw/qedr/main.c +++ b/drivers/infiniband/hw/qedr/main.c @@ -100,7 +100,9 @@ static int qedr_register_device(struct qedr_dev *dev) QEDR_UVERBS(CREATE_QP) | QEDR_UVERBS(MODIFY_QP) | QEDR_UVERBS(QUERY_QP) | - QEDR_UVERBS(DESTROY_QP); + QEDR_UVERBS(DESTROY_QP) | + QEDR_UVERBS(REG_MR) | + QEDR_UVERBS(DEREG_MR); dev->ibdev.phys_port_cnt = 1; dev->ibdev.num_comp_vectors = dev->num_cnq; @@ -133,6 +135,12 @@ static int qedr_register_device(struct qedr_dev *dev) dev->ibdev.query_pkey = qedr_query_pkey; + dev->ibdev.get_dma_mr = qedr_get_dma_mr; + dev->ibdev.dereg_mr = qedr_dereg_mr; + dev->ibdev.reg_user_mr = qedr_reg_user_mr; + dev->ibdev.alloc_mr = qedr_alloc_mr; + dev->ibdev.map_mr_sg = qedr_map_mr_sg; + dev->ibdev.dma_device = &dev->pdev->dev; dev->ibdev.get_link_layer = qedr_link_layer; diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h index e9fe941c48ad..655955697995 100644 --- a/drivers/infiniband/hw/qedr/qedr.h +++ b/drivers/infiniband/hw/qedr/qedr.h @@ -375,6 +375,41 @@ struct qedr_qp { struct qedr_userq urq; }; +struct qedr_ah { + struct ib_ah ibah; + struct ib_ah_attr attr; +}; + +enum qedr_mr_type { + QEDR_MR_USER, + QEDR_MR_KERNEL, + QEDR_MR_DMA, + QEDR_MR_FRMR, +}; + +struct mr_info { + struct qedr_pbl *pbl_table; + struct qedr_pbl_info pbl_info; + struct list_head free_pbl_list; + struct list_head inuse_pbl_list; + u32 completed; + u32 completed_handled; +}; + +struct qedr_mr { + struct ib_mr ibmr; + struct ib_umem *umem; + + struct qed_rdma_register_tid_in_params hw_mr; + enum qedr_mr_type type; + + struct qedr_dev *dev; + struct mr_info info; + + u64 *pages; + u32 npages; +}; + static inline int qedr_get_dmac(struct qedr_dev *dev, struct ib_ah_attr *ah_attr, u8 *mac_addr) { @@ -418,4 +453,9 @@ static inline struct qedr_qp *get_qedr_qp(struct ib_qp *ibqp) { return container_of(ibqp, struct qedr_qp, ibqp); } + +static inline struct qedr_mr *get_qedr_mr(struct ib_mr *ibmr) +{ + return container_of(ibmr, struct qedr_mr, ibmr); +} #endif diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index a0d1c5fffb63..e70e808e2b9b 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -2082,3 +2082,368 @@ int qedr_destroy_qp(struct ib_qp *ibqp) return rc; } + +static void free_mr_info(struct qedr_dev *dev, struct mr_info *info) +{ + struct qedr_pbl *pbl, *tmp; + + if (info->pbl_table) + list_add_tail(&info->pbl_table->list_entry, + &info->free_pbl_list); + + if (!list_empty(&info->inuse_pbl_list)) + list_splice(&info->inuse_pbl_list, &info->free_pbl_list); + + list_for_each_entry_safe(pbl, tmp, &info->free_pbl_list, list_entry) { + list_del(&pbl->list_entry); + qedr_free_pbl(dev, &info->pbl_info, pbl); + } +} + +static int init_mr_info(struct qedr_dev *dev, struct mr_info *info, + size_t page_list_len, bool two_layered) +{ + struct qedr_pbl *tmp; + int rc; + + INIT_LIST_HEAD(&info->free_pbl_list); + INIT_LIST_HEAD(&info->inuse_pbl_list); + + rc = qedr_prepare_pbl_tbl(dev, &info->pbl_info, 
+ page_list_len, two_layered); + if (rc) + goto done; + + info->pbl_table = qedr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL); + if (!info->pbl_table) { + rc = -ENOMEM; + goto done; + } + + DP_DEBUG(dev, QEDR_MSG_MR, "pbl_table_pa = %pa\n", + &info->pbl_table->pa); + + /* in usual case we use 2 PBLs, so we add one to free + * list and allocating another one + */ + tmp = qedr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL); + if (!tmp) { + DP_DEBUG(dev, QEDR_MSG_MR, "Extra PBL is not allocated\n"); + goto done; + } + + list_add_tail(&tmp->list_entry, &info->free_pbl_list); + + DP_DEBUG(dev, QEDR_MSG_MR, "extra pbl_table_pa = %pa\n", &tmp->pa); + +done: + if (rc) + free_mr_info(dev, info); + + return rc; +} + +struct ib_mr *qedr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len, + u64 usr_addr, int acc, struct ib_udata *udata) +{ + struct qedr_dev *dev = get_qedr_dev(ibpd->device); + struct qedr_mr *mr; + struct qedr_pd *pd; + int rc = -ENOMEM; + + pd = get_qedr_pd(ibpd); + DP_DEBUG(dev, QEDR_MSG_MR, + "qedr_register user mr pd = %d start = %lld, len = %lld, usr_addr = %lld, acc = %d\n", + pd->pd_id, start, len, usr_addr, acc); + + if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE)) + return ERR_PTR(-EINVAL); + + mr = kzalloc(sizeof(*mr), GFP_KERNEL); + if (!mr) + return ERR_PTR(rc); + + mr->type = QEDR_MR_USER; + + mr->umem = ib_umem_get(ibpd->uobject->context, start, len, acc, 0); + if (IS_ERR(mr->umem)) { + rc = -EFAULT; + goto err0; + } + + rc = init_mr_info(dev, &mr->info, ib_umem_page_count(mr->umem), 1); + if (rc) + goto err1; + + qedr_populate_pbls(dev, mr->umem, mr->info.pbl_table, + &mr->info.pbl_info); + + rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid); + if (rc) { + DP_ERR(dev, "roce alloc tid returned an error %d\n", rc); + goto err1; + } + + /* Index only, 18 bit long, lkey = itid << 8 | key */ + mr->hw_mr.tid_type = QED_RDMA_TID_REGISTERED_MR; + mr->hw_mr.key = 0; + mr->hw_mr.pd = pd->pd_id; + mr->hw_mr.local_read = 1; + mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0; + mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0; + mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0; + mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 
1 : 0; + mr->hw_mr.mw_bind = false; + mr->hw_mr.pbl_ptr = mr->info.pbl_table[0].pa; + mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered; + mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size); + mr->hw_mr.page_size_log = ilog2(mr->umem->page_size); + mr->hw_mr.fbo = ib_umem_offset(mr->umem); + mr->hw_mr.length = len; + mr->hw_mr.vaddr = usr_addr; + mr->hw_mr.zbva = false; + mr->hw_mr.phy_mr = false; + mr->hw_mr.dma_mr = false; + + rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr); + if (rc) { + DP_ERR(dev, "roce register tid returned an error %d\n", rc); + goto err2; + } + + mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key; + if (mr->hw_mr.remote_write || mr->hw_mr.remote_read || + mr->hw_mr.remote_atomic) + mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key; + + DP_DEBUG(dev, QEDR_MSG_MR, "register user mr lkey: %x\n", + mr->ibmr.lkey); + return &mr->ibmr; + +err2: + dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid); +err1: + qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table); +err0: + kfree(mr); + return ERR_PTR(rc); +} + +int qedr_dereg_mr(struct ib_mr *ib_mr) +{ + struct qedr_mr *mr = get_qedr_mr(ib_mr); + struct qedr_dev *dev = get_qedr_dev(ib_mr->device); + int rc = 0; + + rc = dev->ops->rdma_deregister_tid(dev->rdma_ctx, mr->hw_mr.itid); + if (rc) + return rc; + + dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid); + + if ((mr->type != QEDR_MR_DMA) && (mr->type != QEDR_MR_FRMR)) + qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table); + + /* it could be user registered memory. */ + if (mr->umem) + ib_umem_release(mr->umem); + + kfree(mr); + + return rc; +} + +struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd, int max_page_list_len) +{ + struct qedr_pd *pd = get_qedr_pd(ibpd); + struct qedr_dev *dev = get_qedr_dev(ibpd->device); + struct qedr_mr *mr; + int rc = -ENOMEM; + + DP_DEBUG(dev, QEDR_MSG_MR, + "qedr_alloc_frmr pd = %d max_page_list_len= %d\n", pd->pd_id, + max_page_list_len); + + mr = kzalloc(sizeof(*mr), GFP_KERNEL); + if (!mr) + return ERR_PTR(rc); + + mr->dev = dev; + mr->type = QEDR_MR_FRMR; + + rc = init_mr_info(dev, &mr->info, max_page_list_len, 1); + if (rc) + goto err0; + + rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid); + if (rc) { + DP_ERR(dev, "roce alloc tid returned an error %d\n", rc); + goto err0; + } + + /* Index only, 18 bit long, lkey = itid << 8 | key */ + mr->hw_mr.tid_type = QED_RDMA_TID_FMR; + mr->hw_mr.key = 0; + mr->hw_mr.pd = pd->pd_id; + mr->hw_mr.local_read = 1; + mr->hw_mr.local_write = 0; + mr->hw_mr.remote_read = 0; + mr->hw_mr.remote_write = 0; + mr->hw_mr.remote_atomic = 0; + mr->hw_mr.mw_bind = false; + mr->hw_mr.pbl_ptr = 0; + mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered; + mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size); + mr->hw_mr.fbo = 0; + mr->hw_mr.length = 0; + mr->hw_mr.vaddr = 0; + mr->hw_mr.zbva = false; + mr->hw_mr.phy_mr = true; + mr->hw_mr.dma_mr = false; + + rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr); + if (rc) { + DP_ERR(dev, "roce register tid returned an error %d\n", rc); + goto err1; + } + + mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key; + mr->ibmr.rkey = mr->ibmr.lkey; + + DP_DEBUG(dev, QEDR_MSG_MR, "alloc frmr: %x\n", mr->ibmr.lkey); + return mr; + +err1: + dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid); +err0: + kfree(mr); + return ERR_PTR(rc); +} + +struct ib_mr *qedr_alloc_mr(struct ib_pd *ibpd, + enum ib_mr_type mr_type, u32 max_num_sg) +{ + struct qedr_dev *dev; + struct qedr_mr *mr; + + if 
(mr_type != IB_MR_TYPE_MEM_REG) + return ERR_PTR(-EINVAL); + + mr = __qedr_alloc_mr(ibpd, max_num_sg); + + if (IS_ERR(mr)) + return ERR_PTR(-EINVAL); + + dev = mr->dev; + + return &mr->ibmr; +} + +static int qedr_set_page(struct ib_mr *ibmr, u64 addr) +{ + struct qedr_mr *mr = get_qedr_mr(ibmr); + struct qedr_pbl *pbl_table; + struct regpair *pbe; + u32 pbes_in_page; + + if (unlikely(mr->npages == mr->info.pbl_info.num_pbes)) { + DP_ERR(mr->dev, "qedr_set_page failes when %d\n", mr->npages); + return -ENOMEM; + } + + DP_DEBUG(mr->dev, QEDR_MSG_MR, "qedr_set_page pages[%d] = 0x%llx\n", + mr->npages, addr); + + pbes_in_page = mr->info.pbl_info.pbl_size / sizeof(u64); + pbl_table = mr->info.pbl_table + (mr->npages / pbes_in_page); + pbe = (struct regpair *)pbl_table->va; + pbe += mr->npages % pbes_in_page; + pbe->lo = cpu_to_le32((u32)addr); + pbe->hi = cpu_to_le32((u32)upper_32_bits(addr)); + + mr->npages++; + + return 0; +} + +static void handle_completed_mrs(struct qedr_dev *dev, struct mr_info *info) +{ + int work = info->completed - info->completed_handled - 1; + + DP_DEBUG(dev, QEDR_MSG_MR, "Special FMR work = %d\n", work); + while (work-- > 0 && !list_empty(&info->inuse_pbl_list)) { + struct qedr_pbl *pbl; + + /* Free all the page list that are possible to be freed + * (all the ones that were invalidated), under the assumption + * that if an FMR was completed successfully that means that + * if there was an invalidate operation before it also ended + */ + pbl = list_first_entry(&info->inuse_pbl_list, + struct qedr_pbl, list_entry); + list_del(&pbl->list_entry); + list_add_tail(&pbl->list_entry, &info->free_pbl_list); + info->completed_handled++; + } +} + +int qedr_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, + int sg_nents, unsigned int *sg_offset) +{ + struct qedr_mr *mr = get_qedr_mr(ibmr); + + mr->npages = 0; + + handle_completed_mrs(mr->dev, &mr->info); + return ib_sg_to_pages(ibmr, sg, sg_nents, NULL, qedr_set_page); +} + +struct ib_mr *qedr_get_dma_mr(struct ib_pd *ibpd, int acc) +{ + struct qedr_dev *dev = get_qedr_dev(ibpd->device); + struct qedr_pd *pd = get_qedr_pd(ibpd); + struct qedr_mr *mr; + int rc; + + mr = kzalloc(sizeof(*mr), GFP_KERNEL); + if (!mr) + return ERR_PTR(-ENOMEM); + + mr->type = QEDR_MR_DMA; + + rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid); + if (rc) { + DP_ERR(dev, "roce alloc tid returned an error %d\n", rc); + goto err1; + } + + /* index only, 18 bit long, lkey = itid << 8 | key */ + mr->hw_mr.tid_type = QED_RDMA_TID_REGISTERED_MR; + mr->hw_mr.pd = pd->pd_id; + mr->hw_mr.local_read = 1; + mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0; + mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0; + mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0; + mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 
1 : 0; + mr->hw_mr.dma_mr = true; + + rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr); + if (rc) { + DP_ERR(dev, "roce register tid returned an error %d\n", rc); + goto err2; + } + + mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key; + if (mr->hw_mr.remote_write || mr->hw_mr.remote_read || + mr->hw_mr.remote_atomic) + mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key; + + DP_DEBUG(dev, QEDR_MSG_MR, "get dma mr: lkey = %x\n", mr->ibmr.lkey); + return &mr->ibmr; + +err2: + dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid); +err1: + kfree(mr); + return ERR_PTR(rc); +} diff --git a/drivers/infiniband/hw/qedr/verbs.h b/drivers/infiniband/hw/qedr/verbs.h index 056d6cb31fa2..4853f4af9983 100644 --- a/drivers/infiniband/hw/qedr/verbs.h +++ b/drivers/infiniband/hw/qedr/verbs.h @@ -70,4 +70,15 @@ int qedr_query_qp(struct ib_qp *, struct ib_qp_attr *qp_attr, int qp_attr_mask, struct ib_qp_init_attr *); int qedr_destroy_qp(struct ib_qp *ibqp); +int qedr_dereg_mr(struct ib_mr *); +struct ib_mr *qedr_get_dma_mr(struct ib_pd *, int acc); + +struct ib_mr *qedr_reg_user_mr(struct ib_pd *, u64 start, u64 length, + u64 virt, int acc, struct ib_udata *); + +int qedr_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, + int sg_nents, unsigned int *sg_offset); + +struct ib_mr *qedr_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, + u32 max_num_sg); #endif -- cgit v1.2.3-59-g8ed1b From afa0e13be754307a9ed7ad31fe42b5ec97948c49 Mon Sep 17 00:00:00 2001 From: Ram Amrani Date: Mon, 10 Oct 2016 13:15:36 +0300 Subject: qedr: Add support for data path Implement fastpath verbs like ib_send_post, ib_post_recv and ib_poll_cq. Signed-off-by: Rajesh Borundia Signed-off-by: Ram Amrani Signed-off-by: Doug Ledford --- drivers/infiniband/hw/qedr/main.c | 9 +- drivers/infiniband/hw/qedr/qedr.h | 19 + drivers/infiniband/hw/qedr/qedr_hsi_rdma.h | 562 +++++++++++++++ drivers/infiniband/hw/qedr/verbs.c | 1020 ++++++++++++++++++++++++++++ drivers/infiniband/hw/qedr/verbs.h | 5 + 5 files changed, 1614 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c index bfc287c7d72e..858cc354153d 100644 --- a/drivers/infiniband/hw/qedr/main.c +++ b/drivers/infiniband/hw/qedr/main.c @@ -102,7 +102,10 @@ static int qedr_register_device(struct qedr_dev *dev) QEDR_UVERBS(QUERY_QP) | QEDR_UVERBS(DESTROY_QP) | QEDR_UVERBS(REG_MR) | - QEDR_UVERBS(DEREG_MR); + QEDR_UVERBS(DEREG_MR) | + QEDR_UVERBS(POLL_CQ) | + QEDR_UVERBS(POST_SEND) | + QEDR_UVERBS(POST_RECV); dev->ibdev.phys_port_cnt = 1; dev->ibdev.num_comp_vectors = dev->num_cnq; @@ -141,6 +144,10 @@ static int qedr_register_device(struct qedr_dev *dev) dev->ibdev.alloc_mr = qedr_alloc_mr; dev->ibdev.map_mr_sg = qedr_map_mr_sg; + dev->ibdev.poll_cq = qedr_poll_cq; + dev->ibdev.post_send = qedr_post_send; + dev->ibdev.post_recv = qedr_post_recv; + dev->ibdev.dma_device = &dev->pdev->dev; dev->ibdev.get_link_layer = qedr_link_layer; diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h index 655955697995..775dbf16fdf2 100644 --- a/drivers/infiniband/hw/qedr/qedr.h +++ b/drivers/infiniband/hw/qedr/qedr.h @@ -410,6 +410,25 @@ struct qedr_mr { u32 npages; }; +#define SET_FIELD2(value, name, flag) ((value) |= ((flag) << (name ## _SHIFT))) + +#define QEDR_RESP_IMM (RDMA_CQE_RESPONDER_IMM_FLG_MASK << \ + RDMA_CQE_RESPONDER_IMM_FLG_SHIFT) +#define QEDR_RESP_RDMA (RDMA_CQE_RESPONDER_RDMA_FLG_MASK << \ + RDMA_CQE_RESPONDER_RDMA_FLG_SHIFT) +#define QEDR_RESP_RDMA_IMM 
(QEDR_RESP_IMM | QEDR_RESP_RDMA) + +static inline void qedr_inc_sw_cons(struct qedr_qp_hwq_info *info) +{ + info->cons = (info->cons + 1) % info->max_wr; + info->wqe_cons++; +} + +static inline void qedr_inc_sw_prod(struct qedr_qp_hwq_info *info) +{ + info->prod = (info->prod + 1) % info->max_wr; +} + static inline int qedr_get_dmac(struct qedr_dev *dev, struct ib_ah_attr *ah_attr, u8 *mac_addr) { diff --git a/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h b/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h index 47705598eec6..5c98d2055cad 100644 --- a/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h +++ b/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h @@ -150,6 +150,12 @@ struct rdma_rq_sge { struct regpair addr; __le32 length; __le32 flags; +#define RDMA_RQ_SGE_L_KEY_MASK 0x3FFFFFF +#define RDMA_RQ_SGE_L_KEY_SHIFT 0 +#define RDMA_RQ_SGE_NUM_SGES_MASK 0x7 +#define RDMA_RQ_SGE_NUM_SGES_SHIFT 26 +#define RDMA_RQ_SGE_RESERVED0_MASK 0x7 +#define RDMA_RQ_SGE_RESERVED0_SHIFT 29 }; struct rdma_srq_sge { @@ -183,4 +189,560 @@ struct rdma_pwm_val32_data { __le32 value; }; +/* DIF Block size options */ +enum rdma_dif_block_size { + RDMA_DIF_BLOCK_512 = 0, + RDMA_DIF_BLOCK_4096 = 1, + MAX_RDMA_DIF_BLOCK_SIZE +}; + +/* DIF CRC initial value */ +enum rdma_dif_crc_seed { + RDMA_DIF_CRC_SEED_0000 = 0, + RDMA_DIF_CRC_SEED_FFFF = 1, + MAX_RDMA_DIF_CRC_SEED +}; + +/* RDMA DIF Error Result Structure */ +struct rdma_dif_error_result { + __le32 error_intervals; + __le32 dif_error_1st_interval; + u8 flags; +#define RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_CRC_MASK 0x1 +#define RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_CRC_SHIFT 0 +#define RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_APP_TAG_MASK 0x1 +#define RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_APP_TAG_SHIFT 1 +#define RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_REF_TAG_MASK 0x1 +#define RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_REF_TAG_SHIFT 2 +#define RDMA_DIF_ERROR_RESULT_RESERVED0_MASK 0xF +#define RDMA_DIF_ERROR_RESULT_RESERVED0_SHIFT 3 +#define RDMA_DIF_ERROR_RESULT_TOGGLE_BIT_MASK 0x1 +#define RDMA_DIF_ERROR_RESULT_TOGGLE_BIT_SHIFT 7 + u8 reserved1[55]; +}; + +/* DIF IO direction */ +enum rdma_dif_io_direction_flg { + RDMA_DIF_DIR_RX = 0, + RDMA_DIF_DIR_TX = 1, + MAX_RDMA_DIF_IO_DIRECTION_FLG +}; + +/* RDMA DIF Runt Result Structure */ +struct rdma_dif_runt_result { + __le16 guard_tag; + __le16 reserved[3]; +}; + +/* Memory window type enumeration */ +enum rdma_mw_type { + RDMA_MW_TYPE_1, + RDMA_MW_TYPE_2A, + MAX_RDMA_MW_TYPE +}; + +struct rdma_sq_atomic_wqe { + __le32 reserved1; + __le32 length; + __le32 xrc_srq; + u8 req_type; + u8 flags; +#define RDMA_SQ_ATOMIC_WQE_COMP_FLG_MASK 0x1 +#define RDMA_SQ_ATOMIC_WQE_COMP_FLG_SHIFT 0 +#define RDMA_SQ_ATOMIC_WQE_RD_FENCE_FLG_MASK 0x1 +#define RDMA_SQ_ATOMIC_WQE_RD_FENCE_FLG_SHIFT 1 +#define RDMA_SQ_ATOMIC_WQE_INV_FENCE_FLG_MASK 0x1 +#define RDMA_SQ_ATOMIC_WQE_INV_FENCE_FLG_SHIFT 2 +#define RDMA_SQ_ATOMIC_WQE_SE_FLG_MASK 0x1 +#define RDMA_SQ_ATOMIC_WQE_SE_FLG_SHIFT 3 +#define RDMA_SQ_ATOMIC_WQE_INLINE_FLG_MASK 0x1 +#define RDMA_SQ_ATOMIC_WQE_INLINE_FLG_SHIFT 4 +#define RDMA_SQ_ATOMIC_WQE_DIF_ON_HOST_FLG_MASK 0x1 +#define RDMA_SQ_ATOMIC_WQE_DIF_ON_HOST_FLG_SHIFT 5 +#define RDMA_SQ_ATOMIC_WQE_RESERVED0_MASK 0x3 +#define RDMA_SQ_ATOMIC_WQE_RESERVED0_SHIFT 6 + u8 wqe_size; + u8 prev_wqe_size; + struct regpair remote_va; + __le32 r_key; + __le32 reserved2; + struct regpair cmp_data; + struct regpair swap_data; +}; + +/* First element (16 bytes) of atomic wqe */ +struct rdma_sq_atomic_wqe_1st { + __le32 reserved1; + __le32 length; + __le32 xrc_srq; + u8 req_type; 
+ u8 flags; +#define RDMA_SQ_ATOMIC_WQE_1ST_COMP_FLG_MASK 0x1 +#define RDMA_SQ_ATOMIC_WQE_1ST_COMP_FLG_SHIFT 0 +#define RDMA_SQ_ATOMIC_WQE_1ST_RD_FENCE_FLG_MASK 0x1 +#define RDMA_SQ_ATOMIC_WQE_1ST_RD_FENCE_FLG_SHIFT 1 +#define RDMA_SQ_ATOMIC_WQE_1ST_INV_FENCE_FLG_MASK 0x1 +#define RDMA_SQ_ATOMIC_WQE_1ST_INV_FENCE_FLG_SHIFT 2 +#define RDMA_SQ_ATOMIC_WQE_1ST_SE_FLG_MASK 0x1 +#define RDMA_SQ_ATOMIC_WQE_1ST_SE_FLG_SHIFT 3 +#define RDMA_SQ_ATOMIC_WQE_1ST_INLINE_FLG_MASK 0x1 +#define RDMA_SQ_ATOMIC_WQE_1ST_INLINE_FLG_SHIFT 4 +#define RDMA_SQ_ATOMIC_WQE_1ST_RESERVED0_MASK 0x7 +#define RDMA_SQ_ATOMIC_WQE_1ST_RESERVED0_SHIFT 5 + u8 wqe_size; + u8 prev_wqe_size; +}; + +/* Second element (16 bytes) of atomic wqe */ +struct rdma_sq_atomic_wqe_2nd { + struct regpair remote_va; + __le32 r_key; + __le32 reserved2; +}; + +/* Third element (16 bytes) of atomic wqe */ +struct rdma_sq_atomic_wqe_3rd { + struct regpair cmp_data; + struct regpair swap_data; +}; + +struct rdma_sq_bind_wqe { + struct regpair addr; + __le32 l_key; + u8 req_type; + u8 flags; +#define RDMA_SQ_BIND_WQE_COMP_FLG_MASK 0x1 +#define RDMA_SQ_BIND_WQE_COMP_FLG_SHIFT 0 +#define RDMA_SQ_BIND_WQE_RD_FENCE_FLG_MASK 0x1 +#define RDMA_SQ_BIND_WQE_RD_FENCE_FLG_SHIFT 1 +#define RDMA_SQ_BIND_WQE_INV_FENCE_FLG_MASK 0x1 +#define RDMA_SQ_BIND_WQE_INV_FENCE_FLG_SHIFT 2 +#define RDMA_SQ_BIND_WQE_SE_FLG_MASK 0x1 +#define RDMA_SQ_BIND_WQE_SE_FLG_SHIFT 3 +#define RDMA_SQ_BIND_WQE_INLINE_FLG_MASK 0x1 +#define RDMA_SQ_BIND_WQE_INLINE_FLG_SHIFT 4 +#define RDMA_SQ_BIND_WQE_RESERVED0_MASK 0x7 +#define RDMA_SQ_BIND_WQE_RESERVED0_SHIFT 5 + u8 wqe_size; + u8 prev_wqe_size; + u8 bind_ctrl; +#define RDMA_SQ_BIND_WQE_ZERO_BASED_MASK 0x1 +#define RDMA_SQ_BIND_WQE_ZERO_BASED_SHIFT 0 +#define RDMA_SQ_BIND_WQE_MW_TYPE_MASK 0x1 +#define RDMA_SQ_BIND_WQE_MW_TYPE_SHIFT 1 +#define RDMA_SQ_BIND_WQE_RESERVED1_MASK 0x3F +#define RDMA_SQ_BIND_WQE_RESERVED1_SHIFT 2 + u8 access_ctrl; +#define RDMA_SQ_BIND_WQE_REMOTE_READ_MASK 0x1 +#define RDMA_SQ_BIND_WQE_REMOTE_READ_SHIFT 0 +#define RDMA_SQ_BIND_WQE_REMOTE_WRITE_MASK 0x1 +#define RDMA_SQ_BIND_WQE_REMOTE_WRITE_SHIFT 1 +#define RDMA_SQ_BIND_WQE_ENABLE_ATOMIC_MASK 0x1 +#define RDMA_SQ_BIND_WQE_ENABLE_ATOMIC_SHIFT 2 +#define RDMA_SQ_BIND_WQE_LOCAL_READ_MASK 0x1 +#define RDMA_SQ_BIND_WQE_LOCAL_READ_SHIFT 3 +#define RDMA_SQ_BIND_WQE_LOCAL_WRITE_MASK 0x1 +#define RDMA_SQ_BIND_WQE_LOCAL_WRITE_SHIFT 4 +#define RDMA_SQ_BIND_WQE_RESERVED2_MASK 0x7 +#define RDMA_SQ_BIND_WQE_RESERVED2_SHIFT 5 + u8 reserved3; + u8 length_hi; + __le32 length_lo; + __le32 parent_l_key; + __le32 reserved4; +}; + +/* First element (16 bytes) of bind wqe */ +struct rdma_sq_bind_wqe_1st { + struct regpair addr; + __le32 l_key; + u8 req_type; + u8 flags; +#define RDMA_SQ_BIND_WQE_1ST_COMP_FLG_MASK 0x1 +#define RDMA_SQ_BIND_WQE_1ST_COMP_FLG_SHIFT 0 +#define RDMA_SQ_BIND_WQE_1ST_RD_FENCE_FLG_MASK 0x1 +#define RDMA_SQ_BIND_WQE_1ST_RD_FENCE_FLG_SHIFT 1 +#define RDMA_SQ_BIND_WQE_1ST_INV_FENCE_FLG_MASK 0x1 +#define RDMA_SQ_BIND_WQE_1ST_INV_FENCE_FLG_SHIFT 2 +#define RDMA_SQ_BIND_WQE_1ST_SE_FLG_MASK 0x1 +#define RDMA_SQ_BIND_WQE_1ST_SE_FLG_SHIFT 3 +#define RDMA_SQ_BIND_WQE_1ST_INLINE_FLG_MASK 0x1 +#define RDMA_SQ_BIND_WQE_1ST_INLINE_FLG_SHIFT 4 +#define RDMA_SQ_BIND_WQE_1ST_RESERVED0_MASK 0x7 +#define RDMA_SQ_BIND_WQE_1ST_RESERVED0_SHIFT 5 + u8 wqe_size; + u8 prev_wqe_size; +}; + +/* Second element (16 bytes) of bind wqe */ +struct rdma_sq_bind_wqe_2nd { + u8 bind_ctrl; +#define RDMA_SQ_BIND_WQE_2ND_ZERO_BASED_MASK 0x1 +#define RDMA_SQ_BIND_WQE_2ND_ZERO_BASED_SHIFT 0 
+#define RDMA_SQ_BIND_WQE_2ND_MW_TYPE_MASK 0x1 +#define RDMA_SQ_BIND_WQE_2ND_MW_TYPE_SHIFT 1 +#define RDMA_SQ_BIND_WQE_2ND_RESERVED1_MASK 0x3F +#define RDMA_SQ_BIND_WQE_2ND_RESERVED1_SHIFT 2 + u8 access_ctrl; +#define RDMA_SQ_BIND_WQE_2ND_REMOTE_READ_MASK 0x1 +#define RDMA_SQ_BIND_WQE_2ND_REMOTE_READ_SHIFT 0 +#define RDMA_SQ_BIND_WQE_2ND_REMOTE_WRITE_MASK 0x1 +#define RDMA_SQ_BIND_WQE_2ND_REMOTE_WRITE_SHIFT 1 +#define RDMA_SQ_BIND_WQE_2ND_ENABLE_ATOMIC_MASK 0x1 +#define RDMA_SQ_BIND_WQE_2ND_ENABLE_ATOMIC_SHIFT 2 +#define RDMA_SQ_BIND_WQE_2ND_LOCAL_READ_MASK 0x1 +#define RDMA_SQ_BIND_WQE_2ND_LOCAL_READ_SHIFT 3 +#define RDMA_SQ_BIND_WQE_2ND_LOCAL_WRITE_MASK 0x1 +#define RDMA_SQ_BIND_WQE_2ND_LOCAL_WRITE_SHIFT 4 +#define RDMA_SQ_BIND_WQE_2ND_RESERVED2_MASK 0x7 +#define RDMA_SQ_BIND_WQE_2ND_RESERVED2_SHIFT 5 + u8 reserved3; + u8 length_hi; + __le32 length_lo; + __le32 parent_l_key; + __le32 reserved4; +}; + +/* Structure with only the SQ WQE common + * fields. Size is of one SQ element (16B) + */ +struct rdma_sq_common_wqe { + __le32 reserved1[3]; + u8 req_type; + u8 flags; +#define RDMA_SQ_COMMON_WQE_COMP_FLG_MASK 0x1 +#define RDMA_SQ_COMMON_WQE_COMP_FLG_SHIFT 0 +#define RDMA_SQ_COMMON_WQE_RD_FENCE_FLG_MASK 0x1 +#define RDMA_SQ_COMMON_WQE_RD_FENCE_FLG_SHIFT 1 +#define RDMA_SQ_COMMON_WQE_INV_FENCE_FLG_MASK 0x1 +#define RDMA_SQ_COMMON_WQE_INV_FENCE_FLG_SHIFT 2 +#define RDMA_SQ_COMMON_WQE_SE_FLG_MASK 0x1 +#define RDMA_SQ_COMMON_WQE_SE_FLG_SHIFT 3 +#define RDMA_SQ_COMMON_WQE_INLINE_FLG_MASK 0x1 +#define RDMA_SQ_COMMON_WQE_INLINE_FLG_SHIFT 4 +#define RDMA_SQ_COMMON_WQE_RESERVED0_MASK 0x7 +#define RDMA_SQ_COMMON_WQE_RESERVED0_SHIFT 5 + u8 wqe_size; + u8 prev_wqe_size; +}; + +struct rdma_sq_fmr_wqe { + struct regpair addr; + __le32 l_key; + u8 req_type; + u8 flags; +#define RDMA_SQ_FMR_WQE_COMP_FLG_MASK 0x1 +#define RDMA_SQ_FMR_WQE_COMP_FLG_SHIFT 0 +#define RDMA_SQ_FMR_WQE_RD_FENCE_FLG_MASK 0x1 +#define RDMA_SQ_FMR_WQE_RD_FENCE_FLG_SHIFT 1 +#define RDMA_SQ_FMR_WQE_INV_FENCE_FLG_MASK 0x1 +#define RDMA_SQ_FMR_WQE_INV_FENCE_FLG_SHIFT 2 +#define RDMA_SQ_FMR_WQE_SE_FLG_MASK 0x1 +#define RDMA_SQ_FMR_WQE_SE_FLG_SHIFT 3 +#define RDMA_SQ_FMR_WQE_INLINE_FLG_MASK 0x1 +#define RDMA_SQ_FMR_WQE_INLINE_FLG_SHIFT 4 +#define RDMA_SQ_FMR_WQE_DIF_ON_HOST_FLG_MASK 0x1 +#define RDMA_SQ_FMR_WQE_DIF_ON_HOST_FLG_SHIFT 5 +#define RDMA_SQ_FMR_WQE_RESERVED0_MASK 0x3 +#define RDMA_SQ_FMR_WQE_RESERVED0_SHIFT 6 + u8 wqe_size; + u8 prev_wqe_size; + u8 fmr_ctrl; +#define RDMA_SQ_FMR_WQE_PAGE_SIZE_LOG_MASK 0x1F +#define RDMA_SQ_FMR_WQE_PAGE_SIZE_LOG_SHIFT 0 +#define RDMA_SQ_FMR_WQE_ZERO_BASED_MASK 0x1 +#define RDMA_SQ_FMR_WQE_ZERO_BASED_SHIFT 5 +#define RDMA_SQ_FMR_WQE_BIND_EN_MASK 0x1 +#define RDMA_SQ_FMR_WQE_BIND_EN_SHIFT 6 +#define RDMA_SQ_FMR_WQE_RESERVED1_MASK 0x1 +#define RDMA_SQ_FMR_WQE_RESERVED1_SHIFT 7 + u8 access_ctrl; +#define RDMA_SQ_FMR_WQE_REMOTE_READ_MASK 0x1 +#define RDMA_SQ_FMR_WQE_REMOTE_READ_SHIFT 0 +#define RDMA_SQ_FMR_WQE_REMOTE_WRITE_MASK 0x1 +#define RDMA_SQ_FMR_WQE_REMOTE_WRITE_SHIFT 1 +#define RDMA_SQ_FMR_WQE_ENABLE_ATOMIC_MASK 0x1 +#define RDMA_SQ_FMR_WQE_ENABLE_ATOMIC_SHIFT 2 +#define RDMA_SQ_FMR_WQE_LOCAL_READ_MASK 0x1 +#define RDMA_SQ_FMR_WQE_LOCAL_READ_SHIFT 3 +#define RDMA_SQ_FMR_WQE_LOCAL_WRITE_MASK 0x1 +#define RDMA_SQ_FMR_WQE_LOCAL_WRITE_SHIFT 4 +#define RDMA_SQ_FMR_WQE_RESERVED2_MASK 0x7 +#define RDMA_SQ_FMR_WQE_RESERVED2_SHIFT 5 + u8 reserved3; + u8 length_hi; + __le32 length_lo; + struct regpair pbl_addr; + __le32 dif_base_ref_tag; + __le16 dif_app_tag; + __le16 dif_app_tag_mask; + __le16 
dif_runt_crc_value; + __le16 dif_flags; +#define RDMA_SQ_FMR_WQE_DIF_IO_DIRECTION_FLG_MASK 0x1 +#define RDMA_SQ_FMR_WQE_DIF_IO_DIRECTION_FLG_SHIFT 0 +#define RDMA_SQ_FMR_WQE_DIF_BLOCK_SIZE_MASK 0x1 +#define RDMA_SQ_FMR_WQE_DIF_BLOCK_SIZE_SHIFT 1 +#define RDMA_SQ_FMR_WQE_DIF_RUNT_VALID_FLG_MASK 0x1 +#define RDMA_SQ_FMR_WQE_DIF_RUNT_VALID_FLG_SHIFT 2 +#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_CRC_GUARD_MASK 0x1 +#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_CRC_GUARD_SHIFT 3 +#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_REF_TAG_MASK 0x1 +#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_REF_TAG_SHIFT 4 +#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_APP_TAG_MASK 0x1 +#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_APP_TAG_SHIFT 5 +#define RDMA_SQ_FMR_WQE_DIF_CRC_SEED_MASK 0x1 +#define RDMA_SQ_FMR_WQE_DIF_CRC_SEED_SHIFT 6 +#define RDMA_SQ_FMR_WQE_RESERVED4_MASK 0x1FF +#define RDMA_SQ_FMR_WQE_RESERVED4_SHIFT 7 + __le32 Reserved5; +}; + +/* First element (16 bytes) of fmr wqe */ +struct rdma_sq_fmr_wqe_1st { + struct regpair addr; + __le32 l_key; + u8 req_type; + u8 flags; +#define RDMA_SQ_FMR_WQE_1ST_COMP_FLG_MASK 0x1 +#define RDMA_SQ_FMR_WQE_1ST_COMP_FLG_SHIFT 0 +#define RDMA_SQ_FMR_WQE_1ST_RD_FENCE_FLG_MASK 0x1 +#define RDMA_SQ_FMR_WQE_1ST_RD_FENCE_FLG_SHIFT 1 +#define RDMA_SQ_FMR_WQE_1ST_INV_FENCE_FLG_MASK 0x1 +#define RDMA_SQ_FMR_WQE_1ST_INV_FENCE_FLG_SHIFT 2 +#define RDMA_SQ_FMR_WQE_1ST_SE_FLG_MASK 0x1 +#define RDMA_SQ_FMR_WQE_1ST_SE_FLG_SHIFT 3 +#define RDMA_SQ_FMR_WQE_1ST_INLINE_FLG_MASK 0x1 +#define RDMA_SQ_FMR_WQE_1ST_INLINE_FLG_SHIFT 4 +#define RDMA_SQ_FMR_WQE_1ST_DIF_ON_HOST_FLG_MASK 0x1 +#define RDMA_SQ_FMR_WQE_1ST_DIF_ON_HOST_FLG_SHIFT 5 +#define RDMA_SQ_FMR_WQE_1ST_RESERVED0_MASK 0x3 +#define RDMA_SQ_FMR_WQE_1ST_RESERVED0_SHIFT 6 + u8 wqe_size; + u8 prev_wqe_size; +}; + +/* Second element (16 bytes) of fmr wqe */ +struct rdma_sq_fmr_wqe_2nd { + u8 fmr_ctrl; +#define RDMA_SQ_FMR_WQE_2ND_PAGE_SIZE_LOG_MASK 0x1F +#define RDMA_SQ_FMR_WQE_2ND_PAGE_SIZE_LOG_SHIFT 0 +#define RDMA_SQ_FMR_WQE_2ND_ZERO_BASED_MASK 0x1 +#define RDMA_SQ_FMR_WQE_2ND_ZERO_BASED_SHIFT 5 +#define RDMA_SQ_FMR_WQE_2ND_BIND_EN_MASK 0x1 +#define RDMA_SQ_FMR_WQE_2ND_BIND_EN_SHIFT 6 +#define RDMA_SQ_FMR_WQE_2ND_RESERVED1_MASK 0x1 +#define RDMA_SQ_FMR_WQE_2ND_RESERVED1_SHIFT 7 + u8 access_ctrl; +#define RDMA_SQ_FMR_WQE_2ND_REMOTE_READ_MASK 0x1 +#define RDMA_SQ_FMR_WQE_2ND_REMOTE_READ_SHIFT 0 +#define RDMA_SQ_FMR_WQE_2ND_REMOTE_WRITE_MASK 0x1 +#define RDMA_SQ_FMR_WQE_2ND_REMOTE_WRITE_SHIFT 1 +#define RDMA_SQ_FMR_WQE_2ND_ENABLE_ATOMIC_MASK 0x1 +#define RDMA_SQ_FMR_WQE_2ND_ENABLE_ATOMIC_SHIFT 2 +#define RDMA_SQ_FMR_WQE_2ND_LOCAL_READ_MASK 0x1 +#define RDMA_SQ_FMR_WQE_2ND_LOCAL_READ_SHIFT 3 +#define RDMA_SQ_FMR_WQE_2ND_LOCAL_WRITE_MASK 0x1 +#define RDMA_SQ_FMR_WQE_2ND_LOCAL_WRITE_SHIFT 4 +#define RDMA_SQ_FMR_WQE_2ND_RESERVED2_MASK 0x7 +#define RDMA_SQ_FMR_WQE_2ND_RESERVED2_SHIFT 5 + u8 reserved3; + u8 length_hi; + __le32 length_lo; + struct regpair pbl_addr; +}; + +/* Third element (16 bytes) of fmr wqe */ +struct rdma_sq_fmr_wqe_3rd { + __le32 dif_base_ref_tag; + __le16 dif_app_tag; + __le16 dif_app_tag_mask; + __le16 dif_runt_crc_value; + __le16 dif_flags; +#define RDMA_SQ_FMR_WQE_3RD_DIF_IO_DIRECTION_FLG_MASK 0x1 +#define RDMA_SQ_FMR_WQE_3RD_DIF_IO_DIRECTION_FLG_SHIFT 0 +#define RDMA_SQ_FMR_WQE_3RD_DIF_BLOCK_SIZE_MASK 0x1 +#define RDMA_SQ_FMR_WQE_3RD_DIF_BLOCK_SIZE_SHIFT 1 +#define RDMA_SQ_FMR_WQE_3RD_DIF_RUNT_VALID_FLG_MASK 0x1 +#define RDMA_SQ_FMR_WQE_3RD_DIF_RUNT_VALID_FLG_SHIFT 2 +#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_CRC_GUARD_MASK 0x1 +#define 
RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_CRC_GUARD_SHIFT 3 +#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_REF_TAG_MASK 0x1 +#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_REF_TAG_SHIFT 4 +#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_APP_TAG_MASK 0x1 +#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_APP_TAG_SHIFT 5 +#define RDMA_SQ_FMR_WQE_3RD_DIF_CRC_SEED_MASK 0x1 +#define RDMA_SQ_FMR_WQE_3RD_DIF_CRC_SEED_SHIFT 6 +#define RDMA_SQ_FMR_WQE_3RD_RESERVED4_MASK 0x1FF +#define RDMA_SQ_FMR_WQE_3RD_RESERVED4_SHIFT 7 + __le32 Reserved5; +}; + +struct rdma_sq_local_inv_wqe { + struct regpair reserved; + __le32 inv_l_key; + u8 req_type; + u8 flags; +#define RDMA_SQ_LOCAL_INV_WQE_COMP_FLG_MASK 0x1 +#define RDMA_SQ_LOCAL_INV_WQE_COMP_FLG_SHIFT 0 +#define RDMA_SQ_LOCAL_INV_WQE_RD_FENCE_FLG_MASK 0x1 +#define RDMA_SQ_LOCAL_INV_WQE_RD_FENCE_FLG_SHIFT 1 +#define RDMA_SQ_LOCAL_INV_WQE_INV_FENCE_FLG_MASK 0x1 +#define RDMA_SQ_LOCAL_INV_WQE_INV_FENCE_FLG_SHIFT 2 +#define RDMA_SQ_LOCAL_INV_WQE_SE_FLG_MASK 0x1 +#define RDMA_SQ_LOCAL_INV_WQE_SE_FLG_SHIFT 3 +#define RDMA_SQ_LOCAL_INV_WQE_INLINE_FLG_MASK 0x1 +#define RDMA_SQ_LOCAL_INV_WQE_INLINE_FLG_SHIFT 4 +#define RDMA_SQ_LOCAL_INV_WQE_DIF_ON_HOST_FLG_MASK 0x1 +#define RDMA_SQ_LOCAL_INV_WQE_DIF_ON_HOST_FLG_SHIFT 5 +#define RDMA_SQ_LOCAL_INV_WQE_RESERVED0_MASK 0x3 +#define RDMA_SQ_LOCAL_INV_WQE_RESERVED0_SHIFT 6 + u8 wqe_size; + u8 prev_wqe_size; +}; + +struct rdma_sq_rdma_wqe { + __le32 imm_data; + __le32 length; + __le32 xrc_srq; + u8 req_type; + u8 flags; +#define RDMA_SQ_RDMA_WQE_COMP_FLG_MASK 0x1 +#define RDMA_SQ_RDMA_WQE_COMP_FLG_SHIFT 0 +#define RDMA_SQ_RDMA_WQE_RD_FENCE_FLG_MASK 0x1 +#define RDMA_SQ_RDMA_WQE_RD_FENCE_FLG_SHIFT 1 +#define RDMA_SQ_RDMA_WQE_INV_FENCE_FLG_MASK 0x1 +#define RDMA_SQ_RDMA_WQE_INV_FENCE_FLG_SHIFT 2 +#define RDMA_SQ_RDMA_WQE_SE_FLG_MASK 0x1 +#define RDMA_SQ_RDMA_WQE_SE_FLG_SHIFT 3 +#define RDMA_SQ_RDMA_WQE_INLINE_FLG_MASK 0x1 +#define RDMA_SQ_RDMA_WQE_INLINE_FLG_SHIFT 4 +#define RDMA_SQ_RDMA_WQE_DIF_ON_HOST_FLG_MASK 0x1 +#define RDMA_SQ_RDMA_WQE_DIF_ON_HOST_FLG_SHIFT 5 +#define RDMA_SQ_RDMA_WQE_RESERVED0_MASK 0x3 +#define RDMA_SQ_RDMA_WQE_RESERVED0_SHIFT 6 + u8 wqe_size; + u8 prev_wqe_size; + struct regpair remote_va; + __le32 r_key; + u8 dif_flags; +#define RDMA_SQ_RDMA_WQE_DIF_BLOCK_SIZE_MASK 0x1 +#define RDMA_SQ_RDMA_WQE_DIF_BLOCK_SIZE_SHIFT 0 +#define RDMA_SQ_RDMA_WQE_DIF_FIRST_RDMA_IN_IO_FLG_MASK 0x1 +#define RDMA_SQ_RDMA_WQE_DIF_FIRST_RDMA_IN_IO_FLG_SHIFT 1 +#define RDMA_SQ_RDMA_WQE_DIF_LAST_RDMA_IN_IO_FLG_MASK 0x1 +#define RDMA_SQ_RDMA_WQE_DIF_LAST_RDMA_IN_IO_FLG_SHIFT 2 +#define RDMA_SQ_RDMA_WQE_RESERVED1_MASK 0x1F +#define RDMA_SQ_RDMA_WQE_RESERVED1_SHIFT 3 + u8 reserved2[3]; +}; + +/* First element (16 bytes) of rdma wqe */ +struct rdma_sq_rdma_wqe_1st { + __le32 imm_data; + __le32 length; + __le32 xrc_srq; + u8 req_type; + u8 flags; +#define RDMA_SQ_RDMA_WQE_1ST_COMP_FLG_MASK 0x1 +#define RDMA_SQ_RDMA_WQE_1ST_COMP_FLG_SHIFT 0 +#define RDMA_SQ_RDMA_WQE_1ST_RD_FENCE_FLG_MASK 0x1 +#define RDMA_SQ_RDMA_WQE_1ST_RD_FENCE_FLG_SHIFT 1 +#define RDMA_SQ_RDMA_WQE_1ST_INV_FENCE_FLG_MASK 0x1 +#define RDMA_SQ_RDMA_WQE_1ST_INV_FENCE_FLG_SHIFT 2 +#define RDMA_SQ_RDMA_WQE_1ST_SE_FLG_MASK 0x1 +#define RDMA_SQ_RDMA_WQE_1ST_SE_FLG_SHIFT 3 +#define RDMA_SQ_RDMA_WQE_1ST_INLINE_FLG_MASK 0x1 +#define RDMA_SQ_RDMA_WQE_1ST_INLINE_FLG_SHIFT 4 +#define RDMA_SQ_RDMA_WQE_1ST_DIF_ON_HOST_FLG_MASK 0x1 +#define RDMA_SQ_RDMA_WQE_1ST_DIF_ON_HOST_FLG_SHIFT 5 +#define RDMA_SQ_RDMA_WQE_1ST_RESERVED0_MASK 0x3 +#define RDMA_SQ_RDMA_WQE_1ST_RESERVED0_SHIFT 6 + u8 wqe_size; + u8 prev_wqe_size; 
+}; + +/* Second element (16 bytes) of rdma wqe */ +struct rdma_sq_rdma_wqe_2nd { + struct regpair remote_va; + __le32 r_key; + u8 dif_flags; +#define RDMA_SQ_RDMA_WQE_2ND_DIF_BLOCK_SIZE_MASK 0x1 +#define RDMA_SQ_RDMA_WQE_2ND_DIF_BLOCK_SIZE_SHIFT 0 +#define RDMA_SQ_RDMA_WQE_2ND_DIF_FIRST_SEGMENT_FLG_MASK 0x1 +#define RDMA_SQ_RDMA_WQE_2ND_DIF_FIRST_SEGMENT_FLG_SHIFT 1 +#define RDMA_SQ_RDMA_WQE_2ND_DIF_LAST_SEGMENT_FLG_MASK 0x1 +#define RDMA_SQ_RDMA_WQE_2ND_DIF_LAST_SEGMENT_FLG_SHIFT 2 +#define RDMA_SQ_RDMA_WQE_2ND_RESERVED1_MASK 0x1F +#define RDMA_SQ_RDMA_WQE_2ND_RESERVED1_SHIFT 3 + u8 reserved2[3]; +}; + +/* SQ WQE req type enumeration */ +enum rdma_sq_req_type { + RDMA_SQ_REQ_TYPE_SEND, + RDMA_SQ_REQ_TYPE_SEND_WITH_IMM, + RDMA_SQ_REQ_TYPE_SEND_WITH_INVALIDATE, + RDMA_SQ_REQ_TYPE_RDMA_WR, + RDMA_SQ_REQ_TYPE_RDMA_WR_WITH_IMM, + RDMA_SQ_REQ_TYPE_RDMA_RD, + RDMA_SQ_REQ_TYPE_ATOMIC_CMP_AND_SWAP, + RDMA_SQ_REQ_TYPE_ATOMIC_ADD, + RDMA_SQ_REQ_TYPE_LOCAL_INVALIDATE, + RDMA_SQ_REQ_TYPE_FAST_MR, + RDMA_SQ_REQ_TYPE_BIND, + RDMA_SQ_REQ_TYPE_INVALID, + MAX_RDMA_SQ_REQ_TYPE +}; + +struct rdma_sq_send_wqe { + __le32 inv_key_or_imm_data; + __le32 length; + __le32 xrc_srq; + u8 req_type; + u8 flags; +#define RDMA_SQ_SEND_WQE_COMP_FLG_MASK 0x1 +#define RDMA_SQ_SEND_WQE_COMP_FLG_SHIFT 0 +#define RDMA_SQ_SEND_WQE_RD_FENCE_FLG_MASK 0x1 +#define RDMA_SQ_SEND_WQE_RD_FENCE_FLG_SHIFT 1 +#define RDMA_SQ_SEND_WQE_INV_FENCE_FLG_MASK 0x1 +#define RDMA_SQ_SEND_WQE_INV_FENCE_FLG_SHIFT 2 +#define RDMA_SQ_SEND_WQE_SE_FLG_MASK 0x1 +#define RDMA_SQ_SEND_WQE_SE_FLG_SHIFT 3 +#define RDMA_SQ_SEND_WQE_INLINE_FLG_MASK 0x1 +#define RDMA_SQ_SEND_WQE_INLINE_FLG_SHIFT 4 +#define RDMA_SQ_SEND_WQE_DIF_ON_HOST_FLG_MASK 0x1 +#define RDMA_SQ_SEND_WQE_DIF_ON_HOST_FLG_SHIFT 5 +#define RDMA_SQ_SEND_WQE_RESERVED0_MASK 0x3 +#define RDMA_SQ_SEND_WQE_RESERVED0_SHIFT 6 + u8 wqe_size; + u8 prev_wqe_size; + __le32 reserved1[4]; +}; + +struct rdma_sq_send_wqe_1st { + __le32 inv_key_or_imm_data; + __le32 length; + __le32 xrc_srq; + u8 req_type; + u8 flags; +#define RDMA_SQ_SEND_WQE_1ST_COMP_FLG_MASK 0x1 +#define RDMA_SQ_SEND_WQE_1ST_COMP_FLG_SHIFT 0 +#define RDMA_SQ_SEND_WQE_1ST_RD_FENCE_FLG_MASK 0x1 +#define RDMA_SQ_SEND_WQE_1ST_RD_FENCE_FLG_SHIFT 1 +#define RDMA_SQ_SEND_WQE_1ST_INV_FENCE_FLG_MASK 0x1 +#define RDMA_SQ_SEND_WQE_1ST_INV_FENCE_FLG_SHIFT 2 +#define RDMA_SQ_SEND_WQE_1ST_SE_FLG_MASK 0x1 +#define RDMA_SQ_SEND_WQE_1ST_SE_FLG_SHIFT 3 +#define RDMA_SQ_SEND_WQE_1ST_INLINE_FLG_MASK 0x1 +#define RDMA_SQ_SEND_WQE_1ST_INLINE_FLG_SHIFT 4 +#define RDMA_SQ_SEND_WQE_1ST_RESERVED0_MASK 0x7 +#define RDMA_SQ_SEND_WQE_1ST_RESERVED0_SHIFT 5 + u8 wqe_size; + u8 prev_wqe_size; +}; + +struct rdma_sq_send_wqe_2st { + __le32 reserved1[4]; +}; + #endif /* __QED_HSI_RDMA__ */ diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index e70e808e2b9b..e4b4d47c6a2d 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -2447,3 +2447,1023 @@ err1: kfree(mr); return ERR_PTR(rc); } + +static inline int qedr_wq_is_full(struct qedr_qp_hwq_info *wq) +{ + return (((wq->prod + 1) % wq->max_wr) == wq->cons); +} + +static int sge_data_len(struct ib_sge *sg_list, int num_sge) +{ + int i, len = 0; + + for (i = 0; i < num_sge; i++) + len += sg_list[i].length; + + return len; +} + +static void swap_wqe_data64(u64 *p) +{ + int i; + + for (i = 0; i < QEDR_SQE_ELEMENT_SIZE / sizeof(u64); i++, p++) + *p = cpu_to_be64(cpu_to_le64(*p)); +} + +static u32 qedr_prepare_sq_inline_data(struct qedr_dev *dev, + struct qedr_qp 
*qp, u8 *wqe_size, + struct ib_send_wr *wr, + struct ib_send_wr **bad_wr, u8 *bits, + u8 bit) +{ + u32 data_size = sge_data_len(wr->sg_list, wr->num_sge); + char *seg_prt, *wqe; + int i, seg_siz; + + if (data_size > ROCE_REQ_MAX_INLINE_DATA_SIZE) { + DP_ERR(dev, "Too much inline data in WR: %d\n", data_size); + *bad_wr = wr; + return 0; + } + + if (!data_size) + return data_size; + + *bits |= bit; + + seg_prt = NULL; + wqe = NULL; + seg_siz = 0; + + /* Copy data inline */ + for (i = 0; i < wr->num_sge; i++) { + u32 len = wr->sg_list[i].length; + void *src = (void *)(uintptr_t)wr->sg_list[i].addr; + + while (len > 0) { + u32 cur; + + /* New segment required */ + if (!seg_siz) { + wqe = (char *)qed_chain_produce(&qp->sq.pbl); + seg_prt = wqe; + seg_siz = sizeof(struct rdma_sq_common_wqe); + (*wqe_size)++; + } + + /* Calculate currently allowed length */ + cur = min_t(u32, len, seg_siz); + memcpy(seg_prt, src, cur); + + /* Update segment variables */ + seg_prt += cur; + seg_siz -= cur; + + /* Update sge variables */ + src += cur; + len -= cur; + + /* Swap fully-completed segments */ + if (!seg_siz) + swap_wqe_data64((u64 *)wqe); + } + } + + /* swap last not completed segment */ + if (seg_siz) + swap_wqe_data64((u64 *)wqe); + + return data_size; +} + +#define RQ_SGE_SET(sge, vaddr, vlength, vflags) \ + do { \ + DMA_REGPAIR_LE(sge->addr, vaddr); \ + (sge)->length = cpu_to_le32(vlength); \ + (sge)->flags = cpu_to_le32(vflags); \ + } while (0) + +#define SRQ_HDR_SET(hdr, vwr_id, num_sge) \ + do { \ + DMA_REGPAIR_LE(hdr->wr_id, vwr_id); \ + (hdr)->num_sges = num_sge; \ + } while (0) + +#define SRQ_SGE_SET(sge, vaddr, vlength, vlkey) \ + do { \ + DMA_REGPAIR_LE(sge->addr, vaddr); \ + (sge)->length = cpu_to_le32(vlength); \ + (sge)->l_key = cpu_to_le32(vlkey); \ + } while (0) + +static u32 qedr_prepare_sq_sges(struct qedr_qp *qp, u8 *wqe_size, + struct ib_send_wr *wr) +{ + u32 data_size = 0; + int i; + + for (i = 0; i < wr->num_sge; i++) { + struct rdma_sq_sge *sge = qed_chain_produce(&qp->sq.pbl); + + DMA_REGPAIR_LE(sge->addr, wr->sg_list[i].addr); + sge->l_key = cpu_to_le32(wr->sg_list[i].lkey); + sge->length = cpu_to_le32(wr->sg_list[i].length); + data_size += wr->sg_list[i].length; + } + + if (wqe_size) + *wqe_size += wr->num_sge; + + return data_size; +} + +static u32 qedr_prepare_sq_rdma_data(struct qedr_dev *dev, + struct qedr_qp *qp, + struct rdma_sq_rdma_wqe_1st *rwqe, + struct rdma_sq_rdma_wqe_2nd *rwqe2, + struct ib_send_wr *wr, + struct ib_send_wr **bad_wr) +{ + rwqe2->r_key = cpu_to_le32(rdma_wr(wr)->rkey); + DMA_REGPAIR_LE(rwqe2->remote_va, rdma_wr(wr)->remote_addr); + + if (wr->send_flags & IB_SEND_INLINE) { + u8 flags = 0; + + SET_FIELD2(flags, RDMA_SQ_RDMA_WQE_1ST_INLINE_FLG, 1); + return qedr_prepare_sq_inline_data(dev, qp, &rwqe->wqe_size, wr, + bad_wr, &rwqe->flags, flags); + } + + return qedr_prepare_sq_sges(qp, &rwqe->wqe_size, wr); +} + +static u32 qedr_prepare_sq_send_data(struct qedr_dev *dev, + struct qedr_qp *qp, + struct rdma_sq_send_wqe_1st *swqe, + struct rdma_sq_send_wqe_2st *swqe2, + struct ib_send_wr *wr, + struct ib_send_wr **bad_wr) +{ + memset(swqe2, 0, sizeof(*swqe2)); + if (wr->send_flags & IB_SEND_INLINE) { + u8 flags = 0; + + SET_FIELD2(flags, RDMA_SQ_SEND_WQE_INLINE_FLG, 1); + return qedr_prepare_sq_inline_data(dev, qp, &swqe->wqe_size, wr, + bad_wr, &swqe->flags, flags); + } + + return qedr_prepare_sq_sges(qp, &swqe->wqe_size, wr); +} + +static int qedr_prepare_reg(struct qedr_qp *qp, + struct rdma_sq_fmr_wqe_1st *fwqe1, + struct ib_reg_wr *wr) +{ + struct 
qedr_mr *mr = get_qedr_mr(wr->mr); + struct rdma_sq_fmr_wqe_2nd *fwqe2; + + fwqe2 = (struct rdma_sq_fmr_wqe_2nd *)qed_chain_produce(&qp->sq.pbl); + fwqe1->addr.hi = upper_32_bits(mr->ibmr.iova); + fwqe1->addr.lo = lower_32_bits(mr->ibmr.iova); + fwqe1->l_key = wr->key; + + SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_READ, + !!(wr->access & IB_ACCESS_REMOTE_READ)); + SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_WRITE, + !!(wr->access & IB_ACCESS_REMOTE_WRITE)); + SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_ENABLE_ATOMIC, + !!(wr->access & IB_ACCESS_REMOTE_ATOMIC)); + SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_READ, 1); + SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_WRITE, + !!(wr->access & IB_ACCESS_LOCAL_WRITE)); + fwqe2->fmr_ctrl = 0; + + SET_FIELD2(fwqe2->fmr_ctrl, RDMA_SQ_FMR_WQE_2ND_PAGE_SIZE_LOG, + ilog2(mr->ibmr.page_size) - 12); + + fwqe2->length_hi = 0; + fwqe2->length_lo = mr->ibmr.length; + fwqe2->pbl_addr.hi = upper_32_bits(mr->info.pbl_table->pa); + fwqe2->pbl_addr.lo = lower_32_bits(mr->info.pbl_table->pa); + + qp->wqe_wr_id[qp->sq.prod].mr = mr; + + return 0; +} + +enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode) +{ + switch (opcode) { + case IB_WR_RDMA_WRITE: + case IB_WR_RDMA_WRITE_WITH_IMM: + return IB_WC_RDMA_WRITE; + case IB_WR_SEND_WITH_IMM: + case IB_WR_SEND: + case IB_WR_SEND_WITH_INV: + return IB_WC_SEND; + case IB_WR_RDMA_READ: + return IB_WC_RDMA_READ; + case IB_WR_ATOMIC_CMP_AND_SWP: + return IB_WC_COMP_SWAP; + case IB_WR_ATOMIC_FETCH_AND_ADD: + return IB_WC_FETCH_ADD; + case IB_WR_REG_MR: + return IB_WC_REG_MR; + case IB_WR_LOCAL_INV: + return IB_WC_LOCAL_INV; + default: + return IB_WC_SEND; + } +} + +inline bool qedr_can_post_send(struct qedr_qp *qp, struct ib_send_wr *wr) +{ + int wq_is_full, err_wr, pbl_is_full; + struct qedr_dev *dev = qp->dev; + + /* prevent SQ overflow and/or processing of a bad WR */ + err_wr = wr->num_sge > qp->sq.max_sges; + wq_is_full = qedr_wq_is_full(&qp->sq); + pbl_is_full = qed_chain_get_elem_left_u32(&qp->sq.pbl) < + QEDR_MAX_SQE_ELEMENTS_PER_SQE; + if (wq_is_full || err_wr || pbl_is_full) { + if (wq_is_full && !(qp->err_bitmap & QEDR_QP_ERR_SQ_FULL)) { + DP_ERR(dev, + "error: WQ is full. Post send on QP %p failed (this error appears only once)\n", + qp); + qp->err_bitmap |= QEDR_QP_ERR_SQ_FULL; + } + + if (err_wr && !(qp->err_bitmap & QEDR_QP_ERR_BAD_SR)) { + DP_ERR(dev, + "error: WR is bad. Post send on QP %p failed (this error appears only once)\n", + qp); + qp->err_bitmap |= QEDR_QP_ERR_BAD_SR; + } + + if (pbl_is_full && + !(qp->err_bitmap & QEDR_QP_ERR_SQ_PBL_FULL)) { + DP_ERR(dev, + "error: WQ PBL is full. 
Post send on QP %p failed (this error appears only once)\n", + qp); + qp->err_bitmap |= QEDR_QP_ERR_SQ_PBL_FULL; + } + return false; + } + return true; +} + +int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, + struct ib_send_wr **bad_wr) +{ + struct qedr_dev *dev = get_qedr_dev(ibqp->device); + struct qedr_qp *qp = get_qedr_qp(ibqp); + struct rdma_sq_atomic_wqe_1st *awqe1; + struct rdma_sq_atomic_wqe_2nd *awqe2; + struct rdma_sq_atomic_wqe_3rd *awqe3; + struct rdma_sq_send_wqe_2st *swqe2; + struct rdma_sq_local_inv_wqe *iwqe; + struct rdma_sq_rdma_wqe_2nd *rwqe2; + struct rdma_sq_send_wqe_1st *swqe; + struct rdma_sq_rdma_wqe_1st *rwqe; + struct rdma_sq_fmr_wqe_1st *fwqe1; + struct rdma_sq_common_wqe *wqe; + u32 length; + int rc = 0; + bool comp; + + if (!qedr_can_post_send(qp, wr)) { + *bad_wr = wr; + return -ENOMEM; + } + + wqe = qed_chain_produce(&qp->sq.pbl); + qp->wqe_wr_id[qp->sq.prod].signaled = + !!(wr->send_flags & IB_SEND_SIGNALED) || qp->signaled; + + wqe->flags = 0; + SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_SE_FLG, + !!(wr->send_flags & IB_SEND_SOLICITED)); + comp = (!!(wr->send_flags & IB_SEND_SIGNALED)) || qp->signaled; + SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_COMP_FLG, comp); + SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_RD_FENCE_FLG, + !!(wr->send_flags & IB_SEND_FENCE)); + wqe->prev_wqe_size = qp->prev_wqe_size; + + qp->wqe_wr_id[qp->sq.prod].opcode = qedr_ib_to_wc_opcode(wr->opcode); + + switch (wr->opcode) { + case IB_WR_SEND_WITH_IMM: + wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_IMM; + swqe = (struct rdma_sq_send_wqe_1st *)wqe; + swqe->wqe_size = 2; + swqe2 = qed_chain_produce(&qp->sq.pbl); + + swqe->inv_key_or_imm_data = cpu_to_le32(wr->ex.imm_data); + length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2, + wr, bad_wr); + swqe->length = cpu_to_le32(length); + qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size; + qp->prev_wqe_size = swqe->wqe_size; + qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length; + break; + case IB_WR_SEND: + wqe->req_type = RDMA_SQ_REQ_TYPE_SEND; + swqe = (struct rdma_sq_send_wqe_1st *)wqe; + + swqe->wqe_size = 2; + swqe2 = qed_chain_produce(&qp->sq.pbl); + length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2, + wr, bad_wr); + swqe->length = cpu_to_le32(length); + qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size; + qp->prev_wqe_size = swqe->wqe_size; + qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length; + break; + case IB_WR_SEND_WITH_INV: + wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_INVALIDATE; + swqe = (struct rdma_sq_send_wqe_1st *)wqe; + swqe2 = qed_chain_produce(&qp->sq.pbl); + swqe->wqe_size = 2; + swqe->inv_key_or_imm_data = cpu_to_le32(wr->ex.invalidate_rkey); + length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2, + wr, bad_wr); + swqe->length = cpu_to_le32(length); + qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size; + qp->prev_wqe_size = swqe->wqe_size; + qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length; + break; + + case IB_WR_RDMA_WRITE_WITH_IMM: + wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR_WITH_IMM; + rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe; + + rwqe->wqe_size = 2; + rwqe->imm_data = htonl(cpu_to_le32(wr->ex.imm_data)); + rwqe2 = qed_chain_produce(&qp->sq.pbl); + length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2, + wr, bad_wr); + rwqe->length = cpu_to_le32(length); + qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size; + qp->prev_wqe_size = rwqe->wqe_size; + qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length; + break; + case IB_WR_RDMA_WRITE: + wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR; + rwqe 
= (struct rdma_sq_rdma_wqe_1st *)wqe; + + rwqe->wqe_size = 2; + rwqe2 = qed_chain_produce(&qp->sq.pbl); + length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2, + wr, bad_wr); + rwqe->length = cpu_to_le32(length); + qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size; + qp->prev_wqe_size = rwqe->wqe_size; + qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length; + break; + case IB_WR_RDMA_READ_WITH_INV: + DP_ERR(dev, + "RDMA READ WITH INVALIDATE not supported\n"); + *bad_wr = wr; + rc = -EINVAL; + break; + + case IB_WR_RDMA_READ: + wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_RD; + rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe; + + rwqe->wqe_size = 2; + rwqe2 = qed_chain_produce(&qp->sq.pbl); + length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2, + wr, bad_wr); + rwqe->length = cpu_to_le32(length); + qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size; + qp->prev_wqe_size = rwqe->wqe_size; + qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length; + break; + + case IB_WR_ATOMIC_CMP_AND_SWP: + case IB_WR_ATOMIC_FETCH_AND_ADD: + awqe1 = (struct rdma_sq_atomic_wqe_1st *)wqe; + awqe1->wqe_size = 4; + + awqe2 = qed_chain_produce(&qp->sq.pbl); + DMA_REGPAIR_LE(awqe2->remote_va, atomic_wr(wr)->remote_addr); + awqe2->r_key = cpu_to_le32(atomic_wr(wr)->rkey); + + awqe3 = qed_chain_produce(&qp->sq.pbl); + + if (wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) { + wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_ADD; + DMA_REGPAIR_LE(awqe3->swap_data, + atomic_wr(wr)->compare_add); + } else { + wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_CMP_AND_SWAP; + DMA_REGPAIR_LE(awqe3->swap_data, + atomic_wr(wr)->swap); + DMA_REGPAIR_LE(awqe3->cmp_data, + atomic_wr(wr)->compare_add); + } + + qedr_prepare_sq_sges(qp, NULL, wr); + + qp->wqe_wr_id[qp->sq.prod].wqe_size = awqe1->wqe_size; + qp->prev_wqe_size = awqe1->wqe_size; + break; + + case IB_WR_LOCAL_INV: + iwqe = (struct rdma_sq_local_inv_wqe *)wqe; + iwqe->wqe_size = 1; + + iwqe->req_type = RDMA_SQ_REQ_TYPE_LOCAL_INVALIDATE; + iwqe->inv_l_key = wr->ex.invalidate_rkey; + qp->wqe_wr_id[qp->sq.prod].wqe_size = iwqe->wqe_size; + qp->prev_wqe_size = iwqe->wqe_size; + break; + case IB_WR_REG_MR: + DP_DEBUG(dev, QEDR_MSG_CQ, "REG_MR\n"); + wqe->req_type = RDMA_SQ_REQ_TYPE_FAST_MR; + fwqe1 = (struct rdma_sq_fmr_wqe_1st *)wqe; + fwqe1->wqe_size = 2; + + rc = qedr_prepare_reg(qp, fwqe1, reg_wr(wr)); + if (rc) { + DP_ERR(dev, "IB_REG_MR failed rc=%d\n", rc); + *bad_wr = wr; + break; + } + + qp->wqe_wr_id[qp->sq.prod].wqe_size = fwqe1->wqe_size; + qp->prev_wqe_size = fwqe1->wqe_size; + break; + default: + DP_ERR(dev, "invalid opcode 0x%x!\n", wr->opcode); + rc = -EINVAL; + *bad_wr = wr; + break; + } + + if (*bad_wr) { + u16 value; + + /* Restore prod to its position before + * this WR was processed + */ + value = le16_to_cpu(qp->sq.db_data.data.value); + qed_chain_set_prod(&qp->sq.pbl, value, wqe); + + /* Restore prev_wqe_size */ + qp->prev_wqe_size = wqe->prev_wqe_size; + rc = -EINVAL; + DP_ERR(dev, "POST SEND FAILED\n"); + } + + return rc; +} + +int qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, + struct ib_send_wr **bad_wr) +{ + struct qedr_dev *dev = get_qedr_dev(ibqp->device); + struct qedr_qp *qp = get_qedr_qp(ibqp); + unsigned long flags; + int rc = 0; + + *bad_wr = NULL; + + spin_lock_irqsave(&qp->q_lock, flags); + + if ((qp->state == QED_ROCE_QP_STATE_RESET) || + (qp->state == QED_ROCE_QP_STATE_ERR)) { + spin_unlock_irqrestore(&qp->q_lock, flags); + *bad_wr = wr; + DP_DEBUG(dev, QEDR_MSG_CQ, + "QP in wrong state! 
QP icid=0x%x state %d\n", + qp->icid, qp->state); + return -EINVAL; + } + + if (!wr) { + DP_ERR(dev, "Got an empty post send.\n"); + return -EINVAL; + } + + while (wr) { + rc = __qedr_post_send(ibqp, wr, bad_wr); + if (rc) + break; + + qp->wqe_wr_id[qp->sq.prod].wr_id = wr->wr_id; + + qedr_inc_sw_prod(&qp->sq); + + qp->sq.db_data.data.value++; + + wr = wr->next; + } + + /* Trigger doorbell + * If there was a failure in the first WR then it will be triggered in + * vane. However this is not harmful (as long as the producer value is + * unchanged). For performance reasons we avoid checking for this + * redundant doorbell. + */ + wmb(); + writel(qp->sq.db_data.raw, qp->sq.db); + + /* Make sure write sticks */ + mmiowb(); + + spin_unlock_irqrestore(&qp->q_lock, flags); + + return rc; +} + +int qedr_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr, + struct ib_recv_wr **bad_wr) +{ + struct qedr_qp *qp = get_qedr_qp(ibqp); + struct qedr_dev *dev = qp->dev; + unsigned long flags; + int status = 0; + + spin_lock_irqsave(&qp->q_lock, flags); + + if ((qp->state == QED_ROCE_QP_STATE_RESET) || + (qp->state == QED_ROCE_QP_STATE_ERR)) { + spin_unlock_irqrestore(&qp->q_lock, flags); + *bad_wr = wr; + return -EINVAL; + } + + while (wr) { + int i; + + if (qed_chain_get_elem_left_u32(&qp->rq.pbl) < + QEDR_MAX_RQE_ELEMENTS_PER_RQE || + wr->num_sge > qp->rq.max_sges) { + DP_ERR(dev, "Can't post WR (%d < %d) || (%d > %d)\n", + qed_chain_get_elem_left_u32(&qp->rq.pbl), + QEDR_MAX_RQE_ELEMENTS_PER_RQE, wr->num_sge, + qp->rq.max_sges); + status = -ENOMEM; + *bad_wr = wr; + break; + } + for (i = 0; i < wr->num_sge; i++) { + u32 flags = 0; + struct rdma_rq_sge *rqe = + qed_chain_produce(&qp->rq.pbl); + + /* First one must include the number + * of SGE in the list + */ + if (!i) + SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES, + wr->num_sge); + + SET_FIELD(flags, RDMA_RQ_SGE_L_KEY, + wr->sg_list[i].lkey); + + RQ_SGE_SET(rqe, wr->sg_list[i].addr, + wr->sg_list[i].length, flags); + } + + /* Special case of no sges. FW requires between 1-4 sges... + * in this case we need to post 1 sge with length zero. this is + * because rdma write with immediate consumes an RQ. 
+ */ + if (!wr->num_sge) { + u32 flags = 0; + struct rdma_rq_sge *rqe = + qed_chain_produce(&qp->rq.pbl); + + /* First one must include the number + * of SGE in the list + */ + SET_FIELD(flags, RDMA_RQ_SGE_L_KEY, 0); + SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES, 1); + + RQ_SGE_SET(rqe, 0, 0, flags); + i = 1; + } + + qp->rqe_wr_id[qp->rq.prod].wr_id = wr->wr_id; + qp->rqe_wr_id[qp->rq.prod].wqe_size = i; + + qedr_inc_sw_prod(&qp->rq); + + /* Flush all the writes before signalling doorbell */ + wmb(); + + qp->rq.db_data.data.value++; + + writel(qp->rq.db_data.raw, qp->rq.db); + + /* Make sure write sticks */ + mmiowb(); + + wr = wr->next; + } + + spin_unlock_irqrestore(&qp->q_lock, flags); + + return status; +} + +static int is_valid_cqe(struct qedr_cq *cq, union rdma_cqe *cqe) +{ + struct rdma_cqe_requester *resp_cqe = &cqe->req; + + return (resp_cqe->flags & RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK) == + cq->pbl_toggle; +} + +static struct qedr_qp *cqe_get_qp(union rdma_cqe *cqe) +{ + struct rdma_cqe_requester *resp_cqe = &cqe->req; + struct qedr_qp *qp; + + qp = (struct qedr_qp *)(uintptr_t)HILO_GEN(resp_cqe->qp_handle.hi, + resp_cqe->qp_handle.lo, + u64); + return qp; +} + +static enum rdma_cqe_type cqe_get_type(union rdma_cqe *cqe) +{ + struct rdma_cqe_requester *resp_cqe = &cqe->req; + + return GET_FIELD(resp_cqe->flags, RDMA_CQE_REQUESTER_TYPE); +} + +/* Return latest CQE (needs processing) */ +static union rdma_cqe *get_cqe(struct qedr_cq *cq) +{ + return cq->latest_cqe; +} + +/* In fmr we need to increase the number of fmr completed counter for the fmr + * algorithm determining whether we can free a pbl or not. + * we need to perform this whether the work request was signaled or not. for + * this purpose we call this function from the condition that checks if a wr + * should be skipped, to make sure we don't miss it ( possibly this fmr + * operation was not signalted) + */ +static inline void qedr_chk_if_fmr(struct qedr_qp *qp) +{ + if (qp->wqe_wr_id[qp->sq.cons].opcode == IB_WC_REG_MR) + qp->wqe_wr_id[qp->sq.cons].mr->info.completed++; +} + +static int process_req(struct qedr_dev *dev, struct qedr_qp *qp, + struct qedr_cq *cq, int num_entries, + struct ib_wc *wc, u16 hw_cons, enum ib_wc_status status, + int force) +{ + u16 cnt = 0; + + while (num_entries && qp->sq.wqe_cons != hw_cons) { + if (!qp->wqe_wr_id[qp->sq.cons].signaled && !force) { + qedr_chk_if_fmr(qp); + /* skip WC */ + goto next_cqe; + } + + /* fill WC */ + wc->status = status; + wc->wc_flags = 0; + wc->src_qp = qp->id; + wc->qp = &qp->ibqp; + + wc->wr_id = qp->wqe_wr_id[qp->sq.cons].wr_id; + wc->opcode = qp->wqe_wr_id[qp->sq.cons].opcode; + + switch (wc->opcode) { + case IB_WC_RDMA_WRITE: + wc->byte_len = qp->wqe_wr_id[qp->sq.cons].bytes_len; + break; + case IB_WC_COMP_SWAP: + case IB_WC_FETCH_ADD: + wc->byte_len = 8; + break; + case IB_WC_REG_MR: + qp->wqe_wr_id[qp->sq.cons].mr->info.completed++; + break; + default: + break; + } + + num_entries--; + wc++; + cnt++; +next_cqe: + while (qp->wqe_wr_id[qp->sq.cons].wqe_size--) + qed_chain_consume(&qp->sq.pbl); + qedr_inc_sw_cons(&qp->sq); + } + + return cnt; +} + +static int qedr_poll_cq_req(struct qedr_dev *dev, + struct qedr_qp *qp, struct qedr_cq *cq, + int num_entries, struct ib_wc *wc, + struct rdma_cqe_requester *req) +{ + int cnt = 0; + + switch (req->status) { + case RDMA_CQE_REQ_STS_OK: + cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons, + IB_WC_SUCCESS, 0); + break; + case RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR: + DP_ERR(dev, + "Error: POLL CQ with 
RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n", + cq->icid, qp->icid); + cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons, + IB_WC_WR_FLUSH_ERR, 0); + break; + default: + /* process all WQE before the cosumer */ + qp->state = QED_ROCE_QP_STATE_ERR; + cnt = process_req(dev, qp, cq, num_entries, wc, + req->sq_cons - 1, IB_WC_SUCCESS, 0); + wc += cnt; + /* if we have extra WC fill it with actual error info */ + if (cnt < num_entries) { + enum ib_wc_status wc_status; + + switch (req->status) { + case RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR: + DP_ERR(dev, + "Error: POLL CQ with RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR. CQ icid=0x%x, QP icid=0x%x\n", + cq->icid, qp->icid); + wc_status = IB_WC_BAD_RESP_ERR; + break; + case RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR: + DP_ERR(dev, + "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR. CQ icid=0x%x, QP icid=0x%x\n", + cq->icid, qp->icid); + wc_status = IB_WC_LOC_LEN_ERR; + break; + case RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR: + DP_ERR(dev, + "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n", + cq->icid, qp->icid); + wc_status = IB_WC_LOC_QP_OP_ERR; + break; + case RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR: + DP_ERR(dev, + "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR. CQ icid=0x%x, QP icid=0x%x\n", + cq->icid, qp->icid); + wc_status = IB_WC_LOC_PROT_ERR; + break; + case RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR: + DP_ERR(dev, + "Error: POLL CQ with RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n", + cq->icid, qp->icid); + wc_status = IB_WC_MW_BIND_ERR; + break; + case RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR: + DP_ERR(dev, + "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR. CQ icid=0x%x, QP icid=0x%x\n", + cq->icid, qp->icid); + wc_status = IB_WC_REM_INV_REQ_ERR; + break; + case RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR: + DP_ERR(dev, + "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR. CQ icid=0x%x, QP icid=0x%x\n", + cq->icid, qp->icid); + wc_status = IB_WC_REM_ACCESS_ERR; + break; + case RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR: + DP_ERR(dev, + "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n", + cq->icid, qp->icid); + wc_status = IB_WC_REM_OP_ERR; + break; + case RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR: + DP_ERR(dev, + "Error: POLL CQ with RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR. CQ icid=0x%x, QP icid=0x%x\n", + cq->icid, qp->icid); + wc_status = IB_WC_RNR_RETRY_EXC_ERR; + break; + case RDMA_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR: + DP_ERR(dev, + "Error: POLL CQ with ROCE_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR. CQ icid=0x%x, QP icid=0x%x\n", + cq->icid, qp->icid); + wc_status = IB_WC_RETRY_EXC_ERR; + break; + default: + DP_ERR(dev, + "Error: POLL CQ with IB_WC_GENERAL_ERR. 
CQ icid=0x%x, QP icid=0x%x\n", + cq->icid, qp->icid); + wc_status = IB_WC_GENERAL_ERR; + } + cnt += process_req(dev, qp, cq, 1, wc, req->sq_cons, + wc_status, 1); + } + } + + return cnt; +} + +static void __process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp, + struct qedr_cq *cq, struct ib_wc *wc, + struct rdma_cqe_responder *resp, u64 wr_id) +{ + enum ib_wc_status wc_status = IB_WC_SUCCESS; + u8 flags; + + wc->opcode = IB_WC_RECV; + wc->wc_flags = 0; + + switch (resp->status) { + case RDMA_CQE_RESP_STS_LOCAL_ACCESS_ERR: + wc_status = IB_WC_LOC_ACCESS_ERR; + break; + case RDMA_CQE_RESP_STS_LOCAL_LENGTH_ERR: + wc_status = IB_WC_LOC_LEN_ERR; + break; + case RDMA_CQE_RESP_STS_LOCAL_QP_OPERATION_ERR: + wc_status = IB_WC_LOC_QP_OP_ERR; + break; + case RDMA_CQE_RESP_STS_LOCAL_PROTECTION_ERR: + wc_status = IB_WC_LOC_PROT_ERR; + break; + case RDMA_CQE_RESP_STS_MEMORY_MGT_OPERATION_ERR: + wc_status = IB_WC_MW_BIND_ERR; + break; + case RDMA_CQE_RESP_STS_REMOTE_INVALID_REQUEST_ERR: + wc_status = IB_WC_REM_INV_RD_REQ_ERR; + break; + case RDMA_CQE_RESP_STS_OK: + wc_status = IB_WC_SUCCESS; + wc->byte_len = le32_to_cpu(resp->length); + + flags = resp->flags & QEDR_RESP_RDMA_IMM; + + if (flags == QEDR_RESP_RDMA_IMM) + wc->opcode = IB_WC_RECV_RDMA_WITH_IMM; + + if (flags == QEDR_RESP_RDMA_IMM || flags == QEDR_RESP_IMM) { + wc->ex.imm_data = + le32_to_cpu(resp->imm_data_or_inv_r_Key); + wc->wc_flags |= IB_WC_WITH_IMM; + } + break; + default: + wc->status = IB_WC_GENERAL_ERR; + DP_ERR(dev, "Invalid CQE status detected\n"); + } + + /* fill WC */ + wc->status = wc_status; + wc->src_qp = qp->id; + wc->qp = &qp->ibqp; + wc->wr_id = wr_id; +} + +static int process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp, + struct qedr_cq *cq, struct ib_wc *wc, + struct rdma_cqe_responder *resp) +{ + u64 wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id; + + __process_resp_one(dev, qp, cq, wc, resp, wr_id); + + while (qp->rqe_wr_id[qp->rq.cons].wqe_size--) + qed_chain_consume(&qp->rq.pbl); + qedr_inc_sw_cons(&qp->rq); + + return 1; +} + +static int process_resp_flush(struct qedr_qp *qp, struct qedr_cq *cq, + int num_entries, struct ib_wc *wc, u16 hw_cons) +{ + u16 cnt = 0; + + while (num_entries && qp->rq.wqe_cons != hw_cons) { + /* fill WC */ + wc->status = IB_WC_WR_FLUSH_ERR; + wc->wc_flags = 0; + wc->src_qp = qp->id; + wc->byte_len = 0; + wc->wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id; + wc->qp = &qp->ibqp; + num_entries--; + wc++; + cnt++; + while (qp->rqe_wr_id[qp->rq.cons].wqe_size--) + qed_chain_consume(&qp->rq.pbl); + qedr_inc_sw_cons(&qp->rq); + } + + return cnt; +} + +static void try_consume_resp_cqe(struct qedr_cq *cq, struct qedr_qp *qp, + struct rdma_cqe_responder *resp, int *update) +{ + if (le16_to_cpu(resp->rq_cons) == qp->rq.wqe_cons) { + consume_cqe(cq); + *update |= 1; + } +} + +static int qedr_poll_cq_resp(struct qedr_dev *dev, struct qedr_qp *qp, + struct qedr_cq *cq, int num_entries, + struct ib_wc *wc, struct rdma_cqe_responder *resp, + int *update) +{ + int cnt; + + if (resp->status == RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR) { + cnt = process_resp_flush(qp, cq, num_entries, wc, + resp->rq_cons); + try_consume_resp_cqe(cq, qp, resp, update); + } else { + cnt = process_resp_one(dev, qp, cq, wc, resp); + consume_cqe(cq); + *update |= 1; + } + + return cnt; +} + +static void try_consume_req_cqe(struct qedr_cq *cq, struct qedr_qp *qp, + struct rdma_cqe_requester *req, int *update) +{ + if (le16_to_cpu(req->sq_cons) == qp->sq.wqe_cons) { + consume_cqe(cq); + *update |= 1; + } +} + +int 
qedr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) +{ + struct qedr_dev *dev = get_qedr_dev(ibcq->device); + struct qedr_cq *cq = get_qedr_cq(ibcq); + union rdma_cqe *cqe = cq->latest_cqe; + u32 old_cons, new_cons; + unsigned long flags; + int update = 0; + int done = 0; + + spin_lock_irqsave(&cq->cq_lock, flags); + old_cons = qed_chain_get_cons_idx_u32(&cq->pbl); + while (num_entries && is_valid_cqe(cq, cqe)) { + struct qedr_qp *qp; + int cnt = 0; + + /* prevent speculative reads of any field of CQE */ + rmb(); + + qp = cqe_get_qp(cqe); + if (!qp) { + WARN(1, "Error: CQE QP pointer is NULL. CQE=%p\n", cqe); + break; + } + + wc->qp = &qp->ibqp; + + switch (cqe_get_type(cqe)) { + case RDMA_CQE_TYPE_REQUESTER: + cnt = qedr_poll_cq_req(dev, qp, cq, num_entries, wc, + &cqe->req); + try_consume_req_cqe(cq, qp, &cqe->req, &update); + break; + case RDMA_CQE_TYPE_RESPONDER_RQ: + cnt = qedr_poll_cq_resp(dev, qp, cq, num_entries, wc, + &cqe->resp, &update); + break; + case RDMA_CQE_TYPE_INVALID: + default: + DP_ERR(dev, "Error: invalid CQE type = %d\n", + cqe_get_type(cqe)); + } + num_entries -= cnt; + wc += cnt; + done += cnt; + + cqe = get_cqe(cq); + } + new_cons = qed_chain_get_cons_idx_u32(&cq->pbl); + + cq->cq_cons += new_cons - old_cons; + + if (update) + /* doorbell notifies abount latest VALID entry, + * but chain already point to the next INVALID one + */ + doorbell_cq(cq, cq->cq_cons - 1, cq->arm_flags); + + spin_unlock_irqrestore(&cq->cq_lock, flags); + return done; +} diff --git a/drivers/infiniband/hw/qedr/verbs.h b/drivers/infiniband/hw/qedr/verbs.h index 4853f4af9983..fb9b751a6dde 100644 --- a/drivers/infiniband/hw/qedr/verbs.h +++ b/drivers/infiniband/hw/qedr/verbs.h @@ -81,4 +81,9 @@ int qedr_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, struct ib_mr *qedr_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, u32 max_num_sg); +int qedr_poll_cq(struct ib_cq *, int num_entries, struct ib_wc *wc); +int qedr_post_send(struct ib_qp *, struct ib_send_wr *, + struct ib_send_wr **bad_wr); +int qedr_post_recv(struct ib_qp *, struct ib_recv_wr *, + struct ib_recv_wr **bad_wr); #endif -- cgit v1.2.3-59-g8ed1b From 1d1424c8f834e4e3b8792d310a94b0e2e8f59b4c Mon Sep 17 00:00:00 2001 From: Ram Amrani Date: Mon, 10 Oct 2016 13:15:37 +0300 Subject: qedr: Add LL2 RoCE interface Add light L2 interface for RoCE. 
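For orientation, the mapping this patch uses to rebuild the default SGID from the netdev MAC is the usual MAC-to-EUI-64 scheme: flip the universal/local bit of the first MAC byte, insert the 0xff/0xfe filler bytes, and prepend the fe80::/64 link-local prefix. A minimal, self-contained userspace sketch of just that mapping (plain C, not driver code; the helper name and the sample MAC are made up):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Illustrative only: derive a link-local GID from a MAC address,
 * mirroring the guid[] construction in qedr_mac_address_change().
 */
static void mac_to_gid(const uint8_t mac[6], uint8_t gid[16])
{
	memset(gid, 0, 16);
	gid[0]  = 0xfe;             /* fe80::/64 link-local subnet prefix */
	gid[1]  = 0x80;
	gid[8]  = mac[0] ^ 2;       /* flip the universal/local bit */
	gid[9]  = mac[1];
	gid[10] = mac[2];
	gid[11] = 0xff;             /* EUI-64 filler bytes */
	gid[12] = 0xfe;
	gid[13] = mac[3];
	gid[14] = mac[4];
	gid[15] = mac[5];
}

int main(void)
{
	uint8_t mac[6] = { 0x00, 0x0e, 0x1e, 0x12, 0x34, 0x56 };
	uint8_t gid[16];
	int i;

	mac_to_gid(mac, gid);
	for (i = 0; i < 16; i += 2)
		printf("%02x%02x%s", gid[i], gid[i + 1], i == 14 ? "\n" : ":");
	return 0;
}

The driver applies the same bytes to sgid->raw[8..15], refreshes the LL2 MAC filter, and only then dispatches IB_EVENT_GID_CHANGE, as the diff below shows.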
Signed-off-by: Rajesh Borundia Signed-off-by: Ram Amrani Signed-off-by: Doug Ledford --- drivers/infiniband/hw/qedr/main.c | 34 +++++++++++++++++++++++++++++++++- drivers/infiniband/hw/qedr/qedr.h | 2 +- 2 files changed, 34 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c index 858cc354153d..e5a07a2d4c5e 100644 --- a/drivers/infiniband/hw/qedr/main.c +++ b/drivers/infiniband/hw/qedr/main.c @@ -724,6 +724,38 @@ static void qedr_shutdown(struct qedr_dev *dev) qedr_remove(dev); } +static void qedr_mac_address_change(struct qedr_dev *dev) +{ + union ib_gid *sgid = &dev->sgid_tbl[0]; + u8 guid[8], mac_addr[6]; + int rc; + + /* Update SGID */ + ether_addr_copy(&mac_addr[0], dev->ndev->dev_addr); + guid[0] = mac_addr[0] ^ 2; + guid[1] = mac_addr[1]; + guid[2] = mac_addr[2]; + guid[3] = 0xff; + guid[4] = 0xfe; + guid[5] = mac_addr[3]; + guid[6] = mac_addr[4]; + guid[7] = mac_addr[5]; + sgid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL); + memcpy(&sgid->raw[8], guid, sizeof(guid)); + + /* Update LL2 */ + rc = dev->ops->roce_ll2_set_mac_filter(dev->cdev, + dev->gsi_ll2_mac_address, + dev->ndev->dev_addr); + + ether_addr_copy(dev->gsi_ll2_mac_address, dev->ndev->dev_addr); + + qedr_ib_dispatch_event(dev, 1, IB_EVENT_GID_CHANGE); + + if (rc) + DP_ERR(dev, "Error updating mac filter\n"); +} + /* event handling via NIC driver ensures that all the NIC specific * initialization done before RoCE driver notifies * event to stack. @@ -741,7 +773,7 @@ static void qedr_notify(struct qedr_dev *dev, enum qede_roce_event event) qedr_shutdown(dev); break; case QEDE_CHANGE_ADDR: - qedr_ib_dispatch_event(dev, 1, IB_EVENT_GID_CHANGE); + qedr_mac_address_change(dev); break; default: pr_err("Event not supported\n"); diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h index 775dbf16fdf2..0b404d87921f 100644 --- a/drivers/infiniband/hw/qedr/qedr.h +++ b/drivers/infiniband/hw/qedr/qedr.h @@ -147,7 +147,7 @@ struct qedr_dev { u8 dp_level; u8 num_hwfns; uint wq_multiplier; - + u8 gsi_ll2_mac_address[ETH_ALEN]; }; #define QEDR_MAX_SQ_PBL (0x8000) -- cgit v1.2.3-59-g8ed1b From 048867793046e6bd665869816f4702fc49cc9a18 Mon Sep 17 00:00:00 2001 From: Ram Amrani Date: Mon, 10 Oct 2016 13:15:38 +0300 Subject: qedr: Add GSI support Add support for GSI over light L2. 
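The GSI path added here builds the UD header in software, so before packing it the driver must decide between RoCE v1 and RoCE v2 (IPv4 or IPv6) framing based on the source GID. A rough, self-contained sketch of that selection only (userspace C; the ROCE_V1/ROCE_V2_* names mirror the patch but are defined locally, is_v4mapped() is a simplified stand-in for the kernel's ipv6_addr_v4mapped(), and the sample GID is invented):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define ETH_P_ROCE_V1 0x8915   /* matches ETH_P_ROCE in qedr_cm.h */
#define ETH_P_IP      0x0800
#define ETH_P_IPV6    0x86dd

enum roce_mode { ROCE_V1, ROCE_V2_IPV4, ROCE_V2_IPV6 };

/* Simplified check for an IPv4-mapped IPv6 address (::ffff:a.b.c.d). */
static int is_v4mapped(const uint8_t gid[16])
{
	static const uint8_t pfx[12] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff };

	return memcmp(gid, pfx, sizeof(pfx)) == 0;
}

static enum roce_mode pick_roce_mode(int gid_is_udp_encap,
				     const uint8_t sgid[16],
				     uint16_t *ether_type)
{
	if (!gid_is_udp_encap) {        /* RoCE v1: dedicated ethertype, GRH always present */
		*ether_type = ETH_P_ROCE_V1;
		return ROCE_V1;
	}
	if (is_v4mapped(sgid)) {        /* RoCE v2 over IPv4/UDP */
		*ether_type = ETH_P_IP;
		return ROCE_V2_IPV4;
	}
	*ether_type = ETH_P_IPV6;       /* RoCE v2 over IPv6/UDP */
	return ROCE_V2_IPV6;
}

int main(void)
{
	uint8_t v4mapped[16] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 10, 0, 0, 1 };
	uint16_t et;
	enum roce_mode m = pick_roce_mode(1, v4mapped, &et);

	printf("mode=%d ether_type=0x%04x\n", m, (unsigned int)et);
	return 0;
}

In the patch itself the same decision also controls whether a GRH/IPv6 or an IPv4 header is packed and whether a UDP header is appended, see qedr_gsi_build_header() below.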
Signed-off-by: Rajesh Borundia Signed-off-by: Ram Amrani Signed-off-by: Doug Ledford --- drivers/infiniband/hw/qedr/Makefile | 2 +- drivers/infiniband/hw/qedr/main.c | 3 + drivers/infiniband/hw/qedr/qedr.h | 15 + drivers/infiniband/hw/qedr/qedr_cm.c | 622 +++++++++++++++++++++++++++++++++++ drivers/infiniband/hw/qedr/qedr_cm.h | 21 ++ drivers/infiniband/hw/qedr/verbs.c | 41 +++ drivers/infiniband/hw/qedr/verbs.h | 3 + 7 files changed, 706 insertions(+), 1 deletion(-) create mode 100644 drivers/infiniband/hw/qedr/qedr_cm.c (limited to 'drivers') diff --git a/drivers/infiniband/hw/qedr/Makefile b/drivers/infiniband/hw/qedr/Makefile index b10f2b17ada5..ba7067c77f2f 100644 --- a/drivers/infiniband/hw/qedr/Makefile +++ b/drivers/infiniband/hw/qedr/Makefile @@ -1,3 +1,3 @@ obj-$(CONFIG_INFINIBAND_QEDR) := qedr.o -qedr-y := main.o verbs.o +qedr-y := main.o verbs.o qedr_cm.o diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c index e5a07a2d4c5e..42dff48a3171 100644 --- a/drivers/infiniband/hw/qedr/main.c +++ b/drivers/infiniband/hw/qedr/main.c @@ -138,6 +138,9 @@ static int qedr_register_device(struct qedr_dev *dev) dev->ibdev.query_pkey = qedr_query_pkey; + dev->ibdev.create_ah = qedr_create_ah; + dev->ibdev.destroy_ah = qedr_destroy_ah; + dev->ibdev.get_dma_mr = qedr_get_dma_mr; dev->ibdev.dereg_mr = qedr_dereg_mr; dev->ibdev.reg_user_mr = qedr_reg_user_mr; diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h index 0b404d87921f..620badd7d4fb 100644 --- a/drivers/infiniband/hw/qedr/qedr.h +++ b/drivers/infiniband/hw/qedr/qedr.h @@ -55,6 +55,7 @@ #define QEDR_MSG_RQ " RQ" #define QEDR_MSG_SQ " SQ" #define QEDR_MSG_QP " QP" +#define QEDR_MSG_GSI " GSI" #define QEDR_CQ_MAGIC_NUMBER (0x11223344) @@ -148,6 +149,10 @@ struct qedr_dev { u8 num_hwfns; uint wq_multiplier; u8 gsi_ll2_mac_address[ETH_ALEN]; + int gsi_qp_created; + struct qedr_cq *gsi_sqcq; + struct qedr_cq *gsi_rqcq; + struct qedr_qp *gsi_qp; }; #define QEDR_MAX_SQ_PBL (0x8000) @@ -246,6 +251,9 @@ struct qedr_cq { u16 icid; + /* Lock to protect completion handler */ + spinlock_t comp_handler_lock; + /* Lock to protect multiplem CQ's */ spinlock_t cq_lock; u8 arm_flags; @@ -292,6 +300,7 @@ struct qedr_qp_hwq_info { u16 prod; u16 cons; u16 wqe_cons; + u16 gsi_cons; u16 max_wr; /* DB */ @@ -366,6 +375,7 @@ struct qedr_qp { struct ib_sge sg_list[RDMA_MAX_SGE_PER_RQ_WQE]; u8 wqe_size; + u8 smac[ETH_ALEN]; u16 vlan_id; int rc; } *rqe_wr_id; @@ -473,6 +483,11 @@ static inline struct qedr_qp *get_qedr_qp(struct ib_qp *ibqp) return container_of(ibqp, struct qedr_qp, ibqp); } +static inline struct qedr_ah *get_qedr_ah(struct ib_ah *ibah) +{ + return container_of(ibah, struct qedr_ah, ibah); +} + static inline struct qedr_mr *get_qedr_mr(struct ib_mr *ibmr) { return container_of(ibmr, struct qedr_mr, ibmr); diff --git a/drivers/infiniband/hw/qedr/qedr_cm.c b/drivers/infiniband/hw/qedr/qedr_cm.c new file mode 100644 index 000000000000..63890ebb72bd --- /dev/null +++ b/drivers/infiniband/hw/qedr/qedr_cm.c @@ -0,0 +1,622 @@ +/* QLogic qedr NIC Driver + * Copyright (c) 2015-2016 QLogic Corporation + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "qedr_hsi.h" +#include +#include +#include "qedr.h" +#include "qedr_hsi.h" +#include "verbs.h" +#include +#include "qedr_hsi.h" +#include "qedr_cm.h" + +void qedr_inc_sw_gsi_cons(struct qedr_qp_hwq_info *info) +{ + info->gsi_cons = (info->gsi_cons + 1) % info->max_wr; +} + +void qedr_store_gsi_qp_cq(struct qedr_dev *dev, struct qedr_qp *qp, + struct ib_qp_init_attr *attrs) +{ + dev->gsi_qp_created = 1; + dev->gsi_sqcq = get_qedr_cq(attrs->send_cq); + dev->gsi_rqcq = get_qedr_cq(attrs->recv_cq); + dev->gsi_qp = qp; +} + +void qedr_ll2_tx_cb(void *_qdev, struct qed_roce_ll2_packet *pkt) +{ + struct qedr_dev *dev = (struct qedr_dev *)_qdev; + struct qedr_cq *cq = dev->gsi_sqcq; + struct qedr_qp *qp = dev->gsi_qp; + unsigned long flags; + + DP_DEBUG(dev, QEDR_MSG_GSI, + "LL2 TX CB: gsi_sqcq=%p, gsi_rqcq=%p, gsi_cons=%d, ibcq_comp=%s\n", + dev->gsi_sqcq, dev->gsi_rqcq, qp->sq.gsi_cons, + cq->ibcq.comp_handler ? 
"Yes" : "No"); + + dma_free_coherent(&dev->pdev->dev, pkt->header.len, pkt->header.vaddr, + pkt->header.baddr); + kfree(pkt); + + spin_lock_irqsave(&qp->q_lock, flags); + qedr_inc_sw_gsi_cons(&qp->sq); + spin_unlock_irqrestore(&qp->q_lock, flags); + + if (cq->ibcq.comp_handler) { + spin_lock_irqsave(&cq->comp_handler_lock, flags); + (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context); + spin_unlock_irqrestore(&cq->comp_handler_lock, flags); + } +} + +void qedr_ll2_rx_cb(void *_dev, struct qed_roce_ll2_packet *pkt, + struct qed_roce_ll2_rx_params *params) +{ + struct qedr_dev *dev = (struct qedr_dev *)_dev; + struct qedr_cq *cq = dev->gsi_rqcq; + struct qedr_qp *qp = dev->gsi_qp; + unsigned long flags; + + spin_lock_irqsave(&qp->q_lock, flags); + + qp->rqe_wr_id[qp->rq.gsi_cons].rc = params->rc; + qp->rqe_wr_id[qp->rq.gsi_cons].vlan_id = params->vlan_id; + qp->rqe_wr_id[qp->rq.gsi_cons].sg_list[0].length = pkt->payload[0].len; + ether_addr_copy(qp->rqe_wr_id[qp->rq.gsi_cons].smac, params->smac); + + qedr_inc_sw_gsi_cons(&qp->rq); + + spin_unlock_irqrestore(&qp->q_lock, flags); + + if (cq->ibcq.comp_handler) { + spin_lock_irqsave(&cq->comp_handler_lock, flags); + (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context); + spin_unlock_irqrestore(&cq->comp_handler_lock, flags); + } +} + +static void qedr_destroy_gsi_cq(struct qedr_dev *dev, + struct ib_qp_init_attr *attrs) +{ + struct qed_rdma_destroy_cq_in_params iparams; + struct qed_rdma_destroy_cq_out_params oparams; + struct qedr_cq *cq; + + cq = get_qedr_cq(attrs->send_cq); + iparams.icid = cq->icid; + dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams); + dev->ops->common->chain_free(dev->cdev, &cq->pbl); + + cq = get_qedr_cq(attrs->recv_cq); + /* if a dedicated recv_cq was used, delete it too */ + if (iparams.icid != cq->icid) { + iparams.icid = cq->icid; + dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams); + dev->ops->common->chain_free(dev->cdev, &cq->pbl); + } +} + +static inline int qedr_check_gsi_qp_attrs(struct qedr_dev *dev, + struct ib_qp_init_attr *attrs) +{ + if (attrs->cap.max_recv_sge > QEDR_GSI_MAX_RECV_SGE) { + DP_ERR(dev, + " create gsi qp: failed. max_recv_sge is larger the max %d>%d\n", + attrs->cap.max_recv_sge, QEDR_GSI_MAX_RECV_SGE); + return -EINVAL; + } + + if (attrs->cap.max_recv_wr > QEDR_GSI_MAX_RECV_WR) { + DP_ERR(dev, + " create gsi qp: failed. max_recv_wr is too large %d>%d\n", + attrs->cap.max_recv_wr, QEDR_GSI_MAX_RECV_WR); + return -EINVAL; + } + + if (attrs->cap.max_send_wr > QEDR_GSI_MAX_SEND_WR) { + DP_ERR(dev, + " create gsi qp: failed. max_send_wr is too large %d>%d\n", + attrs->cap.max_send_wr, QEDR_GSI_MAX_SEND_WR); + return -EINVAL; + } + + return 0; +} + +struct ib_qp *qedr_create_gsi_qp(struct qedr_dev *dev, + struct ib_qp_init_attr *attrs, + struct qedr_qp *qp) +{ + struct qed_roce_ll2_params ll2_params; + int rc; + + rc = qedr_check_gsi_qp_attrs(dev, attrs); + if (rc) + return ERR_PTR(rc); + + /* configure and start LL2 */ + memset(&ll2_params, 0, sizeof(ll2_params)); + ll2_params.max_tx_buffers = attrs->cap.max_send_wr; + ll2_params.max_rx_buffers = attrs->cap.max_recv_wr; + ll2_params.cbs.tx_cb = qedr_ll2_tx_cb; + ll2_params.cbs.rx_cb = qedr_ll2_rx_cb; + ll2_params.cb_cookie = (void *)dev; + ll2_params.mtu = dev->ndev->mtu; + ether_addr_copy(ll2_params.mac_address, dev->ndev->dev_addr); + rc = dev->ops->roce_ll2_start(dev->cdev, &ll2_params); + if (rc) { + DP_ERR(dev, "create gsi qp: failed on ll2 start. 
rc=%d\n", rc); + return ERR_PTR(rc); + } + + /* create QP */ + qp->ibqp.qp_num = 1; + qp->rq.max_wr = attrs->cap.max_recv_wr; + qp->sq.max_wr = attrs->cap.max_send_wr; + + qp->rqe_wr_id = kcalloc(qp->rq.max_wr, sizeof(*qp->rqe_wr_id), + GFP_KERNEL); + if (!qp->rqe_wr_id) + goto err; + qp->wqe_wr_id = kcalloc(qp->sq.max_wr, sizeof(*qp->wqe_wr_id), + GFP_KERNEL); + if (!qp->wqe_wr_id) + goto err; + + qedr_store_gsi_qp_cq(dev, qp, attrs); + ether_addr_copy(dev->gsi_ll2_mac_address, dev->ndev->dev_addr); + + /* the GSI CQ is handled by the driver so remove it from the FW */ + qedr_destroy_gsi_cq(dev, attrs); + dev->gsi_rqcq->cq_type = QEDR_CQ_TYPE_GSI; + dev->gsi_rqcq->cq_type = QEDR_CQ_TYPE_GSI; + + DP_DEBUG(dev, QEDR_MSG_GSI, "created GSI QP %p\n", qp); + + return &qp->ibqp; + +err: + kfree(qp->rqe_wr_id); + + rc = dev->ops->roce_ll2_stop(dev->cdev); + if (rc) + DP_ERR(dev, "create gsi qp: failed destroy on create\n"); + + return ERR_PTR(-ENOMEM); +} + +int qedr_destroy_gsi_qp(struct qedr_dev *dev) +{ + int rc; + + rc = dev->ops->roce_ll2_stop(dev->cdev); + if (rc) + DP_ERR(dev, "destroy gsi qp: failed (rc=%d)\n", rc); + else + DP_DEBUG(dev, QEDR_MSG_GSI, "destroy gsi qp: success\n"); + + return rc; +} + +#define QEDR_MAX_UD_HEADER_SIZE (100) +#define QEDR_GSI_QPN (1) +static inline int qedr_gsi_build_header(struct qedr_dev *dev, + struct qedr_qp *qp, + struct ib_send_wr *swr, + struct ib_ud_header *udh, + int *roce_mode) +{ + bool has_vlan = false, has_grh_ipv6 = true; + struct ib_ah_attr *ah_attr = &get_qedr_ah(ud_wr(swr)->ah)->attr; + struct ib_global_route *grh = &ah_attr->grh; + union ib_gid sgid; + int send_size = 0; + u16 vlan_id = 0; + u16 ether_type; + struct ib_gid_attr sgid_attr; + int rc; + int ip_ver = 0; + + bool has_udp = false; + int i; + + send_size = 0; + for (i = 0; i < swr->num_sge; ++i) + send_size += swr->sg_list[i].length; + + rc = ib_get_cached_gid(qp->ibqp.device, ah_attr->port_num, + grh->sgid_index, &sgid, &sgid_attr); + if (rc) { + DP_ERR(dev, + "gsi post send: failed to get cached GID (port=%d, ix=%d)\n", + ah_attr->port_num, grh->sgid_index); + return rc; + } + + vlan_id = rdma_vlan_dev_vlan_id(sgid_attr.ndev); + if (vlan_id < VLAN_CFI_MASK) + has_vlan = true; + if (sgid_attr.ndev) + dev_put(sgid_attr.ndev); + + if (!memcmp(&sgid, &zgid, sizeof(sgid))) { + DP_ERR(dev, "gsi post send: GID not found GID index %d\n", + ah_attr->grh.sgid_index); + return -ENOENT; + } + + has_udp = (sgid_attr.gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP); + if (!has_udp) { + /* RoCE v1 */ + ether_type = ETH_P_ROCE; + *roce_mode = ROCE_V1; + } else if (ipv6_addr_v4mapped((struct in6_addr *)&sgid)) { + /* RoCE v2 IPv4 */ + ip_ver = 4; + ether_type = ETH_P_IP; + has_grh_ipv6 = false; + *roce_mode = ROCE_V2_IPV4; + } else { + /* RoCE v2 IPv6 */ + ip_ver = 6; + ether_type = ETH_P_IPV6; + *roce_mode = ROCE_V2_IPV6; + } + + rc = ib_ud_header_init(send_size, false, true, has_vlan, + has_grh_ipv6, ip_ver, has_udp, 0, udh); + if (rc) { + DP_ERR(dev, "gsi post send: failed to init header\n"); + return rc; + } + + /* ENET + VLAN headers */ + ether_addr_copy(udh->eth.dmac_h, ah_attr->dmac); + ether_addr_copy(udh->eth.smac_h, dev->ndev->dev_addr); + if (has_vlan) { + udh->eth.type = htons(ETH_P_8021Q); + udh->vlan.tag = htons(vlan_id); + udh->vlan.type = htons(ether_type); + } else { + udh->eth.type = htons(ether_type); + } + + /* BTH */ + udh->bth.solicited_event = !!(swr->send_flags & IB_SEND_SOLICITED); + udh->bth.pkey = QEDR_ROCE_PKEY_DEFAULT; + udh->bth.destination_qpn = 
htonl(ud_wr(swr)->remote_qpn); + udh->bth.psn = htonl((qp->sq_psn++) & ((1 << 24) - 1)); + udh->bth.opcode = IB_OPCODE_UD_SEND_ONLY; + + /* DETH */ + udh->deth.qkey = htonl(0x80010000); + udh->deth.source_qpn = htonl(QEDR_GSI_QPN); + + if (has_grh_ipv6) { + /* GRH / IPv6 header */ + udh->grh.traffic_class = grh->traffic_class; + udh->grh.flow_label = grh->flow_label; + udh->grh.hop_limit = grh->hop_limit; + udh->grh.destination_gid = grh->dgid; + memcpy(&udh->grh.source_gid.raw, &sgid.raw, + sizeof(udh->grh.source_gid.raw)); + } else { + /* IPv4 header */ + u32 ipv4_addr; + + udh->ip4.protocol = IPPROTO_UDP; + udh->ip4.tos = htonl(ah_attr->grh.flow_label); + udh->ip4.frag_off = htons(IP_DF); + udh->ip4.ttl = ah_attr->grh.hop_limit; + + ipv4_addr = qedr_get_ipv4_from_gid(sgid.raw); + udh->ip4.saddr = ipv4_addr; + ipv4_addr = qedr_get_ipv4_from_gid(ah_attr->grh.dgid.raw); + udh->ip4.daddr = ipv4_addr; + /* note: checksum is calculated by the device */ + } + + /* UDP */ + if (has_udp) { + udh->udp.sport = htons(QEDR_ROCE_V2_UDP_SPORT); + udh->udp.dport = htons(ROCE_V2_UDP_DPORT); + udh->udp.csum = 0; + /* UDP length is untouched hence is zero */ + } + return 0; +} + +static inline int qedr_gsi_build_packet(struct qedr_dev *dev, + struct qedr_qp *qp, + struct ib_send_wr *swr, + struct qed_roce_ll2_packet **p_packet) +{ + u8 ud_header_buffer[QEDR_MAX_UD_HEADER_SIZE]; + struct qed_roce_ll2_packet *packet; + struct pci_dev *pdev = dev->pdev; + int roce_mode, header_size; + struct ib_ud_header udh; + int i, rc; + + *p_packet = NULL; + + rc = qedr_gsi_build_header(dev, qp, swr, &udh, &roce_mode); + if (rc) + return rc; + + header_size = ib_ud_header_pack(&udh, &ud_header_buffer); + + packet = kzalloc(sizeof(*packet), GFP_ATOMIC); + if (!packet) + return -ENOMEM; + + packet->header.vaddr = dma_alloc_coherent(&pdev->dev, header_size, + &packet->header.baddr, + GFP_ATOMIC); + if (!packet->header.vaddr) { + kfree(packet); + return -ENOMEM; + } + + if (ether_addr_equal(udh.eth.smac_h, udh.eth.dmac_h)) + packet->tx_dest = QED_ROCE_LL2_TX_DEST_NW; + else + packet->tx_dest = QED_ROCE_LL2_TX_DEST_LB; + + packet->roce_mode = roce_mode; + memcpy(packet->header.vaddr, ud_header_buffer, header_size); + packet->header.len = header_size; + packet->n_seg = swr->num_sge; + for (i = 0; i < packet->n_seg; i++) { + packet->payload[i].baddr = swr->sg_list[i].addr; + packet->payload[i].len = swr->sg_list[i].length; + } + + *p_packet = packet; + + return 0; +} + +int qedr_gsi_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, + struct ib_send_wr **bad_wr) +{ + struct qed_roce_ll2_packet *pkt = NULL; + struct qedr_qp *qp = get_qedr_qp(ibqp); + struct qed_roce_ll2_tx_params params; + struct qedr_dev *dev = qp->dev; + unsigned long flags; + int rc; + + if (qp->state != QED_ROCE_QP_STATE_RTS) { + *bad_wr = wr; + DP_ERR(dev, + "gsi post recv: failed to post rx buffer. 
state is %d and not QED_ROCE_QP_STATE_RTS\n", + qp->state); + return -EINVAL; + } + + if (wr->num_sge > RDMA_MAX_SGE_PER_SQ_WQE) { + DP_ERR(dev, "gsi post send: num_sge is too large (%d>%d)\n", + wr->num_sge, RDMA_MAX_SGE_PER_SQ_WQE); + rc = -EINVAL; + goto err; + } + + if (wr->opcode != IB_WR_SEND) { + DP_ERR(dev, + "gsi post send: failed due to unsupported opcode %d\n", + wr->opcode); + rc = -EINVAL; + goto err; + } + + memset(¶ms, 0, sizeof(params)); + + spin_lock_irqsave(&qp->q_lock, flags); + + rc = qedr_gsi_build_packet(dev, qp, wr, &pkt); + if (rc) { + spin_unlock_irqrestore(&qp->q_lock, flags); + goto err; + } + + rc = dev->ops->roce_ll2_tx(dev->cdev, pkt, ¶ms); + if (!rc) { + qp->wqe_wr_id[qp->sq.prod].wr_id = wr->wr_id; + qedr_inc_sw_prod(&qp->sq); + DP_DEBUG(qp->dev, QEDR_MSG_GSI, + "gsi post send: opcode=%d, in_irq=%ld, irqs_disabled=%d, wr_id=%llx\n", + wr->opcode, in_irq(), irqs_disabled(), wr->wr_id); + } else { + if (rc == QED_ROCE_TX_HEAD_FAILURE) { + /* TX failed while posting header - release resources */ + dma_free_coherent(&dev->pdev->dev, pkt->header.len, + pkt->header.vaddr, pkt->header.baddr); + kfree(pkt); + } else if (rc == QED_ROCE_TX_FRAG_FAILURE) { + /* NTD since TX failed while posting a fragment. We will + * release the resources on TX callback + */ + } + + DP_ERR(dev, "gsi post send: failed to transmit (rc=%d)\n", rc); + rc = -EAGAIN; + *bad_wr = wr; + } + + spin_unlock_irqrestore(&qp->q_lock, flags); + + if (wr->next) { + DP_ERR(dev, + "gsi post send: failed second WR. Only one WR may be passed at a time\n"); + *bad_wr = wr->next; + rc = -EINVAL; + } + + return rc; + +err: + *bad_wr = wr; + return rc; +} + +int qedr_gsi_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr, + struct ib_recv_wr **bad_wr) +{ + struct qedr_dev *dev = get_qedr_dev(ibqp->device); + struct qedr_qp *qp = get_qedr_qp(ibqp); + struct qed_roce_ll2_buffer buf; + unsigned long flags; + int status = 0; + int rc; + + if ((qp->state != QED_ROCE_QP_STATE_RTR) && + (qp->state != QED_ROCE_QP_STATE_RTS)) { + *bad_wr = wr; + DP_ERR(dev, + "gsi post recv: failed to post rx buffer. state is %d and not QED_ROCE_QP_STATE_RTR/S\n", + qp->state); + return -EINVAL; + } + + memset(&buf, 0, sizeof(buf)); + + spin_lock_irqsave(&qp->q_lock, flags); + + while (wr) { + if (wr->num_sge > QEDR_GSI_MAX_RECV_SGE) { + DP_ERR(dev, + "gsi post recv: failed to post rx buffer. 
too many sges %d>%d\n", + wr->num_sge, QEDR_GSI_MAX_RECV_SGE); + goto err; + } + + buf.baddr = wr->sg_list[0].addr; + buf.len = wr->sg_list[0].length; + + rc = dev->ops->roce_ll2_post_rx_buffer(dev->cdev, &buf, 0, 1); + if (rc) { + DP_ERR(dev, + "gsi post recv: failed to post rx buffer (rc=%d)\n", + rc); + goto err; + } + + memset(&qp->rqe_wr_id[qp->rq.prod], 0, + sizeof(qp->rqe_wr_id[qp->rq.prod])); + qp->rqe_wr_id[qp->rq.prod].sg_list[0] = wr->sg_list[0]; + qp->rqe_wr_id[qp->rq.prod].wr_id = wr->wr_id; + + qedr_inc_sw_prod(&qp->rq); + + wr = wr->next; + } + + spin_unlock_irqrestore(&qp->q_lock, flags); + + return status; +err: + spin_unlock_irqrestore(&qp->q_lock, flags); + *bad_wr = wr; + return -ENOMEM; +} + +int qedr_gsi_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) +{ + struct qedr_dev *dev = get_qedr_dev(ibcq->device); + struct qedr_cq *cq = get_qedr_cq(ibcq); + struct qedr_qp *qp = dev->gsi_qp; + unsigned long flags; + int i = 0; + + spin_lock_irqsave(&cq->cq_lock, flags); + + while (i < num_entries && qp->rq.cons != qp->rq.gsi_cons) { + memset(&wc[i], 0, sizeof(*wc)); + + wc[i].qp = &qp->ibqp; + wc[i].wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id; + wc[i].opcode = IB_WC_RECV; + wc[i].pkey_index = 0; + wc[i].status = (qp->rqe_wr_id[qp->rq.cons].rc) ? + IB_WC_GENERAL_ERR : IB_WC_SUCCESS; + /* 0 - currently only one recv sg is supported */ + wc[i].byte_len = qp->rqe_wr_id[qp->rq.cons].sg_list[0].length; + wc[i].wc_flags |= IB_WC_GRH | IB_WC_IP_CSUM_OK; + ether_addr_copy(wc[i].smac, qp->rqe_wr_id[qp->rq.cons].smac); + wc[i].wc_flags |= IB_WC_WITH_SMAC; + if (qp->rqe_wr_id[qp->rq.cons].vlan_id) { + wc[i].wc_flags |= IB_WC_WITH_VLAN; + wc[i].vlan_id = qp->rqe_wr_id[qp->rq.cons].vlan_id; + } + + qedr_inc_sw_cons(&qp->rq); + i++; + } + + while (i < num_entries && qp->sq.cons != qp->sq.gsi_cons) { + memset(&wc[i], 0, sizeof(*wc)); + + wc[i].qp = &qp->ibqp; + wc[i].wr_id = qp->wqe_wr_id[qp->sq.cons].wr_id; + wc[i].opcode = IB_WC_SEND; + wc[i].status = IB_WC_SUCCESS; + + qedr_inc_sw_cons(&qp->sq); + i++; + } + + spin_unlock_irqrestore(&cq->cq_lock, flags); + + DP_DEBUG(dev, QEDR_MSG_GSI, + "gsi poll_cq: requested entries=%d, actual=%d, qp->rq.cons=%d, qp->rq.gsi_cons=%x, qp->sq.cons=%d, qp->sq.gsi_cons=%d, qp_num=%d\n", + num_entries, i, qp->rq.cons, qp->rq.gsi_cons, qp->sq.cons, + qp->sq.gsi_cons, qp->ibqp.qp_num); + + return i; +} diff --git a/drivers/infiniband/hw/qedr/qedr_cm.h b/drivers/infiniband/hw/qedr/qedr_cm.h index b8a8b76d77b8..9ba6e15cd93f 100644 --- a/drivers/infiniband/hw/qedr/qedr_cm.h +++ b/drivers/infiniband/hw/qedr/qedr_cm.h @@ -32,9 +32,30 @@ #ifndef LINUX_QEDR_CM_H_ #define LINUX_QEDR_CM_H_ +#define QEDR_GSI_MAX_RECV_WR (4096) +#define QEDR_GSI_MAX_SEND_WR (4096) + +#define QEDR_GSI_MAX_RECV_SGE (1) /* LL2 FW limitation */ + +#define ETH_P_ROCE (0x8915) +#define QEDR_ROCE_V2_UDP_SPORT (0000) + static inline u32 qedr_get_ipv4_from_gid(u8 *gid) { return *(u32 *)(void *)&gid[12]; } +/* RDMA CM */ +int qedr_gsi_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc); +int qedr_gsi_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr, + struct ib_recv_wr **bad_wr); +int qedr_gsi_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, + struct ib_send_wr **bad_wr); +struct ib_qp *qedr_create_gsi_qp(struct qedr_dev *dev, + struct ib_qp_init_attr *attrs, + struct qedr_qp *qp); +void qedr_store_gsi_qp_cq(struct qedr_dev *dev, + struct qedr_qp *qp, struct ib_qp_init_attr *attrs); +int qedr_destroy_gsi_qp(struct qedr_dev *dev); +void qedr_inc_sw_gsi_cons(struct 
qedr_qp_hwq_info *info); #endif diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index e4b4d47c6a2d..44ad6ae036c9 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -1502,6 +1502,15 @@ struct ib_qp *qedr_create_qp(struct ib_pd *ibpd, qedr_set_qp_init_params(dev, qp, pd, attrs); + if (attrs->qp_type == IB_QPT_GSI) { + if (udata) { + DP_ERR(dev, + "create qp: unexpected udata when creating GSI QP\n"); + goto err0; + } + return qedr_create_gsi_qp(dev, attrs, qp); + } + memset(&in_params, 0, sizeof(in_params)); if (udata) { @@ -2068,6 +2077,8 @@ int qedr_destroy_qp(struct ib_qp *ibqp) rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp); if (rc) return rc; + } else { + qedr_destroy_gsi_qp(dev); } if (ibqp->uobject && ibqp->uobject->context) { @@ -2083,6 +2094,27 @@ int qedr_destroy_qp(struct ib_qp *ibqp) return rc; } +struct ib_ah *qedr_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr) +{ + struct qedr_ah *ah; + + ah = kzalloc(sizeof(*ah), GFP_ATOMIC); + if (!ah) + return ERR_PTR(-ENOMEM); + + ah->attr = *attr; + + return &ah->ibah; +} + +int qedr_destroy_ah(struct ib_ah *ibah) +{ + struct qedr_ah *ah = get_qedr_ah(ibah); + + kfree(ah); + return 0; +} + static void free_mr_info(struct qedr_dev *dev, struct mr_info *info) { struct qedr_pbl *pbl, *tmp; @@ -2934,6 +2966,9 @@ int qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, *bad_wr = NULL; + if (qp->qp_type == IB_QPT_GSI) + return qedr_gsi_post_send(ibqp, wr, bad_wr); + spin_lock_irqsave(&qp->q_lock, flags); if ((qp->state == QED_ROCE_QP_STATE_RESET) || @@ -2990,6 +3025,9 @@ int qedr_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr, unsigned long flags; int status = 0; + if (qp->qp_type == IB_QPT_GSI) + return qedr_gsi_post_recv(ibqp, wr, bad_wr); + spin_lock_irqsave(&qp->q_lock, flags); if ((qp->state == QED_ROCE_QP_STATE_RESET) || @@ -3416,6 +3454,9 @@ int qedr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) int update = 0; int done = 0; + if (cq->cq_type == QEDR_CQ_TYPE_GSI) + return qedr_gsi_poll_cq(ibcq, num_entries, wc); + spin_lock_irqsave(&cq->cq_lock, flags); old_cons = qed_chain_get_cons_idx_u32(&cq->pbl); while (num_entries && is_valid_cqe(cq, cqe)) { diff --git a/drivers/infiniband/hw/qedr/verbs.h b/drivers/infiniband/hw/qedr/verbs.h index fb9b751a6dde..fbc6db5a5e1f 100644 --- a/drivers/infiniband/hw/qedr/verbs.h +++ b/drivers/infiniband/hw/qedr/verbs.h @@ -70,6 +70,9 @@ int qedr_query_qp(struct ib_qp *, struct ib_qp_attr *qp_attr, int qp_attr_mask, struct ib_qp_init_attr *); int qedr_destroy_qp(struct ib_qp *ibqp); +struct ib_ah *qedr_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr); +int qedr_destroy_ah(struct ib_ah *ibah); + int qedr_dereg_mr(struct ib_mr *); struct ib_mr *qedr_get_dma_mr(struct ib_pd *, int acc); -- cgit v1.2.3-59-g8ed1b From 993d1b52615e1a549e55875c3b74308391672d9f Mon Sep 17 00:00:00 2001 From: Ram Amrani Date: Mon, 10 Oct 2016 13:15:39 +0300 Subject: qedr: Add events support and register IB device Add error handling support. Register ib device with ib stack. 
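The affiliated-event handler added below recovers the CQ or QP from a firmware handle that arrives split into two 32-bit halves and, if the consumer registered an event handler, forwards an ib_event to it. A small self-contained sketch of that reassemble-and-dispatch pattern (userspace C; the demo_* names are invented and stand in for qedr's CQ/QP objects):

#include <stdio.h>
#include <stdint.h>

struct demo_cq {
	void (*event_handler)(struct demo_cq *cq, int event);
};

static void demo_cq_err(struct demo_cq *cq, int event)
{
	printf("CQ %p got event %d\n", (void *)cq, event);
}

int main(void)
{
	struct demo_cq cq = { .event_handler = demo_cq_err };
	uint64_t handle = (uint64_t)(uintptr_t)&cq;
	uint32_t hi = (uint32_t)(handle >> 32);   /* as delivered by the firmware event */
	uint32_t lo = (uint32_t)handle;

	/* event side: rebuild the pointer and dispatch if a handler exists */
	uint64_t handle64 = ((uint64_t)hi << 32) + lo;
	struct demo_cq *target = (struct demo_cq *)(uintptr_t)handle64;

	if (target && target->event_handler)
		target->event_handler(target, 1 /* e.g. IB_EVENT_CQ_ERR */);
	return 0;
}

The NULL checks matter: the patch WARNs and bails out when the handle does not resolve to a live CQ or QP object.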
Signed-off-by: Rajesh Borundia Signed-off-by: Ram Amrani Signed-off-by: Doug Ledford --- drivers/infiniband/hw/qedr/main.c | 114 ++++++++++++++++++++++++++++++++++++- drivers/infiniband/hw/qedr/verbs.c | 37 ++++++++++++ drivers/infiniband/hw/qedr/verbs.h | 9 +++ 3 files changed, 158 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c index 42dff48a3171..7b74d09a8217 100644 --- a/drivers/infiniband/hw/qedr/main.c +++ b/drivers/infiniband/hw/qedr/main.c @@ -79,10 +79,25 @@ static void qedr_get_dev_fw_str(struct ib_device *ibdev, char *str, (fw_ver >> 8) & 0xFF, fw_ver & 0xFF); } +static struct net_device *qedr_get_netdev(struct ib_device *dev, u8 port_num) +{ + struct qedr_dev *qdev; + + qdev = get_qedr_dev(dev); + dev_hold(qdev->ndev); + + /* The HW vendor's device driver must guarantee + * that this function returns NULL before the net device reaches + * NETDEV_UNREGISTER_FINAL state. + */ + return qdev->ndev; +} + static int qedr_register_device(struct qedr_dev *dev) { strlcpy(dev->ibdev.name, "qedr%d", IB_DEVICE_NAME_MAX); + dev->ibdev.node_guid = dev->attr.node_guid; memcpy(dev->ibdev.node_desc, QEDR_NODE_DESC, sizeof(QEDR_NODE_DESC)); dev->ibdev.owner = THIS_MODULE; dev->ibdev.uverbs_abi_ver = QEDR_ABI_VERSION; @@ -151,12 +166,16 @@ static int qedr_register_device(struct qedr_dev *dev) dev->ibdev.post_send = qedr_post_send; dev->ibdev.post_recv = qedr_post_recv; + dev->ibdev.process_mad = qedr_process_mad; + dev->ibdev.get_port_immutable = qedr_port_immutable; + dev->ibdev.get_netdev = qedr_get_netdev; + dev->ibdev.dma_device = &dev->pdev->dev; dev->ibdev.get_link_layer = qedr_link_layer; dev->ibdev.get_dev_fw_str = qedr_get_dev_fw_str; - return 0; + return ib_register_device(&dev->ibdev, NULL); } /* This function allocates fast-path status block memory */ @@ -557,6 +576,92 @@ static int qedr_set_device_attr(struct qedr_dev *dev) return 0; } +void qedr_unaffiliated_event(void *context, + u8 event_code) +{ + pr_err("unaffiliated event not implemented yet\n"); +} + +void qedr_affiliated_event(void *context, u8 e_code, void *fw_handle) +{ +#define EVENT_TYPE_NOT_DEFINED 0 +#define EVENT_TYPE_CQ 1 +#define EVENT_TYPE_QP 2 + struct qedr_dev *dev = (struct qedr_dev *)context; + union event_ring_data *data = fw_handle; + u64 roce_handle64 = ((u64)data->roce_handle.hi << 32) + + data->roce_handle.lo; + u8 event_type = EVENT_TYPE_NOT_DEFINED; + struct ib_event event; + struct ib_cq *ibcq; + struct ib_qp *ibqp; + struct qedr_cq *cq; + struct qedr_qp *qp; + + switch (e_code) { + case ROCE_ASYNC_EVENT_CQ_OVERFLOW_ERR: + event.event = IB_EVENT_CQ_ERR; + event_type = EVENT_TYPE_CQ; + break; + case ROCE_ASYNC_EVENT_SQ_DRAINED: + event.event = IB_EVENT_SQ_DRAINED; + event_type = EVENT_TYPE_QP; + break; + case ROCE_ASYNC_EVENT_QP_CATASTROPHIC_ERR: + event.event = IB_EVENT_QP_FATAL; + event_type = EVENT_TYPE_QP; + break; + case ROCE_ASYNC_EVENT_LOCAL_INVALID_REQUEST_ERR: + event.event = IB_EVENT_QP_REQ_ERR; + event_type = EVENT_TYPE_QP; + break; + case ROCE_ASYNC_EVENT_LOCAL_ACCESS_ERR: + event.event = IB_EVENT_QP_ACCESS_ERR; + event_type = EVENT_TYPE_QP; + break; + default: + DP_ERR(dev, "unsupported event %d on handle=%llx\n", e_code, + roce_handle64); + } + + switch (event_type) { + case EVENT_TYPE_CQ: + cq = (struct qedr_cq *)(uintptr_t)roce_handle64; + if (cq) { + ibcq = &cq->ibcq; + if (ibcq->event_handler) { + event.device = ibcq->device; + event.element.cq = ibcq; + ibcq->event_handler(&event, ibcq->cq_context); 
+ } + } else { + WARN(1, + "Error: CQ event with NULL pointer ibcq. Handle=%llx\n", + roce_handle64); + } + DP_ERR(dev, "CQ event %d on hanlde %p\n", e_code, cq); + break; + case EVENT_TYPE_QP: + qp = (struct qedr_qp *)(uintptr_t)roce_handle64; + if (qp) { + ibqp = &qp->ibqp; + if (ibqp->event_handler) { + event.device = ibqp->device; + event.element.qp = ibqp; + ibqp->event_handler(&event, ibqp->qp_context); + } + } else { + WARN(1, + "Error: QP event with NULL pointer ibqp. Handle=%llx\n", + roce_handle64); + } + DP_ERR(dev, "QP event %d on hanlde %p\n", e_code, qp); + break; + default: + break; + } +} + static int qedr_init_hw(struct qedr_dev *dev) { struct qed_rdma_add_user_out_params out_params; @@ -585,6 +690,8 @@ static int qedr_init_hw(struct qedr_dev *dev) cur_pbl->pbl_ptr = (u64)p_phys_table; } + events.affiliated_event = qedr_affiliated_event; + events.unaffiliated_event = qedr_unaffiliated_event; events.context = dev; in_params->events = &events; @@ -683,11 +790,13 @@ static struct qedr_dev *qedr_add(struct qed_dev *cdev, struct pci_dev *pdev, for (i = 0; i < ARRAY_SIZE(qedr_attributes); i++) if (device_create_file(&dev->ibdev.dev, qedr_attributes[i])) - goto reg_err; + goto sysfs_err; DP_DEBUG(dev, QEDR_MSG_INIT, "qedr driver loaded successfully\n"); return dev; +sysfs_err: + ib_unregister_device(&dev->ibdev); reg_err: qedr_sync_free_irqs(dev); irq_err: @@ -707,6 +816,7 @@ static void qedr_remove(struct qedr_dev *dev) * of the registered clients. */ qedr_remove_sysfiles(dev); + ib_unregister_device(&dev->ibdev); qedr_stop_hw(dev); qedr_sync_free_irqs(dev); diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index 44ad6ae036c9..a61514296767 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -3508,3 +3508,40 @@ int qedr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) spin_unlock_irqrestore(&cq->cq_lock, flags); return done; } + +int qedr_process_mad(struct ib_device *ibdev, int process_mad_flags, + u8 port_num, + const struct ib_wc *in_wc, + const struct ib_grh *in_grh, + const struct ib_mad_hdr *mad_hdr, + size_t in_mad_size, struct ib_mad_hdr *out_mad, + size_t *out_mad_size, u16 *out_mad_pkey_index) +{ + struct qedr_dev *dev = get_qedr_dev(ibdev); + + DP_DEBUG(dev, QEDR_MSG_GSI, + "QEDR_PROCESS_MAD in_mad %x %x %x %x %x %x %x %x\n", + mad_hdr->attr_id, mad_hdr->base_version, mad_hdr->attr_mod, + mad_hdr->class_specific, mad_hdr->class_version, + mad_hdr->method, mad_hdr->mgmt_class, mad_hdr->status); + return IB_MAD_RESULT_SUCCESS; +} + +int qedr_port_immutable(struct ib_device *ibdev, u8 port_num, + struct ib_port_immutable *immutable) +{ + struct ib_port_attr attr; + int err; + + err = qedr_query_port(ibdev, port_num, &attr); + if (err) + return err; + + immutable->pkey_tbl_len = attr.pkey_tbl_len; + immutable->gid_tbl_len = attr.gid_tbl_len; + immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE | + RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP; + immutable->max_mad_size = IB_MGMT_MAD_SIZE; + + return 0; +} diff --git a/drivers/infiniband/hw/qedr/verbs.h b/drivers/infiniband/hw/qedr/verbs.h index fbc6db5a5e1f..a9b5e67bb81e 100644 --- a/drivers/infiniband/hw/qedr/verbs.h +++ b/drivers/infiniband/hw/qedr/verbs.h @@ -89,4 +89,13 @@ int qedr_post_send(struct ib_qp *, struct ib_send_wr *, struct ib_send_wr **bad_wr); int qedr_post_recv(struct ib_qp *, struct ib_recv_wr *, struct ib_recv_wr **bad_wr); +int qedr_process_mad(struct ib_device *ibdev, int process_mad_flags, + u8 port_num, const struct ib_wc 
*in_wc, + const struct ib_grh *in_grh, + const struct ib_mad_hdr *in_mad, + size_t in_mad_size, struct ib_mad_hdr *out_mad, + size_t *out_mad_size, u16 *out_mad_pkey_index); + +int qedr_port_immutable(struct ib_device *ibdev, u8 port_num, + struct ib_port_immutable *immutable); #endif -- cgit v1.2.3-59-g8ed1b From 610df1d2342d639ec87a8fb5290ca4799d91d59b Mon Sep 17 00:00:00 2001 From: Guenter Roeck Date: Thu, 13 Oct 2016 16:43:16 -0700 Subject: net: asix: Avoid looping when the device does not respond Check answers from USB stack and avoid re-sending the request multiple times if the device does not respond. This fixes the following problem, observed with a probably flaky adapter. [62108.732707] usb 1-3: new high-speed USB device number 5 using xhci_hcd [62108.914421] usb 1-3: New USB device found, idVendor=0b95, idProduct=7720 [62108.914463] usb 1-3: New USB device strings: Mfr=1, Product=2, SerialNumber=3 [62108.914476] usb 1-3: Product: AX88x72A [62108.914486] usb 1-3: Manufacturer: ASIX Elec. Corp. [62108.914495] usb 1-3: SerialNumber: 000001 [62114.109109] asix 1-3:1.0 (unnamed net_device) (uninitialized): Failed to write reg index 0x0000: -110 [62114.109139] asix 1-3:1.0 (unnamed net_device) (uninitialized): Failed to send software reset: ffffff92 [62119.109048] asix 1-3:1.0 (unnamed net_device) (uninitialized): Failed to write reg index 0x0000: -110 ... Since the USB timeout is 5 seconds, and the operation is retried 30 times, this results in [62278.180353] INFO: task mtpd:1725 blocked for more than 120 seconds. [62278.180373] Tainted: G W 3.18.0-13298-g94ace9e #1 [62278.180383] "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message. ... [62278.180957] kworker/2:0 D 0000000000000000 0 5744 2 0x00000000 [62278.180978] Workqueue: usb_hub_wq hub_event [62278.181029] ffff880177f833b8 0000000000000046 ffff88017fd00000 ffff88017b126d80 [62278.181048] ffff880177f83fd8 ffff880065a71b60 0000000000013340 ffff880065a71b60 [62278.181065] 0000000000000286 0000000103b1c199 0000000000001388 0000000000000002 [62278.181081] Call Trace: [62278.181092] [] ? console_conditional_schedule+0x2c/0x2c [62278.181105] [] schedule+0x69/0x6b [62278.181117] [] schedule_timeout+0xe3/0x11d [62278.181133] [] ? trace_timer_start+0x51/0x51 [62278.181146] [] do_wait_for_common+0x12f/0x16c [62278.181162] [] ? wake_up_process+0x39/0x39 [62278.181174] [] wait_for_common+0x52/0x6d [62278.181187] [] wait_for_completion_timeout+0x13/0x15 [62278.181201] [] usb_start_wait_urb+0x93/0xf1 [62278.181214] [] usb_control_msg+0xe1/0x11d [62278.181230] [] usbnet_write_cmd+0x9c/0xc6 [usbnet] [62278.181286] [] asix_write_cmd+0x4e/0x7e [asix] [62278.181300] [] asix_set_sw_mii+0x25/0x4e [asix] [62278.181314] [] asix_mdio_read+0x51/0x109 [asix] ... Signed-off-by: Guenter Roeck Signed-off-by: David S. 
Miller --- drivers/net/usb/asix_common.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c index f79eb12c326a..125cff57c759 100644 --- a/drivers/net/usb/asix_common.c +++ b/drivers/net/usb/asix_common.c @@ -433,13 +433,13 @@ int asix_mdio_read(struct net_device *netdev, int phy_id, int loc) mutex_lock(&dev->phy_mutex); do { ret = asix_set_sw_mii(dev, 0); - if (ret == -ENODEV) + if (ret == -ENODEV || ret == -ETIMEDOUT) break; usleep_range(1000, 1100); ret = asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG, 0, 0, 1, &smsr, 0); } while (!(smsr & AX_HOST_EN) && (i++ < 30) && (ret != -ENODEV)); - if (ret == -ENODEV) { + if (ret == -ENODEV || ret == -ETIMEDOUT) { mutex_unlock(&dev->phy_mutex); return ret; } @@ -497,13 +497,13 @@ int asix_mdio_read_nopm(struct net_device *netdev, int phy_id, int loc) mutex_lock(&dev->phy_mutex); do { ret = asix_set_sw_mii(dev, 1); - if (ret == -ENODEV) + if (ret == -ENODEV || ret == -ETIMEDOUT) break; usleep_range(1000, 1100); ret = asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG, 0, 0, 1, &smsr, 1); } while (!(smsr & AX_HOST_EN) && (i++ < 30) && (ret != -ENODEV)); - if (ret == -ENODEV) { + if (ret == -ENODEV || ret == -ETIMEDOUT) { mutex_unlock(&dev->phy_mutex); return ret; } -- cgit v1.2.3-59-g8ed1b From e7cb08e894a0b876443ef8fdb0706575dc00a5d2 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Fri, 14 Oct 2016 16:18:39 -0400 Subject: scsi: zfcp: spin_lock_irqsave() is not nestable We accidentally overwrite the original saved value of "flags" so that we can't re-enable IRQs at the end of the function. Presumably this function is mostly called with IRQs disabled or it would be obvious in testing. Fixes: aceeffbb59bb ("zfcp: trace full payload of all SAN records (req,resp,iels)") Cc: #2.6.38+ Signed-off-by: Dan Carpenter Signed-off-by: Steffen Maier Signed-off-by: Martin K. Petersen --- drivers/s390/scsi/zfcp_dbf.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c index 637cf8973c9e..581001989937 100644 --- a/drivers/s390/scsi/zfcp_dbf.c +++ b/drivers/s390/scsi/zfcp_dbf.c @@ -384,7 +384,7 @@ void zfcp_dbf_san(char *tag, struct zfcp_dbf *dbf, /* if (len > rec_len): * dump data up to cap_len ignoring small duplicate in rec->payload */ - spin_lock_irqsave(&dbf->pay_lock, flags); + spin_lock(&dbf->pay_lock); memset(payload, 0, sizeof(*payload)); memcpy(payload->area, paytag, ZFCP_DBF_TAG_LEN); payload->fsf_req_id = req_id; -- cgit v1.2.3-59-g8ed1b From 8a4236a2c7868768943a24dc7b1e2ff495836880 Mon Sep 17 00:00:00 2001 From: Brian King Date: Thu, 13 Oct 2016 14:45:24 -0500 Subject: scsi: ipr: Fix async error WARN_ON Commit afc3f83cb4a5 ("scsi: ipr: Add asynchronous error notification") introduced the warn on shown below. To fix this, rather than attempting to send the KOBJ_CHANGE uevent from interrupt context, which is what is causing the WARN_ON, just wake the ipr worker thread which will send a KOBJ_CHANGE uevent. 
[ 142.278120] WARNING: CPU: 15 PID: 0 at kernel/softirq.c:161 __local_bh_enable_ip+0x7c/0xd0 [ 142.278124] Modules linked in: ip6t_rpfilter ip6t_REJECT nf_reject_ipv6 ipt_REJECT nf_reject_ipv4 xt_conntrack ebtable_nat ebtable_broute bridge stp llc ebtable_filter ebtables ip6table_nat nf_conntrack_ipv6 nf_defrag_ipv6 nf_nat_ipv6 ip6table_mangle ip6table_security ip6table_raw ip6table_filter ip6_tables iptable_nat nf_conntrack_ipv4 nf_defrag_ipv4 nf_nat_ipv4 nf_nat nf_conntrack iptable_mangle iptable_security iptable_raw iptable_filter ses enclosure scsi_transport_sas sg pseries_rng nfsd auth_rpcgss nfs_acl lockd grace sunrpc ip_tables xfs libcrc32c sr_mod sd_mod cdrom ipr libata ibmvscsi scsi_transport_srp ibmveth dm_mirror dm_region_hash dm_log dm_mod [ 142.278208] CPU: 15 PID: 0 Comm: swapper/15 Not tainted 4.8.0.ipr+ #21 [ 142.278213] task: c00000010cf24480 task.stack: c00000010cfec000 [ 142.278217] NIP: c0000000000c0c7c LR: c000000000881778 CTR: c0000000003c5bf0 [ 142.278221] REGS: c00000010cfef080 TRAP: 0700 Not tainted (4.8.0.ipr+) [ 142.278224] MSR: 8000000000029033 CR: 28008022 XER: 2000000f [ 142.278236] CFAR: c0000000000c0c20 SOFTE: 0 GPR00: c000000000706c78 c00000010cfef300 c000000000f91d00 c000000000706c78 GPR04: 0000000000000200 c000000000f7bc80 0000000000000000 00000000024000c0 GPR08: 0000000000000000 0000000000000001 c000000000ee1d00 c000000000a9bdd0 GPR12: c0000000003c5bf0 c00000000eb22d00 c000000100ca3880 c00000020ed38400 GPR16: 0000000000000000 0000000000000000 c000000100940508 0000000000000000 GPR20: 0000000000000000 0000000000000000 0000000000000000 00000000024000c0 GPR24: c0000000004588e0 c00000010863bd00 c00000010863bd00 c0000000013773f8 GPR28: c000000000f7bc80 0000000000000000 ffffffffffffffff c000000000f7bcd8 [ 142.278290] NIP [c0000000000c0c7c] __local_bh_enable_ip+0x7c/0xd0 [ 142.278296] LR [c000000000881778] _raw_spin_unlock_bh+0x38/0x60 [ 142.278299] Call Trace: [ 142.278303] [c00000010cfef300] [c000000000f7bc80] init_net+0x0/0x1900 (unreliable) [ 142.278310] [c00000010cfef320] [c000000000706c78] peernet2id+0x58/0x80 [ 142.278316] [c00000010cfef370] [c00000000075caec] netlink_broadcast_filtered+0x30c/0x550 [ 142.278323] [c00000010cfef430] [c000000000459078] kobject_uevent_env+0x588/0x780 [ 142.278331] [c00000010cfef510] [d000000003163a6c] ipr_process_error+0x11c/0x240 [ipr] [ 142.278337] [c00000010cfef5c0] [d000000003152298] ipr_fail_all_ops+0x108/0x220 [ipr] [ 142.278343] [c00000010cfef670] [d0000000031643f8] ipr_reset_restore_cfg_space+0xa8/0x240 [ipr] [ 142.278350] [c00000010cfef6f0] [d000000003158a00] ipr_reset_ioa_job+0x80/0xe0 [ipr] [ 142.278356] [c00000010cfef720] [d000000003153f78] ipr_reset_timer_done+0xa8/0xe0 [ipr] [ 142.278363] [c00000010cfef770] [c000000000149c88] call_timer_fn+0x58/0x1c0 [ 142.278368] [c00000010cfef800] [c000000000149f60] expire_timers+0x140/0x200 [ 142.278373] [c00000010cfef870] [c00000000014a0e8] run_timer_softirq+0xc8/0x230 [ 142.278379] [c00000010cfef900] [c0000000000c0844] __do_softirq+0x164/0x3c0 [ 142.278384] [c00000010cfef9f0] [c0000000000c0f18] irq_exit+0x1a8/0x1c0 [ 142.278389] [c00000010cfefa20] [c000000000020b54] timer_interrupt+0xa4/0xe0 [ 142.278394] [c00000010cfefa50] [c000000000002414] decrementer_common+0x114/0x180 Signed-off-by: Brian King Signed-off-by: Martin K. 
Petersen --- drivers/scsi/ipr.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index a8762a3efeef..532474109624 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c @@ -2586,7 +2586,6 @@ static void ipr_process_error(struct ipr_cmnd *ipr_cmd) struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb; u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); u32 fd_ioasc; - char *envp[] = { "ASYNC_ERR_LOG=1", NULL }; if (ioa_cfg->sis64) fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc); @@ -2607,8 +2606,8 @@ static void ipr_process_error(struct ipr_cmnd *ipr_cmd) } list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_report_q); + schedule_work(&ioa_cfg->work_q); hostrcb = ipr_get_free_hostrcb(ioa_cfg); - kobject_uevent_env(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE, envp); ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb); } -- cgit v1.2.3-59-g8ed1b From 87c0fded852ae20bddb7833da6ead082404de86a Mon Sep 17 00:00:00 2001 From: Ilya Dryomov Date: Thu, 29 Sep 2016 13:41:05 +0200 Subject: rbd: don't wait for the lock forever if blacklisted -EBLACKLISTED from __rbd_register_watch() means that our ceph_client got blacklisted - we won't be able to restore the watch and reacquire the lock. Wake up and fail all outstanding requests waiting for the lock and arrange for all new requests that require the lock to fail immediately. Signed-off-by: Ilya Dryomov Tested-by: Mike Christie --- drivers/block/rbd.c | 50 +++++++++++++++++++++++++++++++++----------------- 1 file changed, 33 insertions(+), 17 deletions(-) (limited to 'drivers') diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index abb71628ab61..633e8c2ea120 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -415,15 +415,15 @@ struct rbd_device { }; /* - * Flag bits for rbd_dev->flags. If atomicity is required, - * rbd_dev->lock is used to protect access. - * - * Currently, only the "removing" flag (which is coupled with the - * "open_count" field) requires atomic access. 
+ * Flag bits for rbd_dev->flags: + * - REMOVING (which is coupled with rbd_dev->open_count) is protected + * by rbd_dev->lock + * - BLACKLISTED is protected by rbd_dev->lock_rwsem */ enum rbd_dev_flags { RBD_DEV_FLAG_EXISTS, /* mapped snapshot has not been deleted */ RBD_DEV_FLAG_REMOVING, /* this mapping is being removed */ + RBD_DEV_FLAG_BLACKLISTED, /* our ceph_client is blacklisted */ }; static DEFINE_MUTEX(client_mutex); /* Serialize client creation */ @@ -3926,6 +3926,7 @@ static void rbd_reregister_watch(struct work_struct *work) struct rbd_device *rbd_dev = container_of(to_delayed_work(work), struct rbd_device, watch_dwork); bool was_lock_owner = false; + bool need_to_wake = false; int ret; dout("%s rbd_dev %p\n", __func__, rbd_dev); @@ -3935,19 +3936,27 @@ static void rbd_reregister_watch(struct work_struct *work) was_lock_owner = rbd_release_lock(rbd_dev); mutex_lock(&rbd_dev->watch_mutex); - if (rbd_dev->watch_state != RBD_WATCH_STATE_ERROR) - goto fail_unlock; + if (rbd_dev->watch_state != RBD_WATCH_STATE_ERROR) { + mutex_unlock(&rbd_dev->watch_mutex); + goto out; + } ret = __rbd_register_watch(rbd_dev); if (ret) { rbd_warn(rbd_dev, "failed to reregister watch: %d", ret); - if (ret != -EBLACKLISTED) + if (ret == -EBLACKLISTED) { + set_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags); + need_to_wake = true; + } else { queue_delayed_work(rbd_dev->task_wq, &rbd_dev->watch_dwork, RBD_RETRY_DELAY); - goto fail_unlock; + } + mutex_unlock(&rbd_dev->watch_mutex); + goto out; } + need_to_wake = true; rbd_dev->watch_state = RBD_WATCH_STATE_REGISTERED; rbd_dev->watch_cookie = rbd_dev->watch_handle->linger_id; mutex_unlock(&rbd_dev->watch_mutex); @@ -3963,13 +3972,10 @@ static void rbd_reregister_watch(struct work_struct *work) ret); } +out: up_write(&rbd_dev->lock_rwsem); - wake_requests(rbd_dev, true); - return; - -fail_unlock: - mutex_unlock(&rbd_dev->watch_mutex); - up_write(&rbd_dev->lock_rwsem); + if (need_to_wake) + wake_requests(rbd_dev, true); } /* @@ -4074,7 +4080,9 @@ static void rbd_wait_state_locked(struct rbd_device *rbd_dev) up_read(&rbd_dev->lock_rwsem); schedule(); down_read(&rbd_dev->lock_rwsem); - } while (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED); + } while (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED && + !test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)); + finish_wait(&rbd_dev->lock_waitq, &wait); } @@ -4166,8 +4174,16 @@ static void rbd_queue_workfn(struct work_struct *work) if (must_be_locked) { down_read(&rbd_dev->lock_rwsem); - if (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED) + if (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED && + !test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)) rbd_wait_state_locked(rbd_dev); + + WARN_ON((rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED) ^ + !test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)); + if (test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)) { + result = -EBLACKLISTED; + goto err_unlock; + } } img_request = rbd_img_request_create(rbd_dev, offset, length, op_type, -- cgit v1.2.3-59-g8ed1b From 4d73644bc3d76dd161a84e3849c6f2c9c01c4ba7 Mon Sep 17 00:00:00 2001 From: Ilya Dryomov Date: Thu, 29 Sep 2016 14:23:12 +0200 Subject: rbd: don't retry watch reregistration if header object is gone If the header object gets deleted (perhaps along with the entire pool), there is no point in attempting to reregister the watch. Treat this the same as blacklisting: fail all pending and new I/Os requiring the lock. 
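For context, the retry policy that results from this patch together with the preceding blacklisting change can be condensed into a short sketch. The helper names below (watch_err_is_fatal, handle_reregister_error) are invented for illustration; the flag, the wake_requests() call and the delayed-work requeue are the ones visible in the surrounding hunks, so treat this only as a summary of the intent, not as the literal driver code.

static bool watch_err_is_fatal(int err)
{
	/* blacklisted client or deleted header object: retrying is pointless */
	return err == -EBLACKLISTED || err == -ENOENT;
}

static void handle_reregister_error(struct rbd_device *rbd_dev, int err)
{
	if (watch_err_is_fatal(err)) {
		set_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags);
		wake_requests(rbd_dev, true);	/* fail waiters right away */
	} else {
		queue_delayed_work(rbd_dev->task_wq, &rbd_dev->watch_dwork,
				   RBD_RETRY_DELAY);
	}
}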
Signed-off-by: Ilya Dryomov --- drivers/block/rbd.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 633e8c2ea120..7b274ff4632c 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -3944,7 +3944,7 @@ static void rbd_reregister_watch(struct work_struct *work) ret = __rbd_register_watch(rbd_dev); if (ret) { rbd_warn(rbd_dev, "failed to reregister watch: %d", ret); - if (ret == -EBLACKLISTED) { + if (ret == -EBLACKLISTED || ret == -ENOENT) { set_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags); need_to_wake = true; } else { -- cgit v1.2.3-59-g8ed1b From f0076436136751359e0886f3302a2a0b3a28ba6e Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Fri, 14 Oct 2016 14:40:33 +0100 Subject: r8169: set coherent DMA mask as well as streaming DMA mask PCI devices that are 64-bit DMA capable should set the coherent DMA mask as well as the streaming DMA mask. On some architectures, these are managed separately, and so the coherent DMA mask will be left at its default value of 32 if it is not set explicitly. This results in errors such as r8169 Gigabit Ethernet driver 2.3LK-NAPI loaded hwdev DMA mask = 0x00000000ffffffff, dev_addr = 0x00000080fbfff000 swiotlb: coherent allocation failed for device 0000:02:00.0 size=4096 CPU: 0 PID: 1062 Comm: systemd-udevd Not tainted 4.8.0+ #35 Hardware name: AMD Seattle/Seattle, BIOS 10:53:24 Oct 13 2016 on systems without memory that is 32-bit addressable by PCI devices. Signed-off-by: Ard Biesheuvel Acked-by: Francois Romieu Signed-off-by: David S. Miller --- drivers/net/ethernet/realtek/r8169.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index e55638c7505a..bf000d819a21 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -8273,7 +8273,8 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if ((sizeof(dma_addr_t) > 4) && (use_dac == 1 || (use_dac == -1 && pci_is_pcie(pdev) && tp->mac_version >= RTL_GIGA_MAC_VER_18)) && - !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { + !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && + !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) { /* CPlusCmd Dual Access Cycle is only needed for non-PCIe */ if (!pci_is_pcie(pdev)) -- cgit v1.2.3-59-g8ed1b From 93966b715b32a783a1641f5a385901bbfab04733 Mon Sep 17 00:00:00 2001 From: Timur Tabi Date: Fri, 14 Oct 2016 14:14:35 -0500 Subject: net: qcom/emac: disable interrupts before calling phy_disconnect There is a race condition that can occur if EMAC interrupts are enabled when phy_disconnect() is called. phy_disconnect() sets adjust_link to NULL. When an interrupt occurs, the ISR might call phy_mac_interrupt(), which wakes up the workqueue function phy_state_machine(). This function might reference adjust_link, thereby causing a null pointer exception. Signed-off-by: Timur Tabi Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qualcomm/emac/emac-mac.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c index e97968ed4b8f..6fb3bee904d3 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c @@ -1021,14 +1021,18 @@ void emac_mac_down(struct emac_adapter *adpt) napi_disable(&adpt->rx_q.napi); phy_stop(adpt->phydev); - phy_disconnect(adpt->phydev); - /* disable mac irq */ + /* Interrupts must be disabled before the PHY is disconnected, to + * avoid a race condition where adjust_link is null when we get + * an interrupt. + */ writel(DIS_INT, adpt->base + EMAC_INT_STATUS); writel(0, adpt->base + EMAC_INT_MASK); synchronize_irq(adpt->irq.irq); free_irq(adpt->irq.irq, &adpt->irq); + phy_disconnect(adpt->phydev); + emac_mac_reset(adpt); emac_tx_q_descs_free(adpt); -- cgit v1.2.3-59-g8ed1b From 50756ebecf69276b8a908409fa32c123bb12420b Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Fri, 14 Oct 2016 22:26:11 +0300 Subject: stmmac: fix an error code in stmmac_ptp_register() PTR_ERR(NULL) is success. We have to preserve the error code earlier. Fixes: 7086605a6ab5 ("stmmac: fix error check when init ptp") Signed-off-by: Dan Carpenter Signed-off-by: David S. Miller --- drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c index 289d52725a6c..5d61fb27219a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c @@ -185,8 +185,10 @@ int stmmac_ptp_register(struct stmmac_priv *priv) priv->ptp_clock = ptp_clock_register(&priv->ptp_clock_ops, priv->device); if (IS_ERR(priv->ptp_clock)) { + int ret = PTR_ERR(priv->ptp_clock); + priv->ptp_clock = NULL; - return PTR_ERR(priv->ptp_clock); + return ret; } spin_lock_init(&priv->ptp_lock); -- cgit v1.2.3-59-g8ed1b From fb5c6cfaec126d9a96b9dd471d4711bf4c737a6f Mon Sep 17 00:00:00 2001 From: Alexey Khoroshilov Date: Sat, 15 Oct 2016 00:01:20 +0300 Subject: vmxnet3: avoid assumption about invalid dma_pa in vmxnet3_set_mc() vmxnet3_set_mc() checks new_table_pa returned by dma_map_single() with dma_mapping_error(), but even there it assumes zero is invalid pa (it assumes dma_mapping_error(...,0) returns true if new_table is NULL). The patch adds an explicit variable to track status of new_table_pa. Found by Linux Driver Verification project (linuxtesting.org). v2: use "bool" and "true"/"false" for boolean variables. Signed-off-by: Alexey Khoroshilov Signed-off-by: David S. 
Miller --- drivers/net/vmxnet3/vmxnet3_drv.c | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index b5554f2ebee4..ef83ae3b0a44 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -2279,6 +2279,7 @@ vmxnet3_set_mc(struct net_device *netdev) &adapter->shared->devRead.rxFilterConf; u8 *new_table = NULL; dma_addr_t new_table_pa = 0; + bool new_table_pa_valid = false; u32 new_mode = VMXNET3_RXM_UCAST; if (netdev->flags & IFF_PROMISC) { @@ -2307,13 +2308,15 @@ vmxnet3_set_mc(struct net_device *netdev) new_table, sz, PCI_DMA_TODEVICE); + if (!dma_mapping_error(&adapter->pdev->dev, + new_table_pa)) { + new_mode |= VMXNET3_RXM_MCAST; + new_table_pa_valid = true; + rxConf->mfTablePA = cpu_to_le64( + new_table_pa); + } } - - if (!dma_mapping_error(&adapter->pdev->dev, - new_table_pa)) { - new_mode |= VMXNET3_RXM_MCAST; - rxConf->mfTablePA = cpu_to_le64(new_table_pa); - } else { + if (!new_table_pa_valid) { netdev_info(netdev, "failed to copy mcast list, setting ALL_MULTI\n"); new_mode |= VMXNET3_RXM_ALL_MULTI; @@ -2338,7 +2341,7 @@ vmxnet3_set_mc(struct net_device *netdev) VMXNET3_CMD_UPDATE_MAC_FILTERS); spin_unlock_irqrestore(&adapter->cmd_lock, flags); - if (new_table_pa) + if (new_table_pa_valid) dma_unmap_single(&adapter->pdev->dev, new_table_pa, rxConf->mfTableLen, PCI_DMA_TODEVICE); kfree(new_table); -- cgit v1.2.3-59-g8ed1b From fa860a1751e388385a7f249dd3f24a6c76db0ba9 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 13 Oct 2016 16:13:44 +0200 Subject: drm: Print device information again in debugfs I was a bit over-eager in my cleanup in commit 95c081c17f284de50eaca60d4d55643a64d39019 Author: Daniel Vetter Date: Tue Jun 21 10:54:12 2016 +0200 drm: Move master pointer from drm_minor to drm_device Noticed by Chris Wilson. Fixes: 95c081c17f28 ("drm: Move master pointer from drm_minor to drm_device") Cc: Chris Wilson Cc: Chris Wilson Cc: Daniel Vetter Cc: Emil Velikov Cc: Julia Lawall Signed-off-by: Daniel Vetter Reviewed-by: Alex Deucher Tested-by: Chris Wilson Reviewed-by: Emil Velikov Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_info.c | 4 ---- 1 file changed, 4 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c index 1df2d33d0b40..ffb2ab389d1d 100644 --- a/drivers/gpu/drm/drm_info.c +++ b/drivers/gpu/drm/drm_info.c @@ -54,9 +54,6 @@ int drm_name_info(struct seq_file *m, void *data) mutex_lock(&dev->master_mutex); master = dev->master; - if (!master) - goto out_unlock; - seq_printf(m, "%s", dev->driver->name); if (dev->dev) seq_printf(m, " dev=%s", dev_name(dev->dev)); @@ -65,7 +62,6 @@ int drm_name_info(struct seq_file *m, void *data) if (dev->unique) seq_printf(m, " unique=%s", dev->unique); seq_printf(m, "\n"); -out_unlock: mutex_unlock(&dev->master_mutex); return 0; -- cgit v1.2.3-59-g8ed1b From 039bea844016ae85eaf195bf25266b5eb028a319 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Wed, 12 Oct 2016 08:02:21 +0530 Subject: Staging: greybus: gpio: Use gbphy_dev->dev instead of bundle->dev Some of the print messages are using the incorrect device pointer, fix them. 
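The convention at work: messages emitted from a gbphy driver's probe path should be logged against the gbphy device actually being probed, so the kernel log names the failing device rather than its parent bundle. A minimal illustration follows; the probe signature mirrors the gb_gpio_probe() context shown in the hunks, while example_setup() and the exact id parameter are placeholders.

static int example_gbphy_probe(struct gbphy_device *gbphy_dev,
			       const struct gbphy_device_id *id)
{
	int ret;

	ret = example_setup(gbphy_dev);	/* placeholder for the real setup */
	if (ret) {
		/* attribute the error to the device being probed */
		dev_err(&gbphy_dev->dev, "setup failed: %d\n", ret);
		return ret;
	}

	return 0;
}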
Signed-off-by: Viresh Kumar Acked-by: Johan Hovold Reviewed-by: Rui Miguel Silva Signed-off-by: Greg Kroah-Hartman --- drivers/staging/greybus/gpio.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/staging/greybus/gpio.c b/drivers/staging/greybus/gpio.c index 5e06e4229e42..250caa00de5e 100644 --- a/drivers/staging/greybus/gpio.c +++ b/drivers/staging/greybus/gpio.c @@ -702,15 +702,13 @@ static int gb_gpio_probe(struct gbphy_device *gbphy_dev, ret = gb_gpio_irqchip_add(gpio, irqc, 0, handle_level_irq, IRQ_TYPE_NONE); if (ret) { - dev_err(&connection->bundle->dev, - "failed to add irq chip: %d\n", ret); + dev_err(&gbphy_dev->dev, "failed to add irq chip: %d\n", ret); goto exit_line_free; } ret = gpiochip_add(gpio); if (ret) { - dev_err(&connection->bundle->dev, - "failed to add gpio chip: %d\n", ret); + dev_err(&gbphy_dev->dev, "failed to add gpio chip: %d\n", ret); goto exit_gpio_irqchip_remove; } -- cgit v1.2.3-59-g8ed1b From 4fa589126f23243bd998a464cb6158d343eb6a89 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Wed, 12 Oct 2016 08:02:22 +0530 Subject: Staging: greybus: uart: Use gbphy_dev->dev instead of bundle->dev Some of the print messages are using the incorrect device pointer, fix them. Signed-off-by: Viresh Kumar Acked-by: Johan Hovold Signed-off-by: Greg Kroah-Hartman --- drivers/staging/greybus/uart.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/staging/greybus/uart.c b/drivers/staging/greybus/uart.c index 5ee7954bd9f9..2633d2bfb1b4 100644 --- a/drivers/staging/greybus/uart.c +++ b/drivers/staging/greybus/uart.c @@ -888,7 +888,7 @@ static int gb_uart_probe(struct gbphy_device *gbphy_dev, minor = alloc_minor(gb_tty); if (minor < 0) { if (minor == -ENOSPC) { - dev_err(&connection->bundle->dev, + dev_err(&gbphy_dev->dev, "no more free minor numbers\n"); retval = -ENODEV; } else { -- cgit v1.2.3-59-g8ed1b From 0047b6e5f1b45b391244d78097631eb09a960202 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Wed, 12 Oct 2016 09:20:22 +0300 Subject: staging: android/ion: testing the wrong variable We're testing "pdev" but we intended to test "heap_pdev". This is a static checker fix and it's unlikely that anyone is affected by this bug. Fixes: 13439479c7de ('staging: ion: Add files for parsing the devicetree') Signed-off-by: Dan Carpenter Acked-by: Laura Abbott Signed-off-by: Greg Kroah-Hartman --- drivers/staging/android/ion/ion_of.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/staging/android/ion/ion_of.c b/drivers/staging/android/ion/ion_of.c index 15bac92b7f04..46b2bb99bfd6 100644 --- a/drivers/staging/android/ion/ion_of.c +++ b/drivers/staging/android/ion/ion_of.c @@ -107,7 +107,7 @@ struct ion_platform_data *ion_parse_dt(struct platform_device *pdev, heap_pdev = of_platform_device_create(node, heaps[i].name, &pdev->dev); - if (!pdev) + if (!heap_pdev) return ERR_PTR(-ENOMEM); heap_pdev->dev.platform_data = &heaps[i]; -- cgit v1.2.3-59-g8ed1b From 1d4f1d53e1e2d5e38f4d3ca3bf60f8be5025540f Mon Sep 17 00:00:00 2001 From: Aditya Shankar Date: Fri, 7 Oct 2016 09:45:03 +0530 Subject: Staging: wilc1000: Fix kernel Oops on opening the device Commit 2518ac59eb27 ("staging: wilc1000: Replace kthread with workqueue for host interface") adds an unconditional destroy_workqueue() on the wilc's "hif_workqueue" soon after its creation thereby rendering it unusable. 
It then further attempts to queue work onto this non-existing hif_worqueue and results in: Unable to handle kernel NULL pointer dereference at virtual address 00000010 pgd = de478000 [00000010] *pgd=3eec0831, *pte=00000000, *ppte=00000000 Internal error: Oops: 17 [#1] ARM Modules linked in: wilc1000_sdio(C) wilc1000(C) CPU: 0 PID: 825 Comm: ifconfig Tainted: G C 4.8.0-rc8+ #37 Hardware name: Atmel SAMA5 task: df56f800 task.stack: deeb0000 PC is at __queue_work+0x90/0x284 LR is at __queue_work+0x58/0x284 pc : [] lr : [] psr: 600f0093 sp : deeb1aa0 ip : def22d78 fp : deea6000 r10: 00000000 r9 : c0a08150 r8 : c0a2f058 r7 : 00000001 r6 : dee9b600 r5 : def22d74 r4 : 00000000 r3 : 00000000 r2 : def22d74 r1 : 07ffffff r0 : 00000000 Flags: nZCv IRQs off FIQs on Mode SVC_32 ISA ARM Segment none ... [] (__queue_work) from [] (queue_work_on+0x34/0x40) [] (queue_work_on) from [] (wilc_enqueue_cmd+0x54/0x64 [wilc1000]) [] (wilc_enqueue_cmd [wilc1000]) from [] (wilc_set_wfi_drv_handler+0x48/0x70 [wilc1000]) [] (wilc_set_wfi_drv_handler [wilc1000]) from [] (wilc_mac_open+0x214/0x250 [wilc1000]) [] (wilc_mac_open [wilc1000]) from [] (__dev_open+0xb8/0x11c) [] (__dev_open) from [] (__dev_change_flags+0x94/0x158) [] (__dev_change_flags) from [] (dev_change_flags+0x18/0x48) [] (dev_change_flags) from [] (devinet_ioctl+0x6b4/0x788) [] (devinet_ioctl) from [] (sock_ioctl+0x154/0x2cc) [] (sock_ioctl) from [] (do_vfs_ioctl+0x9c/0x878) [] (do_vfs_ioctl) from [] (SyS_ioctl+0x34/0x5c) [] (SyS_ioctl) from [] (ret_fast_syscall+0x0/0x3c) Code: e5932004 e1520006 01a04003 0affffff (e5943010) ---[ end trace b612328adaa6bf20 ]--- This fix removes the unnecessary call to destroy_workqueue() while opening the device to avoid the above kernel panic. The deinit routine already does a good job of terminating the workqueue when no longer needed. Reported-by: Nicolas Ferre Fixes: 2518ac59eb27 ("staging: wilc1000: Replace kthread with workqueue for host interface") Cc: stable@vger.kernel.org # 4.8+ Signed-off-by: Aditya Shankar Signed-off-by: Greg Kroah-Hartman --- drivers/staging/wilc1000/host_interface.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers') diff --git a/drivers/staging/wilc1000/host_interface.c b/drivers/staging/wilc1000/host_interface.c index 78f5613e9467..6ab7443eabde 100644 --- a/drivers/staging/wilc1000/host_interface.c +++ b/drivers/staging/wilc1000/host_interface.c @@ -3388,7 +3388,6 @@ int wilc_init(struct net_device *dev, struct host_if_drv **hif_drv_handler) clients_count++; - destroy_workqueue(hif_workqueue); _fail_: return result; } -- cgit v1.2.3-59-g8ed1b From c89d98e224b4858f42a9fec0f16766b3d7669ba3 Mon Sep 17 00:00:00 2001 From: Oleg Drokin Date: Sun, 16 Oct 2016 13:16:50 -0400 Subject: staging/lustre/llite: Move unstable_stats from sysfs to debugfs It's multiple values per file, so it has no business being in sysfs, besides it was assuming seqfile anyway. 
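The rule being applied: sysfs attributes are meant to expose one value per file, whereas multi-line statistics belong behind a seq_file interface such as debugfs. A generic sketch of that plumbing follows (struct example_stats and the names are invented; the lustre code itself goes through its LPROC_SEQ_FOPS wrapper, as the hunk below shows):

#include <linux/fs.h>
#include <linux/module.h>
#include <linux/seq_file.h>

struct example_stats {
	int	check;
	long	pages;
	int	mb;
};

static int example_stats_show(struct seq_file *m, void *v)
{
	struct example_stats *stats = m->private;

	seq_printf(m, "unstable_check: %8d\n", stats->check);
	seq_printf(m, "unstable_pages: %12ld\n", stats->pages);
	seq_printf(m, "unstable_mb:    %8d\n", stats->mb);
	return 0;
}

static int example_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, example_stats_show, inode->i_private);
}

static const struct file_operations example_stats_fops = {
	.owner	 = THIS_MODULE,
	.open	 = example_stats_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = single_release,
};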
Fixes: d806f30e639b ("staging: lustre: osc: revise unstable pages accounting") Signed-off-by: Oleg Drokin Signed-off-by: Greg Kroah-Hartman --- drivers/staging/lustre/lustre/llite/lproc_llite.c | 34 +++++++++++------------ 1 file changed, 17 insertions(+), 17 deletions(-) (limited to 'drivers') diff --git a/drivers/staging/lustre/lustre/llite/lproc_llite.c b/drivers/staging/lustre/lustre/llite/lproc_llite.c index 6eae60595905..23fda9d98bff 100644 --- a/drivers/staging/lustre/lustre/llite/lproc_llite.c +++ b/drivers/staging/lustre/lustre/llite/lproc_llite.c @@ -871,12 +871,10 @@ static ssize_t xattr_cache_store(struct kobject *kobj, } LUSTRE_RW_ATTR(xattr_cache); -static ssize_t unstable_stats_show(struct kobject *kobj, - struct attribute *attr, - char *buf) +static int ll_unstable_stats_seq_show(struct seq_file *m, void *v) { - struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info, - ll_kobj); + struct super_block *sb = m->private; + struct ll_sb_info *sbi = ll_s2sbi(sb); struct cl_client_cache *cache = sbi->ll_cache; long pages; int mb; @@ -884,19 +882,21 @@ static ssize_t unstable_stats_show(struct kobject *kobj, pages = atomic_long_read(&cache->ccc_unstable_nr); mb = (pages * PAGE_SIZE) >> 20; - return sprintf(buf, "unstable_check: %8d\n" - "unstable_pages: %12ld\n" - "unstable_mb: %8d\n", - cache->ccc_unstable_check, pages, mb); + seq_printf(m, + "unstable_check: %8d\n" + "unstable_pages: %12ld\n" + "unstable_mb: %8d\n", + cache->ccc_unstable_check, pages, mb); + + return 0; } -static ssize_t unstable_stats_store(struct kobject *kobj, - struct attribute *attr, - const char *buffer, - size_t count) +static ssize_t ll_unstable_stats_seq_write(struct file *file, + const char __user *buffer, + size_t count, loff_t *off) { - struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info, - ll_kobj); + struct super_block *sb = ((struct seq_file *)file->private_data)->private; + struct ll_sb_info *sbi = ll_s2sbi(sb); char kernbuf[128]; int val, rc; @@ -922,7 +922,7 @@ static ssize_t unstable_stats_store(struct kobject *kobj, return count; } -LUSTRE_RW_ATTR(unstable_stats); +LPROC_SEQ_FOPS(ll_unstable_stats); static ssize_t root_squash_show(struct kobject *kobj, struct attribute *attr, char *buf) @@ -995,6 +995,7 @@ static struct lprocfs_vars lprocfs_llite_obd_vars[] = { /* { "filegroups", lprocfs_rd_filegroups, 0, 0 }, */ { "max_cached_mb", &ll_max_cached_mb_fops, NULL }, { "statahead_stats", &ll_statahead_stats_fops, NULL, 0 }, + { "unstable_stats", &ll_unstable_stats_fops, NULL }, { "sbi_flags", &ll_sbi_flags_fops, NULL, 0 }, { .name = "nosquash_nids", .fops = &ll_nosquash_nids_fops }, @@ -1026,7 +1027,6 @@ static struct attribute *llite_attrs[] = { &lustre_attr_max_easize.attr, &lustre_attr_default_easize.attr, &lustre_attr_xattr_cache.attr, - &lustre_attr_unstable_stats.attr, &lustre_attr_root_squash.attr, NULL, }; -- cgit v1.2.3-59-g8ed1b From bbe097f092b0d13e9736bd2794d0ab24547d0e5d Mon Sep 17 00:00:00 2001 From: Alexandre Belloni Date: Thu, 15 Sep 2016 17:07:22 +0200 Subject: usb: gadget: udc: atmel: fix endpoint name Since commit c32b5bcfa3c4 ("ARM: dts: at91: Fix USB endpoint nodes"), atmel_usba_udc fails with: ------------[ cut here ]------------ WARNING: CPU: 0 PID: 0 at include/linux/usb/gadget.h:405 ecm_do_notify+0x188/0x1a0 Modules linked in: CPU: 0 PID: 0 Comm: swapper Not tainted 4.7.0+ #15 Hardware name: Atmel SAMA5 [] (unwind_backtrace) from [] (show_stack+0x10/0x14) [] (show_stack) from [] (__warn+0xe4/0xfc) [] (__warn) from [] (warn_slowpath_null+0x20/0x28) [] 
(warn_slowpath_null) from [] (ecm_do_notify+0x188/0x1a0) [] (ecm_do_notify) from [] (ecm_set_alt+0x74/0x1ac) [] (ecm_set_alt) from [] (composite_setup+0xfc0/0x19f8) [] (composite_setup) from [] (usba_udc_irq+0x8f4/0xd9c) [] (usba_udc_irq) from [] (handle_irq_event_percpu+0x9c/0x158) [] (handle_irq_event_percpu) from [] (handle_irq_event+0x28/0x3c) [] (handle_irq_event) from [] (handle_fasteoi_irq+0xa0/0x168) [] (handle_fasteoi_irq) from [] (generic_handle_irq+0x24/0x34) [] (generic_handle_irq) from [] (__handle_domain_irq+0x54/0xa8) [] (__handle_domain_irq) from [] (__irq_svc+0x54/0x70) [] (__irq_svc) from [] (arch_cpu_idle+0x38/0x3c) [] (arch_cpu_idle) from [] (cpu_startup_entry+0x9c/0xdc) [] (cpu_startup_entry) from [] (start_kernel+0x354/0x360) [] (start_kernel) from [<20008078>] (0x20008078) ---[ end trace e7cf9dcebf4815a6 ]--- Fixes: c32b5bcfa3c4 ("ARM: dts: at91: Fix USB endpoint nodes") Cc: Reported-by: Richard Genoud Acked-by: Nicolas Ferre Signed-off-by: Alexandre Belloni Signed-off-by: Felipe Balbi --- drivers/usb/gadget/udc/atmel_usba_udc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c index bb1f6c8f0f01..45bc997d0711 100644 --- a/drivers/usb/gadget/udc/atmel_usba_udc.c +++ b/drivers/usb/gadget/udc/atmel_usba_udc.c @@ -1978,7 +1978,7 @@ static struct usba_ep * atmel_udc_of_init(struct platform_device *pdev, dev_err(&pdev->dev, "of_probe: name error(%d)\n", ret); goto err; } - ep->ep.name = name; + ep->ep.name = kasprintf(GFP_KERNEL, "ep%d", ep->index); ep->ep_regs = udc->regs + USBA_EPT_BASE(i); ep->dma_regs = udc->regs + USBA_DMA_BASE(i); -- cgit v1.2.3-59-g8ed1b From 6c83f77278f17a7679001027e9231291c20f0d8a Mon Sep 17 00:00:00 2001 From: Felipe Balbi Date: Tue, 4 Oct 2016 15:14:43 +0300 Subject: usb: gadget: function: u_ether: don't starve tx request queue MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit If we don't guarantee that we will always get an interrupt at least when we're queueing our very last request, we could fall into situation where we queue every request with 'no_interrupt' set. This will cause the link to get stuck. The behavior above has been triggered with g_ether and dwc3. Cc: Reported-by: Ville Syrjälä Signed-off-by: Felipe Balbi --- drivers/usb/gadget/function/u_ether.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c index 9c8c9ed1dc9e..fe1811650dbc 100644 --- a/drivers/usb/gadget/function/u_ether.c +++ b/drivers/usb/gadget/function/u_ether.c @@ -590,8 +590,9 @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb, /* throttle high/super speed IRQ rate back slightly */ if (gadget_is_dualspeed(dev->gadget)) - req->no_interrupt = (dev->gadget->speed == USB_SPEED_HIGH || - dev->gadget->speed == USB_SPEED_SUPER) + req->no_interrupt = (((dev->gadget->speed == USB_SPEED_HIGH || + dev->gadget->speed == USB_SPEED_SUPER)) && + !list_empty(&dev->tx_reqs)) ? ((atomic_read(&dev->tx_qlen) % dev->qmult) != 0) : 0; -- cgit v1.2.3-59-g8ed1b From a9c3ca5fae6bf73770f0576eaf57d5f1305ef4b3 Mon Sep 17 00:00:00 2001 From: Felipe Balbi Date: Wed, 5 Oct 2016 14:24:37 +0300 Subject: usb: dwc3: gadget: properly account queued requests Some requests could be accounted for multiple times. Let's fix that so each and every requests is accounted for only once. 
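The invariant restored by the patch can be shown with a generic sketch (made-up example_* types, not the dwc3 structures): count a request exactly when it is bound to its hardware TRB, and un-count it only when that owning TRB completes, so no request can ever be counted twice.

struct example_trb { int hwo; };

struct example_req {
	struct example_trb *trb;	/* TRB that currently owns this request */
};

struct example_ep {
	unsigned int queued_requests;
};

static void prepare_one(struct example_ep *ep, struct example_req *req,
			struct example_trb *trb)
{
	if (!req->trb) {		/* first TRB handed to this request */
		req->trb = trb;
		ep->queued_requests++;	/* counted exactly once */
	}
}

static void complete_one(struct example_ep *ep, struct example_req *req,
			 struct example_trb *trb)
{
	if (req->trb == trb)		/* only the owning TRB un-counts it */
		ep->queued_requests--;
}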
Cc: # v4.8 Fixes: 55a0237f8f47 ("usb: dwc3: gadget: use allocated/queued reqs for LST bit") Signed-off-by: Felipe Balbi --- drivers/usb/dwc3/gadget.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 07cc8929f271..3c3ced128c77 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -783,6 +783,7 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep, req->trb = trb; req->trb_dma = dwc3_trb_dma_offset(dep, trb); req->first_trb_index = dep->trb_enqueue; + dep->queued_requests++; } dwc3_ep_inc_enq(dep); @@ -833,8 +834,6 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep, trb->ctrl |= DWC3_TRB_CTRL_HWO; - dep->queued_requests++; - trace_dwc3_prepare_trb(dep, trb); } @@ -1861,8 +1860,11 @@ static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep, unsigned int s_pkt = 0; unsigned int trb_status; - dep->queued_requests--; dwc3_ep_inc_deq(dep); + + if (req->trb == trb) + dep->queued_requests--; + trace_dwc3_complete_trb(dep, trb); /* -- cgit v1.2.3-59-g8ed1b From d889c23ce4e3159e3d737f55f9d686a030a7ba87 Mon Sep 17 00:00:00 2001 From: Felipe Balbi Date: Thu, 29 Sep 2016 15:44:29 +0300 Subject: usb: dwc3: gadget: never pre-start Isochronous endpoints We cannot pre-start isochronous endpoints because we rely on the micro-frame number passed via XferNotReady command for proper Isochronous scheduling. Fixes: 08a36b543803 ("usb: dwc3: gadget: simplify __dwc3_gadget_ep_queue()") Signed-off-by: Felipe Balbi --- drivers/usb/dwc3/gadget.c | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 3c3ced128c77..f15147f79d14 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -1073,9 +1073,17 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req) list_add_tail(&req->list, &dep->pending_list); - if (usb_endpoint_xfer_isoc(dep->endpoint.desc) && - dep->flags & DWC3_EP_PENDING_REQUEST) { - if (list_empty(&dep->started_list)) { + /* + * NOTICE: Isochronous endpoints should NEVER be prestarted. We must + * wait for a XferNotReady event so we will know what's the current + * (micro-)frame number. + * + * Without this trick, we are very, very likely gonna get Bus Expiry + * errors which will force us issue EndTransfer command. 
+ */ + if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) { + if ((dep->flags & DWC3_EP_PENDING_REQUEST) && + list_empty(&dep->started_list)) { dwc3_stop_active_transfer(dwc, dep->number, true); dep->flags = DWC3_EP_ENABLED; } -- cgit v1.2.3-59-g8ed1b From 0f02c4e749bc79975dd23ddcc9bfe38bf76afb67 Mon Sep 17 00:00:00 2001 From: Christian Borntraeger Date: Fri, 9 Sep 2016 16:28:43 +0200 Subject: s390/dasd: avoid undefined behaviour the mdc value can be quite big (like 65535), so we are in undefined territory when doing the multiplication with the (also signed) FCX_MAX_DATA_FACTOR as outlined by UBSAN: UBSAN: Undefined behaviour in drivers/s390/block/dasd_eckd.c:1678:14 signed integer overflow: 65535 * 65536 cannot be represented in type 'int' CPU: 5 PID: 183 Comm: kworker/u512:1 Not tainted 4.7.0+ #150 Workqueue: events_unbound async_run_entry_fn 000000fb8b59f900 000000fb8b59f990 0000000000000002 0000000000000000 000000fb8b59fa30 000000fb8b59f9a8 000000fb8b59f9a8 000000000011732e 00000000000000a4 0000000000a309e2 0000000000a4c072 000000000000000b 000000fb8b59f9f0 000000fb8b59f990 0000000000000000 0000000000000000 0400000000d83238 000000000011732e 000000fb8b59f990 000000fb8b59f9f0 Call Trace: ([<0000000000117260>] show_trace+0x98/0xa8) ([<00000000001172e0>] show_stack+0x70/0xf0) ([<000000000053ac96>] dump_stack+0x86/0xb8) ([<000000000057f5f8>] ubsan_epilogue+0x28/0x70) ([<000000000057fe9e>] handle_overflow+0xde/0xf0) ([<00000000006c322a>] dasd_eckd_check_characteristics+0x50a/0x550) ([<00000000006b42ca>] dasd_generic_set_online+0xba/0x380) ([<0000000000693d82>] ccw_device_set_online+0x192/0x550) ([<00000000006ac1ae>] dasd_generic_auto_online+0x2e/0x70) ([<0000000000172130>] async_run_entry_fn+0x70/0x270) ([<0000000000165a72>] process_one_work+0x26a/0x638) ([<0000000000165e8a>] worker_thread+0x4a/0x658) ([<000000000016dd9c>] kthread+0x10c/0x110) ([<00000000008963ae>] kernel_thread_starter+0x6/0xc) ([<00000000008963a8>] kernel_thread_starter+0x0/0xc) As this is a runtime value there is actually no risk of any sane compiler to detect and (ab)use this undefinedness, but let's make the multiplication defined by making mdc unsigned. Signed-off-by: Christian Borntraeger Acked-by: Stefan Haberland Signed-off-by: Martin Schwidefsky --- drivers/s390/block/dasd_eckd.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index 831935af7389..a7a88476e215 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c @@ -1205,7 +1205,7 @@ static int verify_fcx_max_data(struct dasd_device *device, __u8 lpm) mdc, lpm); return mdc; } - fcx_max_data = mdc * FCX_MAX_DATA_FACTOR; + fcx_max_data = (u32)mdc * FCX_MAX_DATA_FACTOR; if (fcx_max_data < private->fcx_max_data) { dev_warn(&device->cdev->dev, "The maximum data size for zHPF requests %u " @@ -1675,7 +1675,7 @@ static u32 get_fcx_max_data(struct dasd_device *device) " data size for zHPF requests failed\n"); return 0; } else - return mdc * FCX_MAX_DATA_FACTOR; + return (u32)mdc * FCX_MAX_DATA_FACTOR; } /* -- cgit v1.2.3-59-g8ed1b From 179a98cba11b057d9f1cc70cd2a8831f9e9a06e6 Mon Sep 17 00:00:00 2001 From: Sebastian Ott Date: Wed, 12 Oct 2016 11:14:31 +0200 Subject: s390/cio: don't register chpids in reserved state During IPL we register all chpids that are not in the unrecognized state. This includes chpids that are not usable and chpids for which the state could not be obtained. 
Change that to only register chpids in the configured (usable) or standby (usable after a configure operation) state. All other chpids could only be made available by external control for which we would receive machine checks. Signed-off-by: Sebastian Ott Reviewed-by: Peter Oberparleiter Signed-off-by: Martin Schwidefsky --- drivers/s390/cio/chp.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c index 46be25c7461e..876c7e6e3a99 100644 --- a/drivers/s390/cio/chp.c +++ b/drivers/s390/cio/chp.c @@ -780,7 +780,7 @@ static int cfg_wait_idle(void) static int __init chp_init(void) { struct chp_id chpid; - int ret; + int state, ret; ret = crw_register_handler(CRW_RSC_CPATH, chp_process_crw); if (ret) @@ -791,7 +791,9 @@ static int __init chp_init(void) return 0; /* Register available channel-paths. */ chp_id_for_each(&chpid) { - if (chp_info_get_status(chpid) != CHP_STATUS_NOT_RECOGNIZED) + state = chp_info_get_status(chpid); + if (state == CHP_STATUS_CONFIGURED || + state == CHP_STATUS_STANDBY) chp_new(chpid); } -- cgit v1.2.3-59-g8ed1b From a07ce8d34eb3d9c6cec3aa25f7713e6aafad2260 Mon Sep 17 00:00:00 2001 From: Heiko Stuebner Date: Fri, 14 Oct 2016 10:47:24 -0700 Subject: usb: dwc2: Add msleep for host-only Although a host-only controller should not have any associated delay, some rockchip SOC platforms will not show the correct host-values of registers until after a delay. So add a 50 ms sleep when in host-only mode. Signed-off-by: John Youn Signed-off-by: Heiko Stuebner Signed-off-by: Felipe Balbi --- drivers/usb/dwc2/core.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/usb/dwc2/core.c b/drivers/usb/dwc2/core.c index fa9b26b91507..4c0fa0b17353 100644 --- a/drivers/usb/dwc2/core.c +++ b/drivers/usb/dwc2/core.c @@ -463,9 +463,18 @@ static void dwc2_clear_force_mode(struct dwc2_hsotg *hsotg) */ void dwc2_force_dr_mode(struct dwc2_hsotg *hsotg) { + bool ret; + switch (hsotg->dr_mode) { case USB_DR_MODE_HOST: - dwc2_force_mode(hsotg, true); + ret = dwc2_force_mode(hsotg, true); + /* + * NOTE: This is required for some rockchip soc based + * platforms on their host-only dwc2. + */ + if (!ret) + msleep(50); + break; case USB_DR_MODE_PERIPHERAL: dwc2_force_mode(hsotg, false); -- cgit v1.2.3-59-g8ed1b From 454915dde06a51133750c6745f0ba57361ba209d Mon Sep 17 00:00:00 2001 From: Michal Nazarewicz Date: Tue, 4 Oct 2016 02:07:33 +0200 Subject: usb: gadget: f_fs: edit epfile->ep under lock epfile->ep is protected by ffs->eps_lock (not epfile->mutex) so clear it while holding the spin lock. 
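Put generally: a field must be written under the same lock its readers take; holding some other lock (here the per-file mutex) gives no exclusion at all. A stripped-down sketch of the rule, with invented types rather than the f_fs structures:

struct example_owner {
	spinlock_t lock;		/* protects ->ep in every file below */
};

struct example_epfile {
	struct example_owner	*owner;
	struct mutex		io_mutex;	/* serialises I/O, does NOT cover ->ep */
	void			*ep;		/* P: owner->lock */
};

static void example_disable(struct example_epfile *file)
{
	unsigned long flags;

	spin_lock_irqsave(&file->owner->lock, flags);
	file->ep = NULL;		/* same lock the readers hold */
	spin_unlock_irqrestore(&file->owner->lock, flags);
}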
Tested-by: John Stultz Tested-by: Chen Yu Signed-off-by: Michal Nazarewicz Signed-off-by: Felipe Balbi --- drivers/usb/gadget/function/f_fs.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index 54ad100af35b..b31aa9572723 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c @@ -1725,17 +1725,17 @@ static void ffs_func_eps_disable(struct ffs_function *func) unsigned long flags; do { - if (epfile) - mutex_lock(&epfile->mutex); spin_lock_irqsave(&func->ffs->eps_lock, flags); /* pending requests get nuked */ if (likely(ep->ep)) usb_ep_disable(ep->ep); ++ep; + if (epfile) + epfile->ep = NULL; spin_unlock_irqrestore(&func->ffs->eps_lock, flags); if (epfile) { - epfile->ep = NULL; + mutex_lock(&epfile->mutex); kfree(epfile->read_buffer); epfile->read_buffer = NULL; mutex_unlock(&epfile->mutex); -- cgit v1.2.3-59-g8ed1b From a9e6f83c2df199187a5248f824f31b6787ae23ae Mon Sep 17 00:00:00 2001 From: Michal Nazarewicz Date: Tue, 4 Oct 2016 02:07:34 +0200 Subject: usb: gadget: f_fs: stop sleeping in ffs_func_eps_disable MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ffs_func_eps_disable is called from atomic context so it cannot sleep thus cannot grab a mutex. Change the handling of epfile->read_buffer to use non-sleeping synchronisation method. Reported-by: Chen Yu Signed-off-by: Michał Nazarewicz Fixes: 9353afbbfa7b ("buffer data from ‘oversized’ OUT requests") Tested-by: John Stultz Tested-by: Chen Yu Signed-off-by: Felipe Balbi --- drivers/usb/gadget/function/f_fs.c | 109 +++++++++++++++++++++++++++++++------ 1 file changed, 93 insertions(+), 16 deletions(-) (limited to 'drivers') diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index b31aa9572723..e40d47d47d82 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c @@ -136,8 +136,60 @@ struct ffs_epfile { /* * Buffer for holding data from partial reads which may happen since * we’re rounding user read requests to a multiple of a max packet size. + * + * The pointer is initialised with NULL value and may be set by + * __ffs_epfile_read_data function to point to a temporary buffer. + * + * In normal operation, calls to __ffs_epfile_read_buffered will consume + * data from said buffer and eventually free it. Importantly, while the + * function is using the buffer, it sets the pointer to NULL. This is + * all right since __ffs_epfile_read_data and __ffs_epfile_read_buffered + * can never run concurrently (they are synchronised by epfile->mutex) + * so the latter will not assign a new value to the pointer. + * + * Meanwhile ffs_func_eps_disable frees the buffer (if the pointer is + * valid) and sets the pointer to READ_BUFFER_DROP value. This special + * value is crux of the synchronisation between ffs_func_eps_disable and + * __ffs_epfile_read_data. + * + * Once __ffs_epfile_read_data is about to finish it will try to set the + * pointer back to its old value (as described above), but seeing as the + * pointer is not-NULL (namely READ_BUFFER_DROP) it will instead free + * the buffer. 
+ * + * == State transitions == + * + * • ptr == NULL: (initial state) + * ◦ __ffs_epfile_read_buffer_free: go to ptr == DROP + * ◦ __ffs_epfile_read_buffered: nop + * ◦ __ffs_epfile_read_data allocates temp buffer: go to ptr == buf + * ◦ reading finishes: n/a, not in ‘and reading’ state + * • ptr == DROP: + * ◦ __ffs_epfile_read_buffer_free: nop + * ◦ __ffs_epfile_read_buffered: go to ptr == NULL + * ◦ __ffs_epfile_read_data allocates temp buffer: free buf, nop + * ◦ reading finishes: n/a, not in ‘and reading’ state + * • ptr == buf: + * ◦ __ffs_epfile_read_buffer_free: free buf, go to ptr == DROP + * ◦ __ffs_epfile_read_buffered: go to ptr == NULL and reading + * ◦ __ffs_epfile_read_data: n/a, __ffs_epfile_read_buffered + * is always called first + * ◦ reading finishes: n/a, not in ‘and reading’ state + * • ptr == NULL and reading: + * ◦ __ffs_epfile_read_buffer_free: go to ptr == DROP and reading + * ◦ __ffs_epfile_read_buffered: n/a, mutex is held + * ◦ __ffs_epfile_read_data: n/a, mutex is held + * ◦ reading finishes and … + * … all data read: free buf, go to ptr == NULL + * … otherwise: go to ptr == buf and reading + * • ptr == DROP and reading: + * ◦ __ffs_epfile_read_buffer_free: nop + * ◦ __ffs_epfile_read_buffered: n/a, mutex is held + * ◦ __ffs_epfile_read_data: n/a, mutex is held + * ◦ reading finishes: free buf, go to ptr == DROP */ - struct ffs_buffer *read_buffer; /* P: epfile->mutex */ + struct ffs_buffer *read_buffer; +#define READ_BUFFER_DROP ((struct ffs_buffer *)ERR_PTR(-ESHUTDOWN)) char name[5]; @@ -736,25 +788,47 @@ static void ffs_epfile_async_io_complete(struct usb_ep *_ep, schedule_work(&io_data->work); } +static void __ffs_epfile_read_buffer_free(struct ffs_epfile *epfile) +{ + /* + * See comment in struct ffs_epfile for full read_buffer pointer + * synchronisation story. + */ + struct ffs_buffer *buf = xchg(&epfile->read_buffer, READ_BUFFER_DROP); + if (buf && buf != READ_BUFFER_DROP) + kfree(buf); +} + /* Assumes epfile->mutex is held. */ static ssize_t __ffs_epfile_read_buffered(struct ffs_epfile *epfile, struct iov_iter *iter) { - struct ffs_buffer *buf = epfile->read_buffer; + /* + * Null out epfile->read_buffer so ffs_func_eps_disable does not free + * the buffer while we are using it. See comment in struct ffs_epfile + * for full read_buffer pointer synchronisation story. + */ + struct ffs_buffer *buf = xchg(&epfile->read_buffer, NULL); ssize_t ret; - if (!buf) + if (!buf || buf == READ_BUFFER_DROP) return 0; ret = copy_to_iter(buf->data, buf->length, iter); if (buf->length == ret) { kfree(buf); - epfile->read_buffer = NULL; - } else if (unlikely(iov_iter_count(iter))) { + return ret; + } + + if (unlikely(iov_iter_count(iter))) { ret = -EFAULT; } else { buf->length -= ret; buf->data += ret; } + + if (cmpxchg(&epfile->read_buffer, NULL, buf)) + kfree(buf); + return ret; } @@ -783,7 +857,15 @@ static ssize_t __ffs_epfile_read_data(struct ffs_epfile *epfile, buf->length = data_len; buf->data = buf->storage; memcpy(buf->storage, data + ret, data_len); - epfile->read_buffer = buf; + + /* + * At this point read_buffer is NULL or READ_BUFFER_DROP (if + * ffs_func_eps_disable has been called in the meanwhile). See comment + * in struct ffs_epfile for full read_buffer pointer synchronisation + * story. 
+ */ + if (unlikely(cmpxchg(&epfile->read_buffer, NULL, buf))) + kfree(buf); return ret; } @@ -1097,8 +1179,7 @@ ffs_epfile_release(struct inode *inode, struct file *file) ENTER(); - kfree(epfile->read_buffer); - epfile->read_buffer = NULL; + __ffs_epfile_read_buffer_free(epfile); ffs_data_closed(epfile->ffs); return 0; @@ -1724,24 +1805,20 @@ static void ffs_func_eps_disable(struct ffs_function *func) unsigned count = func->ffs->eps_count; unsigned long flags; + spin_lock_irqsave(&func->ffs->eps_lock, flags); do { - spin_lock_irqsave(&func->ffs->eps_lock, flags); /* pending requests get nuked */ if (likely(ep->ep)) usb_ep_disable(ep->ep); ++ep; - if (epfile) - epfile->ep = NULL; - spin_unlock_irqrestore(&func->ffs->eps_lock, flags); if (epfile) { - mutex_lock(&epfile->mutex); - kfree(epfile->read_buffer); - epfile->read_buffer = NULL; - mutex_unlock(&epfile->mutex); + epfile->ep = NULL; + __ffs_epfile_read_buffer_free(epfile); ++epfile; } } while (--count); + spin_unlock_irqrestore(&func->ffs->eps_lock, flags); } static int ffs_func_eps_enable(struct ffs_function *func) -- cgit v1.2.3-59-g8ed1b From 51fbc7c06c8900370c6da5fc4a4685add8fa4fb0 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Fri, 7 Oct 2016 22:12:39 +0200 Subject: usb: dwc3: Fix size used in dma_free_coherent() In commit 2abd9d5fa60f9 ("usb: dwc3: ep0: Add chained TRB support"), the size of the memory allocated with 'dma_alloc_coherent()' has been modified but the corresponding calls to 'dma_free_coherent()' have not been updated accordingly. This has been spotted with coccinelle, using the following script: //////////////////// @r@ expression x0, x1, y0, y1, z0, z1, t0, t1, ret; @@ * ret = dma_alloc_coherent(x0, y0, z0, t0); ... * dma_free_coherent(x1, y1, ret, t1); @script:python@ y0 << r.y0; y1 << r.y1; @@ if y1.find(y0) == -1: print "WARNING: sizes look different: '%s' vs '%s'" % (y0, y1) //////////////////// Fixes: 2abd9d5fa60f9 ("usb: dwc3: ep0: Add chained TRB support") Signed-off-by: Christophe JAILLET Signed-off-by: Felipe Balbi --- drivers/usb/dwc3/gadget.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index f15147f79d14..1dfa56a5f1c5 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -2990,7 +2990,7 @@ err3: kfree(dwc->setup_buf); err2: - dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb), + dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb) * 2, dwc->ep0_trb, dwc->ep0_trb_addr); err1: @@ -3015,7 +3015,7 @@ void dwc3_gadget_exit(struct dwc3 *dwc) kfree(dwc->setup_buf); kfree(dwc->zlp_buf); - dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb), + dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb) * 2, dwc->ep0_trb, dwc->ep0_trb_addr); dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req), -- cgit v1.2.3-59-g8ed1b From a19b882c07a63174f09d1f7c036cc2aab7f04ad3 Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Thu, 6 Oct 2016 10:25:38 -0700 Subject: wusb: Stop using the stack for sg crypto scratch space Pointing an sg list at the stack is verboten and, with CONFIG_VMAP_STACK=y, will malfunction. Use kmalloc for the wusb crypto stack space instead. Untested -- I'm not entirely convinced that this hardware exists in the wild. 
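The general rule, independent of this driver: scatterlist entries must reference memory with a normal linear mapping, and with CONFIG_VMAP_STACK the stack no longer qualifies, so per-operation scratch space has to come from the heap. A small generic sketch (the scratch layout and function are invented):

#include <linux/scatterlist.h>
#include <linux/slab.h>

struct example_scratch {
	u8 block[16];
};

static int example_sg_from_heap(void)
{
	struct example_scratch *s;
	struct scatterlist sg;

	s = kmalloc(sizeof(*s), GFP_KERNEL);	/* not a stack variable */
	if (!s)
		return -ENOMEM;

	sg_init_one(&sg, s->block, sizeof(s->block));
	/* ... hand &sg to the crypto or DMA API here ... */

	kfree(s);
	return 0;
}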
Signed-off-by: Andy Lutomirski Signed-off-by: Greg Kroah-Hartman --- drivers/usb/wusbcore/crypto.c | 59 +++++++++++++++++++++++++++---------------- 1 file changed, 37 insertions(+), 22 deletions(-) (limited to 'drivers') diff --git a/drivers/usb/wusbcore/crypto.c b/drivers/usb/wusbcore/crypto.c index 79b2b628066d..de089f3a82f3 100644 --- a/drivers/usb/wusbcore/crypto.c +++ b/drivers/usb/wusbcore/crypto.c @@ -133,6 +133,13 @@ static void bytewise_xor(void *_bo, const void *_bi1, const void *_bi2, bo[itr] = bi1[itr] ^ bi2[itr]; } +/* Scratch space for MAC calculations. */ +struct wusb_mac_scratch { + struct aes_ccm_b0 b0; + struct aes_ccm_b1 b1; + struct aes_ccm_a ax; +}; + /* * CC-MAC function WUSB1.0[6.5] * @@ -197,16 +204,15 @@ static void bytewise_xor(void *_bo, const void *_bi1, const void *_bi2, * what sg[4] is for. Maybe there is a smarter way to do this. */ static int wusb_ccm_mac(struct crypto_skcipher *tfm_cbc, - struct crypto_cipher *tfm_aes, void *mic, + struct crypto_cipher *tfm_aes, + struct wusb_mac_scratch *scratch, + void *mic, const struct aes_ccm_nonce *n, const struct aes_ccm_label *a, const void *b, size_t blen) { int result = 0; SKCIPHER_REQUEST_ON_STACK(req, tfm_cbc); - struct aes_ccm_b0 b0; - struct aes_ccm_b1 b1; - struct aes_ccm_a ax; struct scatterlist sg[4], sg_dst; void *dst_buf; size_t dst_size; @@ -218,16 +224,17 @@ static int wusb_ccm_mac(struct crypto_skcipher *tfm_cbc, * These checks should be compile time optimized out * ensure @a fills b1's mac_header and following fields */ - WARN_ON(sizeof(*a) != sizeof(b1) - sizeof(b1.la)); - WARN_ON(sizeof(b0) != sizeof(struct aes_ccm_block)); - WARN_ON(sizeof(b1) != sizeof(struct aes_ccm_block)); - WARN_ON(sizeof(ax) != sizeof(struct aes_ccm_block)); + WARN_ON(sizeof(*a) != sizeof(scratch->b1) - sizeof(scratch->b1.la)); + WARN_ON(sizeof(scratch->b0) != sizeof(struct aes_ccm_block)); + WARN_ON(sizeof(scratch->b1) != sizeof(struct aes_ccm_block)); + WARN_ON(sizeof(scratch->ax) != sizeof(struct aes_ccm_block)); result = -ENOMEM; zero_padding = blen % sizeof(struct aes_ccm_block); if (zero_padding) zero_padding = sizeof(struct aes_ccm_block) - zero_padding; - dst_size = blen + sizeof(b0) + sizeof(b1) + zero_padding; + dst_size = blen + sizeof(scratch->b0) + sizeof(scratch->b1) + + zero_padding; dst_buf = kzalloc(dst_size, GFP_KERNEL); if (!dst_buf) goto error_dst_buf; @@ -235,9 +242,9 @@ static int wusb_ccm_mac(struct crypto_skcipher *tfm_cbc, memset(iv, 0, sizeof(iv)); /* Setup B0 */ - b0.flags = 0x59; /* Format B0 */ - b0.ccm_nonce = *n; - b0.lm = cpu_to_be16(0); /* WUSB1.0[6.5] sez l(m) is 0 */ + scratch->b0.flags = 0x59; /* Format B0 */ + scratch->b0.ccm_nonce = *n; + scratch->b0.lm = cpu_to_be16(0); /* WUSB1.0[6.5] sez l(m) is 0 */ /* Setup B1 * @@ -246,12 +253,12 @@ static int wusb_ccm_mac(struct crypto_skcipher *tfm_cbc, * 14'--after clarification, it means to use A's contents * for MAC Header, EO, sec reserved and padding. 
*/ - b1.la = cpu_to_be16(blen + 14); - memcpy(&b1.mac_header, a, sizeof(*a)); + scratch->b1.la = cpu_to_be16(blen + 14); + memcpy(&scratch->b1.mac_header, a, sizeof(*a)); sg_init_table(sg, ARRAY_SIZE(sg)); - sg_set_buf(&sg[0], &b0, sizeof(b0)); - sg_set_buf(&sg[1], &b1, sizeof(b1)); + sg_set_buf(&sg[0], &scratch->b0, sizeof(scratch->b0)); + sg_set_buf(&sg[1], &scratch->b1, sizeof(scratch->b1)); sg_set_buf(&sg[2], b, blen); /* 0 if well behaved :) */ sg_set_buf(&sg[3], bzero, zero_padding); @@ -276,11 +283,12 @@ static int wusb_ccm_mac(struct crypto_skcipher *tfm_cbc, * POS Crypto API: size is assumed to be AES's block size. * Thanks for documenting it -- tip taken from airo.c */ - ax.flags = 0x01; /* as per WUSB 1.0 spec */ - ax.ccm_nonce = *n; - ax.counter = 0; - crypto_cipher_encrypt_one(tfm_aes, (void *)&ax, (void *)&ax); - bytewise_xor(mic, &ax, iv, 8); + scratch->ax.flags = 0x01; /* as per WUSB 1.0 spec */ + scratch->ax.ccm_nonce = *n; + scratch->ax.counter = 0; + crypto_cipher_encrypt_one(tfm_aes, (void *)&scratch->ax, + (void *)&scratch->ax); + bytewise_xor(mic, &scratch->ax, iv, 8); result = 8; error_cbc_crypt: kfree(dst_buf); @@ -303,6 +311,7 @@ ssize_t wusb_prf(void *out, size_t out_size, struct aes_ccm_nonce n = *_n; struct crypto_skcipher *tfm_cbc; struct crypto_cipher *tfm_aes; + struct wusb_mac_scratch *scratch; u64 sfn = 0; __le64 sfn_le; @@ -329,17 +338,23 @@ ssize_t wusb_prf(void *out, size_t out_size, printk(KERN_ERR "E: can't set AES key: %d\n", (int)result); goto error_setkey_aes; } + scratch = kmalloc(sizeof(*scratch), GFP_KERNEL); + if (!scratch) + goto error_alloc_scratch; for (bitr = 0; bitr < (len + 63) / 64; bitr++) { sfn_le = cpu_to_le64(sfn++); memcpy(&n.sfn, &sfn_le, sizeof(n.sfn)); /* n.sfn++... */ - result = wusb_ccm_mac(tfm_cbc, tfm_aes, out + bytes, + result = wusb_ccm_mac(tfm_cbc, tfm_aes, scratch, out + bytes, &n, a, b, blen); if (result < 0) goto error_ccm_mac; bytes += result; } result = bytes; + + kfree(scratch); +error_alloc_scratch: error_ccm_mac: error_setkey_aes: crypto_free_cipher(tfm_aes); -- cgit v1.2.3-59-g8ed1b From 589ce5f447b8610df0dbd6935b56d5cda17b9cec Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Fri, 14 Oct 2016 15:13:07 +0100 Subject: irqchip/gic-v3-its: Fix 64bit GIC{R,ITS}_TYPER accesses The GICv3 architecture specification mentions that a 64bit register can be accessed using two 32bit accesses. What it doesn't mention is that this is only guaranteed on a system that implements AArch32, and a pure AArch64 system is allowed not to support this. This causes issues with the GICR_TYPER and GITS_TYPER registers, which are both RO 64bit registers. In order to solve this, this patch switches the TYPER accesses to the gic_read_typer macro already used in other parts of the driver. This makes sure that we always use a 64bit access on 64bit systems, and two 32bit accesses on 32bit system. 
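For illustration, such a helper has roughly the following shape (a generic sketch only; the real helper is gic_read_typer, provided by the per-architecture headers): a single 64-bit load where the CPU can issue one, and two 32-bit loads composed into a 64-bit value otherwise.

static u64 example_read_64bit_reg(void __iomem *addr)
{
#ifdef CONFIG_64BIT
	return readq_relaxed(addr);			/* one 64-bit access */
#else
	u64 val;

	val  = readl_relaxed(addr);			/* low word  */
	val |= (u64)readl_relaxed(addr + 4) << 32;	/* high word */
	return val;
#endif
}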
Signed-off-by: Marc Zyngier --- drivers/irqchip/irq-gic-v3-its.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 003495d91f9c..c5dee300e8a3 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -1023,7 +1023,7 @@ static void its_free_tables(struct its_node *its) static int its_alloc_tables(struct its_node *its) { - u64 typer = readq_relaxed(its->base + GITS_TYPER); + u64 typer = gic_read_typer(its->base + GITS_TYPER); u32 ids = GITS_TYPER_DEVBITS(typer); u64 shr = GITS_BASER_InnerShareable; u64 cache = GITS_BASER_WaWb; @@ -1198,7 +1198,7 @@ static void its_cpu_init_collection(void) * We now have to bind each collection to its target * redistributor. */ - if (readq_relaxed(its->base + GITS_TYPER) & GITS_TYPER_PTA) { + if (gic_read_typer(its->base + GITS_TYPER) & GITS_TYPER_PTA) { /* * This ITS wants the physical address of the * redistributor. @@ -1208,7 +1208,7 @@ static void its_cpu_init_collection(void) /* * This ITS wants a linear CPU number. */ - target = readq_relaxed(gic_data_rdist_rd_base() + GICR_TYPER); + target = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER); target = GICR_TYPER_CPU_NUMBER(target) << 16; } @@ -1691,7 +1691,7 @@ static int __init its_probe_one(struct resource *res, INIT_LIST_HEAD(&its->its_device_list); its->base = its_base; its->phys_base = res->start; - its->ite_size = ((readl_relaxed(its_base + GITS_TYPER) >> 4) & 0xf) + 1; + its->ite_size = ((gic_read_typer(its_base + GITS_TYPER) >> 4) & 0xf) + 1; its->numa_node = numa_node; its->cmd_base = kzalloc(ITS_CMD_QUEUE_SZ, GFP_KERNEL); @@ -1763,7 +1763,7 @@ out_unmap: static bool gic_rdists_supports_plpis(void) { - return !!(readl_relaxed(gic_data_rdist_rd_base() + GICR_TYPER) & GICR_TYPER_PLPIS); + return !!(gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER) & GICR_TYPER_PLPIS); } int its_cpu_init(void) -- cgit v1.2.3-59-g8ed1b From 15480f3ab7a8b5333e170d7168ce8a1012cc735e Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Mon, 17 Oct 2016 11:39:32 +0100 Subject: PCI: layerscape: Fix drvdata usage before assignment Commit fefe6733e516 ("PCI: layerscape: Move struct pcie_port setup to probe function") changed the init ordering of the pcie structure, but started to use the pcie->drvdata field before initializing it. Mayhem follows. Fix this by moving the drvdata assignment right before the first use. Tested on LS2085a. 
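As a hedged, much-simplified illustration of that ordering rule (every type and field name below is invented, not the layerscape driver's):

struct demo_ops;                                /* opaque for this sketch */
struct demo_drvdata { const struct demo_ops *ops; };
struct demo_pcie {
        const struct demo_drvdata *drvdata;
        const struct demo_ops *pp_ops;
};

static int demo_probe(struct demo_pcie *pcie, const struct demo_drvdata *match_data)
{
        pcie->drvdata = match_data;             /* assign first ...             */
        pcie->pp_ops = pcie->drvdata->ops;      /* ... only then dereference it */
        return 0;
}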
Fixes: efe6733e516 ("PCI: layerscape: Move struct pcie_port setup to probe function") Signed-off-by: Marc Zyngier Signed-off-by: Bjorn Helgaas --- drivers/pci/host/pci-layerscape.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/pci/host/pci-layerscape.c b/drivers/pci/host/pci-layerscape.c index 2cb7315e26d0..653707996342 100644 --- a/drivers/pci/host/pci-layerscape.c +++ b/drivers/pci/host/pci-layerscape.c @@ -247,6 +247,7 @@ static int __init ls_pcie_probe(struct platform_device *pdev) pp = &pcie->pp; pp->dev = dev; + pcie->drvdata = match->data; pp->ops = pcie->drvdata->ops; dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs"); @@ -256,7 +257,6 @@ static int __init ls_pcie_probe(struct platform_device *pdev) return PTR_ERR(pcie->pp.dbi_base); } - pcie->drvdata = match->data; pcie->lut = pcie->pp.dbi_base + pcie->drvdata->lut_offset; if (!ls_pcie_is_bridge(pcie)) -- cgit v1.2.3-59-g8ed1b From 02265cd60335a2c1417abae4192611e1fc05a6e5 Mon Sep 17 00:00:00 2001 From: Haibo Chen Date: Mon, 17 Oct 2016 10:18:37 +0200 Subject: mmc: sdhci: cast unsigned int to unsigned long long to avoid unexpeted error Potentially overflowing expression 1000000 * data->timeout_clks with type unsigned int is evaluated using 32-bit arithmetic, and then used in a context that expects an expression of type unsigned long long. To avoid overflow, cast 1000000U to type unsigned long long. Special thanks to Coverity. Fixes: 7f05538af71c ("mmc: sdhci: fix data timeout (part 2)") Signed-off-by: Haibo Chen Cc: stable@vger.kernel.org # v3.15+ Acked-by: Adrian Hunter Signed-off-by: Ulf Hansson --- drivers/mmc/host/sdhci.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index 223a91e039dc..71654b90227f 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -687,7 +687,7 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd) * host->clock is in Hz. target_timeout is in us. * Hence, us = 1000000 * cycles / Hz. Round up. */ - val = 1000000 * data->timeout_clks; + val = 1000000ULL * data->timeout_clks; if (do_div(val, host->clock)) target_timeout++; target_timeout += val; -- cgit v1.2.3-59-g8ed1b From 31cf742f515c275d22843c4c756e048d2b6d716c Mon Sep 17 00:00:00 2001 From: Ulf Hansson Date: Tue, 27 Sep 2016 08:44:33 -0700 Subject: mmc: rtsx_usb_sdmmc: Avoid keeping the device runtime resumed when unused The rtsx_usb_sdmmc driver may bail out in its ->set_ios() callback when no SD card is inserted. This is wrong, as it could cause the device to remain runtime resumed when it's unused. Fix this behaviour. 
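A hedged sketch of the resulting callback shape (the demo_* names are placeholders, not the driver's symbols):

#include <linux/mmc/host.h>

static void demo_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
        struct demo_host *host = mmc_priv(mmc);        /* demo_host is illustrative */

        mutex_lock(&host->lock);
        /*
         * No card-presence bail-out here: the requested power mode and bus
         * settings are always applied, so a power-off request is not lost.
         */
        demo_set_power_mode(host, ios->power_mode);
        demo_set_bus_width(host, ios->bus_width);
        mutex_unlock(&host->lock);
}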
Tested-by: Ritesh Raj Sarraf Cc: Cc: Alan Stern Signed-off-by: Ulf Hansson --- drivers/mmc/host/rtsx_usb_sdmmc.c | 5 ----- 1 file changed, 5 deletions(-) (limited to 'drivers') diff --git a/drivers/mmc/host/rtsx_usb_sdmmc.c b/drivers/mmc/host/rtsx_usb_sdmmc.c index 4106295527b9..e0b85904c9bb 100644 --- a/drivers/mmc/host/rtsx_usb_sdmmc.c +++ b/drivers/mmc/host/rtsx_usb_sdmmc.c @@ -1138,11 +1138,6 @@ static void sdmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) dev_dbg(sdmmc_dev(host), "%s\n", __func__); mutex_lock(&ucr->dev_mutex); - if (rtsx_usb_card_exclusive_check(ucr, RTSX_USB_SD_CARD)) { - mutex_unlock(&ucr->dev_mutex); - return; - } - sd_set_power_mode(host, ios->power_mode); sd_set_bus_width(host, ios->bus_width); sd_set_timing(host, ios->timing, &host->ddr_mode); -- cgit v1.2.3-59-g8ed1b From 4f48aa7a11bfed9502a7c85a5b68cd40ea827f73 Mon Sep 17 00:00:00 2001 From: Ulf Hansson Date: Thu, 15 Sep 2016 14:46:21 +0200 Subject: mmc: rtsx_usb_sdmmc: Handle runtime PM while changing the led Accesses of the rtsx sdmmc's parent device, which is the rtsx usb device, must be done when it's runtime resumed. Currently this isn't case when changing the led, so let's fix this by adding a pm_runtime_get_sync() and a pm_runtime_put() around those operations. Reported-by: Ritesh Raj Sarraf Tested-by: Ritesh Raj Sarraf Cc: Cc: Alan Stern Signed-off-by: Ulf Hansson --- drivers/mmc/host/rtsx_usb_sdmmc.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers') diff --git a/drivers/mmc/host/rtsx_usb_sdmmc.c b/drivers/mmc/host/rtsx_usb_sdmmc.c index e0b85904c9bb..6e9c0f8fddb1 100644 --- a/drivers/mmc/host/rtsx_usb_sdmmc.c +++ b/drivers/mmc/host/rtsx_usb_sdmmc.c @@ -1309,6 +1309,7 @@ static void rtsx_usb_update_led(struct work_struct *work) container_of(work, struct rtsx_usb_sdmmc, led_work); struct rtsx_ucr *ucr = host->ucr; + pm_runtime_get_sync(sdmmc_dev(host)); mutex_lock(&ucr->dev_mutex); if (host->led.brightness == LED_OFF) @@ -1317,6 +1318,7 @@ static void rtsx_usb_update_led(struct work_struct *work) rtsx_usb_turn_on_led(ucr); mutex_unlock(&ucr->dev_mutex); + pm_runtime_put(sdmmc_dev(host)); } #endif -- cgit v1.2.3-59-g8ed1b From 796aa46adf1d90eab36ae06a42e6d3f10b28a75c Mon Sep 17 00:00:00 2001 From: Alan Stern Date: Mon, 26 Sep 2016 15:45:41 -0700 Subject: memstick: rtsx_usb_ms: Runtime resume the device when polling for cards Accesses to the rtsx usb device, which is the parent of the rtsx memstick device, must not be done unless it's runtime resumed. Therefore when the rtsx_usb_ms driver polls for inserted memstick cards, let's add pm_runtime_get|put*() to make sure accesses is done when the rtsx usb device is runtime resumed. 
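A hedged sketch of that get/put bracketing around a parent-device access (struct fields and helpers below are illustrative, not the real driver's):

#include <linux/pm_runtime.h>
#include <linux/workqueue.h>

static void demo_update_led(struct work_struct *work)
{
        struct demo_host *host = container_of(work, struct demo_host, led_work);

        pm_runtime_get_sync(host->parent_dev);  /* make sure the parent is resumed */
        mutex_lock(&host->dev_mutex);

        demo_write_led(host);                   /* touches the parent's registers */

        mutex_unlock(&host->dev_mutex);
        pm_runtime_put(host->parent_dev);       /* drop the reference so it can idle */
}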
Reported-by: Ritesh Raj Sarraf Tested-by: Ritesh Raj Sarraf Signed-off-by: Alan Stern Cc: Signed-off-by: Ulf Hansson --- drivers/memstick/host/rtsx_usb_ms.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers') diff --git a/drivers/memstick/host/rtsx_usb_ms.c b/drivers/memstick/host/rtsx_usb_ms.c index d34bc3530385..1b994897f0ff 100644 --- a/drivers/memstick/host/rtsx_usb_ms.c +++ b/drivers/memstick/host/rtsx_usb_ms.c @@ -681,6 +681,7 @@ static int rtsx_usb_detect_ms_card(void *__host) int err; for (;;) { + pm_runtime_get_sync(ms_dev(host)); mutex_lock(&ucr->dev_mutex); /* Check pending MS card changes */ @@ -703,6 +704,7 @@ static int rtsx_usb_detect_ms_card(void *__host) } poll_again: + pm_runtime_put(ms_dev(host)); if (host->eject) break; -- cgit v1.2.3-59-g8ed1b From 9158cb29e7c2f10dd325eb1589f0fe745a271257 Mon Sep 17 00:00:00 2001 From: Ulf Hansson Date: Wed, 28 Sep 2016 11:33:28 -0700 Subject: memstick: rtsx_usb_ms: Manage runtime PM when accessing the device Accesses to the rtsx usb device, which is the parent of the rtsx memstick device, must not be done unless it's runtime resumed. This is currently not the case and it could trigger various errors. Fix this by properly deal with runtime PM in this regards. This means making sure the device is runtime resumed, when serving requests via the ->request() callback or changing settings via the ->set_param() callbacks. Cc: Cc: Ritesh Raj Sarraf Cc: Alan Stern Signed-off-by: Ulf Hansson --- drivers/memstick/host/rtsx_usb_ms.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers') diff --git a/drivers/memstick/host/rtsx_usb_ms.c b/drivers/memstick/host/rtsx_usb_ms.c index 1b994897f0ff..2e3cf012ef48 100644 --- a/drivers/memstick/host/rtsx_usb_ms.c +++ b/drivers/memstick/host/rtsx_usb_ms.c @@ -524,6 +524,7 @@ static void rtsx_usb_ms_handle_req(struct work_struct *work) int rc; if (!host->req) { + pm_runtime_get_sync(ms_dev(host)); do { rc = memstick_next_req(msh, &host->req); dev_dbg(ms_dev(host), "next req %d\n", rc); @@ -544,6 +545,7 @@ static void rtsx_usb_ms_handle_req(struct work_struct *work) host->req->error); } } while (!rc); + pm_runtime_put(ms_dev(host)); } } @@ -570,6 +572,7 @@ static int rtsx_usb_ms_set_param(struct memstick_host *msh, dev_dbg(ms_dev(host), "%s: param = %d, value = %d\n", __func__, param, value); + pm_runtime_get_sync(ms_dev(host)); mutex_lock(&ucr->dev_mutex); err = rtsx_usb_card_exclusive_check(ucr, RTSX_USB_MS_CARD); @@ -635,6 +638,7 @@ static int rtsx_usb_ms_set_param(struct memstick_host *msh, } out: mutex_unlock(&ucr->dev_mutex); + pm_runtime_put(ms_dev(host)); /* power-on delay */ if (param == MEMSTICK_POWER && value == MEMSTICK_POWER_ON) -- cgit v1.2.3-59-g8ed1b From a04a480d4392ea6efd117be2de564117b2a009c0 Mon Sep 17 00:00:00 2001 From: David Ahern Date: Sun, 16 Oct 2016 20:02:52 -0700 Subject: net: Require exact match for TCP socket lookups if dif is l3mdev Currently, socket lookups for l3mdev (vrf) use cases can match a socket that is bound to a port but not a device (ie., a global socket). If the sysctl tcp_l3mdev_accept is not set this leads to ack packets going out based on the main table even though the packet came in from an L3 domain. The end result is that the connection does not establish creating confusion for users since the service is running and a socket shows in ss output. Fix by requiring an exact dif to sk_bound_dev_if match if the skb came through an interface enslaved to an l3mdev device and the tcp_l3mdev_accept is not set. 
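(A hedged, much-simplified sketch of that scoring rule follows; the socket structure is invented for illustration and is not the kernel's struct sock.)

struct demo_sock {
        int bound_dev_if;       /* 0 means "not bound to any device" */
};

/* Return -1 to reject the candidate socket for this skb, 1 to accept it. */
static int demo_compute_score(const struct demo_sock *sk, int dif, bool exact_dif)
{
        if (sk->bound_dev_if || exact_dif) {
                if (sk->bound_dev_if != dif)
                        return -1;      /* unbound or bound to the wrong device */
        }
        return 1;
}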
skb's through an l3mdev interface are marked by setting a flag in inet{6}_skb_parm. The IPv6 variant is already set; this patch adds the flag for IPv4. Using an skb flag avoids a device lookup on the dif. The flag is set in the VRF driver using the IP{6}CB macros. For IPv4, the inet_skb_parm struct is moved in the cb per commit 971f10eca186, so the match function in the TCP stack needs to use TCP_SKB_CB. For IPv6, the move is done after the socket lookup, so IP6CB is used. The flags field in inet_skb_parm struct needs to be increased to add another flag. There is currently a 1-byte hole following the flags, so it can be expanded to u16 without increasing the size of the struct. Fixes: 193125dbd8eb ("net: Introduce VRF device driver") Signed-off-by: David Ahern Signed-off-by: David S. Miller --- drivers/net/vrf.c | 2 ++ include/linux/ipv6.h | 17 ++++++++++++++--- include/net/ip.h | 8 +++++++- include/net/tcp.h | 13 ++++++++++++- net/ipv4/inet_hashtables.c | 8 +++++--- net/ipv6/inet6_hashtables.c | 7 ++++--- 6 files changed, 44 insertions(+), 11 deletions(-) (limited to 'drivers') diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index 85c271c70d42..820de6a9ddde 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -956,6 +956,7 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev, if (skb->pkt_type == PACKET_LOOPBACK) { skb->dev = vrf_dev; skb->skb_iif = vrf_dev->ifindex; + IP6CB(skb)->flags |= IP6SKB_L3SLAVE; skb->pkt_type = PACKET_HOST; goto out; } @@ -996,6 +997,7 @@ static struct sk_buff *vrf_ip_rcv(struct net_device *vrf_dev, { skb->dev = vrf_dev; skb->skb_iif = vrf_dev->ifindex; + IPCB(skb)->flags |= IPSKB_L3SLAVE; /* loopback traffic; do not push through packet taps again. * Reset pkt_type for upper layers to process skb diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index 7e9a789be5e0..ca1ad9ebbc92 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h @@ -123,12 +123,12 @@ struct inet6_skb_parm { }; #if defined(CONFIG_NET_L3_MASTER_DEV) -static inline bool skb_l3mdev_slave(__u16 flags) +static inline bool ipv6_l3mdev_skb(__u16 flags) { return flags & IP6SKB_L3SLAVE; } #else -static inline bool skb_l3mdev_slave(__u16 flags) +static inline bool ipv6_l3mdev_skb(__u16 flags) { return false; } @@ -139,11 +139,22 @@ static inline bool skb_l3mdev_slave(__u16 flags) static inline int inet6_iif(const struct sk_buff *skb) { - bool l3_slave = skb_l3mdev_slave(IP6CB(skb)->flags); + bool l3_slave = ipv6_l3mdev_skb(IP6CB(skb)->flags); return l3_slave ? 
skb->skb_iif : IP6CB(skb)->iif; } +/* can not be used in TCP layer after tcp_v6_fill_cb */ +static inline bool inet6_exact_dif_match(struct net *net, struct sk_buff *skb) +{ +#if defined(CONFIG_NET_L3_MASTER_DEV) + if (!net->ipv4.sysctl_tcp_l3mdev_accept && + ipv6_l3mdev_skb(IP6CB(skb)->flags)) + return true; +#endif + return false; +} + struct tcp6_request_sock { struct tcp_request_sock tcp6rsk_tcp; }; diff --git a/include/net/ip.h b/include/net/ip.h index bc43c0fcae12..c9d07988911e 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -38,7 +38,7 @@ struct sock; struct inet_skb_parm { int iif; struct ip_options opt; /* Compiled IP options */ - unsigned char flags; + u16 flags; #define IPSKB_FORWARDED BIT(0) #define IPSKB_XFRM_TUNNEL_SIZE BIT(1) @@ -48,10 +48,16 @@ struct inet_skb_parm { #define IPSKB_DOREDIRECT BIT(5) #define IPSKB_FRAG_PMTU BIT(6) #define IPSKB_FRAG_SEGS BIT(7) +#define IPSKB_L3SLAVE BIT(8) u16 frag_max_size; }; +static inline bool ipv4_l3mdev_skb(u16 flags) +{ + return !!(flags & IPSKB_L3SLAVE); +} + static inline unsigned int ip_hdrlen(const struct sk_buff *skb) { return ip_hdr(skb)->ihl * 4; diff --git a/include/net/tcp.h b/include/net/tcp.h index f83b7f220a65..5b82d4d94834 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -794,12 +794,23 @@ struct tcp_skb_cb { */ static inline int tcp_v6_iif(const struct sk_buff *skb) { - bool l3_slave = skb_l3mdev_slave(TCP_SKB_CB(skb)->header.h6.flags); + bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags); return l3_slave ? skb->skb_iif : TCP_SKB_CB(skb)->header.h6.iif; } #endif +/* TCP_SKB_CB reference means this can not be used from early demux */ +static inline bool inet_exact_dif_match(struct net *net, struct sk_buff *skb) +{ +#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV) + if (!net->ipv4.sysctl_tcp_l3mdev_accept && + ipv4_l3mdev_skb(TCP_SKB_CB(skb)->header.h4.flags)) + return true; +#endif + return false; +} + /* Due to TSO, an SKB can be composed of multiple actual * packets. To keep these tracked properly, we use this. 
*/ diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c index 77c20a489218..ca97835bfec4 100644 --- a/net/ipv4/inet_hashtables.c +++ b/net/ipv4/inet_hashtables.c @@ -25,6 +25,7 @@ #include #include #include +#include #include static u32 inet_ehashfn(const struct net *net, const __be32 laddr, @@ -172,7 +173,7 @@ EXPORT_SYMBOL_GPL(__inet_inherit_port); static inline int compute_score(struct sock *sk, struct net *net, const unsigned short hnum, const __be32 daddr, - const int dif) + const int dif, bool exact_dif) { int score = -1; struct inet_sock *inet = inet_sk(sk); @@ -186,7 +187,7 @@ static inline int compute_score(struct sock *sk, struct net *net, return -1; score += 4; } - if (sk->sk_bound_dev_if) { + if (sk->sk_bound_dev_if || exact_dif) { if (sk->sk_bound_dev_if != dif) return -1; score += 4; @@ -215,11 +216,12 @@ struct sock *__inet_lookup_listener(struct net *net, unsigned int hash = inet_lhashfn(net, hnum); struct inet_listen_hashbucket *ilb = &hashinfo->listening_hash[hash]; int score, hiscore = 0, matches = 0, reuseport = 0; + bool exact_dif = inet_exact_dif_match(net, skb); struct sock *sk, *result = NULL; u32 phash = 0; sk_for_each_rcu(sk, &ilb->head) { - score = compute_score(sk, net, hnum, daddr, dif); + score = compute_score(sk, net, hnum, daddr, dif, exact_dif); if (score > hiscore) { reuseport = sk->sk_reuseport; if (reuseport) { diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c index 00cf28ad4565..2fd0374a35b1 100644 --- a/net/ipv6/inet6_hashtables.c +++ b/net/ipv6/inet6_hashtables.c @@ -96,7 +96,7 @@ EXPORT_SYMBOL(__inet6_lookup_established); static inline int compute_score(struct sock *sk, struct net *net, const unsigned short hnum, const struct in6_addr *daddr, - const int dif) + const int dif, bool exact_dif) { int score = -1; @@ -109,7 +109,7 @@ static inline int compute_score(struct sock *sk, struct net *net, return -1; score++; } - if (sk->sk_bound_dev_if) { + if (sk->sk_bound_dev_if || exact_dif) { if (sk->sk_bound_dev_if != dif) return -1; score++; @@ -131,11 +131,12 @@ struct sock *inet6_lookup_listener(struct net *net, unsigned int hash = inet_lhashfn(net, hnum); struct inet_listen_hashbucket *ilb = &hashinfo->listening_hash[hash]; int score, hiscore = 0, matches = 0, reuseport = 0; + bool exact_dif = inet6_exact_dif_match(net, skb); struct sock *sk, *result = NULL; u32 phash = 0; sk_for_each(sk, &ilb->head) { - score = compute_score(sk, net, hnum, daddr, dif); + score = compute_score(sk, net, hnum, daddr, dif, exact_dif); if (score > hiscore) { reuseport = sk->sk_reuseport; if (reuseport) { -- cgit v1.2.3-59-g8ed1b From ca006f785fbfd7a5c901900bd3fe2b26e946a1ee Mon Sep 17 00:00:00 2001 From: Stefan Tauner Date: Thu, 6 Oct 2016 18:40:11 +0200 Subject: USB: serial: ftdi_sio: add support for Infineon TriBoard TC2X7 This adds support to ftdi_sio for the Infineon TriBoard TC2X7 engineering board for first-generation Aurix SoCs with Tricore CPUs. Mere addition of the device IDs does the job. 
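For illustration, a hedged sketch of what such an ID addition boils down to, using the VID/PID values quoted in the patch below but an invented table and macro prefix:

#include <linux/module.h>
#include <linux/usb.h>

#define DEMO_INFINEON_VID               0x058b
#define DEMO_TRIBOARD_TC2X7_PID         0x0043

static const struct usb_device_id demo_id_table[] = {
        /* bind interface 1 only, as the patch below does */
        { USB_DEVICE_INTERFACE_NUMBER(DEMO_INFINEON_VID, DEMO_TRIBOARD_TC2X7_PID, 1) },
        { }                             /* terminating entry */
};
MODULE_DEVICE_TABLE(usb, demo_id_table);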
Signed-off-by: Stefan Tauner Cc: stable Signed-off-by: Johan Hovold --- drivers/usb/serial/ftdi_sio.c | 3 ++- drivers/usb/serial/ftdi_sio_ids.h | 5 +++-- 2 files changed, 5 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index b2d767e743fc..0ff7f38d7800 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -986,7 +986,8 @@ static const struct usb_device_id id_table_combined[] = { /* ekey Devices */ { USB_DEVICE(FTDI_VID, FTDI_EKEY_CONV_USB_PID) }, /* Infineon Devices */ - { USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_PID, 1) }, + { USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_TC1798_PID, 1) }, + { USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_TC2X7_PID, 1) }, /* GE Healthcare devices */ { USB_DEVICE(GE_HEALTHCARE_VID, GE_HEALTHCARE_NEMO_TRACKER_PID) }, /* Active Research (Actisense) devices */ diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index f87a938cf005..21011c0a4c64 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h @@ -626,8 +626,9 @@ /* * Infineon Technologies */ -#define INFINEON_VID 0x058b -#define INFINEON_TRIBOARD_PID 0x0028 /* DAS JTAG TriBoard TC1798 V1.0 */ +#define INFINEON_VID 0x058b +#define INFINEON_TRIBOARD_TC1798_PID 0x0028 /* DAS JTAG TriBoard TC1798 V1.0 */ +#define INFINEON_TRIBOARD_TC2X7_PID 0x0043 /* DAS JTAG TriBoard TC2X7 V1.0 */ /* * Acton Research Corp. -- cgit v1.2.3-59-g8ed1b From 9a1a1f404be55b07aea64864f98d7306cc493360 Mon Sep 17 00:00:00 2001 From: Tai Nguyen Date: Thu, 13 Oct 2016 11:09:16 -0700 Subject: perf: xgene: Remove bogus IS_ERR() check In acpi_get_pmu_hw_inf we pass the address of a local variable to IS_ERR(), which doesn't make sense, as the pointer must be a real, valid pointer. This doesn't cause a functional problem, as IS_ERR() will evaluate as false, but the check is bogus and causes static checkers to complain. Remove the bogus check. The bug is reported by Dan Carpenter in [1] [1] https://www.spinics.net/lists/arm-kernel/msg535957.html Signed-off-by: Tai Nguyen Acked-by: Mark Rutland Signed-off-by: Will Deacon --- drivers/perf/xgene_pmu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/perf/xgene_pmu.c b/drivers/perf/xgene_pmu.c index c2ac7646b99f..a8ac4bcef2c0 100644 --- a/drivers/perf/xgene_pmu.c +++ b/drivers/perf/xgene_pmu.c @@ -1011,7 +1011,7 @@ xgene_pmu_dev_ctx *acpi_get_pmu_hw_inf(struct xgene_pmu *xgene_pmu, rc = acpi_dev_get_resources(adev, &resource_list, acpi_pmu_dev_add_resource, &res); acpi_dev_free_resource_list(&resource_list); - if (rc < 0 || IS_ERR(&res)) { + if (rc < 0) { dev_err(dev, "PMU type %d: No resource address found\n", type); goto err; } -- cgit v1.2.3-59-g8ed1b From 67b11e2ea704f99953f18585609df17b6e189e53 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Sun, 16 Oct 2016 23:54:03 +0100 Subject: cxgb4: fix memory leak of qe on error exit path A memory leak of qe occurs when t4_sched_queue_unbind fails, so fix this by free'ing qe on the error exit path. Signed-off-by: Colin Ian King Signed-off-by: David S. 
Miller --- drivers/net/ethernet/chelsio/cxgb4/sched.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/chelsio/cxgb4/sched.c b/drivers/net/ethernet/chelsio/cxgb4/sched.c index 539de764bbd3..cbd68a8fe2e4 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sched.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sched.c @@ -210,8 +210,10 @@ static int t4_sched_queue_bind(struct port_info *pi, struct ch_sched_queue *p) /* Unbind queue from any existing class */ err = t4_sched_queue_unbind(pi, p); - if (err) + if (err) { + t4_free_mem(qe); goto out; + } /* Bind queue to specified class */ memset(qe, 0, sizeof(*qe)); -- cgit v1.2.3-59-g8ed1b From fc971a2f2383950ddedb1e4896c5ead5d2357029 Mon Sep 17 00:00:00 2001 From: Javier Martinez Canillas Date: Mon, 17 Oct 2016 11:05:40 -0300 Subject: net: nps_enet: Fix module autoload If the driver is built as a module, autoload won't work because the module alias information is not filled. So user-space can't match the registered device with the corresponding module. Export the module alias information using the MODULE_DEVICE_TABLE() macro. Before this patch: $ modinfo drivers/net/ethernet/ezchip/nps_enet.ko | grep alias $ After this patch: $ modinfo drivers/net/ethernet/ezchip/nps_enet.ko | grep alias alias: of:N*T*Cezchip,nps-mgt-enetC* alias: of:N*T*Cezchip,nps-mgt-enet Signed-off-by: Javier Martinez Canillas Signed-off-by: David S. Miller --- drivers/net/ethernet/ezchip/nps_enet.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c index f928e6f79c89..223f35cc034c 100644 --- a/drivers/net/ethernet/ezchip/nps_enet.c +++ b/drivers/net/ethernet/ezchip/nps_enet.c @@ -669,6 +669,7 @@ static const struct of_device_id nps_enet_dt_ids[] = { { .compatible = "ezchip,nps-mgt-enet" }, { /* Sentinel */ } }; +MODULE_DEVICE_TABLE(of, nps_enet_dt_ids); static struct platform_driver nps_enet_driver = { .probe = nps_enet_probe, -- cgit v1.2.3-59-g8ed1b From 2fa3e317e6c89fc89e96a48308f6791f74fcea3c Mon Sep 17 00:00:00 2001 From: Javier Martinez Canillas Date: Mon, 17 Oct 2016 11:05:41 -0300 Subject: net: ethernet: nb8800: Fix module autoload If the driver is built as a module, autoload won't work because the module alias information is not filled. So user-space can't match the registered device with the corresponding module. Export the module alias information using the MODULE_DEVICE_TABLE() macro. Before this patch: $ $ modinfo drivers/net/ethernet/aurora/nb8800.ko | grep alias $ After this patch: $ modinfo drivers/net/ethernet/aurora/nb8800.ko | grep alias alias: of:N*T*Csigma,smp8734-ethernetC* alias: of:N*T*Csigma,smp8734-ethernet alias: of:N*T*Csigma,smp8642-ethernetC* alias: of:N*T*Csigma,smp8642-ethernet alias: of:N*T*Caurora,nb8800C* alias: of:N*T*Caurora,nb8800 Signed-off-by: Javier Martinez Canillas Acked-by: Mans Rullgard Signed-off-by: David S. 
Miller --- drivers/net/ethernet/aurora/nb8800.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/aurora/nb8800.c b/drivers/net/ethernet/aurora/nb8800.c index b047fd607b83..00c38bf151e6 100644 --- a/drivers/net/ethernet/aurora/nb8800.c +++ b/drivers/net/ethernet/aurora/nb8800.c @@ -1358,6 +1358,7 @@ static const struct of_device_id nb8800_dt_ids[] = { }, { } }; +MODULE_DEVICE_TABLE(of, nb8800_dt_ids); static int nb8800_probe(struct platform_device *pdev) { -- cgit v1.2.3-59-g8ed1b From a7deb924d33b5688b11e34e84c88f2591f933e3a Mon Sep 17 00:00:00 2001 From: Javier Martinez Canillas Date: Mon, 17 Oct 2016 11:05:42 -0300 Subject: net: hns: Fix hns_dsaf module autoload for OF registration If the driver is built as a module, autoload won't work because the module alias information is not filled. So user-space can't match the registered device with the corresponding module. Export the module alias information using the MODULE_DEVICE_TABLE() macro. Before this patch: $ modinfo drivers/net/ethernet/hisilicon/hns/hns_dsaf.ko | grep alias alias: acpi*:HISI00B2:* alias: acpi*:HISI00B1:* After this patch: $ modinfo drivers/net/ethernet/hisilicon/hns/hns_dsaf.ko | grep alias alias: acpi*:HISI00B2:* alias: acpi*:HISI00B1:* alias: of:N*T*Chisilicon,hns-dsaf-v2C* alias: of:N*T*Chisilicon,hns-dsaf-v2 alias: of:N*T*Chisilicon,hns-dsaf-v1C* alias: of:N*T*Chisilicon,hns-dsaf-v1 Signed-off-by: Javier Martinez Canillas Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c index 8e5b3f51b47b..66b99d2be3fe 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c @@ -2761,6 +2761,7 @@ static const struct of_device_id g_dsaf_match[] = { {.compatible = "hisilicon,hns-dsaf-v2"}, {} }; +MODULE_DEVICE_TABLE(of, g_dsaf_match); static struct platform_driver g_dsaf_driver = { .probe = hns_dsaf_probe, -- cgit v1.2.3-59-g8ed1b From 7097268509a7d6756d14ab422af7446f9bf53221 Mon Sep 17 00:00:00 2001 From: Javier Martinez Canillas Date: Mon, 17 Oct 2016 11:05:43 -0300 Subject: net: qcom/emac: Fix module autoload for OF registration If the driver is built as a module, autoload won't work because the module alias information is not filled. So user-space can't match the registered device with the corresponding module. Export the module alias information using the MODULE_DEVICE_TABLE() macro. Before this patch: $ modinfo drivers/net/ethernet/qualcomm/emac/qcom-emac.ko | grep alias alias: platform:qcom-emac After this patch: $ modinfo drivers/net/ethernet/qualcomm/emac/qcom-emac.ko | grep alias alias: platform:qcom-emac alias: of:N*T*Cqcom,fsm9900-emacC* alias: of:N*T*Cqcom,fsm9900-emac Signed-off-by: Javier Martinez Canillas Acked-by: Timur Tabi Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qualcomm/emac/emac.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c index 9bf3b2b82e95..4fede4b86538 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac.c @@ -575,6 +575,7 @@ static const struct of_device_id emac_dt_match[] = { }, {} }; +MODULE_DEVICE_TABLE(of, emac_dt_match); #if IS_ENABLED(CONFIG_ACPI) static const struct acpi_device_id emac_acpi_match[] = { -- cgit v1.2.3-59-g8ed1b From af40097e3eafc97f6f856c085360a0e696e1f319 Mon Sep 17 00:00:00 2001 From: Javier Martinez Canillas Date: Mon, 17 Oct 2016 11:05:44 -0300 Subject: net: hisilicon: Fix hns_mdio module autoload for OF registration If the driver is built as a module, autoload won't work because the module alias information is not filled. So user-space can't match the registered device with the corresponding module. Export the module alias information using the MODULE_DEVICE_TABLE() macro. Before this patch: $ modinfo drivers/net/ethernet/hisilicon//hns_mdio.ko | grep alias alias: platform:Hi-HNS_MDIO alias: acpi*:HISI0141:* After this patch: $ modinfo drivers/net/ethernet/hisilicon//hns_mdio.ko | grep alias alias: platform:Hi-HNS_MDIO alias: of:N*T*Chisilicon,hns-mdioC* alias: of:N*T*Chisilicon,hns-mdio alias: of:N*T*Chisilicon,mdioC* alias: of:N*T*Chisilicon,mdio alias: acpi*:HISI0141:* Signed-off-by: Javier Martinez Canillas Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns_mdio.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c index 33f4c483af0f..501eb2090ca6 100644 --- a/drivers/net/ethernet/hisilicon/hns_mdio.c +++ b/drivers/net/ethernet/hisilicon/hns_mdio.c @@ -563,6 +563,7 @@ static const struct of_device_id hns_mdio_match[] = { {.compatible = "hisilicon,hns-mdio"}, {} }; +MODULE_DEVICE_TABLE(of, hns_mdio_match); static const struct acpi_device_id hns_mdio_acpi_match[] = { { "HISI0141", 0 }, -- cgit v1.2.3-59-g8ed1b From 03eaae52537f4bbe2e565535f2aa38d80f92e994 Mon Sep 17 00:00:00 2001 From: Javier Martinez Canillas Date: Mon, 17 Oct 2016 11:05:45 -0300 Subject: net: dsa: b53: Fix module autoload If the driver is built as a module, autoload won't work because the module alias information is not filled. So user-space can't match the registered device with the corresponding module. Export the module alias information using the MODULE_DEVICE_TABLE() macro. Before this patch: $ modinfo drivers/net/dsa/b53/b53_mmap.ko | grep alias $ After this patch: $ modinfo drivers/net/dsa/b53/b53_mmap.ko | grep alias alias: of:N*T*Cbrcm,bcm63xx-switchC* alias: of:N*T*Cbrcm,bcm63xx-switch alias: of:N*T*Cbrcm,bcm6368-switchC* alias: of:N*T*Cbrcm,bcm6368-switch alias: of:N*T*Cbrcm,bcm6328-switchC* alias: of:N*T*Cbrcm,bcm6328-switch alias: of:N*T*Cbrcm,bcm3384-switchC* alias: of:N*T*Cbrcm,bcm3384-switch Signed-off-by: Javier Martinez Canillas Signed-off-by: David S. 
Miller --- drivers/net/dsa/b53/b53_mmap.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/net/dsa/b53/b53_mmap.c b/drivers/net/dsa/b53/b53_mmap.c index 76fb8552c9d9..ef63d24fef81 100644 --- a/drivers/net/dsa/b53/b53_mmap.c +++ b/drivers/net/dsa/b53/b53_mmap.c @@ -256,6 +256,7 @@ static const struct of_device_id b53_mmap_of_table[] = { { .compatible = "brcm,bcm63xx-switch" }, { /* sentinel */ }, }; +MODULE_DEVICE_TABLE(of, b53_mmap_of_table); static struct platform_driver b53_mmap_driver = { .probe = b53_mmap_probe, -- cgit v1.2.3-59-g8ed1b From 0822b43e4f08af3619a5dbf59f5036872c3ea323 Mon Sep 17 00:00:00 2001 From: Javier Martinez Canillas Date: Mon, 17 Oct 2016 11:05:46 -0300 Subject: net: dsa: bcm_sf2: Fix module autoload for OF registration If the driver is built as a module, autoload won't work because the module alias information is not filled. So user-space can't match the registered device with the corresponding module. Export the module alias information using the MODULE_DEVICE_TABLE() macro. Before this patch: $ modinfo drivers/net/dsa/bcm_sf2.ko | grep alias alias: platform:brcm-sf2 After this patch: $ modinfo drivers/net/dsa/bcm_sf2.ko | grep alias alias: platform:brcm-sf2 alias: of:N*T*Cbrcm,bcm7445-switch-v4.0C* alias: of:N*T*Cbrcm,bcm7445-switch-v4.0 Signed-off-by: Javier Martinez Canillas Signed-off-by: David S. Miller --- drivers/net/dsa/bcm_sf2.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index e218887f18b7..0427009bc924 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -1158,6 +1158,7 @@ static const struct of_device_id bcm_sf2_of_match[] = { { .compatible = "brcm,bcm7445-switch-v4.0" }, { /* sentinel */ }, }; +MODULE_DEVICE_TABLE(of, bcm_sf2_of_match); static struct platform_driver bcm_sf2_driver = { .probe = bcm_sf2_sw_probe, -- cgit v1.2.3-59-g8ed1b From 667f4bab81ea8357d260aa14bb6fb1a4834248d5 Mon Sep 17 00:00:00 2001 From: Chris Packham Date: Wed, 5 Oct 2016 10:40:54 +1300 Subject: hwmon: (adm9240) handle temperature readings below 0 Unlike the temperature thresholds the temperature data is a 9-bit signed value. This allows and additional 0.5 degrees of precision on the reading but makes handling negative values slightly harder. In order to have sign-extension applied correctly the 9-bit value is stored in the upper bits of a signed 16-bit value. When presenting this in sysfs the value is shifted and scaled appropriately. Signed-off-by: Chris Packham Signed-off-by: Guenter Roeck --- drivers/hwmon/adm9240.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/hwmon/adm9240.c b/drivers/hwmon/adm9240.c index 98114cef1e43..2fe1828bd10b 100644 --- a/drivers/hwmon/adm9240.c +++ b/drivers/hwmon/adm9240.c @@ -194,10 +194,10 @@ static struct adm9240_data *adm9240_update_device(struct device *dev) * 0.5'C per two measurement cycles thus ignore possible * but unlikely aliasing error on lsb reading. 
--Grant */ - data->temp = ((i2c_smbus_read_byte_data(client, + data->temp = (i2c_smbus_read_byte_data(client, ADM9240_REG_TEMP) << 8) | i2c_smbus_read_byte_data(client, - ADM9240_REG_TEMP_CONF)) / 128; + ADM9240_REG_TEMP_CONF); for (i = 0; i < 2; i++) { /* read fans */ data->fan[i] = i2c_smbus_read_byte_data(client, @@ -263,7 +263,7 @@ static ssize_t show_temp(struct device *dev, struct device_attribute *dummy, char *buf) { struct adm9240_data *data = adm9240_update_device(dev); - return sprintf(buf, "%d\n", data->temp * 500); /* 9-bit value */ + return sprintf(buf, "%d\n", data->temp / 128 * 500); /* 9-bit value */ } static ssize_t show_max(struct device *dev, struct device_attribute *devattr, -- cgit v1.2.3-59-g8ed1b From 94cdc5608b5561aeda80edda9c9223608a1da6fc Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Wed, 12 Oct 2016 09:24:52 +0300 Subject: hwmon: (max31790) potential ERR_PTR dereference We should only dereference "data" after we check if it is an error pointer. Fixes: 54187ff9d766 ('hwmon: (max31790) Convert to use new hwmon registration API') Signed-off-by: Dan Carpenter Signed-off-by: Guenter Roeck --- drivers/hwmon/max31790.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/hwmon/max31790.c b/drivers/hwmon/max31790.c index bef84e085973..c1b9275978f9 100644 --- a/drivers/hwmon/max31790.c +++ b/drivers/hwmon/max31790.c @@ -268,11 +268,13 @@ static int max31790_read_pwm(struct device *dev, u32 attr, int channel, long *val) { struct max31790_data *data = max31790_update_device(dev); - u8 fan_config = data->fan_config[channel]; + u8 fan_config; if (IS_ERR(data)) return PTR_ERR(data); + fan_config = data->fan_config[channel]; + switch (attr) { case hwmon_pwm_input: *val = data->pwm[channel] >> 8; -- cgit v1.2.3-59-g8ed1b From 4fa507992f0a1063d7326abaf705f9408548349e Mon Sep 17 00:00:00 2001 From: Jitendra Bhivare Date: Thu, 13 Oct 2016 12:08:48 +0530 Subject: scsi: libiscsi: Fix locking in __iscsi_conn_send_pdu The code at free_task label in __iscsi_conn_send_pdu can get executed from blk_timeout_work which takes queue_lock using spin_lock_irq. back_lock taken with spin_unlock_bh will cause WARN_ON_ONCE. The code gets executed either with bottom half or IRQ disabled hence using spin_lock/spin_unlock for back_lock is safe. Signed-off-by: Jitendra Bhivare Reviewed-by: Hannes Reinecke Reviewed-by: Chris Leech Signed-off-by: Martin K. Petersen --- drivers/scsi/libiscsi.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index c051694bfcb0..f9b6fba689ff 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c @@ -791,9 +791,9 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, free_task: /* regular RX path uses back_lock */ - spin_lock_bh(&session->back_lock); + spin_lock(&session->back_lock); __iscsi_put_task(task); - spin_unlock_bh(&session->back_lock); + spin_unlock(&session->back_lock); return NULL; } -- cgit v1.2.3-59-g8ed1b From 7d2c0d643244311d0ce04fde373cd371ad1f1cad Mon Sep 17 00:00:00 2001 From: Jitendra Bhivare Date: Thu, 13 Oct 2016 12:08:49 +0530 Subject: scsi: be2iscsi: Replace _bh with _irqsave/irqrestore [ 3843.132217] WARNING: CPU: 20 PID: 1227 at kernel/softirq.c:150 __local_bh_enable_ip+0x6b/0x90 [ 3843.142815] Modules linked in: ... [ 3843.294328] CPU: 20 PID: 1227 Comm: kworker/20:1H Tainted: G E 4.8.0-rc1+ #3 [ 3843.304944] Hardware name: Dell Inc. 
PowerEdge R720/0X6H47, BIOS 1.4.8 10/25/2012 [ 3843.314798] Workqueue: kblockd blk_timeout_work [ 3843.321350] 0000000000000086 00000000a32f4533 ffff8802216d7bd8 ffffffff8135c3cf [ 3843.331146] 0000000000000000 0000000000000000 ffff8802216d7c18 ffffffff8108d661 [ 3843.340918] 00000096216d7c50 0000000000000200 ffff8802d07cc828 ffff8801b3632550 [ 3843.350687] Call Trace: [ 3843.354866] [] dump_stack+0x63/0x84 [ 3843.362061] [] __warn+0xd1/0xf0 [ 3843.368851] [] warn_slowpath_null+0x1d/0x20 [ 3843.376791] [] __local_bh_enable_ip+0x6b/0x90 [ 3843.384903] [] _raw_spin_unlock_bh+0x1e/0x20 [ 3843.392940] [] beiscsi_alloc_pdu+0x2f0/0x6e0 [be2iscsi] [ 3843.402076] [] __iscsi_conn_send_pdu+0xf8/0x370 [libiscsi] [ 3843.411549] [] iscsi_send_nopout+0xbe/0x110 [libiscsi] [ 3843.420639] [] iscsi_eh_cmd_timed_out+0x29b/0x2b0 [libiscsi] [ 3843.430339] [] scsi_times_out+0x5e/0x250 [ 3843.438119] [] blk_rq_timed_out+0x1f/0x60 [ 3843.446009] [] blk_timeout_work+0xad/0x150 [ 3843.454010] [] process_one_work+0x152/0x400 [ 3843.462114] [] worker_thread+0x125/0x4b0 [ 3843.469961] [] ? rescuer_thread+0x380/0x380 [ 3843.478116] [] kthread+0xd8/0xf0 [ 3843.485212] [] ret_from_fork+0x1f/0x40 [ 3843.492908] [] ? kthread_park+0x60/0x60 [ 3843.500715] ---[ end trace 57ec0a1d8f0dd3a0 ]--- [ 3852.328667] NMI watchdog: Watchdog detected hard LOCKUP on cpu 1Kernel panic - not syncing: Hard LOCKUP blk_timeout_work takes queue_lock spin_lock with interrupts disabled before invoking iscsi_eh_cmd_timed_out. This causes a WARN_ON_ONCE in spin_unlock_bh for wrb_lock/io_sgl_lock/mgmt_sgl_lock. CPU was kept busy in lot of bottom half work with interrupts disabled thus causing hard lock up. Signed-off-by: Jitendra Bhivare Reviewed-by: Hannes Reinecke Reviewed-by: Chris Leech Signed-off-by: Martin K. 
Petersen --- drivers/scsi/be2iscsi/be_main.c | 37 +++++++++++++++++++++++-------------- 1 file changed, 23 insertions(+), 14 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c index 6a6906f847db..75bfa14ed5ba 100644 --- a/drivers/scsi/be2iscsi/be_main.c +++ b/drivers/scsi/be2iscsi/be_main.c @@ -900,8 +900,9 @@ void hwi_ring_cq_db(struct beiscsi_hba *phba, static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba) { struct sgl_handle *psgl_handle; + unsigned long flags; - spin_lock_bh(&phba->io_sgl_lock); + spin_lock_irqsave(&phba->io_sgl_lock, flags); if (phba->io_sgl_hndl_avbl) { beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO, "BM_%d : In alloc_io_sgl_handle," @@ -919,14 +920,16 @@ static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba) phba->io_sgl_alloc_index++; } else psgl_handle = NULL; - spin_unlock_bh(&phba->io_sgl_lock); + spin_unlock_irqrestore(&phba->io_sgl_lock, flags); return psgl_handle; } static void free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle) { - spin_lock_bh(&phba->io_sgl_lock); + unsigned long flags; + + spin_lock_irqsave(&phba->io_sgl_lock, flags); beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO, "BM_%d : In free_,io_sgl_free_index=%d\n", phba->io_sgl_free_index); @@ -941,7 +944,7 @@ free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle) "value there=%p\n", phba->io_sgl_free_index, phba->io_sgl_hndl_base [phba->io_sgl_free_index]); - spin_unlock_bh(&phba->io_sgl_lock); + spin_unlock_irqrestore(&phba->io_sgl_lock, flags); return; } phba->io_sgl_hndl_base[phba->io_sgl_free_index] = psgl_handle; @@ -950,7 +953,7 @@ free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle) phba->io_sgl_free_index = 0; else phba->io_sgl_free_index++; - spin_unlock_bh(&phba->io_sgl_lock); + spin_unlock_irqrestore(&phba->io_sgl_lock, flags); } static inline struct wrb_handle * @@ -958,15 +961,16 @@ beiscsi_get_wrb_handle(struct hwi_wrb_context *pwrb_context, unsigned int wrbs_per_cxn) { struct wrb_handle *pwrb_handle; + unsigned long flags; - spin_lock_bh(&pwrb_context->wrb_lock); + spin_lock_irqsave(&pwrb_context->wrb_lock, flags); pwrb_handle = pwrb_context->pwrb_handle_base[pwrb_context->alloc_index]; pwrb_context->wrb_handles_available--; if (pwrb_context->alloc_index == (wrbs_per_cxn - 1)) pwrb_context->alloc_index = 0; else pwrb_context->alloc_index++; - spin_unlock_bh(&pwrb_context->wrb_lock); + spin_unlock_irqrestore(&pwrb_context->wrb_lock, flags); if (pwrb_handle) memset(pwrb_handle->pwrb, 0, sizeof(*pwrb_handle->pwrb)); @@ -1001,14 +1005,16 @@ beiscsi_put_wrb_handle(struct hwi_wrb_context *pwrb_context, struct wrb_handle *pwrb_handle, unsigned int wrbs_per_cxn) { - spin_lock_bh(&pwrb_context->wrb_lock); + unsigned long flags; + + spin_lock_irqsave(&pwrb_context->wrb_lock, flags); pwrb_context->pwrb_handle_base[pwrb_context->free_index] = pwrb_handle; pwrb_context->wrb_handles_available++; if (pwrb_context->free_index == (wrbs_per_cxn - 1)) pwrb_context->free_index = 0; else pwrb_context->free_index++; - spin_unlock_bh(&pwrb_context->wrb_lock); + spin_unlock_irqrestore(&pwrb_context->wrb_lock, flags); } /** @@ -1037,8 +1043,9 @@ free_wrb_handle(struct beiscsi_hba *phba, struct hwi_wrb_context *pwrb_context, static struct sgl_handle *alloc_mgmt_sgl_handle(struct beiscsi_hba *phba) { struct sgl_handle *psgl_handle; + unsigned long flags; - spin_lock_bh(&phba->mgmt_sgl_lock); + spin_lock_irqsave(&phba->mgmt_sgl_lock, flags); if 
(phba->eh_sgl_hndl_avbl) { psgl_handle = phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index]; phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index] = NULL; @@ -1056,14 +1063,16 @@ static struct sgl_handle *alloc_mgmt_sgl_handle(struct beiscsi_hba *phba) phba->eh_sgl_alloc_index++; } else psgl_handle = NULL; - spin_unlock_bh(&phba->mgmt_sgl_lock); + spin_unlock_irqrestore(&phba->mgmt_sgl_lock, flags); return psgl_handle; } void free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle) { - spin_lock_bh(&phba->mgmt_sgl_lock); + unsigned long flags; + + spin_lock_irqsave(&phba->mgmt_sgl_lock, flags); beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, "BM_%d : In free_mgmt_sgl_handle," "eh_sgl_free_index=%d\n", @@ -1078,7 +1087,7 @@ free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle) "BM_%d : Double Free in eh SGL ," "eh_sgl_free_index=%d\n", phba->eh_sgl_free_index); - spin_unlock_bh(&phba->mgmt_sgl_lock); + spin_unlock_irqrestore(&phba->mgmt_sgl_lock, flags); return; } phba->eh_sgl_hndl_base[phba->eh_sgl_free_index] = psgl_handle; @@ -1088,7 +1097,7 @@ free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle) phba->eh_sgl_free_index = 0; else phba->eh_sgl_free_index++; - spin_unlock_bh(&phba->mgmt_sgl_lock); + spin_unlock_irqrestore(&phba->mgmt_sgl_lock, flags); } static void -- cgit v1.2.3-59-g8ed1b From 77f18a87186a87cab2a027335758a7244896084c Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 11 Oct 2016 11:23:23 +0200 Subject: scsi: NCR5380: no longer mark irq probing as __init The g_NCR5380 has been converted to more regular probing, which means its probe function can now be invoked after the __init section is discarded, as pointed out by this kbuild warning: WARNING: drivers/scsi/built-in.o(.text+0x3a105): Section mismatch in reference from the function generic_NCR5380_isa_match() to the function .init.text:probe_intr() WARNING: drivers/scsi/built-in.o(.text+0x3a145): Section mismatch in reference from the function generic_NCR5380_isa_match() to the variable .init.data:probe_irq To make sure this works correctly in all cases, let's remove the __init and __initdata annotations. Fixes: a8cfbcaec0c1 ("scsi: g_NCR5380: Stop using scsi_module.c") Signed-off-by: Arnd Bergmann Acked-by: Finn Thain Signed-off-by: Martin K. Petersen --- drivers/scsi/NCR5380.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c index db2739079cbb..790babc5ef66 100644 --- a/drivers/scsi/NCR5380.c +++ b/drivers/scsi/NCR5380.c @@ -353,7 +353,7 @@ static void NCR5380_print_phase(struct Scsi_Host *instance) #endif -static int probe_irq __initdata; +static int probe_irq; /** * probe_intr - helper for IRQ autoprobe @@ -365,7 +365,7 @@ static int probe_irq __initdata; * used by the IRQ probe code. */ -static irqreturn_t __init probe_intr(int irq, void *dev_id) +static irqreturn_t probe_intr(int irq, void *dev_id) { probe_irq = irq; return IRQ_HANDLED; @@ -380,7 +380,7 @@ static irqreturn_t __init probe_intr(int irq, void *dev_id) * and then looking to see what interrupt actually turned up. 
*/ -static int __init __maybe_unused NCR5380_probe_irq(struct Scsi_Host *instance, +static int __maybe_unused NCR5380_probe_irq(struct Scsi_Host *instance, int possible) { struct NCR5380_hostdata *hostdata = shost_priv(instance); -- cgit v1.2.3-59-g8ed1b From b052b07c39d593c9954a84d5bbe1563999483f38 Mon Sep 17 00:00:00 2001 From: Heinz Mauelshagen Date: Mon, 17 Oct 2016 21:20:07 +0200 Subject: dm raid: fix activation of existing raid4/10 devices dm-raid 1.9.0 fails to activate existing RAID4/10 devices that have the old superblock format (which does not have takeover/reshaping support that was added via commit 33e53f06850f). Fix validation path for old superblocks by reverting to the old raid4 layout and basing checks on mddev->new_{level,layout,...} members in super_init_validation(). Cc: stable@vger.kernel.org # 4.8 Signed-off-by: Heinz Mauelshagen Signed-off-by: Mike Snitzer --- Documentation/device-mapper/dm-raid.txt | 1 + drivers/md/dm-raid.c | 12 +++++++----- 2 files changed, 8 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/Documentation/device-mapper/dm-raid.txt b/Documentation/device-mapper/dm-raid.txt index e5b6497116f4..c75b64a85859 100644 --- a/Documentation/device-mapper/dm-raid.txt +++ b/Documentation/device-mapper/dm-raid.txt @@ -309,3 +309,4 @@ Version History with a reshape in progress. 1.9.0 Add support for RAID level takeover/reshape/region size and set size reduction. +1.9.1 Fix activation of existing RAID 4/10 mapped devices diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index 2a3970097991..6d53810963f7 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c @@ -266,7 +266,7 @@ static struct raid_type { {"raid10_offset", "raid10 offset (striped mirrors)", 0, 2, 10, ALGORITHM_RAID10_OFFSET}, {"raid10_near", "raid10 near (striped mirrors)", 0, 2, 10, ALGORITHM_RAID10_NEAR}, {"raid10", "raid10 (striped mirrors)", 0, 2, 10, ALGORITHM_RAID10_DEFAULT}, - {"raid4", "raid4 (dedicated last parity disk)", 1, 2, 4, ALGORITHM_PARITY_N}, /* raid4 layout = raid5_n */ + {"raid4", "raid4 (dedicated first parity disk)", 1, 2, 5, ALGORITHM_PARITY_0}, /* raid4 layout = raid5_0 */ {"raid5_n", "raid5 (dedicated last parity disk)", 1, 2, 5, ALGORITHM_PARITY_N}, {"raid5_ls", "raid5 (left symmetric)", 1, 2, 5, ALGORITHM_LEFT_SYMMETRIC}, {"raid5_rs", "raid5 (right symmetric)", 1, 2, 5, ALGORITHM_RIGHT_SYMMETRIC}, @@ -2087,11 +2087,11 @@ static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev) /* * No takeover/reshaping, because we don't have the extended v1.9.0 metadata */ - if (le32_to_cpu(sb->level) != mddev->level) { + if (le32_to_cpu(sb->level) != mddev->new_level) { DMERR("Reshaping/takeover raid sets not yet supported. (raid level/stripes/size change)"); return -EINVAL; } - if (le32_to_cpu(sb->layout) != mddev->layout) { + if (le32_to_cpu(sb->layout) != mddev->new_layout) { DMERR("Reshaping raid sets not yet supported. (raid layout change)"); DMERR(" 0x%X vs 0x%X", le32_to_cpu(sb->layout), mddev->layout); DMERR(" Old layout: %s w/ %d copies", @@ -2102,7 +2102,7 @@ static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev) raid10_md_layout_to_copies(mddev->layout)); return -EINVAL; } - if (le32_to_cpu(sb->stripe_sectors) != mddev->chunk_sectors) { + if (le32_to_cpu(sb->stripe_sectors) != mddev->new_chunk_sectors) { DMERR("Reshaping raid sets not yet supported. 
(stripe sectors change)"); return -EINVAL; } @@ -2115,6 +2115,8 @@ static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev) return -EINVAL; } + DMINFO("Discovered old metadata format; upgrading to extended metadata format"); + /* Table line is checked vs. authoritative superblock */ rs_set_new(rs); } @@ -3647,7 +3649,7 @@ static void raid_resume(struct dm_target *ti) static struct target_type raid_target = { .name = "raid", - .version = {1, 9, 0}, + .version = {1, 9, 1}, .module = THIS_MODULE, .ctr = raid_ctr, .dtr = raid_dtr, -- cgit v1.2.3-59-g8ed1b From 5fac7e8405a951f536dfcea09c6f6adb904a08a8 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Tue, 4 Oct 2016 13:56:19 +0200 Subject: bus: qcom-ebi2: depend on ARCH_QCOM or COMPILE_TEST This hides the option for people who do not want their Kconfig vision cluttered (i.e. x86) and enables compile testing apart from the supported main arch. Cc: Stephen Boyd Cc: Arnd Bergmann Signed-off-by: Linus Walleij Signed-off-by: Olof Johansson --- drivers/bus/Kconfig | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig index 7010dcac9328..78751057164a 100644 --- a/drivers/bus/Kconfig +++ b/drivers/bus/Kconfig @@ -111,6 +111,7 @@ config OMAP_OCP2SCP config QCOM_EBI2 bool "Qualcomm External Bus Interface 2 (EBI2)" depends on HAS_IOMEM + depends on ARCH_QCOM || COMPILE_TEST help Say y here to enable support for the Qualcomm External Bus Interface 2, which can be used to connect things like NAND Flash, -- cgit v1.2.3-59-g8ed1b From 8236d9ac4cf2e51e4355c411612cfbaec8d31942 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Mon, 17 Oct 2016 00:11:14 +0900 Subject: clk: uniphier: add system clock support for sLD3 SoC I do not know why, but I missed to add this compatible string in the initial commit of this driver. Signed-off-by: Masahiro Yamada Signed-off-by: Stephen Boyd --- drivers/clk/uniphier/clk-uniphier-core.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers') diff --git a/drivers/clk/uniphier/clk-uniphier-core.c b/drivers/clk/uniphier/clk-uniphier-core.c index 5ffb898d0839..f4e0f6be5f33 100644 --- a/drivers/clk/uniphier/clk-uniphier-core.c +++ b/drivers/clk/uniphier/clk-uniphier-core.c @@ -110,6 +110,10 @@ static int uniphier_clk_remove(struct platform_device *pdev) static const struct of_device_id uniphier_clk_match[] = { /* System clock */ + { + .compatible = "socionext,uniphier-sld3-clock", + .data = uniphier_sld3_sys_clk_data, + }, { .compatible = "socionext,uniphier-ld4-clock", .data = uniphier_ld4_sys_clk_data, -- cgit v1.2.3-59-g8ed1b From c0ce317f0c8863d309e7cfb6f6c0fe86e38c6d86 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Mon, 17 Oct 2016 00:25:55 +0900 Subject: clk: uniphier: fix type of variable passed to regmap_read() The 3rd argument of regmap_read() takes a pointer to unsigned int. This driver is saved just because u32 happens to be typedef'ed as unsigned int, but we should not rely on that fact. Change the variable type just in case. 
Signed-off-by: Masahiro Yamada Signed-off-by: Stephen Boyd --- drivers/clk/uniphier/clk-uniphier-mux.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/clk/uniphier/clk-uniphier-mux.c b/drivers/clk/uniphier/clk-uniphier-mux.c index 15a2f2cbe0d9..2c243a894f3b 100644 --- a/drivers/clk/uniphier/clk-uniphier-mux.c +++ b/drivers/clk/uniphier/clk-uniphier-mux.c @@ -42,7 +42,7 @@ static u8 uniphier_clk_mux_get_parent(struct clk_hw *hw) struct uniphier_clk_mux *mux = to_uniphier_clk_mux(hw); int num_parents = clk_hw_get_num_parents(hw); int ret; - u32 val; + unsigned int val; u8 i; ret = regmap_read(mux->regmap, mux->reg, &val); -- cgit v1.2.3-59-g8ed1b From 34b89b2967f284937be6759936ef3dc4d3aff2d0 Mon Sep 17 00:00:00 2001 From: Javier Martinez Canillas Date: Sun, 16 Oct 2016 10:45:07 -0300 Subject: clk: samsung: clk-exynos-audss: Fix module autoload If the driver is built as a module, autoload won't work because the module alias information is not filled. So user-space can't match the registered device with the corresponding module. Export the module alias information using the MODULE_DEVICE_TABLE() macro. Before this patch: $ modinfo drivers/clk/samsung/clk-exynos-audss.ko | grep alias alias: platform:exynos-audss-clk After this patch: $ modinfo drivers/clk/samsung/clk-exynos-audss.ko | grep alias alias: platform:exynos-audss-clk alias: of:N*T*Csamsung,exynos5420-audss-clockC* alias: of:N*T*Csamsung,exynos5420-audss-clock alias: of:N*T*Csamsung,exynos5410-audss-clockC* alias: of:N*T*Csamsung,exynos5410-audss-clock alias: of:N*T*Csamsung,exynos5250-audss-clockC* alias: of:N*T*Csamsung,exynos5250-audss-clock alias: of:N*T*Csamsung,exynos4210-audss-clockC* alias: of:N*T*Csamsung,exynos4210-audss-clock Fixes: 4d252fd5719b ("clk: samsung: Allow modular build of the Audio Subsystem CLKCON driver") Signed-off-by: Javier Martinez Canillas Reviewed-by: Krzysztof Kozlowski Tested-by: Krzysztof Kozlowski Signed-off-by: Stephen Boyd --- drivers/clk/samsung/clk-exynos-audss.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/clk/samsung/clk-exynos-audss.c b/drivers/clk/samsung/clk-exynos-audss.c index 51d152f735cc..17e68a724945 100644 --- a/drivers/clk/samsung/clk-exynos-audss.c +++ b/drivers/clk/samsung/clk-exynos-audss.c @@ -106,6 +106,7 @@ static const struct of_device_id exynos_audss_clk_of_match[] = { }, { }, }; +MODULE_DEVICE_TABLE(of, exynos_audss_clk_of_match); static void exynos_audss_clk_teardown(void) { -- cgit v1.2.3-59-g8ed1b From 234d511d8c158d62f73f1a818eb4dd494a13a6e3 Mon Sep 17 00:00:00 2001 From: Jean Delvare Date: Fri, 14 Oct 2016 14:44:13 +0200 Subject: clk: mediatek: Add hardware dependency Only propose the mediatek clock drivers on this platform, unless build-testing. 
Signed-off-by: Jean Delvare Cc: Shunli Wang Cc: James Liao Cc: Erin Lo Cc: Matthias Brugger Cc: Michael Turquette Reviewed-by: Matthias Brugger Signed-off-by: Stephen Boyd --- drivers/clk/mediatek/Kconfig | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers') diff --git a/drivers/clk/mediatek/Kconfig b/drivers/clk/mediatek/Kconfig index 380c372d528e..f042bd2a6a99 100644 --- a/drivers/clk/mediatek/Kconfig +++ b/drivers/clk/mediatek/Kconfig @@ -8,6 +8,7 @@ config COMMON_CLK_MEDIATEK config COMMON_CLK_MT8135 bool "Clock driver for Mediatek MT8135" + depends on ARCH_MEDIATEK || COMPILE_TEST select COMMON_CLK_MEDIATEK default ARCH_MEDIATEK ---help--- @@ -15,6 +16,7 @@ config COMMON_CLK_MT8135 config COMMON_CLK_MT8173 bool "Clock driver for Mediatek MT8173" + depends on ARCH_MEDIATEK || COMPILE_TEST select COMMON_CLK_MEDIATEK default ARCH_MEDIATEK ---help--- -- cgit v1.2.3-59-g8ed1b From 981e1bea55e56abdb16505502e4a69ff868e87d3 Mon Sep 17 00:00:00 2001 From: Gregory CLEMENT Date: Thu, 29 Sep 2016 16:28:55 +0200 Subject: clk: mvebu: armada-37xx-periph: Fix the clock provider registration While trying using a peripheral clock on a driver, I saw that the clock pointer returned by the provider was NULL. The problem was a missing indirection. It was the pointer stored in the hws array which needed to be updated not the value it contains. Signed-off-by: Gregory CLEMENT Fixes: 8ca4746a78ab ("clk: mvebu: Add the peripheral clock driver for Armada 3700") Signed-off-by: Stephen Boyd --- drivers/clk/mvebu/armada-37xx-periph.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/clk/mvebu/armada-37xx-periph.c b/drivers/clk/mvebu/armada-37xx-periph.c index 45905fc0d75b..d5dfbad4ceab 100644 --- a/drivers/clk/mvebu/armada-37xx-periph.c +++ b/drivers/clk/mvebu/armada-37xx-periph.c @@ -305,7 +305,7 @@ static const struct of_device_id armada_3700_periph_clock_of_match[] = { }; static int armada_3700_add_composite_clk(const struct clk_periph_data *data, void __iomem *reg, spinlock_t *lock, - struct device *dev, struct clk_hw *hw) + struct device *dev, struct clk_hw **hw) { const struct clk_ops *mux_ops = NULL, *gate_ops = NULL, *rate_ops = NULL; @@ -353,13 +353,13 @@ static int armada_3700_add_composite_clk(const struct clk_periph_data *data, } } - hw = clk_hw_register_composite(dev, data->name, data->parent_names, + *hw = clk_hw_register_composite(dev, data->name, data->parent_names, data->num_parents, mux_hw, mux_ops, rate_hw, rate_ops, gate_hw, gate_ops, CLK_IGNORE_UNUSED); - if (IS_ERR(hw)) - return PTR_ERR(hw); + if (IS_ERR(*hw)) + return PTR_ERR(*hw); return 0; } @@ -400,7 +400,7 @@ static int armada_3700_periph_clock_probe(struct platform_device *pdev) spin_lock_init(&driver_data->lock); for (i = 0; i < num_periph; i++) { - struct clk_hw *hw = driver_data->hw_data->hws[i]; + struct clk_hw **hw = &driver_data->hw_data->hws[i]; if (armada_3700_add_composite_clk(&data[i], reg, &driver_data->lock, dev, hw)) -- cgit v1.2.3-59-g8ed1b From 1c7032258d568f9a7aeb4c541786699d9a219a2a Mon Sep 17 00:00:00 2001 From: Javier Martinez Canillas Date: Thu, 6 Oct 2016 11:59:59 -0300 Subject: clk: max77686: fix number of clocks setup for clk_hw based registration The commit 9b4cac33adc7 ("clk: max77686: Migrate to clk_hw based OF and registration APIs") converted the driver to use the new provider API to register clocks using clk_hw. 
But unfortunately, in the conversion it missed to set the num_clks value which lead to the following error when trying to register a clk provider: [ 1.963782] of_clk_max77686_get: invalid index 0 [ 1.967460] ERROR: could not get clock /rtc@10070000:rtc_src(1) [ 1.973638] s3c-rtc 10070000.rtc: failed to find rtc source clock Fix it by correctly set the max77686_clk_driver_data num_clks member. Fixes: 9b4cac33adc7 ("clk: max77686: Migrate to clk_hw based OF and registration APIs") Reported-by: Markus Reichl Suggested-by: Tobias Jakobi Signed-off-by: Javier Martinez Canillas Tested-by: Markus Reichl Reviewed-by: Chanwoo Choi Reviewed-by: Krzysztof Kozlowski Signed-off-by: Stephen Boyd --- drivers/clk/clk-max77686.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/clk/clk-max77686.c b/drivers/clk/clk-max77686.c index b637f5979023..eb953d3b0b69 100644 --- a/drivers/clk/clk-max77686.c +++ b/drivers/clk/clk-max77686.c @@ -216,6 +216,7 @@ static int max77686_clk_probe(struct platform_device *pdev) return -EINVAL; } + drv_data->num_clks = num_clks; drv_data->max_clk_data = devm_kcalloc(dev, num_clks, sizeof(*drv_data->max_clk_data), GFP_KERNEL); -- cgit v1.2.3-59-g8ed1b From c4e634ce412d97f0e61223b2a5b3f8f9600cd4dc Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Fri, 30 Sep 2016 10:07:27 -0700 Subject: clk: bcm2835: Clamp the PLL's requested rate to the hardware limits. Fixes setting low-resolution video modes on HDMI. Now the PLLH_PIX divider adjusts itself until the PLLH is within bounds. Signed-off-by: Eric Anholt Signed-off-by: Stephen Boyd --- drivers/clk/bcm/clk-bcm2835.c | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c index b68bf573dcfb..8c7763fd9efc 100644 --- a/drivers/clk/bcm/clk-bcm2835.c +++ b/drivers/clk/bcm/clk-bcm2835.c @@ -502,8 +502,12 @@ static long bcm2835_pll_rate_from_divisors(unsigned long parent_rate, static long bcm2835_pll_round_rate(struct clk_hw *hw, unsigned long rate, unsigned long *parent_rate) { + struct bcm2835_pll *pll = container_of(hw, struct bcm2835_pll, hw); + const struct bcm2835_pll_data *data = pll->data; u32 ndiv, fdiv; + rate = clamp(rate, data->min_rate, data->max_rate); + bcm2835_pll_choose_ndiv_and_fdiv(rate, *parent_rate, &ndiv, &fdiv); return bcm2835_pll_rate_from_divisors(*parent_rate, ndiv, fdiv, 1); @@ -608,13 +612,6 @@ static int bcm2835_pll_set_rate(struct clk_hw *hw, u32 ana[4]; int i; - if (rate < data->min_rate || rate > data->max_rate) { - dev_err(cprman->dev, "%s: rate out of spec: %lu vs (%lu, %lu)\n", - clk_hw_get_name(hw), rate, - data->min_rate, data->max_rate); - return -EINVAL; - } - if (rate > data->max_fb_rate) { use_fb_prediv = true; rate /= 2; -- cgit v1.2.3-59-g8ed1b From 4aa6c99d31c0cc471b7f243f5d314391a1abcaf3 Mon Sep 17 00:00:00 2001 From: Gregory CLEMENT Date: Fri, 30 Sep 2016 10:33:59 +0200 Subject: clk: mvebu: armada-37xx-periph: Fix the clock gate flag For the gate part of the peripheral clock setting the bit disables the clock and clearing it enables the clock. This is not the default behavior of clk_gate component, so we need to use the CLK_GATE_SET_TO_DISABLE flag. 
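For reference, the common clock gate code decides what to write based on this flag roughly as follows; this is a simplified sketch of the logic in drivers/clk/clk-gate.c with locking and the HIWORD_MASK variant omitted, not the in-tree function itself.

#include <linux/bitops.h>
#include <linux/clk-provider.h>
#include <linux/io.h>

/* Sketch: with CLK_GATE_SET_TO_DISABLE, enabling the clock clears the bit. */
static void example_gate_endisable(struct clk_gate *gate, bool enable)
{
	/* "set" is the value that will be written to the bit field. */
	bool set = gate->flags & CLK_GATE_SET_TO_DISABLE;
	u32 reg;

	if (enable)
		set = !set;

	reg = readl(gate->reg);
	if (set)
		reg |= BIT(gate->bit_idx);
	else
		reg &= ~BIT(gate->bit_idx);
	writel(reg, gate->reg);
}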
Signed-off-by: Gregory CLEMENT Fixes: 8ca4746a78ab ("clk: mvebu: Add the peripheral clock driver for Armada 3700") Signed-off-by: Stephen Boyd --- drivers/clk/mvebu/armada-37xx-periph.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/clk/mvebu/armada-37xx-periph.c b/drivers/clk/mvebu/armada-37xx-periph.c index d5dfbad4ceab..cecb0fdfaef6 100644 --- a/drivers/clk/mvebu/armada-37xx-periph.c +++ b/drivers/clk/mvebu/armada-37xx-periph.c @@ -329,6 +329,7 @@ static int armada_3700_add_composite_clk(const struct clk_periph_data *data, gate->lock = lock; gate_ops = gate_hw->init->ops; gate->reg = reg + (u64)gate->reg; + gate->flags = CLK_GATE_SET_TO_DISABLE; } if (data->rate_hw) { -- cgit v1.2.3-59-g8ed1b From d3397484bb5b8534289a630c1a78500ff4f2fbf4 Mon Sep 17 00:00:00 2001 From: Shawn Guo Date: Sat, 8 Oct 2016 21:38:12 +0800 Subject: clk: hi6220: use CLK_OF_DECLARE_DRIVER for sysctrl and mediactrl clock init The hi6220-sysctrl and hi6220-mediactrl are not only clock provider but also reset controller. It worked fine that single sysctrl/mediactrl device node in DT can be used to initialize clock driver and populate platform device for reset controller. But it stops working after commit 989eafd0b609 ("clk: core: Avoid double initialization of clocks") gets merged. The commit sets flag OF_POPULATED during clock initialization to skip the platform device populating for the same device node. On hi6220, it effectively makes hi6220-sysctrl reset driver not probe any more. The patch changes hi6220 sysctrl and mediactrl clock init macro from CLK_OF_DECLARE to CLK_OF_DECLARE_DRIVER, so that the reset driver using the same hardware block can continue working. Signed-off-by: Shawn Guo Tested-by: John Stultz Signed-off-by: Stephen Boyd --- drivers/clk/hisilicon/clk-hi6220.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/clk/hisilicon/clk-hi6220.c b/drivers/clk/hisilicon/clk-hi6220.c index fe364e63f8de..c0e8e1f196aa 100644 --- a/drivers/clk/hisilicon/clk-hi6220.c +++ b/drivers/clk/hisilicon/clk-hi6220.c @@ -195,7 +195,7 @@ static void __init hi6220_clk_sys_init(struct device_node *np) hi6220_clk_register_divider(hi6220_div_clks_sys, ARRAY_SIZE(hi6220_div_clks_sys), clk_data); } -CLK_OF_DECLARE(hi6220_clk_sys, "hisilicon,hi6220-sysctrl", hi6220_clk_sys_init); +CLK_OF_DECLARE_DRIVER(hi6220_clk_sys, "hisilicon,hi6220-sysctrl", hi6220_clk_sys_init); /* clocks in media controller */ @@ -252,7 +252,7 @@ static void __init hi6220_clk_media_init(struct device_node *np) hi6220_clk_register_divider(hi6220_div_clks_media, ARRAY_SIZE(hi6220_div_clks_media), clk_data); } -CLK_OF_DECLARE(hi6220_clk_media, "hisilicon,hi6220-mediactrl", hi6220_clk_media_init); +CLK_OF_DECLARE_DRIVER(hi6220_clk_media, "hisilicon,hi6220-mediactrl", hi6220_clk_media_init); /* clocks in pmctrl */ -- cgit v1.2.3-59-g8ed1b From 2317eacd9cf9dc1beee74ddb453bdd7552a64a27 Mon Sep 17 00:00:00 2001 From: John Youn Date: Mon, 17 Oct 2016 17:36:23 -0700 Subject: Revert "usb: dwc2: gadget: change variable name to more meaningful" This reverts commit ba48eab8866c ("usb: dwc2: gadget: change variable name to more meaningful"). This is needed to cleanly revert commit aa381a7259c3 ("usb: dwc2: gadget: fix TX FIFO size and address initialization") which may cause regressions on some platforms. 
Signed-off-by: John Youn Cc: Robert Baldyga Cc: Stefan Wahren Signed-off-by: Felipe Balbi --- drivers/usb/dwc2/gadget.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c index 4cd6403a7566..aac4af3ea68f 100644 --- a/drivers/usb/dwc2/gadget.c +++ b/drivers/usb/dwc2/gadget.c @@ -186,7 +186,7 @@ static void dwc2_hsotg_ctrl_epint(struct dwc2_hsotg *hsotg, */ static void dwc2_hsotg_init_fifo(struct dwc2_hsotg *hsotg) { - unsigned int fifo; + unsigned int ep; unsigned int addr; int timeout; u32 dptxfsizn; @@ -217,8 +217,8 @@ static void dwc2_hsotg_init_fifo(struct dwc2_hsotg *hsotg) * them to endpoints dynamically according to maxpacket size value of * given endpoint. */ - for (fifo = 1; fifo < MAX_EPS_CHANNELS; fifo++) { - dptxfsizn = dwc2_readl(hsotg->regs + DPTXFSIZN(fifo)); + for (ep = 1; ep < MAX_EPS_CHANNELS; ep++) { + dptxfsizn = dwc2_readl(hsotg->regs + DPTXFSIZN(ep)); val = (dptxfsizn & FIFOSIZE_DEPTH_MASK) | addr; addr += dptxfsizn >> FIFOSIZE_DEPTH_SHIFT; @@ -226,7 +226,7 @@ static void dwc2_hsotg_init_fifo(struct dwc2_hsotg *hsotg) if (addr > hsotg->fifo_mem) break; - dwc2_writel(val, hsotg->regs + DPTXFSIZN(fifo)); + dwc2_writel(val, hsotg->regs + DPTXFSIZN(ep)); } /* -- cgit v1.2.3-59-g8ed1b From 3fa9538539ac737096c81f3315a14670b1609092 Mon Sep 17 00:00:00 2001 From: John Youn Date: Mon, 17 Oct 2016 17:36:25 -0700 Subject: Revert "usb: dwc2: gadget: fix TX FIFO size and address initialization" This reverts commit aa381a7259c3 ("usb: dwc2: gadget: fix TX FIFO size and address initialization"). The original commit removed the FIFO size programming per endpoint. The DPTXFSIZn register is also used for DIEPTXFn and the SIZE field is r/w in dedicated fifo mode. So it isn't appropriate to simply remove this initialization as it might break existing behavior. Also, some cores might not have enough fifo space to handle the programming method used in the reverted patch, resulting in fifo initialization failure. Signed-off-by: John Youn Cc: Robert Baldyga Cc: Stefan Wahren Signed-off-by: Felipe Balbi --- drivers/usb/dwc2/core.h | 7 +++++++ drivers/usb/dwc2/gadget.c | 47 +++++++++++++++++++++++++++++++++++++++-------- 2 files changed, 46 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h index aad4107ef927..2a21a0414b1d 100644 --- a/drivers/usb/dwc2/core.h +++ b/drivers/usb/dwc2/core.h @@ -259,6 +259,13 @@ enum dwc2_lx_state { DWC2_L3, /* Off state */ }; +/* + * Gadget periodic tx fifo sizes as used by legacy driver + * EP0 is not included + */ +#define DWC2_G_P_LEGACY_TX_FIFO_SIZE {256, 256, 256, 256, 768, 768, 768, \ + 768, 0, 0, 0, 0, 0, 0, 0} + /* Gadget ep0 states */ enum dwc2_ep0_state { DWC2_EP0_SETUP, diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c index aac4af3ea68f..24fbebc9b409 100644 --- a/drivers/usb/dwc2/gadget.c +++ b/drivers/usb/dwc2/gadget.c @@ -189,7 +189,6 @@ static void dwc2_hsotg_init_fifo(struct dwc2_hsotg *hsotg) unsigned int ep; unsigned int addr; int timeout; - u32 dptxfsizn; u32 val; /* Reset fifo map if not correctly cleared during previous session */ @@ -218,13 +217,13 @@ static void dwc2_hsotg_init_fifo(struct dwc2_hsotg *hsotg) * given endpoint. 
*/ for (ep = 1; ep < MAX_EPS_CHANNELS; ep++) { - dptxfsizn = dwc2_readl(hsotg->regs + DPTXFSIZN(ep)); - - val = (dptxfsizn & FIFOSIZE_DEPTH_MASK) | addr; - addr += dptxfsizn >> FIFOSIZE_DEPTH_SHIFT; - - if (addr > hsotg->fifo_mem) - break; + if (!hsotg->g_tx_fifo_sz[ep]) + continue; + val = addr; + val |= hsotg->g_tx_fifo_sz[ep] << FIFOSIZE_DEPTH_SHIFT; + WARN_ONCE(addr + hsotg->g_tx_fifo_sz[ep] > hsotg->fifo_mem, + "insufficient fifo memory"); + addr += hsotg->g_tx_fifo_sz[ep]; dwc2_writel(val, hsotg->regs + DPTXFSIZN(ep)); } @@ -3807,10 +3806,36 @@ static void dwc2_hsotg_dump(struct dwc2_hsotg *hsotg) static void dwc2_hsotg_of_probe(struct dwc2_hsotg *hsotg) { struct device_node *np = hsotg->dev->of_node; + u32 len = 0; + u32 i = 0; /* Enable dma if requested in device tree */ hsotg->g_using_dma = of_property_read_bool(np, "g-use-dma"); + /* + * Register TX periodic fifo size per endpoint. + * EP0 is excluded since it has no fifo configuration. + */ + if (!of_find_property(np, "g-tx-fifo-size", &len)) + goto rx_fifo; + + len /= sizeof(u32); + + /* Read tx fifo sizes other than ep0 */ + if (of_property_read_u32_array(np, "g-tx-fifo-size", + &hsotg->g_tx_fifo_sz[1], len)) + goto rx_fifo; + + /* Add ep0 */ + len++; + + /* Make remaining TX fifos unavailable */ + if (len < MAX_EPS_CHANNELS) { + for (i = len; i < MAX_EPS_CHANNELS; i++) + hsotg->g_tx_fifo_sz[i] = 0; + } + +rx_fifo: /* Register RX fifo size */ of_property_read_u32(np, "g-rx-fifo-size", &hsotg->g_rx_fifo_sz); @@ -3832,10 +3857,13 @@ int dwc2_gadget_init(struct dwc2_hsotg *hsotg, int irq) struct device *dev = hsotg->dev; int epnum; int ret; + int i; + u32 p_tx_fifo[] = DWC2_G_P_LEGACY_TX_FIFO_SIZE; /* Initialize to legacy fifo configuration values */ hsotg->g_rx_fifo_sz = 2048; hsotg->g_np_g_tx_fifo_sz = 1024; + memcpy(&hsotg->g_tx_fifo_sz[1], p_tx_fifo, sizeof(p_tx_fifo)); /* Device tree specific probe */ dwc2_hsotg_of_probe(hsotg); @@ -3853,6 +3881,9 @@ int dwc2_gadget_init(struct dwc2_hsotg *hsotg, int irq) dev_dbg(dev, "NonPeriodic TXFIFO size: %d\n", hsotg->g_np_g_tx_fifo_sz); dev_dbg(dev, "RXFIFO size: %d\n", hsotg->g_rx_fifo_sz); + for (i = 0; i < MAX_EPS_CHANNELS; i++) + dev_dbg(dev, "Periodic TXFIFO%2d size: %d\n", i, + hsotg->g_tx_fifo_sz[i]); hsotg->gadget.max_speed = USB_SPEED_HIGH; hsotg->gadget.ops = &dwc2_hsotg_gadget_ops; -- cgit v1.2.3-59-g8ed1b From 5366f1460c447f8ecb7d13e99e5ccb4bcfc21927 Mon Sep 17 00:00:00 2001 From: Andrew Jeffery Date: Wed, 28 Sep 2016 00:20:13 +0930 Subject: pinctrl: aspeed: "Not enabled" is a significant mux state Consider a scenario with one pin P that has two signals A and B, where A is defined to be higher priority than B: That is, if the mux IP is in a state that would consider both A and B to be active on P, then A will be the active signal. To instead configure B as the active signal we must configure the mux so that A is inactive. The mux state for signals can be described by logical operations on one or more bits from one or more registers (a "signal expression"), which in some cases leads to aliased mux states for a particular signal. Further, signals described by multi-bit bitfields often do not only need to record the states that would make them active (the "enable" expressions), but also the states that makes them inactive (the "disable" expressions). All of this combined leads to four possible states for a signal: 1. A signal is active with respect to an "enable" expression 2. A signal is not active with respect to an "enable" expression 3. 
A signal is inactive with respect to a "disable" expression 4. A signal is not inactive with respect to a "disable" expression In the case of P, if we are looking to activate B without explicitly having configured A it's enough to consider A inactive if all of A's "enable" signal expressions evaluate to "not active". If any evaluate to "active" then the corresponding "disable" states must be applied so it becomes inactive. For example, on the AST2400 the pins composing GPIO bank H provide signals ROMD8 through ROMD15 (high priority) and those for UART6 (low priority). The mux states for ROMD8 through ROMD15 are aliased, i.e. there are two mux states that result in the respective signals being configured: A. SCU90[6]=1 B. Strap[4,1:0]=100 Further, the second mux state is a 3-bit bitfield that explicitly defines the enabled state but the disabled state is implicit, i.e. if Strap[4,1:0] is not exactly "100" then ROMD8 through ROMD15 are not considered active. This requires the mux function evaluation logic to use approach 2. above, however the existing code was using approach 3. The problem was brought to light on the Palmetto machines where the strap register value is 0x120ce416, and prevented GPIO requests in bank H from succeeding despite the hardware being in a position to allow them. Fixes: 318398c09a8d ("pinctrl: Add core pinctrl support for Aspeed SoCs") Signed-off-by: Andrew Jeffery Reviewed-by: Joel Stanley Signed-off-by: Linus Walleij --- drivers/pinctrl/aspeed/pinctrl-aspeed.c | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed.c b/drivers/pinctrl/aspeed/pinctrl-aspeed.c index 0391f9f13f3e..49aeba912531 100644 --- a/drivers/pinctrl/aspeed/pinctrl-aspeed.c +++ b/drivers/pinctrl/aspeed/pinctrl-aspeed.c @@ -166,13 +166,9 @@ static bool aspeed_sig_expr_set(const struct aspeed_sig_expr *expr, bool enable, struct regmap *map) { int i; - bool ret; - - ret = aspeed_sig_expr_eval(expr, enable, map); - if (ret) - return ret; for (i = 0; i < expr->ndescs; i++) { + bool ret; const struct aspeed_sig_desc *desc = &expr->descs[i]; u32 pattern = enable ? desc->enable : desc->disable; @@ -199,12 +195,18 @@ static bool aspeed_sig_expr_set(const struct aspeed_sig_expr *expr, static bool aspeed_sig_expr_enable(const struct aspeed_sig_expr *expr, struct regmap *map) { + if (aspeed_sig_expr_eval(expr, true, map)) + return true; + return aspeed_sig_expr_set(expr, true, map); } static bool aspeed_sig_expr_disable(const struct aspeed_sig_expr *expr, struct regmap *map) { + if (!aspeed_sig_expr_eval(expr, true, map)) + return true; + return aspeed_sig_expr_set(expr, false, map); } -- cgit v1.2.3-59-g8ed1b From 97e8c3f5e76582d63026585c79d39c1d1fb960e5 Mon Sep 17 00:00:00 2001 From: Andrew Jeffery Date: Wed, 28 Sep 2016 00:20:14 +0930 Subject: pinctrl: aspeed-g5: Fix names of GPID2 pins Fixes simple typos in the initial commit. There is no behavioural change. 
Fixes: 56e57cb6c07f (pinctrl: Add pinctrl-aspeed-g5 driver) Reported-by: Xo Wang Signed-off-by: Andrew Jeffery Reviewed-by: Joel Stanley Signed-off-by: Linus Walleij --- drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c index e1ab864e1a7f..14639834a5eb 100644 --- a/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c +++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c @@ -151,21 +151,21 @@ FUNC_GROUP_DECL(GPID0, F19, E21); #define GPID2_DESC SIG_DESC_SET(SCU8C, 9) -#define D20 26 +#define F20 26 SIG_EXPR_LIST_DECL_SINGLE(SD2DAT0, SD2, SD2_DESC); SIG_EXPR_DECL(GPID2IN, GPID2, GPID2_DESC); SIG_EXPR_DECL(GPID2IN, GPID, GPID_DESC); SIG_EXPR_LIST_DECL_DUAL(GPID2IN, GPID2, GPID); -MS_PIN_DECL(D20, GPIOD2, SD2DAT0, GPID2IN); +MS_PIN_DECL(F20, GPIOD2, SD2DAT0, GPID2IN); -#define D21 27 +#define D20 27 SIG_EXPR_LIST_DECL_SINGLE(SD2DAT1, SD2, SD2_DESC); SIG_EXPR_DECL(GPID2OUT, GPID2, GPID2_DESC); SIG_EXPR_DECL(GPID2OUT, GPID, GPID_DESC); SIG_EXPR_LIST_DECL_DUAL(GPID2OUT, GPID2, GPID); -MS_PIN_DECL(D21, GPIOD3, SD2DAT1, GPID2OUT); +MS_PIN_DECL(D20, GPIOD3, SD2DAT1, GPID2OUT); -FUNC_GROUP_DECL(GPID2, D20, D21); +FUNC_GROUP_DECL(GPID2, F20, D20); #define GPIE_DESC SIG_DESC_SET(HW_STRAP1, 21) #define GPIE0_DESC SIG_DESC_SET(SCU8C, 12) @@ -614,7 +614,6 @@ static struct pinctrl_pin_desc aspeed_g5_pins[ASPEED_G5_NR_PINS] = { ASPEED_PINCTRL_PIN(D10), ASPEED_PINCTRL_PIN(D2), ASPEED_PINCTRL_PIN(D20), - ASPEED_PINCTRL_PIN(D21), ASPEED_PINCTRL_PIN(D4), ASPEED_PINCTRL_PIN(D5), ASPEED_PINCTRL_PIN(D6), @@ -630,6 +629,7 @@ static struct pinctrl_pin_desc aspeed_g5_pins[ASPEED_G5_NR_PINS] = { ASPEED_PINCTRL_PIN(E7), ASPEED_PINCTRL_PIN(E9), ASPEED_PINCTRL_PIN(F19), + ASPEED_PINCTRL_PIN(F20), ASPEED_PINCTRL_PIN(F9), ASPEED_PINCTRL_PIN(H20), ASPEED_PINCTRL_PIN(L1), -- cgit v1.2.3-59-g8ed1b From d3dbabe9848092d26d14076cdf4d52734f998580 Mon Sep 17 00:00:00 2001 From: Andrew Jeffery Date: Wed, 28 Sep 2016 00:20:15 +0930 Subject: pinctrl: aspeed-g5: Fix GPIOE1 typo This prevented C20 from successfully being muxed as GPIO. Fixes: 56e57cb6c07f (pinctrl: Add pinctrl-aspeed-g5 driver) Signed-off-by: Andrew Jeffery Reviewed-by: Joel Stanley Signed-off-by: Linus Walleij --- drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c index 14639834a5eb..235d929e74fd 100644 --- a/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c +++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c @@ -182,7 +182,7 @@ SIG_EXPR_LIST_DECL_SINGLE(NDCD3, NDCD3, SIG_DESC_SET(SCU80, 17)); SIG_EXPR_DECL(GPIE0OUT, GPIE0, GPIE0_DESC); SIG_EXPR_DECL(GPIE0OUT, GPIE, GPIE_DESC); SIG_EXPR_LIST_DECL_DUAL(GPIE0OUT, GPIE0, GPIE); -MS_PIN_DECL(C20, GPIE0, NDCD3, GPIE0OUT); +MS_PIN_DECL(C20, GPIOE1, NDCD3, GPIE0OUT); FUNC_GROUP_DECL(GPIE0, B20, C20); -- cgit v1.2.3-59-g8ed1b From 8eb37aff76f4d97db39e62a838cd37c4d341d673 Mon Sep 17 00:00:00 2001 From: Andrew Jeffery Date: Wed, 28 Sep 2016 00:20:16 +0930 Subject: pinctrl: aspeed-g5: Fix pin association of SPI1 function The SPI1 function was associated with the wrong pins: The functions that those pins provide is either an SPI debug or passthrough function coupled to SPI1. 
Make the SPI1 mux function configure the relevant pins and associate new SPI1DEBUG and SPI1PASSTHRU functions with the pins that were already defined. The notation used in the datasheet's multi-function pin table for the SoC is often creative: in this case the SYS* signals are enabled by a single bit, which is nothing unusual on its own, but in this case the bit was also participating in a multi-bit bitfield and therefore represented multiple functions. This fact was overlooked in the original patch. Fixes: 56e57cb6c07f (pinctrl: Add pinctrl-aspeed-g5 driver) Signed-off-by: Andrew Jeffery Reviewed-by: Joel Stanley Acked-by: Rob Herring Signed-off-by: Linus Walleij --- .../devicetree/bindings/pinctrl/pinctrl-aspeed.txt | 4 +- drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c | 86 ++++++++++++++++++++-- 2 files changed, 81 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/Documentation/devicetree/bindings/pinctrl/pinctrl-aspeed.txt b/Documentation/devicetree/bindings/pinctrl/pinctrl-aspeed.txt index 5e60ad18f147..2ad18c4ea55c 100644 --- a/Documentation/devicetree/bindings/pinctrl/pinctrl-aspeed.txt +++ b/Documentation/devicetree/bindings/pinctrl/pinctrl-aspeed.txt @@ -43,7 +43,9 @@ aspeed,ast2500-pinctrl, aspeed,g5-pinctrl: GPID0 GPID2 GPIE0 I2C10 I2C11 I2C12 I2C13 I2C14 I2C3 I2C4 I2C5 I2C6 I2C7 I2C8 I2C9 MAC1LINK MDIO1 MDIO2 OSCCLK PEWAKE PWM0 PWM1 PWM2 PWM3 PWM4 PWM5 PWM6 PWM7 -RGMII1 RGMII2 RMII1 RMII2 SD1 SPI1 TIMER4 TIMER5 TIMER6 TIMER7 TIMER8 +RGMII1 RGMII2 RMII1 RMII2 SD1 SPI1 SPI1DEBUG SPI1PASSTHRU TIMER4 TIMER5 TIMER6 +TIMER7 TIMER8 VGABIOSROM + Examples: diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c index 235d929e74fd..c8c72e8259d3 100644 --- a/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c +++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c @@ -186,24 +186,84 @@ MS_PIN_DECL(C20, GPIOE1, NDCD3, GPIE0OUT); FUNC_GROUP_DECL(GPIE0, B20, C20); -#define SPI1_DESC SIG_DESC_SET(HW_STRAP1, 13) +#define SPI1_DESC { HW_STRAP1, GENMASK(13, 12), 1, 0 } +#define SPI1DEBUG_DESC { HW_STRAP1, GENMASK(13, 12), 2, 0 } +#define SPI1PASSTHRU_DESC { HW_STRAP1, GENMASK(13, 12), 3, 0 } + #define C18 64 -SIG_EXPR_LIST_DECL_SINGLE(SYSCS, SPI1, COND1, SPI1_DESC); +SIG_EXPR_DECL(SYSCS, SPI1DEBUG, COND1, SPI1DEBUG_DESC); +SIG_EXPR_DECL(SYSCS, SPI1PASSTHRU, COND1, SPI1PASSTHRU_DESC); +SIG_EXPR_LIST_DECL_DUAL(SYSCS, SPI1DEBUG, SPI1PASSTHRU); SS_PIN_DECL(C18, GPIOI0, SYSCS); #define E15 65 -SIG_EXPR_LIST_DECL_SINGLE(SYSCK, SPI1, COND1, SPI1_DESC); +SIG_EXPR_DECL(SYSCK, SPI1DEBUG, COND1, SPI1DEBUG_DESC); +SIG_EXPR_DECL(SYSCK, SPI1PASSTHRU, COND1, SPI1PASSTHRU_DESC); +SIG_EXPR_LIST_DECL_DUAL(SYSCK, SPI1DEBUG, SPI1PASSTHRU); SS_PIN_DECL(E15, GPIOI1, SYSCK); -#define A14 66 -SIG_EXPR_LIST_DECL_SINGLE(SYSMOSI, SPI1, COND1, SPI1_DESC); -SS_PIN_DECL(A14, GPIOI2, SYSMOSI); +#define B16 66 +SIG_EXPR_DECL(SYSMOSI, SPI1DEBUG, COND1, SPI1DEBUG_DESC); +SIG_EXPR_DECL(SYSMOSI, SPI1PASSTHRU, COND1, SPI1PASSTHRU_DESC); +SIG_EXPR_LIST_DECL_DUAL(SYSMOSI, SPI1DEBUG, SPI1PASSTHRU); +SS_PIN_DECL(B16, GPIOI2, SYSMOSI); #define C16 67 -SIG_EXPR_LIST_DECL_SINGLE(SYSMISO, SPI1, COND1, SPI1_DESC); +SIG_EXPR_DECL(SYSMISO, SPI1DEBUG, COND1, SPI1DEBUG_DESC); +SIG_EXPR_DECL(SYSMISO, SPI1PASSTHRU, COND1, SPI1PASSTHRU_DESC); +SIG_EXPR_LIST_DECL_DUAL(SYSMISO, SPI1DEBUG, SPI1PASSTHRU); SS_PIN_DECL(C16, GPIOI3, SYSMISO); -FUNC_GROUP_DECL(SPI1, C18, E15, A14, C16); +#define VB_DESC SIG_DESC_SET(HW_STRAP1, 5) + +#define B15 68 +SIG_EXPR_DECL(SPI1CS0, SPI1, COND1, SPI1_DESC); 
+SIG_EXPR_DECL(SPI1CS0, SPI1DEBUG, COND1, SPI1DEBUG_DESC); +SIG_EXPR_DECL(SPI1CS0, SPI1PASSTHRU, COND1, SPI1PASSTHRU_DESC); +SIG_EXPR_LIST_DECL(SPI1CS0, SIG_EXPR_PTR(SPI1CS0, SPI1), + SIG_EXPR_PTR(SPI1CS0, SPI1DEBUG), + SIG_EXPR_PTR(SPI1CS0, SPI1PASSTHRU)); +SIG_EXPR_LIST_DECL_SINGLE(VBCS, VGABIOSROM, COND1, VB_DESC); +MS_PIN_DECL(B15, GPIOI4, SPI1CS0, VBCS); + +#define C15 69 +SIG_EXPR_DECL(SPI1CK, SPI1, COND1, SPI1_DESC); +SIG_EXPR_DECL(SPI1CK, SPI1DEBUG, COND1, SPI1DEBUG_DESC); +SIG_EXPR_DECL(SPI1CK, SPI1PASSTHRU, COND1, SPI1PASSTHRU_DESC); +SIG_EXPR_LIST_DECL(SPI1CK, SIG_EXPR_PTR(SPI1CK, SPI1), + SIG_EXPR_PTR(SPI1CK, SPI1DEBUG), + SIG_EXPR_PTR(SPI1CK, SPI1PASSTHRU)); +SIG_EXPR_LIST_DECL_SINGLE(VBCK, VGABIOSROM, COND1, VB_DESC); +MS_PIN_DECL(C15, GPIOI5, SPI1CK, VBCK); + +#define A14 70 +SIG_EXPR_DECL(SPI1MOSI, SPI1, COND1, SPI1_DESC); +SIG_EXPR_DECL(SPI1MOSI, SPI1DEBUG, COND1, SPI1DEBUG_DESC); +SIG_EXPR_DECL(SPI1MOSI, SPI1PASSTHRU, COND1, SPI1PASSTHRU_DESC); +SIG_EXPR_LIST_DECL(SPI1MOSI, SIG_EXPR_PTR(SPI1MOSI, SPI1), + SIG_EXPR_PTR(SPI1MOSI, SPI1DEBUG), + SIG_EXPR_PTR(SPI1MOSI, SPI1PASSTHRU)); +SIG_EXPR_LIST_DECL_SINGLE(VBMOSI, VGABIOSROM, COND1, VB_DESC); +MS_PIN_DECL(A14, GPIOI6, SPI1MOSI, VBMOSI); + +#define A15 71 +SIG_EXPR_DECL(SPI1MISO, SPI1, COND1, SPI1_DESC); +SIG_EXPR_DECL(SPI1MISO, SPI1DEBUG, COND1, SPI1DEBUG_DESC); +SIG_EXPR_DECL(SPI1MISO, SPI1PASSTHRU, COND1, SPI1PASSTHRU_DESC); +SIG_EXPR_LIST_DECL(SPI1MISO, SIG_EXPR_PTR(SPI1MISO, SPI1), + SIG_EXPR_PTR(SPI1MISO, SPI1DEBUG), + SIG_EXPR_PTR(SPI1MISO, SPI1PASSTHRU)); +SIG_EXPR_LIST_DECL_SINGLE(VBMISO, VGABIOSROM, COND1, VB_DESC); +MS_PIN_DECL(A15, GPIOI7, SPI1MISO, VBMISO); + +FUNC_GROUP_DECL(SPI1, B15, C15, A14, A15); +FUNC_GROUP_DECL(SPI1DEBUG, C18, E15, B16, C16, B15, C15, A14, A15); +FUNC_GROUP_DECL(SPI1PASSTHRU, C18, E15, B16, C16, B15, C15, A14, A15); +FUNC_GROUP_DECL(VGABIOSROM, B15, C15, A14, A15); + +#define R2 72 +SIG_EXPR_LIST_DECL_SINGLE(SGPMCK, SGPM, SIG_DESC_SET(SCU84, 8)); +SS_PIN_DECL(R2, GPIOJ0, SGPMCK); #define L2 73 SIG_EXPR_LIST_DECL_SINGLE(SGPMLD, SGPM, SIG_DESC_SET(SCU84, 9)); @@ -580,6 +640,7 @@ static struct pinctrl_pin_desc aspeed_g5_pins[ASPEED_G5_NR_PINS] = { ASPEED_PINCTRL_PIN(A12), ASPEED_PINCTRL_PIN(A13), ASPEED_PINCTRL_PIN(A14), + ASPEED_PINCTRL_PIN(A15), ASPEED_PINCTRL_PIN(A2), ASPEED_PINCTRL_PIN(A3), ASPEED_PINCTRL_PIN(A4), @@ -592,6 +653,8 @@ static struct pinctrl_pin_desc aspeed_g5_pins[ASPEED_G5_NR_PINS] = { ASPEED_PINCTRL_PIN(B12), ASPEED_PINCTRL_PIN(B13), ASPEED_PINCTRL_PIN(B14), + ASPEED_PINCTRL_PIN(B15), + ASPEED_PINCTRL_PIN(B16), ASPEED_PINCTRL_PIN(B2), ASPEED_PINCTRL_PIN(B20), ASPEED_PINCTRL_PIN(B3), @@ -603,6 +666,7 @@ static struct pinctrl_pin_desc aspeed_g5_pins[ASPEED_G5_NR_PINS] = { ASPEED_PINCTRL_PIN(C12), ASPEED_PINCTRL_PIN(C13), ASPEED_PINCTRL_PIN(C14), + ASPEED_PINCTRL_PIN(C15), ASPEED_PINCTRL_PIN(C16), ASPEED_PINCTRL_PIN(C18), ASPEED_PINCTRL_PIN(C2), @@ -691,11 +755,14 @@ static const struct aspeed_pin_group aspeed_g5_groups[] = { ASPEED_PINCTRL_GROUP(RMII2), ASPEED_PINCTRL_GROUP(SD1), ASPEED_PINCTRL_GROUP(SPI1), + ASPEED_PINCTRL_GROUP(SPI1DEBUG), + ASPEED_PINCTRL_GROUP(SPI1PASSTHRU), ASPEED_PINCTRL_GROUP(TIMER4), ASPEED_PINCTRL_GROUP(TIMER5), ASPEED_PINCTRL_GROUP(TIMER6), ASPEED_PINCTRL_GROUP(TIMER7), ASPEED_PINCTRL_GROUP(TIMER8), + ASPEED_PINCTRL_GROUP(VGABIOSROM), }; static const struct aspeed_pin_function aspeed_g5_functions[] = { @@ -733,11 +800,14 @@ static const struct aspeed_pin_function aspeed_g5_functions[] = { ASPEED_PINCTRL_FUNC(RMII2), ASPEED_PINCTRL_FUNC(SD1), 
ASPEED_PINCTRL_FUNC(SPI1), + ASPEED_PINCTRL_FUNC(SPI1DEBUG), + ASPEED_PINCTRL_FUNC(SPI1PASSTHRU), ASPEED_PINCTRL_FUNC(TIMER4), ASPEED_PINCTRL_FUNC(TIMER5), ASPEED_PINCTRL_FUNC(TIMER6), ASPEED_PINCTRL_FUNC(TIMER7), ASPEED_PINCTRL_FUNC(TIMER8), + ASPEED_PINCTRL_FUNC(VGABIOSROM), }; static struct aspeed_pinctrl_data aspeed_g5_pinctrl_data = { -- cgit v1.2.3-59-g8ed1b From a171bc51fa697021e1b2082d7e95c12a363bc0a9 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Mon, 3 Oct 2016 17:56:55 +0300 Subject: pinctrl: baytrail: Fix lockdep MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Initialize the spinlock before using it. INFO: trying to register non-static key. the code is fine but needs lockdep annotation. turning off the locking correctness validator. CPU: 2 PID: 1 Comm: swapper/0 Not tainted 4.8.0-dwc-bisect #4 Hardware name: Intel Corp. VALLEYVIEW C0 PLATFORM/BYT-T FFD8, BIOS BLAKFF81.X64.0088.R10.1403240443 FFD8_X64_R_2014_13_1_00 03/24/2014 0000000000000000 ffff8800788ff770 ffffffff8133d597 0000000000000000 0000000000000000 ffff8800788ff7e0 ffffffff810cfb9e 0000000000000002 ffff8800788ff7d0 ffffffff8205b600 0000000000000002 ffff8800788ff7f0 Call Trace: [] dump_stack+0x67/0x90 [] register_lock_class+0x52e/0x540 [] __lock_acquire+0x81/0x16b0 [] ? save_trace+0x41/0xd0 [] ? __lock_acquire+0x13b2/0x16b0 [] ? __lock_is_held+0x4a/0x70 [] lock_acquire+0xba/0x220 [] ? byt_gpio_get_direction+0x3e/0x80 [] _raw_spin_lock_irqsave+0x47/0x60 [] ? byt_gpio_get_direction+0x3e/0x80 [] byt_gpio_get_direction+0x3e/0x80 [] gpiochip_add_data+0x319/0x7d0 [] ? _raw_spin_unlock_irqrestore+0x43/0x70 [] byt_pinctrl_probe+0x2fb/0x620 [] platform_drv_probe+0x3c/0xa0 ... Based on the diff it looks like the problem was introduced in commit 71e6ca61e826 ("pinctrl: baytrail: Register pin control handling") but I wasn't able to verify that empirically as the parent commit just oopsed when I tried to boot it. Cc: Cristina Ciocan Cc: stable@vger.kernel.org Fixes: 71e6ca61e826 ("pinctrl: baytrail: Register pin control handling") Signed-off-by: Ville Syrjälä Acked-by: Mika Westerberg Signed-off-by: Linus Walleij --- drivers/pinctrl/intel/pinctrl-baytrail.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c index d22a9fe2e6df..71bbeb9321ba 100644 --- a/drivers/pinctrl/intel/pinctrl-baytrail.c +++ b/drivers/pinctrl/intel/pinctrl-baytrail.c @@ -1808,6 +1808,8 @@ static int byt_pinctrl_probe(struct platform_device *pdev) return PTR_ERR(vg->pctl_dev); } + raw_spin_lock_init(&vg->lock); + ret = byt_gpio_probe(vg); if (ret) { pinctrl_unregister(vg->pctl_dev); @@ -1815,7 +1817,6 @@ static int byt_pinctrl_probe(struct platform_device *pdev) } platform_set_drvdata(pdev, vg); - raw_spin_lock_init(&vg->lock); pm_runtime_enable(&pdev->dev); return 0; -- cgit v1.2.3-59-g8ed1b From c538b9436751a0be2e1246b48353bc23156bdbcc Mon Sep 17 00:00:00 2001 From: Mika Westerberg Date: Mon, 10 Oct 2016 16:39:31 +0300 Subject: pinctrl: intel: Only restore pins that are used by the driver Dell XPS 13 (and maybe some others) uses a GPIO (CPU_GP_1) during suspend to explicitly disable USB touchscreen interrupt. This is done to prevent situation where the lid is closed the touchscreen is left functional. The pinctrl driver (wrongly) assumes it owns all pins which are owned by host and not locked down. 
It is perfectly fine for BIOS to use those pins as it is also considered as host in this context. What happens is that when the lid of Dell XPS 13 is closed, the BIOS configures CPU_GP_1 low disabling the touchscreen interrupt. During resume we restore all host owned pins to the known state which includes CPU_GP_1 and this overwrites what the BIOS has programmed there causing the touchscreen to fail as no interrupts are reaching the CPU anymore. Fix this by restoring only those pins we know are explicitly requested by the kernel one way or other. Cc: stable@vger.kernel.org Link: https://bugzilla.kernel.org/show_bug.cgi?id=176361 Reported-by: AceLan Kao Tested-by: AceLan Kao Signed-off-by: Mika Westerberg Signed-off-by: Linus Walleij --- drivers/pinctrl/intel/pinctrl-intel.c | 25 +++++++++++++++++++++++-- 1 file changed, 23 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c index 63387a40b973..01443762e570 100644 --- a/drivers/pinctrl/intel/pinctrl-intel.c +++ b/drivers/pinctrl/intel/pinctrl-intel.c @@ -19,6 +19,7 @@ #include #include +#include "../core.h" #include "pinctrl-intel.h" /* Offset from regs */ @@ -1056,6 +1057,26 @@ int intel_pinctrl_remove(struct platform_device *pdev) EXPORT_SYMBOL_GPL(intel_pinctrl_remove); #ifdef CONFIG_PM_SLEEP +static bool intel_pinctrl_should_save(struct intel_pinctrl *pctrl, unsigned pin) +{ + const struct pin_desc *pd = pin_desc_get(pctrl->pctldev, pin); + + if (!pd || !intel_pad_usable(pctrl, pin)) + return false; + + /* + * Only restore the pin if it is actually in use by the kernel (or + * by userspace). It is possible that some pins are used by the + * BIOS during resume and those are not always locked down so leave + * them alone. + */ + if (pd->mux_owner || pd->gpio_owner || + gpiochip_line_is_irq(&pctrl->chip, pin)) + return true; + + return false; +} + int intel_pinctrl_suspend(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); @@ -1069,7 +1090,7 @@ int intel_pinctrl_suspend(struct device *dev) const struct pinctrl_pin_desc *desc = &pctrl->soc->pins[i]; u32 val; - if (!intel_pad_usable(pctrl, desc->number)) + if (!intel_pinctrl_should_save(pctrl, desc->number)) continue; val = readl(intel_get_padcfg(pctrl, desc->number, PADCFG0)); @@ -1130,7 +1151,7 @@ int intel_pinctrl_resume(struct device *dev) void __iomem *padcfg; u32 val; - if (!intel_pad_usable(pctrl, desc->number)) + if (!intel_pinctrl_should_save(pctrl, desc->number)) continue; padcfg = intel_get_padcfg(pctrl, desc->number, PADCFG0); -- cgit v1.2.3-59-g8ed1b From 6bc80629eefc3731f5881092bed610d994e05763 Mon Sep 17 00:00:00 2001 From: Ivan Vecera Date: Tue, 18 Oct 2016 08:16:03 +0200 Subject: bnx2: fix locking when netconsole is used Functions bnx2_reg_rd_ind(), bnx2_reg_wr_ind() and bnx2_ctx_wr() can be called with IRQs disabled when netconsole is enabled. So they should use spin_{,un}lock_irq{save,restore} instead of _bh variants. Example call flow: bnx2_poll() ->bnx2_poll_link() ->bnx2_phy_int() ->bnx2_set_remote_link() ->bnx2_shmem_rd() ->bnx2_reg_rd_ind() -> spin_lock_bh(&bp->indirect_lock); spin_unlock_bh(&bp->indirect_lock); ... -> __local_bh_enable_ip static inline void __local_bh_enable_ip(unsigned long ip) WARN_ON_ONCE(in_irq() || irqs_disabled()); <<<<<< WARN Cc: Sony Chacko Cc: Dept-HSGLinuxNICDev@qlogic.com Signed-off-by: Ivan Vecera Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnx2.c | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c index 27f11a5d5fe2..b3791b394715 100644 --- a/drivers/net/ethernet/broadcom/bnx2.c +++ b/drivers/net/ethernet/broadcom/bnx2.c @@ -271,22 +271,25 @@ static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr) static u32 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset) { + unsigned long flags; u32 val; - spin_lock_bh(&bp->indirect_lock); + spin_lock_irqsave(&bp->indirect_lock, flags); BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset); val = BNX2_RD(bp, BNX2_PCICFG_REG_WINDOW); - spin_unlock_bh(&bp->indirect_lock); + spin_unlock_irqrestore(&bp->indirect_lock, flags); return val; } static void bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val) { - spin_lock_bh(&bp->indirect_lock); + unsigned long flags; + + spin_lock_irqsave(&bp->indirect_lock, flags); BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset); BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, val); - spin_unlock_bh(&bp->indirect_lock); + spin_unlock_irqrestore(&bp->indirect_lock, flags); } static void @@ -304,8 +307,10 @@ bnx2_shmem_rd(struct bnx2 *bp, u32 offset) static void bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val) { + unsigned long flags; + offset += cid_addr; - spin_lock_bh(&bp->indirect_lock); + spin_lock_irqsave(&bp->indirect_lock, flags); if (BNX2_CHIP(bp) == BNX2_CHIP_5709) { int i; @@ -322,7 +327,7 @@ bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val) BNX2_WR(bp, BNX2_CTX_DATA_ADR, offset); BNX2_WR(bp, BNX2_CTX_DATA, val); } - spin_unlock_bh(&bp->indirect_lock); + spin_unlock_irqrestore(&bp->indirect_lock, flags); } #ifdef BCM_CNIC -- cgit v1.2.3-59-g8ed1b From 902943c0d759a090490b2bf6c91a49395b353653 Mon Sep 17 00:00:00 2001 From: Niklas Cassel Date: Tue, 18 Oct 2016 09:20:33 +0200 Subject: dwc_eth_qos: do not clear pause flags from phy_device->supported phy_device->supported is originally set by the PHY driver. The ethernet driver should filter phy_device->supported to only contain flags supported by the IP. The IP supports setting rx and tx flow control independently, therefore SUPPORTED_Pause and SUPPORTED_Asym_Pause should not be cleared. If the flags are cleared, pause frames cannot be enabled (even if they are supported by the PHY). Signed-off-by: Niklas Cassel Signed-off-by: Jesper Nilsson Acked-by: Lars Persson Signed-off-by: David S. Miller --- drivers/net/ethernet/synopsys/dwc_eth_qos.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/synopsys/dwc_eth_qos.c b/drivers/net/ethernet/synopsys/dwc_eth_qos.c index 0d0053128542..d775729648ef 100644 --- a/drivers/net/ethernet/synopsys/dwc_eth_qos.c +++ b/drivers/net/ethernet/synopsys/dwc_eth_qos.c @@ -982,7 +982,8 @@ static int dwceqos_mii_probe(struct net_device *ndev) if (netif_msg_probe(lp)) phy_attached_info(phydev); - phydev->supported &= PHY_GBIT_FEATURES; + phydev->supported &= PHY_GBIT_FEATURES | SUPPORTED_Pause | + SUPPORTED_Asym_Pause; lp->link = 0; lp->speed = 0; -- cgit v1.2.3-59-g8ed1b From 1826277802d83b47d77e6a3e876aec07777aa271 Mon Sep 17 00:00:00 2001 From: Niklas Cassel Date: Tue, 18 Oct 2016 09:20:55 +0200 Subject: dwc_eth_qos: enable flow control by default Allow autoneg to enable flow control by default. The behavior when autoneg is off has not changed. 
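Taken together, the two flow-control changes above amount to a small probe-time pattern. A minimal sketch, with the helper name invented for illustration (this shows the shape of the fix, not the driver's actual code):

#include <linux/phy.h>

static void example_filter_phy_features(struct phy_device *phydev)
{
	/*
	 * Masking with PHY_GBIT_FEATURES alone would clear SUPPORTED_Pause
	 * and SUPPORTED_Asym_Pause, so pause frames could never be
	 * negotiated even on a PHY that supports them.
	 */
	phydev->supported &= PHY_GBIT_FEATURES |
			     SUPPORTED_Pause |
			     SUPPORTED_Asym_Pause;
}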
Signed-off-by: Niklas Cassel Signed-off-by: Jesper Nilsson Acked-by: Lars Persson Signed-off-by: David S. Miller --- drivers/net/ethernet/synopsys/dwc_eth_qos.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/synopsys/dwc_eth_qos.c b/drivers/net/ethernet/synopsys/dwc_eth_qos.c index d775729648ef..5eedac495077 100644 --- a/drivers/net/ethernet/synopsys/dwc_eth_qos.c +++ b/drivers/net/ethernet/synopsys/dwc_eth_qos.c @@ -988,6 +988,7 @@ static int dwceqos_mii_probe(struct net_device *ndev) lp->link = 0; lp->speed = 0; lp->duplex = DUPLEX_UNKNOWN; + lp->flowcontrol.autoneg = AUTONEG_ENABLE; return 0; } -- cgit v1.2.3-59-g8ed1b From a56177e18f2e44499a8bf5bc03dbe896dbec657d Mon Sep 17 00:00:00 2001 From: Ganesh Goudar Date: Tue, 18 Oct 2016 14:21:25 +0530 Subject: cxgb4: Fix number of queue sets corssing the limit Do not let number of offload queue sets to go more than MAX_OFLD_QSETS, which would otherwise crash the driver on machines with cores more than MAX_OFLD_QSETS. Signed-off-by: Ganesh Goudar Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index f320497368f4..57eb4e1345cb 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -4057,7 +4057,7 @@ static void cfg_queues(struct adapter *adap) * capped by the number of available cores. */ if (n10g) { - i = num_online_cpus(); + i = min_t(int, MAX_OFLD_QSETS, num_online_cpus()); s->ofldqsets = roundup(i, adap->params.nports); } else { s->ofldqsets = adap->params.nports; -- cgit v1.2.3-59-g8ed1b From d09960b0032174eb493c4c13be5b9c9ef36dc9a7 Mon Sep 17 00:00:00 2001 From: Tahsin Erdogan Date: Mon, 10 Oct 2016 05:35:19 -0700 Subject: dm: free io_barrier after blk_cleanup_queue call dm_old_request_fn() has paths that access md->io_barrier. The party destroying io_barrier should ensure that no future execution of dm_old_request_fn() is possible. Move io_barrier destruction to below blk_cleanup_queue() to ensure this and avoid a NULL pointer crash during request-based DM device shutdown. Cc: stable@vger.kernel.org # 4.3+ Signed-off-by: Tahsin Erdogan Signed-off-by: Mike Snitzer --- drivers/md/dm.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/md/dm.c b/drivers/md/dm.c index be35258324c1..ec513ee864f2 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -1423,8 +1423,6 @@ static void cleanup_mapped_device(struct mapped_device *md) if (md->bs) bioset_free(md->bs); - cleanup_srcu_struct(&md->io_barrier); - if (md->disk) { spin_lock(&_minor_lock); md->disk->private_data = NULL; @@ -1436,6 +1434,8 @@ static void cleanup_mapped_device(struct mapped_device *md) if (md->queue) blk_cleanup_queue(md->queue); + cleanup_srcu_struct(&md->io_barrier); + if (md->bdev) { bdput(md->bdev); md->bdev = NULL; -- cgit v1.2.3-59-g8ed1b From 937fa62e8a00d0b4bc2c0a40567d7c88ab2b2e8d Mon Sep 17 00:00:00 2001 From: Mike Snitzer Date: Tue, 18 Oct 2016 14:02:04 -0400 Subject: dm rq: clear kworker_task if kthread_run() returned an error cleanup_mapped_device() calls kthread_stop() if kworker_task is non-NULL. Currently the assigned value could be a valid task struct or an error code (e.g -ENOMEM). Reset md->kworker_task to NULL if kthread_run() returned an erorr. 
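The underlying issue is that kthread_run() reports failure with an ERR_PTR(), never NULL, so the field must be reset explicitly on error. A minimal sketch of the intended pattern (structure and names are illustrative, not the DM code):

#include <linux/err.h>
#include <linux/kthread.h>

struct example_dev {
	struct kthread_worker kworker;
	struct task_struct *kworker_task;
};

static int example_start_worker(struct example_dev *ed)
{
	init_kthread_worker(&ed->kworker);
	ed->kworker_task = kthread_run(kthread_worker_fn, &ed->kworker,
				       "example-worker");
	if (IS_ERR(ed->kworker_task)) {
		int error = PTR_ERR(ed->kworker_task);

		/* Keep NULL here so later "if (kworker_task)" checks are safe. */
		ed->kworker_task = NULL;
		return error;
	}

	return 0;
}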
Fixes: 7193a9defc ("dm rq: check kthread_run return for .request_fn request-based DM") Cc: stable@vger.kernel.org # 4.8 Reported-by: Tahsin Erdogan Signed-off-by: Mike Snitzer --- drivers/md/dm-rq.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c index 5eacce1ef88b..63e43f261cce 100644 --- a/drivers/md/dm-rq.c +++ b/drivers/md/dm-rq.c @@ -856,8 +856,11 @@ int dm_old_init_request_queue(struct mapped_device *md) init_kthread_worker(&md->kworker); md->kworker_task = kthread_run(kthread_worker_fn, &md->kworker, "kdmwork-%s", dm_device_name(md)); - if (IS_ERR(md->kworker_task)) - return PTR_ERR(md->kworker_task); + if (IS_ERR(md->kworker_task)) { + int error = PTR_ERR(md->kworker_task); + md->kworker_task = NULL; + return error; + } elv_register_queue(md->queue); -- cgit v1.2.3-59-g8ed1b From 9fa2f2cca21b14d17b1c1a37babb639a48a5a29f Mon Sep 17 00:00:00 2001 From: Thomas Falcon Date: Mon, 17 Oct 2016 15:56:29 -0500 Subject: ibmvnic: Driver Version 1.0.1 Increment driver version to reflect features that have been added since release. Signed-off-by: Thomas Falcon Signed-off-by: David S. Miller --- drivers/net/ethernet/ibm/ibmvnic.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h index bfc84c7d0e11..878e2c059f5c 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.h +++ b/drivers/net/ethernet/ibm/ibmvnic.h @@ -27,7 +27,7 @@ /**************************************************************************/ #define IBMVNIC_NAME "ibmvnic" -#define IBMVNIC_DRIVER_VERSION "1.0" +#define IBMVNIC_DRIVER_VERSION "1.0.1" #define IBMVNIC_INVALID_MAP -1 #define IBMVNIC_STATS_TIMEOUT 1 /* basic structures plus 100 2k buffers */ -- cgit v1.2.3-59-g8ed1b From 12608c260d2fe36746508cb4fa20b6e9a5f9c241 Mon Sep 17 00:00:00 2001 From: Thomas Falcon Date: Mon, 17 Oct 2016 15:28:09 -0500 Subject: ibmvnic: Fix GFP_KERNEL allocation in interrupt context Signed-off-by: Thomas Falcon Signed-off-by: David S. Miller --- drivers/net/ethernet/ibm/ibmvnic.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index bfe17d9c022d..928bf8a5a567 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -1190,7 +1190,7 @@ static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter if (!scrq) return NULL; - scrq->msgs = (union sub_crq *)__get_free_pages(GFP_KERNEL, 2); + scrq->msgs = (union sub_crq *)__get_free_pages(GFP_ATOMIC, 2); memset(scrq->msgs, 0, 4 * PAGE_SIZE); if (!scrq->msgs) { dev_warn(dev, "Couldn't allocate crq queue messages page\n"); -- cgit v1.2.3-59-g8ed1b From 87737f8810db445db171ca81ca4cc43bd5b067ce Mon Sep 17 00:00:00 2001 From: Thomas Falcon Date: Mon, 17 Oct 2016 15:28:10 -0500 Subject: ibmvnic: Update MTU after device initialization It is possible for the MTU to be changed during the initialization process with the VNIC Server. Ensure that the net device is updated to reflect the new MTU. Signed-off-by: Thomas Falcon Signed-off-by: David S. 
Miller --- drivers/net/ethernet/ibm/ibmvnic.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 928bf8a5a567..213162df1a9b 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -3654,6 +3654,7 @@ static void handle_crq_init_rsp(struct work_struct *work) goto task_failed; netdev->real_num_tx_queues = adapter->req_tx_queues; + netdev->mtu = adapter->req_mtu; if (adapter->failover) { adapter->failover = false; @@ -3792,6 +3793,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id) } netdev->real_num_tx_queues = adapter->req_tx_queues; + netdev->mtu = adapter->req_mtu; rc = register_netdev(netdev); if (rc) { -- cgit v1.2.3-59-g8ed1b From 4b75ca5a7a9e01dda3ea70bc17b1826fd679af61 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 18 Oct 2016 00:16:08 +0200 Subject: net: bcm63xx: avoid referencing uninitialized variable gcc found a reference to an uninitialized variable in the error handling of bcm_enet_open, introduced by a recent cleanup: drivers/net/ethernet/broadcom/bcm63xx_enet.c: In function 'bcm_enet_open' drivers/net/ethernet/broadcom/bcm63xx_enet.c:1129:2: warning: 'phydev' may be used uninitialized in this function [-Wmaybe-uninitialized] This makes the use of that variable conditional, so we only reference it here after it has been used before. Unlike my normal patches, I have not build-tested this one, as I don't currently have mips test in my randconfig setup. Fixes: 625eb8667d6f ("net: ethernet: broadcom: bcm63xx: use phydev from struct net_device") Cc: Philippe Reynes Reported-by: kbuild test robot Signed-off-by: Arnd Bergmann Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bcm63xx_enet.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c index ae364c74baf3..537090952c45 100644 --- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c +++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c @@ -1126,7 +1126,8 @@ out_freeirq: free_irq(dev->irq, dev); out_phy_disconnect: - phy_disconnect(phydev); + if (priv->has_phy) + phy_disconnect(phydev); return ret; } -- cgit v1.2.3-59-g8ed1b From 52ccd6318481a467d8ad54ea40f60c61e957d58f Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 18 Oct 2016 00:16:09 +0200 Subject: net/hyperv: avoid uninitialized variable MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The hdr_offset variable is only if we deal with a TCP or UDP packet, but as the check surrounding its usage tests for skb_is_gso() instead, the compiler has no idea if the variable is initialized or not at that point: drivers/net/hyperv/netvsc_drv.c: In function ‘netvsc_start_xmit’: drivers/net/hyperv/netvsc_drv.c:494:42: error: ‘hdr_offset’ may be used uninitialized in this function [-Werror=maybe-uninitialized] This adds an additional check for the transport type, which tells the compiler that this path cannot happen. Since the get_net_transport_info() function should always be inlined here, I don't expect this to result in additional runtime checks. Signed-off-by: Arnd Bergmann Signed-off-by: David S. 
Miller --- drivers/net/hyperv/netvsc_drv.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index f0919bd3a563..5d6e75a40d38 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -447,7 +447,7 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net) * Setup the sendside checksum offload only if this is not a * GSO packet. */ - if (skb_is_gso(skb)) { + if ((net_trans_info & (INFO_TCP | INFO_UDP)) && skb_is_gso(skb)) { struct ndis_tcp_lso_info *lso_info; rndis_msg_size += NDIS_LSO_PPI_SIZE; -- cgit v1.2.3-59-g8ed1b From ecf244f753e09486707843f2b7b1874f33027038 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 18 Oct 2016 00:16:15 +0200 Subject: rocker: fix maybe-uninitialized warning MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In some rare configurations, we get a warning about the 'index' variable being used without an initialization: drivers/net/ethernet/rocker/rocker_ofdpa.c: In function ‘ofdpa_port_fib_ipv4.isra.16.constprop’: drivers/net/ethernet/rocker/rocker_ofdpa.c:2425:92: warning: ‘index’ may be used uninitialized in this function [-Wmaybe-uninitialized] This is a false positive, the logic is just a bit too complex for gcc to follow here. Moving the intialization of 'index' a little further down makes it clear to gcc that the function always returns an error if it is not initialized. Signed-off-by: Arnd Bergmann Signed-off-by: David S. Miller --- drivers/net/ethernet/rocker/rocker_ofdpa.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c index 431a60804272..4ca461322d60 100644 --- a/drivers/net/ethernet/rocker/rocker_ofdpa.c +++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c @@ -1493,8 +1493,6 @@ static int ofdpa_port_ipv4_nh(struct ofdpa_port *ofdpa_port, spin_lock_irqsave(&ofdpa->neigh_tbl_lock, lock_flags); found = ofdpa_neigh_tbl_find(ofdpa, ip_addr); - if (found) - *index = found->index; updating = found && adding; removing = found && !adding; @@ -1508,9 +1506,11 @@ static int ofdpa_port_ipv4_nh(struct ofdpa_port *ofdpa_port, resolved = false; } else if (removing) { ofdpa_neigh_del(trans, found); + *index = found->index; } else if (updating) { ofdpa_neigh_update(found, trans, NULL, false); resolved = !is_zero_ether_addr(found->eth_dst); + *index = found->index; } else { err = -ENOENT; } -- cgit v1.2.3-59-g8ed1b From b4f0fd4baa90ecce798e0d26d1cce8f4457f2028 Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Mon, 17 Oct 2016 15:17:51 +0000 Subject: qed: Use list_move_tail instead of list_del/list_add_tail Using list_move_tail() instead of list_del() + list_add_tail(). Signed-off-by: Wei Yongjun Acked-by: Yuval Mintz Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qed/qed_ll2.c | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c index ce171a80bdaf..63e1a1b0ef8e 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c @@ -538,8 +538,7 @@ static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle) if (!p_pkt) break; - list_del(&p_pkt->list_entry); - list_add_tail(&p_pkt->list_entry, &p_rx->free_descq); + list_move_tail(&p_pkt->list_entry, &p_rx->free_descq); rx_buf_addr = p_pkt->rx_buf_addr; cookie = p_pkt->cookie; @@ -993,9 +992,8 @@ static void qed_ll2_post_rx_buffer_notify_fw(struct qed_hwfn *p_hwfn, p_posting_packet = list_first_entry(&p_rx->posting_descq, struct qed_ll2_rx_packet, list_entry); - list_del(&p_posting_packet->list_entry); - list_add_tail(&p_posting_packet->list_entry, - &p_rx->active_descq); + list_move_tail(&p_posting_packet->list_entry, + &p_rx->active_descq); b_notify_fw = true; } @@ -1186,8 +1184,7 @@ static void qed_ll2_tx_packet_notify(struct qed_hwfn *p_hwfn, if (!p_pkt) break; - list_del(&p_pkt->list_entry); - list_add_tail(&p_pkt->list_entry, &p_tx->active_descq); + list_move_tail(&p_pkt->list_entry, &p_tx->active_descq); } SET_FIELD(db_msg.params, CORE_DB_DATA_DEST, DB_DEST_XCM); -- cgit v1.2.3-59-g8ed1b From c164154f66f0c9b02673f07aa4f044f1d9c70274 Mon Sep 17 00:00:00 2001 From: Lorenzo Stoakes Date: Thu, 13 Oct 2016 01:20:13 +0100 Subject: mm: replace get_user_pages_unlocked() write/force parameters with gup_flags This removes the 'write' and 'force' use from get_user_pages_unlocked() and replaces them with 'gup_flags' to make the use of FOLL_FORCE explicit in callers as use of this flag can result in surprising behaviour (and hence bugs) within the mm subsystem. Signed-off-by: Lorenzo Stoakes Reviewed-by: Jan Kara Acked-by: Michal Hocko Signed-off-by: Linus Torvalds --- arch/mips/mm/gup.c | 2 +- arch/s390/mm/gup.c | 3 ++- arch/sh/mm/gup.c | 3 ++- arch/sparc/mm/gup.c | 3 ++- arch/x86/mm/gup.c | 2 +- drivers/media/pci/ivtv/ivtv-udma.c | 4 ++-- drivers/media/pci/ivtv/ivtv-yuv.c | 5 +++-- drivers/scsi/st.c | 5 ++--- drivers/video/fbdev/pvr2fb.c | 4 ++-- include/linux/mm.h | 2 +- mm/gup.c | 14 ++++---------- mm/nommu.c | 11 ++--------- mm/util.c | 3 ++- net/ceph/pagevec.c | 2 +- 14 files changed, 27 insertions(+), 36 deletions(-) (limited to 'drivers') diff --git a/arch/mips/mm/gup.c b/arch/mips/mm/gup.c index 42d124fb6474..d8c3c159289a 100644 --- a/arch/mips/mm/gup.c +++ b/arch/mips/mm/gup.c @@ -287,7 +287,7 @@ slow_irqon: pages += nr; ret = get_user_pages_unlocked(start, (end - start) >> PAGE_SHIFT, - write, 0, pages); + pages, write ? FOLL_WRITE : 0); /* Have to be a bit careful with return values */ if (nr > 0) { diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c index adb0c34bf431..18d4107e10ee 100644 --- a/arch/s390/mm/gup.c +++ b/arch/s390/mm/gup.c @@ -266,7 +266,8 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write, /* Try to get the remaining pages with get_user_pages */ start += nr << PAGE_SHIFT; pages += nr; - ret = get_user_pages_unlocked(start, nr_pages - nr, write, 0, pages); + ret = get_user_pages_unlocked(start, nr_pages - nr, pages, + write ? FOLL_WRITE : 0); /* Have to be a bit careful with return values */ if (nr > 0) ret = (ret < 0) ? 
nr : ret + nr; diff --git a/arch/sh/mm/gup.c b/arch/sh/mm/gup.c index 40fa6c8adc43..063c298ba56c 100644 --- a/arch/sh/mm/gup.c +++ b/arch/sh/mm/gup.c @@ -258,7 +258,8 @@ slow_irqon: pages += nr; ret = get_user_pages_unlocked(start, - (end - start) >> PAGE_SHIFT, write, 0, pages); + (end - start) >> PAGE_SHIFT, pages, + write ? FOLL_WRITE : 0); /* Have to be a bit careful with return values */ if (nr > 0) { diff --git a/arch/sparc/mm/gup.c b/arch/sparc/mm/gup.c index 4e06750a5d29..cd0e32bbcb1d 100644 --- a/arch/sparc/mm/gup.c +++ b/arch/sparc/mm/gup.c @@ -238,7 +238,8 @@ slow: pages += nr; ret = get_user_pages_unlocked(start, - (end - start) >> PAGE_SHIFT, write, 0, pages); + (end - start) >> PAGE_SHIFT, pages, + write ? FOLL_WRITE : 0); /* Have to be a bit careful with return values */ if (nr > 0) { diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c index b8b6a60b32cf..0d4fb3ebbbac 100644 --- a/arch/x86/mm/gup.c +++ b/arch/x86/mm/gup.c @@ -435,7 +435,7 @@ slow_irqon: ret = get_user_pages_unlocked(start, (end - start) >> PAGE_SHIFT, - write, 0, pages); + pages, write ? FOLL_WRITE : 0); /* Have to be a bit careful with return values */ if (nr > 0) { diff --git a/drivers/media/pci/ivtv/ivtv-udma.c b/drivers/media/pci/ivtv/ivtv-udma.c index 4769469fe842..2c9232ef7baa 100644 --- a/drivers/media/pci/ivtv/ivtv-udma.c +++ b/drivers/media/pci/ivtv/ivtv-udma.c @@ -124,8 +124,8 @@ int ivtv_udma_setup(struct ivtv *itv, unsigned long ivtv_dest_addr, } /* Get user pages for DMA Xfer */ - err = get_user_pages_unlocked(user_dma.uaddr, user_dma.page_count, 0, - 1, dma->map); + err = get_user_pages_unlocked(user_dma.uaddr, user_dma.page_count, + dma->map, FOLL_FORCE); if (user_dma.page_count != err) { IVTV_DEBUG_WARN("failed to map user pages, returned %d instead of %d\n", diff --git a/drivers/media/pci/ivtv/ivtv-yuv.c b/drivers/media/pci/ivtv/ivtv-yuv.c index b094054cda6e..f7299d3d8244 100644 --- a/drivers/media/pci/ivtv/ivtv-yuv.c +++ b/drivers/media/pci/ivtv/ivtv-yuv.c @@ -76,11 +76,12 @@ static int ivtv_yuv_prep_user_dma(struct ivtv *itv, struct ivtv_user_dma *dma, /* Get user pages for DMA Xfer */ y_pages = get_user_pages_unlocked(y_dma.uaddr, - y_dma.page_count, 0, 1, &dma->map[0]); + y_dma.page_count, &dma->map[0], FOLL_FORCE); uv_pages = 0; /* silence gcc. value is set and consumed only if: */ if (y_pages == y_dma.page_count) { uv_pages = get_user_pages_unlocked(uv_dma.uaddr, - uv_dma.page_count, 0, 1, &dma->map[y_pages]); + uv_dma.page_count, &dma->map[y_pages], + FOLL_FORCE); } if (y_pages != y_dma.page_count || uv_pages != uv_dma.page_count) { diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c index 7af5226aa55b..618422ea3a41 100644 --- a/drivers/scsi/st.c +++ b/drivers/scsi/st.c @@ -4922,9 +4922,8 @@ static int sgl_map_user_pages(struct st_buffer *STbp, res = get_user_pages_unlocked( uaddr, nr_pages, - rw == READ, - 0, /* don't force */ - pages); + pages, + rw == READ ? 
FOLL_WRITE : 0); /* don't force */ /* Errors and no page mapped should return here */ if (res < nr_pages) diff --git a/drivers/video/fbdev/pvr2fb.c b/drivers/video/fbdev/pvr2fb.c index 3b1ca4411073..a2564ab91e62 100644 --- a/drivers/video/fbdev/pvr2fb.c +++ b/drivers/video/fbdev/pvr2fb.c @@ -686,8 +686,8 @@ static ssize_t pvr2fb_write(struct fb_info *info, const char *buf, if (!pages) return -ENOMEM; - ret = get_user_pages_unlocked((unsigned long)buf, nr_pages, WRITE, - 0, pages); + ret = get_user_pages_unlocked((unsigned long)buf, nr_pages, pages, + FOLL_WRITE); if (ret < nr_pages) { nr_pages = ret; diff --git a/include/linux/mm.h b/include/linux/mm.h index bcdea1f4e98c..abd53f2eb74e 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1287,7 +1287,7 @@ long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm, unsigned long start, unsigned long nr_pages, struct page **pages, unsigned int gup_flags); long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages, - int write, int force, struct page **pages); + struct page **pages, unsigned int gup_flags); int get_user_pages_fast(unsigned long start, int nr_pages, int write, struct page **pages); diff --git a/mm/gup.c b/mm/gup.c index e997f545b059..373d1ec006e4 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -907,17 +907,10 @@ EXPORT_SYMBOL(__get_user_pages_unlocked); * "force" parameter). */ long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages, - int write, int force, struct page **pages) + struct page **pages, unsigned int gup_flags) { - unsigned int flags = FOLL_TOUCH; - - if (write) - flags |= FOLL_WRITE; - if (force) - flags |= FOLL_FORCE; - return __get_user_pages_unlocked(current, current->mm, start, nr_pages, - pages, flags); + pages, gup_flags | FOLL_TOUCH); } EXPORT_SYMBOL(get_user_pages_unlocked); @@ -1535,7 +1528,8 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write, start += nr << PAGE_SHIFT; pages += nr; - ret = get_user_pages_unlocked(start, nr_pages - nr, write, 0, pages); + ret = get_user_pages_unlocked(start, nr_pages - nr, pages, + write ? FOLL_WRITE : 0); /* Have to be a bit careful with return values */ if (nr > 0) { diff --git a/mm/nommu.c b/mm/nommu.c index 925dcc1fa2f3..7e27add39f7e 100644 --- a/mm/nommu.c +++ b/mm/nommu.c @@ -197,17 +197,10 @@ long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm, EXPORT_SYMBOL(__get_user_pages_unlocked); long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages, - int write, int force, struct page **pages) + struct page **pages, unsigned int gup_flags) { - unsigned int flags = 0; - - if (write) - flags |= FOLL_WRITE; - if (force) - flags |= FOLL_FORCE; - return __get_user_pages_unlocked(current, current->mm, start, nr_pages, - pages, flags); + pages, gup_flags); } EXPORT_SYMBOL(get_user_pages_unlocked); diff --git a/mm/util.c b/mm/util.c index 662cddf914af..4c685bde5ebc 100644 --- a/mm/util.c +++ b/mm/util.c @@ -283,7 +283,8 @@ EXPORT_SYMBOL_GPL(__get_user_pages_fast); int __weak get_user_pages_fast(unsigned long start, int nr_pages, int write, struct page **pages) { - return get_user_pages_unlocked(start, nr_pages, write, 0, pages); + return get_user_pages_unlocked(start, nr_pages, pages, + write ? 
FOLL_WRITE : 0); } EXPORT_SYMBOL_GPL(get_user_pages_fast); diff --git a/net/ceph/pagevec.c b/net/ceph/pagevec.c index 00d2601407c5..1a7c9a79a53c 100644 --- a/net/ceph/pagevec.c +++ b/net/ceph/pagevec.c @@ -26,7 +26,7 @@ struct page **ceph_get_direct_page_vector(const void __user *data, while (got < num_pages) { rc = get_user_pages_unlocked( (unsigned long)data + ((unsigned long)got * PAGE_SIZE), - num_pages - got, write_page, 0, pages + got); + num_pages - got, pages + got, write_page ? FOLL_WRITE : 0); if (rc < 0) break; BUG_ON(rc == 0); -- cgit v1.2.3-59-g8ed1b From 6d4952d9d9d4dc2bb9c0255d95a09405a1e958f7 Mon Sep 17 00:00:00 2001 From: Andrew Lutomirski Date: Mon, 17 Oct 2016 10:06:27 -0700 Subject: hwrng: core - Don't use a stack buffer in add_early_randomness() hw_random carefully avoids using a stack buffer except in add_early_randomness(). This causes a crash in virtio_rng if CONFIG_VMAP_STACK=y. Reported-by: Matt Mullins Tested-by: Matt Mullins Fixes: d3cc7996473a ("hwrng: fetch randomness only after device init") Signed-off-by: Andy Lutomirski Signed-off-by: Herbert Xu --- drivers/char/hw_random/core.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c index 482794526e8c..d2d2c89de5b4 100644 --- a/drivers/char/hw_random/core.c +++ b/drivers/char/hw_random/core.c @@ -84,14 +84,14 @@ static size_t rng_buffer_size(void) static void add_early_randomness(struct hwrng *rng) { - unsigned char bytes[16]; int bytes_read; + size_t size = min_t(size_t, 16, rng_buffer_size()); mutex_lock(&reading_mutex); - bytes_read = rng_get_data(rng, bytes, sizeof(bytes), 1); + bytes_read = rng_get_data(rng, rng_buffer, size, 1); mutex_unlock(&reading_mutex); if (bytes_read > 0) - add_device_randomness(bytes, bytes_read); + add_device_randomness(rng_buffer, bytes_read); } static inline void cleanup_rng(struct kref *kref) -- cgit v1.2.3-59-g8ed1b From 70b565bbdb911023373e035225ab10077e4ab937 Mon Sep 17 00:00:00 2001 From: Vaibhav Jain Date: Fri, 14 Oct 2016 15:08:36 +0530 Subject: cxl: Prevent adapter reset if an active context exists This patch prevents resetting the cxl adapter via sysfs in presence of one or more active cxl_context on it. This protects against an unrecoverable error caused by PSL owning a dirty cache line even after reset and host tries to touch the same cache line. In case a force reset of the card is required irrespective of any active contexts, the int value -1 can be stored in the 'reset' sysfs attribute of the card. The patch introduces a new atomic_t member named contexts_num inside struct cxl that holds the number of active context attached to the card , which is checked against '0' before proceeding with the reset. To prevent against a race condition where a context is activated just after reset check is performed, the contexts_num is atomically set to '-1' after reset-check to indicate that no more contexts can be activated on the card anymore. Before activating a context we atomically test if contexts_num is non-negative and if so, increment its value by one. In case the value of contexts_num is negative then it indicates that the card is about to be reset and context activation is error-ed out at that point. 
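For clarity, the counter protocol described above can be summarised by the following illustrative sketch. It mirrors the cxl_adapter_context_*() helpers added in drivers/misc/cxl/main.c further down; it is simplified (warnings and the struct cxl wrapper omitted) and is not part of the patch itself:

        #include <linux/atomic.h>
        #include <linux/errno.h>

        /* Attach path: succeeds only while contexts_num is non-negative. */
        static int adapter_context_get(atomic_t *contexts_num)
        {
                /* counter is -1: adapter is locked for (or by) a reset */
                if (!atomic_inc_unless_negative(contexts_num))
                        return -EBUSY;
                return 0;
        }

        /* Detach path: drop the count, never going below zero. */
        static void adapter_context_put(atomic_t *contexts_num)
        {
                atomic_dec_if_positive(contexts_num);
        }

        /* Reset path: move 0 -> -1 (lock out new contexts) only if none are attached. */
        static int adapter_context_lock(atomic_t *contexts_num)
        {
                return atomic_cmpxchg(contexts_num, 0, -1) ? -EBUSY : 0;
        }

        /* Once the reset (or initial configuration) is done, allow attaches again. */
        static void adapter_context_unlock(atomic_t *contexts_num)
        {
                atomic_set(contexts_num, 0);
        }

The real helpers operate on struct cxl and additionally warn when unlock is called without a prior lock; see the diff below.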
Fixes: 62fa19d4b4fd ("cxl: Add ability to reset the card") Cc: stable@vger.kernel.org # v4.0+ Acked-by: Frederic Barrat Reviewed-by: Andrew Donnellan Signed-off-by: Vaibhav Jain Signed-off-by: Michael Ellerman --- Documentation/ABI/testing/sysfs-class-cxl | 7 ++++-- drivers/misc/cxl/api.c | 9 +++++++ drivers/misc/cxl/context.c | 3 +++ drivers/misc/cxl/cxl.h | 24 ++++++++++++++++++ drivers/misc/cxl/file.c | 11 ++++++++ drivers/misc/cxl/guest.c | 3 +++ drivers/misc/cxl/main.c | 42 ++++++++++++++++++++++++++++++- drivers/misc/cxl/pci.c | 2 ++ drivers/misc/cxl/sysfs.c | 27 +++++++++++++++++--- 9 files changed, 121 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/Documentation/ABI/testing/sysfs-class-cxl b/Documentation/ABI/testing/sysfs-class-cxl index 4ba0a2a61926..640f65e79ef1 100644 --- a/Documentation/ABI/testing/sysfs-class-cxl +++ b/Documentation/ABI/testing/sysfs-class-cxl @@ -220,8 +220,11 @@ What: /sys/class/cxl//reset Date: October 2014 Contact: linuxppc-dev@lists.ozlabs.org Description: write only - Writing 1 will issue a PERST to card which may cause the card - to reload the FPGA depending on load_image_on_perst. + Writing 1 will issue a PERST to card provided there are no + contexts active on any one of the card AFUs. This may cause + the card to reload the FPGA depending on load_image_on_perst. + Writing -1 will do a force PERST irrespective of any active + contexts on the card AFUs. Users: https://github.com/ibm-capi/libcxl What: /sys/class/cxl//perst_reloads_same_image (not in a guest) diff --git a/drivers/misc/cxl/api.c b/drivers/misc/cxl/api.c index f3d34b941f85..af23d7dfe752 100644 --- a/drivers/misc/cxl/api.c +++ b/drivers/misc/cxl/api.c @@ -229,6 +229,14 @@ int cxl_start_context(struct cxl_context *ctx, u64 wed, if (ctx->status == STARTED) goto out; /* already started */ + /* + * Increment the mapped context count for adapter. This also checks + * if adapter_context_lock is taken. + */ + rc = cxl_adapter_context_get(ctx->afu->adapter); + if (rc) + goto out; + if (task) { ctx->pid = get_task_pid(task, PIDTYPE_PID); ctx->glpid = get_task_pid(task->group_leader, PIDTYPE_PID); @@ -240,6 +248,7 @@ int cxl_start_context(struct cxl_context *ctx, u64 wed, if ((rc = cxl_ops->attach_process(ctx, kernel, wed, 0))) { put_pid(ctx->pid); + cxl_adapter_context_put(ctx->afu->adapter); cxl_ctx_put(); goto out; } diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c index c466ee2b0c97..5e506c19108a 100644 --- a/drivers/misc/cxl/context.c +++ b/drivers/misc/cxl/context.c @@ -238,6 +238,9 @@ int __detach_context(struct cxl_context *ctx) put_pid(ctx->glpid); cxl_ctx_put(); + + /* Decrease the attached context count on the adapter */ + cxl_adapter_context_put(ctx->afu->adapter); return 0; } diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h index 01d372aba131..a144073593fa 100644 --- a/drivers/misc/cxl/cxl.h +++ b/drivers/misc/cxl/cxl.h @@ -618,6 +618,14 @@ struct cxl { bool perst_select_user; bool perst_same_image; bool psl_timebase_synced; + + /* + * number of contexts mapped on to this card. Possible values are: + * >0: Number of contexts mapped and new one can be mapped. + * 0: No active contexts and new ones can be mapped. + * -1: No contexts mapped and new ones cannot be mapped. 
+ */ + atomic_t contexts_num; }; int cxl_pci_alloc_one_irq(struct cxl *adapter); @@ -944,4 +952,20 @@ bool cxl_pci_is_vphb_device(struct pci_dev *dev); /* decode AFU error bits in the PSL register PSL_SERR_An */ void cxl_afu_decode_psl_serr(struct cxl_afu *afu, u64 serr); + +/* + * Increments the number of attached contexts on an adapter. + * In case an adapter_context_lock is taken the return -EBUSY. + */ +int cxl_adapter_context_get(struct cxl *adapter); + +/* Decrements the number of attached contexts on an adapter */ +void cxl_adapter_context_put(struct cxl *adapter); + +/* If no active contexts then prevents contexts from being attached */ +int cxl_adapter_context_lock(struct cxl *adapter); + +/* Unlock the contexts-lock if taken. Warn and force unlock otherwise */ +void cxl_adapter_context_unlock(struct cxl *adapter); + #endif diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c index 5fb9894b157f..d0b421f49b39 100644 --- a/drivers/misc/cxl/file.c +++ b/drivers/misc/cxl/file.c @@ -205,11 +205,22 @@ static long afu_ioctl_start_work(struct cxl_context *ctx, ctx->pid = get_task_pid(current, PIDTYPE_PID); ctx->glpid = get_task_pid(current->group_leader, PIDTYPE_PID); + /* + * Increment the mapped context count for adapter. This also checks + * if adapter_context_lock is taken. + */ + rc = cxl_adapter_context_get(ctx->afu->adapter); + if (rc) { + afu_release_irqs(ctx, ctx); + goto out; + } + trace_cxl_attach(ctx, work.work_element_descriptor, work.num_interrupts, amr); if ((rc = cxl_ops->attach_process(ctx, false, work.work_element_descriptor, amr))) { afu_release_irqs(ctx, ctx); + cxl_adapter_context_put(ctx->afu->adapter); goto out; } diff --git a/drivers/misc/cxl/guest.c b/drivers/misc/cxl/guest.c index 9aa58a77a24d..3e102cd6ed91 100644 --- a/drivers/misc/cxl/guest.c +++ b/drivers/misc/cxl/guest.c @@ -1152,6 +1152,9 @@ struct cxl *cxl_guest_init_adapter(struct device_node *np, struct platform_devic if ((rc = cxl_sysfs_adapter_add(adapter))) goto err_put1; + /* release the context lock as the adapter is configured */ + cxl_adapter_context_unlock(adapter); + return adapter; err_put1: diff --git a/drivers/misc/cxl/main.c b/drivers/misc/cxl/main.c index d9be23b24aa3..62e0dfb5f15b 100644 --- a/drivers/misc/cxl/main.c +++ b/drivers/misc/cxl/main.c @@ -243,8 +243,10 @@ struct cxl *cxl_alloc_adapter(void) if (dev_set_name(&adapter->dev, "card%i", adapter->adapter_num)) goto err2; - return adapter; + /* start with context lock taken */ + atomic_set(&adapter->contexts_num, -1); + return adapter; err2: cxl_remove_adapter_nr(adapter); err1: @@ -286,6 +288,44 @@ int cxl_afu_select_best_mode(struct cxl_afu *afu) return 0; } +int cxl_adapter_context_get(struct cxl *adapter) +{ + int rc; + + rc = atomic_inc_unless_negative(&adapter->contexts_num); + return rc >= 0 ? 0 : -EBUSY; +} + +void cxl_adapter_context_put(struct cxl *adapter) +{ + atomic_dec_if_positive(&adapter->contexts_num); +} + +int cxl_adapter_context_lock(struct cxl *adapter) +{ + int rc; + /* no active contexts -> contexts_num == 0 */ + rc = atomic_cmpxchg(&adapter->contexts_num, 0, -1); + return rc ? -EBUSY : 0; +} + +void cxl_adapter_context_unlock(struct cxl *adapter) +{ + int val = atomic_cmpxchg(&adapter->contexts_num, -1, 0); + + /* + * contexts lock taken -> contexts_num == -1 + * If not true then show a warning and force reset the lock. + * This will happen when context_unlock was requested without + * doing a context_lock. 
+ */ + if (val != -1) { + atomic_set(&adapter->contexts_num, 0); + WARN(1, "Adapter context unlocked with %d active contexts", + val); + } +} + static int __init init_cxl(void) { int rc = 0; diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c index 7afad8477ad5..e96be9ca4e60 100644 --- a/drivers/misc/cxl/pci.c +++ b/drivers/misc/cxl/pci.c @@ -1487,6 +1487,8 @@ static int cxl_configure_adapter(struct cxl *adapter, struct pci_dev *dev) if ((rc = cxl_native_register_psl_err_irq(adapter))) goto err; + /* Release the context lock as adapter is configured */ + cxl_adapter_context_unlock(adapter); return 0; err: diff --git a/drivers/misc/cxl/sysfs.c b/drivers/misc/cxl/sysfs.c index b043c20f158f..a8b6d6a635e9 100644 --- a/drivers/misc/cxl/sysfs.c +++ b/drivers/misc/cxl/sysfs.c @@ -75,12 +75,31 @@ static ssize_t reset_adapter_store(struct device *device, int val; rc = sscanf(buf, "%i", &val); - if ((rc != 1) || (val != 1)) + if ((rc != 1) || (val != 1 && val != -1)) return -EINVAL; - if ((rc = cxl_ops->adapter_reset(adapter))) - return rc; - return count; + /* + * See if we can lock the context mapping that's only allowed + * when there are no contexts attached to the adapter. Once + * taken this will also prevent any context from getting activated. + */ + if (val == 1) { + rc = cxl_adapter_context_lock(adapter); + if (rc) + goto out; + + rc = cxl_ops->adapter_reset(adapter); + /* In case reset failed release context lock */ + if (rc) + cxl_adapter_context_unlock(adapter); + + } else if (val == -1) { + /* Perform a forced adapter reset */ + rc = cxl_ops->adapter_reset(adapter); + } + +out: + return rc ? rc : count; } static ssize_t load_image_on_perst_show(struct device *device, -- cgit v1.2.3-59-g8ed1b From dd1dafcdf071d94cb53917612c66a169ce741461 Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Mon, 17 Oct 2016 14:26:57 +0000 Subject: irqchip/eznps: Drop pointless static qualifier in nps400_of_init() There is no need to have the 'struct irq_domain *nps400_root_domain' variable static since new value is always assigned before use. 
Fixes: 44df427c894a ("irqchip: add nps Internal and external irqchips") Signed-off-by: Wei Yongjun Cc: Marc Zyngier Cc: Vineet Gupta Cc: Jason Cooper Link: http://lkml.kernel.org/r/1476714417-12095-1-git-send-email-weiyj.lk@gmail.com Signed-off-by: Thomas Gleixner --- drivers/irqchip/irq-eznps.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/irqchip/irq-eznps.c b/drivers/irqchip/irq-eznps.c index ebc2b0b15f67..2a7a38830a8d 100644 --- a/drivers/irqchip/irq-eznps.c +++ b/drivers/irqchip/irq-eznps.c @@ -135,7 +135,7 @@ static const struct irq_domain_ops nps400_irq_ops = { static int __init nps400_of_init(struct device_node *node, struct device_node *parent) { - static struct irq_domain *nps400_root_domain; + struct irq_domain *nps400_root_domain; if (parent) { pr_err("DeviceTree incore ic not a root irq controller\n"); -- cgit v1.2.3-59-g8ed1b From b0dddf6c147e6fe61374d625c4bb2b7c52018639 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Tue, 18 Oct 2016 16:53:11 +0100 Subject: efi/arm: Fix absolute relocation detection for older toolchains When building the ARM kernel with CONFIG_EFI=y, the following build error may occur when using a less recent version of binutils (2.23 or older): STUBCPY drivers/firmware/efi/libstub/lib-sort.stub.o 00000000 R_ARM_ABS32 sort 00000004 R_ARM_ABS32 __ksymtab_strings drivers/firmware/efi/libstub/lib-sort.stub.o: absolute symbol references not allowed in the EFI stub (and when building with debug symbols, the list above is much longer, and contains all the internal references between the .debug sections and the actual code) This issue is caused by the fact that objcopy v2.23 or earlier does not support wildcards in its -R and -j options, which means the following line from the Makefile: STUBCOPY_FLAGS-y := -R .debug* -R *ksymtab* -R *kcrctab* fails to take effect, leaving harmless absolute relocations in the binary that are indistinguishable from relocations that may cause crashes at runtime due to the fact that these relocations are resolved at link time using the virtual address of the kernel, which is always different from the address at which the EFI firmware loads and invokes the stub. So, as a workaround, disable debug symbols explicitly when building the stub for ARM, and strip the ksymtab and kcrctab symbols for the only exported symbol we currently reuse in the stub, which is 'sort'. Tested-by: Jon Hunter Signed-off-by: Ard Biesheuvel Reviewed-by: Matt Fleming Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-efi@vger.kernel.org Link: http://lkml.kernel.org/r/1476805991-7160-2-git-send-email-ard.biesheuvel@linaro.org Signed-off-by: Ingo Molnar --- drivers/firmware/efi/libstub/Makefile | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile index c06945160a41..5e23e2d305e7 100644 --- a/drivers/firmware/efi/libstub/Makefile +++ b/drivers/firmware/efi/libstub/Makefile @@ -11,7 +11,7 @@ cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2 \ -mno-mmx -mno-sse cflags-$(CONFIG_ARM64) := $(subst -pg,,$(KBUILD_CFLAGS)) -cflags-$(CONFIG_ARM) := $(subst -pg,,$(KBUILD_CFLAGS)) \ +cflags-$(CONFIG_ARM) := $(subst -pg,,$(KBUILD_CFLAGS)) -g0 \ -fno-builtin -fpic -mno-single-pic-base cflags-$(CONFIG_EFI_ARMSTUB) += -I$(srctree)/scripts/dtc/libfdt @@ -79,5 +79,6 @@ quiet_cmd_stubcopy = STUBCPY $@ # decompressor. 
So move our .data to .data.efistub, which is preserved # explicitly by the decompressor linker script. # -STUBCOPY_FLAGS-$(CONFIG_ARM) += --rename-section .data=.data.efistub +STUBCOPY_FLAGS-$(CONFIG_ARM) += --rename-section .data=.data.efistub \ + -R ___ksymtab+sort -R ___kcrctab+sort STUBCOPY_RELOC-$(CONFIG_ARM) := R_ARM_ABS -- cgit v1.2.3-59-g8ed1b From 1ee1710cd6bbf49853688e97d6e1d98b48f28586 Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Wed, 19 Oct 2016 13:19:02 +0000 Subject: wusb: fix error return code in wusb_prf() Fix to return error code -ENOMEM from the kmalloc() error handling case instead of 0, as done elsewhere in this function. Fixes: a19b882c07a6 ("wusb: Stop using the stack for sg crypto scratch space") Signed-off-by: Wei Yongjun Signed-off-by: Greg Kroah-Hartman --- drivers/usb/wusbcore/crypto.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/usb/wusbcore/crypto.c b/drivers/usb/wusbcore/crypto.c index de089f3a82f3..79451f7ef1b7 100644 --- a/drivers/usb/wusbcore/crypto.c +++ b/drivers/usb/wusbcore/crypto.c @@ -339,8 +339,10 @@ ssize_t wusb_prf(void *out, size_t out_size, goto error_setkey_aes; } scratch = kmalloc(sizeof(*scratch), GFP_KERNEL); - if (!scratch) + if (!scratch) { + result = -ENOMEM; goto error_alloc_scratch; + } for (bitr = 0; bitr < (len + 63) / 64; bitr++) { sfn_le = cpu_to_le64(sfn++); -- cgit v1.2.3-59-g8ed1b From 7f23b3504a0df63b724180262c5f3f117f21bcae Mon Sep 17 00:00:00 2001 From: Lorenzo Stoakes Date: Thu, 13 Oct 2016 01:20:15 +0100 Subject: mm: replace get_vaddr_frames() write/force parameters with gup_flags This removes the 'write' and 'force' from get_vaddr_frames() and replaces them with 'gup_flags' to make the use of FOLL_FORCE explicit in callers as use of this flag can result in surprising behaviour (and hence bugs) within the mm subsystem. 
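For call sites, the conversion is mechanical: each (write, force) boolean pair becomes a gup_flags mask. A hedged sketch of the mapping is shown below (the helper name is invented for illustration; the actual call-site updates are in the diff that follows and recur in the follow-up get_user_pages*() patches):

        #include <linux/mm.h>

        /* Illustration only: build the new gup_flags argument from the old booleans. */
        static unsigned int gup_flags_from_bools(bool write, bool force)
        {
                unsigned int gup_flags = 0;

                if (write)
                        gup_flags |= FOLL_WRITE;
                if (force)
                        gup_flags |= FOLL_FORCE;

                return gup_flags;
        }

So a caller that previously passed write = true, force = true now passes FOLL_WRITE | FOLL_FORCE explicitly, as the exynos g2d and videobuf2 changes below do.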
Signed-off-by: Lorenzo Stoakes Acked-by: Michal Hocko Reviewed-by: Jan Kara Signed-off-by: Linus Torvalds --- drivers/gpu/drm/exynos/exynos_drm_g2d.c | 3 ++- drivers/media/platform/omap/omap_vout.c | 2 +- drivers/media/v4l2-core/videobuf2-memops.c | 6 +++++- include/linux/mm.h | 2 +- mm/frame_vector.c | 13 ++----------- 5 files changed, 11 insertions(+), 15 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c index aa92decf4233..fbd13fabdf2d 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c +++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c @@ -488,7 +488,8 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev, goto err_free; } - ret = get_vaddr_frames(start, npages, true, true, g2d_userptr->vec); + ret = get_vaddr_frames(start, npages, FOLL_FORCE | FOLL_WRITE, + g2d_userptr->vec); if (ret != npages) { DRM_ERROR("failed to get user pages from userptr.\n"); if (ret < 0) diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c index e668dde6d857..a31b95cb3b09 100644 --- a/drivers/media/platform/omap/omap_vout.c +++ b/drivers/media/platform/omap/omap_vout.c @@ -214,7 +214,7 @@ static int omap_vout_get_userptr(struct videobuf_buffer *vb, u32 virtp, if (!vec) return -ENOMEM; - ret = get_vaddr_frames(virtp, 1, true, false, vec); + ret = get_vaddr_frames(virtp, 1, FOLL_WRITE, vec); if (ret != 1) { frame_vector_destroy(vec); return -EINVAL; diff --git a/drivers/media/v4l2-core/videobuf2-memops.c b/drivers/media/v4l2-core/videobuf2-memops.c index 3c3b517f1d1c..1cd322e939c7 100644 --- a/drivers/media/v4l2-core/videobuf2-memops.c +++ b/drivers/media/v4l2-core/videobuf2-memops.c @@ -42,6 +42,10 @@ struct frame_vector *vb2_create_framevec(unsigned long start, unsigned long first, last; unsigned long nr; struct frame_vector *vec; + unsigned int flags = FOLL_FORCE; + + if (write) + flags |= FOLL_WRITE; first = start >> PAGE_SHIFT; last = (start + length - 1) >> PAGE_SHIFT; @@ -49,7 +53,7 @@ struct frame_vector *vb2_create_framevec(unsigned long start, vec = frame_vector_create(nr); if (!vec) return ERR_PTR(-ENOMEM); - ret = get_vaddr_frames(start & PAGE_MASK, nr, write, true, vec); + ret = get_vaddr_frames(start & PAGE_MASK, nr, flags, vec); if (ret < 0) goto out_destroy; /* We accept only complete set of PFNs */ diff --git a/include/linux/mm.h b/include/linux/mm.h index 9fe9b0438169..91cc923ce985 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1305,7 +1305,7 @@ struct frame_vector { struct frame_vector *frame_vector_create(unsigned int nr_frames); void frame_vector_destroy(struct frame_vector *vec); int get_vaddr_frames(unsigned long start, unsigned int nr_pfns, - bool write, bool force, struct frame_vector *vec); + unsigned int gup_flags, struct frame_vector *vec); void put_vaddr_frames(struct frame_vector *vec); int frame_vector_to_pages(struct frame_vector *vec); void frame_vector_to_pfns(struct frame_vector *vec); diff --git a/mm/frame_vector.c b/mm/frame_vector.c index 81b67498bb9c..db77dcb38afd 100644 --- a/mm/frame_vector.c +++ b/mm/frame_vector.c @@ -11,10 +11,7 @@ * get_vaddr_frames() - map virtual addresses to pfns * @start: starting user address * @nr_frames: number of pages / pfns from start to map - * @write: whether pages will be written to by the caller - * @force: whether to force write access even if user mapping is - * readonly. See description of the same argument of - get_user_pages(). 
+ * @gup_flags: flags modifying lookup behaviour * @vec: structure which receives pages / pfns of the addresses mapped. * It should have space for at least nr_frames entries. * @@ -34,23 +31,17 @@ * This function takes care of grabbing mmap_sem as necessary. */ int get_vaddr_frames(unsigned long start, unsigned int nr_frames, - bool write, bool force, struct frame_vector *vec) + unsigned int gup_flags, struct frame_vector *vec) { struct mm_struct *mm = current->mm; struct vm_area_struct *vma; int ret = 0; int err; int locked; - unsigned int gup_flags = 0; if (nr_frames == 0) return 0; - if (write) - gup_flags |= FOLL_WRITE; - if (force) - gup_flags |= FOLL_FORCE; - if (WARN_ON_ONCE(nr_frames > vec->nr_allocated)) nr_frames = vec->nr_allocated; -- cgit v1.2.3-59-g8ed1b From 768ae309a96103ed02eb1e111e838c87854d8b51 Mon Sep 17 00:00:00 2001 From: Lorenzo Stoakes Date: Thu, 13 Oct 2016 01:20:16 +0100 Subject: mm: replace get_user_pages() write/force parameters with gup_flags MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This removes the 'write' and 'force' from get_user_pages() and replaces them with 'gup_flags' to make the use of FOLL_FORCE explicit in callers as use of this flag can result in surprising behaviour (and hence bugs) within the mm subsystem. Signed-off-by: Lorenzo Stoakes Acked-by: Christian König Acked-by: Jesper Nilsson Acked-by: Michal Hocko Reviewed-by: Jan Kara Signed-off-by: Linus Torvalds --- arch/cris/arch-v32/drivers/cryptocop.c | 4 +--- arch/ia64/kernel/err_inject.c | 2 +- arch/x86/mm/mpx.c | 5 ++--- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 7 +++++-- drivers/gpu/drm/radeon/radeon_ttm.c | 3 ++- drivers/gpu/drm/via/via_dmablit.c | 4 ++-- drivers/infiniband/core/umem.c | 6 +++++- drivers/infiniband/hw/mthca/mthca_memfree.c | 2 +- drivers/infiniband/hw/qib/qib_user_pages.c | 3 ++- drivers/infiniband/hw/usnic/usnic_uiom.c | 5 ++++- drivers/media/v4l2-core/videobuf-dma-sg.c | 7 +++++-- drivers/misc/mic/scif/scif_rma.c | 3 +-- drivers/misc/sgi-gru/grufault.c | 2 +- drivers/platform/goldfish/goldfish_pipe.c | 3 ++- drivers/rapidio/devices/rio_mport_cdev.c | 3 ++- .../vc04_services/interface/vchiq_arm/vchiq_2835_arm.c | 3 +-- .../vc04_services/interface/vchiq_arm/vchiq_arm.c | 3 +-- drivers/virt/fsl_hypervisor.c | 4 ++-- include/linux/mm.h | 2 +- mm/gup.c | 12 +++--------- mm/mempolicy.c | 2 +- mm/nommu.c | 18 ++++-------------- 22 files changed, 49 insertions(+), 54 deletions(-) (limited to 'drivers') diff --git a/arch/cris/arch-v32/drivers/cryptocop.c b/arch/cris/arch-v32/drivers/cryptocop.c index b5698c876fcc..099e170a93ee 100644 --- a/arch/cris/arch-v32/drivers/cryptocop.c +++ b/arch/cris/arch-v32/drivers/cryptocop.c @@ -2722,7 +2722,6 @@ static int cryptocop_ioctl_process(struct inode *inode, struct file *filp, unsig err = get_user_pages((unsigned long int)(oper.indata + prev_ix), noinpages, 0, /* read access only for in data */ - 0, /* no force */ inpages, NULL); @@ -2736,8 +2735,7 @@ static int cryptocop_ioctl_process(struct inode *inode, struct file *filp, unsig if (oper.do_cipher){ err = get_user_pages((unsigned long int)oper.cipher_outdata, nooutpages, - 1, /* write access for out data */ - 0, /* no force */ + FOLL_WRITE, /* write access for out data */ outpages, NULL); up_read(¤t->mm->mmap_sem); diff --git a/arch/ia64/kernel/err_inject.c b/arch/ia64/kernel/err_inject.c index 09f845793d12..5ed0ea92c5bf 100644 --- a/arch/ia64/kernel/err_inject.c +++ b/arch/ia64/kernel/err_inject.c @@ -142,7 +142,7 @@ 
store_virtual_to_phys(struct device *dev, struct device_attribute *attr, u64 virt_addr=simple_strtoull(buf, NULL, 16); int ret; - ret = get_user_pages(virt_addr, 1, VM_READ, 0, NULL, NULL); + ret = get_user_pages(virt_addr, 1, FOLL_WRITE, NULL, NULL); if (ret<=0) { #ifdef ERR_INJ_DEBUG printk("Virtual address %lx is not existing.\n",virt_addr); diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c index 80476878eb4c..e4f800999b32 100644 --- a/arch/x86/mm/mpx.c +++ b/arch/x86/mm/mpx.c @@ -544,10 +544,9 @@ static int mpx_resolve_fault(long __user *addr, int write) { long gup_ret; int nr_pages = 1; - int force = 0; - gup_ret = get_user_pages((unsigned long)addr, nr_pages, write, - force, NULL, NULL); + gup_ret = get_user_pages((unsigned long)addr, nr_pages, + write ? FOLL_WRITE : 0, NULL, NULL); /* * get_user_pages() returns number of pages gotten. * 0 means we failed to fault in and get anything, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 887483b8b818..dcaf691f56b5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -555,10 +555,13 @@ struct amdgpu_ttm_tt { int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages) { struct amdgpu_ttm_tt *gtt = (void *)ttm; - int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY); + unsigned int flags = 0; unsigned pinned = 0; int r; + if (!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY)) + flags |= FOLL_WRITE; + if (gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) { /* check that we only use anonymous memory to prevent problems with writeback */ @@ -581,7 +584,7 @@ int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages) list_add(&guptask.list, >t->guptasks); spin_unlock(>t->guptasklock); - r = get_user_pages(userptr, num_pages, write, 0, p, NULL); + r = get_user_pages(userptr, num_pages, flags, p, NULL); spin_lock(>t->guptasklock); list_del(&guptask.list); diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 455268214b89..3de5e6e21662 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -566,7 +566,8 @@ static int radeon_ttm_tt_pin_userptr(struct ttm_tt *ttm) uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE; struct page **pages = ttm->pages + pinned; - r = get_user_pages(userptr, num_pages, write, 0, pages, NULL); + r = get_user_pages(userptr, num_pages, write ? FOLL_WRITE : 0, + pages, NULL); if (r < 0) goto release_pages; diff --git a/drivers/gpu/drm/via/via_dmablit.c b/drivers/gpu/drm/via/via_dmablit.c index 7e2a12c4fed2..1a3ad769f8c8 100644 --- a/drivers/gpu/drm/via/via_dmablit.c +++ b/drivers/gpu/drm/via/via_dmablit.c @@ -241,8 +241,8 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer) down_read(¤t->mm->mmap_sem); ret = get_user_pages((unsigned long)xfer->mem_addr, vsg->num_pages, - (vsg->direction == DMA_FROM_DEVICE), - 0, vsg->pages, NULL); + (vsg->direction == DMA_FROM_DEVICE) ? 
FOLL_WRITE : 0, + vsg->pages, NULL); up_read(¤t->mm->mmap_sem); if (ret != vsg->num_pages) { diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c index c68746ce6624..224ad274ea0b 100644 --- a/drivers/infiniband/core/umem.c +++ b/drivers/infiniband/core/umem.c @@ -94,6 +94,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr, unsigned long dma_attrs = 0; struct scatterlist *sg, *sg_list_start; int need_release = 0; + unsigned int gup_flags = FOLL_WRITE; if (dmasync) dma_attrs |= DMA_ATTR_WRITE_BARRIER; @@ -183,6 +184,9 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr, if (ret) goto out; + if (!umem->writable) + gup_flags |= FOLL_FORCE; + need_release = 1; sg_list_start = umem->sg_head.sgl; @@ -190,7 +194,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr, ret = get_user_pages(cur_base, min_t(unsigned long, npages, PAGE_SIZE / sizeof (struct page *)), - 1, !umem->writable, page_list, vma_list); + gup_flags, page_list, vma_list); if (ret < 0) goto out; diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.c b/drivers/infiniband/hw/mthca/mthca_memfree.c index 6c00d04b8b28..c6fe89d79248 100644 --- a/drivers/infiniband/hw/mthca/mthca_memfree.c +++ b/drivers/infiniband/hw/mthca/mthca_memfree.c @@ -472,7 +472,7 @@ int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar, goto out; } - ret = get_user_pages(uaddr & PAGE_MASK, 1, 1, 0, pages, NULL); + ret = get_user_pages(uaddr & PAGE_MASK, 1, FOLL_WRITE, pages, NULL); if (ret < 0) goto out; diff --git a/drivers/infiniband/hw/qib/qib_user_pages.c b/drivers/infiniband/hw/qib/qib_user_pages.c index 2d2b94fd3633..75f08624ac05 100644 --- a/drivers/infiniband/hw/qib/qib_user_pages.c +++ b/drivers/infiniband/hw/qib/qib_user_pages.c @@ -67,7 +67,8 @@ static int __qib_get_user_pages(unsigned long start_page, size_t num_pages, for (got = 0; got < num_pages; got += ret) { ret = get_user_pages(start_page + got * PAGE_SIZE, - num_pages - got, 1, 1, + num_pages - got, + FOLL_WRITE | FOLL_FORCE, p + got, NULL); if (ret < 0) goto bail_release; diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.c b/drivers/infiniband/hw/usnic/usnic_uiom.c index a0b6ebee4d8a..1ccee6ea5bc3 100644 --- a/drivers/infiniband/hw/usnic/usnic_uiom.c +++ b/drivers/infiniband/hw/usnic/usnic_uiom.c @@ -111,6 +111,7 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable, int i; int flags; dma_addr_t pa; + unsigned int gup_flags; if (!can_do_mlock()) return -EPERM; @@ -135,6 +136,8 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable, flags = IOMMU_READ | IOMMU_CACHE; flags |= (writable) ? IOMMU_WRITE : 0; + gup_flags = FOLL_WRITE; + gup_flags |= (writable) ? 
0 : FOLL_FORCE; cur_base = addr & PAGE_MASK; ret = 0; @@ -142,7 +145,7 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable, ret = get_user_pages(cur_base, min_t(unsigned long, npages, PAGE_SIZE / sizeof(struct page *)), - 1, !writable, page_list, NULL); + gup_flags, page_list, NULL); if (ret < 0) goto out; diff --git a/drivers/media/v4l2-core/videobuf-dma-sg.c b/drivers/media/v4l2-core/videobuf-dma-sg.c index f300f060b3f3..1db0af6c7f94 100644 --- a/drivers/media/v4l2-core/videobuf-dma-sg.c +++ b/drivers/media/v4l2-core/videobuf-dma-sg.c @@ -156,6 +156,7 @@ static int videobuf_dma_init_user_locked(struct videobuf_dmabuf *dma, { unsigned long first, last; int err, rw = 0; + unsigned int flags = FOLL_FORCE; dma->direction = direction; switch (dma->direction) { @@ -178,12 +179,14 @@ static int videobuf_dma_init_user_locked(struct videobuf_dmabuf *dma, if (NULL == dma->pages) return -ENOMEM; + if (rw == READ) + flags |= FOLL_WRITE; + dprintk(1, "init user [0x%lx+0x%lx => %d pages]\n", data, size, dma->nr_pages); err = get_user_pages(data & PAGE_MASK, dma->nr_pages, - rw == READ, 1, /* force */ - dma->pages, NULL); + flags, dma->pages, NULL); if (err != dma->nr_pages) { dma->nr_pages = (err >= 0) ? err : 0; diff --git a/drivers/misc/mic/scif/scif_rma.c b/drivers/misc/mic/scif/scif_rma.c index e0203b1a20fd..f806a4471eb9 100644 --- a/drivers/misc/mic/scif/scif_rma.c +++ b/drivers/misc/mic/scif/scif_rma.c @@ -1396,8 +1396,7 @@ retry: pinned_pages->nr_pages = get_user_pages( (u64)addr, nr_pages, - !!(prot & SCIF_PROT_WRITE), - 0, + (prot & SCIF_PROT_WRITE) ? FOLL_WRITE : 0, pinned_pages->pages, NULL); up_write(&mm->mmap_sem); diff --git a/drivers/misc/sgi-gru/grufault.c b/drivers/misc/sgi-gru/grufault.c index a2d97b9b17e3..6fb773dbcd0c 100644 --- a/drivers/misc/sgi-gru/grufault.c +++ b/drivers/misc/sgi-gru/grufault.c @@ -198,7 +198,7 @@ static int non_atomic_pte_lookup(struct vm_area_struct *vma, #else *pageshift = PAGE_SHIFT; #endif - if (get_user_pages(vaddr, 1, write, 0, &page, NULL) <= 0) + if (get_user_pages(vaddr, 1, write ? FOLL_WRITE : 0, &page, NULL) <= 0) return -EFAULT; *paddr = page_to_phys(page); put_page(page); diff --git a/drivers/platform/goldfish/goldfish_pipe.c b/drivers/platform/goldfish/goldfish_pipe.c index 07462d79d040..1aba2c74160e 100644 --- a/drivers/platform/goldfish/goldfish_pipe.c +++ b/drivers/platform/goldfish/goldfish_pipe.c @@ -309,7 +309,8 @@ static ssize_t goldfish_pipe_read_write(struct file *filp, char __user *buffer, * much memory to the process. */ down_read(¤t->mm->mmap_sem); - ret = get_user_pages(address, 1, !is_write, 0, &page, NULL); + ret = get_user_pages(address, 1, is_write ? 0 : FOLL_WRITE, + &page, NULL); up_read(¤t->mm->mmap_sem); if (ret < 0) break; diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c index 436dfe871d32..9013a585507e 100644 --- a/drivers/rapidio/devices/rio_mport_cdev.c +++ b/drivers/rapidio/devices/rio_mport_cdev.c @@ -892,7 +892,8 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode, down_read(¤t->mm->mmap_sem); pinned = get_user_pages( (unsigned long)xfer->loc_addr & PAGE_MASK, - nr_pages, dir == DMA_FROM_DEVICE, 0, + nr_pages, + dir == DMA_FROM_DEVICE ? 
FOLL_WRITE : 0, page_list, NULL); up_read(¤t->mm->mmap_sem); diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c index c29040fdf9a7..1091b9f1dd07 100644 --- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c +++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c @@ -423,8 +423,7 @@ create_pagelist(char __user *buf, size_t count, unsigned short type, actual_pages = get_user_pages(task, task->mm, (unsigned long)buf & ~(PAGE_SIZE - 1), num_pages, - (type == PAGELIST_READ) /*Write */ , - 0 /*Force */ , + (type == PAGELIST_READ) ? FOLL_WRITE : 0, pages, NULL /*vmas */); up_read(&task->mm->mmap_sem); diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c index e11c0e07471b..7b6cd4d80621 100644 --- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c +++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c @@ -1477,8 +1477,7 @@ dump_phys_mem(void *virt_addr, uint32_t num_bytes) current->mm, /* mm */ (unsigned long)virt_addr, /* start */ num_pages, /* len */ - 0, /* write */ - 0, /* force */ + 0, /* gup_flags */ pages, /* pages (array of page pointers) */ NULL); /* vmas */ up_read(¤t->mm->mmap_sem); diff --git a/drivers/virt/fsl_hypervisor.c b/drivers/virt/fsl_hypervisor.c index 60bdad3a689b..150ce2abf6c8 100644 --- a/drivers/virt/fsl_hypervisor.c +++ b/drivers/virt/fsl_hypervisor.c @@ -245,8 +245,8 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p) /* Get the physical addresses of the source buffer */ down_read(¤t->mm->mmap_sem); num_pinned = get_user_pages(param.local_vaddr - lb_offset, - num_pages, (param.source == -1) ? READ : WRITE, - 0, pages, NULL); + num_pages, (param.source == -1) ? 0 : FOLL_WRITE, + pages, NULL); up_read(¤t->mm->mmap_sem); if (num_pinned != num_pages) { diff --git a/include/linux/mm.h b/include/linux/mm.h index 91cc923ce985..30bb5d9631bb 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1279,7 +1279,7 @@ long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm, int write, int force, struct page **pages, struct vm_area_struct **vmas); long get_user_pages(unsigned long start, unsigned long nr_pages, - int write, int force, struct page **pages, + unsigned int gup_flags, struct page **pages, struct vm_area_struct **vmas); long get_user_pages_locked(unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, int *locked); diff --git a/mm/gup.c b/mm/gup.c index 3cfb55ef0cf4..bbc2e76cdd77 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -987,18 +987,12 @@ EXPORT_SYMBOL(get_user_pages_remote); * obviously don't pass FOLL_REMOTE in here. 
*/ long get_user_pages(unsigned long start, unsigned long nr_pages, - int write, int force, struct page **pages, + unsigned int gup_flags, struct page **pages, struct vm_area_struct **vmas) { - unsigned int flags = FOLL_TOUCH; - - if (write) - flags |= FOLL_WRITE; - if (force) - flags |= FOLL_FORCE; - return __get_user_pages_locked(current, current->mm, start, nr_pages, - pages, vmas, NULL, false, flags); + pages, vmas, NULL, false, + gup_flags | FOLL_TOUCH); } EXPORT_SYMBOL(get_user_pages); diff --git a/mm/mempolicy.c b/mm/mempolicy.c index ad1c96ac313c..0b859af06b87 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -850,7 +850,7 @@ static int lookup_node(unsigned long addr) struct page *p; int err; - err = get_user_pages(addr & PAGE_MASK, 1, 0, 0, &p, NULL); + err = get_user_pages(addr & PAGE_MASK, 1, 0, &p, NULL); if (err >= 0) { err = page_to_nid(p); put_page(p); diff --git a/mm/nommu.c b/mm/nommu.c index 842cfdd1a31e..70cb844dfd95 100644 --- a/mm/nommu.c +++ b/mm/nommu.c @@ -160,18 +160,11 @@ finish_or_fault: * - don't permit access to VMAs that don't support it, such as I/O mappings */ long get_user_pages(unsigned long start, unsigned long nr_pages, - int write, int force, struct page **pages, + unsigned int gup_flags, struct page **pages, struct vm_area_struct **vmas) { - int flags = 0; - - if (write) - flags |= FOLL_WRITE; - if (force) - flags |= FOLL_FORCE; - - return __get_user_pages(current, current->mm, start, nr_pages, flags, - pages, vmas, NULL); + return __get_user_pages(current, current->mm, start, nr_pages, + gup_flags, pages, vmas, NULL); } EXPORT_SYMBOL(get_user_pages); @@ -179,10 +172,7 @@ long get_user_pages_locked(unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, int *locked) { - int write = gup_flags & FOLL_WRITE; - int force = gup_flags & FOLL_FORCE; - - return get_user_pages(start, nr_pages, write, force, pages, NULL); + return get_user_pages(start, nr_pages, gup_flags, pages, NULL); } EXPORT_SYMBOL(get_user_pages_locked); -- cgit v1.2.3-59-g8ed1b From 9beae1ea89305a9667ceaab6d0bf46a045ad71e7 Mon Sep 17 00:00:00 2001 From: Lorenzo Stoakes Date: Thu, 13 Oct 2016 01:20:17 +0100 Subject: mm: replace get_user_pages_remote() write/force parameters with gup_flags This removes the 'write' and 'force' from get_user_pages_remote() and replaces them with 'gup_flags' to make the use of FOLL_FORCE explicit in callers as use of this flag can result in surprising behaviour (and hence bugs) within the mm subsystem. 
Signed-off-by: Lorenzo Stoakes Acked-by: Michal Hocko Reviewed-by: Jan Kara Signed-off-by: Linus Torvalds --- drivers/gpu/drm/etnaviv/etnaviv_gem.c | 7 +++++-- drivers/gpu/drm/i915/i915_gem_userptr.c | 6 +++++- drivers/infiniband/core/umem_odp.c | 7 +++++-- fs/exec.c | 9 +++++++-- include/linux/mm.h | 2 +- kernel/events/uprobes.c | 6 ++++-- mm/gup.c | 22 +++++++--------------- mm/memory.c | 6 +++++- security/tomoyo/domain.c | 2 +- 9 files changed, 40 insertions(+), 27 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c index 5ce3603e6eac..0370b842d9cc 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c @@ -748,19 +748,22 @@ static struct page **etnaviv_gem_userptr_do_get_pages( int ret = 0, pinned, npages = etnaviv_obj->base.size >> PAGE_SHIFT; struct page **pvec; uintptr_t ptr; + unsigned int flags = 0; pvec = drm_malloc_ab(npages, sizeof(struct page *)); if (!pvec) return ERR_PTR(-ENOMEM); + if (!etnaviv_obj->userptr.ro) + flags |= FOLL_WRITE; + pinned = 0; ptr = etnaviv_obj->userptr.ptr; down_read(&mm->mmap_sem); while (pinned < npages) { ret = get_user_pages_remote(task, mm, ptr, npages - pinned, - !etnaviv_obj->userptr.ro, 0, - pvec + pinned, NULL); + flags, pvec + pinned, NULL); if (ret < 0) break; diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c index e537930c64b5..c6f780f5abc9 100644 --- a/drivers/gpu/drm/i915/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/i915_gem_userptr.c @@ -508,6 +508,10 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work) pvec = drm_malloc_gfp(npages, sizeof(struct page *), GFP_TEMPORARY); if (pvec != NULL) { struct mm_struct *mm = obj->userptr.mm->mm; + unsigned int flags = 0; + + if (!obj->userptr.read_only) + flags |= FOLL_WRITE; ret = -EFAULT; if (atomic_inc_not_zero(&mm->mm_users)) { @@ -517,7 +521,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work) (work->task, mm, obj->userptr.ptr + pinned * PAGE_SIZE, npages - pinned, - !obj->userptr.read_only, 0, + flags, pvec + pinned, NULL); if (ret < 0) break; diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c index 75077a018675..1f0fe3217f23 100644 --- a/drivers/infiniband/core/umem_odp.c +++ b/drivers/infiniband/core/umem_odp.c @@ -527,6 +527,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt, u64 off; int j, k, ret = 0, start_idx, npages = 0; u64 base_virt_addr; + unsigned int flags = 0; if (access_mask == 0) return -EINVAL; @@ -556,6 +557,9 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt, goto out_put_task; } + if (access_mask & ODP_WRITE_ALLOWED_BIT) + flags |= FOLL_WRITE; + start_idx = (user_virt - ib_umem_start(umem)) >> PAGE_SHIFT; k = start_idx; @@ -574,8 +578,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt, */ npages = get_user_pages_remote(owning_process, owning_mm, user_virt, gup_num_pages, - access_mask & ODP_WRITE_ALLOWED_BIT, - 0, local_page_list, NULL); + flags, local_page_list, NULL); up_read(&owning_mm->mmap_sem); if (npages < 0) diff --git a/fs/exec.c b/fs/exec.c index 6fcfb3f7b137..4e497b9ee71e 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -191,6 +191,7 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos, { struct page *page; int ret; + unsigned int gup_flags = FOLL_FORCE; #ifdef CONFIG_STACK_GROWSUP if (write) { @@ -199,12 +200,16 @@ static struct page 
*get_arg_page(struct linux_binprm *bprm, unsigned long pos, return NULL; } #endif + + if (write) + gup_flags |= FOLL_WRITE; + /* * We are doing an exec(). 'current' is the process * doing the exec and bprm->mm is the new process's mm. */ - ret = get_user_pages_remote(current, bprm->mm, pos, 1, write, - 1, &page, NULL); + ret = get_user_pages_remote(current, bprm->mm, pos, 1, gup_flags, + &page, NULL); if (ret <= 0) return NULL; diff --git a/include/linux/mm.h b/include/linux/mm.h index 30bb5d9631bb..ecc4be7b67e0 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1276,7 +1276,7 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, struct vm_area_struct **vmas, int *nonblocking); long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm, unsigned long start, unsigned long nr_pages, - int write, int force, struct page **pages, + unsigned int gup_flags, struct page **pages, struct vm_area_struct **vmas); long get_user_pages(unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index d4129bb05e5d..f9ec9add2164 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -300,7 +300,8 @@ int uprobe_write_opcode(struct mm_struct *mm, unsigned long vaddr, retry: /* Read the page with vaddr into memory */ - ret = get_user_pages_remote(NULL, mm, vaddr, 1, 0, 1, &old_page, &vma); + ret = get_user_pages_remote(NULL, mm, vaddr, 1, FOLL_FORCE, &old_page, + &vma); if (ret <= 0) return ret; @@ -1710,7 +1711,8 @@ static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr) * but we treat this as a 'remote' access since it is * essentially a kernel access to the memory. */ - result = get_user_pages_remote(NULL, mm, vaddr, 1, 0, 1, &page, NULL); + result = get_user_pages_remote(NULL, mm, vaddr, 1, FOLL_FORCE, &page, + NULL); if (result < 0) return result; diff --git a/mm/gup.c b/mm/gup.c index bbc2e76cdd77..7aa113c2d373 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -915,9 +915,7 @@ EXPORT_SYMBOL(get_user_pages_unlocked); * @mm: mm_struct of target mm * @start: starting user address * @nr_pages: number of pages from start to pin - * @write: whether pages will be written to by the caller - * @force: whether to force access even when user mapping is currently - * protected (but never forces write access to shared mapping). + * @gup_flags: flags modifying lookup behaviour * @pages: array that receives pointers to the pages pinned. * Should be at least nr_pages long. Or NULL, if caller * only intends to ensure the pages are faulted in. @@ -946,9 +944,9 @@ EXPORT_SYMBOL(get_user_pages_unlocked); * or similar operation cannot guarantee anything stronger anyway because * locks can't be held over the syscall boundary. * - * If write=0, the page must not be written to. If the page is written to, - * set_page_dirty (or set_page_dirty_lock, as appropriate) must be called - * after the page is finished with, and before put_page is called. + * If gup_flags & FOLL_WRITE == 0, the page must not be written to. If the page + * is written to, set_page_dirty (or set_page_dirty_lock, as appropriate) must + * be called after the page is finished with, and before put_page is called. 
* * get_user_pages is typically used for fewer-copy IO operations, to get a * handle on the memory by some means other than accesses via the user virtual @@ -965,18 +963,12 @@ EXPORT_SYMBOL(get_user_pages_unlocked); */ long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm, unsigned long start, unsigned long nr_pages, - int write, int force, struct page **pages, + unsigned int gup_flags, struct page **pages, struct vm_area_struct **vmas) { - unsigned int flags = FOLL_TOUCH | FOLL_REMOTE; - - if (write) - flags |= FOLL_WRITE; - if (force) - flags |= FOLL_FORCE; - return __get_user_pages_locked(tsk, mm, start, nr_pages, pages, vmas, - NULL, false, flags); + NULL, false, + gup_flags | FOLL_TOUCH | FOLL_REMOTE); } EXPORT_SYMBOL(get_user_pages_remote); diff --git a/mm/memory.c b/mm/memory.c index fc1987dfd8cc..20a9adb7b36e 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -3873,6 +3873,10 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm, { struct vm_area_struct *vma; void *old_buf = buf; + unsigned int flags = FOLL_FORCE; + + if (write) + flags |= FOLL_WRITE; down_read(&mm->mmap_sem); /* ignore errors, just check how much was successfully transferred */ @@ -3882,7 +3886,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm, struct page *page = NULL; ret = get_user_pages_remote(tsk, mm, addr, 1, - write, 1, &page, &vma); + flags, &page, &vma); if (ret <= 0) { #ifndef CONFIG_HAVE_IOREMAP_PROT break; diff --git a/security/tomoyo/domain.c b/security/tomoyo/domain.c index ade7c6cad172..682b73af7766 100644 --- a/security/tomoyo/domain.c +++ b/security/tomoyo/domain.c @@ -881,7 +881,7 @@ bool tomoyo_dump_page(struct linux_binprm *bprm, unsigned long pos, * the execve(). */ if (get_user_pages_remote(current, bprm->mm, pos, 1, - 0, 1, &page, NULL) <= 0) + FOLL_FORCE, &page, NULL) <= 0) return false; #else page = bprm->page[pos / PAGE_SIZE]; -- cgit v1.2.3-59-g8ed1b From 17a51f12cfbd2814fd35966a069b242569c53e27 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 18 Oct 2016 09:00:52 +0200 Subject: ahci: only try to use multi-MSI mode if there is more than 1 port We should only try to allocate multiple MSI or MSI-X vectors if the device actually has multiple ports. Otherwise pci_alloc_irq_vectors will return a single vector due to n_ports = 1, in which case we shouldn't set the AHCI_HFLAG_MULTI_MSI flag. Signed-off-by: Christoph Hellwig Fixes: 0b9e2988 ("ahci: use pci_alloc_irq_vectors") Reported-by: Emmanuel Benisty Tested-by: Emmanuel Benisty Signed-off-by: Tejun Heo --- drivers/ata/ahci.c | 31 +++++++++++++++++-------------- 1 file changed, 17 insertions(+), 14 deletions(-) (limited to 'drivers') diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index ba5f11cebee2..ed311a040fed 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -1418,21 +1418,24 @@ static int ahci_init_msi(struct pci_dev *pdev, unsigned int n_ports, * Message mode could be enforced. In this case assume that advantage * of multipe MSIs is negated and use single MSI mode instead. 
*/ - nvec = pci_alloc_irq_vectors(pdev, n_ports, INT_MAX, - PCI_IRQ_MSIX | PCI_IRQ_MSI); - if (nvec > 0) { - if (!(readl(hpriv->mmio + HOST_CTL) & HOST_MRSM)) { - hpriv->get_irq_vector = ahci_get_irq_vector; - hpriv->flags |= AHCI_HFLAG_MULTI_MSI; - return nvec; - } + if (n_ports > 1) { + nvec = pci_alloc_irq_vectors(pdev, n_ports, INT_MAX, + PCI_IRQ_MSIX | PCI_IRQ_MSI); + if (nvec > 0) { + if (!(readl(hpriv->mmio + HOST_CTL) & HOST_MRSM)) { + hpriv->get_irq_vector = ahci_get_irq_vector; + hpriv->flags |= AHCI_HFLAG_MULTI_MSI; + return nvec; + } - /* - * Fallback to single MSI mode if the controller enforced MRSM - * mode. - */ - printk(KERN_INFO "ahci: MRSM is on, fallback to single MSI\n"); - pci_free_irq_vectors(pdev); + /* + * Fallback to single MSI mode if the controller + * enforced MRSM mode. + */ + printk(KERN_INFO + "ahci: MRSM is on, fallback to single MSI\n"); + pci_free_irq_vectors(pdev); + } } /* -- cgit v1.2.3-59-g8ed1b From 75d29713b792da4782cadfaa87e802183440694e Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Wed, 12 Oct 2016 09:34:29 +0300 Subject: libnvdimm, namespace: potential NULL deref on allocation error If the kcalloc() fails then "devs" can be NULL and we dereference it checking "devs[i]". Fixes: 1b40e09a1232 ('libnvdimm: blk labels and namespace instantiation') Signed-off-by: Dan Carpenter Signed-off-by: Dan Williams --- drivers/nvdimm/namespace_devs.c | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c index 3509cff68ef9..abe5c6bc756c 100644 --- a/drivers/nvdimm/namespace_devs.c +++ b/drivers/nvdimm/namespace_devs.c @@ -2176,12 +2176,14 @@ static struct device **scan_labels(struct nd_region *nd_region) return devs; err: - for (i = 0; devs[i]; i++) - if (is_nd_blk(&nd_region->dev)) - namespace_blk_release(devs[i]); - else - namespace_pmem_release(devs[i]); - kfree(devs); + if (devs) { + for (i = 0; devs[i]; i++) + if (is_nd_blk(&nd_region->dev)) + namespace_blk_release(devs[i]); + else + namespace_pmem_release(devs[i]); + kfree(devs); + } return NULL; } -- cgit v1.2.3-59-g8ed1b From 3115bb02b5c23d960df5f1bf551ec394a9bb10ec Mon Sep 17 00:00:00 2001 From: Toshi Kani Date: Thu, 13 Oct 2016 09:54:21 -0600 Subject: pmem: report error on clear poison failure ACPI Clear Uncorrectable Error DSM function may fail or may be unsupported on a platform. pmem_clear_poison() returns without clearing badblocks in such cases. This failure is detected at the next read (-EIO). This behavior can lead to an issue when user keeps writing but does not read immediately. For instance, flight recorder file may be only read when it is necessary for troubleshooting. Change pmem_do_bvec() and pmem_clear_poison() to return -EIO so that filesystem can log an error message on a write error. 
Cc: Vishal Verma Signed-off-by: Toshi Kani Signed-off-by: Dan Williams --- drivers/nvdimm/pmem.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c index 42b3a8217073..24618431a14b 100644 --- a/drivers/nvdimm/pmem.c +++ b/drivers/nvdimm/pmem.c @@ -47,7 +47,7 @@ static struct nd_region *to_region(struct pmem_device *pmem) return to_nd_region(to_dev(pmem)->parent); } -static void pmem_clear_poison(struct pmem_device *pmem, phys_addr_t offset, +static int pmem_clear_poison(struct pmem_device *pmem, phys_addr_t offset, unsigned int len) { struct device *dev = to_dev(pmem); @@ -62,8 +62,12 @@ static void pmem_clear_poison(struct pmem_device *pmem, phys_addr_t offset, __func__, (unsigned long long) sector, cleared / 512, cleared / 512 > 1 ? "s" : ""); badblocks_clear(&pmem->bb, sector, cleared / 512); + } else { + return -EIO; } + invalidate_pmem(pmem->virt_addr + offset, len); + return 0; } static void write_pmem(void *pmem_addr, struct page *page, @@ -123,7 +127,7 @@ static int pmem_do_bvec(struct pmem_device *pmem, struct page *page, flush_dcache_page(page); write_pmem(pmem_addr, page, off, len); if (unlikely(bad_pmem)) { - pmem_clear_poison(pmem, pmem_off, len); + rc = pmem_clear_poison(pmem, pmem_off, len); write_pmem(pmem_addr, page, off, len); } } -- cgit v1.2.3-59-g8ed1b From 8ef2074d28373014d05e92b5f13364ef51075b6e Mon Sep 17 00:00:00 2001 From: Gabriel Krisman Bertazi Date: Wed, 19 Oct 2016 09:51:05 -0600 Subject: nvme: Add tertiary number to NVME_VS NVMe 1.2.1 specification adds a tertiary element to the version number. This updates the macro and its callers to include the final number and fixup a single place in nvmet where the version was generated manually. 
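As a hedged illustration (not part of the patch): the macro packs the three version components into descending byte positions, so existing integer comparisons against NVME_VS() values keep ordering versions correctly.

        #include <linux/nvme.h>
        #include <linux/types.h>

        /* Illustration only: NVMe 1.2.1 packs to 0x010201, 1.2.0 to 0x010200, 1.1.0 to 0x010100. */
        static bool nvme_vs_encoding_example(void)
        {
                return NVME_VS(1, 2, 1) == 0x010201 &&
                       NVME_VS(1, 2, 1) >  NVME_VS(1, 2, 0) &&
                       NVME_VS(1, 2, 0) >  NVME_VS(1, 1, 0);
        }

This is why the nvmet change below can simply replace the hand-built (1 << 16) | (2 << 8) | 1 with NVME_VS(1, 2, 1).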
Signed-off-by: Gabriel Krisman Bertazi Reviewed-by: Sagi Grimberg Reviewed-by: Christoph Hellwig Signed-off-by: Jens Axboe --- drivers/nvme/host/core.c | 8 ++++---- drivers/nvme/host/pci.c | 4 ++-- drivers/nvme/host/scsi.c | 4 ++-- drivers/nvme/target/core.c | 2 +- include/linux/nvme.h | 3 ++- 5 files changed, 11 insertions(+), 10 deletions(-) (limited to 'drivers') diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 2a57f5ede386..bb168b71048d 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -900,9 +900,9 @@ static int nvme_revalidate_ns(struct nvme_ns *ns, struct nvme_id_ns **id) return -ENODEV; } - if (ns->ctrl->vs >= NVME_VS(1, 1)) + if (ns->ctrl->vs >= NVME_VS(1, 1, 0)) memcpy(ns->eui, (*id)->eui64, sizeof(ns->eui)); - if (ns->ctrl->vs >= NVME_VS(1, 2)) + if (ns->ctrl->vs >= NVME_VS(1, 2, 0)) memcpy(ns->uuid, (*id)->nguid, sizeof(ns->uuid)); return 0; @@ -1242,7 +1242,7 @@ int nvme_init_identify(struct nvme_ctrl *ctrl) } page_shift = NVME_CAP_MPSMIN(cap) + 12; - if (ctrl->vs >= NVME_VS(1, 1)) + if (ctrl->vs >= NVME_VS(1, 1, 0)) ctrl->subsystem = NVME_CAP_NSSRC(cap); ret = nvme_identify_ctrl(ctrl, &id); @@ -1842,7 +1842,7 @@ static void nvme_scan_work(struct work_struct *work) return; nn = le32_to_cpu(id->nn); - if (ctrl->vs >= NVME_VS(1, 1) && + if (ctrl->vs >= NVME_VS(1, 1, 0) && !(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)) { if (!nvme_scan_ns_list(ctrl, nn)) goto done; diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index a7c6e9d74943..26a8d31b291d 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -1214,7 +1214,7 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev) u64 cap = lo_hi_readq(dev->bar + NVME_REG_CAP); struct nvme_queue *nvmeq; - dev->subsystem = readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 1) ? + dev->subsystem = readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 1, 0) ? NVME_CAP_NSSRC(cap) : 0; if (dev->subsystem && @@ -1633,7 +1633,7 @@ static int nvme_pci_enable(struct nvme_dev *dev) * NULL as final argument to sysfs_add_file_to_group. 
*/ - if (readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 2)) { + if (readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 2, 0)) { dev->cmb = nvme_map_cmb(dev); if (dev->cmbsz) { diff --git a/drivers/nvme/host/scsi.c b/drivers/nvme/host/scsi.c index c2a0a1c7d05d..3eaa4d27801e 100644 --- a/drivers/nvme/host/scsi.c +++ b/drivers/nvme/host/scsi.c @@ -606,7 +606,7 @@ static int nvme_fill_device_id_eui64(struct nvme_ns *ns, struct sg_io_hdr *hdr, eui = id_ns->eui64; len = sizeof(id_ns->eui64); - if (ns->ctrl->vs >= NVME_VS(1, 2)) { + if (ns->ctrl->vs >= NVME_VS(1, 2, 0)) { if (bitmap_empty(eui, len * 8)) { eui = id_ns->nguid; len = sizeof(id_ns->nguid); @@ -679,7 +679,7 @@ static int nvme_trans_device_id_page(struct nvme_ns *ns, struct sg_io_hdr *hdr, { int res; - if (ns->ctrl->vs >= NVME_VS(1, 1)) { + if (ns->ctrl->vs >= NVME_VS(1, 1, 0)) { res = nvme_fill_device_id_eui64(ns, hdr, resp, alloc_len); if (res != -EOPNOTSUPP) return res; diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c index 6559d5afa7bf..b4cacb6f0258 100644 --- a/drivers/nvme/target/core.c +++ b/drivers/nvme/target/core.c @@ -882,7 +882,7 @@ struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn, if (!subsys) return NULL; - subsys->ver = (1 << 16) | (2 << 8) | 1; /* NVMe 1.2.1 */ + subsys->ver = NVME_VS(1, 2, 1); /* NVMe 1.2.1 */ switch (type) { case NVME_NQN_NVME: diff --git a/include/linux/nvme.h b/include/linux/nvme.h index 7676557ce357..086d196e68f7 100644 --- a/include/linux/nvme.h +++ b/include/linux/nvme.h @@ -960,6 +960,7 @@ struct nvme_completion { __le16 status; /* did the command fail, and if so, why? */ }; -#define NVME_VS(major, minor) (((major) << 16) | ((minor) << 8)) +#define NVME_VS(major, minor, tertiary) \ + (((major) << 16) | ((minor) << 8) | (tertiary)) #endif /* _LINUX_NVME_H */ -- cgit v1.2.3-59-g8ed1b From a446c0840e244f34c22cc13b3a62d50aa51fb4c6 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 30 Sep 2016 13:51:06 +0200 Subject: nvme.h: resync with nvme-cli Import a few updates to nvme.h from nvme-cli. This mostly includes a few new fields and error codes, but also a few renames that so far are only used in user space. Also one field is moved from an array of two le64 values to one of 16 u8 values so that we can more easily access it. 
Signed-off-by: Christoph Hellwig Reviewed-by: Keith Busch Reviewed-by: Gabriel Krisman Bertazi Signed-off-by: Jens Axboe --- drivers/nvme/target/admin-cmd.c | 2 +- drivers/nvme/target/discovery.c | 2 +- include/linux/nvme.h | 33 +++++++++++++++++++++++++++------ 3 files changed, 29 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c index 7ab9c9381b98..6944f246e562 100644 --- a/drivers/nvme/target/admin-cmd.c +++ b/drivers/nvme/target/admin-cmd.c @@ -199,7 +199,7 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req) */ /* we support multiple ports and multiples hosts: */ - id->mic = (1 << 0) | (1 << 1); + id->cmic = (1 << 0) | (1 << 1); /* no limit on data transfer sizes for now */ id->mdts = 0; diff --git a/drivers/nvme/target/discovery.c b/drivers/nvme/target/discovery.c index 6f65646e89cf..a6c25fbcff3f 100644 --- a/drivers/nvme/target/discovery.c +++ b/drivers/nvme/target/discovery.c @@ -54,7 +54,7 @@ static void nvmet_format_discovery_entry(struct nvmf_disc_rsp_page_hdr *hdr, /* we support only dynamic controllers */ e->cntlid = cpu_to_le16(NVME_CNTLID_DYNAMIC); e->asqsz = cpu_to_le16(NVMF_AQ_DEPTH); - e->nqntype = type; + e->subtype = type; memcpy(e->trsvcid, port->disc_addr.trsvcid, NVMF_TRSVCID_SIZE); memcpy(e->traddr, port->disc_addr.traddr, NVMF_TRADDR_SIZE); memcpy(e->tsas.common, port->disc_addr.tsas.common, NVMF_TSAS_SIZE); diff --git a/include/linux/nvme.h b/include/linux/nvme.h index 086d196e68f7..989699641e10 100644 --- a/include/linux/nvme.h +++ b/include/linux/nvme.h @@ -182,7 +182,7 @@ struct nvme_id_ctrl { char fr[8]; __u8 rab; __u8 ieee[3]; - __u8 mic; + __u8 cmic; __u8 mdts; __le16 cntlid; __le32 ver; @@ -202,7 +202,13 @@ struct nvme_id_ctrl { __u8 apsta; __le16 wctemp; __le16 cctemp; - __u8 rsvd270[50]; + __le16 mtfa; + __le32 hmpre; + __le32 hmmin; + __u8 tnvmcap[16]; + __u8 unvmcap[16]; + __le32 rpmbs; + __u8 rsvd316[4]; __le16 kas; __u8 rsvd322[190]; __u8 sqes; @@ -267,7 +273,7 @@ struct nvme_id_ns { __le16 nabo; __le16 nabspf; __u16 rsvd46; - __le64 nvmcap[2]; + __u8 nvmcap[16]; __u8 rsvd64[40]; __u8 nguid[16]; __u8 eui64[8]; @@ -556,8 +562,10 @@ enum nvme_admin_opcode { nvme_admin_set_features = 0x09, nvme_admin_get_features = 0x0a, nvme_admin_async_event = 0x0c, + nvme_admin_ns_mgmt = 0x0d, nvme_admin_activate_fw = 0x10, nvme_admin_download_fw = 0x11, + nvme_admin_ns_attach = 0x15, nvme_admin_keep_alive = 0x18, nvme_admin_format_nvm = 0x80, nvme_admin_security_send = 0x81, @@ -583,6 +591,7 @@ enum { NVME_FEAT_WRITE_ATOMIC = 0x0a, NVME_FEAT_ASYNC_EVENT = 0x0b, NVME_FEAT_AUTO_PST = 0x0c, + NVME_FEAT_HOST_MEM_BUF = 0x0d, NVME_FEAT_KATO = 0x0f, NVME_FEAT_SW_PROGRESS = 0x80, NVME_FEAT_HOST_ID = 0x81, @@ -745,7 +754,7 @@ struct nvmf_common_command { struct nvmf_disc_rsp_page_entry { __u8 trtype; __u8 adrfam; - __u8 nqntype; + __u8 subtype; __u8 treq; __le16 portid; __le16 cntlid; @@ -905,12 +914,23 @@ enum { NVME_SC_INVALID_VECTOR = 0x108, NVME_SC_INVALID_LOG_PAGE = 0x109, NVME_SC_INVALID_FORMAT = 0x10a, - NVME_SC_FIRMWARE_NEEDS_RESET = 0x10b, + NVME_SC_FW_NEEDS_CONV_RESET = 0x10b, NVME_SC_INVALID_QUEUE = 0x10c, NVME_SC_FEATURE_NOT_SAVEABLE = 0x10d, NVME_SC_FEATURE_NOT_CHANGEABLE = 0x10e, NVME_SC_FEATURE_NOT_PER_NS = 0x10f, - NVME_SC_FW_NEEDS_RESET_SUBSYS = 0x110, + NVME_SC_FW_NEEDS_SUBSYS_RESET = 0x110, + NVME_SC_FW_NEEDS_RESET = 0x111, + NVME_SC_FW_NEEDS_MAX_TIME = 0x112, + NVME_SC_FW_ACIVATE_PROHIBITED = 0x113, + NVME_SC_OVERLAPPING_RANGE = 0x114, + 
NVME_SC_NS_INSUFFICENT_CAP = 0x115, + NVME_SC_NS_ID_UNAVAILABLE = 0x116, + NVME_SC_NS_ALREADY_ATTACHED = 0x118, + NVME_SC_NS_IS_PRIVATE = 0x119, + NVME_SC_NS_NOT_ATTACHED = 0x11a, + NVME_SC_THIN_PROV_NOT_SUPP = 0x11b, + NVME_SC_CTRL_LIST_INVALID = 0x11c, /* * I/O Command Set Specific - NVM commands: @@ -941,6 +961,7 @@ enum { NVME_SC_REFTAG_CHECK = 0x284, NVME_SC_COMPARE_FAILED = 0x285, NVME_SC_ACCESS_DENIED = 0x286, + NVME_SC_UNWRITTEN_BLOCK = 0x287, NVME_SC_DNR = 0x4000, }; -- cgit v1.2.3-59-g8ed1b From fa60682677c594d81e9b68b8a4046cde75a7374b Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 30 Sep 2016 13:51:09 +0200 Subject: nvme: use symbolic constants for CNS values Signed-off-by: Christoph Hellwig Reviewed-by: Gabriel Krisman Bertazi Reviewed-by: Keith Busch Signed-off-by: Jens Axboe --- drivers/nvme/host/core.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index bb168b71048d..79e679d12f3b 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -554,7 +554,7 @@ int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id) /* gcc-4.4.4 (at least) has issues with initializers and anon unions */ c.identify.opcode = nvme_admin_identify; - c.identify.cns = cpu_to_le32(1); + c.identify.cns = cpu_to_le32(NVME_ID_CNS_CTRL); *id = kmalloc(sizeof(struct nvme_id_ctrl), GFP_KERNEL); if (!*id) @@ -572,7 +572,7 @@ static int nvme_identify_ns_list(struct nvme_ctrl *dev, unsigned nsid, __le32 *n struct nvme_command c = { }; c.identify.opcode = nvme_admin_identify; - c.identify.cns = cpu_to_le32(2); + c.identify.cns = cpu_to_le32(NVME_ID_CNS_NS_ACTIVE_LIST); c.identify.nsid = cpu_to_le32(nsid); return nvme_submit_sync_cmd(dev->admin_q, &c, ns_list, 0x1000); } -- cgit v1.2.3-59-g8ed1b From e9c9346e20c1b18a3ec30defd5ff7134bcc0da6d Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 30 Sep 2016 13:51:10 +0200 Subject: nvmet: use symbolic constants for CNS values Signed-off-by: Christoph Hellwig Reviewed-by: Gabriel Krisman Bertazi Reviewed-by: Jay Freyensee Signed-off-by: Jens Axboe --- drivers/nvme/target/admin-cmd.c | 6 +++--- drivers/nvme/target/discovery.c | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c index 6944f246e562..6fe4c48a21e4 100644 --- a/drivers/nvme/target/admin-cmd.c +++ b/drivers/nvme/target/admin-cmd.c @@ -511,13 +511,13 @@ int nvmet_parse_admin_cmd(struct nvmet_req *req) case nvme_admin_identify: req->data_len = 4096; switch (le32_to_cpu(cmd->identify.cns)) { - case 0x00: + case NVME_ID_CNS_NS: req->execute = nvmet_execute_identify_ns; return 0; - case 0x01: + case NVME_ID_CNS_CTRL: req->execute = nvmet_execute_identify_ctrl; return 0; - case 0x02: + case NVME_ID_CNS_NS_ACTIVE_LIST: req->execute = nvmet_execute_identify_nslist; return 0; } diff --git a/drivers/nvme/target/discovery.c b/drivers/nvme/target/discovery.c index a6c25fbcff3f..12f39eea569f 100644 --- a/drivers/nvme/target/discovery.c +++ b/drivers/nvme/target/discovery.c @@ -187,7 +187,7 @@ int nvmet_parse_discovery_cmd(struct nvmet_req *req) case nvme_admin_identify: req->data_len = 4096; switch (le32_to_cpu(cmd->identify.cns)) { - case 0x01: + case NVME_ID_CNS_CTRL: req->execute = nvmet_execute_identify_disc_ctrl; return 0; -- cgit v1.2.3-59-g8ed1b From c30a70d3ac60f3216e8d60d7746caad084a7eb46 Mon Sep 17 00:00:00 2001 From: Giuseppe CAVALLARO Date: Wed, 19 Oct 2016 
09:06:41 +0200 Subject: stmmac: fix and review the ptp registration. The commit commit 7086605a6ab5 ("stmmac: fix error check when init ptp") breaks the procedure added by the commit efee95f42b5d ("ptp_clock: future-proofing drivers against PTP subsystem becoming optional") So this patch tries to re-import the logic added by the latest commit above: it makes sense to have the stmmac_ptp_register as void function and, inside the main, the stmmac_init_ptp can fails in case of the capability cannot be supported by the HW. Signed-off-by: Giuseppe Cavallaro Cc: Alexandre TORGUE Cc: Rayagond Kokatanur Cc: Dan Carpenter Cc: Nicolas Pitre Acked-by: Nicolas Pitre Signed-off-by: David S. Miller --- drivers/net/ethernet/stmicro/stmmac/stmmac.h | 2 +- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 6 ++++-- drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c | 15 ++++----------- 3 files changed, 9 insertions(+), 14 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index 8dc9056c1001..b15fc55f1b96 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h @@ -145,7 +145,7 @@ int stmmac_mdio_register(struct net_device *ndev); int stmmac_mdio_reset(struct mii_bus *mii); void stmmac_set_ethtool_ops(struct net_device *netdev); -int stmmac_ptp_register(struct stmmac_priv *priv); +void stmmac_ptp_register(struct stmmac_priv *priv); void stmmac_ptp_unregister(struct stmmac_priv *priv); int stmmac_resume(struct device *dev); int stmmac_suspend(struct device *dev); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 6c85b61aaa0b..48e71fad4210 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -676,7 +676,9 @@ static int stmmac_init_ptp(struct stmmac_priv *priv) priv->hwts_tx_en = 0; priv->hwts_rx_en = 0; - return stmmac_ptp_register(priv); + stmmac_ptp_register(priv); + + return 0; } static void stmmac_release_ptp(struct stmmac_priv *priv) @@ -1710,7 +1712,7 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp) if (init_ptp) { ret = stmmac_init_ptp(priv); if (ret) - netdev_warn(priv->dev, "PTP support cannot init.\n"); + netdev_warn(priv->dev, "fail to init PTP.\n"); } #ifdef CONFIG_DEBUG_FS diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c index 5d61fb27219a..1477471f8d44 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c @@ -177,7 +177,7 @@ static struct ptp_clock_info stmmac_ptp_clock_ops = { * Description: this function will register the ptp clock driver * to kernel. It also does some house keeping work. 
*/ -int stmmac_ptp_register(struct stmmac_priv *priv) +void stmmac_ptp_register(struct stmmac_priv *priv) { spin_lock_init(&priv->ptp_lock); priv->ptp_clock_ops = stmmac_ptp_clock_ops; @@ -185,17 +185,10 @@ int stmmac_ptp_register(struct stmmac_priv *priv) priv->ptp_clock = ptp_clock_register(&priv->ptp_clock_ops, priv->device); if (IS_ERR(priv->ptp_clock)) { - int ret = PTR_ERR(priv->ptp_clock); - + netdev_err(priv->dev, "ptp_clock_register failed\n"); priv->ptp_clock = NULL; - return ret; - } - - spin_lock_init(&priv->ptp_lock); - - netdev_dbg(priv->dev, "Added PTP HW clock successfully\n"); - - return 0; + } else if (priv->ptp_clock) + netdev_info(priv->dev, "registered PTP clock\n"); } /** -- cgit v1.2.3-59-g8ed1b From 7d36b9c102318aa86aceb074359305da88ce9ef9 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Wed, 19 Oct 2016 20:49:39 +0900 Subject: clk: uniphier: fix memory overrun bug The first loop of this "for" statement writes memory beyond the allocated clk_hw_onecell_data. It should be: for (clk_num--; clk_num >= 0; clk_num--) ... Or more simply: while (--clk_num >= 0) ... Fixes: 734d82f4a678 ("clk: uniphier: add core support code for UniPhier clock driver") Signed-off-by: Masahiro Yamada Signed-off-by: Stephen Boyd --- drivers/clk/uniphier/clk-uniphier-core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/clk/uniphier/clk-uniphier-core.c b/drivers/clk/uniphier/clk-uniphier-core.c index f4e0f6be5f33..84bc465d31aa 100644 --- a/drivers/clk/uniphier/clk-uniphier-core.c +++ b/drivers/clk/uniphier/clk-uniphier-core.c @@ -79,7 +79,7 @@ static int uniphier_clk_probe(struct platform_device *pdev) hw_data->num = clk_num; /* avoid returning NULL for unused idx */ - for (; clk_num >= 0; clk_num--) + while (--clk_num >= 0) hw_data->hws[clk_num] = ERR_PTR(-EINVAL); for (p = data; p->name; p++) { -- cgit v1.2.3-59-g8ed1b From 5c6201e60a57c6b240d446c8a2d83063283b2743 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Wed, 19 Oct 2016 17:22:07 +0900 Subject: clk: uniphier: rename MIO clock to SD clock for Pro5, PXs2, LD20 SoCs I made a mistake as for naming for this block. The MIO block is not implemented for these 3 SoCs in the first place. The current naming will be a trouble if an SoC with both MIO and SD-ctrl blocks appear in the future. This driver has just been merged in the previous merge window. Rename it before the release. 
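Returning briefly to the clk-uniphier overrun fix above: the difference between the two loop forms is easy to reproduce outside the kernel. In the standalone sketch below (plain userspace C, not the driver code) the valid indices are 0..NUM_CLKS-1, so the decrement has to happen before the bounds check.

#include <stdio.h>

#define NUM_CLKS 4

int main(void)
{
	int hws[NUM_CLKS];
	int clk_num = NUM_CLKS;

	/* Correct form: first index touched is NUM_CLKS - 1, last is 0. */
	while (--clk_num >= 0)
		hws[clk_num] = -22;	/* mark unused slots, cf. ERR_PTR(-EINVAL) */

	/* The buggy "for (; clk_num >= 0; clk_num--)" with clk_num == NUM_CLKS
	 * would have written hws[NUM_CLKS], one element past the end. */
	for (clk_num = 0; clk_num < NUM_CLKS; clk_num++)
		printf("hws[%d] = %d\n", clk_num, hws[clk_num]);
	return 0;
}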
Signed-off-by: Masahiro Yamada Signed-off-by: Stephen Boyd --- .../devicetree/bindings/clock/uniphier-clock.txt | 16 ++++++++-------- drivers/clk/uniphier/clk-uniphier-core.c | 14 +++++++------- drivers/clk/uniphier/clk-uniphier-mio.c | 2 +- drivers/clk/uniphier/clk-uniphier.h | 2 +- 4 files changed, 17 insertions(+), 17 deletions(-) (limited to 'drivers') diff --git a/Documentation/devicetree/bindings/clock/uniphier-clock.txt b/Documentation/devicetree/bindings/clock/uniphier-clock.txt index c7179d3b5c33..812163060fa3 100644 --- a/Documentation/devicetree/bindings/clock/uniphier-clock.txt +++ b/Documentation/devicetree/bindings/clock/uniphier-clock.txt @@ -24,7 +24,7 @@ Example: reg = <0x61840000 0x4000>; clock { - compatible = "socionext,uniphier-ld20-clock"; + compatible = "socionext,uniphier-ld11-clock"; #clock-cells = <1>; }; @@ -43,8 +43,8 @@ Provided clocks: 21: USB3 ch1 PHY1 -Media I/O (MIO) clock ---------------------- +Media I/O (MIO) clock, SD clock +------------------------------- Required properties: - compatible: should be one of the following: @@ -52,10 +52,10 @@ Required properties: "socionext,uniphier-ld4-mio-clock" - for LD4 SoC. "socionext,uniphier-pro4-mio-clock" - for Pro4 SoC. "socionext,uniphier-sld8-mio-clock" - for sLD8 SoC. - "socionext,uniphier-pro5-mio-clock" - for Pro5 SoC. - "socionext,uniphier-pxs2-mio-clock" - for PXs2/LD6b SoC. + "socionext,uniphier-pro5-sd-clock" - for Pro5 SoC. + "socionext,uniphier-pxs2-sd-clock" - for PXs2/LD6b SoC. "socionext,uniphier-ld11-mio-clock" - for LD11 SoC. - "socionext,uniphier-ld20-mio-clock" - for LD20 SoC. + "socionext,uniphier-ld20-sd-clock" - for LD20 SoC. - #clock-cells: should be 1. Example: @@ -66,7 +66,7 @@ Example: reg = <0x59810000 0x800>; clock { - compatible = "socionext,uniphier-ld20-mio-clock"; + compatible = "socionext,uniphier-ld11-mio-clock"; #clock-cells = <1>; }; @@ -112,7 +112,7 @@ Example: reg = <0x59820000 0x200>; clock { - compatible = "socionext,uniphier-ld20-peri-clock"; + compatible = "socionext,uniphier-ld11-peri-clock"; #clock-cells = <1>; }; diff --git a/drivers/clk/uniphier/clk-uniphier-core.c b/drivers/clk/uniphier/clk-uniphier-core.c index 84bc465d31aa..26c53f7963a4 100644 --- a/drivers/clk/uniphier/clk-uniphier-core.c +++ b/drivers/clk/uniphier/clk-uniphier-core.c @@ -142,7 +142,7 @@ static const struct of_device_id uniphier_clk_match[] = { .compatible = "socionext,uniphier-ld20-clock", .data = uniphier_ld20_sys_clk_data, }, - /* Media I/O clock */ + /* Media I/O clock, SD clock */ { .compatible = "socionext,uniphier-sld3-mio-clock", .data = uniphier_sld3_mio_clk_data, @@ -160,20 +160,20 @@ static const struct of_device_id uniphier_clk_match[] = { .data = uniphier_sld3_mio_clk_data, }, { - .compatible = "socionext,uniphier-pro5-mio-clock", - .data = uniphier_pro5_mio_clk_data, + .compatible = "socionext,uniphier-pro5-sd-clock", + .data = uniphier_pro5_sd_clk_data, }, { - .compatible = "socionext,uniphier-pxs2-mio-clock", - .data = uniphier_pro5_mio_clk_data, + .compatible = "socionext,uniphier-pxs2-sd-clock", + .data = uniphier_pro5_sd_clk_data, }, { .compatible = "socionext,uniphier-ld11-mio-clock", .data = uniphier_sld3_mio_clk_data, }, { - .compatible = "socionext,uniphier-ld20-mio-clock", - .data = uniphier_pro5_mio_clk_data, + .compatible = "socionext,uniphier-ld20-sd-clock", + .data = uniphier_pro5_sd_clk_data, }, /* Peripheral clock */ { diff --git a/drivers/clk/uniphier/clk-uniphier-mio.c b/drivers/clk/uniphier/clk-uniphier-mio.c index 6aa7ec768d0b..218d20f099ce 100644 --- 
a/drivers/clk/uniphier/clk-uniphier-mio.c +++ b/drivers/clk/uniphier/clk-uniphier-mio.c @@ -93,7 +93,7 @@ const struct uniphier_clk_data uniphier_sld3_mio_clk_data[] = { { /* sentinel */ } }; -const struct uniphier_clk_data uniphier_pro5_mio_clk_data[] = { +const struct uniphier_clk_data uniphier_pro5_sd_clk_data[] = { UNIPHIER_MIO_CLK_SD_FIXED, UNIPHIER_MIO_CLK_SD(0, 0), UNIPHIER_MIO_CLK_SD(1, 1), diff --git a/drivers/clk/uniphier/clk-uniphier.h b/drivers/clk/uniphier/clk-uniphier.h index 3ae184062388..0244dba1f4cf 100644 --- a/drivers/clk/uniphier/clk-uniphier.h +++ b/drivers/clk/uniphier/clk-uniphier.h @@ -115,7 +115,7 @@ extern const struct uniphier_clk_data uniphier_pxs2_sys_clk_data[]; extern const struct uniphier_clk_data uniphier_ld11_sys_clk_data[]; extern const struct uniphier_clk_data uniphier_ld20_sys_clk_data[]; extern const struct uniphier_clk_data uniphier_sld3_mio_clk_data[]; -extern const struct uniphier_clk_data uniphier_pro5_mio_clk_data[]; +extern const struct uniphier_clk_data uniphier_pro5_sd_clk_data[]; extern const struct uniphier_clk_data uniphier_ld4_peri_clk_data[]; extern const struct uniphier_clk_data uniphier_pro4_peri_clk_data[]; -- cgit v1.2.3-59-g8ed1b From a15cf34fed779d3d49d76e3a7e63bd0132b6a458 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Sat, 15 Oct 2016 17:00:11 +0100 Subject: ubi: fix swapped arguments to call to ubi_alloc_aeb Static analysis by CoverityScan detected the ec and pnum arguments are in the wrong order on a call to ubi_alloc_aeb. Swap the order to fix this. Fixes: 91f4285fe389a27 ("UBI: provide helpers to allocate and free aeb elements") Signed-off-by: Colin Ian King Reviewed-by: Boris Brezillon Signed-off-by: Richard Weinberger --- drivers/mtd/ubi/fastmap.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c index d6384d965788..2ff62157d3bb 100644 --- a/drivers/mtd/ubi/fastmap.c +++ b/drivers/mtd/ubi/fastmap.c @@ -287,7 +287,7 @@ static int update_vol(struct ubi_device *ubi, struct ubi_attach_info *ai, /* new_aeb is newer */ if (cmp_res & 1) { - victim = ubi_alloc_aeb(ai, aeb->ec, aeb->pnum); + victim = ubi_alloc_aeb(ai, aeb->pnum, aeb->ec); if (!victim) return -ENOMEM; -- cgit v1.2.3-59-g8ed1b From 884a3b647809cb31cf6bd948f814e93753b38502 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Thu, 13 Oct 2016 16:05:36 +0200 Subject: UBI: Fix crash in try_recover_peb() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit drivers/mtd/ubi/eba.c: In function ‘try_recover_peb’: drivers/mtd/ubi/eba.c:744: warning: ‘vid_hdr’ is used uninitialized in this function The pointer vid_hdr is indeed not initialized, leading to a crash when it is dereferenced. Fix this by obtaining the pointer from the VID buffer, like is done everywhere else. 
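As a standalone illustration of this bug class (the demo_* names below are invented; only the shape mirrors the driver code): the header pointer must be derived from the VID buffer before any field is read, and compiling the original pattern with -Wuninitialized/-Wmaybe-uninitialized produces exactly the kind of warning quoted above.

#include <stdio.h>
#include <stdlib.h>

struct demo_vid_hdr { int vol_type; };
struct demo_vid_buf { struct demo_vid_hdr hdr; };

/* stands in for ubi_get_vid_hdr(): derive the header from its buffer */
static struct demo_vid_hdr *demo_get_vid_hdr(struct demo_vid_buf *vidb)
{
	return &vidb->hdr;
}

int main(void)
{
	struct demo_vid_buf *vidb = calloc(1, sizeof(*vidb));
	struct demo_vid_hdr *vid_hdr;	/* dereferenced while uninitialized in the bug */

	if (!vidb)
		return 1;
	vidb->hdr.vol_type = 1;

	vid_hdr = demo_get_vid_hdr(vidb);	/* equivalent of the one-line fix */
	printf("vol_type = %d\n", vid_hdr->vol_type);
	free(vidb);
	return 0;
}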
Fixes: 3291b52f9ff0acc8 ("UBI: introduce the VID buffer concept") Signed-off-by: Geert Uytterhoeven Reviewed-by: Boris Brezillon Signed-off-by: Richard Weinberger --- drivers/mtd/ubi/eba.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c index 95c4048a371e..388e46be6ad9 100644 --- a/drivers/mtd/ubi/eba.c +++ b/drivers/mtd/ubi/eba.c @@ -741,6 +741,7 @@ static int try_recover_peb(struct ubi_volume *vol, int pnum, int lnum, goto out_put; } + vid_hdr = ubi_get_vid_hdr(vidb); ubi_assert(vid_hdr->vol_type == UBI_VID_DYNAMIC); mutex_lock(&ubi->buf_mutex); -- cgit v1.2.3-59-g8ed1b From 8dedefbc38172f3fcb43a26b6d0e394dcb1ee562 Mon Sep 17 00:00:00 2001 From: Stefan Agner Date: Tue, 4 Oct 2016 16:40:14 -0700 Subject: drm/fsl-dcu: enable TCON bypass mode by default Do not use encoder disable/enable callbacks to control bypass mode as this seems to mess with the signals not liked by displays. This also makes more sense since the encoder is already defined to be parallel RGB/LVDS at creation time. Signed-off-by: Stefan Agner Tested-By: Meng Yi --- drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c | 2 ++ drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c | 39 ++++--------------------------- 2 files changed, 7 insertions(+), 34 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c index 0884c45aefe8..3897f5671776 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c @@ -273,6 +273,8 @@ static int fsl_dcu_drm_pm_resume(struct device *dev) goto disable_dcu_clk; } + if (fsl_dev->tcon) + fsl_tcon_bypass_enable(fsl_dev->tcon); fsl_dcu_drm_init_planes(fsl_dev->drm); drm_atomic_helper_resume(fsl_dev->drm, fsl_dev->state); diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c index 26edcc899712..e1dd75b18118 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c @@ -20,38 +20,6 @@ #include "fsl_dcu_drm_drv.h" #include "fsl_tcon.h" -static int -fsl_dcu_drm_encoder_atomic_check(struct drm_encoder *encoder, - struct drm_crtc_state *crtc_state, - struct drm_connector_state *conn_state) -{ - return 0; -} - -static void fsl_dcu_drm_encoder_disable(struct drm_encoder *encoder) -{ - struct drm_device *dev = encoder->dev; - struct fsl_dcu_drm_device *fsl_dev = dev->dev_private; - - if (fsl_dev->tcon) - fsl_tcon_bypass_disable(fsl_dev->tcon); -} - -static void fsl_dcu_drm_encoder_enable(struct drm_encoder *encoder) -{ - struct drm_device *dev = encoder->dev; - struct fsl_dcu_drm_device *fsl_dev = dev->dev_private; - - if (fsl_dev->tcon) - fsl_tcon_bypass_enable(fsl_dev->tcon); -} - -static const struct drm_encoder_helper_funcs encoder_helper_funcs = { - .atomic_check = fsl_dcu_drm_encoder_atomic_check, - .disable = fsl_dcu_drm_encoder_disable, - .enable = fsl_dcu_drm_encoder_enable, -}; - static void fsl_dcu_drm_encoder_destroy(struct drm_encoder *encoder) { drm_encoder_cleanup(encoder); @@ -68,13 +36,16 @@ int fsl_dcu_drm_encoder_create(struct fsl_dcu_drm_device *fsl_dev, int ret; encoder->possible_crtcs = 1; + + /* Use bypass mode for parallel RGB/LVDS encoder */ + if (fsl_dev->tcon) + fsl_tcon_bypass_enable(fsl_dev->tcon); + ret = drm_encoder_init(fsl_dev->drm, encoder, &encoder_funcs, DRM_MODE_ENCODER_LVDS, NULL); if (ret < 0) return ret; - drm_encoder_helper_add(encoder, &encoder_helper_funcs); - return 0; } -- cgit v1.2.3-59-g8ed1b From 
b6ead864ea893ef55828be0ec0d6c10f7c1c4e30 Mon Sep 17 00:00:00 2001 From: Stefan Agner Date: Tue, 4 Oct 2016 16:56:38 -0700 Subject: drm/fsl-dcu: do not transfer registers on plane init There is no need to explicitly initiate a register transfer and turn off the DCU after initializing the plane registers. In fact, this is harmful and leads to unnecessary flickers if the DCU has been left on by the bootloader. Signed-off-by: Stefan Agner Tested-By: Meng Yi --- drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c | 5 ----- 1 file changed, 5 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c index a7e5486bd1e9..9e6f7d8112b3 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c @@ -211,11 +211,6 @@ void fsl_dcu_drm_init_planes(struct drm_device *dev) for (j = 1; j <= fsl_dev->soc->layer_regs; j++) regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(i, j), 0); } - regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE, - DCU_MODE_DCU_MODE_MASK, - DCU_MODE_DCU_MODE(DCU_MODE_OFF)); - regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE, - DCU_UPDATE_MODE_READREG); } struct drm_plane *fsl_dcu_drm_primary_create_plane(struct drm_device *dev) -- cgit v1.2.3-59-g8ed1b From 9789037695018e25902469ea0e540c07580b940f Mon Sep 17 00:00:00 2001 From: Stefan Agner Date: Wed, 5 Oct 2016 14:37:57 -0700 Subject: drm/fsl-dcu: do not transfer registers in mode_set_nofb Do not schedule a transfer of mode settings early. Modes should get applied on on CRTC enable where we also enable the pixel clock. Signed-off-by: Stefan Agner Tested-By: Meng Yi --- drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c index 3371635cd4d7..5ad1d68c8194 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c @@ -116,8 +116,6 @@ static void fsl_dcu_drm_crtc_mode_set_nofb(struct drm_crtc *crtc) DCU_THRESHOLD_LS_BF_VS(BF_VS_VAL) | DCU_THRESHOLD_OUT_BUF_HIGH(BUF_MAX_VAL) | DCU_THRESHOLD_OUT_BUF_LOW(BUF_MIN_VAL)); - regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE, - DCU_UPDATE_MODE_READREG); return; } -- cgit v1.2.3-59-g8ed1b From 0a70c998d0c571c66d0ba8ffd9f803392a53193d Mon Sep 17 00:00:00 2001 From: Stefan Agner Date: Tue, 4 Oct 2016 17:40:29 -0700 Subject: drm/fsl-dcu: enable pixel clock when enabling CRTC The pixel clock should not be on if the CRTC is not in use, hence move clock enable/disable calls into CRTC callbacks. 
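A minimal standalone sketch of the enable/disable pairing this moves into the CRTC callbacks (the demo_* names are invented; the real code uses clk_prepare_enable()/clk_disable_unprepare() as shown in the diff that follows): each enable in the on-path is matched by exactly one disable in the off-path, so the pixel clock's reference count drops back to zero whenever the CRTC is unused.

#include <stdio.h>

struct demo_clk { int enable_count; };

static void demo_clk_enable(struct demo_clk *c)
{
	c->enable_count++;
}

static void demo_clk_disable(struct demo_clk *c)
{
	if (c->enable_count > 0)
		c->enable_count--;
}

static void demo_crtc_enable(struct demo_clk *pix)
{
	demo_clk_enable(pix);	/* clock on first ... */
	/* ... then switch the DCU to normal mode */
}

static void demo_crtc_disable(struct demo_clk *pix)
{
	/* switch the DCU off first ... */
	demo_clk_disable(pix);	/* ... then gate the pixel clock */
}

int main(void)
{
	struct demo_clk pix = { 0 };

	demo_crtc_enable(&pix);
	printf("after enable:  count=%d (clock running)\n", pix.enable_count);
	demo_crtc_disable(&pix);
	printf("after disable: count=%d (clock gated)\n", pix.enable_count);
	return 0;
}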
Signed-off-by: Stefan Agner Tested-By: Meng Yi --- drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c | 2 ++ drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c | 21 +-------------------- 2 files changed, 3 insertions(+), 20 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c index 5ad1d68c8194..b2d5e188b1b8 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c @@ -51,6 +51,7 @@ static void fsl_dcu_drm_disable_crtc(struct drm_crtc *crtc) DCU_MODE_DCU_MODE(DCU_MODE_OFF)); regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE, DCU_UPDATE_MODE_READREG); + clk_disable_unprepare(fsl_dev->pix_clk); } static void fsl_dcu_drm_crtc_enable(struct drm_crtc *crtc) @@ -58,6 +59,7 @@ static void fsl_dcu_drm_crtc_enable(struct drm_crtc *crtc) struct drm_device *dev = crtc->dev; struct fsl_dcu_drm_device *fsl_dev = dev->dev_private; + clk_prepare_enable(fsl_dev->pix_clk); regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE, DCU_MODE_DCU_MODE_MASK, DCU_MODE_DCU_MODE(DCU_MODE_NORMAL)); diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c index 3897f5671776..e04efbed1a54 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c @@ -267,12 +267,6 @@ static int fsl_dcu_drm_pm_resume(struct device *dev) return ret; } - ret = clk_prepare_enable(fsl_dev->pix_clk); - if (ret < 0) { - dev_err(dev, "failed to enable pix clk\n"); - goto disable_dcu_clk; - } - if (fsl_dev->tcon) fsl_tcon_bypass_enable(fsl_dev->tcon); fsl_dcu_drm_init_planes(fsl_dev->drm); @@ -286,10 +280,6 @@ static int fsl_dcu_drm_pm_resume(struct device *dev) enable_irq(fsl_dev->irq); return 0; - -disable_dcu_clk: - clk_disable_unprepare(fsl_dev->clk); - return ret; } #endif @@ -403,18 +393,12 @@ static int fsl_dcu_drm_probe(struct platform_device *pdev) goto disable_clk; } - ret = clk_prepare_enable(fsl_dev->pix_clk); - if (ret < 0) { - dev_err(dev, "failed to enable pix clk\n"); - goto unregister_pix_clk; - } - fsl_dev->tcon = fsl_tcon_init(dev); drm = drm_dev_alloc(driver, dev); if (IS_ERR(drm)) { ret = PTR_ERR(drm); - goto disable_pix_clk; + goto unregister_pix_clk; } fsl_dev->dev = dev; @@ -435,8 +419,6 @@ static int fsl_dcu_drm_probe(struct platform_device *pdev) unref: drm_dev_unref(drm); -disable_pix_clk: - clk_disable_unprepare(fsl_dev->pix_clk); unregister_pix_clk: clk_unregister(fsl_dev->pix_clk); disable_clk: @@ -449,7 +431,6 @@ static int fsl_dcu_drm_remove(struct platform_device *pdev) struct fsl_dcu_drm_device *fsl_dev = platform_get_drvdata(pdev); clk_disable_unprepare(fsl_dev->clk); - clk_disable_unprepare(fsl_dev->pix_clk); clk_unregister(fsl_dev->pix_clk); drm_put_dev(fsl_dev->drm); -- cgit v1.2.3-59-g8ed1b From 02eb924fabc5b699c0d9d354491e6f0767e3c139 Mon Sep 17 00:00:00 2001 From: Andy Grover Date: Thu, 6 Oct 2016 10:07:07 -0500 Subject: target/user: Use sense_reason_t in tcmu_queue_cmd_ring Instead of using -ERROR-style returns, use sense_reason_t. This lets us remove tcmu_pass_op(), and return more correct sense values. Signed-off-by: Andy Grover Signed-off-by: Bryant G. 
Ly Reviewed-by: Christoph Hellwig Reviewed-by: Mike Christie Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_user.c | 29 ++++++++++------------------- 1 file changed, 10 insertions(+), 19 deletions(-) (limited to 'drivers') diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index 62bf4fe5704a..0cd1c61ba2ed 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c @@ -389,7 +389,8 @@ static bool is_ring_space_avail(struct tcmu_dev *udev, size_t cmd_size, size_t d return true; } -static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd) +static sense_reason_t +tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd) { struct tcmu_dev *udev = tcmu_cmd->tcmu_dev; struct se_cmd *se_cmd = tcmu_cmd->se_cmd; @@ -405,7 +406,7 @@ static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd) DECLARE_BITMAP(old_bitmap, DATA_BLOCK_BITS); if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) - return -EINVAL; + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; /* * Must be a certain minimum size for response sense info, but @@ -450,7 +451,7 @@ static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd) finish_wait(&udev->wait_cmdr, &__wait); if (!ret) { pr_warn("tcmu: command timed out\n"); - return -ETIMEDOUT; + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; } spin_lock_irq(&udev->cmdr_lock); @@ -526,10 +527,11 @@ static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd) mod_timer(&udev->timeout, round_jiffies_up(jiffies + msecs_to_jiffies(TCMU_TIME_OUT))); - return 0; + return TCM_NO_SENSE; } -static int tcmu_queue_cmd(struct se_cmd *se_cmd) +static sense_reason_t +tcmu_queue_cmd(struct se_cmd *se_cmd) { struct se_device *se_dev = se_cmd->se_dev; struct tcmu_dev *udev = TCMU_DEV(se_dev); @@ -538,10 +540,10 @@ static int tcmu_queue_cmd(struct se_cmd *se_cmd) tcmu_cmd = tcmu_alloc_cmd(se_cmd); if (!tcmu_cmd) - return -ENOMEM; + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; ret = tcmu_queue_cmd_ring(tcmu_cmd); - if (ret < 0) { + if (ret != TCM_NO_SENSE) { pr_err("TCMU: Could not queue command\n"); spin_lock_irq(&udev->commands_lock); idr_remove(&udev->commands, tcmu_cmd->cmd_id); @@ -1128,21 +1130,10 @@ static sector_t tcmu_get_blocks(struct se_device *dev) dev->dev_attrib.block_size); } -static sense_reason_t -tcmu_pass_op(struct se_cmd *se_cmd) -{ - int ret = tcmu_queue_cmd(se_cmd); - - if (ret != 0) - return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; - else - return TCM_NO_SENSE; -} - static sense_reason_t tcmu_parse_cdb(struct se_cmd *cmd) { - return passthrough_parse_cdb(cmd, tcmu_pass_op); + return passthrough_parse_cdb(cmd, tcmu_queue_cmd); } static const struct target_backend_ops tcmu_ops = { -- cgit v1.2.3-59-g8ed1b From 554617b2bbe25c3fb3c80c28fe7a465884bb40b1 Mon Sep 17 00:00:00 2001 From: Andy Grover Date: Thu, 25 Aug 2016 08:55:53 -0700 Subject: target/user: Return an error if cmd data size is too large Userspace should be implementing VPD B0 (Block Limits) to inform the initiator of max data size, but just in case we do get a too-large request, do what the spec says and return INVALID_CDB_FIELD. Make sure to unlock udev->cmdr_lock before returning. 
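The unlock-before-return detail generalizes: every early error return taken while a lock is held has to drop that lock first, otherwise the next caller blocks forever. A standalone sketch using a pthread mutex in place of the kernel spinlock (invented names, build with -pthread):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t demo_cmdr_lock = PTHREAD_MUTEX_INITIALIZER;

static int demo_queue_cmd(size_t data_length, size_t data_size)
{
	pthread_mutex_lock(&demo_cmdr_lock);

	if (data_length > data_size) {
		/* the fix: drop the lock before the early error return */
		pthread_mutex_unlock(&demo_cmdr_lock);
		return -1;	/* stands in for TCM_INVALID_CDB_FIELD */
	}

	/* ... queue the command here ... */
	pthread_mutex_unlock(&demo_cmdr_lock);
	return 0;
}

int main(void)
{
	printf("oversized request: %d\n", demo_queue_cmd(4096, 1024));
	/* this second call would block forever if the error path above
	 * had returned with the mutex still held */
	printf("normal request:    %d\n", demo_queue_cmd(512, 1024));
	return 0;
}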
Signed-off-by: Andy Grover Reviewed-by: Christoph Hellwig Reviewed-by: Mike Christie Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_user.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index 0cd1c61ba2ed..5de1eac17fed 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c @@ -433,11 +433,14 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd) BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents)); data_length += se_cmd->t_bidi_data_sg->length; } - if ((command_size > (udev->cmdr_size / 2)) - || data_length > udev->data_size) - pr_warn("TCMU: Request of size %zu/%zu may be too big for %u/%zu " + if ((command_size > (udev->cmdr_size / 2)) || + data_length > udev->data_size) { + pr_warn("TCMU: Request of size %zu/%zu is too big for %u/%zu " "cmd/data ring buffers\n", command_size, data_length, udev->cmdr_size, udev->data_size); + spin_unlock_irq(&udev->cmdr_lock); + return TCM_INVALID_CDB_FIELD; + } while (!is_ring_space_avail(udev, command_size, data_length)) { int ret; -- cgit v1.2.3-59-g8ed1b From 3d9b95558f5874ac5d63a057813dc66b480de7e1 Mon Sep 17 00:00:00 2001 From: Andy Grover Date: Thu, 25 Aug 2016 08:55:54 -0700 Subject: target/user: Fix comments to not refer to data ring We no longer use a ringbuffer for the data area, so this might cause confusion. Just call it the data area. Signed-off-by: Andy Grover Reviewed-by: Christoph Hellwig Reviewed-by: Mike Christie Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_user.c | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index 5de1eac17fed..47562509b489 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c @@ -96,7 +96,7 @@ struct tcmu_dev { size_t dev_size; u32 cmdr_size; u32 cmdr_last_cleaned; - /* Offset of data ring from start of mb */ + /* Offset of data area from start of mb */ /* Must add data_off and mb_addr to get the address */ size_t data_off; size_t data_size; @@ -349,7 +349,7 @@ static inline size_t spc_bitmap_free(unsigned long *bitmap) /* * We can't queue a command until we have space available on the cmd ring *and* - * space available on the data ring. + * space available on the data area. * * Called with ring lock held. */ @@ -436,7 +436,7 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd) if ((command_size > (udev->cmdr_size / 2)) || data_length > udev->data_size) { pr_warn("TCMU: Request of size %zu/%zu is too big for %u/%zu " - "cmd/data ring buffers\n", command_size, data_length, + "cmd ring/data area\n", command_size, data_length, udev->cmdr_size, udev->data_size); spin_unlock_irq(&udev->cmdr_lock); return TCM_INVALID_CDB_FIELD; @@ -491,9 +491,7 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd) bitmap_copy(old_bitmap, udev->data_bitmap, DATA_BLOCK_BITS); - /* - * Fix up iovecs, and handle if allocation in data ring wrapped. 
- */ + /* Handle allocating space from the data area */ iov = &entry->req.iov[0]; iov_cnt = 0; copy_to_data_area = (se_cmd->data_direction == DMA_TO_DEVICE @@ -566,7 +564,7 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry * if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) { /* * cmd has been completed already from timeout, just reclaim - * data ring space and free cmd + * data area space and free cmd */ free_data_area(udev, cmd); -- cgit v1.2.3-59-g8ed1b From 3fc6a642e4355abef986b2dd11672216fb18212e Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Fri, 2 Sep 2016 15:30:34 +0100 Subject: iscsi-target: fix spelling mistake "Unsolicitied" -> "Unsolicited" Trivial fix to spelling mistakes in pr_debug message and comments Signed-off-by: Colin Ian King Reviewed-by: Bart Van Assche Signed-off-by: Nicholas Bellinger --- drivers/target/iscsi/iscsi_target.c | 2 +- drivers/target/iscsi/iscsi_target_login.c | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 39b928c2849d..1aaf1f3148b2 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -2982,7 +2982,7 @@ iscsit_build_nopin_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn, pr_debug("Built NOPIN %s Response ITT: 0x%08x, TTT: 0x%08x," " StatSN: 0x%08x, Length %u\n", (nopout_response) ? - "Solicitied" : "Unsolicitied", cmd->init_task_tag, + "Solicited" : "Unsolicited", cmd->init_task_tag, cmd->targ_xfer_tag, cmd->stat_sn, cmd->buf_ptr_size); } EXPORT_SYMBOL(iscsit_build_nopin_rsp); diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c index adf419fa4291..15f79a2ca34a 100644 --- a/drivers/target/iscsi/iscsi_target_login.c +++ b/drivers/target/iscsi/iscsi_target_login.c @@ -434,7 +434,7 @@ static int iscsi_login_zero_tsih_s2( /* * Make MaxRecvDataSegmentLength PAGE_SIZE aligned for - * Immediate Data + Unsolicitied Data-OUT if necessary.. + * Immediate Data + Unsolicited Data-OUT if necessary.. */ param = iscsi_find_param_from_key("MaxRecvDataSegmentLength", conn->param_list); @@ -646,7 +646,7 @@ static void iscsi_post_login_start_timers(struct iscsi_conn *conn) { struct iscsi_session *sess = conn->sess; /* - * FIXME: Unsolicitied NopIN support for ISER + * FIXME: Unsolicited NopIN support for ISER */ if (conn->conn_transport->transport_type == ISCSI_INFINIBAND) return; -- cgit v1.2.3-59-g8ed1b From 1a40f0a36fb669226f0fd29aaece5ff7b6399e80 Mon Sep 17 00:00:00 2001 From: Varun Prakash Date: Thu, 15 Sep 2016 21:20:11 +0530 Subject: iscsi-target: fix iscsi cmd leak If iscsi-target receives NOP OUT with ITT and TTT set to 0xffffffff it allocates iscsi_cmd but does not free the cmd, so free iscsi_cmd in this case. Signed-off-by: Varun Prakash Signed-off-by: Nicholas Bellinger --- drivers/target/iscsi/iscsi_target.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers') diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 1aaf1f3148b2..b7d747e92c7a 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -1804,6 +1804,10 @@ int iscsit_process_nop_out(struct iscsi_conn *conn, struct iscsi_cmd *cmd, * Otherwise, initiator is not expecting a NOPIN is response. * Just ignore for now. 
*/ + + if (cmd) + iscsit_free_cmd(cmd, false); + return 0; } EXPORT_SYMBOL(iscsit_process_nop_out); -- cgit v1.2.3-59-g8ed1b From 527268df31e57cf2b6d417198717c6d6afdb1e3e Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Tue, 4 Oct 2016 16:37:05 -0700 Subject: target: Re-add missing SCF_ACK_KREF assignment in v4.1.y This patch fixes a regression in >= v4.1.y code where the original SCF_ACK_KREF assignment in target_get_sess_cmd() was dropped upstream in commit 054922bb, but the series for addressing TMR ABORT_TASK + LUN_RESET with fabric session reinstatement in commit febe562c20 still depends on this code in transport_cmd_finish_abort(). The regression manifests itself as a se_cmd->cmd_kref +1 leak, where ABORT_TASK + LUN_RESET can hang indefinately for a specific I_T session for drivers using SCF_ACK_KREF, resulting in hung kthreads. This patch has been verified with v4.1.y code. Reported-by: Vaibhav Tandon Tested-by: Vaibhav Tandon Cc: Vaibhav Tandon Cc: stable@vger.kernel.org # 4.1+ Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_transport.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 6094a6beddde..00ec46413146 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -2547,8 +2547,10 @@ int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref) * fabric acknowledgement that requires two target_put_sess_cmd() * invocations before se_cmd descriptor release. */ - if (ack_kref) + if (ack_kref) { kref_get(&se_cmd->cmd_kref); + se_cmd->se_cmd_flags |= SCF_ACK_KREF; + } spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); if (se_sess->sess_tearing_down) { -- cgit v1.2.3-59-g8ed1b From 449a137846c84829a328757cd21fd9ca65c08519 Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Sat, 8 Oct 2016 17:26:44 -0700 Subject: target: Make EXTENDED_COPY 0xe4 failure return COPY TARGET DEVICE NOT REACHABLE This patch addresses a bug where EXTENDED_COPY across multiple LUNs results in a CHECK_CONDITION when the source + destination are not located on the same physical node. ESX Host environments expect sense COPY_ABORTED w/ COPY TARGET DEVICE NOT REACHABLE to be returned when this occurs, in order to signal fallback to local copy method. As described in section 6.3.3 of spc4r22: "If it is not possible to complete processing of a segment because the copy manager is unable to establish communications with a copy target device, because the copy target device does not respond to INQUIRY, or because the data returned in response to INQUIRY indicates an unsupported logical unit, then the EXTENDED COPY command shall be terminated with CHECK CONDITION status, with the sense key set to COPY ABORTED, and the additional sense code set to COPY TARGET DEVICE NOT REACHABLE." Tested on v4.1.y with ESX v5.5u2+ with BlockCopy across multiple nodes. 
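For reference, a standalone sketch of the sense-data mapping being added (the key/ASC/ASCQ values come from the patch and from SPC, where sense key COPY ABORTED is 0x0a and ASC/ASCQ 0x0d/0x02 means COPY TARGET DEVICE NOT REACHABLE; the lookup structure and demo_* names are a simplification, not the target-core table):

#include <stdio.h>

enum demo_reason { DEMO_COPY_TARGET_UNREACHABLE };

struct demo_sense { unsigned char key, asc, ascq; };

static struct demo_sense demo_lookup(enum demo_reason r)
{
	struct demo_sense s = { 0, 0, 0 };

	switch (r) {
	case DEMO_COPY_TARGET_UNREACHABLE:
		s.key = 0x0a;	/* COPY ABORTED */
		s.asc = 0x0d;
		s.ascq = 0x02;	/* COPY TARGET DEVICE NOT REACHABLE */
		break;
	}
	return s;
}

int main(void)
{
	struct demo_sense s = demo_lookup(DEMO_COPY_TARGET_UNREACHABLE);

	printf("sense key 0x%02x asc 0x%02x ascq 0x%02x\n", s.key, s.asc, s.ascq);
	return 0;
}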
Reported-by: Nixon Vincent Tested-by: Nixon Vincent Cc: Nixon Vincent Tested-by: Dinesh Israni Signed-off-by: Dinesh Israni Cc: Dinesh Israni Cc: stable@vger.kernel.org # 3.14+ Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_transport.c | 7 +++++++ drivers/target/target_core_xcopy.c | 22 ++++++++++++++++------ include/target/target_core_base.h | 1 + 3 files changed, 24 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 00ec46413146..000bc6d077ea 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -1706,6 +1706,7 @@ void transport_generic_request_failure(struct se_cmd *cmd, case TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED: case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED: case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED: + case TCM_COPY_TARGET_DEVICE_NOT_REACHABLE: break; case TCM_OUT_OF_RESOURCES: sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; @@ -2873,6 +2874,12 @@ static const struct sense_info sense_info_table[] = { .ascq = 0x03, /* LOGICAL BLOCK REFERENCE TAG CHECK FAILED */ .add_sector_info = true, }, + [TCM_COPY_TARGET_DEVICE_NOT_REACHABLE] = { + .key = COPY_ABORTED, + .asc = 0x0d, + .ascq = 0x02, /* COPY TARGET DEVICE NOT REACHABLE */ + + }, [TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE] = { /* * Returning ILLEGAL REQUEST would cause immediate IO errors on diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c index 75cd85426ae3..f2a1443ecb33 100644 --- a/drivers/target/target_core_xcopy.c +++ b/drivers/target/target_core_xcopy.c @@ -104,7 +104,7 @@ static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op } mutex_unlock(&g_device_mutex); - pr_err("Unable to locate 0xe4 descriptor for EXTENDED_COPY\n"); + pr_debug_ratelimited("Unable to locate 0xe4 descriptor for EXTENDED_COPY\n"); return -EINVAL; } @@ -185,7 +185,7 @@ static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd, struct xcopy_op *xop, unsigned char *p, - unsigned short tdll) + unsigned short tdll, sense_reason_t *sense_ret) { struct se_device *local_dev = se_cmd->se_dev; unsigned char *desc = p; @@ -193,6 +193,8 @@ static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd, unsigned short start = 0; bool src = true; + *sense_ret = TCM_INVALID_PARAMETER_LIST; + if (offset != 0) { pr_err("XCOPY target descriptor list length is not" " multiple of %d\n", XCOPY_TARGET_DESC_LEN); @@ -243,9 +245,16 @@ static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd, rc = target_xcopy_locate_se_dev_e4(se_cmd, xop, true); else rc = target_xcopy_locate_se_dev_e4(se_cmd, xop, false); - - if (rc < 0) + /* + * If a matching IEEE NAA 0x83 descriptor for the requested device + * is not located on this node, return COPY_ABORTED with ASQ/ASQC + * 0x0d/0x02 - COPY_TARGET_DEVICE_NOT_REACHABLE to request the + * initiator to fall back to normal copy method. 
+ */ + if (rc < 0) { + *sense_ret = TCM_COPY_TARGET_DEVICE_NOT_REACHABLE; goto out; + } pr_debug("XCOPY TGT desc: Source dev: %p NAA IEEE WWN: 0x%16phN\n", xop->src_dev, &xop->src_tid_wwn[0]); @@ -816,7 +825,8 @@ out: xcopy_pt_undepend_remotedev(xop); kfree(xop); - pr_warn("target_xcopy_do_work: Setting X-COPY CHECK_CONDITION -> sending response\n"); + pr_warn_ratelimited("target_xcopy_do_work: rc: %d, Setting X-COPY CHECK_CONDITION" + " -> sending response\n", rc); ec_cmd->scsi_status = SAM_STAT_CHECK_CONDITION; target_complete_cmd(ec_cmd, SAM_STAT_CHECK_CONDITION); } @@ -875,7 +885,7 @@ sense_reason_t target_do_xcopy(struct se_cmd *se_cmd) " tdll: %hu sdll: %u inline_dl: %u\n", list_id, list_id_usage, tdll, sdll, inline_dl); - rc = target_xcopy_parse_target_descriptors(se_cmd, xop, &p[16], tdll); + rc = target_xcopy_parse_target_descriptors(se_cmd, xop, &p[16], tdll, &ret); if (rc <= 0) goto out; diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index fb8e3b6febdf..c2119008990a 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -177,6 +177,7 @@ enum tcm_sense_reason_table { TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED = R(0x15), TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED = R(0x16), TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED = R(0x17), + TCM_COPY_TARGET_DEVICE_NOT_REACHABLE = R(0x18), #undef R }; -- cgit v1.2.3-59-g8ed1b From 926317de33998c112c5510301868ea9aa34097e2 Mon Sep 17 00:00:00 2001 From: Dinesh Israni Date: Mon, 10 Oct 2016 20:22:03 -0700 Subject: target: Don't override EXTENDED_COPY xcopy_pt_cmd SCSI status code This patch addresses a bug where a local EXTENDED_COPY WRITE or READ backend I/O request would always return SAM_STAT_CHECK_CONDITION, even if underlying xcopy_pt_cmd->se_cmd generated a different SCSI status code. ESX host environments expect to hit SAM_STAT_RESERVATION_CONFLICT for certain scenarios, and SAM_STAT_CHECK_CONDITION results in non-retriable status for these cases. Tested on v4.1.y with ESX v5.5u2+ with local IBLOCK backend copy. 
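A standalone sketch of the completion rule this enforces (status values follow SAM: GOOD 0x00, CHECK CONDITION 0x02, RESERVATION CONFLICT 0x18; the helper below is an invented simplification, not the target-core completion path): CHECK CONDITION is only a fallback for when no more specific status has already been recorded.

#include <stdio.h>

#define DEMO_SAM_GOOD		0x00
#define DEMO_SAM_CHECK_COND	0x02
#define DEMO_SAM_RESV_CONFLICT	0x18

static unsigned char demo_finish_copy(unsigned char recorded_status)
{
	/* only fall back to CHECK CONDITION if nothing better was recorded */
	if (recorded_status == DEMO_SAM_GOOD)
		return DEMO_SAM_CHECK_COND;
	return recorded_status;
}

int main(void)
{
	printf("no status recorded        -> 0x%02x\n",
	       demo_finish_copy(DEMO_SAM_GOOD));
	printf("reservation conflict kept -> 0x%02x\n",
	       demo_finish_copy(DEMO_SAM_RESV_CONFLICT));
	return 0;
}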
Reported-by: Nixon Vincent Tested-by: Nixon Vincent Cc: Nixon Vincent Tested-by: Dinesh Israni Signed-off-by: Dinesh Israni Cc: Dinesh Israni Cc: stable@vger.kernel.org # 3.14+ Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_xcopy.c | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c index f2a1443ecb33..094a1440eacb 100644 --- a/drivers/target/target_core_xcopy.c +++ b/drivers/target/target_core_xcopy.c @@ -662,6 +662,7 @@ static int target_xcopy_read_source( rc = target_xcopy_setup_pt_cmd(xpt_cmd, xop, src_dev, &cdb[0], remote_port, true); if (rc < 0) { + ec_cmd->scsi_status = xpt_cmd->se_cmd.scsi_status; transport_generic_free_cmd(se_cmd, 0); return rc; } @@ -673,6 +674,7 @@ static int target_xcopy_read_source( rc = target_xcopy_issue_pt_cmd(xpt_cmd); if (rc < 0) { + ec_cmd->scsi_status = xpt_cmd->se_cmd.scsi_status; transport_generic_free_cmd(se_cmd, 0); return rc; } @@ -723,6 +725,7 @@ static int target_xcopy_write_destination( remote_port, false); if (rc < 0) { struct se_cmd *src_cmd = &xop->src_pt_cmd->se_cmd; + ec_cmd->scsi_status = xpt_cmd->se_cmd.scsi_status; /* * If the failure happened before the t_mem_list hand-off in * target_xcopy_setup_pt_cmd(), Reset memory + clear flag so that @@ -738,6 +741,7 @@ static int target_xcopy_write_destination( rc = target_xcopy_issue_pt_cmd(xpt_cmd); if (rc < 0) { + ec_cmd->scsi_status = xpt_cmd->se_cmd.scsi_status; se_cmd->se_cmd_flags &= ~SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC; transport_generic_free_cmd(se_cmd, 0); return rc; @@ -824,10 +828,14 @@ static void target_xcopy_do_work(struct work_struct *work) out: xcopy_pt_undepend_remotedev(xop); kfree(xop); - - pr_warn_ratelimited("target_xcopy_do_work: rc: %d, Setting X-COPY CHECK_CONDITION" - " -> sending response\n", rc); - ec_cmd->scsi_status = SAM_STAT_CHECK_CONDITION; + /* + * Don't override an error scsi status if it has already been set + */ + if (ec_cmd->scsi_status == SAM_STAT_GOOD) { + pr_warn_ratelimited("target_xcopy_do_work: rc: %d, Setting X-COPY" + " CHECK_CONDITION -> sending response\n", rc); + ec_cmd->scsi_status = SAM_STAT_CHECK_CONDITION; + } target_complete_cmd(ec_cmd, SAM_STAT_CHECK_CONDITION); } -- cgit v1.2.3-59-g8ed1b From 61f36166c245e563c7a2b624f4c78c5ce0f680d6 Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Sun, 16 Oct 2016 00:27:42 -0700 Subject: Revert "target: Fix residual overflow handling in target_complete_cmd_with_length" This reverts commit c1ccbfe0311e2380a6d2dcb0714b36904f5d586f. Reverting this patch, as it incorrectly assumes the additional length for INQUIRY in target_complete_cmd_with_length() is SCSI allocation length, which breaks existing user-space code when SCSI allocation length is smaller than additional length. root@scsi-mq:~# sg_inq --len=4 -vvvv /dev/sdb found bsg_major=253 open /dev/sdb with flags=0x800 inquiry cdb: 12 00 00 00 04 00 duration=0 ms inquiry: pass-through requested 4 bytes (data-in) but got -28 bytes inquiry: pass-through can't get negative bytes, say it got none inquiry: got too few bytes (0) INQUIRY resid (32) should never exceed requested len=4 inquiry: failed requesting 4 byte response: Malformed response to SCSI command [resid=32] AFAICT the original change was not to address a specific host issue, so go ahead and revert to original logic for now. Cc: Douglas Gilbert Cc: Martin K. 
Petersen Cc: Sumit Rai Cc: stable@vger.kernel.org # 4.8+ Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_transport.c | 16 +--------------- 1 file changed, 1 insertion(+), 15 deletions(-) (limited to 'drivers') diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 000bc6d077ea..e825d580ccee 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -754,15 +754,7 @@ EXPORT_SYMBOL(target_complete_cmd); void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int length) { - if (scsi_status != SAM_STAT_GOOD) { - return; - } - - /* - * Calculate new residual count based upon length of SCSI data - * transferred. - */ - if (length < cmd->data_length) { + if (scsi_status == SAM_STAT_GOOD && length < cmd->data_length) { if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) { cmd->residual_count += cmd->data_length - length; } else { @@ -771,12 +763,6 @@ void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int len } cmd->data_length = length; - } else if (length > cmd->data_length) { - cmd->se_cmd_flags |= SCF_OVERFLOW_BIT; - cmd->residual_count = length - cmd->data_length; - } else { - cmd->se_cmd_flags &= ~(SCF_OVERFLOW_BIT | SCF_UNDERFLOW_BIT); - cmd->residual_count = 0; } target_complete_cmd(cmd, scsi_status); -- cgit v1.2.3-59-g8ed1b From aed3f249f93fe15f16242bbd4e6c3931674b8d91 Mon Sep 17 00:00:00 2001 From: Srinivas Pandruvada Date: Mon, 3 Oct 2016 12:36:24 -0700 Subject: thermal: intel_pch_thermal: Add an ACPI passive trip On the platforms which has an ACPI companion device associated with PCH thermal device, read passive trip temperature via ACPI _PSV control method. Signed-off-by: Srinivas Pandruvada Signed-off-by: Zhang Rui --- drivers/thermal/intel_pch_thermal.c | 51 +++++++++++++++++++++++++++++++++++++ 1 file changed, 51 insertions(+) (limited to 'drivers') diff --git a/drivers/thermal/intel_pch_thermal.c b/drivers/thermal/intel_pch_thermal.c index 9b4815e81b0d..5cf644a2c012 100644 --- a/drivers/thermal/intel_pch_thermal.c +++ b/drivers/thermal/intel_pch_thermal.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include @@ -66,9 +67,53 @@ struct pch_thermal_device { unsigned long crt_temp; int hot_trip_id; unsigned long hot_temp; + int psv_trip_id; + unsigned long psv_temp; bool bios_enabled; }; +#ifdef CONFIG_ACPI + +/* + * On some platforms, there is a companion ACPI device, which adds + * passive trip temperature using _PSV method. There is no specific + * passive temperature setting in MMIO interface of this PCI device. 
+ */ +static void pch_wpt_add_acpi_psv_trip(struct pch_thermal_device *ptd, + int *nr_trips) +{ + struct acpi_device *adev; + + ptd->psv_trip_id = -1; + + adev = ACPI_COMPANION(&ptd->pdev->dev); + if (adev) { + unsigned long long r; + acpi_status status; + + status = acpi_evaluate_integer(adev->handle, "_PSV", NULL, + &r); + if (ACPI_SUCCESS(status)) { + unsigned long trip_temp; + + trip_temp = DECI_KELVIN_TO_MILLICELSIUS(r); + if (trip_temp) { + ptd->psv_temp = trip_temp; + ptd->psv_trip_id = *nr_trips; + ++(*nr_trips); + } + } + } +} +#else +static void pch_wpt_add_acpi_psv_trip(struct pch_thermal_device *ptd, + int *nr_trips) +{ + ptd->psv_trip_id = -1; + +} +#endif + static int pch_wpt_init(struct pch_thermal_device *ptd, int *nr_trips) { u8 tsel; @@ -119,6 +164,8 @@ read_trips: ++(*nr_trips); } + pch_wpt_add_acpi_psv_trip(ptd, nr_trips); + return 0; } @@ -194,6 +241,8 @@ static int pch_get_trip_type(struct thermal_zone_device *tzd, int trip, *type = THERMAL_TRIP_CRITICAL; else if (ptd->hot_trip_id == trip) *type = THERMAL_TRIP_HOT; + else if (ptd->psv_trip_id == trip) + *type = THERMAL_TRIP_PASSIVE; else return -EINVAL; @@ -208,6 +257,8 @@ static int pch_get_trip_temp(struct thermal_zone_device *tzd, int trip, int *tem *temp = ptd->crt_temp; else if (ptd->hot_trip_id == trip) *temp = ptd->hot_temp; + else if (ptd->psv_trip_id == trip) + *temp = ptd->psv_temp; else return -EINVAL; -- cgit v1.2.3-59-g8ed1b From 33086a9a3071812a829d4b63bed826736a5d8b17 Mon Sep 17 00:00:00 2001 From: Srinivas Pandruvada Date: Mon, 3 Oct 2016 12:36:02 -0700 Subject: thermal: intel_pch_thermal: Enable Haswell PCH Added missing support for Haswell PCH thermal sensor. Signed-off-by: Srinivas Pandruvada Signed-off-by: Zhang Rui --- drivers/thermal/intel_pch_thermal.c | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'drivers') diff --git a/drivers/thermal/intel_pch_thermal.c b/drivers/thermal/intel_pch_thermal.c index 5cf644a2c012..19bf2028e508 100644 --- a/drivers/thermal/intel_pch_thermal.c +++ b/drivers/thermal/intel_pch_thermal.c @@ -25,6 +25,8 @@ #include /* Intel PCH thermal Device IDs */ +#define PCH_THERMAL_DID_HSW_1 0x9C24 /* Haswell PCH */ +#define PCH_THERMAL_DID_HSW_2 0x8C24 /* Haswell PCH */ #define PCH_THERMAL_DID_WPT 0x9CA4 /* Wildcat Point */ #define PCH_THERMAL_DID_SKL 0x9D31 /* Skylake PCH */ @@ -293,6 +295,11 @@ static int intel_pch_thermal_probe(struct pci_dev *pdev, ptd->ops = &pch_dev_ops_wpt; dev_name = "pch_skylake"; break; + case PCH_THERMAL_DID_HSW_1: + case PCH_THERMAL_DID_HSW_2: + ptd->ops = &pch_dev_ops_wpt; + dev_name = "pch_haswell"; + break; default: dev_err(&pdev->dev, "unknown pch thermal device\n"); return -ENODEV; @@ -375,6 +382,8 @@ static int intel_pch_thermal_resume(struct device *device) static struct pci_device_id intel_pch_thermal_id[] = { { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_WPT) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_SKL) }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_HSW_1) }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_HSW_2) }, { 0, }, }; MODULE_DEVICE_TABLE(pci, intel_pch_thermal_id); -- cgit v1.2.3-59-g8ed1b From 3105f234e0aba43e44e277c20f9b32ee8add43d4 Mon Sep 17 00:00:00 2001 From: Eric Ernst Date: Thu, 6 Oct 2016 08:56:49 -0700 Subject: thermal/powerclamp: correct cpu support check Initial logic for checking CPU match resulted in OR of CPU features rather than the intended AND. Updated to use boot_cpu_has macro rather than x86_match_cpu. 
In addition, MWAIT is the only required CPU feature for idle injection to work. Drop other feature requirements since they are only needed for optimal efficiency. CC: stable@vger.kernel.org #v4.7 Signed-off-by: Eric Ernst Acked-by: Jacob Pan Signed-off-by: Zhang Rui --- drivers/thermal/intel_powerclamp.c | 14 ++------------ 1 file changed, 2 insertions(+), 12 deletions(-) (limited to 'drivers') diff --git a/drivers/thermal/intel_powerclamp.c b/drivers/thermal/intel_powerclamp.c index 0e4dc0afcfd2..7a223074df3d 100644 --- a/drivers/thermal/intel_powerclamp.c +++ b/drivers/thermal/intel_powerclamp.c @@ -669,20 +669,10 @@ static struct thermal_cooling_device_ops powerclamp_cooling_ops = { .set_cur_state = powerclamp_set_cur_state, }; -static const struct x86_cpu_id intel_powerclamp_ids[] __initconst = { - { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_MWAIT }, - { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_ARAT }, - { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_NONSTOP_TSC }, - { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_CONSTANT_TSC}, - {} -}; -MODULE_DEVICE_TABLE(x86cpu, intel_powerclamp_ids); - static int __init powerclamp_probe(void) { - if (!x86_match_cpu(intel_powerclamp_ids)) { - pr_err("Intel powerclamp does not run on family %d model %d\n", - boot_cpu_data.x86, boot_cpu_data.x86_model); + if (!boot_cpu_has(X86_FEATURE_MWAIT)) { + pr_err("CPU does not support MWAIT"); return -ENODEV; } -- cgit v1.2.3-59-g8ed1b From 6add49fff9233a5c43011eb42b521257cbaa9bda Mon Sep 17 00:00:00 2001 From: Jacob Siverskog Date: Thu, 20 Oct 2016 09:05:09 +0200 Subject: Bluetooth: btwilink: Fix probe return value Probe functions should return 0 on success. This driver's probe returns the value returned by hci_register_dev(), which is the hci index. This works for systems with only one hci device (id = 0) but for systems where the btwilink device ends up with an id larger than 0, things will start to fall apart. Make the probe function return 0 on success. Signed-off-by: Jacob Siverskog Signed-off-by: Marcel Holtmann --- drivers/bluetooth/btwilink.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/bluetooth/btwilink.c b/drivers/bluetooth/btwilink.c index ef51c9c864c5..b6bb58c41df5 100644 --- a/drivers/bluetooth/btwilink.c +++ b/drivers/bluetooth/btwilink.c @@ -310,7 +310,7 @@ static int bt_ti_probe(struct platform_device *pdev) BT_DBG("HCI device registered (hdev %p)", hdev); dev_set_drvdata(&pdev->dev, hst); - return err; + return 0; } static int bt_ti_remove(struct platform_device *pdev) -- cgit v1.2.3-59-g8ed1b From de24e0a108bc48062e1c7acaa97014bce32a919f Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Wed, 19 Oct 2016 15:45:07 +0200 Subject: USB: serial: cp210x: fix tiocmget error handling The current tiocmget implementation would fail to report errors up the stack and instead leaked a few bits from the stack as a mask of modem-status flags. 
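This bug class is easy to demonstrate outside the kernel: an out-parameter is only meaningful when the call that fills it reports success, so the status must be checked before the buffer is decoded or returned. A standalone sketch (invented demo_* names, ordinary userspace C):

#include <stdio.h>

static int demo_read_reg(int fail, unsigned char *control)
{
	if (fail)
		return -5;	/* simulated -EIO; *control is left untouched */
	*control = 0x03;	/* e.g. DTR | RTS asserted */
	return 0;
}

static int demo_tiocmget(int fail)
{
	unsigned char control;	/* an uninitialized stack byte, as in the bug */
	int result = demo_read_reg(fail, &control);

	if (result)
		return result;	/* the fix: report the error, don't decode garbage */
	return control;
}

int main(void)
{
	printf("success path: 0x%02x\n", demo_tiocmget(0));
	printf("error path:   %d\n", demo_tiocmget(1));
	return 0;
}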
Fixes: 39a66b8d22a3 ("[PATCH] USB: CP2101 Add support for flow control") Cc: stable Signed-off-by: Johan Hovold --- drivers/usb/serial/cp210x.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index 54a4de0efdba..f61477bed3a8 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c @@ -1077,7 +1077,9 @@ static int cp210x_tiocmget(struct tty_struct *tty) u8 control; int result; - cp210x_read_u8_reg(port, CP210X_GET_MDMSTS, &control); + result = cp210x_read_u8_reg(port, CP210X_GET_MDMSTS, &control); + if (result) + return result; result = ((control & CONTROL_DTR) ? TIOCM_DTR : 0) |((control & CONTROL_RTS) ? TIOCM_RTS : 0) -- cgit v1.2.3-59-g8ed1b From 3602ffdee9afa727320d33bda57a6957d72b1df2 Mon Sep 17 00:00:00 2001 From: Rich Felker Date: Wed, 19 Oct 2016 17:53:52 +0000 Subject: irqchip/jcore: Don't show Kconfig menu item for driver Core drivers for J-Core SoCs will be selected implicitly via CONFIG_SH_JCORE_SOC instead. Based on a corresponding change to the clocksource/timer driver requested by Daniel Lezcano. Signed-off-by: Rich Felker Cc: Marc Zyngier Cc: Jason Cooper Cc: linux-sh@vger.kernel.org Link: http://lkml.kernel.org/r/883a3d17084003e3cf21bab73ec12828fe4ff6c6.1476899495.git.dalias@libc.org Signed-off-by: Thomas Gleixner --- drivers/irqchip/Kconfig | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig index 82b0b5daf3f5..bc0af3307bbf 100644 --- a/drivers/irqchip/Kconfig +++ b/drivers/irqchip/Kconfig @@ -158,8 +158,8 @@ config PIC32_EVIC select IRQ_DOMAIN config JCORE_AIC - bool "J-Core integrated AIC" - depends on OF && (SUPERH || COMPILE_TEST) + bool "J-Core integrated AIC" if COMPILE_TEST + depends on OF select IRQ_DOMAIN help Support for the J-Core integrated AIC. -- cgit v1.2.3-59-g8ed1b From fd5bed48b446d5edfb319b5ecbef7154f29bd73e Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Thu, 20 Oct 2016 11:21:01 +0100 Subject: irqchip/gic: Add missing \n to CPU IF adjustment message It really looks bad without a newline. Signed-off-by: Marc Zyngier --- drivers/irqchip/irq-gic.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c index 58e5b4e87056..d6c404b3584d 100644 --- a/drivers/irqchip/irq-gic.c +++ b/drivers/irqchip/irq-gic.c @@ -1279,7 +1279,7 @@ static bool gic_check_eoimode(struct device_node *node, void __iomem **base) */ *base += 0xf000; cpuif_res.start += 0xf000; - pr_warn("GIC: Adjusting CPU interface base to %pa", + pr_warn("GIC: Adjusting CPU interface base to %pa\n", &cpuif_res.start); } -- cgit v1.2.3-59-g8ed1b From 44df08198bc98d75085bb0ff4b54bf43e0bc40c0 Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Wed, 5 Oct 2016 15:08:36 +0530 Subject: gpio: mxs: Unmap region obtained by of_iomap Free memory mapping, if mxs_gpio_probe is not successful. 
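The change below is one instance of the usual probe-unwind rule: every resource acquired earlier in probe must be released on every later failure path, conventionally via goto labels in reverse order of acquisition. A stripped-down sketch with hypothetical names (not the mxs driver itself):

    static int foo_probe(struct platform_device *pdev)
    {
            void __iomem *base;
            int irq_base, err;

            base = of_iomap(pdev->dev.of_node, 0);
            if (!base)
                    return -ENOMEM;

            irq_base = irq_alloc_descs(-1, 0, 32, numa_node_id());
            if (irq_base < 0) {
                    err = irq_base;
                    goto out_iounmap;       /* undo the earlier mapping */
            }

            /* ... more setup; later failures jump to later labels ... */
            return 0;

    out_iounmap:
            iounmap(base);
            return err;
    }
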
Signed-off-by: Arvind Yadav Signed-off-by: Linus Walleij --- drivers/gpio/gpio-mxs.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/gpio/gpio-mxs.c b/drivers/gpio/gpio-mxs.c index b9daa0bf32a4..ee1724806f46 100644 --- a/drivers/gpio/gpio-mxs.c +++ b/drivers/gpio/gpio-mxs.c @@ -308,8 +308,10 @@ static int mxs_gpio_probe(struct platform_device *pdev) writel(~0U, port->base + PINCTRL_IRQSTAT(port) + MXS_CLR); irq_base = irq_alloc_descs(-1, 0, 32, numa_node_id()); - if (irq_base < 0) - return irq_base; + if (irq_base < 0) { + err = irq_base; + goto out_iounmap; + } port->domain = irq_domain_add_legacy(np, 32, irq_base, 0, &irq_domain_simple_ops, NULL); @@ -349,6 +351,8 @@ out_irqdomain_remove: irq_domain_remove(port->domain); out_irqdesc_free: irq_free_descs(irq_base, 32); +out_iounmap: + iounmap(port->base); return err; } -- cgit v1.2.3-59-g8ed1b From d1ca19cb3bc88318a54643f7a250ec6abab51108 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Wed, 12 Oct 2016 09:25:20 +0300 Subject: gpio: stmpe: || vs && typo && was obviously intended here. Fixes: 6936e1f88d23 ('gpio: stmpe: Write int status register only when needed') Signed-off-by: Dan Carpenter Acked-by: Patrice Chotard Signed-off-by: Linus Walleij --- drivers/gpio/gpio-stmpe.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpio/gpio-stmpe.c b/drivers/gpio/gpio-stmpe.c index e7d422a6b90b..5b0042776ec7 100644 --- a/drivers/gpio/gpio-stmpe.c +++ b/drivers/gpio/gpio-stmpe.c @@ -409,7 +409,7 @@ static irqreturn_t stmpe_gpio_irq(int irq, void *dev) * 801/1801/1600, bits are cleared when read. * Edge detect register is not present on 801/1600/1801 */ - if (stmpe->partnum != STMPE801 || stmpe->partnum != STMPE1600 || + if (stmpe->partnum != STMPE801 && stmpe->partnum != STMPE1600 && stmpe->partnum != STMPE1801) { stmpe_reg_write(stmpe, statmsbreg + i, status[i]); stmpe_reg_write(stmpe, -- cgit v1.2.3-59-g8ed1b From 0cb940927df72a514588d8286916eda4aaa4d011 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Mon, 10 Oct 2016 14:42:46 +0200 Subject: gpio: mockup: add sysfs dependency Building the gpio-mockup driver without SYSFS results in a harmless Kconfig warning: warning: (GPIO_MOCKUP) selects GPIO_SYSFS which has unmet direct dependencies (GPIOLIB && SYSFS) We can easily avoid that warning by adding a dependency on SYSFS. Fixes: 0f98dd1b27d2 ("gpio/mockup: add virtual gpio device") Signed-off-by: Arnd Bergmann Signed-off-by: Linus Walleij --- drivers/gpio/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index 26ee00f6bd58..d011cb89d25e 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig @@ -284,7 +284,7 @@ config GPIO_MM_LANTIQ config GPIO_MOCKUP tristate "GPIO Testing Driver" - depends on GPIOLIB + depends on GPIOLIB && SYSFS select GPIO_SYSFS help This enables GPIO Testing driver, which provides a way to test GPIO -- cgit v1.2.3-59-g8ed1b From 67bf5156edc4f58241fd7c119ae145c552adddd6 Mon Sep 17 00:00:00 2001 From: David Arcari Date: Wed, 12 Oct 2016 18:40:30 +0200 Subject: gpio / ACPI: fix returned error from acpi_dev_gpio_irq_get() acpi_dev_gpio_irq_get() currently ignores the error returned by acpi_get_gpiod_by_index() and overwrites it with -ENOENT. Problem is this error can be -EPROBE_DEFER, which just blows up some drivers when the module ordering is not correct. 
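The shape of the fix, reduced to a generic sketch (find_desc() and the surrounding names are hypothetical, not the gpiolib code): keep -ENOENT as the "nothing matched" default, but let a real failure from the lookup helper, which may be -EPROBE_DEFER, replace it so the driver core knows to retry the probe later.

    static int get_nth_irq(struct my_ctrl *ctrl, int index)
    {
            int ret = -ENOENT;              /* default: no matching entry found */
            int i;

            for (i = 0; i <= index; i++) {
                    struct my_desc *desc = find_desc(ctrl, i);

                    if (IS_ERR(desc)) {
                            ret = PTR_ERR(desc);    /* may be -EPROBE_DEFER */
                            break;
                    }
                    if (i == index)
                            return desc_to_irq(desc);
            }

            return ret;
    }
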
Cc: stable@vger.kernel.org Signed-off-by: David Arcari Signed-off-by: Benjamin Tissoires Acked-by: Mika Westerberg Signed-off-by: Linus Walleij --- drivers/gpio/gpiolib-acpi.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c index 58ece201b8e6..72a4b326fd0d 100644 --- a/drivers/gpio/gpiolib-acpi.c +++ b/drivers/gpio/gpiolib-acpi.c @@ -653,14 +653,17 @@ int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index) { int idx, i; unsigned int irq_flags; + int ret = -ENOENT; for (i = 0, idx = 0; idx <= index; i++) { struct acpi_gpio_info info; struct gpio_desc *desc; desc = acpi_get_gpiod_by_index(adev, NULL, i, &info); - if (IS_ERR(desc)) + if (IS_ERR(desc)) { + ret = PTR_ERR(desc); break; + } if (info.gpioint && idx++ == index) { int irq = gpiod_to_irq(desc); @@ -679,7 +682,7 @@ int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index) } } - return -ENOENT; + return ret; } EXPORT_SYMBOL_GPL(acpi_dev_gpio_irq_get); -- cgit v1.2.3-59-g8ed1b From 19271c1a083282fbd568f3f214a6182e973626eb Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Thu, 20 Oct 2016 16:05:42 +0200 Subject: mlxsw: spectrum_router: Use correct tree index for binding By a mistake, there is tree index 0 passed to RALTB. Should be MLXSW_SP_LPM_TREE_MIN. Fixes: b45f64d16d45 ("mlxsw: spectrum_router: Use FIB notifications instead of switchdev calls") Reported-by: Yotam Gigi Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 78fc557d6dd7..1b3a2cb65837 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -1862,7 +1862,8 @@ static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp) if (err) return err; - mlxsw_reg_raltb_pack(raltb_pl, 0, MLXSW_REG_RALXX_PROTOCOL_IPV4, 0); + mlxsw_reg_raltb_pack(raltb_pl, 0, MLXSW_REG_RALXX_PROTOCOL_IPV4, + MLXSW_SP_LPM_TREE_MIN); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl); if (err) return err; -- cgit v1.2.3-59-g8ed1b From 37956d78b8f0c5560421f83d1a5e337ee81c685c Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Thu, 20 Oct 2016 16:05:43 +0200 Subject: mlxsw: spectrum_router: Make mlxsw_sp_router_fib4_del return void and remove warn The function return value is not checked anywhere. Also, the warning causes huge slowdown when removing large number of FIB entries which were not offloaded, because of ordering issue. Ido's preparing a patchset to fix the ordering issue, but that is definitelly not net tree material. Fixes: b45f64d16d45 ("mlxsw: spectrum_router: Use FIB notifications instead of switchdev calls") Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 1b3a2cb65837..f3d50d369bbe 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -1820,19 +1820,17 @@ err_fib_entry_insert: return err; } -static int mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp, - struct fib_entry_notifier_info *fen_info) +static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp, + struct fib_entry_notifier_info *fen_info) { struct mlxsw_sp_fib_entry *fib_entry; if (mlxsw_sp->router.aborted) - return 0; + return; fib_entry = mlxsw_sp_fib_entry_find(mlxsw_sp, fen_info); - if (!fib_entry) { - dev_warn(mlxsw_sp->bus_info->dev, "Failed to find FIB4 entry being removed.\n"); - return -ENOENT; - } + if (!fib_entry) + return; if (fib_entry->ref_count == 1) { mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry); @@ -1840,7 +1838,6 @@ static int mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp, } mlxsw_sp_fib_entry_put(mlxsw_sp, fib_entry); - return 0; } static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp) -- cgit v1.2.3-59-g8ed1b From 7fb6a36bab6b0b158f93eb13faa1b440f8b26009 Mon Sep 17 00:00:00 2001 From: Elad Raz Date: Thu, 20 Oct 2016 16:05:44 +0200 Subject: mlxsw: switchx2: Fix ethernet port initialization When creating an ethernet port fails, we must move the port to disable, otherwise putting the port in switch partition 0 (ETH) or 1 (IB) will always fails. Fixes: 31557f0f9755 ("mlxsw: Introduce Mellanox SwitchX-2 ASIC support") Signed-off-by: Elad Raz Reviewed-by: Jiri Pirko Reviewed-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/switchx2.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c index c0c23e2f3275..92bda8703f87 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c +++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c @@ -1088,6 +1088,7 @@ err_port_stp_state_set: err_port_admin_status_set: err_port_mtu_set: err_port_speed_set: + mlxsw_sx_port_swid_set(mlxsw_sx_port, MLXSW_PORT_SWID_DISABLED_PORT); err_port_swid_set: err_port_system_port_mapping_set: port_not_usable: -- cgit v1.2.3-59-g8ed1b From 164c971d1cc365dc85e5cf9a9302a91c0cf7f126 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Thu, 20 Oct 2016 16:05:45 +0200 Subject: mlxsw: pci: Fix reset wait for SwitchX2 SwitchX2 firmware does not implement reset done yet. Moreover, when busy-polled for ready magic, that slows down firmware and reset takes longer than the defined timeout, causing initialization to fail. So restore the previous behaviour and just sleep in this case. Fixes: 233fa44bd67a ("mlxsw: pci: Implement reset done check") Signed-off-by: Jiri Pirko Reviewed-by: Ido Schimmel Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/pci.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c index e742bd4e8894..912f71f84209 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci.c +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c @@ -1838,11 +1838,17 @@ static const struct mlxsw_bus mlxsw_pci_bus = { .cmd_exec = mlxsw_pci_cmd_exec, }; -static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci) +static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci, + const struct pci_device_id *id) { unsigned long end; mlxsw_pci_write32(mlxsw_pci, SW_RESET, MLXSW_PCI_SW_RESET_RST_BIT); + if (id->device == PCI_DEVICE_ID_MELLANOX_SWITCHX2) { + msleep(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS); + return 0; + } + wmb(); /* reset needs to be written before we read control register */ end = jiffies + msecs_to_jiffies(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS); do { @@ -1909,7 +1915,7 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) mlxsw_pci->pdev = pdev; pci_set_drvdata(pdev, mlxsw_pci); - err = mlxsw_pci_sw_reset(mlxsw_pci); + err = mlxsw_pci_sw_reset(mlxsw_pci, id); if (err) { dev_err(&pdev->dev, "Software reset failed\n"); goto err_sw_reset; -- cgit v1.2.3-59-g8ed1b From 8be0328e52dc2c8c6f61563c9ccc95ae4d63e9ce Mon Sep 17 00:00:00 2001 From: Giuseppe CAVALLARO Date: Thu, 20 Oct 2016 10:01:28 +0200 Subject: stmmac: display the descriptors if DES0 = 0 It makes sense to display the descriptors even if DES0 is zero. This helps for example in case of it is needed to dump rx write-back descriptors to get timestamp status. Signed-off-by: Giuseppe Cavallaro Cc: Alexandre TORGUE Signed-off-by: David S. Miller --- drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c index 4ec7397e7fb3..a1b17cd7886b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c @@ -347,10 +347,9 @@ static void dwmac4_display_ring(void *head, unsigned int size, bool rx) pr_info("%s descriptor ring:\n", rx ? "RX" : "TX"); for (i = 0; i < size; i++) { - if (p->des0) - pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n", - i, (unsigned int)virt_to_phys(p), - p->des0, p->des1, p->des2, p->des3); + pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n", + i, (unsigned int)virt_to_phys(p), + p->des0, p->des1, p->des2, p->des3); p++; } } -- cgit v1.2.3-59-g8ed1b From 4c39135aa412d2f1381e43802523da110ca7855c Mon Sep 17 00:00:00 2001 From: Mathias Nyman Date: Thu, 20 Oct 2016 18:09:18 +0300 Subject: xhci: add restart quirk for Intel Wildcatpoint PCH xHC in Wildcatpoint-LP PCH is similar to LynxPoint-LP and need the same quirks to prevent machines from spurious restart while shutting them down. 
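For background, quirks of this kind are plain bits in a per-controller mask: set once at PCI probe time from the vendor/device ID, and tested wherever the workaround applies. A condensed sketch of the mechanism (the flag names and surrounding struct are illustrative, not the real xhci tables; the device IDs are the LynxPoint-LP and Wildcatpoint-LP ones from the patch below):

    #define MY_QUIRK_SPURIOUS_REBOOT        (1 << 0)
    #define MY_QUIRK_SPURIOUS_WAKEUP        (1 << 1)

    static void apply_pci_quirks(struct pci_dev *pdev, struct my_hcd *hcd)
    {
            if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
                (pdev->device == 0x9c31 ||          /* LynxPoint-LP */
                 pdev->device == 0x9cb1))           /* Wildcatpoint-LP */
                    hcd->quirks |= MY_QUIRK_SPURIOUS_REBOOT |
                                   MY_QUIRK_SPURIOUS_WAKEUP;
    }

    static bool needs_reboot_workaround(struct my_hcd *hcd)
    {
            return hcd->quirks & MY_QUIRK_SPURIOUS_REBOOT;
    }
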
Reported-by: Hasan Mahmood CC: Signed-off-by: Mathias Nyman Signed-off-by: Greg Kroah-Hartman --- drivers/usb/host/xhci-pci.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index d7b0f97abbad..314179b4824e 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -45,6 +45,7 @@ #define PCI_DEVICE_ID_INTEL_LYNXPOINT_XHCI 0x8c31 #define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI 0x9c31 +#define PCI_DEVICE_ID_INTEL_WILDCATPOINT_LP_XHCI 0x9cb1 #define PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI 0x22b5 #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI 0xa12f #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI 0x9d2f @@ -153,7 +154,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) xhci->quirks |= XHCI_SPURIOUS_REBOOT; } if (pdev->vendor == PCI_VENDOR_ID_INTEL && - pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI) { + (pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI || + pdev->device == PCI_DEVICE_ID_INTEL_WILDCATPOINT_LP_XHCI)) { xhci->quirks |= XHCI_SPURIOUS_REBOOT; xhci->quirks |= XHCI_SPURIOUS_WAKEUP; } -- cgit v1.2.3-59-g8ed1b From 346e99736c3ce328fd42d678343b70243aca5f36 Mon Sep 17 00:00:00 2001 From: Mathias Nyman Date: Thu, 20 Oct 2016 18:09:19 +0300 Subject: xhci: workaround for hosts missing CAS bit If a device is unplugged and replugged during Sx system suspend some Intel xHC hosts will overwrite the CAS (Cold attach status) flag and no device connection is noticed in resume. A device in this state can be identified in resume if its link state is in polling or compliance mode, and the current connect status is 0. A device in this state needs to be warm reset. Intel 100/c230 series PCH specification update Doc #332692-006 Errata #8 Observed on Cherryview and Apollolake as they go into compliance mode if LFPS times out during polling, and re-plugged devices are not discovered at resume. Signed-off-by: Mathias Nyman CC: Signed-off-by: Greg Kroah-Hartman --- drivers/usb/host/xhci-hub.c | 37 +++++++++++++++++++++++++++++++++++++ drivers/usb/host/xhci-pci.c | 6 ++++++ drivers/usb/host/xhci.h | 3 +++ 3 files changed, 46 insertions(+) (limited to 'drivers') diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index 730b9fd26685..c047362b3768 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c @@ -1355,6 +1355,35 @@ int xhci_bus_suspend(struct usb_hcd *hcd) return 0; } +/* + * Workaround for missing Cold Attach Status (CAS) if device re-plugged in S3. + * warm reset a USB3 device stuck in polling or compliance mode after resume. 
+ * See Intel 100/c230 series PCH specification update Doc #332692-006 Errata #8 + */ +static bool xhci_port_missing_cas_quirk(int port_index, + __le32 __iomem **port_array) +{ + u32 portsc; + + portsc = readl(port_array[port_index]); + + /* if any of these are set we are not stuck */ + if (portsc & (PORT_CONNECT | PORT_CAS)) + return false; + + if (((portsc & PORT_PLS_MASK) != XDEV_POLLING) && + ((portsc & PORT_PLS_MASK) != XDEV_COMP_MODE)) + return false; + + /* clear wakeup/change bits, and do a warm port reset */ + portsc &= ~(PORT_RWC_BITS | PORT_CEC | PORT_WAKE_BITS); + portsc |= PORT_WR; + writel(portsc, port_array[port_index]); + /* flush write */ + readl(port_array[port_index]); + return true; +} + int xhci_bus_resume(struct usb_hcd *hcd) { struct xhci_hcd *xhci = hcd_to_xhci(hcd); @@ -1392,6 +1421,14 @@ int xhci_bus_resume(struct usb_hcd *hcd) u32 temp; temp = readl(port_array[port_index]); + + /* warm reset CAS limited ports stuck in polling/compliance */ + if ((xhci->quirks & XHCI_MISSING_CAS) && + (hcd->speed >= HCD_USB3) && + xhci_port_missing_cas_quirk(port_index, port_array)) { + xhci_dbg(xhci, "reset stuck port %d\n", port_index); + continue; + } if (DEV_SUPERSPEED_ANY(temp)) temp &= ~(PORT_RWC_BITS | PORT_CEC | PORT_WAKE_BITS); else diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index 314179b4824e..e96ae80d107e 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -51,6 +51,7 @@ #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI 0x9d2f #define PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI 0x0aa8 #define PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI 0x1aa8 +#define PCI_DEVICE_ID_INTEL_APL_XHCI 0x5aa8 static const char hcd_name[] = "xhci_hcd"; @@ -171,6 +172,11 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI) { xhci->quirks |= XHCI_SSIC_PORT_UNUSED; } + if (pdev->vendor == PCI_VENDOR_ID_INTEL && + (pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI || + pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI)) + xhci->quirks |= XHCI_MISSING_CAS; + if (pdev->vendor == PCI_VENDOR_ID_ETRON && pdev->device == PCI_DEVICE_ID_EJ168) { xhci->quirks |= XHCI_RESET_ON_RESUME; diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index b2c1dc5dc0f3..f945380035d0 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -314,6 +314,8 @@ struct xhci_op_regs { #define XDEV_U2 (0x2 << 5) #define XDEV_U3 (0x3 << 5) #define XDEV_INACTIVE (0x6 << 5) +#define XDEV_POLLING (0x7 << 5) +#define XDEV_COMP_MODE (0xa << 5) #define XDEV_RESUME (0xf << 5) /* true: port has power (see HCC_PPC) */ #define PORT_POWER (1 << 9) @@ -1653,6 +1655,7 @@ struct xhci_hcd { #define XHCI_MTK_HOST (1 << 21) #define XHCI_SSIC_PORT_UNUSED (1 << 22) #define XHCI_NO_64BIT_SUPPORT (1 << 23) +#define XHCI_MISSING_CAS (1 << 24) unsigned int num_active_eps; unsigned int limit_active_eps; /* There are two roothubs to keep track of bus suspend info for */ -- cgit v1.2.3-59-g8ed1b From 7d3b016a6f5a0fa610dfd02b05654c08fa4ae514 Mon Sep 17 00:00:00 2001 From: Mathias Nyman Date: Thu, 20 Oct 2016 18:09:20 +0300 Subject: xhci: use default USB_RESUME_TIMEOUT when resuming ports. USB2 host inititated resume, and system suspend bus resume need to use the same USB_RESUME_TIMEOUT as elsewhere. This resolves a device disconnect issue at system resume seen on Intel Braswell and Apollolake, but is in no way limited to those platforms. 
Signed-off-by: Mathias Nyman CC: Signed-off-by: Greg Kroah-Hartman --- drivers/usb/host/xhci-hub.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index c047362b3768..0ef16900efed 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c @@ -1166,7 +1166,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, xhci_set_link_state(xhci, port_array, wIndex, XDEV_RESUME); spin_unlock_irqrestore(&xhci->lock, flags); - msleep(20); + msleep(USB_RESUME_TIMEOUT); spin_lock_irqsave(&xhci->lock, flags); xhci_set_link_state(xhci, port_array, wIndex, XDEV_U0); @@ -1447,7 +1447,7 @@ int xhci_bus_resume(struct usb_hcd *hcd) if (need_usb2_u3_exit) { spin_unlock_irqrestore(&xhci->lock, flags); - msleep(20); + msleep(USB_RESUME_TIMEOUT); spin_lock_irqsave(&xhci->lock, flags); } -- cgit v1.2.3-59-g8ed1b From a478b097474cd9f2268ab1beaca74ff09e652b9b Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 20 Oct 2016 17:15:41 +0200 Subject: ahci: fix nvec check commit 17a51f12 ("ahci: only try to use multi-MSI mode if there is more than 1 port") lead to a case where nvec isn't initialized before it's used. Fix this by moving the check into the n_ports conditional. Reported-and-reviewed-by Colin Ian King Signed-off-by: Christoph Hellwig Signed-off-by: Tejun Heo --- drivers/ata/ahci.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index ed311a040fed..60e42e2ed68f 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -1436,14 +1436,14 @@ static int ahci_init_msi(struct pci_dev *pdev, unsigned int n_ports, "ahci: MRSM is on, fallback to single MSI\n"); pci_free_irq_vectors(pdev); } - } - /* - * -ENOSPC indicated we don't have enough vectors. Don't bother trying - * a single vectors for any other error: - */ - if (nvec < 0 && nvec != -ENOSPC) - return nvec; + /* + * -ENOSPC indicated we don't have enough vectors. Don't bother + * trying a single vectors for any other error: + */ + if (nvec < 0 && nvec != -ENOSPC) + return nvec; + } /* * If the host is not capable of supporting per-port vectors, fall -- cgit v1.2.3-59-g8ed1b From 9995f4f184613fb02ee73092b03545520a72b104 Mon Sep 17 00:00:00 2001 From: Rich Felker Date: Thu, 13 Oct 2016 21:51:06 +0000 Subject: clocksource: Add J-Core timer/clocksource driver At the hardware level, the J-Core PIT is integrated with the interrupt controller, but it is represented as its own device and has an independent programming interface. It provides a 12-bit countdown timer, which is not presently used, and a periodic timer. The interval length for the latter is programmable via a 32-bit throttle register whose units are determined by a bus-period register. The periodic timer is used to implement both periodic and oneshot clock event modes; in oneshot mode the interrupt handler simply disables the timer as soon as it fires. Despite its device tree node representing an interrupt for the PIT, the actual irq generated is programmable, not hard-wired. The driver is responsible for programming the PIT to generate the hardware irq number that the DT assigns to it. On SMP configurations, J-Core provides cpu-local instances of the PIT; no broadcast timer is needed. This driver supports the creation of the necessary per-cpu clock_event_device instances. 
A nanosecond-resolution clocksource is provided using the J-Core "RTC" registers, which give a 64-bit seconds count and 32-bit nanoseconds that wrap every second. The driver converts these to a full-range 32-bit nanoseconds count. Signed-off-by: Rich Felker Cc: Mark Rutland Cc: devicetree@vger.kernel.org Cc: linux-sh@vger.kernel.org Cc: Daniel Lezcano Cc: Rob Herring Link: http://lkml.kernel.org/r/b591ff12cc5ebf63d1edc98da26046f95a233814.1476393790.git.dalias@libc.org Signed-off-by: Thomas Gleixner --- drivers/clocksource/Kconfig | 10 ++ drivers/clocksource/Makefile | 1 + drivers/clocksource/jcore-pit.c | 249 ++++++++++++++++++++++++++++++++++++++++ include/linux/cpuhotplug.h | 1 + 4 files changed, 261 insertions(+) create mode 100644 drivers/clocksource/jcore-pit.c (limited to 'drivers') diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig index 245190839359..e2c6e43cf8ca 100644 --- a/drivers/clocksource/Kconfig +++ b/drivers/clocksource/Kconfig @@ -417,6 +417,16 @@ config SYS_SUPPORTS_SH_TMU config SYS_SUPPORTS_EM_STI bool +config CLKSRC_JCORE_PIT + bool "J-Core PIT timer driver" if COMPILE_TEST + depends on OF + depends on GENERIC_CLOCKEVENTS + depends on HAS_IOMEM + select CLKSRC_MMIO + help + This enables build of clocksource and clockevent driver for + the integrated PIT in the J-Core synthesizable, open source SoC. + config SH_TIMER_CMT bool "Renesas CMT timer driver" if COMPILE_TEST depends on GENERIC_CLOCKEVENTS diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile index fd9d6df0bbc0..cf87f407f1ad 100644 --- a/drivers/clocksource/Makefile +++ b/drivers/clocksource/Makefile @@ -5,6 +5,7 @@ obj-$(CONFIG_ATMEL_TCB_CLKSRC) += tcb_clksrc.o obj-$(CONFIG_X86_PM_TIMER) += acpi_pm.o obj-$(CONFIG_SCx200HR_TIMER) += scx200_hrt.o obj-$(CONFIG_CS5535_CLOCK_EVENT_SRC) += cs5535-clockevt.o +obj-$(CONFIG_CLKSRC_JCORE_PIT) += jcore-pit.o obj-$(CONFIG_SH_TIMER_CMT) += sh_cmt.o obj-$(CONFIG_SH_TIMER_MTU2) += sh_mtu2.o obj-$(CONFIG_SH_TIMER_TMU) += sh_tmu.o diff --git a/drivers/clocksource/jcore-pit.c b/drivers/clocksource/jcore-pit.c new file mode 100644 index 000000000000..54e1665aa03c --- /dev/null +++ b/drivers/clocksource/jcore-pit.c @@ -0,0 +1,249 @@ +/* + * J-Core SoC PIT/clocksource driver + * + * Copyright (C) 2015-2016 Smart Energy Instruments, Inc. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define PIT_IRQ_SHIFT 12 +#define PIT_PRIO_SHIFT 20 +#define PIT_ENABLE_SHIFT 26 +#define PIT_PRIO_MASK 0xf + +#define REG_PITEN 0x00 +#define REG_THROT 0x10 +#define REG_COUNT 0x14 +#define REG_BUSPD 0x18 +#define REG_SECHI 0x20 +#define REG_SECLO 0x24 +#define REG_NSEC 0x28 + +struct jcore_pit { + struct clock_event_device ced; + void __iomem *base; + unsigned long periodic_delta; + u32 enable_val; +}; + +static void __iomem *jcore_pit_base; +static struct jcore_pit __percpu *jcore_pit_percpu; + +static notrace u64 jcore_sched_clock_read(void) +{ + u32 seclo, nsec, seclo0; + __iomem void *base = jcore_pit_base; + + seclo = readl(base + REG_SECLO); + do { + seclo0 = seclo; + nsec = readl(base + REG_NSEC); + seclo = readl(base + REG_SECLO); + } while (seclo0 != seclo); + + return seclo * NSEC_PER_SEC + nsec; +} + +static cycle_t jcore_clocksource_read(struct clocksource *cs) +{ + return jcore_sched_clock_read(); +} + +static int jcore_pit_disable(struct jcore_pit *pit) +{ + writel(0, pit->base + REG_PITEN); + return 0; +} + +static int jcore_pit_set(unsigned long delta, struct jcore_pit *pit) +{ + jcore_pit_disable(pit); + writel(delta, pit->base + REG_THROT); + writel(pit->enable_val, pit->base + REG_PITEN); + return 0; +} + +static int jcore_pit_set_state_shutdown(struct clock_event_device *ced) +{ + struct jcore_pit *pit = container_of(ced, struct jcore_pit, ced); + + return jcore_pit_disable(pit); +} + +static int jcore_pit_set_state_oneshot(struct clock_event_device *ced) +{ + struct jcore_pit *pit = container_of(ced, struct jcore_pit, ced); + + return jcore_pit_disable(pit); +} + +static int jcore_pit_set_state_periodic(struct clock_event_device *ced) +{ + struct jcore_pit *pit = container_of(ced, struct jcore_pit, ced); + + return jcore_pit_set(pit->periodic_delta, pit); +} + +static int jcore_pit_set_next_event(unsigned long delta, + struct clock_event_device *ced) +{ + struct jcore_pit *pit = container_of(ced, struct jcore_pit, ced); + + return jcore_pit_set(delta, pit); +} + +static int jcore_pit_local_init(unsigned cpu) +{ + struct jcore_pit *pit = this_cpu_ptr(jcore_pit_percpu); + unsigned buspd, freq; + + pr_info("Local J-Core PIT init on cpu %u\n", cpu); + + buspd = readl(pit->base + REG_BUSPD); + freq = DIV_ROUND_CLOSEST(NSEC_PER_SEC, buspd); + pit->periodic_delta = DIV_ROUND_CLOSEST(NSEC_PER_SEC, HZ * buspd); + + clockevents_config_and_register(&pit->ced, freq, 1, ULONG_MAX); + + return 0; +} + +static irqreturn_t jcore_timer_interrupt(int irq, void *dev_id) +{ + struct jcore_pit *pit = this_cpu_ptr(dev_id); + + if (clockevent_state_oneshot(&pit->ced)) + jcore_pit_disable(pit); + + pit->ced.event_handler(&pit->ced); + + return IRQ_HANDLED; +} + +static int __init jcore_pit_init(struct device_node *node) +{ + int err; + unsigned pit_irq, cpu; + unsigned long hwirq; + u32 irqprio, enable_val; + + jcore_pit_base = of_iomap(node, 0); + if (!jcore_pit_base) { + pr_err("Error: Cannot map base address for J-Core PIT\n"); + return -ENXIO; + } + + pit_irq = irq_of_parse_and_map(node, 0); + if (!pit_irq) { + pr_err("Error: J-Core PIT has no IRQ\n"); + return -ENXIO; + } + + pr_info("Initializing J-Core PIT at %p IRQ %d\n", + jcore_pit_base, pit_irq); + + err = clocksource_mmio_init(jcore_pit_base, "jcore_pit_cs", + NSEC_PER_SEC, 400, 32, + jcore_clocksource_read); + if (err) { + pr_err("Error registering clocksource device: %d\n", err); + return err; + } + + 
sched_clock_register(jcore_sched_clock_read, 32, NSEC_PER_SEC); + + jcore_pit_percpu = alloc_percpu(struct jcore_pit); + if (!jcore_pit_percpu) { + pr_err("Failed to allocate memory for clock event device\n"); + return -ENOMEM; + } + + err = request_irq(pit_irq, jcore_timer_interrupt, + IRQF_TIMER | IRQF_PERCPU, + "jcore_pit", jcore_pit_percpu); + if (err) { + pr_err("pit irq request failed: %d\n", err); + free_percpu(jcore_pit_percpu); + return err; + } + + /* + * The J-Core PIT is not hard-wired to a particular IRQ, but + * integrated with the interrupt controller such that the IRQ it + * generates is programmable, as follows: + * + * The bit layout of the PIT enable register is: + * + * .....e..ppppiiiiiiii............ + * + * where the .'s indicate unrelated/unused bits, e is enable, + * p is priority, and i is hard irq number. + * + * For the PIT included in AIC1 (obsolete but still in use), + * any hard irq (trap number) can be programmed via the 8 + * iiiiiiii bits, and a priority (0-15) is programmable + * separately in the pppp bits. + * + * For the PIT included in AIC2 (current), the programming + * interface is equivalent modulo interrupt mapping. This is + * why a different compatible tag was not used. However only + * traps 64-127 (the ones actually intended to be used for + * interrupts, rather than syscalls/exceptions/etc.) can be + * programmed (the high 2 bits of i are ignored) and the + * priority pppp is <<2'd and or'd onto the irq number. This + * choice seems to have been made on the hardware engineering + * side under an assumption that preserving old AIC1 priority + * mappings was important. Future models will likely ignore + * the pppp field. + */ + hwirq = irq_get_irq_data(pit_irq)->hwirq; + irqprio = (hwirq >> 2) & PIT_PRIO_MASK; + enable_val = (1U << PIT_ENABLE_SHIFT) + | (hwirq << PIT_IRQ_SHIFT) + | (irqprio << PIT_PRIO_SHIFT); + + for_each_present_cpu(cpu) { + struct jcore_pit *pit = per_cpu_ptr(jcore_pit_percpu, cpu); + + pit->base = of_iomap(node, cpu); + if (!pit->base) { + pr_err("Unable to map PIT for cpu %u\n", cpu); + continue; + } + + pit->ced.name = "jcore_pit"; + pit->ced.features = CLOCK_EVT_FEAT_PERIODIC + | CLOCK_EVT_FEAT_ONESHOT + | CLOCK_EVT_FEAT_PERCPU; + pit->ced.cpumask = cpumask_of(cpu); + pit->ced.rating = 400; + pit->ced.irq = pit_irq; + pit->ced.set_state_shutdown = jcore_pit_set_state_shutdown; + pit->ced.set_state_periodic = jcore_pit_set_state_periodic; + pit->ced.set_state_oneshot = jcore_pit_set_state_oneshot; + pit->ced.set_next_event = jcore_pit_set_next_event; + + pit->enable_val = enable_val; + } + + cpuhp_setup_state(CPUHP_AP_JCORE_TIMER_STARTING, + "AP_JCORE_TIMER_STARTING", + jcore_pit_local_init, NULL); + + return 0; +} + +CLOCKSOURCE_OF_DECLARE(jcore_pit, "jcore,pit", jcore_pit_init); diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 9b207a8c5af3..afe641c02dca 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -81,6 +81,7 @@ enum cpuhp_state { CPUHP_AP_ARM_ARCH_TIMER_STARTING, CPUHP_AP_ARM_GLOBAL_TIMER_STARTING, CPUHP_AP_DUMMY_TIMER_STARTING, + CPUHP_AP_JCORE_TIMER_STARTING, CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING, CPUHP_AP_ARM_TWD_STARTING, CPUHP_AP_METAG_TIMER_STARTING, -- cgit v1.2.3-59-g8ed1b From fcd91dd449867c6bfe56a81cabba76b829fd05cd Mon Sep 17 00:00:00 2001 From: Sabrina Dubroca Date: Thu, 20 Oct 2016 15:58:02 +0200 Subject: net: add recursion limit to GRO MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently, GRO can do 
unlimited recursion through the gro_receive handlers. This was fixed for tunneling protocols by limiting tunnel GRO to one level with encap_mark, but both VLAN and TEB still have this problem. Thus, the kernel is vulnerable to a stack overflow, if we receive a packet composed entirely of VLAN headers. This patch adds a recursion counter to the GRO layer to prevent stack overflow. When a gro_receive function hits the recursion limit, GRO is aborted for this skb and it is processed normally. This recursion counter is put in the GRO CB, but could be turned into a percpu counter if we run out of space in the CB. Thanks to Vladimír Beneš for the initial bug report. Fixes: CVE-2016-7039 Fixes: 9b174d88c257 ("net: Add Transparent Ethernet Bridging GRO support.") Fixes: 66e5133f19e9 ("vlan: Add GRO support for non hardware accelerated vlan") Signed-off-by: Sabrina Dubroca Reviewed-by: Jiri Benc Acked-by: Hannes Frederic Sowa Acked-by: Tom Herbert Signed-off-by: David S. Miller --- drivers/net/geneve.c | 2 +- drivers/net/vxlan.c | 2 +- include/linux/netdevice.h | 39 ++++++++++++++++++++++++++++++++++++++- net/8021q/vlan.c | 2 +- net/core/dev.c | 1 + net/ethernet/eth.c | 2 +- net/ipv4/af_inet.c | 2 +- net/ipv4/fou.c | 4 ++-- net/ipv4/gre_offload.c | 2 +- net/ipv4/udp_offload.c | 2 +- net/ipv6/ip6_offload.c | 2 +- 11 files changed, 49 insertions(+), 11 deletions(-) (limited to 'drivers') diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 3c20e87bb761..16af1ce99233 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -453,7 +453,7 @@ static struct sk_buff **geneve_gro_receive(struct sock *sk, skb_gro_pull(skb, gh_len); skb_gro_postpull_rcsum(skb, gh, gh_len); - pp = ptype->callbacks.gro_receive(head, skb); + pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb); flush = 0; out_unlock: diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index e7d16687538b..c1639a3e95a4 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -583,7 +583,7 @@ static struct sk_buff **vxlan_gro_receive(struct sock *sk, } } - pp = eth_gro_receive(head, skb); + pp = call_gro_receive(eth_gro_receive, head, skb); flush = 0; out: diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 465e128699a1..91ee3643ccc8 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -2169,7 +2169,10 @@ struct napi_gro_cb { /* Used to determine if flush_id can be ignored */ u8 is_atomic:1; - /* 5 bit hole */ + /* Number of gro_receive callbacks this packet already went through */ + u8 recursion_counter:4; + + /* 1 bit hole */ /* used to support CHECKSUM_COMPLETE for tunneling protocols */ __wsum csum; @@ -2180,6 +2183,40 @@ struct napi_gro_cb { #define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb) +#define GRO_RECURSION_LIMIT 15 +static inline int gro_recursion_inc_test(struct sk_buff *skb) +{ + return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT; +} + +typedef struct sk_buff **(*gro_receive_t)(struct sk_buff **, struct sk_buff *); +static inline struct sk_buff **call_gro_receive(gro_receive_t cb, + struct sk_buff **head, + struct sk_buff *skb) +{ + if (unlikely(gro_recursion_inc_test(skb))) { + NAPI_GRO_CB(skb)->flush |= 1; + return NULL; + } + + return cb(head, skb); +} + +typedef struct sk_buff **(*gro_receive_sk_t)(struct sock *, struct sk_buff **, + struct sk_buff *); +static inline struct sk_buff **call_gro_receive_sk(gro_receive_sk_t cb, + struct sock *sk, + struct sk_buff **head, + struct sk_buff *skb) +{ + if 
(unlikely(gro_recursion_inc_test(skb))) { + NAPI_GRO_CB(skb)->flush |= 1; + return NULL; + } + + return cb(sk, head, skb); +} + struct packet_type { __be16 type; /* This is really htons(ether_type). */ struct net_device *dev; /* NULL is wildcarded here */ diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c index 8de138d3306b..f2531ad66b68 100644 --- a/net/8021q/vlan.c +++ b/net/8021q/vlan.c @@ -664,7 +664,7 @@ static struct sk_buff **vlan_gro_receive(struct sk_buff **head, skb_gro_pull(skb, sizeof(*vhdr)); skb_gro_postpull_rcsum(skb, vhdr, sizeof(*vhdr)); - pp = ptype->callbacks.gro_receive(head, skb); + pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb); out_unlock: rcu_read_unlock(); diff --git a/net/core/dev.c b/net/core/dev.c index b09ac57f4348..dbc871306910 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -4511,6 +4511,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff NAPI_GRO_CB(skb)->flush = 0; NAPI_GRO_CB(skb)->free = 0; NAPI_GRO_CB(skb)->encap_mark = 0; + NAPI_GRO_CB(skb)->recursion_counter = 0; NAPI_GRO_CB(skb)->is_fou = 0; NAPI_GRO_CB(skb)->is_atomic = 1; NAPI_GRO_CB(skb)->gro_remcsum_start = 0; diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c index 66dff5e3d772..02acfff36028 100644 --- a/net/ethernet/eth.c +++ b/net/ethernet/eth.c @@ -439,7 +439,7 @@ struct sk_buff **eth_gro_receive(struct sk_buff **head, skb_gro_pull(skb, sizeof(*eh)); skb_gro_postpull_rcsum(skb, eh, sizeof(*eh)); - pp = ptype->callbacks.gro_receive(head, skb); + pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb); out_unlock: rcu_read_unlock(); diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 1effc986739e..9648c97e541f 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -1391,7 +1391,7 @@ struct sk_buff **inet_gro_receive(struct sk_buff **head, struct sk_buff *skb) skb_gro_pull(skb, sizeof(*iph)); skb_set_transport_header(skb, skb_gro_offset(skb)); - pp = ops->callbacks.gro_receive(head, skb); + pp = call_gro_receive(ops->callbacks.gro_receive, head, skb); out_unlock: rcu_read_unlock(); diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c index cf50f7e2b012..030d1531e897 100644 --- a/net/ipv4/fou.c +++ b/net/ipv4/fou.c @@ -249,7 +249,7 @@ static struct sk_buff **fou_gro_receive(struct sock *sk, if (!ops || !ops->callbacks.gro_receive) goto out_unlock; - pp = ops->callbacks.gro_receive(head, skb); + pp = call_gro_receive(ops->callbacks.gro_receive, head, skb); out_unlock: rcu_read_unlock(); @@ -441,7 +441,7 @@ next_proto: if (WARN_ON_ONCE(!ops || !ops->callbacks.gro_receive)) goto out_unlock; - pp = ops->callbacks.gro_receive(head, skb); + pp = call_gro_receive(ops->callbacks.gro_receive, head, skb); flush = 0; out_unlock: diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c index 96e0efecefa6..d5cac99170b1 100644 --- a/net/ipv4/gre_offload.c +++ b/net/ipv4/gre_offload.c @@ -229,7 +229,7 @@ static struct sk_buff **gre_gro_receive(struct sk_buff **head, /* Adjusted NAPI_GRO_CB(skb)->csum after skb_gro_pull()*/ skb_gro_postpull_rcsum(skb, greh, grehlen); - pp = ptype->callbacks.gro_receive(head, skb); + pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb); flush = 0; out_unlock: diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c index f9333c963607..b2be1d9757ef 100644 --- a/net/ipv4/udp_offload.c +++ b/net/ipv4/udp_offload.c @@ -295,7 +295,7 @@ unflush: skb_gro_pull(skb, sizeof(struct udphdr)); /* pull encapsulating udp header */ skb_gro_postpull_rcsum(skb, uh, sizeof(struct udphdr)); - pp = 
udp_sk(sk)->gro_receive(sk, head, skb); + pp = call_gro_receive_sk(udp_sk(sk)->gro_receive, sk, head, skb); out_unlock: rcu_read_unlock(); diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c index e7bfd55899a3..1fcf61f1cbc3 100644 --- a/net/ipv6/ip6_offload.c +++ b/net/ipv6/ip6_offload.c @@ -246,7 +246,7 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head, skb_gro_postpull_rcsum(skb, iph, nlen); - pp = ops->callbacks.gro_receive(head, skb); + pp = call_gro_receive(ops->callbacks.gro_receive, head, skb); out_unlock: rcu_read_unlock(); -- cgit v1.2.3-59-g8ed1b From 2399d6143f85b155ae84ccd94237befd36b8f6c7 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Thu, 20 Oct 2016 09:32:19 -0700 Subject: net: dsa: bcm_sf2: Prevent GPHY shutdown for kexec'd kernels For a kernel that is being kexec'd we re-enable the integrated GPHY in order for the subsequent MDIO bus scan to succeed and properly bind to the bcm7xxx PHY driver. If we did not do that, the GPHY would be shut down by the time the MDIO driver is probing the bus, and it would fail to read the correct PHY OUI and therefore bind to an appropriate PHY driver. Later on, this would cause DSA not to be able to successfully attach to the PHY, and the interface would not be created at all. Signed-off-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/dsa/bcm_sf2.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+) (limited to 'drivers') diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index 0427009bc924..077a24541584 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -30,6 +30,7 @@ #include #include #include +#include #include "bcm_sf2.h" #include "bcm_sf2_regs.h" @@ -1133,6 +1134,18 @@ static int bcm_sf2_sw_remove(struct platform_device *pdev) return 0; } +static void bcm_sf2_sw_shutdown(struct platform_device *pdev) +{ + struct bcm_sf2_priv *priv = platform_get_drvdata(pdev); + + /* For a kernel about to be kexec'd we want to keep the GPHY on for a + * successful MDIO bus scan to occur. If we did turn off the GPHY + * before (e.g: port_disable), this will also power it back on. + */ + if (priv->hw_params.num_gphy == 1) + bcm_sf2_gphy_enable_set(priv->dev->ds, kexec_in_progress); +} + #ifdef CONFIG_PM_SLEEP static int bcm_sf2_suspend(struct device *dev) { @@ -1163,6 +1176,7 @@ MODULE_DEVICE_TABLE(of, bcm_sf2_of_match); static struct platform_driver bcm_sf2_driver = { .probe = bcm_sf2_sw_probe, .remove = bcm_sf2_sw_remove, + .shutdown = bcm_sf2_sw_shutdown, .driver = { .name = "brcm-sf2", .of_match_table = bcm_sf2_of_match, -- cgit v1.2.3-59-g8ed1b From 593876838826914a7e4e05fbbcb728be6fbc4d89 Mon Sep 17 00:00:00 2001 From: Chen-Yu Tsai Date: Tue, 18 Oct 2016 13:49:18 +0800 Subject: Revert "clocksource/drivers/timer_sun5i: Replace code by clocksource_mmio_init" struct clocksource is also used by the clk notifier callback, to unregister and re-register the clocksource with a different clock rate. clocksource_mmio_init does not pass back a pointer to the struct used, and the clk notifier callback assumes that the struct clocksource in struct sun5i_timer_clksrc is valid. 
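A generic sketch of why the driver needs to register a clocksource it owns (hypothetical names, not the sun5i code; error handling omitted for brevity): the clk notifier can only unregister and re-register at the new rate if it holds the very object that was registered, and clocksource_mmio_init() never hands its internal object back.

    struct my_timer {
            void __iomem *base;
            struct clocksource clksrc;      /* must be the registered object */
            struct notifier_block clk_nb;
    };

    static int my_rate_cb(struct notifier_block *nb, unsigned long event,
                          void *data)
    {
            struct clk_notifier_data *ndata = data;
            struct my_timer *t = container_of(nb, struct my_timer, clk_nb);

            if (event != POST_RATE_CHANGE)
                    return NOTIFY_DONE;

            /* Valid only because &t->clksrc is what was registered. */
            clocksource_unregister(&t->clksrc);
            clocksource_register_hz(&t->clksrc, ndata->new_rate);

            return NOTIFY_OK;
    }

In the reverted version, the driver's embedded struct was never the registered object, so the notifier's assumption no longer held.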
This results in a kernel NULL pointer dereference when the hstimer clock is changed: Unable to handle kernel NULL pointer dereference at virtual address 00000004 [] (clocksource_unbind) from [] (clocksource_unregister+0x2c/0x44) [] (clocksource_unregister) from [] (sun5i_rate_cb_clksrc+0x34/0x3c) [] (sun5i_rate_cb_clksrc) from [] (notifier_call_chain+0x44/0x84) [] (notifier_call_chain) from [] (__srcu_notifier_call_chain+0x44/0x60) [] (__srcu_notifier_call_chain) from [] (srcu_notifier_call_chain+0x18/0x20) [] (srcu_notifier_call_chain) from [] (__clk_notify+0x70/0x7c) [] (__clk_notify) from [] (clk_propagate_rate_change+0xa4/0xc4) [] (clk_propagate_rate_change) from [] (clk_propagate_rate_change+0x6c/0xc4) Revert the commit for now. clocksource_mmio_init can be made to pass back a pointer, but the code churn and usage of an inner struct might not be worth it. Fixes: 157dfadef832 ("clocksource/drivers/timer_sun5i: Replace code by clocksource_mmio_init") Reported-by: Maxime Ripard Signed-off-by: Chen-Yu Tsai Cc: linux-sunxi@googlegroups.com Cc: Daniel Lezcano Cc: linux-arm-kernel@lists.infradead.org Link: http://lkml.kernel.org/r/20161018054918.26855-1-wens@csie.org Signed-off-by: Thomas Gleixner --- drivers/clocksource/timer-sun5i.c | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/clocksource/timer-sun5i.c b/drivers/clocksource/timer-sun5i.c index c184eb84101e..4f87f3e76d83 100644 --- a/drivers/clocksource/timer-sun5i.c +++ b/drivers/clocksource/timer-sun5i.c @@ -152,6 +152,13 @@ static irqreturn_t sun5i_timer_interrupt(int irq, void *dev_id) return IRQ_HANDLED; } +static cycle_t sun5i_clksrc_read(struct clocksource *clksrc) +{ + struct sun5i_timer_clksrc *cs = to_sun5i_timer_clksrc(clksrc); + + return ~readl(cs->timer.base + TIMER_CNTVAL_LO_REG(1)); +} + static int sun5i_rate_cb_clksrc(struct notifier_block *nb, unsigned long event, void *data) { @@ -210,8 +217,13 @@ static int __init sun5i_setup_clocksource(struct device_node *node, writel(TIMER_CTL_ENABLE | TIMER_CTL_RELOAD, base + TIMER_CTL_REG(1)); - ret = clocksource_mmio_init(base + TIMER_CNTVAL_LO_REG(1), node->name, - rate, 340, 32, clocksource_mmio_readl_down); + cs->clksrc.name = node->name; + cs->clksrc.rating = 340; + cs->clksrc.read = sun5i_clksrc_read; + cs->clksrc.mask = CLOCKSOURCE_MASK(32); + cs->clksrc.flags = CLOCK_SOURCE_IS_CONTINUOUS; + + ret = clocksource_register_hz(&cs->clksrc, rate); if (ret) { pr_err("Couldn't register clock source.\n"); goto err_remove_notifier; -- cgit v1.2.3-59-g8ed1b From 28e3d7002ba9f773662b2cf75d28cadfa29dc442 Mon Sep 17 00:00:00 2001 From: Mika Westerberg Date: Thu, 20 Oct 2016 18:03:36 +0300 Subject: watchdog: wdat_wdt: Ping the watchdog on resume It turns out we need to ping the watchdog hardware on resume when we re-program it. Otherwise this causes inadvertent reset to trigger right after the resume is complete. Fixes: 058dfc767008 (ACPI / watchdog: Add support for WDAT hardware watchdog) Signed-off-by: Mika Westerberg Acked-by: Guenter Roeck Signed-off-by: Rafael J. 
Wysocki --- drivers/watchdog/wdat_wdt.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers') diff --git a/drivers/watchdog/wdat_wdt.c b/drivers/watchdog/wdat_wdt.c index e473e3b23720..6d1fbda0f461 100644 --- a/drivers/watchdog/wdat_wdt.c +++ b/drivers/watchdog/wdat_wdt.c @@ -499,6 +499,10 @@ static int wdat_wdt_resume_noirq(struct device *dev) ret = wdat_wdt_enable_reboot(wdat); if (ret) return ret; + + ret = wdat_wdt_ping(&wdat->wdd); + if (ret) + return ret; } return wdat_wdt_start(&wdat->wdd); -- cgit v1.2.3-59-g8ed1b From 91bbc174d45c347aa7aedb2215cc7d2013c06c1f Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Sun, 25 Sep 2016 13:53:58 +0200 Subject: clk: at91: Fix a return value in case of error If 'clk_hw_register()' fails, it is likely that we expect to return an error instead of a valid pointer (which would mean success). Fix commit f5644f10dcfb ("clk: at91: Migrate to clk_hw based registration and OF APIs") Signed-off-by: Christophe JAILLET Signed-off-by: Stephen Boyd --- drivers/clk/at91/clk-programmable.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/clk/at91/clk-programmable.c b/drivers/clk/at91/clk-programmable.c index 190122e64a3a..85a449cf61e3 100644 --- a/drivers/clk/at91/clk-programmable.c +++ b/drivers/clk/at91/clk-programmable.c @@ -203,7 +203,7 @@ at91_clk_register_programmable(struct regmap *regmap, ret = clk_hw_register(NULL, &prog->hw); if (ret) { kfree(prog); - hw = &prog->hw; + hw = ERR_PTR(ret); } return hw; -- cgit v1.2.3-59-g8ed1b From 1b4c59b7a1d0b9d8019254a5f2e35b2663f49a9e Mon Sep 17 00:00:00 2001 From: Hannes Reinecke Date: Mon, 22 Aug 2016 10:54:07 +0200 Subject: target: fix potential race window in target_sess_cmd_list_waiting() target_sess_cmd_list_waiting() might hit on a condition where the kref for the command is already 0, but the destructor has not been called yet (or is stuck in waiting for a spin lock). Rather than leaving the command on the list we should explicitly remove it to avoid race issues later on. Signed-off-by: Hannes Reinecke Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_transport.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index e825d580ccee..7dfefd66df93 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -2535,7 +2535,9 @@ int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref) * invocations before se_cmd descriptor release. 
*/ if (ack_kref) { - kref_get(&se_cmd->cmd_kref); + if (!kref_get_unless_zero(&se_cmd->cmd_kref)) + return -EINVAL; + se_cmd->se_cmd_flags |= SCF_ACK_KREF; } @@ -2616,7 +2618,7 @@ EXPORT_SYMBOL(target_put_sess_cmd); */ void target_sess_cmd_list_set_waiting(struct se_session *se_sess) { - struct se_cmd *se_cmd; + struct se_cmd *se_cmd, *tmp_cmd; unsigned long flags; int rc; @@ -2628,14 +2630,16 @@ void target_sess_cmd_list_set_waiting(struct se_session *se_sess) se_sess->sess_tearing_down = 1; list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list); - list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list) { + list_for_each_entry_safe(se_cmd, tmp_cmd, + &se_sess->sess_wait_list, se_cmd_list) { rc = kref_get_unless_zero(&se_cmd->cmd_kref); if (rc) { se_cmd->cmd_wait_set = 1; spin_lock(&se_cmd->t_state_lock); se_cmd->transport_state |= CMD_T_FABRIC_STOP; spin_unlock(&se_cmd->t_state_lock); - } + } else + list_del_init(&se_cmd->se_cmd_list); } spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); -- cgit v1.2.3-59-g8ed1b From b04bf5833e8a0d6ae4e3f7b35b30c4ab6ff7ea1b Mon Sep 17 00:00:00 2001 From: Hannes Reinecke Date: Mon, 22 Aug 2016 10:54:08 +0200 Subject: target/tcm_fc: print command pointer in debug message When allocating a new command we should add the pointer to the debug statements; that allows us to match this with other debug statements for handling data. Signed-off-by: Hannes Reinecke Signed-off-by: Nicholas Bellinger --- drivers/target/tcm_fc/tfc_cmd.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c index 216e18cc9133..36f08644250c 100644 --- a/drivers/target/tcm_fc/tfc_cmd.c +++ b/drivers/target/tcm_fc/tfc_cmd.c @@ -575,7 +575,7 @@ static void ft_send_work(struct work_struct *work) TARGET_SCF_ACK_KREF)) goto err; - pr_debug("r_ctl %x alloc target_submit_cmd\n", fh->fh_r_ctl); + pr_debug("r_ctl %x target_submit_cmd %p\n", fh->fh_r_ctl, cmd); return; err: -- cgit v1.2.3-59-g8ed1b From 91b385b4909e040450389b11961a8ba8f8e9a35e Mon Sep 17 00:00:00 2001 From: Hannes Reinecke Date: Mon, 22 Aug 2016 10:54:09 +0200 Subject: target/tcm_fc: return detailed error in ft_sess_create() Not every failure is due to out-of-memory; the ACLs might not be set, too. So return a detailed error code in ft_sess_create() instead of just a NULL pointer. 
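For readers less familiar with the idiom: ERR_PTR() encodes a negative errno into an invalid pointer value, so a pointer-returning function can report why it failed rather than a bare NULL, and callers recover the code with IS_ERR()/PTR_ERR(). A minimal sketch with hypothetical names:

    static struct widget *widget_create(struct device *dev)
    {
            struct widget *w = kzalloc(sizeof(*w), GFP_KERNEL);

            if (!w)
                    return ERR_PTR(-ENOMEM);

            if (!widget_allowed(dev)) {             /* e.g. an ACL check */
                    kfree(w);
                    return ERR_PTR(-EACCES);
            }

            return w;
    }

    static int widget_attach(struct device *dev)
    {
            struct widget *w = widget_create(dev);

            if (IS_ERR(w))
                    return PTR_ERR(w);      /* -ENOMEM vs. -EACCES is preserved */

            /* ... use w ... */
            return 0;
    }

The distinction matters here because, as a later patch in this series does in ft_prli_locked(), the caller can answer -EACCES with a different FC response code than a plain resource failure.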
Signed-off-by: Hannes Reinecke Signed-off-by: Nicholas Bellinger --- drivers/target/tcm_fc/tfc_sess.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c index 6ffbb603d912..cc8c2614397b 100644 --- a/drivers/target/tcm_fc/tfc_sess.c +++ b/drivers/target/tcm_fc/tfc_sess.c @@ -223,7 +223,7 @@ static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id, sess = kzalloc(sizeof(*sess), GFP_KERNEL); if (!sess) - return NULL; + return ERR_PTR(-ENOMEM); kref_init(&sess->kref); /* ref for table entry */ sess->tport = tport; @@ -234,8 +234,9 @@ static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id, TARGET_PROT_NORMAL, &initiatorname[0], sess, ft_sess_alloc_cb); if (IS_ERR(sess->se_sess)) { + int rc = PTR_ERR(sess->se_sess); kfree(sess); - return NULL; + sess = ERR_PTR(rc); } return sess; } -- cgit v1.2.3-59-g8ed1b From 8962a4d29bcb3d12164c02d207c8ff1ab8b04558 Mon Sep 17 00:00:00 2001 From: Hannes Reinecke Date: Mon, 22 Aug 2016 10:54:10 +0200 Subject: target/tcm_fc: Update debugging statements to match libfc usage Update the debug statements to match those from libfc. Signed-off-by: Hannes Reinecke Signed-off-by: Nicholas Bellinger --- drivers/target/tcm_fc/tfc_sess.c | 37 ++++++++++++++++++++++++++----------- 1 file changed, 26 insertions(+), 11 deletions(-) (limited to 'drivers') diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c index cc8c2614397b..fd5c3de79470 100644 --- a/drivers/target/tcm_fc/tfc_sess.c +++ b/drivers/target/tcm_fc/tfc_sess.c @@ -39,6 +39,11 @@ #include "tcm_fc.h" +#define TFC_SESS_DBG(lport, fmt, args...) \ + pr_debug("host%u: rport %6.6x: " fmt, \ + (lport)->host->host_no, \ + (lport)->port_id, ##args ) + static void ft_sess_delete_all(struct ft_tport *); /* @@ -167,24 +172,29 @@ static struct ft_sess *ft_sess_get(struct fc_lport *lport, u32 port_id) struct ft_tport *tport; struct hlist_head *head; struct ft_sess *sess; + char *reason = "no session created"; rcu_read_lock(); tport = rcu_dereference(lport->prov[FC_TYPE_FCP]); - if (!tport) + if (!tport) { + reason = "not an FCP port"; goto out; + } head = &tport->hash[ft_sess_hash(port_id)]; hlist_for_each_entry_rcu(sess, head, hash) { if (sess->port_id == port_id) { kref_get(&sess->kref); rcu_read_unlock(); - pr_debug("port_id %x found %p\n", port_id, sess); + TFC_SESS_DBG(lport, "port_id %x found %p\n", + port_id, sess); return sess; } } out: rcu_read_unlock(); - pr_debug("port_id %x not found\n", port_id); + TFC_SESS_DBG(lport, "port_id %x not found, %s\n", + port_id, reason); return NULL; } @@ -195,7 +205,7 @@ static int ft_sess_alloc_cb(struct se_portal_group *se_tpg, struct ft_tport *tport = sess->tport; struct hlist_head *head = &tport->hash[ft_sess_hash(sess->port_id)]; - pr_debug("port_id %x sess %p\n", sess->port_id, sess); + TFC_SESS_DBG(tport->lport, "port_id %x sess %p\n", sess->port_id, sess); hlist_add_head_rcu(&sess->hash, head); tport->sess_count++; @@ -320,7 +330,7 @@ void ft_sess_close(struct se_session *se_sess) mutex_unlock(&ft_lport_lock); return; } - pr_debug("port_id %x\n", port_id); + TFC_SESS_DBG(sess->tport->lport, "port_id %x close session\n", port_id); ft_sess_unhash(sess); mutex_unlock(&ft_lport_lock); ft_close_sess(sess); @@ -380,8 +390,13 @@ static int ft_prli_locked(struct fc_rport_priv *rdata, u32 spp_len, if (!(fcp_parm & FCP_SPPF_INIT_FCN)) return FC_SPP_RESP_CONF; sess = ft_sess_create(tport, rdata->ids.port_id, rdata); 
- if (!sess) - return FC_SPP_RESP_RES; + if (IS_ERR(sess)) { + if (PTR_ERR(sess) == -EACCES) { + spp->spp_flags &= ~FC_SPP_EST_IMG_PAIR; + return FC_SPP_RESP_CONF; + } else + return FC_SPP_RESP_RES; + } if (!sess->params) rdata->prli_count++; sess->params = fcp_parm; @@ -424,8 +439,8 @@ static int ft_prli(struct fc_rport_priv *rdata, u32 spp_len, mutex_lock(&ft_lport_lock); ret = ft_prli_locked(rdata, spp_len, rspp, spp); mutex_unlock(&ft_lport_lock); - pr_debug("port_id %x flags %x ret %x\n", - rdata->ids.port_id, rspp ? rspp->spp_flags : 0, ret); + TFC_SESS_DBG(rdata->local_port, "port_id %x flags %x ret %x\n", + rdata->ids.port_id, rspp ? rspp->spp_flags : 0, ret); return ret; } @@ -478,11 +493,11 @@ static void ft_recv(struct fc_lport *lport, struct fc_frame *fp) struct ft_sess *sess; u32 sid = fc_frame_sid(fp); - pr_debug("sid %x\n", sid); + TFC_SESS_DBG(lport, "recv sid %x\n", sid); sess = ft_sess_get(lport, sid); if (!sess) { - pr_debug("sid %x sess lookup failed\n", sid); + TFC_SESS_DBG(lport, "sid %x sess lookup failed\n", sid); /* TBD XXX - if FCP_CMND, send PRLO */ fc_frame_free(fp); return; -- cgit v1.2.3-59-g8ed1b From 1ba0158fa66b5b2c597a748f87be1650c9960ccc Mon Sep 17 00:00:00 2001 From: Hannes Reinecke Date: Mon, 22 Aug 2016 10:54:11 +0200 Subject: target/tcm_fc: use CPU affinity for responses The libfc stack assigns exchange IDs based on the CPU the request was received on, so we need to send the responses via the same CPU. Otherwise the send logic gets confuses and responses will be delayed, causing exchange timeouts on the initiator side. Signed-off-by: Hannes Reinecke Cc: stable@vger.kernel.org # 4.5+ Signed-off-by: Nicholas Bellinger --- drivers/target/tcm_fc/tfc_cmd.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c index 36f08644250c..ff5de9a96643 100644 --- a/drivers/target/tcm_fc/tfc_cmd.c +++ b/drivers/target/tcm_fc/tfc_cmd.c @@ -572,7 +572,7 @@ static void ft_send_work(struct work_struct *work) if (target_submit_cmd(&cmd->se_cmd, cmd->sess->se_sess, fcp->fc_cdb, &cmd->ft_sense_buffer[0], scsilun_to_int(&fcp->fc_lun), ntohl(fcp->fc_dl), task_attr, data_dir, - TARGET_SCF_ACK_KREF)) + TARGET_SCF_ACK_KREF | TARGET_SCF_USE_CPUID)) goto err; pr_debug("r_ctl %x target_submit_cmd %p\n", fh->fh_r_ctl, cmd); -- cgit v1.2.3-59-g8ed1b From 1f1cc4566bd9dd8d3cf19965a4b6392143618536 Mon Sep 17 00:00:00 2001 From: Lars-Peter Clausen Date: Tue, 18 Oct 2016 16:53:59 +0200 Subject: gpio: GPIO_GET_CHIPINFO_IOCTL: Fix line offset validation The current line offset validation is off by one. Depending on the data stored behind the descs array this can either cause undefined behavior or disclose arbitrary, potentially sensitive, memory to the issuing userspace application. Make sure that offset is within the bounds of the desc array. 
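The off-by-one is easiest to see in isolation: an n-element array has valid indices 0 through n-1, so a userspace-supplied offset must be rejected when it is greater than or equal to n. A sketch with hypothetical names (not the gpiolib handler):

    static int lookup_desc(struct my_chip *chip, u32 offset)
    {
            /* A '>' check would accept offset == ngpio and index one past the end. */
            if (offset >= chip->ngpio)
                    return -EINVAL;

            return use_desc(&chip->descs[offset]);
    }
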
Cc: stable@vger.kernel.org Fixes: 521a2ad6f862 ("gpio: add userspace ABI for GPIO line information") Signed-off-by: Lars-Peter Clausen Signed-off-by: Linus Walleij --- drivers/gpio/gpiolib.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index f0fc3a0d37c8..dac975397253 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -839,7 +839,7 @@ static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) if (copy_from_user(&lineinfo, ip, sizeof(lineinfo))) return -EFAULT; - if (lineinfo.line_offset > gdev->ngpio) + if (lineinfo.line_offset >= gdev->ngpio) return -EINVAL; desc = &gdev->descs[lineinfo.line_offset]; -- cgit v1.2.3-59-g8ed1b From 0f4bbb233743bdfd51d47688b0bc2561f310488b Mon Sep 17 00:00:00 2001 From: Lars-Peter Clausen Date: Tue, 18 Oct 2016 16:54:00 +0200 Subject: gpio: GPIO_GET_CHIPINFO_IOCTL: Fix information leak The GPIO_GET_CHIPINFO_IOCTL handler allocates a gpiochip_info struct on the stack and then passes it to copy_to_user(). But depending on the length of the GPIO chip name and label the struct is only partially initialized. This exposes the previous, potentially sensitive, stack content to the issuing userspace application. To avoid this make sure that the struct is fully initialized. Cc: stable@vger.kernel.org Fixes: 521a2ad6f862 ("gpio: add userspace ABI for GPIO line information") Signed-off-by: Lars-Peter Clausen Signed-off-by: Linus Walleij --- drivers/gpio/gpiolib.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers') diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index dac975397253..7a01969169e8 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -823,6 +823,8 @@ static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) if (cmd == GPIO_GET_CHIPINFO_IOCTL) { struct gpiochip_info chipinfo; + memset(&chipinfo, 0, sizeof(chipinfo)); + strncpy(chipinfo.name, dev_name(&gdev->dev), sizeof(chipinfo.name)); chipinfo.name[sizeof(chipinfo.name)-1] = '\0'; -- cgit v1.2.3-59-g8ed1b From e405f9fcb63602d35f7a419ededa3f952a395a72 Mon Sep 17 00:00:00 2001 From: Lars-Peter Clausen Date: Tue, 18 Oct 2016 16:54:01 +0200 Subject: gpio: GPIO_GET_LINEHANDLE_IOCTL: Validate line offset The line offset that is used as an index into the descs array is provided by userspace and might go beyond the bounds of the array. If that happens undefined behavior will occur. Make sure that the offset is within the bounds of the desc array and reject any requests that specify a value outside of it. 
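The companion hardening shown in the GPIO_GET_CHIPINFO_IOCTL information-leak fix above is to zero-fill a reply structure before copying it out, so that fields the handler never writes cannot carry stale stack bytes. A small, hypothetical user-space analogue of that zero-fill-first pattern (struct chip_info and fill_reply are made-up stand-ins for gpiochip_info and the ioctl handler):

#include <stdio.h>
#include <string.h>

struct chip_info {			/* stand-in for struct gpiochip_info */
	char name[32];
	char label[32];
	unsigned int lines;
};

static void fill_reply(struct chip_info *info, const char *name)
{
	memset(info, 0, sizeof(*info));	/* the added step: no stale bytes survive */
	strncpy(info->name, name, sizeof(info->name) - 1);
	info->lines = 4;
	/* label is deliberately left unwritten; it is now all zeroes */
}

int main(void)
{
	struct chip_info info;

	fill_reply(&info, "demo-chip");
	printf("name=%s label=\"%s\" lines=%u\n", info.name, info.label, info.lines);
	return 0;
}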
Cc: stable@vger.kernel.org Fixes: d7c51b47ac11 ("gpio: userspace ABI for reading/writing GPIO lines") Signed-off-by: Lars-Peter Clausen Signed-off-by: Linus Walleij --- drivers/gpio/gpiolib.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'drivers') diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index 7a01969169e8..d287cb4e97c4 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -444,6 +444,11 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip) u32 lflags = handlereq.flags; struct gpio_desc *desc; + if (offset >= gdev->ngpio) { + ret = -EINVAL; + goto out_free_descs; + } + desc = &gdev->descs[offset]; ret = gpiod_request(desc, lh->label); if (ret) -- cgit v1.2.3-59-g8ed1b From d82aa4a8f2e8df9673ddb099262355da4c9b99b1 Mon Sep 17 00:00:00 2001 From: Lars-Peter Clausen Date: Tue, 18 Oct 2016 16:54:04 +0200 Subject: gpio: GPIOHANDLE_GET_LINE_VALUES_IOCTL: Fix information leak The GPIOHANDLE_GET_LINE_VALUES_IOCTL handler allocates a gpiohandle_data struct on the stack and then passes it to copy_to_user(). But only the first element of the values array in the struct is set, which leaves the struct partially initialized. This exposes the previous, potentially sensitive, stack content to the issuing userspace application. To avoid this make sure that the struct is fully initialized. Cc: stable@vger.kernel.org Fixes: 61f922db7221 ("gpio: userspace ABI for reading GPIO line events") Signed-off-by: Lars-Peter Clausen Signed-off-by: Linus Walleij --- drivers/gpio/gpiolib.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers') diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index d287cb4e97c4..4ed26bcf054c 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -628,6 +628,8 @@ static long lineevent_ioctl(struct file *filep, unsigned int cmd, if (cmd == GPIOHANDLE_GET_LINE_VALUES_IOCTL) { int val; + memset(&ghd, 0, sizeof(ghd)); + val = gpiod_get_value_cansleep(le->desc); if (val < 0) return val; -- cgit v1.2.3-59-g8ed1b From b8b0e3d303654b3bb7b31b0266c513fd6f4132ce Mon Sep 17 00:00:00 2001 From: Lars-Peter Clausen Date: Tue, 18 Oct 2016 16:54:03 +0200 Subject: gpio: GPIO_GET_LINEEVENT_IOCTL: Validate line offset The line offset that is used as an index into the descs array is provided by userspace and might go beyond the bounds of the array. If that happens undefined behavior will occur. Make sure that the offset is within the bounds of the desc array and reject any requests that specify a value outside of it. 
Cc: stable@vger.kernel.org Fixes: 61f922db7221 ("gpio: userspace ABI for reading GPIO line events") Signed-off-by: Lars-Peter Clausen Signed-off-by: Linus Walleij --- drivers/gpio/gpiolib.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'drivers') diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index 4ed26bcf054c..db44a7da5eb4 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -733,6 +733,11 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip) lflags = eventreq.handleflags; eflags = eventreq.eventflags; + if (offset >= gdev->ngpio) { + ret = -EINVAL; + goto out_free_label; + } + /* This is just wrong: we don't look for events on output lines */ if (lflags & GPIOHANDLE_REQUEST_OUTPUT) { ret = -EINVAL; -- cgit v1.2.3-59-g8ed1b From 3eded5d83bf4e36ad78775c7ceb44a45480b0abd Mon Sep 17 00:00:00 2001 From: Lars-Peter Clausen Date: Tue, 18 Oct 2016 16:54:02 +0200 Subject: gpio: GPIOHANDLE_GET_LINE_VALUES_IOCTL: Fix information leak The GPIOHANDLE_GET_LINE_VALUES_IOCTL handler allocates a gpiohandle_data struct on the stack and then passes it to copy_to_user(). But depending on the number of requested line handles the struct is only partially initialized. This exposes the previous, potentially sensitive, stack content to the issuing userspace application. To avoid this make sure that the struct is fully initialized. Cc: stable@vger.kernel.org Fixes: d7c51b47ac11 ("gpio: userspace ABI for reading/writing GPIO lines") Signed-off-by: Lars-Peter Clausen Signed-off-by: Linus Walleij --- drivers/gpio/gpiolib.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers') diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index db44a7da5eb4..7f92f8964efd 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -344,6 +344,8 @@ static long linehandle_ioctl(struct file *filep, unsigned int cmd, if (cmd == GPIOHANDLE_GET_LINE_VALUES_IOCTL) { int val; + memset(&ghd, 0, sizeof(ghd)); + /* TODO: check if descriptors are really input */ for (i = 0; i < lh->numdescs; i++) { val = gpiod_get_value_cansleep(lh->descs[i]); -- cgit v1.2.3-59-g8ed1b From e3e847c7f15a27c80f526b2a7a8d4dd7ce0960a0 Mon Sep 17 00:00:00 2001 From: Lars-Peter Clausen Date: Tue, 18 Oct 2016 16:54:05 +0200 Subject: gpio: GPIO_GET_LINEHANDLE_IOCTL: Reject invalid line flags The GPIO_GET_LINEHANDLE_IOCTL currently ignores unknown or undefined linehandle flags. From a backwards and forwards compatibility viewpoint it is highly desirable to reject unknown flags though. On one hand an application that is using newer flags and is running on an older kernel has no way to detect if the new flags were handled correctly if they are silently discarded. On the other hand an application that (accidentally) passes undefined flags will run fine on an older kernel, but may break on a newer kernel when these flags get defined. Ensure that requests that have undefined flags set are rejected with an error, rather than silently discarding the undefined flags. 
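A compact sketch of the reject-unknown-bits idiom the change introduces: collect every flag the kernel understands into one mask and fail any request that carries a bit outside it. The REQ_* names below are invented for illustration, not the real GPIOHANDLE_REQUEST_* constants:

#include <stdio.h>

#define REQ_INPUT	(1u << 0)
#define REQ_OUTPUT	(1u << 1)
#define REQ_ACTIVE_LOW	(1u << 2)

#define REQ_VALID_FLAGS	(REQ_INPUT | REQ_OUTPUT | REQ_ACTIVE_LOW)

static int check_flags(unsigned int flags)
{
	if (flags & ~REQ_VALID_FLAGS)	/* any bit outside the known set */
		return -1;		/* reject, as the fix does with -EINVAL */
	return 0;
}

int main(void)
{
	printf("known flags     -> %d\n", check_flags(REQ_INPUT | REQ_ACTIVE_LOW));
	printf("undefined bit 7 -> %d\n", check_flags(REQ_INPUT | (1u << 7)));
	return 0;
}

An application built against newer headers thus gets a clear error instead of silently running with the new flag ignored.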
Cc: stable@vger.kernel.org Fixes: d7c51b47ac11 ("gpio: userspace ABI for reading/writing GPIO lines") Signed-off-by: Lars-Peter Clausen Signed-off-by: Linus Walleij --- drivers/gpio/gpiolib.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) (limited to 'drivers') diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index 7f92f8964efd..b5b1a8425907 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -333,6 +333,13 @@ struct linehandle_state { u32 numdescs; }; +#define GPIOHANDLE_REQUEST_VALID_FLAGS \ + (GPIOHANDLE_REQUEST_INPUT | \ + GPIOHANDLE_REQUEST_OUTPUT | \ + GPIOHANDLE_REQUEST_ACTIVE_LOW | \ + GPIOHANDLE_REQUEST_OPEN_DRAIN | \ + GPIOHANDLE_REQUEST_OPEN_SOURCE) + static long linehandle_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) { @@ -451,6 +458,12 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip) goto out_free_descs; } + /* Return an error if a unknown flag is set */ + if (lflags & ~GPIOHANDLE_REQUEST_VALID_FLAGS) { + ret = -EINVAL; + goto out_free_descs; + } + desc = &gdev->descs[offset]; ret = gpiod_request(desc, lh->label); if (ret) -- cgit v1.2.3-59-g8ed1b From ac7dbb991ee5afc0beacce3a252dcaaa249a7786 Mon Sep 17 00:00:00 2001 From: Lars-Peter Clausen Date: Tue, 18 Oct 2016 16:54:06 +0200 Subject: gpio: GPIO_GET_LINEEVENT_IOCTL: Reject invalid line and event flags The GPIO_GET_LINEEVENT_IOCTL currently ignores unknown or undefined linehandle and lineevent flags. From a backwards and forwards compatibility viewpoint it is highly desirable to reject unknown flags though. On one hand an application that is using newer flags and is running on an older kernel has no way to detect if the new flags were handled correctly if they are silently discarded. On the other hand an application that (accidentally) passes undefined flags will run fine on an older kernel, but may break on a newer kernel when these flags get defined. Ensure that requests that have undefined flags set are rejected with an error, rather than silently discarding the undefined flags. Cc: stable@vger.kernel.org Fixes: 61f922db7221 ("gpio: userspace ABI for reading GPIO line events") Signed-off-by: Lars-Peter Clausen Signed-off-by: Linus Walleij --- drivers/gpio/gpiolib.c | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'drivers') diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index b5b1a8425907..20e09b7c2de3 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -556,6 +556,10 @@ struct lineevent_state { struct mutex read_lock; }; +#define GPIOEVENT_REQUEST_VALID_FLAGS \ + (GPIOEVENT_REQUEST_RISING_EDGE | \ + GPIOEVENT_REQUEST_FALLING_EDGE) + static unsigned int lineevent_poll(struct file *filep, struct poll_table_struct *wait) { @@ -753,6 +757,13 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip) goto out_free_label; } + /* Return an error if a unknown flag is set */ + if ((lflags & ~GPIOHANDLE_REQUEST_VALID_FLAGS) || + (eflags & ~GPIOEVENT_REQUEST_VALID_FLAGS)) { + ret = -EINVAL; + goto out_free_label; + } + /* This is just wrong: we don't look for events on output lines */ if (lflags & GPIOHANDLE_REQUEST_OUTPUT) { ret = -EINVAL; -- cgit v1.2.3-59-g8ed1b From 91f1551a746f62937957fa1bff4165e1b7a45337 Mon Sep 17 00:00:00 2001 From: Javier Martinez Canillas Date: Tue, 18 Oct 2016 17:44:02 -0300 Subject: gpio: ts4800: Fix module autoload If the driver is built as a module, autoload won't work because the module alias information is not filled. 
So user-space can't match the registered device with the corresponding module. Export the module alias information using the MODULE_DEVICE_TABLE() macro. Before this patch: $ modinfo drivers/gpio/gpio-ts4800.ko | grep alias $ After this patch: $ modinfo drivers/gpio/gpio-ts4800.ko | grep alias alias: of:N*T*Ctechnologic,ts4800-gpioC* alias: of:N*T*Ctechnologic,ts4800-gpio Signed-off-by: Javier Martinez Canillas Signed-off-by: Linus Walleij --- drivers/gpio/gpio-ts4800.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/gpio/gpio-ts4800.c b/drivers/gpio/gpio-ts4800.c index 99256115bea5..c2a80b4cbf32 100644 --- a/drivers/gpio/gpio-ts4800.c +++ b/drivers/gpio/gpio-ts4800.c @@ -66,6 +66,7 @@ static const struct of_device_id ts4800_gpio_of_match[] = { { .compatible = "technologic,ts4800-gpio", }, {}, }; +MODULE_DEVICE_TABLE(of, ts4800_gpio_of_match); static struct platform_driver ts4800_gpio_driver = { .driver = { -- cgit v1.2.3-59-g8ed1b From 126d26f66d9890a69158812a6caa248c05359daa Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Fri, 21 Oct 2016 12:56:27 +0200 Subject: USB: serial: fix potential NULL-dereference at probe Make sure we have at least one port before attempting to register a console. Currently, at least one driver binds to a "dummy" interface and requests zero ports for it. Should such an interface also lack endpoints, we get a NULL-deref during probe. Fixes: e5b1e2062e05 ("USB: serial: make minor allocation dynamic") Cc: stable # 3.11 Signed-off-by: Johan Hovold --- drivers/usb/serial/usb-serial.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c index d213cf44a7e4..4a037b4a79cf 100644 --- a/drivers/usb/serial/usb-serial.c +++ b/drivers/usb/serial/usb-serial.c @@ -1078,7 +1078,8 @@ static int usb_serial_probe(struct usb_interface *interface, serial->disconnected = 0; - usb_serial_console_init(serial->port[0]->minor); + if (num_ports > 0) + usb_serial_console_init(serial->port[0]->minor); exit: module_put(type->driver.owner); return 0; -- cgit v1.2.3-59-g8ed1b From 02a1b8f4167eac709d269341f7c3c340984c28a6 Mon Sep 17 00:00:00 2001 From: Joao Pinto Date: Fri, 21 Oct 2016 10:31:48 +0100 Subject: PCI: designware-plat: Update author email address Although I am leaving Synopsys, I would like to keep working with the linux kernel community and help in what you might find useful. For that I am sending this patch to change my contact e-mail. Signed-off-by: Joao Pinto Signed-off-by: Bjorn Helgaas --- drivers/pci/host/pcie-designware-plat.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/pci/host/pcie-designware-plat.c b/drivers/pci/host/pcie-designware-plat.c index 537f58a664fa..8df6312ed300 100644 --- a/drivers/pci/host/pcie-designware-plat.c +++ b/drivers/pci/host/pcie-designware-plat.c @@ -3,7 +3,7 @@ * * Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com) * - * Authors: Joao Pinto + * Authors: Joao Pinto * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as -- cgit v1.2.3-59-g8ed1b From a6c6ead14183ea4ec8ce7551e1f3451024b9c4db Mon Sep 17 00:00:00 2001 From: "Rafael J. 
Wysocki" Date: Wed, 19 Oct 2016 02:57:22 +0200 Subject: cpufreq: intel_pstate: Set P-state upfront in performance mode After commit a4675fbc4a7a (cpufreq: intel_pstate: Replace timers with utilization update callbacks) the cpufreq governor callbacks may not be invoked on NOHZ_FULL CPUs and, in particular, switching to the "performance" policy via sysfs may not have any effect on them. That is a problem, because it usually is desirable to squeeze the last bit of performance out of those CPUs, so work around it by setting the maximum P-state (within the limits) in intel_pstate_set_policy() upfront when the policy is CPUFREQ_POLICY_PERFORMANCE. Fixes: a4675fbc4a7a (cpufreq: intel_pstate: Replace timers with utilization update callbacks) Signed-off-by: Rafael J. Wysocki Acked-by: Srinivas Pandruvada --- drivers/cpufreq/intel_pstate.c | 29 +++++++++++++++++++++++++---- 1 file changed, 25 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index f535f8123258..ac7c58d20b58 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -1142,10 +1142,8 @@ static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max) *min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf); } -static void intel_pstate_set_min_pstate(struct cpudata *cpu) +static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate) { - int pstate = cpu->pstate.min_pstate; - trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu); cpu->pstate.current_pstate = pstate; /* @@ -1157,6 +1155,20 @@ static void intel_pstate_set_min_pstate(struct cpudata *cpu) pstate_funcs.get_val(cpu, pstate)); } +static void intel_pstate_set_min_pstate(struct cpudata *cpu) +{ + intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate); +} + +static void intel_pstate_max_within_limits(struct cpudata *cpu) +{ + int min_pstate, max_pstate; + + update_turbo_state(); + intel_pstate_get_min_max(cpu, &min_pstate, &max_pstate); + intel_pstate_set_pstate(cpu, max_pstate); +} + static void intel_pstate_get_cpu_pstates(struct cpudata *cpu) { cpu->pstate.min_pstate = pstate_funcs.get_min(); @@ -1491,7 +1503,7 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy) pr_debug("set_policy cpuinfo.max %u policy->max %u\n", policy->cpuinfo.max_freq, policy->max); - cpu = all_cpu_data[0]; + cpu = all_cpu_data[policy->cpu]; if (cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate && policy->max < policy->cpuinfo.max_freq && policy->max > cpu->pstate.max_pstate * cpu->pstate.scaling) { @@ -1535,6 +1547,15 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy) limits->max_perf = round_up(limits->max_perf, FRAC_BITS); out: + if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) { + /* + * NOHZ_FULL CPUs need this as the governor callback may not + * be invoked on them. + */ + intel_pstate_clear_update_util_hook(policy->cpu); + intel_pstate_max_within_limits(cpu); + } + intel_pstate_set_update_util_hook(policy->cpu); intel_pstate_hwp_set_policy(policy); -- cgit v1.2.3-59-g8ed1b From 19eb4a47224da4bc118f1e304e16b6c08ed172c9 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Wed, 19 Oct 2016 17:23:49 +0900 Subject: reset: uniphier: rename MIO reset to SD reset for Pro5, PXs2, LD20 SoCs I made a mistake as for naming for this block. The MIO block is not implemented for these 3 SoCs in the first place. The current naming will be a trouble if an SoC with both MIO and SD-ctrl blocks appear in the future. 
This driver has just been merged in the previous merge window. Rename it before the release. Signed-off-by: Masahiro Yamada Acked-by: Philipp Zabel --- .../devicetree/bindings/reset/uniphier-reset.txt | 62 +++++++++++----------- drivers/reset/reset-uniphier.c | 16 +++--- 2 files changed, 39 insertions(+), 39 deletions(-) (limited to 'drivers') diff --git a/Documentation/devicetree/bindings/reset/uniphier-reset.txt b/Documentation/devicetree/bindings/reset/uniphier-reset.txt index e6bbfccd56c3..5020524cddeb 100644 --- a/Documentation/devicetree/bindings/reset/uniphier-reset.txt +++ b/Documentation/devicetree/bindings/reset/uniphier-reset.txt @@ -6,25 +6,25 @@ System reset Required properties: - compatible: should be one of the following: - "socionext,uniphier-sld3-reset" - for PH1-sLD3 SoC. - "socionext,uniphier-ld4-reset" - for PH1-LD4 SoC. - "socionext,uniphier-pro4-reset" - for PH1-Pro4 SoC. - "socionext,uniphier-sld8-reset" - for PH1-sLD8 SoC. - "socionext,uniphier-pro5-reset" - for PH1-Pro5 SoC. - "socionext,uniphier-pxs2-reset" - for ProXstream2/PH1-LD6b SoC. - "socionext,uniphier-ld11-reset" - for PH1-LD11 SoC. - "socionext,uniphier-ld20-reset" - for PH1-LD20 SoC. + "socionext,uniphier-sld3-reset" - for sLD3 SoC. + "socionext,uniphier-ld4-reset" - for LD4 SoC. + "socionext,uniphier-pro4-reset" - for Pro4 SoC. + "socionext,uniphier-sld8-reset" - for sLD8 SoC. + "socionext,uniphier-pro5-reset" - for Pro5 SoC. + "socionext,uniphier-pxs2-reset" - for PXs2/LD6b SoC. + "socionext,uniphier-ld11-reset" - for LD11 SoC. + "socionext,uniphier-ld20-reset" - for LD20 SoC. - #reset-cells: should be 1. Example: sysctrl@61840000 { - compatible = "socionext,uniphier-ld20-sysctrl", + compatible = "socionext,uniphier-ld11-sysctrl", "simple-mfd", "syscon"; reg = <0x61840000 0x4000>; reset { - compatible = "socionext,uniphier-ld20-reset"; + compatible = "socionext,uniphier-ld11-reset"; #reset-cells = <1>; }; @@ -32,30 +32,30 @@ Example: }; -Media I/O (MIO) reset ---------------------- +Media I/O (MIO) reset, SD reset +------------------------------- Required properties: - compatible: should be one of the following: - "socionext,uniphier-sld3-mio-reset" - for PH1-sLD3 SoC. - "socionext,uniphier-ld4-mio-reset" - for PH1-LD4 SoC. - "socionext,uniphier-pro4-mio-reset" - for PH1-Pro4 SoC. - "socionext,uniphier-sld8-mio-reset" - for PH1-sLD8 SoC. - "socionext,uniphier-pro5-mio-reset" - for PH1-Pro5 SoC. - "socionext,uniphier-pxs2-mio-reset" - for ProXstream2/PH1-LD6b SoC. - "socionext,uniphier-ld11-mio-reset" - for PH1-LD11 SoC. - "socionext,uniphier-ld20-mio-reset" - for PH1-LD20 SoC. + "socionext,uniphier-sld3-mio-reset" - for sLD3 SoC. + "socionext,uniphier-ld4-mio-reset" - for LD4 SoC. + "socionext,uniphier-pro4-mio-reset" - for Pro4 SoC. + "socionext,uniphier-sld8-mio-reset" - for sLD8 SoC. + "socionext,uniphier-pro5-sd-reset" - for Pro5 SoC. + "socionext,uniphier-pxs2-sd-reset" - for PXs2/LD6b SoC. + "socionext,uniphier-ld11-mio-reset" - for LD11 SoC. + "socionext,uniphier-ld20-sd-reset" - for LD20 SoC. - #reset-cells: should be 1. Example: mioctrl@59810000 { - compatible = "socionext,uniphier-ld20-mioctrl", + compatible = "socionext,uniphier-ld11-mioctrl", "simple-mfd", "syscon"; reg = <0x59810000 0x800>; reset { - compatible = "socionext,uniphier-ld20-mio-reset"; + compatible = "socionext,uniphier-ld11-mio-reset"; #reset-cells = <1>; }; @@ -68,24 +68,24 @@ Peripheral reset Required properties: - compatible: should be one of the following: - "socionext,uniphier-ld4-peri-reset" - for PH1-LD4 SoC. 
- "socionext,uniphier-pro4-peri-reset" - for PH1-Pro4 SoC. - "socionext,uniphier-sld8-peri-reset" - for PH1-sLD8 SoC. - "socionext,uniphier-pro5-peri-reset" - for PH1-Pro5 SoC. - "socionext,uniphier-pxs2-peri-reset" - for ProXstream2/PH1-LD6b SoC. - "socionext,uniphier-ld11-peri-reset" - for PH1-LD11 SoC. - "socionext,uniphier-ld20-peri-reset" - for PH1-LD20 SoC. + "socionext,uniphier-ld4-peri-reset" - for LD4 SoC. + "socionext,uniphier-pro4-peri-reset" - for Pro4 SoC. + "socionext,uniphier-sld8-peri-reset" - for sLD8 SoC. + "socionext,uniphier-pro5-peri-reset" - for Pro5 SoC. + "socionext,uniphier-pxs2-peri-reset" - for PXs2/LD6b SoC. + "socionext,uniphier-ld11-peri-reset" - for LD11 SoC. + "socionext,uniphier-ld20-peri-reset" - for LD20 SoC. - #reset-cells: should be 1. Example: perictrl@59820000 { - compatible = "socionext,uniphier-ld20-perictrl", + compatible = "socionext,uniphier-ld11-perictrl", "simple-mfd", "syscon"; reg = <0x59820000 0x200>; reset { - compatible = "socionext,uniphier-ld20-peri-reset"; + compatible = "socionext,uniphier-ld11-peri-reset"; #reset-cells = <1>; }; diff --git a/drivers/reset/reset-uniphier.c b/drivers/reset/reset-uniphier.c index 8b2558e7363e..968c3ae4535c 100644 --- a/drivers/reset/reset-uniphier.c +++ b/drivers/reset/reset-uniphier.c @@ -154,7 +154,7 @@ const struct uniphier_reset_data uniphier_sld3_mio_reset_data[] = { UNIPHIER_RESET_END, }; -const struct uniphier_reset_data uniphier_pro5_mio_reset_data[] = { +const struct uniphier_reset_data uniphier_pro5_sd_reset_data[] = { UNIPHIER_MIO_RESET_SD(0, 0), UNIPHIER_MIO_RESET_SD(1, 1), UNIPHIER_MIO_RESET_EMMC_HW_RESET(6, 1), @@ -360,7 +360,7 @@ static const struct of_device_id uniphier_reset_match[] = { .compatible = "socionext,uniphier-ld20-reset", .data = uniphier_ld20_sys_reset_data, }, - /* Media I/O reset */ + /* Media I/O reset, SD reset */ { .compatible = "socionext,uniphier-sld3-mio-reset", .data = uniphier_sld3_mio_reset_data, @@ -378,20 +378,20 @@ static const struct of_device_id uniphier_reset_match[] = { .data = uniphier_sld3_mio_reset_data, }, { - .compatible = "socionext,uniphier-pro5-mio-reset", - .data = uniphier_pro5_mio_reset_data, + .compatible = "socionext,uniphier-pro5-sd-reset", + .data = uniphier_pro5_sd_reset_data, }, { - .compatible = "socionext,uniphier-pxs2-mio-reset", - .data = uniphier_pro5_mio_reset_data, + .compatible = "socionext,uniphier-pxs2-sd-reset", + .data = uniphier_pro5_sd_reset_data, }, { .compatible = "socionext,uniphier-ld11-mio-reset", .data = uniphier_sld3_mio_reset_data, }, { - .compatible = "socionext,uniphier-ld20-mio-reset", - .data = uniphier_pro5_mio_reset_data, + .compatible = "socionext,uniphier-ld20-sd-reset", + .data = uniphier_pro5_sd_reset_data, }, /* Peripheral reset */ { -- cgit v1.2.3-59-g8ed1b From 4a2947e32ea9ca2bc36d68022b7d7b1014dca18f Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Fri, 21 Oct 2016 14:21:56 -0700 Subject: net: dsa: bcm_sf2: Do not rely on kexec_in_progress After discussing with Eric, it turns out that, while using kexec_in_progress is a nice optimization, which prevents us from always powering on the integrated PHY, let's just turn it on in the shutdown path. This removes a dependency on kexec_in_progress which, according to Eric should not be used by modules Fixes: 2399d6143f85 ("net: dsa: bcm_sf2: Prevent GPHY shutdown for kexec'd kernels") Signed-off-by: Florian Fainelli Signed-off-by: David S. 
Miller --- drivers/net/dsa/bcm_sf2.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index 077a24541584..e3ee27ce13dd 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -30,7 +30,6 @@ #include #include #include -#include #include "bcm_sf2.h" #include "bcm_sf2_regs.h" @@ -1141,9 +1140,11 @@ static void bcm_sf2_sw_shutdown(struct platform_device *pdev) /* For a kernel about to be kexec'd we want to keep the GPHY on for a * successful MDIO bus scan to occur. If we did turn off the GPHY * before (e.g: port_disable), this will also power it back on. + * + * Do not rely on kexec_in_progress, just power the PHY on. */ if (priv->hw_params.num_gphy == 1) - bcm_sf2_gphy_enable_set(priv->dev->ds, kexec_in_progress); + bcm_sf2_gphy_enable_set(priv->dev->ds, true); } #ifdef CONFIG_PM_SLEEP -- cgit v1.2.3-59-g8ed1b From bdc8cbd34d5d9d82c3db4df6a182b0530040062d Mon Sep 17 00:00:00 2001 From: Sudarsana Reddy Kalluru Date: Fri, 21 Oct 2016 04:43:39 -0400 Subject: qede: get_channels() need to populate max tx/rx coalesce values Recent changes in kernel ethtool implementation requires the driver callback for get_channels() has to populate the values for max tx/rx coalesce fields. Signed-off-by: Sudarsana Reddy Kalluru Signed-off-by: Yuval Mintz Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qede/qede_ethtool.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c index 25a9b293ee8f..16a1a478a2bd 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c @@ -756,6 +756,8 @@ static void qede_get_channels(struct net_device *dev, struct qede_dev *edev = netdev_priv(dev); channels->max_combined = QEDE_MAX_RSS_CNT(edev); + channels->max_rx = QEDE_MAX_RSS_CNT(edev); + channels->max_tx = QEDE_MAX_RSS_CNT(edev); channels->combined_count = QEDE_QUEUE_CNT(edev) - edev->fp_num_tx - edev->fp_num_rx; channels->tx_count = edev->fp_num_tx; -- cgit v1.2.3-59-g8ed1b From ba300ce35123a9ba8ea55318b0f579cc1ab3f51a Mon Sep 17 00:00:00 2001 From: Sudarsana Reddy Kalluru Date: Fri, 21 Oct 2016 04:43:40 -0400 Subject: qede: Do not allow RSS config for 100G devices RSS configuration is not supported for 100G adapters. Signed-off-by: Sudarsana Reddy Kalluru Signed-off-by: Yuval Mintz Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qede/qede_ethtool.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c index 16a1a478a2bd..bf04fb9e1ae6 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c @@ -1055,6 +1055,12 @@ static int qede_set_rxfh(struct net_device *dev, const u32 *indir, struct qede_dev *edev = netdev_priv(dev); int i; + if (edev->dev_info.common.num_hwfns > 1) { + DP_INFO(edev, + "RSS configuration is not supported for 100G devices\n"); + return -EOPNOTSUPP; + } + if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) return -EOPNOTSUPP; -- cgit v1.2.3-59-g8ed1b From 837d4eb6ed7cb0341079fac97e3037df6bef7482 Mon Sep 17 00:00:00 2001 From: Sudarsana Reddy Kalluru Date: Fri, 21 Oct 2016 04:43:41 -0400 Subject: qede: Loopback implementation should ignore the normal traffic During the execution of loopback test, driver may receive the packets which are not originated by this test, loopback implementation need to skip those packets. Signed-off-by: Sudarsana Reddy Kalluru Signed-off-by: Yuval Mintz Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qede/qede.h | 1 + drivers/net/ethernet/qlogic/qede/qede_ethtool.c | 80 +++++++++++++++---------- drivers/net/ethernet/qlogic/qede/qede_main.c | 3 +- 3 files changed, 49 insertions(+), 35 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h index 28c0e9f42c9e..d6a4a9a5eb4d 100644 --- a/drivers/net/ethernet/qlogic/qede/qede.h +++ b/drivers/net/ethernet/qlogic/qede/qede.h @@ -348,6 +348,7 @@ bool qede_has_rx_work(struct qede_rx_queue *rxq); int qede_txq_has_work(struct qede_tx_queue *txq); void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, struct qede_dev *edev, u8 count); +void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq); #define RX_RING_SIZE_POW 13 #define RX_RING_SIZE ((u16)BIT(RX_RING_SIZE_POW)) diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c index bf04fb9e1ae6..7da184cfe72e 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c @@ -1207,8 +1207,8 @@ static int qede_selftest_receive_traffic(struct qede_dev *edev) struct qede_rx_queue *rxq = NULL; struct sw_rx_data *sw_rx_data; union eth_rx_cqe *cqe; + int i, rc = 0; u8 *data_ptr; - int i; for_each_queue(i) { if (edev->fp_array[i].type & QEDE_FASTPATH_RX) { @@ -1227,46 +1227,60 @@ static int qede_selftest_receive_traffic(struct qede_dev *edev) * queue and that the loopback traffic is not IP. */ for (i = 0; i < QEDE_SELFTEST_POLL_COUNT; i++) { - if (qede_has_rx_work(rxq)) + if (!qede_has_rx_work(rxq)) { + usleep_range(100, 200); + continue; + } + + hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr); + sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring); + + /* Memory barrier to prevent the CPU from doing speculative + * reads of CQE/BD before reading hw_comp_cons. If the CQE is + * read before it is written by FW, then FW writes CQE and SB, + * and then the CPU reads the hw_comp_cons, it will use an old + * CQE. 
+ */ + rmb(); + + /* Get the CQE from the completion ring */ + cqe = (union eth_rx_cqe *)qed_chain_consume(&rxq->rx_comp_ring); + + /* Get the data from the SW ring */ + sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX; + sw_rx_data = &rxq->sw_rx_ring[sw_rx_index]; + fp_cqe = &cqe->fast_path_regular; + len = le16_to_cpu(fp_cqe->len_on_first_bd); + data_ptr = (u8 *)(page_address(sw_rx_data->data) + + fp_cqe->placement_offset + + sw_rx_data->page_offset); + if (ether_addr_equal(data_ptr, edev->ndev->dev_addr) && + ether_addr_equal(data_ptr + ETH_ALEN, + edev->ndev->dev_addr)) { + for (i = ETH_HLEN; i < len; i++) + if (data_ptr[i] != (unsigned char)(i & 0xff)) { + rc = -1; + break; + } + + qede_recycle_rx_bd_ring(rxq, edev, 1); + qed_chain_recycle_consumed(&rxq->rx_comp_ring); break; - usleep_range(100, 200); + } + + DP_INFO(edev, "Not the transmitted packet\n"); + qede_recycle_rx_bd_ring(rxq, edev, 1); + qed_chain_recycle_consumed(&rxq->rx_comp_ring); } - if (!qede_has_rx_work(rxq)) { + if (i == QEDE_SELFTEST_POLL_COUNT) { DP_NOTICE(edev, "Failed to receive the traffic\n"); return -1; } - hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr); - sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring); - - /* Memory barrier to prevent the CPU from doing speculative reads of CQE - * / BD before reading hw_comp_cons. If the CQE is read before it is - * written by FW, then FW writes CQE and SB, and then the CPU reads the - * hw_comp_cons, it will use an old CQE. - */ - rmb(); - - /* Get the CQE from the completion ring */ - cqe = (union eth_rx_cqe *)qed_chain_consume(&rxq->rx_comp_ring); - - /* Get the data from the SW ring */ - sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX; - sw_rx_data = &rxq->sw_rx_ring[sw_rx_index]; - fp_cqe = &cqe->fast_path_regular; - len = le16_to_cpu(fp_cqe->len_on_first_bd); - data_ptr = (u8 *)(page_address(sw_rx_data->data) + - fp_cqe->placement_offset + sw_rx_data->page_offset); - for (i = ETH_HLEN; i < len; i++) - if (data_ptr[i] != (unsigned char)(i & 0xff)) { - DP_NOTICE(edev, "Loopback test failed\n"); - qede_recycle_rx_bd_ring(rxq, edev, 1); - return -1; - } - - qede_recycle_rx_bd_ring(rxq, edev, 1); + qede_update_rx_prod(edev, rxq); - return 0; + return rc; } static int qede_selftest_run_loopback(struct qede_dev *edev, u32 loopback_mode) diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 343038ca047d..da8ef69cedd6 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -943,8 +943,7 @@ static inline int qede_realloc_rx_buffer(struct qede_dev *edev, return 0; } -static inline void qede_update_rx_prod(struct qede_dev *edev, - struct qede_rx_queue *rxq) +void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq) { u16 bd_prod = qed_chain_get_prod_idx(&rxq->rx_bd_ring); u16 cqe_prod = qed_chain_get_prod_idx(&rxq->rx_comp_ring); -- cgit v1.2.3-59-g8ed1b From 0e191827383a6503a3bc547e63c74ff093f450f5 Mon Sep 17 00:00:00 2001 From: Sudarsana Reddy Kalluru Date: Fri, 21 Oct 2016 04:43:42 -0400 Subject: qed*: Reduce the memory footprint for Rx path With the current default values for Rx path i.e., 8 queues of 8Kb entries each with 4Kb size, interface will consume 256Mb for Rx. The default values causing the driver probe to fail when the system memory is low. Based on the perforamnce results, rx-ring count value of 1Kb gives the comparable performance with Rx coalesce timeout of 12 seconds. Updating the default values. 
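Taking the changelog's figures at face value (8 Rx queues, 4 KiB per buffer, ring sizes counted in entries), the footprint change can be checked with a few lines of C; the real driver adds per-descriptor overhead on top, so treat this as a back-of-the-envelope estimate only:

#include <stdio.h>

int main(void)
{
	const unsigned long queues   = 8;	/* default Rx queue count */
	const unsigned long buf_size = 4096;	/* bytes per Rx buffer */
	const unsigned long old_ring = 8192;	/* old default: 8K entries per ring */
	const unsigned long new_ring = 1024;	/* new default: 1K entries per ring */

	printf("old defaults: %lu MiB\n", queues * old_ring * buf_size >> 20);
	printf("new defaults: %lu MiB\n", queues * new_ring * buf_size >> 20);
	return 0;
}

This prints the 256 MiB cited above for the old defaults versus roughly 32 MiB afterwards.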
Signed-off-by: Sudarsana Reddy Kalluru Signed-off-by: Yuval Mintz Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_main.c | 1 + drivers/net/ethernet/qlogic/qede/qede.h | 2 +- include/linux/qed/qed_if.h | 1 + 3 files changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 8dc3f4670f64..c418360ba02a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -878,6 +878,7 @@ static int qed_slowpath_start(struct qed_dev *cdev, } } + cdev->rx_coalesce_usecs = QED_DEFAULT_RX_USECS; rc = qed_nic_setup(cdev); if (rc) goto err; diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h index d6a4a9a5eb4d..974689a13337 100644 --- a/drivers/net/ethernet/qlogic/qede/qede.h +++ b/drivers/net/ethernet/qlogic/qede/qede.h @@ -354,7 +354,7 @@ void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq); #define RX_RING_SIZE ((u16)BIT(RX_RING_SIZE_POW)) #define NUM_RX_BDS_MAX (RX_RING_SIZE - 1) #define NUM_RX_BDS_MIN 128 -#define NUM_RX_BDS_DEF NUM_RX_BDS_MAX +#define NUM_RX_BDS_DEF ((u16)BIT(10) - 1) #define TX_RING_SIZE_POW 13 #define TX_RING_SIZE ((u16)BIT(TX_RING_SIZE_POW)) diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index f9ae903bbb84..8978a60371f4 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -146,6 +146,7 @@ enum qed_led_mode { #define DIRECT_REG_RD(reg_addr) readl((void __iomem *)(reg_addr)) #define QED_COALESCE_MAX 0xFF +#define QED_DEFAULT_RX_USECS 12 /* forward */ struct qed_dev; -- cgit v1.2.3-59-g8ed1b From ed0dd91515bbe19a19cfccca366cf163ed581cac Mon Sep 17 00:00:00 2001 From: Sudarsana Reddy Kalluru Date: Fri, 21 Oct 2016 04:43:43 -0400 Subject: qede: Reconfigure rss indirection direction table when rss count is updated Rx indirection table entries are in the range [0, (rss_count - 1)]. If user reduces the rss count, the table entries may not be in the ccorrect range. Need to reconfigure the table with new rss_count as a basis. Signed-off-by: Sudarsana Reddy Kalluru Signed-off-by: Yuval Mintz Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qede/qede_ethtool.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c index 7da184cfe72e..d47bdaa1f9a8 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c @@ -822,6 +822,13 @@ static int qede_set_channels(struct net_device *dev, edev->req_queues = count; edev->req_num_tx = channels->tx_count; edev->req_num_rx = channels->rx_count; + /* Reset the indirection table if rx queue count is updated */ + if ((edev->req_queues - edev->req_num_tx) != QEDE_RSS_COUNT(edev)) { + edev->rss_params_inited &= ~QEDE_RSS_INDIR_INITED; + memset(&edev->rss_params.rss_ind_table, 0, + sizeof(edev->rss_params.rss_ind_table)); + } + if (netif_running(dev)) qede_reload(edev, NULL, NULL); -- cgit v1.2.3-59-g8ed1b From 15c6de2c65048d45484e396194ba2598618d4cb4 Mon Sep 17 00:00:00 2001 From: Sudarsana Reddy Kalluru Date: Fri, 21 Oct 2016 04:43:44 -0400 Subject: qed: Zero-out the buffer paased to dcbx_query() API qed_dcbx_query_params() implementation populate the values to input buffer based on the dcbx mode and, the current negotiated state/params, the caller of this API need to memset the buffer to zero. Signed-off-by: Sudarsana Reddy Kalluru Signed-off-by: Yuval Mintz Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_dcbx.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c index 130da1c0490b..a4789a93b692 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c @@ -1190,6 +1190,7 @@ int qed_dcbx_get_config_params(struct qed_hwfn *p_hwfn, if (!dcbx_info) return -ENOMEM; + memset(dcbx_info, 0, sizeof(*dcbx_info)); rc = qed_dcbx_query_params(p_hwfn, dcbx_info, QED_DCBX_OPERATIONAL_MIB); if (rc) { kfree(dcbx_info); @@ -1225,6 +1226,7 @@ static struct qed_dcbx_get *qed_dcbnl_get_dcbx(struct qed_hwfn *hwfn, if (!dcbx_info) return NULL; + memset(dcbx_info, 0, sizeof(*dcbx_info)); if (qed_dcbx_query_params(hwfn, dcbx_info, type)) { kfree(dcbx_info); return NULL; -- cgit v1.2.3-59-g8ed1b From fabd545c6d27ac1977fe567c43cd4c72fad04172 Mon Sep 17 00:00:00 2001 From: Manish Chopra Date: Fri, 21 Oct 2016 04:43:45 -0400 Subject: qede: Fix incorrrect usage of APIs for un-mapping DMA memory Driver uses incorrect APIs to unmap DMA memory which were mapped using dma_map_single(). This patch fixes it to use appropriate APIs for un-mapping DMA memory. Signed-off-by: Manish Chopra Signed-off-by: Yuval Mintz Signed-off-by: Sudarsana Reddy Kalluru Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qede/qede_ethtool.c | 4 ++-- drivers/net/ethernet/qlogic/qede/qede_main.c | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c index d47bdaa1f9a8..12251a1032d1 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c @@ -1199,8 +1199,8 @@ static int qede_selftest_transmit_traffic(struct qede_dev *edev, } first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl); - dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd), - BD_UNMAP_LEN(first_bd), DMA_TO_DEVICE); + dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd), + BD_UNMAP_LEN(first_bd), DMA_TO_DEVICE); txq->sw_tx_cons++; txq->sw_tx_ring[idx].skb = NULL; diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index da8ef69cedd6..444b271059b2 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -313,8 +313,8 @@ static int qede_free_tx_pkt(struct qede_dev *edev, split_bd_len = BD_UNMAP_LEN(split); bds_consumed++; } - dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd), - BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE); + dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd), + BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE); /* Unmap the data of the skb frags */ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, bds_consumed++) { @@ -359,8 +359,8 @@ static void qede_free_failed_tx_pkt(struct qede_dev *edev, nbd--; } - dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd), - BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE); + dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd), + BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE); /* Unmap the data of the skb frags */ for (i = 0; i < nbd; i++) { -- cgit v1.2.3-59-g8ed1b From d1fe85ec7702917f2f1515b4c421d5d4792201a0 Mon Sep 17 00:00:00 2001 From: Sandhya Bankar Date: Sun, 25 Sep 2016 00:46:21 +0530 Subject: iio:chemical:atlas-ph-sensor: Fix use of 32 bit int to hold 16 bit big endian value This will result in a random value being reported on big endian architectures. (thanks to Lars-Peter Clausen for pointing out the effects of this bug) Only effects a value printed to the log, but as this reports the settings of the probe in question it may be of direct interest to users. 
Also, fixes the following sparse endianness warnings: drivers/iio/chemical/atlas-ph-sensor.c:215:9: warning: cast to restricted __be16 drivers/iio/chemical/atlas-ph-sensor.c:215:9: warning: cast to restricted __be16 drivers/iio/chemical/atlas-ph-sensor.c:215:9: warning: cast to restricted __be16 drivers/iio/chemical/atlas-ph-sensor.c:215:9: warning: cast to restricted __be16 drivers/iio/chemical/atlas-ph-sensor.c:215:9: warning: cast to restricted __be16 drivers/iio/chemical/atlas-ph-sensor.c:215:9: warning: cast to restricted __be16 drivers/iio/chemical/atlas-ph-sensor.c:215:9: warning: cast to restricted __be16 drivers/iio/chemical/atlas-ph-sensor.c:215:9: warning: cast to restricted __be16 Signed-off-by: Sandhya Bankar Fixes: e8dd92bfbff25 ("iio: chemical: atlas-ph-sensor: add EC feature") Cc: Signed-off-by: Jonathan Cameron --- drivers/iio/chemical/atlas-ph-sensor.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/iio/chemical/atlas-ph-sensor.c b/drivers/iio/chemical/atlas-ph-sensor.c index bd321b305a0a..ef761a508630 100644 --- a/drivers/iio/chemical/atlas-ph-sensor.c +++ b/drivers/iio/chemical/atlas-ph-sensor.c @@ -213,13 +213,14 @@ static int atlas_check_ec_calibration(struct atlas_data *data) struct device *dev = &data->client->dev; int ret; unsigned int val; + __be16 rval; - ret = regmap_bulk_read(data->regmap, ATLAS_REG_EC_PROBE, &val, 2); + ret = regmap_bulk_read(data->regmap, ATLAS_REG_EC_PROBE, &rval, 2); if (ret) return ret; - dev_info(dev, "probe set to K = %d.%.2d", be16_to_cpu(val) / 100, - be16_to_cpu(val) % 100); + val = be16_to_cpu(rval); + dev_info(dev, "probe set to K = %d.%.2d", val / 100, val % 100); ret = regmap_read(data->regmap, ATLAS_REG_EC_CALIB_STATUS, &val); if (ret) -- cgit v1.2.3-59-g8ed1b From 64bc2d02d754f4143d65cc21c644176db12ab5c8 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 18 Oct 2016 00:13:35 +0200 Subject: iio: accel: sca3000_core: avoid potentially uninitialized variable The newly added __sca3000_get_base_freq function handles all valid modes of the SCA3000_REG_ADDR_MODE register, but gcc notices that any other value (i.e. 0x00) causes the base_freq variable to not get initialized: drivers/staging/iio/accel/sca3000_core.c: In function 'sca3000_write_raw': drivers/staging/iio/accel/sca3000_core.c:527:23: error: 'base_freq' may be used uninitialized in this function [-Werror=maybe-uninitialized] This adds explicit error handling for unexpected register values, to ensure this cannot happen. Fixes: e0f3fc9b47e6 ("iio: accel: sca3000_core: implemented IIO_CHAN_INFO_SAMP_FREQ") Signed-off-by: Arnd Bergmann Cc: Ico Doornekamp Cc: Jonathan Cameron Signed-off-by: Jonathan Cameron --- drivers/staging/iio/accel/sca3000_core.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers') diff --git a/drivers/staging/iio/accel/sca3000_core.c b/drivers/staging/iio/accel/sca3000_core.c index d626125d7af9..564b36d4f648 100644 --- a/drivers/staging/iio/accel/sca3000_core.c +++ b/drivers/staging/iio/accel/sca3000_core.c @@ -468,6 +468,8 @@ static inline int __sca3000_get_base_freq(struct sca3000_state *st, case SCA3000_MEAS_MODE_OP_2: *base_freq = info->option_mode_2_freq; break; + default: + ret = -EINVAL; } error_ret: return ret; -- cgit v1.2.3-59-g8ed1b From a6e2846cacf97d4c70c5e923325b015cfa1e9053 Mon Sep 17 00:00:00 2001 From: Sudarsana Reddy Kalluru Date: Fri, 21 Oct 2016 02:09:17 -0400 Subject: bnx2x: Use the correct divisor value for PHC clock readings. 
Time Sync (PTP) implementation uses the divisor/shift value for converting the clock ticks to nanoseconds. Driver currently defines shift value as 1, this results in the nanoseconds value to be calculated as half the actual value. Hence the user application fails to synchronize the device clock value with the PTP master device clock. Need to use the 'shift' value of 0. Signed-off-by: Sony.Chacko Signed-off-by: Sudarsana Reddy Kalluru Signed-off-by: Yuval Mintz Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 20fe6a8c35c1..0cee4c0283f9 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -15241,7 +15241,7 @@ static void bnx2x_init_cyclecounter(struct bnx2x *bp) memset(&bp->cyclecounter, 0, sizeof(bp->cyclecounter)); bp->cyclecounter.read = bnx2x_cyclecounter_read; bp->cyclecounter.mask = CYCLECOUNTER_MASK(64); - bp->cyclecounter.shift = 1; + bp->cyclecounter.shift = 0; bp->cyclecounter.mult = 1; } -- cgit v1.2.3-59-g8ed1b From 235bde1ed3f0fff0f68f367ec8807b89ea151258 Mon Sep 17 00:00:00 2001 From: Fabio Estevam Date: Fri, 21 Oct 2016 09:34:29 -0200 Subject: net: fec: Call swap_buffer() prior to IP header alignment Commit 3ac72b7b63d5 ("net: fec: align IP header in hardware") breaks networking on mx28. There is an erratum on mx28 (ENGR121613 - ENET big endian mode not compatible with ARM little endian) that requires an additional byte-swap operation to workaround this problem. So call swap_buffer() prior to performing the IP header alignment to restore network functionality on mx28. Fixes: 3ac72b7b63d5 ("net: fec: align IP header in hardware") Reported-and-tested-by: Henri Roosen Signed-off-by: Fabio Estevam Signed-off-by: David S. Miller --- drivers/net/ethernet/freescale/fec_main.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 48a033e64423..5aa9d4ded214 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -1430,14 +1430,14 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) skb_put(skb, pkt_len - 4); data = skb->data; + if (!is_copybreak && need_swap) + swap_buffer(data, pkt_len); + #if !defined(CONFIG_M5272) if (fep->quirks & FEC_QUIRK_HAS_RACC) data = skb_pull_inline(skb, 2); #endif - if (!is_copybreak && need_swap) - swap_buffer(data, pkt_len); - /* Extract the enhanced buffer descriptor */ ebdp = NULL; if (fep->bufdesc_ex) -- cgit v1.2.3-59-g8ed1b From 6d8d271eee0eaa3c3ffd6db29a825e02316359d4 Mon Sep 17 00:00:00 2001 From: Javier Martinez Canillas Date: Tue, 18 Oct 2016 17:44:01 -0300 Subject: gpio: ath79: Fix module autoload If the driver is built as a module, autoload won't work because the module alias information is not filled. So user-space can't match the registered device with the corresponding module. Export the module alias information using the MODULE_DEVICE_TABLE() macro. 
Before this patch: $ modinfo drivers/gpio/gpio-ath79.ko | grep alias $ After this patch: $ modinfo drivers/gpio/gpio-ath79.ko | grep alias alias: of:N*T*Cqca,ar9340-gpioC* alias: of:N*T*Cqca,ar9340-gpio alias: of:N*T*Cqca,ar7100-gpioC* alias: of:N*T*Cqca,ar7100-gpio Signed-off-by: Javier Martinez Canillas Acked-by: Aban Bedel Signed-off-by: Linus Walleij --- drivers/gpio/gpio-ath79.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/gpio/gpio-ath79.c b/drivers/gpio/gpio-ath79.c index 9457e2022bf6..dc37dbe4b46d 100644 --- a/drivers/gpio/gpio-ath79.c +++ b/drivers/gpio/gpio-ath79.c @@ -219,6 +219,7 @@ static const struct of_device_id ath79_gpio_of_match[] = { { .compatible = "qca,ar9340-gpio" }, {}, }; +MODULE_DEVICE_TABLE(of, ath79_gpio_of_match); static int ath79_gpio_probe(struct platform_device *pdev) { -- cgit v1.2.3-59-g8ed1b From d71cf15b865bdd45925f7b094d169aaabd705145 Mon Sep 17 00:00:00 2001 From: Liu Gang Date: Fri, 21 Oct 2016 15:31:28 +0800 Subject: gpio: mpc8xxx: Correct irq handler function From the beginning of the gpio-mpc8xxx.c, the "handle_level_irq" has being used to handle GPIO interrupts in the PowerPC/Layerscape platforms. But actually, almost all PowerPC/Layerscape platforms assert an interrupt request upon either a high-to-low change or any change on the state of the signal. So the "handle_level_irq" is not reasonable for PowerPC/Layerscape GPIO interrupt, it should be "handle_edge_irq". Otherwise the system may lost some interrupts from the PIN's state changes. Signed-off-by: Liu Gang Signed-off-by: Linus Walleij --- drivers/gpio/gpio-mpc8xxx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c index 425501c39527..793518a30afe 100644 --- a/drivers/gpio/gpio-mpc8xxx.c +++ b/drivers/gpio/gpio-mpc8xxx.c @@ -239,7 +239,7 @@ static int mpc8xxx_gpio_irq_map(struct irq_domain *h, unsigned int irq, irq_hw_number_t hwirq) { irq_set_chip_data(irq, h->host_data); - irq_set_chip_and_handler(irq, &mpc8xxx_irq_chip, handle_level_irq); + irq_set_chip_and_handler(irq, &mpc8xxx_irq_chip, handle_edge_irq); return 0; } -- cgit v1.2.3-59-g8ed1b From a05b82d5149dfeef05254a11c3636a89a854520a Mon Sep 17 00:00:00 2001 From: Vaibhav Jain Date: Fri, 21 Oct 2016 14:53:53 +0530 Subject: cxl: Fix leaking pid refs in some error paths In some error paths in functions cxl_start_context and afu_ioctl_start_work pid references to the current & group-leader tasks can leak after they are taken. This patch fixes these error paths to release these pid references before exiting the error path. 
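The rule the fix restores is the usual one for kernel error paths: every reference taken on the way into a function must be dropped again on every early exit. A stripped-down, hypothetical sketch of that shape, with plain counters standing in for the struct pid and adapter-context references that cxl takes:

#include <stdio.h>

static int pid_refs, ctx_refs;

static int attach(int should_fail)
{
	return should_fail ? -1 : 0;
}

static int start_work(int should_fail)
{
	int rc;

	ctx_refs++;			/* adapter context reference */
	pid_refs += 2;			/* task pid + group-leader pid */

	rc = attach(should_fail);
	if (rc) {
		pid_refs -= 2;		/* error path undoes everything taken above */
		ctx_refs--;
		return rc;
	}
	return 0;
}

int main(void)
{
	start_work(1);
	printf("pid_refs=%d ctx_refs=%d after a failed attach\n", pid_refs, ctx_refs);
	return 0;
}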
Fixes: 7b8ad495d592 ("cxl: Fix DSI misses when the context owning task exits") Cc: stable@vger.kernel.org # v4.5+ Reviewed-by: Andrew Donnellan Reported-by: Frederic Barrat Signed-off-by: Vaibhav Jain Acked-by: Frederic Barrat Signed-off-by: Michael Ellerman --- drivers/misc/cxl/api.c | 2 ++ drivers/misc/cxl/file.c | 22 +++++++++++++--------- 2 files changed, 15 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/misc/cxl/api.c b/drivers/misc/cxl/api.c index af23d7dfe752..2e5233b60971 100644 --- a/drivers/misc/cxl/api.c +++ b/drivers/misc/cxl/api.c @@ -247,7 +247,9 @@ int cxl_start_context(struct cxl_context *ctx, u64 wed, cxl_ctx_get(); if ((rc = cxl_ops->attach_process(ctx, kernel, wed, 0))) { + put_pid(ctx->glpid); put_pid(ctx->pid); + ctx->glpid = ctx->pid = NULL; cxl_adapter_context_put(ctx->afu->adapter); cxl_ctx_put(); goto out; diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c index d0b421f49b39..77080cc5fa0a 100644 --- a/drivers/misc/cxl/file.c +++ b/drivers/misc/cxl/file.c @@ -193,6 +193,16 @@ static long afu_ioctl_start_work(struct cxl_context *ctx, ctx->mmio_err_ff = !!(work.flags & CXL_START_WORK_ERR_FF); + /* + * Increment the mapped context count for adapter. This also checks + * if adapter_context_lock is taken. + */ + rc = cxl_adapter_context_get(ctx->afu->adapter); + if (rc) { + afu_release_irqs(ctx, ctx); + goto out; + } + /* * We grab the PID here and not in the file open to allow for the case * where a process (master, some daemon, etc) has opened the chardev on @@ -205,15 +215,6 @@ static long afu_ioctl_start_work(struct cxl_context *ctx, ctx->pid = get_task_pid(current, PIDTYPE_PID); ctx->glpid = get_task_pid(current->group_leader, PIDTYPE_PID); - /* - * Increment the mapped context count for adapter. This also checks - * if adapter_context_lock is taken. - */ - rc = cxl_adapter_context_get(ctx->afu->adapter); - if (rc) { - afu_release_irqs(ctx, ctx); - goto out; - } trace_cxl_attach(ctx, work.work_element_descriptor, work.num_interrupts, amr); @@ -221,6 +222,9 @@ static long afu_ioctl_start_work(struct cxl_context *ctx, amr))) { afu_release_irqs(ctx, ctx); cxl_adapter_context_put(ctx->afu->adapter); + put_pid(ctx->glpid); + put_pid(ctx->pid); + ctx->glpid = ctx->pid = NULL; goto out; } -- cgit v1.2.3-59-g8ed1b From eeaed4bb5a35591470b545590bb2f26dbe7653a2 Mon Sep 17 00:00:00 2001 From: Sinan Kaya Date: Mon, 24 Oct 2016 00:31:30 -0400 Subject: ACPI/PCI/IRQ: assign ISA IRQ directly during early boot stages We do not want to store the SCI penalty in the acpi_isa_irq_penalty[] table because acpi_isa_irq_penalty[] only holds ISA IRQ penalties and there's no guarantee that the SCI is an ISA IRQ. We add in the SCI penalty as a special case in acpi_irq_get_penalty(). But if we called acpi_penalize_isa_irq() or acpi_irq_penalty_update() for an SCI that happened to be an ISA IRQ, they stored the SCI penalty (part of the acpi_irq_get_penalty() return value) in acpi_isa_irq_penalty[]. Subsequent calls to acpi_irq_get_penalty() returned a penalty that included *two* SCI penalties. Fixes: 103544d86976 (ACPI,PCI,IRQ: reduce resource requirements) Signed-off-by: Sinan Kaya Acked-by: Bjorn Helgaas Tested-by: Jonathan Liu Signed-off-by: Rafael J. 
Wysocki --- drivers/acpi/pci_link.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c index c983bf733ad3..6229b022a5d4 100644 --- a/drivers/acpi/pci_link.c +++ b/drivers/acpi/pci_link.c @@ -849,7 +849,7 @@ static int __init acpi_irq_penalty_update(char *str, int used) continue; if (used) - new_penalty = acpi_irq_get_penalty(irq) + + new_penalty = acpi_isa_irq_penalty[irq] + PIRQ_PENALTY_ISA_USED; else new_penalty = 0; @@ -871,7 +871,7 @@ static int __init acpi_irq_penalty_update(char *str, int used) void acpi_penalize_isa_irq(int irq, int active) { if ((irq >= 0) && (irq < ARRAY_SIZE(acpi_isa_irq_penalty))) - acpi_isa_irq_penalty[irq] = acpi_irq_get_penalty(irq) + + acpi_isa_irq_penalty[irq] += (active ? PIRQ_PENALTY_ISA_USED : PIRQ_PENALTY_PCI_USING); } -- cgit v1.2.3-59-g8ed1b From f1caa61df2a3dc4c58316295c5dc5edba4c68d85 Mon Sep 17 00:00:00 2001 From: Sinan Kaya Date: Mon, 24 Oct 2016 00:31:31 -0400 Subject: ACPI/PCI: pci_link: penalize SCI correctly Ondrej reported that IRQs stopped working in v4.7 on several platforms. A typical scenario, from Ondrej's VT82C694X/694X, is: ACPI: Using PIC for interrupt routing ACPI: PCI Interrupt Link [LNKA] (IRQs 1 3 4 5 6 7 10 *11 12 14 15) ACPI: No IRQ available for PCI Interrupt Link [LNKA] 8139too 0000:00:0f.0: PCI INT A: no GSI We're using PIC routing, so acpi_irq_balance == 0, and LNKA is already active at IRQ 11. In that case, acpi_pci_link_allocate() only tries to use the active IRQ (IRQ 11) which also happens to be the SCI. We should penalize the SCI by PIRQ_PENALTY_PCI_USING, but irq_get_trigger_type(11) returns something other than IRQ_TYPE_LEVEL_LOW, so we penalize it by PIRQ_PENALTY_ISA_ALWAYS instead, which makes acpi_pci_link_allocate() assume the IRQ isn't available and give up. Add acpi_penalize_sci_irq() so platforms can tell us the SCI IRQ, trigger, and polarity directly and we don't have to depend on irq_get_trigger_type(). Fixes: 103544d86976 (ACPI,PCI,IRQ: reduce resource requirements) Link: http://lkml.kernel.org/r/201609251512.05657.linux@rainbow-software.org Reported-by: Ondrej Zary Acked-by: Bjorn Helgaas Signed-off-by: Sinan Kaya Tested-by: Jonathan Liu Signed-off-by: Rafael J. Wysocki --- arch/x86/kernel/acpi/boot.c | 1 + drivers/acpi/pci_link.c | 30 +++++++++++++++--------------- include/linux/acpi.h | 1 + 3 files changed, 17 insertions(+), 15 deletions(-) (limited to 'drivers') diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index 8a5abaa7d453..931ced8ca345 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -454,6 +454,7 @@ static void __init acpi_sci_ioapic_setup(u8 bus_irq, u16 polarity, u16 trigger, polarity = acpi_sci_flags & ACPI_MADT_POLARITY_MASK; mp_override_legacy_irq(bus_irq, polarity, trigger, gsi); + acpi_penalize_sci_irq(bus_irq, trigger, polarity); /* * stash over-ride to indicate we've been here diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c index 6229b022a5d4..74bf96efae95 100644 --- a/drivers/acpi/pci_link.c +++ b/drivers/acpi/pci_link.c @@ -87,6 +87,7 @@ struct acpi_pci_link { static LIST_HEAD(acpi_link_list); static DEFINE_MUTEX(acpi_link_lock); +static int sci_irq = -1, sci_penalty; /* -------------------------------------------------------------------------- PCI Link Device Management @@ -496,25 +497,13 @@ static int acpi_irq_get_penalty(int irq) { int penalty = 0; - /* - * Penalize IRQ used by ACPI SCI. 
If ACPI SCI pin attributes conflict - * with PCI IRQ attributes, mark ACPI SCI as ISA_ALWAYS so it won't be - * use for PCI IRQs. - */ - if (irq == acpi_gbl_FADT.sci_interrupt) { - u32 type = irq_get_trigger_type(irq) & IRQ_TYPE_SENSE_MASK; - - if (type != IRQ_TYPE_LEVEL_LOW) - penalty += PIRQ_PENALTY_ISA_ALWAYS; - else - penalty += PIRQ_PENALTY_PCI_USING; - } + if (irq == sci_irq) + penalty += sci_penalty; if (irq < ACPI_MAX_ISA_IRQS) return penalty + acpi_isa_irq_penalty[irq]; - penalty += acpi_irq_pci_sharing_penalty(irq); - return penalty; + return penalty + acpi_irq_pci_sharing_penalty(irq); } int __init acpi_irq_penalty_init(void) @@ -881,6 +870,17 @@ bool acpi_isa_irq_available(int irq) acpi_irq_get_penalty(irq) < PIRQ_PENALTY_ISA_ALWAYS); } +void acpi_penalize_sci_irq(int irq, int trigger, int polarity) +{ + sci_irq = irq; + + if (trigger == ACPI_MADT_TRIGGER_LEVEL && + polarity == ACPI_MADT_POLARITY_ACTIVE_LOW) + sci_penalty = PIRQ_PENALTY_PCI_USING; + else + sci_penalty = PIRQ_PENALTY_ISA_ALWAYS; +} + /* * Over-ride default table to reserve additional IRQs for use by ISA * e.g. acpi_irq_isa=5 diff --git a/include/linux/acpi.h b/include/linux/acpi.h index ddbeda6dbdc8..689a8b9b9c8f 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -326,6 +326,7 @@ struct pci_dev; int acpi_pci_irq_enable (struct pci_dev *dev); void acpi_penalize_isa_irq(int irq, int active); bool acpi_isa_irq_available(int irq); +void acpi_penalize_sci_irq(int irq, int trigger, int polarity); void acpi_pci_irq_disable (struct pci_dev *dev); extern int ec_read(u8 addr, u8 *val); -- cgit v1.2.3-59-g8ed1b From 98756f5319c64c883caa910dce702d9edefe7810 Mon Sep 17 00:00:00 2001 From: Sinan Kaya Date: Mon, 24 Oct 2016 00:31:32 -0400 Subject: ACPI/PCI: pci_link: Include PIRQ_PENALTY_PCI_USING for ISA IRQs Commit 103544d86976 ("ACPI,PCI,IRQ: reduce resource requirements") replaced the addition of PIRQ_PENALTY_PCI_USING in acpi_pci_link_allocate() with an addition in acpi_irq_pci_sharing_penalty(), but f7eca374f000 ("ACPI,PCI,IRQ: separate ISA penalty calculation") removed the use of acpi_irq_pci_sharing_penalty() for ISA IRQs. Therefore, PIRQ_PENALTY_PCI_USING is missing from ISA IRQs used by interrupt links. Include that penalty by adding it in the acpi_pci_link_allocate() path. Fixes: f7eca374f000 (ACPI,PCI,IRQ: separate ISA penalty calculation) Signed-off-by: Sinan Kaya Acked-by: Bjorn Helgaas Tested-by: Jonathan Liu Signed-off-by: Rafael J. Wysocki --- drivers/acpi/pci_link.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers') diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c index 74bf96efae95..bc3d914dfc3e 100644 --- a/drivers/acpi/pci_link.c +++ b/drivers/acpi/pci_link.c @@ -608,6 +608,10 @@ static int acpi_pci_link_allocate(struct acpi_pci_link *link) acpi_device_bid(link->device)); return -ENODEV; } else { + if (link->irq.active < ACPI_MAX_ISA_IRQS) + acpi_isa_irq_penalty[link->irq.active] += + PIRQ_PENALTY_PCI_USING; + printk(KERN_WARNING PREFIX "%s [%s] enabled at IRQ %d\n", acpi_device_name(link->device), acpi_device_bid(link->device), link->irq.active); -- cgit v1.2.3-59-g8ed1b From ed19ece135a6908244e22e4ab395165999a4d3ab Mon Sep 17 00:00:00 2001 From: Wenyou Yang Date: Thu, 29 Sep 2016 18:07:07 +0800 Subject: usb: ohci-at91: Set RemoteWakeupConnected bit explicitly. The reset value of RWC is 0, set RemoteWakeupConnected bit explicitly before calling ohci_run, it also fixes the issue that the mass storage stick connected wasn't suspended when the system suspend. 
Signed-off-by: Wenyou Yang Signed-off-by: Greg Kroah-Hartman --- drivers/usb/host/ohci-at91.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c index 5b5880c0ae19..b38a228134df 100644 --- a/drivers/usb/host/ohci-at91.c +++ b/drivers/usb/host/ohci-at91.c @@ -221,6 +221,12 @@ static int usb_hcd_at91_probe(const struct hc_driver *driver, ohci->num_ports = board->ports; at91_start_hc(pdev); + /* + * The RemoteWakeupConnected bit has to be set explicitly + * before calling ohci_run. The reset value of this bit is 0. + */ + ohci->hc_control = OHCI_CTRL_RWC; + retval = usb_add_hcd(hcd, irq, IRQF_SHARED); if (retval == 0) { device_wakeup_enable(hcd->self.controller); @@ -677,9 +683,6 @@ ohci_hcd_at91_drv_suspend(struct device *dev) * REVISIT: some boards will be able to turn VBUS off... */ if (!ohci_at91->wakeup) { - ohci->hc_control = ohci_readl(ohci, &ohci->regs->control); - ohci->hc_control &= OHCI_CTRL_RWC; - ohci_writel(ohci, ohci->hc_control, &ohci->regs->control); ohci->rh_state = OHCI_RH_HALTED; /* flush the writes */ -- cgit v1.2.3-59-g8ed1b From 1e4b4348753ba555ea93470ea8af821425d9c826 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Mon, 17 Oct 2016 20:11:59 +0900 Subject: usb: ehci-platform: increase EHCI_MAX_RSTS to 4 Socionext LD11 SoC (arch/arm64/boot/dts/socionext/uniphier-ld11.dtsi) needs to handle 4 reset lines for EHCI. Signed-off-by: Masahiro Yamada Signed-off-by: Greg Kroah-Hartman --- drivers/usb/host/ehci-platform.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/usb/host/ehci-platform.c b/drivers/usb/host/ehci-platform.c index 876dca4fc216..a268d9e8d6cf 100644 --- a/drivers/usb/host/ehci-platform.c +++ b/drivers/usb/host/ehci-platform.c @@ -39,7 +39,7 @@ #define DRIVER_DESC "EHCI generic platform driver" #define EHCI_MAX_CLKS 4 -#define EHCI_MAX_RSTS 3 +#define EHCI_MAX_RSTS 4 #define hcd_to_ehci_priv(h) ((struct ehci_platform_priv *)hcd_to_ehci(h)->priv) struct ehci_platform_priv { -- cgit v1.2.3-59-g8ed1b From d8e5f0eca1e88215e45aca27115ea747e6164da1 Mon Sep 17 00:00:00 2001 From: Tony Lindgren Date: Wed, 19 Oct 2016 12:03:39 -0500 Subject: usb: musb: Fix hardirq-safe hardirq-unsafe lock order error If we configure musb with 2430 glue as a peripheral, and then rmmod omap2430 module, we'll get the following error: [ INFO: HARDIRQ-safe -> HARDIRQ-unsafe lock order detected ] ... rmmod/413 [HC0[0]:SC0[0]:HE0:SE1] is trying to acquire: (&phy->mutex){+.+.+.}, at: [] phy_power_off+0x1c/0xb8 [ 204.678710] and this task is already holding: (&(&musb->lock)->rlock){-.-...}, at: [] musb_gadget_stop+0x24/0xec [musb_hdrc] which would create a new lock dependency: (&(&musb->lock)->rlock){-.-...} -> (&phy->mutex){+.+.+.} ... This is because some glue layers expect musb_platform_enable/disable to be called with spinlock held, and 2430 glue layer has USB PHY on the I2C bus using a mutex. We could fix the glue layers to take the spinlock, but we still have a problem of musb_plaform_enable/disable being called in an unbalanced manner. So that would still lead into USB PHY enable/disable related problems for omap2430 glue layer. While it makes sense to only enable USB PHY when needed from PM point of view, in this case we just can't do it yet without breaking things. So let's just revert phy_enable/disable related changes instead and reconsider this after we have fixed musb_platform_enable/disable to be balanced. 
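The rule behind the lockdep splat above: a sleeping lock must never be taken while a spinlock is held, and phy_power_on()/phy_power_off() acquire phy->mutex (the PHY here sits behind I2C, so its callbacks can sleep). A minimal sketch of the nesting that this revert avoids, illustrative only and not the driver code:

        spin_lock_irqsave(&musb->lock, flags);  /* hardirq-safe spinlock held */
        phy_power_off(musb->phy);               /* acquires phy->mutex and may sleep: invalid */
        spin_unlock_irqrestore(&musb->lock, flags);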
Fixes: a83e17d0f73b ("usb: musb: Improve PM runtime and phy handling for 2430 glue layer") Reviewed-by: Laurent Pinchart Signed-off-by: Tony Lindgren Signed-off-by: Bin Liu Signed-off-by: Greg Kroah-Hartman --- drivers/usb/musb/omap2430.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c index 1ab6973d4f61..cc1225485509 100644 --- a/drivers/usb/musb/omap2430.c +++ b/drivers/usb/musb/omap2430.c @@ -287,6 +287,7 @@ static int omap2430_musb_init(struct musb *musb) } musb->isr = omap2430_musb_interrupt; phy_init(musb->phy); + phy_power_on(musb->phy); l = musb_readl(musb->mregs, OTG_INTERFSEL); @@ -323,8 +324,6 @@ static void omap2430_musb_enable(struct musb *musb) struct musb_hdrc_platform_data *pdata = dev_get_platdata(dev); struct omap_musb_board_data *data = pdata->board_data; - if (!WARN_ON(!musb->phy)) - phy_power_on(musb->phy); switch (glue->status) { @@ -361,9 +360,6 @@ static void omap2430_musb_disable(struct musb *musb) struct device *dev = musb->controller; struct omap2430_glue *glue = dev_get_drvdata(dev->parent); - if (!WARN_ON(!musb->phy)) - phy_power_off(musb->phy); - if (glue->status != MUSB_UNKNOWN) omap_control_usb_set_mode(glue->control_otghs, USB_MODE_DISCONNECT); @@ -375,6 +371,7 @@ static int omap2430_musb_exit(struct musb *musb) struct omap2430_glue *glue = dev_get_drvdata(dev->parent); omap2430_low_level_exit(musb); + phy_power_off(musb->phy); phy_exit(musb->phy); musb->phy = NULL; cancel_work_sync(&glue->omap_musb_mailbox_work); -- cgit v1.2.3-59-g8ed1b From cacaaf80c3a6f6036d290f353c4c1db237b42647 Mon Sep 17 00:00:00 2001 From: Tony Lindgren Date: Wed, 19 Oct 2016 12:03:40 -0500 Subject: usb: musb: Call pm_runtime from musb_gadget_queue If we're booting pandaboard using NFSroot over built-in g_ether, we can get the following after booting once and doing a warm reset: g_ether gadget: ecm_open g_ether gadget: notify connect true ... WARNING: CPU: 0 PID: 1 at drivers/bus/omap_l3_noc.c:147 l3_interrupt_handler+0x220/0x34c 44000000.ocp:L3 Custom Error: MASTER MPU TARGET L4CFG (Read): Data Access in User mode du ring Functional access ... Fix the issue by calling pm_runtime functions from musb_gadget_queue. Note that in the long run we should be able to queue the pending transfers if pm_runtime is not active, and flush the queue from pm_runtime_resume. 
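The fix follows the usual runtime-PM idiom for request paths that may run while the controller is autosuspended: take a synchronous runtime-PM reference before touching registers, then drop it with autosuspend when done. A minimal sketch of the idiom, where 'dev' is a stand-in for the controller device:

        pm_runtime_get_sync(dev);               /* resume the controller if it is suspended */

        /* ... queue the request / touch device registers ... */

        pm_runtime_mark_last_busy(dev);         /* restart the autosuspend timer */
        pm_runtime_put_autosuspend(dev);        /* drop the reference; suspend after the delay */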
Reported-by: Laurent Pinchart Tested-by: Laurent Pinchart Signed-off-by: Tony Lindgren Signed-off-by: Bin Liu Signed-off-by: Greg Kroah-Hartman --- drivers/usb/musb/musb_gadget.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers') diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c index bff4869a57cd..4042ea017985 100644 --- a/drivers/usb/musb/musb_gadget.c +++ b/drivers/usb/musb/musb_gadget.c @@ -1255,6 +1255,7 @@ static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req, map_dma_buffer(request, musb, musb_ep); + pm_runtime_get_sync(musb->controller); spin_lock_irqsave(&musb->lock, lockflags); /* don't queue if the ep is down */ @@ -1275,6 +1276,9 @@ static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req, unlock: spin_unlock_irqrestore(&musb->lock, lockflags); + pm_runtime_mark_last_busy(musb->controller); + pm_runtime_put_autosuspend(musb->controller); + return status; } -- cgit v1.2.3-59-g8ed1b From ed6d6f8f42d7302f6f9b6245f34927ec20d26c12 Mon Sep 17 00:00:00 2001 From: Bryan Paluch Date: Mon, 17 Oct 2016 08:54:46 -0400 Subject: usb: increase ohci watchdog delay to 275 msec Increase ohci watchout delay to 275 ms. Previous delay was 250 ms with 20 ms of slack, after removing slack time some ohci controllers don't respond in time. Logs from systems with controllers that have the issue would show "HcDoneHead not written back; disabled" Signed-off-by: Bryan Paluch Acked-by: Alan Stern Cc: stable Signed-off-by: Greg Kroah-Hartman --- drivers/usb/host/ohci-hcd.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c index 1700908b84ef..86612ac3fda2 100644 --- a/drivers/usb/host/ohci-hcd.c +++ b/drivers/usb/host/ohci-hcd.c @@ -72,7 +72,7 @@ static const char hcd_name [] = "ohci_hcd"; #define STATECHANGE_DELAY msecs_to_jiffies(300) -#define IO_WATCHDOG_DELAY msecs_to_jiffies(250) +#define IO_WATCHDOG_DELAY msecs_to_jiffies(275) #include "ohci.h" #include "pci-quirks.h" -- cgit v1.2.3-59-g8ed1b From 806487a8fc8f385af75ed261e9ab658fc845e633 Mon Sep 17 00:00:00 2001 From: Punit Agrawal Date: Tue, 18 Oct 2016 17:07:19 +0100 Subject: ACPI / APEI: Fix incorrect return value of ghes_proc() Although ghes_proc() tests for errors while reading the error status, it always return success (0). Fix this by propagating the return value. Fixes: d334a49113a4a33 (ACPI, APEI, Generic Hardware Error Source memory error support) Signed-of-by: Punit Agrawal Tested-by: Tyler Baicar Reviewed-by: Borislav Petkov [ rjw: Subject ] Signed-off-by: Rafael J. Wysocki --- drivers/acpi/apei/ghes.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c index f0a029e68d3e..0d099a24f776 100644 --- a/drivers/acpi/apei/ghes.c +++ b/drivers/acpi/apei/ghes.c @@ -662,7 +662,7 @@ static int ghes_proc(struct ghes *ghes) ghes_do_proc(ghes, ghes->estatus); out: ghes_clear_estatus(ghes); - return 0; + return rc; } static void ghes_add_timer(struct ghes *ghes) -- cgit v1.2.3-59-g8ed1b From b76032396d7958f006bccf5fb2535beb5526837c Mon Sep 17 00:00:00 2001 From: Yoshihiro Shimoda Date: Thu, 20 Oct 2016 13:19:19 +0900 Subject: usb: renesas_usbhs: add wait after initialization for R-Car Gen3 Since the controller on R-Car Gen3 doesn't have any status registers to detect initialization (LPSTS.SUSPM = 1) and the initialization needs up to 45 usec, this patch adds wait after the initialization. 
Otherwise, writing other registers (e.g. INTENB0) will fail. Fixes: de18757e272d ("usb: renesas_usbhs: add R-Car Gen3 power control") Cc: # v4.6+ Cc: Signed-off-by: Yoshihiro Shimoda Signed-off-by: Greg Kroah-Hartman --- drivers/usb/renesas_usbhs/rcar3.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/usb/renesas_usbhs/rcar3.c b/drivers/usb/renesas_usbhs/rcar3.c index 1d70add926f0..d544b331c9f2 100644 --- a/drivers/usb/renesas_usbhs/rcar3.c +++ b/drivers/usb/renesas_usbhs/rcar3.c @@ -9,6 +9,7 @@ * */ +#include #include #include "common.h" #include "rcar3.h" @@ -35,10 +36,13 @@ static int usbhs_rcar3_power_ctrl(struct platform_device *pdev, usbhs_write32(priv, UGCTRL2, UGCTRL2_RESERVED_3 | UGCTRL2_USB0SEL_OTG); - if (enable) + if (enable) { usbhs_bset(priv, LPSTS, LPSTS_SUSPM, LPSTS_SUSPM); - else + /* The controller on R-Car Gen3 needs to wait up to 45 usec */ + udelay(45); + } else { usbhs_bset(priv, LPSTS, LPSTS_SUSPM, 0); + } return 0; } -- cgit v1.2.3-59-g8ed1b From cf55902b9c306ed259eb57ff111a0c152620f4a6 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Thu, 13 Oct 2016 15:55:08 +0300 Subject: staging: android: ion: Fix error handling in ion_query_heaps() If the copy_to_user() fails we should unlock and return directly without updating "cnt". Also the return value should be -EFAULT instead of the number of bytes remaining. Fixes: 02b23803c6af ("staging: android: ion: Add ioctl to query available heaps") Signed-off-by: Dan Carpenter Signed-off-by: Greg Kroah-Hartman --- drivers/staging/android/ion/ion.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c index 396ded52ab70..209a8f7ef02b 100644 --- a/drivers/staging/android/ion/ion.c +++ b/drivers/staging/android/ion/ion.c @@ -1187,8 +1187,10 @@ int ion_query_heaps(struct ion_client *client, struct ion_heap_query *query) hdata.type = heap->type; hdata.heap_id = heap->id; - ret = copy_to_user(&buffer[cnt], - &hdata, sizeof(hdata)); + if (copy_to_user(&buffer[cnt], &hdata, sizeof(hdata))) { + ret = -EFAULT; + goto out; + } cnt++; if (cnt >= max_cnt) -- cgit v1.2.3-59-g8ed1b From 25633d1f5dd7ea35c77aae12c039a80e46abec01 Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Mon, 17 Oct 2016 16:37:20 +0000 Subject: greybus: arche-platform: Add missing of_node_put() in arche_platform_change_state() This node pointer is returned by of_find_compatible_node() with refcount incremented in this function. of_node_put() on it before exitting this function. This is detected by Coccinelle semantic patch. 
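Background for the one-line fix: of_find_compatible_node(), like the other of_find_* helpers, returns the node with its reference count raised, so every exit path must drop that reference. A minimal sketch of the pattern; the compatible string is a placeholder:

        struct device_node *np;
        struct platform_device *pdev;

        np = of_find_compatible_node(NULL, NULL, "vendor,device");     /* placeholder compatible */
        if (!np)
                return -ENODEV;

        pdev = of_find_device_by_node(np);
        if (!pdev) {
                of_node_put(np);        /* drop the reference on the error path too */
                return -ENODEV;
        }

        /* ... use pdev ... */
        of_node_put(np);
        return 0;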
Signed-off-by: Wei Yongjun Acked-by: Johan Hovold Signed-off-by: Greg Kroah-Hartman --- drivers/staging/greybus/arche-platform.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/staging/greybus/arche-platform.c b/drivers/staging/greybus/arche-platform.c index e36ee984485b..34307ac3f255 100644 --- a/drivers/staging/greybus/arche-platform.c +++ b/drivers/staging/greybus/arche-platform.c @@ -128,6 +128,7 @@ int arche_platform_change_state(enum arche_platform_state state, pdev = of_find_device_by_node(np); if (!pdev) { pr_err("arche-platform device not found\n"); + of_node_put(np); return -ENODEV; } -- cgit v1.2.3-59-g8ed1b From 1305f2b2f52af5986f44dfbb1a6fe58ae875aa61 Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Wed, 19 Oct 2016 13:17:53 +0000 Subject: greybus: es2: fix error return code in ap_probe() Fix to return a negative error code from the es2_arpc_in_enable() error handling case instead of 0, as done elsewhere in this function. Fixes: 9d9d3777a9db ("greybus: es2: Add a new bulk in endpoint for APBridgeA RPC") Signed-off-by: Wei Yongjun Acked-by: Viresh Kumar Reviewed-by: Johan Hovold Signed-off-by: Greg Kroah-Hartman --- drivers/staging/greybus/es2.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/staging/greybus/es2.c b/drivers/staging/greybus/es2.c index 071bb1cfd3ae..baab460eeaa3 100644 --- a/drivers/staging/greybus/es2.c +++ b/drivers/staging/greybus/es2.c @@ -1548,7 +1548,8 @@ static int ap_probe(struct usb_interface *interface, INIT_LIST_HEAD(&es2->arpcs); spin_lock_init(&es2->arpc_lock); - if (es2_arpc_in_enable(es2)) + retval = es2_arpc_in_enable(es2); + if (retval) goto error; retval = gb_hd_add(hd); -- cgit v1.2.3-59-g8ed1b From e866dd8aab76b6a0ee8428491e65fa5c83a6ae5a Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Fri, 14 Oct 2016 22:18:21 +0300 Subject: greybus: fix a leak on error in gb_module_create() We should release ->interfaces[0] as well. Fixes: b15d97d77017 ("greybus: core: add module abstraction") Signed-off-by: Dan Carpenter Acked-by: Viresh Kumar Acked-by: Johan Hovold Signed-off-by: Greg Kroah-Hartman --- drivers/staging/greybus/module.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/staging/greybus/module.c b/drivers/staging/greybus/module.c index 69f67ddbd4a3..660b4674a76f 100644 --- a/drivers/staging/greybus/module.c +++ b/drivers/staging/greybus/module.c @@ -127,7 +127,7 @@ struct gb_module *gb_module_create(struct gb_host_device *hd, u8 module_id, return module; err_put_interfaces: - for (--i; i > 0; --i) + for (--i; i >= 0; --i) gb_interface_put(module->interfaces[i]); put_device(&module->dev); -- cgit v1.2.3-59-g8ed1b From 44b3c7af02ca2701b6b90ee30c9d1d9c3ae07653 Mon Sep 17 00:00:00 2001 From: Juergen Gross Date: Tue, 11 Oct 2016 13:34:16 +0200 Subject: xenbus: advertise control feature flags The Xen docs specify several flags which a guest can set to advertise which values of the xenstore control/shutdown key it will recognize. This patch adds code to write all the relevant feature-flag keys. 
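Concretely, the setup loop added below writes one key per command whose flag is set, so a guest advertises the following xenstore entries (an illustrative listing derived from the handler table; "halt" has its flag cleared, and "suspend" is only present with CONFIG_HIBERNATE_CALLBACKS):

        control/feature-poweroff = "1"
        control/feature-reboot   = "1"
        control/feature-suspend  = "1"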
Based-on-patch-by: Paul Durrant Signed-off-by: Juergen Gross Reviewed-by: David Vrabel Reviewed-by: Paul Durrant Signed-off-by: David Vrabel --- drivers/xen/manage.c | 45 +++++++++++++++++++++++++++++---------------- 1 file changed, 29 insertions(+), 16 deletions(-) (limited to 'drivers') diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c index e12bd3635f83..26e5e8507f03 100644 --- a/drivers/xen/manage.c +++ b/drivers/xen/manage.c @@ -168,7 +168,9 @@ out: #endif /* CONFIG_HIBERNATE_CALLBACKS */ struct shutdown_handler { - const char *command; +#define SHUTDOWN_CMD_SIZE 11 + const char command[SHUTDOWN_CMD_SIZE]; + bool flag; void (*cb)(void); }; @@ -206,22 +208,22 @@ static void do_reboot(void) ctrl_alt_del(); } +static struct shutdown_handler shutdown_handlers[] = { + { "poweroff", true, do_poweroff }, + { "halt", false, do_poweroff }, + { "reboot", true, do_reboot }, +#ifdef CONFIG_HIBERNATE_CALLBACKS + { "suspend", true, do_suspend }, +#endif +}; + static void shutdown_handler(struct xenbus_watch *watch, const char **vec, unsigned int len) { char *str; struct xenbus_transaction xbt; int err; - static struct shutdown_handler handlers[] = { - { "poweroff", do_poweroff }, - { "halt", do_poweroff }, - { "reboot", do_reboot }, -#ifdef CONFIG_HIBERNATE_CALLBACKS - { "suspend", do_suspend }, -#endif - {NULL, NULL}, - }; - static struct shutdown_handler *handler; + int idx; if (shutting_down != SHUTDOWN_INVALID) return; @@ -238,13 +240,13 @@ static void shutdown_handler(struct xenbus_watch *watch, return; } - for (handler = &handlers[0]; handler->command; handler++) { - if (strcmp(str, handler->command) == 0) + for (idx = 0; idx < ARRAY_SIZE(shutdown_handlers); idx++) { + if (strcmp(str, shutdown_handlers[idx].command) == 0) break; } /* Only acknowledge commands which we are prepared to handle. */ - if (handler->cb) + if (idx < ARRAY_SIZE(shutdown_handlers)) xenbus_write(xbt, "control", "shutdown", ""); err = xenbus_transaction_end(xbt, 0); @@ -253,8 +255,8 @@ static void shutdown_handler(struct xenbus_watch *watch, goto again; } - if (handler->cb) { - handler->cb(); + if (idx < ARRAY_SIZE(shutdown_handlers)) { + shutdown_handlers[idx].cb(); } else { pr_info("Ignoring shutdown request: %s\n", str); shutting_down = SHUTDOWN_INVALID; @@ -310,6 +312,9 @@ static struct notifier_block xen_reboot_nb = { static int setup_shutdown_watcher(void) { int err; + int idx; +#define FEATURE_PATH_SIZE (SHUTDOWN_CMD_SIZE + sizeof("feature-")) + char node[FEATURE_PATH_SIZE]; err = register_xenbus_watch(&shutdown_watch); if (err) { @@ -326,6 +331,14 @@ static int setup_shutdown_watcher(void) } #endif + for (idx = 0; idx < ARRAY_SIZE(shutdown_handlers); idx++) { + if (!shutdown_handlers[idx].flag) + continue; + snprintf(node, FEATURE_PATH_SIZE, "feature-%s", + shutdown_handlers[idx].command); + xenbus_printf(XBT_NIL, "control", node, "%u", 1); + } + return 0; } -- cgit v1.2.3-59-g8ed1b From e1e5b3ff41983f506c3cbcf123fe7d682f61a8f1 Mon Sep 17 00:00:00 2001 From: Jan Beulich Date: Mon, 24 Oct 2016 09:03:49 -0600 Subject: xenbus: prefer list_for_each() This is more efficient than list_for_each_safe() when list modification is accompanied by breaking out of the loop. 
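The _safe variant keeps a lookahead cursor so the current entry can be deleted and iteration can still continue; when the loop only searches and breaks out once it has found (and possibly removed) its entry, the plain iterator is enough. A minimal sketch of both forms; is_stale() and matches() are placeholder predicates:

        struct watch_adapter *watch, *tmp;

        /* Deleting entries and continuing to iterate needs the _safe form: */
        list_for_each_entry_safe(watch, tmp, &u->watches, list) {
                if (is_stale(watch)) {                  /* placeholder predicate */
                        list_del(&watch->list);
                        kfree(watch);
                }
        }

        /* Searching and breaking out right after the (single) modification does not: */
        list_for_each_entry(watch, &u->watches, list) {
                if (matches(watch)) {                   /* placeholder predicate */
                        list_del(&watch->list);
                        kfree(watch);
                        break;
                }
        }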
Signed-off-by: Jan Beulich Reviewed-by: Juergen Gross Signed-off-by: David Vrabel --- drivers/xen/xenbus/xenbus_dev_frontend.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c index 7487971f9f78..fe60b12de920 100644 --- a/drivers/xen/xenbus/xenbus_dev_frontend.c +++ b/drivers/xen/xenbus/xenbus_dev_frontend.c @@ -364,7 +364,7 @@ out: static int xenbus_write_watch(unsigned msg_type, struct xenbus_file_priv *u) { - struct watch_adapter *watch, *tmp_watch; + struct watch_adapter *watch; char *path, *token; int err, rc; LIST_HEAD(staging_q); @@ -399,7 +399,7 @@ static int xenbus_write_watch(unsigned msg_type, struct xenbus_file_priv *u) } list_add(&watch->list, &u->watches); } else { - list_for_each_entry_safe(watch, tmp_watch, &u->watches, list) { + list_for_each_entry(watch, &u->watches, list) { if (!strcmp(watch->token, token) && !strcmp(watch->watch.node, path)) { unregister_xenbus_watch(&watch->watch); -- cgit v1.2.3-59-g8ed1b From c251f15c7dbf2cb72e7b2b282020b41f4e4d3665 Mon Sep 17 00:00:00 2001 From: Jan Beulich Date: Mon, 24 Oct 2016 09:05:18 -0600 Subject: xenbus: check return value of xenbus_scanf() Don't ignore errors here: Set backend state to unknown when unsuccessful. Signed-off-by: Jan Beulich Signed-off-by: David Vrabel --- drivers/xen/xenbus/xenbus_probe_frontend.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/xen/xenbus/xenbus_probe_frontend.c b/drivers/xen/xenbus/xenbus_probe_frontend.c index 611a23119675..6d40a972ffb2 100644 --- a/drivers/xen/xenbus/xenbus_probe_frontend.c +++ b/drivers/xen/xenbus/xenbus_probe_frontend.c @@ -335,7 +335,9 @@ static int backend_state; static void xenbus_reset_backend_state_changed(struct xenbus_watch *w, const char **v, unsigned int l) { - xenbus_scanf(XBT_NIL, v[XS_WATCH_PATH], "", "%i", &backend_state); + if (xenbus_scanf(XBT_NIL, v[XS_WATCH_PATH], "", "%i", + &backend_state) != 1) + backend_state = XenbusStateUnknown; printk(KERN_DEBUG "XENBUS: backend %s %s\n", v[XS_WATCH_PATH], xenbus_strstate(backend_state)); wake_up(&backend_state_wq); -- cgit v1.2.3-59-g8ed1b From dafa724bf582181d9a7d54f5cb4ca0bf8ef29269 Mon Sep 17 00:00:00 2001 From: "tang.junhui" Date: Fri, 21 Oct 2016 09:35:32 +0800 Subject: dm table: fix missing dm_put_target_type() in dm_table_add_target() dm_get_target_type() was previously called so any error returned from dm_table_add_target() must first call dm_put_target_type(). Otherwise the DM target module's reference count will leak and the associated kernel module will be unable to be removed. Also, leverage the fact that r is already -EINVAL and remove an extra newline. 
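The rule being enforced: once dm_get_target_type() has succeeded, every error exit must call dm_put_target_type(), here via the common 'bad' label, or the module reference taken by the get is never dropped. A minimal sketch of that acquire/error/release shape, with a placeholder check standing in for the real validations:

        int r = -EINVAL;

        tgt->type = dm_get_target_type(type);   /* takes a reference on the target module */
        if (!tgt->type)
                return -EINVAL;                 /* nothing acquired yet, plain return is fine */

        if (!target_params_ok(tgt)) {           /* placeholder for the checks above */
                tgt->error = "invalid target parameters";
                goto bad;                       /* every later failure must go through "bad" */
        }

        return 0;

 bad:
        dm_put_target_type(tgt->type);          /* balances dm_get_target_type() */
        return r;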
Fixes: 36a0456 ("dm table: add immutable feature") Fixes: cc6cbe1 ("dm table: add always writeable feature") Fixes: 3791e2f ("dm table: add singleton feature") Cc: stable@vger.kernel.org # 3.2+ Signed-off-by: tang.junhui Signed-off-by: Mike Snitzer --- drivers/md/dm-table.c | 24 +++++++++--------------- 1 file changed, 9 insertions(+), 15 deletions(-) (limited to 'drivers') diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 3e407a9cde1f..c4b53b332607 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -695,37 +695,32 @@ int dm_table_add_target(struct dm_table *t, const char *type, tgt->type = dm_get_target_type(type); if (!tgt->type) { - DMERR("%s: %s: unknown target type", dm_device_name(t->md), - type); + DMERR("%s: %s: unknown target type", dm_device_name(t->md), type); return -EINVAL; } if (dm_target_needs_singleton(tgt->type)) { if (t->num_targets) { - DMERR("%s: target type %s must appear alone in table", - dm_device_name(t->md), type); - return -EINVAL; + tgt->error = "singleton target type must appear alone in table"; + goto bad; } t->singleton = true; } if (dm_target_always_writeable(tgt->type) && !(t->mode & FMODE_WRITE)) { - DMERR("%s: target type %s may not be included in read-only tables", - dm_device_name(t->md), type); - return -EINVAL; + tgt->error = "target type may not be included in a read-only table"; + goto bad; } if (t->immutable_target_type) { if (t->immutable_target_type != tgt->type) { - DMERR("%s: immutable target type %s cannot be mixed with other target types", - dm_device_name(t->md), t->immutable_target_type->name); - return -EINVAL; + tgt->error = "immutable target type cannot be mixed with other target types"; + goto bad; } } else if (dm_target_is_immutable(tgt->type)) { if (t->num_targets) { - DMERR("%s: immutable target type %s cannot be mixed with other target types", - dm_device_name(t->md), tgt->type->name); - return -EINVAL; + tgt->error = "immutable target type cannot be mixed with other target types"; + goto bad; } t->immutable_target_type = tgt->type; } @@ -740,7 +735,6 @@ int dm_table_add_target(struct dm_table *t, const char *type, */ if (!adjoin(t, tgt)) { tgt->error = "Gap in table"; - r = -EINVAL; goto bad; } -- cgit v1.2.3-59-g8ed1b From 0a3ffab93fe52530602fe47cd74802cffdb19c05 Mon Sep 17 00:00:00 2001 From: Arve Hjønnevåg Date: Mon, 24 Oct 2016 15:20:29 +0200 Subject: ANDROID: binder: Add strong ref checks MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Prevent using a binder_ref with only weak references where a strong reference is required. 
Signed-off-by: Arve Hjønnevåg Signed-off-by: Martijn Coenen Cc: stable Signed-off-by: Greg Kroah-Hartman --- drivers/android/binder.c | 30 +++++++++++++++++++++--------- 1 file changed, 21 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/android/binder.c b/drivers/android/binder.c index 562af94bec35..3681759c22d7 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -1002,7 +1002,7 @@ static int binder_dec_node(struct binder_node *node, int strong, int internal) static struct binder_ref *binder_get_ref(struct binder_proc *proc, - uint32_t desc) + u32 desc, bool need_strong_ref) { struct rb_node *n = proc->refs_by_desc.rb_node; struct binder_ref *ref; @@ -1010,12 +1010,16 @@ static struct binder_ref *binder_get_ref(struct binder_proc *proc, while (n) { ref = rb_entry(n, struct binder_ref, rb_node_desc); - if (desc < ref->desc) + if (desc < ref->desc) { n = n->rb_left; - else if (desc > ref->desc) + } else if (desc > ref->desc) { n = n->rb_right; - else + } else if (need_strong_ref && !ref->strong) { + binder_user_error("tried to use weak ref as strong ref\n"); + return NULL; + } else { return ref; + } } return NULL; } @@ -1285,7 +1289,10 @@ static void binder_transaction_buffer_release(struct binder_proc *proc, } break; case BINDER_TYPE_HANDLE: case BINDER_TYPE_WEAK_HANDLE: { - struct binder_ref *ref = binder_get_ref(proc, fp->handle); + struct binder_ref *ref; + + ref = binder_get_ref(proc, fp->handle, + fp->type == BINDER_TYPE_HANDLE); if (ref == NULL) { pr_err("transaction release %d bad handle %d\n", @@ -1380,7 +1387,7 @@ static void binder_transaction(struct binder_proc *proc, if (tr->target.handle) { struct binder_ref *ref; - ref = binder_get_ref(proc, tr->target.handle); + ref = binder_get_ref(proc, tr->target.handle, true); if (ref == NULL) { binder_user_error("%d:%d got transaction to invalid handle\n", proc->pid, thread->pid); @@ -1589,7 +1596,10 @@ static void binder_transaction(struct binder_proc *proc, } break; case BINDER_TYPE_HANDLE: case BINDER_TYPE_WEAK_HANDLE: { - struct binder_ref *ref = binder_get_ref(proc, fp->handle); + struct binder_ref *ref; + + ref = binder_get_ref(proc, fp->handle, + fp->type == BINDER_TYPE_HANDLE); if (ref == NULL) { binder_user_error("%d:%d got transaction with invalid handle, %d\n", @@ -1800,7 +1810,9 @@ static int binder_thread_write(struct binder_proc *proc, ref->desc); } } else - ref = binder_get_ref(proc, target); + ref = binder_get_ref(proc, target, + cmd == BC_ACQUIRE || + cmd == BC_RELEASE); if (ref == NULL) { binder_user_error("%d:%d refcount change on invalid ref %d\n", proc->pid, thread->pid, target); @@ -1996,7 +2008,7 @@ static int binder_thread_write(struct binder_proc *proc, if (get_user(cookie, (binder_uintptr_t __user *)ptr)) return -EFAULT; ptr += sizeof(binder_uintptr_t); - ref = binder_get_ref(proc, target); + ref = binder_get_ref(proc, target, false); if (ref == NULL) { binder_user_error("%d:%d %s invalid ref %d\n", proc->pid, thread->pid, -- cgit v1.2.3-59-g8ed1b From 4afb604e2d14d429ac9e1fd84b952602853b2df5 Mon Sep 17 00:00:00 2001 From: Arve Hjønnevåg Date: Mon, 24 Oct 2016 15:20:30 +0200 Subject: ANDROID: binder: Clear binder and cookie when setting handle in flat binder struct MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Prevents leaking pointers between processes Signed-off-by: Arve Hjønnevåg Signed-off-by: Martijn Coenen Cc: stable Signed-off-by: Greg Kroah-Hartman --- drivers/android/binder.c | 5 +++++ 1 file changed, 5 
insertions(+) (limited to 'drivers') diff --git a/drivers/android/binder.c b/drivers/android/binder.c index 3681759c22d7..3c71b982bf2a 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -1584,7 +1584,9 @@ static void binder_transaction(struct binder_proc *proc, fp->type = BINDER_TYPE_HANDLE; else fp->type = BINDER_TYPE_WEAK_HANDLE; + fp->binder = 0; fp->handle = ref->desc; + fp->cookie = 0; binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE, &thread->todo); @@ -1634,7 +1636,9 @@ static void binder_transaction(struct binder_proc *proc, return_error = BR_FAILED_REPLY; goto err_binder_get_ref_for_node_failed; } + fp->binder = 0; fp->handle = new_ref->desc; + fp->cookie = 0; binder_inc_ref(new_ref, fp->type == BINDER_TYPE_HANDLE, NULL); trace_binder_transaction_ref_to_ref(t, ref, new_ref); @@ -1688,6 +1692,7 @@ static void binder_transaction(struct binder_proc *proc, binder_debug(BINDER_DEBUG_TRANSACTION, " fd %d -> %d\n", fp->handle, target_fd); /* TODO: fput? */ + fp->binder = 0; fp->handle = target_fd; } break; -- cgit v1.2.3-59-g8ed1b From 43605e293eb13c07acb546c14f407a271837af17 Mon Sep 17 00:00:00 2001 From: Alexander Usyskin Date: Wed, 19 Oct 2016 01:34:48 +0300 Subject: mei: txe: don't clean an unprocessed interrupt cause. SEC registers are not accessible when the TXE device is in low power state, hence the SEC interrupt cannot be processed if device is not awake. In some rare cases entrance to low power state (aliveness off) and input ready bits can be signaled at the same time, resulting in communication stall as input ready won't be signaled again after waking up. To resolve this IPC_HHIER_SEC bit in HHISR_REG should not be cleaned if the interrupt is not processed. Cc: stable@vger.kernel.org Signed-off-by: Alexander Usyskin Signed-off-by: Tomas Winkler Signed-off-by: Greg Kroah-Hartman --- drivers/misc/mei/hw-txe.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/misc/mei/hw-txe.c b/drivers/misc/mei/hw-txe.c index e6e5e55a12ed..60415a2bfcbd 100644 --- a/drivers/misc/mei/hw-txe.c +++ b/drivers/misc/mei/hw-txe.c @@ -981,11 +981,13 @@ static bool mei_txe_check_and_ack_intrs(struct mei_device *dev, bool do_ack) hisr = mei_txe_br_reg_read(hw, HISR_REG); aliveness = mei_txe_aliveness_get(dev); - if (hhisr & IPC_HHIER_SEC && aliveness) + if (hhisr & IPC_HHIER_SEC && aliveness) { ipc_isr = mei_txe_sec_reg_read_silent(hw, SEC_IPC_HOST_INT_STATUS_REG); - else + } else { ipc_isr = 0; + hhisr &= ~IPC_HHIER_SEC; + } generated = generated || (hisr & HISR_INT_STS_MSK) || -- cgit v1.2.3-59-g8ed1b From 423221d1745b53656db896bd34646d09d620c759 Mon Sep 17 00:00:00 2001 From: "John W. Linville" Date: Mon, 24 Oct 2016 15:13:25 -0400 Subject: nbd: fix incorrect unlock of nbd->sock_lock in sock_shutdown Commit 0eadf37afc250 ("nbd: allow block mq to deal with timeouts") changed normal usage of nbd->sock_lock to use spin_lock/spin_unlock rather than the *_irq variants, but it missed this unlock in an error path. Found by Coverity, CID 1373871. Signed-off-by: John W. 
Linville Cc: Josef Bacik Cc: Jens Axboe Cc: Markus Pargmann Fixes: 0eadf37afc250 ("nbd: allow block mq to deal with timeouts") Signed-off-by: Jens Axboe --- drivers/block/nbd.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index ba405b55329f..19a16b2dbb91 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -164,7 +164,7 @@ static void sock_shutdown(struct nbd_device *nbd) spin_lock(&nbd->sock_lock); if (!nbd->sock) { - spin_unlock_irq(&nbd->sock_lock); + spin_unlock(&nbd->sock_lock); return; } -- cgit v1.2.3-59-g8ed1b From 2f1d407adab026b34a105ed27b1d4d7e910c4448 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Mon, 24 Oct 2016 23:20:25 +0200 Subject: cpufreq: intel_pstate: Always set max P-state in performance mode The only times at which intel_pstate checks the policy set for a given CPU is the initialization of that CPU and updates of its policy settings from cpufreq when intel_pstate_set_policy() is invoked. That is insufficient, however, because intel_pstate uses the same P-state selection function for all CPUs regardless of the policy setting for each of them and the P-state limits are shared between them. Thus if the policy is set to "performance" for a particular CPU, it may not behave as expected if the cpufreq settings are changed subsequently for another CPU. That can be easily demonstrated by writing "performance" to scaling_governor for all CPUs and then switching it to "powersave" for one of them in which case all of the CPUs will behave as though their scaling_governor were all "powersave" (even though the policy still appears to be "performance" for the remaining CPUs). Fix this problem by modifying intel_pstate_adjust_busy_pstate() to always set the P-state to the maximum allowed by the current limits for all CPUs whose policy is set to "performance". Note that it still is recommended to always change the policy setting in the same way for all CPUs even with this fix applied to avoid confusion. Signed-off-by: Rafael J. Wysocki --- drivers/cpufreq/intel_pstate.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index ac7c58d20b58..4737520ec823 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -179,6 +179,7 @@ struct _pid { /** * struct cpudata - Per CPU instance data storage * @cpu: CPU number for this instance data + * @policy: CPUFreq policy value * @update_util: CPUFreq utility callback information * @update_util_set: CPUFreq utility callback is set * @iowait_boost: iowait-related boost fraction @@ -201,6 +202,7 @@ struct _pid { struct cpudata { int cpu; + unsigned int policy; struct update_util_data update_util; bool update_util_set; @@ -1337,7 +1339,8 @@ static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu) from = cpu->pstate.current_pstate; - target_pstate = pstate_funcs.get_target_pstate(cpu); + target_pstate = cpu->policy == CPUFREQ_POLICY_PERFORMANCE ? 
+ cpu->pstate.turbo_pstate : pstate_funcs.get_target_pstate(cpu); intel_pstate_update_pstate(cpu, target_pstate); @@ -1504,6 +1507,8 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy) policy->cpuinfo.max_freq, policy->max); cpu = all_cpu_data[policy->cpu]; + cpu->policy = policy->policy; + if (cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate && policy->max < policy->cpuinfo.max_freq && policy->max > cpu->pstate.max_pstate * cpu->pstate.scaling) { @@ -1511,7 +1516,7 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy) policy->max = policy->cpuinfo.max_freq; } - if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) { + if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) { limits = &performance_limits; if (policy->max >= policy->cpuinfo.max_freq) { pr_debug("set performance\n"); @@ -1547,7 +1552,7 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy) limits->max_perf = round_up(limits->max_perf, FRAC_BITS); out: - if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) { + if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) { /* * NOHZ_FULL CPUs need this as the governor callback may not * be invoked on them. -- cgit v1.2.3-59-g8ed1b From 407a3aee6ee2d2cb46d9ba3fc380bc29f35d020c Mon Sep 17 00:00:00 2001 From: Long Li Date: Wed, 5 Oct 2016 16:57:46 -0700 Subject: hv: do not lose pending heartbeat vmbus packets The host keeps sending heartbeat packets independent of the guest responding to them. Even though we respond to the heartbeat messages at interrupt level, we can have situations where there maybe multiple heartbeat messages pending that have not been responded to. For instance this occurs when the VM is paused and the host continues to send the heartbeat messages. Address this issue by draining and responding to all the heartbeat messages that maybe pending. Signed-off-by: Long Li Signed-off-by: K. Y. Srinivasan CC: Stable Signed-off-by: Greg Kroah-Hartman --- drivers/hv/hv_util.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c index 4aa3cb63fd41..bcd06306f3e8 100644 --- a/drivers/hv/hv_util.c +++ b/drivers/hv/hv_util.c @@ -314,10 +314,14 @@ static void heartbeat_onchannelcallback(void *context) u8 *hbeat_txf_buf = util_heartbeat.recv_buffer; struct icmsg_negotiate *negop = NULL; - vmbus_recvpacket(channel, hbeat_txf_buf, - PAGE_SIZE, &recvlen, &requestid); + while (1) { + + vmbus_recvpacket(channel, hbeat_txf_buf, + PAGE_SIZE, &recvlen, &requestid); + + if (!recvlen) + break; - if (recvlen > 0) { icmsghdrp = (struct icmsg_hdr *)&hbeat_txf_buf[ sizeof(struct vmbuspipe_hdr)]; -- cgit v1.2.3-59-g8ed1b From 991d5add50a5bb6ab8f12f2129f5c7487f6baaf6 Mon Sep 17 00:00:00 2001 From: Stefan Wahren Date: Sat, 10 Sep 2016 12:53:21 +0000 Subject: usb: chipidea: host: fix NULL ptr dereference during shutdown After commit b09b5224fe86 ("usb: chipidea: implement platform shutdown callback") and commit 43a404577a93 ("usb: chipidea: host: set host to be null after hcd is freed") a NULL pointer dereference is caused on i.MX23 during shutdown. So ensure that role is set to CI_ROLE_END and we finish interrupt handling before the hcd is deallocated. This avoids the NULL pointer dereference. 
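The teardown ordering is the point of the fix: stop new work from reaching the interrupt handler, wait for any handler instance already running, and only then free the data it dereferences. As a minimal sketch mirroring the hunk below:

        usb_remove_hcd(hcd);
        ci->role = CI_ROLE_END;         /* the IRQ handler no longer dispatches to the host role */
        synchronize_irq(ci->irq);       /* wait for a handler that is already running to finish */
        usb_put_hcd(hcd);               /* now it is safe to free the hcd */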
Suggested-by: Alan Stern Signed-off-by: Stefan Wahren Fixes: b09b5224fe86 ("usb: chipidea: implement platform shutdown callback") Signed-off-by: Peter Chen --- drivers/usb/chipidea/host.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers') diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c index 96ae69502c86..111b0e0b8698 100644 --- a/drivers/usb/chipidea/host.c +++ b/drivers/usb/chipidea/host.c @@ -188,6 +188,8 @@ static void host_stop(struct ci_hdrc *ci) if (hcd) { usb_remove_hcd(hcd); + ci->role = CI_ROLE_END; + synchronize_irq(ci->irq); usb_put_hcd(hcd); if (ci->platdata->reg_vbus && !ci_otg_is_fsm_mode(ci) && (ci->platdata->flags & CI_HDRC_TURN_VBUS_EARLY_ON)) -- cgit v1.2.3-59-g8ed1b From ae824f00241f495e8d55ebdc0341f3ce61a77da6 Mon Sep 17 00:00:00 2001 From: Ruqiang Ju Date: Mon, 24 Oct 2016 16:39:49 +0800 Subject: i2c: hix5hd2: allow build with ARCH_HISI This driver should be buildable with ARCH_HISI, because some of other HiSilicon SoCs also use it. Signed-off-by: Ruqiang Ju Signed-off-by: Wolfram Sang --- drivers/i2c/busses/Kconfig | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index 6d94e2ec5b4f..41abb20f082d 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -79,12 +79,12 @@ config I2C_AMD8111 config I2C_HIX5HD2 tristate "Hix5hd2 high-speed I2C driver" - depends on ARCH_HIX5HD2 || COMPILE_TEST + depends on ARCH_HISI || ARCH_HIX5HD2 || COMPILE_TEST help - Say Y here to include support for high-speed I2C controller in the - Hisilicon based hix5hd2 SoCs. + Say Y here to include support for the high-speed I2C controller + used in HiSilicon hix5hd2 SoCs. - This driver can also be built as a module. If so, the module + This driver can also be built as a module. If so, the module will be called i2c-hix5hd2. config I2C_I801 -- cgit v1.2.3-59-g8ed1b From 399c168ab5ab5e12ed55b6c91d61c24eb84c9164 Mon Sep 17 00:00:00 2001 From: David Wu Date: Sat, 22 Oct 2016 16:43:42 +0800 Subject: i2c: rk3x: Give the tuning value 0 during rk3x_i2c_v0_calc_timings We found a bug that i2c transfer sometimes failed on 3066a board with stabel-4.8, the con register would be updated by uninitialized tuning value, it made the i2c transfer failed. So give the tuning value to be zero during rk3x_i2c_v0_calc_timings. Signed-off-by: David Wu Tested-by: Andy Yan Reviewed-by: Douglas Anderson Signed-off-by: Wolfram Sang Cc: stable@kernel.org --- drivers/i2c/busses/i2c-rk3x.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers') diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c index 50702c7bb244..df220666d627 100644 --- a/drivers/i2c/busses/i2c-rk3x.c +++ b/drivers/i2c/busses/i2c-rk3x.c @@ -694,6 +694,8 @@ static int rk3x_i2c_v0_calc_timings(unsigned long clk_rate, t_calc->div_low--; t_calc->div_high--; + /* Give the tuning value 0, that would not update con register */ + t_calc->tuning = 0; /* Maximum divider supported by hw is 0xffff */ if (t_calc->div_low > 0xffff) { t_calc->div_low = 0xffff; -- cgit v1.2.3-59-g8ed1b From 6a676fb69dcbf3310b9e462c1db66c8e7f6ead38 Mon Sep 17 00:00:00 2001 From: Ralf Ramsauer Date: Mon, 17 Oct 2016 15:59:57 +0200 Subject: i2c: mark device nodes only in case of successful instantiation Instantiated I2C device nodes are marked with OF_POPULATE. This was introduced in 4f001fd30145a6. On unloading, loaded device nodes will of course be unmarked. 
The problem are nodes that fail during initialisation: If a node fails, it won't be unloaded and hence not be unmarked. If a I2C driver module is unloaded and reloaded, it will skip nodes that failed before. Skip device nodes that are already populated and mark them only in case of success. Fixes: 4f001fd30145a6 ("i2c: Mark instantiated device nodes with OF_POPULATE") Signed-off-by: Ralf Ramsauer Reviewed-by: Geert Uytterhoeven Acked-by: Pantelis Antoniou [wsa: use 14-digit commit sha] Signed-off-by: Wolfram Sang Cc: stable@kernel.org --- drivers/i2c/i2c-core.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c index 5ab67219f71e..1704fc84d647 100644 --- a/drivers/i2c/i2c-core.c +++ b/drivers/i2c/i2c-core.c @@ -1681,6 +1681,7 @@ static struct i2c_client *of_i2c_register_device(struct i2c_adapter *adap, static void of_i2c_register_devices(struct i2c_adapter *adap) { struct device_node *bus, *node; + struct i2c_client *client; /* Only register child devices if the adapter has a node pointer set */ if (!adap->dev.of_node) @@ -1695,7 +1696,14 @@ static void of_i2c_register_devices(struct i2c_adapter *adap) for_each_available_child_of_node(bus, node) { if (of_node_test_and_set_flag(node, OF_POPULATED)) continue; - of_i2c_register_device(adap, node); + + client = of_i2c_register_device(adap, node); + if (IS_ERR(client)) { + dev_warn(&adap->dev, + "Failed to create I2C device for %s\n", + node->full_name); + of_node_clear_flag(node, OF_POPULATED); + } } of_node_put(bus); @@ -2299,6 +2307,7 @@ static int of_i2c_notify(struct notifier_block *nb, unsigned long action, if (IS_ERR(client)) { dev_err(&adap->dev, "failed to create client for '%s'\n", rd->dn->full_name); + of_node_clear_flag(rd->dn, OF_POPULATED); return notifier_from_errno(PTR_ERR(client)); } break; -- cgit v1.2.3-59-g8ed1b From 17791650c356b3cb220946d697d89afc1e32c315 Mon Sep 17 00:00:00 2001 From: Greg Ungerer Date: Mon, 17 Oct 2016 11:54:05 +1000 Subject: i2c: allow configuration of imx driver for ColdFire architecture The i2c controller used by Freescales iMX processors is the same hardware module used on Freescales ColdFire family of processors. We can use the existing i2c-imx driver on ColdFire family members. Modify the configuration to allow it to be selected when compiling for ColdFire targets. Signed-off-by: Greg Ungerer Signed-off-by: Wolfram Sang --- drivers/i2c/busses/Kconfig | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index 41abb20f082d..d252276feadf 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -589,10 +589,10 @@ config I2C_IMG config I2C_IMX tristate "IMX I2C interface" - depends on ARCH_MXC || ARCH_LAYERSCAPE + depends on ARCH_MXC || ARCH_LAYERSCAPE || COLDFIRE help Say Y here if you want to use the IIC bus controller on - the Freescale i.MX/MXC or Layerscape processors. + the Freescale i.MX/MXC, Layerscape or ColdFire processors. This driver can also be built as a module. If so, the module will be called i2c-imx. -- cgit v1.2.3-59-g8ed1b From 3855ada848dbdbeae39fcf9eaaf6a7678315f153 Mon Sep 17 00:00:00 2001 From: Javier Martinez Canillas Date: Tue, 18 Oct 2016 18:01:45 -0300 Subject: i2c: jz4780: Fix module autoload If the driver is built as a module, autoload won't work because the module alias information is not filled. 
So user-space can't match the registered device with the corresponding module. Export the module alias information using the MODULE_DEVICE_TABLE() macro. Signed-off-by: Javier Martinez Canillas Signed-off-by: Wolfram Sang --- drivers/i2c/busses/i2c-jz4780.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/i2c/busses/i2c-jz4780.c b/drivers/i2c/busses/i2c-jz4780.c index b8ea62105f42..30132c3957cd 100644 --- a/drivers/i2c/busses/i2c-jz4780.c +++ b/drivers/i2c/busses/i2c-jz4780.c @@ -729,6 +729,7 @@ static const struct of_device_id jz4780_i2c_of_matches[] = { { .compatible = "ingenic,jz4780-i2c", }, { /* sentinel */ } }; +MODULE_DEVICE_TABLE(of, jz4780_i2c_of_matches); static int jz4780_i2c_probe(struct platform_device *pdev) { -- cgit v1.2.3-59-g8ed1b From 06e7b10a8711d10b4c4c21ab1aac3f1529a084df Mon Sep 17 00:00:00 2001 From: Javier Martinez Canillas Date: Tue, 18 Oct 2016 18:01:46 -0300 Subject: i2c: xlp9xx: Fix module autoload If the driver is built as a module, autoload won't work because the module alias information is not filled. So user-space can't match the registered device with the corresponding module. Export the module alias information using the MODULE_DEVICE_TABLE() macro. Signed-off-by: Javier Martinez Canillas Signed-off-by: Wolfram Sang --- drivers/i2c/busses/i2c-xlp9xx.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/i2c/busses/i2c-xlp9xx.c b/drivers/i2c/busses/i2c-xlp9xx.c index 2a972ed7aa0d..e29ff37a43bd 100644 --- a/drivers/i2c/busses/i2c-xlp9xx.c +++ b/drivers/i2c/busses/i2c-xlp9xx.c @@ -426,6 +426,7 @@ static const struct of_device_id xlp9xx_i2c_of_match[] = { { .compatible = "netlogic,xlp980-i2c", }, { /* sentinel */ }, }; +MODULE_DEVICE_TABLE(of, xlp9xx_i2c_of_match); #ifdef CONFIG_ACPI static const struct acpi_device_id xlp9xx_i2c_acpi_ids[] = { -- cgit v1.2.3-59-g8ed1b From 2cb496db3d56baa86d53022044c4d25ffe7fa038 Mon Sep 17 00:00:00 2001 From: Javier Martinez Canillas Date: Tue, 18 Oct 2016 18:01:47 -0300 Subject: i2c: xlr: Fix module autoload for OF registration If the driver is built as a module, autoload won't work because the module alias information is not filled. So user-space can't match the registered device with the corresponding module. Export the module alias information using the MODULE_DEVICE_TABLE() macro. Signed-off-by: Javier Martinez Canillas Signed-off-by: Wolfram Sang --- drivers/i2c/busses/i2c-xlr.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/i2c/busses/i2c-xlr.c b/drivers/i2c/busses/i2c-xlr.c index 0968f59b6df5..ad17d88d8573 100644 --- a/drivers/i2c/busses/i2c-xlr.c +++ b/drivers/i2c/busses/i2c-xlr.c @@ -358,6 +358,7 @@ static const struct of_device_id xlr_i2c_dt_ids[] = { }, { } }; +MODULE_DEVICE_TABLE(of, xlr_i2c_dt_ids); static int xlr_i2c_probe(struct platform_device *pdev) { -- cgit v1.2.3-59-g8ed1b From 60a951af8e1656e2a17a96d64941aafe0668d750 Mon Sep 17 00:00:00 2001 From: Javier Martinez Canillas Date: Tue, 18 Oct 2016 18:01:48 -0300 Subject: i2c: digicolor: Fix module autoload If the driver is built as a module, autoload won't work because the module alias information is not filled. So user-space can't match the registered device with the corresponding module. Export the module alias information using the MODULE_DEVICE_TABLE() macro. 
Signed-off-by: Javier Martinez Canillas Acked-by: Baruch Siach Signed-off-by: Wolfram Sang --- drivers/i2c/busses/i2c-digicolor.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/i2c/busses/i2c-digicolor.c b/drivers/i2c/busses/i2c-digicolor.c index 9604024e0eb0..49f2084f7bb5 100644 --- a/drivers/i2c/busses/i2c-digicolor.c +++ b/drivers/i2c/busses/i2c-digicolor.c @@ -368,6 +368,7 @@ static const struct of_device_id dc_i2c_match[] = { { .compatible = "cnxt,cx92755-i2c" }, { }, }; +MODULE_DEVICE_TABLE(of, dc_i2c_match); static struct platform_driver dc_i2c_driver = { .probe = dc_i2c_probe, -- cgit v1.2.3-59-g8ed1b From 603616017c35f4d0fbdbcace72adf9bf949c4a65 Mon Sep 17 00:00:00 2001 From: Hoan Tran Date: Mon, 10 Oct 2016 10:13:10 -0700 Subject: i2c: xgene: Avoid dma_buffer overrun SMBus block command uses the first byte of buffer for the data length. The dma_buffer should be increased by 1 to avoid the overrun issue. Reported-by: Phil Endecott Signed-off-by: Hoan Tran Signed-off-by: Wolfram Sang Cc: stable@kernel.org --- drivers/i2c/busses/i2c-xgene-slimpro.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/i2c/busses/i2c-xgene-slimpro.c b/drivers/i2c/busses/i2c-xgene-slimpro.c index 263685c7a512..05cf192ef1ac 100644 --- a/drivers/i2c/busses/i2c-xgene-slimpro.c +++ b/drivers/i2c/busses/i2c-xgene-slimpro.c @@ -105,7 +105,7 @@ struct slimpro_i2c_dev { struct mbox_chan *mbox_chan; struct mbox_client mbox_client; struct completion rd_complete; - u8 dma_buffer[I2C_SMBUS_BLOCK_MAX]; + u8 dma_buffer[I2C_SMBUS_BLOCK_MAX + 1]; /* dma_buffer[0] is used for length */ u32 *resp_msg; }; -- cgit v1.2.3-59-g8ed1b From ba9ad2af7019956b990ad654c56da5bac1e8b71b Mon Sep 17 00:00:00 2001 From: Jean Delvare Date: Tue, 11 Oct 2016 13:13:27 +0200 Subject: i2c: i801: Fix I2C Block Read on 8-Series/C220 and later Starting with the 8-Series/C220 PCH (Lynx Point), the SMBus controller includes a SPD EEPROM protection mechanism. Once the SPD Write Disable bit is set, only reads are allowed to slave addresses 0x50-0x57. However the legacy implementation of I2C Block Read since the ICH5 looks like a write, and is therefore blocked by the SPD protection mechanism. This causes the eeprom and at24 drivers to fail. So assume that I2C Block Read is implemented as an actual read on these chipsets. I tested it on my Q87 chipset and it seems to work just fine. Signed-off-by: Jean Delvare Tested-by: Jarkko Nikula [wsa: rebased to v4.9-rc2] Signed-off-by: Wolfram Sang --- drivers/i2c/busses/i2c-i801.c | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c index 08847e8b8998..eb3627f35d12 100644 --- a/drivers/i2c/busses/i2c-i801.c +++ b/drivers/i2c/busses/i2c-i801.c @@ -146,6 +146,7 @@ #define SMBHSTCFG_HST_EN 1 #define SMBHSTCFG_SMB_SMI_EN 2 #define SMBHSTCFG_I2C_EN 4 +#define SMBHSTCFG_SPD_WD 0x10 /* TCO configuration bits for TCOCTL */ #define TCOCTL_EN 0x0100 @@ -865,9 +866,16 @@ static s32 i801_access(struct i2c_adapter *adap, u16 addr, block = 1; break; case I2C_SMBUS_I2C_BLOCK_DATA: - /* NB: page 240 of ICH5 datasheet shows that the R/#W - * bit should be cleared here, even when reading */ - outb_p((addr & 0x7f) << 1, SMBHSTADD(priv)); + /* + * NB: page 240 of ICH5 datasheet shows that the R/#W + * bit should be cleared here, even when reading. 
+ * However if SPD Write Disable is set (Lynx Point and later), + * the read will fail if we don't set the R/#W bit. + */ + outb_p(((addr & 0x7f) << 1) | + ((priv->original_hstcfg & SMBHSTCFG_SPD_WD) ? + (read_write & 0x01) : 0), + SMBHSTADD(priv)); if (read_write == I2C_SMBUS_READ) { /* NB: page 240 of ICH5 datasheet also shows * that DATA1 is the cmd field when reading */ @@ -1573,6 +1581,8 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id) /* Disable SMBus interrupt feature if SMBus using SMI# */ priv->features &= ~FEATURE_IRQ; } + if (temp & SMBHSTCFG_SPD_WD) + dev_info(&dev->dev, "SPD Write Disable is set\n"); /* Clear special mode bits */ if (priv->features & (FEATURE_SMBUS_PEC | FEATURE_BLOCK_BUFFER)) -- cgit v1.2.3-59-g8ed1b From 171e23e150acfb285f1772cedf04d35694af740b Mon Sep 17 00:00:00 2001 From: Jarkko Nikula Date: Thu, 29 Sep 2016 16:04:59 +0300 Subject: i2c: designware: Avoid aborted transfers with fast reacting I2C slaves I2C DesignWare may abort transfer with arbitration lost if I2C slave pulls SDA down quickly after falling edge of SCL. Reason for this is unknown but after trial and error it was found this can be avoided by enabling non-zero SDA RX hold time for the receiver. By the specification SDA RX hold time extends incoming SDA low to high transition by n * ic_clk cycles but only when SCL is high. However it seems to help avoid above faulty arbitration lost error. Bits 23:16 in IC_SDA_HOLD register define the SDA RX hold time for the receiver. Be conservative and enable 1 ic_clk cycle long hold time in case boot firmware hasn't set it up. Reported-by: Jukka Laitinen Signed-off-by: Jarkko Nikula Tested-by: Jukka Laitinen Signed-off-by: Wolfram Sang --- drivers/i2c/busses/i2c-designware-core.c | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c index 1fe93c43215c..11e866d05368 100644 --- a/drivers/i2c/busses/i2c-designware-core.c +++ b/drivers/i2c/busses/i2c-designware-core.c @@ -95,6 +95,9 @@ #define DW_IC_STATUS_TFE BIT(2) #define DW_IC_STATUS_MST_ACTIVITY BIT(5) +#define DW_IC_SDA_HOLD_RX_SHIFT 16 +#define DW_IC_SDA_HOLD_RX_MASK GENMASK(23, DW_IC_SDA_HOLD_RX_SHIFT) + #define DW_IC_ERR_TX_ABRT 0x1 #define DW_IC_TAR_10BITADDR_MASTER BIT(12) @@ -420,12 +423,20 @@ int i2c_dw_init(struct dw_i2c_dev *dev) /* Configure SDA Hold Time if required */ reg = dw_readl(dev, DW_IC_COMP_VERSION); if (reg >= DW_IC_SDA_HOLD_MIN_VERS) { - if (dev->sda_hold_time) { - dw_writel(dev, dev->sda_hold_time, DW_IC_SDA_HOLD); - } else { + if (!dev->sda_hold_time) { /* Keep previous hold time setting if no one set it */ dev->sda_hold_time = dw_readl(dev, DW_IC_SDA_HOLD); } + /* + * Workaround for avoiding TX arbitration lost in case I2C + * slave pulls SDA down "too quickly" after falling egde of + * SCL by enabling non-zero SDA RX hold. Specification says it + * extends incoming SDA low to high transition while SCL is + * high but it apprears to help also above issue. 
+ */ + if (!(dev->sda_hold_time & DW_IC_SDA_HOLD_RX_MASK)) + dev->sda_hold_time |= 1 << DW_IC_SDA_HOLD_RX_SHIFT; + dw_writel(dev, dev->sda_hold_time, DW_IC_SDA_HOLD); } else { dev_warn(dev->dev, "Hardware too old to adjust SDA hold time.\n"); -- cgit v1.2.3-59-g8ed1b From 533169d164c6b4c8571d0d48779f6ff6be593d72 Mon Sep 17 00:00:00 2001 From: Stefan Agner Date: Mon, 26 Sep 2016 17:18:58 -0700 Subject: i2c: imx: defer probe if bus recovery GPIOs are not ready MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Some SoC might load the GPIO driver after the I2C driver and using the I2C bus recovery mechanism via GPIOs. In this case it is crucial to defer probing if the GPIO request functions do so, otherwise the I2C driver gets loaded without recovery mechanisms enabled. Signed-off-by: Stefan Agner Acked-by: Uwe Kleine-König Acked-by: Li Yang Signed-off-by: Wolfram Sang Cc: stable@kernel.org --- drivers/i2c/busses/i2c-imx.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c index 592a8f26a708..47fc1f1acff7 100644 --- a/drivers/i2c/busses/i2c-imx.c +++ b/drivers/i2c/busses/i2c-imx.c @@ -1009,10 +1009,13 @@ static int i2c_imx_init_recovery_info(struct imx_i2c_struct *i2c_imx, rinfo->sda_gpio = of_get_named_gpio(pdev->dev.of_node, "sda-gpios", 0); rinfo->scl_gpio = of_get_named_gpio(pdev->dev.of_node, "scl-gpios", 0); - if (!gpio_is_valid(rinfo->sda_gpio) || - !gpio_is_valid(rinfo->scl_gpio) || - IS_ERR(i2c_imx->pinctrl_pins_default) || - IS_ERR(i2c_imx->pinctrl_pins_gpio)) { + if (rinfo->sda_gpio == -EPROBE_DEFER || + rinfo->scl_gpio == -EPROBE_DEFER) { + return -EPROBE_DEFER; + } else if (!gpio_is_valid(rinfo->sda_gpio) || + !gpio_is_valid(rinfo->scl_gpio) || + IS_ERR(i2c_imx->pinctrl_pins_default) || + IS_ERR(i2c_imx->pinctrl_pins_gpio)) { dev_dbg(&pdev->dev, "recovery information incomplete\n"); return 0; } -- cgit v1.2.3-59-g8ed1b From 0ce57f8af1782fd12d3a81872a4ab97244989802 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 25 Oct 2016 14:04:34 +0200 Subject: ahci: fix the single MSI-X case in ahci_init_one We need to make sure hpriv->irq is set properly if we don't use per-port vectors, so switch from blindly assigning pdev->irq to using pci_irq_vector, which handles all interrupt types correctly. Signed-off-by: Christoph Hellwig Reported-by: Robert Richter Tested-by: Robert Richter Tested-by: David Daney Fixes: 0b9e2988ab22 ("ahci: use pci_alloc_irq_vectors") Signed-off-by: Tejun Heo --- drivers/ata/ahci.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 60e42e2ed68f..9669fc7c19df 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -1620,7 +1620,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) /* legacy intx interrupts */ pci_intx(pdev, 1); } - hpriv->irq = pdev->irq; + hpriv->irq = pci_irq_vector(pdev, 0); if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss) host->flags |= ATA_HOST_PARALLEL_SCAN; -- cgit v1.2.3-59-g8ed1b From 7cf321d118a825c1541b43ca45294126fd474efa Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Mon, 24 Oct 2016 15:37:48 +1000 Subject: drm/drivers: add support for using the arch wc mapping API. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This fixes a regression in all these drivers since the cache mode tracking was fixed for mixed mappings. 
It uses the new arch API to add the VRAM range to the PAT mapping tracking tables. Fixes: 87744ab3832 (mm: fix cache mode tracking in vm_insert_mixed()) Reviewed-by: Christian König . Signed-off-by: Dave Airlie --- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 5 +++++ drivers/gpu/drm/ast/ast_ttm.c | 6 ++++++ drivers/gpu/drm/cirrus/cirrus_ttm.c | 7 +++++++ drivers/gpu/drm/mgag200/mgag200_ttm.c | 7 +++++++ drivers/gpu/drm/nouveau/nouveau_ttm.c | 8 ++++++++ drivers/gpu/drm/radeon/radeon_object.c | 5 +++++ 6 files changed, 38 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index aa074fac0c7f..f3efb1c5dae9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -754,6 +754,10 @@ static const char *amdgpu_vram_names[] = { int amdgpu_bo_init(struct amdgpu_device *adev) { + /* reserve PAT memory space to WC for VRAM */ + arch_io_reserve_memtype_wc(adev->mc.aper_base, + adev->mc.aper_size); + /* Add an MTRR for the VRAM */ adev->mc.vram_mtrr = arch_phys_wc_add(adev->mc.aper_base, adev->mc.aper_size); @@ -769,6 +773,7 @@ void amdgpu_bo_fini(struct amdgpu_device *adev) { amdgpu_ttm_fini(adev); arch_phys_wc_del(adev->mc.vram_mtrr); + arch_io_free_memtype_wc(adev->mc.aper_base, adev->mc.aper_size); } int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo, diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c index 608df4c90520..0743e65cb240 100644 --- a/drivers/gpu/drm/ast/ast_ttm.c +++ b/drivers/gpu/drm/ast/ast_ttm.c @@ -267,6 +267,8 @@ int ast_mm_init(struct ast_private *ast) return ret; } + arch_io_reserve_memtype_wc(pci_resource_start(dev->pdev, 0), + pci_resource_len(dev->pdev, 0)); ast->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0), pci_resource_len(dev->pdev, 0)); @@ -275,11 +277,15 @@ int ast_mm_init(struct ast_private *ast) void ast_mm_fini(struct ast_private *ast) { + struct drm_device *dev = ast->dev; + ttm_bo_device_release(&ast->ttm.bdev); ast_ttm_global_release(ast); arch_phys_wc_del(ast->fb_mtrr); + arch_io_free_memtype_wc(pci_resource_start(dev->pdev, 0), + pci_resource_len(dev->pdev, 0)); } void ast_ttm_placement(struct ast_bo *bo, int domain) diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c index bb2438dd8733..5e7e63ce7bce 100644 --- a/drivers/gpu/drm/cirrus/cirrus_ttm.c +++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c @@ -267,6 +267,9 @@ int cirrus_mm_init(struct cirrus_device *cirrus) return ret; } + arch_io_reserve_memtype_wc(pci_resource_start(dev->pdev, 0), + pci_resource_len(dev->pdev, 0)); + cirrus->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0), pci_resource_len(dev->pdev, 0)); @@ -276,6 +279,8 @@ int cirrus_mm_init(struct cirrus_device *cirrus) void cirrus_mm_fini(struct cirrus_device *cirrus) { + struct drm_device *dev = cirrus->dev; + if (!cirrus->mm_inited) return; @@ -285,6 +290,8 @@ void cirrus_mm_fini(struct cirrus_device *cirrus) arch_phys_wc_del(cirrus->fb_mtrr); cirrus->fb_mtrr = 0; + arch_io_free_memtype_wc(pci_resource_start(dev->pdev, 0), + pci_resource_len(dev->pdev, 0)); } void cirrus_ttm_placement(struct cirrus_bo *bo, int domain) diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c index 919b35f2ad24..dcf7d11ac380 100644 --- a/drivers/gpu/drm/mgag200/mgag200_ttm.c +++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c @@ -266,6 +266,9 @@ int mgag200_mm_init(struct mga_device *mdev) return ret; } + 
arch_io_reserve_memtype_wc(pci_resource_start(dev->pdev, 0), + pci_resource_len(dev->pdev, 0)); + mdev->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0), pci_resource_len(dev->pdev, 0)); @@ -274,10 +277,14 @@ int mgag200_mm_init(struct mga_device *mdev) void mgag200_mm_fini(struct mga_device *mdev) { + struct drm_device *dev = mdev->dev; + ttm_bo_device_release(&mdev->ttm.bdev); mgag200_ttm_global_release(mdev); + arch_io_free_memtype_wc(pci_resource_start(dev->pdev, 0), + pci_resource_len(dev->pdev, 0)); arch_phys_wc_del(mdev->fb_mtrr); mdev->fb_mtrr = 0; } diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c index 1825dbc33192..a6dbe8258040 100644 --- a/drivers/gpu/drm/nouveau/nouveau_ttm.c +++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c @@ -398,6 +398,9 @@ nouveau_ttm_init(struct nouveau_drm *drm) /* VRAM init */ drm->gem.vram_available = drm->device.info.ram_user; + arch_io_reserve_memtype_wc(device->func->resource_addr(device, 1), + device->func->resource_size(device, 1)); + ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_VRAM, drm->gem.vram_available >> PAGE_SHIFT); if (ret) { @@ -430,6 +433,8 @@ nouveau_ttm_init(struct nouveau_drm *drm) void nouveau_ttm_fini(struct nouveau_drm *drm) { + struct nvkm_device *device = nvxx_device(&drm->device); + ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_VRAM); ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_TT); @@ -439,4 +444,7 @@ nouveau_ttm_fini(struct nouveau_drm *drm) arch_phys_wc_del(drm->ttm.mtrr); drm->ttm.mtrr = 0; + arch_io_free_memtype_wc(device->func->resource_addr(device, 1), + device->func->resource_size(device, 1)); + } diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index be30861afae9..41b72ce6613f 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -446,6 +446,10 @@ void radeon_bo_force_delete(struct radeon_device *rdev) int radeon_bo_init(struct radeon_device *rdev) { + /* reserve PAT memory space to WC for VRAM */ + arch_io_reserve_memtype_wc(rdev->mc.aper_base, + rdev->mc.aper_size); + /* Add an MTRR for the VRAM */ if (!rdev->fastfb_working) { rdev->mc.vram_mtrr = arch_phys_wc_add(rdev->mc.aper_base, @@ -463,6 +467,7 @@ void radeon_bo_fini(struct radeon_device *rdev) { radeon_ttm_fini(rdev); arch_phys_wc_del(rdev->mc.vram_mtrr); + arch_io_free_memtype_wc(rdev->mc.aper_base, rdev->mc.aper_size); } /* Returns how many bytes TTM can move per IB. -- cgit v1.2.3-59-g8ed1b From 2925d366f450dc1db0ac88f2509a0eec0fef4226 Mon Sep 17 00:00:00 2001 From: Stephen Boyd Date: Mon, 17 Oct 2016 17:16:02 -0700 Subject: extcon: qcom-spmi-misc: Sync the extcon state on interrupt The driver was changed after submission to use the new style APIs like extcon_set_state(). Unfortunately, that only sets the state, and doesn't notify any consumers that the cable state has changed. Use extcon_set_state_sync() here instead so that we notify cable consumers of the state change. This fixes USB host-device role switching on the db8074 platform. 
Fixes: 38085c987f52 ("extcon: Add support for qcom SPMI PMIC USB id detection hardware") Signed-off-by: Stephen Boyd Signed-off-by: Chanwoo Choi --- drivers/extcon/extcon-qcom-spmi-misc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/extcon/extcon-qcom-spmi-misc.c b/drivers/extcon/extcon-qcom-spmi-misc.c index ca957a5f4291..b8cde096a808 100644 --- a/drivers/extcon/extcon-qcom-spmi-misc.c +++ b/drivers/extcon/extcon-qcom-spmi-misc.c @@ -51,7 +51,7 @@ static void qcom_usb_extcon_detect_cable(struct work_struct *work) if (ret) return; - extcon_set_state(info->edev, EXTCON_USB_HOST, !id); + extcon_set_state_sync(info->edev, EXTCON_USB_HOST, !id); } static irqreturn_t qcom_usb_irq_handler(int irq, void *dev_id) -- cgit v1.2.3-59-g8ed1b From 62c61514191bfe5731b43619b9b1bf4b423beeb0 Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Sun, 23 Oct 2016 09:32:34 -0700 Subject: doc: Add missing parameter for msi_setup commit 92ca8d20dee2 ("genirq/msi: Switch to new irq spreading") introduced new parameter to msi_init_setup and but did not update docbook comments. Fixes 'make htmldocs' warning. Signed-off-by: Stephen Hemminger Cc: bhelgaas@google.com Cc: linux-pci@vger.kernel.org Signed-off-by: Thomas Gleixner --- drivers/pci/msi.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers') diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index bfdd0744b686..ad70507cfb56 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c @@ -610,6 +610,7 @@ static int msi_verify_entries(struct pci_dev *dev) * msi_capability_init - configure device's MSI capability structure * @dev: pointer to the pci_dev data structure of MSI device function * @nvec: number of interrupts to allocate + * @affinity: flag to indicate cpu irq affinity mask should be set * * Setup the MSI capability structure of the device with the requested * number of interrupts. A return value of zero indicates the successful @@ -752,6 +753,7 @@ static void msix_program_entries(struct pci_dev *dev, * @dev: pointer to the pci_dev data structure of MSI-X device function * @entries: pointer to an array of struct msix_entry entries * @nvec: number of @entries + * @affinity: flag to indicate cpu irq affinity mask should be set * * Setup the MSI-X capability structure of device function with a * single MSI-X irq. A return of zero indicates the successful setup of -- cgit v1.2.3-59-g8ed1b From 92d230dd8cafac417e130e404d4b64eafe2271de Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Sat, 22 Oct 2016 14:31:06 +0000 Subject: rocker: fix error return code in rocker_world_check_init() Fix to return error code -EINVAL from the error handling case instead of 0, as done elsewhere in this function. Fixes: e420114eef4a ("rocker: introduce worlds infrastructure") Signed-off-by: Wei Yongjun Acked-by: Jiri Pirko Signed-off-by: David S. 
Miller --- drivers/net/ethernet/rocker/rocker_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c index 5424fb341613..24b746406bc7 100644 --- a/drivers/net/ethernet/rocker/rocker_main.c +++ b/drivers/net/ethernet/rocker/rocker_main.c @@ -1471,7 +1471,7 @@ static int rocker_world_check_init(struct rocker_port *rocker_port) if (rocker->wops) { if (rocker->wops->mode != mode) { dev_err(&rocker->pdev->dev, "hardware has ports in different worlds, which is not supported\n"); - return err; + return -EINVAL; } return 0; } -- cgit v1.2.3-59-g8ed1b From e52fed7177f74382f742c27de2cc5314790aebb6 Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Sun, 23 Oct 2016 21:32:47 -0700 Subject: netvsc: fix incorrect receive checksum offloading The Hyper-V netvsc driver was looking at the incorrect status bits in the checksum info. It was setting the receive checksum unnecessary flag based on the IP header checksum being correct. The checksum flag is skb is about TCP and UDP checksum status. Because of this bug, any packet received with bad TCP checksum would be passed up the stack and to the application causing data corruption. The problem is reproducible via netcat and netem. This had a side effect of not doing receive checksum offload on IPv6. The driver was also also always doing checksum offload independent of the checksum setting done via ethtool. Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller --- drivers/net/hyperv/netvsc_drv.c | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 5d6e75a40d38..c71d966d905f 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -607,15 +607,18 @@ static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net, packet->total_data_buflen); skb->protocol = eth_type_trans(skb, net); - if (csum_info) { - /* We only look at the IP checksum here. - * Should we be dropping the packet if checksum - * failed? How do we deal with other checksums - TCP/UDP? - */ - if (csum_info->receive.ip_checksum_succeeded) + + /* skb is already created with CHECKSUM_NONE */ + skb_checksum_none_assert(skb); + + /* + * In Linux, the IP checksum is always checked. + * Do L4 checksum offload if enabled and present. + */ + if (csum_info && (net->features & NETIF_F_RXCSUM)) { + if (csum_info->receive.tcp_checksum_succeeded || + csum_info->receive.udp_checksum_succeeded) skb->ip_summed = CHECKSUM_UNNECESSARY; - else - skb->ip_summed = CHECKSUM_NONE; } if (vlan_tci & VLAN_TAG_PRESENT) -- cgit v1.2.3-59-g8ed1b From 42acfc6615f47e465731c263bee0c799edb098f2 Mon Sep 17 00:00:00 2001 From: Jiri Slaby Date: Mon, 3 Oct 2016 11:00:17 +0200 Subject: tty: vt, fix bogus division in csi_J MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In csi_J(3), the third parameter of scr_memsetw (vc_screenbuf_size) is divided by 2 inappropriatelly. But scr_memsetw expects size, not count, because it divides the size by 2 on its own before doing actual memset-by-words. So remove the bogus division. 
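To illustrate the size-versus-count mixup described above, here is a minimal standalone sketch; memsetw_bytes() is a hypothetical stand-in (not the kernel helper) that, like scr_memsetw() as described here, takes a length in bytes and halves it internally before writing 16-bit words:

#include <stdio.h>

/* hypothetical stand-in: takes a length in BYTES and writes 16-bit words */
static void memsetw_bytes(unsigned short *s, unsigned short c, unsigned int count)
{
	count /= 2;			/* byte count -> word count, done internally */
	while (count--)
		*s++ = c;
}

int main(void)
{
	unsigned short buf[8] = { 0 };
	unsigned int screenbuf_size = sizeof(buf);	/* 16 bytes, 8 character cells */

	/* old call: size pre-halved by the caller, so only half the cells get cleared */
	memsetw_bytes(buf, 0x0720, screenbuf_size >> 1);
	printf("last cell after halved size: 0x%04x\n", buf[7]);	/* 0x0000, untouched */

	/* fixed call: pass the full size, all cells are cleared */
	memsetw_bytes(buf, 0x0720, screenbuf_size);
	printf("last cell after full size:   0x%04x\n", buf[7]);	/* 0x0720 */
	return 0;
}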
Signed-off-by: Jiri Slaby Cc: Petr Písař Fixes: f8df13e0a9 (tty: Clean console safely) Signed-off-by: Greg Kroah-Hartman --- drivers/tty/vt/vt.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c index 06fb39c1d6dd..ae203c2cd15a 100644 --- a/drivers/tty/vt/vt.c +++ b/drivers/tty/vt/vt.c @@ -1176,7 +1176,7 @@ static void csi_J(struct vc_data *vc, int vpar) break; case 3: /* erase scroll-back buffer (and whole display) */ scr_memsetw(vc->vc_screenbuf, vc->vc_video_erase_char, - vc->vc_screenbuf_size >> 1); + vc->vc_screenbuf_size); set_origin(vc); if (con_is_visible(vc)) update_screen(vc); -- cgit v1.2.3-59-g8ed1b From b20fb13c7c093f9170fbf162fc7f333d7a5cf77a Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Thu, 6 Oct 2016 15:42:54 +0200 Subject: serial: stm32: Fix comparisons with undefined register MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit drivers/tty/serial/stm32-usart.c: In function ‘stm32_receive_chars’: drivers/tty/serial/stm32-usart.c:130: warning: comparison is always true due to limited range of data type drivers/tty/serial/stm32-usart.c: In function ‘stm32_tx_dma_complete’: drivers/tty/serial/stm32-usart.c:177: warning: comparison is always false due to limited range of data type stm32_usart_offsets.icr is u8, while UNDEF_REG = ~0 is int, and thus 0xffffffff. As all registers in stm32_usart_offsets are u8, change the definition of UNDEF_REG to 0xff to fix this. Fixes: ada8618ff3bfe183 ("serial: stm32: adding support for stm32f7") Signed-off-by: Geert Uytterhoeven Signed-off-by: Greg Kroah-Hartman --- drivers/tty/serial/stm32-usart.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/tty/serial/stm32-usart.h b/drivers/tty/serial/stm32-usart.h index 41d974923102..cd97ceb76e4f 100644 --- a/drivers/tty/serial/stm32-usart.h +++ b/drivers/tty/serial/stm32-usart.h @@ -31,7 +31,7 @@ struct stm32_usart_info { struct stm32_usart_config cfg; }; -#define UNDEF_REG ~0 +#define UNDEF_REG 0xff /* Register offsets */ struct stm32_usart_info stm32f4_info = { -- cgit v1.2.3-59-g8ed1b From bc2a024f865d712bb5748aabe77cd515d7d5eea9 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Thu, 6 Oct 2016 15:55:24 +0200 Subject: serial: SERIAL_STM32 should depend on HAS_DMA If NO_DMA=y: drivers/built-in.o: In function `stm32_serial_remove': stm32-usart.c:(.text+0xcea1a): undefined reference to `bad_dma_ops' stm32-usart.c:(.text+0xcea7a): undefined reference to `bad_dma_ops' Add a dependency on HAS_DMA to fix this. Signed-off-by: Geert Uytterhoeven Acked-by: Alexandre Torgue Signed-off-by: Greg Kroah-Hartman --- drivers/tty/serial/Kconfig | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig index c7831407a882..25c1d7bc0100 100644 --- a/drivers/tty/serial/Kconfig +++ b/drivers/tty/serial/Kconfig @@ -1625,6 +1625,7 @@ config SERIAL_SPRD_CONSOLE config SERIAL_STM32 tristate "STMicroelectronics STM32 serial port support" select SERIAL_CORE + depends on HAS_DMA depends on ARM || COMPILE_TEST help This driver is for the on-chip Serial Controller on -- cgit v1.2.3-59-g8ed1b From 0267a4ff9836a1a4e59044db5bb8cdaddb986d3f Mon Sep 17 00:00:00 2001 From: Nava kishore Manne Date: Wed, 12 Oct 2016 13:17:27 +0530 Subject: serial: xuartps: Add new compatible string for ZynqMP This patch Adds the new compatible string for ZynqMP SoC. 
Signed-off-by: Nava kishore Manne Signed-off-by: Greg Kroah-Hartman --- drivers/tty/serial/xilinx_uartps.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers') diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c index f37edaa5ac75..dd4c02fa4820 100644 --- a/drivers/tty/serial/xilinx_uartps.c +++ b/drivers/tty/serial/xilinx_uartps.c @@ -1200,6 +1200,7 @@ static int __init cdns_early_console_setup(struct earlycon_device *device, OF_EARLYCON_DECLARE(cdns, "xlnx,xuartps", cdns_early_console_setup); OF_EARLYCON_DECLARE(cdns, "cdns,uart-r1p8", cdns_early_console_setup); OF_EARLYCON_DECLARE(cdns, "cdns,uart-r1p12", cdns_early_console_setup); +OF_EARLYCON_DECLARE(cdns, "xlnx,zynqmp-uart", cdns_early_console_setup); /** * cdns_uart_console_write - perform write operation @@ -1438,6 +1439,7 @@ static const struct of_device_id cdns_uart_of_match[] = { { .compatible = "xlnx,xuartps", }, { .compatible = "cdns,uart-r1p8", }, { .compatible = "cdns,uart-r1p12", .data = &zynqmp_uart_def }, + { .compatible = "xlnx,zynqmp-uart", .data = &zynqmp_uart_def }, {} }; MODULE_DEVICE_TABLE(of, cdns_uart_of_match); -- cgit v1.2.3-59-g8ed1b From beadba5e19e2c44ec3527d3d1fc3ac3eda957e09 Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Sun, 23 Oct 2016 11:38:18 +0000 Subject: serial: pch_uart: add terminate entry for dmi_system_id tables Make sure dmi_system_id tables are NULL terminated. Signed-off-by: Wei Yongjun Acked-by: Jiri Slaby Signed-off-by: Greg Kroah-Hartman --- drivers/tty/serial/pch_uart.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c index d391650b82e7..42caccb5e87e 100644 --- a/drivers/tty/serial/pch_uart.c +++ b/drivers/tty/serial/pch_uart.c @@ -419,6 +419,7 @@ static struct dmi_system_id pch_uart_dmi_table[] = { }, (void *)MINNOW_UARTCLK, }, + { } }; /* Return UART clock, checking for board specific clocks. */ -- cgit v1.2.3-59-g8ed1b From 0ead21ad25f53117a1e39f0bddcb363e38886996 Mon Sep 17 00:00:00 2001 From: Denys Vlasenko Date: Mon, 24 Oct 2016 17:00:27 +0900 Subject: serial: 8250_uniphier: fix more unterminated string Commit 1681d2116c96 ("serial: 8250_uniphier: add "\n" at the end of error log") missed this. Signed-off-by: Denys Vlasenko [masahiro: add commit log] Signed-off-by: Masahiro Yamada Signed-off-by: Greg Kroah-Hartman --- drivers/tty/serial/8250/8250_uniphier.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/tty/serial/8250/8250_uniphier.c b/drivers/tty/serial/8250/8250_uniphier.c index b8d9c8c9d02a..a8babb0cf659 100644 --- a/drivers/tty/serial/8250/8250_uniphier.c +++ b/drivers/tty/serial/8250/8250_uniphier.c @@ -199,7 +199,7 @@ static int uniphier_uart_probe(struct platform_device *pdev) regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!regs) { - dev_err(dev, "failed to get memory resource"); + dev_err(dev, "failed to get memory resource\n"); return -EINVAL; } -- cgit v1.2.3-59-g8ed1b From 09065c5f0f1ff4bfb309975e182b746989a869c5 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Mon, 24 Oct 2016 17:00:28 +0900 Subject: serial: 8250_uniphier: fix clearing divisor latch access bit At this point, 'value' is always a byte, then this code is clearing bit 15, which is already clear. I meant to clear bit 7. 
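The arithmetic behind this fix can be checked with a small standalone program; the constants below are assumed to match the driver and serial_reg.h (UART_LCR_DLAB = 0x80, i.e. bit 7, and an LCR shift of 8):

#include <stdio.h>

#define UART_LCR_DLAB		0x80	/* divisor latch access bit (bit 7) */
#define UNIPHIER_UART_LCR_SHIFT	8	/* LCR sits in bits 15:8 of the combined register */

int main(void)
{
	unsigned int value = 0x83;	/* example byte-sized LCR value with DLAB set */

	/* old mask: clears bit 15, which a byte-sized value never has set -> no effect */
	printf("old: 0x%02x\n", value & ~(UART_LCR_DLAB << UNIPHIER_UART_LCR_SHIFT));

	/* fixed mask: clears bit 7 as intended */
	printf("new: 0x%02x\n", value & ~UART_LCR_DLAB);
	return 0;
}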
Signed-off-by: Masahiro Yamada Reported-by: Denys Vlasenko Signed-off-by: Greg Kroah-Hartman --- drivers/tty/serial/8250/8250_uniphier.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/tty/serial/8250/8250_uniphier.c b/drivers/tty/serial/8250/8250_uniphier.c index a8babb0cf659..417d9e7038e1 100644 --- a/drivers/tty/serial/8250/8250_uniphier.c +++ b/drivers/tty/serial/8250/8250_uniphier.c @@ -99,7 +99,7 @@ static void uniphier_serial_out(struct uart_port *p, int offset, int value) case UART_LCR: valshift = UNIPHIER_UART_LCR_SHIFT; /* Divisor latch access bit does not exist. */ - value &= ~(UART_LCR_DLAB << valshift); + value &= ~UART_LCR_DLAB; /* fall through */ case UART_MCR: offset = UNIPHIER_UART_LCR_MCR; -- cgit v1.2.3-59-g8ed1b From be2c92b8f1648527620058fdac2bae12b07f1fe9 Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Mon, 24 Oct 2016 15:56:49 -0500 Subject: serial: core: fix console problems on uart_close MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Commit 761ed4a94582 ('tty: serial_core: convert uart_close to use tty_port_close') started setting the ttyport console flag for serial drivers. This is causing crashes, hangs, or garbage output on several platforms because the serial shutdown is skipped and IRQs are left enabled. Partially revert commit 761ed4a94582 and drop reporting UART tty_ports as a console leaving the console handling to the serial_core as it was before. Fixes: 761ed4a94582ab29 ("tty: serial_core: convert uart_close to use tty_port_close") Reported-by: Niklas Söderlund Reported-by: Mike Galbraith Reported-by: Mugunthan V N Cc: Peter Hurley Cc: Geert Uytterhoeven Cc: Alan Cox Cc: Greg Kroah-Hartman Cc: Jiri Slaby Cc: linux-serial@vger.kernel.org Signed-off-by: Rob Herring Signed-off-by: Greg Kroah-Hartman --- drivers/tty/serial/serial_core.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'drivers') diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c index 6e4f63627479..664c99aeeca5 100644 --- a/drivers/tty/serial/serial_core.c +++ b/drivers/tty/serial/serial_core.c @@ -2746,8 +2746,6 @@ int uart_add_one_port(struct uart_driver *drv, struct uart_port *uport) uport->cons = drv->cons; uport->minor = drv->tty_driver->minor_start + uport->line; - port->console = uart_console(uport); - /* * If this port is a console, then the spinlock is already * initialised. -- cgit v1.2.3-59-g8ed1b From f00a7c57569db04633818bc5e0c0e35d62733b02 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Mon, 10 Oct 2016 11:13:48 +0300 Subject: serial: 8250_lpss: enable MSI for sure The commit 4fe0d154880b ("PCI: Use positive flags in pci_alloc_irq_vectors()") replaces flags from negative to positive values which makes mandatory to have the last argument in pci_alloc_irq_vectors() non-zero (if we want to be no-op). This basically drops MSI enabling in 8250_lpss driver. Restore desired behaviour in 8250_lpss by passing PCI_IRQ_ALL_TYPES instead of 0 to pci_alloc_irq_vectors(). 
Fixes: 60a9244a5d14 ("serial: 8250_lpss: enable MSI for Intel Quark") Cc: Christoph Hellwig Signed-off-by: Andy Shevchenko Reviewed-by: Bryan O'Donoghue Signed-off-by: Greg Kroah-Hartman --- drivers/tty/serial/8250/8250_lpss.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/tty/serial/8250/8250_lpss.c b/drivers/tty/serial/8250/8250_lpss.c index 886fcf37f291..b9923464599f 100644 --- a/drivers/tty/serial/8250/8250_lpss.c +++ b/drivers/tty/serial/8250/8250_lpss.c @@ -213,7 +213,7 @@ static int qrk_serial_setup(struct lpss8250 *lpss, struct uart_port *port) struct pci_dev *pdev = to_pci_dev(port->dev); int ret; - ret = pci_alloc_irq_vectors(pdev, 1, 1, 0); + ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES); if (ret < 0) return ret; -- cgit v1.2.3-59-g8ed1b From d704b2d32c39c256dea659e142a31b875a13c63b Mon Sep 17 00:00:00 2001 From: Aaron Brice Date: Thu, 6 Oct 2016 15:13:04 -0700 Subject: tty: serial: fsl_lpuart: Fix Tx DMA edge case In the case where head == 0 on the circular buffer, there should be one DMA buffer, not two. The second zero-length buffer would break the lpuart driver, transfer would never complete. Signed-off-by: Aaron Brice Acked-by: Stefan Agner Tested-by: Stefan Agner Tested-by: Bhuvanchandra DV Signed-off-by: Greg Kroah-Hartman --- drivers/tty/serial/fsl_lpuart.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c index de9d5107c00a..76103f2c4a80 100644 --- a/drivers/tty/serial/fsl_lpuart.c +++ b/drivers/tty/serial/fsl_lpuart.c @@ -328,7 +328,7 @@ static void lpuart_dma_tx(struct lpuart_port *sport) sport->dma_tx_bytes = uart_circ_chars_pending(xmit); - if (xmit->tail < xmit->head) { + if (xmit->tail < xmit->head || xmit->head == 0) { sport->dma_tx_nents = 1; sg_init_one(sgl, xmit->buf + xmit->tail, sport->dma_tx_bytes); } else { @@ -359,7 +359,6 @@ static void lpuart_dma_tx(struct lpuart_port *sport) sport->dma_tx_in_progress = true; sport->dma_tx_cookie = dmaengine_submit(sport->dma_tx_desc); dma_async_issue_pending(sport->dma_tx_chan); - } static void lpuart_dma_tx_complete(void *arg) -- cgit v1.2.3-59-g8ed1b From 32b2921e6a7461fe63b71217067a6cf4bddb132f Mon Sep 17 00:00:00 2001 From: Dmitry Vyukov Date: Fri, 14 Oct 2016 15:18:28 +0200 Subject: tty: limit terminal size to 4M chars Size of kmalloc() in vc_do_resize() is controlled by user. Too large kmalloc() size triggers WARNING message on console. Put a reasonable upper bound on terminal size to prevent WARNINGs. 
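As a rough sanity check of the new bound (assuming the usual two bytes per character cell in the screen buffer), 4 << 20 is 4,194,304, so even an enormous text screen stays under it:

#include <stdio.h>

int main(void)
{
	unsigned int limit = 4 << 20;			/* the new cap: 4,194,304 */
	unsigned int cols = 500, rows = 4000;		/* an (unrealistically) huge text screen */
	unsigned int screen_size = cols * 2 * rows;	/* 2 bytes per cell -> 4,000,000 */

	printf("limit = %u, 500x4000 screen = %u -> %s\n",
	       limit, screen_size,
	       screen_size > limit ? "rejected with -EINVAL" : "still allowed");
	return 0;
}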
Signed-off-by: Dmitry Vyukov CC: David Rientjes Cc: One Thousand Gnomes Cc: Greg Kroah-Hartman Cc: Jiri Slaby Cc: Peter Hurley Cc: linux-kernel@vger.kernel.org Cc: syzkaller@googlegroups.com Cc: stable Signed-off-by: Greg Kroah-Hartman --- drivers/tty/vt/vt.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers') diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c index ae203c2cd15a..26cda08bc611 100644 --- a/drivers/tty/vt/vt.c +++ b/drivers/tty/vt/vt.c @@ -870,6 +870,8 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc, if (new_cols == vc->vc_cols && new_rows == vc->vc_rows) return 0; + if (new_screen_size > (4 << 20)) + return -EINVAL; newscreen = kmalloc(new_screen_size, GFP_USER); if (!newscreen) return -ENOMEM; -- cgit v1.2.3-59-g8ed1b From ecb988a3b7985913d1f0112f66667cdd15e40711 Mon Sep 17 00:00:00 2001 From: Steve Shih Date: Mon, 17 Oct 2016 09:51:05 -0700 Subject: tty: serial: 8250: 8250_core: NXP SC16C2552 workaround NXP SC16C2552 requires that we always write a reset to the RX FIFO and TX FIFO whenever we enable the FIFOs Cc: xe-kernel@external.cisco.com Signed-off-by: Steve Shih Signed-off-by: David Singleton Signed-off-by: Greg Kroah-Hartman --- drivers/tty/serial/8250/8250_port.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c index 1bfb6fdbaa20..1731b98d2471 100644 --- a/drivers/tty/serial/8250/8250_port.c +++ b/drivers/tty/serial/8250/8250_port.c @@ -83,7 +83,8 @@ static const struct serial8250_config uart_config[] = { .name = "16550A", .fifo_size = 16, .tx_loadsz = 16, - .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10, + .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10 | + UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT, .rxtrig_bytes = {1, 4, 8, 14}, .flags = UART_CAP_FIFO, }, -- cgit v1.2.3-59-g8ed1b From 03842c17397e14cb8bb1adc2015f5dce6c733ffe Mon Sep 17 00:00:00 2001 From: Francois Berder Date: Tue, 25 Oct 2016 13:24:13 +0100 Subject: sc16is7xx: always write state when configuring GPIO as an output The regmap_update first reads the IOState register and then triggers a write if needed. However, GPIOS might be configured as an input so the read to IOState on this GPIO is the current state which might be random. Signed-off-by: Francois Berder Signed-off-by: Greg Kroah-Hartman --- drivers/tty/serial/sc16is7xx.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c index 2675792a8f59..fb0672554123 100644 --- a/drivers/tty/serial/sc16is7xx.c +++ b/drivers/tty/serial/sc16is7xx.c @@ -1130,9 +1130,13 @@ static int sc16is7xx_gpio_direction_output(struct gpio_chip *chip, { struct sc16is7xx_port *s = gpiochip_get_data(chip); struct uart_port *port = &s->p[0].port; + u8 state = sc16is7xx_port_read(port, SC16IS7XX_IOSTATE_REG); - sc16is7xx_port_update(port, SC16IS7XX_IOSTATE_REG, BIT(offset), - val ? BIT(offset) : 0); + if (val) + state |= BIT(offset); + else + state &= ~BIT(offset); + sc16is7xx_port_write(port, SC16IS7XX_IOSTATE_REG, state); sc16is7xx_port_update(port, SC16IS7XX_IODIR_REG, BIT(offset), BIT(offset)); -- cgit v1.2.3-59-g8ed1b From 009e39ae44f4191188aeb6dfbf661b771dbbe515 Mon Sep 17 00:00:00 2001 From: Scot Doyle Date: Thu, 13 Oct 2016 12:12:43 -0500 Subject: vt: clear selection before resizing When resizing a vt its selection may exceed the new size, resulting in an invalid memory access [1]. 
Clear the selection before resizing. [1] http://lkml.kernel.org/r/CACT4Y+acDTwy4umEvf5ROBGiRJNrxHN4Cn5szCXE5Jw-d1B=Xw@mail.gmail.com Reported-and-tested-by: Dmitry Vyukov Signed-off-by: Scot Doyle Cc: stable Signed-off-by: Greg Kroah-Hartman --- drivers/tty/vt/vt.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers') diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c index 26cda08bc611..8c3bf3d613c0 100644 --- a/drivers/tty/vt/vt.c +++ b/drivers/tty/vt/vt.c @@ -876,6 +876,9 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc, if (!newscreen) return -ENOMEM; + if (vc == sel_cons) + clear_selection(); + old_rows = vc->vc_rows; old_row_size = vc->vc_size_row; -- cgit v1.2.3-59-g8ed1b From 248ff02165437864146d6fbd2d99b2359c3723e6 Mon Sep 17 00:00:00 2001 From: Laura Abbott Date: Fri, 7 Oct 2016 09:09:30 -0700 Subject: driver core: Make Kconfig text for DEBUG_TEST_DRIVER_REMOVE stronger The current state of driver removal is not great. CONFIG_DEBUG_TEST_DRIVER_REMOVE finds lots of errors. The help text currently undersells exactly how many errors this option will find. Add a bit more description to indicate this option shouldn't be turned on unless you actually want to debug driver removal. The text can be changed later when more drivers are fixed up. Signed-off-by: Laura Abbott Signed-off-by: Greg Kroah-Hartman --- drivers/base/Kconfig | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig index fdf44cac08e6..d02e7c0f5bfd 100644 --- a/drivers/base/Kconfig +++ b/drivers/base/Kconfig @@ -213,14 +213,16 @@ config DEBUG_DEVRES If you are unsure about this, Say N here. config DEBUG_TEST_DRIVER_REMOVE - bool "Test driver remove calls during probe" + bool "Test driver remove calls during probe (UNSTABLE)" depends on DEBUG_KERNEL help Say Y here if you want the Driver core to test driver remove functions by calling probe, remove, probe. This tests the remove path without having to unbind the driver or unload the driver module. - If you are unsure about this, say N here. + This option is expected to find errors and may render your system + unusable. You should say N here unless you are explicitly looking to + test this functionality. config SYS_HYPERVISOR bool -- cgit v1.2.3-59-g8ed1b From 4fc6d239ee5640909932c47f3c7b93dd8e415fea Mon Sep 17 00:00:00 2001 From: Zefir Kurtisi Date: Mon, 24 Oct 2016 12:40:53 +0200 Subject: Revert "at803x: fix suspend/resume for SGMII link" This reverts commit 98267311fe3b334ae7c107fa0e2413adcf3ba735. Suspending the SGMII alongside the copper side made the at803x inaccessable while powered down, e.g. it can't be re-probed after suspend. Signed-off-by: Zefir Kurtisi Signed-off-by: David S. 
Miller --- drivers/net/phy/at803x.c | 26 -------------------------- 1 file changed, 26 deletions(-) (limited to 'drivers') diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c index f279a897a5c7..cf74d108953e 100644 --- a/drivers/net/phy/at803x.c +++ b/drivers/net/phy/at803x.c @@ -52,9 +52,6 @@ #define AT803X_DEBUG_REG_5 0x05 #define AT803X_DEBUG_TX_CLK_DLY_EN BIT(8) -#define AT803X_REG_CHIP_CONFIG 0x1f -#define AT803X_BT_BX_REG_SEL 0x8000 - #define ATH8030_PHY_ID 0x004dd076 #define ATH8031_PHY_ID 0x004dd074 #define ATH8035_PHY_ID 0x004dd072 @@ -209,7 +206,6 @@ static int at803x_suspend(struct phy_device *phydev) { int value; int wol_enabled; - int ccr; mutex_lock(&phydev->lock); @@ -225,16 +221,6 @@ static int at803x_suspend(struct phy_device *phydev) phy_write(phydev, MII_BMCR, value); - if (phydev->interface != PHY_INTERFACE_MODE_SGMII) - goto done; - - /* also power-down SGMII interface */ - ccr = phy_read(phydev, AT803X_REG_CHIP_CONFIG); - phy_write(phydev, AT803X_REG_CHIP_CONFIG, ccr & ~AT803X_BT_BX_REG_SEL); - phy_write(phydev, MII_BMCR, phy_read(phydev, MII_BMCR) | BMCR_PDOWN); - phy_write(phydev, AT803X_REG_CHIP_CONFIG, ccr | AT803X_BT_BX_REG_SEL); - -done: mutex_unlock(&phydev->lock); return 0; @@ -243,7 +229,6 @@ done: static int at803x_resume(struct phy_device *phydev) { int value; - int ccr; mutex_lock(&phydev->lock); @@ -251,17 +236,6 @@ static int at803x_resume(struct phy_device *phydev) value &= ~(BMCR_PDOWN | BMCR_ISOLATE); phy_write(phydev, MII_BMCR, value); - if (phydev->interface != PHY_INTERFACE_MODE_SGMII) - goto done; - - /* also power-up SGMII interface */ - ccr = phy_read(phydev, AT803X_REG_CHIP_CONFIG); - phy_write(phydev, AT803X_REG_CHIP_CONFIG, ccr & ~AT803X_BT_BX_REG_SEL); - value = phy_read(phydev, MII_BMCR) & ~(BMCR_PDOWN | BMCR_ISOLATE); - phy_write(phydev, MII_BMCR, value); - phy_write(phydev, AT803X_REG_CHIP_CONFIG, ccr | AT803X_BT_BX_REG_SEL); - -done: mutex_unlock(&phydev->lock); return 0; -- cgit v1.2.3-59-g8ed1b From f62265b53ef34a372b657c99e23d32e95b464316 Mon Sep 17 00:00:00 2001 From: Zefir Kurtisi Date: Mon, 24 Oct 2016 12:40:54 +0200 Subject: at803x: double check SGMII side autoneg In SGMII mode, we observed an autonegotiation issue after power-down-up cycles where the copper side reports successful link establishment but the SGMII side's link is down. This happened in a setup where the at8031 is connected over SGMII to a eTSEC (fsl gianfar), but so far could not be reproduced with other Ethernet device / driver combinations. This commit adds a wrapper function for at8031 that in case of operating in SGMII mode double checks SGMII link state when generic aneg_done() succeeds. It prints a warning on failure but intentionally does not try to recover from this state. As a result, if you ever see a warning '803x_aneg_done: SGMII link is not ok' you will end up having an Ethernet link up but won't get any data through. This should not happen, if it does, please contact the module maintainer. Signed-off-by: Zefir Kurtisi Signed-off-by: David S. 
Miller --- drivers/net/phy/at803x.c | 39 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 39 insertions(+) (limited to 'drivers') diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c index cf74d108953e..a52b560e428b 100644 --- a/drivers/net/phy/at803x.c +++ b/drivers/net/phy/at803x.c @@ -42,10 +42,18 @@ #define AT803X_MMD_ACCESS_CONTROL 0x0D #define AT803X_MMD_ACCESS_CONTROL_DATA 0x0E #define AT803X_FUNC_DATA 0x4003 +#define AT803X_REG_CHIP_CONFIG 0x1f +#define AT803X_BT_BX_REG_SEL 0x8000 #define AT803X_DEBUG_ADDR 0x1D #define AT803X_DEBUG_DATA 0x1E +#define AT803X_MODE_CFG_MASK 0x0F +#define AT803X_MODE_CFG_SGMII 0x01 + +#define AT803X_PSSR 0x11 /*PHY-Specific Status Register*/ +#define AT803X_PSSR_MR_AN_COMPLETE 0x0200 + #define AT803X_DEBUG_REG_0 0x00 #define AT803X_DEBUG_RX_CLK_DLY_EN BIT(15) @@ -355,6 +363,36 @@ static void at803x_link_change_notify(struct phy_device *phydev) } } +static int at803x_aneg_done(struct phy_device *phydev) +{ + int ccr; + + int aneg_done = genphy_aneg_done(phydev); + if (aneg_done != BMSR_ANEGCOMPLETE) + return aneg_done; + + /* + * in SGMII mode, if copper side autoneg is successful, + * also check SGMII side autoneg result + */ + ccr = phy_read(phydev, AT803X_REG_CHIP_CONFIG); + if ((ccr & AT803X_MODE_CFG_MASK) != AT803X_MODE_CFG_SGMII) + return aneg_done; + + /* switch to SGMII/fiber page */ + phy_write(phydev, AT803X_REG_CHIP_CONFIG, ccr & ~AT803X_BT_BX_REG_SEL); + + /* check if the SGMII link is OK. */ + if (!(phy_read(phydev, AT803X_PSSR) & AT803X_PSSR_MR_AN_COMPLETE)) { + pr_warn("803x_aneg_done: SGMII link is not ok\n"); + aneg_done = 0; + } + /* switch back to copper page */ + phy_write(phydev, AT803X_REG_CHIP_CONFIG, ccr | AT803X_BT_BX_REG_SEL); + + return aneg_done; +} + static struct phy_driver at803x_driver[] = { { /* ATHEROS 8035 */ @@ -406,6 +444,7 @@ static struct phy_driver at803x_driver[] = { .flags = PHY_HAS_INTERRUPT, .config_aneg = genphy_config_aneg, .read_status = genphy_read_status, + .aneg_done = at803x_aneg_done, .ack_interrupt = &at803x_ack_interrupt, .config_intr = &at803x_config_intr, } }; -- cgit v1.2.3-59-g8ed1b From e0f841f5cbf2a195c63f3441f3d8ef1cd2bdeeed Mon Sep 17 00:00:00 2001 From: Tobias Brunner Date: Mon, 24 Oct 2016 15:44:26 +0200 Subject: macsec: Fix header length if SCI is added if explicitly disabled Even if sending SCIs is explicitly disabled, the code that creates the Security Tag might still decide to add it (e.g. if multiple RX SCs are defined on the MACsec interface). But because the header length so far only depended on the configuration option the SCI overwrote the original frame's contents (EtherType and e.g. the beginning of the IP header) and if encrypted did not visibly end up in the packet, while the SC flag in the TCI field of the Security Tag was still set, resulting in invalid MACsec frames. Fixes: c09440f7dcb3 ("macsec: introduce IEEE 802.1AE driver") Signed-off-by: Tobias Brunner Acked-by: Sabrina Dubroca Signed-off-by: David S. 
Miller --- drivers/net/macsec.c | 26 ++++++++++++++++++-------- 1 file changed, 18 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c index 3ea47f28e143..d2e61e002926 100644 --- a/drivers/net/macsec.c +++ b/drivers/net/macsec.c @@ -397,6 +397,14 @@ static struct macsec_cb *macsec_skb_cb(struct sk_buff *skb) #define DEFAULT_ENCRYPT false #define DEFAULT_ENCODING_SA 0 +static bool send_sci(const struct macsec_secy *secy) +{ + const struct macsec_tx_sc *tx_sc = &secy->tx_sc; + + return tx_sc->send_sci || + (secy->n_rx_sc > 1 && !tx_sc->end_station && !tx_sc->scb); +} + static sci_t make_sci(u8 *addr, __be16 port) { sci_t sci; @@ -437,15 +445,15 @@ static unsigned int macsec_extra_len(bool sci_present) /* Fill SecTAG according to IEEE 802.1AE-2006 10.5.3 */ static void macsec_fill_sectag(struct macsec_eth_header *h, - const struct macsec_secy *secy, u32 pn) + const struct macsec_secy *secy, u32 pn, + bool sci_present) { const struct macsec_tx_sc *tx_sc = &secy->tx_sc; - memset(&h->tci_an, 0, macsec_sectag_len(tx_sc->send_sci)); + memset(&h->tci_an, 0, macsec_sectag_len(sci_present)); h->eth.h_proto = htons(ETH_P_MACSEC); - if (tx_sc->send_sci || - (secy->n_rx_sc > 1 && !tx_sc->end_station && !tx_sc->scb)) { + if (sci_present) { h->tci_an |= MACSEC_TCI_SC; memcpy(&h->secure_channel_id, &secy->sci, sizeof(h->secure_channel_id)); @@ -650,6 +658,7 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb, struct macsec_tx_sc *tx_sc; struct macsec_tx_sa *tx_sa; struct macsec_dev *macsec = macsec_priv(dev); + bool sci_present; u32 pn; secy = &macsec->secy; @@ -687,7 +696,8 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb, unprotected_len = skb->len; eth = eth_hdr(skb); - hh = (struct macsec_eth_header *)skb_push(skb, macsec_extra_len(tx_sc->send_sci)); + sci_present = send_sci(secy); + hh = (struct macsec_eth_header *)skb_push(skb, macsec_extra_len(sci_present)); memmove(hh, eth, 2 * ETH_ALEN); pn = tx_sa_update_pn(tx_sa, secy); @@ -696,7 +706,7 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb, kfree_skb(skb); return ERR_PTR(-ENOLINK); } - macsec_fill_sectag(hh, secy, pn); + macsec_fill_sectag(hh, secy, pn, sci_present); macsec_set_shortlen(hh, unprotected_len - 2 * ETH_ALEN); skb_put(skb, secy->icv_len); @@ -726,10 +736,10 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb, skb_to_sgvec(skb, sg, 0, skb->len); if (tx_sc->encrypt) { - int len = skb->len - macsec_hdr_len(tx_sc->send_sci) - + int len = skb->len - macsec_hdr_len(sci_present) - secy->icv_len; aead_request_set_crypt(req, sg, sg, len, iv); - aead_request_set_ad(req, macsec_hdr_len(tx_sc->send_sci)); + aead_request_set_ad(req, macsec_hdr_len(sci_present)); } else { aead_request_set_crypt(req, sg, sg, 0, iv); aead_request_set_ad(req, skb->len - secy->icv_len); -- cgit v1.2.3-59-g8ed1b From e30520c2b075fae3977d482a31b54b0256160a57 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Mon, 24 Oct 2016 17:54:18 +0200 Subject: kalmia: avoid potential uninitialized variable use MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The kalmia_send_init_packet() returns zero or a negative return code, but gcc has no way of knowing that there cannot be a positive return code, so it determines that copying the ethernet address at the end of kalmia_bind() will access uninitialized data: drivers/net/usb/kalmia.c: In function ‘kalmia_bind’: arch/x86/include/asm/string_32.h:78:22: error: ‘*((void *)ðernet_addr+4)’ may be used 
uninitialized in this function [-Werror=maybe-uninitialized] *((short *)to + 2) = *((short *)from + 2); ^ drivers/net/usb/kalmia.c:138:5: note: ‘*((void *)ðernet_addr+4)’ was declared here This warning is harmless, but for consistency, we should make the check for the return code match what the driver does everywhere else and just progate it, which then gets rid of the warning. Signed-off-by: Arnd Bergmann Signed-off-by: David S. Miller --- drivers/net/usb/kalmia.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/usb/kalmia.c b/drivers/net/usb/kalmia.c index 5662babf0583..3e37724d30ae 100644 --- a/drivers/net/usb/kalmia.c +++ b/drivers/net/usb/kalmia.c @@ -151,7 +151,7 @@ kalmia_bind(struct usbnet *dev, struct usb_interface *intf) status = kalmia_init_and_get_ethernet_addr(dev, ethernet_addr); - if (status < 0) { + if (status) { usb_set_intfdata(intf, NULL); usb_driver_release_interface(driver_of(intf), intf); return status; -- cgit v1.2.3-59-g8ed1b From c121f72a66c5f92fbe2fc53baa274eef39875cec Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Mon, 24 Oct 2016 23:46:18 +0100 Subject: net: bgmac: fix spelling mistake: "connecton" -> "connection" trivial fix to spelling mistake in dev_err message Signed-off-by: Colin Ian King Acked-by: Jon Mason Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bgmac.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c index 856379cbb402..31ca204b38d2 100644 --- a/drivers/net/ethernet/broadcom/bgmac.c +++ b/drivers/net/ethernet/broadcom/bgmac.c @@ -1449,7 +1449,7 @@ static int bgmac_phy_connect(struct bgmac *bgmac) phy_dev = phy_connect(bgmac->net_dev, bus_id, &bgmac_adjust_link, PHY_INTERFACE_MODE_MII); if (IS_ERR(phy_dev)) { - dev_err(bgmac->dev, "PHY connecton failed\n"); + dev_err(bgmac->dev, "PHY connection failed\n"); return PTR_ERR(phy_dev); } -- cgit v1.2.3-59-g8ed1b From a3b8cb1f84a0de95323902c76bab245675d6d218 Mon Sep 17 00:00:00 2001 From: Emil Tantilov Date: Thu, 22 Sep 2016 15:06:52 -0700 Subject: ixgbe: fix panic when using macvlan with l2-fwd-offload enabled Fix NULL pointer dereference in the case where a macvlan interface is brought up while the PF is still down: BUG: unable to handle kernel NULL pointer dereference at 0000000000000010 IP: [] ixgbe_alloc_rx_buffers+0x42/0x1a0 [ixgbe] Call Trace: [] ixgbe_configure_rx_ring+0x2eb/0x3d0 [ixgbe] [] ixgbe_fwd_ring_up+0xd1/0x380 [ixgbe] [] ixgbe_fwd_add+0x149/0x230 [ixgbe] [] macvlan_open+0x260/0x2b0 [macvlan] Reported-by: Matthew Garrett Signed-off-by: Emil Tantilov Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index a244d9a67264..bd93d823cc25 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -9135,10 +9135,14 @@ static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev) goto fwd_add_err; fwd_adapter->pool = pool; fwd_adapter->real_adapter = adapter; - err = ixgbe_fwd_ring_up(vdev, fwd_adapter); - if (err) - goto fwd_add_err; - netif_tx_start_all_queues(vdev); + + if (netif_running(pdev)) { + err = ixgbe_fwd_ring_up(vdev, fwd_adapter); + if (err) + goto fwd_add_err; + 
netif_tx_start_all_queues(vdev); + } + return fwd_adapter; fwd_add_err: /* unwind counter and free adapter struct */ -- cgit v1.2.3-59-g8ed1b From ea6acb7ef78960e4b6f1cd8c4162a5e490e83dcd Mon Sep 17 00:00:00 2001 From: David Ertman Date: Tue, 20 Sep 2016 07:10:50 -0700 Subject: i40e: Fix configure TCs after initial DCB disable in commit a036244c068612a43fa8c0f33a0eb4daa4d8dba0 a fix was put into place to avoid a kernel panic when a non- supported traffic class configuration was put into place and then lldp was enabled/disabled on the link partner switch. This fix caused it to be necessary to unload/reload the driver to reenable DCB once a supported TC config was in place. The root cause of the original panic was that the function i40e_pf_get_default_tc was allowing for a default TC other than TC 0, and only TC 0 is supported as a default. This patch removes the get_default_tc function and replaces it with a #define since there is only one TC supported as a default. Change-Id: I448371974e946386d0a7718d73668b450b7c72ef Signed-off-by: Dave Ertman Tested-by: Ronald Bynoe Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e.h | 1 + drivers/net/ethernet/intel/i40e/i40e_main.c | 31 ++++------------------------- 2 files changed, 5 insertions(+), 27 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 2030d7c1dc94..6d61e443bdf8 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -92,6 +92,7 @@ #define I40E_AQ_LEN 256 #define I40E_AQ_WORK_LIMIT 66 /* max number of VFs + a little */ #define I40E_MAX_USER_PRIORITY 8 +#define I40E_DEFAULT_TRAFFIC_CLASS BIT(0) #define I40E_DEFAULT_MSG_ENABLE 4 #define I40E_QUEUE_WAIT_RETRY_LIMIT 10 #define I40E_INT_NAME_STR_LEN (IFNAMSIZ + 16) diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index ac1faee2a5b8..050d005d2ede 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -4640,29 +4640,6 @@ static u8 i40e_pf_get_num_tc(struct i40e_pf *pf) return num_tc; } -/** - * i40e_pf_get_default_tc - Get bitmap for first enabled TC - * @pf: PF being queried - * - * Return a bitmap for first enabled traffic class for this PF. 
- **/ -static u8 i40e_pf_get_default_tc(struct i40e_pf *pf) -{ - u8 enabled_tc = pf->hw.func_caps.enabled_tcmap; - u8 i = 0; - - if (!enabled_tc) - return 0x1; /* TC0 */ - - /* Find the first enabled TC */ - for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { - if (enabled_tc & BIT(i)) - break; - } - - return BIT(i); -} - /** * i40e_pf_get_pf_tc_map - Get bitmap for enabled traffic classes * @pf: PF being queried @@ -4673,7 +4650,7 @@ static u8 i40e_pf_get_tc_map(struct i40e_pf *pf) { /* If DCB is not enabled for this PF then just return default TC */ if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) - return i40e_pf_get_default_tc(pf); + return I40E_DEFAULT_TRAFFIC_CLASS; /* SFP mode we want PF to be enabled for all TCs */ if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) @@ -4683,7 +4660,7 @@ static u8 i40e_pf_get_tc_map(struct i40e_pf *pf) if (pf->hw.func_caps.iscsi) return i40e_get_iscsi_tc_map(pf); else - return i40e_pf_get_default_tc(pf); + return I40E_DEFAULT_TRAFFIC_CLASS; } /** @@ -5029,7 +5006,7 @@ static void i40e_dcb_reconfigure(struct i40e_pf *pf) if (v == pf->lan_vsi) tc_map = i40e_pf_get_tc_map(pf); else - tc_map = i40e_pf_get_default_tc(pf); + tc_map = I40E_DEFAULT_TRAFFIC_CLASS; #ifdef I40E_FCOE if (pf->vsi[v]->type == I40E_VSI_FCOE) tc_map = i40e_get_fcoe_tc_map(pf); @@ -5717,7 +5694,7 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf, u8 type; /* Not DCB capable or capability disabled */ - if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) + if (!(pf->flags & I40E_FLAG_DCB_CAPABLE)) return ret; /* Ignore if event is not for Nearest Bridge */ -- cgit v1.2.3-59-g8ed1b From 4c95aa5d8fc34be18fcab01b1c6251c8c2b61520 Mon Sep 17 00:00:00 2001 From: Guilherme G Piccoli Date: Thu, 22 Sep 2016 10:03:58 -0300 Subject: i40e: disable MSI-X interrupts if we cannot reserve enough vectors If we fail on allocating enough MSI-X interrupts, we should disable them since they were previously enabled in this point of code. Not disabling them can lead to WARN_ON() being triggered and subsequent failure in enabling MSI as a fallback; the below message was shown without this patch while we played with interrupt allocation in i40e driver: [ 21.461346] sysfs: cannot create duplicate filename '/devices/pci0007:00/0007:00:00.0/0007:01:00.3/msi_irqs' [ 21.461459] ------------[ cut here ]------------ [ 21.461514] WARNING: CPU: 64 PID: 1155 at fs/sysfs/dir.c:31 sysfs_warn_dup+0x88/0xc0 Also, we noticed that without this patch, if we modprobe the module without enough MSI-X interrupts (triggering the above warning), unload the module and re-load it again, we got a crash on the system. Signed-off-by: Guilherme G Piccoli Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_main.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 050d005d2ede..6abc1301a8e4 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -7684,6 +7684,7 @@ static int i40e_init_msix(struct i40e_pf *pf) pf->flags &= ~I40E_FLAG_MSIX_ENABLED; kfree(pf->msix_entries); pf->msix_entries = NULL; + pci_disable_msix(pf->pdev); return -ENODEV; } else if (v_actual == I40E_MIN_MSIX) { -- cgit v1.2.3-59-g8ed1b From 599b076d15ee3ead7af20fc907079df00b2d59a0 Mon Sep 17 00:00:00 2001 From: Huaibin Wang Date: Mon, 26 Sep 2016 09:51:18 +0200 Subject: i40e: fix call of ndo_dflt_bridge_getlink() Order of arguments is wrong. 
The wrong code has been introduced by commit 7d4f8d871ab1, but is compiled only since commit 9df70b66418e. Note that this may break netlink dumps. Fixes: 9df70b66418e ("i40e: Remove incorrect #ifdef's") Fixes: 7d4f8d871ab1 ("switchdev; add VLAN support for port's bridge_getlink") CC: Carolyn Wyborny Signed-off-by: Huaibin Wang Signed-off-by: Nicolas Dichtel Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 6abc1301a8e4..31c97e3937a4 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -9034,7 +9034,7 @@ static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, return 0; return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode, - nlflags, 0, 0, filter_mask, NULL); + 0, 0, nlflags, filter_mask, NULL); } /* Hardware supports L4 tunnel length of 128B (=2^7) which includes -- cgit v1.2.3-59-g8ed1b From 867dfe342118b1ea0256a85f7c0d9ceb0ead032a Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 25 Oct 2016 17:52:04 +0200 Subject: nvdimm: make CONFIG_NVDIMM_DAX 'bool' A bugfix just tried to address a randconfig build problem and introduced a variant of the same problem: with CONFIG_LIBNVDIMM=y and CONFIG_NVDIMM_DAX=m, the nvdimm module now fails to link: drivers/nvdimm/built-in.o: In function `to_nd_device_type': bus.c:(.text+0x1b5d): undefined reference to `is_nd_dax' drivers/nvdimm/built-in.o: In function `nd_region_notify_driver_action.constprop.2': region_devs.c:(.text+0x6b6c): undefined reference to `is_nd_dax' region_devs.c:(.text+0x6b8c): undefined reference to `to_nd_dax' drivers/nvdimm/built-in.o: In function `nd_region_probe': region.c:(.text+0x70f3): undefined reference to `nd_dax_create' drivers/nvdimm/built-in.o: In function `mode_show': namespace_devs.c:(.text+0xa196): undefined reference to `is_nd_dax' drivers/nvdimm/built-in.o: In function `nvdimm_namespace_common_probe': (.text+0xa55f): undefined reference to `is_nd_dax' drivers/nvdimm/built-in.o: In function `nvdimm_namespace_common_probe': (.text+0xa56e): undefined reference to `to_nd_dax' This reverts the earlier fix, making NVDIMM_DAX a 'bool' option again as it should be (it gets linked into the libnvdimm module). To fix the original problem, I'm adding a dependency on LIBNVDIMM to DEV_DAX_PMEM, which ensures we can't have that one built-in if the rest is a module. Fixes: 4e65e9381c7a ("/dev/dax: fix Kconfig dependency build breakage") Signed-off-by: Arnd Bergmann Reviewed-by: Ross Zwisler Signed-off-by: Dan Williams --- drivers/dax/Kconfig | 2 +- drivers/nvdimm/Kconfig | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/dax/Kconfig b/drivers/dax/Kconfig index daadd20aa936..3e2ab3b14eea 100644 --- a/drivers/dax/Kconfig +++ b/drivers/dax/Kconfig @@ -14,7 +14,7 @@ if DEV_DAX config DEV_DAX_PMEM tristate "PMEM DAX: direct access to persistent memory" - depends on NVDIMM_DAX + depends on LIBNVDIMM && NVDIMM_DAX default DEV_DAX help Support raw access to persistent memory. 
Note that this diff --git a/drivers/nvdimm/Kconfig b/drivers/nvdimm/Kconfig index 8b2b740d6679..124c2432ac9c 100644 --- a/drivers/nvdimm/Kconfig +++ b/drivers/nvdimm/Kconfig @@ -89,7 +89,7 @@ config NVDIMM_PFN Select Y if unsure config NVDIMM_DAX - tristate "NVDIMM DAX: Raw access to persistent memory" + bool "NVDIMM DAX: Raw access to persistent memory" default LIBNVDIMM depends on NVDIMM_PFN help -- cgit v1.2.3-59-g8ed1b From 52e73eb2872c9af6f382b2b22954ca8214397a4e Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Thu, 27 Oct 2016 17:04:05 -0700 Subject: device-dax: fix percpu_ref_exit ordering We need to wait until the percpu_ref is released before exit. Otherwise, we sometimes lose the race and trigger this new warning that was added in v4.9 (commit a67823c1ed10 "percpu-refcount: init ->confirm_switch member properly"): WARNING: CPU: 0 PID: 3629 at lib/percpu-refcount.c:107 percpu_ref_exit+0x51/0x60 [..] Call Trace: [] dump_stack+0x85/0xc2 [] __warn+0xcb/0xf0 [] warn_slowpath_null+0x1d/0x20 [] percpu_ref_exit+0x51/0x60 [] dax_pmem_percpu_exit+0x1a/0x50 [dax_pmem] [] devm_action_release+0xf/0x20 Cc: Fixes: ab68f2622136 ("/dev/dax, pmem: direct access to persistent memory") Signed-off-by: Dan Williams --- drivers/dax/pmem.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/dax/pmem.c b/drivers/dax/pmem.c index 9630d8837ba9..4a15fa5df98b 100644 --- a/drivers/dax/pmem.c +++ b/drivers/dax/pmem.c @@ -44,7 +44,6 @@ static void dax_pmem_percpu_exit(void *data) dev_dbg(dax_pmem->dev, "%s\n", __func__); percpu_ref_exit(ref); - wait_for_completion(&dax_pmem->cmp); } static void dax_pmem_percpu_kill(void *data) @@ -54,6 +53,7 @@ static void dax_pmem_percpu_kill(void *data) dev_dbg(dax_pmem->dev, "%s\n", __func__); percpu_ref_kill(ref); + wait_for_completion(&dax_pmem->cmp); } static int dax_pmem_probe(struct device *dev) -- cgit v1.2.3-59-g8ed1b From ee52c44dee63ff2686a7b0d98fff7c80852ac022 Mon Sep 17 00:00:00 2001 From: Uwe Kleine-König Date: Thu, 27 Oct 2016 17:47:04 -0700 Subject: block: DAC960: print a hex number after a 0x prefix MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It makes the message hard to interpret correctly if a base 10 number is prefixed by 0x. So change to a hex number. 
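To make the confusion concrete: with an I/O base of 0x3f8 (a value invented for the example), the old format renders the decimal value 1016 behind a hex prefix, and a reader naturally parses "0x1016" as 4118. A tiny standalone illustration, not the driver code itself:

    #include <stdio.h>

    int main(void)
    {
            unsigned long io_address = 0x3f8;       /* 1016 in decimal */

            /* old style: decimal conversion behind a hex prefix,
             * prints "IO port 0x1016 busy", which reads as hex 4118
             */
            printf("IO port 0x%lu busy\n", io_address);

            /* fixed: prefix and conversion agree, prints "IO port 0x3f8 busy" */
            printf("IO port 0x%lx busy\n", io_address);
            return 0;
    }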
Link: http://lkml.kernel.org/r/20161026125658.25728-3-u.kleine-koenig@pengutronix.de Signed-off-by: Uwe Kleine-König Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/block/DAC960.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c index 811e11c82f32..0809cda93cc0 100644 --- a/drivers/block/DAC960.c +++ b/drivers/block/DAC960.c @@ -2954,7 +2954,7 @@ DAC960_DetectController(struct pci_dev *PCI_Device, case DAC960_PD_Controller: if (!request_region(Controller->IO_Address, 0x80, Controller->FullModelName)) { - DAC960_Error("IO port 0x%d busy for Controller at\n", + DAC960_Error("IO port 0x%lx busy for Controller at\n", Controller, Controller->IO_Address); goto Failure; } @@ -2990,7 +2990,7 @@ DAC960_DetectController(struct pci_dev *PCI_Device, case DAC960_P_Controller: if (!request_region(Controller->IO_Address, 0x80, Controller->FullModelName)){ - DAC960_Error("IO port 0x%d busy for Controller at\n", + DAC960_Error("IO port 0x%lx busy for Controller at\n", Controller, Controller->IO_Address); goto Failure; } -- cgit v1.2.3-59-g8ed1b From 9105585d13bd2946f0eb2a664e32ec9a9b23bf1b Mon Sep 17 00:00:00 2001 From: Uwe Kleine-König Date: Thu, 27 Oct 2016 17:47:07 -0700 Subject: ipack: print a hex number after a 0x prefix MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It makes the result hard to interpret correctly if a base 10 number is prefixed by 0x. So change to a hex number. Link: http://lkml.kernel.org/r/20161026125658.25728-4-u.kleine-koenig@pengutronix.de Signed-off-by: Uwe Kleine-König Cc: Samuel Iglesias Gonsalvez Cc: Jens Taprogge Cc: Greg Kroah-Hartman Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/ipack/ipack.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/ipack/ipack.c b/drivers/ipack/ipack.c index c0e7b624ce54..12102448fddd 100644 --- a/drivers/ipack/ipack.c +++ b/drivers/ipack/ipack.c @@ -178,7 +178,7 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, idev->id_vendor, idev->id_device); } -ipack_device_attr(id_format, "0x%hhu\n"); +ipack_device_attr(id_format, "0x%hhx\n"); static DEVICE_ATTR_RO(id); static DEVICE_ATTR_RO(id_device); -- cgit v1.2.3-59-g8ed1b From 8e819101ce6fcc58801c9a813ea99c4da0255eef Mon Sep 17 00:00:00 2001 From: Dimitri Sivanich Date: Thu, 27 Oct 2016 17:47:12 -0700 Subject: drivers/misc/sgi-gru/grumain.c: remove bogus 0x prefix from printk MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Would like to have this be a decimal number. 
Link: http://lkml.kernel.org/r/20161026134746.GA30169@sgi.com Signed-off-by: Dimitri Sivanich Reported-by: Uwe Kleine-König Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/misc/sgi-gru/grumain.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/misc/sgi-gru/grumain.c b/drivers/misc/sgi-gru/grumain.c index 1525870f460a..33741ad4a74a 100644 --- a/drivers/misc/sgi-gru/grumain.c +++ b/drivers/misc/sgi-gru/grumain.c @@ -283,7 +283,7 @@ static void gru_unload_mm_tracker(struct gru_state *gru, spin_lock(&gru->gs_asid_lock); BUG_ON((asids->mt_ctxbitmap & ctxbitmap) != ctxbitmap); asids->mt_ctxbitmap ^= ctxbitmap; - gru_dbg(grudev, "gid %d, gts %p, gms %p, ctxnum 0x%d, asidmap 0x%lx\n", + gru_dbg(grudev, "gid %d, gts %p, gms %p, ctxnum %d, asidmap 0x%lx\n", gru->gs_gid, gts, gms, gts->ts_ctxnum, gms->ms_asidmap[0]); spin_unlock(&gru->gs_asid_lock); spin_unlock(&gms->ms_asid_lock); -- cgit v1.2.3-59-g8ed1b From 9bcffe7575b721d7b6d9b3090fe18809d9806e78 Mon Sep 17 00:00:00 2001 From: Richard Genoud Date: Thu, 27 Oct 2016 18:04:06 +0200 Subject: tty/serial: at91: fix hardware handshake on Atmel platforms MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit After commit 1cf6e8fc8341 ("tty/serial: at91: fix RTS line management when hardware handshake is enabled"), the hardware handshake wasn't functional anymore on Atmel platforms (beside SAMA5D2). To understand why, one has to understand the flag ATMEL_US_USMODE_HWHS first: Before commit 1cf6e8fc8341 ("tty/serial: at91: fix RTS line management when hardware handshake is enabled"), this flag was never set. Thus, the CTS/RTS where only handled by serial_core (and everything worked just fine). This commit introduced the use of the ATMEL_US_USMODE_HWHS flag, enabling it for all boards when the user space enables flow control. When the ATMEL_US_USMODE_HWHS is set, the Atmel USART controller handles a part of the flow control job: - disable the transmitter when the CTS pin gets high. - drive the RTS pin high when the DMA buffer transfer is completed or PDC RX buffer full or RX FIFO is beyond threshold. (depending on the controller version). NB: This feature is *not* mandatory for the flow control to work. (Nevertheless, it's very useful if low latencies are needed.) Now, the specifics of the ATMEL_US_USMODE_HWHS flag: - For platforms with DMAC and no FIFOs (sam9x25, sam9x35, sama5D3, sama5D4, sam9g15, sam9g25, sam9g35)* this feature simply doesn't work. ( source: https://lkml.org/lkml/2016/9/7/598 ) Tested it on sam9g35, the RTS pins always stays up, even when RXEN=1 or a new DMA transfer descriptor is set. => ATMEL_US_USMODE_HWHS must not be used for those platforms - For platforms with a PDC (sam926{0,1,3}, sam9g10, sam9g20, sam9g45, sam9g46)*, there's another kind of problem. Once the flag ATMEL_US_USMODE_HWHS is set, the RTS pin can't be driven anymore via RTSEN/RTSDIS in USART Control Register. The RTS pin can only be driven by enabling/disabling the receiver or setting RCR=RNCR=0 in the PDC (Receive (Next) Counter Register). => Doing this is beyond the scope of this patch and could add other bugs, so the original (and working) behaviour should be set for those platforms (meaning ATMEL_US_USMODE_HWHS flag should be unset). - For platforms with a FIFO (sama5d2)*, the RTS pin is driven according to the RX FIFO thresholds, and can be also driven by RTSEN/RTSDIS in USART Control Register. No problem here. 
(This was the use case of commit 1cf6e8fc8341 ("tty/serial: at91: fix RTS line management when hardware handshake is enabled")) NB: If the CTS pin declared as a GPIO in the DTS, (for instance cts-gpios = <&pioA PIN_PB31 GPIO_ACTIVE_LOW>), the transmitter will be disabled. => ATMEL_US_USMODE_HWHS flag can be set for this platform ONLY IF the CTS pin is not a GPIO. So, the only case when ATMEL_US_USMODE_HWHS can be enabled is when (atmel_use_fifo(port) && !mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_CTS)) Tested on all Atmel USART controller flavours: AT91SAM9G35-CM (DMAC flavour), AT91SAM9G20-EK (PDC flavour), SAMA5D2xplained (FIFO flavour). * the list may not be exhaustive Cc: #4.4+ (beware, missing atmel_port variable) Fixes: 1cf6e8fc8341 ("tty/serial: at91: fix RTS line management when hardware handshake is enabled") Signed-off-by: Richard Genoud Acked-by: Alexandre Belloni Acked-by: Cyrille Pitchen Acked-by: Uwe Kleine-König Acked-by: Nicolas Ferre Signed-off-by: Greg Kroah-Hartman --- drivers/tty/serial/atmel_serial.c | 26 ++++++++++++++++++++++---- 1 file changed, 22 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c index fd8aa1f4ba78..168b10cad47b 100644 --- a/drivers/tty/serial/atmel_serial.c +++ b/drivers/tty/serial/atmel_serial.c @@ -2132,11 +2132,29 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios, mode |= ATMEL_US_USMODE_RS485; } else if (termios->c_cflag & CRTSCTS) { /* RS232 with hardware handshake (RTS/CTS) */ - if (atmel_use_dma_rx(port) && !atmel_use_fifo(port)) { - dev_info(port->dev, "not enabling hardware flow control because DMA is used"); - termios->c_cflag &= ~CRTSCTS; - } else { + if (atmel_use_fifo(port) && + !mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_CTS)) { + /* + * with ATMEL_US_USMODE_HWHS set, the controller will + * be able to drive the RTS pin high/low when the RX + * FIFO is above RXFTHRES/below RXFTHRES2. + * It will also disable the transmitter when the CTS + * pin is high. + * This mode is not activated if CTS pin is a GPIO + * because in this case, the transmitter is always + * disabled (there must be an internal pull-up + * responsible for this behaviour). + * If the RTS pin is a GPIO, the controller won't be + * able to drive it according to the FIFO thresholds, + * but it will be handled by the driver. + */ mode |= ATMEL_US_USMODE_HWHS; + } else { + /* + * For platforms without FIFO, the flow control is + * handled by the driver. + */ + mode |= ATMEL_US_USMODE_NORMAL; } } else { /* RS232 without hadware handshake */ -- cgit v1.2.3-59-g8ed1b From 4dda864d73079a1eb01fab4ec29b97db150163bf Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Fri, 28 Oct 2016 07:07:47 -0500 Subject: tty: serial_core: Fix serial console crash on port shutdown MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The port->console flag is always false, as uart_console() is called before the serial console has been registered. Hence for a serial port used as the console, uart_tty_port_shutdown() will still be called when userspace closes the port, powering it down. This may lead to a system lock up when the serial console driver writes to the serial port's registers. To fix this, move the setting of port->console after the call to uart_configure_port(), which registers the serial console. 
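uart_console() boils down to an index comparison; the sketch below is reconstructed from memory of include/linux/serial_core.h, so treat the details as approximate:

    /* simplified shape of uart_console(); not the literal source */
    static inline bool uart_console(struct uart_port *port)
    {
            return port->cons && port->cons->index == port->line;
    }

Until uart_configure_port() has registered the console for the port, cons->index has not been matched to the line yet (it is typically still -1, or cons is NULL altogether), so the comparison cannot succeed; evaluating it any earlier therefore always yields false.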
Fixes: 761ed4a94582ab29 ("tty: serial_core: convert uart_close to use tty_port_close") Reported-by: Niklas Söderlund Signed-off-by: Geert Uytterhoeven Acked-by: Rob Herring Tested-by: Mugunthan V N Tested-by: Niklas Söderlund [robh: rebased on tty-linus] Signed-off-by: Rob Herring Signed-off-by: Greg Kroah-Hartman --- drivers/tty/serial/serial_core.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers') diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c index 664c99aeeca5..ce8899c13af3 100644 --- a/drivers/tty/serial/serial_core.c +++ b/drivers/tty/serial/serial_core.c @@ -2759,6 +2759,8 @@ int uart_add_one_port(struct uart_driver *drv, struct uart_port *uport) uart_configure_port(drv, state, uport); + port->console = uart_console(uport); + num_groups = 2; if (uport->attr_group) num_groups++; -- cgit v1.2.3-59-g8ed1b From d0f4bce2bce7e998abc906f3590e9032af7a41ba Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Fri, 28 Oct 2016 07:07:48 -0500 Subject: tty: serial_core: fix NULL struct tty pointer access in uart_write_wakeup Since commit 761ed4a94582ab29 ("tty: serial_core: convert uart_close to use tty_port_close"), the serial console is broken on various systems and typing "reboot" splats the following on the serial console: INIT: Sending p[ 427.863916] BUG: unable to handle kernel NULL pointer dereference at 00000000000001e0 [ 427.885156] IP: [] tty_wakeup+0xc/0x70 [ 427.898337] PGD 0 [ 427.902051] [ 427.907498] Oops: 0000 [#1] PREEMPT SMP [ 427.917635] Modules linked in: nfsv3 nfs_acl nfs fscache lockd sunrpc grace edd af_packet cpufreq_conservative cpufreq_userspace cpufreq_powersave fuse loop md_mod dm_mod joydev hid_generic usbhid ipmi_ssif ohci_pci ohci_hcd ehci_pci ehci_hcd e1000e ptp firewire_ohci edac_core pps_core tpm_infineon sp5100_tco firewire_core acpi_cpufreq serio_raw pcspkr fjes usbcore shpchp edac_mce_amd tpm_tis ipmi_si tpm_tis_core i2c_piix4 k10temp sg ipmi_msghandler tpm sr_mod button cdrom kvm_amd kvm irqbypass crc_itu_t ast ttm drm_kms_helper drm fb_sys_fops sysimgblt sysfillrect syscopyarea i2c_algo_bit scsi_dh_rdac scsi_dh_alua scsi_dh_emc scsi_dh_hp_sw ata_generic pata_atiixp [ 428.054179] CPU: 0 PID: 0 Comm: swapper/0 Not tainted 4.9.0-rc1-1.g73e3f23-default #1 [ 428.072868] Hardware name: System manufacturer System Product Name/KGP(M)E-D16, BIOS 0902 12/03/2010 [ 428.094755] task: ffffffffa2c0d500 task.stack: ffffffffa2c00000 [ 428.109717] RIP: 0010:[] [] tty_wakeup+0xc/0x70 [ 428.128407] RSP: 0018:ffff9a1a5fc03df8 EFLAGS: 00010086 [ 428.142184] RAX: ffff9a1857258000 RBX: ffffffffa3050ea0 RCX: 0000000000000000 [ 428.159649] RDX: 000000000000001b RSI: 0000000000000000 RDI: 0000000000000000 [ 428.177109] RBP: ffff9a1a5fc03e08 R08: 0000000000000000 R09: 0000000000000000 [ 428.194547] R10: 0000000000021c77 R11: 0000000000000000 R12: ffff9a1857258000 [ 428.212002] R13: 0000000000000000 R14: 0000000000000020 R15: 0000000000000020 [ 428.229481] FS: 0000000000000000(0000) GS:ffff9a1a5fc00000(0000) knlGS:0000000000000000 [ 428.248938] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 428.263726] CR2: 00000000000001e0 CR3: 0000000390c06000 CR4: 00000000000006f0 [ 428.281331] Stack: [ 428.288696] ffffffffa3050ea0 ffff9a1857258000 ffff9a1a5fc03e18 ffffffffa24e0ab1 [ 428.307064] ffff9a1a5fc03e40 ffffffffa24e8865 ffffffffa3050ea0 00000000000000c2 [ 428.325456] 0000000000000046 ffff9a1a5fc03e78 ffffffffa24e8a5f ffffffffa3050ea0 [ 428.343905] Call Trace: [ 428.352319] [ 428.356216] [] uart_write_wakeup+0x21/0x30 The problem is for console 
ports, the serial port is not shutdown and interrupts may fire after the struct tty is gone. Simply calling the tty_port helper tty_port_tty_wakeup instead of tty_wakeup directly will ensure there is a valid struct tty. Fixes: 761ed4a94582ab29 ("tty: serial_core: convert uart_close to use tty_port_close") Reported-by: Borislav Petkov Reported-by: Mike Galbraith Cc: Jiri Slaby Cc: Greg Kroah-Hartman Cc: linux-serial@vger.kernel.org Signed-off-by: Rob Herring Signed-off-by: Greg Kroah-Hartman --- drivers/tty/serial/serial_core.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c index ce8899c13af3..f2303f390345 100644 --- a/drivers/tty/serial/serial_core.c +++ b/drivers/tty/serial/serial_core.c @@ -111,7 +111,7 @@ void uart_write_wakeup(struct uart_port *port) * closed. No cookie for you. */ BUG_ON(!state); - tty_wakeup(state->port.tty); + tty_port_tty_wakeup(&state->port); } static void uart_stop(struct tty_struct *tty) @@ -632,7 +632,7 @@ static void uart_flush_buffer(struct tty_struct *tty) if (port->ops->flush_buffer) port->ops->flush_buffer(port); uart_port_unlock(port, flags); - tty_wakeup(tty); + tty_port_tty_wakeup(&state->port); } /* -- cgit v1.2.3-59-g8ed1b From 6ad37567b6b886121e250036e489d82cde5e5e94 Mon Sep 17 00:00:00 2001 From: Martyn Welch Date: Fri, 21 Oct 2016 17:36:19 +0100 Subject: vme: vme_get_size potentially returning incorrect value on failure The function vme_get_size returns the size of the window to the caller, however it doesn't check the return value of the call to vme_master_get. Return 0 on failure rather than anything else. Suggested-by: Dan Carpenter Signed-off-by: Martyn Welch Signed-off-by: Greg Kroah-Hartman --- drivers/vme/vme.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers') diff --git a/drivers/vme/vme.c b/drivers/vme/vme.c index 15b64076bc26..bdbadaa47ef3 100644 --- a/drivers/vme/vme.c +++ b/drivers/vme/vme.c @@ -156,12 +156,16 @@ size_t vme_get_size(struct vme_resource *resource) case VME_MASTER: retval = vme_master_get(resource, &enabled, &base, &size, &aspace, &cycle, &dwidth); + if (retval) + return 0; return size; break; case VME_SLAVE: retval = vme_slave_get(resource, &enabled, &base, &size, &buf_base, &aspace, &cycle); + if (retval) + return 0; return size; break; -- cgit v1.2.3-59-g8ed1b From a7a7aeefbca2982586ba2c9fd7739b96416a6d1d Mon Sep 17 00:00:00 2001 From: Gerald Schaefer Date: Wed, 19 Oct 2016 12:29:41 +0200 Subject: GenWQE: Fix bad page access during abort of resource allocation When interrupting an application which was allocating DMAable memory, it was possible, that the DMA memory was deallocated twice, leading to the error symptoms below. Thanks to Gerald, who analyzed the problem and provided this patch. I agree with his analysis of the problem: ddcb_cmd_fixups() -> genwqe_alloc_sync_sgl() (fails in f/lpage, but sgl->sgl != NULL and f/lpage maybe also != NULL) -> ddcb_cmd_cleanup() -> genwqe_free_sync_sgl() (double free, because sgl->sgl != NULL and f/lpage maybe also != NULL) In this scenario we would have exactly the kind of double free that would explain the WARNING / Bad page state, and as expected it is caused by broken error handling (cleanup). Using the Ubuntu git source, tag Ubuntu-4.4.0-33.52, he was able to reproduce the "Bad page state" issue, and with the patch on top he could not reproduce it any more. 
------------[ cut here ]------------ WARNING: at /build/linux-o03cxz/linux-4.4.0/arch/s390/include/asm/pci_dma.h:141 Modules linked in: qeth_l2 ghash_s390 prng aes_s390 des_s390 des_generic sha512_s390 sha256_s390 sha1_s390 sha_common genwqe_card qeth crc_itu_t qdio ccwgroup vmur dm_multipath dasd_eckd_mod dasd_mod CPU: 2 PID: 3293 Comm: genwqe_gunzip Not tainted 4.4.0-33-generic #52-Ubuntu task: 0000000032c7e270 ti: 00000000324e4000 task.ti: 00000000324e4000 Krnl PSW : 0404c00180000000 0000000000156346 (dma_update_cpu_trans+0x9e/0xa8) R:0 T:1 IO:0 EX:0 Key:0 M:1 W:0 P:0 AS:3 CC:0 PM:0 EA:3 Krnl GPRS: 00000000324e7bcd 0000000000c3c34a 0000000027628298 000000003215b400 0000000000000400 0000000000001fff 0000000000000400 0000000116853000 07000000324e7b1e 0000000000000001 0000000000000001 0000000000000001 0000000000001000 0000000116854000 0000000000156402 00000000324e7a38 Krnl Code: 000000000015633a: 95001000 cli 0(%r1),0 000000000015633e: a774ffc3 brc 7,1562c4 #0000000000156342: a7f40001 brc 15,156344 >0000000000156346: 92011000 mvi 0(%r1),1 000000000015634a: a7f4ffbd brc 15,1562c4 000000000015634e: 0707 bcr 0,%r7 0000000000156350: c00400000000 brcl 0,156350 0000000000156356: eb7ff0500024 stmg %r7,%r15,80(%r15) Call Trace: ([<00000000001563e0>] dma_update_trans+0x90/0x228) [<00000000001565dc>] s390_dma_unmap_pages+0x64/0x160 [<00000000001567c2>] s390_dma_free+0x62/0x98 [<000003ff801310ce>] __genwqe_free_consistent+0x56/0x70 [genwqe_card] [<000003ff801316d0>] genwqe_free_sync_sgl+0xf8/0x160 [genwqe_card] [<000003ff8012bd6e>] ddcb_cmd_cleanup+0x86/0xa8 [genwqe_card] [<000003ff8012c1c0>] do_execute_ddcb+0x110/0x348 [genwqe_card] [<000003ff8012c914>] genwqe_ioctl+0x51c/0xc20 [genwqe_card] [<000000000032513a>] do_vfs_ioctl+0x3b2/0x518 [<0000000000325344>] SyS_ioctl+0xa4/0xb8 [<00000000007b86c6>] system_call+0xd6/0x264 [<000003ff9e8e520a>] 0x3ff9e8e520a Last Breaking-Event-Address: [<0000000000156342>] dma_update_cpu_trans+0x9a/0xa8 ---[ end trace 35996336235145c8 ]--- BUG: Bad page state in process jbd2/dasdb1-8 pfn:3215b page:000003d100c856c0 count:-1 mapcount:0 mapping: (null) index:0x0 flags: 0x3fffc0000000000() page dumped because: nonzero _count Signed-off-by: Gerald Schaefer Signed-off-by: Frank Haverkamp Cc: stable Signed-off-by: Greg Kroah-Hartman --- drivers/misc/genwqe/card_utils.c | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/misc/genwqe/card_utils.c b/drivers/misc/genwqe/card_utils.c index 8a679ecc8fd1..fc2794b513fa 100644 --- a/drivers/misc/genwqe/card_utils.c +++ b/drivers/misc/genwqe/card_utils.c @@ -352,17 +352,27 @@ int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl, if (copy_from_user(sgl->lpage, user_addr + user_size - sgl->lpage_size, sgl->lpage_size)) { rc = -EFAULT; - goto err_out1; + goto err_out2; } } return 0; + err_out2: + __genwqe_free_consistent(cd, PAGE_SIZE, sgl->lpage, + sgl->lpage_dma_addr); + sgl->lpage = NULL; + sgl->lpage_dma_addr = 0; err_out1: __genwqe_free_consistent(cd, PAGE_SIZE, sgl->fpage, sgl->fpage_dma_addr); + sgl->fpage = NULL; + sgl->fpage_dma_addr = 0; err_out: __genwqe_free_consistent(cd, sgl->sgl_size, sgl->sgl, sgl->sgl_dma_addr); + sgl->sgl = NULL; + sgl->sgl_dma_addr = 0; + sgl->sgl_size = 0; return -ENOMEM; } -- cgit v1.2.3-59-g8ed1b From eb94cd68abd9b7c92bf70ddc452d65f1a84c46e2 Mon Sep 17 00:00:00 2001 From: Jorgen Hansen Date: Thu, 6 Oct 2016 04:43:08 -0700 Subject: VMCI: Doorbell create and destroy fixes This change consists of two changes: 1) If 
vmci_doorbell_create is called when neither guest nor host personality as been initialized, vmci_get_context_id will return VMCI_INVALID_ID. In that case, we should fail the create call. 2) In doorbell destroy, we assume that vmci_guest_code_active() has the same return value on create and destroy. That may not be the case, so we may end up with the wrong refcount. Instead, destroy should check explicitly whether the doorbell is in the index table as an indicator of whether the guest code was active at create time. Reviewed-by: Adit Ranadive Signed-off-by: Jorgen Hansen Signed-off-by: Greg Kroah-Hartman --- drivers/misc/vmw_vmci/vmci_doorbell.c | 8 +++++++- drivers/misc/vmw_vmci/vmci_driver.c | 2 +- 2 files changed, 8 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/misc/vmw_vmci/vmci_doorbell.c b/drivers/misc/vmw_vmci/vmci_doorbell.c index a8cee33ae8d2..b3fa738ae005 100644 --- a/drivers/misc/vmw_vmci/vmci_doorbell.c +++ b/drivers/misc/vmw_vmci/vmci_doorbell.c @@ -431,6 +431,12 @@ int vmci_doorbell_create(struct vmci_handle *handle, if (vmci_handle_is_invalid(*handle)) { u32 context_id = vmci_get_context_id(); + if (context_id == VMCI_INVALID_ID) { + pr_warn("Failed to get context ID\n"); + result = VMCI_ERROR_NO_RESOURCES; + goto free_mem; + } + /* Let resource code allocate a free ID for us */ new_handle = vmci_make_handle(context_id, VMCI_INVALID_ID); } else { @@ -525,7 +531,7 @@ int vmci_doorbell_destroy(struct vmci_handle handle) entry = container_of(resource, struct dbell_entry, resource); - if (vmci_guest_code_active()) { + if (!hlist_unhashed(&entry->node)) { int result; dbell_index_table_remove(entry); diff --git a/drivers/misc/vmw_vmci/vmci_driver.c b/drivers/misc/vmw_vmci/vmci_driver.c index 896be150e28f..d7eaf1eb11e7 100644 --- a/drivers/misc/vmw_vmci/vmci_driver.c +++ b/drivers/misc/vmw_vmci/vmci_driver.c @@ -113,5 +113,5 @@ module_exit(vmci_drv_exit); MODULE_AUTHOR("VMware, Inc."); MODULE_DESCRIPTION("VMware Virtual Machine Communication Interface."); -MODULE_VERSION("1.1.4.0-k"); +MODULE_VERSION("1.1.5.0-k"); MODULE_LICENSE("GPL v2"); -- cgit v1.2.3-59-g8ed1b From 40b6e61ac72e99672e47cdb99c8d7d226004169b Mon Sep 17 00:00:00 2001 From: Boris Brezillon Date: Fri, 28 Oct 2016 11:08:44 +0200 Subject: ubi: fastmap: Fix add_vol() return value test in ubi_attach_fastmap() Commit e96a8a3bb671 ("UBI: Fastmap: Do not add vol if it already exists") introduced a bug by changing the possible error codes returned by add_vol(): - this function no longer returns NULL in case of allocation failure but return ERR_PTR(-ENOMEM) - when a duplicate entry in the volume RB tree is found it returns ERR_PTR(-EEXIST) instead of ERR_PTR(-EINVAL) Fix the tests done on add_vol() return val to match this new behavior. 
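The breakage is easy to see once the ERR_PTR encoding is kept in mind: the returned pointer is non-NULL, so a plain if (!av) test passes even on allocation failure, and PTR_ERR() now yields -EEXIST rather than -EINVAL, so the duplicate check never matches either. A minimal userspace model of the err.h helpers, illustrative only and simplified from include/linux/err.h:

    #include <stdio.h>

    #define MAX_ERRNO    4095
    #define ERR_PTR(err) ((void *)(long)(err))
    #define IS_ERR(ptr)  ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)
    #define PTR_ERR(ptr) ((long)(ptr))

    int main(void)
    {
            void *av = ERR_PTR(-12);        /* -ENOMEM on Linux */

            printf("!av        -> %d\n", !av);              /* 0: a NULL check never fires */
            printf("IS_ERR(av) -> %d\n", (int)IS_ERR(av));  /* 1: the check the fix switches to */
            printf("PTR_ERR(av)-> %ld\n", PTR_ERR(av));     /* -12 */
            return 0;
    }

Hence the caller has to branch on IS_ERR() first and only then inspect PTR_ERR(), which is exactly what the hunk below does.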
Fixes: e96a8a3bb671 ("UBI: Fastmap: Do not add vol if it already exists") Reported-by: Dan Carpenter Signed-off-by: Boris Brezillon Acked-by: Sheng Yong Signed-off-by: Richard Weinberger --- drivers/mtd/ubi/fastmap.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c index 2ff62157d3bb..c1f5c29e458e 100644 --- a/drivers/mtd/ubi/fastmap.c +++ b/drivers/mtd/ubi/fastmap.c @@ -707,11 +707,11 @@ static int ubi_attach_fastmap(struct ubi_device *ubi, fmvhdr->vol_type, be32_to_cpu(fmvhdr->last_eb_bytes)); - if (!av) - goto fail_bad; - if (PTR_ERR(av) == -EINVAL) { - ubi_err(ubi, "volume (ID %i) already exists", - fmvhdr->vol_id); + if (IS_ERR(av)) { + if (PTR_ERR(av) == -EEXIST) + ubi_err(ubi, "volume (ID %i) already exists", + fmvhdr->vol_id); + goto fail_bad; } -- cgit v1.2.3-59-g8ed1b From 2083d36790e328a364896a2833fbedbf6e1d2317 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Tue, 25 Oct 2016 11:25:56 +0200 Subject: mlxsw: spectrum_router: Save requested prefix bitlist when creating tree Currently, the prefix bitlist is not saved for LPM trees, causing the compare to always fail which causes the tree to be destroyed and created for every inserted and removed FIB entry. So fix this by saving the bitlist as it should have been done from the very beginning. Fixes: 53342023eed9 ("mlxsw: spectrum_router: Implement LPM trees management") Signed-off-by: Jiri Pirko Reviewed-by: Ido Schimmel Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index f3d50d369bbe..4cff4c043d11 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -320,6 +320,8 @@ mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp, lpm_tree); if (err) goto err_left_struct_set; + memcpy(&lpm_tree->prefix_usage, prefix_usage, + sizeof(lpm_tree->prefix_usage)); return lpm_tree; err_left_struct_set: -- cgit v1.2.3-59-g8ed1b From 8b99becdc89ee7635137725e9fa76f304bb168f5 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Tue, 25 Oct 2016 11:25:57 +0200 Subject: mlxsw: spectrum_router: Compare only trees which are in use during tree get Only trees which are in use should be compared to requested prefix usage. Fixes: 53342023eed9 ("mlxsw: spectrum_router: Implement LPM trees management") Signed-off-by: Jiri Pirko Reviewed-by: Ido Schimmel Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 4cff4c043d11..4573da2c5560 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -345,7 +345,8 @@ mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp, for (i = 0; i < MLXSW_SP_LPM_TREE_COUNT; i++) { lpm_tree = &mlxsw_sp->router.lpm_trees[i]; - if (lpm_tree->proto == proto && + if (lpm_tree->ref_count != 0 && + lpm_tree->proto == proto && mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage, prefix_usage)) goto inc_ref_count; -- cgit v1.2.3-59-g8ed1b From 25ccd2429fd0af5b9c2c136c8c593491aeabf162 Mon Sep 17 00:00:00 2001 From: Lv Zheng Date: Wed, 26 Oct 2016 15:40:12 +0800 Subject: ACPICA: Dispatcher: Fix order issue of method termination The last step of the method termination should be the end of the method serialization. Otherwise, the steps happening after it will face the race issues that cannot be protected by the method serialization mechanism. This patch fixes this issue by moving the per-method-object deletion code prior than the end of the method serialization. Otherwise, the possible race issues may result in AE_ALREADY_EXISTS error in a parallel environment. Fixes: 74f51b80a0c4 (ACPICA: Namespace: Fix dynamic table loading issues) Reported-and-tested-by: Imre Deak Signed-off-by: Lv Zheng Signed-off-by: Rafael J. Wysocki --- drivers/acpi/acpica/dsmethod.c | 40 ++++++++++++++++++++-------------------- 1 file changed, 20 insertions(+), 20 deletions(-) (limited to 'drivers') diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c index 32e9ddc0cf2b..c4028a8dacd5 100644 --- a/drivers/acpi/acpica/dsmethod.c +++ b/drivers/acpi/acpica/dsmethod.c @@ -730,26 +730,6 @@ acpi_ds_terminate_control_method(union acpi_operand_object *method_desc, acpi_ds_method_data_delete_all(walk_state); - /* - * If method is serialized, release the mutex and restore the - * current sync level for this thread - */ - if (method_desc->method.mutex) { - - /* Acquisition Depth handles recursive calls */ - - method_desc->method.mutex->mutex.acquisition_depth--; - if (!method_desc->method.mutex->mutex.acquisition_depth) { - walk_state->thread->current_sync_level = - method_desc->method.mutex->mutex. - original_sync_level; - - acpi_os_release_mutex(method_desc->method. - mutex->mutex.os_mutex); - method_desc->method.mutex->mutex.thread_id = 0; - } - } - /* * Delete any namespace objects created anywhere within the * namespace by the execution of this method. Unless: @@ -786,6 +766,26 @@ acpi_ds_terminate_control_method(union acpi_operand_object *method_desc, ~ACPI_METHOD_MODIFIED_NAMESPACE; } } + + /* + * If method is serialized, release the mutex and restore the + * current sync level for this thread + */ + if (method_desc->method.mutex) { + + /* Acquisition Depth handles recursive calls */ + + method_desc->method.mutex->mutex.acquisition_depth--; + if (!method_desc->method.mutex->mutex.acquisition_depth) { + walk_state->thread->current_sync_level = + method_desc->method.mutex->mutex. + original_sync_level; + + acpi_os_release_mutex(method_desc->method. 
+ mutex->mutex.os_mutex); + method_desc->method.mutex->mutex.thread_id = 0; + } + } } /* Decrement the thread count on the method */ -- cgit v1.2.3-59-g8ed1b From 8121aa26e32012ca89afafa5e503b879950ac0fe Mon Sep 17 00:00:00 2001 From: Lv Zheng Date: Wed, 26 Oct 2016 15:40:20 +0800 Subject: ACPICA: Dispatcher: Fix an unbalanced lock exit path in acpi_ds_auto_serialize_method() There is a lock unbalanced exit path in acpi_ds_initialize_method(), this patch corrects it. Fixes: 441ad11d078f (ACPICA: Dispatcher: Fix a mutex issue for method auto serialization) Tested-by: Imre Deak Signed-off-by: Lv Zheng Signed-off-by: Rafael J. Wysocki --- drivers/acpi/acpica/dsmethod.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c index c4028a8dacd5..5997e592e5a6 100644 --- a/drivers/acpi/acpica/dsmethod.c +++ b/drivers/acpi/acpica/dsmethod.c @@ -128,7 +128,7 @@ acpi_ds_auto_serialize_method(struct acpi_namespace_node *node, if (ACPI_FAILURE(status)) { acpi_ds_delete_walk_state(walk_state); acpi_ps_free_op(op); - return_ACPI_STATUS(status); + goto unlock; } walk_state->descending_callback = acpi_ds_detect_named_opcodes; -- cgit v1.2.3-59-g8ed1b From 8633db6b027952449e155a316f4ae3a530bbe18f Mon Sep 17 00:00:00 2001 From: Lv Zheng Date: Wed, 26 Oct 2016 15:42:01 +0800 Subject: ACPICA: Dispatcher: Fix interpreter locking around acpi_ev_initialize_region() In the code path of acpi_ev_initialize_region(), there is namespace modification code unlocked. This patch tunes the code to make sure such modification are always locked. Fixes: 74f51b80a0c4 (ACPICA: Namespace: Fix dynamic table loading issues) Tested-by: Imre Deak Signed-off-by: Lv Zheng Signed-off-by: Rafael J. Wysocki --- drivers/acpi/acpica/dsinit.c | 11 +++-------- drivers/acpi/acpica/dsmethod.c | 12 +++--------- drivers/acpi/acpica/dswload2.c | 2 -- drivers/acpi/acpica/evrgnini.c | 3 +++ drivers/acpi/acpica/nsload.c | 2 ++ 5 files changed, 11 insertions(+), 19 deletions(-) (limited to 'drivers') diff --git a/drivers/acpi/acpica/dsinit.c b/drivers/acpi/acpica/dsinit.c index f1e6dcc7a827..54d48b90de2c 100644 --- a/drivers/acpi/acpica/dsinit.c +++ b/drivers/acpi/acpica/dsinit.c @@ -46,6 +46,7 @@ #include "acdispat.h" #include "acnamesp.h" #include "actables.h" +#include "acinterp.h" #define _COMPONENT ACPI_DISPATCHER ACPI_MODULE_NAME("dsinit") @@ -214,23 +215,17 @@ acpi_ds_initialize_objects(u32 table_index, /* Walk entire namespace from the supplied root */ - status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); - if (ACPI_FAILURE(status)) { - return_ACPI_STATUS(status); - } - /* * We don't use acpi_walk_namespace since we do not want to acquire * the namespace reader lock. 
*/ status = acpi_ns_walk_namespace(ACPI_TYPE_ANY, start_node, ACPI_UINT32_MAX, - ACPI_NS_WALK_UNLOCK, acpi_ds_init_one_object, - NULL, &info, NULL); + 0, acpi_ds_init_one_object, NULL, &info, + NULL); if (ACPI_FAILURE(status)) { ACPI_EXCEPTION((AE_INFO, status, "During WalkNamespace")); } - (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); status = acpi_get_table_by_index(table_index, &table); if (ACPI_FAILURE(status)) { diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c index 5997e592e5a6..2b3210f42a46 100644 --- a/drivers/acpi/acpica/dsmethod.c +++ b/drivers/acpi/acpica/dsmethod.c @@ -99,14 +99,11 @@ acpi_ds_auto_serialize_method(struct acpi_namespace_node *node, "Method auto-serialization parse [%4.4s] %p\n", acpi_ut_get_node_name(node), node)); - acpi_ex_enter_interpreter(); - /* Create/Init a root op for the method parse tree */ op = acpi_ps_alloc_op(AML_METHOD_OP, obj_desc->method.aml_start); if (!op) { - status = AE_NO_MEMORY; - goto unlock; + return_ACPI_STATUS(AE_NO_MEMORY); } acpi_ps_set_name(op, node->name.integer); @@ -118,8 +115,7 @@ acpi_ds_auto_serialize_method(struct acpi_namespace_node *node, acpi_ds_create_walk_state(node->owner_id, NULL, NULL, NULL); if (!walk_state) { acpi_ps_free_op(op); - status = AE_NO_MEMORY; - goto unlock; + return_ACPI_STATUS(AE_NO_MEMORY); } status = acpi_ds_init_aml_walk(walk_state, op, node, @@ -128,7 +124,7 @@ acpi_ds_auto_serialize_method(struct acpi_namespace_node *node, if (ACPI_FAILURE(status)) { acpi_ds_delete_walk_state(walk_state); acpi_ps_free_op(op); - goto unlock; + return_ACPI_STATUS(status); } walk_state->descending_callback = acpi_ds_detect_named_opcodes; @@ -138,8 +134,6 @@ acpi_ds_auto_serialize_method(struct acpi_namespace_node *node, status = acpi_ps_parse_aml(walk_state); acpi_ps_delete_parse_tree(op); -unlock: - acpi_ex_exit_interpreter(); return_ACPI_STATUS(status); } diff --git a/drivers/acpi/acpica/dswload2.c b/drivers/acpi/acpica/dswload2.c index 028b22a3154e..e36218206bb0 100644 --- a/drivers/acpi/acpica/dswload2.c +++ b/drivers/acpi/acpica/dswload2.c @@ -607,11 +607,9 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state) } } - acpi_ex_exit_interpreter(); status = acpi_ev_initialize_region (acpi_ns_get_attached_object(node), FALSE); - acpi_ex_enter_interpreter(); if (ACPI_FAILURE(status)) { /* diff --git a/drivers/acpi/acpica/evrgnini.c b/drivers/acpi/acpica/evrgnini.c index 3843f1fc5dbb..75ddd160a716 100644 --- a/drivers/acpi/acpica/evrgnini.c +++ b/drivers/acpi/acpica/evrgnini.c @@ -45,6 +45,7 @@ #include "accommon.h" #include "acevents.h" #include "acnamesp.h" +#include "acinterp.h" #define _COMPONENT ACPI_EVENTS ACPI_MODULE_NAME("evrgnini") @@ -597,9 +598,11 @@ acpi_ev_initialize_region(union acpi_operand_object *region_obj, } } + acpi_ex_exit_interpreter(); status = acpi_ev_execute_reg_method(region_obj, ACPI_REG_CONNECT); + acpi_ex_enter_interpreter(); if (acpi_ns_locked) { status = diff --git a/drivers/acpi/acpica/nsload.c b/drivers/acpi/acpica/nsload.c index 334d3c5ba617..d1f20143bb11 100644 --- a/drivers/acpi/acpica/nsload.c +++ b/drivers/acpi/acpica/nsload.c @@ -137,7 +137,9 @@ unlock: ACPI_DEBUG_PRINT((ACPI_DB_INFO, "**** Begin Table Object Initialization\n")); + acpi_ex_enter_interpreter(); status = acpi_ds_initialize_objects(table_index, node); + acpi_ex_exit_interpreter(); ACPI_DEBUG_PRINT((ACPI_DB_INFO, "**** Completed Table Object Initialization\n")); -- cgit v1.2.3-59-g8ed1b From b47bd6ea40636362a8b6605de51207cc387ba0b8 Mon Sep 17 00:00:00 2001 From: Daniel 
Jurgens Date: Tue, 25 Oct 2016 18:36:24 +0300 Subject: {net, ib}/mlx5: Make cache line size determination at runtime. ARM 64B cache line systems have L1_CACHE_BYTES set to 128. cache_line_size() will return the correct size. Fixes: cf50b5efa2fe('net/mlx5_core/ib: New device capabilities handling.') Signed-off-by: Daniel Jurgens Signed-off-by: Saeed Mahameed Signed-off-by: David S. Miller --- drivers/infiniband/hw/mlx5/main.c | 2 +- drivers/infiniband/hw/mlx5/qp.c | 1 - drivers/net/ethernet/mellanox/mlx5/core/alloc.c | 31 +++++++++++++++++++++---- include/linux/mlx5/driver.h | 11 --------- 4 files changed, 27 insertions(+), 18 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 22174774dbb8..63036c731626 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -1019,7 +1019,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev, resp.qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp); if (mlx5_core_is_pf(dev->mdev) && MLX5_CAP_GEN(dev->mdev, bf)) resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size); - resp.cache_line_size = L1_CACHE_BYTES; + resp.cache_line_size = cache_line_size(); resp.max_sq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq); resp.max_rq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq); resp.max_send_wqebb = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz); diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index 41f4c2afbcdd..7ce97daf26c6 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -52,7 +52,6 @@ enum { enum { MLX5_IB_SQ_STRIDE = 6, - MLX5_IB_CACHE_LINE_SIZE = 64, }; static const u32 mlx5_ib_opcode[] = { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c index 6cb38304669f..2c6e3c7b7417 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c @@ -41,6 +41,13 @@ #include "mlx5_core.h" +struct mlx5_db_pgdir { + struct list_head list; + unsigned long *bitmap; + __be32 *db_page; + dma_addr_t db_dma; +}; + /* Handling for queue buffers -- we allocate a bunch of memory and * register it in a memory region at HCA virtual address 0. 
*/ @@ -102,17 +109,28 @@ EXPORT_SYMBOL_GPL(mlx5_buf_free); static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct mlx5_core_dev *dev, int node) { + u32 db_per_page = PAGE_SIZE / cache_line_size(); struct mlx5_db_pgdir *pgdir; pgdir = kzalloc(sizeof(*pgdir), GFP_KERNEL); if (!pgdir) return NULL; - bitmap_fill(pgdir->bitmap, MLX5_DB_PER_PAGE); + pgdir->bitmap = kcalloc(BITS_TO_LONGS(db_per_page), + sizeof(unsigned long), + GFP_KERNEL); + + if (!pgdir->bitmap) { + kfree(pgdir); + return NULL; + } + + bitmap_fill(pgdir->bitmap, db_per_page); pgdir->db_page = mlx5_dma_zalloc_coherent_node(dev, PAGE_SIZE, &pgdir->db_dma, node); if (!pgdir->db_page) { + kfree(pgdir->bitmap); kfree(pgdir); return NULL; } @@ -123,18 +141,19 @@ static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct mlx5_core_dev *dev, static int mlx5_alloc_db_from_pgdir(struct mlx5_db_pgdir *pgdir, struct mlx5_db *db) { + u32 db_per_page = PAGE_SIZE / cache_line_size(); int offset; int i; - i = find_first_bit(pgdir->bitmap, MLX5_DB_PER_PAGE); - if (i >= MLX5_DB_PER_PAGE) + i = find_first_bit(pgdir->bitmap, db_per_page); + if (i >= db_per_page) return -ENOMEM; __clear_bit(i, pgdir->bitmap); db->u.pgdir = pgdir; db->index = i; - offset = db->index * L1_CACHE_BYTES; + offset = db->index * cache_line_size(); db->db = pgdir->db_page + offset / sizeof(*pgdir->db_page); db->dma = pgdir->db_dma + offset; @@ -181,14 +200,16 @@ EXPORT_SYMBOL_GPL(mlx5_db_alloc); void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db) { + u32 db_per_page = PAGE_SIZE / cache_line_size(); mutex_lock(&dev->priv.pgdir_mutex); __set_bit(db->index, db->u.pgdir->bitmap); - if (bitmap_full(db->u.pgdir->bitmap, MLX5_DB_PER_PAGE)) { + if (bitmap_full(db->u.pgdir->bitmap, db_per_page)) { dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE, db->u.pgdir->db_page, db->u.pgdir->db_dma); list_del(&db->u.pgdir->list); + kfree(db->u.pgdir->bitmap); kfree(db->u.pgdir); } diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 85c4786427e4..5dbda60a09f4 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -625,10 +625,6 @@ struct mlx5_db { int index; }; -enum { - MLX5_DB_PER_PAGE = PAGE_SIZE / L1_CACHE_BYTES, -}; - enum { MLX5_COMP_EQ_SIZE = 1024, }; @@ -638,13 +634,6 @@ enum { MLX5_PTYS_EN = 1 << 2, }; -struct mlx5_db_pgdir { - struct list_head list; - DECLARE_BITMAP(bitmap, MLX5_DB_PER_PAGE); - __be32 *db_page; - dma_addr_t db_dma; -}; - typedef void (*mlx5_cmd_cbk_t)(int status, void *context); struct mlx5_cmd_work_ent { -- cgit v1.2.3-59-g8ed1b From bba1574c2f11d674bf343f3cf307b33d19f73d9d Mon Sep 17 00:00:00 2001 From: Daniel Jurgens Date: Tue, 25 Oct 2016 18:36:25 +0300 Subject: net/mlx5: Always Query HCA caps after setting them Always query the HCA caps after setting them to update the capablities data structures. Not doing so results in incorrect capabilities being reported including max_dc, max_qp and several others. Fixes: 59211bd3b632 ("net/mlx5: Split the load/unload flow into hardware and software flows") Signed-off-by: Daniel Jurgens Signed-off-by: Saeed Mahameed Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx5/core/main.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index d9c3c70b29e4..8a63910bfccf 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -844,12 +844,6 @@ static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv) struct pci_dev *pdev = dev->pdev; int err; - err = mlx5_query_hca_caps(dev); - if (err) { - dev_err(&pdev->dev, "query hca failed\n"); - goto out; - } - err = mlx5_query_board_id(dev); if (err) { dev_err(&pdev->dev, "query board id failed\n"); @@ -1023,6 +1017,12 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, mlx5_start_health_poll(dev); + err = mlx5_query_hca_caps(dev); + if (err) { + dev_err(&pdev->dev, "query hca failed\n"); + goto err_stop_poll; + } + if (boot && mlx5_init_once(dev, priv)) { dev_err(&pdev->dev, "sw objs init failed\n"); goto err_stop_poll; -- cgit v1.2.3-59-g8ed1b From eccec8da3b4e6f403040c7187338a46d2ea27c20 Mon Sep 17 00:00:00 2001 From: Paul Blakey Date: Tue, 25 Oct 2016 18:36:26 +0300 Subject: net/mlx5: Keep autogroups list ordered Finding a new autogroup range is done by going over a group list sorted by each group start index. The search is stopped after finding the first free range. Adding the newly created group to the list is wrongly added to the end of the list regardless of its start index as the parameter of where to insert it is ignored. This commit makes sure to use that unused parameter to insert it where requested. Fixes: f0d22d187473 ('net/mlx5_core: Introduce flow steering autogrouped flow table') Signed-off-by: Paul Blakey Signed-off-by: Saeed Mahameed Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 5da2cc878582..17559c842905 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -879,7 +879,7 @@ static struct mlx5_flow_group *create_flow_group_common(struct mlx5_flow_table * tree_init_node(&fg->node, !is_auto_fg, del_flow_group); tree_add_node(&fg->node, &ft->node); /* Add node to group list */ - list_add(&fg->node.list, ft->node.children.prev); + list_add(&fg->node.list, prev_fg); return fg; } @@ -893,7 +893,7 @@ struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft, return ERR_PTR(-EPERM); lock_ref_node(&ft->node); - fg = create_flow_group_common(ft, fg_in, &ft->node.children, false); + fg = create_flow_group_common(ft, fg_in, ft->node.children.prev, false); unlock_ref_node(&ft->node); return fg; @@ -1012,7 +1012,7 @@ static struct mlx5_flow_group *create_autogroup(struct mlx5_flow_table *ft, u32 *match_criteria) { int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); - struct list_head *prev = &ft->node.children; + struct list_head *prev = ft->node.children.prev; unsigned int candidate_index = 0; struct mlx5_flow_group *fg; void *match_criteria_addr; -- cgit v1.2.3-59-g8ed1b From 32dba76a7a3104b2815f31b1755f4c0ba1f01a78 Mon Sep 17 00:00:00 2001 From: Paul Blakey Date: Tue, 25 Oct 2016 18:36:27 +0300 Subject: net/mlx5: Fix autogroups groups num not decreasing Autogroups groups num is increased when creating a new flow group, but is never decreased. Now decreasing it when deleting a flow group. Fixes: f0d22d187473 ('net/mlx5_core: Introduce flow steering autogrouped flow table') Signed-off-by: Paul Blakey Signed-off-by: Saeed Mahameed Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 17559c842905..89696048b045 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -436,6 +436,9 @@ static void del_flow_group(struct fs_node *node) fs_get_obj(ft, fg->node.parent); dev = get_dev(&ft->node); + if (ft->autogroup.active) + ft->autogroup.num_groups--; + if (mlx5_cmd_destroy_flow_group(dev, ft, fg->id)) mlx5_core_warn(dev, "flow steering can't destroy fg %d of ft %d\n", fg->id, ft->id); -- cgit v1.2.3-59-g8ed1b From e83d6955fe422f120b0e98e4f02496af89845eef Mon Sep 17 00:00:00 2001 From: Paul Blakey Date: Tue, 25 Oct 2016 18:36:28 +0300 Subject: net/mlx5: Correctly initialize last use of flow counters Currently, last use timestamp is initialized to zero. This is not the expected value by higher layers such as when we do TC action offloading. To fix that, set it to the current time, e.g when the counter/rule is offloaded. This is the same behaviour of non-offloaded TC actions. Fixes: 43a335e055bb ('mlx5_core: Flow counters infrastructure') Signed-off-by: Paul Blakey Signed-off-by: Saeed Mahameed Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c index 3a9195b4169d..3b026c151cf2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c @@ -218,6 +218,7 @@ struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging) goto err_out; if (aging) { + counter->cache.lastuse = jiffies; counter->aging = true; spin_lock(&fc_stats->addlist_lock); -- cgit v1.2.3-59-g8ed1b From 2b029556667a7fc1aea731c5828e501656f87653 Mon Sep 17 00:00:00 2001 From: Saeed Mahameed Date: Tue, 25 Oct 2016 18:36:29 +0300 Subject: net/mlx5e: Choose best nearest LRO timeout Instead of predicting the index of the wanted LRO timeout value from hardware capabilities, look for the nearest LRO timeout value. Fixes: 5c50368f3831 ('net/mlx5e: Light-weight netdev open/stop') Signed-off-by: Saeed Mahameed Signed-off-by: Mohamad Haj Yahia Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 5 +++++ drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 19 ++++++++++++++++--- 2 files changed, 21 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 460363b66cb1..7a43502a89cc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -85,6 +85,9 @@ #define MLX5_MPWRQ_SMALL_PACKET_THRESHOLD (128) #define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ (64 * 1024) +#define MLX5E_DEFAULT_LRO_TIMEOUT 32 +#define MLX5E_LRO_TIMEOUT_ARR_SIZE 4 + #define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC 0x10 #define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE 0x3 #define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS 0x20 @@ -221,6 +224,7 @@ struct mlx5e_params { struct ieee_ets ets; #endif bool rx_am_enabled; + u32 lro_timeout; }; struct mlx5e_tstamp { @@ -888,5 +892,6 @@ int mlx5e_attach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev); void mlx5e_detach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev); struct rtnl_link_stats64 * mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats); +u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout); #endif /* __MLX5_EN_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 7eaf38020a8f..5c8ab3eabe9c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -1971,9 +1971,7 @@ static void mlx5e_build_tir_ctx_lro(void *tirc, struct mlx5e_priv *priv) MLX5_SET(tirc, tirc, lro_max_ip_payload_size, (priv->params.lro_wqe_sz - ROUGH_MAX_L2_L3_HDR_SZ) >> 8); - MLX5_SET(tirc, tirc, lro_timeout_period_usecs, - MLX5_CAP_ETH(priv->mdev, - lro_timer_supported_periods[2])); + MLX5_SET(tirc, tirc, lro_timeout_period_usecs, priv->params.lro_timeout); } void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv) @@ -3401,6 +3399,18 @@ static void mlx5e_query_min_inline(struct mlx5_core_dev *mdev, } } +u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout) +{ + int i; + + /* The supported periods are organized in ascending order */ + for (i = 0; i < MLX5E_LRO_TIMEOUT_ARR_SIZE - 1; i++) + if (MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]) >= 
wanted_timeout) + break; + + return MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]); +} + static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev, struct net_device *netdev, const struct mlx5e_profile *profile, @@ -3419,6 +3429,9 @@ static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev, priv->profile = profile; priv->ppriv = ppriv; + priv->params.lro_timeout = + mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT); + priv->params.log_sq_size = MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE; /* set CQE compression */ -- cgit v1.2.3-59-g8ed1b From 5e1e93c7047381b81236f82f60c15d49c510d1a7 Mon Sep 17 00:00:00 2001 From: Mohamad Haj Yahia Date: Tue, 25 Oct 2016 18:36:30 +0300 Subject: net/mlx5e: Unregister netdev before detaching it Detaching the netdev before unregistering it cause some netdev cleanup ndos to fail because they check presence of the netdev, so we need to unregister the netdev first. Fixes: 26e59d8077a3 ('net/mlx5e: Implement mlx5e interface attach/detach callbacks') Signed-off-by: Mohamad Haj Yahia Signed-off-by: Saeed Mahameed Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 2 +- drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 5c8ab3eabe9c..f4c687ce4c59 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -4048,7 +4048,6 @@ void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv) const struct mlx5e_profile *profile = priv->profile; struct net_device *netdev = priv->netdev; - unregister_netdev(netdev); destroy_workqueue(priv->wq); if (profile->cleanup) profile->cleanup(priv); @@ -4065,6 +4064,7 @@ static void mlx5e_remove(struct mlx5_core_dev *mdev, void *vpriv) for (vport = 1; vport < total_vfs; vport++) mlx5_eswitch_unregister_vport_rep(esw, vport); + unregister_netdev(priv->netdev); mlx5e_detach(mdev, vpriv); mlx5e_destroy_netdev(mdev, priv); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 3c97da103d30..7fe6559e4ab3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -457,6 +457,7 @@ void mlx5e_vport_rep_unload(struct mlx5_eswitch *esw, struct mlx5e_priv *priv = rep->priv_data; struct net_device *netdev = priv->netdev; + unregister_netdev(netdev); mlx5e_detach_netdev(esw->dev, netdev); mlx5e_destroy_netdev(esw->dev, priv); } -- cgit v1.2.3-59-g8ed1b From 247f139cdae73b4f47bd348d05dff1afd40b84b6 Mon Sep 17 00:00:00 2001 From: Mohamad Haj Yahia Date: Tue, 25 Oct 2016 18:36:31 +0300 Subject: net/mlx5: Change the acl enable prototype to return status The Ingress/Egress ACL enable function may fail and it should return status to its caller to avoid NULL pointer dereference. Fixes: f942380c1239 ('net/mlx5: E-Switch, Vport ingress/egress ACLs rules for spoofchk') Signed-off-by: Mohamad Haj Yahia Signed-off-by: Saeed Mahameed Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 50 +++++++++++++++-------- 1 file changed, 34 insertions(+), 16 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index abbf2c369923..be1f7333ab7f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -931,8 +931,8 @@ static void esw_vport_change_handler(struct work_struct *work) mutex_unlock(&esw->state_lock); } -static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw, - struct mlx5_vport *vport) +static int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw, + struct mlx5_vport *vport) { int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); struct mlx5_flow_group *vlan_grp = NULL; @@ -949,9 +949,11 @@ static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw, int table_size = 2; int err = 0; - if (!MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support) || - !IS_ERR_OR_NULL(vport->egress.acl)) - return; + if (!MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) + return -EOPNOTSUPP; + + if (!IS_ERR_OR_NULL(vport->egress.acl)) + return 0; esw_debug(dev, "Create vport[%d] egress ACL log_max_size(%d)\n", vport->vport, MLX5_CAP_ESW_EGRESS_ACL(dev, log_max_ft_size)); @@ -959,12 +961,12 @@ static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw, root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS); if (!root_ns) { esw_warn(dev, "Failed to get E-Switch egress flow namespace\n"); - return; + return -EIO; } flow_group_in = mlx5_vzalloc(inlen); if (!flow_group_in) - return; + return -ENOMEM; acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport); if (IS_ERR(acl)) { @@ -1009,6 +1011,7 @@ out: mlx5_destroy_flow_group(vlan_grp); if (err && !IS_ERR_OR_NULL(acl)) mlx5_destroy_flow_table(acl); + return err; } static void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw, @@ -1041,8 +1044,8 @@ static void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw, vport->egress.acl = NULL; } -static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw, - struct mlx5_vport *vport) +static int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw, + struct mlx5_vport *vport) { int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); struct mlx5_core_dev *dev = esw->dev; @@ -1063,9 +1066,11 @@ static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw, int table_size = 4; int err = 0; - if (!MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support) || - !IS_ERR_OR_NULL(vport->ingress.acl)) - return; + if (!MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) + return -EOPNOTSUPP; + + if (!IS_ERR_OR_NULL(vport->ingress.acl)) + return 0; esw_debug(dev, "Create vport[%d] ingress ACL log_max_size(%d)\n", vport->vport, MLX5_CAP_ESW_INGRESS_ACL(dev, log_max_ft_size)); @@ -1073,12 +1078,12 @@ static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw, root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS); if (!root_ns) { esw_warn(dev, "Failed to get E-Switch ingress flow namespace\n"); - return; + return -EIO; } flow_group_in = mlx5_vzalloc(inlen); if (!flow_group_in) - return; + return -ENOMEM; acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport); if (IS_ERR(acl)) { @@ -1167,6 +1172,7 @@ out: } kvfree(flow_group_in); + return err; } static void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw, @@ -1225,7 +1231,13 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw, return 0; } - 
esw_vport_enable_ingress_acl(esw, vport); + err = esw_vport_enable_ingress_acl(esw, vport); + if (err) { + mlx5_core_warn(esw->dev, + "failed to enable ingress acl (%d) on vport[%d]\n", + err, vport->vport); + return err; + } esw_debug(esw->dev, "vport[%d] configure ingress rules, vlan(%d) qos(%d)\n", @@ -1299,7 +1311,13 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw, return 0; } - esw_vport_enable_egress_acl(esw, vport); + err = esw_vport_enable_egress_acl(esw, vport); + if (err) { + mlx5_core_warn(esw->dev, + "failed to enable egress acl (%d) on vport[%d]\n", + err, vport->vport); + return err; + } esw_debug(esw->dev, "vport[%d] configure egress rules, vlan(%d) qos(%d)\n", -- cgit v1.2.3-59-g8ed1b From 2241007b3d783cbdbaa78c30bdb1994278b6f9b9 Mon Sep 17 00:00:00 2001 From: Mohamad Haj Yahia Date: Tue, 25 Oct 2016 18:36:32 +0300 Subject: net/mlx5: Clear health sick bit when starting health poll The health sick status should be cleared when we start the health poll. This is crucial for driver reload (unload + load) in order to behave correctly in case of a health issue. Fixes: fd76ee4da55a ('net/mlx5_core: Fix internal error detection conditions') Signed-off-by: Mohamad Haj Yahia Signed-off-by: Saeed Mahameed Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx5/core/health.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c index 1a05fb965c8d..2cb4094c9c49 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c @@ -281,6 +281,7 @@ void mlx5_start_health_poll(struct mlx5_core_dev *dev) struct mlx5_core_health *health = &dev->priv.health; init_timer(&health->timer); + health->sick = 0; health->health = &dev->iseg->health; health->health_counter = &dev->iseg->health_counter; -- cgit v1.2.3-59-g8ed1b From 05ac2c0b7438ea08c5d54b48797acf9b22cb2f6f Mon Sep 17 00:00:00 2001 From: Mohamad Haj Yahia Date: Tue, 25 Oct 2016 18:36:33 +0300 Subject: net/mlx5: Fix race between PCI error handlers and health work Currently there is a race between the health care work and the kernel pci error handlers: both of them detect the error, and the first one to be called will do the error handling. There is a chance that the health care work will disable the pci device after the pci slot has been resumed. Also create a separate WQ because now we will have two types of health work, one for error detection and one for recovery. Fixes: 89d44f0a6c73 ('net/mlx5_core: Add pci error handlers to mlx5_core driver') Signed-off-by: Mohamad Haj Yahia Signed-off-by: Saeed Mahameed Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx5/core/health.c | 30 +++++++++++++++++++++--- drivers/net/ethernet/mellanox/mlx5/core/main.c | 9 +++++-- include/linux/mlx5/driver.h | 4 ++++ 3 files changed, 38 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c index 2cb4094c9c49..2d00022c92d8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c @@ -64,6 +64,10 @@ enum { MLX5_NIC_IFC_NO_DRAM_NIC = 2 }; +enum { + MLX5_DROP_NEW_HEALTH_WORK, +}; + static u8 get_nic_interface(struct mlx5_core_dev *dev) { return (ioread32be(&dev->iseg->cmdq_addr_l_sz) >> 8) & 3; @@ -272,7 +276,13 @@ static void poll_health(unsigned long data) if (in_fatal(dev) && !health->sick) { health->sick = true; print_health_info(dev); - schedule_work(&health->work); + spin_lock(&health->wq_lock); + if (!test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags)) + queue_work(health->wq, &health->work); + else + dev_err(&dev->pdev->dev, + "new health works are not permitted at this stage\n"); + spin_unlock(&health->wq_lock); } } @@ -282,6 +292,7 @@ void mlx5_start_health_poll(struct mlx5_core_dev *dev) init_timer(&health->timer); health->sick = 0; + clear_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags); health->health = &dev->iseg->health; health->health_counter = &dev->iseg->health_counter; @@ -298,11 +309,21 @@ void mlx5_stop_health_poll(struct mlx5_core_dev *dev) del_timer_sync(&health->timer); } +void mlx5_drain_health_wq(struct mlx5_core_dev *dev) +{ + struct mlx5_core_health *health = &dev->priv.health; + + spin_lock(&health->wq_lock); + set_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags); + spin_unlock(&health->wq_lock); + cancel_work_sync(&health->work); +} + void mlx5_health_cleanup(struct mlx5_core_dev *dev) { struct mlx5_core_health *health = &dev->priv.health; - flush_work(&health->work); + destroy_workqueue(health->wq); } int mlx5_health_init(struct mlx5_core_dev *dev) @@ -317,8 +338,11 @@ int mlx5_health_init(struct mlx5_core_dev *dev) strcpy(name, "mlx5_health"); strcat(name, dev_name(&dev->pdev->dev)); + health->wq = create_singlethread_workqueue(name); kfree(name); - + if (!health->wq) + return -ENOMEM; + spin_lock_init(&health->wq_lock); INIT_WORK(&health->work, health_care); return 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 8a63910bfccf..9f90226bc120 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -1315,8 +1315,13 @@ static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev, dev_info(&pdev->dev, "%s was called\n", __func__); mlx5_enter_error_state(dev); mlx5_unload_one(dev, priv, false); - pci_save_state(pdev); - mlx5_pci_disable_device(dev); + /* In case of kernel call save the pci state and drain health wq */ + if (state) { + pci_save_state(pdev); + mlx5_drain_health_wq(dev); + mlx5_pci_disable_device(dev); + } + return state == pci_channel_io_perm_failure ? 
PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET; } diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 5dbda60a09f4..7d9a5d08eb59 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -418,7 +418,10 @@ struct mlx5_core_health { u32 prev; int miss_counter; bool sick; + /* wq spinlock to synchronize draining */ + spinlock_t wq_lock; struct workqueue_struct *wq; + unsigned long flags; struct work_struct work; }; @@ -778,6 +781,7 @@ void mlx5_health_cleanup(struct mlx5_core_dev *dev); int mlx5_health_init(struct mlx5_core_dev *dev); void mlx5_start_health_poll(struct mlx5_core_dev *dev); void mlx5_stop_health_poll(struct mlx5_core_dev *dev); +void mlx5_drain_health_wq(struct mlx5_core_dev *dev); int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf, int node); int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf); -- cgit v1.2.3-59-g8ed1b From 04c0c1ab38e95105d950db5b84e727637e149ce7 Mon Sep 17 00:00:00 2001 From: Mohamad Haj Yahia Date: Tue, 25 Oct 2016 18:36:34 +0300 Subject: net/mlx5: PCI error recovery health care simulation In case that the kernel PCI error handlers are not called, we will trigger our own recovery flow. The health work will give priority to the kernel pci error handlers to recover the PCI by waiting for a small period, if the pci error handlers are not triggered the manual recovery flow will be executed. We don't save pci state in case of manual recovery because it will ruin the pci configuration space and we will lose dma sync. Fixes: 89d44f0a6c73 ('net/mlx5_core: Add pci error handlers to mlx5_core driver') Signed-off-by: Mohamad Haj Yahia Signed-off-by: Saeed Mahameed Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx5/core/health.c | 45 ++++++++++++++++++++-- drivers/net/ethernet/mellanox/mlx5/core/main.c | 18 ++++++--- .../net/ethernet/mellanox/mlx5/core/mlx5_core.h | 1 + include/linux/mlx5/driver.h | 1 + 4 files changed, 56 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c index 2d00022c92d8..5bcf93422ee0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c @@ -61,14 +61,15 @@ enum { enum { MLX5_NIC_IFC_FULL = 0, MLX5_NIC_IFC_DISABLED = 1, - MLX5_NIC_IFC_NO_DRAM_NIC = 2 + MLX5_NIC_IFC_NO_DRAM_NIC = 2, + MLX5_NIC_IFC_INVALID = 3 }; enum { MLX5_DROP_NEW_HEALTH_WORK, }; -static u8 get_nic_interface(struct mlx5_core_dev *dev) +static u8 get_nic_state(struct mlx5_core_dev *dev) { return (ioread32be(&dev->iseg->cmdq_addr_l_sz) >> 8) & 3; } @@ -101,7 +102,7 @@ static int in_fatal(struct mlx5_core_dev *dev) struct mlx5_core_health *health = &dev->priv.health; struct health_buffer __iomem *h = health->health; - if (get_nic_interface(dev) == MLX5_NIC_IFC_DISABLED) + if (get_nic_state(dev) == MLX5_NIC_IFC_DISABLED) return 1; if (ioread32be(&h->fw_ver) == 0xffffffff) @@ -131,7 +132,7 @@ unlock: static void mlx5_handle_bad_state(struct mlx5_core_dev *dev) { - u8 nic_interface = get_nic_interface(dev); + u8 nic_interface = get_nic_state(dev); switch (nic_interface) { case MLX5_NIC_IFC_FULL: @@ -153,8 +154,34 @@ static void mlx5_handle_bad_state(struct mlx5_core_dev *dev) mlx5_disable_device(dev); } +static void health_recover(struct work_struct *work) +{ + struct mlx5_core_health *health; + struct delayed_work *dwork; + struct mlx5_core_dev *dev; + struct mlx5_priv *priv; + u8 
nic_state; + + dwork = container_of(work, struct delayed_work, work); + health = container_of(dwork, struct mlx5_core_health, recover_work); + priv = container_of(health, struct mlx5_priv, health); + dev = container_of(priv, struct mlx5_core_dev, priv); + + nic_state = get_nic_state(dev); + if (nic_state == MLX5_NIC_IFC_INVALID) { + dev_err(&dev->pdev->dev, "health recovery flow aborted since the nic state is invalid\n"); + return; + } + + dev_err(&dev->pdev->dev, "starting health recovery flow\n"); + mlx5_recover_device(dev); +} + +/* How much time to wait until health resetting the driver (in msecs) */ +#define MLX5_RECOVERY_DELAY_MSECS 60000 static void health_care(struct work_struct *work) { + unsigned long recover_delay = msecs_to_jiffies(MLX5_RECOVERY_DELAY_MSECS); struct mlx5_core_health *health; struct mlx5_core_dev *dev; struct mlx5_priv *priv; @@ -164,6 +191,14 @@ static void health_care(struct work_struct *work) dev = container_of(priv, struct mlx5_core_dev, priv); mlx5_core_warn(dev, "handling bad device here\n"); mlx5_handle_bad_state(dev); + + spin_lock(&health->wq_lock); + if (!test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags)) + schedule_delayed_work(&health->recover_work, recover_delay); + else + dev_err(&dev->pdev->dev, + "new health works are not permitted at this stage\n"); + spin_unlock(&health->wq_lock); } static const char *hsynd_str(u8 synd) @@ -316,6 +351,7 @@ void mlx5_drain_health_wq(struct mlx5_core_dev *dev) spin_lock(&health->wq_lock); set_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags); spin_unlock(&health->wq_lock); + cancel_delayed_work_sync(&health->recover_work); cancel_work_sync(&health->work); } @@ -344,6 +380,7 @@ int mlx5_health_init(struct mlx5_core_dev *dev) return -ENOMEM; spin_lock_init(&health->wq_lock); INIT_WORK(&health->work, health_care); + INIT_DELAYED_WORK(&health->recover_work, health_recover); return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 9f90226bc120..d5433c49b2b0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -1313,6 +1313,7 @@ static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev, struct mlx5_priv *priv = &dev->priv; dev_info(&pdev->dev, "%s was called\n", __func__); + mlx5_enter_error_state(dev); mlx5_unload_one(dev, priv, false); /* In case of kernel call save the pci state and drain health wq */ @@ -1378,11 +1379,6 @@ static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev) return PCI_ERS_RESULT_RECOVERED; } -void mlx5_disable_device(struct mlx5_core_dev *dev) -{ - mlx5_pci_err_detected(dev->pdev, 0); -} - static void mlx5_pci_resume(struct pci_dev *pdev) { struct mlx5_core_dev *dev = pci_get_drvdata(pdev); @@ -1432,6 +1428,18 @@ static const struct pci_device_id mlx5_core_pci_table[] = { MODULE_DEVICE_TABLE(pci, mlx5_core_pci_table); +void mlx5_disable_device(struct mlx5_core_dev *dev) +{ + mlx5_pci_err_detected(dev->pdev, 0); +} + +void mlx5_recover_device(struct mlx5_core_dev *dev) +{ + mlx5_pci_disable_device(dev); + if (mlx5_pci_slot_reset(dev->pdev) == PCI_ERS_RESULT_RECOVERED) + mlx5_pci_resume(dev->pdev); +} + static struct pci_driver mlx5_core_driver = { .name = DRIVER_NAME, .id_table = mlx5_core_pci_table, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index 3d0cfb9f18f9..187662c8ea96 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ 
b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h @@ -83,6 +83,7 @@ void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event, unsigned long param); void mlx5_enter_error_state(struct mlx5_core_dev *dev); void mlx5_disable_device(struct mlx5_core_dev *dev); +void mlx5_recover_device(struct mlx5_core_dev *dev); int mlx5_sriov_init(struct mlx5_core_dev *dev); void mlx5_sriov_cleanup(struct mlx5_core_dev *dev); int mlx5_sriov_attach(struct mlx5_core_dev *dev); diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 7d9a5d08eb59..ecc451d89ccd 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -423,6 +423,7 @@ struct mlx5_core_health { struct workqueue_struct *wq; unsigned long flags; struct work_struct work; + struct delayed_work recover_work; }; struct mlx5_cq_table { -- cgit v1.2.3-59-g8ed1b From 6b276190c50a12511d889d9079ffb901ff94a822 Mon Sep 17 00:00:00 2001 From: Noa Osherovich Date: Tue, 25 Oct 2016 18:36:35 +0300 Subject: net/mlx5: Avoid passing dma address 0 to firmware Currently the firmware can't work with a page with dma address 0. Passing such an address to the firmware will cause the give_pages command to fail. To avoid this, in case we get a 0 dma address of a page from the dma engine, we avoid passing it to FW by remapping to get an address other than 0. Fixes: bf0bf77f6519 ('mlx5: Support communicating arbitrary host...') Signed-off-by: Noa Osherovich Signed-off-by: Saeed Mahameed Signed-off-by: David S. Miller --- .../net/ethernet/mellanox/mlx5/core/pagealloc.c | 26 +++++++++++++++------- 1 file changed, 18 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c index cc4fd61914d3..a57d5a81eb05 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c @@ -209,6 +209,7 @@ static void free_4k(struct mlx5_core_dev *dev, u64 addr) static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id) { struct page *page; + u64 zero_addr = 1; u64 addr; int err; int nid = dev_to_node(&dev->pdev->dev); @@ -218,26 +219,35 @@ static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id) mlx5_core_warn(dev, "failed to allocate page\n"); return -ENOMEM; } +map: addr = dma_map_page(&dev->pdev->dev, page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL); if (dma_mapping_error(&dev->pdev->dev, addr)) { mlx5_core_warn(dev, "failed dma mapping page\n"); err = -ENOMEM; - goto out_alloc; + goto err_mapping; } + + /* Firmware doesn't support page with physical address 0 */ + if (addr == 0) { + zero_addr = addr; + goto map; + } + err = insert_page(dev, addr, page, func_id); if (err) { mlx5_core_err(dev, "failed to track allocated page\n"); - goto out_mapping; + dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE, + DMA_BIDIRECTIONAL); } - return 0; - -out_mapping: - dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL); +err_mapping: + if (err) + __free_page(page); -out_alloc: - __free_page(page); + if (zero_addr == 0) + dma_unmap_page(&dev->pdev->dev, zero_addr, PAGE_SIZE, + DMA_BIDIRECTIONAL); return err; } -- cgit v1.2.3-59-g8ed1b From a4256bc9ec36159e84d710c7c44aaa244a6380a8 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 25 Oct 2016 18:16:20 +0200 Subject: IB/mlx4: avoid a -Wmaybe-uninitialize warning MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit There is an old warning about 
mlx4_SW2HW_EQ_wrapper on x86: ethernet/mellanox/mlx4/resource_tracker.c: In function ‘mlx4_SW2HW_EQ_wrapper’: ethernet/mellanox/mlx4/resource_tracker.c:3071:10: error: ‘eq’ may be used uninitialized in this function [-Werror=maybe-uninitialized] The problem here is that gcc won't track the state of the variable across a spin_unlock. Moving the assignment out of the lock is safe here and avoids the warning. Signed-off-by: Arnd Bergmann Reviewed-by: Yishai Hadas Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index 84d7857ccc27..c548beaaf910 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c @@ -1605,13 +1605,14 @@ static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index, r->com.from_state = r->com.state; r->com.to_state = state; r->com.state = RES_EQ_BUSY; - if (eq) - *eq = r; } } spin_unlock_irq(mlx4_tlock(dev)); + if (!err && eq) + *eq = r; + return err; } -- cgit v1.2.3-59-g8ed1b From 166e6045cacccb3046883a1a4c977f173fec5ccd Mon Sep 17 00:00:00 2001 From: Ganesh Goudar Date: Wed, 26 Oct 2016 13:26:38 +0530 Subject: cxgb4: Fix error handling in alloc_uld_rxqs(). Fix to release resources properly in error handling path of alloc_uld_rxqs(), This patch also removes unwanted arguments and avoids calling the same function twice. Fixes: 94cdb8bb993a (cxgb4: Add support for dynamic allocation of resources for ULD Signed-off-by: Ganesh Goudar Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c | 40 ++++++++++++-------------- 1 file changed, 18 insertions(+), 22 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c index 0945fa49a5dd..2471ff465d5c 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c @@ -135,15 +135,17 @@ static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp, } static int alloc_uld_rxqs(struct adapter *adap, - struct sge_uld_rxq_info *rxq_info, - unsigned int nq, unsigned int offset, bool lro) + struct sge_uld_rxq_info *rxq_info, bool lro) { struct sge *s = &adap->sge; - struct sge_ofld_rxq *q = rxq_info->uldrxq + offset; - unsigned short *ids = rxq_info->rspq_id + offset; - unsigned int per_chan = nq / adap->params.nports; + unsigned int nq = rxq_info->nrxq + rxq_info->nciq; + struct sge_ofld_rxq *q = rxq_info->uldrxq; + unsigned short *ids = rxq_info->rspq_id; unsigned int bmap_idx = 0; - int i, err, msi_idx; + unsigned int per_chan; + int i, err, msi_idx, que_idx = 0; + + per_chan = rxq_info->nrxq / adap->params.nports; if (adap->flags & USING_MSIX) msi_idx = 1; @@ -151,12 +153,18 @@ static int alloc_uld_rxqs(struct adapter *adap, msi_idx = -((int)s->intrq.abs_id + 1); for (i = 0; i < nq; i++, q++) { + if (i == rxq_info->nrxq) { + /* start allocation of concentrator queues */ + per_chan = rxq_info->nciq / adap->params.nports; + que_idx = 0; + } + if (msi_idx >= 0) { bmap_idx = get_msix_idx_from_bmap(adap); msi_idx = adap->msix_info_ulds[bmap_idx].idx; } err = t4_sge_alloc_rxq(adap, &q->rspq, false, - adap->port[i / per_chan], + adap->port[que_idx++ / per_chan], msi_idx, q->fl.size ? 
&q->fl : NULL, uldrx_handler, @@ -165,29 +173,19 @@ static int alloc_uld_rxqs(struct adapter *adap, if (err) goto freeout; if (msi_idx >= 0) - rxq_info->msix_tbl[i + offset] = bmap_idx; + rxq_info->msix_tbl[i] = bmap_idx; memset(&q->stats, 0, sizeof(q->stats)); if (ids) ids[i] = q->rspq.abs_id; } return 0; freeout: - q = rxq_info->uldrxq + offset; + q = rxq_info->uldrxq; for ( ; i; i--, q++) { if (q->rspq.desc) free_rspq_fl(adap, &q->rspq, q->fl.size ? &q->fl : NULL); } - - /* We need to free rxq also in case of ciq allocation failure */ - if (offset) { - q = rxq_info->uldrxq + offset; - for ( ; i; i--, q++) { - if (q->rspq.desc) - free_rspq_fl(adap, &q->rspq, - q->fl.size ? &q->fl : NULL); - } - } return err; } @@ -205,9 +203,7 @@ setup_sge_queues_uld(struct adapter *adap, unsigned int uld_type, bool lro) return -ENOMEM; } - ret = !(!alloc_uld_rxqs(adap, rxq_info, rxq_info->nrxq, 0, lro) && - !alloc_uld_rxqs(adap, rxq_info, rxq_info->nciq, - rxq_info->nrxq, lro)); + ret = !(!alloc_uld_rxqs(adap, rxq_info, lro)); /* Tell uP to route control queue completions to rdma rspq */ if (adap->flags & FULL_INIT_DONE && -- cgit v1.2.3-59-g8ed1b From e934f684856393a0b2aecc7fdd8357a48b79c535 Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Wed, 26 Oct 2016 08:30:29 -0700 Subject: Revert "hv_netvsc: report vmbus name in ethtool" This reverts commit e3f74b841d48 ("hv_netvsc: report vmbus name in ethtool")' because of problem introduced by commit f9a56e5d6a0ba ("Drivers: hv: make VMBus bus ids persistent"). This changed the format of the vmbus name and this new format is too long to fit in the bus_info field of ethtool. Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller --- drivers/net/hyperv/netvsc_drv.c | 4 ---- include/linux/hyperv.h | 7 ------- 2 files changed, 11 deletions(-) (limited to 'drivers') diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index c71d966d905f..f6382150b16a 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -699,12 +699,8 @@ int netvsc_recv_callback(struct hv_device *device_obj, static void netvsc_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info) { - struct net_device_context *net_device_ctx = netdev_priv(net); - struct hv_device *dev = net_device_ctx->device_ctx; - strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); strlcpy(info->fw_version, "N/A", sizeof(info->fw_version)); - strlcpy(info->bus_info, vmbus_dev_name(dev), sizeof(info->bus_info)); } static void netvsc_get_channels(struct net_device *net, diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index 6824556d37ed..cd184bdca58f 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h @@ -1169,13 +1169,6 @@ int __must_check __vmbus_driver_register(struct hv_driver *hv_driver, const char *mod_name); void vmbus_driver_unregister(struct hv_driver *hv_driver); -static inline const char *vmbus_dev_name(const struct hv_device *device_obj) -{ - const struct kobject *kobj = &device_obj->device.kobj; - - return kobj->name; -} - void vmbus_hvsock_device_unregister(struct vmbus_channel *channel); int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj, -- cgit v1.2.3-59-g8ed1b From fd33b2447bec7926ceaf4a4b74b68ea2466f2ae0 Mon Sep 17 00:00:00 2001 From: Jason Gunthorpe Date: Wed, 26 Oct 2016 11:47:02 -0600 Subject: net: mv643xx_eth: Fetch the phy connection type from DT The MAC is capable of RGMII mode and that is probably a more typical connection type than GMII today (eg it is used by 
Marvell Reference designs for several SOCs). Let DT users specify the standard phy-connection-type = "rgmii-id"; On a phy node. Signed-off-by: Jason Gunthorpe Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller --- .../devicetree/bindings/net/marvell-orion-net.txt | 1 + drivers/net/ethernet/marvell/mv643xx_eth.c | 23 ++++++++++++++++++---- 2 files changed, 20 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/Documentation/devicetree/bindings/net/marvell-orion-net.txt b/Documentation/devicetree/bindings/net/marvell-orion-net.txt index bce52b2ec55e..6fd988c84c4f 100644 --- a/Documentation/devicetree/bindings/net/marvell-orion-net.txt +++ b/Documentation/devicetree/bindings/net/marvell-orion-net.txt @@ -49,6 +49,7 @@ Optional port properties: and - phy-handle: See ethernet.txt file in the same directory. + - phy-mode: See ethernet.txt file in the same directory. or diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index 55831188bc32..bf5cc55ba24c 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -2968,6 +2968,22 @@ static void set_params(struct mv643xx_eth_private *mp, mp->txq_count = pd->tx_queue_count ? : 1; } +static int get_phy_mode(struct mv643xx_eth_private *mp) +{ + struct device *dev = mp->dev->dev.parent; + int iface = -1; + + if (dev->of_node) + iface = of_get_phy_mode(dev->of_node); + + /* Historical default if unspecified. We could also read/write + * the interface state in the PSC1 + */ + if (iface < 0) + iface = PHY_INTERFACE_MODE_GMII; + return iface; +} + static struct phy_device *phy_scan(struct mv643xx_eth_private *mp, int phy_addr) { @@ -2994,7 +3010,7 @@ static struct phy_device *phy_scan(struct mv643xx_eth_private *mp, "orion-mdio-mii", addr); phydev = phy_connect(mp->dev, phy_id, mv643xx_eth_adjust_link, - PHY_INTERFACE_MODE_GMII); + get_phy_mode(mp)); if (!IS_ERR(phydev)) { phy_addr_set(mp, addr); break; @@ -3090,6 +3106,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev) if (!dev) return -ENOMEM; + SET_NETDEV_DEV(dev, &pdev->dev); mp = netdev_priv(dev); platform_set_drvdata(pdev, mp); @@ -3129,7 +3146,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev) if (pd->phy_node) { mp->phy = of_phy_connect(mp->dev, pd->phy_node, mv643xx_eth_adjust_link, 0, - PHY_INTERFACE_MODE_GMII); + get_phy_mode(mp)); if (!mp->phy) err = -ENODEV; else @@ -3187,8 +3204,6 @@ static int mv643xx_eth_probe(struct platform_device *pdev) dev->priv_flags |= IFF_UNICAST_FLT; dev->gso_max_segs = MV643XX_MAX_TSO_SEGS; - SET_NETDEV_DEV(dev, &pdev->dev); - if (mp->shared->win_protect) wrl(mp, WINDOW_PROTECT(mp->port_num), mp->shared->win_protect); -- cgit v1.2.3-59-g8ed1b From 8d7533e5aaad1c94386a8101a36b0617987966b7 Mon Sep 17 00:00:00 2001 From: Thomas Falcon Date: Wed, 26 Oct 2016 13:57:38 -0500 Subject: ibmvnic: Fix releasing of sub-CRQ IRQs in interrupt context Schedule these XPORT event tasks in the shared workqueue so that IRQs are not freed in an interrupt context when sub-CRQs are released. Signed-off-by: Thomas Falcon Signed-off-by: David S. 
Miller --- drivers/net/ethernet/ibm/ibmvnic.c | 35 ++++++++++++++++++++++++----------- drivers/net/ethernet/ibm/ibmvnic.h | 1 + 2 files changed, 25 insertions(+), 11 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 213162df1a9b..0459d19a5f16 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -3232,6 +3232,27 @@ static void ibmvnic_free_inflight(struct ibmvnic_adapter *adapter) spin_unlock_irqrestore(&adapter->inflight_lock, flags); } +static void ibmvnic_xport_event(struct work_struct *work) +{ + struct ibmvnic_adapter *adapter = container_of(work, + struct ibmvnic_adapter, + ibmvnic_xport); + struct device *dev = &adapter->vdev->dev; + int rc; + + ibmvnic_free_inflight(adapter); + release_sub_crqs(adapter); + if (adapter->migrated) { + rc = ibmvnic_reenable_crq_queue(adapter); + if (rc) + dev_err(dev, "Error after enable rc=%ld\n", rc); + adapter->migrated = false; + rc = ibmvnic_send_crq_init(adapter); + if (rc) + dev_err(dev, "Error sending init rc=%ld\n", rc); + } +} + static void ibmvnic_handle_crq(union ibmvnic_crq *crq, struct ibmvnic_adapter *adapter) { @@ -3267,15 +3288,7 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq, if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) { dev_info(dev, "Re-enabling adapter\n"); adapter->migrated = true; - ibmvnic_free_inflight(adapter); - release_sub_crqs(adapter); - rc = ibmvnic_reenable_crq_queue(adapter); - if (rc) - dev_err(dev, "Error after enable rc=%ld\n", rc); - adapter->migrated = false; - rc = ibmvnic_send_crq_init(adapter); - if (rc) - dev_err(dev, "Error sending init rc=%ld\n", rc); + schedule_work(&adapter->ibmvnic_xport); } else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) { dev_info(dev, "Backing device failover detected\n"); netif_carrier_off(netdev); @@ -3284,8 +3297,7 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq, /* The adapter lost the connection */ dev_err(dev, "Virtual Adapter failed (rc=%d)\n", gen_crq->cmd); - ibmvnic_free_inflight(adapter); - release_sub_crqs(adapter); + schedule_work(&adapter->ibmvnic_xport); } return; case IBMVNIC_CRQ_CMD_RSP: @@ -3726,6 +3738,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id) SET_NETDEV_DEV(netdev, &dev->dev); INIT_WORK(&adapter->vnic_crq_init, handle_crq_init_rsp); + INIT_WORK(&adapter->ibmvnic_xport, ibmvnic_xport_event); spin_lock_init(&adapter->stats_lock); diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h index 878e2c059f5c..dd775d951b73 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.h +++ b/drivers/net/ethernet/ibm/ibmvnic.h @@ -1048,5 +1048,6 @@ struct ibmvnic_adapter { u8 map_id; struct work_struct vnic_crq_init; + struct work_struct ibmvnic_xport; bool failover; }; -- cgit v1.2.3-59-g8ed1b From aa0c08feae8161b945520ada753d0dfe62b14fe7 Mon Sep 17 00:00:00 2001 From: Jack Morgenstein Date: Thu, 27 Oct 2016 16:27:13 +0300 Subject: net/mlx4_core: Fix the resource-type enum in res tracker to conform to FW spec The resource type enum in the resource tracker was incorrect. RES_EQ was put in the position of RES_NPORT_ID (a FC resource). Since the remaining resources maintain their current values, and RES_EQ is not passed from slaves to the hypervisor in any FW command, this change affects only the hypervisor. Therefore, there is no backwards-compatibility issue. 
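[Editorial illustration, not part of the patch: the identifiers below are hypothetical, simplified stand-ins for the driver's real resource list. C assigns enumerators consecutive values starting at 0, so placing RES_NPORT_ID in the slot RES_EQ used to occupy and moving RES_EQ to the end leaves every other resource ID unchanged; only RES_EQ gets a new, higher value, which is safe because, as the message notes, RES_EQ is not passed between slave and hypervisor in any FW command.]

enum demo_resource {			/* hypothetical, simplified list */
	DEMO_RES_MAC,			/* 0 */
	DEMO_RES_VLAN,			/* 1 */
	DEMO_RES_NPORT_ID,		/* 2: fills the slot RES_EQ used to occupy */
	DEMO_RES_COUNTER,		/* 3: value unchanged by the reorder */
	DEMO_RES_FS_RULE,		/* 4: value unchanged by the reorder */
	DEMO_RES_EQ,			/* 5: moved to the end, hypervisor-only */
	DEMO_NUM_OF_RESOURCE_TYPE	/* 6 */
};

_Static_assert(DEMO_RES_COUNTER == 3, "existing resource IDs keep their values");
_Static_assert(DEMO_RES_EQ == DEMO_NUM_OF_RESOURCE_TYPE - 1, "only RES_EQ moves");
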
Fixes: 623ed84b1f95 ("mlx4_core: initial header-file changes for SRIOV support") Signed-off-by: Jack Morgenstein Signed-off-by: Moshe Shemesh Signed-off-by: Tariq Toukan Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx4/mlx4.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index e4878f31e45d..7aad07644289 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@ -145,9 +145,10 @@ enum mlx4_resource { RES_MTT, RES_MAC, RES_VLAN, - RES_EQ, + RES_NPORT_ID, RES_COUNTER, RES_FS_RULE, + RES_EQ, MLX4_NUM_OF_RESOURCE_TYPE }; -- cgit v1.2.3-59-g8ed1b From 33a1f8b196dca933313c001866c4df3f3ca11f78 Mon Sep 17 00:00:00 2001 From: Maor Gottlieb Date: Thu, 27 Oct 2016 16:27:14 +0300 Subject: net/mlx4_core: Avoid setting ports to auto when only one port type is supported When only one port type is supported, it should be read only. We reject changing requests, even to the auto sense mode. Fixes: 27bf91d6a0d5 ("mlx4_core: Add link type autosensing") Signed-off-by: Maor Gottlieb Signed-off-by: Moshe Shemesh Signed-off-by: Tariq Toukan Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx4/main.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 7183ac4135d2..6f4e67bc3538 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -1102,6 +1102,14 @@ static int __set_port_type(struct mlx4_port_info *info, int i; int err = 0; + if ((port_type & mdev->caps.supported_type[info->port]) != port_type) { + mlx4_err(mdev, + "Requested port type for port %d is not supported on this HCA\n", + info->port); + err = -EINVAL; + goto err_sup; + } + mlx4_stop_sense(mdev); mutex_lock(&priv->port_mutex); info->tmp_type = port_type; @@ -1147,7 +1155,7 @@ static int __set_port_type(struct mlx4_port_info *info, out: mlx4_start_sense(mdev); mutex_unlock(&priv->port_mutex); - +err_sup: return err; } -- cgit v1.2.3-59-g8ed1b From 72da2e911f79d4c7132d7431a97d46659ee862be Mon Sep 17 00:00:00 2001 From: Moshe Lazer Date: Thu, 27 Oct 2016 16:27:15 +0300 Subject: net/mlx4_core: Change the default value of enable_qos Change the default status of quality of service back to disabled, as it hurts performance in some cases. Fixes: 38438f7c7e8c ("net/mlx4: Set enhanced QoS support by default when ...") Signed-off-by: Moshe Lazer Signed-off-by: Tariq Toukan Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx4/fw.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index c41ab31a39f8..84bab9f0732e 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c @@ -49,9 +49,9 @@ enum { extern void __buggy_use_of_MLX4_GET(void); extern void __buggy_use_of_MLX4_PUT(void); -static bool enable_qos = true; +static bool enable_qos; module_param(enable_qos, bool, 0444); -MODULE_PARM_DESC(enable_qos, "Enable Enhanced QoS support (default: on)"); +MODULE_PARM_DESC(enable_qos, "Enable Enhanced QoS support (default: off)"); #define MLX4_GET(dest, source, offset) \ do { \ -- cgit v1.2.3-59-g8ed1b From 4850cf4581578216468b7b3c3d06cc5abb0a697d Mon Sep 17 00:00:00 2001 From: Eugenia Emantayev Date: Thu, 27 Oct 2016 16:27:16 +0300 Subject: net/mlx4_en: Resolve dividing by zero in 32-bit system When doing roundup_pow_of_two for large enough number with bit 31, an overflow will occur and a value equal to 1 will be returned. In this case 1 will be subtracted from the return value and division by zero will be reached. Fixes: 31c128b66e5b ("net/mlx4_en: Choose time-stamping shift value according to HW frequency") Signed-off-by: Eugenia Emantayev Signed-off-by: Tariq Toukan Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx4/en_clock.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c index 08fc5fc56d43..a5fc46bbcbe2 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_clock.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c @@ -245,8 +245,11 @@ static u32 freq_to_shift(u16 freq) { u32 freq_khz = freq * 1000; u64 max_val_cycles = freq_khz * 1000 * MLX4_EN_WRAP_AROUND_SEC; + u64 tmp_rounded = + roundup_pow_of_two(max_val_cycles) > max_val_cycles ? + roundup_pow_of_two(max_val_cycles) - 1 : UINT_MAX; u64 max_val_cycles_rounded = is_power_of_2(max_val_cycles + 1) ? - max_val_cycles : roundup_pow_of_two(max_val_cycles) - 1; + max_val_cycles : tmp_rounded; /* calculate max possible multiplier in order to fit in 64bit */ u64 max_mul = div_u64(0xffffffffffffffffULL, max_val_cycles_rounded); -- cgit v1.2.3-59-g8ed1b From 8d59de8f7bb3db296331c665779c653b0c8d13ba Mon Sep 17 00:00:00 2001 From: Erez Shitrit Date: Thu, 27 Oct 2016 16:27:17 +0300 Subject: net/mlx4_en: Process all completions in RX rings after port goes up Currently there is a race between incoming traffic and initialization flow. HW is able to receive the packets after INIT_PORT is done and unicast steering is configured. Before we set priv->port_up NAPI is not scheduled and receive queues become full. Therefore we never get new interrupts about the completions. This issue could happen if running heavy traffic during bringing port up. The resolution is to schedule NAPI once port_up is set. If receive queues were full this will process all cqes and release them. Fixes: c27a02cd94d6 ("mlx4_en: Add driver for Mellanox ConnectX 10GbE NIC") Signed-off-by: Erez Shitrit Signed-off-by: Eugenia Emantayev Signed-off-by: Tariq Toukan Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 7e703bed7b82..e25c11dff525 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -1733,6 +1733,13 @@ int mlx4_en_start_port(struct net_device *dev) udp_tunnel_get_rx_info(dev); priv->port_up = true; + + /* Process all completions if exist to prevent + * the queues freezing if they are full + */ + for (i = 0; i < priv->rx_ring_num; i++) + napi_schedule(&priv->rx_cq[i]->napi); + netif_tx_start_all_queues(dev); netif_device_attach(dev); -- cgit v1.2.3-59-g8ed1b From 9d2afba058722d40cc02f430229c91611c0e8d16 Mon Sep 17 00:00:00 2001 From: Eugenia Emantayev Date: Thu, 27 Oct 2016 16:27:18 +0300 Subject: net/mlx4_en: Fix panic during reboot Fix a kernel panic that occurs as a result of an asynchronous event handled in roce_gid_mgmt: mlx4_en_get_drvinfo is called and accesses freed resources. This happens in a shutdown flow only, since pci device is destroyed while netdevice is still alive. Fixes: c27a02cd94d6 ("mlx4_en: Add driver for Mellanox ConnectX 10GbE NIC") Signed-off-by: Eugenia Emantayev Signed-off-by: Tariq Toukan Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index e25c11dff525..314f54c8dbed 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -2201,6 +2201,7 @@ void mlx4_en_destroy_netdev(struct net_device *dev) if (!shutdown) free_netdev(dev); + dev->ethtool_ops = NULL; } static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu) -- cgit v1.2.3-59-g8ed1b From 81d184199e328fdad5633da139a10337327154e0 Mon Sep 17 00:00:00 2001 From: Jack Morgenstein Date: Thu, 27 Oct 2016 16:27:19 +0300 Subject: net/mlx4_core: Do not access comm channel if it has not yet been initialized In the Hypervisor, there are several FW commands which are invoked before the comm channel is initialized (in mlx4_multi_func_init). These include MOD_STAT_CONFIG, QUERY_DEV_CAP, INIT_HCA, and others. If any of these commands fails, say with a timeout, the Hypervisor driver enters the internal error reset flow. In this flow, the driver attempts to notify all slaves via the comm channel that an internal error has occurred. Since the comm channel has not yet been initialized (i.e., mapped via ioremap), this will cause dereferencing a NULL pointer. To fix this, do not access the comm channel in the internal error flow if it has not yet been initialized. Fixes: 55ad359225b2 ("net/mlx4_core: Enable device recovery flow with SRIOV") Fixes: ab9c17a009ee ("mlx4_core: Modify driver initialization flow to accommodate SRIOV for Ethernet") Signed-off-by: Jack Morgenstein Signed-off-by: Tariq Toukan Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx4/cmd.c | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c index b1cef7a0f7ca..e36bebcab3f2 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c @@ -2469,6 +2469,7 @@ err_comm_admin: kfree(priv->mfunc.master.slave_state); err_comm: iounmap(priv->mfunc.comm); + priv->mfunc.comm = NULL; err_vhcr: dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE, priv->mfunc.vhcr, @@ -2537,6 +2538,13 @@ void mlx4_report_internal_err_comm_event(struct mlx4_dev *dev) int slave; u32 slave_read; + /* If the comm channel has not yet been initialized, + * skip reporting the internal error event to all + * the communication channels. + */ + if (!priv->mfunc.comm) + return; + /* Report an internal error event to all * communication channels. */ @@ -2571,6 +2579,7 @@ void mlx4_multi_func_cleanup(struct mlx4_dev *dev) } iounmap(priv->mfunc.comm); + priv->mfunc.comm = NULL; } void mlx4_cmd_cleanup(struct mlx4_dev *dev, int cleanup_mask) -- cgit v1.2.3-59-g8ed1b From 6f2e0d2c3bf0f8d322ab7516c57340c7189cca02 Mon Sep 17 00:00:00 2001 From: Eugenia Emantayev Date: Thu, 27 Oct 2016 16:27:20 +0300 Subject: net/mlx4: Fix firmware command timeout during interrupt test Currently interrupt test that is part of ethtool selftest runs the check over all interrupt vectors of the device. In mlx4_en package part of interrupt vectors are uninitialized since mlx4_ib doesn't exist. This causes NOP FW command to time out. Change logic to test current port interrupt vectors only. Signed-off-by: Eugenia Emantayev Signed-off-by: Tariq Toukan Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx4/en_selftest.c | 26 +++++++++- drivers/net/ethernet/mellanox/mlx4/eq.c | 62 +++++++++++------------- include/linux/mlx4/device.h | 3 +- 3 files changed, 55 insertions(+), 36 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c index b66e03d9711f..c06346a82496 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c @@ -118,6 +118,29 @@ mlx4_en_test_loopback_exit: return !loopback_ok; } +static int mlx4_en_test_interrupts(struct mlx4_en_priv *priv) +{ + struct mlx4_en_dev *mdev = priv->mdev; + int err = 0; + int i = 0; + + err = mlx4_test_async(mdev->dev); + /* When not in MSI_X or slave, test only async */ + if (!(mdev->dev->flags & MLX4_FLAG_MSI_X) || mlx4_is_slave(mdev->dev)) + return err; + + /* A loop over all completion vectors of current port, + * for each vector check whether it works by mapping command + * completions to that vector and performing a NOP command + */ + for (i = 0; i < priv->rx_ring_num; i++) { + err = mlx4_test_interrupt(mdev->dev, priv->rx_cq[i]->vector); + if (err) + break; + } + + return err; +} static int mlx4_en_test_link(struct mlx4_en_priv *priv) { @@ -151,7 +174,6 @@ static int mlx4_en_test_speed(struct mlx4_en_priv *priv) void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf) { struct mlx4_en_priv *priv = netdev_priv(dev); - struct mlx4_en_dev *mdev = priv->mdev; int i, carrier_ok; memset(buf, 0, sizeof(u64) * MLX4_EN_NUM_SELF_TEST); @@ -177,7 +199,7 @@ void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf) netif_carrier_on(dev); } - buf[0] = mlx4_test_interrupts(mdev->dev); + buf[0] = 
mlx4_en_test_interrupts(priv); buf[1] = mlx4_en_test_link(priv); buf[2] = mlx4_en_test_speed(priv); diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c index cf8f8a72a801..cd3638e6fe25 100644 --- a/drivers/net/ethernet/mellanox/mlx4/eq.c +++ b/drivers/net/ethernet/mellanox/mlx4/eq.c @@ -1361,53 +1361,49 @@ void mlx4_cleanup_eq_table(struct mlx4_dev *dev) kfree(priv->eq_table.uar_map); } -/* A test that verifies that we can accept interrupts on all - * the irq vectors of the device. +/* A test that verifies that we can accept interrupts + * on the vector allocated for asynchronous events + */ +int mlx4_test_async(struct mlx4_dev *dev) +{ + return mlx4_NOP(dev); +} +EXPORT_SYMBOL(mlx4_test_async); + +/* A test that verifies that we can accept interrupts + * on the given irq vector of the tested port. * Interrupts are checked using the NOP command. */ -int mlx4_test_interrupts(struct mlx4_dev *dev) +int mlx4_test_interrupt(struct mlx4_dev *dev, int vector) { struct mlx4_priv *priv = mlx4_priv(dev); - int i; int err; - err = mlx4_NOP(dev); - /* When not in MSI_X, there is only one irq to check */ - if (!(dev->flags & MLX4_FLAG_MSI_X) || mlx4_is_slave(dev)) - return err; - - /* A loop over all completion vectors, for each vector we will check - * whether it works by mapping command completions to that vector - * and performing a NOP command - */ - for(i = 0; !err && (i < dev->caps.num_comp_vectors); ++i) { - /* Make sure request_irq was called */ - if (!priv->eq_table.eq[i].have_irq) - continue; - - /* Temporary use polling for command completions */ - mlx4_cmd_use_polling(dev); - - /* Map the new eq to handle all asynchronous events */ - err = mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0, - priv->eq_table.eq[i].eqn); - if (err) { - mlx4_warn(dev, "Failed mapping eq for interrupt test\n"); - mlx4_cmd_use_events(dev); - break; - } + /* Temporary use polling for command completions */ + mlx4_cmd_use_polling(dev); - /* Go back to using events */ - mlx4_cmd_use_events(dev); - err = mlx4_NOP(dev); + /* Map the new eq to handle all asynchronous events */ + err = mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0, + priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(vector)].eqn); + if (err) { + mlx4_warn(dev, "Failed mapping eq for interrupt test\n"); + goto out; } + /* Go back to using events */ + mlx4_cmd_use_events(dev); + err = mlx4_NOP(dev); + /* Return to default */ + mlx4_cmd_use_polling(dev); +out: mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0, priv->eq_table.eq[MLX4_EQ_ASYNC].eqn); + mlx4_cmd_use_events(dev); + return err; } -EXPORT_SYMBOL(mlx4_test_interrupts); +EXPORT_SYMBOL(mlx4_test_interrupt); bool mlx4_is_eq_vector_valid(struct mlx4_dev *dev, u8 port, int vector) { diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index f6a164297358..3be7abd6e722 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -1399,7 +1399,8 @@ void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u32 *lkey, u32 *rkey); int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr); int mlx4_SYNC_TPT(struct mlx4_dev *dev); -int mlx4_test_interrupts(struct mlx4_dev *dev); +int mlx4_test_interrupt(struct mlx4_dev *dev, int vector); +int mlx4_test_async(struct mlx4_dev *dev); int mlx4_query_diag_counters(struct mlx4_dev *dev, u8 op_modifier, const u32 offset[], u32 value[], size_t array_len, u8 port); -- cgit v1.2.3-59-g8ed1b From d2582a03939ed0a80ffcd3ea5345505bc8067c54 Mon Sep 17 00:00:00 2001 From: Jack Morgenstein Date: Thu, 27 Oct 2016 
16:27:21 +0300 Subject: net/mlx4_en: Fix potential deadlock in port statistics flow mlx4_en_DUMP_ETH_STATS took the *counter mutex* and then called the FW command, with WRAPPED attribute. As a result, the fw command is wrapped on the Hypervisor when it calls mlx4_en_DUMP_ETH_STATS. The FW command wrapper flow on the hypervisor takes the *slave_cmd_mutex* during processing. At the same time, a VF could be in the process of coming up, and could call mlx4_QUERY_FUNC_CAP. On the hypervisor, the command flow takes the *slave_cmd_mutex*, then executes mlx4_QUERY_FUNC_CAP_wrapper. mlx4_QUERY_FUNC_CAP wrapper calls mlx4_get_default_counter_index(), which takes the *counter mutex*. DEADLOCK. The fix is that the DUMP_ETH_STATS fw command should be called with the NATIVE attribute, so that on the hypervisor, this command does not enter the wrapper flow. Since the Hypervisor no longer goes through the wrapper code, we also simply return 0 in mlx4_DUMP_ETH_STATS_wrapper (i.e.the function succeeds, but the returned data will be all zeroes). No need to test if it is the Hypervisor going through the wrapper. Fixes: f9baff509f8a ("mlx4_core: Add "native" argument to mlx4_cmd ...") Signed-off-by: Jack Morgenstein Signed-off-by: Tariq Toukan Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx4/en_port.c | 4 ++-- drivers/net/ethernet/mellanox/mlx4/mlx4.h | 2 -- drivers/net/ethernet/mellanox/mlx4/port.c | 13 +------------ 3 files changed, 3 insertions(+), 16 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c index 5aa8b751f417..59473a0ebcdf 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_port.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c @@ -166,7 +166,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset) return PTR_ERR(mailbox); err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, in_mod, 0, MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B, - MLX4_CMD_WRAPPED); + MLX4_CMD_NATIVE); if (err) goto out; @@ -322,7 +322,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset) err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, in_mod | MLX4_DUMP_ETH_STATS_FLOW_CONTROL, 0, MLX4_CMD_DUMP_ETH_STATS, - MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED); + MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); if (err) goto out; } diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index 7aad07644289..88ee7d8a5923 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@ -1330,8 +1330,6 @@ int mlx4_SET_VLAN_FLTR_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_cmd_info *cmd); int mlx4_common_set_vlan_fltr(struct mlx4_dev *dev, int function, int port, void *buf); -int mlx4_common_dump_eth_stats(struct mlx4_dev *dev, int slave, u32 in_mod, - struct mlx4_cmd_mailbox *outbox); int mlx4_DUMP_ETH_STATS_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox, diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c index c5b2064297a1..b656dd5772e5 100644 --- a/drivers/net/ethernet/mellanox/mlx4/port.c +++ b/drivers/net/ethernet/mellanox/mlx4/port.c @@ -1728,24 +1728,13 @@ int mlx4_SET_VLAN_FLTR_wrapper(struct mlx4_dev *dev, int slave, return err; } -int mlx4_common_dump_eth_stats(struct mlx4_dev *dev, int slave, - u32 in_mod, struct mlx4_cmd_mailbox *outbox) -{ - return mlx4_cmd_box(dev, 0, outbox->dma, in_mod, 0, - 
MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B, - MLX4_CMD_NATIVE); -} - int mlx4_DUMP_ETH_STATS_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox, struct mlx4_cmd_mailbox *outbox, struct mlx4_cmd_info *cmd) { - if (slave != dev->caps.function) - return 0; - return mlx4_common_dump_eth_stats(dev, slave, - vhcr->in_modifier, outbox); + return 0; } int mlx4_get_slave_from_roce_gid(struct mlx4_dev *dev, int port, u8 *gid, -- cgit v1.2.3-59-g8ed1b From eb4b678825992aa434d32b2f615d2090281e0f88 Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Thu, 27 Oct 2016 16:27:22 +0300 Subject: net/mlx4_en: Save slave ethtool stats command Following the previous patch, as an optimization, the slave will not even bother sending the DUMP_ETH_STATS command over the comm channel. Signed-off-by: Tariq Toukan Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 314f54c8dbed..12c99a2655f2 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -1917,8 +1917,9 @@ static void mlx4_en_clear_stats(struct net_device *dev) struct mlx4_en_dev *mdev = priv->mdev; int i; - if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1)) - en_dbg(HW, priv, "Failed dumping statistics\n"); + if (!mlx4_is_slave(mdev->dev)) + if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1)) + en_dbg(HW, priv, "Failed dumping statistics\n"); memset(&priv->pstats, 0, sizeof(priv->pstats)); memset(&priv->pkstats, 0, sizeof(priv->pkstats)); -- cgit v1.2.3-59-g8ed1b From dbc34e73c2bee4ff66c3a6b0ea5d65c25a6b6994 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sat, 29 Oct 2016 17:18:17 -0400 Subject: Revert "ibmvnic: Fix releasing of sub-CRQ IRQs in interrupt context" This reverts commit 8d7533e5aaad1c94386a8101a36b0617987966b7. It introduced kbuild failures, new version coming. Signed-off-by: David S. 
Miller --- drivers/net/ethernet/ibm/ibmvnic.c | 35 +++++++++++------------------------ drivers/net/ethernet/ibm/ibmvnic.h | 1 - 2 files changed, 11 insertions(+), 25 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 0459d19a5f16..213162df1a9b 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -3232,27 +3232,6 @@ static void ibmvnic_free_inflight(struct ibmvnic_adapter *adapter) spin_unlock_irqrestore(&adapter->inflight_lock, flags); } -static void ibmvnic_xport_event(struct work_struct *work) -{ - struct ibmvnic_adapter *adapter = container_of(work, - struct ibmvnic_adapter, - ibmvnic_xport); - struct device *dev = &adapter->vdev->dev; - int rc; - - ibmvnic_free_inflight(adapter); - release_sub_crqs(adapter); - if (adapter->migrated) { - rc = ibmvnic_reenable_crq_queue(adapter); - if (rc) - dev_err(dev, "Error after enable rc=%ld\n", rc); - adapter->migrated = false; - rc = ibmvnic_send_crq_init(adapter); - if (rc) - dev_err(dev, "Error sending init rc=%ld\n", rc); - } -} - static void ibmvnic_handle_crq(union ibmvnic_crq *crq, struct ibmvnic_adapter *adapter) { @@ -3288,7 +3267,15 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq, if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) { dev_info(dev, "Re-enabling adapter\n"); adapter->migrated = true; - schedule_work(&adapter->ibmvnic_xport); + ibmvnic_free_inflight(adapter); + release_sub_crqs(adapter); + rc = ibmvnic_reenable_crq_queue(adapter); + if (rc) + dev_err(dev, "Error after enable rc=%ld\n", rc); + adapter->migrated = false; + rc = ibmvnic_send_crq_init(adapter); + if (rc) + dev_err(dev, "Error sending init rc=%ld\n", rc); } else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) { dev_info(dev, "Backing device failover detected\n"); netif_carrier_off(netdev); @@ -3297,7 +3284,8 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq, /* The adapter lost the connection */ dev_err(dev, "Virtual Adapter failed (rc=%d)\n", gen_crq->cmd); - schedule_work(&adapter->ibmvnic_xport); + ibmvnic_free_inflight(adapter); + release_sub_crqs(adapter); } return; case IBMVNIC_CRQ_CMD_RSP: @@ -3738,7 +3726,6 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id) SET_NETDEV_DEV(netdev, &dev->dev); INIT_WORK(&adapter->vnic_crq_init, handle_crq_init_rsp); - INIT_WORK(&adapter->ibmvnic_xport, ibmvnic_xport_event); spin_lock_init(&adapter->stats_lock); diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h index dd775d951b73..878e2c059f5c 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.h +++ b/drivers/net/ethernet/ibm/ibmvnic.h @@ -1048,6 +1048,5 @@ struct ibmvnic_adapter { u8 map_id; struct work_struct vnic_crq_init; - struct work_struct ibmvnic_xport; bool failover; }; -- cgit v1.2.3-59-g8ed1b From 9888d7b02c7793cbbcbdd05dd9e14cc0e78d1db7 Mon Sep 17 00:00:00 2001 From: Thomas Falcon Date: Thu, 27 Oct 2016 12:28:51 -0500 Subject: ibmvnic: Fix releasing of sub-CRQ IRQs in interrupt context Schedule these XPORT event tasks in the shared workqueue so that IRQs are not freed in an interrupt context when sub-CRQs are released. Signed-off-by: Thomas Falcon Signed-off-by: David S. 
Miller --- drivers/net/ethernet/ibm/ibmvnic.c | 35 ++++++++++++++++++++++++----------- drivers/net/ethernet/ibm/ibmvnic.h | 1 + 2 files changed, 25 insertions(+), 11 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 213162df1a9b..a922fb94fceb 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -3232,6 +3232,27 @@ static void ibmvnic_free_inflight(struct ibmvnic_adapter *adapter) spin_unlock_irqrestore(&adapter->inflight_lock, flags); } +static void ibmvnic_xport_event(struct work_struct *work) +{ + struct ibmvnic_adapter *adapter = container_of(work, + struct ibmvnic_adapter, + ibmvnic_xport); + struct device *dev = &adapter->vdev->dev; + long rc; + + ibmvnic_free_inflight(adapter); + release_sub_crqs(adapter); + if (adapter->migrated) { + rc = ibmvnic_reenable_crq_queue(adapter); + if (rc) + dev_err(dev, "Error after enable rc=%ld\n", rc); + adapter->migrated = false; + rc = ibmvnic_send_crq_init(adapter); + if (rc) + dev_err(dev, "Error sending init rc=%ld\n", rc); + } +} + static void ibmvnic_handle_crq(union ibmvnic_crq *crq, struct ibmvnic_adapter *adapter) { @@ -3267,15 +3288,7 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq, if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) { dev_info(dev, "Re-enabling adapter\n"); adapter->migrated = true; - ibmvnic_free_inflight(adapter); - release_sub_crqs(adapter); - rc = ibmvnic_reenable_crq_queue(adapter); - if (rc) - dev_err(dev, "Error after enable rc=%ld\n", rc); - adapter->migrated = false; - rc = ibmvnic_send_crq_init(adapter); - if (rc) - dev_err(dev, "Error sending init rc=%ld\n", rc); + schedule_work(&adapter->ibmvnic_xport); } else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) { dev_info(dev, "Backing device failover detected\n"); netif_carrier_off(netdev); @@ -3284,8 +3297,7 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq, /* The adapter lost the connection */ dev_err(dev, "Virtual Adapter failed (rc=%d)\n", gen_crq->cmd); - ibmvnic_free_inflight(adapter); - release_sub_crqs(adapter); + schedule_work(&adapter->ibmvnic_xport); } return; case IBMVNIC_CRQ_CMD_RSP: @@ -3726,6 +3738,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id) SET_NETDEV_DEV(netdev, &dev->dev); INIT_WORK(&adapter->vnic_crq_init, handle_crq_init_rsp); + INIT_WORK(&adapter->ibmvnic_xport, ibmvnic_xport_event); spin_lock_init(&adapter->stats_lock); diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h index 878e2c059f5c..dd775d951b73 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.h +++ b/drivers/net/ethernet/ibm/ibmvnic.h @@ -1048,5 +1048,6 @@ struct ibmvnic_adapter { u8 map_id; struct work_struct vnic_crq_init; + struct work_struct ibmvnic_xport; bool failover; }; -- cgit v1.2.3-59-g8ed1b From 8bf371e6adff29758cc3c57c17df4486513081f8 Mon Sep 17 00:00:00 2001 From: Thomas Falcon Date: Thu, 27 Oct 2016 12:28:52 -0500 Subject: ibmvnic: Fix missing brackets in init_sub_crq_irqs Signed-off-by: Thomas Falcon Signed-off-by: David S. 
Miller --- drivers/net/ethernet/ibm/ibmvnic.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index a922fb94fceb..5f44c5520fbc 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -1461,14 +1461,16 @@ static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter) return rc; req_rx_irq_failed: - for (j = 0; j < i; j++) + for (j = 0; j < i; j++) { free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]); irq_dispose_mapping(adapter->rx_scrq[j]->irq); + } i = adapter->req_tx_queues; req_tx_irq_failed: - for (j = 0; j < i; j++) + for (j = 0; j < i; j++) { free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]); irq_dispose_mapping(adapter->rx_scrq[j]->irq); + } release_sub_crqs_no_irqs(adapter); return rc; } -- cgit v1.2.3-59-g8ed1b From 9fe1c98ac90023842ae7cd921badfa1029e45bd1 Mon Sep 17 00:00:00 2001 From: Govindarajulu Varadarajan Date: Thu, 27 Oct 2016 16:01:03 -0700 Subject: enic: fix rq disable When the MTU is changed from 9000 to 1500 while there is a burst of inbound 9000-byte packets, the adaptor sometimes delivers 9000-byte packets to 1500-byte buffers. This causes memory corruption and sometimes a crash. This is because of a race condition in the adaptor between "RQ disable" clearing the descriptor mini-cache and the mini-cache valid bit being set by the completion of a descriptor fetch. This can result in a stale RQ desc being cached and used when packets arrive. In this case, the stale descriptor has the old MTU value. The solution is to write RQ->disable twice. The first write will stop any further desc fetches, allowing the second disable to clear the mini-cache valid bit without danger of a race. Also, the check for rq->running becoming 0 after writing rq->enable to 0 is not done properly. When incoming packets are flooding the interface, rq->running will pulse high for each dropped packet. Since the driver was waiting for 10us between each poll, it is possible to see rq->running = 1 1000 times in a row, even though it is not actually stuck running. This results in a false failure of vnic_rq_disable(). The fix is to poll more than 1000 times without a delay between polls, to ensure we do not miss running going low. In old adaptors, rq->enable needs to be re-written to 0 when posted_index is reset in vnic_rq_clean() in order to keep rq->prefetch_index in sync. Signed-off-by: Govindarajulu Varadarajan <_govind@gmx.com> Signed-off-by: David S. Miller --- drivers/net/ethernet/cisco/enic/vnic_rq.c | 32 ++++++++++++++++++++++--------- 1 file changed, 23 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/cisco/enic/vnic_rq.c b/drivers/net/ethernet/cisco/enic/vnic_rq.c index e572a527b18d..36bc2c71fba9 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_rq.c +++ b/drivers/net/ethernet/cisco/enic/vnic_rq.c @@ -169,19 +169,28 @@ int vnic_rq_disable(struct vnic_rq *rq) { unsigned int wait; struct vnic_dev *vdev = rq->vdev; + int i; - iowrite32(0, &rq->ctrl->enable); + /* Due to a race condition with clearing RQ "mini-cache" in hw, we need + * to disable the RQ twice to guarantee that stale descriptors are not + * used when this RQ is re-enabled.
+ */ + for (i = 0; i < 2; i++) { + iowrite32(0, &rq->ctrl->enable); - /* Wait for HW to ACK disable request */ - for (wait = 0; wait < 1000; wait++) { - if (!(ioread32(&rq->ctrl->running))) - return 0; - udelay(10); - } + /* Wait for HW to ACK disable request */ + for (wait = 20000; wait > 0; wait--) + if (!ioread32(&rq->ctrl->running)) + break; + if (!wait) { + vdev_neterr(vdev, "Failed to disable RQ[%d]\n", + rq->index); - vdev_neterr(vdev, "Failed to disable RQ[%d]\n", rq->index); + return -ETIMEDOUT; + } + } - return -ETIMEDOUT; + return 0; } void vnic_rq_clean(struct vnic_rq *rq, @@ -212,6 +221,11 @@ void vnic_rq_clean(struct vnic_rq *rq, [fetch_index % VNIC_RQ_BUF_BLK_ENTRIES(count)]; iowrite32(fetch_index, &rq->ctrl->posted_index); + /* Anytime we write fetch_index, we need to re-write 0 to rq->enable + * to re-sync internal VIC state. + */ + iowrite32(0, &rq->ctrl->enable); + vnic_dev_clear_desc_ring(&rq->ring); } -- cgit v1.2.3-59-g8ed1b From 3034783472f5353f71af44ed52ad9ee65f9f6d17 Mon Sep 17 00:00:00 2001 From: Roger Quadros Date: Fri, 28 Oct 2016 12:40:20 +0300 Subject: net: phy: dp83848: add dp83822 PHY support This PHY has a register set compatible with the DP83848x, so add support for it. Acked-by: Andrew F. Davis Signed-off-by: Roger Quadros Signed-off-by: David S. Miller --- drivers/net/phy/dp83848.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers') diff --git a/drivers/net/phy/dp83848.c b/drivers/net/phy/dp83848.c index 03d54c4adc88..800b39f06279 100644 --- a/drivers/net/phy/dp83848.c +++ b/drivers/net/phy/dp83848.c @@ -19,6 +19,7 @@ #define TI_DP83848C_PHY_ID 0x20005ca0 #define NS_DP83848C_PHY_ID 0x20005c90 #define TLK10X_PHY_ID 0x2000a210 +#define TI_DP83822_PHY_ID 0x2000a240 /* Registers */ #define DP83848_MICR 0x11 /* MII Interrupt Control Register */ @@ -77,6 +78,7 @@ static struct mdio_device_id __maybe_unused dp83848_tbl[] = { { TI_DP83848C_PHY_ID, 0xfffffff0 }, { NS_DP83848C_PHY_ID, 0xfffffff0 }, { TLK10X_PHY_ID, 0xfffffff0 }, + { TI_DP83822_PHY_ID, 0xfffffff0 }, { } }; MODULE_DEVICE_TABLE(mdio, dp83848_tbl); @@ -105,6 +107,7 @@ static struct phy_driver dp83848_driver[] = { DP83848_PHY_DRIVER(TI_DP83848C_PHY_ID, "TI DP83848C 10/100 Mbps PHY"), DP83848_PHY_DRIVER(NS_DP83848C_PHY_ID, "NS DP83848C 10/100 Mbps PHY"), DP83848_PHY_DRIVER(TLK10X_PHY_ID, "TI TLK10X 10/100 Mbps PHY"), + DP83848_PHY_DRIVER(TI_DP83822_PHY_ID, "TI DP83822 10/100 Mbps PHY"), }; module_phy_driver(dp83848_driver); -- cgit v1.2.3-59-g8ed1b From 087892d29b75c025086d99b29d385a3dac0169fc Mon Sep 17 00:00:00 2001 From: "Mintz, Yuval" Date: Sat, 29 Oct 2016 17:04:35 +0300 Subject: qede: Fix out-of-bound fastpath memory access The driver allocates a shadow array for transmitted SKBs with X entries; that means the valid indices are {0,...,X - 1}. [X == 8191] The problem is that the driver also uses X as a mask for the producer/consumer in order to choose the right entry in the array, allowing access to entry X, which is out of bounds. To fix this, simply allocate X + 1 entries in the shadow array. Signed-off-by: Yuval Mintz Signed-off-by: David S. Miller
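The off-by-one described here is a general ring-indexing rule; the sketch below (plain C, hypothetical names, not qede code) illustrates it: when producer/consumer indices are wrapped with a mask X, the masked result ranges over 0..X, so the backing array must hold X + 1 entries.

/* Illustrative sketch only, not qede code: a sw ring indexed by "idx & mask". */
#include <stdint.h>
#include <stdlib.h>

#define RING_MASK 8191U               /* X, must be 2^n - 1 */
#define RING_SIZE (RING_MASK + 1)     /* entries to allocate: X + 1 */

struct sw_ring {
	void **slots;
	uint16_t prod;
	uint16_t cons;
};

static int sw_ring_init(struct sw_ring *r)
{
	/* Sizing this with RING_MASK instead of RING_SIZE would make slot X out of bounds. */
	r->slots = calloc(RING_SIZE, sizeof(*r->slots));
	if (!r->slots)
		return -1;
	r->prod = 0;
	r->cons = 0;
	return 0;
}

static void sw_ring_store(struct sw_ring *r, void *buf)
{
	r->slots[r->prod++ & RING_MASK] = buf;	/* masked index is always 0..RING_MASK */
}

As the diff that follows shows, the fix is exactly this sizing change: the shadow array and the hardware chain are created with TX_RING_SIZE (the mask plus one) rather than NUM_TX_BDS_MAX.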
--- drivers/net/ethernet/qlogic/qede/qede_main.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 444b271059b2..7def29aaf65c 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -2940,7 +2940,7 @@ static int qede_alloc_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq) txq->num_tx_buffers = edev->q_num_tx_buffers; /* Allocate the parallel driver ring for Tx buffers */ - size = sizeof(*txq->sw_tx_ring) * NUM_TX_BDS_MAX; + size = sizeof(*txq->sw_tx_ring) * TX_RING_SIZE; txq->sw_tx_ring = kzalloc(size, GFP_KERNEL); if (!txq->sw_tx_ring) { DP_NOTICE(edev, "Tx buffers ring allocation failed\n"); @@ -2951,7 +2951,7 @@ static int qede_alloc_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq) QED_CHAIN_USE_TO_CONSUME_PRODUCE, QED_CHAIN_MODE_PBL, QED_CHAIN_CNT_TYPE_U16, - NUM_TX_BDS_MAX, + TX_RING_SIZE, sizeof(*p_virt), &txq->tx_pbl); if (rc) goto err; -- cgit v1.2.3-59-g8ed1b From c6fcc4fc5f8b592600c7409e769ab68da0fb1eca Mon Sep 17 00:00:00 2001 From: pravin shelar Date: Fri, 28 Oct 2016 09:59:15 -0700 Subject: vxlan: avoid using stale vxlan socket. When the vxlan device is closed, the vxlan socket is freed. This operation can race with the vxlan-xmit function, which dereferences the vxlan socket. The following patch uses the RCU mechanism to avoid this situation. Signed-off-by: Pravin B Shelar Signed-off-by: David S. Miller --- drivers/net/vxlan.c | 80 +++++++++++++++++++++++++++++++++-------------------- include/net/vxlan.h | 4 +-- 2 files changed, 52 insertions(+), 32 deletions(-) (limited to 'drivers') diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index c1639a3e95a4..f3c2fa3ab0d5 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -943,17 +943,20 @@ static bool vxlan_snoop(struct net_device *dev, static bool vxlan_group_used(struct vxlan_net *vn, struct vxlan_dev *dev) { struct vxlan_dev *vxlan; + struct vxlan_sock *sock4; + struct vxlan_sock *sock6 = NULL; unsigned short family = dev->default_dst.remote_ip.sa.sa_family; + sock4 = rtnl_dereference(dev->vn4_sock); + /* The vxlan_sock is only used by dev, leaving group has * no effect on other vxlan devices.
*/ - if (family == AF_INET && dev->vn4_sock && - atomic_read(&dev->vn4_sock->refcnt) == 1) + if (family == AF_INET && sock4 && atomic_read(&sock4->refcnt) == 1) return false; #if IS_ENABLED(CONFIG_IPV6) - if (family == AF_INET6 && dev->vn6_sock && - atomic_read(&dev->vn6_sock->refcnt) == 1) + sock6 = rtnl_dereference(dev->vn6_sock); + if (family == AF_INET6 && sock6 && atomic_read(&sock6->refcnt) == 1) return false; #endif @@ -961,10 +964,12 @@ static bool vxlan_group_used(struct vxlan_net *vn, struct vxlan_dev *dev) if (!netif_running(vxlan->dev) || vxlan == dev) continue; - if (family == AF_INET && vxlan->vn4_sock != dev->vn4_sock) + if (family == AF_INET && + rtnl_dereference(vxlan->vn4_sock) != sock4) continue; #if IS_ENABLED(CONFIG_IPV6) - if (family == AF_INET6 && vxlan->vn6_sock != dev->vn6_sock) + if (family == AF_INET6 && + rtnl_dereference(vxlan->vn6_sock) != sock6) continue; #endif @@ -1005,22 +1010,25 @@ static bool __vxlan_sock_release_prep(struct vxlan_sock *vs) static void vxlan_sock_release(struct vxlan_dev *vxlan) { - bool ipv4 = __vxlan_sock_release_prep(vxlan->vn4_sock); + struct vxlan_sock *sock4 = rtnl_dereference(vxlan->vn4_sock); #if IS_ENABLED(CONFIG_IPV6) - bool ipv6 = __vxlan_sock_release_prep(vxlan->vn6_sock); + struct vxlan_sock *sock6 = rtnl_dereference(vxlan->vn6_sock); + + rcu_assign_pointer(vxlan->vn6_sock, NULL); #endif + rcu_assign_pointer(vxlan->vn4_sock, NULL); synchronize_net(); - if (ipv4) { - udp_tunnel_sock_release(vxlan->vn4_sock->sock); - kfree(vxlan->vn4_sock); + if (__vxlan_sock_release_prep(sock4)) { + udp_tunnel_sock_release(sock4->sock); + kfree(sock4); } #if IS_ENABLED(CONFIG_IPV6) - if (ipv6) { - udp_tunnel_sock_release(vxlan->vn6_sock->sock); - kfree(vxlan->vn6_sock); + if (__vxlan_sock_release_prep(sock6)) { + udp_tunnel_sock_release(sock6->sock); + kfree(sock6); } #endif } @@ -1036,18 +1044,21 @@ static int vxlan_igmp_join(struct vxlan_dev *vxlan) int ret = -EINVAL; if (ip->sa.sa_family == AF_INET) { + struct vxlan_sock *sock4 = rtnl_dereference(vxlan->vn4_sock); struct ip_mreqn mreq = { .imr_multiaddr.s_addr = ip->sin.sin_addr.s_addr, .imr_ifindex = ifindex, }; - sk = vxlan->vn4_sock->sock->sk; + sk = sock4->sock->sk; lock_sock(sk); ret = ip_mc_join_group(sk, &mreq); release_sock(sk); #if IS_ENABLED(CONFIG_IPV6) } else { - sk = vxlan->vn6_sock->sock->sk; + struct vxlan_sock *sock6 = rtnl_dereference(vxlan->vn6_sock); + + sk = sock6->sock->sk; lock_sock(sk); ret = ipv6_stub->ipv6_sock_mc_join(sk, ifindex, &ip->sin6.sin6_addr); @@ -1067,18 +1078,21 @@ static int vxlan_igmp_leave(struct vxlan_dev *vxlan) int ret = -EINVAL; if (ip->sa.sa_family == AF_INET) { + struct vxlan_sock *sock4 = rtnl_dereference(vxlan->vn4_sock); struct ip_mreqn mreq = { .imr_multiaddr.s_addr = ip->sin.sin_addr.s_addr, .imr_ifindex = ifindex, }; - sk = vxlan->vn4_sock->sock->sk; + sk = sock4->sock->sk; lock_sock(sk); ret = ip_mc_leave_group(sk, &mreq); release_sock(sk); #if IS_ENABLED(CONFIG_IPV6) } else { - sk = vxlan->vn6_sock->sock->sk; + struct vxlan_sock *sock6 = rtnl_dereference(vxlan->vn6_sock); + + sk = sock6->sock->sk; lock_sock(sk); ret = ipv6_stub->ipv6_sock_mc_drop(sk, ifindex, &ip->sin6.sin6_addr); @@ -1828,11 +1842,15 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan, struct dst_cache *dst_cache, const struct ip_tunnel_info *info) { + struct vxlan_sock *sock6 = rcu_dereference(vxlan->vn6_sock); bool use_cache = ip_tunnel_dst_cache_usable(skb, info); struct dst_entry *ndst; struct flowi6 fl6; int err; + if (!sock6) + return 
ERR_PTR(-EIO); + if (tos && !info) use_cache = false; if (use_cache) { @@ -1850,7 +1868,7 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan, fl6.flowi6_proto = IPPROTO_UDP; err = ipv6_stub->ipv6_dst_lookup(vxlan->net, - vxlan->vn6_sock->sock->sk, + sock6->sock->sk, &ndst, &fl6); if (err < 0) return ERR_PTR(err); @@ -1995,9 +2013,11 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, } if (dst->sa.sa_family == AF_INET) { - if (!vxlan->vn4_sock) + struct vxlan_sock *sock4 = rcu_dereference(vxlan->vn4_sock); + + if (!sock4) goto drop; - sk = vxlan->vn4_sock->sock->sk; + sk = sock4->sock->sk; rt = vxlan_get_route(vxlan, skb, rdst ? rdst->remote_ifindex : 0, tos, @@ -2050,12 +2070,13 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, src_port, dst_port, xnet, !udp_sum); #if IS_ENABLED(CONFIG_IPV6) } else { + struct vxlan_sock *sock6 = rcu_dereference(vxlan->vn6_sock); struct dst_entry *ndst; u32 rt6i_flags; - if (!vxlan->vn6_sock) + if (!sock6) goto drop; - sk = vxlan->vn6_sock->sock->sk; + sk = sock6->sock->sk; ndst = vxlan6_get_route(vxlan, skb, rdst ? rdst->remote_ifindex : 0, tos, @@ -2415,9 +2436,10 @@ static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) dport = info->key.tp_dst ? : vxlan->cfg.dst_port; if (ip_tunnel_info_af(info) == AF_INET) { + struct vxlan_sock *sock4 = rcu_dereference(vxlan->vn4_sock); struct rtable *rt; - if (!vxlan->vn4_sock) + if (!sock4) return -EINVAL; rt = vxlan_get_route(vxlan, skb, 0, info->key.tos, info->key.u.ipv4.dst, @@ -2429,8 +2451,6 @@ static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) #if IS_ENABLED(CONFIG_IPV6) struct dst_entry *ndst; - if (!vxlan->vn6_sock) - return -EINVAL; ndst = vxlan6_get_route(vxlan, skb, 0, info->key.tos, info->key.label, &info->key.u.ipv6.dst, &info->key.u.ipv6.src, NULL, info); @@ -2740,10 +2760,10 @@ static int __vxlan_sock_add(struct vxlan_dev *vxlan, bool ipv6) return PTR_ERR(vs); #if IS_ENABLED(CONFIG_IPV6) if (ipv6) - vxlan->vn6_sock = vs; + rcu_assign_pointer(vxlan->vn6_sock, vs); else #endif - vxlan->vn4_sock = vs; + rcu_assign_pointer(vxlan->vn4_sock, vs); vxlan_vs_add_dev(vs, vxlan); return 0; } @@ -2754,9 +2774,9 @@ static int vxlan_sock_add(struct vxlan_dev *vxlan) bool metadata = vxlan->flags & VXLAN_F_COLLECT_METADATA; int ret = 0; - vxlan->vn4_sock = NULL; + RCU_INIT_POINTER(vxlan->vn4_sock, NULL); #if IS_ENABLED(CONFIG_IPV6) - vxlan->vn6_sock = NULL; + RCU_INIT_POINTER(vxlan->vn6_sock, NULL); if (ipv6 || metadata) ret = __vxlan_sock_add(vxlan, true); #endif diff --git a/include/net/vxlan.h b/include/net/vxlan.h index 0255613a54a4..308adc4154f4 100644 --- a/include/net/vxlan.h +++ b/include/net/vxlan.h @@ -225,9 +225,9 @@ struct vxlan_config { struct vxlan_dev { struct hlist_node hlist; /* vni hash table */ struct list_head next; /* vxlan's per namespace list */ - struct vxlan_sock *vn4_sock; /* listening socket for IPv4 */ + struct vxlan_sock __rcu *vn4_sock; /* listening socket for IPv4 */ #if IS_ENABLED(CONFIG_IPV6) - struct vxlan_sock *vn6_sock; /* listening socket for IPv6 */ + struct vxlan_sock __rcu *vn6_sock; /* listening socket for IPv6 */ #endif struct net_device *dev; struct net *net; /* netns for packet i/o */ -- cgit v1.2.3-59-g8ed1b From fceb9c3e38252992bbf1a3028cc2f7b871211533 Mon Sep 17 00:00:00 2001 From: pravin shelar Date: Fri, 28 Oct 2016 09:59:16 -0700 Subject: geneve: avoid using stale geneve socket. This patch is similar to earlier vxlan patch. 
Geneve device close operation frees geneve socket. This operation can race with geneve-xmit function which dereferences geneve socket. Following patch uses RCU mechanism to avoid this situation. Signed-off-by: Pravin B Shelar Acked-by: John W. Linville Signed-off-by: David S. Miller --- drivers/net/geneve.c | 45 ++++++++++++++++++++++++++++++++++----------- 1 file changed, 34 insertions(+), 11 deletions(-) (limited to 'drivers') diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 16af1ce99233..42edd7b7902f 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -58,9 +58,9 @@ struct geneve_dev { struct hlist_node hlist; /* vni hash table */ struct net *net; /* netns for packet i/o */ struct net_device *dev; /* netdev for geneve tunnel */ - struct geneve_sock *sock4; /* IPv4 socket used for geneve tunnel */ + struct geneve_sock __rcu *sock4; /* IPv4 socket used for geneve tunnel */ #if IS_ENABLED(CONFIG_IPV6) - struct geneve_sock *sock6; /* IPv6 socket used for geneve tunnel */ + struct geneve_sock __rcu *sock6; /* IPv6 socket used for geneve tunnel */ #endif u8 vni[3]; /* virtual network ID for tunnel */ u8 ttl; /* TTL override */ @@ -543,9 +543,19 @@ static void __geneve_sock_release(struct geneve_sock *gs) static void geneve_sock_release(struct geneve_dev *geneve) { - __geneve_sock_release(geneve->sock4); + struct geneve_sock *gs4 = rtnl_dereference(geneve->sock4); #if IS_ENABLED(CONFIG_IPV6) - __geneve_sock_release(geneve->sock6); + struct geneve_sock *gs6 = rtnl_dereference(geneve->sock6); + + rcu_assign_pointer(geneve->sock6, NULL); +#endif + + rcu_assign_pointer(geneve->sock4, NULL); + synchronize_net(); + + __geneve_sock_release(gs4); +#if IS_ENABLED(CONFIG_IPV6) + __geneve_sock_release(gs6); #endif } @@ -586,10 +596,10 @@ out: gs->flags = geneve->flags; #if IS_ENABLED(CONFIG_IPV6) if (ipv6) - geneve->sock6 = gs; + rcu_assign_pointer(geneve->sock6, gs); else #endif - geneve->sock4 = gs; + rcu_assign_pointer(geneve->sock4, gs); hash = geneve_net_vni_hash(geneve->vni); hlist_add_head_rcu(&geneve->hlist, &gs->vni_list[hash]); @@ -603,9 +613,7 @@ static int geneve_open(struct net_device *dev) bool metadata = geneve->collect_md; int ret = 0; - geneve->sock4 = NULL; #if IS_ENABLED(CONFIG_IPV6) - geneve->sock6 = NULL; if (ipv6 || metadata) ret = geneve_sock_add(geneve, true); #endif @@ -720,6 +728,9 @@ static struct rtable *geneve_get_v4_rt(struct sk_buff *skb, struct rtable *rt = NULL; __u8 tos; + if (!rcu_dereference(geneve->sock4)) + return ERR_PTR(-EIO); + memset(fl4, 0, sizeof(*fl4)); fl4->flowi4_mark = skb->mark; fl4->flowi4_proto = IPPROTO_UDP; @@ -772,11 +783,15 @@ static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb, { bool use_cache = ip_tunnel_dst_cache_usable(skb, info); struct geneve_dev *geneve = netdev_priv(dev); - struct geneve_sock *gs6 = geneve->sock6; struct dst_entry *dst = NULL; struct dst_cache *dst_cache; + struct geneve_sock *gs6; __u8 prio; + gs6 = rcu_dereference(geneve->sock6); + if (!gs6) + return ERR_PTR(-EIO); + memset(fl6, 0, sizeof(*fl6)); fl6->flowi6_mark = skb->mark; fl6->flowi6_proto = IPPROTO_UDP; @@ -842,7 +857,7 @@ static netdev_tx_t geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev, struct ip_tunnel_info *info) { struct geneve_dev *geneve = netdev_priv(dev); - struct geneve_sock *gs4 = geneve->sock4; + struct geneve_sock *gs4; struct rtable *rt = NULL; const struct iphdr *iip; /* interior IP header */ int err = -EINVAL; @@ -853,6 +868,10 @@ static netdev_tx_t geneve_xmit_skb(struct sk_buff *skb, struct net_device 
*dev, bool xnet = !net_eq(geneve->net, dev_net(geneve->dev)); u32 flags = geneve->flags; + gs4 = rcu_dereference(geneve->sock4); + if (!gs4) + goto tx_error; + if (geneve->collect_md) { if (unlikely(!info || !(info->mode & IP_TUNNEL_INFO_TX))) { netdev_dbg(dev, "no tunnel metadata\n"); @@ -932,9 +951,9 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev, struct ip_tunnel_info *info) { struct geneve_dev *geneve = netdev_priv(dev); - struct geneve_sock *gs6 = geneve->sock6; struct dst_entry *dst = NULL; const struct iphdr *iip; /* interior IP header */ + struct geneve_sock *gs6; int err = -EINVAL; struct flowi6 fl6; __u8 prio, ttl; @@ -943,6 +962,10 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev, bool xnet = !net_eq(geneve->net, dev_net(geneve->dev)); u32 flags = geneve->flags; + gs6 = rcu_dereference(geneve->sock6); + if (!gs6) + goto tx_error; + if (geneve->collect_md) { if (unlikely(!info || !(info->mode & IP_TUNNEL_INFO_TX))) { netdev_dbg(dev, "no tunnel metadata\n"); -- cgit v1.2.3-59-g8ed1b
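Both the vxlan and geneve fixes above rely on the same RCU retire-then-free pattern: the transmit path reads the socket pointer with rcu_dereference() and tolerates NULL, while the control path unpublishes the pointer and waits a grace period before releasing the socket. The sketch below is a condensed illustration of that pattern, not the driver code itself; my_tunnel, my_sock, my_sock_release() and my_sock_xmit() are made-up names, and only the RCU primitives (rtnl_dereference(), RCU_INIT_POINTER(), synchronize_net(), rcu_dereference()) are the ones the patches actually use.

/* Condensed sketch of the RCU retire-then-free pattern; names are illustrative. */
struct my_tunnel {
	struct my_sock __rcu *sock4;	/* published socket, may be torn down */
};

/* Control path, runs under RTNL: unpublish, wait for readers, then free. */
static void my_tunnel_close(struct my_tunnel *t)
{
	struct my_sock *s = rtnl_dereference(t->sock4);

	RCU_INIT_POINTER(t->sock4, NULL);	/* new readers now see NULL */
	synchronize_net();			/* wait out in-flight readers */
	my_sock_release(s);			/* hypothetical helper */
}

/* Data path, called inside rcu_read_lock() (e.g. from ndo_start_xmit). */
static netdev_tx_t my_tunnel_xmit(struct my_tunnel *t, struct sk_buff *skb)
{
	struct my_sock *s = rcu_dereference(t->sock4);

	if (!s) {			/* device is being closed; drop instead of crashing */
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}
	return my_sock_xmit(s, skb);	/* hypothetical helper */
}

The ordering is the point: because the pointer is cleared before synchronize_net(), any transmit path that already fetched a non-NULL socket completes its RCU read-side section before the socket is released, which is exactly the race the two commits close.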