Diffstat (limited to 'drivers/fpga')
28 files changed, 2239 insertions, 317 deletions
diff --git a/drivers/fpga/Kconfig b/drivers/fpga/Kconfig index c20445b867ae..72380e1d31c7 100644 --- a/drivers/fpga/Kconfig +++ b/drivers/fpga/Kconfig @@ -1,3 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0-only # # FPGA framework configuration # @@ -25,9 +26,9 @@ config FPGA_MGR_SOCFPGA_A10 FPGA manager driver support for Altera Arria10 SoCFPGA. config ALTERA_PR_IP_CORE - tristate "Altera Partial Reconfiguration IP Core" - help - Core driver support for Altera Partial Reconfiguration IP component + tristate "Altera Partial Reconfiguration IP Core" + help + Core driver support for Altera Partial Reconfiguration IP component config ALTERA_PR_IP_CORE_PLAT tristate "Platform support of Altera Partial Reconfiguration IP Core" @@ -39,16 +40,17 @@ config ALTERA_PR_IP_CORE_PLAT config FPGA_MGR_ALTERA_PS_SPI tristate "Altera FPGA Passive Serial over SPI" depends on SPI + select BITREVERSE help FPGA manager driver support for Altera Arria/Cyclone/Stratix using the passive serial interface over SPI. config FPGA_MGR_ALTERA_CVP - tristate "Altera Arria-V/Cyclone-V/Stratix-V CvP FPGA Manager" + tristate "Altera CvP FPGA Manager" depends on PCI help - FPGA manager driver support for Arria-V, Cyclone-V, Stratix-V - and Arria 10 Altera FPGAs using the CvP interface over PCIe. + FPGA manager driver support for Arria-V, Cyclone-V, Stratix-V, + Arria 10 and Stratix10 Altera FPGAs using the CvP interface over PCIe. config FPGA_MGR_ZYNQ_FPGA tristate "Xilinx Zynq FPGA" @@ -154,7 +156,7 @@ config FPGA_DFL config FPGA_DFL_FME tristate "FPGA DFL FME Driver" - depends on FPGA_DFL + depends on FPGA_DFL && HWMON help The FPGA Management Engine (FME) is a feature device implemented under Device Feature List (DFL) framework. Select this option to @@ -204,4 +206,13 @@ config FPGA_DFL_PCI To compile this as a module, choose M here. +config FPGA_MGR_ZYNQMP_FPGA + tristate "Xilinx ZynqMP FPGA" + depends on ARCH_ZYNQMP || COMPILE_TEST + help + FPGA manager driver support for Xilinx ZynqMP FPGAs. + This driver uses the processor configuration port (PCAP) + to configure the programmable logic (PL) through PS + on ZynqMP SoC.
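
The new FPGA_MGR_ZYNQMP_FPGA entry follows the same pattern as the other managers in this directory: a thin driver that registers a set of fpga_manager_ops with the FPGA manager core. As a rough sketch (hypothetical names, error paths trimmed; not the actual zynqmp-fpga.c), such a driver looks like:

#include <linux/fpga/fpga-mgr.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static int demo_write_init(struct fpga_manager *mgr,
                           struct fpga_image_info *info,
                           const char *buf, size_t count)
{
        return 0;       /* prepare the device, e.g. initialize the PCAP path */
}

static int demo_write(struct fpga_manager *mgr, const char *buf, size_t count)
{
        return 0;       /* stream bitstream data into the device */
}

static int demo_write_complete(struct fpga_manager *mgr,
                               struct fpga_image_info *info)
{
        return 0;       /* wait for the DONE indication */
}

static const struct fpga_manager_ops demo_mgr_ops = {
        .write_init     = demo_write_init,
        .write          = demo_write,
        .write_complete = demo_write_complete,
};

static int demo_probe(struct platform_device *pdev)
{
        struct fpga_manager *mgr;

        mgr = devm_fpga_mgr_create(&pdev->dev, "Demo FPGA Manager",
                                   &demo_mgr_ops, NULL);
        if (!mgr)
                return -ENOMEM;

        platform_set_drvdata(pdev, mgr);
        return fpga_mgr_register(mgr);
}

static int demo_remove(struct platform_device *pdev)
{
        fpga_mgr_unregister(platform_get_drvdata(pdev));
        return 0;
}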
+ endif # FPGA diff --git a/drivers/fpga/Makefile b/drivers/fpga/Makefile index c0dd4c82fbdb..4865b74b00a4 100644 --- a/drivers/fpga/Makefile +++ b/drivers/fpga/Makefile @@ -17,6 +17,7 @@ obj-$(CONFIG_FPGA_MGR_STRATIX10_SOC) += stratix10-soc.o obj-$(CONFIG_FPGA_MGR_TS73XX) += ts73xx-fpga.o obj-$(CONFIG_FPGA_MGR_XILINX_SPI) += xilinx-spi.o obj-$(CONFIG_FPGA_MGR_ZYNQ_FPGA) += zynq-fpga.o +obj-$(CONFIG_FPGA_MGR_ZYNQMP_FPGA) += zynqmp-fpga.o obj-$(CONFIG_ALTERA_PR_IP_CORE) += altera-pr-ip-core.o obj-$(CONFIG_ALTERA_PR_IP_CORE_PLAT) += altera-pr-ip-core-plat.o @@ -38,8 +39,9 @@ obj-$(CONFIG_FPGA_DFL_FME_BRIDGE) += dfl-fme-br.o obj-$(CONFIG_FPGA_DFL_FME_REGION) += dfl-fme-region.o obj-$(CONFIG_FPGA_DFL_AFU) += dfl-afu.o -dfl-fme-objs := dfl-fme-main.o dfl-fme-pr.o +dfl-fme-objs := dfl-fme-main.o dfl-fme-pr.o dfl-fme-error.o dfl-afu-objs := dfl-afu-main.o dfl-afu-region.o dfl-afu-dma-region.o +dfl-afu-objs += dfl-afu-error.o # Drivers for FPGAs which implement DFL obj-$(CONFIG_FPGA_DFL_PCI) += dfl-pci.o diff --git a/drivers/fpga/altera-cvp.c b/drivers/fpga/altera-cvp.c index 35c3aa5792e2..4e0edb60bfba 100644 --- a/drivers/fpga/altera-cvp.c +++ b/drivers/fpga/altera-cvp.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * FPGA Manager Driver for Altera Arria/Cyclone/Stratix CvP * @@ -5,15 +6,6 @@ * * Anatolij Gustschin <agust@denx.de> * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * * Manage Altera FPGA firmware using PCIe CvP. * Firmware must be in binary "rbf" format. 
*/ @@ -30,10 +22,10 @@ #define TIMEOUT_US 2000 /* CVP STATUS timeout for USERMODE polling */ /* Vendor Specific Extended Capability Registers */ -#define VSE_PCIE_EXT_CAP_ID 0x200 +#define VSE_PCIE_EXT_CAP_ID 0x0 #define VSE_PCIE_EXT_CAP_ID_VAL 0x000b /* 16bit */ -#define VSE_CVP_STATUS 0x21c /* 32bit */ +#define VSE_CVP_STATUS 0x1c /* 32bit */ #define VSE_CVP_STATUS_CFG_RDY BIT(18) /* CVP_CONFIG_READY */ #define VSE_CVP_STATUS_CFG_ERR BIT(19) /* CVP_CONFIG_ERROR */ #define VSE_CVP_STATUS_CVP_EN BIT(20) /* ctrl block is enabling CVP */ @@ -41,41 +33,93 @@ #define VSE_CVP_STATUS_CFG_DONE BIT(23) /* CVP_CONFIG_DONE */ #define VSE_CVP_STATUS_PLD_CLK_IN_USE BIT(24) /* PLD_CLK_IN_USE */ -#define VSE_CVP_MODE_CTRL 0x220 /* 32bit */ +#define VSE_CVP_MODE_CTRL 0x20 /* 32bit */ #define VSE_CVP_MODE_CTRL_CVP_MODE BIT(0) /* CVP (1) or normal mode (0) */ #define VSE_CVP_MODE_CTRL_HIP_CLK_SEL BIT(1) /* PMA (1) or fabric clock (0) */ #define VSE_CVP_MODE_CTRL_NUMCLKS_OFF 8 /* NUMCLKS bits offset */ #define VSE_CVP_MODE_CTRL_NUMCLKS_MASK GENMASK(15, 8) -#define VSE_CVP_DATA 0x228 /* 32bit */ -#define VSE_CVP_PROG_CTRL 0x22c /* 32bit */ +#define VSE_CVP_DATA 0x28 /* 32bit */ +#define VSE_CVP_PROG_CTRL 0x2c /* 32bit */ #define VSE_CVP_PROG_CTRL_CONFIG BIT(0) #define VSE_CVP_PROG_CTRL_START_XFER BIT(1) +#define VSE_CVP_PROG_CTRL_MASK GENMASK(1, 0) -#define VSE_UNCOR_ERR_STATUS 0x234 /* 32bit */ +#define VSE_UNCOR_ERR_STATUS 0x34 /* 32bit */ #define VSE_UNCOR_ERR_CVP_CFG_ERR BIT(5) /* CVP_CONFIG_ERROR_LATCHED */ +#define V1_VSEC_OFFSET 0x200 /* Vendor Specific Offset V1 */ +/* V2 Defines */ +#define VSE_CVP_TX_CREDITS 0x49 /* 8bit */ + +#define V2_CREDIT_TIMEOUT_US 20000 +#define V2_CHECK_CREDIT_US 10 +#define V2_POLL_TIMEOUT_US 1000000 +#define V2_USER_TIMEOUT_US 500000 + +#define V1_POLL_TIMEOUT_US 10 + #define DRV_NAME "altera-cvp" #define ALTERA_CVP_MGR_NAME "Altera CvP FPGA Manager" +/* Write block sizes */ +#define ALTERA_CVP_V1_SIZE 4 +#define ALTERA_CVP_V2_SIZE 4096 + /* Optional CvP config error status check for debugging */ static bool altera_cvp_chkcfg; +struct cvp_priv; + struct altera_cvp_conf { struct fpga_manager *mgr; struct pci_dev *pci_dev; void __iomem *map; - void (*write_data)(struct altera_cvp_conf *, u32); + void (*write_data)(struct altera_cvp_conf *conf, + u32 data); char mgr_name[64]; u8 numclks; + u32 sent_packets; + u32 vsec_offset; + const struct cvp_priv *priv; }; +struct cvp_priv { + void (*switch_clk)(struct altera_cvp_conf *conf); + int (*clear_state)(struct altera_cvp_conf *conf); + int (*wait_credit)(struct fpga_manager *mgr, u32 blocks); + size_t block_size; + int poll_time_us; + int user_time_us; +}; + +static int altera_read_config_byte(struct altera_cvp_conf *conf, + int where, u8 *val) +{ + return pci_read_config_byte(conf->pci_dev, conf->vsec_offset + where, + val); +} + +static int altera_read_config_dword(struct altera_cvp_conf *conf, + int where, u32 *val) +{ + return pci_read_config_dword(conf->pci_dev, conf->vsec_offset + where, + val); +} + +static int altera_write_config_dword(struct altera_cvp_conf *conf, + int where, u32 val) +{ + return pci_write_config_dword(conf->pci_dev, conf->vsec_offset + where, + val); +} + static enum fpga_mgr_states altera_cvp_state(struct fpga_manager *mgr) { struct altera_cvp_conf *conf = mgr->priv; u32 status; - pci_read_config_dword(conf->pci_dev, VSE_CVP_STATUS, &status); + altera_read_config_dword(conf, VSE_CVP_STATUS, &status); if (status & VSE_CVP_STATUS_CFG_DONE) return FPGA_MGR_STATE_OPERATING; @@ -93,7 +137,8 @@ static 
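
The new struct cvp_priv gathers everything generation-specific (block size, timeouts, optional hooks) so one driver can serve both the original fixed-offset VSEC devices and newer parts. A condensed illustration of the dispatch idea, simplified from the functions that follow (not driver code as such):

static int demo_start_config(struct altera_cvp_conf *conf)
{
        int ret;

        /* v1 parts drive the clock mux with dummy writes ... */
        if (conf->priv->switch_clk)
                conf->priv->switch_clk(conf);

        /* ... while v2 parts clear stale state explicitly instead */
        if (conf->priv->clear_state) {
                ret = conf->priv->clear_state(conf);
                if (ret)
                        return ret;
        }

        return 0;
}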
void altera_cvp_write_data_iomem(struct altera_cvp_conf *conf, u32 val) static void altera_cvp_write_data_config(struct altera_cvp_conf *conf, u32 val) { - pci_write_config_dword(conf->pci_dev, VSE_CVP_DATA, val); + pci_write_config_dword(conf->pci_dev, conf->vsec_offset + VSE_CVP_DATA, + val); } /* switches between CvP clock and internal clock */ @@ -103,10 +148,10 @@ static void altera_cvp_dummy_write(struct altera_cvp_conf *conf) u32 val; /* set 1 CVP clock cycle for every CVP Data Register Write */ - pci_read_config_dword(conf->pci_dev, VSE_CVP_MODE_CTRL, &val); + altera_read_config_dword(conf, VSE_CVP_MODE_CTRL, &val); val &= ~VSE_CVP_MODE_CTRL_NUMCLKS_MASK; val |= 1 << VSE_CVP_MODE_CTRL_NUMCLKS_OFF; - pci_write_config_dword(conf->pci_dev, VSE_CVP_MODE_CTRL, val); + altera_write_config_dword(conf, VSE_CVP_MODE_CTRL, val); for (i = 0; i < CVP_DUMMY_WR; i++) conf->write_data(conf, 0); /* dummy data, could be any value */ @@ -123,7 +168,7 @@ static int altera_cvp_wait_status(struct altera_cvp_conf *conf, u32 status_mask, retries++; do { - pci_read_config_dword(conf->pci_dev, VSE_CVP_STATUS, &val); + altera_read_config_dword(conf, VSE_CVP_STATUS, &val); if ((val & status_mask) == status_val) return 0; @@ -134,32 +179,136 @@ static int altera_cvp_wait_status(struct altera_cvp_conf *conf, u32 status_mask, return -ETIMEDOUT; } +static int altera_cvp_chk_error(struct fpga_manager *mgr, size_t bytes) +{ + struct altera_cvp_conf *conf = mgr->priv; + u32 val; + int ret; + + /* STEP 10 (optional) - check CVP_CONFIG_ERROR flag */ + ret = altera_read_config_dword(conf, VSE_CVP_STATUS, &val); + if (ret || (val & VSE_CVP_STATUS_CFG_ERR)) { + dev_err(&mgr->dev, "CVP_CONFIG_ERROR after %zu bytes!\n", + bytes); + return -EPROTO; + } + return 0; +} + +/* + * CvP Version2 Functions + * Recent Intel FPGAs use a credit mechanism to throttle incoming + * bitstreams and a different method of clearing the state. 
+ */ + +static int altera_cvp_v2_clear_state(struct altera_cvp_conf *conf) +{ + u32 val; + int ret; + + /* Clear the START_XFER and CVP_CONFIG bits */ + ret = altera_read_config_dword(conf, VSE_CVP_PROG_CTRL, &val); + if (ret) { + dev_err(&conf->pci_dev->dev, + "Error reading CVP Program Control Register\n"); + return ret; + } + + val &= ~VSE_CVP_PROG_CTRL_MASK; + ret = altera_write_config_dword(conf, VSE_CVP_PROG_CTRL, val); + if (ret) { + dev_err(&conf->pci_dev->dev, + "Error writing CVP Program Control Register\n"); + return ret; + } + + return altera_cvp_wait_status(conf, VSE_CVP_STATUS_CFG_RDY, 0, + conf->priv->poll_time_us); +} + +static int altera_cvp_v2_wait_for_credit(struct fpga_manager *mgr, + u32 blocks) +{ + u32 timeout = V2_CREDIT_TIMEOUT_US / V2_CHECK_CREDIT_US; + struct altera_cvp_conf *conf = mgr->priv; + int ret; + u8 val; + + do { + ret = altera_read_config_byte(conf, VSE_CVP_TX_CREDITS, &val); + if (ret) { + dev_err(&conf->pci_dev->dev, + "Error reading CVP Credit Register\n"); + return ret; + } + + /* Return if there is space in FIFO */ + if (val - (u8)conf->sent_packets) + return 0; + + ret = altera_cvp_chk_error(mgr, blocks * ALTERA_CVP_V2_SIZE); + if (ret) { + dev_err(&conf->pci_dev->dev, + "CE Bit error credit reg[0x%x]:sent[0x%x]\n", + val, conf->sent_packets); + return -EAGAIN; + } + + /* Limit the check credit byte traffic */ + usleep_range(V2_CHECK_CREDIT_US, V2_CHECK_CREDIT_US + 1); + } while (timeout--); + + dev_err(&conf->pci_dev->dev, "Timeout waiting for credit\n"); + return -ETIMEDOUT; +} + +static int altera_cvp_send_block(struct altera_cvp_conf *conf, + const u32 *data, size_t len) +{ + u32 mask, words = len / sizeof(u32); + int i, remainder; + + for (i = 0; i < words; i++) + conf->write_data(conf, *data++); + + /* write up to 3 trailing bytes, if any */ + remainder = len % sizeof(u32); + if (remainder) { + mask = BIT(remainder * 8) - 1; + if (mask) + conf->write_data(conf, *data & mask); + } + + return 0; +} + static int altera_cvp_teardown(struct fpga_manager *mgr, struct fpga_image_info *info) { struct altera_cvp_conf *conf = mgr->priv; - struct pci_dev *pdev = conf->pci_dev; int ret; u32 val; /* STEP 12 - reset START_XFER bit */ - pci_read_config_dword(pdev, VSE_CVP_PROG_CTRL, &val); + altera_read_config_dword(conf, VSE_CVP_PROG_CTRL, &val); val &= ~VSE_CVP_PROG_CTRL_START_XFER; - pci_write_config_dword(pdev, VSE_CVP_PROG_CTRL, val); + altera_write_config_dword(conf, VSE_CVP_PROG_CTRL, val); /* STEP 13 - reset CVP_CONFIG bit */ val &= ~VSE_CVP_PROG_CTRL_CONFIG; - pci_write_config_dword(pdev, VSE_CVP_PROG_CTRL, val); + altera_write_config_dword(conf, VSE_CVP_PROG_CTRL, val); /* * STEP 14 * - set CVP_NUMCLKS to 1 and then issue CVP_DUMMY_WR dummy * writes to the HIP */ - altera_cvp_dummy_write(conf); /* from CVP clock to internal clock */ + if (conf->priv->switch_clk) + conf->priv->switch_clk(conf); /* STEP 15 - poll CVP_CONFIG_READY bit for 0 with 10us timeout */ - ret = altera_cvp_wait_status(conf, VSE_CVP_STATUS_CFG_RDY, 0, 10); + ret = altera_cvp_wait_status(conf, VSE_CVP_STATUS_CFG_RDY, 0, + conf->priv->poll_time_us); if (ret) dev_err(&mgr->dev, "CFG_RDY == 0 timeout\n"); @@ -171,7 +320,6 @@ static int altera_cvp_write_init(struct fpga_manager *mgr, const char *buf, size_t count) { struct altera_cvp_conf *conf = mgr->priv; - struct pci_dev *pdev = conf->pci_dev; u32 iflags, val; int ret; @@ -191,7 +339,7 @@ static int altera_cvp_write_init(struct fpga_manager *mgr, conf->numclks = 1; /* for uncompressed and unencrypted images */ /* STEP 1 - read 
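
altera_cvp_v2_wait_for_credit() above compares two free-running 8-bit counters: the cumulative credit count granted by the device and the number of blocks the host has sent. Because both wrap at 256, the unsigned difference stays correct across wraparound. A standalone demo of that arithmetic (illustrative only, assuming the register holds a cumulative count as the driver's comparison implies):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint8_t granted = 2;    /* device has granted 258 credits, wrapped */
        uint8_t sent = 254;     /* host has sent 254 blocks */

        /* (2 - 254) mod 256 == 4: four credits still available */
        printf("available credits: %u\n", (uint8_t)(granted - sent));
        return 0;
}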
CVP status and check CVP_EN flag */ - pci_read_config_dword(pdev, VSE_CVP_STATUS, &val); + altera_read_config_dword(conf, VSE_CVP_STATUS, &val); if (!(val & VSE_CVP_STATUS_CVP_EN)) { dev_err(&mgr->dev, "CVP mode off: 0x%04x\n", val); return -ENODEV; @@ -209,30 +357,42 @@ static int altera_cvp_write_init(struct fpga_manager *mgr, * - set HIP_CLK_SEL and CVP_MODE (must be set in the order mentioned) */ /* switch from fabric to PMA clock */ - pci_read_config_dword(pdev, VSE_CVP_MODE_CTRL, &val); + altera_read_config_dword(conf, VSE_CVP_MODE_CTRL, &val); val |= VSE_CVP_MODE_CTRL_HIP_CLK_SEL; - pci_write_config_dword(pdev, VSE_CVP_MODE_CTRL, val); + altera_write_config_dword(conf, VSE_CVP_MODE_CTRL, val); /* set CVP mode */ - pci_read_config_dword(pdev, VSE_CVP_MODE_CTRL, &val); + altera_read_config_dword(conf, VSE_CVP_MODE_CTRL, &val); val |= VSE_CVP_MODE_CTRL_CVP_MODE; - pci_write_config_dword(pdev, VSE_CVP_MODE_CTRL, val); + altera_write_config_dword(conf, VSE_CVP_MODE_CTRL, val); /* * STEP 3 * - set CVP_NUMCLKS to 1 and issue CVP_DUMMY_WR dummy writes to the HIP */ - altera_cvp_dummy_write(conf); + if (conf->priv->switch_clk) + conf->priv->switch_clk(conf); + + if (conf->priv->clear_state) { + ret = conf->priv->clear_state(conf); + if (ret) { + dev_err(&mgr->dev, "Problem clearing out state\n"); + return ret; + } + } + + conf->sent_packets = 0; /* STEP 4 - set CVP_CONFIG bit */ - pci_read_config_dword(pdev, VSE_CVP_PROG_CTRL, &val); + altera_read_config_dword(conf, VSE_CVP_PROG_CTRL, &val); /* request control block to begin transfer using CVP */ val |= VSE_CVP_PROG_CTRL_CONFIG; - pci_write_config_dword(pdev, VSE_CVP_PROG_CTRL, val); + altera_write_config_dword(conf, VSE_CVP_PROG_CTRL, val); - /* STEP 5 - poll CVP_CONFIG READY for 1 with 10us timeout */ + /* STEP 5 - poll CVP_CONFIG READY for 1 with timeout */ ret = altera_cvp_wait_status(conf, VSE_CVP_STATUS_CFG_RDY, - VSE_CVP_STATUS_CFG_RDY, 10); + VSE_CVP_STATUS_CFG_RDY, + conf->priv->poll_time_us); if (ret) { dev_warn(&mgr->dev, "CFG_RDY == 1 timeout\n"); return ret; @@ -242,33 +402,28 @@ static int altera_cvp_write_init(struct fpga_manager *mgr, * STEP 6 * - set CVP_NUMCLKS to 1 and issue CVP_DUMMY_WR dummy writes to the HIP */ - altera_cvp_dummy_write(conf); + if (conf->priv->switch_clk) + conf->priv->switch_clk(conf); + + if (altera_cvp_chkcfg) { + ret = altera_cvp_chk_error(mgr, 0); + if (ret) { + dev_warn(&mgr->dev, "CFG_RDY == 1 timeout\n"); + return ret; + } + } /* STEP 7 - set START_XFER */ - pci_read_config_dword(pdev, VSE_CVP_PROG_CTRL, &val); + altera_read_config_dword(conf, VSE_CVP_PROG_CTRL, &val); val |= VSE_CVP_PROG_CTRL_START_XFER; - pci_write_config_dword(pdev, VSE_CVP_PROG_CTRL, val); + altera_write_config_dword(conf, VSE_CVP_PROG_CTRL, val); /* STEP 8 - start transfer (set CVP_NUMCLKS for bitstream) */ - pci_read_config_dword(pdev, VSE_CVP_MODE_CTRL, &val); - val &= ~VSE_CVP_MODE_CTRL_NUMCLKS_MASK; - val |= conf->numclks << VSE_CVP_MODE_CTRL_NUMCLKS_OFF; - pci_write_config_dword(pdev, VSE_CVP_MODE_CTRL, val); - - return 0; -} - -static inline int altera_cvp_chk_error(struct fpga_manager *mgr, size_t bytes) -{ - struct altera_cvp_conf *conf = mgr->priv; - u32 val; - - /* STEP 10 (optional) - check CVP_CONFIG_ERROR flag */ - pci_read_config_dword(conf->pci_dev, VSE_CVP_STATUS, &val); - if (val & VSE_CVP_STATUS_CFG_ERR) { - dev_err(&mgr->dev, "CVP_CONFIG_ERROR after %zu bytes!\n", - bytes); - return -EPROTO; + if (conf->priv->switch_clk) { + altera_read_config_dword(conf, VSE_CVP_MODE_CTRL, &val); + val &= 
~VSE_CVP_MODE_CTRL_NUMCLKS_MASK; + val |= conf->numclks << VSE_CVP_MODE_CTRL_NUMCLKS_OFF; + altera_write_config_dword(conf, VSE_CVP_MODE_CTRL, val); } return 0; } @@ -277,20 +432,32 @@ static int altera_cvp_write(struct fpga_manager *mgr, const char *buf, size_t count) { struct altera_cvp_conf *conf = mgr->priv; + size_t done, remaining, len; const u32 *data; - size_t done, remaining; int status = 0; - u32 mask; /* STEP 9 - write 32-bit data from RBF file to CVP data register */ data = (u32 *)buf; remaining = count; done = 0; - while (remaining >= 4) { - conf->write_data(conf, *data++); - done += 4; - remaining -= 4; + while (remaining) { + /* Use credit throttling if available */ + if (conf->priv->wait_credit) { + status = conf->priv->wait_credit(mgr, done); + if (status) { + dev_err(&conf->pci_dev->dev, + "Wait Credit ERR: 0x%x\n", status); + return status; + } + } + + len = min(conf->priv->block_size, remaining); + altera_cvp_send_block(conf, data, len); + data += len / sizeof(u32); + done += len; + remaining -= len; + conf->sent_packets++; /* * STEP 10 (optional) and STEP 11 @@ -308,11 +475,6 @@ static int altera_cvp_write(struct fpga_manager *mgr, const char *buf, } } - /* write up to 3 trailing bytes, if any */ - mask = BIT(remaining * 8) - 1; - if (mask) - conf->write_data(conf, *data & mask); - if (altera_cvp_chkcfg) status = altera_cvp_chk_error(mgr, count); @@ -323,31 +485,30 @@ static int altera_cvp_write_complete(struct fpga_manager *mgr, struct fpga_image_info *info) { struct altera_cvp_conf *conf = mgr->priv; - struct pci_dev *pdev = conf->pci_dev; + u32 mask, val; int ret; - u32 mask; - u32 val; ret = altera_cvp_teardown(mgr, info); if (ret) return ret; /* STEP 16 - check CVP_CONFIG_ERROR_LATCHED bit */ - pci_read_config_dword(pdev, VSE_UNCOR_ERR_STATUS, &val); + altera_read_config_dword(conf, VSE_UNCOR_ERR_STATUS, &val); if (val & VSE_UNCOR_ERR_CVP_CFG_ERR) { dev_err(&mgr->dev, "detected CVP_CONFIG_ERROR_LATCHED!\n"); return -EPROTO; } /* STEP 17 - reset CVP_MODE and HIP_CLK_SEL bit */ - pci_read_config_dword(pdev, VSE_CVP_MODE_CTRL, &val); + altera_read_config_dword(conf, VSE_CVP_MODE_CTRL, &val); val &= ~VSE_CVP_MODE_CTRL_HIP_CLK_SEL; val &= ~VSE_CVP_MODE_CTRL_CVP_MODE; - pci_write_config_dword(pdev, VSE_CVP_MODE_CTRL, val); + altera_write_config_dword(conf, VSE_CVP_MODE_CTRL, val); /* STEP 18 - poll PLD_CLK_IN_USE and USER_MODE bits */ mask = VSE_CVP_STATUS_PLD_CLK_IN_USE | VSE_CVP_STATUS_USERMODE; - ret = altera_cvp_wait_status(conf, mask, mask, TIMEOUT_US); + ret = altera_cvp_wait_status(conf, mask, mask, + conf->priv->user_time_us); if (ret) dev_err(&mgr->dev, "PLD_CLK_IN_USE|USERMODE timeout\n"); @@ -361,6 +522,21 @@ static const struct fpga_manager_ops altera_cvp_ops = { .write_complete = altera_cvp_write_complete, }; +static const struct cvp_priv cvp_priv_v1 = { + .switch_clk = altera_cvp_dummy_write, + .block_size = ALTERA_CVP_V1_SIZE, + .poll_time_us = V1_POLL_TIMEOUT_US, + .user_time_us = TIMEOUT_US, +}; + +static const struct cvp_priv cvp_priv_v2 = { + .clear_state = altera_cvp_v2_clear_state, + .wait_credit = altera_cvp_v2_wait_for_credit, + .block_size = ALTERA_CVP_V2_SIZE, + .poll_time_us = V2_POLL_TIMEOUT_US, + .user_time_us = V2_USER_TIMEOUT_US, +}; + static ssize_t chkcfg_show(struct device_driver *dev, char *buf) { return snprintf(buf, 3, "%d\n", altera_cvp_chkcfg); @@ -402,22 +578,29 @@ static int altera_cvp_probe(struct pci_dev *pdev, { struct altera_cvp_conf *conf; struct fpga_manager *mgr; + int ret, offset; u16 cmd, val; u32 regval; - int ret; + + 
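
The trailing-byte handling removed from altera_cvp_write() now lives in altera_cvp_send_block(). The mask expression there builds a little-endian byte mask for the final partial word; spelled out for reference:

/*
 * mask = BIT(remainder * 8) - 1, for the 1-3 bytes left after the
 * last full 32-bit word of the bitstream:
 *
 *      remainder = 1  ->  BIT(8)  - 1 = 0x000000ff
 *      remainder = 2  ->  BIT(16) - 1 = 0x0000ffff
 *      remainder = 3  ->  BIT(24) - 1 = 0x00ffffff
 *
 * ANDing with *data strips bytes beyond the end of the source buffer
 * before the final conf->write_data() of the transfer.
 */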
/* Discover the Vendor Specific Offset for this device */ + offset = pci_find_next_ext_capability(pdev, 0, PCI_EXT_CAP_ID_VNDR); + if (!offset) { + dev_err(&pdev->dev, "No Vendor Specific Offset.\n"); + return -ENODEV; + } /* * First check if this is the expected FPGA device. PCI config * space access works without enabling the PCI device, memory * space access is enabled further down. */ - pci_read_config_word(pdev, VSE_PCIE_EXT_CAP_ID, &val); + pci_read_config_word(pdev, offset + VSE_PCIE_EXT_CAP_ID, &val); if (val != VSE_PCIE_EXT_CAP_ID_VAL) { dev_err(&pdev->dev, "Wrong EXT_CAP_ID value 0x%x\n", val); return -ENODEV; } - pci_read_config_dword(pdev, VSE_CVP_STATUS, ®val); + pci_read_config_dword(pdev, offset + VSE_CVP_STATUS, ®val); if (!(regval & VSE_CVP_STATUS_CVP_EN)) { dev_err(&pdev->dev, "CVP is disabled for this device: CVP_STATUS Reg 0x%x\n", @@ -429,6 +612,8 @@ static int altera_cvp_probe(struct pci_dev *pdev, if (!conf) return -ENOMEM; + conf->vsec_offset = offset; + /* * Enable memory BAR access. We cannot use pci_enable_device() here * because it will make the driver unusable with FPGA devices that @@ -453,6 +638,11 @@ static int altera_cvp_probe(struct pci_dev *pdev, conf->pci_dev = pdev; conf->write_data = altera_cvp_write_data_iomem; + if (conf->vsec_offset == V1_VSEC_OFFSET) + conf->priv = &cvp_priv_v1; + else + conf->priv = &cvp_priv_v2; + conf->map = pci_iomap(pdev, CVP_BAR, 0); if (!conf->map) { dev_warn(&pdev->dev, "Mapping CVP BAR failed\n"); diff --git a/drivers/fpga/altera-pr-ip-core-plat.c b/drivers/fpga/altera-pr-ip-core-plat.c index b293d83143f1..99b9cc0e70f0 100644 --- a/drivers/fpga/altera-pr-ip-core-plat.c +++ b/drivers/fpga/altera-pr-ip-core-plat.c @@ -32,7 +32,9 @@ static int alt_pr_platform_remove(struct platform_device *pdev) { struct device *dev = &pdev->dev; - return alt_pr_unregister(dev); + alt_pr_unregister(dev); + + return 0; } static const struct of_device_id alt_pr_of_match[] = { diff --git a/drivers/fpga/altera-pr-ip-core.c b/drivers/fpga/altera-pr-ip-core.c index a7a3bf0b5202..2cf25fd5e897 100644 --- a/drivers/fpga/altera-pr-ip-core.c +++ b/drivers/fpga/altera-pr-ip-core.c @@ -201,15 +201,13 @@ int alt_pr_register(struct device *dev, void __iomem *reg_base) } EXPORT_SYMBOL_GPL(alt_pr_register); -int alt_pr_unregister(struct device *dev) +void alt_pr_unregister(struct device *dev) { struct fpga_manager *mgr = dev_get_drvdata(dev); dev_dbg(dev, "%s\n", __func__); fpga_mgr_unregister(mgr); - - return 0; } EXPORT_SYMBOL_GPL(alt_pr_unregister); diff --git a/drivers/fpga/altera-ps-spi.c b/drivers/fpga/altera-ps-spi.c index 678d0115f840..0221dee8dd4c 100644 --- a/drivers/fpga/altera-ps-spi.c +++ b/drivers/fpga/altera-ps-spi.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * Altera Passive Serial SPI Driver * @@ -5,10 +6,6 @@ * * Joshua Clayton <stillcompiling@gmail.com> * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * * Manage Altera FPGA firmware that is loaded over SPI using the passive * serial configuration method. * Firmware must be in binary "rbf" format. 
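
The altera-cvp probe above locates its registers by walking the PCIe extended capability list instead of hardcoding the 0x200 offset. On a device that exposes several vendor-specific capabilities, the same helper can be looped until the wanted VSEC ID turns up; a hedged sketch (this driver only needs the first instance, so it stops there):

#include <linux/pci.h>

static int demo_find_vsec(struct pci_dev *pdev, u16 wanted_vsec_id)
{
        u32 hdr;
        int pos = 0;

        while ((pos = pci_find_next_ext_capability(pdev, pos,
                                                   PCI_EXT_CAP_ID_VNDR))) {
                /* the Vendor-Specific Header sits 4 bytes past the
                 * capability header; bits 15:0 carry the VSEC ID */
                pci_read_config_dword(pdev, pos + 4, &hdr);
                if ((hdr & 0xffff) == wanted_vsec_id)
                        return pos;
        }

        return 0;       /* not found */
}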
@@ -213,7 +210,7 @@ static int altera_ps_write_complete(struct fpga_manager *mgr, return -EIO; } - if (!IS_ERR(conf->confd)) { + if (conf->confd) { if (!gpiod_get_raw_value_cansleep(conf->confd)) { dev_err(&mgr->dev, "CONF_DONE is inactive!\n"); return -EIO; @@ -292,10 +289,13 @@ static int altera_ps_probe(struct spi_device *spi) return PTR_ERR(conf->status); } - conf->confd = devm_gpiod_get(&spi->dev, "confd", GPIOD_IN); + conf->confd = devm_gpiod_get_optional(&spi->dev, "confd", GPIOD_IN); if (IS_ERR(conf->confd)) { - dev_warn(&spi->dev, "Not using confd gpio: %ld\n", - PTR_ERR(conf->confd)); + dev_err(&spi->dev, "Failed to get confd gpio: %ld\n", + PTR_ERR(conf->confd)); + return PTR_ERR(conf->confd); + } else if (!conf->confd) { + dev_warn(&spi->dev, "Not using confd gpio"); } /* Register manager with unique name */ diff --git a/drivers/fpga/dfl-afu-dma-region.c b/drivers/fpga/dfl-afu-dma-region.c index e18a786fc943..62f924489db5 100644 --- a/drivers/fpga/dfl-afu-dma-region.c +++ b/drivers/fpga/dfl-afu-dma-region.c @@ -12,6 +12,7 @@ #include <linux/dma-mapping.h> #include <linux/sched/signal.h> #include <linux/uaccess.h> +#include <linux/mm.h> #include "dfl-afu.h" @@ -32,52 +33,6 @@ void afu_dma_region_init(struct dfl_feature_platform_data *pdata) } /** - * afu_dma_adjust_locked_vm - adjust locked memory - * @dev: port device - * @npages: number of pages - * @incr: increase or decrease locked memory - * - * Increase or decrease the locked memory size with npages input. - * - * Return 0 on success. - * Return -ENOMEM if locked memory size is over the limit and no CAP_IPC_LOCK. - */ -static int afu_dma_adjust_locked_vm(struct device *dev, long npages, bool incr) -{ - unsigned long locked, lock_limit; - int ret = 0; - - /* the task is exiting. */ - if (!current->mm) - return 0; - - down_write(¤t->mm->mmap_sem); - - if (incr) { - locked = current->mm->locked_vm + npages; - lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; - - if (locked > lock_limit && !capable(CAP_IPC_LOCK)) - ret = -ENOMEM; - else - current->mm->locked_vm += npages; - } else { - if (WARN_ON_ONCE(npages > current->mm->locked_vm)) - npages = current->mm->locked_vm; - current->mm->locked_vm -= npages; - } - - dev_dbg(dev, "[%d] RLIMIT_MEMLOCK %c%ld %ld/%ld%s\n", current->pid, - incr ? '+' : '-', npages << PAGE_SHIFT, - current->mm->locked_vm << PAGE_SHIFT, rlimit(RLIMIT_MEMLOCK), - ret ? 
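
The altera-ps-spi change above adopts the standard tri-state contract of devm_gpiod_get_optional(): a real descriptor when the line is wired up, NULL when it simply is not described, and an ERR_PTR (for example -EPROBE_DEFER) when it is described but unusable. Condensed into a sketch:

#include <linux/gpio/consumer.h>

static int demo_get_confd(struct device *dev, struct gpio_desc **out)
{
        struct gpio_desc *confd;

        confd = devm_gpiod_get_optional(dev, "confd", GPIOD_IN);
        if (IS_ERR(confd))      /* described but unusable: fail the probe */
                return PTR_ERR(confd);
        if (!confd)             /* absent: carry on without it */
                dev_warn(dev, "not using confd gpio\n");

        *out = confd;
        return 0;
}

Callers then guard every use with a plain NULL check, which is exactly why the old IS_ERR() test in altera_ps_write_complete() becomes "if (conf->confd)".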
"- exceeded" : ""); - - up_write(¤t->mm->mmap_sem); - - return ret; -} - -/** * afu_dma_pin_pages - pin pages of given dma memory region * @pdata: feature device platform data * @region: dma memory region to be pinned @@ -92,7 +47,7 @@ static int afu_dma_pin_pages(struct dfl_feature_platform_data *pdata, struct device *dev = &pdata->dev->dev; int ret, pinned; - ret = afu_dma_adjust_locked_vm(dev, npages, true); + ret = account_locked_vm(current->mm, npages, true); if (ret) return ret; @@ -102,7 +57,7 @@ static int afu_dma_pin_pages(struct dfl_feature_platform_data *pdata, goto unlock_vm; } - pinned = get_user_pages_fast(region->user_addr, npages, 1, + pinned = get_user_pages_fast(region->user_addr, npages, FOLL_WRITE, region->pages); if (pinned < 0) { ret = pinned; @@ -121,7 +76,7 @@ put_pages: free_pages: kfree(region->pages); unlock_vm: - afu_dma_adjust_locked_vm(dev, npages, false); + account_locked_vm(current->mm, npages, false); return ret; } @@ -141,7 +96,7 @@ static void afu_dma_unpin_pages(struct dfl_feature_platform_data *pdata, put_all_pages(region->pages, npages); kfree(region->pages); - afu_dma_adjust_locked_vm(dev, npages, false); + account_locked_vm(current->mm, npages, false); dev_dbg(dev, "%ld pages unpinned\n", npages); } @@ -399,7 +354,7 @@ int afu_dma_map_region(struct dfl_feature_platform_data *pdata, region->pages[0], 0, region->length, DMA_BIDIRECTIONAL); - if (dma_mapping_error(&pdata->dev->dev, region->iova)) { + if (dma_mapping_error(dfl_fpga_pdata_to_parent(pdata), region->iova)) { dev_err(&pdata->dev->dev, "failed to map for dma\n"); ret = -EFAULT; goto unpin_pages; diff --git a/drivers/fpga/dfl-afu-error.c b/drivers/fpga/dfl-afu-error.c new file mode 100644 index 000000000000..c1467ae1a6b6 --- /dev/null +++ b/drivers/fpga/dfl-afu-error.c @@ -0,0 +1,230 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Driver for FPGA Accelerated Function Unit (AFU) Error Reporting + * + * Copyright 2019 Intel Corporation, Inc. + * + * Authors: + * Wu Hao <hao.wu@linux.intel.com> + * Xiao Guangrong <guangrong.xiao@linux.intel.com> + * Joseph Grecco <joe.grecco@intel.com> + * Enno Luebbers <enno.luebbers@intel.com> + * Tim Whisonant <tim.whisonant@intel.com> + * Ananda Ravuri <ananda.ravuri@intel.com> + * Mitchel Henry <henry.mitchel@intel.com> + */ + +#include <linux/uaccess.h> + +#include "dfl-afu.h" + +#define PORT_ERROR_MASK 0x8 +#define PORT_ERROR 0x10 +#define PORT_FIRST_ERROR 0x18 +#define PORT_MALFORMED_REQ0 0x20 +#define PORT_MALFORMED_REQ1 0x28 + +#define ERROR_MASK GENMASK_ULL(63, 0) + +/* mask or unmask port errors by the error mask register. */ +static void __afu_port_err_mask(struct device *dev, bool mask) +{ + void __iomem *base; + + base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR); + + writeq(mask ? ERROR_MASK : 0, base + PORT_ERROR_MASK); +} + +static void afu_port_err_mask(struct device *dev, bool mask) +{ + struct dfl_feature_platform_data *pdata = dev_get_platdata(dev); + + mutex_lock(&pdata->lock); + __afu_port_err_mask(dev, mask); + mutex_unlock(&pdata->lock); +} + +/* clear port errors. 
*/ +static int afu_port_err_clear(struct device *dev, u64 err) +{ + struct dfl_feature_platform_data *pdata = dev_get_platdata(dev); + struct platform_device *pdev = to_platform_device(dev); + void __iomem *base_err, *base_hdr; + int ret = -EBUSY; + u64 v; + + base_err = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR); + base_hdr = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER); + + mutex_lock(&pdata->lock); + + /* + * clear Port Errors + * + * - Check for AP6 State + * - Halt Port by keeping Port in reset + * - Set PORT Error mask to all 1 to mask errors + * - Clear all errors + * - Set Port mask to all 0 to enable errors + * - All errors start capturing new errors + * - Enable Port by pulling the port out of reset + */ + + /* if device is still in AP6 power state, can not clear any error. */ + v = readq(base_hdr + PORT_HDR_STS); + if (FIELD_GET(PORT_STS_PWR_STATE, v) == PORT_STS_PWR_STATE_AP6) { + dev_err(dev, "Could not clear errors, device in AP6 state.\n"); + goto done; + } + + /* Halt Port by keeping Port in reset */ + ret = __afu_port_disable(pdev); + if (ret) + goto done; + + /* Mask all errors */ + __afu_port_err_mask(dev, true); + + /* Clear errors if err input matches with current port errors.*/ + v = readq(base_err + PORT_ERROR); + + if (v == err) { + writeq(v, base_err + PORT_ERROR); + + v = readq(base_err + PORT_FIRST_ERROR); + writeq(v, base_err + PORT_FIRST_ERROR); + } else { + ret = -EINVAL; + } + + /* Clear mask */ + __afu_port_err_mask(dev, false); + + /* Enable the Port by clear the reset */ + __afu_port_enable(pdev); + +done: + mutex_unlock(&pdata->lock); + return ret; +} + +static ssize_t errors_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct dfl_feature_platform_data *pdata = dev_get_platdata(dev); + void __iomem *base; + u64 error; + + base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR); + + mutex_lock(&pdata->lock); + error = readq(base + PORT_ERROR); + mutex_unlock(&pdata->lock); + + return sprintf(buf, "0x%llx\n", (unsigned long long)error); +} + +static ssize_t errors_store(struct device *dev, struct device_attribute *attr, + const char *buff, size_t count) +{ + u64 value; + int ret; + + if (kstrtou64(buff, 0, &value)) + return -EINVAL; + + ret = afu_port_err_clear(dev, value); + + return ret ? 
ret : count; +} +static DEVICE_ATTR_RW(errors); + +static ssize_t first_error_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct dfl_feature_platform_data *pdata = dev_get_platdata(dev); + void __iomem *base; + u64 error; + + base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR); + + mutex_lock(&pdata->lock); + error = readq(base + PORT_FIRST_ERROR); + mutex_unlock(&pdata->lock); + + return sprintf(buf, "0x%llx\n", (unsigned long long)error); +} +static DEVICE_ATTR_RO(first_error); + +static ssize_t first_malformed_req_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct dfl_feature_platform_data *pdata = dev_get_platdata(dev); + void __iomem *base; + u64 req0, req1; + + base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR); + + mutex_lock(&pdata->lock); + req0 = readq(base + PORT_MALFORMED_REQ0); + req1 = readq(base + PORT_MALFORMED_REQ1); + mutex_unlock(&pdata->lock); + + return sprintf(buf, "0x%016llx%016llx\n", + (unsigned long long)req1, (unsigned long long)req0); +} +static DEVICE_ATTR_RO(first_malformed_req); + +static struct attribute *port_err_attrs[] = { + &dev_attr_errors.attr, + &dev_attr_first_error.attr, + &dev_attr_first_malformed_req.attr, + NULL, +}; + +static umode_t port_err_attrs_visible(struct kobject *kobj, + struct attribute *attr, int n) +{ + struct device *dev = kobj_to_dev(kobj); + + /* + * sysfs entries are visible only if related private feature is + * enumerated. + */ + if (!dfl_get_feature_by_id(dev, PORT_FEATURE_ID_ERROR)) + return 0; + + return attr->mode; +} + +const struct attribute_group port_err_group = { + .name = "errors", + .attrs = port_err_attrs, + .is_visible = port_err_attrs_visible, +}; + +static int port_err_init(struct platform_device *pdev, + struct dfl_feature *feature) +{ + afu_port_err_mask(&pdev->dev, false); + + return 0; +} + +static void port_err_uinit(struct platform_device *pdev, + struct dfl_feature *feature) +{ + afu_port_err_mask(&pdev->dev, true); +} + +const struct dfl_feature_id port_err_id_table[] = { + {.id = PORT_FEATURE_ID_ERROR,}, + {0,} +}; + +const struct dfl_feature_ops port_err_ops = { + .init = port_err_init, + .uinit = port_err_uinit, +}; diff --git a/drivers/fpga/dfl-afu-main.c b/drivers/fpga/dfl-afu-main.c index 02baa6a227c0..e4a34dc7947f 100644 --- a/drivers/fpga/dfl-afu-main.c +++ b/drivers/fpga/dfl-afu-main.c @@ -22,14 +22,17 @@ #include "dfl-afu.h" /** - * port_enable - enable a port + * __afu_port_enable - enable a port by clear reset * @pdev: port platform device. * * Enable Port by clear the port soft reset bit, which is set by default. * The AFU is unable to respond to any MMIO access while in reset. - * port_enable function should only be used after port_disable function. + * __afu_port_enable function should only be used after __afu_port_disable + * function. + * + * The caller needs to hold lock for protection. */ -static void port_enable(struct platform_device *pdev) +void __afu_port_enable(struct platform_device *pdev) { struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev); void __iomem *base; @@ -52,13 +55,14 @@ static void port_enable(struct platform_device *pdev) #define RST_POLL_TIMEOUT 1000 /* us */ /** - * port_disable - disable a port + * __afu_port_disable - disable a port by hold reset * @pdev: port platform device. * - * Disable Port by setting the port soft reset bit, it puts the port into - * reset. + * Disable Port by setting the port soft reset bit, it puts the port into reset. 
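
port_err_group above is representative of the sysfs rework running through this series: instead of calling sysfs_create_files()/sysfs_remove_files() from the feature init/uinit hooks, each feature declares a static attribute group whose .is_visible callback decides at registration time which files appear. The driver core then creates the files before the device is announced to userspace, closing the window where udev could see the device without its attributes. The shape of such a group, condensed with the callback contract called out in comments:

static struct attribute *demo_attrs[] = {
        &dev_attr_errors.attr,
        NULL,
};

static umode_t demo_attrs_visible(struct kobject *kobj,
                                  struct attribute *attr, int n)
{
        struct device *dev = kobj_to_dev(kobj);

        /* return 0 to hide a file, attr->mode to expose it unchanged */
        if (!dfl_get_feature_by_id(dev, PORT_FEATURE_ID_ERROR))
                return 0;

        return attr->mode;
}

static const struct attribute_group demo_group = {
        .name = "errors",       /* files land in an "errors/" subdirectory */
        .attrs = demo_attrs,
        .is_visible = demo_attrs_visible,
};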
+ * + * The caller needs to hold lock for protection. */ -static int port_disable(struct platform_device *pdev) +int __afu_port_disable(struct platform_device *pdev) { struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev); void __iomem *base; @@ -104,9 +108,9 @@ static int __port_reset(struct platform_device *pdev) { int ret; - ret = port_disable(pdev); + ret = __afu_port_disable(pdev); if (!ret) - port_enable(pdev); + __afu_port_enable(pdev); return ret; } @@ -141,27 +145,267 @@ id_show(struct device *dev, struct device_attribute *attr, char *buf) } static DEVICE_ATTR_RO(id); -static const struct attribute *port_hdr_attrs[] = { +static ssize_t +ltr_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct dfl_feature_platform_data *pdata = dev_get_platdata(dev); + void __iomem *base; + u64 v; + + base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER); + + mutex_lock(&pdata->lock); + v = readq(base + PORT_HDR_CTRL); + mutex_unlock(&pdata->lock); + + return sprintf(buf, "%x\n", (u8)FIELD_GET(PORT_CTRL_LATENCY, v)); +} + +static ssize_t +ltr_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct dfl_feature_platform_data *pdata = dev_get_platdata(dev); + void __iomem *base; + bool ltr; + u64 v; + + if (kstrtobool(buf, <r)) + return -EINVAL; + + base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER); + + mutex_lock(&pdata->lock); + v = readq(base + PORT_HDR_CTRL); + v &= ~PORT_CTRL_LATENCY; + v |= FIELD_PREP(PORT_CTRL_LATENCY, ltr ? 1 : 0); + writeq(v, base + PORT_HDR_CTRL); + mutex_unlock(&pdata->lock); + + return count; +} +static DEVICE_ATTR_RW(ltr); + +static ssize_t +ap1_event_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct dfl_feature_platform_data *pdata = dev_get_platdata(dev); + void __iomem *base; + u64 v; + + base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER); + + mutex_lock(&pdata->lock); + v = readq(base + PORT_HDR_STS); + mutex_unlock(&pdata->lock); + + return sprintf(buf, "%x\n", (u8)FIELD_GET(PORT_STS_AP1_EVT, v)); +} + +static ssize_t +ap1_event_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct dfl_feature_platform_data *pdata = dev_get_platdata(dev); + void __iomem *base; + bool clear; + + if (kstrtobool(buf, &clear) || !clear) + return -EINVAL; + + base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER); + + mutex_lock(&pdata->lock); + writeq(PORT_STS_AP1_EVT, base + PORT_HDR_STS); + mutex_unlock(&pdata->lock); + + return count; +} +static DEVICE_ATTR_RW(ap1_event); + +static ssize_t +ap2_event_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct dfl_feature_platform_data *pdata = dev_get_platdata(dev); + void __iomem *base; + u64 v; + + base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER); + + mutex_lock(&pdata->lock); + v = readq(base + PORT_HDR_STS); + mutex_unlock(&pdata->lock); + + return sprintf(buf, "%x\n", (u8)FIELD_GET(PORT_STS_AP2_EVT, v)); +} + +static ssize_t +ap2_event_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct dfl_feature_platform_data *pdata = dev_get_platdata(dev); + void __iomem *base; + bool clear; + + if (kstrtobool(buf, &clear) || !clear) + return -EINVAL; + + base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER); + + mutex_lock(&pdata->lock); + writeq(PORT_STS_AP2_EVT, base + PORT_HDR_STS); + mutex_unlock(&pdata->lock); + + return count; 
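
/*
 * Note on ap1_event/ap2_event above: PORT_STS_AP1_EVT/AP2_EVT are
 * write-one-to-clear status bits. The store handlers therefore reject
 * anything but a truthy value (kstrtobool() accepts "1"/"y"/"on", ...)
 * and write the bit straight back to PORT_HDR_STS, so from userspace
 * "echo 1 > ap1_event" acknowledges a pending AP1 event rather than
 * raising one.
 */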
+} +static DEVICE_ATTR_RW(ap2_event); + +static ssize_t +power_state_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct dfl_feature_platform_data *pdata = dev_get_platdata(dev); + void __iomem *base; + u64 v; + + base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER); + + mutex_lock(&pdata->lock); + v = readq(base + PORT_HDR_STS); + mutex_unlock(&pdata->lock); + + return sprintf(buf, "0x%x\n", (u8)FIELD_GET(PORT_STS_PWR_STATE, v)); +} +static DEVICE_ATTR_RO(power_state); + +static ssize_t +userclk_freqcmd_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct dfl_feature_platform_data *pdata = dev_get_platdata(dev); + u64 userclk_freq_cmd; + void __iomem *base; + + if (kstrtou64(buf, 0, &userclk_freq_cmd)) + return -EINVAL; + + base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER); + + mutex_lock(&pdata->lock); + writeq(userclk_freq_cmd, base + PORT_HDR_USRCLK_CMD0); + mutex_unlock(&pdata->lock); + + return count; +} +static DEVICE_ATTR_WO(userclk_freqcmd); + +static ssize_t +userclk_freqcntrcmd_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct dfl_feature_platform_data *pdata = dev_get_platdata(dev); + u64 userclk_freqcntr_cmd; + void __iomem *base; + + if (kstrtou64(buf, 0, &userclk_freqcntr_cmd)) + return -EINVAL; + + base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER); + + mutex_lock(&pdata->lock); + writeq(userclk_freqcntr_cmd, base + PORT_HDR_USRCLK_CMD1); + mutex_unlock(&pdata->lock); + + return count; +} +static DEVICE_ATTR_WO(userclk_freqcntrcmd); + +static ssize_t +userclk_freqsts_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct dfl_feature_platform_data *pdata = dev_get_platdata(dev); + u64 userclk_freqsts; + void __iomem *base; + + base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER); + + mutex_lock(&pdata->lock); + userclk_freqsts = readq(base + PORT_HDR_USRCLK_STS0); + mutex_unlock(&pdata->lock); + + return sprintf(buf, "0x%llx\n", (unsigned long long)userclk_freqsts); +} +static DEVICE_ATTR_RO(userclk_freqsts); + +static ssize_t +userclk_freqcntrsts_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct dfl_feature_platform_data *pdata = dev_get_platdata(dev); + u64 userclk_freqcntrsts; + void __iomem *base; + + base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER); + + mutex_lock(&pdata->lock); + userclk_freqcntrsts = readq(base + PORT_HDR_USRCLK_STS1); + mutex_unlock(&pdata->lock); + + return sprintf(buf, "0x%llx\n", + (unsigned long long)userclk_freqcntrsts); +} +static DEVICE_ATTR_RO(userclk_freqcntrsts); + +static struct attribute *port_hdr_attrs[] = { &dev_attr_id.attr, + &dev_attr_ltr.attr, + &dev_attr_ap1_event.attr, + &dev_attr_ap2_event.attr, + &dev_attr_power_state.attr, + &dev_attr_userclk_freqcmd.attr, + &dev_attr_userclk_freqcntrcmd.attr, + &dev_attr_userclk_freqsts.attr, + &dev_attr_userclk_freqcntrsts.attr, NULL, }; -static int port_hdr_init(struct platform_device *pdev, - struct dfl_feature *feature) +static umode_t port_hdr_attrs_visible(struct kobject *kobj, + struct attribute *attr, int n) { - dev_dbg(&pdev->dev, "PORT HDR Init.\n"); + struct device *dev = kobj_to_dev(kobj); + umode_t mode = attr->mode; + void __iomem *base; - port_reset(pdev); + base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER); + + if (dfl_feature_revision(base) > 0) { + /* + * userclk sysfs interfaces are only visible in case port + 
* revision is 0, as hardware with revision >0 doesn't + * support this. + */ + if (attr == &dev_attr_userclk_freqcmd.attr || + attr == &dev_attr_userclk_freqcntrcmd.attr || + attr == &dev_attr_userclk_freqsts.attr || + attr == &dev_attr_userclk_freqcntrsts.attr) + mode = 0; + } - return sysfs_create_files(&pdev->dev.kobj, port_hdr_attrs); + return mode; } -static void port_hdr_uinit(struct platform_device *pdev, - struct dfl_feature *feature) +static const struct attribute_group port_hdr_group = { + .attrs = port_hdr_attrs, + .is_visible = port_hdr_attrs_visible, +}; + +static int port_hdr_init(struct platform_device *pdev, + struct dfl_feature *feature) { - dev_dbg(&pdev->dev, "PORT HDR UInit.\n"); + port_reset(pdev); - sysfs_remove_files(&pdev->dev.kobj, port_hdr_attrs); + return 0; } static long @@ -185,9 +429,13 @@ port_hdr_ioctl(struct platform_device *pdev, struct dfl_feature *feature, return ret; } +static const struct dfl_feature_id port_hdr_id_table[] = { + {.id = PORT_FEATURE_ID_HEADER,}, + {0,} +}; + static const struct dfl_feature_ops port_hdr_ops = { .init = port_hdr_init, - .uinit = port_hdr_uinit, .ioctl = port_hdr_ioctl, }; @@ -214,52 +462,91 @@ afu_id_show(struct device *dev, struct device_attribute *attr, char *buf) } static DEVICE_ATTR_RO(afu_id); -static const struct attribute *port_afu_attrs[] = { +static struct attribute *port_afu_attrs[] = { &dev_attr_afu_id.attr, NULL }; +static umode_t port_afu_attrs_visible(struct kobject *kobj, + struct attribute *attr, int n) +{ + struct device *dev = kobj_to_dev(kobj); + + /* + * sysfs entries are visible only if related private feature is + * enumerated. + */ + if (!dfl_get_feature_by_id(dev, PORT_FEATURE_ID_AFU)) + return 0; + + return attr->mode; +} + +static const struct attribute_group port_afu_group = { + .attrs = port_afu_attrs, + .is_visible = port_afu_attrs_visible, +}; + static int port_afu_init(struct platform_device *pdev, struct dfl_feature *feature) { struct resource *res = &pdev->resource[feature->resource_index]; - int ret; - dev_dbg(&pdev->dev, "PORT AFU Init.\n"); + return afu_mmio_region_add(dev_get_platdata(&pdev->dev), + DFL_PORT_REGION_INDEX_AFU, + resource_size(res), res->start, + DFL_PORT_REGION_MMAP | DFL_PORT_REGION_READ | + DFL_PORT_REGION_WRITE); +} - ret = afu_mmio_region_add(dev_get_platdata(&pdev->dev), - DFL_PORT_REGION_INDEX_AFU, resource_size(res), - res->start, DFL_PORT_REGION_READ | - DFL_PORT_REGION_WRITE | DFL_PORT_REGION_MMAP); - if (ret) - return ret; +static const struct dfl_feature_id port_afu_id_table[] = { + {.id = PORT_FEATURE_ID_AFU,}, + {0,} +}; - return sysfs_create_files(&pdev->dev.kobj, port_afu_attrs); -} +static const struct dfl_feature_ops port_afu_ops = { + .init = port_afu_init, +}; -static void port_afu_uinit(struct platform_device *pdev, - struct dfl_feature *feature) +static int port_stp_init(struct platform_device *pdev, + struct dfl_feature *feature) { - dev_dbg(&pdev->dev, "PORT AFU UInit.\n"); + struct resource *res = &pdev->resource[feature->resource_index]; - sysfs_remove_files(&pdev->dev.kobj, port_afu_attrs); + return afu_mmio_region_add(dev_get_platdata(&pdev->dev), + DFL_PORT_REGION_INDEX_STP, + resource_size(res), res->start, + DFL_PORT_REGION_MMAP | DFL_PORT_REGION_READ | + DFL_PORT_REGION_WRITE); } -static const struct dfl_feature_ops port_afu_ops = { - .init = port_afu_init, - .uinit = port_afu_uinit, +static const struct dfl_feature_id port_stp_id_table[] = { + {.id = PORT_FEATURE_ID_STP,}, + {0,} +}; + +static const struct dfl_feature_ops port_stp_ops = { 
+ .init = port_stp_init, }; static struct dfl_feature_driver port_feature_drvs[] = { { - .id = PORT_FEATURE_ID_HEADER, + .id_table = port_hdr_id_table, .ops = &port_hdr_ops, }, { - .id = PORT_FEATURE_ID_AFU, + .id_table = port_afu_id_table, .ops = &port_afu_ops, }, { + .id_table = port_err_id_table, + .ops = &port_err_ops, + }, + { + .id_table = port_stp_id_table, + .ops = &port_stp_ops, + }, + { .ops = NULL, } }; @@ -545,9 +832,9 @@ static int port_enable_set(struct platform_device *pdev, bool enable) mutex_lock(&pdata->lock); if (enable) - port_enable(pdev); + __afu_port_enable(pdev); else - ret = port_disable(pdev); + ret = __afu_port_disable(pdev); mutex_unlock(&pdata->lock); return ret; @@ -599,9 +886,17 @@ static int afu_remove(struct platform_device *pdev) return 0; } +static const struct attribute_group *afu_dev_groups[] = { + &port_hdr_group, + &port_afu_group, + &port_err_group, + NULL +}; + static struct platform_driver afu_driver = { .driver = { - .name = DFL_FPGA_FEATURE_DEV_PORT, + .name = DFL_FPGA_FEATURE_DEV_PORT, + .dev_groups = afu_dev_groups, }, .probe = afu_probe, .remove = afu_remove, diff --git a/drivers/fpga/dfl-afu.h b/drivers/fpga/dfl-afu.h index 0c7630ae3cda..576e94960086 100644 --- a/drivers/fpga/dfl-afu.h +++ b/drivers/fpga/dfl-afu.h @@ -79,6 +79,10 @@ struct dfl_afu { struct dfl_feature_platform_data *pdata; }; +/* hold pdata->lock when call __afu_port_enable/disable */ +void __afu_port_enable(struct platform_device *pdev); +int __afu_port_disable(struct platform_device *pdev); + void afu_mmio_region_init(struct dfl_feature_platform_data *pdata); int afu_mmio_region_add(struct dfl_feature_platform_data *pdata, u32 region_index, u64 region_size, u64 phys, u32 flags); @@ -97,4 +101,9 @@ int afu_dma_unmap_region(struct dfl_feature_platform_data *pdata, u64 iova); struct dfl_afu_dma_region * afu_dma_region_find(struct dfl_feature_platform_data *pdata, u64 iova, u64 size); + +extern const struct dfl_feature_ops port_err_ops; +extern const struct dfl_feature_id port_err_id_table[]; +extern const struct attribute_group port_err_group; + #endif /* __DFL_AFU_H */ diff --git a/drivers/fpga/dfl-fme-error.c b/drivers/fpga/dfl-fme-error.c new file mode 100644 index 000000000000..f897d414b923 --- /dev/null +++ b/drivers/fpga/dfl-fme-error.c @@ -0,0 +1,359 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Driver for FPGA Management Engine Error Management + * + * Copyright 2019 Intel Corporation, Inc. 
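
The afu_driver change above is what makes the static groups take effect: attribute groups listed in .driver.dev_groups are created by the driver core once probe() succeeds and removed automatically on unbind, so the driver no longer manages sysfs lifetimes by hand. Minimal shape (the "demo-afu" name is illustrative):

static const struct attribute_group *demo_dev_groups[] = {
        &port_hdr_group,
        &port_err_group,
        NULL
};

static struct platform_driver demo_driver = {
        .driver = {
                .name           = "demo-afu",
                .dev_groups     = demo_dev_groups,
        },
        .probe  = afu_probe,
        .remove = afu_remove,
};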
+ * + * Authors: + * Kang Luwei <luwei.kang@intel.com> + * Xiao Guangrong <guangrong.xiao@linux.intel.com> + * Wu Hao <hao.wu@intel.com> + * Joseph Grecco <joe.grecco@intel.com> + * Enno Luebbers <enno.luebbers@intel.com> + * Tim Whisonant <tim.whisonant@intel.com> + * Ananda Ravuri <ananda.ravuri@intel.com> + * Mitchel, Henry <henry.mitchel@intel.com> + */ + +#include <linux/uaccess.h> + +#include "dfl.h" +#include "dfl-fme.h" + +#define FME_ERROR_MASK 0x8 +#define FME_ERROR 0x10 +#define MBP_ERROR BIT_ULL(6) +#define PCIE0_ERROR_MASK 0x18 +#define PCIE0_ERROR 0x20 +#define PCIE1_ERROR_MASK 0x28 +#define PCIE1_ERROR 0x30 +#define FME_FIRST_ERROR 0x38 +#define FME_NEXT_ERROR 0x40 +#define RAS_NONFAT_ERROR_MASK 0x48 +#define RAS_NONFAT_ERROR 0x50 +#define RAS_CATFAT_ERROR_MASK 0x58 +#define RAS_CATFAT_ERROR 0x60 +#define RAS_ERROR_INJECT 0x68 +#define INJECT_ERROR_MASK GENMASK_ULL(2, 0) + +#define ERROR_MASK GENMASK_ULL(63, 0) + +static ssize_t pcie0_errors_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct dfl_feature_platform_data *pdata = dev_get_platdata(dev); + void __iomem *base; + u64 value; + + base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR); + + mutex_lock(&pdata->lock); + value = readq(base + PCIE0_ERROR); + mutex_unlock(&pdata->lock); + + return sprintf(buf, "0x%llx\n", (unsigned long long)value); +} + +static ssize_t pcie0_errors_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct dfl_feature_platform_data *pdata = dev_get_platdata(dev); + void __iomem *base; + int ret = 0; + u64 v, val; + + if (kstrtou64(buf, 0, &val)) + return -EINVAL; + + base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR); + + mutex_lock(&pdata->lock); + writeq(GENMASK_ULL(63, 0), base + PCIE0_ERROR_MASK); + + v = readq(base + PCIE0_ERROR); + if (val == v) + writeq(v, base + PCIE0_ERROR); + else + ret = -EINVAL; + + writeq(0ULL, base + PCIE0_ERROR_MASK); + mutex_unlock(&pdata->lock); + return ret ? ret : count; +} +static DEVICE_ATTR_RW(pcie0_errors); + +static ssize_t pcie1_errors_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct dfl_feature_platform_data *pdata = dev_get_platdata(dev); + void __iomem *base; + u64 value; + + base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR); + + mutex_lock(&pdata->lock); + value = readq(base + PCIE1_ERROR); + mutex_unlock(&pdata->lock); + + return sprintf(buf, "0x%llx\n", (unsigned long long)value); +} + +static ssize_t pcie1_errors_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct dfl_feature_platform_data *pdata = dev_get_platdata(dev); + void __iomem *base; + int ret = 0; + u64 v, val; + + if (kstrtou64(buf, 0, &val)) + return -EINVAL; + + base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR); + + mutex_lock(&pdata->lock); + writeq(GENMASK_ULL(63, 0), base + PCIE1_ERROR_MASK); + + v = readq(base + PCIE1_ERROR); + if (val == v) + writeq(v, base + PCIE1_ERROR); + else + ret = -EINVAL; + + writeq(0ULL, base + PCIE1_ERROR_MASK); + mutex_unlock(&pdata->lock); + return ret ? 
ret : count; +} +static DEVICE_ATTR_RW(pcie1_errors); + +static ssize_t nonfatal_errors_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + void __iomem *base; + + base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR); + + return sprintf(buf, "0x%llx\n", + (unsigned long long)readq(base + RAS_NONFAT_ERROR)); +} +static DEVICE_ATTR_RO(nonfatal_errors); + +static ssize_t catfatal_errors_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + void __iomem *base; + + base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR); + + return sprintf(buf, "0x%llx\n", + (unsigned long long)readq(base + RAS_CATFAT_ERROR)); +} +static DEVICE_ATTR_RO(catfatal_errors); + +static ssize_t inject_errors_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct dfl_feature_platform_data *pdata = dev_get_platdata(dev); + void __iomem *base; + u64 v; + + base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR); + + mutex_lock(&pdata->lock); + v = readq(base + RAS_ERROR_INJECT); + mutex_unlock(&pdata->lock); + + return sprintf(buf, "0x%llx\n", + (unsigned long long)FIELD_GET(INJECT_ERROR_MASK, v)); +} + +static ssize_t inject_errors_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct dfl_feature_platform_data *pdata = dev_get_platdata(dev); + void __iomem *base; + u8 inject_error; + u64 v; + + if (kstrtou8(buf, 0, &inject_error)) + return -EINVAL; + + if (inject_error & ~INJECT_ERROR_MASK) + return -EINVAL; + + base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR); + + mutex_lock(&pdata->lock); + v = readq(base + RAS_ERROR_INJECT); + v &= ~INJECT_ERROR_MASK; + v |= FIELD_PREP(INJECT_ERROR_MASK, inject_error); + writeq(v, base + RAS_ERROR_INJECT); + mutex_unlock(&pdata->lock); + + return count; +} +static DEVICE_ATTR_RW(inject_errors); + +static ssize_t fme_errors_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct dfl_feature_platform_data *pdata = dev_get_platdata(dev); + void __iomem *base; + u64 value; + + base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR); + + mutex_lock(&pdata->lock); + value = readq(base + FME_ERROR); + mutex_unlock(&pdata->lock); + + return sprintf(buf, "0x%llx\n", (unsigned long long)value); +} + +static ssize_t fme_errors_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct dfl_feature_platform_data *pdata = dev_get_platdata(dev); + void __iomem *base; + u64 v, val; + int ret = 0; + + if (kstrtou64(buf, 0, &val)) + return -EINVAL; + + base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR); + + mutex_lock(&pdata->lock); + writeq(GENMASK_ULL(63, 0), base + FME_ERROR_MASK); + + v = readq(base + FME_ERROR); + if (val == v) + writeq(v, base + FME_ERROR); + else + ret = -EINVAL; + + /* Workaround: disable MBP_ERROR if feature revision is 0 */ + writeq(dfl_feature_revision(base) ? 0ULL : MBP_ERROR, + base + FME_ERROR_MASK); + mutex_unlock(&pdata->lock); + return ret ? 
ret : count; +} +static DEVICE_ATTR_RW(fme_errors); + +static ssize_t first_error_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct dfl_feature_platform_data *pdata = dev_get_platdata(dev); + void __iomem *base; + u64 value; + + base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR); + + mutex_lock(&pdata->lock); + value = readq(base + FME_FIRST_ERROR); + mutex_unlock(&pdata->lock); + + return sprintf(buf, "0x%llx\n", (unsigned long long)value); +} +static DEVICE_ATTR_RO(first_error); + +static ssize_t next_error_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct dfl_feature_platform_data *pdata = dev_get_platdata(dev); + void __iomem *base; + u64 value; + + base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR); + + mutex_lock(&pdata->lock); + value = readq(base + FME_NEXT_ERROR); + mutex_unlock(&pdata->lock); + + return sprintf(buf, "0x%llx\n", (unsigned long long)value); +} +static DEVICE_ATTR_RO(next_error); + +static struct attribute *fme_global_err_attrs[] = { + &dev_attr_pcie0_errors.attr, + &dev_attr_pcie1_errors.attr, + &dev_attr_nonfatal_errors.attr, + &dev_attr_catfatal_errors.attr, + &dev_attr_inject_errors.attr, + &dev_attr_fme_errors.attr, + &dev_attr_first_error.attr, + &dev_attr_next_error.attr, + NULL, +}; + +static umode_t fme_global_err_attrs_visible(struct kobject *kobj, + struct attribute *attr, int n) +{ + struct device *dev = kobj_to_dev(kobj); + + /* + * sysfs entries are visible only if related private feature is + * enumerated. + */ + if (!dfl_get_feature_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR)) + return 0; + + return attr->mode; +} + +const struct attribute_group fme_global_err_group = { + .name = "errors", + .attrs = fme_global_err_attrs, + .is_visible = fme_global_err_attrs_visible, +}; + +static void fme_err_mask(struct device *dev, bool mask) +{ + struct dfl_feature_platform_data *pdata = dev_get_platdata(dev); + void __iomem *base; + + base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR); + + mutex_lock(&pdata->lock); + + /* Workaround: keep MBP_ERROR always masked if revision is 0 */ + if (dfl_feature_revision(base)) + writeq(mask ? ERROR_MASK : 0, base + FME_ERROR_MASK); + else + writeq(mask ? ERROR_MASK : MBP_ERROR, base + FME_ERROR_MASK); + + writeq(mask ? ERROR_MASK : 0, base + PCIE0_ERROR_MASK); + writeq(mask ? ERROR_MASK : 0, base + PCIE1_ERROR_MASK); + writeq(mask ? ERROR_MASK : 0, base + RAS_NONFAT_ERROR_MASK); + writeq(mask ? 
ERROR_MASK : 0, base + RAS_CATFAT_ERROR_MASK); + + mutex_unlock(&pdata->lock); +} + +static int fme_global_err_init(struct platform_device *pdev, + struct dfl_feature *feature) +{ + fme_err_mask(&pdev->dev, false); + + return 0; +} + +static void fme_global_err_uinit(struct platform_device *pdev, + struct dfl_feature *feature) +{ + fme_err_mask(&pdev->dev, true); +} + +const struct dfl_feature_id fme_global_err_id_table[] = { + {.id = FME_FEATURE_ID_GLOBAL_ERR,}, + {0,} +}; + +const struct dfl_feature_ops fme_global_err_ops = { + .init = fme_global_err_init, + .uinit = fme_global_err_uinit, +}; diff --git a/drivers/fpga/dfl-fme-main.c b/drivers/fpga/dfl-fme-main.c index 086ad2420ade..7c930e6b314d 100644 --- a/drivers/fpga/dfl-fme-main.c +++ b/drivers/fpga/dfl-fme-main.c @@ -14,8 +14,11 @@ * Henry Mitchel <henry.mitchel@intel.com> */ +#include <linux/hwmon.h> +#include <linux/hwmon-sysfs.h> #include <linux/kernel.h> #include <linux/module.h> +#include <linux/uaccess.h> #include <linux/fpga-dfl.h> #include "dfl.h" @@ -72,50 +75,509 @@ static ssize_t bitstream_metadata_show(struct device *dev, } static DEVICE_ATTR_RO(bitstream_metadata); -static const struct attribute *fme_hdr_attrs[] = { +static ssize_t cache_size_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + void __iomem *base; + u64 v; + + base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER); + + v = readq(base + FME_HDR_CAP); + + return sprintf(buf, "%u\n", + (unsigned int)FIELD_GET(FME_CAP_CACHE_SIZE, v)); +} +static DEVICE_ATTR_RO(cache_size); + +static ssize_t fabric_version_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + void __iomem *base; + u64 v; + + base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER); + + v = readq(base + FME_HDR_CAP); + + return sprintf(buf, "%u\n", + (unsigned int)FIELD_GET(FME_CAP_FABRIC_VERID, v)); +} +static DEVICE_ATTR_RO(fabric_version); + +static ssize_t socket_id_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + void __iomem *base; + u64 v; + + base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER); + + v = readq(base + FME_HDR_CAP); + + return sprintf(buf, "%u\n", + (unsigned int)FIELD_GET(FME_CAP_SOCKET_ID, v)); +} +static DEVICE_ATTR_RO(socket_id); + +static struct attribute *fme_hdr_attrs[] = { &dev_attr_ports_num.attr, &dev_attr_bitstream_id.attr, &dev_attr_bitstream_metadata.attr, + &dev_attr_cache_size.attr, + &dev_attr_fabric_version.attr, + &dev_attr_socket_id.attr, NULL, }; -static int fme_hdr_init(struct platform_device *pdev, - struct dfl_feature *feature) +static const struct attribute_group fme_hdr_group = { + .attrs = fme_hdr_attrs, +}; + +static long fme_hdr_ioctl_release_port(struct dfl_feature_platform_data *pdata, + unsigned long arg) { - void __iomem *base = feature->ioaddr; - int ret; + struct dfl_fpga_cdev *cdev = pdata->dfl_cdev; + int port_id; - dev_dbg(&pdev->dev, "FME HDR Init.\n"); - dev_dbg(&pdev->dev, "FME cap %llx.\n", - (unsigned long long)readq(base + FME_HDR_CAP)); + if (get_user(port_id, (int __user *)arg)) + return -EFAULT; - ret = sysfs_create_files(&pdev->dev.kobj, fme_hdr_attrs); - if (ret) - return ret; + return dfl_fpga_cdev_release_port(cdev, port_id); +} - return 0; +static long fme_hdr_ioctl_assign_port(struct dfl_feature_platform_data *pdata, + unsigned long arg) +{ + struct dfl_fpga_cdev *cdev = pdata->dfl_cdev; + int port_id; + + if (get_user(port_id, (int __user *)arg)) + return -EFAULT; + + return dfl_fpga_cdev_assign_port(cdev, port_id); } -static 
void fme_hdr_uinit(struct platform_device *pdev, - struct dfl_feature *feature) +static long fme_hdr_ioctl(struct platform_device *pdev, + struct dfl_feature *feature, + unsigned int cmd, unsigned long arg) { - dev_dbg(&pdev->dev, "FME HDR UInit.\n"); - sysfs_remove_files(&pdev->dev.kobj, fme_hdr_attrs); + struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev); + + switch (cmd) { + case DFL_FPGA_FME_PORT_RELEASE: + return fme_hdr_ioctl_release_port(pdata, arg); + case DFL_FPGA_FME_PORT_ASSIGN: + return fme_hdr_ioctl_assign_port(pdata, arg); + } + + return -ENODEV; } +static const struct dfl_feature_id fme_hdr_id_table[] = { + {.id = FME_FEATURE_ID_HEADER,}, + {0,} +}; + static const struct dfl_feature_ops fme_hdr_ops = { - .init = fme_hdr_init, - .uinit = fme_hdr_uinit, + .ioctl = fme_hdr_ioctl, +}; + +#define FME_THERM_THRESHOLD 0x8 +#define TEMP_THRESHOLD1 GENMASK_ULL(6, 0) +#define TEMP_THRESHOLD1_EN BIT_ULL(7) +#define TEMP_THRESHOLD2 GENMASK_ULL(14, 8) +#define TEMP_THRESHOLD2_EN BIT_ULL(15) +#define TRIP_THRESHOLD GENMASK_ULL(30, 24) +#define TEMP_THRESHOLD1_STATUS BIT_ULL(32) /* threshold1 reached */ +#define TEMP_THRESHOLD2_STATUS BIT_ULL(33) /* threshold2 reached */ +/* threshold1 policy: 0 - AP2 (90% throttle) / 1 - AP1 (50% throttle) */ +#define TEMP_THRESHOLD1_POLICY BIT_ULL(44) + +#define FME_THERM_RDSENSOR_FMT1 0x10 +#define FPGA_TEMPERATURE GENMASK_ULL(6, 0) + +#define FME_THERM_CAP 0x20 +#define THERM_NO_THROTTLE BIT_ULL(0) + +#define MD_PRE_DEG + +static bool fme_thermal_throttle_support(void __iomem *base) +{ + u64 v = readq(base + FME_THERM_CAP); + + return FIELD_GET(THERM_NO_THROTTLE, v) ? false : true; +} + +static umode_t thermal_hwmon_attrs_visible(const void *drvdata, + enum hwmon_sensor_types type, + u32 attr, int channel) +{ + const struct dfl_feature *feature = drvdata; + + /* temperature is always supported, and check hardware cap for others */ + if (attr == hwmon_temp_input) + return 0444; + + return fme_thermal_throttle_support(feature->ioaddr) ? 
0444 : 0; +} + +static int thermal_hwmon_read(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, long *val) +{ + struct dfl_feature *feature = dev_get_drvdata(dev); + u64 v; + + switch (attr) { + case hwmon_temp_input: + v = readq(feature->ioaddr + FME_THERM_RDSENSOR_FMT1); + *val = (long)(FIELD_GET(FPGA_TEMPERATURE, v) * 1000); + break; + case hwmon_temp_max: + v = readq(feature->ioaddr + FME_THERM_THRESHOLD); + *val = (long)(FIELD_GET(TEMP_THRESHOLD1, v) * 1000); + break; + case hwmon_temp_crit: + v = readq(feature->ioaddr + FME_THERM_THRESHOLD); + *val = (long)(FIELD_GET(TEMP_THRESHOLD2, v) * 1000); + break; + case hwmon_temp_emergency: + v = readq(feature->ioaddr + FME_THERM_THRESHOLD); + *val = (long)(FIELD_GET(TRIP_THRESHOLD, v) * 1000); + break; + case hwmon_temp_max_alarm: + v = readq(feature->ioaddr + FME_THERM_THRESHOLD); + *val = (long)FIELD_GET(TEMP_THRESHOLD1_STATUS, v); + break; + case hwmon_temp_crit_alarm: + v = readq(feature->ioaddr + FME_THERM_THRESHOLD); + *val = (long)FIELD_GET(TEMP_THRESHOLD2_STATUS, v); + break; + default: + return -EOPNOTSUPP; + } + + return 0; +} + +static const struct hwmon_ops thermal_hwmon_ops = { + .is_visible = thermal_hwmon_attrs_visible, + .read = thermal_hwmon_read, +}; + +static const struct hwmon_channel_info *thermal_hwmon_info[] = { + HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | HWMON_T_EMERGENCY | + HWMON_T_MAX | HWMON_T_MAX_ALARM | + HWMON_T_CRIT | HWMON_T_CRIT_ALARM), + NULL +}; + +static const struct hwmon_chip_info thermal_hwmon_chip_info = { + .ops = &thermal_hwmon_ops, + .info = thermal_hwmon_info, +}; + +static ssize_t temp1_max_policy_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct dfl_feature *feature = dev_get_drvdata(dev); + u64 v; + + v = readq(feature->ioaddr + FME_THERM_THRESHOLD); + + return sprintf(buf, "%u\n", + (unsigned int)FIELD_GET(TEMP_THRESHOLD1_POLICY, v)); +} + +static DEVICE_ATTR_RO(temp1_max_policy); + +static struct attribute *thermal_extra_attrs[] = { + &dev_attr_temp1_max_policy.attr, + NULL, +}; + +static umode_t thermal_extra_attrs_visible(struct kobject *kobj, + struct attribute *attr, int index) +{ + struct device *dev = kobj_to_dev(kobj); + struct dfl_feature *feature = dev_get_drvdata(dev); + + return fme_thermal_throttle_support(feature->ioaddr) ? attr->mode : 0; +} + +static const struct attribute_group thermal_extra_group = { + .attrs = thermal_extra_attrs, + .is_visible = thermal_extra_attrs_visible, +}; +__ATTRIBUTE_GROUPS(thermal_extra); + +static int fme_thermal_mgmt_init(struct platform_device *pdev, + struct dfl_feature *feature) +{ + struct device *hwmon; + + /* + * create hwmon to allow userspace to monitor the temperature and other + * threshold information. + * + * temp1_input -> FPGA device temperature + * temp1_max -> hardware threshold 1 -> 50% or 90% throttling + * temp1_crit -> hardware threshold 2 -> 100% throttling + * temp1_emergency -> hardware trip_threshold to shutdown FPGA + * temp1_max_alarm -> hardware threshold 1 alarm + * temp1_crit_alarm -> hardware threshold 2 alarm + * + * create device-specific sysfs interfaces, e.g. read temp1_max_policy + * to understand the actual hardware throttling action (50% vs 90%). + * + * If the hardware doesn't support automatic throttling per thresholds, + * then none of the above sysfs interfaces is visible except temp1_input + * for temperature. 
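+ * + * Note: thermal_hwmon_read() above scales the raw degree-Celsius register + * fields by 1000 because the hwmon ABI reports temperatures in millidegrees.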
+ */ + hwmon = devm_hwmon_device_register_with_info(&pdev->dev, + "dfl_fme_thermal", feature, + &thermal_hwmon_chip_info, + thermal_extra_groups); + if (IS_ERR(hwmon)) { + dev_err(&pdev->dev, "Fail to register thermal hwmon\n"); + return PTR_ERR(hwmon); + } + + return 0; +} + +static const struct dfl_feature_id fme_thermal_mgmt_id_table[] = { + {.id = FME_FEATURE_ID_THERMAL_MGMT,}, + {0,} +}; + +static const struct dfl_feature_ops fme_thermal_mgmt_ops = { + .init = fme_thermal_mgmt_init, +}; + +#define FME_PWR_STATUS 0x8 +#define FME_LATENCY_TOLERANCE BIT_ULL(18) +#define PWR_CONSUMED GENMASK_ULL(17, 0) + +#define FME_PWR_THRESHOLD 0x10 +#define PWR_THRESHOLD1 GENMASK_ULL(6, 0) /* in Watts */ +#define PWR_THRESHOLD2 GENMASK_ULL(14, 8) /* in Watts */ +#define PWR_THRESHOLD_MAX 0x7f /* in Watts */ +#define PWR_THRESHOLD1_STATUS BIT_ULL(16) +#define PWR_THRESHOLD2_STATUS BIT_ULL(17) + +#define FME_PWR_XEON_LIMIT 0x18 +#define XEON_PWR_LIMIT GENMASK_ULL(14, 0) /* in 0.1 Watts */ +#define XEON_PWR_EN BIT_ULL(15) +#define FME_PWR_FPGA_LIMIT 0x20 +#define FPGA_PWR_LIMIT GENMASK_ULL(14, 0) /* in 0.1 Watts */ +#define FPGA_PWR_EN BIT_ULL(15) + +static int power_hwmon_read(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, long *val) +{ + struct dfl_feature *feature = dev_get_drvdata(dev); + u64 v; + + switch (attr) { + case hwmon_power_input: + v = readq(feature->ioaddr + FME_PWR_STATUS); + *val = (long)(FIELD_GET(PWR_CONSUMED, v) * 1000000); + break; + case hwmon_power_max: + v = readq(feature->ioaddr + FME_PWR_THRESHOLD); + *val = (long)(FIELD_GET(PWR_THRESHOLD1, v) * 1000000); + break; + case hwmon_power_crit: + v = readq(feature->ioaddr + FME_PWR_THRESHOLD); + *val = (long)(FIELD_GET(PWR_THRESHOLD2, v) * 1000000); + break; + case hwmon_power_max_alarm: + v = readq(feature->ioaddr + FME_PWR_THRESHOLD); + *val = (long)FIELD_GET(PWR_THRESHOLD1_STATUS, v); + break; + case hwmon_power_crit_alarm: + v = readq(feature->ioaddr + FME_PWR_THRESHOLD); + *val = (long)FIELD_GET(PWR_THRESHOLD2_STATUS, v); + break; + default: + return -EOPNOTSUPP; + } + + return 0; +} + +static int power_hwmon_write(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, long val) +{ + struct dfl_feature_platform_data *pdata = dev_get_platdata(dev->parent); + struct dfl_feature *feature = dev_get_drvdata(dev); + int ret = 0; + u64 v; + + val = clamp_val(val / 1000000, 0, PWR_THRESHOLD_MAX); + + mutex_lock(&pdata->lock); + + switch (attr) { + case hwmon_power_max: + v = readq(feature->ioaddr + FME_PWR_THRESHOLD); + v &= ~PWR_THRESHOLD1; + v |= FIELD_PREP(PWR_THRESHOLD1, val); + writeq(v, feature->ioaddr + FME_PWR_THRESHOLD); + break; + case hwmon_power_crit: + v = readq(feature->ioaddr + FME_PWR_THRESHOLD); + v &= ~PWR_THRESHOLD2; + v |= FIELD_PREP(PWR_THRESHOLD2, val); + writeq(v, feature->ioaddr + FME_PWR_THRESHOLD); + break; + default: + ret = -EOPNOTSUPP; + break; + } + + mutex_unlock(&pdata->lock); + + return ret; +} + +static umode_t power_hwmon_attrs_visible(const void *drvdata, + enum hwmon_sensor_types type, + u32 attr, int channel) +{ + switch (attr) { + case hwmon_power_input: + case hwmon_power_max_alarm: + case hwmon_power_crit_alarm: + return 0444; + case hwmon_power_max: + case hwmon_power_crit: + return 0644; + } + + return 0; +} + +static const struct hwmon_ops power_hwmon_ops = { + .is_visible = power_hwmon_attrs_visible, + .read = power_hwmon_read, + .write = power_hwmon_write, +}; + +static const struct hwmon_channel_info *power_hwmon_info[] = { + 
HWMON_CHANNEL_INFO(power, HWMON_P_INPUT | + HWMON_P_MAX | HWMON_P_MAX_ALARM | + HWMON_P_CRIT | HWMON_P_CRIT_ALARM), + NULL +}; + +static const struct hwmon_chip_info power_hwmon_chip_info = { + .ops = &power_hwmon_ops, + .info = power_hwmon_info, +}; + +static ssize_t power1_xeon_limit_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct dfl_feature *feature = dev_get_drvdata(dev); + u16 xeon_limit = 0; + u64 v; + + v = readq(feature->ioaddr + FME_PWR_XEON_LIMIT); + + if (FIELD_GET(XEON_PWR_EN, v)) + xeon_limit = FIELD_GET(XEON_PWR_LIMIT, v); + + return sprintf(buf, "%u\n", xeon_limit * 100000); +} + +static ssize_t power1_fpga_limit_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct dfl_feature *feature = dev_get_drvdata(dev); + u16 fpga_limit = 0; + u64 v; + + v = readq(feature->ioaddr + FME_PWR_FPGA_LIMIT); + + if (FIELD_GET(FPGA_PWR_EN, v)) + fpga_limit = FIELD_GET(FPGA_PWR_LIMIT, v); + + return sprintf(buf, "%u\n", fpga_limit * 100000); +} + +static ssize_t power1_ltr_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct dfl_feature *feature = dev_get_drvdata(dev); + u64 v; + + v = readq(feature->ioaddr + FME_PWR_STATUS); + + return sprintf(buf, "%u\n", + (unsigned int)FIELD_GET(FME_LATENCY_TOLERANCE, v)); +} + +static DEVICE_ATTR_RO(power1_xeon_limit); +static DEVICE_ATTR_RO(power1_fpga_limit); +static DEVICE_ATTR_RO(power1_ltr); + +static struct attribute *power_extra_attrs[] = { + &dev_attr_power1_xeon_limit.attr, + &dev_attr_power1_fpga_limit.attr, + &dev_attr_power1_ltr.attr, + NULL +}; + +ATTRIBUTE_GROUPS(power_extra); + +static int fme_power_mgmt_init(struct platform_device *pdev, + struct dfl_feature *feature) +{ + struct device *hwmon; + + hwmon = devm_hwmon_device_register_with_info(&pdev->dev, + "dfl_fme_power", feature, + &power_hwmon_chip_info, + power_extra_groups); + if (IS_ERR(hwmon)) { + dev_err(&pdev->dev, "Fail to register power hwmon\n"); + return PTR_ERR(hwmon); + } + + return 0; +} + +static const struct dfl_feature_id fme_power_mgmt_id_table[] = { + {.id = FME_FEATURE_ID_POWER_MGMT,}, + {0,} +}; + +static const struct dfl_feature_ops fme_power_mgmt_ops = { + .init = fme_power_mgmt_init, }; static struct dfl_feature_driver fme_feature_drvs[] = { { - .id = FME_FEATURE_ID_HEADER, + .id_table = fme_hdr_id_table, .ops = &fme_hdr_ops, }, { - .id = FME_FEATURE_ID_PR_MGMT, - .ops = &pr_mgmt_ops, + .id_table = fme_pr_mgmt_id_table, + .ops = &fme_pr_mgmt_ops, + }, + { + .id_table = fme_global_err_id_table, + .ops = &fme_global_err_ops, + }, + { + .id_table = fme_thermal_mgmt_id_table, + .ops = &fme_thermal_mgmt_ops, + }, + { + .id_table = fme_power_mgmt_id_table, + .ops = &fme_power_mgmt_ops, }, { .ops = NULL, @@ -263,9 +725,16 @@ static int fme_remove(struct platform_device *pdev) return 0; } +static const struct attribute_group *fme_dev_groups[] = { + &fme_hdr_group, + &fme_global_err_group, + NULL +}; + static struct platform_driver fme_driver = { .driver = { - .name = DFL_FPGA_FEATURE_DEV_FME, + .name = DFL_FPGA_FEATURE_DEV_FME, + .dev_groups = fme_dev_groups, }, .probe = fme_probe, .remove = fme_remove, diff --git a/drivers/fpga/dfl-fme-mgr.c b/drivers/fpga/dfl-fme-mgr.c index 76f37709dd1a..b3f7eee3c93f 100644 --- a/drivers/fpga/dfl-fme-mgr.c +++ b/drivers/fpga/dfl-fme-mgr.c @@ -30,8 +30,8 @@ #define FME_PR_STS 0x10 #define FME_PR_DATA 0x18 #define FME_PR_ERR 0x20 -#define FME_PR_INTFC_ID_H 0xA8 -#define FME_PR_INTFC_ID_L 0xB0 +#define FME_PR_INTFC_ID_L 0xA8 +#define 
FME_PR_INTFC_ID_H 0xB0 /* FME PR Control Register Bitfield */ #define FME_PR_CTRL_PR_RST BIT_ULL(0) /* Reset PR engine */ diff --git a/drivers/fpga/dfl-fme-pr.c b/drivers/fpga/dfl-fme-pr.c index d9ca9554844a..a233a53db708 100644 --- a/drivers/fpga/dfl-fme-pr.c +++ b/drivers/fpga/dfl-fme-pr.c @@ -74,6 +74,7 @@ static int fme_pr(struct platform_device *pdev, unsigned long arg) struct dfl_fme *fme; unsigned long minsz; void *buf = NULL; + size_t length; int ret = 0; u64 v; @@ -85,9 +86,6 @@ static int fme_pr(struct platform_device *pdev, unsigned long arg) if (port_pr.argsz < minsz || port_pr.flags) return -EINVAL; - if (!IS_ALIGNED(port_pr.buffer_size, 4)) - return -EINVAL; - /* get fme header region */ fme_hdr = dfl_get_feature_ioaddr_by_id(&pdev->dev, FME_FEATURE_ID_HEADER); @@ -103,7 +101,13 @@ static int fme_pr(struct platform_device *pdev, unsigned long arg) port_pr.buffer_size)) return -EFAULT; - buf = vmalloc(port_pr.buffer_size); + /* + * align the PR buffer per PR bandwidth, as the HW automatically + * ignores the extra padding data. + */ + length = ALIGN(port_pr.buffer_size, 4); + + buf = vmalloc(length); if (!buf) return -ENOMEM; @@ -140,7 +144,7 @@ static int fme_pr(struct platform_device *pdev, unsigned long arg) fpga_image_info_free(region->info); info->buf = buf; - info->count = port_pr.buffer_size; + info->count = length; info->region_id = port_pr.port_id; region->info = info; @@ -159,9 +163,6 @@ unlock_exit: mutex_unlock(&pdata->lock); free_exit: vfree(buf); - if (copy_to_user((void __user *)arg, &port_pr, minsz)) - return -EFAULT; - return ret; } @@ -469,7 +470,12 @@ static long fme_pr_ioctl(struct platform_device *pdev, return ret; } -const struct dfl_feature_ops pr_mgmt_ops = { +const struct dfl_feature_id fme_pr_mgmt_id_table[] = { + {.id = FME_FEATURE_ID_PR_MGMT,}, + {0} +}; + +const struct dfl_feature_ops fme_pr_mgmt_ops = { .init = pr_mgmt_init, .uinit = pr_mgmt_uinit, .ioctl = fme_pr_ioctl, diff --git a/drivers/fpga/dfl-fme.h b/drivers/fpga/dfl-fme.h index 5394a216c5c0..6685c8ef965b 100644 --- a/drivers/fpga/dfl-fme.h +++ b/drivers/fpga/dfl-fme.h @@ -33,6 +33,10 @@ struct dfl_fme { struct dfl_feature_platform_data *pdata; }; -extern const struct dfl_feature_ops pr_mgmt_ops; +extern const struct dfl_feature_ops fme_pr_mgmt_ops; +extern const struct dfl_feature_id fme_pr_mgmt_id_table[]; +extern const struct dfl_feature_ops fme_global_err_ops; +extern const struct dfl_feature_id fme_global_err_id_table[]; +extern const struct attribute_group fme_global_err_group; #endif /* __DFL_FME_H */ diff --git a/drivers/fpga/dfl-pci.c b/drivers/fpga/dfl-pci.c index 66b5720582bb..89ca292236ad 100644 --- a/drivers/fpga/dfl-pci.c +++ b/drivers/fpga/dfl-pci.c @@ -223,8 +223,43 @@ disable_error_report_exit: return ret; } +static int cci_pci_sriov_configure(struct pci_dev *pcidev, int num_vfs) +{ + struct cci_drvdata *drvdata = pci_get_drvdata(pcidev); + struct dfl_fpga_cdev *cdev = drvdata->cdev; + int ret = 0; + + if (!num_vfs) { + /* + * disable SRIOV and then put the released ports back into the + * default PF access mode. + */ + pci_disable_sriov(pcidev); + + dfl_fpga_cdev_config_ports_pf(cdev); + + } else { + /* + * before enabling SRIOV, first put the released ports into + * VF access mode. 
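+ * Each released port then backs exactly one VF once SRIOV is enabled.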
+ */ + ret = dfl_fpga_cdev_config_ports_vf(cdev, num_vfs); + if (ret) + return ret; + + ret = pci_enable_sriov(pcidev, num_vfs); + if (ret) + dfl_fpga_cdev_config_ports_pf(cdev); + } + + return ret; +} + static void cci_pci_remove(struct pci_dev *pcidev) { + if (dev_is_pf(&pcidev->dev)) + cci_pci_sriov_configure(pcidev, 0); + cci_remove_feature_devs(pcidev); pci_disable_pcie_error_reporting(pcidev); } @@ -234,6 +269,7 @@ static struct pci_driver cci_pci_driver = { .id_table = cci_pcie_id_tbl, .probe = cci_pci_probe, .remove = cci_pci_remove, + .sriov_configure = cci_pci_sriov_configure, }; module_pci_driver(cci_pci_driver); diff --git a/drivers/fpga/dfl.c b/drivers/fpga/dfl.c index 2c09e502e721..96a2b8274a33 100644 --- a/drivers/fpga/dfl.c +++ b/drivers/fpga/dfl.c @@ -40,6 +40,13 @@ enum dfl_fpga_devt_type { DFL_FPGA_DEVT_MAX, }; +static struct lock_class_key dfl_pdata_keys[DFL_ID_MAX]; + +static const char *dfl_pdata_key_strings[DFL_ID_MAX] = { + "dfl-fme-pdata", + "dfl-port-pdata", +}; + /** * dfl_dev_info - dfl feature device information. * @name: name string of the feature platform device. @@ -224,16 +231,20 @@ EXPORT_SYMBOL_GPL(dfl_fpga_port_ops_del); */ int dfl_fpga_check_port_id(struct platform_device *pdev, void *pport_id) { - struct dfl_fpga_port_ops *port_ops = dfl_fpga_port_ops_get(pdev); - int port_id; + struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev); + struct dfl_fpga_port_ops *port_ops; + if (pdata->id != FEATURE_DEV_ID_UNUSED) + return pdata->id == *(int *)pport_id; + + port_ops = dfl_fpga_port_ops_get(pdev); if (!port_ops || !port_ops->get_id) return 0; - port_id = port_ops->get_id(pdev); + pdata->id = port_ops->get_id(pdev); dfl_fpga_port_ops_put(port_ops); - return port_id == *(int *)pport_id; + return pdata->id == *(int *)pport_id; } EXPORT_SYMBOL_GPL(dfl_fpga_check_port_id); @@ -248,7 +259,8 @@ void dfl_fpga_dev_feature_uinit(struct platform_device *pdev) dfl_fpga_dev_for_each_feature(pdata, feature) if (feature->ops) { - feature->ops->uinit(pdev, feature); + if (feature->ops->uinit) + feature->ops->uinit(pdev, feature); feature->ops = NULL; } } @@ -259,17 +271,34 @@ static int dfl_feature_instance_init(struct platform_device *pdev, struct dfl_feature *feature, struct dfl_feature_driver *drv) { - int ret; + int ret = 0; - ret = drv->ops->init(pdev, feature); - if (ret) - return ret; + if (drv->ops->init) { + ret = drv->ops->init(pdev, feature); + if (ret) + return ret; + } feature->ops = drv->ops; return ret; } +static bool dfl_feature_drv_match(struct dfl_feature *feature, + struct dfl_feature_driver *driver) +{ + const struct dfl_feature_id *ids = driver->id_table; + + if (ids) { + while (ids->id) { + if (ids->id == feature->id) + return true; + ids++; + } + } + return false; +} + /** * dfl_fpga_dev_feature_init - init for sub features of dfl feature device * @pdev: feature device. 
@@ -290,8 +319,7 @@ int dfl_fpga_dev_feature_init(struct platform_device *pdev, while (drv->ops) { dfl_fpga_dev_for_each_feature(pdata, feature) { - /* match feature and drv using id */ - if (feature->id == drv->id) { + if (dfl_feature_drv_match(feature, drv)) { ret = dfl_feature_instance_init(pdev, pdata, feature, drv); if (ret) @@ -315,7 +343,7 @@ static void dfl_chardev_uinit(void) for (i = 0; i < DFL_FPGA_DEVT_MAX; i++) if (MAJOR(dfl_chrdevs[i].devt)) { unregister_chrdev_region(dfl_chrdevs[i].devt, - MINORMASK); + MINORMASK + 1); dfl_chrdevs[i].devt = MKDEV(0, 0); } } @@ -325,8 +353,8 @@ static int dfl_chardev_init(void) int i, ret; for (i = 0; i < DFL_FPGA_DEVT_MAX; i++) { - ret = alloc_chrdev_region(&dfl_chrdevs[i].devt, 0, MINORMASK, - dfl_chrdevs[i].name); + ret = alloc_chrdev_region(&dfl_chrdevs[i].devt, 0, + MINORMASK + 1, dfl_chrdevs[i].name); if (ret) goto exit; } @@ -443,11 +471,16 @@ static int build_info_commit_dev(struct build_feature_devs_info *binfo) struct platform_device *fdev = binfo->feature_dev; struct dfl_feature_platform_data *pdata; struct dfl_feature_info *finfo, *p; + enum dfl_id_type type; int ret, index = 0; if (!fdev) return 0; + type = feature_dev_id_type(fdev); + if (WARN_ON_ONCE(type >= DFL_ID_MAX)) + return -EINVAL; + /* * we do not need to care for the memory which is associated with * the platform device. After calling platform_device_unregister(), @@ -462,7 +495,10 @@ static int build_info_commit_dev(struct build_feature_devs_info *binfo) pdata->dev = fdev; pdata->num = binfo->feature_num; pdata->dfl_cdev = binfo->cdev; + pdata->id = FEATURE_DEV_ID_UNUSED; mutex_init(&pdata->lock); + lockdep_set_class_and_name(&pdata->lock, &dfl_pdata_keys[type], + dfl_pdata_key_strings[type]); /* * the count should be initialized to 0 to make sure @@ -497,7 +533,7 @@ static int build_info_commit_dev(struct build_feature_devs_info *binfo) ret = platform_device_add(binfo->feature_dev); if (!ret) { - if (feature_dev_id_type(binfo->feature_dev) == PORT_ID) + if (type == PORT_ID) dfl_fpga_cdev_add_port_dev(binfo->cdev, binfo->feature_dev); else @@ -959,25 +995,27 @@ void dfl_fpga_feature_devs_remove(struct dfl_fpga_cdev *cdev) { struct dfl_feature_platform_data *pdata, *ptmp; - remove_feature_devs(cdev); - mutex_lock(&cdev->lock); - if (cdev->fme_dev) { - /* the fme should be unregistered. */ - WARN_ON(device_is_registered(cdev->fme_dev)); + if (cdev->fme_dev) put_device(cdev->fme_dev); - } list_for_each_entry_safe(pdata, ptmp, &cdev->port_dev_list, node) { struct platform_device *port_dev = pdata->dev; - /* the port should be unregistered. */ - WARN_ON(device_is_registered(&port_dev->dev)); + /* remove released ports */ + if (!device_is_registered(&port_dev->dev)) { + dfl_id_free(feature_dev_id_type(port_dev), + port_dev->id); + platform_device_put(port_dev); + } + list_del(&pdata->node); put_device(&port_dev->dev); } mutex_unlock(&cdev->lock); + remove_feature_devs(cdev); + fpga_region_unregister(cdev->region); devm_kfree(cdev->parent, cdev); } @@ -1028,6 +1066,170 @@ static int __init dfl_fpga_init(void) return ret; } +/** + * dfl_fpga_cdev_release_port - release a port platform device + * + * @cdev: parent container device. + * @port_id: id of the port platform device. + * + * This function allows the user to release a port platform device. This is a + * mandatory step before turning a port from PF into VF for SRIOV support. + * + * Return: 0 on success, negative error code otherwise. 
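+ * -EBUSY is returned if the port has already been released.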
+ */ +int dfl_fpga_cdev_release_port(struct dfl_fpga_cdev *cdev, int port_id) +{ + struct platform_device *port_pdev; + int ret = -ENODEV; + + mutex_lock(&cdev->lock); + port_pdev = __dfl_fpga_cdev_find_port(cdev, &port_id, + dfl_fpga_check_port_id); + if (!port_pdev) + goto unlock_exit; + + if (!device_is_registered(&port_pdev->dev)) { + ret = -EBUSY; + goto put_dev_exit; + } + + ret = dfl_feature_dev_use_begin(dev_get_platdata(&port_pdev->dev)); + if (ret) + goto put_dev_exit; + + platform_device_del(port_pdev); + cdev->released_port_num++; +put_dev_exit: + put_device(&port_pdev->dev); +unlock_exit: + mutex_unlock(&cdev->lock); + return ret; +} +EXPORT_SYMBOL_GPL(dfl_fpga_cdev_release_port); + +/** + * dfl_fpga_cdev_assign_port - assign a port platform device back + * + * @cdev: parent container device. + * @port_id: id of the port platform device. + * + * This function allows the user to assign a port platform device back. This is + * a mandatory step after disabling SRIOV support. + * + * Return: 0 on success, negative error code otherwise. + */ +int dfl_fpga_cdev_assign_port(struct dfl_fpga_cdev *cdev, int port_id) +{ + struct platform_device *port_pdev; + int ret = -ENODEV; + + mutex_lock(&cdev->lock); + port_pdev = __dfl_fpga_cdev_find_port(cdev, &port_id, + dfl_fpga_check_port_id); + if (!port_pdev) + goto unlock_exit; + + if (device_is_registered(&port_pdev->dev)) { + ret = -EBUSY; + goto put_dev_exit; + } + + ret = platform_device_add(port_pdev); + if (ret) + goto put_dev_exit; + + dfl_feature_dev_use_end(dev_get_platdata(&port_pdev->dev)); + cdev->released_port_num--; +put_dev_exit: + put_device(&port_pdev->dev); +unlock_exit: + mutex_unlock(&cdev->lock); + return ret; +} +EXPORT_SYMBOL_GPL(dfl_fpga_cdev_assign_port); + +static void config_port_access_mode(struct device *fme_dev, int port_id, + bool is_vf) +{ + void __iomem *base; + u64 v; + + base = dfl_get_feature_ioaddr_by_id(fme_dev, FME_FEATURE_ID_HEADER); + + v = readq(base + FME_HDR_PORT_OFST(port_id)); + + v &= ~FME_PORT_OFST_ACC_CTRL; + v |= FIELD_PREP(FME_PORT_OFST_ACC_CTRL, + is_vf ? FME_PORT_OFST_ACC_VF : FME_PORT_OFST_ACC_PF); + + writeq(v, base + FME_HDR_PORT_OFST(port_id)); +} + +#define config_port_vf_mode(dev, id) config_port_access_mode(dev, id, true) +#define config_port_pf_mode(dev, id) config_port_access_mode(dev, id, false) + +/** + * dfl_fpga_cdev_config_ports_pf - configure ports to PF access mode + * + * @cdev: parent container device. + * + * This function is needed in the sriov configuration routine. It is used to + * configure all released ports from VF access mode back to PF. + */ +void dfl_fpga_cdev_config_ports_pf(struct dfl_fpga_cdev *cdev) +{ + struct dfl_feature_platform_data *pdata; + + mutex_lock(&cdev->lock); + list_for_each_entry(pdata, &cdev->port_dev_list, node) { + if (device_is_registered(&pdata->dev->dev)) + continue; + + config_port_pf_mode(cdev->fme_dev, pdata->id); + } + mutex_unlock(&cdev->lock); +} +EXPORT_SYMBOL_GPL(dfl_fpga_cdev_config_ports_pf); + +/** + * dfl_fpga_cdev_config_ports_vf - configure ports to VF access mode + * + * @cdev: parent container device. + * @num_vfs: VF device number. + * + * This function is needed in the sriov configuration routine. It is used to + * configure the released ports from PF access mode to VF. + * + * Return: 0 on success, negative error code otherwise. 
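+ * -EINVAL is returned if num_vfs does not match the released port number.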
+ */ +int dfl_fpga_cdev_config_ports_vf(struct dfl_fpga_cdev *cdev, int num_vfs) +{ + struct dfl_feature_platform_data *pdata; + int ret = 0; + + mutex_lock(&cdev->lock); + /* + * can't turn multiple ports into 1 VF device, only 1 port for 1 VF + * device, so if released port number doesn't match VF device number, + * then reject the request with -EINVAL error code. + */ + if (cdev->released_port_num != num_vfs) { + ret = -EINVAL; + goto done; + } + + list_for_each_entry(pdata, &cdev->port_dev_list, node) { + if (device_is_registered(&pdata->dev->dev)) + continue; + + config_port_vf_mode(cdev->fme_dev, pdata->id); + } +done: + mutex_unlock(&cdev->lock); + return ret; +} +EXPORT_SYMBOL_GPL(dfl_fpga_cdev_config_ports_vf); + static void __exit dfl_fpga_exit(void) { dfl_chardev_uinit(); diff --git a/drivers/fpga/dfl.h b/drivers/fpga/dfl.h index a8b869e9e5b7..9f0e656de720 100644 --- a/drivers/fpga/dfl.h +++ b/drivers/fpga/dfl.h @@ -30,8 +30,8 @@ /* plus one for fme device */ #define MAX_DFL_FEATURE_DEV_NUM (MAX_DFL_FPGA_PORT_NUM + 1) -/* Reserved 0x0 for Header Group Register and 0xff for AFU */ -#define FEATURE_ID_FIU_HEADER 0x0 +/* Reserved 0xfe for Header Group Register and 0xff for AFU */ +#define FEATURE_ID_FIU_HEADER 0xfe #define FEATURE_ID_AFU 0xff #define FME_FEATURE_ID_HEADER FEATURE_ID_FIU_HEADER @@ -119,6 +119,11 @@ #define PORT_HDR_NEXT_AFU NEXT_AFU #define PORT_HDR_CAP 0x30 #define PORT_HDR_CTRL 0x38 +#define PORT_HDR_STS 0x40 +#define PORT_HDR_USRCLK_CMD0 0x50 +#define PORT_HDR_USRCLK_CMD1 0x58 +#define PORT_HDR_USRCLK_STS0 0x60 +#define PORT_HDR_USRCLK_STS1 0x68 /* Port Capability Register Bitfield */ #define PORT_CAP_PORT_NUM GENMASK_ULL(1, 0) /* ID of this port */ @@ -130,6 +135,16 @@ /* Latency tolerance reporting. '1' >= 40us, '0' < 40us.*/ #define PORT_CTRL_LATENCY BIT_ULL(2) #define PORT_CTRL_SFTRST_ACK BIT_ULL(4) /* HW ack for reset */ + +/* Port Status Register Bitfield */ +#define PORT_STS_AP2_EVT BIT_ULL(13) /* AP2 event detected */ +#define PORT_STS_AP1_EVT BIT_ULL(12) /* AP1 event detected */ +#define PORT_STS_PWR_STATE GENMASK_ULL(11, 8) /* AFU power states */ +#define PORT_STS_PWR_STATE_NORM 0 +#define PORT_STS_PWR_STATE_AP1 1 /* 50% throttling */ +#define PORT_STS_PWR_STATE_AP2 2 /* 90% throttling */ +#define PORT_STS_PWR_STATE_AP6 6 /* 100% throttling */ + /** * struct dfl_fpga_port_ops - port ops * @@ -154,13 +169,22 @@ void dfl_fpga_port_ops_put(struct dfl_fpga_port_ops *ops); int dfl_fpga_check_port_id(struct platform_device *pdev, void *pport_id); /** - * struct dfl_feature_driver - sub feature's driver + * struct dfl_feature_id - dfl private feature id * - * @id: sub feature id. - * @ops: ops of this sub feature. + * @id: unique dfl private feature id. */ -struct dfl_feature_driver { +struct dfl_feature_id { u64 id; +}; + +/** + * struct dfl_feature_driver - dfl private feature driver + * + * @id_table: id_table for dfl private features supported by this driver. + * @ops: ops of this dfl private feature driver. + */ +struct dfl_feature_driver { + const struct dfl_feature_id *id_table; const struct dfl_feature_ops *ops; }; @@ -183,6 +207,8 @@ struct dfl_feature { #define DEV_STATUS_IN_USE 0 +#define FEATURE_DEV_ID_UNUSED (-1) + /** * struct dfl_feature_platform_data - platform data for feature devices * @@ -191,6 +217,7 @@ struct dfl_feature { * @cdev: cdev of feature dev. * @dev: ptr to platform device linked with this platform data. * @dfl_cdev: ptr to container device. + * @id: id used for this feature device. * @disable_count: count for port disable. 
* @num: number for sub features. * @dev_status: dev status (e.g. DEV_STATUS_IN_USE). @@ -203,6 +230,7 @@ struct dfl_feature_platform_data { struct cdev cdev; struct platform_device *dev; struct dfl_fpga_cdev *dfl_cdev; + int id; unsigned int disable_count; unsigned long dev_status; void *private; @@ -331,6 +359,11 @@ static inline bool dfl_feature_is_port(void __iomem *base) (FIELD_GET(DFH_ID, v) == DFH_ID_FIU_PORT); } +static inline u8 dfl_feature_revision(void __iomem *base) +{ + return (u8)FIELD_GET(DFH_REVISION, readq(base + DFH)); +} + /** * struct dfl_fpga_enum_info - DFL FPGA enumeration information * @@ -373,6 +406,7 @@ void dfl_fpga_enum_info_free(struct dfl_fpga_enum_info *info); * @fme_dev: FME feature device under this container device. * @lock: mutex lock to protect the port device list. * @port_dev_list: list of all port feature devices under this container device. + * @released_port_num: released port number under this container device. */ struct dfl_fpga_cdev { struct device *parent; @@ -380,6 +414,7 @@ struct dfl_fpga_cdev { struct device *fme_dev; struct mutex lock; struct list_head port_dev_list; + int released_port_num; }; struct dfl_fpga_cdev * @@ -407,4 +442,9 @@ dfl_fpga_cdev_find_port(struct dfl_fpga_cdev *cdev, void *data, return pdev; } + +int dfl_fpga_cdev_release_port(struct dfl_fpga_cdev *cdev, int port_id); +int dfl_fpga_cdev_assign_port(struct dfl_fpga_cdev *cdev, int port_id); +void dfl_fpga_cdev_config_ports_pf(struct dfl_fpga_cdev *cdev); +int dfl_fpga_cdev_config_ports_vf(struct dfl_fpga_cdev *cdev, int num_vf); #endif /* __FPGA_DFL_H */ diff --git a/drivers/fpga/fpga-bridge.c b/drivers/fpga/fpga-bridge.c index 80bd8f1b2aa6..4bab9028940a 100644 --- a/drivers/fpga/fpga-bridge.c +++ b/drivers/fpga/fpga-bridge.c @@ -19,11 +19,6 @@ static struct class *fpga_bridge_class; /* Lock for adding/removing bridges to linked lists*/ static spinlock_t bridge_list_lock; -static int fpga_bridge_of_node_match(struct device *dev, const void *data) -{ - return dev->of_node == data; -} - /** * fpga_bridge_enable - Enable transactions on the bridge * @@ -104,8 +99,7 @@ struct fpga_bridge *of_fpga_bridge_get(struct device_node *np, { struct device *dev; - dev = class_find_device(fpga_bridge_class, NULL, np, - fpga_bridge_of_node_match); + dev = class_find_device_by_of_node(fpga_bridge_class, np); if (!dev) return ERR_PTR(-ENODEV); diff --git a/drivers/fpga/fpga-mgr.c b/drivers/fpga/fpga-mgr.c index c3866816456a..e05104f5e40c 100644 --- a/drivers/fpga/fpga-mgr.c +++ b/drivers/fpga/fpga-mgr.c @@ -482,11 +482,6 @@ struct fpga_manager *fpga_mgr_get(struct device *dev) } EXPORT_SYMBOL_GPL(fpga_mgr_get); -static int fpga_mgr_of_node_match(struct device *dev, const void *data) -{ - return dev->of_node == data; -} - /** * of_fpga_mgr_get - Given a device node, get a reference to a fpga mgr. * @@ -498,8 +493,7 @@ struct fpga_manager *of_fpga_mgr_get(struct device_node *node) { struct device *dev; - dev = class_find_device(fpga_mgr_class, NULL, node, - fpga_mgr_of_node_match); + dev = class_find_device_by_of_node(fpga_mgr_class, node); if (!dev) return ERR_PTR(-ENODEV); diff --git a/drivers/fpga/ice40-spi.c b/drivers/fpga/ice40-spi.c index 6154661b8f76..56e112e14a10 100644 --- a/drivers/fpga/ice40-spi.c +++ b/drivers/fpga/ice40-spi.c @@ -1,12 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * FPGA Manager Driver for Lattice iCE40. 
* * Copyright (c) 2016 Joel Holdsworth * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. - * * This driver adds support to the FPGA manager for configuring the SRAM of * Lattice iCE40 FPGAs through slave SPI. */ diff --git a/drivers/fpga/of-fpga-region.c b/drivers/fpga/of-fpga-region.c index 75f64abf9c81..e405309baadc 100644 --- a/drivers/fpga/of-fpga-region.c +++ b/drivers/fpga/of-fpga-region.c @@ -22,11 +22,6 @@ static const struct of_device_id fpga_region_of_match[] = { }; MODULE_DEVICE_TABLE(of, fpga_region_of_match); -static int fpga_region_of_node_match(struct device *dev, const void *data) -{ - return dev->of_node == data; -} - /** * of_fpga_region_find - find FPGA region * @np: device node of FPGA Region @@ -37,7 +32,7 @@ static int fpga_region_of_node_match(struct device *dev, const void *data) */ static struct fpga_region *of_fpga_region_find(struct device_node *np) { - return fpga_region_class_find(NULL, np, fpga_region_of_node_match); + return fpga_region_class_find(NULL, np, device_match_of_node); } /** diff --git a/drivers/fpga/stratix10-soc.c b/drivers/fpga/stratix10-soc.c index 13851b3d1c56..215d33789c74 100644 --- a/drivers/fpga/stratix10-soc.c +++ b/drivers/fpga/stratix10-soc.c @@ -507,12 +507,16 @@ static int __init s10_init(void) if (!fw_np) return -ENODEV; + of_node_get(fw_np); np = of_find_matching_node(fw_np, s10_of_match); - if (!np) + if (!np) { + of_node_put(fw_np); return -ENODEV; + } of_node_put(np); ret = of_platform_populate(fw_np, s10_of_match, NULL, NULL); + of_node_put(fw_np); if (ret) return ret; diff --git a/drivers/fpga/ts73xx-fpga.c b/drivers/fpga/ts73xx-fpga.c index dc22a5842609..9a17fe98c1b0 100644 --- a/drivers/fpga/ts73xx-fpga.c +++ b/drivers/fpga/ts73xx-fpga.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * Technologic Systems TS-73xx SBC FPGA loader * @@ -5,15 +6,6 @@ * * FPGA Manager Driver for the on-board Altera Cyclone II FPGA found on * TS-7300, heavily based on load_fpga.c in their vendor tree. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. */ #include <linux/delay.h> diff --git a/drivers/fpga/xilinx-pr-decoupler.c b/drivers/fpga/xilinx-pr-decoupler.c index 641036135207..af9b387c56d3 100644 --- a/drivers/fpga/xilinx-pr-decoupler.c +++ b/drivers/fpga/xilinx-pr-decoupler.c @@ -1,18 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2017, National Instruments Corp. * Copyright (c) 2017, Xilix Inc * * FPGA Bridge Driver for the Xilinx LogiCORE Partial Reconfiguration * Decoupler IP Core. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
*/ #include <linux/clk.h> diff --git a/drivers/fpga/xilinx-spi.c b/drivers/fpga/xilinx-spi.c index 469486be20c4..272ee0c22822 100644 --- a/drivers/fpga/xilinx-spi.c +++ b/drivers/fpga/xilinx-spi.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * Xilinx Spartan6 Slave Serial SPI Driver * @@ -5,10 +6,6 @@ * * Anatolij Gustschin <agust@denx.de> * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * * Manage Xilinx FPGA firmware that is loaded over SPI using * the slave serial configuration interface. */ diff --git a/drivers/fpga/zynq-fpga.c b/drivers/fpga/zynq-fpga.c index 57b0e6775958..ee7765049607 100644 --- a/drivers/fpga/zynq-fpga.c +++ b/drivers/fpga/zynq-fpga.c @@ -1,18 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2011-2015 Xilinx Inc. * Copyright (c) 2015, National Instruments Corp. * * FPGA Manager Driver for Xilinx Zynq, heavily based on xdevcfg driver * in their vendor tree. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. */ #include <linux/clk.h> @@ -586,10 +578,8 @@ static int zynq_fpga_probe(struct platform_device *pdev) init_completion(&priv->dma_done); priv->irq = platform_get_irq(pdev, 0); - if (priv->irq < 0) { - dev_err(dev, "No IRQ available\n"); + if (priv->irq < 0) return priv->irq; - } priv->clk = devm_clk_get(dev, "ref_clk"); if (IS_ERR(priv->clk)) { diff --git a/drivers/fpga/zynqmp-fpga.c b/drivers/fpga/zynqmp-fpga.c new file mode 100644 index 000000000000..b8a88d21d038 --- /dev/null +++ b/drivers/fpga/zynqmp-fpga.c @@ -0,0 +1,159 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2019 Xilinx, Inc. 
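+ * + * Manage the ZynqMP programmable logic through the processor configuration + * access port (PCAP): bitstreams are handed off to the platform firmware + * via the EEMI fpga_load call.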
+ */ + +#include <linux/dma-mapping.h> +#include <linux/fpga/fpga-mgr.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of_address.h> +#include <linux/string.h> +#include <linux/firmware/xlnx-zynqmp.h> + +/* Constant Definitions */ +#define IXR_FPGA_DONE_MASK BIT(3) + +/** + * struct zynqmp_fpga_priv - Private data structure + * @dev: Device data structure + * @flags: flags used to identify the bitfile type + */ +struct zynqmp_fpga_priv { + struct device *dev; + u32 flags; +}; + +static int zynqmp_fpga_ops_write_init(struct fpga_manager *mgr, + struct fpga_image_info *info, + const char *buf, size_t size) +{ + struct zynqmp_fpga_priv *priv; + + priv = mgr->priv; + priv->flags = info->flags; + + return 0; +} + +static int zynqmp_fpga_ops_write(struct fpga_manager *mgr, + const char *buf, size_t size) +{ + const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops(); + struct zynqmp_fpga_priv *priv; + dma_addr_t dma_addr; + u32 eemi_flags = 0; + char *kbuf; + int ret; + + if (IS_ERR_OR_NULL(eemi_ops) || !eemi_ops->fpga_load) + return -ENXIO; + + priv = mgr->priv; + + kbuf = dma_alloc_coherent(priv->dev, size, &dma_addr, GFP_KERNEL); + if (!kbuf) + return -ENOMEM; + + memcpy(kbuf, buf, size); + + wmb(); /* ensure all writes are done before initiating the FW call */ + + if (priv->flags & FPGA_MGR_PARTIAL_RECONFIG) + eemi_flags |= XILINX_ZYNQMP_PM_FPGA_PARTIAL; + + ret = eemi_ops->fpga_load(dma_addr, size, eemi_flags); + + dma_free_coherent(priv->dev, size, kbuf, dma_addr); + + return ret; +} + +static int zynqmp_fpga_ops_write_complete(struct fpga_manager *mgr, + struct fpga_image_info *info) +{ + return 0; +} + +static enum fpga_mgr_states zynqmp_fpga_ops_state(struct fpga_manager *mgr) +{ + const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops(); + u32 status; + + if (IS_ERR_OR_NULL(eemi_ops) || !eemi_ops->fpga_get_status) + return FPGA_MGR_STATE_UNKNOWN; + + eemi_ops->fpga_get_status(&status); + if (status & IXR_FPGA_DONE_MASK) + return FPGA_MGR_STATE_OPERATING; + + return FPGA_MGR_STATE_UNKNOWN; +} + +static const struct fpga_manager_ops zynqmp_fpga_ops = { + .state = zynqmp_fpga_ops_state, + .write_init = zynqmp_fpga_ops_write_init, + .write = zynqmp_fpga_ops_write, + .write_complete = zynqmp_fpga_ops_write_complete, +}; + +static int zynqmp_fpga_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct zynqmp_fpga_priv *priv; + struct fpga_manager *mgr; + int ret; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->dev = dev; + + mgr = devm_fpga_mgr_create(dev, "Xilinx ZynqMP FPGA Manager", + &zynqmp_fpga_ops, priv); + if (!mgr) + return -ENOMEM; + + platform_set_drvdata(pdev, mgr); + + ret = fpga_mgr_register(mgr); + if (ret) { + dev_err(dev, "unable to register FPGA manager\n"); + return ret; + } + + return 0; +} + +static int zynqmp_fpga_remove(struct platform_device *pdev) +{ + struct fpga_manager *mgr = platform_get_drvdata(pdev); + + fpga_mgr_unregister(mgr); + + return 0; +} + +static const struct of_device_id zynqmp_fpga_of_match[] = { + { .compatible = "xlnx,zynqmp-pcap-fpga", }, + {}, +}; + +MODULE_DEVICE_TABLE(of, zynqmp_fpga_of_match); + +static struct platform_driver zynqmp_fpga_driver = { + .probe = zynqmp_fpga_probe, + .remove = zynqmp_fpga_remove, + .driver = { + .name = "zynqmp_fpga_manager", + .of_match_table = of_match_ptr(zynqmp_fpga_of_match), + }, +}; + +module_platform_driver(zynqmp_fpga_driver); + +MODULE_AUTHOR("Nava kishore Manne 
<navam@xilinx.com>"); +MODULE_DESCRIPTION("Xilinx ZynqMP FPGA Manager"); +MODULE_LICENSE("GPL");
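For readers following the dfl id_table refactor in this patch, here is a minimal, hypothetical sketch (not part of the patch itself) of how a private feature sub-driver would be declared against the new dfl_feature_id matching; the feature id 0x23 and all demo_* names are invented for illustration:

/* hypothetical DFL private feature sub-driver using the new id_table match */
static int demo_feature_init(struct platform_device *pdev,
			     struct dfl_feature *feature)
{
	/* runs once dfl_feature_drv_match() finds feature id 0x23 */
	dev_dbg(&pdev->dev, "demo feature at %p\n", feature->ioaddr);
	return 0;
}

static const struct dfl_feature_id demo_feature_id_table[] = {
	{.id = 0x23,},
	{0,}
};

/* .uinit and .ioctl may be omitted now that the core tolerates NULL ops */
static const struct dfl_feature_ops demo_feature_ops = {
	.init = demo_feature_init,
};

/* table entry, in the style of fme_feature_drvs[]: */
/* { .id_table = demo_feature_id_table, .ops = &demo_feature_ops, }, */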