Diffstat (limited to 'drivers/fpga')
-rw-r--r--	drivers/fpga/Kconfig			  7
-rw-r--r--	drivers/fpga/Makefile			  3
-rw-r--r--	drivers/fpga/altera-cvp.c		342
-rw-r--r--	drivers/fpga/altera-pr-ip-core-plat.c	  4
-rw-r--r--	drivers/fpga/altera-pr-ip-core.c	  4
-rw-r--r--	drivers/fpga/altera-ps-spi.c		 11
-rw-r--r--	drivers/fpga/dfl-afu-error.c		230
-rw-r--r--	drivers/fpga/dfl-afu-main.c		381
-rw-r--r--	drivers/fpga/dfl-afu.h			  9
-rw-r--r--	drivers/fpga/dfl-fme-error.c		359
-rw-r--r--	drivers/fpga/dfl-fme-main.c		128
-rw-r--r--	drivers/fpga/dfl-fme-pr.c		  7
-rw-r--r--	drivers/fpga/dfl-fme.h			  6
-rw-r--r--	drivers/fpga/dfl-pci.c			 36
-rw-r--r--	drivers/fpga/dfl.c			226
-rw-r--r--	drivers/fpga/dfl.h			 52
-rw-r--r--	drivers/fpga/fpga-bridge.c		  8
-rw-r--r--	drivers/fpga/fpga-mgr.c			  8
18 files changed, 1631 insertions, 190 deletions
diff --git a/drivers/fpga/Kconfig b/drivers/fpga/Kconfig
index 474f304ec109..73c779e920ed 100644
--- a/drivers/fpga/Kconfig
+++ b/drivers/fpga/Kconfig
@@ -40,16 +40,17 @@ config ALTERA_PR_IP_CORE_PLAT
 config FPGA_MGR_ALTERA_PS_SPI
 	tristate "Altera FPGA Passive Serial over SPI"
 	depends on SPI
+	select BITREVERSE
 	help
 	  FPGA manager driver support for Altera Arria/Cyclone/Stratix
 	  using the passive serial interface over SPI.
 
 config FPGA_MGR_ALTERA_CVP
-	tristate "Altera Arria-V/Cyclone-V/Stratix-V CvP FPGA Manager"
+	tristate "Altera CvP FPGA Manager"
 	depends on PCI
 	help
-	  FPGA manager driver support for Arria-V, Cyclone-V, Stratix-V
-	  and Arria 10 Altera FPGAs using the CvP interface over PCIe.
+	  FPGA manager driver support for Arria-V, Cyclone-V, Stratix-V,
+	  Arria 10 and Stratix10 Altera FPGAs using the CvP interface over PCIe.
 
 config FPGA_MGR_ZYNQ_FPGA
 	tristate "Xilinx Zynq FPGA"
diff --git a/drivers/fpga/Makefile b/drivers/fpga/Makefile
index 312b9371742f..4865b74b00a4 100644
--- a/drivers/fpga/Makefile
+++ b/drivers/fpga/Makefile
@@ -39,8 +39,9 @@ obj-$(CONFIG_FPGA_DFL_FME_BRIDGE)	+= dfl-fme-br.o
 obj-$(CONFIG_FPGA_DFL_FME_REGION)	+= dfl-fme-region.o
 obj-$(CONFIG_FPGA_DFL_AFU)		+= dfl-afu.o
 
-dfl-fme-objs := dfl-fme-main.o dfl-fme-pr.o
+dfl-fme-objs := dfl-fme-main.o dfl-fme-pr.o dfl-fme-error.o
 dfl-afu-objs := dfl-afu-main.o dfl-afu-region.o dfl-afu-dma-region.o
+dfl-afu-objs += dfl-afu-error.o
 
 # Drivers for FPGAs which implement DFL
 obj-$(CONFIG_FPGA_DFL_PCI)		+= dfl-pci.o
diff --git a/drivers/fpga/altera-cvp.c b/drivers/fpga/altera-cvp.c
index 770915fb97f9..4e0edb60bfba 100644
--- a/drivers/fpga/altera-cvp.c
+++ b/drivers/fpga/altera-cvp.c
@@ -22,10 +22,10 @@
 #define TIMEOUT_US	2000	/* CVP STATUS timeout for USERMODE polling */
 
 /* Vendor Specific Extended Capability Registers */
-#define VSE_PCIE_EXT_CAP_ID		0x200
+#define VSE_PCIE_EXT_CAP_ID		0x0
 #define VSE_PCIE_EXT_CAP_ID_VAL		0x000b	/* 16bit */
 
-#define VSE_CVP_STATUS			0x21c	/* 32bit */
+#define VSE_CVP_STATUS			0x1c	/* 32bit */
 #define VSE_CVP_STATUS_CFG_RDY		BIT(18)	/* CVP_CONFIG_READY */
 #define VSE_CVP_STATUS_CFG_ERR		BIT(19)	/* CVP_CONFIG_ERROR */
 #define VSE_CVP_STATUS_CVP_EN		BIT(20)	/* ctrl block is enabling CVP */
@@ -33,41 +33,93 @@
 #define VSE_CVP_STATUS_CFG_DONE		BIT(23)	/* CVP_CONFIG_DONE */
 #define VSE_CVP_STATUS_PLD_CLK_IN_USE	BIT(24)	/* PLD_CLK_IN_USE */
 
-#define VSE_CVP_MODE_CTRL		0x220	/* 32bit */
+#define VSE_CVP_MODE_CTRL		0x20	/* 32bit */
 #define VSE_CVP_MODE_CTRL_CVP_MODE	BIT(0)	/* CVP (1) or normal mode (0) */
 #define VSE_CVP_MODE_CTRL_HIP_CLK_SEL	BIT(1) /* PMA (1) or fabric clock (0) */
 #define VSE_CVP_MODE_CTRL_NUMCLKS_OFF	8	/* NUMCLKS bits offset */
 #define VSE_CVP_MODE_CTRL_NUMCLKS_MASK	GENMASK(15, 8)
 
-#define VSE_CVP_DATA			0x228	/* 32bit */
-#define VSE_CVP_PROG_CTRL		0x22c	/* 32bit */
+#define VSE_CVP_DATA			0x28	/* 32bit */
+#define VSE_CVP_PROG_CTRL		0x2c	/* 32bit */
 #define VSE_CVP_PROG_CTRL_CONFIG	BIT(0)
 #define VSE_CVP_PROG_CTRL_START_XFER	BIT(1)
+#define VSE_CVP_PROG_CTRL_MASK		GENMASK(1, 0)
 
-#define VSE_UNCOR_ERR_STATUS		0x234	/* 32bit */
+#define VSE_UNCOR_ERR_STATUS		0x34	/* 32bit */
 #define VSE_UNCOR_ERR_CVP_CFG_ERR	BIT(5)	/* CVP_CONFIG_ERROR_LATCHED */
 
+#define V1_VSEC_OFFSET			0x200	/* Vendor Specific Offset V1 */
+/* V2 Defines */
+#define VSE_CVP_TX_CREDITS		0x49	/* 8bit */
+
+#define V2_CREDIT_TIMEOUT_US		20000
+#define V2_CHECK_CREDIT_US		10
+#define V2_POLL_TIMEOUT_US		1000000
+#define V2_USER_TIMEOUT_US		500000
+
+#define V1_POLL_TIMEOUT_US		10
+
 #define DRV_NAME		"altera-cvp"
 #define ALTERA_CVP_MGR_NAME	"Altera CvP FPGA Manager"
 
+/* Write block sizes */
+#define ALTERA_CVP_V1_SIZE	4
+#define ALTERA_CVP_V2_SIZE	4096
+
 /* Optional CvP config error status check for debugging */
 static bool altera_cvp_chkcfg;
 
+struct cvp_priv;
+
 struct altera_cvp_conf {
 	struct fpga_manager	*mgr;
 	struct pci_dev		*pci_dev;
 	void __iomem		*map;
-	void			(*write_data)(struct altera_cvp_conf *, u32);
+	void			(*write_data)(struct altera_cvp_conf *conf,
+					      u32 data);
 	char			mgr_name[64];
 	u8			numclks;
+	u32			sent_packets;
+	u32			vsec_offset;
+	const struct cvp_priv	*priv;
+};
+
+struct cvp_priv {
+	void	(*switch_clk)(struct altera_cvp_conf *conf);
+	int	(*clear_state)(struct altera_cvp_conf *conf);
+	int	(*wait_credit)(struct fpga_manager *mgr, u32 blocks);
+	size_t	block_size;
+	int	poll_time_us;
+	int	user_time_us;
 };
 
+static int altera_read_config_byte(struct altera_cvp_conf *conf,
+				   int where, u8 *val)
+{
+	return pci_read_config_byte(conf->pci_dev, conf->vsec_offset + where,
+				    val);
+}
+
+static int altera_read_config_dword(struct altera_cvp_conf *conf,
+				    int where, u32 *val)
+{
+	return pci_read_config_dword(conf->pci_dev, conf->vsec_offset + where,
+				     val);
+}
+
+static int altera_write_config_dword(struct altera_cvp_conf *conf,
+				     int where, u32 val)
+{
+	return pci_write_config_dword(conf->pci_dev, conf->vsec_offset + where,
+				      val);
+}
+
 static enum fpga_mgr_states altera_cvp_state(struct fpga_manager *mgr)
 {
 	struct altera_cvp_conf *conf = mgr->priv;
 	u32 status;
 
-	pci_read_config_dword(conf->pci_dev, VSE_CVP_STATUS, &status);
+	altera_read_config_dword(conf, VSE_CVP_STATUS, &status);
 
 	if (status & VSE_CVP_STATUS_CFG_DONE)
 		return FPGA_MGR_STATE_OPERATING;
@@ -85,7 +137,8 @@ static void altera_cvp_write_data_iomem(struct altera_cvp_conf *conf, u32 val)
 
 static void altera_cvp_write_data_config(struct altera_cvp_conf *conf, u32 val)
 {
-	pci_write_config_dword(conf->pci_dev, VSE_CVP_DATA, val);
+	pci_write_config_dword(conf->pci_dev, conf->vsec_offset + VSE_CVP_DATA,
+			       val);
 }
 
 /* switches between CvP clock and internal clock */
@@ -95,10 +148,10 @@ static void altera_cvp_dummy_write(struct altera_cvp_conf *conf)
 	u32 val;
 
 	/* set 1 CVP clock cycle for every CVP Data Register Write */
-	pci_read_config_dword(conf->pci_dev, VSE_CVP_MODE_CTRL, &val);
+	altera_read_config_dword(conf, VSE_CVP_MODE_CTRL, &val);
 	val &= ~VSE_CVP_MODE_CTRL_NUMCLKS_MASK;
 	val |= 1 << VSE_CVP_MODE_CTRL_NUMCLKS_OFF;
-	pci_write_config_dword(conf->pci_dev, VSE_CVP_MODE_CTRL, val);
+	altera_write_config_dword(conf, VSE_CVP_MODE_CTRL, val);
 
 	for (i = 0; i < CVP_DUMMY_WR; i++)
 		conf->write_data(conf, 0); /* dummy data, could be any value */
@@ -115,7 +168,7 @@ static int altera_cvp_wait_status(struct altera_cvp_conf *conf, u32 status_mask,
 		retries++;
 
 	do {
-		pci_read_config_dword(conf->pci_dev, VSE_CVP_STATUS, &val);
+		altera_read_config_dword(conf, VSE_CVP_STATUS, &val);
 		if ((val & status_mask) == status_val)
 			return 0;
 
@@ -126,32 +179,136 @@ static int altera_cvp_wait_status(struct altera_cvp_conf *conf, u32 status_mask,
 	return -ETIMEDOUT;
 }
 
+static int altera_cvp_chk_error(struct fpga_manager *mgr, size_t bytes)
+{
+	struct altera_cvp_conf *conf = mgr->priv;
+	u32 val;
+	int ret;
+
+	/* STEP 10 (optional) - check CVP_CONFIG_ERROR flag */
+	ret = altera_read_config_dword(conf, VSE_CVP_STATUS, &val);
+	if (ret || (val & VSE_CVP_STATUS_CFG_ERR)) {
+		dev_err(&mgr->dev, "CVP_CONFIG_ERROR after %zu bytes!\n",
+			bytes);
+		return -EPROTO;
+	}
+	return 0;
+}
+
+/*
+ * CvP Version2 Functions
+ * Recent Intel FPGAs use a credit mechanism to throttle incoming
+ * bitstreams and a different method of clearing the state.
+ */
+
+static int altera_cvp_v2_clear_state(struct altera_cvp_conf *conf)
+{
+	u32 val;
+	int ret;
+
+	/* Clear the START_XFER and CVP_CONFIG bits */
+	ret = altera_read_config_dword(conf, VSE_CVP_PROG_CTRL, &val);
+	if (ret) {
+		dev_err(&conf->pci_dev->dev,
+			"Error reading CVP Program Control Register\n");
+		return ret;
+	}
+
+	val &= ~VSE_CVP_PROG_CTRL_MASK;
+	ret = altera_write_config_dword(conf, VSE_CVP_PROG_CTRL, val);
+	if (ret) {
+		dev_err(&conf->pci_dev->dev,
+			"Error writing CVP Program Control Register\n");
+		return ret;
+	}
+
+	return altera_cvp_wait_status(conf, VSE_CVP_STATUS_CFG_RDY, 0,
+				      conf->priv->poll_time_us);
+}
+
+static int altera_cvp_v2_wait_for_credit(struct fpga_manager *mgr,
+					 u32 blocks)
+{
+	u32 timeout = V2_CREDIT_TIMEOUT_US / V2_CHECK_CREDIT_US;
+	struct altera_cvp_conf *conf = mgr->priv;
+	int ret;
+	u8 val;
+
+	do {
+		ret = altera_read_config_byte(conf, VSE_CVP_TX_CREDITS, &val);
+		if (ret) {
+			dev_err(&conf->pci_dev->dev,
+				"Error reading CVP Credit Register\n");
+			return ret;
+		}
+
+		/* Return if there is space in FIFO */
+		if (val - (u8)conf->sent_packets)
+			return 0;
+
+		ret = altera_cvp_chk_error(mgr, blocks * ALTERA_CVP_V2_SIZE);
+		if (ret) {
+			dev_err(&conf->pci_dev->dev,
+				"CE Bit error credit reg[0x%x]:sent[0x%x]\n",
+				val, conf->sent_packets);
+			return -EAGAIN;
+		}
+
+		/* Limit the check credit byte traffic */
+		usleep_range(V2_CHECK_CREDIT_US, V2_CHECK_CREDIT_US + 1);
+	} while (timeout--);
+
+	dev_err(&conf->pci_dev->dev, "Timeout waiting for credit\n");
+	return -ETIMEDOUT;
+}
+
+static int altera_cvp_send_block(struct altera_cvp_conf *conf,
+				 const u32 *data, size_t len)
+{
+	u32 mask, words = len / sizeof(u32);
+	int i, remainder;
+
+	for (i = 0; i < words; i++)
+		conf->write_data(conf, *data++);
+
+	/* write up to 3 trailing bytes, if any */
+	remainder = len % sizeof(u32);
+	if (remainder) {
+		mask = BIT(remainder * 8) - 1;
+		if (mask)
+			conf->write_data(conf, *data & mask);
+	}
+
+	return 0;
+}
+
 static int altera_cvp_teardown(struct fpga_manager *mgr,
 			       struct fpga_image_info *info)
 {
 	struct altera_cvp_conf *conf = mgr->priv;
-	struct pci_dev *pdev = conf->pci_dev;
 	int ret;
 	u32 val;
 
 	/* STEP 12 - reset START_XFER bit */
-	pci_read_config_dword(pdev, VSE_CVP_PROG_CTRL, &val);
+	altera_read_config_dword(conf, VSE_CVP_PROG_CTRL, &val);
 	val &= ~VSE_CVP_PROG_CTRL_START_XFER;
-	pci_write_config_dword(pdev, VSE_CVP_PROG_CTRL, val);
+	altera_write_config_dword(conf, VSE_CVP_PROG_CTRL, val);
 
 	/* STEP 13 - reset CVP_CONFIG bit */
 	val &= ~VSE_CVP_PROG_CTRL_CONFIG;
-	pci_write_config_dword(pdev, VSE_CVP_PROG_CTRL, val);
+	altera_write_config_dword(conf, VSE_CVP_PROG_CTRL, val);
 
 	/*
 	 * STEP 14
 	 * - set CVP_NUMCLKS to 1 and then issue CVP_DUMMY_WR dummy
 	 *   writes to the HIP
 	 */
-	altera_cvp_dummy_write(conf); /* from CVP clock to internal clock */
+	if (conf->priv->switch_clk)
+		conf->priv->switch_clk(conf);
 
 	/* STEP 15 - poll CVP_CONFIG_READY bit for 0 with 10us timeout */
-	ret = altera_cvp_wait_status(conf, VSE_CVP_STATUS_CFG_RDY, 0, 10);
+	ret = altera_cvp_wait_status(conf, VSE_CVP_STATUS_CFG_RDY, 0,
+				     conf->priv->poll_time_us);
 	if (ret)
 		dev_err(&mgr->dev, "CFG_RDY == 0 timeout\n");
 
@@ -163,7 +320,6 @@ static int altera_cvp_write_init(struct fpga_manager *mgr,
 				 const char *buf, size_t count)
 {
 	struct altera_cvp_conf *conf = mgr->priv;
-	struct pci_dev *pdev = conf->pci_dev;
 	u32 iflags, val;
 	int ret;
 
@@ -183,7 +339,7 @@ static int altera_cvp_write_init(struct fpga_manager *mgr,
 		conf->numclks = 1; /* for uncompressed and unencrypted images */
 
 	/* STEP 1 - read CVP status and check CVP_EN flag */
-	pci_read_config_dword(pdev, VSE_CVP_STATUS, &val);
+	altera_read_config_dword(conf, VSE_CVP_STATUS, &val);
 	if (!(val & VSE_CVP_STATUS_CVP_EN)) {
 		dev_err(&mgr->dev, "CVP mode off: 0x%04x\n", val);
 		return -ENODEV;
@@ -201,30 +357,42 @@ static int altera_cvp_write_init(struct fpga_manager *mgr,
 	 * - set HIP_CLK_SEL and CVP_MODE (must be set in the order mentioned)
 	 */
 	/* switch from fabric to PMA clock */
-	pci_read_config_dword(pdev, VSE_CVP_MODE_CTRL, &val);
+	altera_read_config_dword(conf, VSE_CVP_MODE_CTRL, &val);
 	val |= VSE_CVP_MODE_CTRL_HIP_CLK_SEL;
-	pci_write_config_dword(pdev, VSE_CVP_MODE_CTRL, val);
+	altera_write_config_dword(conf, VSE_CVP_MODE_CTRL, val);
 
 	/* set CVP mode */
-	pci_read_config_dword(pdev, VSE_CVP_MODE_CTRL, &val);
+	altera_read_config_dword(conf, VSE_CVP_MODE_CTRL, &val);
 	val |= VSE_CVP_MODE_CTRL_CVP_MODE;
-	pci_write_config_dword(pdev, VSE_CVP_MODE_CTRL, val);
+	altera_write_config_dword(conf, VSE_CVP_MODE_CTRL, val);
 
 	/*
 	 * STEP 3
 	 * - set CVP_NUMCLKS to 1 and issue CVP_DUMMY_WR dummy writes to the HIP
 	 */
-	altera_cvp_dummy_write(conf);
+	if (conf->priv->switch_clk)
+		conf->priv->switch_clk(conf);
+
+	if (conf->priv->clear_state) {
+		ret = conf->priv->clear_state(conf);
+		if (ret) {
+			dev_err(&mgr->dev, "Problem clearing out state\n");
+			return ret;
+		}
+	}
+
+	conf->sent_packets = 0;
 
 	/* STEP 4 - set CVP_CONFIG bit */
-	pci_read_config_dword(pdev, VSE_CVP_PROG_CTRL, &val);
+	altera_read_config_dword(conf, VSE_CVP_PROG_CTRL, &val);
 	/* request control block to begin transfer using CVP */
 	val |= VSE_CVP_PROG_CTRL_CONFIG;
-	pci_write_config_dword(pdev, VSE_CVP_PROG_CTRL, val);
+	altera_write_config_dword(conf, VSE_CVP_PROG_CTRL, val);
 
-	/* STEP 5 - poll CVP_CONFIG READY for 1 with 10us timeout */
+	/* STEP 5 - poll CVP_CONFIG READY for 1 with timeout */
 	ret = altera_cvp_wait_status(conf, VSE_CVP_STATUS_CFG_RDY,
-				     VSE_CVP_STATUS_CFG_RDY, 10);
+				     VSE_CVP_STATUS_CFG_RDY,
+				     conf->priv->poll_time_us);
 	if (ret) {
 		dev_warn(&mgr->dev, "CFG_RDY == 1 timeout\n");
 		return ret;
@@ -234,33 +402,28 @@ static int altera_cvp_write_init(struct fpga_manager *mgr,
 	 * STEP 6
 	 * - set CVP_NUMCLKS to 1 and issue CVP_DUMMY_WR dummy writes to the HIP
 	 */
-	altera_cvp_dummy_write(conf);
+	if (conf->priv->switch_clk)
+		conf->priv->switch_clk(conf);
+
+	if (altera_cvp_chkcfg) {
+		ret = altera_cvp_chk_error(mgr, 0);
+		if (ret) {
+			dev_warn(&mgr->dev, "CFG_RDY == 1 timeout\n");
+			return ret;
+		}
+	}
 
 	/* STEP 7 - set START_XFER */
-	pci_read_config_dword(pdev, VSE_CVP_PROG_CTRL, &val);
+	altera_read_config_dword(conf, VSE_CVP_PROG_CTRL, &val);
 	val |= VSE_CVP_PROG_CTRL_START_XFER;
-	pci_write_config_dword(pdev, VSE_CVP_PROG_CTRL, val);
+	altera_write_config_dword(conf, VSE_CVP_PROG_CTRL, val);
 
 	/* STEP 8 - start transfer (set CVP_NUMCLKS for bitstream) */
-	pci_read_config_dword(pdev, VSE_CVP_MODE_CTRL, &val);
-	val &= ~VSE_CVP_MODE_CTRL_NUMCLKS_MASK;
-	val |= conf->numclks << VSE_CVP_MODE_CTRL_NUMCLKS_OFF;
-	pci_write_config_dword(pdev, VSE_CVP_MODE_CTRL, val);
-
-	return 0;
-}
-
-static inline int altera_cvp_chk_error(struct fpga_manager *mgr, size_t bytes)
-{
-	struct altera_cvp_conf *conf = mgr->priv;
-	u32 val;
-
-	/* STEP 10 (optional) - check CVP_CONFIG_ERROR flag */
-	pci_read_config_dword(conf->pci_dev, VSE_CVP_STATUS, &val);
-	if (val & VSE_CVP_STATUS_CFG_ERR) {
-		dev_err(&mgr->dev, "CVP_CONFIG_ERROR after %zu bytes!\n",
-			bytes);
-		return -EPROTO;
+	if (conf->priv->switch_clk) {
+		altera_read_config_dword(conf, VSE_CVP_MODE_CTRL, &val);
+		val &= ~VSE_CVP_MODE_CTRL_NUMCLKS_MASK;
+		val |= conf->numclks << VSE_CVP_MODE_CTRL_NUMCLKS_OFF;
+		altera_write_config_dword(conf, VSE_CVP_MODE_CTRL, val);
 	}
 
 	return 0;
 }
@@ -269,20 +432,32 @@ static int altera_cvp_write(struct fpga_manager *mgr, const char *buf,
 			    size_t count)
 {
 	struct altera_cvp_conf *conf = mgr->priv;
+	size_t done, remaining, len;
 	const u32 *data;
-	size_t done, remaining;
 	int status = 0;
-	u32 mask;
 
 	/* STEP 9 - write 32-bit data from RBF file to CVP data register */
 	data = (u32 *)buf;
 	remaining = count;
 	done = 0;
 
-	while (remaining >= 4) {
-		conf->write_data(conf, *data++);
-		done += 4;
-		remaining -= 4;
+	while (remaining) {
+		/* Use credit throttling if available */
+		if (conf->priv->wait_credit) {
+			status = conf->priv->wait_credit(mgr, done);
+			if (status) {
+				dev_err(&conf->pci_dev->dev,
+					"Wait Credit ERR: 0x%x\n", status);
+				return status;
+			}
+		}
+
+		len = min(conf->priv->block_size, remaining);
+		altera_cvp_send_block(conf, data, len);
+		data += len / sizeof(u32);
+		done += len;
+		remaining -= len;
+		conf->sent_packets++;
 
 		/*
 		 * STEP 10 (optional) and STEP 11
@@ -300,11 +475,6 @@ static int altera_cvp_write(struct fpga_manager *mgr, const char *buf,
 		}
 	}
 
-	/* write up to 3 trailing bytes, if any */
-	mask = BIT(remaining * 8) - 1;
-	if (mask)
-		conf->write_data(conf, *data & mask);
-
 	if (altera_cvp_chkcfg)
 		status = altera_cvp_chk_error(mgr, count);
 
@@ -315,31 +485,30 @@ static int altera_cvp_write_complete(struct fpga_manager *mgr,
 				     struct fpga_image_info *info)
 {
 	struct altera_cvp_conf *conf = mgr->priv;
-	struct pci_dev *pdev = conf->pci_dev;
+	u32 mask, val;
 	int ret;
-	u32 mask;
-	u32 val;
 
 	ret = altera_cvp_teardown(mgr, info);
 	if (ret)
 		return ret;
 
 	/* STEP 16 - check CVP_CONFIG_ERROR_LATCHED bit */
-	pci_read_config_dword(pdev, VSE_UNCOR_ERR_STATUS, &val);
+	altera_read_config_dword(conf, VSE_UNCOR_ERR_STATUS, &val);
 	if (val & VSE_UNCOR_ERR_CVP_CFG_ERR) {
 		dev_err(&mgr->dev, "detected CVP_CONFIG_ERROR_LATCHED!\n");
 		return -EPROTO;
 	}
 
 	/* STEP 17 - reset CVP_MODE and HIP_CLK_SEL bit */
-	pci_read_config_dword(pdev, VSE_CVP_MODE_CTRL, &val);
+	altera_read_config_dword(conf, VSE_CVP_MODE_CTRL, &val);
 	val &= ~VSE_CVP_MODE_CTRL_HIP_CLK_SEL;
 	val &= ~VSE_CVP_MODE_CTRL_CVP_MODE;
-	pci_write_config_dword(pdev, VSE_CVP_MODE_CTRL, val);
+	altera_write_config_dword(conf, VSE_CVP_MODE_CTRL, val);
 
 	/* STEP 18 - poll PLD_CLK_IN_USE and USER_MODE bits */
 	mask = VSE_CVP_STATUS_PLD_CLK_IN_USE | VSE_CVP_STATUS_USERMODE;
-	ret = altera_cvp_wait_status(conf, mask, mask, TIMEOUT_US);
+	ret = altera_cvp_wait_status(conf, mask, mask,
+				     conf->priv->user_time_us);
 	if (ret)
 		dev_err(&mgr->dev, "PLD_CLK_IN_USE|USERMODE timeout\n");
 
@@ -353,6 +522,21 @@ static const struct fpga_manager_ops altera_cvp_ops = {
 	.write_complete	= altera_cvp_write_complete,
 };
 
+static const struct cvp_priv cvp_priv_v1 = {
+	.switch_clk	= altera_cvp_dummy_write,
+	.block_size	= ALTERA_CVP_V1_SIZE,
+	.poll_time_us	= V1_POLL_TIMEOUT_US,
+	.user_time_us	= TIMEOUT_US,
+};
+
+static const struct cvp_priv cvp_priv_v2 = {
+	.clear_state	= altera_cvp_v2_clear_state,
+	.wait_credit	= altera_cvp_v2_wait_for_credit,
+	.block_size	= ALTERA_CVP_V2_SIZE,
+	.poll_time_us	= V2_POLL_TIMEOUT_US,
+	.user_time_us	= V2_USER_TIMEOUT_US,
+};
+
 static ssize_t chkcfg_show(struct device_driver *dev, char *buf)
 {
 	return snprintf(buf, 3, "%d\n", altera_cvp_chkcfg);
@@ -394,22 +578,29 @@ static int altera_cvp_probe(struct pci_dev *pdev,
 {
 	struct altera_cvp_conf *conf;
 	struct fpga_manager *mgr;
+	int ret, offset;
 	u16 cmd, val;
 	u32 regval;
-	int ret;
+
+	/* Discover the Vendor Specific Offset for this device */
+	offset = pci_find_next_ext_capability(pdev, 0, PCI_EXT_CAP_ID_VNDR);
+	if (!offset) {
+		dev_err(&pdev->dev, "No Vendor Specific Offset.\n");
+		return -ENODEV;
+	}
 
 	/*
 	 * First check if this is the expected FPGA device. PCI config
 	 * space access works without enabling the PCI device, memory
 	 * space access is enabled further down.
 	 */
-	pci_read_config_word(pdev, VSE_PCIE_EXT_CAP_ID, &val);
+	pci_read_config_word(pdev, offset + VSE_PCIE_EXT_CAP_ID, &val);
 	if (val != VSE_PCIE_EXT_CAP_ID_VAL) {
 		dev_err(&pdev->dev, "Wrong EXT_CAP_ID value 0x%x\n", val);
 		return -ENODEV;
 	}
 
-	pci_read_config_dword(pdev, VSE_CVP_STATUS, &regval);
+	pci_read_config_dword(pdev, offset + VSE_CVP_STATUS, &regval);
 	if (!(regval & VSE_CVP_STATUS_CVP_EN)) {
 		dev_err(&pdev->dev,
 			"CVP is disabled for this device: CVP_STATUS Reg 0x%x\n",
@@ -421,6 +612,8 @@ static int altera_cvp_probe(struct pci_dev *pdev,
 	if (!conf)
 		return -ENOMEM;
 
+	conf->vsec_offset = offset;
+
 	/*
	 * Enable memory BAR access. We cannot use pci_enable_device() here
	 * because it will make the driver unusable with FPGA devices that
@@ -445,6 +638,11 @@ static int altera_cvp_probe(struct pci_dev *pdev,
 	conf->pci_dev = pdev;
 	conf->write_data = altera_cvp_write_data_iomem;
 
+	if (conf->vsec_offset == V1_VSEC_OFFSET)
+		conf->priv = &cvp_priv_v1;
+	else
+		conf->priv = &cvp_priv_v2;
+
 	conf->map = pci_iomap(pdev, CVP_BAR, 0);
 	if (!conf->map) {
 		dev_warn(&pdev->dev, "Mapping CVP BAR failed\n");
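The probe() rework above replaces the hard-coded 0x200 register block with a discovered VSEC offset, which is also how V1 and V2 parts are told apart. A condensed, illustrative-only sketch of that discovery flow, assuming just the macros and cvp_priv tables defined in this patch (the helper name cvp_select_priv() is not part of the driver):

/* Illustrative only: locate the CvP VSEC and pick the register set. */
static const struct cvp_priv *cvp_select_priv(struct pci_dev *pdev,
					      u32 *vsec_offset)
{
	u16 val;
	int offset;

	/* first Vendor Specific Extended Capability in config space */
	offset = pci_find_next_ext_capability(pdev, 0, PCI_EXT_CAP_ID_VNDR);
	if (!offset)
		return NULL;

	pci_read_config_word(pdev, offset + VSE_PCIE_EXT_CAP_ID, &val);
	if (val != VSE_PCIE_EXT_CAP_ID_VAL)
		return NULL;

	*vsec_offset = offset;

	/* V1 silicon places the VSEC at 0x200; everything else is V2 */
	return offset == V1_VSEC_OFFSET ? &cvp_priv_v1 : &cvp_priv_v2;
}

All register accesses then go through the altera_*_config_*() wrappers above, so the same step sequence works for both register layouts.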
diff --git a/drivers/fpga/altera-pr-ip-core-plat.c b/drivers/fpga/altera-pr-ip-core-plat.c
index b293d83143f1..99b9cc0e70f0 100644
--- a/drivers/fpga/altera-pr-ip-core-plat.c
+++ b/drivers/fpga/altera-pr-ip-core-plat.c
@@ -32,7 +32,9 @@ static int alt_pr_platform_remove(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
 
-	return alt_pr_unregister(dev);
+	alt_pr_unregister(dev);
+
+	return 0;
 }
 
 static const struct of_device_id alt_pr_of_match[] = {
diff --git a/drivers/fpga/altera-pr-ip-core.c b/drivers/fpga/altera-pr-ip-core.c
index a7a3bf0b5202..2cf25fd5e897 100644
--- a/drivers/fpga/altera-pr-ip-core.c
+++ b/drivers/fpga/altera-pr-ip-core.c
@@ -201,15 +201,13 @@ int alt_pr_register(struct device *dev, void __iomem *reg_base)
 }
 EXPORT_SYMBOL_GPL(alt_pr_register);
 
-int alt_pr_unregister(struct device *dev)
+void alt_pr_unregister(struct device *dev)
 {
 	struct fpga_manager *mgr = dev_get_drvdata(dev);
 
 	dev_dbg(dev, "%s\n", __func__);
 
 	fpga_mgr_unregister(mgr);
-
-	return 0;
 }
 EXPORT_SYMBOL_GPL(alt_pr_unregister);
diff --git a/drivers/fpga/altera-ps-spi.c b/drivers/fpga/altera-ps-spi.c
index a13f224303c6..0221dee8dd4c 100644
--- a/drivers/fpga/altera-ps-spi.c
+++ b/drivers/fpga/altera-ps-spi.c
@@ -210,7 +210,7 @@ static int altera_ps_write_complete(struct fpga_manager *mgr,
 		return -EIO;
 	}
 
-	if (!IS_ERR(conf->confd)) {
+	if (conf->confd) {
 		if (!gpiod_get_raw_value_cansleep(conf->confd)) {
 			dev_err(&mgr->dev, "CONF_DONE is inactive!\n");
 			return -EIO;
@@ -289,10 +289,13 @@ static int altera_ps_probe(struct spi_device *spi)
 		return PTR_ERR(conf->status);
 	}
 
-	conf->confd = devm_gpiod_get(&spi->dev, "confd", GPIOD_IN);
+	conf->confd = devm_gpiod_get_optional(&spi->dev, "confd", GPIOD_IN);
 	if (IS_ERR(conf->confd)) {
-		dev_warn(&spi->dev, "Not using confd gpio: %ld\n",
-			 PTR_ERR(conf->confd));
+		dev_err(&spi->dev, "Failed to get confd gpio: %ld\n",
+			PTR_ERR(conf->confd));
+		return PTR_ERR(conf->confd);
+	} else if (!conf->confd) {
+		dev_warn(&spi->dev, "Not using confd gpio");
 	}
 
 	/* Register manager with unique name */
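The confd hunk above adopts the standard optional-GPIO idiom: with devm_gpiod_get_optional(), a line that is simply not described comes back as NULL and the feature is skipped, while an error pointer (including -EPROBE_DEFER) now correctly fails the probe. The bare shape of the pattern, as a sketch outside any particular probe function:

	struct gpio_desc *confd;

	confd = devm_gpiod_get_optional(dev, "confd", GPIOD_IN);
	if (IS_ERR(confd))
		return PTR_ERR(confd);	/* real failure, e.g. -EPROBE_DEFER */

	if (!confd)
		dev_warn(dev, "not using confd gpio\n");	/* simply absent */
	else if (!gpiod_get_raw_value_cansleep(confd))
		dev_err(dev, "CONF_DONE is inactive!\n");

This is why the write_complete() check above can drop IS_ERR() and test for NULL instead.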
diff --git a/drivers/fpga/dfl-afu-error.c b/drivers/fpga/dfl-afu-error.c
new file mode 100644
index 000000000000..c1467ae1a6b6
--- /dev/null
+++ b/drivers/fpga/dfl-afu-error.c
@@ -0,0 +1,230 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for FPGA Accelerated Function Unit (AFU) Error Reporting
+ *
+ * Copyright 2019 Intel Corporation, Inc.
+ *
+ * Authors:
+ *   Wu Hao <[email protected]>
+ *   Xiao Guangrong <[email protected]>
+ *   Joseph Grecco <[email protected]>
+ *   Enno Luebbers <[email protected]>
+ *   Tim Whisonant <[email protected]>
+ *   Ananda Ravuri <[email protected]>
+ *   Mitchel Henry <[email protected]>
+ */
+
+#include <linux/uaccess.h>
+
+#include "dfl-afu.h"
+
+#define PORT_ERROR_MASK		0x8
+#define PORT_ERROR		0x10
+#define PORT_FIRST_ERROR	0x18
+#define PORT_MALFORMED_REQ0	0x20
+#define PORT_MALFORMED_REQ1	0x28
+
+#define ERROR_MASK		GENMASK_ULL(63, 0)
+
+/* mask or unmask port errors by the error mask register. */
+static void __afu_port_err_mask(struct device *dev, bool mask)
+{
+	void __iomem *base;
+
+	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR);
+
+	writeq(mask ? ERROR_MASK : 0, base + PORT_ERROR_MASK);
+}
+
+static void afu_port_err_mask(struct device *dev, bool mask)
+{
+	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+
+	mutex_lock(&pdata->lock);
+	__afu_port_err_mask(dev, mask);
+	mutex_unlock(&pdata->lock);
+}
+
+/* clear port errors. */
+static int afu_port_err_clear(struct device *dev, u64 err)
+{
+	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+	struct platform_device *pdev = to_platform_device(dev);
+	void __iomem *base_err, *base_hdr;
+	int ret = -EBUSY;
+	u64 v;
+
+	base_err = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR);
+	base_hdr = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+
+	mutex_lock(&pdata->lock);
+
+	/*
+	 * clear Port Errors
+	 *
+	 * - Check for AP6 State
+	 * - Halt Port by keeping Port in reset
+	 * - Set PORT Error mask to all 1 to mask errors
+	 * - Clear all errors
+	 * - Set Port mask to all 0 to enable errors
+	 * - All errors start capturing new errors
+	 * - Enable Port by pulling the port out of reset
+	 */
+
+	/* if device is still in AP6 power state, can not clear any error. */
+	v = readq(base_hdr + PORT_HDR_STS);
+	if (FIELD_GET(PORT_STS_PWR_STATE, v) == PORT_STS_PWR_STATE_AP6) {
+		dev_err(dev, "Could not clear errors, device in AP6 state.\n");
+		goto done;
+	}
+
+	/* Halt Port by keeping Port in reset */
+	ret = __afu_port_disable(pdev);
+	if (ret)
+		goto done;
+
+	/* Mask all errors */
+	__afu_port_err_mask(dev, true);
+
+	/* Clear errors if err input matches with current port errors.*/
+	v = readq(base_err + PORT_ERROR);
+
+	if (v == err) {
+		writeq(v, base_err + PORT_ERROR);
+
+		v = readq(base_err + PORT_FIRST_ERROR);
+		writeq(v, base_err + PORT_FIRST_ERROR);
+	} else {
+		ret = -EINVAL;
+	}
+
+	/* Clear mask */
+	__afu_port_err_mask(dev, false);
+
+	/* Enable the Port by clear the reset */
+	__afu_port_enable(pdev);
+
+done:
+	mutex_unlock(&pdata->lock);
+	return ret;
+}
+
+static ssize_t errors_show(struct device *dev, struct device_attribute *attr,
+			   char *buf)
+{
+	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+	void __iomem *base;
+	u64 error;
+
+	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR);
+
+	mutex_lock(&pdata->lock);
+	error = readq(base + PORT_ERROR);
+	mutex_unlock(&pdata->lock);
+
+	return sprintf(buf, "0x%llx\n", (unsigned long long)error);
+}
+
+static ssize_t errors_store(struct device *dev, struct device_attribute *attr,
+			    const char *buff, size_t count)
+{
+	u64 value;
+	int ret;
+
+	if (kstrtou64(buff, 0, &value))
+		return -EINVAL;
+
+	ret = afu_port_err_clear(dev, value);
+
+	return ret ? ret : count;
+}
+static DEVICE_ATTR_RW(errors);
+
+static ssize_t first_error_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+	void __iomem *base;
+	u64 error;
+
+	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR);
+
+	mutex_lock(&pdata->lock);
+	error = readq(base + PORT_FIRST_ERROR);
+	mutex_unlock(&pdata->lock);
+
+	return sprintf(buf, "0x%llx\n", (unsigned long long)error);
+}
+static DEVICE_ATTR_RO(first_error);
+
+static ssize_t first_malformed_req_show(struct device *dev,
+					struct device_attribute *attr,
+					char *buf)
+{
+	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+	void __iomem *base;
+	u64 req0, req1;
+
+	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR);
+
+	mutex_lock(&pdata->lock);
+	req0 = readq(base + PORT_MALFORMED_REQ0);
+	req1 = readq(base + PORT_MALFORMED_REQ1);
+	mutex_unlock(&pdata->lock);
+
+	return sprintf(buf, "0x%016llx%016llx\n",
+		       (unsigned long long)req1, (unsigned long long)req0);
+}
+static DEVICE_ATTR_RO(first_malformed_req);
+
+static struct attribute *port_err_attrs[] = {
+	&dev_attr_errors.attr,
+	&dev_attr_first_error.attr,
+	&dev_attr_first_malformed_req.attr,
+	NULL,
+};
+
+static umode_t port_err_attrs_visible(struct kobject *kobj,
+				      struct attribute *attr, int n)
+{
+	struct device *dev = kobj_to_dev(kobj);
+
+	/*
+	 * sysfs entries are visible only if related private feature is
+	 * enumerated.
+	 */
+	if (!dfl_get_feature_by_id(dev, PORT_FEATURE_ID_ERROR))
+		return 0;
+
+	return attr->mode;
+}
+
+const struct attribute_group port_err_group = {
+	.name       = "errors",
+	.attrs      = port_err_attrs,
+	.is_visible = port_err_attrs_visible,
+};
+
+static int port_err_init(struct platform_device *pdev,
+			 struct dfl_feature *feature)
+{
+	afu_port_err_mask(&pdev->dev, false);
+
+	return 0;
+}
+
+static void port_err_uinit(struct platform_device *pdev,
+			   struct dfl_feature *feature)
+{
+	afu_port_err_mask(&pdev->dev, true);
+}
+
+const struct dfl_feature_id port_err_id_table[] = {
+	{.id = PORT_FEATURE_ID_ERROR,},
+	{0,}
+};
+
+const struct dfl_feature_ops port_err_ops = {
+	.init = port_err_init,
+	.uinit = port_err_uinit,
+};
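Note the contract encoded in afu_port_err_clear() above: a write to the errors attribute only succeeds when the written value matches the live PORT_ERROR contents, so a stale snapshot can never clear errors that arrived in the meantime. A hedged userspace sketch of that read-then-write-back handshake (the dfl-port instance path is an assumption and varies per system):

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

/* instance number varies with enumeration order */
#define PORT_ERRORS "/sys/bus/platform/devices/dfl-port.0/errors/errors"

static int clear_port_errors(void)
{
	char buf[32];
	ssize_t n;
	int fd = open(PORT_ERRORS, O_RDWR);

	if (fd < 0)
		return -1;

	n = read(fd, buf, sizeof(buf) - 1);	/* e.g. "0x40\n" */
	if (n <= 0)
		goto out;
	buf[n] = '\0';

	/* write the observed value back; a mismatch returns -EINVAL */
	lseek(fd, 0, SEEK_SET);
	n = write(fd, buf, strlen(buf));
out:
	close(fd);
	return n > 0 ? 0 : -1;
}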
diff --git a/drivers/fpga/dfl-afu-main.c b/drivers/fpga/dfl-afu-main.c
index 02baa6a227c0..e4a34dc7947f 100644
--- a/drivers/fpga/dfl-afu-main.c
+++ b/drivers/fpga/dfl-afu-main.c
@@ -22,14 +22,17 @@
 #include "dfl-afu.h"
 
 /**
- * port_enable - enable a port
+ * __afu_port_enable - enable a port by clear reset
  * @pdev: port platform device.
  *
  * Enable Port by clear the port soft reset bit, which is set by default.
  * The AFU is unable to respond to any MMIO access while in reset.
- * port_enable function should only be used after port_disable function.
+ * __afu_port_enable function should only be used after __afu_port_disable
+ * function.
+ *
+ * The caller needs to hold lock for protection.
  */
-static void port_enable(struct platform_device *pdev)
+void __afu_port_enable(struct platform_device *pdev)
 {
 	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
 	void __iomem *base;
@@ -52,13 +55,14 @@
 #define RST_POLL_TIMEOUT 1000 /* us */
 
 /**
- * port_disable - disable a port
+ * __afu_port_disable - disable a port by hold reset
  * @pdev: port platform device.
  *
- * Disable Port by setting the port soft reset bit, it puts the port into
- * reset.
+ * Disable Port by setting the port soft reset bit, it puts the port into reset.
+ *
+ * The caller needs to hold lock for protection.
  */
-static int port_disable(struct platform_device *pdev)
+int __afu_port_disable(struct platform_device *pdev)
 {
 	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
 	void __iomem *base;
@@ -104,9 +108,9 @@ static int __port_reset(struct platform_device *pdev)
 {
 	int ret;
 
-	ret = port_disable(pdev);
+	ret = __afu_port_disable(pdev);
 	if (!ret)
-		port_enable(pdev);
+		__afu_port_enable(pdev);
 
 	return ret;
 }
@@ -141,27 +145,267 @@ id_show(struct device *dev, struct device_attribute *attr, char *buf)
 }
 static DEVICE_ATTR_RO(id);
 
-static const struct attribute *port_hdr_attrs[] = {
+static ssize_t
+ltr_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+	void __iomem *base;
+	u64 v;
+
+	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+
+	mutex_lock(&pdata->lock);
+	v = readq(base + PORT_HDR_CTRL);
+	mutex_unlock(&pdata->lock);
+
+	return sprintf(buf, "%x\n", (u8)FIELD_GET(PORT_CTRL_LATENCY, v));
+}
+
+static ssize_t
+ltr_store(struct device *dev, struct device_attribute *attr,
+	  const char *buf, size_t count)
+{
+	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+	void __iomem *base;
+	bool ltr;
+	u64 v;
+
+	if (kstrtobool(buf, &ltr))
+		return -EINVAL;
+
+	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+
+	mutex_lock(&pdata->lock);
+	v = readq(base + PORT_HDR_CTRL);
+	v &= ~PORT_CTRL_LATENCY;
+	v |= FIELD_PREP(PORT_CTRL_LATENCY, ltr ? 1 : 0);
+	writeq(v, base + PORT_HDR_CTRL);
+	mutex_unlock(&pdata->lock);
+
+	return count;
+}
+static DEVICE_ATTR_RW(ltr);
+
+static ssize_t
+ap1_event_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+	void __iomem *base;
+	u64 v;
+
+	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+
+	mutex_lock(&pdata->lock);
+	v = readq(base + PORT_HDR_STS);
+	mutex_unlock(&pdata->lock);
+
+	return sprintf(buf, "%x\n", (u8)FIELD_GET(PORT_STS_AP1_EVT, v));
+}
+
+static ssize_t
+ap1_event_store(struct device *dev, struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+	void __iomem *base;
+	bool clear;
+
+	if (kstrtobool(buf, &clear) || !clear)
+		return -EINVAL;
+
+	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+
+	mutex_lock(&pdata->lock);
+	writeq(PORT_STS_AP1_EVT, base + PORT_HDR_STS);
+	mutex_unlock(&pdata->lock);
+
+	return count;
+}
+static DEVICE_ATTR_RW(ap1_event);
+
+static ssize_t
+ap2_event_show(struct device *dev, struct device_attribute *attr,
+	       char *buf)
+{
+	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+	void __iomem *base;
+	u64 v;
+
+	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+
+	mutex_lock(&pdata->lock);
+	v = readq(base + PORT_HDR_STS);
+	mutex_unlock(&pdata->lock);
+
+	return sprintf(buf, "%x\n", (u8)FIELD_GET(PORT_STS_AP2_EVT, v));
+}
+
+static ssize_t
+ap2_event_store(struct device *dev, struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+	void __iomem *base;
+	bool clear;
+
+	if (kstrtobool(buf, &clear) || !clear)
+		return -EINVAL;
+
+	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+
+	mutex_lock(&pdata->lock);
+	writeq(PORT_STS_AP2_EVT, base + PORT_HDR_STS);
+	mutex_unlock(&pdata->lock);
+
+	return count;
+}
+static DEVICE_ATTR_RW(ap2_event);
+
+static ssize_t
+power_state_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+	void __iomem *base;
+	u64 v;
+
+	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+
+	mutex_lock(&pdata->lock);
+	v = readq(base + PORT_HDR_STS);
+	mutex_unlock(&pdata->lock);
+
+	return sprintf(buf, "0x%x\n", (u8)FIELD_GET(PORT_STS_PWR_STATE, v));
+}
+static DEVICE_ATTR_RO(power_state);
+
+static ssize_t
+userclk_freqcmd_store(struct device *dev, struct device_attribute *attr,
+		      const char *buf, size_t count)
+{
+	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+	u64 userclk_freq_cmd;
+	void __iomem *base;
+
+	if (kstrtou64(buf, 0, &userclk_freq_cmd))
+		return -EINVAL;
+
+	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+
+	mutex_lock(&pdata->lock);
+	writeq(userclk_freq_cmd, base + PORT_HDR_USRCLK_CMD0);
+	mutex_unlock(&pdata->lock);
+
+	return count;
+}
+static DEVICE_ATTR_WO(userclk_freqcmd);
+
+static ssize_t
+userclk_freqcntrcmd_store(struct device *dev, struct device_attribute *attr,
+			  const char *buf, size_t count)
+{
+	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+	u64 userclk_freqcntr_cmd;
+	void __iomem *base;
+
+	if (kstrtou64(buf, 0, &userclk_freqcntr_cmd))
+		return -EINVAL;
+
+	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+
+	mutex_lock(&pdata->lock);
+	writeq(userclk_freqcntr_cmd, base + PORT_HDR_USRCLK_CMD1);
+	mutex_unlock(&pdata->lock);
+
+	return count;
+}
+static DEVICE_ATTR_WO(userclk_freqcntrcmd);
+
+static ssize_t
+userclk_freqsts_show(struct device *dev, struct device_attribute *attr,
+		     char *buf)
+{
+	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+	u64 userclk_freqsts;
+	void __iomem *base;
+
+	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+
+	mutex_lock(&pdata->lock);
+	userclk_freqsts = readq(base + PORT_HDR_USRCLK_STS0);
+	mutex_unlock(&pdata->lock);
+
+	return sprintf(buf, "0x%llx\n", (unsigned long long)userclk_freqsts);
+}
+static DEVICE_ATTR_RO(userclk_freqsts);
+
+static ssize_t
+userclk_freqcntrsts_show(struct device *dev, struct device_attribute *attr,
+			 char *buf)
+{
+	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+	u64 userclk_freqcntrsts;
+	void __iomem *base;
+
+	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+
+	mutex_lock(&pdata->lock);
+	userclk_freqcntrsts = readq(base + PORT_HDR_USRCLK_STS1);
+	mutex_unlock(&pdata->lock);
+
+	return sprintf(buf, "0x%llx\n",
+		       (unsigned long long)userclk_freqcntrsts);
+}
+static DEVICE_ATTR_RO(userclk_freqcntrsts);
+
+static struct attribute *port_hdr_attrs[] = {
 	&dev_attr_id.attr,
+	&dev_attr_ltr.attr,
+	&dev_attr_ap1_event.attr,
+	&dev_attr_ap2_event.attr,
+	&dev_attr_power_state.attr,
+	&dev_attr_userclk_freqcmd.attr,
+	&dev_attr_userclk_freqcntrcmd.attr,
+	&dev_attr_userclk_freqsts.attr,
+	&dev_attr_userclk_freqcntrsts.attr,
 	NULL,
 };
 
-static int port_hdr_init(struct platform_device *pdev,
-			 struct dfl_feature *feature)
+static umode_t port_hdr_attrs_visible(struct kobject *kobj,
+				      struct attribute *attr, int n)
 {
-	dev_dbg(&pdev->dev, "PORT HDR Init.\n");
+	struct device *dev = kobj_to_dev(kobj);
+	umode_t mode = attr->mode;
+	void __iomem *base;
 
-	port_reset(pdev);
+	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+
+	if (dfl_feature_revision(base) > 0) {
+		/*
+		 * userclk sysfs interfaces are only visible in case port
+		 * revision is 0, as hardware with revision >0 doesn't
+		 * support this.
+		 */
+		if (attr == &dev_attr_userclk_freqcmd.attr ||
+		    attr == &dev_attr_userclk_freqcntrcmd.attr ||
+		    attr == &dev_attr_userclk_freqsts.attr ||
+		    attr == &dev_attr_userclk_freqcntrsts.attr)
+			mode = 0;
+	}
 
-	return sysfs_create_files(&pdev->dev.kobj, port_hdr_attrs);
+	return mode;
 }
 
-static void port_hdr_uinit(struct platform_device *pdev,
-			   struct dfl_feature *feature)
+static const struct attribute_group port_hdr_group = {
+	.attrs      = port_hdr_attrs,
+	.is_visible = port_hdr_attrs_visible,
+};
+
+static int port_hdr_init(struct platform_device *pdev,
+			 struct dfl_feature *feature)
 {
-	dev_dbg(&pdev->dev, "PORT HDR UInit.\n");
+	port_reset(pdev);
 
-	sysfs_remove_files(&pdev->dev.kobj, port_hdr_attrs);
+	return 0;
 }
 
 static long
@@ -185,9 +429,13 @@ port_hdr_ioctl(struct platform_device *pdev, struct dfl_feature *feature,
 	return ret;
 }
 
+static const struct dfl_feature_id port_hdr_id_table[] = {
+	{.id = PORT_FEATURE_ID_HEADER,},
+	{0,}
+};
+
 static const struct dfl_feature_ops port_hdr_ops = {
 	.init = port_hdr_init,
-	.uinit = port_hdr_uinit,
 	.ioctl = port_hdr_ioctl,
 };
 
@@ -214,52 +462,91 @@ afu_id_show(struct device *dev, struct device_attribute *attr, char *buf)
 }
 static DEVICE_ATTR_RO(afu_id);
 
-static const struct attribute *port_afu_attrs[] = {
+static struct attribute *port_afu_attrs[] = {
 	&dev_attr_afu_id.attr,
 	NULL
 };
 
+static umode_t port_afu_attrs_visible(struct kobject *kobj,
+				      struct attribute *attr, int n)
+{
+	struct device *dev = kobj_to_dev(kobj);
+
+	/*
+	 * sysfs entries are visible only if related private feature is
+	 * enumerated.
+	 */
+	if (!dfl_get_feature_by_id(dev, PORT_FEATURE_ID_AFU))
+		return 0;
+
+	return attr->mode;
+}
+
+static const struct attribute_group port_afu_group = {
+	.attrs      = port_afu_attrs,
+	.is_visible = port_afu_attrs_visible,
+};
+
 static int port_afu_init(struct platform_device *pdev,
 			 struct dfl_feature *feature)
 {
 	struct resource *res = &pdev->resource[feature->resource_index];
-	int ret;
 
-	dev_dbg(&pdev->dev, "PORT AFU Init.\n");
+	return afu_mmio_region_add(dev_get_platdata(&pdev->dev),
+				   DFL_PORT_REGION_INDEX_AFU,
+				   resource_size(res), res->start,
+				   DFL_PORT_REGION_MMAP | DFL_PORT_REGION_READ |
+				   DFL_PORT_REGION_WRITE);
+}
 
-	ret = afu_mmio_region_add(dev_get_platdata(&pdev->dev),
-				  DFL_PORT_REGION_INDEX_AFU, resource_size(res),
-				  res->start, DFL_PORT_REGION_READ |
-				  DFL_PORT_REGION_WRITE | DFL_PORT_REGION_MMAP);
-	if (ret)
-		return ret;
+static const struct dfl_feature_id port_afu_id_table[] = {
+	{.id = PORT_FEATURE_ID_AFU,},
+	{0,}
+};
 
-	return sysfs_create_files(&pdev->dev.kobj, port_afu_attrs);
-}
+static const struct dfl_feature_ops port_afu_ops = {
+	.init = port_afu_init,
+};
 
-static void port_afu_uinit(struct platform_device *pdev,
-			   struct dfl_feature *feature)
+static int port_stp_init(struct platform_device *pdev,
+			 struct dfl_feature *feature)
 {
-	dev_dbg(&pdev->dev, "PORT AFU UInit.\n");
+	struct resource *res = &pdev->resource[feature->resource_index];
 
-	sysfs_remove_files(&pdev->dev.kobj, port_afu_attrs);
+	return afu_mmio_region_add(dev_get_platdata(&pdev->dev),
+				   DFL_PORT_REGION_INDEX_STP,
+				   resource_size(res), res->start,
+				   DFL_PORT_REGION_MMAP | DFL_PORT_REGION_READ |
+				   DFL_PORT_REGION_WRITE);
 }
 
-static const struct dfl_feature_ops port_afu_ops = {
-	.init = port_afu_init,
-	.uinit = port_afu_uinit,
+static const struct dfl_feature_id port_stp_id_table[] = {
+	{.id = PORT_FEATURE_ID_STP,},
+	{0,}
+};
+
+static const struct dfl_feature_ops port_stp_ops = {
+	.init = port_stp_init,
 };
 
 static struct dfl_feature_driver port_feature_drvs[] = {
 	{
-		.id = PORT_FEATURE_ID_HEADER,
+		.id_table = port_hdr_id_table,
 		.ops = &port_hdr_ops,
 	},
 	{
-		.id = PORT_FEATURE_ID_AFU,
+		.id_table = port_afu_id_table,
 		.ops = &port_afu_ops,
 	},
 	{
+		.id_table = port_err_id_table,
+		.ops = &port_err_ops,
+	},
+	{
+		.id_table = port_stp_id_table,
+		.ops = &port_stp_ops,
+	},
+	{
 		.ops = NULL,
 	}
 };
@@ -545,9 +832,9 @@ static int port_enable_set(struct platform_device *pdev, bool enable)
 	mutex_lock(&pdata->lock);
 	if (enable)
-		port_enable(pdev);
+		__afu_port_enable(pdev);
 	else
-		ret = port_disable(pdev);
+		ret = __afu_port_disable(pdev);
 	mutex_unlock(&pdata->lock);
 
 	return ret;
@@ -599,9 +886,17 @@ static int afu_remove(struct platform_device *pdev)
 	return 0;
 }
 
+static const struct attribute_group *afu_dev_groups[] = {
+	&port_hdr_group,
+	&port_afu_group,
+	&port_err_group,
+	NULL
+};
+
 static struct platform_driver afu_driver = {
 	.driver	= {
-		.name    = DFL_FPGA_FEATURE_DEV_PORT,
+		.name	    = DFL_FPGA_FEATURE_DEV_PORT,
+		.dev_groups = afu_dev_groups,
 	},
 	.probe   = afu_probe,
 	.remove  = afu_remove,
diff --git a/drivers/fpga/dfl-afu.h b/drivers/fpga/dfl-afu.h
index 0c7630ae3cda..576e94960086 100644
--- a/drivers/fpga/dfl-afu.h
+++ b/drivers/fpga/dfl-afu.h
@@ -79,6 +79,10 @@ struct dfl_afu {
 	struct dfl_feature_platform_data *pdata;
 };
 
+/* hold pdata->lock when call __afu_port_enable/disable */
+void __afu_port_enable(struct platform_device *pdev);
+int __afu_port_disable(struct platform_device *pdev);
+
 void afu_mmio_region_init(struct dfl_feature_platform_data *pdata);
 int afu_mmio_region_add(struct dfl_feature_platform_data *pdata,
 			u32 region_index, u64 region_size, u64 phys, u32 flags);
@@ -97,4 +101,9 @@ int afu_dma_unmap_region(struct dfl_feature_platform_data *pdata, u64 iova);
 struct dfl_afu_dma_region *
 afu_dma_region_find(struct dfl_feature_platform_data *pdata,
 		    u64 iova, u64 size);
+
+extern const struct dfl_feature_ops port_err_ops;
+extern const struct dfl_feature_id port_err_id_table[];
+extern const struct attribute_group port_err_group;
+
 #endif /* __DFL_AFU_H */
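Both the port refactor above and the FME changes below follow the same shift: sysfs files move out of sysfs_create_files()/sysfs_remove_files() calls in the init()/uinit() callbacks and into static attribute groups hung off the platform driver via dev_groups, with is_visible() suppressing entries whose backing private feature was never enumerated. Stripped to its skeleton (all foo_* names are placeholders, not part of this patch):

static struct attribute *foo_attrs[] = {
	&dev_attr_foo.attr,
	NULL,
};

static umode_t foo_attrs_visible(struct kobject *kobj,
				 struct attribute *attr, int n)
{
	struct device *dev = kobj_to_dev(kobj);

	/* hide the whole group if the feature is absent */
	if (!dfl_get_feature_by_id(dev, FOO_FEATURE_ID))
		return 0;

	return attr->mode;
}

static const struct attribute_group foo_group = {
	.attrs      = foo_attrs,
	.is_visible = foo_attrs_visible,
};

static const struct attribute_group *foo_dev_groups[] = {
	&foo_group,
	NULL
};

/* dev_groups: the driver core creates and removes these automatically */
static struct platform_driver foo_driver = {
	.driver = {
		.name       = "foo",
		.dev_groups = foo_dev_groups,
	},
};

Letting the driver core own the lifetime of these files also removes the race between userspace opening an attribute and uinit() tearing it down by hand.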
diff --git a/drivers/fpga/dfl-fme-error.c b/drivers/fpga/dfl-fme-error.c
new file mode 100644
index 000000000000..f897d414b923
--- /dev/null
+++ b/drivers/fpga/dfl-fme-error.c
@@ -0,0 +1,359 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for FPGA Management Engine Error Management
+ *
+ * Copyright 2019 Intel Corporation, Inc.
+ *
+ * Authors:
+ *   Kang Luwei <[email protected]>
+ *   Xiao Guangrong <[email protected]>
+ *   Wu Hao <[email protected]>
+ *   Joseph Grecco <[email protected]>
+ *   Enno Luebbers <[email protected]>
+ *   Tim Whisonant <[email protected]>
+ *   Ananda Ravuri <[email protected]>
+ *   Mitchel, Henry <[email protected]>
+ */
+
+#include <linux/uaccess.h>
+
+#include "dfl.h"
+#include "dfl-fme.h"
+
+#define FME_ERROR_MASK		0x8
+#define FME_ERROR		0x10
+#define MBP_ERROR		BIT_ULL(6)
+#define PCIE0_ERROR_MASK	0x18
+#define PCIE0_ERROR		0x20
+#define PCIE1_ERROR_MASK	0x28
+#define PCIE1_ERROR		0x30
+#define FME_FIRST_ERROR		0x38
+#define FME_NEXT_ERROR		0x40
+#define RAS_NONFAT_ERROR_MASK	0x48
+#define RAS_NONFAT_ERROR	0x50
+#define RAS_CATFAT_ERROR_MASK	0x58
+#define RAS_CATFAT_ERROR	0x60
+#define RAS_ERROR_INJECT	0x68
+#define INJECT_ERROR_MASK	GENMASK_ULL(2, 0)
+
+#define ERROR_MASK		GENMASK_ULL(63, 0)
+
+static ssize_t pcie0_errors_show(struct device *dev,
+				 struct device_attribute *attr, char *buf)
+{
+	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+	void __iomem *base;
+	u64 value;
+
+	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+
+	mutex_lock(&pdata->lock);
+	value = readq(base + PCIE0_ERROR);
+	mutex_unlock(&pdata->lock);
+
+	return sprintf(buf, "0x%llx\n", (unsigned long long)value);
+}
+
+static ssize_t pcie0_errors_store(struct device *dev,
+				  struct device_attribute *attr,
+				  const char *buf, size_t count)
+{
+	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+	void __iomem *base;
+	int ret = 0;
+	u64 v, val;
+
+	if (kstrtou64(buf, 0, &val))
+		return -EINVAL;
+
+	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+
+	mutex_lock(&pdata->lock);
+	writeq(GENMASK_ULL(63, 0), base + PCIE0_ERROR_MASK);
+
+	v = readq(base + PCIE0_ERROR);
+	if (val == v)
+		writeq(v, base + PCIE0_ERROR);
+	else
+		ret = -EINVAL;
+
+	writeq(0ULL, base + PCIE0_ERROR_MASK);
+	mutex_unlock(&pdata->lock);
+	return ret ? ret : count;
+}
+static DEVICE_ATTR_RW(pcie0_errors);
+
+static ssize_t pcie1_errors_show(struct device *dev,
+				 struct device_attribute *attr, char *buf)
+{
+	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+	void __iomem *base;
+	u64 value;
+
+	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+
+	mutex_lock(&pdata->lock);
+	value = readq(base + PCIE1_ERROR);
+	mutex_unlock(&pdata->lock);
+
+	return sprintf(buf, "0x%llx\n", (unsigned long long)value);
+}
+
+static ssize_t pcie1_errors_store(struct device *dev,
+				  struct device_attribute *attr,
+				  const char *buf, size_t count)
+{
+	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+	void __iomem *base;
+	int ret = 0;
+	u64 v, val;
+
+	if (kstrtou64(buf, 0, &val))
+		return -EINVAL;
+
+	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+
+	mutex_lock(&pdata->lock);
+	writeq(GENMASK_ULL(63, 0), base + PCIE1_ERROR_MASK);
+
+	v = readq(base + PCIE1_ERROR);
+	if (val == v)
+		writeq(v, base + PCIE1_ERROR);
+	else
+		ret = -EINVAL;
+
+	writeq(0ULL, base + PCIE1_ERROR_MASK);
+	mutex_unlock(&pdata->lock);
+	return ret ? ret : count;
+}
+static DEVICE_ATTR_RW(pcie1_errors);
+
+static ssize_t nonfatal_errors_show(struct device *dev,
+				    struct device_attribute *attr, char *buf)
+{
+	void __iomem *base;
+
+	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+
+	return sprintf(buf, "0x%llx\n",
+		       (unsigned long long)readq(base + RAS_NONFAT_ERROR));
+}
+static DEVICE_ATTR_RO(nonfatal_errors);
+
+static ssize_t catfatal_errors_show(struct device *dev,
+				    struct device_attribute *attr, char *buf)
+{
+	void __iomem *base;
+
+	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+
+	return sprintf(buf, "0x%llx\n",
+		       (unsigned long long)readq(base + RAS_CATFAT_ERROR));
+}
+static DEVICE_ATTR_RO(catfatal_errors);
+
+static ssize_t inject_errors_show(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+	void __iomem *base;
+	u64 v;
+
+	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+
+	mutex_lock(&pdata->lock);
+	v = readq(base + RAS_ERROR_INJECT);
+	mutex_unlock(&pdata->lock);
+
+	return sprintf(buf, "0x%llx\n",
+		       (unsigned long long)FIELD_GET(INJECT_ERROR_MASK, v));
+}
+
+static ssize_t inject_errors_store(struct device *dev,
+				   struct device_attribute *attr,
+				   const char *buf, size_t count)
+{
+	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+	void __iomem *base;
+	u8 inject_error;
+	u64 v;
+
+	if (kstrtou8(buf, 0, &inject_error))
+		return -EINVAL;
+
+	if (inject_error & ~INJECT_ERROR_MASK)
+		return -EINVAL;
+
+	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+
+	mutex_lock(&pdata->lock);
+	v = readq(base + RAS_ERROR_INJECT);
+	v &= ~INJECT_ERROR_MASK;
+	v |= FIELD_PREP(INJECT_ERROR_MASK, inject_error);
+	writeq(v, base + RAS_ERROR_INJECT);
+	mutex_unlock(&pdata->lock);
+
+	return count;
+}
+static DEVICE_ATTR_RW(inject_errors);
+
+static ssize_t fme_errors_show(struct device *dev,
+			       struct device_attribute *attr, char *buf)
+{
+	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+	void __iomem *base;
+	u64 value;
+
+	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+
+	mutex_lock(&pdata->lock);
+	value = readq(base + FME_ERROR);
+	mutex_unlock(&pdata->lock);
+
+	return sprintf(buf, "0x%llx\n", (unsigned long long)value);
+}
+
+static ssize_t fme_errors_store(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf, size_t count)
+{
+	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+	void __iomem *base;
+	u64 v, val;
+	int ret = 0;
+
+	if (kstrtou64(buf, 0, &val))
+		return -EINVAL;
+
+	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+
+	mutex_lock(&pdata->lock);
+	writeq(GENMASK_ULL(63, 0), base + FME_ERROR_MASK);
+
+	v = readq(base + FME_ERROR);
+	if (val == v)
+		writeq(v, base + FME_ERROR);
+	else
+		ret = -EINVAL;
+
+	/* Workaround: disable MBP_ERROR if feature revision is 0 */
+	writeq(dfl_feature_revision(base) ? 0ULL : MBP_ERROR,
+	       base + FME_ERROR_MASK);
+	mutex_unlock(&pdata->lock);
+	return ret ? ret : count;
+}
+static DEVICE_ATTR_RW(fme_errors);
+
+static ssize_t first_error_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+	void __iomem *base;
+	u64 value;
+
+	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+
+	mutex_lock(&pdata->lock);
+	value = readq(base + FME_FIRST_ERROR);
+	mutex_unlock(&pdata->lock);
+
+	return sprintf(buf, "0x%llx\n", (unsigned long long)value);
+}
+static DEVICE_ATTR_RO(first_error);
+
+static ssize_t next_error_show(struct device *dev,
+			       struct device_attribute *attr, char *buf)
+{
+	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+	void __iomem *base;
+	u64 value;
+
+	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+
+	mutex_lock(&pdata->lock);
+	value = readq(base + FME_NEXT_ERROR);
+	mutex_unlock(&pdata->lock);
+
+	return sprintf(buf, "0x%llx\n", (unsigned long long)value);
+}
+static DEVICE_ATTR_RO(next_error);
+
+static struct attribute *fme_global_err_attrs[] = {
+	&dev_attr_pcie0_errors.attr,
+	&dev_attr_pcie1_errors.attr,
+	&dev_attr_nonfatal_errors.attr,
+	&dev_attr_catfatal_errors.attr,
+	&dev_attr_inject_errors.attr,
+	&dev_attr_fme_errors.attr,
+	&dev_attr_first_error.attr,
+	&dev_attr_next_error.attr,
+	NULL,
+};
+
+static umode_t fme_global_err_attrs_visible(struct kobject *kobj,
+					    struct attribute *attr, int n)
+{
+	struct device *dev = kobj_to_dev(kobj);
+
+	/*
+	 * sysfs entries are visible only if related private feature is
+	 * enumerated.
+	 */
+	if (!dfl_get_feature_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR))
+		return 0;
+
+	return attr->mode;
+}
+
+const struct attribute_group fme_global_err_group = {
+	.name       = "errors",
+	.attrs      = fme_global_err_attrs,
+	.is_visible = fme_global_err_attrs_visible,
+};
+
+static void fme_err_mask(struct device *dev, bool mask)
+{
+	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+	void __iomem *base;
+
+	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+
+	mutex_lock(&pdata->lock);
+
+	/* Workaround: keep MBP_ERROR always masked if revision is 0 */
+	if (dfl_feature_revision(base))
+		writeq(mask ? ERROR_MASK : 0, base + FME_ERROR_MASK);
+	else
+		writeq(mask ? ERROR_MASK : MBP_ERROR, base + FME_ERROR_MASK);
+
+	writeq(mask ? ERROR_MASK : 0, base + PCIE0_ERROR_MASK);
+	writeq(mask ? ERROR_MASK : 0, base + PCIE1_ERROR_MASK);
+	writeq(mask ? ERROR_MASK : 0, base + RAS_NONFAT_ERROR_MASK);
+	writeq(mask ? ERROR_MASK : 0, base + RAS_CATFAT_ERROR_MASK);
+
+	mutex_unlock(&pdata->lock);
+}
+
+static int fme_global_err_init(struct platform_device *pdev,
+			       struct dfl_feature *feature)
+{
+	fme_err_mask(&pdev->dev, false);
+
+	return 0;
+}
+
+static void fme_global_err_uinit(struct platform_device *pdev,
+				 struct dfl_feature *feature)
+{
+	fme_err_mask(&pdev->dev, true);
+}
+
+const struct dfl_feature_id fme_global_err_id_table[] = {
+	{.id = FME_FEATURE_ID_GLOBAL_ERR,},
+	{0,}
+};
+
+const struct dfl_feature_ops fme_global_err_ops = {
+	.init = fme_global_err_init,
+	.uinit = fme_global_err_uinit,
+};
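Besides reporting, the new FME error feature exposes RAS_ERROR_INJECT for testing: inject_errors accepts a value that must fit the 3-bit INJECT_ERROR_MASK, and writing 0 disables injection again. A minimal userspace sketch (the sysfs path is an assumption; the dfl-fme instance number varies):

#include <fcntl.h>
#include <unistd.h>

#define INJECT "/sys/bus/platform/devices/dfl-fme.0/errors/inject_errors"

/* val is a single digit "0".."7"; anything outside the 3-bit
 * INJECT_ERROR_MASK is rejected with -EINVAL by inject_errors_store(). */
static int inject_error(const char *val)
{
	ssize_t n;
	int fd = open(INJECT, O_WRONLY);

	if (fd < 0)
		return -1;
	n = write(fd, val, 1);
	close(fd);
	return n == 1 ? 0 : -1;
}

The injected error then shows up through the matching nonfatal_errors or catfatal_errors attribute, which makes the RAS reporting path testable without real faults.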
diff --git a/drivers/fpga/dfl-fme-main.c b/drivers/fpga/dfl-fme-main.c
index 086ad2420ade..4d78e182878f 100644
--- a/drivers/fpga/dfl-fme-main.c
+++ b/drivers/fpga/dfl-fme-main.c
@@ -16,6 +16,7 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/uaccess.h>
 #include <linux/fpga-dfl.h>
 
 #include "dfl.h"
@@ -72,50 +73,126 @@ static ssize_t bitstream_metadata_show(struct device *dev,
 }
 static DEVICE_ATTR_RO(bitstream_metadata);
 
-static const struct attribute *fme_hdr_attrs[] = {
+static ssize_t cache_size_show(struct device *dev,
+			       struct device_attribute *attr, char *buf)
+{
+	void __iomem *base;
+	u64 v;
+
+	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);
+
+	v = readq(base + FME_HDR_CAP);
+
+	return sprintf(buf, "%u\n",
+		       (unsigned int)FIELD_GET(FME_CAP_CACHE_SIZE, v));
+}
+static DEVICE_ATTR_RO(cache_size);
+
+static ssize_t fabric_version_show(struct device *dev,
+				   struct device_attribute *attr, char *buf)
+{
+	void __iomem *base;
+	u64 v;
+
+	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);
+
+	v = readq(base + FME_HDR_CAP);
+
+	return sprintf(buf, "%u\n",
+		       (unsigned int)FIELD_GET(FME_CAP_FABRIC_VERID, v));
+}
+static DEVICE_ATTR_RO(fabric_version);
+
+static ssize_t socket_id_show(struct device *dev,
+			      struct device_attribute *attr, char *buf)
+{
+	void __iomem *base;
+	u64 v;
+
+	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);
+
+	v = readq(base + FME_HDR_CAP);
+
+	return sprintf(buf, "%u\n",
+		       (unsigned int)FIELD_GET(FME_CAP_SOCKET_ID, v));
+}
+static DEVICE_ATTR_RO(socket_id);
+
+static struct attribute *fme_hdr_attrs[] = {
 	&dev_attr_ports_num.attr,
 	&dev_attr_bitstream_id.attr,
 	&dev_attr_bitstream_metadata.attr,
+	&dev_attr_cache_size.attr,
+	&dev_attr_fabric_version.attr,
+	&dev_attr_socket_id.attr,
 	NULL,
 };
 
-static int fme_hdr_init(struct platform_device *pdev,
-			struct dfl_feature *feature)
+static const struct attribute_group fme_hdr_group = {
+	.attrs = fme_hdr_attrs,
+};
+
+static long fme_hdr_ioctl_release_port(struct dfl_feature_platform_data *pdata,
+				       unsigned long arg)
 {
-	void __iomem *base = feature->ioaddr;
-	int ret;
+	struct dfl_fpga_cdev *cdev = pdata->dfl_cdev;
+	int port_id;
 
-	dev_dbg(&pdev->dev, "FME HDR Init.\n");
-	dev_dbg(&pdev->dev, "FME cap %llx.\n",
-		(unsigned long long)readq(base + FME_HDR_CAP));
+	if (get_user(port_id, (int __user *)arg))
+		return -EFAULT;
 
-	ret = sysfs_create_files(&pdev->dev.kobj, fme_hdr_attrs);
-	if (ret)
-		return ret;
+	return dfl_fpga_cdev_release_port(cdev, port_id);
+}
 
-	return 0;
+static long fme_hdr_ioctl_assign_port(struct dfl_feature_platform_data *pdata,
+				      unsigned long arg)
+{
+	struct dfl_fpga_cdev *cdev = pdata->dfl_cdev;
+	int port_id;
+
+	if (get_user(port_id, (int __user *)arg))
+		return -EFAULT;
+
+	return dfl_fpga_cdev_assign_port(cdev, port_id);
 }
 
-static void fme_hdr_uinit(struct platform_device *pdev,
-			  struct dfl_feature *feature)
+static long fme_hdr_ioctl(struct platform_device *pdev,
+			  struct dfl_feature *feature,
+			  unsigned int cmd, unsigned long arg)
 {
-	dev_dbg(&pdev->dev, "FME HDR UInit.\n");
-	sysfs_remove_files(&pdev->dev.kobj, fme_hdr_attrs);
+	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+
+	switch (cmd) {
+	case DFL_FPGA_FME_PORT_RELEASE:
+		return fme_hdr_ioctl_release_port(pdata, arg);
+	case DFL_FPGA_FME_PORT_ASSIGN:
+		return fme_hdr_ioctl_assign_port(pdata, arg);
+	}
+
+	return -ENODEV;
 }
 
+static const struct dfl_feature_id fme_hdr_id_table[] = {
+	{.id = FME_FEATURE_ID_HEADER,},
+	{0,}
+};
+
 static const struct dfl_feature_ops fme_hdr_ops = {
-	.init = fme_hdr_init,
-	.uinit = fme_hdr_uinit,
+	.ioctl = fme_hdr_ioctl,
 };
 
 static struct dfl_feature_driver fme_feature_drvs[] = {
 	{
-		.id = FME_FEATURE_ID_HEADER,
+		.id_table = fme_hdr_id_table,
 		.ops = &fme_hdr_ops,
 	},
 	{
-		.id = FME_FEATURE_ID_PR_MGMT,
-		.ops = &pr_mgmt_ops,
+		.id_table = fme_pr_mgmt_id_table,
+		.ops = &fme_pr_mgmt_ops,
+	},
+	{
+		.id_table = fme_global_err_id_table,
+		.ops = &fme_global_err_ops,
 	},
 	{
 		.ops = NULL,
@@ -263,9 +340,16 @@ static int fme_remove(struct platform_device *pdev)
 	return 0;
 }
 
+static const struct attribute_group *fme_dev_groups[] = {
+	&fme_hdr_group,
+	&fme_global_err_group,
+	NULL
+};
+
 static struct platform_driver fme_driver = {
 	.driver	= {
-		.name    = DFL_FPGA_FEATURE_DEV_FME,
+		.name       = DFL_FPGA_FEATURE_DEV_FME,
+		.dev_groups = fme_dev_groups,
 	},
 	.probe   = fme_probe,
 	.remove  = fme_remove,
diff --git a/drivers/fpga/dfl-fme-pr.c b/drivers/fpga/dfl-fme-pr.c
index 3c71dc3faaf5..a233a53db708 100644
--- a/drivers/fpga/dfl-fme-pr.c
+++ b/drivers/fpga/dfl-fme-pr.c
@@ -470,7 +470,12 @@ static long fme_pr_ioctl(struct platform_device *pdev,
 	return ret;
 }
 
-const struct dfl_feature_ops pr_mgmt_ops = {
+const struct dfl_feature_id fme_pr_mgmt_id_table[] = {
+	{.id = FME_FEATURE_ID_PR_MGMT,},
+	{0}
+};
+
+const struct dfl_feature_ops fme_pr_mgmt_ops = {
 	.init = pr_mgmt_init,
 	.uinit = pr_mgmt_uinit,
 	.ioctl = fme_pr_ioctl,
diff --git a/drivers/fpga/dfl-fme.h b/drivers/fpga/dfl-fme.h
index 5394a216c5c0..6685c8ef965b 100644
--- a/drivers/fpga/dfl-fme.h
+++ b/drivers/fpga/dfl-fme.h
@@ -33,6 +33,10 @@ struct dfl_fme {
 	struct dfl_feature_platform_data *pdata;
 };
 
-extern const struct dfl_feature_ops pr_mgmt_ops;
+extern const struct dfl_feature_ops fme_pr_mgmt_ops;
+extern const struct dfl_feature_id fme_pr_mgmt_id_table[];
+extern const struct dfl_feature_ops fme_global_err_ops;
+extern const struct dfl_feature_id fme_global_err_id_table[];
+extern const struct attribute_group fme_global_err_group;
 
 #endif /* __DFL_FME_H */
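The renames above are part of a larger pattern change: sub-features now bind through dfl_feature_id tables instead of a single .id member, and since init()/uinit() became optional (see the dfl.c hunks below), a minimal feature driver shrinks to roughly the following. All FOO names here are placeholders; real feature IDs live in dfl.h:

#define FME_FEATURE_ID_FOO	0xff	/* placeholder, not a real ID */

static long fme_foo_ioctl(struct platform_device *pdev,
			  struct dfl_feature *feature,
			  unsigned int cmd, unsigned long arg)
{
	return -ENODEV;	/* stub */
}

static const struct dfl_feature_id fme_foo_id_table[] = {
	{.id = FME_FEATURE_ID_FOO,},
	{0,}
};

static const struct dfl_feature_ops fme_foo_ops = {
	.ioctl = fme_foo_ioctl,	/* .init/.uinit may now be omitted */
};

/* plus one more entry in fme_feature_drvs[]:
 *	{
 *		.id_table = fme_foo_id_table,
 *		.ops = &fme_foo_ops,
 *	},
 */

An id_table also lets one ops structure serve several related feature IDs, which a single .id field could not express.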
+		 */ +		ret = dfl_fpga_cdev_config_ports_vf(cdev, num_vfs); +		if (ret) +			return ret; + +		ret = pci_enable_sriov(pcidev, num_vfs); +		if (ret) +			dfl_fpga_cdev_config_ports_pf(cdev); +	} + +	return ret; +} +  static void cci_pci_remove(struct pci_dev *pcidev)  { +	if (dev_is_pf(&pcidev->dev)) +		cci_pci_sriov_configure(pcidev, 0); +  	cci_remove_feature_devs(pcidev);  	pci_disable_pcie_error_reporting(pcidev);  } @@ -234,6 +269,7 @@ static struct pci_driver cci_pci_driver = {  	.id_table = cci_pcie_id_tbl,  	.probe = cci_pci_probe,  	.remove = cci_pci_remove, +	.sriov_configure = cci_pci_sriov_configure,  };  module_pci_driver(cci_pci_driver); diff --git a/drivers/fpga/dfl.c b/drivers/fpga/dfl.c index 4b66aaa32b5a..96a2b8274a33 100644 --- a/drivers/fpga/dfl.c +++ b/drivers/fpga/dfl.c @@ -231,16 +231,20 @@ EXPORT_SYMBOL_GPL(dfl_fpga_port_ops_del);   */  int dfl_fpga_check_port_id(struct platform_device *pdev, void *pport_id)  { -	struct dfl_fpga_port_ops *port_ops = dfl_fpga_port_ops_get(pdev); -	int port_id; +	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev); +	struct dfl_fpga_port_ops *port_ops; + +	if (pdata->id != FEATURE_DEV_ID_UNUSED) +		return pdata->id == *(int *)pport_id; +	port_ops = dfl_fpga_port_ops_get(pdev);  	if (!port_ops || !port_ops->get_id)  		return 0; -	port_id = port_ops->get_id(pdev); +	pdata->id = port_ops->get_id(pdev);  	dfl_fpga_port_ops_put(port_ops); -	return port_id == *(int *)pport_id; +	return pdata->id == *(int *)pport_id;  }  EXPORT_SYMBOL_GPL(dfl_fpga_check_port_id); @@ -255,7 +259,8 @@ void dfl_fpga_dev_feature_uinit(struct platform_device *pdev)  	dfl_fpga_dev_for_each_feature(pdata, feature)  		if (feature->ops) { -			feature->ops->uinit(pdev, feature); +			if (feature->ops->uinit) +				feature->ops->uinit(pdev, feature);  			feature->ops = NULL;  		}  } @@ -266,17 +271,34 @@ static int dfl_feature_instance_init(struct platform_device *pdev,  				     struct dfl_feature *feature,  				     struct dfl_feature_driver *drv)  { -	int ret; +	int ret = 0; -	ret = drv->ops->init(pdev, feature); -	if (ret) -		return ret; +	if (drv->ops->init) { +		ret = drv->ops->init(pdev, feature); +		if (ret) +			return ret; +	}  	feature->ops = drv->ops;  	return ret;  } +static bool dfl_feature_drv_match(struct dfl_feature *feature, +				  struct dfl_feature_driver *driver) +{ +	const struct dfl_feature_id *ids = driver->id_table; + +	if (ids) { +		while (ids->id) { +			if (ids->id == feature->id) +				return true; +			ids++; +		} +	} +	return false; +} +  /**   * dfl_fpga_dev_feature_init - init for sub features of dfl feature device   * @pdev: feature device. 
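The SR-IOV flow these hunks implement spans user space and the kernel: a port is first released through the FME ioctl interface (DFL_FPGA_FME_PORT_RELEASE, handled by fme_hdr_ioctl() above), and only then can a write to sriov_numvfs succeed, because cci_pci_sriov_configure() requires the released-port count to match the requested VF count. A minimal user-space sketch of that sequence follows; the device path /dev/dfl-fme.0 and port id 0 are illustrative placeholders, not values taken from this patch:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fpga-dfl.h>

int main(void)
{
	int port_id = 0;	/* placeholder: port to hand over to a VF */
	int fd = open("/dev/dfl-fme.0", O_RDWR);

	if (fd < 0) {
		perror("open fme");
		return 1;
	}

	/* release the port from the PF; mandatory before enabling SR-IOV */
	if (ioctl(fd, DFL_FPGA_FME_PORT_RELEASE, &port_id)) {
		perror("port release");
		close(fd);
		return 1;
	}
	close(fd);

	/*
	 * Now "echo 1 > /sys/bus/pci/devices/<BDF>/sriov_numvfs" drives
	 * cci_pci_sriov_configure(), which flips the released port into VF
	 * access mode and enables the VF. Writing 0 reverses this, and
	 * DFL_FPGA_FME_PORT_ASSIGN gives the port back to the PF.
	 */
	return 0;
}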
@@ -297,8 +319,7 @@ int dfl_fpga_dev_feature_init(struct platform_device *pdev,  	while (drv->ops) {  		dfl_fpga_dev_for_each_feature(pdata, feature) { -			/* match feature and drv using id */ -			if (feature->id == drv->id) { +			if (dfl_feature_drv_match(feature, drv)) {  				ret = dfl_feature_instance_init(pdev, pdata,  								feature, drv);  				if (ret) @@ -474,6 +495,7 @@ static int build_info_commit_dev(struct build_feature_devs_info *binfo)  	pdata->dev = fdev;  	pdata->num = binfo->feature_num;  	pdata->dfl_cdev = binfo->cdev; +	pdata->id = FEATURE_DEV_ID_UNUSED;  	mutex_init(&pdata->lock);  	lockdep_set_class_and_name(&pdata->lock, &dfl_pdata_keys[type],  				   dfl_pdata_key_strings[type]); @@ -973,25 +995,27 @@ void dfl_fpga_feature_devs_remove(struct dfl_fpga_cdev *cdev)  {  	struct dfl_feature_platform_data *pdata, *ptmp; -	remove_feature_devs(cdev); -  	mutex_lock(&cdev->lock); -	if (cdev->fme_dev) { -		/* the fme should be unregistered. */ -		WARN_ON(device_is_registered(cdev->fme_dev)); +	if (cdev->fme_dev)  		put_device(cdev->fme_dev); -	}  	list_for_each_entry_safe(pdata, ptmp, &cdev->port_dev_list, node) {  		struct platform_device *port_dev = pdata->dev; -		/* the port should be unregistered. */ -		WARN_ON(device_is_registered(&port_dev->dev)); +		/* remove released ports */ +		if (!device_is_registered(&port_dev->dev)) { +			dfl_id_free(feature_dev_id_type(port_dev), +				    port_dev->id); +			platform_device_put(port_dev); +		} +  		list_del(&pdata->node);  		put_device(&port_dev->dev);  	}  	mutex_unlock(&cdev->lock); +	remove_feature_devs(cdev); +  	fpga_region_unregister(cdev->region);  	devm_kfree(cdev->parent, cdev);  } @@ -1042,6 +1066,170 @@ static int __init dfl_fpga_init(void)  	return ret;  } +/** + * dfl_fpga_cdev_release_port - release a port platform device + * + * @cdev: parent container device. + * @port_id: id of the port platform device. + * + * This function allows the user to release a port platform device. This is + * a mandatory step before turning a port from PF into VF for SRIOV support. + * + * Return: 0 on success, negative error code otherwise. + */ +int dfl_fpga_cdev_release_port(struct dfl_fpga_cdev *cdev, int port_id) +{ +	struct platform_device *port_pdev; +	int ret = -ENODEV; + +	mutex_lock(&cdev->lock); +	port_pdev = __dfl_fpga_cdev_find_port(cdev, &port_id, +					      dfl_fpga_check_port_id); +	if (!port_pdev) +		goto unlock_exit; + +	if (!device_is_registered(&port_pdev->dev)) { +		ret = -EBUSY; +		goto put_dev_exit; +	} + +	ret = dfl_feature_dev_use_begin(dev_get_platdata(&port_pdev->dev)); +	if (ret) +		goto put_dev_exit; + +	platform_device_del(port_pdev); +	cdev->released_port_num++; +put_dev_exit: +	put_device(&port_pdev->dev); +unlock_exit: +	mutex_unlock(&cdev->lock); +	return ret; +} +EXPORT_SYMBOL_GPL(dfl_fpga_cdev_release_port); + +/** + * dfl_fpga_cdev_assign_port - assign a port platform device back + * + * @cdev: parent container device. + * @port_id: id of the port platform device. + * + * This function allows the user to assign a port platform device back. This + * is a mandatory step after disabling SRIOV support. + * + * Return: 0 on success, negative error code otherwise. 
+ */ +int dfl_fpga_cdev_assign_port(struct dfl_fpga_cdev *cdev, int port_id) +{ +	struct platform_device *port_pdev; +	int ret = -ENODEV; + +	mutex_lock(&cdev->lock); +	port_pdev = __dfl_fpga_cdev_find_port(cdev, &port_id, +					      dfl_fpga_check_port_id); +	if (!port_pdev) +		goto unlock_exit; + +	if (device_is_registered(&port_pdev->dev)) { +		ret = -EBUSY; +		goto put_dev_exit; +	} + +	ret = platform_device_add(port_pdev); +	if (ret) +		goto put_dev_exit; + +	dfl_feature_dev_use_end(dev_get_platdata(&port_pdev->dev)); +	cdev->released_port_num--; +put_dev_exit: +	put_device(&port_pdev->dev); +unlock_exit: +	mutex_unlock(&cdev->lock); +	return ret; +} +EXPORT_SYMBOL_GPL(dfl_fpga_cdev_assign_port); + +static void config_port_access_mode(struct device *fme_dev, int port_id, +				    bool is_vf) +{ +	void __iomem *base; +	u64 v; + +	base = dfl_get_feature_ioaddr_by_id(fme_dev, FME_FEATURE_ID_HEADER); + +	v = readq(base + FME_HDR_PORT_OFST(port_id)); + +	v &= ~FME_PORT_OFST_ACC_CTRL; +	v |= FIELD_PREP(FME_PORT_OFST_ACC_CTRL, +			is_vf ? FME_PORT_OFST_ACC_VF : FME_PORT_OFST_ACC_PF); + +	writeq(v, base + FME_HDR_PORT_OFST(port_id)); +} + +#define config_port_vf_mode(dev, id) config_port_access_mode(dev, id, true) +#define config_port_pf_mode(dev, id) config_port_access_mode(dev, id, false) + +/** + * dfl_fpga_cdev_config_ports_pf - configure ports to PF access mode + * + * @cdev: parent container device. + * + * This function is needed in the SRIOV configuration routine. It can be used + * to configure all released ports from VF access mode back to PF. + */ +void dfl_fpga_cdev_config_ports_pf(struct dfl_fpga_cdev *cdev) +{ +	struct dfl_feature_platform_data *pdata; + +	mutex_lock(&cdev->lock); +	list_for_each_entry(pdata, &cdev->port_dev_list, node) { +		if (device_is_registered(&pdata->dev->dev)) +			continue; + +		config_port_pf_mode(cdev->fme_dev, pdata->id); +	} +	mutex_unlock(&cdev->lock); +} +EXPORT_SYMBOL_GPL(dfl_fpga_cdev_config_ports_pf); + +/** + * dfl_fpga_cdev_config_ports_vf - configure ports to VF access mode + * + * @cdev: parent container device. + * @num_vfs: number of VF devices. + * + * This function is needed in the SRIOV configuration routine. It can be used + * to configure the released ports from PF access mode to VF. + * + * Return: 0 on success, negative error code otherwise. + */ +int dfl_fpga_cdev_config_ports_vf(struct dfl_fpga_cdev *cdev, int num_vfs) +{ +	struct dfl_feature_platform_data *pdata; +	int ret = 0; + +	mutex_lock(&cdev->lock); +	/* +	 * Can't turn multiple ports into one VF device; only one port maps to +	 * one VF device. So if the number of released ports doesn't match the +	 * requested VF device number, reject the request with -EINVAL. 
+	 */ +	if (cdev->released_port_num != num_vfs) { +		ret = -EINVAL; +		goto done; +	} + +	list_for_each_entry(pdata, &cdev->port_dev_list, node) { +		if (device_is_registered(&pdata->dev->dev)) +			continue; + +		config_port_vf_mode(cdev->fme_dev, pdata->id); +	} +done: +	mutex_unlock(&cdev->lock); +	return ret; +} +EXPORT_SYMBOL_GPL(dfl_fpga_cdev_config_ports_vf); +  static void __exit dfl_fpga_exit(void)  {  	dfl_chardev_uinit(); diff --git a/drivers/fpga/dfl.h b/drivers/fpga/dfl.h index a8b869e9e5b7..9f0e656de720 100644 --- a/drivers/fpga/dfl.h +++ b/drivers/fpga/dfl.h @@ -30,8 +30,8 @@  /* plus one for fme device */  #define MAX_DFL_FEATURE_DEV_NUM    (MAX_DFL_FPGA_PORT_NUM + 1) -/* Reserved 0x0 for Header Group Register and 0xff for AFU */ -#define FEATURE_ID_FIU_HEADER		0x0 +/* Reserved 0xfe for Header Group Register and 0xff for AFU */ +#define FEATURE_ID_FIU_HEADER		0xfe  #define FEATURE_ID_AFU			0xff  #define FME_FEATURE_ID_HEADER		FEATURE_ID_FIU_HEADER @@ -119,6 +119,11 @@  #define PORT_HDR_NEXT_AFU	NEXT_AFU  #define PORT_HDR_CAP		0x30  #define PORT_HDR_CTRL		0x38 +#define PORT_HDR_STS		0x40 +#define PORT_HDR_USRCLK_CMD0	0x50 +#define PORT_HDR_USRCLK_CMD1	0x58 +#define PORT_HDR_USRCLK_STS0	0x60 +#define PORT_HDR_USRCLK_STS1	0x68  /* Port Capability Register Bitfield */  #define PORT_CAP_PORT_NUM	GENMASK_ULL(1, 0)	/* ID of this port */ @@ -130,6 +135,16 @@  /* Latency tolerance reporting. '1' >= 40us, '0' < 40us.*/  #define PORT_CTRL_LATENCY	BIT_ULL(2)  #define PORT_CTRL_SFTRST_ACK	BIT_ULL(4)		/* HW ack for reset */ + +/* Port Status Register Bitfield */ +#define PORT_STS_AP2_EVT	BIT_ULL(13)		/* AP2 event detected */ +#define PORT_STS_AP1_EVT	BIT_ULL(12)		/* AP1 event detected */ +#define PORT_STS_PWR_STATE	GENMASK_ULL(11, 8)	/* AFU power states */ +#define PORT_STS_PWR_STATE_NORM 0 +#define PORT_STS_PWR_STATE_AP1	1			/* 50% throttling */ +#define PORT_STS_PWR_STATE_AP2	2			/* 90% throttling */ +#define PORT_STS_PWR_STATE_AP6	6			/* 100% throttling */ +  /**   * struct dfl_fpga_port_ops - port ops   * @@ -154,13 +169,22 @@ void dfl_fpga_port_ops_put(struct dfl_fpga_port_ops *ops);  int dfl_fpga_check_port_id(struct platform_device *pdev, void *pport_id);  /** - * struct dfl_feature_driver - sub feature's driver + * struct dfl_feature_id - dfl private feature id   * - * @id: sub feature id. - * @ops: ops of this sub feature. + * @id: unique dfl private feature id.   */ -struct dfl_feature_driver { +struct dfl_feature_id {  	u64 id; +}; + +/** + * struct dfl_feature_driver - dfl private feature driver + * + * @id_table: id_table for dfl private features supported by this driver. + * @ops: ops of this dfl private feature driver. + */ +struct dfl_feature_driver { +	const struct dfl_feature_id *id_table;  	const struct dfl_feature_ops *ops;  }; @@ -183,6 +207,8 @@ struct dfl_feature {  #define DEV_STATUS_IN_USE	0 +#define FEATURE_DEV_ID_UNUSED	(-1) +  /**   * struct dfl_feature_platform_data - platform data for feature devices   * @@ -191,6 +217,7 @@ struct dfl_feature {   * @cdev: cdev of feature dev.   * @dev: ptr to platform device linked with this platform data.   * @dfl_cdev: ptr to container device. + * @id: id used for this feature device.   * @disable_count: count for port disable.   * @num: number for sub features.   * @dev_status: dev status (e.g. DEV_STATUS_IN_USE). 
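The Port Status bitfields added to dfl.h above expose the AFU power state (normal operation plus the AP1/AP2/AP6 throttling levels). A short kernel-side sketch of decoding that field in the driver's usual readq()/FIELD_GET() style — port_power_state() is a hypothetical helper, assuming the dfl.h definitions above are in scope:

static const char *port_power_state(struct device *port_dev)
{
	void __iomem *base;
	u64 v;

	/* port header registers, located via the port's header feature */
	base = dfl_get_feature_ioaddr_by_id(port_dev, PORT_FEATURE_ID_HEADER);
	v = readq(base + PORT_HDR_STS);

	switch (FIELD_GET(PORT_STS_PWR_STATE, v)) {
	case PORT_STS_PWR_STATE_NORM:
		return "Normal";
	case PORT_STS_PWR_STATE_AP1:
		return "AP1";		/* 50% throttling */
	case PORT_STS_PWR_STATE_AP2:
		return "AP2";		/* 90% throttling */
	case PORT_STS_PWR_STATE_AP6:
		return "AP6";		/* 100% throttling */
	default:
		return "Unknown";
	}
}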
@@ -203,6 +230,7 @@ struct dfl_feature_platform_data {  	struct cdev cdev;  	struct platform_device *dev;  	struct dfl_fpga_cdev *dfl_cdev; +	int id;  	unsigned int disable_count;  	unsigned long dev_status;  	void *private; @@ -331,6 +359,11 @@ static inline bool dfl_feature_is_port(void __iomem *base)  		(FIELD_GET(DFH_ID, v) == DFH_ID_FIU_PORT);  } +static inline u8 dfl_feature_revision(void __iomem *base) +{ +	return (u8)FIELD_GET(DFH_REVISION, readq(base + DFH)); +} +  /**   * struct dfl_fpga_enum_info - DFL FPGA enumeration information   * @@ -373,6 +406,7 @@ void dfl_fpga_enum_info_free(struct dfl_fpga_enum_info *info);   * @fme_dev: FME feature device under this container device.   * @lock: mutex lock to protect the port device list.   * @port_dev_list: list of all port feature devices under this container device. + * @released_port_num: released port number under this container device.   */  struct dfl_fpga_cdev {  	struct device *parent; @@ -380,6 +414,7 @@ struct dfl_fpga_cdev {  	struct device *fme_dev;  	struct mutex lock;  	struct list_head port_dev_list; +	int released_port_num;  };  struct dfl_fpga_cdev * @@ -407,4 +442,9 @@ dfl_fpga_cdev_find_port(struct dfl_fpga_cdev *cdev, void *data,  	return pdev;  } + +int dfl_fpga_cdev_release_port(struct dfl_fpga_cdev *cdev, int port_id); +int dfl_fpga_cdev_assign_port(struct dfl_fpga_cdev *cdev, int port_id); +void dfl_fpga_cdev_config_ports_pf(struct dfl_fpga_cdev *cdev); +int dfl_fpga_cdev_config_ports_vf(struct dfl_fpga_cdev *cdev, int num_vf);  #endif /* __FPGA_DFL_H */ diff --git a/drivers/fpga/fpga-bridge.c b/drivers/fpga/fpga-bridge.c index 80bd8f1b2aa6..4bab9028940a 100644 --- a/drivers/fpga/fpga-bridge.c +++ b/drivers/fpga/fpga-bridge.c @@ -19,11 +19,6 @@ static struct class *fpga_bridge_class;  /* Lock for adding/removing bridges to linked lists*/  static spinlock_t bridge_list_lock; -static int fpga_bridge_of_node_match(struct device *dev, const void *data) -{ -	return dev->of_node == data; -} -  /**   * fpga_bridge_enable - Enable transactions on the bridge   * @@ -104,8 +99,7 @@ struct fpga_bridge *of_fpga_bridge_get(struct device_node *np,  {  	struct device *dev; -	dev = class_find_device(fpga_bridge_class, NULL, np, -				fpga_bridge_of_node_match); +	dev = class_find_device_by_of_node(fpga_bridge_class, np);  	if (!dev)  		return ERR_PTR(-ENODEV); diff --git a/drivers/fpga/fpga-mgr.c b/drivers/fpga/fpga-mgr.c index c3866816456a..e05104f5e40c 100644 --- a/drivers/fpga/fpga-mgr.c +++ b/drivers/fpga/fpga-mgr.c @@ -482,11 +482,6 @@ struct fpga_manager *fpga_mgr_get(struct device *dev)  }  EXPORT_SYMBOL_GPL(fpga_mgr_get); -static int fpga_mgr_of_node_match(struct device *dev, const void *data) -{ -	return dev->of_node == data; -} -  /**   * of_fpga_mgr_get - Given a device node, get a reference to a fpga mgr.   * @@ -498,8 +493,7 @@ struct fpga_manager *of_fpga_mgr_get(struct device_node *node)  {  	struct device *dev; -	dev = class_find_device(fpga_mgr_class, NULL, node, -				fpga_mgr_of_node_match); +	dev = class_find_device_by_of_node(fpga_mgr_class, node);  	if (!dev)  		return ERR_PTR(-ENODEV); |
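The final two hunks are a refactor with no behavioral change: fpga-bridge.c and fpga-mgr.c each open-coded an of_node match callback for class_find_device(), and both now use the generic class_find_device_by_of_node() helper, which performs the same dev->of_node pointer comparison internally. A sketch of the equivalence (the wrapper function names are illustrative):

/* before: a driver-local callback passed to class_find_device() */
static int of_node_match(struct device *dev, const void *data)
{
	return dev->of_node == data;
}

static struct device *find_dev_old(struct class *cls, struct device_node *np)
{
	return class_find_device(cls, NULL, np, of_node_match);
}

/* after: the common helper encapsulates the same comparison */
static struct device *find_dev_new(struct class *cls, struct device_node *np)
{
	return class_find_device_by_of_node(cls, np);
}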