Diffstat (limited to 'drivers/net/ethernet/marvell/mvpp2.c')
-rw-r--r--   drivers/net/ethernet/marvell/mvpp2.c   1602
1 file changed, 1095 insertions(+), 507 deletions(-)
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index 4fe430ceb194..48d21c1e09f2 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -25,6 +25,7 @@
 #include <linux/of_mdio.h>
 #include <linux/of_net.h>
 #include <linux/of_address.h>
+#include <linux/of_device.h>
 #include <linux/phy.h>
 #include <linux/clk.h>
 #include <linux/hrtimer.h>
@@ -49,9 +50,11 @@
 #define MVPP2_SNOOP_PKT_SIZE_MASK       0x1ff
 #define MVPP2_SNOOP_BUF_HDR_MASK        BIT(9)
 #define MVPP2_RXQ_POOL_SHORT_OFFS       20
-#define MVPP2_RXQ_POOL_SHORT_MASK       0x700000
+#define MVPP21_RXQ_POOL_SHORT_MASK      0x700000
+#define MVPP22_RXQ_POOL_SHORT_MASK      0xf00000
 #define MVPP2_RXQ_POOL_LONG_OFFS        24
-#define MVPP2_RXQ_POOL_LONG_MASK        0x7000000
+#define MVPP21_RXQ_POOL_LONG_MASK       0x7000000
+#define MVPP22_RXQ_POOL_LONG_MASK       0xf000000
 #define MVPP2_RXQ_PACKET_OFFSET_OFFS    28
 #define MVPP2_RXQ_PACKET_OFFSET_MASK    0x70000000
 #define MVPP2_RXQ_DISABLE_MASK          BIT(31)
@@ -99,6 +102,7 @@
 /* Descriptor Manager Top Registers */
 #define MVPP2_RXQ_NUM_REG               0x2040
 #define MVPP2_RXQ_DESC_ADDR_REG         0x2044
+#define MVPP22_DESC_ADDR_OFFS           8
 #define MVPP2_RXQ_DESC_SIZE_REG         0x2048
 #define MVPP2_RXQ_DESC_SIZE_MASK        0x3ff0
 #define MVPP2_RXQ_STATUS_UPDATE_REG(rxq)        (0x3000 + 4 * (rxq))
@@ -117,9 +121,6 @@
 #define MVPP2_TXQ_DESC_SIZE_REG         0x2088
 #define MVPP2_TXQ_DESC_SIZE_MASK        0x3ff0
 #define MVPP2_AGGR_TXQ_UPDATE_REG       0x2090
-#define MVPP2_TXQ_THRESH_REG            0x2094
-#define MVPP2_TRANSMITTED_THRESH_OFFSET 16
-#define MVPP2_TRANSMITTED_THRESH_MASK   0x3fff0000
 #define MVPP2_TXQ_INDEX_REG             0x2098
 #define MVPP2_TXQ_PREF_BUF_REG          0x209c
 #define MVPP2_PREF_BUF_PTR(desc)        ((desc) & 0xfff)
@@ -140,6 +141,7 @@
 #define MVPP2_TXQ_RSVD_CLR_REG          0x20b8
 #define MVPP2_TXQ_RSVD_CLR_OFFSET       16
 #define MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu)       (0x2100 + 4 * (cpu))
+#define MVPP22_AGGR_TXQ_DESC_ADDR_OFFS  8
 #define MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu)       (0x2140 + 4 * (cpu))
 #define MVPP2_AGGR_TXQ_DESC_SIZE_MASK   0x3ff0
 #define MVPP2_AGGR_TXQ_STATUS_REG(cpu)  (0x2180 + 4 * (cpu))
@@ -152,9 +154,52 @@
 #define MVPP2_WIN_REMAP(w)              (0x4040 + ((w) << 2))
 #define MVPP2_BASE_ADDR_ENABLE          0x4060
 
+/* AXI Bridge Registers */
+#define MVPP22_AXI_BM_WR_ATTR_REG               0x4100
+#define MVPP22_AXI_BM_RD_ATTR_REG               0x4104
+#define MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG      0x4110
+#define MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG        0x4114
+#define MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG        0x4118
+#define MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG        0x411c
+#define MVPP22_AXI_RX_DATA_WR_ATTR_REG          0x4120
+#define MVPP22_AXI_TX_DATA_RD_ATTR_REG          0x4130
+#define MVPP22_AXI_RD_NORMAL_CODE_REG           0x4150
+#define MVPP22_AXI_RD_SNOOP_CODE_REG            0x4154
+#define MVPP22_AXI_WR_NORMAL_CODE_REG           0x4160
+#define MVPP22_AXI_WR_SNOOP_CODE_REG            0x4164
+
+/* Values for AXI Bridge registers */
+#define MVPP22_AXI_ATTR_CACHE_OFFS              0
+#define MVPP22_AXI_ATTR_DOMAIN_OFFS             12
+
+#define MVPP22_AXI_CODE_CACHE_OFFS              0
+#define MVPP22_AXI_CODE_DOMAIN_OFFS             4
+
+#define MVPP22_AXI_CODE_CACHE_NON_CACHE         0x3
+#define MVPP22_AXI_CODE_CACHE_WR_CACHE          0x7
+#define MVPP22_AXI_CODE_CACHE_RD_CACHE          0xb
+
+#define MVPP22_AXI_CODE_DOMAIN_OUTER_DOM        2
+#define MVPP22_AXI_CODE_DOMAIN_SYSTEM           3
+
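[The *_OFFS values above are shift amounts for placing the cache and domain codes into the AXI registers. A minimal sketch, not part of the patch and with a hypothetical helper name, of how a read-code register value can be composed from these defines:]

static u32 example_axi_rd_code(void)
{
        /* Non-cacheable reads in the system domain, e.g. a value
         * suitable for MVPP22_AXI_RD_NORMAL_CODE_REG
         */
        return (MVPP22_AXI_CODE_CACHE_NON_CACHE << MVPP22_AXI_CODE_CACHE_OFFS) |
               (MVPP22_AXI_CODE_DOMAIN_SYSTEM << MVPP22_AXI_CODE_DOMAIN_OFFS);
}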
 /* Interrupt Cause and Mask registers */
 #define MVPP2_ISR_RX_THRESHOLD_REG(rxq) (0x5200 + 4 * (rxq))
-#define MVPP2_ISR_RXQ_GROUP_REG(rxq)    (0x5400 + 4 * (rxq))
+#define MVPP2_MAX_ISR_RX_THRESHOLD      0xfffff0
+#define MVPP21_ISR_RXQ_GROUP_REG(rxq)   (0x5400 + 4 * (rxq))
+
+#define MVPP22_ISR_RXQ_GROUP_INDEX_REG  0x5400
+#define MVPP22_ISR_RXQ_GROUP_INDEX_SUBGROUP_MASK 0xf
+#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_MASK   0x380
+#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET 7
+
+#define MVPP22_ISR_RXQ_GROUP_INDEX_SUBGROUP_MASK 0xf
+#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_MASK   0x380
+
+#define MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG     0x5404
+#define MVPP22_ISR_RXQ_SUB_GROUP_STARTQ_MASK    0x1f
+#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_MASK      0xf00
+#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET    8
+
 #define MVPP2_ISR_ENABLE_REG(port)      (0x5420 + 4 * (port))
 #define MVPP2_ISR_ENABLE_INTERRUPT(mask)        ((mask) & 0xffff)
 #define MVPP2_ISR_DISABLE_INTERRUPT(mask)       (((mask) << 16) & 0xffff0000)
@@ -209,14 +254,19 @@
 #define MVPP2_BM_PHY_ALLOC_REG(pool)    (0x6400 + ((pool) * 4))
 #define MVPP2_BM_PHY_ALLOC_GRNTD_MASK   BIT(0)
 #define MVPP2_BM_VIRT_ALLOC_REG         0x6440
+#define MVPP22_BM_ADDR_HIGH_ALLOC       0x6444
+#define MVPP22_BM_ADDR_HIGH_PHYS_MASK   0xff
+#define MVPP22_BM_ADDR_HIGH_VIRT_MASK   0xff00
+#define MVPP22_BM_ADDR_HIGH_VIRT_SHIFT  8
 #define MVPP2_BM_PHY_RLS_REG(pool)      (0x6480 + ((pool) * 4))
 #define MVPP2_BM_PHY_RLS_MC_BUFF_MASK   BIT(0)
 #define MVPP2_BM_PHY_RLS_PRIO_EN_MASK   BIT(1)
 #define MVPP2_BM_PHY_RLS_GRNTD_MASK     BIT(2)
 #define MVPP2_BM_VIRT_RLS_REG           0x64c0
-#define MVPP2_BM_MC_RLS_REG             0x64c4
-#define MVPP2_BM_MC_ID_MASK             0xfff
-#define MVPP2_BM_FORCE_RELEASE_MASK     BIT(12)
+#define MVPP22_BM_ADDR_HIGH_RLS_REG     0x64c4
+#define MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK       0xff
+#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK       0xff00
+#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT      8
 
 /* TX Scheduler registers */
 #define MVPP2_TXP_SCHED_PORT_INDEX_REG  0x8000
@@ -252,12 +302,8 @@
 #define MVPP2_SRC_ADDR_HIGH             0x28
 #define MVPP2_PHY_AN_CFG0_REG           0x34
 #define MVPP2_PHY_AN_STOP_SMI0_MASK     BIT(7)
-#define MVPP2_MIB_COUNTERS_BASE(port)   (0x1000 + ((port) >> 1) * \
-                                        0x400 + (port) * 0x400)
-#define MVPP2_MIB_LATE_COLLISION        0x7c
-#define MVPP2_ISR_SUM_MASK_REG          0x220c
 #define MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG      0x305c
-#define MVPP2_EXT_GLOBAL_CTRL_DEFAULT   0x27
+#define MVPP2_EXT_GLOBAL_CTRL_DEFAULT   0x27
 
 /* Per-port registers */
 #define MVPP2_GMAC_CTRL_0_REG           0x0
@@ -290,6 +336,30 @@
 #define MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK      0x1fc0
 #define MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(v)       (((v) << 6) & \
                                         MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK)
+#define MVPP22_GMAC_CTRL_4_REG          0x90
+#define MVPP22_CTRL4_EXT_PIN_GMII_SEL   BIT(0)
+#define MVPP22_CTRL4_DP_CLK_SEL         BIT(5)
+#define MVPP22_CTRL4_SYNC_BYPASS        BIT(6)
+#define MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE       BIT(7)
+
+/* Per-port XGMAC registers. PPv2.2 only, only for GOP port 0,
+ * relative to port->base.
+ */
+#define MVPP22_XLG_CTRL0_REG            0x100
+#define MVPP22_XLG_CTRL0_PORT_EN        BIT(0)
+#define MVPP22_XLG_CTRL0_MAC_RESET_DIS  BIT(1)
+#define MVPP22_XLG_CTRL0_MIB_CNT_DIS    BIT(14)
+
+#define MVPP22_XLG_CTRL3_REG            0x11c
+#define MVPP22_XLG_CTRL3_MACMODESELECT_MASK     (7 << 13)
+#define MVPP22_XLG_CTRL3_MACMODESELECT_GMAC     (0 << 13)
+#define MVPP22_XLG_CTRL3_MACMODESELECT_10G      (1 << 13)
+
+/* SMI registers. PPv2.2 only, relative to priv->iface_base.
+ */
+#define MVPP22_SMI_MISC_CFG_REG         0x1204
+#define MVPP22_SMI_POLLING_EN           BIT(10)
+
+#define MVPP22_GMAC_BASE(port)  (0x7000 + (port) * 0x1000 + 0xe00)
 
 #define MVPP2_CAUSE_TXQ_SENT_DESC_ALL_MASK      0xff
 
@@ -338,15 +408,9 @@
 /* Maximum number of TXQs used by single port */
 #define MVPP2_MAX_TXQ                   8
 
-/* Maximum number of RXQs used by single port */
-#define MVPP2_MAX_RXQ                   8
-
 /* Default number of RXQs in use */
 #define MVPP2_DEFAULT_RXQ               4
 
-/* Total number of RXQs available to all ports */
-#define MVPP2_RXQ_TOTAL_NUM     (MVPP2_MAX_PORTS * MVPP2_MAX_RXQ)
-
 /* Max number of Rx descriptors */
 #define MVPP2_MAX_RXD                   128
 
@@ -513,28 +577,28 @@ enum mvpp2_tag_type {
 /* Sram result info bits assignment */
 #define MVPP2_PRS_RI_MAC_ME_MASK                0x1
 #define MVPP2_PRS_RI_DSA_MASK                   0x2
-#define MVPP2_PRS_RI_VLAN_MASK                  0xc
-#define MVPP2_PRS_RI_VLAN_NONE                  ~(BIT(2) | BIT(3))
+#define MVPP2_PRS_RI_VLAN_MASK                  (BIT(2) | BIT(3))
+#define MVPP2_PRS_RI_VLAN_NONE                  0x0
 #define MVPP2_PRS_RI_VLAN_SINGLE                BIT(2)
 #define MVPP2_PRS_RI_VLAN_DOUBLE                BIT(3)
 #define MVPP2_PRS_RI_VLAN_TRIPLE                (BIT(2) | BIT(3))
 #define MVPP2_PRS_RI_CPU_CODE_MASK              0x70
 #define MVPP2_PRS_RI_CPU_CODE_RX_SPEC           BIT(4)
-#define MVPP2_PRS_RI_L2_CAST_MASK               0x600
-#define MVPP2_PRS_RI_L2_UCAST                   ~(BIT(9) | BIT(10))
+#define MVPP2_PRS_RI_L2_CAST_MASK               (BIT(9) | BIT(10))
+#define MVPP2_PRS_RI_L2_UCAST                   0x0
 #define MVPP2_PRS_RI_L2_MCAST                   BIT(9)
 #define MVPP2_PRS_RI_L2_BCAST                   BIT(10)
 #define MVPP2_PRS_RI_PPPOE_MASK                 0x800
-#define MVPP2_PRS_RI_L3_PROTO_MASK              0x7000
-#define MVPP2_PRS_RI_L3_UN                      ~(BIT(12) | BIT(13) | BIT(14))
+#define MVPP2_PRS_RI_L3_PROTO_MASK              (BIT(12) | BIT(13) | BIT(14))
+#define MVPP2_PRS_RI_L3_UN                      0x0
 #define MVPP2_PRS_RI_L3_IP4                     BIT(12)
 #define MVPP2_PRS_RI_L3_IP4_OPT                 BIT(13)
 #define MVPP2_PRS_RI_L3_IP4_OTHER               (BIT(12) | BIT(13))
 #define MVPP2_PRS_RI_L3_IP6                     BIT(14)
 #define MVPP2_PRS_RI_L3_IP6_EXT                 (BIT(12) | BIT(14))
 #define MVPP2_PRS_RI_L3_ARP                     (BIT(13) | BIT(14))
-#define MVPP2_PRS_RI_L3_ADDR_MASK               0x18000
-#define MVPP2_PRS_RI_L3_UCAST                   ~(BIT(15) | BIT(16))
+#define MVPP2_PRS_RI_L3_ADDR_MASK               (BIT(15) | BIT(16))
+#define MVPP2_PRS_RI_L3_UCAST                   0x0
 #define MVPP2_PRS_RI_L3_MCAST                   BIT(15)
 #define MVPP2_PRS_RI_L3_BCAST                   (BIT(15) | BIT(16))
 #define MVPP2_PRS_RI_IP_FRAG_MASK               0x20000
@@ -618,6 +682,11 @@ enum mvpp2_prs_l3_cast {
  */
 #define MVPP2_BM_SHORT_PKT_SIZE MVPP2_RX_MAX_PKT_SIZE(512)
 
+#define MVPP21_ADDR_SPACE_SZ            0
+#define MVPP22_ADDR_SPACE_SZ            SZ_64K
+
+#define MVPP2_MAX_CPUS                  4
+
 enum mvpp2_bm_type {
        MVPP2_BM_FREE,
        MVPP2_BM_SWF_LONG,
@@ -629,12 +698,19 @@ enum mvpp2_bm_type {
 /* Shared Packet Processor resources */
 struct mvpp2 {
        /* Shared registers' base addresses */
-       void __iomem *base;
        void __iomem *lms_base;
+       void __iomem *iface_base;
+
+       /* On PPv2.2, each CPU can access the base register through a
+        * separate address space, each 64 KB apart from each
+        * other.
+        */
+       void __iomem *cpu_base[MVPP2_MAX_CPUS];
 
        /* Common clocks */
        struct clk *pp_clk;
        struct clk *gop_clk;
+       struct clk *mg_clk;
 
        /* List of pointers to port structures */
        struct mvpp2_port **port_list;
@@ -652,6 +728,12 @@ struct mvpp2 {
 
        /* Tclk value */
        u32 tclk;
+
+       /* HW version */
+       enum { MVPP21, MVPP22 } hw_version;
+
+       /* Maximum number of RXQs per port */
+       unsigned int max_port_rxqs;
 };
 
 struct mvpp2_pcpu_stats {
@@ -673,6 +755,11 @@ struct mvpp2_port_pcpu {
 struct mvpp2_port {
        u8 id;
 
+       /* Index of the port from the "group of ports" complex point
+        * of view
+        */
+       int gop_id;
+
        int irq;
 
        struct mvpp2 *priv;
@@ -744,22 +831,24 @@ struct mvpp2_port {
 #define MVPP2_RXD_L3_IP6                BIT(30)
 #define MVPP2_RXD_BUF_HDR               BIT(31)
 
-struct mvpp2_tx_desc {
+/* HW TX descriptor for PPv2.1 */
+struct mvpp21_tx_desc {
        u32 command;            /* Options used by HW for packet transmitting.*/
        u8  packet_offset;      /* the offset from the buffer beginning */
        u8  phys_txq;           /* destination queue ID */
        u16 data_size;          /* data size of transmitted packet in bytes */
-       u32 buf_phys_addr;      /* physical addr of transmitted buffer */
+       u32 buf_dma_addr;       /* physical addr of transmitted buffer */
        u32 buf_cookie;         /* cookie for access to TX buffer in tx path */
        u32 reserved1[3];       /* hw_cmd (for future use, BM, PON, PNC) */
        u32 reserved2;          /* reserved (for future use) */
 };
 
-struct mvpp2_rx_desc {
+/* HW RX descriptor for PPv2.1 */
+struct mvpp21_rx_desc {
        u32 status;             /* info about received packet */
        u16 reserved1;          /* parser_info (for future use, PnC) */
        u16 data_size;          /* size of received packet in bytes */
-       u32 buf_phys_addr;      /* physical address of the buffer */
+       u32 buf_dma_addr;       /* physical address of the buffer */
        u32 buf_cookie;         /* cookie for access to RX buffer in rx path */
        u16 reserved2;          /* gem_port_id (for future use, PON) */
        u16 reserved3;          /* csum_l4 (for future use, PnC) */
@@ -770,12 +859,51 @@ struct mvpp2_rx_desc {
        u32 reserved8;
 };
 
+/* HW TX descriptor for PPv2.2 */
+struct mvpp22_tx_desc {
+       u32 command;
+       u8  packet_offset;
+       u8  phys_txq;
+       u16 data_size;
+       u64 reserved1;
+       u64 buf_dma_addr_ptp;
+       u64 buf_cookie_misc;
+};
+
+/* HW RX descriptor for PPv2.2 */
+struct mvpp22_rx_desc {
+       u32 status;
+       u16 reserved1;
+       u16 data_size;
+       u32 reserved2;
+       u32 reserved3;
+       u64 buf_dma_addr_key_hash;
+       u64 buf_cookie_misc;
+};
+
+/* Opaque type used by the driver to manipulate the HW TX and RX
+ * descriptors
+ */
+struct mvpp2_tx_desc {
+       union {
+               struct mvpp21_tx_desc pp21;
+               struct mvpp22_tx_desc pp22;
+       };
+};
+
+struct mvpp2_rx_desc {
+       union {
+               struct mvpp21_rx_desc pp21;
+               struct mvpp22_rx_desc pp22;
+       };
+};
+
 struct mvpp2_txq_pcpu_buf {
        /* Transmitted SKB */
        struct sk_buff *skb;
 
        /* Physical address of transmitted buffer */
-       dma_addr_t phys;
+       dma_addr_t dma;
 
        /* Size transmitted */
        size_t size;
@@ -822,16 +950,13 @@ struct mvpp2_tx_queue {
        /* Per-CPU control of physical Tx queues */
        struct mvpp2_txq_pcpu __percpu *pcpu;
 
-       /* Array of transmitted skb */
-       struct sk_buff **tx_skb;
-
        u32 done_pkts_coal;
 
        /* Virtual address of the Tx DMA descriptors array */
        struct mvpp2_tx_desc *descs;
 
        /* DMA address of the Tx DMA descriptors array */
-       dma_addr_t descs_phys;
+       dma_addr_t descs_dma;
 
        /* Index of the last Tx DMA descriptor */
        int last_desc;
@@ -854,7 +979,7 @@ struct mvpp2_rx_queue {
        struct mvpp2_rx_desc *descs;
 
        /* DMA address of the RX DMA descriptors array */
-       dma_addr_t descs_phys;
+       dma_addr_t descs_dma;
 
        /* Index of the last RX DMA descriptor */
        int last_desc;
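[To illustrate the cpu_base[] layout described in the struct comment above, here is a minimal sketch, not part of the patch and with a hypothetical function name, of how the per-CPU windows can be derived from a single ioremap'd base. On PPv2.1 the stride would be MVPP21_ADDR_SPACE_SZ (0), so every entry aliases the same window:]

static void example_setup_cpu_windows(struct mvpp2 *priv, void __iomem *base)
{
        int cpu;

        /* Each CPU gets its own 64 KB copy of the register space */
        for (cpu = 0; cpu < MVPP2_MAX_CPUS; cpu++)
                priv->cpu_base[cpu] = base + cpu * MVPP22_ADDR_SPACE_SZ;
}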
@@ -918,42 +1043,25 @@ struct mvpp2_bm_pool {
        /* Buffer Pointers Pool External (BPPE) size */
        int size;
+       /* BPPE size in bytes */
+       int size_bytes;
        /* Number of buffers for this pool */
        int buf_num;
        /* Pool buffer size */
        int buf_size;
        /* Packet size */
        int pkt_size;
+       int frag_size;
 
        /* BPPE virtual base address */
        u32 *virt_addr;
-       /* BPPE physical base address */
-       dma_addr_t phys_addr;
+       /* BPPE DMA base address */
+       dma_addr_t dma_addr;
 
        /* Ports using BM pool */
        u32 port_map;
-
-       /* Occupied buffers indicator */
-       atomic_t in_use;
-       int in_use_thresh;
 };
 
-struct mvpp2_buff_hdr {
-       u32 next_buff_phys_addr;
-       u32 next_buff_virt_addr;
-       u16 byte_count;
-       u16 info;
-       u8  reserved1;          /* bm_qset (for future use, BM) */
-};
-
-/* Buffer header info bits */
-#define MVPP2_B_HDR_INFO_MC_ID_MASK     0xfff
-#define MVPP2_B_HDR_INFO_MC_ID(info)    ((info) & MVPP2_B_HDR_INFO_MC_ID_MASK)
-#define MVPP2_B_HDR_INFO_LAST_OFFS      12
-#define MVPP2_B_HDR_INFO_LAST_MASK      BIT(12)
-#define MVPP2_B_HDR_INFO_IS_LAST(info) \
-          ((info & MVPP2_B_HDR_INFO_LAST_MASK) >> MVPP2_B_HDR_INFO_LAST_OFFS)
-
 /* Static declarations */
 
 /* Number of RXQs used by single port */
@@ -968,12 +1076,177 @@ static int txq_number = MVPP2_MAX_TXQ;
 
 static void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data)
 {
-       writel(data, priv->base + offset);
+       writel(data, priv->cpu_base[0] + offset);
 }
 
 static u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
 {
-       return readl(priv->base + offset);
+       return readl(priv->cpu_base[0] + offset);
+}
+
+/* These accessors should be used to access:
+ *
+ * - per-CPU registers, where each CPU has its own copy of the
+ *   register.
+ *
+ *   MVPP2_BM_VIRT_ALLOC_REG
+ *   MVPP22_BM_ADDR_HIGH_ALLOC
+ *   MVPP22_BM_ADDR_HIGH_RLS_REG
+ *   MVPP2_BM_VIRT_RLS_REG
+ *   MVPP2_ISR_RX_TX_CAUSE_REG
+ *   MVPP2_ISR_RX_TX_MASK_REG
+ *   MVPP2_TXQ_NUM_REG
+ *   MVPP2_AGGR_TXQ_UPDATE_REG
+ *   MVPP2_TXQ_RSVD_REQ_REG
+ *   MVPP2_TXQ_RSVD_RSLT_REG
+ *   MVPP2_TXQ_SENT_REG
+ *   MVPP2_RXQ_NUM_REG
+ *
+ * - global registers that must be accessed through a specific CPU
+ *   window, because they are related to an access to a per-CPU
+ *   register
+ *
+ *   MVPP2_BM_PHY_ALLOC_REG    (related to MVPP2_BM_VIRT_ALLOC_REG)
+ *   MVPP2_BM_PHY_RLS_REG      (related to MVPP2_BM_VIRT_RLS_REG)
+ *   MVPP2_RXQ_THRESH_REG      (related to MVPP2_RXQ_NUM_REG)
+ *   MVPP2_RXQ_DESC_ADDR_REG   (related to MVPP2_RXQ_NUM_REG)
+ *   MVPP2_RXQ_DESC_SIZE_REG   (related to MVPP2_RXQ_NUM_REG)
+ *   MVPP2_RXQ_INDEX_REG       (related to MVPP2_RXQ_NUM_REG)
+ *   MVPP2_TXQ_PENDING_REG     (related to MVPP2_TXQ_NUM_REG)
+ *   MVPP2_TXQ_DESC_ADDR_REG   (related to MVPP2_TXQ_NUM_REG)
+ *   MVPP2_TXQ_DESC_SIZE_REG   (related to MVPP2_TXQ_NUM_REG)
+ *   MVPP2_TXQ_INDEX_REG       (related to MVPP2_TXQ_NUM_REG)
+ *   MVPP2_TXQ_PREF_BUF_REG    (related to MVPP2_TXQ_NUM_REG)
+ */
+static void mvpp2_percpu_write(struct mvpp2 *priv, int cpu,
+                              u32 offset, u32 data)
+{
+       writel(data, priv->cpu_base[cpu] + offset);
+}
+
+static u32 mvpp2_percpu_read(struct mvpp2 *priv, int cpu,
+                            u32 offset)
+{
+       return readl(priv->cpu_base[cpu] + offset);
+}
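[A short usage sketch, with a hypothetical function name, for the second category in the comment above: an indirect RXQ register access, where the queue is first selected through MVPP2_RXQ_NUM_REG and the related register is then read through the same CPU window, hence the get_cpu()/put_cpu() pairing used throughout the patch:]

static u32 example_read_rxq_desc_addr(struct mvpp2 *priv, int rxq_id)
{
        int cpu = get_cpu();    /* pin to one CPU window */
        u32 val;

        mvpp2_percpu_write(priv, cpu, MVPP2_RXQ_NUM_REG, rxq_id);
        val = mvpp2_percpu_read(priv, cpu, MVPP2_RXQ_DESC_ADDR_REG);
        put_cpu();

        return val;
}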
+
+static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port,
+                                            struct mvpp2_tx_desc *tx_desc)
+{
+       if (port->priv->hw_version == MVPP21)
+               return tx_desc->pp21.buf_dma_addr;
+       else
+               return tx_desc->pp22.buf_dma_addr_ptp & GENMASK_ULL(40, 0);
+}
+
+static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
+                                      struct mvpp2_tx_desc *tx_desc,
+                                      dma_addr_t dma_addr)
+{
+       if (port->priv->hw_version == MVPP21) {
+               tx_desc->pp21.buf_dma_addr = dma_addr;
+       } else {
+               u64 val = (u64)dma_addr;
+
+               tx_desc->pp22.buf_dma_addr_ptp &= ~GENMASK_ULL(40, 0);
+               tx_desc->pp22.buf_dma_addr_ptp |= val;
+       }
+}
+
+static size_t mvpp2_txdesc_size_get(struct mvpp2_port *port,
+                                    struct mvpp2_tx_desc *tx_desc)
+{
+       if (port->priv->hw_version == MVPP21)
+               return tx_desc->pp21.data_size;
+       else
+               return tx_desc->pp22.data_size;
+}
+
+static void mvpp2_txdesc_size_set(struct mvpp2_port *port,
+                                  struct mvpp2_tx_desc *tx_desc,
+                                  size_t size)
+{
+       if (port->priv->hw_version == MVPP21)
+               tx_desc->pp21.data_size = size;
+       else
+               tx_desc->pp22.data_size = size;
+}
+
+static void mvpp2_txdesc_txq_set(struct mvpp2_port *port,
+                                 struct mvpp2_tx_desc *tx_desc,
+                                 unsigned int txq)
+{
+       if (port->priv->hw_version == MVPP21)
+               tx_desc->pp21.phys_txq = txq;
+       else
+               tx_desc->pp22.phys_txq = txq;
+}
+
+static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port,
+                                 struct mvpp2_tx_desc *tx_desc,
+                                 unsigned int command)
+{
+       if (port->priv->hw_version == MVPP21)
+               tx_desc->pp21.command = command;
+       else
+               tx_desc->pp22.command = command;
+}
+
+static void mvpp2_txdesc_offset_set(struct mvpp2_port *port,
+                                    struct mvpp2_tx_desc *tx_desc,
+                                    unsigned int offset)
+{
+       if (port->priv->hw_version == MVPP21)
+               tx_desc->pp21.packet_offset = offset;
+       else
+               tx_desc->pp22.packet_offset = offset;
+}
+
+static unsigned int mvpp2_txdesc_offset_get(struct mvpp2_port *port,
+                                            struct mvpp2_tx_desc *tx_desc)
+{
+       if (port->priv->hw_version == MVPP21)
+               return tx_desc->pp21.packet_offset;
+       else
+               return tx_desc->pp22.packet_offset;
+}
+
+static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port,
+                                            struct mvpp2_rx_desc *rx_desc)
+{
+       if (port->priv->hw_version == MVPP21)
+               return rx_desc->pp21.buf_dma_addr;
+       else
+               return rx_desc->pp22.buf_dma_addr_key_hash & GENMASK_ULL(40, 0);
+}
+
+static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port,
+                                             struct mvpp2_rx_desc *rx_desc)
+{
+       if (port->priv->hw_version == MVPP21)
+               return rx_desc->pp21.buf_cookie;
+       else
+               return rx_desc->pp22.buf_cookie_misc & GENMASK_ULL(40, 0);
+}
+
+static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port,
+                                    struct mvpp2_rx_desc *rx_desc)
+{
+       if (port->priv->hw_version == MVPP21)
+               return rx_desc->pp21.data_size;
+       else
+               return rx_desc->pp22.data_size;
+}
+
+static u32 mvpp2_rxdesc_status_get(struct mvpp2_port *port,
+                                   struct mvpp2_rx_desc *rx_desc)
+{
+       if (port->priv->hw_version == MVPP21)
+               return rx_desc->pp21.status;
+       else
+               return rx_desc->pp22.status;
 }
 
 static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
@@ -983,15 +1256,17 @@ static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
        txq_pcpu->txq_get_index = 0;
 }
 
-static void mvpp2_txq_inc_put(struct mvpp2_txq_pcpu *txq_pcpu,
+static void mvpp2_txq_inc_put(struct mvpp2_port *port,
+                              struct mvpp2_txq_pcpu *txq_pcpu,
                              struct sk_buff *skb,
                              struct mvpp2_tx_desc *tx_desc)
 {
        struct mvpp2_txq_pcpu_buf *tx_buf =
                txq_pcpu->buffs + txq_pcpu->txq_put_index;
        tx_buf->skb = skb;
-       tx_buf->size = tx_desc->data_size;
-       tx_buf->phys = tx_desc->buf_phys_addr;
+       tx_buf->size = mvpp2_txdesc_size_get(port, tx_desc);
+       tx_buf->dma = mvpp2_txdesc_dma_addr_get(port, tx_desc) +
+               mvpp2_txdesc_offset_get(port, tx_desc);
        txq_pcpu->txq_put_index++;
        if (txq_pcpu->txq_put_index == txq_pcpu->size)
                txq_pcpu->txq_put_index = 0;
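[The PPv2.2 getters above mask with GENMASK_ULL(40, 0) because buf_dma_addr_ptp and buf_cookie_misc pack a 41-bit address field together with unrelated control bits in the upper part of the 64-bit word. A small sketch, with a hypothetical function name, of the round-trip through the setter and getter:]

static void example_txdesc_addr_roundtrip(struct mvpp2_port *port,
                                          struct mvpp2_tx_desc *desc)
{
        dma_addr_t addr = 0x123456780ULL;       /* 33-bit example address */

        if (port->priv->hw_version != MVPP22)
                return;         /* PPv2.1 descriptors hold 32-bit addresses */

        mvpp2_txdesc_dma_addr_set(port, desc, addr);

        /* The getter strips everything above bit 40 (PTP bits) */
        WARN_ON(mvpp2_txdesc_dma_addr_get(port, desc) != addr);
}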
@@ -1420,7 +1695,7 @@ static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
                mvpp2_prs_hw_read(priv, &pe);
        } else {
                /* Entry doesn't exist - create new */
-               memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+               memset(&pe, 0, sizeof(pe));
                mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
                pe.index = MVPP2_PE_DROP_ALL;
@@ -1457,7 +1732,7 @@ static void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, bool add)
                mvpp2_prs_hw_read(priv, &pe);
        } else {
                /* Entry doesn't exist - create new */
-               memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+               memset(&pe, 0, sizeof(pe));
                mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
                pe.index = MVPP2_PE_MAC_PROMISCUOUS;
@@ -1503,7 +1778,7 @@ static void mvpp2_prs_mac_multi_set(struct mvpp2 *priv, int port, int index,
                mvpp2_prs_hw_read(priv, &pe);
        } else {
                /* Entry doesn't exist - create new */
-               memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+               memset(&pe, 0, sizeof(pe));
                mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
                pe.index = index;
@@ -1555,7 +1830,7 @@ static void mvpp2_prs_dsa_tag_set(struct mvpp2 *priv, int port, bool add,
                mvpp2_prs_hw_read(priv, &pe);
        } else {
                /* Entry doesn't exist - create new */
-               memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+               memset(&pe, 0, sizeof(pe));
                mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
                pe.index = tid;
@@ -1618,7 +1893,7 @@ static void mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2 *priv, int port,
                mvpp2_prs_hw_read(priv, &pe);
        } else {
                /* Entry doesn't exist - create new */
-               memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+               memset(&pe, 0, sizeof(pe));
                mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
                pe.index = tid;
@@ -1752,10 +2027,10 @@ static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
 
                if (tid <= tid_aux) {
                        ret = -EINVAL;
-                       goto error;
+                       goto free_pe;
                }
 
-               memset(pe, 0 , sizeof(struct mvpp2_prs_entry));
+               memset(pe, 0, sizeof(*pe));
                mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
                pe->index = tid;
@@ -1784,8 +2059,7 @@ static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
        mvpp2_prs_tcam_port_map_set(pe, port_map);
 
        mvpp2_prs_hw_write(priv, pe);
-
-error:
+free_pe:
        kfree(pe);
 
        return ret;
@@ -1870,7 +2144,7 @@ static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
                ai = mvpp2_prs_double_vlan_ai_free_get(priv);
                if (ai < 0) {
                        ret = ai;
-                       goto error;
+                       goto free_pe;
                }
 
                /* Get first single/triple vlan tid */
@@ -1893,10 +2167,10 @@ static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
 
                if (tid >= tid_aux) {
                        ret = -ERANGE;
-                       goto error;
+                       goto free_pe;
                }
 
-               memset(pe, 0, sizeof(struct mvpp2_prs_entry));
+               memset(pe, 0, sizeof(*pe));
                mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
                pe->index = tid;
@@ -1920,8 +2194,7 @@ static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
        /* Update ports' mask */
        mvpp2_prs_tcam_port_map_set(pe, port_map);
        mvpp2_prs_hw_write(priv, pe);
-
-error:
+free_pe:
        kfree(pe);
        return ret;
 }
@@ -1943,7 +2216,7 @@ static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto,
        if (tid < 0)
                return tid;
 
-       memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+       memset(&pe, 0, sizeof(pe));
        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
        pe.index = tid;
@@ -2001,7 +2274,7 @@ static int mvpp2_prs_ip4_cast(struct mvpp2 *priv, unsigned short l3_cast)
        if (tid < 0)
                return tid;
 
-       memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+       memset(&pe, 0, sizeof(pe));
        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
        pe.index = tid;
@@ -2057,7 +2330,7 @@ static int mvpp2_prs_ip6_proto(struct mvpp2 *priv, unsigned short proto,
        if (tid < 0)
                return tid;
 
-       memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+       memset(&pe, 0, sizeof(pe));
        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
        pe.index = tid;
@@ -2096,7 +2369,7 @@ static int mvpp2_prs_ip6_cast(struct mvpp2 *priv, unsigned short l3_cast)
        if (tid < 0)
                return tid;
 
-       memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+       memset(&pe, 0, sizeof(pe));
        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
        pe.index = tid;
@@ -2156,7 +2429,7 @@ static void mvpp2_prs_def_flow_init(struct mvpp2 *priv)
        int port;
 
        for (port = 0; port < MVPP2_MAX_PORTS; port++) {
-               memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+               memset(&pe, 0, sizeof(pe));
                mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
                pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - port;
@@ -2178,7 +2451,7 @@ static void mvpp2_prs_mh_init(struct mvpp2 *priv)
 {
        struct mvpp2_prs_entry pe;
 
-       memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+       memset(&pe, 0, sizeof(pe));
 
        pe.index = MVPP2_PE_MH_DEFAULT;
        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
@@ -2201,7 +2474,7 @@ static void mvpp2_prs_mac_init(struct mvpp2 *priv)
 {
        struct mvpp2_prs_entry pe;
 
-       memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+       memset(&pe, 0, sizeof(pe));
 
        /* Non-promiscuous mode for all ports - DROP unknown packets */
        pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS;
@@ -2262,7 +2535,7 @@ static void mvpp2_prs_dsa_init(struct mvpp2 *priv)
                              MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
 
        /* Set default entry, in case DSA or EDSA tag not found */
-       memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+       memset(&pe, 0, sizeof(pe));
        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
        pe.index = MVPP2_PE_DSA_DEFAULT;
        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
@@ -2292,7 +2565,7 @@ static int mvpp2_prs_etype_init(struct mvpp2 *priv)
        if (tid < 0)
                return tid;
 
-       memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+       memset(&pe, 0, sizeof(pe));
        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
        pe.index = tid;
@@ -2318,7 +2591,7 @@ static int mvpp2_prs_etype_init(struct mvpp2 *priv)
        if (tid < 0)
                return tid;
 
-       memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+       memset(&pe, 0, sizeof(pe));
        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
        pe.index = tid;
@@ -2348,7 +2621,7 @@ static int mvpp2_prs_etype_init(struct mvpp2 *priv)
        if (tid < 0)
                return tid;
 
-       memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+       memset(&pe, 0, sizeof(pe));
        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
        pe.index = tid;
@@ -2382,7 +2655,7 @@ static int mvpp2_prs_etype_init(struct mvpp2 *priv)
        if (tid < 0)
                return tid;
 
-       memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+       memset(&pe, 0, sizeof(pe));
        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
        pe.index = tid;
@@ -2447,7 +2720,7 @@ static int mvpp2_prs_etype_init(struct mvpp2 *priv)
        if (tid < 0)
                return tid;
 
-       memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+       memset(&pe, 0, sizeof(pe));
        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
        pe.index = tid;
@@ -2544,7 +2817,7 @@ static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv)
                return err;
 
        /* Set default double vlan entry */
-       memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+       memset(&pe, 0, sizeof(pe));
        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
        pe.index = MVPP2_PE_VLAN_DBL;
@@ -2564,7 +2837,7 @@ static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv)
        mvpp2_prs_hw_write(priv, &pe);
 
        /* Set default vlan none entry */
-       memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+       memset(&pe, 0, sizeof(pe));
        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
        pe.index = MVPP2_PE_VLAN_NONE;
@@ -2594,7 +2867,7 @@ static int mvpp2_prs_pppoe_init(struct mvpp2 *priv)
        if (tid < 0)
                return tid;
 
-       memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+       memset(&pe, 0, sizeof(pe));
        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
        pe.index = tid;
@@ -2644,7 +2917,7 @@ static int mvpp2_prs_pppoe_init(struct mvpp2 *priv)
        if (tid < 0)
                return tid;
 
-       memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+       memset(&pe, 0, sizeof(pe));
        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
        pe.index = tid;
@@ -2671,7 +2944,7 @@ static int mvpp2_prs_pppoe_init(struct mvpp2 *priv)
        if (tid < 0)
                return tid;
 
-       memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+       memset(&pe, 0, sizeof(pe));
        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
        pe.index = tid;
@@ -2729,7 +3002,7 @@ static int mvpp2_prs_ip4_init(struct mvpp2 *priv)
                return err;
 
        /* Default IPv4 entry for unknown protocols */
-       memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+       memset(&pe, 0, sizeof(pe));
        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
        pe.index = MVPP2_PE_IP4_PROTO_UN;
@@ -2754,7 +3027,7 @@ static int mvpp2_prs_ip4_init(struct mvpp2 *priv)
        mvpp2_prs_hw_write(priv, &pe);
 
        /* Default IPv4 entry for unicast address */
-       memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+       memset(&pe, 0, sizeof(pe));
        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
        pe.index = MVPP2_PE_IP4_ADDR_UN;
@@ -2822,7 +3095,7 @@ static int mvpp2_prs_ip6_init(struct mvpp2 *priv)
        if (tid < 0)
                return tid;
 
-       memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+       memset(&pe, 0, sizeof(pe));
        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
        pe.index = tid;
@@ -2843,7 +3116,7 @@ static int mvpp2_prs_ip6_init(struct mvpp2 *priv)
        mvpp2_prs_hw_write(priv, &pe);
 
        /* Default IPv6 entry for unknown protocols */
-       memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+       memset(&pe, 0, sizeof(pe));
        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
        pe.index = MVPP2_PE_IP6_PROTO_UN;
@@ -2936,7 +3209,7 @@ static int mvpp2_prs_default_init(struct platform_device *pdev,
                mvpp2_prs_hw_inv(priv, index);
 
        priv->prs_shadow = devm_kcalloc(&pdev->dev, MVPP2_PRS_TCAM_SRAM_SIZE,
-                                       sizeof(struct mvpp2_prs_shadow),
+                                       sizeof(*priv->prs_shadow),
                                        GFP_KERNEL);
        if (!priv->prs_shadow)
                return -ENOMEM;
@@ -3364,6 +3637,22 @@ static void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port)
        mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val);
 }
 
+static void *mvpp2_frag_alloc(const struct mvpp2_bm_pool *pool)
+{
+       if (likely(pool->frag_size <= PAGE_SIZE))
+               return netdev_alloc_frag(pool->frag_size);
+       else
+               return kmalloc(pool->frag_size, GFP_ATOMIC);
+}
+
+static void mvpp2_frag_free(const struct mvpp2_bm_pool *pool, void *data)
+{
+       if (likely(pool->frag_size <= PAGE_SIZE))
+               skb_free_frag(data);
+       else
+               kfree(data);
+}
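[mvpp2_frag_alloc() falls back to kmalloc() because netdev_alloc_frag() is only suitable for allocations that fit within a page. A sketch, with a hypothetical helper name, of how a pool's frag_size is derived from its packet size, mirroring the assignments made later in mvpp2_bm_pool_use() and mvpp2_bm_update_mtu():]

static void example_set_pool_frag_size(struct mvpp2_bm_pool *pool,
                                       int pkt_size)
{
        pool->pkt_size = pkt_size;
        /* aligned RX data area plus room for the skb_shared_info that
         * the RX path places at the end of the fragment
         */
        pool->frag_size = SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
                          MVPP2_SKB_SHINFO_SIZE;
}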
 
 /* Buffer Manager configuration routines */
 
 /* Create pool */
@@ -3371,26 +3660,39 @@ static int mvpp2_bm_pool_create(struct platform_device *pdev,
                                struct mvpp2 *priv,
                                struct mvpp2_bm_pool *bm_pool, int size)
 {
-       int size_bytes;
        u32 val;
 
-       size_bytes = sizeof(u32) * size;
-       bm_pool->virt_addr = dma_alloc_coherent(&pdev->dev, size_bytes,
-                                               &bm_pool->phys_addr,
+       /* Number of buffer pointers must be a multiple of 16, as per
+        * hardware constraints
+        */
+       if (!IS_ALIGNED(size, 16))
+               return -EINVAL;
+
+       /* PPv2.1 needs 8 bytes per buffer pointer, PPv2.2 needs 16
+        * bytes per buffer pointer
+        */
+       if (priv->hw_version == MVPP21)
+               bm_pool->size_bytes = 2 * sizeof(u32) * size;
+       else
+               bm_pool->size_bytes = 2 * sizeof(u64) * size;
+
+       bm_pool->virt_addr = dma_alloc_coherent(&pdev->dev, bm_pool->size_bytes,
+                                               &bm_pool->dma_addr,
                                                GFP_KERNEL);
        if (!bm_pool->virt_addr)
                return -ENOMEM;
 
-       if (!IS_ALIGNED((u32)bm_pool->virt_addr, MVPP2_BM_POOL_PTR_ALIGN)) {
-               dma_free_coherent(&pdev->dev, size_bytes, bm_pool->virt_addr,
-                                 bm_pool->phys_addr);
+       if (!IS_ALIGNED((unsigned long)bm_pool->virt_addr,
+                       MVPP2_BM_POOL_PTR_ALIGN)) {
+               dma_free_coherent(&pdev->dev, bm_pool->size_bytes,
+                                 bm_pool->virt_addr, bm_pool->dma_addr);
                dev_err(&pdev->dev, "BM pool %d is not %d bytes aligned\n",
                        bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN);
                return -ENOMEM;
        }
 
        mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id),
-                   bm_pool->phys_addr);
+                   lower_32_bits(bm_pool->dma_addr));
        mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size);
 
        val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
@@ -3401,7 +3703,6 @@ static int mvpp2_bm_pool_create(struct platform_device *pdev,
        bm_pool->size = size;
        bm_pool->pkt_size = 0;
        bm_pool->buf_num = 0;
-       atomic_set(&bm_pool->in_use, 0);
 
        return 0;
 }
@@ -3419,6 +3720,36 @@ static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv,
        mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val);
 }
 
+static void mvpp2_bm_bufs_get_addrs(struct device *dev, struct mvpp2 *priv,
+                                    struct mvpp2_bm_pool *bm_pool,
+                                    dma_addr_t *dma_addr,
+                                    phys_addr_t *phys_addr)
+{
+       int cpu = get_cpu();
+
+       *dma_addr = mvpp2_percpu_read(priv, cpu,
+                                     MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
+       *phys_addr = mvpp2_percpu_read(priv, cpu, MVPP2_BM_VIRT_ALLOC_REG);
+
+       if (priv->hw_version == MVPP22) {
+               u32 val;
+               u32 dma_addr_highbits, phys_addr_highbits;
+
+               val = mvpp2_percpu_read(priv, cpu, MVPP22_BM_ADDR_HIGH_ALLOC);
+               dma_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_PHYS_MASK);
+               phys_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_VIRT_MASK) >>
+                       MVPP22_BM_ADDR_HIGH_VIRT_SHIFT;
+
+               if (sizeof(dma_addr_t) == 8)
+                       *dma_addr |= (u64)dma_addr_highbits << 32;
+
+               if (sizeof(phys_addr_t) == 8)
+                       *phys_addr |= (u64)phys_addr_highbits << 32;
+       }
+
+       put_cpu();
+}
+
 /* Free all buffers from the pool */
 static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv,
                               struct mvpp2_bm_pool *bm_pool)
@@ -3426,20 +3757,21 @@ static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv,
        int i;
 
        for (i = 0; i < bm_pool->buf_num; i++) {
-               dma_addr_t buf_phys_addr;
-               u32 vaddr;
+               dma_addr_t buf_dma_addr;
+               phys_addr_t buf_phys_addr;
+               void *data;
 
-               /* Get buffer virtual address (indirect access) */
-               buf_phys_addr = mvpp2_read(priv,
-                                          MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
-               vaddr = mvpp2_read(priv, MVPP2_BM_VIRT_ALLOC_REG);
+               mvpp2_bm_bufs_get_addrs(dev, priv, bm_pool,
+                                       &buf_dma_addr, &buf_phys_addr);
 
-               dma_unmap_single(dev, buf_phys_addr,
+               dma_unmap_single(dev, buf_dma_addr,
                                 bm_pool->buf_size, DMA_FROM_DEVICE);
 
-               if (!vaddr)
+               data = (void *)phys_to_virt(buf_phys_addr);
+               if (!data)
                        break;
-               dev_kfree_skb_any((struct sk_buff *)vaddr);
+
+               mvpp2_frag_free(bm_pool, data);
        }
 
        /* Update BM driver with number of buffers removed from pool */
@@ -3463,9 +3795,9 @@ static int mvpp2_bm_pool_destroy(struct platform_device *pdev,
        val |= MVPP2_BM_STOP_MASK;
        mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
 
-       dma_free_coherent(&pdev->dev, sizeof(u32) * bm_pool->size,
+       dma_free_coherent(&pdev->dev, bm_pool->size_bytes,
                          bm_pool->virt_addr,
-                         bm_pool->phys_addr);
+                         bm_pool->dma_addr);
        return 0;
 }
 
@@ -3507,7 +3839,7 @@ static int mvpp2_bm_init(struct platform_device *pdev, struct mvpp2 *priv)
 
        /* Allocate and initialize BM pools */
        priv->bm_pools = devm_kcalloc(&pdev->dev, MVPP2_BM_POOLS_NUM,
                                      sizeof(*priv->bm_pools), GFP_KERNEL);
        if (!priv->bm_pools)
                return -ENOMEM;
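[A worked example of the BPPE sizing rules enforced in mvpp2_bm_pool_create() above, as a hypothetical helper: a 1024-entry pool occupies 2 * 4 * 1024 = 8 KB on PPv2.1 (two u32 words per buffer pointer) and 2 * 8 * 1024 = 16 KB on PPv2.2 (two u64 words):]

static int example_bppe_bytes(const struct mvpp2 *priv, int size)
{
        /* pool sizes must be a multiple of 16 buffer pointers */
        if (!IS_ALIGNED(size, 16))
                return -EINVAL;

        return (priv->hw_version == MVPP21 ? 2 * sizeof(u32)
                                           : 2 * sizeof(u64)) * size;
}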
@@ -3521,17 +3853,20 @@ static int mvpp2_bm_init(struct platform_device *pdev, struct mvpp2 *priv)
 static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
                                    int lrxq, int long_pool)
 {
-       u32 val;
+       u32 val, mask;
        int prxq;
 
        /* Get queue physical ID */
        prxq = port->rxqs[lrxq]->id;
 
-       val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
-       val &= ~MVPP2_RXQ_POOL_LONG_MASK;
-       val |= ((long_pool << MVPP2_RXQ_POOL_LONG_OFFS) &
-               MVPP2_RXQ_POOL_LONG_MASK);
+       if (port->priv->hw_version == MVPP21)
+               mask = MVPP21_RXQ_POOL_LONG_MASK;
+       else
+               mask = MVPP22_RXQ_POOL_LONG_MASK;
 
+       val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
+       val &= ~mask;
+       val |= (long_pool << MVPP2_RXQ_POOL_LONG_OFFS) & mask;
        mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
 }
 
@@ -3539,102 +3874,93 @@ static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
 static void mvpp2_rxq_short_pool_set(struct mvpp2_port *port,
                                     int lrxq, int short_pool)
 {
-       u32 val;
+       u32 val, mask;
        int prxq;
 
        /* Get queue physical ID */
        prxq = port->rxqs[lrxq]->id;
 
-       val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
-       val &= ~MVPP2_RXQ_POOL_SHORT_MASK;
-       val |= ((short_pool << MVPP2_RXQ_POOL_SHORT_OFFS) &
-               MVPP2_RXQ_POOL_SHORT_MASK);
+       if (port->priv->hw_version == MVPP21)
+               mask = MVPP21_RXQ_POOL_SHORT_MASK;
+       else
+               mask = MVPP22_RXQ_POOL_SHORT_MASK;
 
+       val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
+       val &= ~mask;
+       val |= (short_pool << MVPP2_RXQ_POOL_SHORT_OFFS) & mask;
        mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
 }
 
-/* Allocate skb for BM pool */
-static struct sk_buff *mvpp2_skb_alloc(struct mvpp2_port *port,
-                                      struct mvpp2_bm_pool *bm_pool,
-                                      dma_addr_t *buf_phys_addr,
-                                      gfp_t gfp_mask)
+static void *mvpp2_buf_alloc(struct mvpp2_port *port,
+                            struct mvpp2_bm_pool *bm_pool,
+                            dma_addr_t *buf_dma_addr,
+                            phys_addr_t *buf_phys_addr,
+                            gfp_t gfp_mask)
 {
-       struct sk_buff *skb;
-       dma_addr_t phys_addr;
+       dma_addr_t dma_addr;
+       void *data;
 
-       skb = __dev_alloc_skb(bm_pool->pkt_size, gfp_mask);
-       if (!skb)
+       data = mvpp2_frag_alloc(bm_pool);
+       if (!data)
                return NULL;
 
-       phys_addr = dma_map_single(port->dev->dev.parent, skb->head,
-                                  MVPP2_RX_BUF_SIZE(bm_pool->pkt_size),
-                                  DMA_FROM_DEVICE);
-       if (unlikely(dma_mapping_error(port->dev->dev.parent, phys_addr))) {
-               dev_kfree_skb_any(skb);
+       dma_addr = dma_map_single(port->dev->dev.parent, data,
+                                 MVPP2_RX_BUF_SIZE(bm_pool->pkt_size),
+                                 DMA_FROM_DEVICE);
+       if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) {
+               mvpp2_frag_free(bm_pool, data);
                return NULL;
        }
-       *buf_phys_addr = phys_addr;
+       *buf_dma_addr = dma_addr;
+       *buf_phys_addr = virt_to_phys(data);
 
-       return skb;
-}
-
-/* Set pool number in a BM cookie */
-static inline u32 mvpp2_bm_cookie_pool_set(u32 cookie, int pool)
-{
-       u32 bm;
-
-       bm = cookie & ~(0xFF << MVPP2_BM_COOKIE_POOL_OFFS);
-       bm |= ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS);
-
-       return bm;
-}
-
-/* Get pool number from a BM cookie */
-static inline int mvpp2_bm_cookie_pool_get(u32 cookie)
-{
-       return (cookie >> MVPP2_BM_COOKIE_POOL_OFFS) & 0xFF;
+       return data;
 }
 
 /* Release buffer to BM */
 static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
-                                    u32 buf_phys_addr, u32 buf_virt_addr)
+                                    dma_addr_t buf_dma_addr,
+                                    phys_addr_t buf_phys_addr)
 {
-       mvpp2_write(port->priv, MVPP2_BM_VIRT_RLS_REG, buf_virt_addr);
-       mvpp2_write(port->priv, MVPP2_BM_PHY_RLS_REG(pool), buf_phys_addr);
-}
+       int cpu = get_cpu();
 
-/* Release multicast buffer */
-static void mvpp2_bm_pool_mc_put(struct mvpp2_port *port, int pool,
-                                u32 buf_phys_addr, u32 buf_virt_addr,
-                                int mc_id)
-{
-       u32 val = 0;
+       if (port->priv->hw_version == MVPP22) {
+               u32 val = 0;
 
-       val |= (mc_id & MVPP2_BM_MC_ID_MASK);
-       mvpp2_write(port->priv, MVPP2_BM_MC_RLS_REG, val);
+               if (sizeof(dma_addr_t) == 8)
+                       val |= upper_32_bits(buf_dma_addr) &
+                               MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK;
 
-       mvpp2_bm_pool_put(port, pool,
-                         buf_phys_addr | MVPP2_BM_PHY_RLS_MC_BUFF_MASK,
-                         buf_virt_addr);
-}
+               if (sizeof(phys_addr_t) == 8)
+                       val |= (upper_32_bits(buf_phys_addr)
+                               << MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) &
+                               MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK;
 
-/* Refill BM pool */
-static void mvpp2_pool_refill(struct mvpp2_port *port, u32 bm,
-                             u32 phys_addr, u32 cookie)
-{
-       int pool = mvpp2_bm_cookie_pool_get(bm);
+               mvpp2_percpu_write(port->priv, cpu,
+                                  MVPP22_BM_ADDR_HIGH_RLS_REG, val);
+       }
+
+       /* MVPP2_BM_VIRT_RLS_REG is not interpreted by HW, and simply
+        * returned in the "cookie" field of the RX
+        * descriptor. Instead of storing the virtual address, we
+        * store the physical address
+        */
+       mvpp2_percpu_write(port->priv, cpu,
+                          MVPP2_BM_VIRT_RLS_REG, buf_phys_addr);
+       mvpp2_percpu_write(port->priv, cpu,
+                          MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr);
 
-       mvpp2_bm_pool_put(port, pool, phys_addr, cookie);
+       put_cpu();
 }
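[On PPv2.2 the release path above splits each 40-bit address between a 32-bit register and the shared high-bits register. A sketch, with a hypothetical helper name, of the bit packing performed for MVPP22_BM_ADDR_HIGH_RLS_REG:]

static u32 example_bm_high_bits(dma_addr_t buf_dma_addr,
                                phys_addr_t buf_phys_addr)
{
        u32 val = 0;

        /* DMA address bits 39:32 land in register bits 7:0 */
        val |= upper_32_bits(buf_dma_addr) & MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK;
        /* physical address bits 39:32 land in register bits 15:8 */
        val |= (upper_32_bits(buf_phys_addr) <<
                MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) &
               MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK;

        return val;
}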
 
 /* Allocate buffers for the pool */
 static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
                             struct mvpp2_bm_pool *bm_pool, int buf_num)
 {
-       struct sk_buff *skb;
        int i, buf_size, total_size;
-       u32 bm;
-       dma_addr_t phys_addr;
+       dma_addr_t dma_addr;
+       phys_addr_t phys_addr;
+       void *buf;
 
        buf_size = MVPP2_RX_BUF_SIZE(bm_pool->pkt_size);
        total_size = MVPP2_RX_TOTAL_SIZE(buf_size);
@@ -3647,18 +3973,18 @@ static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
                return 0;
        }
 
-       bm = mvpp2_bm_cookie_pool_set(0, bm_pool->id);
        for (i = 0; i < buf_num; i++) {
-               skb = mvpp2_skb_alloc(port, bm_pool, &phys_addr, GFP_KERNEL);
-               if (!skb)
+               buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr,
+                                     &phys_addr, GFP_KERNEL);
+               if (!buf)
                        break;
 
-               mvpp2_pool_refill(port, bm, (u32)phys_addr, (u32)skb);
+               mvpp2_bm_pool_put(port, bm_pool->id, dma_addr,
+                                 phys_addr);
        }
 
        /* Update BM driver with number of buffers added to pool */
        bm_pool->buf_num += i;
-       bm_pool->in_use_thresh = bm_pool->buf_num / 4;
 
        netdev_dbg(port->dev,
                   "%s pool %d: pkt_size=%4d, buf_size=%4d, total_size=%4d\n",
@@ -3710,6 +4036,9 @@ mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
                                   port->priv, new_pool);
 
                new_pool->pkt_size = pkt_size;
+               new_pool->frag_size =
+                       SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
+                       MVPP2_SKB_SHINFO_SIZE;
 
                /* Allocate buffers for this pool */
                num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
@@ -3778,6 +4107,8 @@ static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
        }
 
        port_pool->pkt_size = pkt_size;
+       port_pool->frag_size = SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
+               MVPP2_SKB_SHINFO_SIZE;
        num = mvpp2_bm_bufs_add(port, port_pool, pkts_num);
        if (num != pkts_num) {
                WARN(1, "pool %d: %d of %d allocated\n",
@@ -3812,30 +4143,70 @@ static inline void mvpp2_interrupts_disable(struct mvpp2_port *port)
                    MVPP2_ISR_DISABLE_INTERRUPT(cpu_mask));
 }
 
-/* Mask the current CPU's Rx/Tx interrupts */
+/* Mask the current CPU's Rx/Tx interrupts
+ * Called by on_each_cpu(), guaranteed to run with migration disabled,
+ * using smp_processor_id() is OK.
+ */
 static void mvpp2_interrupts_mask(void *arg)
 {
        struct mvpp2_port *port = arg;
 
-       mvpp2_write(port->priv, MVPP2_ISR_RX_TX_MASK_REG(port->id), 0);
+       mvpp2_percpu_write(port->priv, smp_processor_id(),
+                          MVPP2_ISR_RX_TX_MASK_REG(port->id), 0);
 }
 
-/* Unmask the current CPU's Rx/Tx interrupts */
+/* Unmask the current CPU's Rx/Tx interrupts.
+ * Called by on_each_cpu(), guaranteed to run with migration disabled,
+ * using smp_processor_id() is OK.
+ */
 static void mvpp2_interrupts_unmask(void *arg)
 {
        struct mvpp2_port *port = arg;
 
-       mvpp2_write(port->priv, MVPP2_ISR_RX_TX_MASK_REG(port->id),
-                   (MVPP2_CAUSE_MISC_SUM_MASK |
-                    MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK));
+       mvpp2_percpu_write(port->priv, smp_processor_id(),
+                          MVPP2_ISR_RX_TX_MASK_REG(port->id),
+                          (MVPP2_CAUSE_MISC_SUM_MASK |
+                           MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK));
 }
 
 /* Port configuration routines */
 
+static void mvpp22_port_mii_set(struct mvpp2_port *port)
+{
+       u32 val;
+
+       /* Only GOP port 0 has an XLG MAC */
+       if (port->gop_id == 0) {
+               val = readl(port->base + MVPP22_XLG_CTRL3_REG);
+               val &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK;
+
+               if (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
+                   port->phy_interface == PHY_INTERFACE_MODE_10GKR)
+                       val |= MVPP22_XLG_CTRL3_MACMODESELECT_10G;
+               else
+                       val |= MVPP22_XLG_CTRL3_MACMODESELECT_GMAC;
+
+               writel(val, port->base + MVPP22_XLG_CTRL3_REG);
+       }
+
+       val = readl(port->base + MVPP22_GMAC_CTRL_4_REG);
+       if (port->phy_interface == PHY_INTERFACE_MODE_RGMII)
+               val |= MVPP22_CTRL4_EXT_PIN_GMII_SEL;
+       else
+               val &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL;
+       val &= ~MVPP22_CTRL4_DP_CLK_SEL;
+       val |= MVPP22_CTRL4_SYNC_BYPASS;
+       val |= MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
+       writel(val, port->base + MVPP22_GMAC_CTRL_4_REG);
+}
+
 static void mvpp2_port_mii_set(struct mvpp2_port *port)
 {
        u32 val;
 
+       if (port->priv->hw_version == MVPP22)
+               mvpp22_port_mii_set(port);
+
        val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
 
        switch (port->phy_interface) {
@@ -3864,19 +4235,40 @@ static void mvpp2_port_enable(struct mvpp2_port *port)
 {
        u32 val;
 
-       val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
-       val |= MVPP2_GMAC_PORT_EN_MASK;
-       val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
-       writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
+       /* Only GOP port 0 has an XLG MAC */
+       if (port->gop_id == 0 &&
+           (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
+            port->phy_interface == PHY_INTERFACE_MODE_10GKR)) {
+               val = readl(port->base + MVPP22_XLG_CTRL0_REG);
+               val |= MVPP22_XLG_CTRL0_PORT_EN |
+                      MVPP22_XLG_CTRL0_MAC_RESET_DIS;
+               val &= ~MVPP22_XLG_CTRL0_MIB_CNT_DIS;
+               writel(val, port->base + MVPP22_XLG_CTRL0_REG);
+       } else {
+               val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
+               val |= MVPP2_GMAC_PORT_EN_MASK;
+               val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
+               writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
+       }
 }
 
 static void mvpp2_port_disable(struct mvpp2_port *port)
 {
        u32 val;
 
-       val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
-       val &= ~(MVPP2_GMAC_PORT_EN_MASK);
-       writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
+       /* Only GOP port 0 has an XLG MAC */
+       if (port->gop_id == 0 &&
+           (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
+            port->phy_interface == PHY_INTERFACE_MODE_10GKR)) {
+               val = readl(port->base + MVPP22_XLG_CTRL0_REG);
+               val &= ~(MVPP22_XLG_CTRL0_PORT_EN |
+                        MVPP22_XLG_CTRL0_MAC_RESET_DIS);
+               writel(val, port->base + MVPP22_XLG_CTRL0_REG);
+       } else {
+               val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
+               val &= ~(MVPP2_GMAC_PORT_EN_MASK);
+               writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
+       }
 }
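[mvpp2_port_enable() and mvpp2_port_disable() above repeat the same test. As a sketch, with a hypothetical helper name, the condition for driving the XLG MAC rather than the GMAC is:]

static bool example_port_uses_xlg(const struct mvpp2_port *port)
{
        /* only GOP port 0 has an XLG MAC, and only for 10G modes */
        return port->gop_id == 0 &&
               (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
                port->phy_interface == PHY_INTERFACE_MODE_10GKR);
}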
 
 /* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */
@@ -3939,16 +4331,18 @@ static void mvpp2_defaults_set(struct mvpp2_port *port)
 {
        int tx_port_num, val, queue, ptxq, lrxq;
 
-       /* Configure port to loopback if needed */
-       if (port->flags & MVPP2_F_LOOPBACK)
-               mvpp2_port_loopback_set(port);
+       if (port->priv->hw_version == MVPP21) {
+               /* Configure port to loopback if needed */
+               if (port->flags & MVPP2_F_LOOPBACK)
+                       mvpp2_port_loopback_set(port);
 
-       /* Update TX FIFO MIN Threshold */
-       val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
-       val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
-       /* Min. TX threshold must be less than minimal packet length */
-       val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2);
-       writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
+               /* Update TX FIFO MIN Threshold */
+               val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
+               val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
+               /* Min. TX threshold must be less than minimal packet length */
+               val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2);
+               writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
+       }
 
        /* Disable Legacy WRR, Disable EJP, Release from reset */
        tx_port_num = mvpp2_egress_port(port);
@@ -4035,7 +4429,7 @@ static void mvpp2_egress_enable(struct mvpp2_port *port)
        for (queue = 0; queue < txq_number; queue++) {
                struct mvpp2_tx_queue *txq = port->txqs[queue];
 
-               if (txq->descs != NULL)
+               if (txq->descs)
                        qmap |= (1 << queue);
        }
 
@@ -4135,31 +4529,8 @@ static void mvpp2_rxq_offset_set(struct mvpp2_port *port,
        mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
 }
 
-/* Obtain BM cookie information from descriptor */
-static u32 mvpp2_bm_cookie_build(struct mvpp2_rx_desc *rx_desc)
-{
-       int pool = (rx_desc->status & MVPP2_RXD_BM_POOL_ID_MASK) >>
-                  MVPP2_RXD_BM_POOL_ID_OFFS;
-       int cpu = smp_processor_id();
-
-       return ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS) |
-              ((cpu & 0xFF) << MVPP2_BM_COOKIE_CPU_OFFS);
-}
-
 /* Tx descriptors helper methods */
 
-/* Get number of Tx descriptors waiting to be transmitted by HW */
-static int mvpp2_txq_pend_desc_num_get(struct mvpp2_port *port,
-                                      struct mvpp2_tx_queue *txq)
-{
-       u32 val;
-
-       mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
-       val = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG);
-
-       return val & MVPP2_TXQ_PENDING_MASK;
-}
-
 /* Get pointer to next Tx descriptor to be processed (send) by HW */
 static struct mvpp2_tx_desc *
 mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq)
@@ -4170,16 +4541,24 @@ mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq)
        return txq->descs + tx_desc;
 }
 
-/* Update HW with number of aggregated Tx descriptors to be sent */
+/* Update HW with number of aggregated Tx descriptors to be sent
+ *
+ * Called only from mvpp2_tx(), so migration is disabled, using
+ * smp_processor_id() is OK.
+ */
 static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending)
 {
        /* aggregated access - relevant TXQ number is written in TX desc */
-       mvpp2_write(port->priv, MVPP2_AGGR_TXQ_UPDATE_REG, pending);
+       mvpp2_percpu_write(port->priv, smp_processor_id(),
+                          MVPP2_AGGR_TXQ_UPDATE_REG, pending);
 }
 
 /* Check if there are enough free descriptors in aggregated txq.
  * If not, update the number of occupied descriptors and repeat the check.
+ *
+ * Called only from mvpp2_tx(), so migration is disabled, using
+ * smp_processor_id() is OK.
  */
 static int mvpp2_aggr_desc_num_check(struct mvpp2 *priv,
                                     struct mvpp2_tx_queue *aggr_txq, int num)
@@ -4198,16 +4577,22 @@ static int mvpp2_aggr_desc_num_check(struct mvpp2 *priv,
        return 0;
 }
 
-/* Reserved Tx descriptors allocation request */
+/* Reserved Tx descriptors allocation request
+ *
+ * Called only from mvpp2_txq_reserved_desc_num_proc(), itself called
+ * only by mvpp2_tx(), so migration is disabled, using
+ * smp_processor_id() is OK.
+ */
 static int mvpp2_txq_alloc_reserved_desc(struct mvpp2 *priv,
                                         struct mvpp2_tx_queue *txq, int num)
 {
        u32 val;
+       int cpu = smp_processor_id();
 
        val = (txq->id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | num;
-       mvpp2_write(priv, MVPP2_TXQ_RSVD_REQ_REG, val);
+       mvpp2_percpu_write(priv, cpu, MVPP2_TXQ_RSVD_REQ_REG, val);
 
-       val = mvpp2_read(priv, MVPP2_TXQ_RSVD_RSLT_REG);
+       val = mvpp2_percpu_read(priv, cpu, MVPP2_TXQ_RSVD_RSLT_REG);
 
        return val & MVPP2_TXQ_RSVD_RSLT_MASK;
 }
@@ -4301,6 +4686,10 @@ static u32 mvpp2_txq_desc_csum(int l3_offs, int l3_proto,
 /* Get number of sent descriptors and decrement counter.
  * The number of sent descriptors is returned.
  * Per-CPU access
+ *
+ * Called only from mvpp2_txq_done(), called from mvpp2_tx()
+ * (migration disabled) and from the TX completion tasklet (migration
+ * disabled) so using smp_processor_id() is OK.
  */
 static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port,
                                           struct mvpp2_tx_queue *txq)
@@ -4308,12 +4697,16 @@ static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port,
        u32 val;
 
        /* Reading status reg resets transmitted descriptor counter */
-       val = mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(txq->id));
+       val = mvpp2_percpu_read(port->priv, smp_processor_id(),
+                               MVPP2_TXQ_SENT_REG(txq->id));
 
        return (val & MVPP2_TRANSMITTED_COUNT_MASK) >>
                MVPP2_TRANSMITTED_COUNT_OFFSET;
 }
 
+/* Called through on_each_cpu(), so runs on all CPUs, with migration
+ * disabled, therefore using smp_processor_id() is OK.
+ */
 static void mvpp2_txq_sent_counter_clear(void *arg)
 {
        struct mvpp2_port *port = arg;
@@ -4322,7 +4715,8 @@ static void mvpp2_txq_sent_counter_clear(void *arg)
        for (queue = 0; queue < txq_number; queue++) {
                int id = port->txqs[queue]->id;
 
-               mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(id));
+               mvpp2_percpu_read(port->priv, smp_processor_id(),
+                                 MVPP2_TXQ_SENT_REG(id));
        }
 }
 
@@ -4379,27 +4773,54 @@ static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
  * will be generated by HW.
  */
 static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port,
-                                  struct mvpp2_rx_queue *rxq, u32 pkts)
+                                  struct mvpp2_rx_queue *rxq)
 {
-       u32 val;
+       int cpu = get_cpu();
+
+       if (rxq->pkts_coal > MVPP2_OCCUPIED_THRESH_MASK)
+               rxq->pkts_coal = MVPP2_OCCUPIED_THRESH_MASK;
+
+       mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
+       mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_THRESH_REG,
+                          rxq->pkts_coal);
+
+       put_cpu();
+}
+
+static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz)
+{
+       u64 tmp = (u64)clk_hz * usec;
 
-       val = (pkts & MVPP2_OCCUPIED_THRESH_MASK);
-       mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
-       mvpp2_write(port->priv, MVPP2_RXQ_THRESH_REG, val);
+       do_div(tmp, USEC_PER_SEC);
 
-       rxq->pkts_coal = pkts;
+       return tmp > U32_MAX ? U32_MAX : tmp;
+}
+
+static u32 mvpp2_cycles_to_usec(u32 cycles, unsigned long clk_hz)
+{
+       u64 tmp = (u64)cycles * USEC_PER_SEC;
+
+       do_div(tmp, clk_hz);
+
+       return tmp > U32_MAX ? U32_MAX : tmp;
+}
 
 /* Set the time delay in usec before Rx interrupt */
 static void mvpp2_rx_time_coal_set(struct mvpp2_port *port,
-                                  struct mvpp2_rx_queue *rxq, u32 usec)
+                                  struct mvpp2_rx_queue *rxq)
 {
-       u32 val;
+       unsigned long freq = port->priv->tclk;
+       u32 val = mvpp2_usec_to_cycles(rxq->time_coal, freq);
 
-       val = (port->priv->tclk / USEC_PER_SEC) * usec;
-       mvpp2_write(port->priv, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val);
+       if (val > MVPP2_MAX_ISR_RX_THRESHOLD) {
+               rxq->time_coal =
+                       mvpp2_cycles_to_usec(MVPP2_MAX_ISR_RX_THRESHOLD, freq);
+
+               /* re-evaluate to get actual register value */
+               val = mvpp2_usec_to_cycles(rxq->time_coal, freq);
+       }
 
-       rxq->time_coal = usec;
+       mvpp2_write(port->priv, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val);
 }
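[A worked example for the conversion pair above, assuming a 250 MHz TCLK (the actual rate comes from the clock tree): 100 usec of RX coalescing delay becomes 250000000 * 100 / 1000000 = 25000 TCLK cycles, and converting 25000 cycles back yields 100 usec. As a sketch:]

static void example_coal_conversion(void)
{
        unsigned long tclk = 250000000UL;       /* assumed 250 MHz TCLK */
        u32 cycles = mvpp2_usec_to_cycles(100, tclk);   /* = 25000 */
        u32 usec = mvpp2_cycles_to_usec(cycles, tclk);  /* = 100 */

        WARN_ON(cycles != 25000 || usec != 100);
}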
 
 /* Free Tx queue skbuffs */
@@ -4413,13 +4834,12 @@ static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
                struct mvpp2_txq_pcpu_buf *tx_buf =
                        txq_pcpu->buffs + txq_pcpu->txq_get_index;
 
-               mvpp2_txq_inc_get(txq_pcpu);
-
-               dma_unmap_single(port->dev->dev.parent, tx_buf->phys,
+               dma_unmap_single(port->dev->dev.parent, tx_buf->dma,
                                 tx_buf->size, DMA_TO_DEVICE);
-               if (!tx_buf->skb)
-                       continue;
-               dev_kfree_skb_any(tx_buf->skb);
+               if (tx_buf->skb)
+                       dev_kfree_skb_any(tx_buf->skb);
+
+               mvpp2_txq_inc_get(txq_pcpu);
        }
 }
 
@@ -4492,10 +4912,12 @@ static int mvpp2_aggr_txq_init(struct platform_device *pdev,
                               int desc_num, int cpu,
                               struct mvpp2 *priv)
 {
+       u32 txq_dma;
+
        /* Allocate memory for TX descriptors */
        aggr_txq->descs = dma_alloc_coherent(&pdev->dev,
                                desc_num * MVPP2_DESC_ALIGNED_SIZE,
-                               &aggr_txq->descs_phys, GFP_KERNEL);
+                               &aggr_txq->descs_dma, GFP_KERNEL);
        if (!aggr_txq->descs)
                return -ENOMEM;
 
@@ -4505,10 +4927,16 @@ static int mvpp2_aggr_txq_init(struct platform_device *pdev,
        aggr_txq->next_desc_to_proc = mvpp2_read(priv,
                                                 MVPP2_AGGR_TXQ_INDEX_REG(cpu));
 
-       /* Set Tx descriptors queue starting address */
-       /* indirect access */
-       mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu),
-                   aggr_txq->descs_phys);
+       /* Set Tx descriptors queue starting address indirect
+        * access
+        */
+       if (priv->hw_version == MVPP21)
+               txq_dma = aggr_txq->descs_dma;
+       else
+               txq_dma = aggr_txq->descs_dma >>
+                       MVPP22_AGGR_TXQ_DESC_ADDR_OFFS;
+
+       mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu), txq_dma);
        mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu), desc_num);
 
        return 0;
@@ -4519,12 +4947,15 @@ static int mvpp2_rxq_init(struct mvpp2_port *port,
                          struct mvpp2_rx_queue *rxq)
 {
+       u32 rxq_dma;
+       int cpu;
+
        rxq->size = port->rx_ring_size;
 
        /* Allocate memory for RX descriptors */
        rxq->descs = dma_alloc_coherent(port->dev->dev.parent,
                                        rxq->size * MVPP2_DESC_ALIGNED_SIZE,
-                                       &rxq->descs_phys, GFP_KERNEL);
+                                       &rxq->descs_dma, GFP_KERNEL);
        if (!rxq->descs)
                return -ENOMEM;
 
@@ -4534,17 +4965,23 @@ static int mvpp2_rxq_init(struct mvpp2_port *port,
        mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
 
        /* Set Rx descriptors queue starting address - indirect access */
-       mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
-       mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, rxq->descs_phys);
-       mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
-       mvpp2_write(port->priv, MVPP2_RXQ_INDEX_REG, 0);
+       cpu = get_cpu();
+       mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
+       if (port->priv->hw_version == MVPP21)
+               rxq_dma = rxq->descs_dma;
+       else
+               rxq_dma = rxq->descs_dma >> MVPP22_DESC_ADDR_OFFS;
+       mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma);
+       mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
+       mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_INDEX_REG, 0);
+       put_cpu();
 
        /* Set Offset */
        mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD);
 
        /* Set coalescing pkts and time */
-       mvpp2_rx_pkts_coal_set(port, rxq, rxq->pkts_coal);
-       mvpp2_rx_time_coal_set(port, rxq, rxq->time_coal);
+       mvpp2_rx_pkts_coal_set(port, rxq);
+       mvpp2_rx_time_coal_set(port, rxq);
 
        /* Add number of descriptors ready for receiving packets */
        mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size);
@@ -4564,10 +5001,15 @@ static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port,
 
        for (i = 0; i < rx_received; i++) {
                struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
-               u32 bm = mvpp2_bm_cookie_build(rx_desc);
+               u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
+               int pool;
+
+               pool = (status & MVPP2_RXD_BM_POOL_ID_MASK) >>
+                       MVPP2_RXD_BM_POOL_ID_OFFS;
 
-               mvpp2_pool_refill(port, bm, rx_desc->buf_phys_addr,
-                                 rx_desc->buf_cookie);
+               mvpp2_bm_pool_put(port, pool,
+                                 mvpp2_rxdesc_dma_addr_get(port, rx_desc),
+                                 mvpp2_rxdesc_cookie_get(port, rx_desc));
        }
        mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received);
 }
 
@@ -4576,26 +5018,30 @@ static void mvpp2_rxq_deinit(struct mvpp2_port *port,
                             struct mvpp2_rx_queue *rxq)
 {
+       int cpu;
+
        mvpp2_rxq_drop_pkts(port, rxq);
 
        if (rxq->descs)
                dma_free_coherent(port->dev->dev.parent,
                                  rxq->size * MVPP2_DESC_ALIGNED_SIZE,
                                  rxq->descs,
-                                 rxq->descs_phys);
+                                 rxq->descs_dma);
 
        rxq->descs             = NULL;
        rxq->last_desc         = 0;
        rxq->next_desc_to_proc = 0;
-       rxq->descs_phys        = 0;
+       rxq->descs_dma         = 0;
 
        /* Clear Rx descriptors queue starting address and size;
        * free descriptor number
        */
        mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
-       mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
-       mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, 0);
-       mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, 0);
+       cpu = get_cpu();
+       mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
+       mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, 0);
+       mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, 0);
+       put_cpu();
 }
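[mvpp2_aggr_txq_init() and mvpp2_rxq_init() above write the queue base address shifted right by 8 bits (MVPP22_AGGR_TXQ_DESC_ADDR_OFFS / MVPP22_DESC_ADDR_OFFS) on PPv2.2, which lets a 40-bit, 256-byte-aligned DMA address fit in a 32-bit register. A sketch, with a hypothetical helper name:]

static u32 example_queue_base_reg(struct mvpp2 *priv, dma_addr_t descs_dma)
{
        if (priv->hw_version == MVPP21)
                return descs_dma;

        /* bits 39:8 of the (256-byte aligned) base address */
        return descs_dma >> MVPP22_DESC_ADDR_OFFS;
}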
@@ -4611,23 +5057,25 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
 	/* Allocate memory for Tx descriptors */
 	txq->descs = dma_alloc_coherent(port->dev->dev.parent,
 				txq->size * MVPP2_DESC_ALIGNED_SIZE,
-				&txq->descs_phys, GFP_KERNEL);
+				&txq->descs_dma, GFP_KERNEL);
 	if (!txq->descs)
 		return -ENOMEM;
 
 	txq->last_desc = txq->size - 1;
 
 	/* Set Tx descriptors queue starting address - indirect access */
-	mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
-	mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, txq->descs_phys);
-	mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, txq->size &
-					     MVPP2_TXQ_DESC_SIZE_MASK);
-	mvpp2_write(port->priv, MVPP2_TXQ_INDEX_REG, 0);
-	mvpp2_write(port->priv, MVPP2_TXQ_RSVD_CLR_REG,
-		    txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET);
-	val = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG);
+	cpu = get_cpu();
+	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
+	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG,
+			   txq->descs_dma);
+	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG,
+			   txq->size & MVPP2_TXQ_DESC_SIZE_MASK);
+	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_INDEX_REG, 0);
+	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_RSVD_CLR_REG,
+			   txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET);
+	val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PENDING_REG);
 	val &= ~MVPP2_TXQ_PENDING_MASK;
-	mvpp2_write(port->priv, MVPP2_TXQ_PENDING_REG, val);
+	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PENDING_REG, val);
 
 	/* Calculate base address in prefetch buffer. We reserve 16 descriptors
 	 * for each existing TXQ.
@@ -4638,9 +5086,10 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
 	desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) +
 	       (txq->log_id * desc_per_txq);
 
-	mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG,
-		    MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
-		    MVPP2_PREF_BUF_THRESH(desc_per_txq/2));
+	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG,
+			   MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
+			   MVPP2_PREF_BUF_THRESH(desc_per_txq / 2));
+	put_cpu();
 
 	/* WRR / EJP configuration - indirect access */
 	tx_port_num = mvpp2_egress_port(port);
@@ -4659,11 +5108,11 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
 	for_each_present_cpu(cpu) {
 		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
 		txq_pcpu->size = txq->size;
-		txq_pcpu->buffs = kmalloc(txq_pcpu->size *
-					  sizeof(struct mvpp2_txq_pcpu_buf),
-					  GFP_KERNEL);
+		txq_pcpu->buffs = kmalloc_array(txq_pcpu->size,
+						sizeof(*txq_pcpu->buffs),
+						GFP_KERNEL);
 		if (!txq_pcpu->buffs)
-			goto error;
+			goto cleanup;
 
 		txq_pcpu->count = 0;
 		txq_pcpu->reserved_num = 0;
@@ -4672,8 +5121,7 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
 	}
 
 	return 0;
-
-error:
+cleanup:
 	for_each_present_cpu(cpu) {
 		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
 		kfree(txq_pcpu->buffs);
@@ -4681,7 +5129,7 @@ error:
 	dma_free_coherent(port->dev->dev.parent,
 			  txq->size * MVPP2_DESC_ALIGNED_SIZE,
-			  txq->descs, txq->descs_phys);
+			  txq->descs, txq->descs_dma);
 
 	return -ENOMEM;
 }
@@ -4701,20 +5149,22 @@ static void mvpp2_txq_deinit(struct mvpp2_port *port,
 	if (txq->descs)
 		dma_free_coherent(port->dev->dev.parent,
 				  txq->size * MVPP2_DESC_ALIGNED_SIZE,
-				  txq->descs, txq->descs_phys);
+				  txq->descs, txq->descs_dma);
 
 	txq->descs             = NULL;
 	txq->last_desc         = 0;
 	txq->next_desc_to_proc = 0;
-	txq->descs_phys        = 0;
+	txq->descs_dma         = 0;
 
 	/* Set minimum bandwidth for disabled TXQs */
 	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0);
 
 	/* Set Tx descriptors queue starting address and size */
-	mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
-	mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, 0);
-	mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, 0);
+	cpu = get_cpu();
+	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
+	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG, 0);
+	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG, 0);
+	put_cpu();
 }
 
 /* Cleanup Tx ports */
@@ -4724,10 +5174,11 @@ static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
 	int delay, pending, cpu;
 	u32 val;
 
-	mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
-	val = mvpp2_read(port->priv, MVPP2_TXQ_PREF_BUF_REG);
+	cpu = get_cpu();
+	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
+	val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG);
 	val |= MVPP2_TXQ_DRAIN_EN_MASK;
-	mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val);
+	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val);
 
 	/* The napi queue has been stopped so wait for all packets
 	 * to be transmitted.
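[Editor's note -- illustration, not part of the patch] The hunk below converts the drain loop to the per-CPU pending register. The shape of that loop is worth calling out: enable drain, poll the pending counter with a bounded 1 ms backoff, then clear drain. A sketch, assuming MVPP2_TX_PENDING_TIMEOUT_MSEC is the driver's existing timeout constant:

static int txq_wait_drained_sketch(struct mvpp2_port *port, int cpu)
{
	int delay = 0;
	u32 pending;

	do {
		if (delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC)
			return -ETIMEDOUT;	/* give up, queue is stuck */
		mdelay(1);
		delay++;
		pending = mvpp2_percpu_read(port->priv, cpu,
					    MVPP2_TXQ_PENDING_REG) &
			  MVPP2_TXQ_PENDING_MASK;
	} while (pending);

	return 0;
}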
@@ -4743,11 +5194,14 @@ static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
 		mdelay(1);
 		delay++;
 
-		pending = mvpp2_txq_pend_desc_num_get(port, txq);
+		pending = mvpp2_percpu_read(port->priv, cpu,
+					    MVPP2_TXQ_PENDING_REG);
+		pending &= MVPP2_TXQ_PENDING_MASK;
 	} while (pending);
 
 	val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
-	mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val);
+	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val);
+	put_cpu();
 
 	for_each_present_cpu(cpu) {
 		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
@@ -4956,20 +5410,21 @@ static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer)
 static void mvpp2_rx_error(struct mvpp2_port *port,
 			   struct mvpp2_rx_desc *rx_desc)
 {
-	u32 status = rx_desc->status;
+	u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
+	size_t sz = mvpp2_rxdesc_size_get(port, rx_desc);
 
 	switch (status & MVPP2_RXD_ERR_CODE_MASK) {
 	case MVPP2_RXD_ERR_CRC:
-		netdev_err(port->dev, "bad rx status %08x (crc error), size=%d\n",
-			   status, rx_desc->data_size);
+		netdev_err(port->dev, "bad rx status %08x (crc error), size=%zu\n",
+			   status, sz);
 		break;
 	case MVPP2_RXD_ERR_OVERRUN:
-		netdev_err(port->dev, "bad rx status %08x (overrun error), size=%d\n",
-			   status, rx_desc->data_size);
+		netdev_err(port->dev, "bad rx status %08x (overrun error), size=%zu\n",
+			   status, sz);
 		break;
 	case MVPP2_RXD_ERR_RESOURCE:
-		netdev_err(port->dev, "bad rx status %08x (resource error), size=%d\n",
-			   status, rx_desc->data_size);
+		netdev_err(port->dev, "bad rx status %08x (resource error), size=%zu\n",
+			   status, sz);
 		break;
 	}
 }
@@ -4994,23 +5449,20 @@ static void mvpp2_rx_csum(struct mvpp2_port *port, u32 status,
 /* Reuse skb if possible, or allocate a new skb and add it to BM pool */
 static int mvpp2_rx_refill(struct mvpp2_port *port,
-			   struct mvpp2_bm_pool *bm_pool,
-			   u32 bm, int is_recycle)
+			   struct mvpp2_bm_pool *bm_pool, int pool)
 {
-	struct sk_buff *skb;
-	dma_addr_t phys_addr;
-
-	if (is_recycle &&
-	    (atomic_read(&bm_pool->in_use) < bm_pool->in_use_thresh))
-		return 0;
+	dma_addr_t dma_addr;
+	phys_addr_t phys_addr;
+	void *buf;
 
 	/* No recycle or too many buffers are in use, so allocate a new skb */
-	skb = mvpp2_skb_alloc(port, bm_pool, &phys_addr, GFP_ATOMIC);
-	if (!skb)
+	buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr, &phys_addr,
+			      GFP_ATOMIC);
+	if (!buf)
 		return -ENOMEM;
 
-	mvpp2_pool_refill(port, bm, (u32)phys_addr, (u32)skb);
-	atomic_dec(&bm_pool->in_use);
+	mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
+
 	return 0;
 }
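[Editor's note -- illustration, not part of the patch] Note how mvpp2_rx_refill() now hands the BM pool two distinct addresses: the dma_addr_t the device will DMA into, and the phys_addr_t used as the pool "cookie". The cookie field is too small for a 64-bit kernel pointer, so the CPU physical address is stored instead, and the rx path in the next hunks recovers the kernel virtual address with phys_to_virt(). Illustrative helper only:

static void *cookie_to_kva_sketch(phys_addr_t cookie)
{
	/* valid because BM buffers come from the kernel's linear mapping */
	return phys_to_virt(cookie);
}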
@@ -5045,43 +5497,6 @@ static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
 	return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE;
 }
 
-static void mvpp2_buff_hdr_rx(struct mvpp2_port *port,
-			      struct mvpp2_rx_desc *rx_desc)
-{
-	struct mvpp2_buff_hdr *buff_hdr;
-	struct sk_buff *skb;
-	u32 rx_status = rx_desc->status;
-	u32 buff_phys_addr;
-	u32 buff_virt_addr;
-	u32 buff_phys_addr_next;
-	u32 buff_virt_addr_next;
-	int mc_id;
-	int pool_id;
-
-	pool_id = (rx_status & MVPP2_RXD_BM_POOL_ID_MASK) >>
-		   MVPP2_RXD_BM_POOL_ID_OFFS;
-	buff_phys_addr = rx_desc->buf_phys_addr;
-	buff_virt_addr = rx_desc->buf_cookie;
-
-	do {
-		skb = (struct sk_buff *)buff_virt_addr;
-		buff_hdr = (struct mvpp2_buff_hdr *)skb->head;
-
-		mc_id = MVPP2_B_HDR_INFO_MC_ID(buff_hdr->info);
-
-		buff_phys_addr_next = buff_hdr->next_buff_phys_addr;
-		buff_virt_addr_next = buff_hdr->next_buff_virt_addr;
-
-		/* Release buffer */
-		mvpp2_bm_pool_mc_put(port, pool_id, buff_phys_addr,
-				     buff_virt_addr, mc_id);
-
-		buff_phys_addr = buff_phys_addr_next;
-		buff_virt_addr = buff_virt_addr_next;
-
-	} while (!MVPP2_B_HDR_INFO_IS_LAST(buff_hdr->info));
-}
-
 /* Main rx processing */
 static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
 		    struct mvpp2_rx_queue *rxq)
@@ -5101,23 +5516,24 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
 		struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
 		struct mvpp2_bm_pool *bm_pool;
 		struct sk_buff *skb;
-		dma_addr_t phys_addr;
-		u32 bm, rx_status;
+		unsigned int frag_size;
+		dma_addr_t dma_addr;
+		phys_addr_t phys_addr;
+		u32 rx_status;
 		int pool, rx_bytes, err;
+		void *data;
 
 		rx_done++;
-		rx_status = rx_desc->status;
-		rx_bytes = rx_desc->data_size - MVPP2_MH_SIZE;
-		phys_addr = rx_desc->buf_phys_addr;
-
-		bm = mvpp2_bm_cookie_build(rx_desc);
-		pool = mvpp2_bm_cookie_pool_get(bm);
+		rx_status = mvpp2_rxdesc_status_get(port, rx_desc);
+		rx_bytes = mvpp2_rxdesc_size_get(port, rx_desc);
+		rx_bytes -= MVPP2_MH_SIZE;
+		dma_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc);
+		phys_addr = mvpp2_rxdesc_cookie_get(port, rx_desc);
+		data = (void *)phys_to_virt(phys_addr);
+
+		pool = (rx_status & MVPP2_RXD_BM_POOL_ID_MASK) >>
+			MVPP2_RXD_BM_POOL_ID_OFFS;
 		bm_pool = &port->priv->bm_pools[pool];
-		/* Check if buffer header is used */
-		if (rx_status & MVPP2_RXD_BUF_HDR) {
-			mvpp2_buff_hdr_rx(port, rx_desc);
-			continue;
-		}
 
 		/* In case of an error, release the requested buffer pointer
 		 * to the Buffer Manager. This request process is controlled
@@ -5125,31 +5541,38 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
 		 * comprised by the RX descriptor.
 		 */
 		if (rx_status & MVPP2_RXD_ERR_SUMMARY) {
-		err_drop_frame:
+err_drop_frame:
 			dev->stats.rx_errors++;
 			mvpp2_rx_error(port, rx_desc);
 			/* Return the buffer to the pool */
-			mvpp2_pool_refill(port, bm, rx_desc->buf_phys_addr,
-					  rx_desc->buf_cookie);
+			mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
 			continue;
 		}
 
-		skb = (struct sk_buff *)rx_desc->buf_cookie;
+		if (bm_pool->frag_size > PAGE_SIZE)
+			frag_size = 0;
+		else
+			frag_size = bm_pool->frag_size;
+
+		skb = build_skb(data, frag_size);
+		if (!skb) {
+			netdev_warn(port->dev, "skb build failed\n");
+			goto err_drop_frame;
+		}
 
-		err = mvpp2_rx_refill(port, bm_pool, bm, 0);
+		err = mvpp2_rx_refill(port, bm_pool, pool);
 		if (err) {
 			netdev_err(port->dev, "failed to refill BM pools\n");
 			goto err_drop_frame;
 		}
 
-		dma_unmap_single(dev->dev.parent, phys_addr,
+		dma_unmap_single(dev->dev.parent, dma_addr,
 				 bm_pool->buf_size, DMA_FROM_DEVICE);
 
 		rcvd_pkts++;
 		rcvd_bytes += rx_bytes;
 
-		atomic_inc(&bm_pool->in_use);
-		skb_reserve(skb, MVPP2_MH_SIZE);
+		skb_reserve(skb, MVPP2_MH_SIZE + NET_SKB_PAD);
 		skb_put(skb, rx_bytes);
 		skb->protocol = eth_type_trans(skb, dev);
 		mvpp2_rx_csum(port, rx_status, skb);
@@ -5174,11 +5597,15 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
 }
 
 static inline void
-tx_desc_unmap_put(struct device *dev, struct mvpp2_tx_queue *txq,
+tx_desc_unmap_put(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
 		  struct mvpp2_tx_desc *desc)
 {
-	dma_unmap_single(dev, desc->buf_phys_addr,
-			 desc->data_size, DMA_TO_DEVICE);
+	dma_addr_t buf_dma_addr =
+		mvpp2_txdesc_dma_addr_get(port, desc);
+	size_t buf_sz =
+		mvpp2_txdesc_size_get(port, desc);
+
+	dma_unmap_single(port->dev->dev.parent, buf_dma_addr,
+			 buf_sz, DMA_TO_DEVICE);
 	mvpp2_txq_desc_put(txq);
}
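[Editor's note -- illustration, not part of the patch] The rx path above switched from recycling a prebuilt skb to build_skb() over the raw buffer. The frag_size argument follows the kernel contract: 0 means the buffer was kmalloc'ed from the slab, while a non-zero frag_size declares a page-fragment buffer that reserves room for struct skb_shared_info at its tail. A condensed sketch of that decision:

static struct sk_buff *rx_build_skb_sketch(struct mvpp2_bm_pool *bm_pool,
					   void *data)
{
	/* frag_size > PAGE_SIZE means the buffer came from kmalloc() */
	unsigned int frag_size =
		bm_pool->frag_size > PAGE_SIZE ? 0 : bm_pool->frag_size;
	struct sk_buff *skb = build_skb(data, frag_size);

	if (skb)
		/* skip the Marvell header plus the padding reserved at alloc */
		skb_reserve(skb, MVPP2_MH_SIZE + NET_SKB_PAD);
	return skb;
}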
@@ -5190,47 +5617,49 @@ static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
 	struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu);
 	struct mvpp2_tx_desc *tx_desc;
 	int i;
-	dma_addr_t buf_phys_addr;
+	dma_addr_t buf_dma_addr;
 
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 		void *addr = page_address(frag->page.p) + frag->page_offset;
 
 		tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
-		tx_desc->phys_txq = txq->id;
-		tx_desc->data_size = frag->size;
+		mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
+		mvpp2_txdesc_size_set(port, tx_desc, frag->size);
 
-		buf_phys_addr = dma_map_single(port->dev->dev.parent, addr,
-					       tx_desc->data_size,
+		buf_dma_addr = dma_map_single(port->dev->dev.parent, addr,
+					      frag->size,
 					      DMA_TO_DEVICE);
-		if (dma_mapping_error(port->dev->dev.parent, buf_phys_addr)) {
+		if (dma_mapping_error(port->dev->dev.parent, buf_dma_addr)) {
 			mvpp2_txq_desc_put(txq);
-			goto error;
+			goto cleanup;
 		}
 
-		tx_desc->packet_offset = buf_phys_addr & MVPP2_TX_DESC_ALIGN;
-		tx_desc->buf_phys_addr = buf_phys_addr & (~MVPP2_TX_DESC_ALIGN);
+		mvpp2_txdesc_offset_set(port, tx_desc,
+					buf_dma_addr & MVPP2_TX_DESC_ALIGN);
+		mvpp2_txdesc_dma_addr_set(port, tx_desc,
+					  buf_dma_addr & ~MVPP2_TX_DESC_ALIGN);
 
 		if (i == (skb_shinfo(skb)->nr_frags - 1)) {
 			/* Last descriptor */
-			tx_desc->command = MVPP2_TXD_L_DESC;
-			mvpp2_txq_inc_put(txq_pcpu, skb, tx_desc);
+			mvpp2_txdesc_cmd_set(port, tx_desc,
+					     MVPP2_TXD_L_DESC);
+			mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc);
 		} else {
 			/* Descriptor in the middle: Not First, Not Last */
-			tx_desc->command = 0;
-			mvpp2_txq_inc_put(txq_pcpu, NULL, tx_desc);
+			mvpp2_txdesc_cmd_set(port, tx_desc, 0);
+			mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
 		}
 	}
 
 	return 0;
-
-error:
+cleanup:
 	/* Release all descriptors that were used to map fragments of
 	 * this packet, as well as the corresponding DMA mappings
 	 */
 	for (i = i - 1; i >= 0; i--) {
 		tx_desc = txq->descs + i;
-		tx_desc_unmap_put(port->dev->dev.parent, txq, tx_desc);
+		tx_desc_unmap_put(port, txq, tx_desc);
 	}
 
 	return -ENOMEM;
@@ -5243,7 +5672,7 @@ static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
 	struct mvpp2_tx_queue *txq, *aggr_txq;
 	struct mvpp2_txq_pcpu *txq_pcpu;
 	struct mvpp2_tx_desc *tx_desc;
-	dma_addr_t buf_phys_addr;
+	dma_addr_t buf_dma_addr;
 	int frags = 0;
 	u16 txq_id;
 	u32 tx_cmd;
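[Editor's note -- illustration, not part of the patch] Both tx paths split an arbitrary DMA address into an aligned base (stored in the descriptor's address field) and a byte offset (stored separately); the hardware adds them back together. Standalone sketch, assuming MVPP2_TX_DESC_ALIGN is the low-bits mask 0x3f:

#include <assert.h>
#include <stdint.h>

#define MVPP2_TX_DESC_ALIGN 0x3f	/* assumed 64-byte alignment mask */

int main(void)
{
	uint64_t buf_dma_addr = 0x12345678abcdULL;
	uint64_t base   = buf_dma_addr & ~(uint64_t)MVPP2_TX_DESC_ALIGN;
	uint32_t offset = buf_dma_addr & MVPP2_TX_DESC_ALIGN;

	assert(base + offset == buf_dma_addr);	/* hardware recombines them */
	return 0;
}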
@@ -5265,35 +5694,38 @@ static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
 	/* Get a descriptor for the first part of the packet */
 	tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
-	tx_desc->phys_txq = txq->id;
-	tx_desc->data_size = skb_headlen(skb);
+	mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
+	mvpp2_txdesc_size_set(port, tx_desc, skb_headlen(skb));
 
-	buf_phys_addr = dma_map_single(dev->dev.parent, skb->data,
-				       tx_desc->data_size, DMA_TO_DEVICE);
-	if (unlikely(dma_mapping_error(dev->dev.parent, buf_phys_addr))) {
+	buf_dma_addr = dma_map_single(dev->dev.parent, skb->data,
+				      skb_headlen(skb), DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) {
 		mvpp2_txq_desc_put(txq);
 		frags = 0;
 		goto out;
 	}
-	tx_desc->packet_offset = buf_phys_addr & MVPP2_TX_DESC_ALIGN;
-	tx_desc->buf_phys_addr = buf_phys_addr & ~MVPP2_TX_DESC_ALIGN;
+
+	mvpp2_txdesc_offset_set(port, tx_desc,
+				buf_dma_addr & MVPP2_TX_DESC_ALIGN);
+	mvpp2_txdesc_dma_addr_set(port, tx_desc,
+				  buf_dma_addr & ~MVPP2_TX_DESC_ALIGN);
 
 	tx_cmd = mvpp2_skb_tx_csum(port, skb);
 
 	if (frags == 1) {
 		/* First and Last descriptor */
 		tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
-		tx_desc->command = tx_cmd;
-		mvpp2_txq_inc_put(txq_pcpu, skb, tx_desc);
+		mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
+		mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc);
 	} else {
 		/* First but not Last */
 		tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE;
-		tx_desc->command = tx_cmd;
-		mvpp2_txq_inc_put(txq_pcpu, NULL, tx_desc);
+		mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
+		mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
 
 		/* Continue with other skb fragments */
 		if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) {
-			tx_desc_unmap_put(port->dev->dev.parent, txq, tx_desc);
+			tx_desc_unmap_put(port, txq, tx_desc);
 			frags = 0;
 			goto out;
 		}
@@ -5354,6 +5786,7 @@ static int mvpp2_poll(struct napi_struct *napi, int budget)
 	u32 cause_rx_tx, cause_rx, cause_misc;
 	int rx_done = 0;
 	struct mvpp2_port *port = netdev_priv(napi->dev);
+	int cpu = smp_processor_id();
 
 	/* Rx/Tx cause register
 	 *
@@ -5365,8 +5798,8 @@ static int mvpp2_poll(struct napi_struct *napi, int budget)
 	 *
 	 * Each CPU has its own Rx/Tx cause register
 	 */
-	cause_rx_tx = mvpp2_read(port->priv,
-				 MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
+	cause_rx_tx = mvpp2_percpu_read(port->priv, cpu,
+					MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
 	cause_rx_tx &= ~MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
 	cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK;
 
@@ -5375,8 +5808,9 @@ static int mvpp2_poll(struct napi_struct *napi, int budget)
 
 		/* Clear the cause register */
 		mvpp2_write(port->priv, MVPP2_ISR_MISC_CAUSE_REG, 0);
-		mvpp2_write(port->priv, MVPP2_ISR_RX_TX_CAUSE_REG(port->id),
-			    cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
+		mvpp2_percpu_write(port->priv, cpu,
+				   MVPP2_ISR_RX_TX_CAUSE_REG(port->id),
+				   cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
 	}
 
 	cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
@@ -5405,7 +5839,7 @@ static int mvpp2_poll(struct napi_struct *napi, int budget)
 
 	if (budget > 0) {
 		cause_rx = 0;
-		napi_complete(napi);
+		napi_complete_done(napi, rx_done);
 
 		mvpp2_interrupts_enable(port);
 	}
@@ -5488,7 +5922,7 @@ static int mvpp2_check_ringparam_valid(struct net_device *dev,
 	return 0;
 }
 
-static void mvpp2_get_mac_address(struct mvpp2_port *port, unsigned char *addr)
+static void mvpp21_get_mac_address(struct mvpp2_port *port, unsigned char *addr)
 {
 	u32 mac_addr_l, mac_addr_m, mac_addr_h;
 
@@ -5656,7 +6090,7 @@ static int mvpp2_set_mac_address(struct net_device *dev, void *p)
 
 	if (!is_valid_ether_addr(addr->sa_data)) {
 		err = -EADDRNOTAVAIL;
-		goto error;
+		goto log_error;
 	}
 
 	if (!netif_running(dev)) {
@@ -5666,7 +6100,7 @@ static int mvpp2_set_mac_address(struct net_device *dev, void *p)
 		/* Reconfigure parser to accept the original MAC address */
 		err = mvpp2_prs_update_mac_da(dev, dev->dev_addr);
 		if (err)
-			goto error;
+			goto log_error;
 	}
 
 	mvpp2_stop_dev(port);
@@ -5678,15 +6112,14 @@ static int mvpp2_set_mac_address(struct net_device *dev, void *p)
 	/* Reconfigure parser accept the original MAC address */
 	err = mvpp2_prs_update_mac_da(dev, dev->dev_addr);
 	if (err)
-		goto error;
+		goto log_error;
 out_start:
 	mvpp2_start_dev(port);
 	mvpp2_egress_enable(port);
 	mvpp2_ingress_enable(port);
 	return 0;
-
-error:
-	netdev_err(dev, "fail to change MAC address\n");
+log_error:
+	netdev_err(dev, "failed to change MAC address\n");
 	return err;
 }
 
@@ -5711,7 +6144,7 @@ static int mvpp2_change_mtu(struct net_device *dev, int mtu)
 		/* Reconfigure BM to the original MTU */
 		err = mvpp2_bm_update_mtu(dev, dev->mtu);
 		if (err)
-			goto error;
+			goto log_error;
 	}
 
 	mvpp2_stop_dev(port);
@@ -5725,7 +6158,7 @@
 	/* Reconfigure BM to the original MTU */
 	err = mvpp2_bm_update_mtu(dev, dev->mtu);
 	if (err)
-		goto error;
+		goto log_error;
@@ -5733,13 +6166,12 @@ out_start:
 	mvpp2_start_dev(port);
 	mvpp2_egress_enable(port);
 	mvpp2_ingress_enable(port);
 	return 0;
-
-error:
-	netdev_err(dev, "fail to change MTU\n");
+log_error:
+	netdev_err(dev, "failed to change MTU\n");
 	return err;
 }
 
-static struct rtnl_link_stats64 *
+static void
 mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
 {
 	struct mvpp2_port *port = netdev_priv(dev);
@@ -5771,8 +6203,6 @@ mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
 	stats->rx_errors	= dev->stats.rx_errors;
 	stats->rx_dropped	= dev->stats.rx_dropped;
 	stats->tx_dropped	= dev->stats.tx_dropped;
-
-	return stats;
 }
 
 static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
@@ -5803,8 +6233,8 @@ static int mvpp2_ethtool_set_coalesce(struct net_device *dev,
 		rxq->time_coal = c->rx_coalesce_usecs;
 		rxq->pkts_coal = c->rx_max_coalesced_frames;
-		mvpp2_rx_pkts_coal_set(port, rxq, rxq->pkts_coal);
-		mvpp2_rx_time_coal_set(port, rxq, rxq->time_coal);
+		mvpp2_rx_pkts_coal_set(port, rxq);
+		mvpp2_rx_time_coal_set(port, rxq);
 	}
 
 	for (queue = 0; queue < txq_number; queue++) {
@@ -5906,7 +6336,7 @@ static int mvpp2_ethtool_set_ringparam(struct net_device *dev,
 err_clean_rxqs:
 	mvpp2_cleanup_rxqs(port);
 err_out:
-	netdev_err(dev, "fail to change ring parameters");
+	netdev_err(dev, "failed to change ring parameters");
 	return err;
 }
 
@@ -5935,16 +6365,6 @@ static const struct ethtool_ops mvpp2_eth_tool_ops = {
 	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
 };
 
-/* Driver initialization */
-
-static void mvpp2_port_power_up(struct mvpp2_port *port)
-{
-	mvpp2_port_mii_set(port);
-	mvpp2_port_periodic_xon_disable(port);
-	mvpp2_port_fc_adv_enable(port);
-	mvpp2_port_reset(port);
-}
-
 /* Initialize port HW */
 static int mvpp2_port_init(struct mvpp2_port *port)
 {
@@ -5953,7 +6373,8 @@ static int mvpp2_port_init(struct mvpp2_port *port)
 	struct mvpp2_txq_pcpu *txq_pcpu;
 	int queue, cpu, err;
 
-	if (port->first_rxq + rxq_number > MVPP2_RXQ_TOTAL_NUM)
+	if (port->first_rxq + rxq_number >
+	    MVPP2_MAX_PORTS * priv->max_port_rxqs)
 		return -EINVAL;
 
 	/* Disable port */
@@ -5973,8 +6394,10 @@ static int mvpp2_port_init(struct mvpp2_port *port)
 		struct mvpp2_tx_queue *txq;
 
 		txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL);
-		if (!txq)
-			return -ENOMEM;
+		if (!txq) {
+			err = -ENOMEM;
+			goto err_free_percpu;
+		}
 
 		txq->pcpu = alloc_percpu(struct mvpp2_txq_pcpu);
 		if (!txq->pcpu) {
@@ -6019,7 +6442,18 @@ static int mvpp2_port_init(struct mvpp2_port *port)
 	}
 
 	/* Configure Rx queue group interrupt for this port */
-	mvpp2_write(priv, MVPP2_ISR_RXQ_GROUP_REG(port->id), rxq_number);
+	if (priv->hw_version == MVPP21) {
+		mvpp2_write(priv, MVPP21_ISR_RXQ_GROUP_REG(port->id),
+			    rxq_number);
+	} else {
+		u32 val;
+
+		val = (port->id << MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET);
+		mvpp2_write(priv, MVPP22_ISR_RXQ_GROUP_INDEX_REG, val);
+
+		val = (rxq_number << MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET);
+		mvpp2_write(priv, MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG, val);
+	}
 
 	/* Create Rx descriptor rings */
 	for (queue = 0; queue < rxq_number; queue++) {
@@ -6061,8 +6495,7 @@ err_free_percpu:
 /* Ports initialization */
 static int mvpp2_port_probe(struct platform_device *pdev,
 			    struct device_node *port_node,
-			    struct mvpp2 *priv,
-			    int *next_first_rxq)
+			    struct mvpp2 *priv)
 {
 	struct device_node *phy_node;
 	struct mvpp2_port *port;
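[Editor's note -- illustration, not part of the patch] With next_first_rxq gone, each port's first Rx queue becomes a pure function of the port id: PPv2.1 keeps the old back-to-back packing, PPv2.2 uses a fixed stride of max_port_rxqs = 32, as the port_probe hunk below shows. Standalone sketch (the default rxq_number of 4 is an assumption):

#include <stdio.h>

enum hw { MVPP21, MVPP22 };

static int first_rxq(enum hw v, int port_id, int rxq_number)
{
	return v == MVPP21 ? port_id * rxq_number : port_id * 32;
}

int main(void)
{
	printf("PPv2.1 port 2 -> rxq %d\n", first_rxq(MVPP21, 2, 4)); /* 8 */
	printf("PPv2.2 port 2 -> rxq %d\n", first_rxq(MVPP22, 2, 4)); /* 64 */
	return 0;
}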
@@ -6075,11 +6508,9 @@ static int mvpp2_port_probe(struct platform_device *pdev,
 	u32 id;
 	int features;
 	int phy_mode;
-	int priv_common_regs_num = 2;
 	int err, i, cpu;
 
-	dev = alloc_etherdev_mqs(sizeof(struct mvpp2_port), txq_number,
-				 rxq_number);
+	dev = alloc_etherdev_mqs(sizeof(*port), txq_number, rxq_number);
 	if (!dev)
 		return -ENOMEM;
 
@@ -6121,16 +6552,30 @@ static int mvpp2_port_probe(struct platform_device *pdev,
 
 	port->priv = priv;
 	port->id = id;
-	port->first_rxq = *next_first_rxq;
+	if (priv->hw_version == MVPP21)
+		port->first_rxq = port->id * rxq_number;
+	else
+		port->first_rxq = port->id * priv->max_port_rxqs;
+
 	port->phy_node = phy_node;
 	port->phy_interface = phy_mode;
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM,
-				    priv_common_regs_num + id);
-	port->base = devm_ioremap_resource(&pdev->dev, res);
-	if (IS_ERR(port->base)) {
-		err = PTR_ERR(port->base);
-		goto err_free_irq;
+	if (priv->hw_version == MVPP21) {
+		res = platform_get_resource(pdev, IORESOURCE_MEM, 2 + id);
+		port->base = devm_ioremap_resource(&pdev->dev, res);
+		if (IS_ERR(port->base)) {
+			err = PTR_ERR(port->base);
+			goto err_free_irq;
+		}
+	} else {
+		if (of_property_read_u32(port_node, "gop-port-id",
+					 &port->gop_id)) {
+			err = -EINVAL;
+			dev_err(&pdev->dev, "missing gop-port-id value\n");
+			goto err_free_irq;
+		}
+
+		port->base = priv->iface_base + MVPP22_GMAC_BASE(port->gop_id);
 	}
 
 	/* Alloc per-cpu stats */
@@ -6145,7 +6590,8 @@ static int mvpp2_port_probe(struct platform_device *pdev,
 		mac_from = "device tree";
 		ether_addr_copy(dev->dev_addr, dt_mac_addr);
 	} else {
-		mvpp2_get_mac_address(port, hw_mac_addr);
+		if (priv->hw_version == MVPP21)
+			mvpp21_get_mac_address(port, hw_mac_addr);
 		if (is_valid_ether_addr(hw_mac_addr)) {
 			mac_from = "hardware";
 			ether_addr_copy(dev->dev_addr, hw_mac_addr);
@@ -6165,7 +6611,14 @@ static int mvpp2_port_probe(struct platform_device *pdev,
 		dev_err(&pdev->dev, "failed to init port %d\n", id);
 		goto err_free_stats;
 	}
-	mvpp2_port_power_up(port);
+
+	mvpp2_port_mii_set(port);
+	mvpp2_port_periodic_xon_disable(port);
+
+	if (priv->hw_version == MVPP21)
+		mvpp2_port_fc_adv_enable(port);
+
+	mvpp2_port_reset(port);
 
 	port->pcpu = alloc_percpu(struct mvpp2_port_pcpu);
 	if (!port->pcpu) {
@@ -6203,8 +6656,6 @@ static int mvpp2_port_probe(struct platform_device *pdev,
 	}
 	netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr);
 
-	/* Increment the first Rx queue number to be used by the next port */
-	*next_first_rxq += rxq_number;
 	priv->port_list[id] = port;
 	return 0;
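[Editor's note -- illustration, not part of the patch] The mvpp2_axi_init() hunk that follows builds each AXI attribute register by shifting field codes to their offsets and OR-ing them together. With the constants defined earlier in this patch, the read-attribute value works out as below:

#include <stdint.h>

#define MVPP22_AXI_ATTR_CACHE_OFFS		0
#define MVPP22_AXI_ATTR_DOMAIN_OFFS		12
#define MVPP22_AXI_CODE_CACHE_RD_CACHE		0xb
#define MVPP22_AXI_CODE_DOMAIN_OUTER_DOM	2

static uint32_t axi_rd_attr(void)
{
	/* 0xb | (2 << 12) == 0x200b: cacheable read, outer-shareable domain */
	return (MVPP22_AXI_CODE_CACHE_RD_CACHE << MVPP22_AXI_ATTR_CACHE_OFFS) |
	       (MVPP22_AXI_CODE_DOMAIN_OUTER_DOM << MVPP22_AXI_ATTR_DOMAIN_OFFS);
}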
@@ -6288,6 +6739,60 @@ static void mvpp2_rx_fifo_init(struct mvpp2 *priv)
 	mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
 }
 
+static void mvpp2_axi_init(struct mvpp2 *priv)
+{
+	u32 val, rdval, wrval;
+
+	mvpp2_write(priv, MVPP22_BM_ADDR_HIGH_RLS_REG, 0x0);
+
+	/* AXI Bridge Configuration */
+
+	rdval = MVPP22_AXI_CODE_CACHE_RD_CACHE
+		<< MVPP22_AXI_ATTR_CACHE_OFFS;
+	rdval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
+		<< MVPP22_AXI_ATTR_DOMAIN_OFFS;
+
+	wrval = MVPP22_AXI_CODE_CACHE_WR_CACHE
+		<< MVPP22_AXI_ATTR_CACHE_OFFS;
+	wrval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
+		<< MVPP22_AXI_ATTR_DOMAIN_OFFS;
+
+	/* BM */
+	mvpp2_write(priv, MVPP22_AXI_BM_WR_ATTR_REG, wrval);
+	mvpp2_write(priv, MVPP22_AXI_BM_RD_ATTR_REG, rdval);
+
+	/* Descriptors */
+	mvpp2_write(priv, MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG, rdval);
+	mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG, wrval);
+	mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG, rdval);
+	mvpp2_write(priv, MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG, wrval);
+
+	/* Buffer Data */
+	mvpp2_write(priv, MVPP22_AXI_TX_DATA_RD_ATTR_REG, rdval);
+	mvpp2_write(priv, MVPP22_AXI_RX_DATA_WR_ATTR_REG, wrval);
+
+	val = MVPP22_AXI_CODE_CACHE_NON_CACHE
+		<< MVPP22_AXI_CODE_CACHE_OFFS;
+	val |= MVPP22_AXI_CODE_DOMAIN_SYSTEM
+		<< MVPP22_AXI_CODE_DOMAIN_OFFS;
+	mvpp2_write(priv, MVPP22_AXI_RD_NORMAL_CODE_REG, val);
+	mvpp2_write(priv, MVPP22_AXI_WR_NORMAL_CODE_REG, val);
+
+	val = MVPP22_AXI_CODE_CACHE_RD_CACHE
+		<< MVPP22_AXI_CODE_CACHE_OFFS;
+	val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
+		<< MVPP22_AXI_CODE_DOMAIN_OFFS;
+
+	mvpp2_write(priv, MVPP22_AXI_RD_SNOOP_CODE_REG, val);
+
+	val = MVPP22_AXI_CODE_CACHE_WR_CACHE
+		<< MVPP22_AXI_CODE_CACHE_OFFS;
+	val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
+		<< MVPP22_AXI_CODE_DOMAIN_OFFS;
+
+	mvpp2_write(priv, MVPP22_AXI_WR_SNOOP_CODE_REG, val);
+}
+
 /* Initialize network controller common part HW */
 static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
 {
@@ -6296,7 +6801,7 @@ static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
 	u32 val;
 
 	/* Checks for hardware constraints */
-	if (rxq_number % 4 || (rxq_number > MVPP2_MAX_RXQ) ||
+	if (rxq_number % 4 || (rxq_number > priv->max_port_rxqs) ||
 	    (txq_number > MVPP2_MAX_TXQ)) {
 		dev_err(&pdev->dev, "invalid queue size parameter\n");
 		return -EINVAL;
@@ -6307,14 +6812,23 @@ static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
 	if (dram_target_info)
 		mvpp2_conf_mbus_windows(dram_target_info, priv);
 
+	if (priv->hw_version == MVPP22)
+		mvpp2_axi_init(priv);
+
 	/* Disable HW PHY polling */
-	val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
-	val |= MVPP2_PHY_AN_STOP_SMI0_MASK;
-	writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
+	if (priv->hw_version == MVPP21) {
+		val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
+		val |= MVPP2_PHY_AN_STOP_SMI0_MASK;
+		writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
+	} else {
+		val = readl(priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
+		val &= ~MVPP22_SMI_POLLING_EN;
+		writel(val, priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
+	}
 
 	/* Allocate and initialize aggregated TXQs */
 	priv->aggr_txqs = devm_kcalloc(&pdev->dev, num_present_cpus(),
-				       sizeof(struct mvpp2_tx_queue),
+				       sizeof(*priv->aggr_txqs),
 				       GFP_KERNEL);
 	if (!priv->aggr_txqs)
 		return -ENOMEM;
@@ -6332,11 +6846,25 @@ static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
 	mvpp2_rx_fifo_init(priv);
 
 	/* Reset Rx queue group interrupt configuration */
-	for (i = 0; i < MVPP2_MAX_PORTS; i++)
-		mvpp2_write(priv, MVPP2_ISR_RXQ_GROUP_REG(i), rxq_number);
+	for (i = 0; i < MVPP2_MAX_PORTS; i++) {
+		if (priv->hw_version == MVPP21) {
+			mvpp2_write(priv, MVPP21_ISR_RXQ_GROUP_REG(i),
+				    rxq_number);
+			continue;
+		} else {
+			u32 val;
 
-	writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT,
-	       priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG);
+			val = (i << MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET);
+			mvpp2_write(priv, MVPP22_ISR_RXQ_GROUP_INDEX_REG, val);
+
+			val = (rxq_number << MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET);
+			mvpp2_write(priv, MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG, val);
+		}
+	}
+
+	if (priv->hw_version == MVPP21)
+		writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT,
+		       priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG);
 
 	/* Allow cache snoop when transmiting packets */
 	mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1);
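[Editor's note -- illustration, not part of the patch] The probe hunk below selects PPv2.1 vs PPv2.2 behaviour through of_device_get_match_data(): the matched of_device_id entry (see the mvpp2_match[] change at the end of this diff) carries the hw version in its .data pointer, smuggled through a void * cast. Minimal sketch of the pattern:

static const struct of_device_id sketch_match[] = {
	{ .compatible = "marvell,armada-375-pp2", .data = (void *)MVPP21 },
	{ .compatible = "marvell,armada-7k-pp22", .data = (void *)MVPP22 },
	{ /* sentinel */ }
};

static int sketch_probe(struct platform_device *pdev)
{
	unsigned long hw_version =
		(unsigned long)of_device_get_match_data(&pdev->dev);

	return hw_version == MVPP21 ? 0 : 1;	/* branch on the variant */
}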
@@ -6363,22 +6891,46 @@ static int mvpp2_probe(struct platform_device *pdev)
 	struct device_node *port_node;
 	struct mvpp2 *priv;
 	struct resource *res;
-	int port_count, first_rxq;
+	void __iomem *base;
+	int port_count, cpu;
 	int err;
 
-	priv = devm_kzalloc(&pdev->dev, sizeof(struct mvpp2), GFP_KERNEL);
+	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
 	if (!priv)
 		return -ENOMEM;
 
+	priv->hw_version =
+		(unsigned long)of_device_get_match_data(&pdev->dev);
+
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	priv->base = devm_ioremap_resource(&pdev->dev, res);
-	if (IS_ERR(priv->base))
-		return PTR_ERR(priv->base);
+	base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(base))
+		return PTR_ERR(base);
+
+	if (priv->hw_version == MVPP21) {
+		res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+		priv->lms_base = devm_ioremap_resource(&pdev->dev, res);
+		if (IS_ERR(priv->lms_base))
+			return PTR_ERR(priv->lms_base);
+	} else {
+		res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+		priv->iface_base = devm_ioremap_resource(&pdev->dev, res);
+		if (IS_ERR(priv->iface_base))
+			return PTR_ERR(priv->iface_base);
+	}
+
+	for_each_present_cpu(cpu) {
+		u32 addr_space_sz;
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-	priv->lms_base = devm_ioremap_resource(&pdev->dev, res);
-	if (IS_ERR(priv->lms_base))
-		return PTR_ERR(priv->lms_base);
+		addr_space_sz = (priv->hw_version == MVPP21 ?
+				 MVPP21_ADDR_SPACE_SZ : MVPP22_ADDR_SPACE_SZ);
+		priv->cpu_base[cpu] = base + cpu * addr_space_sz;
+	}
+
+	if (priv->hw_version == MVPP21)
+		priv->max_port_rxqs = 8;
+	else
+		priv->max_port_rxqs = 32;
 
 	priv->pp_clk = devm_clk_get(&pdev->dev, "pp_clk");
 	if (IS_ERR(priv->pp_clk))
@@ -6396,42 +6948,70 @@ static int mvpp2_probe(struct platform_device *pdev)
 	if (err < 0)
 		goto err_pp_clk;
 
+	if (priv->hw_version == MVPP22) {
+		priv->mg_clk = devm_clk_get(&pdev->dev, "mg_clk");
+		if (IS_ERR(priv->mg_clk)) {
+			err = PTR_ERR(priv->mg_clk);
+			goto err_gop_clk;
+		}
+
+		err = clk_prepare_enable(priv->mg_clk);
+		if (err < 0)
+			goto err_gop_clk;
+	}
+
 	/* Get system's tclk rate */
 	priv->tclk = clk_get_rate(priv->pp_clk);
 
+	if (priv->hw_version == MVPP22) {
+		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(40));
+		if (err)
+			goto err_mg_clk;
+		/* Sadly, the BM pools all share the same register to
+		 * store the high 32 bits of their address. So they
+		 * must all have the same high 32 bits, which forces
+		 * us to restrict coherent memory to DMA_BIT_MASK(32).
+		 */
+		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+		if (err)
+			goto err_mg_clk;
+	}
+
 	/* Initialize network controller */
 	err = mvpp2_init(pdev, priv);
 	if (err < 0) {
 		dev_err(&pdev->dev, "failed to initialize controller\n");
-		goto err_gop_clk;
+		goto err_mg_clk;
 	}
 
 	port_count = of_get_available_child_count(dn);
 	if (port_count == 0) {
 		dev_err(&pdev->dev, "no ports enabled\n");
 		err = -ENODEV;
-		goto err_gop_clk;
+		goto err_mg_clk;
 	}
 
 	priv->port_list = devm_kcalloc(&pdev->dev, port_count,
-				       sizeof(struct mvpp2_port *),
-				       GFP_KERNEL);
+				       sizeof(*priv->port_list),
+				       GFP_KERNEL);
 	if (!priv->port_list) {
 		err = -ENOMEM;
-		goto err_gop_clk;
+		goto err_mg_clk;
 	}
 
 	/* Initialize ports */
-	first_rxq = 0;
 	for_each_available_child_of_node(dn, port_node) {
-		err = mvpp2_port_probe(pdev, port_node, priv, &first_rxq);
+		err = mvpp2_port_probe(pdev, port_node, priv);
 		if (err < 0)
-			goto err_gop_clk;
+			goto err_mg_clk;
 	}
 
 	platform_set_drvdata(pdev, priv);
 	return 0;
 
+err_mg_clk:
+	if (priv->hw_version == MVPP22)
+		clk_disable_unprepare(priv->mg_clk);
 err_gop_clk:
 	clk_disable_unprepare(priv->gop_clk);
 err_pp_clk:
@@ -6464,9 +7044,10 @@ static int mvpp2_remove(struct platform_device *pdev)
 		dma_free_coherent(&pdev->dev,
 				  MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
 				  aggr_txq->descs,
-				  aggr_txq->descs_phys);
+				  aggr_txq->descs_dma);
 	}
 
+	clk_disable_unprepare(priv->mg_clk);
 	clk_disable_unprepare(priv->pp_clk);
 	clk_disable_unprepare(priv->gop_clk);
 
@@ -6474,7 +7055,14 @@ static int mvpp2_remove(struct platform_device *pdev)
 }
 
 static const struct of_device_id mvpp2_match[] = {
-	{ .compatible = "marvell,armada-375-pp2" },
+	{
+		.compatible = "marvell,armada-375-pp2",
+		.data = (void *)MVPP21,
+	},
+	{
+		.compatible = "marvell,armada-7k-pp22",
+		.data = (void *)MVPP22,
+	},
 	{ }
 };
 MODULE_DEVICE_TABLE(of, mvpp2_match);