Diffstat (limited to 'drivers/net/ethernet/freescale')
-rw-r--r--  drivers/net/ethernet/freescale/dpaa/dpaa_eth.c        |  54
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c      | 152
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h      |  15
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c  |  16
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c      | 156
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h      |   5
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h       |  17
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpni.c           |  93
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpni.h           |   9
-rw-r--r--  drivers/net/ethernet/freescale/enetc/Kconfig          |   2
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_hw.h       |   2
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_mdio.c     |  61
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_pf.c       |  64
-rw-r--r--  drivers/net/ethernet/freescale/fec.h                  |   5
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c             |  16
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman_memac.c      |   4
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.c              |   1
-rw-r--r--  drivers/net/ethernet/freescale/ucc_geth.c             | 549
-rw-r--r--  drivers/net/ethernet/freescale/ucc_geth.h             |   6
19 files changed, 684 insertions, 543 deletions
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index 4360ce4d3fb6..720dc99bd1fc 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -2180,8 +2180,10 @@ static int dpaa_a050385_wa_xdpf(struct dpaa_priv *priv,
struct xdp_frame **init_xdpf)
{
struct xdp_frame *new_xdpf, *xdpf = *init_xdpf;
- void *new_buff;
+ void *new_buff, *aligned_data;
struct page *p;
+ u32 data_shift;
+ int headroom;
/* Check the data alignment and make sure the headroom is large
* enough to store the xdpf backpointer. Use an aligned headroom
@@ -2191,25 +2193,57 @@ static int dpaa_a050385_wa_xdpf(struct dpaa_priv *priv,
* byte frame headroom. If the XDP program uses all of it, copy the
* data to a new buffer and make room for storing the backpointer.
*/
- if (PTR_IS_ALIGNED(xdpf->data, DPAA_A050385_ALIGN) &&
+ if (PTR_IS_ALIGNED(xdpf->data, DPAA_FD_DATA_ALIGNMENT) &&
xdpf->headroom >= priv->tx_headroom) {
xdpf->headroom = priv->tx_headroom;
return 0;
}
+ /* Try to move the data inside the buffer just enough to align it and
+ * store the xdpf backpointer. If the available headroom isn't large
+ * enough, resort to allocating a new buffer and copying the data.
+ */
+ aligned_data = PTR_ALIGN_DOWN(xdpf->data, DPAA_FD_DATA_ALIGNMENT);
+ data_shift = xdpf->data - aligned_data;
+
+ /* The XDP frame's headroom needs to be large enough to accommodate
+ * shifting the data as well as storing the xdpf backpointer.
+ */
+ if (xdpf->headroom >= data_shift + priv->tx_headroom) {
+ memmove(aligned_data, xdpf->data, xdpf->len);
+ xdpf->data = aligned_data;
+ xdpf->headroom = priv->tx_headroom;
+ return 0;
+ }
+
+ /* The new xdp_frame is stored in the new buffer. Reserve enough space
+ * in the headroom for storing it along with the driver's private
+ * info. The headroom needs to be aligned to DPAA_FD_DATA_ALIGNMENT to
+ * guarantee the data's alignment in the buffer.
+ */
+ headroom = ALIGN(sizeof(*new_xdpf) + priv->tx_headroom,
+ DPAA_FD_DATA_ALIGNMENT);
+
+ /* Assure the extended headroom and data don't overflow the buffer,
+ * while maintaining the mandatory tailroom.
+ */
+ if (headroom + xdpf->len > DPAA_BP_RAW_SIZE -
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+ return -ENOMEM;
+
p = dev_alloc_pages(0);
if (unlikely(!p))
return -ENOMEM;
/* Copy the data to the new buffer at a properly aligned offset */
new_buff = page_address(p);
- memcpy(new_buff + priv->tx_headroom, xdpf->data, xdpf->len);
+ memcpy(new_buff + headroom, xdpf->data, xdpf->len);
/* Create an XDP frame around the new buffer in a similar fashion
* to xdp_convert_buff_to_frame.
*/
new_xdpf = new_buff;
- new_xdpf->data = new_buff + priv->tx_headroom;
+ new_xdpf->data = new_buff + headroom;
new_xdpf->len = xdpf->len;
new_xdpf->headroom = priv->tx_headroom;
new_xdpf->frame_sz = DPAA_BP_RAW_SIZE;
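For illustration, a minimal user-space sketch of the alignment arithmetic used in the hunk above; DPAA_FD_DATA_ALIGNMENT, priv->tx_headroom and sizeof(struct xdp_frame) are replaced with assumed constants, and plain bit masks stand in for the kernel's PTR_ALIGN_DOWN()/ALIGN() helpers:

#include <stdint.h>
#include <stdio.h>

#define FD_DATA_ALIGNMENT	64UL	/* assumed erratum alignment */
#define TX_HEADROOM		256UL	/* assumed priv->tx_headroom */
#define XDPF_SIZE		40UL	/* assumed sizeof(struct xdp_frame) */

static uintptr_t align_down(uintptr_t x, uintptr_t a) { return x & ~(a - 1); }
static uintptr_t align_up(uintptr_t x, uintptr_t a) { return (x + a - 1) & ~(a - 1); }

int main(void)
{
	uintptr_t data = 0x2000 + 100;	/* hypothetical unaligned xdpf->data */
	uintptr_t aligned_data = align_down(data, FD_DATA_ALIGNMENT);
	uintptr_t data_shift = data - aligned_data;	/* bytes the payload moves back: 36 */
	uintptr_t new_headroom = align_up(XDPF_SIZE + TX_HEADROOM, FD_DATA_ALIGNMENT);

	/* The in-place move is possible only if the existing headroom covers both */
	printf("shift=%lu, needed in place=%lu, new buffer headroom=%lu\n",
	       (unsigned long)data_shift,
	       (unsigned long)(data_shift + TX_HEADROOM),
	       (unsigned long)new_headroom);	/* 36, 292, 320 */
	return 0;
}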
@@ -2532,12 +2566,10 @@ static u32 dpaa_run_xdp(struct dpaa_priv *priv, struct qm_fd *fd, void *vaddr,
return XDP_PASS;
}
- xdp.data = vaddr + fd_off;
- xdp.data_meta = xdp.data;
- xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM;
- xdp.data_end = xdp.data + qm_fd_get_length(fd);
- xdp.frame_sz = DPAA_BP_RAW_SIZE - DPAA_TX_PRIV_DATA_SIZE;
- xdp.rxq = &dpaa_fq->xdp_rxq;
+ xdp_init_buff(&xdp, DPAA_BP_RAW_SIZE - DPAA_TX_PRIV_DATA_SIZE,
+ &dpaa_fq->xdp_rxq);
+ xdp_prepare_buff(&xdp, vaddr + fd_off - XDP_PACKET_HEADROOM,
+ XDP_PACKET_HEADROOM, qm_fd_get_length(fd), true);
/* We reserve a fixed headroom of 256 bytes under the erratum and we
* offer it all to XDP programs to use. If no room is left for the
@@ -2638,7 +2670,6 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
u32 hash;
u64 ns;
- np = container_of(&portal, struct dpaa_napi_portal, p);
dpaa_fq = container_of(fq, struct dpaa_fq, fq_base);
fd_status = be32_to_cpu(fd->status);
fd_format = qm_fd_get_format(fd);
@@ -2653,6 +2684,7 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
percpu_priv = this_cpu_ptr(priv->percpu_priv);
percpu_stats = &percpu_priv->stats;
+ np = &percpu_priv->np;
if (unlikely(dpaa_eth_napi_schedule(percpu_priv, portal, sched_napi)))
return qman_cb_dqrr_stop;
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index fb0bcd18ec0c..492943bb9c48 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -350,7 +350,7 @@ static u32 dpaa2_eth_run_xdp(struct dpaa2_eth_priv *priv,
struct bpf_prog *xdp_prog;
struct xdp_buff xdp;
u32 xdp_act = XDP_PASS;
- int err;
+ int err, offset;
rcu_read_lock();
@@ -358,14 +358,10 @@ static u32 dpaa2_eth_run_xdp(struct dpaa2_eth_priv *priv,
if (!xdp_prog)
goto out;
- xdp.data = vaddr + dpaa2_fd_get_offset(fd);
- xdp.data_end = xdp.data + dpaa2_fd_get_len(fd);
- xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM;
- xdp_set_data_meta_invalid(&xdp);
- xdp.rxq = &ch->xdp_rxq;
-
- xdp.frame_sz = DPAA2_ETH_RX_BUF_RAW_SIZE -
- (dpaa2_fd_get_offset(fd) - XDP_PACKET_HEADROOM);
+ offset = dpaa2_fd_get_offset(fd) - XDP_PACKET_HEADROOM;
+ xdp_init_buff(&xdp, DPAA2_ETH_RX_BUF_RAW_SIZE - offset, &ch->xdp_rxq);
+ xdp_prepare_buff(&xdp, vaddr + offset, XDP_PACKET_HEADROOM,
+ dpaa2_fd_get_len(fd), false);
xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp);
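As a reading aid, the two helpers bundle the field assignments they replace; the sketch below mirrors their effect on a stand-in structure (field names follow struct xdp_buff, but this is an illustration of the semantics, not the kernel implementation):

#include <stdbool.h>
#include <stddef.h>

struct xdp_buff_sketch {
	void *data;
	void *data_end;
	void *data_meta;
	void *data_hard_start;
	unsigned int frame_sz;
	void *rxq;	/* stands in for struct xdp_rxq_info * */
};

/* roughly what xdp_init_buff() records */
static void init_buff(struct xdp_buff_sketch *x, unsigned int frame_sz, void *rxq)
{
	x->frame_sz = frame_sz;
	x->rxq = rxq;
}

/* roughly what xdp_prepare_buff() derives from hard_start/headroom/len */
static void prepare_buff(struct xdp_buff_sketch *x, void *hard_start,
			 unsigned int headroom, unsigned int data_len,
			 bool meta_valid)
{
	char *data = (char *)hard_start + headroom;

	x->data_hard_start = hard_start;
	x->data = data;
	x->data_end = data + data_len;
	x->data_meta = meta_valid ? (void *)data : (void *)(data + 1); /* data + 1 marks "invalid" */
}

int main(void)
{
	struct xdp_buff_sketch x;
	static char frame[2048];	/* hypothetical receive buffer */

	init_buff(&x, sizeof(frame), NULL);
	prepare_buff(&x, frame, 256, 1000, false);
	return x.data_end > x.data ? 0 : 1;
}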
@@ -399,10 +395,20 @@ static u32 dpaa2_eth_run_xdp(struct dpaa2_eth_priv *priv,
xdp.frame_sz = DPAA2_ETH_RX_BUF_RAW_SIZE;
err = xdp_do_redirect(priv->net_dev, &xdp, xdp_prog);
- if (unlikely(err))
+ if (unlikely(err)) {
+ addr = dma_map_page(priv->net_dev->dev.parent,
+ virt_to_page(vaddr), 0,
+ priv->rx_buf_size, DMA_BIDIRECTIONAL);
+ if (unlikely(dma_mapping_error(priv->net_dev->dev.parent, addr))) {
+ free_pages((unsigned long)vaddr, 0);
+ } else {
+ ch->buf_count++;
+ dpaa2_eth_xdp_release_buf(priv, ch, addr);
+ }
ch->stats.xdp_drop++;
- else
+ } else {
ch->stats.xdp_redirect++;
+ }
break;
}
@@ -768,12 +774,11 @@ static int dpaa2_eth_build_sg_fd(struct dpaa2_eth_priv *priv,
/* Prepare the HW SGT structure */
sgt_buf_size = priv->tx_data_offset +
sizeof(struct dpaa2_sg_entry) * num_dma_bufs;
- sgt_buf = napi_alloc_frag(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN);
+ sgt_buf = napi_alloc_frag_align(sgt_buf_size, DPAA2_ETH_TX_BUF_ALIGN);
if (unlikely(!sgt_buf)) {
err = -ENOMEM;
goto sgt_buf_alloc_failed;
}
- sgt_buf = PTR_ALIGN(sgt_buf, DPAA2_ETH_TX_BUF_ALIGN);
memset(sgt_buf, 0, sgt_buf_size);
sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
@@ -1262,6 +1267,22 @@ static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
percpu_stats->tx_errors++;
}
+static int dpaa2_eth_set_rx_vlan_filtering(struct dpaa2_eth_priv *priv,
+ bool enable)
+{
+ int err;
+
+ err = dpni_enable_vlan_filter(priv->mc_io, 0, priv->mc_token, enable);
+
+ if (err) {
+ netdev_err(priv->net_dev,
+ "dpni_enable_vlan_filter failed\n");
+ return err;
+ }
+
+ return 0;
+}
+
static int dpaa2_eth_set_rx_csum(struct dpaa2_eth_priv *priv, bool enable)
{
int err;
@@ -1648,7 +1669,7 @@ set_cgtd:
* CG taildrop threshold, so it won't interfere with it; we also
* want frames in non-PFC enabled traffic classes to be kept in check)
*/
- td.enable = !tx_pause || (tx_pause && pfc);
+ td.enable = !tx_pause || pfc;
if (priv->rx_cgtd_enabled == td.enable)
return;
@@ -1691,7 +1712,7 @@ static int dpaa2_eth_link_state_update(struct dpaa2_eth_priv *priv)
/* When we manage the MAC/PHY using phylink there is no need
* to manually update the netif_carrier.
*/
- if (priv->mac)
+ if (dpaa2_eth_is_type_phy(priv))
goto out;
/* Check link state; speed / duplex changes are not treated yet */
@@ -1730,7 +1751,7 @@ static int dpaa2_eth_open(struct net_device *net_dev)
priv->dpbp_dev->obj_desc.id, priv->bpid);
}
- if (!priv->mac) {
+ if (!dpaa2_eth_is_type_phy(priv)) {
/* We'll only start the txqs when the link is actually ready;
* make sure we don't race against the link up notification,
* which may come immediately after dpni_enable();
@@ -1752,7 +1773,7 @@ static int dpaa2_eth_open(struct net_device *net_dev)
goto enable_err;
}
- if (priv->mac)
+ if (dpaa2_eth_is_type_phy(priv))
phylink_start(priv->mac->phylink);
return 0;
@@ -1826,11 +1847,11 @@ static int dpaa2_eth_stop(struct net_device *net_dev)
int dpni_enabled = 0;
int retries = 10;
- if (!priv->mac) {
+ if (dpaa2_eth_is_type_phy(priv)) {
+ phylink_stop(priv->mac->phylink);
+ } else {
netif_tx_stop_all_queues(net_dev);
netif_carrier_off(net_dev);
- } else {
- phylink_stop(priv->mac->phylink);
}
/* On dpni_disable(), the MC firmware will:
@@ -1952,6 +1973,43 @@ static void dpaa2_eth_add_mc_hw_addr(const struct net_device *net_dev,
}
}
+static int dpaa2_eth_rx_add_vid(struct net_device *net_dev,
+ __be16 vlan_proto, u16 vid)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ int err;
+
+ err = dpni_add_vlan_id(priv->mc_io, 0, priv->mc_token,
+ vid, 0, 0, 0);
+
+ if (err) {
+ netdev_warn(priv->net_dev,
+ "Could not add the vlan id %u\n",
+ vid);
+ return err;
+ }
+
+ return 0;
+}
+
+static int dpaa2_eth_rx_kill_vid(struct net_device *net_dev,
+ __be16 vlan_proto, u16 vid)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ int err;
+
+ err = dpni_remove_vlan_id(priv->mc_io, 0, priv->mc_token, vid);
+
+ if (err) {
+ netdev_warn(priv->net_dev,
+ "Could not remove the vlan id %u\n",
+ vid);
+ return err;
+ }
+
+ return 0;
+}
+
static void dpaa2_eth_set_rx_mode(struct net_device *net_dev)
{
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
@@ -2058,6 +2116,13 @@ static int dpaa2_eth_set_features(struct net_device *net_dev,
bool enable;
int err;
+ if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
+ enable = !!(features & NETIF_F_HW_VLAN_CTAG_FILTER);
+ err = dpaa2_eth_set_rx_vlan_filtering(priv, enable);
+ if (err)
+ return err;
+ }
+
if (changed & NETIF_F_RXCSUM) {
enable = !!(features & NETIF_F_RXCSUM);
err = dpaa2_eth_set_rx_csum(priv, enable);
@@ -2115,7 +2180,7 @@ static int dpaa2_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
if (cmd == SIOCSHWTSTAMP)
return dpaa2_eth_ts_ioctl(dev, rq, cmd);
- if (priv->mac)
+ if (dpaa2_eth_is_type_phy(priv))
return phylink_mii_ioctl(priv->mac->phylink, rq, cmd);
return -EOPNOTSUPP;
@@ -2507,6 +2572,8 @@ static const struct net_device_ops dpaa2_eth_ops = {
.ndo_bpf = dpaa2_eth_xdp,
.ndo_xdp_xmit = dpaa2_eth_xdp_xmit,
.ndo_setup_tc = dpaa2_eth_setup_tc,
+ .ndo_vlan_rx_add_vid = dpaa2_eth_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = dpaa2_eth_rx_kill_vid
};
static void dpaa2_eth_cdan_cb(struct dpaa2_io_notification_ctx *ctx)
@@ -4015,6 +4082,9 @@ static int dpaa2_eth_netdev_init(struct net_device *net_dev)
NETIF_F_LLTX | NETIF_F_HW_TC;
net_dev->hw_features = net_dev->features;
+ if (priv->dpni_attrs.vlan_filter_entries)
+ net_dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+
return 0;
}
@@ -4042,10 +4112,11 @@ static int dpaa2_eth_connect_mac(struct dpaa2_eth_priv *priv)
dpni_dev = to_fsl_mc_device(priv->net_dev->dev.parent);
dpmac_dev = fsl_mc_get_endpoint(dpni_dev);
- if (IS_ERR_OR_NULL(dpmac_dev) || dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type)
- return 0;
- if (dpaa2_mac_is_type_fixed(dpmac_dev, priv->mc_io))
+ if (PTR_ERR(dpmac_dev) == -EPROBE_DEFER)
+ return PTR_ERR(dpmac_dev);
+
+ if (IS_ERR(dpmac_dev) || dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type)
return 0;
mac = kzalloc(sizeof(struct dpaa2_mac), GFP_KERNEL);
@@ -4056,23 +4127,38 @@ static int dpaa2_eth_connect_mac(struct dpaa2_eth_priv *priv)
mac->mc_io = priv->mc_io;
mac->net_dev = priv->net_dev;
- err = dpaa2_mac_connect(mac);
- if (err) {
- netdev_err(priv->net_dev, "Error connecting to the MAC endpoint\n");
- kfree(mac);
- return err;
- }
+ err = dpaa2_mac_open(mac);
+ if (err)
+ goto err_free_mac;
priv->mac = mac;
+ if (dpaa2_eth_is_type_phy(priv)) {
+ err = dpaa2_mac_connect(mac);
+ if (err) {
+ netdev_err(priv->net_dev, "Error connecting to the MAC endpoint\n");
+ goto err_close_mac;
+ }
+ }
+
return 0;
+
+err_close_mac:
+ dpaa2_mac_close(mac);
+ priv->mac = NULL;
+err_free_mac:
+ kfree(mac);
+ return err;
}
static void dpaa2_eth_disconnect_mac(struct dpaa2_eth_priv *priv)
{
- if (!priv->mac)
+ if (dpaa2_eth_is_type_phy(priv))
+ dpaa2_mac_disconnect(priv->mac);
+
+ if (!dpaa2_eth_has_mac(priv))
return;
- dpaa2_mac_disconnect(priv->mac);
+ dpaa2_mac_close(priv->mac);
kfree(priv->mac);
priv->mac = NULL;
}
@@ -4101,7 +4187,7 @@ static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg)
dpaa2_eth_update_tx_fqids(priv);
rtnl_lock();
- if (priv->mac)
+ if (dpaa2_eth_has_mac(priv))
dpaa2_eth_disconnect_mac(priv);
else
dpaa2_eth_connect_mac(priv);
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
index d236b8695c39..9b6a89709ce1 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
@@ -693,6 +693,21 @@ static inline unsigned int dpaa2_eth_rx_head_room(struct dpaa2_eth_priv *priv)
return priv->tx_data_offset - DPAA2_ETH_RX_HWA_SIZE;
}
+static inline bool dpaa2_eth_is_type_phy(struct dpaa2_eth_priv *priv)
+{
+ if (priv->mac &&
+ (priv->mac->attr.link_type == DPMAC_LINK_TYPE_PHY ||
+ priv->mac->attr.link_type == DPMAC_LINK_TYPE_BACKPLANE))
+ return true;
+
+ return false;
+}
+
+static inline bool dpaa2_eth_has_mac(struct dpaa2_eth_priv *priv)
+{
+ return priv->mac ? true : false;
+}
+
int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags);
int dpaa2_eth_set_cls(struct net_device *net_dev, u64 key);
int dpaa2_eth_cls_key_size(u64 key);
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
index f981a523e13a..bf59708b869e 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
@@ -85,7 +85,7 @@ static int dpaa2_eth_nway_reset(struct net_device *net_dev)
{
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
- if (priv->mac)
+ if (dpaa2_eth_is_type_phy(priv))
return phylink_ethtool_nway_reset(priv->mac->phylink);
return -EOPNOTSUPP;
@@ -97,7 +97,7 @@ dpaa2_eth_get_link_ksettings(struct net_device *net_dev,
{
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
- if (priv->mac)
+ if (dpaa2_eth_is_type_phy(priv))
return phylink_ethtool_ksettings_get(priv->mac->phylink,
link_settings);
@@ -115,7 +115,7 @@ dpaa2_eth_set_link_ksettings(struct net_device *net_dev,
{
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
- if (!priv->mac)
+ if (!dpaa2_eth_is_type_phy(priv))
return -ENOTSUPP;
return phylink_ethtool_ksettings_set(priv->mac->phylink, link_settings);
@@ -127,7 +127,7 @@ static void dpaa2_eth_get_pauseparam(struct net_device *net_dev,
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
u64 link_options = priv->link_state.options;
- if (priv->mac) {
+ if (dpaa2_eth_is_type_phy(priv)) {
phylink_ethtool_get_pauseparam(priv->mac->phylink, pause);
return;
}
@@ -150,7 +150,7 @@ static int dpaa2_eth_set_pauseparam(struct net_device *net_dev,
return -EOPNOTSUPP;
}
- if (priv->mac)
+ if (dpaa2_eth_is_type_phy(priv))
return phylink_ethtool_set_pauseparam(priv->mac->phylink,
pause);
if (pause->autoneg)
@@ -198,7 +198,7 @@ static void dpaa2_eth_get_strings(struct net_device *netdev, u32 stringset,
strlcpy(p, dpaa2_ethtool_extras[i], ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
- if (priv->mac)
+ if (dpaa2_eth_has_mac(priv))
dpaa2_mac_get_strings(p);
break;
}
@@ -211,7 +211,7 @@ static int dpaa2_eth_get_sset_count(struct net_device *net_dev, int sset)
switch (sset) {
case ETH_SS_STATS: /* ethtool_get_stats(), ethtool_get_drvinfo() */
- if (priv->mac)
+ if (dpaa2_eth_has_mac(priv))
num_ss_stats += dpaa2_mac_get_sset_count();
return num_ss_stats;
default:
@@ -313,7 +313,7 @@ static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev,
}
*(data + i++) = buf_cnt;
- if (priv->mac)
+ if (dpaa2_eth_has_mac(priv))
dpaa2_mac_get_ethtool_stats(priv->mac, data + i);
}
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
index 828c177df03d..ccaf7e35abeb 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
@@ -79,10 +79,20 @@ static bool dpaa2_mac_phy_mode_mismatch(struct dpaa2_mac *mac,
phy_interface_t interface)
{
switch (interface) {
+ /* We can switch between SGMII and 1000BASE-X at runtime with
+ * pcs-lynx
+ */
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_1000BASEX:
+ if (mac->pcs &&
+ (mac->if_mode == PHY_INTERFACE_MODE_SGMII ||
+ mac->if_mode == PHY_INTERFACE_MODE_1000BASEX))
+ return false;
+ return interface != mac->if_mode;
+
case PHY_INTERFACE_MODE_10GBASER:
case PHY_INTERFACE_MODE_USXGMII:
case PHY_INTERFACE_MODE_QSGMII:
- case PHY_INTERFACE_MODE_SGMII:
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII_RXID:
@@ -122,13 +132,17 @@ static void dpaa2_mac_validate(struct phylink_config *config,
fallthrough;
case PHY_INTERFACE_MODE_SGMII:
case PHY_INTERFACE_MODE_QSGMII:
+ case PHY_INTERFACE_MODE_1000BASEX:
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII_RXID:
case PHY_INTERFACE_MODE_RGMII_TXID:
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 100baseT_Full);
+ phylink_set(mask, 1000baseX_Full);
phylink_set(mask, 1000baseT_Full);
+ if (state->interface == PHY_INTERFACE_MODE_1000BASEX)
+ break;
+ phylink_set(mask, 100baseT_Full);
+ phylink_set(mask, 10baseT_Full);
break;
default:
goto empty_set;
@@ -174,30 +188,22 @@ static void dpaa2_mac_link_up(struct phylink_config *config,
dpmac_state->up = 1;
- if (mac->if_link_type == DPMAC_LINK_TYPE_PHY) {
- /* If the DPMAC is configured for PHY mode, we need
- * to pass the link parameters to the MC firmware.
- */
- dpmac_state->rate = speed;
-
- if (duplex == DUPLEX_HALF)
- dpmac_state->options |= DPMAC_LINK_OPT_HALF_DUPLEX;
- else if (duplex == DUPLEX_FULL)
- dpmac_state->options &= ~DPMAC_LINK_OPT_HALF_DUPLEX;
-
- /* This is lossy; the firmware really should take the pause
- * enablement status rather than pause/asym pause status.
- */
- if (rx_pause)
- dpmac_state->options |= DPMAC_LINK_OPT_PAUSE;
- else
- dpmac_state->options &= ~DPMAC_LINK_OPT_PAUSE;
-
- if (rx_pause ^ tx_pause)
- dpmac_state->options |= DPMAC_LINK_OPT_ASYM_PAUSE;
- else
- dpmac_state->options &= ~DPMAC_LINK_OPT_ASYM_PAUSE;
- }
+ dpmac_state->rate = speed;
+
+ if (duplex == DUPLEX_HALF)
+ dpmac_state->options |= DPMAC_LINK_OPT_HALF_DUPLEX;
+ else if (duplex == DUPLEX_FULL)
+ dpmac_state->options &= ~DPMAC_LINK_OPT_HALF_DUPLEX;
+
+ if (rx_pause)
+ dpmac_state->options |= DPMAC_LINK_OPT_PAUSE;
+ else
+ dpmac_state->options &= ~DPMAC_LINK_OPT_PAUSE;
+
+ if (rx_pause ^ tx_pause)
+ dpmac_state->options |= DPMAC_LINK_OPT_ASYM_PAUSE;
+ else
+ dpmac_state->options &= ~DPMAC_LINK_OPT_ASYM_PAUSE;
err = dpmac_set_link_state(mac->mc_io, 0,
mac->mc_dev->mc_handle, dpmac_state);
@@ -228,32 +234,6 @@ static const struct phylink_mac_ops dpaa2_mac_phylink_ops = {
.mac_link_down = dpaa2_mac_link_down,
};
-bool dpaa2_mac_is_type_fixed(struct fsl_mc_device *dpmac_dev,
- struct fsl_mc_io *mc_io)
-{
- struct dpmac_attr attr;
- bool fixed = false;
- u16 mc_handle = 0;
- int err;
-
- err = dpmac_open(mc_io, 0, dpmac_dev->obj_desc.id,
- &mc_handle);
- if (err || !mc_handle)
- return false;
-
- err = dpmac_get_attributes(mc_io, 0, mc_handle, &attr);
- if (err)
- goto out;
-
- if (attr.link_type == DPMAC_LINK_TYPE_FIXED)
- fixed = true;
-
-out:
- dpmac_close(mc_io, 0, mc_handle);
-
- return fixed;
-}
-
static int dpaa2_pcs_create(struct dpaa2_mac *mac,
struct device_node *dpmac_node, int id)
{
@@ -302,36 +282,20 @@ static void dpaa2_pcs_destroy(struct dpaa2_mac *mac)
int dpaa2_mac_connect(struct dpaa2_mac *mac)
{
- struct fsl_mc_device *dpmac_dev = mac->mc_dev;
struct net_device *net_dev = mac->net_dev;
struct device_node *dpmac_node;
struct phylink *phylink;
- struct dpmac_attr attr;
int err;
- err = dpmac_open(mac->mc_io, 0, dpmac_dev->obj_desc.id,
- &dpmac_dev->mc_handle);
- if (err || !dpmac_dev->mc_handle) {
- netdev_err(net_dev, "dpmac_open() = %d\n", err);
- return -ENODEV;
- }
-
- err = dpmac_get_attributes(mac->mc_io, 0, dpmac_dev->mc_handle, &attr);
- if (err) {
- netdev_err(net_dev, "dpmac_get_attributes() = %d\n", err);
- goto err_close_dpmac;
- }
-
- mac->if_link_type = attr.link_type;
+ mac->if_link_type = mac->attr.link_type;
- dpmac_node = dpaa2_mac_get_node(attr.id);
+ dpmac_node = dpaa2_mac_get_node(mac->attr.id);
if (!dpmac_node) {
- netdev_err(net_dev, "No dpmac@%d node found.\n", attr.id);
- err = -ENODEV;
- goto err_close_dpmac;
+ netdev_err(net_dev, "No dpmac@%d node found.\n", mac->attr.id);
+ return -ENODEV;
}
- err = dpaa2_mac_get_if_mode(dpmac_node, attr);
+ err = dpaa2_mac_get_if_mode(dpmac_node, mac->attr);
if (err < 0) {
err = -EINVAL;
goto err_put_node;
@@ -351,9 +315,10 @@ int dpaa2_mac_connect(struct dpaa2_mac *mac)
goto err_put_node;
}
- if (attr.link_type == DPMAC_LINK_TYPE_PHY &&
- attr.eth_if != DPMAC_ETH_IF_RGMII) {
- err = dpaa2_pcs_create(mac, dpmac_node, attr.id);
+ if ((mac->attr.link_type == DPMAC_LINK_TYPE_PHY &&
+ mac->attr.eth_if != DPMAC_ETH_IF_RGMII) ||
+ mac->attr.link_type == DPMAC_LINK_TYPE_BACKPLANE) {
+ err = dpaa2_pcs_create(mac, dpmac_node, mac->attr.id);
if (err)
goto err_put_node;
}
@@ -389,8 +354,7 @@ err_pcs_destroy:
dpaa2_pcs_destroy(mac);
err_put_node:
of_node_put(dpmac_node);
-err_close_dpmac:
- dpmac_close(mac->mc_io, 0, dpmac_dev->mc_handle);
+
return err;
}
@@ -402,8 +366,40 @@ void dpaa2_mac_disconnect(struct dpaa2_mac *mac)
phylink_disconnect_phy(mac->phylink);
phylink_destroy(mac->phylink);
dpaa2_pcs_destroy(mac);
+}
+
+int dpaa2_mac_open(struct dpaa2_mac *mac)
+{
+ struct fsl_mc_device *dpmac_dev = mac->mc_dev;
+ struct net_device *net_dev = mac->net_dev;
+ int err;
+
+ err = dpmac_open(mac->mc_io, 0, dpmac_dev->obj_desc.id,
+ &dpmac_dev->mc_handle);
+ if (err || !dpmac_dev->mc_handle) {
+ netdev_err(net_dev, "dpmac_open() = %d\n", err);
+ return -ENODEV;
+ }
+
+ err = dpmac_get_attributes(mac->mc_io, 0, dpmac_dev->mc_handle,
+ &mac->attr);
+ if (err) {
+ netdev_err(net_dev, "dpmac_get_attributes() = %d\n", err);
+ goto err_close_dpmac;
+ }
+
+ return 0;
- dpmac_close(mac->mc_io, 0, mac->mc_dev->mc_handle);
+err_close_dpmac:
+ dpmac_close(mac->mc_io, 0, dpmac_dev->mc_handle);
+ return err;
+}
+
+void dpaa2_mac_close(struct dpaa2_mac *mac)
+{
+ struct fsl_mc_device *dpmac_dev = mac->mc_dev;
+
+ dpmac_close(mac->mc_io, 0, dpmac_dev->mc_handle);
}
static char dpaa2_mac_ethtool_stats[][ETH_GSTRING_LEN] = {
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h
index 955a52856210..13d42dd58ec9 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h
@@ -17,6 +17,7 @@ struct dpaa2_mac {
struct dpmac_link_state state;
struct net_device *net_dev;
struct fsl_mc_io *mc_io;
+ struct dpmac_attr attr;
struct phylink_config phylink_config;
struct phylink *phylink;
@@ -28,6 +29,10 @@ struct dpaa2_mac {
bool dpaa2_mac_is_type_fixed(struct fsl_mc_device *dpmac_dev,
struct fsl_mc_io *mc_io);
+int dpaa2_mac_open(struct dpaa2_mac *mac);
+
+void dpaa2_mac_close(struct dpaa2_mac *mac);
+
int dpaa2_mac_connect(struct dpaa2_mac *mac);
void dpaa2_mac_disconnect(struct dpaa2_mac *mac);
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h b/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h
index 90453dc7baef..9f80bdfeedec 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h
@@ -62,6 +62,10 @@
#define DPNI_CMDID_SET_RX_TC_DIST DPNI_CMD(0x235)
+#define DPNI_CMDID_ENABLE_VLAN_FILTER DPNI_CMD(0x230)
+#define DPNI_CMDID_ADD_VLAN_ID DPNI_CMD_V2(0x231)
+#define DPNI_CMDID_REMOVE_VLAN_ID DPNI_CMD(0x232)
+
#define DPNI_CMDID_SET_QOS_TBL DPNI_CMD(0x240)
#define DPNI_CMDID_ADD_QOS_ENT DPNI_CMD(0x241)
#define DPNI_CMDID_REMOVE_QOS_ENT DPNI_CMD(0x242)
@@ -662,4 +666,17 @@ struct dpni_rsp_single_step_cfg {
__le32 peer_delay;
};
+struct dpni_cmd_enable_vlan_filter {
+ /* only the LSB */
+ u8 en;
+};
+
+struct dpni_cmd_vlan_id {
+ u8 flags;
+ u8 tc_id;
+ u8 flow_id;
+ u8 pad;
+ __le16 vlan_id;
+};
+
#endif /* _FSL_DPNI_CMD_H */
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni.c b/drivers/net/ethernet/freescale/dpaa2/dpni.c
index 6ea7db66a632..aa429c17c343 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpni.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpni.c
@@ -1225,6 +1225,99 @@ int dpni_get_port_mac_addr(struct fsl_mc_io *mc_io,
}
/**
+ * dpni_enable_vlan_filter() - Enable/disable VLAN filtering mode
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @en: Set to '1' to enable; '0' to disable
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_enable_vlan_filter(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u32 en)
+{
+ struct dpni_cmd_enable_vlan_filter *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ENABLE_VLAN_FILTER,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_enable_vlan_filter *)cmd.params;
+ dpni_set_field(cmd_params->en, ENABLE, en);
+
+ /* send command to mc */
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_add_vlan_id() - Add VLAN ID filter
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @vlan_id: VLAN ID to add
+ * @flags: 0 - tc_id and flow_id will be ignored.
+ * Pkt with this vlan_id will be passed to the next
+ * classification stages
+ * DPNI_VLAN_SET_QUEUE_ACTION
+ * Pkt with this vlan_id will be forwarded directly to
+ * the queue defined by the tc_id and flow_id
+ *
+ * @tc_id: Traffic class selection (0-7)
+ * @flow_id: Selects the specific queue out of the set allocated for the
+ * same tc_id. Value must be in the range 0 to NUM_QUEUES - 1
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_add_vlan_id(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 vlan_id, u8 flags, u8 tc_id, u8 flow_id)
+{
+ struct dpni_cmd_vlan_id *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_VLAN_ID,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_vlan_id *)cmd.params;
+ cmd_params->flags = flags;
+ cmd_params->tc_id = tc_id;
+ cmd_params->flow_id = flow_id;
+ cmd_params->vlan_id = cpu_to_le16(vlan_id);
+
+ /* send command to mc */
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_remove_vlan_id() - Remove VLAN ID filter
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @vlan_id: VLAN ID to remove
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_remove_vlan_id(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 vlan_id)
+{
+ struct dpni_cmd_vlan_id *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_VLAN_ID,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_vlan_id *)cmd.params;
+ cmd_params->vlan_id = cpu_to_le16(vlan_id);
+
+ /* send command to mc */
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
* dpni_add_mac_addr() - Add MAC address filter
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni.h b/drivers/net/ethernet/freescale/dpaa2/dpni.h
index e7b9e195b534..4e96d9362dd2 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpni.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpni.h
@@ -1114,4 +1114,13 @@ int dpni_get_single_step_cfg(struct fsl_mc_io *mc_io,
u16 token,
struct dpni_single_step_cfg *ptp_cfg);
+int dpni_enable_vlan_filter(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u32 en);
+
+int dpni_add_vlan_id(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 vlan_id, u8 flags, u8 tc_id, u8 flow_id);
+
+int dpni_remove_vlan_id(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 vlan_id);
+
#endif /* __FSL_DPNI_H */
diff --git a/drivers/net/ethernet/freescale/enetc/Kconfig b/drivers/net/ethernet/freescale/enetc/Kconfig
index d99ea0f4e4a6..ab92382c399a 100644
--- a/drivers/net/ethernet/freescale/enetc/Kconfig
+++ b/drivers/net/ethernet/freescale/enetc/Kconfig
@@ -27,7 +27,7 @@ config FSL_ENETC_VF
config FSL_ENETC_MDIO
tristate "ENETC MDIO driver"
- depends on PCI
+ depends on PCI && MDIO_DEVRES && MDIO_BUS
help
This driver supports NXP ENETC Central MDIO controller as a PCIe
physical function (PF) device.
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
index e1e950d48c92..c71fe8d751d5 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_hw.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
@@ -196,6 +196,8 @@ enum enetc_bdr_type {TX, RX};
#define ENETC_CBS_BW_MASK GENMASK(6, 0)
#define ENETC_PTCCBSR1(n) (0x1114 + (n) * 8) /* n = 0 to 7*/
#define ENETC_RSSHASH_KEY_SIZE 40
+#define ENETC_PRSSCAPR 0x1404
+#define ENETC_PRSSCAPR_GET_NUM_RSS(val) (BIT((val) & 0xf) * 32)
#define ENETC_PRSSK(n) (0x1410 + (n) * 4) /* n = [0..9] */
#define ENETC_PSIVLANFMR 0x1700
#define ENETC_PSIVLANFMR_VS BIT(0)
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_mdio.c b/drivers/net/ethernet/freescale/enetc/enetc_mdio.c
index ee0116ed4738..70e6d97b380f 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_mdio.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_mdio.c
@@ -14,23 +14,6 @@
#define ENETC_MDIO_DATA 0x8 /* MDIO data */
#define ENETC_MDIO_ADDR 0xc /* MDIO address */
-static inline u32 _enetc_mdio_rd(struct enetc_mdio_priv *mdio_priv, int off)
-{
- return enetc_port_rd_mdio(mdio_priv->hw, mdio_priv->mdio_base + off);
-}
-
-static inline void _enetc_mdio_wr(struct enetc_mdio_priv *mdio_priv, int off,
- u32 val)
-{
- enetc_port_wr_mdio(mdio_priv->hw, mdio_priv->mdio_base + off, val);
-}
-
-#define enetc_mdio_rd(mdio_priv, off) \
- _enetc_mdio_rd(mdio_priv, ENETC_##off)
-#define enetc_mdio_wr(mdio_priv, off, val) \
- _enetc_mdio_wr(mdio_priv, ENETC_##off, val)
-#define enetc_mdio_rd_reg(off) enetc_mdio_rd(mdio_priv, off)
-
#define MDIO_CFG_CLKDIV(x) ((((x) >> 1) & 0xff) << 8)
#define MDIO_CFG_BSY BIT(0)
#define MDIO_CFG_RD_ER BIT(1)
@@ -47,15 +30,29 @@ static inline void _enetc_mdio_wr(struct enetc_mdio_priv *mdio_priv, int off,
#define MDIO_CTL_DEV_ADDR(x) ((x) & 0x1f)
#define MDIO_CTL_PORT_ADDR(x) (((x) & 0x1f) << 5)
#define MDIO_CTL_READ BIT(15)
-#define MDIO_DATA(x) ((x) & 0xffff)
-#define TIMEOUT 1000
+static inline u32 enetc_mdio_rd(struct enetc_mdio_priv *mdio_priv, int off)
+{
+ return enetc_port_rd_mdio(mdio_priv->hw, mdio_priv->mdio_base + off);
+}
+
+static inline void enetc_mdio_wr(struct enetc_mdio_priv *mdio_priv, int off,
+ u32 val)
+{
+ enetc_port_wr_mdio(mdio_priv->hw, mdio_priv->mdio_base + off, val);
+}
+
+static bool enetc_mdio_is_busy(struct enetc_mdio_priv *mdio_priv)
+{
+ return enetc_mdio_rd(mdio_priv, ENETC_MDIO_CFG) & MDIO_CFG_BSY;
+}
+
static int enetc_mdio_wait_complete(struct enetc_mdio_priv *mdio_priv)
{
- u32 val;
+ bool is_busy;
- return readx_poll_timeout(enetc_mdio_rd_reg, MDIO_CFG, val,
- !(val & MDIO_CFG_BSY), 10, 10 * TIMEOUT);
+ return readx_poll_timeout(enetc_mdio_is_busy, mdio_priv,
+ is_busy, !is_busy, 10, 10 * 1000);
}
int enetc_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value)
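readx_poll_timeout(op, addr, val, cond, sleep_us, timeout_us) repeatedly evaluates val = op(addr) until cond holds or timeout_us expires; below is a simplified stand-alone equivalent of the busy-bit wait above (user-space timing and the error value are stand-ins for illustration only):

#include <stdbool.h>
#include <unistd.h>

#define ETIMEDOUT_ERR	110	/* stand-in for -ETIMEDOUT */

/* stand-in for enetc_mdio_is_busy(): pretend the BSY bit clears immediately */
static bool mdio_is_busy(void *mdio_priv) { (void)mdio_priv; return false; }

static int wait_complete(void *mdio_priv)
{
	unsigned int waited_us = 0;

	while (mdio_is_busy(mdio_priv)) {
		if (waited_us >= 10 * 1000)	/* 10 ms total, as in the driver */
			return -ETIMEDOUT_ERR;
		usleep(10);			/* sleep_us = 10 */
		waited_us += 10;
	}
	return 0;
}

int main(void) { return wait_complete(NULL); }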
@@ -75,7 +72,7 @@ int enetc_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value)
mdio_cfg &= ~MDIO_CFG_ENC45;
}
- enetc_mdio_wr(mdio_priv, MDIO_CFG, mdio_cfg);
+ enetc_mdio_wr(mdio_priv, ENETC_MDIO_CFG, mdio_cfg);
ret = enetc_mdio_wait_complete(mdio_priv);
if (ret)
@@ -83,11 +80,11 @@ int enetc_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value)
/* set port and dev addr */
mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr);
- enetc_mdio_wr(mdio_priv, MDIO_CTL, mdio_ctl);
+ enetc_mdio_wr(mdio_priv, ENETC_MDIO_CTL, mdio_ctl);
/* set the register address */
if (regnum & MII_ADDR_C45) {
- enetc_mdio_wr(mdio_priv, MDIO_ADDR, regnum & 0xffff);
+ enetc_mdio_wr(mdio_priv, ENETC_MDIO_ADDR, regnum & 0xffff);
ret = enetc_mdio_wait_complete(mdio_priv);
if (ret)
@@ -95,7 +92,7 @@ int enetc_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value)
}
/* write the value */
- enetc_mdio_wr(mdio_priv, MDIO_DATA, MDIO_DATA(value));
+ enetc_mdio_wr(mdio_priv, ENETC_MDIO_DATA, value);
ret = enetc_mdio_wait_complete(mdio_priv);
if (ret)
@@ -121,7 +118,7 @@ int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
mdio_cfg &= ~MDIO_CFG_ENC45;
}
- enetc_mdio_wr(mdio_priv, MDIO_CFG, mdio_cfg);
+ enetc_mdio_wr(mdio_priv, ENETC_MDIO_CFG, mdio_cfg);
ret = enetc_mdio_wait_complete(mdio_priv);
if (ret)
@@ -129,11 +126,11 @@ int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
/* set port and device addr */
mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr);
- enetc_mdio_wr(mdio_priv, MDIO_CTL, mdio_ctl);
+ enetc_mdio_wr(mdio_priv, ENETC_MDIO_CTL, mdio_ctl);
/* set the register address */
if (regnum & MII_ADDR_C45) {
- enetc_mdio_wr(mdio_priv, MDIO_ADDR, regnum & 0xffff);
+ enetc_mdio_wr(mdio_priv, ENETC_MDIO_ADDR, regnum & 0xffff);
ret = enetc_mdio_wait_complete(mdio_priv);
if (ret)
@@ -141,21 +138,21 @@ int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
}
/* initiate the read */
- enetc_mdio_wr(mdio_priv, MDIO_CTL, mdio_ctl | MDIO_CTL_READ);
+ enetc_mdio_wr(mdio_priv, ENETC_MDIO_CTL, mdio_ctl | MDIO_CTL_READ);
ret = enetc_mdio_wait_complete(mdio_priv);
if (ret)
return ret;
/* return all Fs if nothing was there */
- if (enetc_mdio_rd(mdio_priv, MDIO_CFG) & MDIO_CFG_RD_ER) {
+ if (enetc_mdio_rd(mdio_priv, ENETC_MDIO_CFG) & MDIO_CFG_RD_ER) {
dev_dbg(&bus->dev,
"Error while reading PHY%d reg at %d.%hhu\n",
phy_id, dev_addr, regnum);
return 0xffff;
}
- value = enetc_mdio_rd(mdio_priv, MDIO_DATA) & 0xffff;
+ value = enetc_mdio_rd(mdio_priv, ENETC_MDIO_DATA) & 0xffff;
return value;
}
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
index ed8fcb8b486e..515c5b29d7aa 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
@@ -996,6 +996,51 @@ static void enetc_phylink_destroy(struct enetc_ndev_priv *priv)
phylink_destroy(priv->phylink);
}
+/* Initialize the entire shared memory for the flow steering entries
+ * of this port (PF + VFs)
+ */
+static int enetc_init_port_rfs_memory(struct enetc_si *si)
+{
+ struct enetc_cmd_rfse rfse = {0};
+ struct enetc_hw *hw = &si->hw;
+ int num_rfs, i, err = 0;
+ u32 val;
+
+ val = enetc_port_rd(hw, ENETC_PRFSCAPR);
+ num_rfs = ENETC_PRFSCAPR_GET_NUM_RFS(val);
+
+ for (i = 0; i < num_rfs; i++) {
+ err = enetc_set_fs_entry(si, &rfse, i);
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
+static int enetc_init_port_rss_memory(struct enetc_si *si)
+{
+ struct enetc_hw *hw = &si->hw;
+ int num_rss, err;
+ int *rss_table;
+ u32 val;
+
+ val = enetc_port_rd(hw, ENETC_PRSSCAPR);
+ num_rss = ENETC_PRSSCAPR_GET_NUM_RSS(val);
+ if (!num_rss)
+ return 0;
+
+ rss_table = kcalloc(num_rss, sizeof(*rss_table), GFP_KERNEL);
+ if (!rss_table)
+ return -ENOMEM;
+
+ err = enetc_set_rss_table(si, rss_table, num_rss);
+
+ kfree(rss_table);
+
+ return err;
+}
+
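ENETC_PRSSCAPR_GET_NUM_RSS() used above decodes the capability field as 32 times a power of two; a quick stand-alone illustration of that decode (the register values here are made up):

#include <stdio.h>

/* mirrors ENETC_PRSSCAPR_GET_NUM_RSS(val): BIT(val & 0xf) * 32 */
#define GET_NUM_RSS(val)	((1U << ((val) & 0xf)) * 32U)

int main(void)
{
	unsigned int caps[] = { 0x0, 0x1, 0x2 };	/* hypothetical PRSSCAPR reads */
	unsigned int i;

	for (i = 0; i < 3; i++)
		printf("PRSSCAPR=0x%x -> %u RSS entries\n", caps[i], GET_NUM_RSS(caps[i]));
	return 0;	/* prints 32, 64 and 128 */
}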
static int enetc_pf_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@@ -1051,6 +1096,18 @@ static int enetc_pf_probe(struct pci_dev *pdev,
goto err_alloc_si_res;
}
+ err = enetc_init_port_rfs_memory(si);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to initialize RFS memory\n");
+ goto err_init_port_rfs;
+ }
+
+ err = enetc_init_port_rss_memory(si);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to initialize RSS memory\n");
+ goto err_init_port_rss;
+ }
+
err = enetc_alloc_msix(priv);
if (err) {
dev_err(&pdev->dev, "MSIX alloc failed\n");
@@ -1079,6 +1136,8 @@ err_phylink_create:
enetc_mdiobus_destroy(pf);
err_mdiobus_create:
enetc_free_msix(priv);
+err_init_port_rss:
+err_init_port_rfs:
err_alloc_msix:
enetc_free_si_resources(priv);
err_alloc_si_res:
@@ -1098,14 +1157,15 @@ static void enetc_pf_remove(struct pci_dev *pdev)
struct enetc_ndev_priv *priv;
priv = netdev_priv(si->ndev);
- enetc_phylink_destroy(priv);
- enetc_mdiobus_destroy(pf);
if (pf->num_vfs)
enetc_sriov_configure(pdev, 0);
unregister_netdev(si->ndev);
+ enetc_phylink_destroy(priv);
+ enetc_mdiobus_destroy(pf);
+
enetc_free_msix(priv);
enetc_free_si_resources(priv);
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index c527f4ee1d3a..0602d5d5d2ee 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -462,6 +462,11 @@ struct bufdesc_ex {
*/
#define FEC_QUIRK_CLEAR_SETUP_MII (1 << 17)
+/* Some link partners do not tolerate the momentary reset of the REF_CLK
+ * frequency when the RNCTL register is cleared by hardware reset.
+ */
+#define FEC_QUIRK_NO_HARD_RESET (1 << 18)
+
struct bufdesc_prop {
int qid;
/* Address of Rx and Tx buffers */
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 04f24c66cf36..3db882322b2b 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -100,7 +100,8 @@ static const struct fec_devinfo fec_imx27_info = {
static const struct fec_devinfo fec_imx28_info = {
.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |
FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC |
- FEC_QUIRK_HAS_FRREG | FEC_QUIRK_CLEAR_SETUP_MII,
+ FEC_QUIRK_HAS_FRREG | FEC_QUIRK_CLEAR_SETUP_MII |
+ FEC_QUIRK_NO_HARD_RESET,
};
static const struct fec_devinfo fec_imx6q_info = {
@@ -944,7 +945,6 @@ static void
fec_restart(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- u32 val;
u32 temp_mac[2];
u32 rcntl = OPT_FRAME_SIZE | 0x04;
u32 ecntl = 0x2; /* ETHEREN */
@@ -953,7 +953,8 @@ fec_restart(struct net_device *ndev)
* For i.MX6SX SOC, enet use AXI bus, we use disable MAC
* instead of reset MAC itself.
*/
- if (fep->quirks & FEC_QUIRK_HAS_AVB) {
+ if (fep->quirks & FEC_QUIRK_HAS_AVB ||
+ ((fep->quirks & FEC_QUIRK_NO_HARD_RESET) && fep->link)) {
writel(0, fep->hwp + FEC_ECNTRL);
} else {
writel(1, fep->hwp + FEC_ECNTRL);
@@ -995,7 +996,8 @@ fec_restart(struct net_device *ndev)
#if !defined(CONFIG_M5272)
if (fep->quirks & FEC_QUIRK_HAS_RACC) {
- val = readl(fep->hwp + FEC_RACC);
+ u32 val = readl(fep->hwp + FEC_RACC);
+
/* align IP header */
val |= FEC_RACC_SHIFT16;
if (fep->csum_flags & FLAG_RX_CSUM_ENABLED)
@@ -1662,7 +1664,6 @@ static int fec_enet_rx_napi(struct napi_struct *napi, int budget)
static void fec_get_mac(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- struct fec_platform_data *pdata = dev_get_platdata(&fep->pdev->dev);
unsigned char *iap, tmpaddr[ETH_ALEN];
/*
@@ -1693,6 +1694,8 @@ static void fec_get_mac(struct net_device *ndev)
if (FEC_FLASHMAC)
iap = (unsigned char *)FEC_FLASHMAC;
#else
+ struct fec_platform_data *pdata = dev_get_platdata(&fep->pdev->dev);
+
if (pdata)
iap = (unsigned char *)&pdata->mac;
#endif
@@ -2165,9 +2168,9 @@ static int fec_enet_mii_init(struct platform_device *pdev)
fep->mii_bus->parent = &pdev->dev;
err = of_mdiobus_register(fep->mii_bus, node);
- of_node_put(node);
if (err)
goto err_out_free_mdiobus;
+ of_node_put(node);
mii_cnt++;
@@ -2180,6 +2183,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
err_out_free_mdiobus:
mdiobus_free(fep->mii_bus);
err_out:
+ of_node_put(node);
return err;
}
diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c
index bb9887f98841..62f42921933d 100644
--- a/drivers/net/ethernet/freescale/fman/fman_memac.c
+++ b/drivers/net/ethernet/freescale/fman/fman_memac.c
@@ -111,6 +111,7 @@ do { \
#define IF_MODE_MASK 0x00000003 /* 30-31 Mask on i/f mode bits */
#define IF_MODE_10G 0x00000000 /* 30-31 10G interface */
+#define IF_MODE_MII 0x00000001 /* 30-31 MII interface */
#define IF_MODE_GMII 0x00000002 /* 30-31 GMII (1G) interface */
#define IF_MODE_RGMII 0x00000004
#define IF_MODE_RGMII_AUTO 0x00008000
@@ -442,6 +443,9 @@ static int init(struct memac_regs __iomem *regs, struct memac_cfg *cfg,
case PHY_INTERFACE_MODE_XGMII:
tmp |= IF_MODE_10G;
break;
+ case PHY_INTERFACE_MODE_MII:
+ tmp |= IF_MODE_MII;
+ break;
default:
tmp |= IF_MODE_GMII;
if (phy_if == PHY_INTERFACE_MODE_RGMII ||
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index d391a45cebb6..541de32ea662 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -58,7 +58,6 @@
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#define DEBUG
#include <linux/kernel.h>
#include <linux/string.h>
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 6d853f018d53..ef4e2febeb5b 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -70,9 +70,32 @@ static struct {
module_param_named(debug, debug.msg_enable, int, 0);
MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 0xffff=all)");
-static struct ucc_geth_info ugeth_primary_info = {
+static int ucc_geth_thread_count(enum ucc_geth_num_of_threads idx)
+{
+ static const u8 count[] = {
+ [UCC_GETH_NUM_OF_THREADS_1] = 1,
+ [UCC_GETH_NUM_OF_THREADS_2] = 2,
+ [UCC_GETH_NUM_OF_THREADS_4] = 4,
+ [UCC_GETH_NUM_OF_THREADS_6] = 6,
+ [UCC_GETH_NUM_OF_THREADS_8] = 8,
+ };
+ if (idx >= ARRAY_SIZE(count))
+ return 0;
+ return count[idx];
+}
+
+static inline int ucc_geth_tx_queues(const struct ucc_geth_info *info)
+{
+ return 1;
+}
+
+static inline int ucc_geth_rx_queues(const struct ucc_geth_info *info)
+{
+ return 1;
+}
+
+static const struct ucc_geth_info ugeth_primary_info = {
.uf_info = {
- .bd_mem_part = MEM_PART_SYSTEM,
.rtsm = UCC_FAST_SEND_IDLES_BETWEEN_FRAMES,
.max_rx_buf_length = 1536,
/* adjusted at startup if max-speed 1000 */
@@ -90,8 +113,6 @@ static struct ucc_geth_info ugeth_primary_info = {
.tcrc = UCC_FAST_16_BIT_CRC,
.synl = UCC_FAST_SYNC_LEN_NOT_USED,
},
- .numQueuesTx = 1,
- .numQueuesRx = 1,
.extendedFilteringChainPointer = ((uint32_t) NULL),
.typeorlen = 3072 /*1536 */ ,
.nonBackToBackIfgPart1 = 0x40,
@@ -157,8 +178,6 @@ static struct ucc_geth_info ugeth_primary_info = {
.riscRx = QE_RISC_ALLOCATION_RISC1_AND_RISC2,
};
-static struct ucc_geth_info ugeth_info[8];
-
#ifdef DEBUG
static void mem_disp(u8 *addr, int size)
{
@@ -558,7 +577,7 @@ static void dump_bds(struct ucc_geth_private *ugeth)
int i;
int length;
- for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) {
+ for (i = 0; i < ucc_geth_tx_queues(ugeth->ug_info); i++) {
if (ugeth->p_tx_bd_ring[i]) {
length =
(ugeth->ug_info->bdRingLenTx[i] *
@@ -567,7 +586,7 @@ static void dump_bds(struct ucc_geth_private *ugeth)
mem_disp(ugeth->p_tx_bd_ring[i], length);
}
}
- for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
+ for (i = 0; i < ucc_geth_rx_queues(ugeth->ug_info); i++) {
if (ugeth->p_rx_bd_ring[i]) {
length =
(ugeth->ug_info->bdRingLenRx[i] *
@@ -671,32 +690,12 @@ static void dump_regs(struct ucc_geth_private *ugeth)
in_be32(&ugeth->ug_regs->scam));
if (ugeth->p_thread_data_tx) {
- int numThreadsTxNumerical;
- switch (ugeth->ug_info->numThreadsTx) {
- case UCC_GETH_NUM_OF_THREADS_1:
- numThreadsTxNumerical = 1;
- break;
- case UCC_GETH_NUM_OF_THREADS_2:
- numThreadsTxNumerical = 2;
- break;
- case UCC_GETH_NUM_OF_THREADS_4:
- numThreadsTxNumerical = 4;
- break;
- case UCC_GETH_NUM_OF_THREADS_6:
- numThreadsTxNumerical = 6;
- break;
- case UCC_GETH_NUM_OF_THREADS_8:
- numThreadsTxNumerical = 8;
- break;
- default:
- numThreadsTxNumerical = 0;
- break;
- }
+ int count = ucc_geth_thread_count(ugeth->ug_info->numThreadsTx);
pr_info("Thread data TXs:\n");
pr_info("Base address: 0x%08x\n",
(u32)ugeth->p_thread_data_tx);
- for (i = 0; i < numThreadsTxNumerical; i++) {
+ for (i = 0; i < count; i++) {
pr_info("Thread data TX[%d]:\n", i);
pr_info("Base address: 0x%08x\n",
(u32)&ugeth->p_thread_data_tx[i]);
@@ -705,32 +704,12 @@ static void dump_regs(struct ucc_geth_private *ugeth)
}
}
if (ugeth->p_thread_data_rx) {
- int numThreadsRxNumerical;
- switch (ugeth->ug_info->numThreadsRx) {
- case UCC_GETH_NUM_OF_THREADS_1:
- numThreadsRxNumerical = 1;
- break;
- case UCC_GETH_NUM_OF_THREADS_2:
- numThreadsRxNumerical = 2;
- break;
- case UCC_GETH_NUM_OF_THREADS_4:
- numThreadsRxNumerical = 4;
- break;
- case UCC_GETH_NUM_OF_THREADS_6:
- numThreadsRxNumerical = 6;
- break;
- case UCC_GETH_NUM_OF_THREADS_8:
- numThreadsRxNumerical = 8;
- break;
- default:
- numThreadsRxNumerical = 0;
- break;
- }
+ int count = ucc_geth_thread_count(ugeth->ug_info->numThreadsRx);
pr_info("Thread data RX:\n");
pr_info("Base address: 0x%08x\n",
(u32)ugeth->p_thread_data_rx);
- for (i = 0; i < numThreadsRxNumerical; i++) {
+ for (i = 0; i < count; i++) {
pr_info("Thread data RX[%d]:\n", i);
pr_info("Base address: 0x%08x\n",
(u32)&ugeth->p_thread_data_rx[i]);
@@ -905,7 +884,7 @@ static void dump_regs(struct ucc_geth_private *ugeth)
if (ugeth->p_send_q_mem_reg) {
pr_info("Send Q memory registers:\n");
pr_info("Base address: 0x%08x\n", (u32)ugeth->p_send_q_mem_reg);
- for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) {
+ for (i = 0; i < ucc_geth_tx_queues(ugeth->ug_info); i++) {
pr_info("SQQD[%d]:\n", i);
pr_info("Base address: 0x%08x\n",
(u32)&ugeth->p_send_q_mem_reg->sqqd[i]);
@@ -937,7 +916,7 @@ static void dump_regs(struct ucc_geth_private *ugeth)
pr_info("RX IRQ coalescing tables:\n");
pr_info("Base address: 0x%08x\n",
(u32)ugeth->p_rx_irq_coalescing_tbl);
- for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
+ for (i = 0; i < ucc_geth_rx_queues(ugeth->ug_info); i++) {
pr_info("RX IRQ coalescing table entry[%d]:\n", i);
pr_info("Base address: 0x%08x\n",
(u32)&ugeth->p_rx_irq_coalescing_tbl->
@@ -959,7 +938,7 @@ static void dump_regs(struct ucc_geth_private *ugeth)
if (ugeth->p_rx_bd_qs_tbl) {
pr_info("RX BD QS tables:\n");
pr_info("Base address: 0x%08x\n", (u32)ugeth->p_rx_bd_qs_tbl);
- for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
+ for (i = 0; i < ucc_geth_rx_queues(ugeth->ug_info); i++) {
pr_info("RX BD QS table[%d]:\n", i);
pr_info("Base address: 0x%08x\n",
(u32)&ugeth->p_rx_bd_qs_tbl[i]);
@@ -1835,7 +1814,7 @@ static void ucc_geth_free_rx(struct ucc_geth_private *ugeth)
ug_info = ugeth->ug_info;
uf_info = &ug_info->uf_info;
- for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
+ for (i = 0; i < ucc_geth_rx_queues(ugeth->ug_info); i++) {
if (ugeth->p_rx_bd_ring[i]) {
/* Return existing data buffers in ring */
bd = ugeth->p_rx_bd_ring[i];
@@ -1856,12 +1835,7 @@ static void ucc_geth_free_rx(struct ucc_geth_private *ugeth)
kfree(ugeth->rx_skbuff[i]);
- if (ugeth->ug_info->uf_info.bd_mem_part ==
- MEM_PART_SYSTEM)
- kfree((void *)ugeth->rx_bd_ring_offset[i]);
- else if (ugeth->ug_info->uf_info.bd_mem_part ==
- MEM_PART_MURAM)
- qe_muram_free(ugeth->rx_bd_ring_offset[i]);
+ kfree(ugeth->p_rx_bd_ring[i]);
ugeth->p_rx_bd_ring[i] = NULL;
}
}
@@ -1880,7 +1854,7 @@ static void ucc_geth_free_tx(struct ucc_geth_private *ugeth)
ug_info = ugeth->ug_info;
uf_info = &ug_info->uf_info;
- for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) {
+ for (i = 0; i < ucc_geth_tx_queues(ugeth->ug_info); i++) {
bd = ugeth->p_tx_bd_ring[i];
if (!bd)
continue;
@@ -1898,15 +1872,8 @@ static void ucc_geth_free_tx(struct ucc_geth_private *ugeth)
kfree(ugeth->tx_skbuff[i]);
- if (ugeth->p_tx_bd_ring[i]) {
- if (ugeth->ug_info->uf_info.bd_mem_part ==
- MEM_PART_SYSTEM)
- kfree((void *)ugeth->tx_bd_ring_offset[i]);
- else if (ugeth->ug_info->uf_info.bd_mem_part ==
- MEM_PART_MURAM)
- qe_muram_free(ugeth->tx_bd_ring_offset[i]);
- ugeth->p_tx_bd_ring[i] = NULL;
- }
+ kfree(ugeth->p_tx_bd_ring[i]);
+ ugeth->p_tx_bd_ring[i] = NULL;
}
}
@@ -1921,50 +1888,39 @@ static void ucc_geth_memclean(struct ucc_geth_private *ugeth)
ugeth->uccf = NULL;
}
- if (ugeth->p_thread_data_tx) {
- qe_muram_free(ugeth->thread_dat_tx_offset);
- ugeth->p_thread_data_tx = NULL;
- }
- if (ugeth->p_thread_data_rx) {
- qe_muram_free(ugeth->thread_dat_rx_offset);
- ugeth->p_thread_data_rx = NULL;
- }
- if (ugeth->p_exf_glbl_param) {
- qe_muram_free(ugeth->exf_glbl_param_offset);
- ugeth->p_exf_glbl_param = NULL;
- }
- if (ugeth->p_rx_glbl_pram) {
- qe_muram_free(ugeth->rx_glbl_pram_offset);
- ugeth->p_rx_glbl_pram = NULL;
- }
- if (ugeth->p_tx_glbl_pram) {
- qe_muram_free(ugeth->tx_glbl_pram_offset);
- ugeth->p_tx_glbl_pram = NULL;
- }
- if (ugeth->p_send_q_mem_reg) {
- qe_muram_free(ugeth->send_q_mem_reg_offset);
- ugeth->p_send_q_mem_reg = NULL;
- }
- if (ugeth->p_scheduler) {
- qe_muram_free(ugeth->scheduler_offset);
- ugeth->p_scheduler = NULL;
- }
- if (ugeth->p_tx_fw_statistics_pram) {
- qe_muram_free(ugeth->tx_fw_statistics_pram_offset);
- ugeth->p_tx_fw_statistics_pram = NULL;
- }
- if (ugeth->p_rx_fw_statistics_pram) {
- qe_muram_free(ugeth->rx_fw_statistics_pram_offset);
- ugeth->p_rx_fw_statistics_pram = NULL;
- }
- if (ugeth->p_rx_irq_coalescing_tbl) {
- qe_muram_free(ugeth->rx_irq_coalescing_tbl_offset);
- ugeth->p_rx_irq_coalescing_tbl = NULL;
- }
- if (ugeth->p_rx_bd_qs_tbl) {
- qe_muram_free(ugeth->rx_bd_qs_tbl_offset);
- ugeth->p_rx_bd_qs_tbl = NULL;
- }
+ qe_muram_free_addr(ugeth->p_thread_data_tx);
+ ugeth->p_thread_data_tx = NULL;
+
+ qe_muram_free_addr(ugeth->p_thread_data_rx);
+ ugeth->p_thread_data_rx = NULL;
+
+ qe_muram_free_addr(ugeth->p_exf_glbl_param);
+ ugeth->p_exf_glbl_param = NULL;
+
+ qe_muram_free_addr(ugeth->p_rx_glbl_pram);
+ ugeth->p_rx_glbl_pram = NULL;
+
+ qe_muram_free_addr(ugeth->p_tx_glbl_pram);
+ ugeth->p_tx_glbl_pram = NULL;
+
+ qe_muram_free_addr(ugeth->p_send_q_mem_reg);
+ ugeth->p_send_q_mem_reg = NULL;
+
+ qe_muram_free_addr(ugeth->p_scheduler);
+ ugeth->p_scheduler = NULL;
+
+ qe_muram_free_addr(ugeth->p_tx_fw_statistics_pram);
+ ugeth->p_tx_fw_statistics_pram = NULL;
+
+ qe_muram_free_addr(ugeth->p_rx_fw_statistics_pram);
+ ugeth->p_rx_fw_statistics_pram = NULL;
+
+ qe_muram_free_addr(ugeth->p_rx_irq_coalescing_tbl);
+ ugeth->p_rx_irq_coalescing_tbl = NULL;
+
+ qe_muram_free_addr(ugeth->p_rx_bd_qs_tbl);
+ ugeth->p_rx_bd_qs_tbl = NULL;
+
if (ugeth->p_init_enet_param_shadow) {
return_init_enet_entries(ugeth,
&(ugeth->p_init_enet_param_shadow->
@@ -2073,15 +2029,8 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
ug_info = ugeth->ug_info;
uf_info = &ug_info->uf_info;
- if (!((uf_info->bd_mem_part == MEM_PART_SYSTEM) ||
- (uf_info->bd_mem_part == MEM_PART_MURAM))) {
- if (netif_msg_probe(ugeth))
- pr_err("Bad memory partition value\n");
- return -EINVAL;
- }
-
/* Rx BD lengths */
- for (i = 0; i < ug_info->numQueuesRx; i++) {
+ for (i = 0; i < ucc_geth_rx_queues(ug_info); i++) {
if ((ug_info->bdRingLenRx[i] < UCC_GETH_RX_BD_RING_SIZE_MIN) ||
(ug_info->bdRingLenRx[i] %
UCC_GETH_RX_BD_RING_SIZE_ALIGNMENT)) {
@@ -2092,7 +2041,7 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
}
/* Tx BD lengths */
- for (i = 0; i < ug_info->numQueuesTx; i++) {
+ for (i = 0; i < ucc_geth_tx_queues(ug_info); i++) {
if (ug_info->bdRingLenTx[i] < UCC_GETH_TX_BD_RING_SIZE_MIN) {
if (netif_msg_probe(ugeth))
pr_err("Tx BD ring length must be no smaller than 2\n");
@@ -2109,14 +2058,14 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
}
/* num Tx queues */
- if (ug_info->numQueuesTx > NUM_TX_QUEUES) {
+ if (ucc_geth_tx_queues(ug_info) > NUM_TX_QUEUES) {
if (netif_msg_probe(ugeth))
pr_err("number of tx queues too large\n");
return -EINVAL;
}
/* num Rx queues */
- if (ug_info->numQueuesRx > NUM_RX_QUEUES) {
+ if (ucc_geth_rx_queues(ug_info) > NUM_RX_QUEUES) {
if (netif_msg_probe(ugeth))
pr_err("number of rx queues too large\n");
return -EINVAL;
@@ -2124,7 +2073,7 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
/* l2qt */
for (i = 0; i < UCC_GETH_VLAN_PRIORITY_MAX; i++) {
- if (ug_info->l2qt[i] >= ug_info->numQueuesRx) {
+ if (ug_info->l2qt[i] >= ucc_geth_rx_queues(ug_info)) {
if (netif_msg_probe(ugeth))
pr_err("VLAN priority table entry must not be larger than number of Rx queues\n");
return -EINVAL;
@@ -2133,7 +2082,7 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
/* l3qt */
for (i = 0; i < UCC_GETH_IP_PRIORITY_MAX; i++) {
- if (ug_info->l3qt[i] >= ug_info->numQueuesRx) {
+ if (ug_info->l3qt[i] >= ucc_geth_rx_queues(ug_info)) {
if (netif_msg_probe(ugeth))
pr_err("IP priority table entry must not be larger than number of Rx queues\n");
return -EINVAL;
@@ -2156,10 +2105,10 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
/* Generate uccm_mask for receive */
uf_info->uccm_mask = ug_info->eventRegMask & UCCE_OTHER;/* Errors */
- for (i = 0; i < ug_info->numQueuesRx; i++)
+ for (i = 0; i < ucc_geth_rx_queues(ug_info); i++)
uf_info->uccm_mask |= (UCC_GETH_UCCE_RXF0 << i);
- for (i = 0; i < ug_info->numQueuesTx; i++)
+ for (i = 0; i < ucc_geth_tx_queues(ug_info); i++)
uf_info->uccm_mask |= (UCC_GETH_UCCE_TXB0 << i);
/* Initialize the general fast UCC block. */
if (ucc_fast_init(uf_info, &ugeth->uccf)) {
@@ -2198,53 +2147,32 @@ static int ucc_geth_alloc_tx(struct ucc_geth_private *ugeth)
uf_info = &ug_info->uf_info;
/* Allocate Tx bds */
- for (j = 0; j < ug_info->numQueuesTx; j++) {
- /* Allocate in multiple of
- UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT,
- according to spec */
- length = ((ug_info->bdRingLenTx[j] * sizeof(struct qe_bd))
- / UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT)
- * UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT;
- if ((ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)) %
- UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT)
- length += UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT;
- if (uf_info->bd_mem_part == MEM_PART_SYSTEM) {
- u32 align = 4;
- if (UCC_GETH_TX_BD_RING_ALIGNMENT > 4)
- align = UCC_GETH_TX_BD_RING_ALIGNMENT;
- ugeth->tx_bd_ring_offset[j] =
- (u32) kmalloc((u32) (length + align), GFP_KERNEL);
-
- if (ugeth->tx_bd_ring_offset[j] != 0)
- ugeth->p_tx_bd_ring[j] =
- (u8 __iomem *)((ugeth->tx_bd_ring_offset[j] +
- align) & ~(align - 1));
- } else if (uf_info->bd_mem_part == MEM_PART_MURAM) {
- ugeth->tx_bd_ring_offset[j] =
- qe_muram_alloc(length,
- UCC_GETH_TX_BD_RING_ALIGNMENT);
- if (!IS_ERR_VALUE(ugeth->tx_bd_ring_offset[j]))
- ugeth->p_tx_bd_ring[j] =
- (u8 __iomem *) qe_muram_addr(ugeth->
- tx_bd_ring_offset[j]);
- }
+ for (j = 0; j < ucc_geth_tx_queues(ug_info); j++) {
+ u32 align = max(UCC_GETH_TX_BD_RING_ALIGNMENT,
+ UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT);
+ u32 alloc;
+
+ length = ug_info->bdRingLenTx[j] * sizeof(struct qe_bd);
+ alloc = round_up(length, align);
+ alloc = roundup_pow_of_two(alloc);
+
+ ugeth->p_tx_bd_ring[j] = kmalloc(alloc, GFP_KERNEL);
+
if (!ugeth->p_tx_bd_ring[j]) {
if (netif_msg_ifup(ugeth))
pr_err("Can not allocate memory for Tx bd rings\n");
return -ENOMEM;
}
/* Zero unused end of bd ring, according to spec */
- memset_io((void __iomem *)(ugeth->p_tx_bd_ring[j] +
- ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)), 0,
- length - ug_info->bdRingLenTx[j] * sizeof(struct qe_bd));
+ memset(ugeth->p_tx_bd_ring[j] + length, 0, alloc - length);
}
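A worked example of the sizing done in this loop (the ring length, descriptor size and alignment are assumed values; the kernel's round_up() and roundup_pow_of_two() are modeled with plain arithmetic):

#include <stdio.h>

static unsigned int round_up_to(unsigned int x, unsigned int a)
{
	return (x + a - 1) / a * a;
}

static unsigned int round_pow2(unsigned int x)
{
	unsigned int p = 1;

	while (p < x)
		p <<= 1;
	return p;
}

int main(void)
{
	unsigned int bd_ring_len = 18, bd_size = 8;	/* hypothetical: 18 BDs of 8 bytes */
	unsigned int align = 32;			/* assumed max of the two alignments */
	unsigned int length = bd_ring_len * bd_size;	/* 144 */
	unsigned int alloc = round_pow2(round_up_to(length, align));

	printf("length=%u alloc=%u\n", length, alloc);	/* length=144 alloc=256 */
	return 0;
}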
/* Init Tx bds */
- for (j = 0; j < ug_info->numQueuesTx; j++) {
+ for (j = 0; j < ucc_geth_tx_queues(ug_info); j++) {
/* Setup the skbuff rings */
ugeth->tx_skbuff[j] =
- kmalloc_array(ugeth->ug_info->bdRingLenTx[j],
- sizeof(struct sk_buff *), GFP_KERNEL);
+ kcalloc(ugeth->ug_info->bdRingLenTx[j],
+ sizeof(struct sk_buff *), GFP_KERNEL);
if (ugeth->tx_skbuff[j] == NULL) {
if (netif_msg_ifup(ugeth))
@@ -2252,9 +2180,6 @@ static int ucc_geth_alloc_tx(struct ucc_geth_private *ugeth)
return -ENOMEM;
}
- for (i = 0; i < ugeth->ug_info->bdRingLenTx[j]; i++)
- ugeth->tx_skbuff[j][i] = NULL;
-
ugeth->skb_curtx[j] = ugeth->skb_dirtytx[j] = 0;
bd = ugeth->confBd[j] = ugeth->txBd[j] = ugeth->p_tx_bd_ring[j];
for (i = 0; i < ug_info->bdRingLenTx[j]; i++) {
@@ -2284,27 +2209,15 @@ static int ucc_geth_alloc_rx(struct ucc_geth_private *ugeth)
uf_info = &ug_info->uf_info;
/* Allocate Rx bds */
- for (j = 0; j < ug_info->numQueuesRx; j++) {
+ for (j = 0; j < ucc_geth_rx_queues(ug_info); j++) {
+ u32 align = UCC_GETH_RX_BD_RING_ALIGNMENT;
+ u32 alloc;
+
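+ /* Same sizing scheme as the Tx BD rings: round up to the ring
+ * alignment, then to a power of two so the kmalloc()ed buffer is
+ * naturally aligned.
+ */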
length = ug_info->bdRingLenRx[j] * sizeof(struct qe_bd);
- if (uf_info->bd_mem_part == MEM_PART_SYSTEM) {
- u32 align = 4;
- if (UCC_GETH_RX_BD_RING_ALIGNMENT > 4)
- align = UCC_GETH_RX_BD_RING_ALIGNMENT;
- ugeth->rx_bd_ring_offset[j] =
- (u32) kmalloc((u32) (length + align), GFP_KERNEL);
- if (ugeth->rx_bd_ring_offset[j] != 0)
- ugeth->p_rx_bd_ring[j] =
- (u8 __iomem *)((ugeth->rx_bd_ring_offset[j] +
- align) & ~(align - 1));
- } else if (uf_info->bd_mem_part == MEM_PART_MURAM) {
- ugeth->rx_bd_ring_offset[j] =
- qe_muram_alloc(length,
- UCC_GETH_RX_BD_RING_ALIGNMENT);
- if (!IS_ERR_VALUE(ugeth->rx_bd_ring_offset[j]))
- ugeth->p_rx_bd_ring[j] =
- (u8 __iomem *) qe_muram_addr(ugeth->
- rx_bd_ring_offset[j]);
- }
+ alloc = round_up(length, align);
+ alloc = roundup_pow_of_two(alloc);
+
+ ugeth->p_rx_bd_ring[j] = kmalloc(alloc, GFP_KERNEL);
if (!ugeth->p_rx_bd_ring[j]) {
if (netif_msg_ifup(ugeth))
pr_err("Can not allocate memory for Rx bd rings\n");
@@ -2313,11 +2226,11 @@ static int ucc_geth_alloc_rx(struct ucc_geth_private *ugeth)
}
/* Init Rx bds */
- for (j = 0; j < ug_info->numQueuesRx; j++) {
+ for (j = 0; j < ucc_geth_rx_queues(ug_info); j++) {
/* Setup the skbuff rings */
ugeth->rx_skbuff[j] =
- kmalloc_array(ugeth->ug_info->bdRingLenRx[j],
- sizeof(struct sk_buff *), GFP_KERNEL);
+ kcalloc(ugeth->ug_info->bdRingLenRx[j],
+ sizeof(struct sk_buff *), GFP_KERNEL);
if (ugeth->rx_skbuff[j] == NULL) {
if (netif_msg_ifup(ugeth))
@@ -2325,9 +2238,6 @@ static int ucc_geth_alloc_rx(struct ucc_geth_private *ugeth)
return -ENOMEM;
}
- for (i = 0; i < ugeth->ug_info->bdRingLenRx[j]; i++)
- ugeth->rx_skbuff[j][i] = NULL;
-
ugeth->skb_currx[j] = 0;
bd = ugeth->rxBd[j] = ugeth->p_rx_bd_ring[j];
for (i = 0; i < ug_info->bdRingLenRx[j]; i++) {
@@ -2359,10 +2269,10 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
u32 init_enet_pram_offset, cecr_subblock, command;
u32 ifstat, i, j, size, l2qt, l3qt;
u16 temoder = UCC_GETH_TEMODER_INIT;
- u16 test;
u8 function_code = 0;
u8 __iomem *endOfRing;
u8 numThreadsRxNumerical, numThreadsTxNumerical;
+ s32 rx_glbl_pram_offset, tx_glbl_pram_offset;
ugeth_vdbg("%s: IN", __func__);
uccf = ugeth->uccf;
@@ -2371,45 +2281,15 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
uf_regs = uccf->uf_regs;
ug_regs = ugeth->ug_regs;
- switch (ug_info->numThreadsRx) {
- case UCC_GETH_NUM_OF_THREADS_1:
- numThreadsRxNumerical = 1;
- break;
- case UCC_GETH_NUM_OF_THREADS_2:
- numThreadsRxNumerical = 2;
- break;
- case UCC_GETH_NUM_OF_THREADS_4:
- numThreadsRxNumerical = 4;
- break;
- case UCC_GETH_NUM_OF_THREADS_6:
- numThreadsRxNumerical = 6;
- break;
- case UCC_GETH_NUM_OF_THREADS_8:
- numThreadsRxNumerical = 8;
- break;
- default:
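+ /* ucc_geth_thread_count() (introduced earlier in this patch) maps the
+ * UCC_GETH_NUM_OF_THREADS_* encoding to a plain count and yields 0 for
+ * values it does not recognize, which the caller treats as an error.
+ */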
+ numThreadsRxNumerical = ucc_geth_thread_count(ug_info->numThreadsRx);
+ if (!numThreadsRxNumerical) {
if (netif_msg_ifup(ugeth))
pr_err("Bad number of Rx threads value\n");
return -EINVAL;
}
- switch (ug_info->numThreadsTx) {
- case UCC_GETH_NUM_OF_THREADS_1:
- numThreadsTxNumerical = 1;
- break;
- case UCC_GETH_NUM_OF_THREADS_2:
- numThreadsTxNumerical = 2;
- break;
- case UCC_GETH_NUM_OF_THREADS_4:
- numThreadsTxNumerical = 4;
- break;
- case UCC_GETH_NUM_OF_THREADS_6:
- numThreadsTxNumerical = 6;
- break;
- case UCC_GETH_NUM_OF_THREADS_8:
- numThreadsTxNumerical = 8;
- break;
- default:
+ numThreadsTxNumerical = ucc_geth_thread_count(ug_info->numThreadsTx);
+ if (!numThreadsTxNumerical) {
if (netif_msg_ifup(ugeth))
pr_err("Bad number of Tx threads value\n");
return -EINVAL;
@@ -2507,20 +2387,15 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
*/
/* Tx global PRAM */
/* Allocate global tx parameter RAM page */
- ugeth->tx_glbl_pram_offset =
+ tx_glbl_pram_offset =
qe_muram_alloc(sizeof(struct ucc_geth_tx_global_pram),
UCC_GETH_TX_GLOBAL_PRAM_ALIGNMENT);
- if (IS_ERR_VALUE(ugeth->tx_glbl_pram_offset)) {
+ if (tx_glbl_pram_offset < 0) {
if (netif_msg_ifup(ugeth))
pr_err("Can not allocate DPRAM memory for p_tx_glbl_pram\n");
return -ENOMEM;
}
- ugeth->p_tx_glbl_pram =
- (struct ucc_geth_tx_global_pram __iomem *) qe_muram_addr(ugeth->
- tx_glbl_pram_offset);
- /* Zero out p_tx_glbl_pram */
- memset_io((void __iomem *)ugeth->p_tx_glbl_pram, 0, sizeof(struct ucc_geth_tx_global_pram));
-
+ ugeth->p_tx_glbl_pram = qe_muram_addr(tx_glbl_pram_offset);
/* Fill global PRAM */
/* TQPTR */
@@ -2554,7 +2429,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
/* SQPTR */
/* Size varies with number of Tx queues */
ugeth->send_q_mem_reg_offset =
- qe_muram_alloc(ug_info->numQueuesTx *
+ qe_muram_alloc(ucc_geth_tx_queues(ug_info) *
sizeof(struct ucc_geth_send_queue_qd),
UCC_GETH_SEND_QUEUE_QUEUE_DESCRIPTOR_ALIGNMENT);
if (IS_ERR_VALUE(ugeth->send_q_mem_reg_offset)) {
@@ -2570,29 +2445,20 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
/* Setup the table */
/* Assume BD rings are already established */
- for (i = 0; i < ug_info->numQueuesTx; i++) {
+ for (i = 0; i < ucc_geth_tx_queues(ug_info); i++) {
endOfRing =
ugeth->p_tx_bd_ring[i] + (ug_info->bdRingLenTx[i] -
1) * sizeof(struct qe_bd);
- if (ugeth->ug_info->uf_info.bd_mem_part == MEM_PART_SYSTEM) {
- out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].bd_ring_base,
- (u32) virt_to_phys(ugeth->p_tx_bd_ring[i]));
- out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].
- last_bd_completed_address,
- (u32) virt_to_phys(endOfRing));
- } else if (ugeth->ug_info->uf_info.bd_mem_part ==
- MEM_PART_MURAM) {
- out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].bd_ring_base,
- (u32)qe_muram_dma(ugeth->p_tx_bd_ring[i]));
- out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].
- last_bd_completed_address,
- (u32)qe_muram_dma(endOfRing));
- }
+ out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].bd_ring_base,
+ (u32) virt_to_phys(ugeth->p_tx_bd_ring[i]));
+ out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].last_bd_completed_address,
+ (u32) virt_to_phys(endOfRing));
}
/* schedulerbasepointer */
- if (ug_info->numQueuesTx > 1) {
+ if (ucc_geth_tx_queues(ug_info) > 1) {
/* scheduler exists only if more than 1 tx queue */
ugeth->scheduler_offset =
qe_muram_alloc(sizeof(struct ucc_geth_scheduler),
@@ -2608,8 +2474,6 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
scheduler_offset);
out_be32(&ugeth->p_tx_glbl_pram->schedulerbasepointer,
ugeth->scheduler_offset);
- /* Zero out p_scheduler */
- memset_io((void __iomem *)ugeth->p_scheduler, 0, sizeof(struct ucc_geth_scheduler));
/* Set values in scheduler */
out_be32(&ugeth->p_scheduler->mblinterval,
@@ -2652,23 +2516,18 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
ugeth->p_tx_fw_statistics_pram =
(struct ucc_geth_tx_firmware_statistics_pram __iomem *)
qe_muram_addr(ugeth->tx_fw_statistics_pram_offset);
- /* Zero out p_tx_fw_statistics_pram */
- memset_io((void __iomem *)ugeth->p_tx_fw_statistics_pram,
- 0, sizeof(struct ucc_geth_tx_firmware_statistics_pram));
}
/* temoder */
/* Already has speed set */
- if (ug_info->numQueuesTx > 1)
+ if (ucc_geth_tx_queues(ug_info) > 1)
temoder |= TEMODER_SCHEDULER_ENABLE;
if (ug_info->ipCheckSumGenerate)
temoder |= TEMODER_IP_CHECKSUM_GENERATE;
- temoder |= ((ug_info->numQueuesTx - 1) << TEMODER_NUM_OF_QUEUES_SHIFT);
+ temoder |= ((ucc_geth_tx_queues(ug_info) - 1) << TEMODER_NUM_OF_QUEUES_SHIFT);
out_be16(&ugeth->p_tx_glbl_pram->temoder, temoder);
- test = in_be16(&ugeth->p_tx_glbl_pram->temoder);
-
/* Function code register value to be used later */
function_code = UCC_BMR_BO_BE | UCC_BMR_GBL;
/* Required for QE */
@@ -2678,20 +2537,15 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
/* Rx global PRAM */
/* Allocate global rx parameter RAM page */
- ugeth->rx_glbl_pram_offset =
+ rx_glbl_pram_offset =
qe_muram_alloc(sizeof(struct ucc_geth_rx_global_pram),
UCC_GETH_RX_GLOBAL_PRAM_ALIGNMENT);
- if (IS_ERR_VALUE(ugeth->rx_glbl_pram_offset)) {
+ if (rx_glbl_pram_offset < 0) {
if (netif_msg_ifup(ugeth))
pr_err("Can not allocate DPRAM memory for p_rx_glbl_pram\n");
return -ENOMEM;
}
- ugeth->p_rx_glbl_pram =
- (struct ucc_geth_rx_global_pram __iomem *) qe_muram_addr(ugeth->
- rx_glbl_pram_offset);
- /* Zero out p_rx_glbl_pram */
- memset_io((void __iomem *)ugeth->p_rx_glbl_pram, 0, sizeof(struct ucc_geth_rx_global_pram));
-
+ ugeth->p_rx_glbl_pram = qe_muram_addr(rx_glbl_pram_offset);
/* Fill global PRAM */
/* RQPTR */
@@ -2729,16 +2583,13 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
ugeth->p_rx_fw_statistics_pram =
(struct ucc_geth_rx_firmware_statistics_pram __iomem *)
qe_muram_addr(ugeth->rx_fw_statistics_pram_offset);
- /* Zero out p_rx_fw_statistics_pram */
- memset_io((void __iomem *)ugeth->p_rx_fw_statistics_pram, 0,
- sizeof(struct ucc_geth_rx_firmware_statistics_pram));
}
/* intCoalescingPtr */
/* Size varies with number of Rx queues */
ugeth->rx_irq_coalescing_tbl_offset =
- qe_muram_alloc(ug_info->numQueuesRx *
+ qe_muram_alloc(ucc_geth_rx_queues(ug_info) *
sizeof(struct ucc_geth_rx_interrupt_coalescing_entry)
+ 4, UCC_GETH_RX_INTERRUPT_COALESCING_ALIGNMENT);
if (IS_ERR_VALUE(ugeth->rx_irq_coalescing_tbl_offset)) {
@@ -2754,7 +2605,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
ugeth->rx_irq_coalescing_tbl_offset);
/* Fill interrupt coalescing table */
- for (i = 0; i < ug_info->numQueuesRx; i++) {
+ for (i = 0; i < ucc_geth_rx_queues(ug_info); i++) {
out_be32(&ugeth->p_rx_irq_coalescing_tbl->coalescingentry[i].
interruptcoalescingmaxvalue,
ug_info->interruptcoalescingmaxvalue[i]);
@@ -2803,7 +2654,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
/* RBDQPTR */
/* Size varies with number of Rx queues */
ugeth->rx_bd_qs_tbl_offset =
- qe_muram_alloc(ug_info->numQueuesRx *
+ qe_muram_alloc(ucc_geth_rx_queues(ug_info) *
(sizeof(struct ucc_geth_rx_bd_queues_entry) +
sizeof(struct ucc_geth_rx_prefetched_bds)),
UCC_GETH_RX_BD_QUEUES_ALIGNMENT);
@@ -2817,23 +2668,12 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
(struct ucc_geth_rx_bd_queues_entry __iomem *) qe_muram_addr(ugeth->
rx_bd_qs_tbl_offset);
out_be32(&ugeth->p_rx_glbl_pram->rbdqptr, ugeth->rx_bd_qs_tbl_offset);
- /* Zero out p_rx_bd_qs_tbl */
- memset_io((void __iomem *)ugeth->p_rx_bd_qs_tbl,
- 0,
- ug_info->numQueuesRx * (sizeof(struct ucc_geth_rx_bd_queues_entry) +
- sizeof(struct ucc_geth_rx_prefetched_bds)));
/* Setup the table */
/* Assume BD rings are already established */
- for (i = 0; i < ug_info->numQueuesRx; i++) {
- if (ugeth->ug_info->uf_info.bd_mem_part == MEM_PART_SYSTEM) {
- out_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr,
- (u32) virt_to_phys(ugeth->p_rx_bd_ring[i]));
- } else if (ugeth->ug_info->uf_info.bd_mem_part ==
- MEM_PART_MURAM) {
- out_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr,
- (u32)qe_muram_dma(ugeth->p_rx_bd_ring[i]));
- }
+ for (i = 0; i < ucc_geth_rx_queues(ug_info); i++) {
+ out_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr,
+ (u32) virt_to_phys(ugeth->p_rx_bd_ring[i]));
/* rest of fields handled by QE */
}
@@ -2854,7 +2694,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
ug_info->
vlanOperationNonTagged << REMODER_VLAN_OPERATION_NON_TAGGED_SHIFT;
remoder |= ug_info->rxQoSMode << REMODER_RX_QOS_MODE_SHIFT;
- remoder |= ((ug_info->numQueuesRx - 1) << REMODER_NUM_OF_QUEUES_SHIFT);
+ remoder |= ((ucc_geth_rx_queues(ug_info) - 1) << REMODER_NUM_OF_QUEUES_SHIFT);
if (ug_info->ipCheckSumCheck)
remoder |= REMODER_IP_CHECKSUM_CHECK;
if (ug_info->ipAddressAlignment)
@@ -2937,14 +2777,11 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
* allocated resources can be released when the channel is freed.
*/
if (!(ugeth->p_init_enet_param_shadow =
- kmalloc(sizeof(struct ucc_geth_init_pram), GFP_KERNEL))) {
+ kzalloc(sizeof(struct ucc_geth_init_pram), GFP_KERNEL))) {
if (netif_msg_ifup(ugeth))
pr_err("Can not allocate memory for p_UccInitEnetParamShadows\n");
return -ENOMEM;
}
- /* Zero out *p_init_enet_param_shadow */
- memset((char *)ugeth->p_init_enet_param_shadow,
- 0, sizeof(struct ucc_geth_init_pram));
/* Fill shadow InitEnet command parameter structure */
@@ -2964,7 +2801,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
((u32) ug_info->numThreadsTx) << ENET_INIT_PARAM_TGF_SHIFT;
ugeth->p_init_enet_param_shadow->rgftgfrxglobal |=
- ugeth->rx_glbl_pram_offset | ug_info->riscRx;
+ rx_glbl_pram_offset | ug_info->riscRx;
if ((ug_info->largestexternallookupkeysize !=
QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_NONE) &&
(ug_info->largestexternallookupkeysize !=
@@ -3002,7 +2839,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
}
ugeth->p_init_enet_param_shadow->txglobal =
- ugeth->tx_glbl_pram_offset | ug_info->riscTx;
+ tx_glbl_pram_offset | ug_info->riscTx;
if ((ret_val =
fill_init_enet_entries(ugeth,
&(ugeth->p_init_enet_param_shadow->
@@ -3016,7 +2853,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
}
/* Load Rx bds with buffers */
- for (i = 0; i < ug_info->numQueuesRx; i++) {
+ for (i = 0; i < ucc_geth_rx_queues(ug_info); i++) {
if ((ret_val = rx_bd_buffer_set(ugeth, (u8) i)) != 0) {
if (netif_msg_ifup(ugeth))
pr_err("Can not fill Rx bds with buffers\n");
@@ -3287,12 +3124,12 @@ static int ucc_geth_poll(struct napi_struct *napi, int budget)
/* Tx event processing */
spin_lock(&ugeth->lock);
- for (i = 0; i < ug_info->numQueuesTx; i++)
+ for (i = 0; i < ucc_geth_tx_queues(ug_info); i++)
ucc_geth_tx(ugeth->ndev, i);
spin_unlock(&ugeth->lock);
howmany = 0;
- for (i = 0; i < ug_info->numQueuesRx; i++)
+ for (i = 0; i < ucc_geth_rx_queues(ug_info); i++)
howmany += ucc_geth_rx(ugeth, i, budget - howmany);
if (howmany < budget) {
@@ -3685,6 +3522,36 @@ static const struct net_device_ops ucc_geth_netdev_ops = {
#endif
};
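+/* Parse the "<which>-clock-name" property of @np (preferred) or, failing
+ * that, the legacy "<which>-clock" property, and store the resulting
+ * qe_clock value in @out. Returns 0 on success and -EINVAL if neither
+ * property is present or the value is out of range.
+ */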
+static int ucc_geth_parse_clock(struct device_node *np, const char *which,
+ enum qe_clock *out)
+{
+ const char *sprop;
+ char buf[24];
+
+ snprintf(buf, sizeof(buf), "%s-clock-name", which);
+ sprop = of_get_property(np, buf, NULL);
+ if (sprop) {
+ *out = qe_clock_source(sprop);
+ } else {
+ u32 val;
+
+ snprintf(buf, sizeof(buf), "%s-clock", which);
+ if (of_property_read_u32(np, buf, &val)) {
+ /* If both *-clock-name and *-clock are missing,
+ * we want to tell people to use *-clock-name.
+ */
+ pr_err("missing %s-clock-name property\n", buf);
+ return -EINVAL;
+ }
+ *out = val;
+ }
+ if (*out < QE_CLK_NONE || *out > QE_CLK24) {
+ pr_err("invalid %s property\n", buf);
+ return -EINVAL;
+ }
+ return 0;
+}
+
static int ucc_geth_probe(struct platform_device* ofdev)
{
struct device *device = &ofdev->dev;
@@ -3695,7 +3562,6 @@ static int ucc_geth_probe(struct platform_device* ofdev)
struct resource res;
int err, ucc_num, max_speed = 0;
const unsigned int *prop;
- const char *sprop;
const void *mac_addr;
phy_interface_t phy_interface;
static const int enet_to_speed[] = {
@@ -3725,62 +3591,23 @@ static int ucc_geth_probe(struct platform_device* ofdev)
if ((ucc_num < 0) || (ucc_num > 7))
return -ENODEV;
- ug_info = &ugeth_info[ucc_num];
- if (ug_info == NULL) {
- if (netif_msg_probe(&debug))
- pr_err("[%d] Missing additional data!\n", ucc_num);
- return -ENODEV;
- }
+ ug_info = kmemdup(&ugeth_primary_info, sizeof(*ug_info), GFP_KERNEL);
+ if (ug_info == NULL)
+ return -ENOMEM;
ug_info->uf_info.ucc_num = ucc_num;
- sprop = of_get_property(np, "rx-clock-name", NULL);
- if (sprop) {
- ug_info->uf_info.rx_clock = qe_clock_source(sprop);
- if ((ug_info->uf_info.rx_clock < QE_CLK_NONE) ||
- (ug_info->uf_info.rx_clock > QE_CLK24)) {
- pr_err("invalid rx-clock-name property\n");
- return -EINVAL;
- }
- } else {
- prop = of_get_property(np, "rx-clock", NULL);
- if (!prop) {
- /* If both rx-clock-name and rx-clock are missing,
- we want to tell people to use rx-clock-name. */
- pr_err("missing rx-clock-name property\n");
- return -EINVAL;
- }
- if ((*prop < QE_CLK_NONE) || (*prop > QE_CLK24)) {
- pr_err("invalid rx-clock property\n");
- return -EINVAL;
- }
- ug_info->uf_info.rx_clock = *prop;
- }
-
- sprop = of_get_property(np, "tx-clock-name", NULL);
- if (sprop) {
- ug_info->uf_info.tx_clock = qe_clock_source(sprop);
- if ((ug_info->uf_info.tx_clock < QE_CLK_NONE) ||
- (ug_info->uf_info.tx_clock > QE_CLK24)) {
- pr_err("invalid tx-clock-name property\n");
- return -EINVAL;
- }
- } else {
- prop = of_get_property(np, "tx-clock", NULL);
- if (!prop) {
- pr_err("missing tx-clock-name property\n");
- return -EINVAL;
- }
- if ((*prop < QE_CLK_NONE) || (*prop > QE_CLK24)) {
- pr_err("invalid tx-clock property\n");
- return -EINVAL;
- }
- ug_info->uf_info.tx_clock = *prop;
- }
+ err = ucc_geth_parse_clock(np, "rx", &ug_info->uf_info.rx_clock);
+ if (err)
+ goto err_free_info;
+ err = ucc_geth_parse_clock(np, "tx", &ug_info->uf_info.tx_clock);
+ if (err)
+ goto err_free_info;
err = of_address_to_resource(np, 0, &res);
if (err)
- return -EINVAL;
+ goto err_free_info;
ug_info->uf_info.regs = res.start;
ug_info->uf_info.irq = irq_of_parse_and_map(np, 0);
@@ -3793,7 +3620,7 @@ static int ucc_geth_probe(struct platform_device* ofdev)
*/
err = of_phy_register_fixed_link(np);
if (err)
- return err;
+ goto err_free_info;
ug_info->phy_node = of_node_get(np);
}
@@ -3924,6 +3751,8 @@ err_deregister_fixed_link:
of_phy_deregister_fixed_link(np);
of_node_put(ug_info->tbi_node);
of_node_put(ug_info->phy_node);
+err_free_info:
+ kfree(ug_info);
return err;
}
@@ -3940,6 +3769,7 @@ static int ucc_geth_remove(struct platform_device* ofdev)
of_phy_deregister_fixed_link(np);
of_node_put(ugeth->ug_info->tbi_node);
of_node_put(ugeth->ug_info->phy_node);
+ kfree(ugeth->ug_info);
free_netdev(dev);
return 0;
@@ -3968,17 +3798,10 @@ static struct platform_driver ucc_geth_driver = {
static int __init ucc_geth_init(void)
{
- int i, ret;
-
if (netif_msg_drv(&debug))
pr_info(DRV_DESC "\n");
- for (i = 0; i < 8; i++)
- memcpy(&(ugeth_info[i]), &ugeth_primary_info,
- sizeof(ugeth_primary_info));
-
- ret = platform_driver_register(&ucc_geth_driver);
- return ret;
+ return platform_driver_register(&ucc_geth_driver);
}
static void __exit ucc_geth_exit(void)
diff --git a/drivers/net/ethernet/freescale/ucc_geth.h b/drivers/net/ethernet/freescale/ucc_geth.h
index 11d4bf5dc21f..4294ed096ebb 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.h
+++ b/drivers/net/ethernet/freescale/ucc_geth.h
@@ -1076,8 +1076,6 @@ struct ucc_geth_tad_params {
/* GETH protocol initialization structure */
struct ucc_geth_info {
struct ucc_fast_info uf_info;
- u8 numQueuesTx;
- u8 numQueuesRx;
int ipCheckSumCheck;
int ipCheckSumGenerate;
int rxExtendedFiltering;
@@ -1165,9 +1163,7 @@ struct ucc_geth_private {
struct ucc_geth_exf_global_pram __iomem *p_exf_glbl_param;
u32 exf_glbl_param_offset;
struct ucc_geth_rx_global_pram __iomem *p_rx_glbl_pram;
- u32 rx_glbl_pram_offset;
struct ucc_geth_tx_global_pram __iomem *p_tx_glbl_pram;
- u32 tx_glbl_pram_offset;
struct ucc_geth_send_queue_mem_region __iomem *p_send_q_mem_reg;
u32 send_q_mem_reg_offset;
struct ucc_geth_thread_data_tx __iomem *p_thread_data_tx;
@@ -1185,9 +1181,7 @@ struct ucc_geth_private {
struct ucc_geth_rx_bd_queues_entry __iomem *p_rx_bd_qs_tbl;
u32 rx_bd_qs_tbl_offset;
u8 __iomem *p_tx_bd_ring[NUM_TX_QUEUES];
- u32 tx_bd_ring_offset[NUM_TX_QUEUES];
u8 __iomem *p_rx_bd_ring[NUM_RX_QUEUES];
- u32 rx_bd_ring_offset[NUM_RX_QUEUES];
u8 __iomem *confBd[NUM_TX_QUEUES];
u8 __iomem *txBd[NUM_TX_QUEUES];
u8 __iomem *rxBd[NUM_RX_QUEUES];