-rw-r--r--  drivers/net/bonding/bond_main.c | 11
-rw-r--r--  drivers/net/dsa/lan9303_i2c.c | 5
-rw-r--r--  drivers/net/dsa/microchip/ksz9477_i2c.c | 5
-rw-r--r--  drivers/net/dsa/xrs700x/xrs700x_i2c.c | 5
-rw-r--r--  drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c | 26
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/i2c.c | 6
-rw-r--r--  drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c | 264
-rw-r--r--  drivers/net/ethernet/microchip/lan966x/lan966x_main.c | 5
-rw-r--r--  drivers/net/ethernet/microchip/lan966x/lan966x_main.h | 30
-rw-r--r--  drivers/net/ethernet/microchip/lan966x/lan966x_xdp.c | 66
-rw-r--r--  drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c | 209
-rw-r--r--  drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.c | 18
-rw-r--r--  drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.h | 13
-rw-r--r--  drivers/net/ethernet/microchip/vcap/vcap_api.c | 185
-rw-r--r--  drivers/net/ethernet/microchip/vcap/vcap_api_client.h | 22
-rw-r--r--  drivers/net/ethernet/microchip/vcap/vcap_api_debugfs.c | 98
-rw-r--r--  drivers/net/ethernet/microchip/vcap/vcap_api_debugfs_kunit.c | 20
-rw-r--r--  drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c | 200
-rw-r--r--  drivers/net/ethernet/microchip/vcap/vcap_api_private.h | 4
-rw-r--r--  drivers/net/ethernet/sfc/tx.c | 4
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 18
-rw-r--r--  drivers/net/ethernet/sun/cassini.c | 48
-rw-r--r--  drivers/net/ethernet/sun/sunvnet_common.c | 4
-rw-r--r--  drivers/net/phy/Kconfig | 2
-rw-r--r--  drivers/net/phy/motorcomm.c | 52
-rw-r--r--  drivers/net/thunderbolt.c | 8
-rw-r--r--  drivers/nfc/microread/i2c.c | 5
-rw-r--r--  drivers/nfc/nfcmrvl/i2c.c | 5
-rw-r--r--  drivers/nfc/nxp-nci/i2c.c | 5
-rw-r--r--  drivers/nfc/pn533/i2c.c | 5
-rw-r--r--  drivers/nfc/pn544/i2c.c | 5
-rw-r--r--  drivers/nfc/s3fwrn5/i2c.c | 5
-rw-r--r--  drivers/nfc/st-nci/i2c.c | 5
-rw-r--r--  drivers/nfc/st21nfca/i2c.c | 5
-rw-r--r--  drivers/ptp/ptp_idt82p33.c | 709
-rw-r--r--  drivers/ptp/ptp_idt82p33.h | 21
-rw-r--r--  include/trace/events/skb.h | 2
-rw-r--r--  lib/test_rhashtable.c | 6
-rw-r--r--  net/ethtool/ioctl.c | 3
-rw-r--r--  tools/testing/selftests/drivers/net/bonding/Makefile | 4
-rw-r--r--  tools/testing/selftests/drivers/net/bonding/lag_lib.sh | 106
-rwxr-xr-x  tools/testing/selftests/drivers/net/bonding/mode-1-recovery-updelay.sh | 45
-rwxr-xr-x  tools/testing/selftests/drivers/net/bonding/mode-2-recovery-updelay.sh | 45
-rw-r--r--  tools/testing/selftests/drivers/net/bonding/settings | 2
44 files changed, 1988 insertions(+), 323 deletions(-)
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 1cd4e71916f8..6c4348245d1f 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -2529,7 +2529,16 @@ static int bond_miimon_inspect(struct bonding *bond)
struct slave *slave;
bool ignore_updelay;
- ignore_updelay = !rcu_dereference(bond->curr_active_slave);
+ if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP) {
+ ignore_updelay = !rcu_dereference(bond->curr_active_slave);
+ } else {
+ struct bond_up_slave *usable_slaves;
+
+ usable_slaves = rcu_dereference(bond->usable_slaves);
+
+ if (usable_slaves && usable_slaves->count == 0)
+ ignore_updelay = true;
+ }
bond_for_each_slave_rcu(bond, slave, iter) {
bond_propose_link_state(slave, BOND_LINK_NOCHANGE);
diff --git a/drivers/net/dsa/lan9303_i2c.c b/drivers/net/dsa/lan9303_i2c.c
index 7d746cd9ca1b..1cb41c36bd47 100644
--- a/drivers/net/dsa/lan9303_i2c.c
+++ b/drivers/net/dsa/lan9303_i2c.c
@@ -29,8 +29,7 @@ static const struct regmap_config lan9303_i2c_regmap_config = {
.cache_type = REGCACHE_NONE,
};
-static int lan9303_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int lan9303_i2c_probe(struct i2c_client *client)
{
struct lan9303_i2c *sw_dev;
int ret;
@@ -106,7 +105,7 @@ static struct i2c_driver lan9303_i2c_driver = {
.name = "LAN9303_I2C",
.of_match_table = of_match_ptr(lan9303_i2c_of_match),
},
- .probe = lan9303_i2c_probe,
+ .probe_new = lan9303_i2c_probe,
.remove = lan9303_i2c_remove,
.shutdown = lan9303_i2c_shutdown,
.id_table = lan9303_i2c_id,
diff --git a/drivers/net/dsa/microchip/ksz9477_i2c.c b/drivers/net/dsa/microchip/ksz9477_i2c.c
index db4aec0a51dc..c1a633ca1e6d 100644
--- a/drivers/net/dsa/microchip/ksz9477_i2c.c
+++ b/drivers/net/dsa/microchip/ksz9477_i2c.c
@@ -14,8 +14,7 @@
KSZ_REGMAP_TABLE(ksz9477, not_used, 16, 0, 0);
-static int ksz9477_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *i2c_id)
+static int ksz9477_i2c_probe(struct i2c_client *i2c)
{
struct regmap_config rc;
struct ksz_device *dev;
@@ -120,7 +119,7 @@ static struct i2c_driver ksz9477_i2c_driver = {
.name = "ksz9477-switch",
.of_match_table = of_match_ptr(ksz9477_dt_ids),
},
- .probe = ksz9477_i2c_probe,
+ .probe_new = ksz9477_i2c_probe,
.remove = ksz9477_i2c_remove,
.shutdown = ksz9477_i2c_shutdown,
.id_table = ksz9477_i2c_id,
diff --git a/drivers/net/dsa/xrs700x/xrs700x_i2c.c b/drivers/net/dsa/xrs700x/xrs700x_i2c.c
index 54065cdedd35..14ff6887a225 100644
--- a/drivers/net/dsa/xrs700x/xrs700x_i2c.c
+++ b/drivers/net/dsa/xrs700x/xrs700x_i2c.c
@@ -76,8 +76,7 @@ static const struct regmap_config xrs700x_i2c_regmap_config = {
.val_format_endian = REGMAP_ENDIAN_BIG
};
-static int xrs700x_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *i2c_id)
+static int xrs700x_i2c_probe(struct i2c_client *i2c)
{
struct xrs700x *priv;
int ret;
@@ -148,7 +147,7 @@ static struct i2c_driver xrs700x_i2c_driver = {
.name = "xrs700x-i2c",
.of_match_table = of_match_ptr(xrs700x_i2c_dt_ids),
},
- .probe = xrs700x_i2c_probe,
+ .probe_new = xrs700x_i2c_probe,
.remove = xrs700x_i2c_remove,
.shutdown = xrs700x_i2c_shutdown,
.id_table = xrs700x_i2c_id,
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
index da9973b711f4..1a5fdd755e9e 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
@@ -1839,9 +1839,7 @@ static int chcr_short_record_handler(struct chcr_ktls_info *tx_info,
*/
if (prior_data_len) {
int i = 0;
- u8 *data = NULL;
skb_frag_t *f;
- u8 *vaddr;
int frag_size = 0, frag_delta = 0;
while (remaining > 0) {
@@ -1853,24 +1851,24 @@ static int chcr_short_record_handler(struct chcr_ktls_info *tx_info,
i++;
}
f = &record->frags[i];
- vaddr = kmap_atomic(skb_frag_page(f));
-
- data = vaddr + skb_frag_off(f) + remaining;
frag_delta = skb_frag_size(f) - remaining;
if (frag_delta >= prior_data_len) {
- memcpy(prior_data, data, prior_data_len);
- kunmap_atomic(vaddr);
+ memcpy_from_page(prior_data, skb_frag_page(f),
+ skb_frag_off(f) + remaining,
+ prior_data_len);
} else {
- memcpy(prior_data, data, frag_delta);
- kunmap_atomic(vaddr);
+ memcpy_from_page(prior_data, skb_frag_page(f),
+ skb_frag_off(f) + remaining,
+ frag_delta);
+
/* get the next page */
f = &record->frags[i + 1];
- vaddr = kmap_atomic(skb_frag_page(f));
- data = vaddr + skb_frag_off(f);
- memcpy(prior_data + frag_delta,
- data, (prior_data_len - frag_delta));
- kunmap_atomic(vaddr);
+
+ memcpy_from_page(prior_data + frag_delta,
+ skb_frag_page(f),
+ skb_frag_off(f),
+ prior_data_len - frag_delta);
}
/* reset tcp_seq as per the prior_data_required len */
tcp_seq -= prior_data_len;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/i2c.c b/drivers/net/ethernet/mellanox/mlxsw/i2c.c
index f5f5f8dc3d19..2c586c2308ae 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/i2c.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/i2c.c
@@ -632,9 +632,9 @@ static const struct mlxsw_bus mlxsw_i2c_bus = {
.cmd_exec = mlxsw_i2c_cmd_exec,
};
-static int mlxsw_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int mlxsw_i2c_probe(struct i2c_client *client)
{
+ const struct i2c_device_id *id = i2c_client_get_device_id(client);
const struct i2c_adapter_quirks *quirks = client->adapter->quirks;
struct mlxsw_i2c *mlxsw_i2c;
u8 status;
@@ -751,7 +751,7 @@ static void mlxsw_i2c_remove(struct i2c_client *client)
int mlxsw_i2c_driver_register(struct i2c_driver *i2c_driver)
{
- i2c_driver->probe = mlxsw_i2c_probe;
+ i2c_driver->probe_new = mlxsw_i2c_probe;
i2c_driver->remove = mlxsw_i2c_remove;
return i2c_add_driver(i2c_driver);
}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c b/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c
index 5fbbd479cfb0..5314c064ceae 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c
@@ -1,5 +1,8 @@
// SPDX-License-Identifier: GPL-2.0+
+#include <linux/bpf.h>
+#include <linux/filter.h>
+
#include "lan966x_main.h"
static int lan966x_fdma_channel_active(struct lan966x *lan966x)
@@ -16,7 +19,7 @@ static struct page *lan966x_fdma_rx_alloc_page(struct lan966x_rx *rx,
if (unlikely(!page))
return NULL;
- db->dataptr = page_pool_get_dma_addr(page);
+ db->dataptr = page_pool_get_dma_addr(page) + XDP_PACKET_HEADROOM;
return page;
}
@@ -72,12 +75,28 @@ static int lan966x_fdma_rx_alloc_page_pool(struct lan966x_rx *rx)
.nid = NUMA_NO_NODE,
.dev = lan966x->dev,
.dma_dir = DMA_FROM_DEVICE,
- .offset = 0,
+ .offset = XDP_PACKET_HEADROOM,
.max_len = rx->max_mtu -
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
};
+ if (lan966x_xdp_present(lan966x))
+ pp_params.dma_dir = DMA_BIDIRECTIONAL;
+
rx->page_pool = page_pool_create(&pp_params);
+
+ for (int i = 0; i < lan966x->num_phys_ports; ++i) {
+ struct lan966x_port *port;
+
+ if (!lan966x->ports[i])
+ continue;
+
+ port = lan966x->ports[i];
+ xdp_rxq_info_unreg_mem_model(&port->xdp_rxq);
+ xdp_rxq_info_reg_mem_model(&port->xdp_rxq, MEM_TYPE_PAGE_POOL,
+ rx->page_pool);
+ }
+
return PTR_ERR_OR_ZERO(rx->page_pool);
}
@@ -372,11 +391,14 @@ static void lan966x_fdma_tx_clear_buf(struct lan966x *lan966x, int weight)
{
struct lan966x_tx *tx = &lan966x->tx;
struct lan966x_tx_dcb_buf *dcb_buf;
+ struct xdp_frame_bulk bq;
struct lan966x_db *db;
unsigned long flags;
bool clear = false;
int i;
+ xdp_frame_bulk_init(&bq);
+
spin_lock_irqsave(&lan966x->tx_lock, flags);
for (i = 0; i < FDMA_DCB_MAX; ++i) {
dcb_buf = &tx->dcbs_buf[i];
@@ -389,19 +411,35 @@ static void lan966x_fdma_tx_clear_buf(struct lan966x *lan966x, int weight)
continue;
dcb_buf->dev->stats.tx_packets++;
- dcb_buf->dev->stats.tx_bytes += dcb_buf->skb->len;
+ dcb_buf->dev->stats.tx_bytes += dcb_buf->len;
dcb_buf->used = false;
- dma_unmap_single(lan966x->dev,
- dcb_buf->dma_addr,
- dcb_buf->skb->len,
- DMA_TO_DEVICE);
- if (!dcb_buf->ptp)
- dev_kfree_skb_any(dcb_buf->skb);
+ if (dcb_buf->use_skb) {
+ dma_unmap_single(lan966x->dev,
+ dcb_buf->dma_addr,
+ dcb_buf->len,
+ DMA_TO_DEVICE);
+
+ if (!dcb_buf->ptp)
+ napi_consume_skb(dcb_buf->data.skb, weight);
+ } else {
+ if (dcb_buf->xdp_ndo)
+ dma_unmap_single(lan966x->dev,
+ dcb_buf->dma_addr,
+ dcb_buf->len,
+ DMA_TO_DEVICE);
+
+ if (dcb_buf->xdp_ndo)
+ xdp_return_frame_bulk(dcb_buf->data.xdpf, &bq);
+ else
+ xdp_return_frame_rx_napi(dcb_buf->data.xdpf);
+ }
clear = true;
}
+ xdp_flush_frame_bulk(&bq);
+
if (clear)
lan966x_fdma_wakeup_netdev(lan966x);
@@ -432,11 +470,13 @@ static int lan966x_fdma_rx_check_frame(struct lan966x_rx *rx, u64 *src_port)
if (unlikely(!page))
return FDMA_ERROR;
- dma_sync_single_for_cpu(lan966x->dev, (dma_addr_t)db->dataptr,
+ dma_sync_single_for_cpu(lan966x->dev,
+ (dma_addr_t)db->dataptr + XDP_PACKET_HEADROOM,
FDMA_DCB_STATUS_BLOCKL(db->status),
DMA_FROM_DEVICE);
- lan966x_ifh_get_src_port(page_address(page), src_port);
+ lan966x_ifh_get_src_port(page_address(page) + XDP_PACKET_HEADROOM,
+ src_port);
if (WARN_ON(*src_port >= lan966x->num_phys_ports))
return FDMA_ERROR;
@@ -466,6 +506,7 @@ static struct sk_buff *lan966x_fdma_rx_get_frame(struct lan966x_rx *rx,
skb_mark_for_recycle(skb);
+ skb_reserve(skb, XDP_PACKET_HEADROOM);
skb_put(skb, FDMA_DCB_STATUS_BLOCKL(db->status));
lan966x_ifh_get_timestamp(skb->data, &timestamp);
@@ -505,6 +546,7 @@ static int lan966x_fdma_napi_poll(struct napi_struct *napi, int weight)
int dcb_reload = rx->dcb_index;
struct lan966x_rx_dcb *old_dcb;
struct lan966x_db *db;
+ bool redirect = false;
struct sk_buff *skb;
struct page *page;
int counter = 0;
@@ -527,6 +569,12 @@ static int lan966x_fdma_napi_poll(struct napi_struct *napi, int weight)
lan966x_fdma_rx_free_page(rx);
lan966x_fdma_rx_advance_dcb(rx);
goto allocate_new;
+ case FDMA_REDIRECT:
+ redirect = true;
+ fallthrough;
+ case FDMA_TX:
+ lan966x_fdma_rx_advance_dcb(rx);
+ continue;
case FDMA_DROP:
lan966x_fdma_rx_free_page(rx);
lan966x_fdma_rx_advance_dcb(rx);
@@ -563,6 +611,9 @@ allocate_new:
if (counter < weight && napi_complete_done(napi, counter))
lan_wr(0xff, lan966x, FDMA_INTR_DB_ENA);
+ if (redirect)
+ xdp_do_flush();
+
return counter;
}
@@ -607,14 +658,139 @@ static int lan966x_fdma_get_next_dcb(struct lan966x_tx *tx)
return -1;
}
+static void lan966x_fdma_tx_setup_dcb(struct lan966x_tx *tx,
+ int next_to_use, int len,
+ dma_addr_t dma_addr)
+{
+ struct lan966x_tx_dcb *next_dcb;
+ struct lan966x_db *next_db;
+
+ next_dcb = &tx->dcbs[next_to_use];
+ next_dcb->nextptr = FDMA_DCB_INVALID_DATA;
+
+ next_db = &next_dcb->db[0];
+ next_db->dataptr = dma_addr;
+ next_db->status = FDMA_DCB_STATUS_SOF |
+ FDMA_DCB_STATUS_EOF |
+ FDMA_DCB_STATUS_INTR |
+ FDMA_DCB_STATUS_BLOCKO(0) |
+ FDMA_DCB_STATUS_BLOCKL(len);
+}
+
+static void lan966x_fdma_tx_start(struct lan966x_tx *tx, int next_to_use)
+{
+ struct lan966x *lan966x = tx->lan966x;
+ struct lan966x_tx_dcb *dcb;
+
+ if (likely(lan966x->tx.activated)) {
+ /* Connect current dcb to the next db */
+ dcb = &tx->dcbs[tx->last_in_use];
+ dcb->nextptr = tx->dma + (next_to_use *
+ sizeof(struct lan966x_tx_dcb));
+
+ lan966x_fdma_tx_reload(tx);
+ } else {
+ /* Because it is the first time, just activate */
+ lan966x->tx.activated = true;
+ lan966x_fdma_tx_activate(tx);
+ }
+
+ /* Move to the next dcb because this is now the last one in use */
+ tx->last_in_use = next_to_use;
+}
+
+int lan966x_fdma_xmit_xdpf(struct lan966x_port *port,
+ struct xdp_frame *xdpf,
+ struct page *page,
+ bool dma_map)
+{
+ struct lan966x *lan966x = port->lan966x;
+ struct lan966x_tx_dcb_buf *next_dcb_buf;
+ struct lan966x_tx *tx = &lan966x->tx;
+ dma_addr_t dma_addr;
+ int next_to_use;
+ __be32 *ifh;
+ int ret = 0;
+
+ spin_lock(&lan966x->tx_lock);
+
+ /* Get next index */
+ next_to_use = lan966x_fdma_get_next_dcb(tx);
+ if (next_to_use < 0) {
+ netif_stop_queue(port->dev);
+ ret = NETDEV_TX_BUSY;
+ goto out;
+ }
+
+ /* Generate new IFH */
+ if (dma_map) {
+ if (xdpf->headroom < IFH_LEN_BYTES) {
+ ret = NETDEV_TX_OK;
+ goto out;
+ }
+
+ ifh = xdpf->data - IFH_LEN_BYTES;
+ memset(ifh, 0x0, sizeof(__be32) * IFH_LEN);
+ lan966x_ifh_set_bypass(ifh, 1);
+ lan966x_ifh_set_port(ifh, BIT_ULL(port->chip_port));
+
+ dma_addr = dma_map_single(lan966x->dev,
+ xdpf->data - IFH_LEN_BYTES,
+ xdpf->len + IFH_LEN_BYTES,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(lan966x->dev, dma_addr)) {
+ ret = NETDEV_TX_OK;
+ goto out;
+ }
+
+ /* Setup next dcb */
+ lan966x_fdma_tx_setup_dcb(tx, next_to_use,
+ xdpf->len + IFH_LEN_BYTES,
+ dma_addr);
+ } else {
+ ifh = page_address(page) + XDP_PACKET_HEADROOM;
+ memset(ifh, 0x0, sizeof(__be32) * IFH_LEN);
+ lan966x_ifh_set_bypass(ifh, 1);
+ lan966x_ifh_set_port(ifh, BIT_ULL(port->chip_port));
+
+ dma_addr = page_pool_get_dma_addr(page);
+ dma_sync_single_for_device(lan966x->dev,
+ dma_addr + XDP_PACKET_HEADROOM,
+ xdpf->len + IFH_LEN_BYTES,
+ DMA_TO_DEVICE);
+
+ /* Setup next dcb */
+ lan966x_fdma_tx_setup_dcb(tx, next_to_use,
+ xdpf->len + IFH_LEN_BYTES,
+ dma_addr + XDP_PACKET_HEADROOM);
+ }
+
+ /* Fill up the buffer */
+ next_dcb_buf = &tx->dcbs_buf[next_to_use];
+ next_dcb_buf->use_skb = false;
+ next_dcb_buf->data.xdpf = xdpf;
+ next_dcb_buf->xdp_ndo = dma_map;
+ next_dcb_buf->len = xdpf->len + IFH_LEN_BYTES;
+ next_dcb_buf->dma_addr = dma_addr;
+ next_dcb_buf->used = true;
+ next_dcb_buf->ptp = false;
+ next_dcb_buf->dev = port->dev;
+
+ /* Start the transmission */
+ lan966x_fdma_tx_start(tx, next_to_use);
+
+out:
+ spin_unlock(&lan966x->tx_lock);
+
+ return ret;
+}
+
int lan966x_fdma_xmit(struct sk_buff *skb, __be32 *ifh, struct net_device *dev)
{
struct lan966x_port *port = netdev_priv(dev);
struct lan966x *lan966x = port->lan966x;
struct lan966x_tx_dcb_buf *next_dcb_buf;
- struct lan966x_tx_dcb *next_dcb, *dcb;
struct lan966x_tx *tx = &lan966x->tx;
- struct lan966x_db *next_db;
int needed_headroom;
int needed_tailroom;
dma_addr_t dma_addr;
@@ -660,20 +836,14 @@ int lan966x_fdma_xmit(struct sk_buff *skb, __be32 *ifh, struct net_device *dev)
}
/* Setup next dcb */
- next_dcb = &tx->dcbs[next_to_use];
- next_dcb->nextptr = FDMA_DCB_INVALID_DATA;
-
- next_db = &next_dcb->db[0];
- next_db->dataptr = dma_addr;
- next_db->status = FDMA_DCB_STATUS_SOF |
- FDMA_DCB_STATUS_EOF |
- FDMA_DCB_STATUS_INTR |
- FDMA_DCB_STATUS_BLOCKO(0) |
- FDMA_DCB_STATUS_BLOCKL(skb->len);
+ lan966x_fdma_tx_setup_dcb(tx, next_to_use, skb->len, dma_addr);
/* Fill up the buffer */
next_dcb_buf = &tx->dcbs_buf[next_to_use];
- next_dcb_buf->skb = skb;
+ next_dcb_buf->use_skb = true;
+ next_dcb_buf->data.skb = skb;
+ next_dcb_buf->xdp_ndo = false;
+ next_dcb_buf->len = skb->len;
next_dcb_buf->dma_addr = dma_addr;
next_dcb_buf->used = true;
next_dcb_buf->ptp = false;
@@ -683,21 +853,8 @@ int lan966x_fdma_xmit(struct sk_buff *skb, __be32 *ifh, struct net_device *dev)
LAN966X_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
next_dcb_buf->ptp = true;
- if (likely(lan966x->tx.activated)) {
- /* Connect current dcb to the next db */
- dcb = &tx->dcbs[tx->last_in_use];
- dcb->nextptr = tx->dma + (next_to_use *
- sizeof(struct lan966x_tx_dcb));
-
- lan966x_fdma_tx_reload(tx);
- } else {
- /* Because it is first time, then just activate */
- lan966x->tx.activated = true;
- lan966x_fdma_tx_activate(tx);
- }
-
- /* Move to next dcb because this last in use */
- tx->last_in_use = next_to_use;
+ /* Start the transmission */
+ lan966x_fdma_tx_start(tx, next_to_use);
return NETDEV_TX_OK;
@@ -786,19 +943,15 @@ static int lan966x_fdma_get_max_frame(struct lan966x *lan966x)
return lan966x_fdma_get_max_mtu(lan966x) +
IFH_LEN_BYTES +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
- VLAN_HLEN * 2;
+ VLAN_HLEN * 2 +
+ XDP_PACKET_HEADROOM;
}
-int lan966x_fdma_change_mtu(struct lan966x *lan966x)
+static int __lan966x_fdma_reload(struct lan966x *lan966x, int max_mtu)
{
- int max_mtu;
int err;
u32 val;
- max_mtu = lan966x_fdma_get_max_frame(lan966x);
- if (max_mtu == lan966x->rx.max_mtu)
- return 0;
-
/* Disable the CPU port */
lan_rmw(QSYS_SW_PORT_MODE_PORT_ENA_SET(0),
QSYS_SW_PORT_MODE_PORT_ENA,
@@ -824,6 +977,25 @@ int lan966x_fdma_change_mtu(struct lan966x *lan966x)
return err;
}
+int lan966x_fdma_change_mtu(struct lan966x *lan966x)
+{
+ int max_mtu;
+
+ max_mtu = lan966x_fdma_get_max_frame(lan966x);
+ if (max_mtu == lan966x->rx.max_mtu)
+ return 0;
+
+ return __lan966x_fdma_reload(lan966x, max_mtu);
+}
+
+int lan966x_fdma_reload_page_pool(struct lan966x *lan966x)
+{
+ int max_mtu;
+
+ max_mtu = lan966x_fdma_get_max_frame(lan966x);
+ return __lan966x_fdma_reload(lan966x, max_mtu);
+}
+
void lan966x_fdma_netdev_init(struct lan966x *lan966x, struct net_device *dev)
{
if (lan966x->fdma_ndev)
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
index 42be5d0f1f01..0aed244826d3 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
@@ -302,13 +302,13 @@ err:
return NETDEV_TX_BUSY;
}
-static void lan966x_ifh_set_bypass(void *ifh, u64 bypass)
+void lan966x_ifh_set_bypass(void *ifh, u64 bypass)
{
packing(ifh, &bypass, IFH_POS_BYPASS + IFH_WID_BYPASS - 1,
IFH_POS_BYPASS, IFH_LEN * 4, PACK, 0);
}
-static void lan966x_ifh_set_port(void *ifh, u64 bypass)
+void lan966x_ifh_set_port(void *ifh, u64 bypass)
{
packing(ifh, &bypass, IFH_POS_DSTS + IFH_WID_DSTS - 1,
IFH_POS_DSTS, IFH_LEN * 4, PACK, 0);
@@ -469,6 +469,7 @@ static const struct net_device_ops lan966x_port_netdev_ops = {
.ndo_eth_ioctl = lan966x_port_ioctl,
.ndo_setup_tc = lan966x_tc_setup,
.ndo_bpf = lan966x_xdp,
+ .ndo_xdp_xmit = lan966x_xdp_xmit,
};
bool lan966x_netdevice_check(const struct net_device *dev)
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
index bc93051aa079..a0170a3fb976 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
@@ -105,11 +105,15 @@ enum macaccess_entry_type {
* FDMA_PASS, frame is valid and can be used
* FDMA_ERROR, something went wrong, stop getting more frames
* FDMA_DROP, frame is dropped, but continue to get more frames
+ * FDMA_TX, frame is given to TX, but continue to get more frames
+ * FDMA_REDIRECT, frame is redirected, but continue to get more frames
*/
enum lan966x_fdma_action {
FDMA_PASS = 0,
FDMA_ERROR,
FDMA_DROP,
+ FDMA_TX,
+ FDMA_REDIRECT,
};
struct lan966x_port;
@@ -173,11 +177,17 @@ struct lan966x_rx {
};
struct lan966x_tx_dcb_buf {
- struct net_device *dev;
- struct sk_buff *skb;
dma_addr_t dma_addr;
- bool used;
- bool ptp;
+ struct net_device *dev;
+ union {
+ struct sk_buff *skb;
+ struct xdp_frame *xdpf;
+ } data;
+ u32 len;
+ u32 used : 1;
+ u32 ptp : 1;
+ u32 use_skb : 1;
+ u32 xdp_ndo : 1;
};
struct lan966x_tx {
@@ -359,6 +369,8 @@ bool lan966x_hw_offload(struct lan966x *lan966x, u32 port, struct sk_buff *skb);
void lan966x_ifh_get_src_port(void *ifh, u64 *src_port);
void lan966x_ifh_get_timestamp(void *ifh, u64 *timestamp);
+void lan966x_ifh_set_bypass(void *ifh, u64 bypass);
+void lan966x_ifh_set_port(void *ifh, u64 bypass);
void lan966x_stats_get(struct net_device *dev,
struct rtnl_link_stats64 *stats);
@@ -459,12 +471,17 @@ u32 lan966x_ptp_get_period_ps(void);
int lan966x_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts);
int lan966x_fdma_xmit(struct sk_buff *skb, __be32 *ifh, struct net_device *dev);
+int lan966x_fdma_xmit_xdpf(struct lan966x_port *port,
+ struct xdp_frame *frame,
+ struct page *page,
+ bool dma_map);
int lan966x_fdma_change_mtu(struct lan966x *lan966x);
void lan966x_fdma_netdev_init(struct lan966x *lan966x, struct net_device *dev);
void lan966x_fdma_netdev_deinit(struct lan966x *lan966x, struct net_device *dev);
int lan966x_fdma_init(struct lan966x *lan966x);
void lan966x_fdma_deinit(struct lan966x *lan966x);
irqreturn_t lan966x_fdma_irq_handler(int irq, void *args);
+int lan966x_fdma_reload_page_pool(struct lan966x *lan966x);
int lan966x_lag_port_join(struct lan966x_port *port,
struct net_device *brport_dev,
@@ -555,6 +572,11 @@ int lan966x_xdp(struct net_device *dev, struct netdev_bpf *xdp);
int lan966x_xdp_run(struct lan966x_port *port,
struct page *page,
u32 data_len);
+int lan966x_xdp_xmit(struct net_device *dev,
+ int n,
+ struct xdp_frame **frames,
+ u32 flags);
+bool lan966x_xdp_present(struct lan966x *lan966x);
static inline bool lan966x_xdp_port_present(struct lan966x_port *port)
{
return !!port->xdp_prog;
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_xdp.c b/drivers/net/ethernet/microchip/lan966x/lan966x_xdp.c
index e77d9f2aad2b..2e6f486ec67d 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_xdp.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_xdp.c
@@ -11,6 +11,8 @@ static int lan966x_xdp_setup(struct net_device *dev, struct netdev_bpf *xdp)
struct lan966x_port *port = netdev_priv(dev);
struct lan966x *lan966x = port->lan966x;
struct bpf_prog *old_prog;
+ bool old_xdp, new_xdp;
+ int err;
if (!lan966x->fdma) {
NL_SET_ERR_MSG_MOD(xdp->extack,
@@ -18,7 +20,20 @@ static int lan966x_xdp_setup(struct net_device *dev, struct netdev_bpf *xdp)
return -EOPNOTSUPP;
}
+ old_xdp = lan966x_xdp_present(lan966x);
old_prog = xchg(&port->xdp_prog, xdp->prog);
+ new_xdp = lan966x_xdp_present(lan966x);
+
+ if (old_xdp == new_xdp)
+ goto out;
+
+ err = lan966x_fdma_reload_page_pool(lan966x);
+ if (err) {
+ xchg(&port->xdp_prog, old_prog);
+ return err;
+ }
+
+out:
if (old_prog)
bpf_prog_put(old_prog);
@@ -35,21 +50,57 @@ int lan966x_xdp(struct net_device *dev, struct netdev_bpf *xdp)
}
}
+int lan966x_xdp_xmit(struct net_device *dev,
+ int n,
+ struct xdp_frame **frames,
+ u32 flags)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ int nxmit = 0;
+
+ for (int i = 0; i < n; ++i) {
+ struct xdp_frame *xdpf = frames[i];
+ int err;
+
+ err = lan966x_fdma_xmit_xdpf(port, xdpf, NULL, true);
+ if (err)
+ break;
+
+ nxmit++;
+ }
+
+ return nxmit;
+}
+
int lan966x_xdp_run(struct lan966x_port *port, struct page *page, u32 data_len)
{
struct bpf_prog *xdp_prog = port->xdp_prog;
struct lan966x *lan966x = port->lan966x;
+ struct xdp_frame *xdpf;
struct xdp_buff xdp;
u32 act;
xdp_init_buff(&xdp, PAGE_SIZE << lan966x->rx.page_order,
&port->xdp_rxq);
- xdp_prepare_buff(&xdp, page_address(page), IFH_LEN_BYTES,
+ xdp_prepare_buff(&xdp, page_address(page),
+ IFH_LEN_BYTES + XDP_PACKET_HEADROOM,
data_len - IFH_LEN_BYTES, false);
act = bpf_prog_run_xdp(xdp_prog, &xdp);
switch (act) {
case XDP_PASS:
return FDMA_PASS;
+ case XDP_TX:
+ xdpf = xdp_convert_buff_to_frame(&xdp);
+ if (!xdpf)
+ return FDMA_DROP;
+
+ return lan966x_fdma_xmit_xdpf(port, xdpf, page, false) ?
+ FDMA_DROP : FDMA_TX;
+ case XDP_REDIRECT:
+ if (xdp_do_redirect(port->dev, &xdp, xdp_prog))
+ return FDMA_DROP;
+
+ return FDMA_REDIRECT;
default:
bpf_warn_invalid_xdp_action(port->dev, xdp_prog, act);
fallthrough;
@@ -61,6 +112,19 @@ int lan966x_xdp_run(struct lan966x_port *port, struct page *page, u32 data_len)
}
}
+bool lan966x_xdp_present(struct lan966x *lan966x)
+{
+ for (int p = 0; p < lan966x->num_phys_ports; ++p) {
+ if (!lan966x->ports[p])
+ continue;
+
+ if (lan966x_xdp_port_present(lan966x->ports[p]))
+ return true;
+ }
+
+ return false;
+}
+
int lan966x_xdp_port_init(struct lan966x_port *port)
{
struct lan966x *lan966x = port->lan966x;
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c b/drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c
index bd6bd380ba34..1ed304a816cc 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c
@@ -12,6 +12,20 @@
#include "sparx5_main.h"
#include "sparx5_vcap_impl.h"
+#define SPX5_MAX_RULE_SIZE 13 /* allows X1, X2, X4, X6 and X12 rules */
+
+/* Collect keysets and type ids for multiple rules per size */
+struct sparx5_wildcard_rule {
+ bool selected;
+ u8 value;
+ u8 mask;
+ enum vcap_keyfield_set keyset;
+};
+
+struct sparx5_multiple_rules {
+ struct sparx5_wildcard_rule rule[SPX5_MAX_RULE_SIZE];
+};
+
struct sparx5_tc_flower_parse_usage {
struct flow_cls_offload *fco;
struct flow_rule *frule;
@@ -618,7 +632,7 @@ static int sparx5_tc_add_rule_counter(struct vcap_admin *admin,
{
int err;
- err = vcap_rule_add_action_u32(vrule, VCAP_AF_CNT_ID, vrule->id);
+ err = vcap_rule_mod_action_u32(vrule, VCAP_AF_CNT_ID, vrule->id);
if (err)
return err;
@@ -626,11 +640,190 @@ static int sparx5_tc_add_rule_counter(struct vcap_admin *admin,
return err;
}
+/* Collect all port keysets and apply the first of them, possibly wildcarded */
+static int sparx5_tc_select_protocol_keyset(struct net_device *ndev,
+ struct vcap_rule *vrule,
+ struct vcap_admin *admin,
+ u16 l3_proto,
+ struct sparx5_multiple_rules *multi)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+ struct vcap_keyset_list portkeysetlist = {};
+ enum vcap_keyfield_set portkeysets[10] = {};
+ struct vcap_keyset_list matches = {};
+ enum vcap_keyfield_set keysets[10];
+ int idx, jdx, err = 0, count = 0;
+ struct sparx5_wildcard_rule *mru;
+ const struct vcap_set *kinfo;
+ struct vcap_control *vctrl;
+
+ vctrl = port->sparx5->vcap_ctrl;
+
+ /* Find the keysets that the rule can use */
+ matches.keysets = keysets;
+ matches.max = ARRAY_SIZE(keysets);
+ if (vcap_rule_find_keysets(vrule, &matches) == 0)
+ return -EINVAL;
+
+ /* Find the keysets that the port configuration supports */
+ portkeysetlist.max = ARRAY_SIZE(portkeysets);
+ portkeysetlist.keysets = portkeysets;
+ err = sparx5_vcap_get_port_keyset(ndev,
+ admin, vrule->vcap_chain_id,
+ l3_proto,
+ &portkeysetlist);
+ if (err)
+ return err;
+
+ /* Find the intersection of the two sets of keysets */
+ for (idx = 0; idx < portkeysetlist.cnt; ++idx) {
+ kinfo = vcap_keyfieldset(vctrl, admin->vtype,
+ portkeysetlist.keysets[idx]);
+ if (!kinfo)
+ continue;
+
+ /* Find a port keyset that matches the required keys
+ * If there are multiple keysets then compose a type id mask
+ */
+ for (jdx = 0; jdx < matches.cnt; ++jdx) {
+ if (portkeysetlist.keysets[idx] != matches.keysets[jdx])
+ continue;
+
+ mru = &multi->rule[kinfo->sw_per_item];
+ if (!mru->selected) {
+ mru->selected = true;
+ mru->keyset = portkeysetlist.keysets[idx];
+ mru->value = kinfo->type_id;
+ }
+ mru->value &= kinfo->type_id;
+ mru->mask |= kinfo->type_id;
+ ++count;
+ }
+ }
+ if (count == 0)
+ return -EPROTO;
+
+ if (l3_proto == ETH_P_ALL && count < portkeysetlist.cnt)
+ return -ENOENT;
+
+ for (idx = 0; idx < SPX5_MAX_RULE_SIZE; ++idx) {
+ mru = &multi->rule[idx];
+ if (!mru->selected)
+ continue;
+
+ /* Align the mask to the combined value */
+ mru->mask ^= mru->value;
+ }
+
+ /* Set the chosen keyset on the rule and set a wildcarded type if there
+ * is more than one keyset
+ */
+ for (idx = 0; idx < SPX5_MAX_RULE_SIZE; ++idx) {
+ mru = &multi->rule[idx];
+ if (!mru->selected)
+ continue;
+
+ vcap_set_rule_set_keyset(vrule, mru->keyset);
+ if (count > 1)
+ /* Some keysets do not have a type field */
+ vcap_rule_mod_key_u32(vrule, VCAP_KF_TYPE,
+ mru->value,
+ ~mru->mask);
+ mru->selected = false; /* mark as done */
+ break; /* Stop here and add more rules later */
+ }
+ return err;
+}
+
+static int sparx5_tc_add_rule_copy(struct vcap_control *vctrl,
+ struct flow_cls_offload *fco,
+ struct vcap_rule *erule,
+ struct vcap_admin *admin,
+ struct sparx5_wildcard_rule *rule)
+{
+ enum vcap_key_field keylist[] = {
+ VCAP_KF_IF_IGR_PORT_MASK,
+ VCAP_KF_IF_IGR_PORT_MASK_SEL,
+ VCAP_KF_IF_IGR_PORT_MASK_RNG,
+ VCAP_KF_LOOKUP_FIRST_IS,
+ VCAP_KF_TYPE,
+ };
+ struct vcap_rule *vrule;
+ int err;
+
+ /* Add an extra rule with a special user and the new keyset */
+ erule->user = VCAP_USER_TC_EXTRA;
+ vrule = vcap_copy_rule(erule);
+ if (IS_ERR(vrule))
+ return PTR_ERR(vrule);
+
+ /* Link the new rule to the existing rule with the cookie */
+ vrule->cookie = erule->cookie;
+ vcap_filter_rule_keys(vrule, keylist, ARRAY_SIZE(keylist), true);
+ err = vcap_set_rule_set_keyset(vrule, rule->keyset);
+ if (err) {
+ pr_err("%s:%d: could not set keyset %s in rule: %u\n",
+ __func__, __LINE__,
+ vcap_keyset_name(vctrl, rule->keyset),
+ vrule->id);
+ goto out;
+ }
+
+ /* Some keysets do not have a type field, so ignore return value */
+ vcap_rule_mod_key_u32(vrule, VCAP_KF_TYPE, rule->value, ~rule->mask);
+
+ err = vcap_set_rule_set_actionset(vrule, erule->actionset);
+ if (err)
+ goto out;
+
+ err = sparx5_tc_add_rule_counter(admin, vrule);
+ if (err)
+ goto out;
+
+ err = vcap_val_rule(vrule, ETH_P_ALL);
+ if (err) {
+ pr_err("%s:%d: could not validate rule: %u\n",
+ __func__, __LINE__, vrule->id);
+ vcap_set_tc_exterr(fco, vrule);
+ goto out;
+ }
+ err = vcap_add_rule(vrule);
+ if (err) {
+ pr_err("%s:%d: could not add rule: %u\n",
+ __func__, __LINE__, vrule->id);
+ goto out;
+ }
+out:
+ vcap_free_rule(vrule);
+ return err;
+}
+
+static int sparx5_tc_add_remaining_rules(struct vcap_control *vctrl,
+ struct flow_cls_offload *fco,
+ struct vcap_rule *erule,
+ struct vcap_admin *admin,
+ struct sparx5_multiple_rules *multi)
+{
+ int idx, err = 0;
+
+ for (idx = 0; idx < SPX5_MAX_RULE_SIZE; ++idx) {
+ if (!multi->rule[idx].selected)
+ continue;
+
+ err = sparx5_tc_add_rule_copy(vctrl, fco, erule, admin,
+ &multi->rule[idx]);
+ if (err)
+ break;
+ }
+ return err;
+}
+
static int sparx5_tc_flower_replace(struct net_device *ndev,
struct flow_cls_offload *fco,
struct vcap_admin *admin)
{
struct sparx5_port *port = netdev_priv(ndev);
+ struct sparx5_multiple_rules multi = {};
struct flow_action_entry *act;
struct vcap_control *vctrl;
struct flow_rule *frule;
@@ -700,6 +893,15 @@ static int sparx5_tc_flower_replace(struct net_device *ndev,
goto out;
}
}
+
+ err = sparx5_tc_select_protocol_keyset(ndev, vrule, admin, l3_proto,
+ &multi);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(fco->common.extack,
+ "No matching port keyset for filter protocol and keys");
+ goto out;
+ }
+
/* provide the l3 protocol to guide the keyset selection */
err = vcap_val_rule(vrule, l3_proto);
if (err) {
@@ -710,6 +912,11 @@ static int sparx5_tc_flower_replace(struct net_device *ndev,
if (err)
NL_SET_ERR_MSG_MOD(fco->common.extack,
"Could not add the filter");
+
+ if (l3_proto == ETH_P_ALL)
+ err = sparx5_tc_add_remaining_rules(vctrl, fco, vrule, admin,
+ &multi);
+
out:
vcap_free_rule(vrule);
return err;
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.c b/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.c
index 0c4d4e6d51e6..a0c126ba9a87 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.c
@@ -7,11 +7,6 @@
* https://github.com/microchip-ung/sparx-5_reginfo
*/
-#include <linux/types.h>
-#include <linux/list.h>
-
-#include "vcap_api.h"
-#include "vcap_api_client.h"
#include "vcap_api_debugfs.h"
#include "sparx5_main_regs.h"
#include "sparx5_main.h"
@@ -279,6 +274,19 @@ static int sparx5_vcap_is2_get_port_keysets(struct net_device *ndev,
return 0;
}
+/* Get the port keyset for the vcap lookup */
+int sparx5_vcap_get_port_keyset(struct net_device *ndev,
+ struct vcap_admin *admin,
+ int cid,
+ u16 l3_proto,
+ struct vcap_keyset_list *kslist)
+{
+ int lookup;
+
+ lookup = sparx5_vcap_cid_to_lookup(cid);
+ return sparx5_vcap_is2_get_port_keysets(ndev, lookup, kslist, l3_proto);
+}
+
/* API callback used for validating a field keyset (check the port keysets) */
static enum vcap_keyfield_set
sparx5_vcap_validate_keyset(struct net_device *ndev,
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.h b/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.h
index 8a6b7e3d2618..0a0f2412c980 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.h
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.h
@@ -10,6 +10,12 @@
#ifndef __SPARX5_VCAP_IMPL_H__
#define __SPARX5_VCAP_IMPL_H__
+#include <linux/types.h>
+#include <linux/list.h>
+
+#include "vcap_api.h"
+#include "vcap_api_client.h"
+
#define SPARX5_VCAP_CID_IS2_L0 VCAP_CID_INGRESS_STAGE2_L0 /* IS2 lookup 0 */
#define SPARX5_VCAP_CID_IS2_L1 VCAP_CID_INGRESS_STAGE2_L1 /* IS2 lookup 1 */
#define SPARX5_VCAP_CID_IS2_L2 VCAP_CID_INGRESS_STAGE2_L2 /* IS2 lookup 2 */
@@ -65,4 +71,11 @@ enum vcap_is2_port_sel_arp {
VCAP_IS2_PS_ARP_ARP,
};
+/* Get the port keyset for the vcap lookup */
+int sparx5_vcap_get_port_keyset(struct net_device *ndev,
+ struct vcap_admin *admin,
+ int cid,
+ u16 l3_proto,
+ struct vcap_keyset_list *kslist);
+
#endif /* __SPARX5_VCAP_IMPL_H__ */
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api.c b/drivers/net/ethernet/microchip/vcap/vcap_api.c
index ac7a32ff755e..2f171d0b9187 100644
--- a/drivers/net/ethernet/microchip/vcap/vcap_api.c
+++ b/drivers/net/ethernet/microchip/vcap/vcap_api.c
@@ -173,6 +173,7 @@ const struct vcap_set *vcap_keyfieldset(struct vcap_control *vctrl,
return NULL;
return kset;
}
+EXPORT_SYMBOL_GPL(vcap_keyfieldset);
/* Return the typegroup table for the matching keyset (using subword size) */
const struct vcap_typegroup *
@@ -824,8 +825,8 @@ vcap_find_keyset_keyfield(struct vcap_control *vctrl,
}
/* Match a list of keys against the keysets available in a vcap type */
-static bool vcap_rule_find_keysets(struct vcap_rule_internal *ri,
- struct vcap_keyset_list *matches)
+static bool _vcap_rule_find_keysets(struct vcap_rule_internal *ri,
+ struct vcap_keyset_list *matches)
{
const struct vcap_client_keyfield *ckf;
int keyset, found, keycount, map_size;
@@ -864,6 +865,16 @@ static bool vcap_rule_find_keysets(struct vcap_rule_internal *ri,
return matches->cnt > 0;
}
+/* Match a list of keys against the keysets available in a vcap type */
+bool vcap_rule_find_keysets(struct vcap_rule *rule,
+ struct vcap_keyset_list *matches)
+{
+ struct vcap_rule_internal *ri = to_intrule(rule);
+
+ return _vcap_rule_find_keysets(ri, matches);
+}
+EXPORT_SYMBOL_GPL(vcap_rule_find_keysets);
+
/* Validate a rule with respect to available port keys */
int vcap_val_rule(struct vcap_rule *rule, u16 l3_proto)
{
@@ -888,7 +899,7 @@ int vcap_val_rule(struct vcap_rule *rule, u16 l3_proto)
matches.max = ARRAY_SIZE(keysets);
if (ri->data.keyset == VCAP_KFS_NO_VALUE) {
/* Iterate over rule keyfields and select keysets that fits */
- if (!vcap_rule_find_keysets(ri, &matches)) {
+ if (!_vcap_rule_find_keysets(ri, &matches)) {
ri->data.exterr = VCAP_ERR_NO_KEYSET_MATCH;
return -EINVAL;
}
@@ -1270,6 +1281,19 @@ int vcap_del_rules(struct vcap_control *vctrl, struct vcap_admin *admin)
}
EXPORT_SYMBOL_GPL(vcap_del_rules);
+/* Find a client key field in a rule */
+static struct vcap_client_keyfield *
+vcap_find_keyfield(struct vcap_rule *rule, enum vcap_key_field key)
+{
+ struct vcap_rule_internal *ri = to_intrule(rule);
+ struct vcap_client_keyfield *ckf;
+
+ list_for_each_entry(ckf, &ri->data.keyfields, ctrl.list)
+ if (ckf->ctrl.key == key)
+ return ckf;
+ return NULL;
+}
+
/* Find information on a key field in a rule */
const struct vcap_field *vcap_lookup_keyfield(struct vcap_rule *rule,
enum vcap_key_field key)
@@ -1434,6 +1458,19 @@ int vcap_rule_add_key_u128(struct vcap_rule *rule, enum vcap_key_field key,
}
EXPORT_SYMBOL_GPL(vcap_rule_add_key_u128);
+/* Find a client action field in a rule */
+static struct vcap_client_actionfield *
+vcap_find_actionfield(struct vcap_rule *rule, enum vcap_action_field act)
+{
+ struct vcap_rule_internal *ri = (struct vcap_rule_internal *)rule;
+ struct vcap_client_actionfield *caf;
+
+ list_for_each_entry(caf, &ri->data.actionfields, ctrl.list)
+ if (caf->ctrl.action == act)
+ return caf;
+ return NULL;
+}
+
static void vcap_copy_from_client_actionfield(struct vcap_rule *rule,
struct vcap_client_actionfield *field,
struct vcap_client_actionfield_data *data)
@@ -1772,6 +1809,148 @@ int vcap_rule_get_counter(struct vcap_rule *rule, struct vcap_counter *ctr)
}
EXPORT_SYMBOL_GPL(vcap_rule_get_counter);
+static int vcap_rule_mod_key(struct vcap_rule *rule,
+ enum vcap_key_field key,
+ enum vcap_field_type ftype,
+ struct vcap_client_keyfield_data *data)
+{
+ struct vcap_client_keyfield *field;
+
+ field = vcap_find_keyfield(rule, key);
+ if (!field)
+ return vcap_rule_add_key(rule, key, ftype, data);
+ vcap_copy_from_client_keyfield(rule, field, data);
+ return 0;
+}
+
+/* Modify a 32 bit key field with value and mask in the rule */
+int vcap_rule_mod_key_u32(struct vcap_rule *rule, enum vcap_key_field key,
+ u32 value, u32 mask)
+{
+ struct vcap_client_keyfield_data data;
+
+ data.u32.value = value;
+ data.u32.mask = mask;
+ return vcap_rule_mod_key(rule, key, VCAP_FIELD_U32, &data);
+}
+EXPORT_SYMBOL_GPL(vcap_rule_mod_key_u32);
+
+static int vcap_rule_mod_action(struct vcap_rule *rule,
+ enum vcap_action_field action,
+ enum vcap_field_type ftype,
+ struct vcap_client_actionfield_data *data)
+{
+ struct vcap_client_actionfield *field;
+
+ field = vcap_find_actionfield(rule, action);
+ if (!field)
+ return vcap_rule_add_action(rule, action, ftype, data);
+ vcap_copy_from_client_actionfield(rule, field, data);
+ return 0;
+}
+
+/* Modify a 32 bit action field with value in the rule */
+int vcap_rule_mod_action_u32(struct vcap_rule *rule,
+ enum vcap_action_field action,
+ u32 value)
+{
+ struct vcap_client_actionfield_data data;
+
+ data.u32.value = value;
+ return vcap_rule_mod_action(rule, action, VCAP_FIELD_U32, &data);
+}
+EXPORT_SYMBOL_GPL(vcap_rule_mod_action_u32);
+
+/* Drop keys in a keylist and any keys that are not supported by the keyset */
+int vcap_filter_rule_keys(struct vcap_rule *rule,
+ enum vcap_key_field keylist[], int length,
+ bool drop_unsupported)
+{
+ struct vcap_rule_internal *ri = to_intrule(rule);
+ struct vcap_client_keyfield *ckf, *next_ckf;
+ const struct vcap_field *fields;
+ enum vcap_key_field key;
+ int err = 0;
+ int idx;
+
+ if (length > 0) {
+ err = -EEXIST;
+ list_for_each_entry_safe(ckf, next_ckf,
+ &ri->data.keyfields, ctrl.list) {
+ key = ckf->ctrl.key;
+ for (idx = 0; idx < length; ++idx)
+ if (key == keylist[idx]) {
+ list_del(&ckf->ctrl.list);
+ kfree(ckf);
+ idx++;
+ err = 0;
+ }
+ }
+ }
+ if (drop_unsupported) {
+ err = -EEXIST;
+ fields = vcap_keyfields(ri->vctrl, ri->admin->vtype,
+ rule->keyset);
+ if (!fields)
+ return err;
+ list_for_each_entry_safe(ckf, next_ckf,
+ &ri->data.keyfields, ctrl.list) {
+ key = ckf->ctrl.key;
+ if (fields[key].width == 0) {
+ list_del(&ckf->ctrl.list);
+ kfree(ckf);
+ err = 0;
+ }
+ }
+ }
+ return err;
+}
+EXPORT_SYMBOL_GPL(vcap_filter_rule_keys);
+
+/* Make a full copy of an existing rule with a new rule id */
+struct vcap_rule *vcap_copy_rule(struct vcap_rule *erule)
+{
+ struct vcap_rule_internal *ri = to_intrule(erule);
+ struct vcap_client_actionfield *caf;
+ struct vcap_client_keyfield *ckf;
+ struct vcap_rule *rule;
+ int err;
+
+ err = vcap_api_check(ri->vctrl);
+ if (err)
+ return ERR_PTR(err);
+
+ rule = vcap_alloc_rule(ri->vctrl, ri->ndev, ri->data.vcap_chain_id,
+ ri->data.user, ri->data.priority, 0);
+ if (IS_ERR(rule))
+ return rule;
+
+ list_for_each_entry(ckf, &ri->data.keyfields, ctrl.list) {
+ /* Add a key duplicate in the new rule */
+ err = vcap_rule_add_key(rule,
+ ckf->ctrl.key,
+ ckf->ctrl.type,
+ &ckf->data);
+ if (err)
+ goto err;
+ }
+
+ list_for_each_entry(caf, &ri->data.actionfields, ctrl.list) {
+ /* Add an action duplicate in the new rule */
+ err = vcap_rule_add_action(rule,
+ caf->ctrl.action,
+ caf->ctrl.type,
+ &caf->data);
+ if (err)
+ goto err;
+ }
+ return rule;
+err:
+ vcap_free_rule(rule);
+ return ERR_PTR(err);
+}
+EXPORT_SYMBOL_GPL(vcap_copy_rule);
+
#ifdef CONFIG_VCAP_KUNIT_TEST
#include "vcap_api_kunit.c"
#endif
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api_client.h b/drivers/net/ethernet/microchip/vcap/vcap_api_client.h
index 654ef8fa6d62..93a0fcb12a81 100644
--- a/drivers/net/ethernet/microchip/vcap/vcap_api_client.h
+++ b/drivers/net/ethernet/microchip/vcap/vcap_api_client.h
@@ -168,6 +168,8 @@ int vcap_val_rule(struct vcap_rule *rule, u16 l3_proto);
int vcap_add_rule(struct vcap_rule *rule);
/* Delete rule in a VCAP instance */
int vcap_del_rule(struct vcap_control *vctrl, struct net_device *ndev, u32 id);
+/* Make a full copy of an existing rule with a new rule id */
+struct vcap_rule *vcap_copy_rule(struct vcap_rule *rule);
/* Update the keyset for the rule */
int vcap_set_rule_set_keyset(struct vcap_rule *rule,
@@ -213,7 +215,13 @@ bool vcap_is_next_lookup(struct vcap_control *vctrl, int cur_cid, int next_cid);
/* Provide all rules via a callback interface */
int vcap_rule_iter(struct vcap_control *vctrl,
int (*callback)(void *, struct vcap_rule *), void *arg);
-
+/* Match a list of keys against the keysets available in a vcap type */
+bool vcap_rule_find_keysets(struct vcap_rule *rule,
+ struct vcap_keyset_list *matches);
+/* Return the keyset information for the keyset */
+const struct vcap_set *vcap_keyfieldset(struct vcap_control *vctrl,
+ enum vcap_type vt,
+ enum vcap_keyfield_set keyset);
/* Copy to host byte order */
void vcap_netbytes_copy(u8 *dst, u8 *src, int count);
@@ -226,6 +234,10 @@ int vcap_del_rules(struct vcap_control *vctrl, struct vcap_admin *admin);
/* Add a keyset to a keyset list */
bool vcap_keyset_list_add(struct vcap_keyset_list *keysetlist,
enum vcap_keyfield_set keyset);
+/* Drop keys in a keylist and any keys that are not supported by the keyset */
+int vcap_filter_rule_keys(struct vcap_rule *rule,
+ enum vcap_key_field keylist[], int length,
+ bool drop_unsupported);
/* map keyset id to a string with the keyset name */
const char *vcap_keyset_name(struct vcap_control *vctrl,
@@ -234,4 +246,12 @@ const char *vcap_keyset_name(struct vcap_control *vctrl,
const char *vcap_keyfield_name(struct vcap_control *vctrl,
enum vcap_key_field key);
+/* Modify a 32 bit key field with value and mask in the rule */
+int vcap_rule_mod_key_u32(struct vcap_rule *rule, enum vcap_key_field key,
+ u32 value, u32 mask);
+/* Modify a 32 bit action field with value in the rule */
+int vcap_rule_mod_action_u32(struct vcap_rule *rule,
+ enum vcap_action_field action,
+ u32 value);
+
#endif /* __VCAP_API_CLIENT__ */
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api_debugfs.c b/drivers/net/ethernet/microchip/vcap/vcap_api_debugfs.c
index d9c7ca988b76..5df00e940333 100644
--- a/drivers/net/ethernet/microchip/vcap/vcap_api_debugfs.c
+++ b/drivers/net/ethernet/microchip/vcap/vcap_api_debugfs.c
@@ -192,22 +192,22 @@ static bool vcap_verify_keystream_keyset(struct vcap_control *vctrl,
vcap_iter_init(&iter, vcap->sw_width, tgt, typefld->offset);
vcap_decode_field(keystream, &iter, typefld->width, (u8 *)&value);
- return (value == info->type_id);
+ return (value & mask) == (info->type_id & mask);
}
/* Verify that the typegroup information, subword count, keyset and type id
- * are in sync and correct, return the keyset
+ * are in sync and correct, return the list of matching keysets
*/
-static enum
-vcap_keyfield_set vcap_find_keystream_keyset(struct vcap_control *vctrl,
- enum vcap_type vt,
- u32 *keystream,
- u32 *mskstream,
- bool mask, int sw_max)
+static int
+vcap_find_keystream_keysets(struct vcap_control *vctrl,
+ enum vcap_type vt,
+ u32 *keystream,
+ u32 *mskstream,
+ bool mask, int sw_max,
+ struct vcap_keyset_list *kslist)
{
const struct vcap_set *keyfield_set;
int sw_count, idx;
- bool res;
sw_count = vcap_find_keystream_typegroup_sw(vctrl, vt, keystream, mask,
sw_max);
@@ -219,11 +219,12 @@ vcap_keyfield_set vcap_find_keystream_keyset(struct vcap_control *vctrl,
if (keyfield_set[idx].sw_per_item != sw_count)
continue;
- res = vcap_verify_keystream_keyset(vctrl, vt, keystream,
- mskstream, idx);
- if (res)
- return idx;
+ if (vcap_verify_keystream_keyset(vctrl, vt, keystream,
+ mskstream, idx))
+ vcap_keyset_list_add(kslist, idx);
}
+ if (kslist->cnt > 0)
+ return 0;
return -EINVAL;
}
@@ -296,13 +297,14 @@ vcap_find_actionstream_actionset(struct vcap_control *vctrl,
return -EINVAL;
}
-/* Read key data from a VCAP address and discover if there is a rule keyset
+/* Read key data from a VCAP address and discover if there are any rule keysets
* here
*/
-static int vcap_addr_keyset(struct vcap_control *vctrl,
- struct net_device *ndev,
- struct vcap_admin *admin,
- int addr)
+static int vcap_addr_keysets(struct vcap_control *vctrl,
+ struct net_device *ndev,
+ struct vcap_admin *admin,
+ int addr,
+ struct vcap_keyset_list *kslist)
{
enum vcap_type vt = admin->vtype;
int keyset_sw_regs, idx;
@@ -320,9 +322,10 @@ static int vcap_addr_keyset(struct vcap_control *vctrl,
}
if (key == 0 && mask == 0)
return -EINVAL;
- /* Decode and locate the keyset */
- return vcap_find_keystream_keyset(vctrl, vt, admin->cache.keystream,
- admin->cache.maskstream, false, 0);
+ /* Decode and locate the keysets */
+ return vcap_find_keystream_keysets(vctrl, vt, admin->cache.keystream,
+ admin->cache.maskstream, false, 0,
+ kslist);
}
static int vcap_read_rule(struct vcap_rule_internal *ri)
@@ -471,9 +474,11 @@ static int vcap_debugfs_show_rule_keyset(struct vcap_rule_internal *ri,
struct vcap_control *vctrl = ri->vctrl;
struct vcap_stream_iter kiter, miter;
struct vcap_admin *admin = ri->admin;
+ enum vcap_keyfield_set keysets[10];
const struct vcap_field *keyfield;
enum vcap_type vt = admin->vtype;
const struct vcap_typegroup *tgt;
+ struct vcap_keyset_list matches;
enum vcap_keyfield_set keyset;
int idx, res, keyfield_count;
u32 *maskstream;
@@ -483,16 +488,22 @@ static int vcap_debugfs_show_rule_keyset(struct vcap_rule_internal *ri,
keystream = admin->cache.keystream;
maskstream = admin->cache.maskstream;
- res = vcap_find_keystream_keyset(vctrl, vt, keystream, maskstream,
- false, 0);
+ matches.keysets = keysets;
+ matches.cnt = 0;
+ matches.max = ARRAY_SIZE(keysets);
+ res = vcap_find_keystream_keysets(vctrl, vt, keystream, maskstream,
+ false, 0, &matches);
if (res < 0) {
- pr_err("%s:%d: could not find valid keyset: %d\n",
+ pr_err("%s:%d: could not find valid keysets: %d\n",
__func__, __LINE__, res);
return -EINVAL;
}
- keyset = res;
- out->prf(out->dst, " keyset: %s\n",
- vcap_keyset_name(vctrl, ri->data.keyset));
+ keyset = matches.keysets[0];
+ out->prf(out->dst, " keysets:");
+ for (idx = 0; idx < matches.cnt; ++idx)
+ out->prf(out->dst, " %s",
+ vcap_keyset_name(vctrl, matches.keysets[idx]));
+ out->prf(out->dst, "\n");
out->prf(out->dst, " keyset_sw: %d\n", ri->keyset_sw);
out->prf(out->dst, " keyset_sw_regs: %d\n", ri->keyset_sw_regs);
keyfield_count = vcap_keyfield_count(vctrl, vt, keyset);
@@ -647,11 +658,12 @@ static int vcap_show_admin_raw(struct vcap_control *vctrl,
struct vcap_admin *admin,
struct vcap_output_print *out)
{
+ enum vcap_keyfield_set keysets[10];
enum vcap_type vt = admin->vtype;
+ struct vcap_keyset_list kslist;
struct vcap_rule_internal *ri;
const struct vcap_set *info;
- int keyset;
- int addr;
+ int addr, idx;
int ret;
if (list_empty(&admin->rules))
@@ -664,24 +676,32 @@ static int vcap_show_admin_raw(struct vcap_control *vctrl,
ri = list_first_entry(&admin->rules, struct vcap_rule_internal, list);
/* Go from higher to lower addresses searching for a keyset */
+ kslist.keysets = keysets;
+ kslist.max = ARRAY_SIZE(keysets);
for (addr = admin->last_valid_addr; addr >= admin->first_valid_addr;
--addr) {
- keyset = vcap_addr_keyset(vctrl, ri->ndev, admin, addr);
- if (keyset < 0)
+ kslist.cnt = 0;
+ ret = vcap_addr_keysets(vctrl, ri->ndev, admin, addr, &kslist);
+ if (ret < 0)
continue;
- info = vcap_keyfieldset(vctrl, vt, keyset);
+ info = vcap_keyfieldset(vctrl, vt, kslist.keysets[0]);
if (!info)
continue;
- if (addr % info->sw_per_item)
+ if (addr % info->sw_per_item) {
pr_info("addr: %d X%d error rule, keyset: %s\n",
addr,
info->sw_per_item,
- vcap_keyset_name(vctrl, keyset));
- else
- out->prf(out->dst, " addr: %d, X%d rule, keyset: %s\n",
- addr,
- info->sw_per_item,
- vcap_keyset_name(vctrl, keyset));
+ vcap_keyset_name(vctrl, kslist.keysets[0]));
+ } else {
+ out->prf(out->dst, " addr: %d, X%d rule, keysets:",
+ addr,
+ info->sw_per_item);
+ for (idx = 0; idx < kslist.cnt; ++idx)
+ out->prf(out->dst, " %s",
+ vcap_keyset_name(vctrl,
+ kslist.keysets[idx]));
+ out->prf(out->dst, "\n");
+ }
}
return 0;
}
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api_debugfs_kunit.c b/drivers/net/ethernet/microchip/vcap/vcap_api_debugfs_kunit.c
index ed455dad3a14..cf594668d5d9 100644
--- a/drivers/net/ethernet/microchip/vcap/vcap_api_debugfs_kunit.c
+++ b/drivers/net/ethernet/microchip/vcap/vcap_api_debugfs_kunit.c
@@ -316,24 +316,34 @@ static void vcap_api_addr_keyset_test(struct kunit *test)
.actionstream = actdata,
},
};
+ enum vcap_keyfield_set keysets[10];
+ struct vcap_keyset_list matches;
int ret, idx, addr;
vcap_test_api_init(&admin);
/* Go from higher to lower addresses searching for a keyset */
+ matches.keysets = keysets;
+ matches.cnt = 0;
+ matches.max = ARRAY_SIZE(keysets);
for (idx = ARRAY_SIZE(keydata) - 1, addr = 799; idx > 0;
--idx, --addr) {
admin.cache.keystream = &keydata[idx];
admin.cache.maskstream = &mskdata[idx];
- ret = vcap_addr_keyset(&test_vctrl, &test_netdev, &admin, addr);
+ ret = vcap_addr_keysets(&test_vctrl, &test_netdev, &admin,
+ addr, &matches);
KUNIT_EXPECT_EQ(test, -EINVAL, ret);
}
/* Finally we hit the start of the rule */
admin.cache.keystream = &keydata[idx];
admin.cache.maskstream = &mskdata[idx];
- ret = vcap_addr_keyset(&test_vctrl, &test_netdev, &admin, addr);
- KUNIT_EXPECT_EQ(test, VCAP_KFS_MAC_ETYPE, ret);
+ matches.cnt = 0;
+ ret = vcap_addr_keysets(&test_vctrl, &test_netdev, &admin,
+ addr, &matches);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ KUNIT_EXPECT_EQ(test, matches.cnt, 1);
+ KUNIT_EXPECT_EQ(test, matches.keysets[0], VCAP_KFS_MAC_ETYPE);
}
static void vcap_api_show_admin_raw_test(struct kunit *test)
@@ -362,7 +372,7 @@ static void vcap_api_show_admin_raw_test(struct kunit *test)
.prf = (void *)test_prf,
};
const char *test_expected =
- " addr: 786, X6 rule, keyset: VCAP_KFS_MAC_ETYPE\n";
+ " addr: 786, X6 rule, keysets: VCAP_KFS_MAC_ETYPE\n";
int ret;
vcap_test_api_init(&admin);
@@ -442,7 +452,7 @@ static const char * const test_admin_expect[] = {
" chain_id: 0\n",
" user: 0\n",
" priority: 0\n",
- " keyset: VCAP_KFS_MAC_ETYPE\n",
+ " keysets: VCAP_KFS_MAC_ETYPE\n",
" keyset_sw: 6\n",
" keyset_sw_regs: 2\n",
" ETYPE_LEN_IS: W1: 1/1\n",
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c b/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
index f48d93f374af..76a31215ebfb 100644
--- a/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
+++ b/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
@@ -1197,7 +1197,7 @@ static void vcap_api_rule_find_keyset_basic_test(struct kunit *test)
for (idx = 0; idx < ARRAY_SIZE(ckf); idx++)
list_add_tail(&ckf[idx].ctrl.list, &ri.data.keyfields);
- ret = vcap_rule_find_keysets(&ri, &matches);
+ ret = vcap_rule_find_keysets(&ri.data, &matches);
KUNIT_EXPECT_EQ(test, true, ret);
KUNIT_EXPECT_EQ(test, 1, matches.cnt);
@@ -1244,7 +1244,7 @@ static void vcap_api_rule_find_keyset_failed_test(struct kunit *test)
for (idx = 0; idx < ARRAY_SIZE(ckf); idx++)
list_add_tail(&ckf[idx].ctrl.list, &ri.data.keyfields);
- ret = vcap_rule_find_keysets(&ri, &matches);
+ ret = vcap_rule_find_keysets(&ri.data, &matches);
KUNIT_EXPECT_EQ(test, false, ret);
KUNIT_EXPECT_EQ(test, 0, matches.cnt);
@@ -1291,7 +1291,7 @@ static void vcap_api_rule_find_keyset_many_test(struct kunit *test)
for (idx = 0; idx < ARRAY_SIZE(ckf); idx++)
list_add_tail(&ckf[idx].ctrl.list, &ri.data.keyfields);
- ret = vcap_rule_find_keysets(&ri, &matches);
+ ret = vcap_rule_find_keysets(&ri.data, &matches);
KUNIT_EXPECT_EQ(test, true, ret);
KUNIT_EXPECT_EQ(test, 6, matches.cnt);
@@ -1954,6 +1954,198 @@ static void vcap_api_next_lookup_advanced_test(struct kunit *test)
KUNIT_EXPECT_EQ(test, true, ret);
}
+static void vcap_api_filter_unsupported_keys_test(struct kunit *test)
+{
+ struct vcap_admin admin = {
+ .vtype = VCAP_TYPE_IS2,
+ };
+ struct vcap_rule_internal ri = {
+ .admin = &admin,
+ .vctrl = &test_vctrl,
+ .data.keyset = VCAP_KFS_MAC_ETYPE,
+ };
+ enum vcap_key_field keylist[] = {
+ VCAP_KF_TYPE,
+ VCAP_KF_LOOKUP_FIRST_IS,
+ VCAP_KF_ARP_ADDR_SPACE_OK_IS, /* arp keys are not in keyset */
+ VCAP_KF_ARP_PROTO_SPACE_OK_IS,
+ VCAP_KF_ARP_LEN_OK_IS,
+ VCAP_KF_ARP_TGT_MATCH_IS,
+ VCAP_KF_ARP_SENDER_MATCH_IS,
+ VCAP_KF_ARP_OPCODE_UNKNOWN_IS,
+ VCAP_KF_ARP_OPCODE,
+ VCAP_KF_8021Q_DEI_CLS,
+ VCAP_KF_8021Q_PCP_CLS,
+ VCAP_KF_8021Q_VID_CLS,
+ VCAP_KF_L2_MC_IS,
+ VCAP_KF_L2_BC_IS,
+ };
+ enum vcap_key_field expected[] = {
+ VCAP_KF_TYPE,
+ VCAP_KF_LOOKUP_FIRST_IS,
+ VCAP_KF_8021Q_DEI_CLS,
+ VCAP_KF_8021Q_PCP_CLS,
+ VCAP_KF_8021Q_VID_CLS,
+ VCAP_KF_L2_MC_IS,
+ VCAP_KF_L2_BC_IS,
+ };
+ struct vcap_client_keyfield *ckf, *next;
+ bool ret;
+ int idx;
+
+ /* Add all keys to the rule */
+ INIT_LIST_HEAD(&ri.data.keyfields);
+ for (idx = 0; idx < ARRAY_SIZE(keylist); idx++) {
+ ckf = kzalloc(sizeof(*ckf), GFP_KERNEL);
+ if (ckf) {
+ ckf->ctrl.key = keylist[idx];
+ list_add_tail(&ckf->ctrl.list, &ri.data.keyfields);
+ }
+ }
+
+ KUNIT_EXPECT_EQ(test, 14, ARRAY_SIZE(keylist));
+
+ /* Drop unsupported keys from the rule */
+ ret = vcap_filter_rule_keys(&ri.data, NULL, 0, true);
+
+ KUNIT_EXPECT_EQ(test, 0, ret);
+
+ /* Check remaining keys in the rule */
+ idx = 0;
+ list_for_each_entry_safe(ckf, next, &ri.data.keyfields, ctrl.list) {
+ KUNIT_EXPECT_EQ(test, expected[idx], ckf->ctrl.key);
+ list_del(&ckf->ctrl.list);
+ kfree(ckf);
+ ++idx;
+ }
+ KUNIT_EXPECT_EQ(test, 7, idx);
+}
+
+static void vcap_api_filter_keylist_test(struct kunit *test)
+{
+ struct vcap_admin admin = {
+ .vtype = VCAP_TYPE_IS0,
+ };
+ struct vcap_rule_internal ri = {
+ .admin = &admin,
+ .vctrl = &test_vctrl,
+ .data.keyset = VCAP_KFS_NORMAL_7TUPLE,
+ };
+ enum vcap_key_field keylist[] = {
+ VCAP_KF_TYPE,
+ VCAP_KF_LOOKUP_FIRST_IS,
+ VCAP_KF_LOOKUP_GEN_IDX_SEL,
+ VCAP_KF_LOOKUP_GEN_IDX,
+ VCAP_KF_IF_IGR_PORT_MASK_SEL,
+ VCAP_KF_IF_IGR_PORT_MASK,
+ VCAP_KF_L2_MC_IS,
+ VCAP_KF_L2_BC_IS,
+ VCAP_KF_8021Q_VLAN_TAGS,
+ VCAP_KF_8021Q_TPID0,
+ VCAP_KF_8021Q_PCP0,
+ VCAP_KF_8021Q_DEI0,
+ VCAP_KF_8021Q_VID0,
+ VCAP_KF_8021Q_TPID1,
+ VCAP_KF_8021Q_PCP1,
+ VCAP_KF_8021Q_DEI1,
+ VCAP_KF_8021Q_VID1,
+ VCAP_KF_8021Q_TPID2,
+ VCAP_KF_8021Q_PCP2,
+ VCAP_KF_8021Q_DEI2,
+ VCAP_KF_8021Q_VID2,
+ VCAP_KF_L2_DMAC,
+ VCAP_KF_L2_SMAC,
+ VCAP_KF_IP_MC_IS,
+ VCAP_KF_ETYPE_LEN_IS,
+ VCAP_KF_ETYPE,
+ VCAP_KF_IP_SNAP_IS,
+ VCAP_KF_IP4_IS,
+ VCAP_KF_L3_FRAGMENT_TYPE,
+ VCAP_KF_L3_FRAG_INVLD_L4_LEN,
+ VCAP_KF_L3_OPTIONS_IS,
+ VCAP_KF_L3_DSCP,
+ VCAP_KF_L3_IP6_DIP,
+ VCAP_KF_L3_IP6_SIP,
+ VCAP_KF_TCP_UDP_IS,
+ VCAP_KF_TCP_IS,
+ VCAP_KF_L4_SPORT,
+ VCAP_KF_L4_RNG,
+ };
+ enum vcap_key_field droplist[] = {
+ VCAP_KF_8021Q_TPID1,
+ VCAP_KF_8021Q_PCP1,
+ VCAP_KF_8021Q_DEI1,
+ VCAP_KF_8021Q_VID1,
+ VCAP_KF_8021Q_TPID2,
+ VCAP_KF_8021Q_PCP2,
+ VCAP_KF_8021Q_DEI2,
+ VCAP_KF_8021Q_VID2,
+ VCAP_KF_L3_IP6_DIP,
+ VCAP_KF_L3_IP6_SIP,
+ VCAP_KF_L4_SPORT,
+ VCAP_KF_L4_RNG,
+ };
+ enum vcap_key_field expected[] = {
+ VCAP_KF_TYPE,
+ VCAP_KF_LOOKUP_FIRST_IS,
+ VCAP_KF_LOOKUP_GEN_IDX_SEL,
+ VCAP_KF_LOOKUP_GEN_IDX,
+ VCAP_KF_IF_IGR_PORT_MASK_SEL,
+ VCAP_KF_IF_IGR_PORT_MASK,
+ VCAP_KF_L2_MC_IS,
+ VCAP_KF_L2_BC_IS,
+ VCAP_KF_8021Q_VLAN_TAGS,
+ VCAP_KF_8021Q_TPID0,
+ VCAP_KF_8021Q_PCP0,
+ VCAP_KF_8021Q_DEI0,
+ VCAP_KF_8021Q_VID0,
+ VCAP_KF_L2_DMAC,
+ VCAP_KF_L2_SMAC,
+ VCAP_KF_IP_MC_IS,
+ VCAP_KF_ETYPE_LEN_IS,
+ VCAP_KF_ETYPE,
+ VCAP_KF_IP_SNAP_IS,
+ VCAP_KF_IP4_IS,
+ VCAP_KF_L3_FRAGMENT_TYPE,
+ VCAP_KF_L3_FRAG_INVLD_L4_LEN,
+ VCAP_KF_L3_OPTIONS_IS,
+ VCAP_KF_L3_DSCP,
+ VCAP_KF_TCP_UDP_IS,
+ VCAP_KF_TCP_IS,
+ };
+ struct vcap_client_keyfield *ckf, *next;
+ bool ret;
+ int idx;
+
+ /* Add all keys to the rule */
+ INIT_LIST_HEAD(&ri.data.keyfields);
+ for (idx = 0; idx < ARRAY_SIZE(keylist); idx++) {
+ ckf = kzalloc(sizeof(*ckf), GFP_KERNEL);
+ if (ckf) {
+ ckf->ctrl.key = keylist[idx];
+ list_add_tail(&ckf->ctrl.list, &ri.data.keyfields);
+ }
+ }
+
+ KUNIT_EXPECT_EQ(test, 38, ARRAY_SIZE(keylist));
+
+ /* Drop listed keys from the rule */
+ ret = vcap_filter_rule_keys(&ri.data, droplist, ARRAY_SIZE(droplist),
+ false);
+
+ KUNIT_EXPECT_EQ(test, 0, ret);
+
+ /* Check remaining keys in the rule */
+ idx = 0;
+ list_for_each_entry_safe(ckf, next, &ri.data.keyfields, ctrl.list) {
+ KUNIT_EXPECT_EQ(test, expected[idx], ckf->ctrl.key);
+ list_del(&ckf->ctrl.list);
+ kfree(ckf);
+ ++idx;
+ }
+ KUNIT_EXPECT_EQ(test, 26, idx);
+}
+
static struct kunit_suite vcap_api_rule_remove_test_suite = {
.name = "VCAP_API_Rule_Remove_Testsuite",
.test_cases = vcap_api_rule_remove_test_cases,
@@ -1984,6 +2176,8 @@ static struct kunit_suite vcap_api_rule_counter_test_suite = {
static struct kunit_case vcap_api_support_test_cases[] = {
KUNIT_CASE(vcap_api_next_lookup_basic_test),
KUNIT_CASE(vcap_api_next_lookup_advanced_test),
+ KUNIT_CASE(vcap_api_filter_unsupported_keys_test),
+ KUNIT_CASE(vcap_api_filter_keylist_test),
{}
};
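
As a hedged sketch (a hypothetical caller, not part of this diff), a VCAP client could trim a rule with the vcap_filter_rule_keys() helper that the new tests exercise: first dropping an explicit key list, then anything the chosen keyset cannot carry. The prototype and semantics are inferred from the KUnit cases above; the drop list is illustrative only.

/* Hedged sketch only; prototype inferred from the tests above. */
#include <linux/kernel.h>
#include "vcap_api_client.h"

static int example_trim_rule(struct vcap_rule *rule)
{
	enum vcap_key_field droplist[] = {
		VCAP_KF_8021Q_TPID1,
		VCAP_KF_8021Q_VID1,
	};
	int err;

	/* Remove only the listed keys, if present in the rule */
	err = vcap_filter_rule_keys(rule, droplist, ARRAY_SIZE(droplist),
				    false);
	if (err)
		return err;

	/* Remove any key the rule's current keyset does not support */
	return vcap_filter_rule_keys(rule, NULL, 0, true);
}
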
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api_private.h b/drivers/net/ethernet/microchip/vcap/vcap_api_private.h
index 18a9a0cd9606..9ac1b1d55f22 100644
--- a/drivers/net/ethernet/microchip/vcap/vcap_api_private.h
+++ b/drivers/net/ethernet/microchip/vcap/vcap_api_private.h
@@ -59,10 +59,6 @@ void vcap_iter_update(struct vcap_stream_iter *itr);
/* Keyset and keyfield functionality */
-/* Return the keyset information for the keyset */
-const struct vcap_set *vcap_keyfieldset(struct vcap_control *vctrl,
- enum vcap_type vt,
- enum vcap_keyfield_set keyset);
/* Return the number of keyfields in the keyset */
int vcap_keyfield_count(struct vcap_control *vctrl,
enum vcap_type vt, enum vcap_keyfield_set keyset);
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index c5f88f7a7a04..4ed4082836a9 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -207,11 +207,11 @@ static void efx_skb_copy_bits_to_pio(struct efx_nic *efx, struct sk_buff *skb,
skb_frag_t *f = &skb_shinfo(skb)->frags[i];
u8 *vaddr;
- vaddr = kmap_atomic(skb_frag_page(f));
+ vaddr = kmap_local_page(skb_frag_page(f));
efx_memcpy_toio_aligned_cb(efx, piobuf, vaddr + skb_frag_off(f),
skb_frag_size(f), copy_buf);
- kunmap_atomic(vaddr);
+ kunmap_local(vaddr);
}
EFX_WARN_ON_ONCE_PARANOID(skb_shinfo(skb)->frag_list);
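
The kmap_atomic() to kmap_local_page() conversions in this series all follow the same shape. Below is a hedged, stand-alone illustration of that pattern; the helper name is made up and is not taken from any driver here.

#include <linux/highmem.h>
#include <linux/skbuff.h>
#include <linux/string.h>

/* Illustrative helper only: map an skb fragment, copy it out, unmap.
 * kmap_local_page() mappings are CPU-local and must be released in
 * reverse order of mapping, but unlike kmap_atomic() they leave
 * preemption and page faults enabled.
 */
static void example_copy_frag(void *dst, const skb_frag_t *frag)
{
	u8 *vaddr = kmap_local_page(skb_frag_page(frag));

	memcpy(dst, vaddr + skb_frag_off(frag), skb_frag_size(frag));
	kunmap_local(vaddr);
}
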
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 0a9d13d7976f..4167e768a86a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -7562,31 +7562,31 @@ static int __init stmmac_cmdline_opt(char *str)
if (!str || !*str)
return 1;
while ((opt = strsep(&str, ",")) != NULL) {
- if (!strncmp(opt, "debug:", 6)) {
+ if (sysfs_streq(opt, "debug:")) {
if (kstrtoint(opt + 6, 0, &debug))
goto err;
- } else if (!strncmp(opt, "phyaddr:", 8)) {
+ } else if (sysfs_streq(opt, "phyaddr:")) {
if (kstrtoint(opt + 8, 0, &phyaddr))
goto err;
- } else if (!strncmp(opt, "buf_sz:", 7)) {
+ } else if (sysfs_streq(opt, "buf_sz:")) {
if (kstrtoint(opt + 7, 0, &buf_sz))
goto err;
- } else if (!strncmp(opt, "tc:", 3)) {
+ } else if (sysfs_streq(opt, "tc:")) {
if (kstrtoint(opt + 3, 0, &tc))
goto err;
- } else if (!strncmp(opt, "watchdog:", 9)) {
+ } else if (sysfs_streq(opt, "watchdog:")) {
if (kstrtoint(opt + 9, 0, &watchdog))
goto err;
- } else if (!strncmp(opt, "flow_ctrl:", 10)) {
+ } else if (sysfs_streq(opt, "flow_ctrl:")) {
if (kstrtoint(opt + 10, 0, &flow_ctrl))
goto err;
- } else if (!strncmp(opt, "pause:", 6)) {
+ } else if (sysfs_streq(opt, "pause:", 6)) {
if (kstrtoint(opt + 6, 0, &pause))
goto err;
- } else if (!strncmp(opt, "eee_timer:", 10)) {
+ } else if (sysfs_streq(opt, "eee_timer:")) {
if (kstrtoint(opt + 10, 0, &eee_timer))
goto err;
- } else if (!strncmp(opt, "chain_mode:", 11)) {
+ } else if (sysfs_streq(opt, "chain_mode:")) {
if (kstrtoint(opt + 11, 0, &chain_mode))
goto err;
}
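
For reference, the option strings above are parsed as "name:value" pairs, and the numeric offset added to each opt (opt + 6 for "debug:", opt + 8 for "phyaddr:", and so on) is the length of the option name including the colon. A hedged, user-space-only sketch of the same strsep()-based parsing follows; strncmp() and strtol() stand in for the in-kernel string helpers.

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	char cmdline[] = "debug:16,phyaddr:3,watchdog:5000";
	char *str = cmdline, *opt;

	/* Split on commas, then match each option by its prefix */
	while ((opt = strsep(&str, ",")) != NULL) {
		if (!strncmp(opt, "debug:", 6))
			printf("debug = %ld\n", strtol(opt + 6, NULL, 0));
		else if (!strncmp(opt, "phyaddr:", 8))
			printf("phyaddr = %ld\n", strtol(opt + 8, NULL, 0));
		else if (!strncmp(opt, "watchdog:", 9))
			printf("watchdog = %ld\n", strtol(opt + 9, NULL, 0));
	}
	return 0;
}
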
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index 0aca193d9550..4ef05bad4613 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -90,8 +90,6 @@
#include <linux/uaccess.h>
#include <linux/jiffies.h>
-#define cas_page_map(x) kmap_atomic((x))
-#define cas_page_unmap(x) kunmap_atomic((x))
#define CAS_NCPUS num_online_cpus()
#define cas_skb_release(x) netif_rx(x)
@@ -1915,7 +1913,7 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
int off, swivel = RX_SWIVEL_OFF_VAL;
struct cas_page *page;
struct sk_buff *skb;
- void *addr, *crcaddr;
+ void *crcaddr;
__sum16 csum;
char *p;
@@ -1936,7 +1934,7 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
skb_reserve(skb, swivel);
p = skb->data;
- addr = crcaddr = NULL;
+ crcaddr = NULL;
if (hlen) { /* always copy header pages */
i = CAS_VAL(RX_COMP2_HDR_INDEX, words[1]);
page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
@@ -1948,12 +1946,10 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
i += cp->crc_size;
dma_sync_single_for_cpu(&cp->pdev->dev, page->dma_addr + off,
i, DMA_FROM_DEVICE);
- addr = cas_page_map(page->buffer);
- memcpy(p, addr + off, i);
+ memcpy(p, page_address(page->buffer) + off, i);
dma_sync_single_for_device(&cp->pdev->dev,
page->dma_addr + off, i,
DMA_FROM_DEVICE);
- cas_page_unmap(addr);
RX_USED_ADD(page, 0x100);
p += hlen;
swivel = 0;
@@ -1984,12 +1980,11 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
/* make sure we always copy a header */
swivel = 0;
if (p == (char *) skb->data) { /* not split */
- addr = cas_page_map(page->buffer);
- memcpy(p, addr + off, RX_COPY_MIN);
+ memcpy(p, page_address(page->buffer) + off,
+ RX_COPY_MIN);
dma_sync_single_for_device(&cp->pdev->dev,
page->dma_addr + off, i,
DMA_FROM_DEVICE);
- cas_page_unmap(addr);
off += RX_COPY_MIN;
swivel = RX_COPY_MIN;
RX_USED_ADD(page, cp->mtu_stride);
@@ -2036,10 +2031,8 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
RX_USED_ADD(page, hlen + cp->crc_size);
}
- if (cp->crc_size) {
- addr = cas_page_map(page->buffer);
- crcaddr = addr + off + hlen;
- }
+ if (cp->crc_size)
+ crcaddr = page_address(page->buffer) + off + hlen;
} else {
/* copying packet */
@@ -2061,12 +2054,10 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
i += cp->crc_size;
dma_sync_single_for_cpu(&cp->pdev->dev, page->dma_addr + off,
i, DMA_FROM_DEVICE);
- addr = cas_page_map(page->buffer);
- memcpy(p, addr + off, i);
+ memcpy(p, page_address(page->buffer) + off, i);
dma_sync_single_for_device(&cp->pdev->dev,
page->dma_addr + off, i,
DMA_FROM_DEVICE);
- cas_page_unmap(addr);
if (p == (char *) skb->data) /* not split */
RX_USED_ADD(page, cp->mtu_stride);
else
@@ -2081,20 +2072,17 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
page->dma_addr,
dlen + cp->crc_size,
DMA_FROM_DEVICE);
- addr = cas_page_map(page->buffer);
- memcpy(p, addr, dlen + cp->crc_size);
+ memcpy(p, page_address(page->buffer), dlen + cp->crc_size);
dma_sync_single_for_device(&cp->pdev->dev,
page->dma_addr,
dlen + cp->crc_size,
DMA_FROM_DEVICE);
- cas_page_unmap(addr);
RX_USED_ADD(page, dlen + cp->crc_size);
}
end_copy_pkt:
- if (cp->crc_size) {
- addr = NULL;
+ if (cp->crc_size)
crcaddr = skb->data + alloclen;
- }
+
skb_put(skb, alloclen);
}
@@ -2103,8 +2091,6 @@ end_copy_pkt:
/* checksum includes FCS. strip it out. */
csum = csum_fold(csum_partial(crcaddr, cp->crc_size,
csum_unfold(csum)));
- if (addr)
- cas_page_unmap(addr);
}
skb->protocol = eth_type_trans(skb, cp->dev);
if (skb->protocol == htons(ETH_P_IP)) {
@@ -2793,18 +2779,14 @@ static inline int cas_xmit_tx_ringN(struct cas *cp, int ring,
tabort = cas_calc_tabort(cp, skb_frag_off(fragp), len);
if (unlikely(tabort)) {
- void *addr;
-
/* NOTE: len is always > tabort */
cas_write_txd(cp, ring, entry, mapping, len - tabort,
ctrl, 0);
entry = TX_DESC_NEXT(ring, entry);
-
- addr = cas_page_map(skb_frag_page(fragp));
- memcpy(tx_tiny_buf(cp, ring, entry),
- addr + skb_frag_off(fragp) + len - tabort,
- tabort);
- cas_page_unmap(addr);
+ memcpy_from_page(tx_tiny_buf(cp, ring, entry),
+ skb_frag_page(fragp),
+ skb_frag_off(fragp) + len - tabort,
+ tabort);
mapping = tx_tiny_map(cp, ring, entry, tentry);
len = tabort;
}
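
memcpy_from_page() used in the TX path above is a highmem helper that bundles the map/copy/unmap sequence into one call. As a hedged illustration (invented helper name), the open-coded form it replaces is essentially:

#include <linux/highmem.h>
#include <linux/string.h>

/* Illustrative only: this sequence (plus a bounds check) is what
 * memcpy_from_page(dst, page, offset, len) performs internally.
 */
static void example_copy_from_page(void *dst, struct page *page,
				   size_t offset, size_t len)
{
	char *vaddr = kmap_local_page(page);

	memcpy(dst, vaddr + offset, len);
	kunmap_local(vaddr);
}
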
diff --git a/drivers/net/ethernet/sun/sunvnet_common.c b/drivers/net/ethernet/sun/sunvnet_common.c
index 80fde5f06fce..a6211b95ed17 100644
--- a/drivers/net/ethernet/sun/sunvnet_common.c
+++ b/drivers/net/ethernet/sun/sunvnet_common.c
@@ -1085,13 +1085,13 @@ static inline int vnet_skb_map(struct ldc_channel *lp, struct sk_buff *skb,
u8 *vaddr;
if (nc < ncookies) {
- vaddr = kmap_atomic(skb_frag_page(f));
+ vaddr = kmap_local_page(skb_frag_page(f));
blen = skb_frag_size(f);
blen += 8 - (blen & 7);
err = ldc_map_single(lp, vaddr + skb_frag_off(f),
blen, cookies + nc, ncookies - nc,
map_perm);
- kunmap_atomic(vaddr);
+ kunmap_local(vaddr);
} else {
err = -EMSGSIZE;
}
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 040c8bf6d05b..af00cf44cd97 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -260,7 +260,7 @@ config MOTORCOMM_PHY
tristate "Motorcomm PHYs"
help
Enables support for Motorcomm network PHYs.
- Currently supports the YT8511, YT8521 Gigabit Ethernet PHYs.
+ Currently supports the YT8511, YT8521, YT8531S Gigabit Ethernet PHYs.
config NATIONAL_PHY
tristate "National Semiconductor PHYs"
diff --git a/drivers/net/phy/motorcomm.c b/drivers/net/phy/motorcomm.c
index bd1ab5d0631f..685190db72de 100644
--- a/drivers/net/phy/motorcomm.c
+++ b/drivers/net/phy/motorcomm.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0+
/*
- * Motorcomm 8511/8521 PHY driver.
+ * Motorcomm 8511/8521/8531S PHY driver.
*
* Author: Peter Geis <[email protected]>
* Author: Frank <[email protected]>
@@ -12,9 +12,10 @@
#include <linux/phy.h>
#define PHY_ID_YT8511 0x0000010a
-#define PHY_ID_YT8521 0x0000011A
+#define PHY_ID_YT8521 0x0000011A
+#define PHY_ID_YT8531S 0x4F51E91A
-/* YT8521 Register Overview
+/* YT8521/YT8531S Register Overview
* UTP Register space | FIBER Register space
* ------------------------------------------------------------
* | UTP MII | FIBER MII |
@@ -147,7 +148,7 @@
#define YT8521_LINK_TIMER_CFG2_REG 0xA5
#define YT8521_LTCR_EN_AUTOSEN BIT(15)
-/* 0xA000, 0xA001, 0xA003 ,and 0xA006 ~ 0xA00A are common ext registers
+/* 0xA000, 0xA001, 0xA003, 0xA006 ~ 0xA00A and 0xA012 are common ext registers
* of yt8521 phy. There is no need to switch reg space when operating these
* registers.
*/
@@ -221,6 +222,9 @@
*/
#define YTPHY_WCR_TYPE_PULSE BIT(0)
+#define YT8531S_SYNCE_CFG_REG 0xA012
+#define YT8531S_SCR_SYNCE_ENABLE BIT(6)
+
/* Extended Register end */
struct yt8521_priv {
@@ -648,6 +652,26 @@ static int yt8521_probe(struct phy_device *phydev)
}
/**
+ * yt8531s_probe() - read chip config then set suitable polling_mode
+ * @phydev: a pointer to a &struct phy_device
+ *
+ * returns 0 or negative errno code
+ */
+static int yt8531s_probe(struct phy_device *phydev)
+{
+ int ret;
+
+ /* Disable SyncE clock output by default */
+ ret = ytphy_modify_ext_with_lock(phydev, YT8531S_SYNCE_CFG_REG,
+ YT8531S_SCR_SYNCE_ENABLE, 0);
+ if (ret < 0)
+ return ret;
+
+ /* same as yt8521_probe */
+ return yt8521_probe(phydev);
+}
+
+/**
* ytphy_utp_read_lpa() - read LPA then setup lp_advertising for utp
* @phydev: a pointer to a &struct phy_device
*
@@ -1750,11 +1774,28 @@ static struct phy_driver motorcomm_phy_drvs[] = {
.suspend = yt8521_suspend,
.resume = yt8521_resume,
},
+ {
+ PHY_ID_MATCH_EXACT(PHY_ID_YT8531S),
+ .name = "YT8531S Gigabit Ethernet",
+ .get_features = yt8521_get_features,
+ .probe = yt8531s_probe,
+ .read_page = yt8521_read_page,
+ .write_page = yt8521_write_page,
+ .get_wol = ytphy_get_wol,
+ .set_wol = ytphy_set_wol,
+ .config_aneg = yt8521_config_aneg,
+ .aneg_done = yt8521_aneg_done,
+ .config_init = yt8521_config_init,
+ .read_status = yt8521_read_status,
+ .soft_reset = yt8521_soft_reset,
+ .suspend = yt8521_suspend,
+ .resume = yt8521_resume,
+ },
};
module_phy_driver(motorcomm_phy_drvs);
-MODULE_DESCRIPTION("Motorcomm 8511/8521 PHY driver");
+MODULE_DESCRIPTION("Motorcomm 8511/8521/8531S PHY driver");
MODULE_AUTHOR("Peter Geis");
MODULE_AUTHOR("Frank");
MODULE_LICENSE("GPL");
@@ -1762,6 +1803,7 @@ MODULE_LICENSE("GPL");
static const struct mdio_device_id __maybe_unused motorcomm_tbl[] = {
{ PHY_ID_MATCH_EXACT(PHY_ID_YT8511) },
{ PHY_ID_MATCH_EXACT(PHY_ID_YT8521) },
+ { PHY_ID_MATCH_EXACT(PHY_ID_YT8531S) },
{ /* sentinal */ }
};
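
yt8531s_probe() clears the SyncE clock-output enable bit through the driver's extended-register helper. As a hedged sketch, a hypothetical caller that does want the recovered-clock output could set the same bit back; the helper and register names are taken from the diff above, and the (mask, set) argument order is assumed from that call site.

#include <linux/phy.h>

/* Hypothetical, for illustration only; not part of the driver above. */
static int example_enable_synce_output(struct phy_device *phydev)
{
	return ytphy_modify_ext_with_lock(phydev, YT8531S_SYNCE_CFG_REG,
					  YT8531S_SCR_SYNCE_ENABLE,
					  YT8531S_SCR_SYNCE_ENABLE);
}
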
diff --git a/drivers/net/thunderbolt.c b/drivers/net/thunderbolt.c
index a52ee2bf5575..b20cd370b7f2 100644
--- a/drivers/net/thunderbolt.c
+++ b/drivers/net/thunderbolt.c
@@ -1051,7 +1051,7 @@ static void *tbnet_kmap_frag(struct sk_buff *skb, unsigned int frag_num,
const skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_num];
*len = skb_frag_size(frag);
- return kmap_atomic(skb_frag_page(frag)) + skb_frag_off(frag);
+ return kmap_local_page(skb_frag_page(frag)) + skb_frag_off(frag);
}
static netdev_tx_t tbnet_start_xmit(struct sk_buff *skb,
@@ -1109,7 +1109,7 @@ static netdev_tx_t tbnet_start_xmit(struct sk_buff *skb,
dest += len;
if (unmap) {
- kunmap_atomic(src);
+ kunmap_local(src);
unmap = false;
}
@@ -1147,7 +1147,7 @@ static netdev_tx_t tbnet_start_xmit(struct sk_buff *skb,
dest += len;
if (unmap) {
- kunmap_atomic(src);
+ kunmap_local(src);
unmap = false;
}
@@ -1162,7 +1162,7 @@ static netdev_tx_t tbnet_start_xmit(struct sk_buff *skb,
memcpy(dest, src, data_len);
if (unmap)
- kunmap_atomic(src);
+ kunmap_local(src);
if (!tbnet_xmit_csum_and_map(net, skb, frames, frame_index + 1))
goto err_drop;
diff --git a/drivers/nfc/microread/i2c.c b/drivers/nfc/microread/i2c.c
index 5eaa18f81355..e72b358a2a12 100644
--- a/drivers/nfc/microread/i2c.c
+++ b/drivers/nfc/microread/i2c.c
@@ -231,8 +231,7 @@ static const struct nfc_phy_ops i2c_phy_ops = {
.disable = microread_i2c_disable,
};
-static int microread_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int microread_i2c_probe(struct i2c_client *client)
{
struct microread_i2c_phy *phy;
int r;
@@ -287,7 +286,7 @@ static struct i2c_driver microread_i2c_driver = {
.driver = {
.name = MICROREAD_I2C_DRIVER_NAME,
},
- .probe = microread_i2c_probe,
+ .probe_new = microread_i2c_probe,
.remove = microread_i2c_remove,
.id_table = microread_i2c_id,
};
diff --git a/drivers/nfc/nfcmrvl/i2c.c b/drivers/nfc/nfcmrvl/i2c.c
index 24436c9e54c9..c3ab49df1e35 100644
--- a/drivers/nfc/nfcmrvl/i2c.c
+++ b/drivers/nfc/nfcmrvl/i2c.c
@@ -181,8 +181,7 @@ static int nfcmrvl_i2c_parse_dt(struct device_node *node,
return 0;
}
-static int nfcmrvl_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int nfcmrvl_i2c_probe(struct i2c_client *client)
{
const struct nfcmrvl_platform_data *pdata;
struct nfcmrvl_i2c_drv_data *drv_data;
@@ -257,7 +256,7 @@ static const struct i2c_device_id nfcmrvl_i2c_id_table[] = {
MODULE_DEVICE_TABLE(i2c, nfcmrvl_i2c_id_table);
static struct i2c_driver nfcmrvl_i2c_driver = {
- .probe = nfcmrvl_i2c_probe,
+ .probe_new = nfcmrvl_i2c_probe,
.id_table = nfcmrvl_i2c_id_table,
.remove = nfcmrvl_i2c_remove,
.driver = {
diff --git a/drivers/nfc/nxp-nci/i2c.c b/drivers/nfc/nxp-nci/i2c.c
index ec6446511984..d4c299be7949 100644
--- a/drivers/nfc/nxp-nci/i2c.c
+++ b/drivers/nfc/nxp-nci/i2c.c
@@ -263,8 +263,7 @@ static const struct acpi_gpio_mapping acpi_nxp_nci_gpios[] = {
{ }
};
-static int nxp_nci_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int nxp_nci_i2c_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct nxp_nci_i2c_phy *phy;
@@ -349,7 +348,7 @@ static struct i2c_driver nxp_nci_i2c_driver = {
.acpi_match_table = ACPI_PTR(acpi_id),
.of_match_table = of_nxp_nci_i2c_match,
},
- .probe = nxp_nci_i2c_probe,
+ .probe_new = nxp_nci_i2c_probe,
.id_table = nxp_nci_i2c_id_table,
.remove = nxp_nci_i2c_remove,
};
diff --git a/drivers/nfc/pn533/i2c.c b/drivers/nfc/pn533/i2c.c
index ddf3db286bad..1503a98f0405 100644
--- a/drivers/nfc/pn533/i2c.c
+++ b/drivers/nfc/pn533/i2c.c
@@ -163,8 +163,7 @@ static const struct pn533_phy_ops i2c_phy_ops = {
};
-static int pn533_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int pn533_i2c_probe(struct i2c_client *client)
{
struct pn533_i2c_phy *phy;
struct pn533 *priv;
@@ -260,7 +259,7 @@ static struct i2c_driver pn533_i2c_driver = {
.name = PN533_I2C_DRIVER_NAME,
.of_match_table = of_match_ptr(of_pn533_i2c_match),
},
- .probe = pn533_i2c_probe,
+ .probe_new = pn533_i2c_probe,
.id_table = pn533_i2c_id_table,
.remove = pn533_i2c_remove,
};
diff --git a/drivers/nfc/pn544/i2c.c b/drivers/nfc/pn544/i2c.c
index 9e754abcfa2a..8b0d910bee06 100644
--- a/drivers/nfc/pn544/i2c.c
+++ b/drivers/nfc/pn544/i2c.c
@@ -866,8 +866,7 @@ static const struct acpi_gpio_mapping acpi_pn544_gpios[] = {
{ },
};
-static int pn544_hci_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int pn544_hci_i2c_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct pn544_i2c_phy *phy;
@@ -954,7 +953,7 @@ static struct i2c_driver pn544_hci_i2c_driver = {
.of_match_table = of_match_ptr(of_pn544_i2c_match),
.acpi_match_table = ACPI_PTR(pn544_hci_i2c_acpi_match),
},
- .probe = pn544_hci_i2c_probe,
+ .probe_new = pn544_hci_i2c_probe,
.id_table = pn544_hci_i2c_id_table,
.remove = pn544_hci_i2c_remove,
};
diff --git a/drivers/nfc/s3fwrn5/i2c.c b/drivers/nfc/s3fwrn5/i2c.c
index ecdee838d25d..2517ae71f9a4 100644
--- a/drivers/nfc/s3fwrn5/i2c.c
+++ b/drivers/nfc/s3fwrn5/i2c.c
@@ -177,8 +177,7 @@ static int s3fwrn5_i2c_parse_dt(struct i2c_client *client)
return 0;
}
-static int s3fwrn5_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int s3fwrn5_i2c_probe(struct i2c_client *client)
{
struct s3fwrn5_i2c_phy *phy;
int ret;
@@ -262,7 +261,7 @@ static struct i2c_driver s3fwrn5_i2c_driver = {
.name = S3FWRN5_I2C_DRIVER_NAME,
.of_match_table = of_match_ptr(of_s3fwrn5_i2c_match),
},
- .probe = s3fwrn5_i2c_probe,
+ .probe_new = s3fwrn5_i2c_probe,
.remove = s3fwrn5_i2c_remove,
.id_table = s3fwrn5_i2c_id_table,
};
diff --git a/drivers/nfc/st-nci/i2c.c b/drivers/nfc/st-nci/i2c.c
index 89fa24d71bef..6b5eed8a1fbe 100644
--- a/drivers/nfc/st-nci/i2c.c
+++ b/drivers/nfc/st-nci/i2c.c
@@ -195,8 +195,7 @@ static const struct acpi_gpio_mapping acpi_st_nci_gpios[] = {
{},
};
-static int st_nci_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int st_nci_i2c_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct st_nci_i2c_phy *phy;
@@ -284,7 +283,7 @@ static struct i2c_driver st_nci_i2c_driver = {
.of_match_table = of_match_ptr(of_st_nci_i2c_match),
.acpi_match_table = ACPI_PTR(st_nci_i2c_acpi_match),
},
- .probe = st_nci_i2c_probe,
+ .probe_new = st_nci_i2c_probe,
.id_table = st_nci_i2c_id_table,
.remove = st_nci_i2c_remove,
};
diff --git a/drivers/nfc/st21nfca/i2c.c b/drivers/nfc/st21nfca/i2c.c
index 76b55986bcf8..55f7a2391bb1 100644
--- a/drivers/nfc/st21nfca/i2c.c
+++ b/drivers/nfc/st21nfca/i2c.c
@@ -487,8 +487,7 @@ static const struct acpi_gpio_mapping acpi_st21nfca_gpios[] = {
{},
};
-static int st21nfca_hci_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int st21nfca_hci_i2c_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct st21nfca_i2c_phy *phy;
@@ -598,7 +597,7 @@ static struct i2c_driver st21nfca_hci_i2c_driver = {
.of_match_table = of_match_ptr(of_st21nfca_i2c_match),
.acpi_match_table = ACPI_PTR(st21nfca_hci_i2c_acpi_match),
},
- .probe = st21nfca_hci_i2c_probe,
+ .probe_new = st21nfca_hci_i2c_probe,
.id_table = st21nfca_hci_i2c_id_table,
.remove = st21nfca_hci_i2c_remove,
};
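
All of the NFC conversions above follow the same i2c probe_new pattern: the probe callback drops its unused struct i2c_device_id argument and is registered through .probe_new instead of .probe. A hedged, generic skeleton of that pattern (placeholder driver and device names, not one of the drivers above):

#include <linux/i2c.h>
#include <linux/module.h>

static int example_i2c_probe(struct i2c_client *client)
{
	/* The i2c_device_id pointer is no longer passed in; drivers that
	 * still need it can look it up, e.g. with i2c_match_id().
	 */
	return 0;
}

static const struct i2c_device_id example_i2c_id[] = {
	{ "example-chip", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, example_i2c_id);

static struct i2c_driver example_i2c_driver = {
	.driver = {
		.name = "example-chip",
	},
	.probe_new = example_i2c_probe,
	.id_table = example_i2c_id,
};
module_i2c_driver(example_i2c_driver);

MODULE_LICENSE("GPL");
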
diff --git a/drivers/ptp/ptp_idt82p33.c b/drivers/ptp/ptp_idt82p33.c
index 97c1be44e323..afc76c22271a 100644
--- a/drivers/ptp/ptp_idt82p33.c
+++ b/drivers/ptp/ptp_idt82p33.c
@@ -27,6 +27,8 @@ MODULE_VERSION("1.0");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(FW_FILENAME);
+#define EXTTS_PERIOD_MS (95)
+
/* Module Parameters */
static u32 phase_snap_threshold = SNAP_THRESHOLD_NS;
module_param(phase_snap_threshold, uint, 0);
@@ -36,6 +38,8 @@ MODULE_PARM_DESC(phase_snap_threshold,
static char *firmware;
module_param(firmware, charp, 0);
+static struct ptp_pin_desc pin_config[MAX_PHC_PLL][MAX_TRIG_CLK];
+
static inline int idt82p33_read(struct idt82p33 *idt82p33, u16 regaddr,
u8 *buf, u16 count)
{
@@ -121,24 +125,270 @@ static int idt82p33_dpll_set_mode(struct idt82p33_channel *channel,
return 0;
}
-static int _idt82p33_gettime(struct idt82p33_channel *channel,
- struct timespec64 *ts)
+static int idt82p33_set_tod_trigger(struct idt82p33_channel *channel,
+ u8 trigger, bool write)
+{
+ struct idt82p33 *idt82p33 = channel->idt82p33;
+ int err;
+ u8 cfg;
+
+ if (trigger > WR_TRIG_SEL_MAX)
+ return -EINVAL;
+
+ err = idt82p33_read(idt82p33, channel->dpll_tod_trigger,
+ &cfg, sizeof(cfg));
+
+ if (err)
+ return err;
+
+ if (write == true)
+ trigger = (trigger << WRITE_TRIGGER_SHIFT) |
+ (cfg & READ_TRIGGER_MASK);
+ else
+ trigger = (trigger << READ_TRIGGER_SHIFT) |
+ (cfg & WRITE_TRIGGER_MASK);
+
+ return idt82p33_write(idt82p33, channel->dpll_tod_trigger,
+ &trigger, sizeof(trigger));
+}
+
+static int idt82p33_get_extts(struct idt82p33_channel *channel,
+ struct timespec64 *ts)
+{
+ struct idt82p33 *idt82p33 = channel->idt82p33;
+ u8 buf[TOD_BYTE_COUNT];
+ int err;
+
+ err = idt82p33_read(idt82p33, channel->dpll_tod_sts, buf, sizeof(buf));
+
+ if (err)
+ return err;
+
+ /* The trigger does not clear itself, so we have to poll tod_sts */
+ if (memcmp(buf, channel->extts_tod_sts, TOD_BYTE_COUNT) == 0)
+ return -EAGAIN;
+
+ memcpy(channel->extts_tod_sts, buf, TOD_BYTE_COUNT);
+
+ idt82p33_byte_array_to_timespec(ts, buf);
+
+ if (channel->discard_next_extts) {
+ channel->discard_next_extts = false;
+ return -EAGAIN;
+ }
+
+ return 0;
+}
+
+static int map_ref_to_tod_trig_sel(int ref, u8 *trigger)
+{
+ int err = 0;
+
+ switch (ref) {
+ case 0:
+ *trigger = HW_TOD_TRIG_SEL_IN12;
+ break;
+ case 1:
+ *trigger = HW_TOD_TRIG_SEL_IN13;
+ break;
+ case 2:
+ *trigger = HW_TOD_TRIG_SEL_IN14;
+ break;
+ default:
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
+static bool is_one_shot(u8 mask)
+{
+ /* Treat single bit PLL masks as continuous trigger */
+ if ((mask == 1) || (mask == 2))
+ return false;
+ else
+ return true;
+}
+
+static int arm_tod_read_with_trigger(struct idt82p33_channel *channel, u8 trigger)
{
struct idt82p33 *idt82p33 = channel->idt82p33;
u8 buf[TOD_BYTE_COUNT];
+ int err;
+
+ /* Remember the current tod_sts before setting the trigger */
+ err = idt82p33_read(idt82p33, channel->dpll_tod_sts, buf, sizeof(buf));
+
+ if (err)
+ return err;
+
+ memcpy(channel->extts_tod_sts, buf, TOD_BYTE_COUNT);
+
+ err = idt82p33_set_tod_trigger(channel, trigger, false);
+
+ if (err)
+ dev_err(idt82p33->dev, "%s: err = %d", __func__, err);
+
+ return err;
+}
+
+static int idt82p33_extts_enable(struct idt82p33_channel *channel,
+ struct ptp_clock_request *rq, int on)
+{
+ u8 index = rq->extts.index;
+ struct idt82p33 *idt82p33;
+ u8 mask = 1 << index;
+ int err = 0;
+ u8 old_mask;
u8 trigger;
+ int ref;
+
+ idt82p33 = channel->idt82p33;
+ old_mask = idt82p33->extts_mask;
+
+ /* Reject requests with unsupported flags */
+ if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
+ PTP_RISING_EDGE |
+ PTP_FALLING_EDGE |
+ PTP_STRICT_FLAGS))
+ return -EOPNOTSUPP;
+
+ /* Reject requests to enable time stamping on falling edge */
+ if ((rq->extts.flags & PTP_ENABLE_FEATURE) &&
+ (rq->extts.flags & PTP_FALLING_EDGE))
+ return -EOPNOTSUPP;
+
+ if (index >= MAX_PHC_PLL)
+ return -EINVAL;
+
+ if (on) {
+ /* Return if it was already enabled */
+ if (idt82p33->extts_mask & mask)
+ return 0;
+
+ /* Use the pin configured for the channel */
+ ref = ptp_find_pin(channel->ptp_clock, PTP_PF_EXTTS, channel->plln);
+
+ if (ref < 0) {
+ dev_err(idt82p33->dev, "%s: No valid pin found for Pll%d!\n",
+ __func__, channel->plln);
+ return -EBUSY;
+ }
+
+ err = map_ref_to_tod_trig_sel(ref, &trigger);
+
+ if (err) {
+ dev_err(idt82p33->dev,
+ "%s: Unsupported ref %d!\n", __func__, ref);
+ return err;
+ }
+
+ err = arm_tod_read_with_trigger(&idt82p33->channel[index], trigger);
+
+ if (err == 0) {
+ idt82p33->extts_mask |= mask;
+ idt82p33->channel[index].tod_trigger = trigger;
+ idt82p33->event_channel[index] = channel;
+ idt82p33->extts_single_shot = is_one_shot(idt82p33->extts_mask);
+
+ if (old_mask)
+ return 0;
+
+ schedule_delayed_work(&idt82p33->extts_work,
+ msecs_to_jiffies(EXTTS_PERIOD_MS));
+ }
+ } else {
+ idt82p33->extts_mask &= ~mask;
+ idt82p33->extts_single_shot = is_one_shot(idt82p33->extts_mask);
+
+ if (idt82p33->extts_mask == 0)
+ cancel_delayed_work(&idt82p33->extts_work);
+ }
+
+ return err;
+}
+
+static int idt82p33_extts_check_channel(struct idt82p33 *idt82p33, u8 todn)
+{
+ struct idt82p33_channel *event_channel;
+ struct ptp_clock_event event;
+ struct timespec64 ts;
+ int err;
+
+ err = idt82p33_get_extts(&idt82p33->channel[todn], &ts);
+ if (err == 0) {
+ event_channel = idt82p33->event_channel[todn];
+ event.type = PTP_CLOCK_EXTTS;
+ event.index = todn;
+ event.timestamp = timespec64_to_ns(&ts);
+ ptp_clock_event(event_channel->ptp_clock,
+ &event);
+ }
+ return err;
+}
+
+static u8 idt82p33_extts_enable_mask(struct idt82p33_channel *channel,
+ u8 extts_mask, bool enable)
+{
+ struct idt82p33 *idt82p33 = channel->idt82p33;
+ u8 trigger = channel->tod_trigger;
+ u8 mask;
int err;
+ int i;
- trigger = TOD_TRIGGER(HW_TOD_WR_TRIG_SEL_MSB_TOD_CNFG,
- HW_TOD_RD_TRIG_SEL_LSB_TOD_STS);
+ if (extts_mask == 0)
+ return 0;
+ if (enable == false)
+ cancel_delayed_work_sync(&idt82p33->extts_work);
- err = idt82p33_write(idt82p33, channel->dpll_tod_trigger,
- &trigger, sizeof(trigger));
+ for (i = 0; i < MAX_PHC_PLL; i++) {
+ mask = 1 << i;
+
+ if ((extts_mask & mask) == 0)
+ continue;
+ if (enable) {
+ err = arm_tod_read_with_trigger(&idt82p33->channel[i], trigger);
+ if (err)
+ dev_err(idt82p33->dev,
+ "%s: Arm ToD read trigger failed, err = %d",
+ __func__, err);
+ } else {
+ err = idt82p33_extts_check_channel(idt82p33, i);
+ if (err == 0 && idt82p33->extts_single_shot)
+ /* trigger happened so we won't re-enable it */
+ extts_mask &= ~mask;
+ }
+ }
+
+ if (enable)
+ schedule_delayed_work(&idt82p33->extts_work,
+ msecs_to_jiffies(EXTTS_PERIOD_MS));
+
+ return extts_mask;
+}
+
+static int _idt82p33_gettime(struct idt82p33_channel *channel,
+ struct timespec64 *ts)
+{
+ struct idt82p33 *idt82p33 = channel->idt82p33;
+ u8 old_mask = idt82p33->extts_mask;
+ u8 buf[TOD_BYTE_COUNT];
+ u8 new_mask = 0;
+ int err;
+
+ /* Disable extts */
+ if (old_mask)
+ new_mask = idt82p33_extts_enable_mask(channel, old_mask, false);
+
+ err = idt82p33_set_tod_trigger(channel, HW_TOD_RD_TRIG_SEL_LSB_TOD_STS,
+ false);
if (err)
return err;
+ channel->discard_next_extts = true;
+
if (idt82p33->calculate_overhead_flag)
idt82p33->start_time = ktime_get_raw();
@@ -147,6 +397,10 @@ static int _idt82p33_gettime(struct idt82p33_channel *channel,
if (err)
return err;
+ /* Re-enable extts */
+ if (new_mask)
+ idt82p33_extts_enable_mask(channel, new_mask, true);
+
idt82p33_byte_array_to_timespec(ts, buf);
return 0;
@@ -165,19 +419,16 @@ static int _idt82p33_settime(struct idt82p33_channel *channel,
struct timespec64 local_ts = *ts;
char buf[TOD_BYTE_COUNT];
s64 dynamic_overhead_ns;
- unsigned char trigger;
int err;
u8 i;
- trigger = TOD_TRIGGER(HW_TOD_WR_TRIG_SEL_MSB_TOD_CNFG,
- HW_TOD_RD_TRIG_SEL_LSB_TOD_STS);
-
- err = idt82p33_write(idt82p33, channel->dpll_tod_trigger,
- &trigger, sizeof(trigger));
-
+ err = idt82p33_set_tod_trigger(channel, HW_TOD_WR_TRIG_SEL_MSB_TOD_CNFG,
+ true);
if (err)
return err;
+ channel->discard_next_extts = true;
+
if (idt82p33->calculate_overhead_flag) {
dynamic_overhead_ns = ktime_to_ns(ktime_get_raw())
- ktime_to_ns(idt82p33->start_time);
@@ -202,7 +453,8 @@ static int _idt82p33_settime(struct idt82p33_channel *channel,
return err;
}
-static int _idt82p33_adjtime(struct idt82p33_channel *channel, s64 delta_ns)
+static int _idt82p33_adjtime_immediate(struct idt82p33_channel *channel,
+ s64 delta_ns)
{
struct idt82p33 *idt82p33 = channel->idt82p33;
struct timespec64 ts;
@@ -226,6 +478,60 @@ static int _idt82p33_adjtime(struct idt82p33_channel *channel, s64 delta_ns)
return err;
}
+static int _idt82p33_adjtime_internal_triggered(struct idt82p33_channel *channel,
+ s64 delta_ns)
+{
+ struct idt82p33 *idt82p33 = channel->idt82p33;
+ char buf[TOD_BYTE_COUNT];
+ struct timespec64 ts;
+ const u8 delay_ns = 32;
+ s32 remainder;
+ s64 ns;
+ int err;
+
+ err = _idt82p33_gettime(channel, &ts);
+
+ if (err)
+ return err;
+
+ if (ts.tv_nsec > (NSEC_PER_SEC - 5 * NSEC_PER_MSEC)) {
+ /* Too close to miss next trigger, so skip it */
+ mdelay(6);
+ ns = (ts.tv_sec + 2) * NSEC_PER_SEC + delta_ns + delay_ns;
+ } else
+ ns = (ts.tv_sec + 1) * NSEC_PER_SEC + delta_ns + delay_ns;
+
+ ts = ns_to_timespec64(ns);
+ idt82p33_timespec_to_byte_array(&ts, buf);
+
+ /*
+ * Store the new time value.
+ */
+ err = idt82p33_write(idt82p33, channel->dpll_tod_cnfg, buf, sizeof(buf));
+ if (err)
+ return err;
+
+ /* Schedule to implement the workaround in one second */
+ (void)div_s64_rem(delta_ns, NSEC_PER_SEC, &remainder);
+ if (remainder != 0)
+ schedule_delayed_work(&channel->adjtime_work, HZ);
+
+ return idt82p33_set_tod_trigger(channel, HW_TOD_TRIG_SEL_TOD_PPS, true);
+}
+
+static void idt82p33_adjtime_workaround(struct work_struct *work)
+{
+ struct idt82p33_channel *channel = container_of(work,
+ struct idt82p33_channel,
+ adjtime_work.work);
+ struct idt82p33 *idt82p33 = channel->idt82p33;
+
+ mutex_lock(idt82p33->lock);
+ /* Workaround for TOD-to-output alignment issue */
+ _idt82p33_adjtime_internal_triggered(channel, 0);
+ mutex_unlock(idt82p33->lock);
+}
+
static int _idt82p33_adjfine(struct idt82p33_channel *channel, long scaled_ppm)
{
struct idt82p33 *idt82p33 = channel->idt82p33;
@@ -233,25 +539,22 @@ static int _idt82p33_adjfine(struct idt82p33_channel *channel, long scaled_ppm)
int err, i;
s64 fcw;
- if (scaled_ppm == channel->current_freq_ppb)
- return 0;
-
/*
- * Frequency Control Word unit is: 1.68 * 10^-10 ppm
+ * Frequency Control Word unit is: 1.6861512 * 10^-10 ppm
*
* adjfreq:
- * ppb * 10^9
- * FCW = ----------
- * 168
+ * ppb * 10^14
+ * FCW = -----------
+ * 16861512
*
* adjfine:
- * scaled_ppm * 5^12
- * FCW = -------------
- * 168 * 2^4
+ * scaled_ppm * 5^12 * 10^5
+ * FCW = ------------------------
+ * 16861512 * 2^4
*/
- fcw = scaled_ppm * 244140625ULL;
- fcw = div_s64(fcw, 2688);
+ fcw = scaled_ppm * 762939453125ULL;
+ fcw = div_s64(fcw, 8430756LL);
for (i = 0; i < 5; i++) {
buf[i] = fcw & 0xff;
@@ -266,26 +569,84 @@ static int _idt82p33_adjfine(struct idt82p33_channel *channel, long scaled_ppm)
err = idt82p33_write(idt82p33, channel->dpll_freq_cnfg,
buf, sizeof(buf));
- if (err == 0)
- channel->current_freq_ppb = scaled_ppm;
-
return err;
}
+/* ppb = scaled_ppm * 125 / 2^13 */
+static s32 idt82p33_ddco_scaled_ppm(long current_ppm, s32 ddco_ppb)
+{
+ s64 scaled_ppm = div_s64(((s64)ddco_ppb << 13), 125);
+ s64 max_scaled_ppm = div_s64(((s64)DCO_MAX_PPB << 13), 125);
+
+ current_ppm += scaled_ppm;
+
+ if (current_ppm > max_scaled_ppm)
+ current_ppm = max_scaled_ppm;
+ else if (current_ppm < -max_scaled_ppm)
+ current_ppm = -max_scaled_ppm;
+
+ return (s32)current_ppm;
+}
+
+static int idt82p33_stop_ddco(struct idt82p33_channel *channel)
+{
+ int err;
+
+ err = _idt82p33_adjfine(channel, channel->current_freq);
+ if (err)
+ return err;
+
+ channel->ddco = false;
+
+ return 0;
+}
+
+static int idt82p33_start_ddco(struct idt82p33_channel *channel, s32 delta_ns)
+{
+ s32 current_ppm = channel->current_freq;
+ u32 duration_ms = MSEC_PER_SEC;
+ s32 ppb;
+ int err;
+
+ /* If the ToD correction is less than 5 nanoseconds, then skip it.
+ * The error introduced by the ToD adjustment procedure would be bigger
+ * than the required ToD correction
+ */
+ if (abs(delta_ns) < DDCO_THRESHOLD_NS)
+ return 0;
+
+ /* For most cases, keep ddco duration 1 second */
+ ppb = delta_ns;
+ while (abs(ppb) > DCO_MAX_PPB) {
+ duration_ms *= 2;
+ ppb /= 2;
+ }
+
+ err = _idt82p33_adjfine(channel,
+ idt82p33_ddco_scaled_ppm(current_ppm, ppb));
+ if (err)
+ return err;
+
+ /* schedule the worker to cancel ddco */
+ ptp_schedule_worker(channel->ptp_clock,
+ msecs_to_jiffies(duration_ms) - 1);
+ channel->ddco = true;
+
+ return 0;
+}
+
static int idt82p33_measure_one_byte_write_overhead(
struct idt82p33_channel *channel, s64 *overhead_ns)
{
struct idt82p33 *idt82p33 = channel->idt82p33;
ktime_t start, stop;
+ u8 trigger = 0;
s64 total_ns;
- u8 trigger;
int err;
u8 i;
total_ns = 0;
*overhead_ns = 0;
- trigger = TOD_TRIGGER(HW_TOD_WR_TRIG_SEL_MSB_TOD_CNFG,
- HW_TOD_RD_TRIG_SEL_LSB_TOD_STS);
for (i = 0; i < MAX_MEASURMENT_COUNT; i++) {
@@ -307,8 +668,41 @@ static int idt82p33_measure_one_byte_write_overhead(
return err;
}
+static int idt82p33_measure_one_byte_read_overhead(
+ struct idt82p33_channel *channel, s64 *overhead_ns)
+{
+ struct idt82p33 *idt82p33 = channel->idt82p33;
+ ktime_t start, stop;
+ u8 trigger = 0;
+ s64 total_ns;
+ int err;
+ u8 i;
+
+ total_ns = 0;
+ *overhead_ns = 0;
+
+ for (i = 0; i < MAX_MEASURMENT_COUNT; i++) {
+
+ start = ktime_get_raw();
+
+ err = idt82p33_read(idt82p33, channel->dpll_tod_trigger,
+ &trigger, sizeof(trigger));
+
+ stop = ktime_get_raw();
+
+ if (err)
+ return err;
+
+ total_ns += ktime_to_ns(stop) - ktime_to_ns(start);
+ }
+
+ *overhead_ns = div_s64(total_ns, MAX_MEASURMENT_COUNT);
+
+ return err;
+}
+
static int idt82p33_measure_tod_write_9_byte_overhead(
- struct idt82p33_channel *channel)
+ struct idt82p33_channel *channel)
{
struct idt82p33 *idt82p33 = channel->idt82p33;
u8 buf[TOD_BYTE_COUNT];
@@ -368,7 +762,7 @@ static int idt82p33_measure_settime_gettime_gap_overhead(
static int idt82p33_measure_tod_write_overhead(struct idt82p33_channel *channel)
{
- s64 trailing_overhead_ns, one_byte_write_ns, gap_ns;
+ s64 trailing_overhead_ns, one_byte_write_ns, gap_ns, one_byte_read_ns;
struct idt82p33 *idt82p33 = channel->idt82p33;
int err;
@@ -388,12 +782,19 @@ static int idt82p33_measure_tod_write_overhead(struct idt82p33_channel *channel)
if (err)
return err;
+ err = idt82p33_measure_one_byte_read_overhead(channel,
+ &one_byte_read_ns);
+
+ if (err)
+ return err;
+
err = idt82p33_measure_tod_write_9_byte_overhead(channel);
if (err)
return err;
- trailing_overhead_ns = gap_ns - (2 * one_byte_write_ns);
+ trailing_overhead_ns = gap_ns - 2 * one_byte_write_ns
+ - one_byte_read_ns;
idt82p33->tod_write_overhead_ns -= trailing_overhead_ns;
@@ -462,6 +863,20 @@ static int idt82p33_sync_tod(struct idt82p33_channel *channel, bool enable)
&sync_cnfg, sizeof(sync_cnfg));
}
+static long idt82p33_work_handler(struct ptp_clock_info *ptp)
+{
+ struct idt82p33_channel *channel =
+ container_of(ptp, struct idt82p33_channel, caps);
+ struct idt82p33 *idt82p33 = channel->idt82p33;
+
+ mutex_lock(idt82p33->lock);
+ (void)idt82p33_stop_ddco(channel);
+ mutex_unlock(idt82p33->lock);
+
+ /* Return a negative value here to not reschedule */
+ return -1;
+}
+
static int idt82p33_output_enable(struct idt82p33_channel *channel,
bool enable, unsigned int outn)
{
@@ -480,40 +895,10 @@ static int idt82p33_output_enable(struct idt82p33_channel *channel,
return idt82p33_write(idt82p33, OUT_MUX_CNFG(outn), &val, sizeof(val));
}
-static int idt82p33_output_mask_enable(struct idt82p33_channel *channel,
- bool enable)
-{
- u16 mask;
- int err;
- u8 outn;
-
- mask = channel->output_mask;
- outn = 0;
-
- while (mask) {
- if (mask & 0x1) {
- err = idt82p33_output_enable(channel, enable, outn);
- if (err)
- return err;
- }
-
- mask >>= 0x1;
- outn++;
- }
-
- return 0;
-}
-
static int idt82p33_perout_enable(struct idt82p33_channel *channel,
bool enable,
struct ptp_perout_request *perout)
{
- unsigned int flags = perout->flags;
-
- /* Enable/disable output based on output_mask */
- if (flags == PEROUT_ENABLE_OUTPUT_MASK)
- return idt82p33_output_mask_enable(channel, enable);
-
/* Enable/disable individual output instead */
return idt82p33_output_enable(channel, enable, perout->index);
}
@@ -546,14 +931,15 @@ static void idt82p33_ptp_clock_unregister_all(struct idt82p33 *idt82p33)
u8 i;
for (i = 0; i < MAX_PHC_PLL; i++) {
-
channel = &idt82p33->channel[i];
-
+ cancel_delayed_work_sync(&channel->adjtime_work);
if (channel->ptp_clock)
ptp_clock_unregister(channel->ptp_clock);
}
}
+
+
static int idt82p33_enable(struct ptp_clock_info *ptp,
struct ptp_clock_request *rq, int on)
{
@@ -564,7 +950,8 @@ static int idt82p33_enable(struct ptp_clock_info *ptp,
mutex_lock(idt82p33->lock);
- if (rq->type == PTP_CLK_REQ_PEROUT) {
+ switch (rq->type) {
+ case PTP_CLK_REQ_PEROUT:
if (!on)
err = idt82p33_perout_enable(channel, false,
&rq->perout);
@@ -575,6 +962,12 @@ static int idt82p33_enable(struct ptp_clock_info *ptp,
else
err = idt82p33_perout_enable(channel, true,
&rq->perout);
+ break;
+ case PTP_CLK_REQ_EXTTS:
+ err = idt82p33_extts_enable(channel, rq, on);
+ break;
+ default:
+ break;
}
mutex_unlock(idt82p33->lock);
@@ -634,13 +1027,22 @@ static int idt82p33_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
struct idt82p33 *idt82p33 = channel->idt82p33;
int err;
+ if (channel->ddco == true)
+ return 0;
+
+ if (scaled_ppm == channel->current_freq)
+ return 0;
+
mutex_lock(idt82p33->lock);
err = _idt82p33_adjfine(channel, scaled_ppm);
+
+ if (err == 0)
+ channel->current_freq = scaled_ppm;
mutex_unlock(idt82p33->lock);
+
if (err)
dev_err(idt82p33->dev,
"Failed in %s with err %d!\n", __func__, err);
-
return err;
}
@@ -651,14 +1053,21 @@ static int idt82p33_adjtime(struct ptp_clock_info *ptp, s64 delta_ns)
struct idt82p33 *idt82p33 = channel->idt82p33;
int err;
+ if (channel->ddco == true)
+ return -EBUSY;
+
mutex_lock(idt82p33->lock);
if (abs(delta_ns) < phase_snap_threshold) {
+ err = idt82p33_start_ddco(channel, delta_ns);
mutex_unlock(idt82p33->lock);
- return 0;
+ return err;
}
- err = _idt82p33_adjtime(channel, delta_ns);
+ /* Use more accurate internal 1pps triggered write first */
+ err = _idt82p33_adjtime_internal_triggered(channel, delta_ns);
+ if (err && delta_ns > IMMEDIATE_SNAP_THRESHOLD_NS)
+ err = _idt82p33_adjtime_immediate(channel, delta_ns);
mutex_unlock(idt82p33->lock);
@@ -703,8 +1112,10 @@ static int idt82p33_settime(struct ptp_clock_info *ptp,
return err;
}
-static int idt82p33_channel_init(struct idt82p33_channel *channel, int index)
+static int idt82p33_channel_init(struct idt82p33 *idt82p33, u32 index)
{
+ struct idt82p33_channel *channel = &idt82p33->channel[index];
+
switch (index) {
case 0:
channel->dpll_tod_cnfg = DPLL1_TOD_CNFG;
@@ -730,22 +1141,60 @@ static int idt82p33_channel_init(struct idt82p33_channel *channel, int index)
return -EINVAL;
}
- channel->current_freq_ppb = 0;
+ channel->plln = index;
+ channel->current_freq = 0;
+ channel->idt82p33 = idt82p33;
+ INIT_DELAYED_WORK(&channel->adjtime_work, idt82p33_adjtime_workaround);
return 0;
}
-static void idt82p33_caps_init(struct ptp_clock_info *caps)
+static int idt82p33_verify_pin(struct ptp_clock_info *ptp, unsigned int pin,
+ enum ptp_pin_function func, unsigned int chan)
{
+ switch (func) {
+ case PTP_PF_NONE:
+ case PTP_PF_EXTTS:
+ break;
+ case PTP_PF_PEROUT:
+ case PTP_PF_PHYSYNC:
+ return -1;
+ }
+ return 0;
+}
+
+static void idt82p33_caps_init(u32 index, struct ptp_clock_info *caps,
+ struct ptp_pin_desc *pin_cfg, u8 max_pins)
+{
+ struct ptp_pin_desc *ppd;
+ int i;
+
caps->owner = THIS_MODULE;
caps->max_adj = DCO_MAX_PPB;
- caps->n_per_out = 11;
- caps->adjphase = idt82p33_adjwritephase;
+ caps->n_per_out = MAX_PER_OUT;
+ caps->n_ext_ts = MAX_PHC_PLL,
+ caps->n_pins = max_pins,
+ caps->adjphase = idt82p33_adjwritephase,
caps->adjfine = idt82p33_adjfine;
caps->adjtime = idt82p33_adjtime;
caps->gettime64 = idt82p33_gettime;
caps->settime64 = idt82p33_settime;
caps->enable = idt82p33_enable;
+ caps->verify = idt82p33_verify_pin;
+ caps->do_aux_work = idt82p33_work_handler;
+
+ snprintf(caps->name, sizeof(caps->name), "IDT 82P33 PLL%u", index);
+
+ caps->pin_config = pin_cfg;
+
+ for (i = 0; i < max_pins; ++i) {
+ ppd = &pin_cfg[i];
+
+ ppd->index = i;
+ ppd->func = PTP_PF_NONE;
+ ppd->chan = index;
+ snprintf(ppd->name, sizeof(ppd->name), "in%d", 12 + i);
+ }
}
static int idt82p33_enable_channel(struct idt82p33 *idt82p33, u32 index)
@@ -758,7 +1207,7 @@ static int idt82p33_enable_channel(struct idt82p33 *idt82p33, u32 index)
channel = &idt82p33->channel[index];
- err = idt82p33_channel_init(channel, index);
+ err = idt82p33_channel_init(idt82p33, index);
if (err) {
dev_err(idt82p33->dev,
"Channel_init failed in %s with err %d!\n",
@@ -766,11 +1215,8 @@ static int idt82p33_enable_channel(struct idt82p33 *idt82p33, u32 index)
return err;
}
- channel->idt82p33 = idt82p33;
-
- idt82p33_caps_init(&channel->caps);
- snprintf(channel->caps.name, sizeof(channel->caps.name),
- "IDT 82P33 PLL%u", index);
+ idt82p33_caps_init(index, &channel->caps,
+ pin_config[index], MAX_TRIG_CLK);
channel->ptp_clock = ptp_clock_register(&channel->caps, NULL);
@@ -805,17 +1251,46 @@ static int idt82p33_enable_channel(struct idt82p33 *idt82p33, u32 index)
return 0;
}
+static int idt82p33_reset(struct idt82p33 *idt82p33, bool cold)
+{
+ int err;
+ u8 cfg = SOFT_RESET_EN;
+
+ if (cold == true)
+ goto cold_reset;
+
+ err = idt82p33_read(idt82p33, REG_SOFT_RESET, &cfg, sizeof(cfg));
+ if (err) {
+ dev_err(idt82p33->dev,
+ "Soft reset failed with err %d!\n", err);
+ return err;
+ }
+
+ cfg |= SOFT_RESET_EN;
+
+cold_reset:
+ err = idt82p33_write(idt82p33, REG_SOFT_RESET, &cfg, sizeof(cfg));
+ if (err)
+ dev_err(idt82p33->dev,
+ "Cold reset failed with err %d!\n", err);
+ return err;
+}
+
static int idt82p33_load_firmware(struct idt82p33 *idt82p33)
{
+ char fname[128] = FW_FILENAME;
const struct firmware *fw;
struct idt82p33_fwrc *rec;
u8 loaddr, page, val;
int err;
s32 len;
- dev_dbg(idt82p33->dev, "requesting firmware '%s'\n", FW_FILENAME);
+ if (firmware) /* module parameter */
+ snprintf(fname, sizeof(fname), "%s", firmware);
- err = request_firmware(&fw, FW_FILENAME, idt82p33->dev);
+ dev_info(idt82p33->dev, "requesting firmware '%s'\n", fname);
+
+ err = request_firmware(&fw, fname, idt82p33->dev);
if (err) {
dev_err(idt82p33->dev,
@@ -863,6 +1338,46 @@ out:
return err;
}
+static void idt82p33_extts_check(struct work_struct *work)
+{
+ struct idt82p33 *idt82p33 = container_of(work, struct idt82p33,
+ extts_work.work);
+ struct idt82p33_channel *channel;
+ int err;
+ u8 mask;
+ int i;
+
+ if (idt82p33->extts_mask == 0)
+ return;
+
+ mutex_lock(idt82p33->lock);
+
+ for (i = 0; i < MAX_PHC_PLL; i++) {
+ mask = 1 << i;
+
+ if ((idt82p33->extts_mask & mask) == 0)
+ continue;
+
+ err = idt82p33_extts_check_channel(idt82p33, i);
+
+ if (err == 0) {
+ /* In single-shot mode the event fired, so clear the mask;
+ * otherwise re-arm the trigger for the next edge.
+ */
+ if (idt82p33->extts_single_shot) {
+ idt82p33->extts_mask &= ~mask;
+ } else {
+ /* Re-arm */
+ channel = &idt82p33->channel[i];
+ arm_tod_read_with_trigger(channel, channel->tod_trigger);
+ }
+ }
+ }
+
+ if (idt82p33->extts_mask)
+ schedule_delayed_work(&idt82p33->extts_work,
+ msecs_to_jiffies(EXTTS_PERIOD_MS));
+
+ mutex_unlock(idt82p33->lock);
+}
static int idt82p33_probe(struct platform_device *pdev)
{
@@ -885,25 +1400,33 @@ static int idt82p33_probe(struct platform_device *pdev)
idt82p33->pll_mask = DEFAULT_PLL_MASK;
idt82p33->channel[0].output_mask = DEFAULT_OUTPUT_MASK_PLL0;
idt82p33->channel[1].output_mask = DEFAULT_OUTPUT_MASK_PLL1;
+ idt82p33->extts_mask = 0;
+ INIT_DELAYED_WORK(&idt82p33->extts_work, idt82p33_extts_check);
mutex_lock(idt82p33->lock);
- err = idt82p33_load_firmware(idt82p33);
+ /* cold reset before loading firmware */
+ idt82p33_reset(idt82p33, true);
+ err = idt82p33_load_firmware(idt82p33);
if (err)
dev_warn(idt82p33->dev,
"loading firmware failed with %d\n", err);
+ /* soft reset after loading firmware */
+ idt82p33_reset(idt82p33, false);
+
if (idt82p33->pll_mask) {
for (i = 0; i < MAX_PHC_PLL; i++) {
- if (idt82p33->pll_mask & (1 << i)) {
+ if (idt82p33->pll_mask & (1 << i))
err = idt82p33_enable_channel(idt82p33, i);
- if (err) {
- dev_err(idt82p33->dev,
- "Failed in %s with err %d!\n",
- __func__, err);
- break;
- }
+ else
+ err = idt82p33_channel_init(idt82p33, i);
+ if (err) {
+ dev_err(idt82p33->dev,
+ "Failed in %s with err %d!\n",
+ __func__, err);
+ break;
}
}
} else {
@@ -928,6 +1451,8 @@ static int idt82p33_remove(struct platform_device *pdev)
{
struct idt82p33 *idt82p33 = platform_get_drvdata(pdev);
+ cancel_delayed_work_sync(&idt82p33->extts_work);
+
idt82p33_ptp_clock_unregister_all(idt82p33);
return 0;
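
Once the driver exposes n_ext_ts and pins as above, the external timestamps it polls for can be consumed through the standard PTP character device. A hedged user-space sketch follows; /dev/ptp0 and channel 0 are placeholders, and assigning the pin function with PTP_PIN_SETFUNC beforehand is assumed but not shown.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ptp_clock.h>

int main(void)
{
	struct ptp_extts_request req;
	struct ptp_extts_event ev;
	int fd = open("/dev/ptp0", O_RDWR);

	if (fd < 0)
		return 1;

	memset(&req, 0, sizeof(req));
	req.index = 0;				/* extts channel */
	req.flags = PTP_ENABLE_FEATURE | PTP_RISING_EDGE;
	if (ioctl(fd, PTP_EXTTS_REQUEST, &req))
		return 1;

	/* Each read returns one timestamped edge */
	if (read(fd, &ev, sizeof(ev)) == sizeof(ev))
		printf("extts %u: %lld.%09u\n", ev.index,
		       (long long)ev.t.sec, ev.t.nsec);

	close(fd);
	return 0;
}
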
diff --git a/drivers/ptp/ptp_idt82p33.h b/drivers/ptp/ptp_idt82p33.h
index 0ea1c35c0f9f..8fcb0b17d207 100644
--- a/drivers/ptp/ptp_idt82p33.h
+++ b/drivers/ptp/ptp_idt82p33.h
@@ -13,6 +13,8 @@
#define FW_FILENAME "idt82p33xxx.bin"
#define MAX_PHC_PLL (2)
+#define MAX_TRIG_CLK (3)
+#define MAX_PER_OUT (11)
#define TOD_BYTE_COUNT (10)
#define DCO_MAX_PPB (92000)
#define MAX_MEASURMENT_COUNT (5)
@@ -20,7 +22,6 @@
#define IMMEDIATE_SNAP_THRESHOLD_NS (50000)
#define DDCO_THRESHOLD_NS (5)
#define IDT82P33_MAX_WRITE_COUNT (512)
-#define PEROUT_ENABLE_OUTPUT_MASK (0xdeadbeef)
#define PLLMASK_ADDR_HI 0xFF
#define PLLMASK_ADDR_LO 0xA5
@@ -60,8 +61,18 @@ struct idt82p33_channel {
struct ptp_clock *ptp_clock;
struct idt82p33 *idt82p33;
enum pll_mode pll_mode;
- s32 current_freq_ppb;
+ /* Workaround for TOD-to-output alignment issue */
+ struct delayed_work adjtime_work;
+ s32 current_freq;
+ /* double dco mode */
+ bool ddco;
u8 output_mask;
+ /* last input trigger for extts */
+ u8 tod_trigger;
+ bool discard_next_extts;
+ u8 plln;
+ /* remember last tod_sts for extts */
+ u8 extts_tod_sts[TOD_BYTE_COUNT];
u16 dpll_tod_cnfg;
u16 dpll_tod_trigger;
u16 dpll_tod_sts;
@@ -76,6 +87,12 @@ struct idt82p33 {
struct idt82p33_channel channel[MAX_PHC_PLL];
struct device *dev;
u8 pll_mask;
+ /* Mask of channels being polled for external time stamps */
+ u8 extts_mask;
+ bool extts_single_shot;
+ struct delayed_work extts_work;
+ /* Remember the ptp channel to report extts */
+ struct idt82p33_channel *event_channel[MAX_PHC_PLL];
/* Mutex to protect operations from being interrupted */
struct mutex *lock;
struct regmap *regmap;
diff --git a/include/trace/events/skb.h b/include/trace/events/skb.h
index 50a974f7dfb4..25ab1ff9423d 100644
--- a/include/trace/events/skb.h
+++ b/include/trace/events/skb.h
@@ -42,7 +42,7 @@ TRACE_EVENT(kfree_skb,
__entry->reason = reason;
),
- TP_printk("skbaddr=%p protocol=%u location=%p reason: %s",
+ TP_printk("skbaddr=%p protocol=%u location=%pS reason: %s",
__entry->skbaddr, __entry->protocol, __entry->location,
__print_symbolic(__entry->reason,
DEFINE_DROP_REASON(FN, FNe)))
diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c
index f2ba5787055a..3ae3399f3651 100644
--- a/lib/test_rhashtable.c
+++ b/lib/test_rhashtable.c
@@ -434,7 +434,7 @@ out_free:
static int __init test_rhashtable_max(struct test_obj *array,
unsigned int entries)
{
- unsigned int i, insert_retries = 0;
+ unsigned int i;
int err;
test_rht_params.max_size = roundup_pow_of_two(entries / 8);
@@ -447,9 +447,7 @@ static int __init test_rhashtable_max(struct test_obj *array,
obj->value.id = i * 2;
err = insert_retry(&ht, obj, test_rht_params);
- if (err > 0)
- insert_retries += err;
- else if (err)
+ if (err < 0)
return err;
}
diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c
index 99272a67525c..c2f1a542e6fa 100644
--- a/net/ethtool/ioctl.c
+++ b/net/ethtool/ioctl.c
@@ -2013,7 +2013,8 @@ static int ethtool_phys_id(struct net_device *dev, void __user *useraddr)
} else {
/* Driver expects to be called at twice the frequency in rc */
int n = rc * 2, interval = HZ / n;
- u64 count = n * id.data, i = 0;
+ u64 count = mul_u32_u32(n, id.data);
+ u64 i = 0;
do {
rtnl_lock();
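
The ethtool change above avoids a 32-bit multiply overflow when computing the total iteration count for a long PHYS_ID request: n * id.data is evaluated in 32 bits and only then widened, whereas mul_u32_u32() widens first. A hedged, user-space demonstration, with the helper re-implemented the way the generic kernel version behaves:

#include <stdio.h>
#include <stdint.h>

static uint64_t mul_u32_u32(uint32_t a, uint32_t b)
{
	return (uint64_t)a * b;		/* widen before multiplying */
}

int main(void)
{
	uint32_t n = 4, data = 2000000000;	/* 2e9 second request */

	printf("32-bit product: %llu\n",
	       (unsigned long long)(uint64_t)(n * data));	/* wraps */
	printf("mul_u32_u32():  %llu\n",
	       (unsigned long long)mul_u32_u32(n, data));	/* 8e9 */
	return 0;
}
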
diff --git a/tools/testing/selftests/drivers/net/bonding/Makefile b/tools/testing/selftests/drivers/net/bonding/Makefile
index 6b8d2e2f23c2..0f3921908b07 100644
--- a/tools/testing/selftests/drivers/net/bonding/Makefile
+++ b/tools/testing/selftests/drivers/net/bonding/Makefile
@@ -5,7 +5,9 @@ TEST_PROGS := \
bond-arp-interval-causes-panic.sh \
bond-break-lacpdu-tx.sh \
bond-lladdr-target.sh \
- dev_addr_lists.sh
+ dev_addr_lists.sh \
+ mode-1-recovery-updelay.sh \
+ mode-2-recovery-updelay.sh
TEST_FILES := \
lag_lib.sh \
diff --git a/tools/testing/selftests/drivers/net/bonding/lag_lib.sh b/tools/testing/selftests/drivers/net/bonding/lag_lib.sh
index 16c7fb858ac1..2a268b17b61f 100644
--- a/tools/testing/selftests/drivers/net/bonding/lag_lib.sh
+++ b/tools/testing/selftests/drivers/net/bonding/lag_lib.sh
@@ -1,6 +1,8 @@
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
+NAMESPACES=""
+
# Test that a link aggregation device (bonding, team) removes the hardware
# addresses that it adds on its underlying devices.
test_LAG_cleanup()
@@ -59,3 +61,107 @@ test_LAG_cleanup()
log_test "$driver cleanup mode $mode"
}
+
+# Build a generic 2 node net namespace with 2 connections
+# between the namespaces
+#
+# +-----------+ +-----------+
+# | node1 | | node2 |
+# | | | |
+# | | | |
+# | eth0 +-------+ eth0 |
+# | | | |
+# | eth1 +-------+ eth1 |
+# | | | |
+# +-----------+ +-----------+
+lag_setup2x2()
+{
+ local state=${1:-down}
+ local namespaces="lag_node1 lag_node2"
+
+ # create namespaces
+ for n in ${namespaces}; do
+ ip netns add ${n}
+ done
+
+ # wire up namespaces
+ ip link add name lag1 type veth peer name lag1-end
+ ip link set dev lag1 netns lag_node1 $state name eth0
+ ip link set dev lag1-end netns lag_node2 $state name eth0
+
+ ip link add name lag1 type veth peer name lag1-end
+ ip link set dev lag1 netns lag_node1 $state name eth1
+ ip link set dev lag1-end netns lag_node2 $state name eth1
+
+ NAMESPACES="${namespaces}"
+}
+
+# cleanup all lag related namespaces and remove the bonding module
+lag_cleanup()
+{
+ for n in ${NAMESPACES}; do
+ ip netns delete ${n} >/dev/null 2>&1 || true
+ done
+ modprobe -r bonding
+}
+
+SWITCH="lag_node1"
+CLIENT="lag_node2"
+CLIENTIP="172.20.2.1"
+SWITCHIP="172.20.2.2"
+
+lag_setup_network()
+{
+ lag_setup2x2 "down"
+
+ # create switch
+ ip netns exec ${SWITCH} ip link add br0 up type bridge
+ ip netns exec ${SWITCH} ip link set eth0 master br0 up
+ ip netns exec ${SWITCH} ip link set eth1 master br0 up
+ ip netns exec ${SWITCH} ip addr add ${SWITCHIP}/24 dev br0
+}
+
+lag_reset_network()
+{
+ ip netns exec ${CLIENT} ip link del bond0
+ ip netns exec ${SWITCH} ip link set eth0 up
+ ip netns exec ${SWITCH} ip link set eth1 up
+}
+
+create_bond()
+{
+ # create client
+ ip netns exec ${CLIENT} ip link set eth0 down
+ ip netns exec ${CLIENT} ip link set eth1 down
+
+ ip netns exec ${CLIENT} ip link add bond0 type bond $@
+ ip netns exec ${CLIENT} ip link set eth0 master bond0
+ ip netns exec ${CLIENT} ip link set eth1 master bond0
+ ip netns exec ${CLIENT} ip link set bond0 up
+ ip netns exec ${CLIENT} ip addr add ${CLIENTIP}/24 dev bond0
+}
+
+test_bond_recovery()
+{
+ RET=0
+
+ create_bond $@
+
+ # verify connectivity
+ ip netns exec ${CLIENT} ping ${SWITCHIP} -c 2 >/dev/null 2>&1
+ check_err $? "No connectivity"
+
+ # force the links of the bond down
+ ip netns exec ${SWITCH} ip link set eth0 down
+ sleep 2
+ ip netns exec ${SWITCH} ip link set eth0 up
+ ip netns exec ${SWITCH} ip link set eth1 down
+
+ # re-verify connectivity
+ ip netns exec ${CLIENT} ping ${SWITCHIP} -c 2 >/dev/null 2>&1
+
+ local rc=$?
+ check_err $rc "Bond failed to recover"
+ log_test "$1 ($2) bond recovery"
+ lag_reset_network
+}
diff --git a/tools/testing/selftests/drivers/net/bonding/mode-1-recovery-updelay.sh b/tools/testing/selftests/drivers/net/bonding/mode-1-recovery-updelay.sh
new file mode 100755
index 000000000000..ad4c845a4ac7
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/bonding/mode-1-recovery-updelay.sh
@@ -0,0 +1,45 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+
+# Regression Test:
+# When the bond is configured with down/updelay and the link state of
+# slave members flaps if there are no remaining members up the bond
+# should immediately select a member to bring up. (from bonding.txt
+# section 13.1 paragraph 4)
+#
+# +-------------+ +-----------+
+# | client | | switch |
+# | | | |
+# | +--------| link1 |-----+ |
+# | | +-------+ | |
+# | | | | | |
+# | | +-------+ | |
+# | | bond | link2 | Br0 | |
+# +-------------+ +-----------+
+# 172.20.2.1 172.20.2.2
+
+
+REQUIRE_MZ=no
+REQUIRE_JQ=no
+NUM_NETIFS=0
+lib_dir=$(dirname "$0")
+source "$lib_dir"/net_forwarding_lib.sh
+source "$lib_dir"/lag_lib.sh
+
+cleanup()
+{
+ lag_cleanup
+}
+
+trap cleanup 0 1 2
+
+lag_setup_network
+test_bond_recovery mode 1 miimon 100 updelay 0
+test_bond_recovery mode 1 miimon 100 updelay 200
+test_bond_recovery mode 1 miimon 100 updelay 500
+test_bond_recovery mode 1 miimon 100 updelay 1000
+test_bond_recovery mode 1 miimon 100 updelay 2000
+test_bond_recovery mode 1 miimon 100 updelay 5000
+test_bond_recovery mode 1 miimon 100 updelay 10000
+
+exit "$EXIT_STATUS"
diff --git a/tools/testing/selftests/drivers/net/bonding/mode-2-recovery-updelay.sh b/tools/testing/selftests/drivers/net/bonding/mode-2-recovery-updelay.sh
new file mode 100755
index 000000000000..2330d37453f9
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/bonding/mode-2-recovery-updelay.sh
@@ -0,0 +1,45 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+
+# Regression Test:
+# When the bond is configured with down/updelay and the link state of
+# slave members flaps if there are no remaining members up the bond
+# should immediately select a member to bring up. (from bonding.txt
+# section 13.1 paragraph 4)
+#
+# +-------------+ +-----------+
+# | client | | switch |
+# | | | |
+# | +--------| link1 |-----+ |
+# | | +-------+ | |
+# | | | | | |
+# | | +-------+ | |
+# | | bond | link2 | Br0 | |
+# +-------------+ +-----------+
+# 172.20.2.1 172.20.2.2
+
+
+REQUIRE_MZ=no
+REQUIRE_JQ=no
+NUM_NETIFS=0
+lib_dir=$(dirname "$0")
+source "$lib_dir"/net_forwarding_lib.sh
+source "$lib_dir"/lag_lib.sh
+
+cleanup()
+{
+ lag_cleanup
+}
+
+trap cleanup 0 1 2
+
+lag_setup_network
+test_bond_recovery mode 2 miimon 100 updelay 0
+test_bond_recovery mode 2 miimon 100 updelay 200
+test_bond_recovery mode 2 miimon 100 updelay 500
+test_bond_recovery mode 2 miimon 100 updelay 1000
+test_bond_recovery mode 2 miimon 100 updelay 2000
+test_bond_recovery mode 2 miimon 100 updelay 5000
+test_bond_recovery mode 2 miimon 100 updelay 10000
+
+exit "$EXIT_STATUS"
diff --git a/tools/testing/selftests/drivers/net/bonding/settings b/tools/testing/selftests/drivers/net/bonding/settings
index 867e118223cd..6091b45d226b 100644
--- a/tools/testing/selftests/drivers/net/bonding/settings
+++ b/tools/testing/selftests/drivers/net/bonding/settings
@@ -1 +1 @@
-timeout=60
+timeout=120