-rw-r--r--  arch/x86/include/asm/hash.h                          |   7
-rw-r--r--  arch/x86/lib/Makefile                                |   2
-rw-r--r--  arch/x86/lib/hash.c                                  |  88
-rw-r--r--  drivers/net/bonding/bond_3ad.c                       |  54
-rw-r--r--  drivers/net/bonding/bond_alb.c                       |  34
-rw-r--r--  drivers/net/bonding/bond_main.c                      | 157
-rw-r--r--  drivers/net/bonding/bond_netlink.c                   | 157
-rw-r--r--  drivers/net/bonding/bond_options.c                   | 326
-rw-r--r--  drivers/net/bonding/bond_sysfs.c                     | 353
-rw-r--r--  drivers/net/bonding/bonding.h                        |  15
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e.h               |   4
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_debugfs.c       |   8
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_diag.c          |   2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ethtool.c       | 146
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_hmc.c           |   2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c          | 364
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_type.h          |   4
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c   |  29
-rw-r--r--  drivers/net/ethernet/marvell/mv643xx_eth.c           |   4
-rw-r--r--  drivers/net/ethernet/sfc/ef10.c                      | 271
-rw-r--r--  drivers/net/ethernet/sfc/efx.c                       | 201
-rw-r--r--  drivers/net/ethernet/sfc/efx.h                       |   2
-rw-r--r--  drivers/net/ethernet/sfc/enum.h                      |   1
-rw-r--r--  drivers/net/ethernet/sfc/ethtool.c                   |   2
-rw-r--r--  drivers/net/ethernet/sfc/falcon.c                    |  12
-rw-r--r--  drivers/net/ethernet/sfc/farch.c                     |   2
-rw-r--r--  drivers/net/ethernet/sfc/mcdi.c                      | 449
-rw-r--r--  drivers/net/ethernet/sfc/mcdi.h                      |  21
-rw-r--r--  drivers/net/ethernet/sfc/mcdi_mon.c                  |  76
-rw-r--r--  drivers/net/ethernet/sfc/mcdi_pcol.h                 | 733
-rw-r--r--  drivers/net/ethernet/sfc/mcdi_port.c                 |  89
-rw-r--r--  drivers/net/ethernet/sfc/net_driver.h                |  62
-rw-r--r--  drivers/net/ethernet/sfc/nic.c                       |  12
-rw-r--r--  drivers/net/ethernet/sfc/nic.h                       |  19
-rw-r--r--  drivers/net/ethernet/sfc/ptp.c                       | 797
-rw-r--r--  drivers/net/ethernet/sfc/rx.c                        |  27
-rw-r--r--  drivers/net/ethernet/sfc/selftest.c                  |   2
-rw-r--r--  drivers/net/ethernet/sfc/selftest.h                  |   1
-rw-r--r--  drivers/net/ethernet/sfc/siena.c                     |  74
-rw-r--r--  drivers/net/fddi/skfp/fplustm.c                      |  24
-rw-r--r--  drivers/net/fddi/skfp/h/supern_2.h                   |  96
-rw-r--r--  drivers/net/fddi/skfp/smt.c                          |   2
-rw-r--r--  drivers/net/fddi/skfp/srf.c                          |  24
-rw-r--r--  drivers/net/phy/marvell.c                            |  22
-rw-r--r--  drivers/net/phy/phy.c                                |   6
-rw-r--r--  drivers/net/phy/phy_device.c                         |  27
-rw-r--r--  include/asm-generic/hash.h                           |   9
-rw-r--r--  include/linux/hash.h                                 |  36
-rw-r--r--  include/linux/netdevice.h                            |   1
-rw-r--r--  include/linux/phy.h                                  |   2
-rw-r--r--  include/net/sctp/structs.h                           |  59
-rw-r--r--  include/uapi/linux/if_link.h                         |   8
-rw-r--r--  include/uapi/linux/netconf.h                         |   1
-rw-r--r--  lib/Makefile                                         |   2
-rw-r--r--  lib/hash.c                                           |  38
-rw-r--r--  net/core/dev.c                                       |  21
-rw-r--r--  net/ipv4/devinet.c                                   |  41
-rw-r--r--  net/ipv6/ip6_offload.c                               |   9
-rw-r--r--  net/openvswitch/flow_table.c                         |   4
-rw-r--r--  net/packet/af_packet.c                               |   2
-rw-r--r--  net/sched/sch_api.c                                  |   5
-rw-r--r--  net/sched/sch_generic.c                              |   2
-rw-r--r--  net/tipc/core.h                                      |   2
-rw-r--r--  net/tipc/name_table.c                                |   3
-rw-r--r--  net/tipc/port.c                                      |   9
-rw-r--r--  net/tipc/socket.c                                    |  33
66 files changed, 3791 insertions, 1306 deletions
diff --git a/arch/x86/include/asm/hash.h b/arch/x86/include/asm/hash.h
new file mode 100644
index 000000000000..e8c58f88b1d4
--- /dev/null
+++ b/arch/x86/include/asm/hash.h
@@ -0,0 +1,7 @@
+#ifndef _ASM_X86_HASH_H
+#define _ASM_X86_HASH_H
+
+struct fast_hash_ops;
+extern void setup_arch_fast_hash(struct fast_hash_ops *ops);
+
+#endif /* _ASM_X86_HASH_H */
diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
index 992d63bb154f..eabcb6e6a900 100644
--- a/arch/x86/lib/Makefile
+++ b/arch/x86/lib/Makefile
@@ -24,7 +24,7 @@ lib-$(CONFIG_SMP) += rwlock.o
lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
lib-$(CONFIG_INSTRUCTION_DECODER) += insn.o inat.o
-obj-y += msr.o msr-reg.o msr-reg-export.o
+obj-y += msr.o msr-reg.o msr-reg-export.o hash.o
ifeq ($(CONFIG_X86_32),y)
obj-y += atomic64_32.o
diff --git a/arch/x86/lib/hash.c b/arch/x86/lib/hash.c
new file mode 100644
index 000000000000..3056702e81fb
--- /dev/null
+++ b/arch/x86/lib/hash.c
@@ -0,0 +1,88 @@
+/*
+ * Some portions derived from code covered by the following notice:
+ *
+ * Copyright (c) 2010-2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/hash.h>
+
+#include <asm/processor.h>
+#include <asm/cpufeature.h>
+#include <asm/hash.h>
+
+static inline u32 crc32_u32(u32 crc, u32 val)
+{
+ asm ("crc32l %1,%0\n" : "+r" (crc) : "rm" (val));
+ return crc;
+}
+
+static u32 intel_crc4_2_hash(const void *data, u32 len, u32 seed)
+{
+ const u32 *p32 = (const u32 *) data;
+ u32 i, tmp = 0;
+
+ for (i = 0; i < len / 4; i++)
+ seed = crc32_u32(seed, *p32++);
+
+ switch (3 - (len & 0x03)) {
+ case 0:
+ tmp |= *((const u8 *) p32 + 2) << 16;
+ /* fallthrough */
+ case 1:
+ tmp |= *((const u8 *) p32 + 1) << 8;
+ /* fallthrough */
+ case 2:
+ tmp |= *((const u8 *) p32);
+ seed = crc32_u32(seed, tmp);
+ default:
+ break;
+ }
+
+ return seed;
+}
+
+static u32 intel_crc4_2_hash2(const u32 *data, u32 len, u32 seed)
+{
+ const u32 *p32 = (const u32 *) data;
+ u32 i;
+
+ for (i = 0; i < len; i++)
+ seed = crc32_u32(seed, *p32++);
+
+ return seed;
+}
+
+void setup_arch_fast_hash(struct fast_hash_ops *ops)
+{
+ if (cpu_has_xmm4_2) {
+ ops->hash = intel_crc4_2_hash;
+ ops->hash2 = intel_crc4_2_hash2;
+ }
+}
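
For reference, the generic side of this interface lives in include/linux/hash.h and lib/hash.c (both added in the diffstat above). A minimal sketch of how the generic code might consume setup_arch_fast_hash(), assuming jhash/jhash2 as the software fallback; apart from setup_arch_fast_hash() and struct fast_hash_ops, the names below are illustrative, not quoted from this commit:

#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/hash.h>

/* Software fallback; an arch may override either op at early boot. */
static struct fast_hash_ops arch_hash_ops = {
	.hash	= jhash,
	.hash2	= jhash2,
};

u32 arch_fast_hash(const void *data, u32 len, u32 seed)
{
	return arch_hash_ops.hash(data, len, seed);
}

static int __init hashlib_init(void)
{
	/* On x86 this installs intel_crc4_2_hash when SSE4.2 is present. */
	setup_arch_fast_hash(&arch_hash_ops);
	return 0;
}
early_initcall(hashlib_init);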
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 187b1b7772ef..58c2249a3324 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -147,11 +147,12 @@ static inline struct aggregator *__get_first_agg(struct port *port)
struct bonding *bond = __get_bond_by_port(port);
struct slave *first_slave;
- // If there's no bond for this port, or bond has no slaves
+ /* If there's no bond for this port, or bond has no slaves */
if (bond == NULL)
return NULL;
- first_slave = bond_first_slave(bond);
-
+ rcu_read_lock();
+ first_slave = bond_first_slave_rcu(bond);
+ rcu_read_unlock();
return first_slave ? &(SLAVE_AD_INFO(first_slave).aggregator) : NULL;
}
@@ -702,9 +703,13 @@ static struct aggregator *__get_active_agg(struct aggregator *aggregator)
struct list_head *iter;
struct slave *slave;
- bond_for_each_slave(bond, slave, iter)
- if (SLAVE_AD_INFO(slave).aggregator.is_active)
+ rcu_read_lock();
+ bond_for_each_slave_rcu(bond, slave, iter)
+ if (SLAVE_AD_INFO(slave).aggregator.is_active) {
+ rcu_read_unlock();
return &(SLAVE_AD_INFO(slave).aggregator);
+ }
+ rcu_read_unlock();
return NULL;
}
@@ -1471,7 +1476,8 @@ static void ad_agg_selection_logic(struct aggregator *agg)
active = __get_active_agg(agg);
best = (active && agg_device_up(active)) ? active : NULL;
- bond_for_each_slave(bond, slave, iter) {
+ rcu_read_lock();
+ bond_for_each_slave_rcu(bond, slave, iter) {
agg = &(SLAVE_AD_INFO(slave).aggregator);
agg->is_active = 0;
@@ -1505,7 +1511,7 @@ static void ad_agg_selection_logic(struct aggregator *agg)
active->is_active = 1;
}
- // if there is new best aggregator, activate it
+ /* if there is new best aggregator, activate it */
if (best) {
pr_debug("best Agg=%d; P=%d; a k=%d; p k=%d; Ind=%d; Act=%d\n",
best->aggregator_identifier, best->num_of_ports,
@@ -1516,7 +1522,7 @@ static void ad_agg_selection_logic(struct aggregator *agg)
best->lag_ports, best->slave,
best->slave ? best->slave->dev->name : "NULL");
- bond_for_each_slave(bond, slave, iter) {
+ bond_for_each_slave_rcu(bond, slave, iter) {
agg = &(SLAVE_AD_INFO(slave).aggregator);
pr_debug("Agg=%d; P=%d; a k=%d; p k=%d; Ind=%d; Act=%d\n",
@@ -1526,10 +1532,11 @@ static void ad_agg_selection_logic(struct aggregator *agg)
agg->is_individual, agg->is_active);
}
- // check if any partner replys
+ /* check if any partner replies */
if (best->is_individual) {
pr_warning("%s: Warning: No 802.3ad response from the link partner for any adapters in the bond\n",
- best->slave ? best->slave->bond->dev->name : "NULL");
+ best->slave ?
+ best->slave->bond->dev->name : "NULL");
}
best->is_active = 1;
@@ -1541,7 +1548,7 @@ static void ad_agg_selection_logic(struct aggregator *agg)
best->partner_oper_aggregator_key,
best->is_individual, best->is_active);
- // disable the ports that were related to the former active_aggregator
+ /* disable the ports that were related to the former active_aggregator */
if (active) {
for (port = active->lag_ports; port;
port = port->next_port_in_aggregator) {
@@ -1565,6 +1572,8 @@ static void ad_agg_selection_logic(struct aggregator *agg)
}
}
+ rcu_read_unlock();
+
bond_3ad_set_carrier(bond);
}
@@ -2069,17 +2078,18 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
struct port *port;
read_lock(&bond->lock);
+ rcu_read_lock();
- //check if there are any slaves
+ /* check if there are any slaves */
if (!bond_has_slaves(bond))
goto re_arm;
- // check if agg_select_timer timer after initialize is timed out
+ /* check if the agg_select_timer has expired after initialization */
if (BOND_AD_INFO(bond).agg_select_timer && !(--BOND_AD_INFO(bond).agg_select_timer)) {
- slave = bond_first_slave(bond);
+ slave = bond_first_slave_rcu(bond);
port = slave ? &(SLAVE_AD_INFO(slave).port) : NULL;
- // select the active aggregator for the bond
+ /* select the active aggregator for the bond */
if (port) {
if (!port->slave) {
pr_warning("%s: Warning: bond's first port is uninitialized\n",
@@ -2093,8 +2103,8 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
bond_3ad_set_carrier(bond);
}
- // for each port run the state machines
- bond_for_each_slave(bond, slave, iter) {
+ /* for each port run the state machines */
+ bond_for_each_slave_rcu(bond, slave, iter) {
port = &(SLAVE_AD_INFO(slave).port);
if (!port->slave) {
pr_warning("%s: Warning: Found an uninitialized port\n",
@@ -2114,7 +2124,7 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
ad_mux_machine(port);
ad_tx_machine(port);
- // turn off the BEGIN bit, since we already handled it
+ /* turn off the BEGIN bit, since we already handled it */
if (port->sm_vars & AD_PORT_BEGIN)
port->sm_vars &= ~AD_PORT_BEGIN;
@@ -2122,9 +2132,9 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
}
re_arm:
- queue_delayed_work(bond->wq, &bond->ad_work, ad_delta_in_ticks);
-
+ rcu_read_unlock();
read_unlock(&bond->lock);
+ queue_delayed_work(bond->wq, &bond->ad_work, ad_delta_in_ticks);
}
/**
@@ -2303,7 +2313,9 @@ int bond_3ad_set_carrier(struct bonding *bond)
struct aggregator *active;
struct slave *first_slave;
- first_slave = bond_first_slave(bond);
+ rcu_read_lock();
+ first_slave = bond_first_slave_rcu(bond);
+ rcu_read_unlock();
if (!first_slave)
return 0;
active = __get_active_agg(&(SLAVE_AD_INFO(first_slave).aggregator));
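
The bond_3ad.c hunks above convert the slave-list walkers from bond->lock to RCU. The pattern they rely on, shown as a hypothetical helper (count_active_aggs is illustrative, not part of the commit):

static int count_active_aggs(struct bonding *bond)
{
	struct list_head *iter;
	struct slave *slave;
	int n = 0;

	rcu_read_lock();
	bond_for_each_slave_rcu(bond, slave, iter)
		if (SLAVE_AD_INFO(slave).aggregator.is_active)
			n++;
	rcu_read_unlock();

	/* slave pointers from the walk are stale once the lock is dropped */
	return n;
}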
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 2250b063ab89..759ddeebe390 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -469,7 +469,7 @@ static void rlb_teach_disabled_mac_on_primary(struct bonding *bond, u8 addr[])
/* slave being removed should not be active at this point
*
- * Caller must hold bond lock for read
+ * Caller must hold rtnl.
*/
static void rlb_clear_slave(struct bonding *bond, struct slave *slave)
{
@@ -815,7 +815,7 @@ static void rlb_rebalance(struct bonding *bond)
for (; hash_index != RLB_NULL_INDEX;
hash_index = client_info->used_next) {
client_info = &(bond_info->rx_hashtbl[hash_index]);
- assigned_slave = rlb_next_rx_slave(bond);
+ assigned_slave = __rlb_next_rx_slave(bond);
if (assigned_slave && (client_info->slave != assigned_slave)) {
client_info->slave = assigned_slave;
client_info->ntt = 1;
@@ -1494,14 +1494,14 @@ void bond_alb_monitor(struct work_struct *work)
struct list_head *iter;
struct slave *slave;
- read_lock(&bond->lock);
-
if (!bond_has_slaves(bond)) {
bond_info->tx_rebalance_counter = 0;
bond_info->lp_counter = 0;
goto re_arm;
}
+ rcu_read_lock();
+
bond_info->tx_rebalance_counter++;
bond_info->lp_counter++;
@@ -1514,7 +1514,7 @@ void bond_alb_monitor(struct work_struct *work)
*/
read_lock(&bond->curr_slave_lock);
- bond_for_each_slave(bond, slave, iter)
+ bond_for_each_slave_rcu(bond, slave, iter)
alb_send_learning_packets(slave, slave->dev->dev_addr);
read_unlock(&bond->curr_slave_lock);
@@ -1527,7 +1527,7 @@ void bond_alb_monitor(struct work_struct *work)
read_lock(&bond->curr_slave_lock);
- bond_for_each_slave(bond, slave, iter) {
+ bond_for_each_slave_rcu(bond, slave, iter) {
tlb_clear_slave(bond, slave, 1);
if (slave == bond->curr_active_slave) {
SLAVE_TLB_INFO(slave).load =
@@ -1551,11 +1551,9 @@ void bond_alb_monitor(struct work_struct *work)
* dev_set_promiscuity requires rtnl and
* nothing else. Avoid race with bond_close.
*/
- read_unlock(&bond->lock);
- if (!rtnl_trylock()) {
- read_lock(&bond->lock);
+ rcu_read_unlock();
+ if (!rtnl_trylock())
goto re_arm;
- }
bond_info->rlb_promisc_timeout_counter = 0;
@@ -1567,7 +1565,7 @@ void bond_alb_monitor(struct work_struct *work)
bond_info->primary_is_promisc = 0;
rtnl_unlock();
- read_lock(&bond->lock);
+ rcu_read_lock();
}
if (bond_info->rlb_rebalance) {
@@ -1589,11 +1587,9 @@ void bond_alb_monitor(struct work_struct *work)
}
}
}
-
+ rcu_read_unlock();
re_arm:
queue_delayed_work(bond->wq, &bond->alb_work, alb_delta_in_ticks);
-
- read_unlock(&bond->lock);
}
/* assumption: called before the slave is attached to the bond
@@ -1679,14 +1675,11 @@ void bond_alb_handle_link_change(struct bonding *bond, struct slave *slave, char
* If new_slave is NULL, caller must hold curr_slave_lock or
* bond->lock for write.
*
- * If new_slave is not NULL, caller must hold RTNL, bond->lock for
- * read and curr_slave_lock for write. Processing here may sleep, so
- * no other locks may be held.
+ * If new_slave is not NULL, caller must hold RTNL, curr_slave_lock
+ * for write. Processing here may sleep, so no other locks may be held.
*/
void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave)
__releases(&bond->curr_slave_lock)
- __releases(&bond->lock)
- __acquires(&bond->lock)
__acquires(&bond->curr_slave_lock)
{
struct slave *swap_slave;
@@ -1722,7 +1715,6 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
tlb_clear_slave(bond, new_slave, 1);
write_unlock_bh(&bond->curr_slave_lock);
- read_unlock(&bond->lock);
ASSERT_RTNL();
@@ -1748,11 +1740,9 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
/* swap mac address */
alb_swap_mac_addr(swap_slave, new_slave);
alb_fasten_mac_swap(bond, swap_slave, new_slave);
- read_lock(&bond->lock);
} else {
/* set the new_slave to the bond mac address */
alb_set_slave_mac_addr(new_slave, bond->dev->dev_addr);
- read_lock(&bond->lock);
alb_send_learning_packets(new_slave, bond->dev->dev_addr);
}
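
bond_alb_monitor() above also shows the trylock-and-re-arm idiom for delayed work that occasionally needs RTNL: blocking on rtnl_lock() from the work handler could deadlock against bond_close(), which cancels the work while holding RTNL. A stripped-down sketch (example_monitor is hypothetical):

static void example_monitor(struct work_struct *work)
{
	struct bonding *bond = container_of(work, struct bonding,
					    alb_work.work);

	if (!rtnl_trylock())
		goto re_arm;	/* lost the race; retry on the next tick */

	/* ... work that genuinely requires RTNL ... */

	rtnl_unlock();
re_arm:
	queue_delayed_work(bond->wq, &bond->alb_work, alb_delta_in_ticks);
}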
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 398e299ee1bd..c0456cc86610 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -591,33 +591,22 @@ static int bond_set_allmulti(struct bonding *bond, int inc)
* device and retransmit an IGMP JOIN request to the current active
* slave.
*/
-static void bond_resend_igmp_join_requests(struct bonding *bond)
+static void bond_resend_igmp_join_requests_delayed(struct work_struct *work)
{
+ struct bonding *bond = container_of(work, struct bonding,
+ mcast_work.work);
+
if (!rtnl_trylock()) {
queue_delayed_work(bond->wq, &bond->mcast_work, 1);
return;
}
call_netdevice_notifiers(NETDEV_RESEND_IGMP, bond->dev);
- rtnl_unlock();
- /* We use curr_slave_lock to protect against concurrent access to
- * igmp_retrans from multiple running instances of this function and
- * bond_change_active_slave
- */
- write_lock_bh(&bond->curr_slave_lock);
if (bond->igmp_retrans > 1) {
bond->igmp_retrans--;
queue_delayed_work(bond->wq, &bond->mcast_work, HZ/5);
}
- write_unlock_bh(&bond->curr_slave_lock);
-}
-
-static void bond_resend_igmp_join_requests_delayed(struct work_struct *work)
-{
- struct bonding *bond = container_of(work, struct bonding,
- mcast_work.work);
-
- bond_resend_igmp_join_requests(bond);
+ rtnl_unlock();
}
/* Flush bond's hardware addresses from slave
@@ -697,14 +686,12 @@ static void bond_set_dev_addr(struct net_device *bond_dev,
*
* Perform special MAC address swapping for fail_over_mac settings
*
- * Called with RTNL, bond->lock for read, curr_slave_lock for write_bh.
+ * Called with RTNL, curr_slave_lock for write_bh.
*/
static void bond_do_fail_over_mac(struct bonding *bond,
struct slave *new_active,
struct slave *old_active)
__releases(&bond->curr_slave_lock)
- __releases(&bond->lock)
- __acquires(&bond->lock)
__acquires(&bond->curr_slave_lock)
{
u8 tmp_mac[ETH_ALEN];
@@ -715,9 +702,7 @@ static void bond_do_fail_over_mac(struct bonding *bond,
case BOND_FOM_ACTIVE:
if (new_active) {
write_unlock_bh(&bond->curr_slave_lock);
- read_unlock(&bond->lock);
bond_set_dev_addr(bond->dev, new_active->dev);
- read_lock(&bond->lock);
write_lock_bh(&bond->curr_slave_lock);
}
break;
@@ -731,7 +716,6 @@ static void bond_do_fail_over_mac(struct bonding *bond,
return;
write_unlock_bh(&bond->curr_slave_lock);
- read_unlock(&bond->lock);
if (old_active) {
memcpy(tmp_mac, new_active->dev->dev_addr, ETH_ALEN);
@@ -761,7 +745,6 @@ static void bond_do_fail_over_mac(struct bonding *bond,
pr_err("%s: Error %d setting MAC of slave %s\n",
bond->dev->name, -rv, new_active->dev->name);
out:
- read_lock(&bond->lock);
write_lock_bh(&bond->curr_slave_lock);
break;
default:
@@ -821,7 +804,11 @@ static struct slave *bond_find_best_slave(struct bonding *bond)
static bool bond_should_notify_peers(struct bonding *bond)
{
- struct slave *slave = bond->curr_active_slave;
+ struct slave *slave;
+
+ rcu_read_lock();
+ slave = rcu_dereference(bond->curr_active_slave);
+ rcu_read_unlock();
pr_debug("bond_should_notify_peers: bond %s slave %s\n",
bond->dev->name, slave ? slave->dev->name : "NULL");
@@ -846,8 +833,7 @@ static bool bond_should_notify_peers(struct bonding *bond)
* because it is apparently the best available slave we have, even though its
* updelay hasn't timed out yet.
*
- * If new_active is not NULL, caller must hold bond->lock for read and
- * curr_slave_lock for write_bh.
+ * If new_active is not NULL, caller must hold curr_slave_lock for write_bh.
*/
void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
{
@@ -916,14 +902,12 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
}
write_unlock_bh(&bond->curr_slave_lock);
- read_unlock(&bond->lock);
call_netdevice_notifiers(NETDEV_BONDING_FAILOVER, bond->dev);
if (should_notify_peers)
call_netdevice_notifiers(NETDEV_NOTIFY_PEERS,
bond->dev);
- read_lock(&bond->lock);
write_lock_bh(&bond->curr_slave_lock);
}
}
@@ -949,7 +933,7 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
* - The primary_slave has got its link back.
* - A slave has got its link back and there's no old curr_active_slave.
*
- * Caller must hold bond->lock for read and curr_slave_lock for write_bh.
+ * Caller must hold curr_slave_lock for write_bh.
*/
void bond_select_active_slave(struct bonding *bond)
{
@@ -1594,11 +1578,9 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
bond_set_carrier(bond);
if (USES_PRIMARY(bond->params.mode)) {
- read_lock(&bond->lock);
write_lock_bh(&bond->curr_slave_lock);
bond_select_active_slave(bond);
write_unlock_bh(&bond->curr_slave_lock);
- read_unlock(&bond->lock);
}
pr_info("%s: enslaving %s as a%s interface with a%s link.\n",
@@ -1618,19 +1600,13 @@ err_detach:
bond_hw_addr_flush(bond_dev, slave_dev);
vlan_vids_del_by_dev(slave_dev, bond_dev);
- write_lock_bh(&bond->lock);
if (bond->primary_slave == new_slave)
bond->primary_slave = NULL;
if (bond->curr_active_slave == new_slave) {
- bond_change_active_slave(bond, NULL);
- write_unlock_bh(&bond->lock);
- read_lock(&bond->lock);
write_lock_bh(&bond->curr_slave_lock);
+ bond_change_active_slave(bond, NULL);
bond_select_active_slave(bond);
write_unlock_bh(&bond->curr_slave_lock);
- read_unlock(&bond->lock);
- } else {
- write_unlock_bh(&bond->lock);
}
slave_disable_netpoll(new_slave);
@@ -1695,20 +1671,16 @@ static int __bond_release_one(struct net_device *bond_dev,
}
block_netpoll_tx();
- write_lock_bh(&bond->lock);
slave = bond_get_slave_by_dev(bond, slave_dev);
if (!slave) {
/* not a slave of this bond */
pr_info("%s: %s not enslaved\n",
bond_dev->name, slave_dev->name);
- write_unlock_bh(&bond->lock);
unblock_netpoll_tx();
return -EINVAL;
}
- write_unlock_bh(&bond->lock);
-
/* release the slave from its bond */
bond->slave_cnt--;
@@ -1720,12 +1692,10 @@ static int __bond_release_one(struct net_device *bond_dev,
write_lock_bh(&bond->lock);
/* Inform AD package of unbinding of slave. */
- if (bond->params.mode == BOND_MODE_8023AD) {
- /* must be called before the slave is
- * detached from the list
- */
+ if (bond->params.mode == BOND_MODE_8023AD)
bond_3ad_unbind_slave(slave);
- }
+
+ write_unlock_bh(&bond->lock);
pr_info("%s: releasing %s interface %s\n",
bond_dev->name,
@@ -1748,8 +1718,11 @@ static int __bond_release_one(struct net_device *bond_dev,
if (bond->primary_slave == slave)
bond->primary_slave = NULL;
- if (oldcurrent == slave)
+ if (oldcurrent == slave) {
+ write_lock_bh(&bond->curr_slave_lock);
bond_change_active_slave(bond, NULL);
+ write_unlock_bh(&bond->curr_slave_lock);
+ }
if (bond_is_lb(bond)) {
/* Must be called only after the slave has been
@@ -1757,9 +1730,7 @@ static int __bond_release_one(struct net_device *bond_dev,
* has been cleared (if our_slave == old_current),
* but before a new active slave is selected.
*/
- write_unlock_bh(&bond->lock);
bond_alb_deinit_slave(bond, slave);
- write_lock_bh(&bond->lock);
}
if (all) {
@@ -1770,15 +1741,11 @@ static int __bond_release_one(struct net_device *bond_dev,
* is no concern that another slave add/remove event
* will interfere.
*/
- write_unlock_bh(&bond->lock);
- read_lock(&bond->lock);
write_lock_bh(&bond->curr_slave_lock);
bond_select_active_slave(bond);
write_unlock_bh(&bond->curr_slave_lock);
- read_unlock(&bond->lock);
- write_lock_bh(&bond->lock);
}
if (!bond_has_slaves(bond)) {
@@ -1793,7 +1760,6 @@ static int __bond_release_one(struct net_device *bond_dev,
}
}
- write_unlock_bh(&bond->lock);
unblock_netpoll_tx();
synchronize_rcu();
@@ -1928,7 +1894,7 @@ static int bond_miimon_inspect(struct bonding *bond)
ignore_updelay = !bond->curr_active_slave ? true : false;
- bond_for_each_slave(bond, slave, iter) {
+ bond_for_each_slave_rcu(bond, slave, iter) {
slave->new_link = BOND_LINK_NOCHANGE;
link_state = bond_check_dev_link(bond, slave->dev, 0);
@@ -2126,41 +2092,35 @@ void bond_mii_monitor(struct work_struct *work)
bool should_notify_peers = false;
unsigned long delay;
- read_lock(&bond->lock);
-
delay = msecs_to_jiffies(bond->params.miimon);
if (!bond_has_slaves(bond))
goto re_arm;
+ rcu_read_lock();
+
should_notify_peers = bond_should_notify_peers(bond);
if (bond_miimon_inspect(bond)) {
- read_unlock(&bond->lock);
+ rcu_read_unlock();
/* Race avoidance with bond_close cancel of workqueue */
if (!rtnl_trylock()) {
- read_lock(&bond->lock);
delay = 1;
should_notify_peers = false;
goto re_arm;
}
- read_lock(&bond->lock);
-
bond_miimon_commit(bond);
- read_unlock(&bond->lock);
rtnl_unlock(); /* might sleep, hold no other locks */
- read_lock(&bond->lock);
- }
+ } else
+ rcu_read_unlock();
re_arm:
if (bond->params.miimon)
queue_delayed_work(bond->wq, &bond->mii_work, delay);
- read_unlock(&bond->lock);
-
if (should_notify_peers) {
if (!rtnl_trylock())
return;
@@ -2422,12 +2382,12 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
struct list_head *iter;
int do_failover = 0;
- read_lock(&bond->lock);
-
if (!bond_has_slaves(bond))
goto re_arm;
- oldcurrent = bond->curr_active_slave;
+ rcu_read_lock();
+
+ oldcurrent = ACCESS_ONCE(bond->curr_active_slave);
/* see if any of the previous devices are up now (i.e. they have
* xmt and rcv traffic). the curr_active_slave does not come into
* the picture unless it is null. also, slave->jiffies is not needed
@@ -2436,7 +2396,7 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
* TODO: what about up/down delay in arp mode? it wasn't here before
* so it can wait
*/
- bond_for_each_slave(bond, slave, iter) {
+ bond_for_each_slave_rcu(bond, slave, iter) {
unsigned long trans_start = dev_trans_start(slave->dev);
if (slave->link != BOND_LINK_UP) {
@@ -2498,7 +2458,14 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
bond_arp_send_all(bond, slave);
}
+ rcu_read_unlock();
+
if (do_failover) {
+ /* bond_select_active_slave must be called while holding
+ * RTNL and curr_slave_lock for write.
+ */
+ if (!rtnl_trylock())
+ goto re_arm;
block_netpoll_tx();
write_lock_bh(&bond->curr_slave_lock);
@@ -2506,14 +2473,13 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
write_unlock_bh(&bond->curr_slave_lock);
unblock_netpoll_tx();
+ rtnl_unlock();
}
re_arm:
if (bond->params.arp_interval)
queue_delayed_work(bond->wq, &bond->arp_work,
msecs_to_jiffies(bond->params.arp_interval));
-
- read_unlock(&bond->lock);
}
/*
@@ -2522,7 +2488,7 @@ re_arm:
* place for the slave. Returns 0 if no changes are found, >0 if changes
* to link states must be committed.
*
- * Called with bond->lock held for read.
+ * Called with rcu_read_lock held.
*/
static int bond_ab_arp_inspect(struct bonding *bond)
{
@@ -2531,7 +2497,7 @@ static int bond_ab_arp_inspect(struct bonding *bond)
struct slave *slave;
int commit = 0;
- bond_for_each_slave(bond, slave, iter) {
+ bond_for_each_slave_rcu(bond, slave, iter) {
slave->new_link = BOND_LINK_NOCHANGE;
last_rx = slave_last_rx(bond, slave);
@@ -2593,7 +2559,7 @@ static int bond_ab_arp_inspect(struct bonding *bond)
* Called to commit link state changes noted by inspection step of
* active-backup mode ARP monitor.
*
- * Called with RTNL and bond->lock for read.
+ * Called with RTNL held.
*/
static void bond_ab_arp_commit(struct bonding *bond)
{
@@ -2668,19 +2634,20 @@ do_failover:
/*
* Send ARP probes for active-backup mode ARP monitor.
*
- * Called with bond->lock held for read.
+ * Called with rcu_read_lock held.
*/
static void bond_ab_arp_probe(struct bonding *bond)
{
- struct slave *slave, *before = NULL, *new_slave = NULL;
+ struct slave *slave, *before = NULL, *new_slave = NULL,
+ *curr_arp_slave = rcu_dereference(bond->current_arp_slave);
struct list_head *iter;
bool found = false;
read_lock(&bond->curr_slave_lock);
- if (bond->current_arp_slave && bond->curr_active_slave)
+ if (curr_arp_slave && bond->curr_active_slave)
pr_info("PROBE: c_arp %s && cas %s BAD\n",
- bond->current_arp_slave->dev->name,
+ curr_arp_slave->dev->name,
bond->curr_active_slave->dev->name);
if (bond->curr_active_slave) {
@@ -2696,15 +2663,15 @@ static void bond_ab_arp_probe(struct bonding *bond)
* for becoming the curr_active_slave
*/
- if (!bond->current_arp_slave) {
- bond->current_arp_slave = bond_first_slave(bond);
- if (!bond->current_arp_slave)
+ if (!curr_arp_slave) {
+ curr_arp_slave = bond_first_slave_rcu(bond);
+ if (!curr_arp_slave)
return;
}
- bond_set_slave_inactive_flags(bond->current_arp_slave);
+ bond_set_slave_inactive_flags(curr_arp_slave);
- bond_for_each_slave(bond, slave, iter) {
+ bond_for_each_slave_rcu(bond, slave, iter) {
if (!found && !before && IS_UP(slave->dev))
before = slave;
@@ -2727,7 +2694,7 @@ static void bond_ab_arp_probe(struct bonding *bond)
pr_info("%s: backup interface %s is now down.\n",
bond->dev->name, slave->dev->name);
}
- if (slave == bond->current_arp_slave)
+ if (slave == curr_arp_slave)
found = true;
}
@@ -2741,8 +2708,7 @@ static void bond_ab_arp_probe(struct bonding *bond)
bond_set_slave_active_flags(new_slave);
bond_arp_send_all(bond, new_slave);
new_slave->jiffies = jiffies;
- bond->current_arp_slave = new_slave;
-
+ rcu_assign_pointer(bond->current_arp_slave, new_slave);
}
void bond_activebackup_arp_mon(struct work_struct *work)
@@ -2752,43 +2718,38 @@ void bond_activebackup_arp_mon(struct work_struct *work)
bool should_notify_peers = false;
int delta_in_ticks;
- read_lock(&bond->lock);
-
delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval);
if (!bond_has_slaves(bond))
goto re_arm;
+ rcu_read_lock();
+
should_notify_peers = bond_should_notify_peers(bond);
if (bond_ab_arp_inspect(bond)) {
- read_unlock(&bond->lock);
+ rcu_read_unlock();
/* Race avoidance with bond_close flush of workqueue */
if (!rtnl_trylock()) {
- read_lock(&bond->lock);
delta_in_ticks = 1;
should_notify_peers = false;
goto re_arm;
}
- read_lock(&bond->lock);
-
bond_ab_arp_commit(bond);
- read_unlock(&bond->lock);
rtnl_unlock();
- read_lock(&bond->lock);
+ rcu_read_lock();
}
bond_ab_arp_probe(bond);
+ rcu_read_unlock();
re_arm:
if (bond->params.arp_interval)
queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks);
- read_unlock(&bond->lock);
-
if (should_notify_peers) {
if (!rtnl_trylock())
return;
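
Both bond_mii_monitor() and bond_activebackup_arp_mon() now follow the same two-phase shape: a read-only inspect pass under rcu_read_lock(), then a commit pass under RTNL only when the inspect pass reported changes. Reduced to its skeleton (example_two_phase_monitor is illustrative, not part of the commit):

static void example_two_phase_monitor(struct bonding *bond)
{
	rcu_read_lock();
	if (!bond_miimon_inspect(bond)) {	/* read-only, no state changes */
		rcu_read_unlock();
		return;
	}
	rcu_read_unlock();

	if (!rtnl_trylock())
		return;		/* the caller re-arms the work and retries */
	bond_miimon_commit(bond);	/* mutates state, so it needs RTNL */
	rtnl_unlock();
}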
diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
index 40e7b1cb4aea..d7d84db9eed7 100644
--- a/drivers/net/bonding/bond_netlink.c
+++ b/drivers/net/bonding/bond_netlink.c
@@ -1,6 +1,7 @@
/*
* drivers/net/bond/bond_netlink.c - Netlink interface for bonding
* Copyright (c) 2013 Jiri Pirko <jiri@resnulli.us>
+ * Copyright (c) 2013 Scott Feldman <sfeldma@cumulusnetworks.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -23,6 +24,14 @@
static const struct nla_policy bond_policy[IFLA_BOND_MAX + 1] = {
[IFLA_BOND_MODE] = { .type = NLA_U8 },
[IFLA_BOND_ACTIVE_SLAVE] = { .type = NLA_U32 },
+ [IFLA_BOND_MIIMON] = { .type = NLA_U32 },
+ [IFLA_BOND_UPDELAY] = { .type = NLA_U32 },
+ [IFLA_BOND_DOWNDELAY] = { .type = NLA_U32 },
+ [IFLA_BOND_USE_CARRIER] = { .type = NLA_U8 },
+ [IFLA_BOND_ARP_INTERVAL] = { .type = NLA_U32 },
+ [IFLA_BOND_ARP_IP_TARGET] = { .type = NLA_NESTED },
+ [IFLA_BOND_ARP_VALIDATE] = { .type = NLA_U32 },
+ [IFLA_BOND_ARP_ALL_TARGETS] = { .type = NLA_U32 },
};
static int bond_validate(struct nlattr *tb[], struct nlattr *data[])
@@ -40,16 +49,20 @@ static int bond_changelink(struct net_device *bond_dev,
struct nlattr *tb[], struct nlattr *data[])
{
struct bonding *bond = netdev_priv(bond_dev);
+ int miimon = 0;
int err;
- if (data && data[IFLA_BOND_MODE]) {
+ if (!data)
+ return 0;
+
+ if (data[IFLA_BOND_MODE]) {
int mode = nla_get_u8(data[IFLA_BOND_MODE]);
err = bond_option_mode_set(bond, mode);
if (err)
return err;
}
- if (data && data[IFLA_BOND_ACTIVE_SLAVE]) {
+ if (data[IFLA_BOND_ACTIVE_SLAVE]) {
int ifindex = nla_get_u32(data[IFLA_BOND_ACTIVE_SLAVE]);
struct net_device *slave_dev;
@@ -65,6 +78,82 @@ static int bond_changelink(struct net_device *bond_dev,
if (err)
return err;
}
+ if (data[IFLA_BOND_MIIMON]) {
+ miimon = nla_get_u32(data[IFLA_BOND_MIIMON]);
+
+ err = bond_option_miimon_set(bond, miimon);
+ if (err)
+ return err;
+ }
+ if (data[IFLA_BOND_UPDELAY]) {
+ int updelay = nla_get_u32(data[IFLA_BOND_UPDELAY]);
+
+ err = bond_option_updelay_set(bond, updelay);
+ if (err)
+ return err;
+ }
+ if (data[IFLA_BOND_DOWNDELAY]) {
+ int downdelay = nla_get_u32(data[IFLA_BOND_DOWNDELAY]);
+
+ err = bond_option_downdelay_set(bond, downdelay);
+ if (err)
+ return err;
+ }
+ if (data[IFLA_BOND_USE_CARRIER]) {
+ int use_carrier = nla_get_u8(data[IFLA_BOND_USE_CARRIER]);
+
+ err = bond_option_use_carrier_set(bond, use_carrier);
+ if (err)
+ return err;
+ }
+ if (data[IFLA_BOND_ARP_INTERVAL]) {
+ int arp_interval = nla_get_u32(data[IFLA_BOND_ARP_INTERVAL]);
+
+ if (arp_interval && miimon) {
+ pr_err("%s: ARP monitoring cannot be used with MII monitoring.\n",
+ bond->dev->name);
+ return -EINVAL;
+ }
+
+ err = bond_option_arp_interval_set(bond, arp_interval);
+ if (err)
+ return err;
+ }
+ if (data[IFLA_BOND_ARP_IP_TARGET]) {
+ __be32 targets[BOND_MAX_ARP_TARGETS] = { 0, };
+ struct nlattr *attr;
+ int i = 0, rem;
+
+ nla_for_each_nested(attr, data[IFLA_BOND_ARP_IP_TARGET], rem) {
+ __be32 target = nla_get_u32(attr);
+ targets[i++] = target;
+ }
+
+ err = bond_option_arp_ip_targets_set(bond, targets, i);
+ if (err)
+ return err;
+ }
+ if (data[IFLA_BOND_ARP_VALIDATE]) {
+ int arp_validate = nla_get_u32(data[IFLA_BOND_ARP_VALIDATE]);
+
+ if (arp_validate && miimon) {
+ pr_err("%s: ARP validating cannot be used with MII monitoring.\n",
+ bond->dev->name);
+ return -EINVAL;
+ }
+
+ err = bond_option_arp_validate_set(bond, arp_validate);
+ if (err)
+ return err;
+ }
+ if (data[IFLA_BOND_ARP_ALL_TARGETS]) {
+ int arp_all_targets =
+ nla_get_u32(data[IFLA_BOND_ARP_ALL_TARGETS]);
+
+ err = bond_option_arp_all_targets_set(bond, arp_all_targets);
+ if (err)
+ return err;
+ }
return 0;
}
@@ -83,7 +172,17 @@ static int bond_newlink(struct net *src_net, struct net_device *bond_dev,
static size_t bond_get_size(const struct net_device *bond_dev)
{
return nla_total_size(sizeof(u8)) + /* IFLA_BOND_MODE */
- nla_total_size(sizeof(u32)); /* IFLA_BOND_ACTIVE_SLAVE */
+ nla_total_size(sizeof(u32)) + /* IFLA_BOND_ACTIVE_SLAVE */
+ nla_total_size(sizeof(u32)) + /* IFLA_BOND_MIIMON */
+ nla_total_size(sizeof(u32)) + /* IFLA_BOND_UPDELAY */
+ nla_total_size(sizeof(u32)) + /* IFLA_BOND_DOWNDELAY */
+ nla_total_size(sizeof(u8)) + /* IFLA_BOND_USE_CARRIER */
+ nla_total_size(sizeof(u32)) + /* IFLA_BOND_ARP_INTERVAL */
+ /* IFLA_BOND_ARP_IP_TARGET */
+ nla_total_size(sizeof(u32)) * BOND_MAX_ARP_TARGETS +
+ nla_total_size(sizeof(u32)) + /* IFLA_BOND_ARP_VALIDATE */
+ nla_total_size(sizeof(u32)) + /* IFLA_BOND_ARP_ALL_TARGETS */
+ 0;
}
static int bond_fill_info(struct sk_buff *skb,
@@ -91,11 +190,57 @@ static int bond_fill_info(struct sk_buff *skb,
{
struct bonding *bond = netdev_priv(bond_dev);
struct net_device *slave_dev = bond_option_active_slave_get(bond);
+ struct nlattr *targets;
+ int i, targets_added;
+
+ if (nla_put_u8(skb, IFLA_BOND_MODE, bond->params.mode))
+ goto nla_put_failure;
+
+ if (slave_dev &&
+ nla_put_u32(skb, IFLA_BOND_ACTIVE_SLAVE, slave_dev->ifindex))
+ goto nla_put_failure;
+
+ if (nla_put_u32(skb, IFLA_BOND_MIIMON, bond->params.miimon))
+ goto nla_put_failure;
+
+ if (nla_put_u32(skb, IFLA_BOND_UPDELAY,
+ bond->params.updelay * bond->params.miimon))
+ goto nla_put_failure;
+
+ if (nla_put_u32(skb, IFLA_BOND_DOWNDELAY,
+ bond->params.downdelay * bond->params.miimon))
+ goto nla_put_failure;
+
+ if (nla_put_u8(skb, IFLA_BOND_USE_CARRIER, bond->params.use_carrier))
+ goto nla_put_failure;
+
+ if (nla_put_u32(skb, IFLA_BOND_ARP_INTERVAL, bond->params.arp_interval))
+ goto nla_put_failure;
+
+ targets = nla_nest_start(skb, IFLA_BOND_ARP_IP_TARGET);
+ if (!targets)
+ goto nla_put_failure;
+
+ targets_added = 0;
+ for (i = 0; i < BOND_MAX_ARP_TARGETS; i++) {
+ if (bond->params.arp_targets[i]) {
+ nla_put_u32(skb, i, bond->params.arp_targets[i]);
+ targets_added = 1;
+ }
+ }
- if (nla_put_u8(skb, IFLA_BOND_MODE, bond->params.mode) ||
- (slave_dev &&
- nla_put_u32(skb, IFLA_BOND_ACTIVE_SLAVE, slave_dev->ifindex)))
+ if (targets_added)
+ nla_nest_end(skb, targets);
+ else
+ nla_nest_cancel(skb, targets);
+
+ if (nla_put_u32(skb, IFLA_BOND_ARP_VALIDATE, bond->params.arp_validate))
+ goto nla_put_failure;
+
+ if (nla_put_u32(skb, IFLA_BOND_ARP_ALL_TARGETS,
+ bond->params.arp_all_targets))
goto nla_put_failure;
+
return 0;
nla_put_failure:
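
bond_fill_info() uses nla_nest_start()/nla_nest_cancel() so that an IFLA_BOND_ARP_IP_TARGET nest is only emitted when at least one target exists. The same idiom in isolation (put_targets is hypothetical; nla_put_be32 is used here since the targets are __be32, where the hunk above uses nla_put_u32):

static int put_targets(struct sk_buff *skb, const __be32 *targets)
{
	struct nlattr *nest;
	int i, added = 0;

	nest = nla_nest_start(skb, IFLA_BOND_ARP_IP_TARGET);
	if (!nest)
		return -EMSGSIZE;

	for (i = 0; i < BOND_MAX_ARP_TARGETS; i++) {
		if (!targets[i])
			continue;
		if (nla_put_be32(skb, i, targets[i])) {
			nla_nest_cancel(skb, nest);
			return -EMSGSIZE;
		}
		added++;
	}

	if (added)
		nla_nest_end(skb, nest);	/* close the nest */
	else
		nla_nest_cancel(skb, nest);	/* drop the empty nest */
	return 0;
}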
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index ea6f640782b7..600779e5904f 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -1,6 +1,7 @@
/*
* drivers/net/bond/bond_options.c - bonding options
* Copyright (c) 2013 Jiri Pirko <jiri@resnulli.us>
+ * Copyright (c) 2013 Scott Feldman <sfeldma@cumulusnetworks.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -106,7 +107,6 @@ int bond_option_active_slave_set(struct bonding *bond,
}
block_netpoll_tx();
- read_lock(&bond->lock);
write_lock_bh(&bond->curr_slave_lock);
/* check to see if we are clearing active */
@@ -141,7 +141,329 @@ int bond_option_active_slave_set(struct bonding *bond,
}
write_unlock_bh(&bond->curr_slave_lock);
- read_unlock(&bond->lock);
unblock_netpoll_tx();
return ret;
}
+
+int bond_option_miimon_set(struct bonding *bond, int miimon)
+{
+ if (miimon < 0) {
+ pr_err("%s: Invalid miimon value %d not in range %d-%d; rejected.\n",
+ bond->dev->name, miimon, 0, INT_MAX);
+ return -EINVAL;
+ }
+ pr_info("%s: Setting MII monitoring interval to %d.\n",
+ bond->dev->name, miimon);
+ bond->params.miimon = miimon;
+ if (bond->params.updelay)
+ pr_info("%s: Note: Updating updelay (to %d) since it is a multiple of the miimon value.\n",
+ bond->dev->name,
+ bond->params.updelay * bond->params.miimon);
+ if (bond->params.downdelay)
+ pr_info("%s: Note: Updating downdelay (to %d) since it is a multiple of the miimon value.\n",
+ bond->dev->name,
+ bond->params.downdelay * bond->params.miimon);
+ if (miimon && bond->params.arp_interval) {
+ pr_info("%s: MII monitoring cannot be used with ARP monitoring. Disabling ARP monitoring...\n",
+ bond->dev->name);
+ bond->params.arp_interval = 0;
+ if (bond->params.arp_validate)
+ bond->params.arp_validate = BOND_ARP_VALIDATE_NONE;
+ }
+ if (bond->dev->flags & IFF_UP) {
+ /* If the interface is up, we may need to fire off
+ * the MII timer. If the interface is down, the
+ * timer will get fired off when the open function
+ * is called.
+ */
+ if (!miimon) {
+ cancel_delayed_work_sync(&bond->mii_work);
+ } else {
+ cancel_delayed_work_sync(&bond->arp_work);
+ queue_delayed_work(bond->wq, &bond->mii_work, 0);
+ }
+ }
+ return 0;
+}
+
+int bond_option_updelay_set(struct bonding *bond, int updelay)
+{
+ if (!(bond->params.miimon)) {
+ pr_err("%s: Unable to set up delay as MII monitoring is disabled\n",
+ bond->dev->name);
+ return -EPERM;
+ }
+
+ if (updelay < 0) {
+ pr_err("%s: Invalid up delay value %d not in range %d-%d; rejected.\n",
+ bond->dev->name, updelay, 0, INT_MAX);
+ return -EINVAL;
+ } else {
+ if ((updelay % bond->params.miimon) != 0) {
+ pr_warn("%s: Warning: up delay (%d) is not a multiple of miimon (%d), updelay rounded to %d ms\n",
+ bond->dev->name, updelay,
+ bond->params.miimon,
+ (updelay / bond->params.miimon) *
+ bond->params.miimon);
+ }
+ bond->params.updelay = updelay / bond->params.miimon;
+ pr_info("%s: Setting up delay to %d.\n",
+ bond->dev->name,
+ bond->params.updelay * bond->params.miimon);
+ }
+
+ return 0;
+}
+
+int bond_option_downdelay_set(struct bonding *bond, int downdelay)
+{
+ if (!(bond->params.miimon)) {
+ pr_err("%s: Unable to set down delay as MII monitoring is disabled\n",
+ bond->dev->name);
+ return -EPERM;
+ }
+
+ if (downdelay < 0) {
+ pr_err("%s: Invalid down delay value %d not in range %d-%d; rejected.\n",
+ bond->dev->name, downdelay, 0, INT_MAX);
+ return -EINVAL;
+ } else {
+ if ((downdelay % bond->params.miimon) != 0) {
+ pr_warn("%s: Warning: down delay (%d) is not a multiple of miimon (%d), delay rounded to %d ms\n",
+ bond->dev->name, downdelay,
+ bond->params.miimon,
+ (downdelay / bond->params.miimon) *
+ bond->params.miimon);
+ }
+ bond->params.downdelay = downdelay / bond->params.miimon;
+ pr_info("%s: Setting down delay to %d.\n",
+ bond->dev->name,
+ bond->params.downdelay * bond->params.miimon);
+ }
+
+ return 0;
+}
+
+int bond_option_use_carrier_set(struct bonding *bond, int use_carrier)
+{
+ if ((use_carrier == 0) || (use_carrier == 1)) {
+ bond->params.use_carrier = use_carrier;
+ pr_info("%s: Setting use_carrier to %d.\n",
+ bond->dev->name, use_carrier);
+ } else {
+ pr_info("%s: Ignoring invalid use_carrier value %d.\n",
+ bond->dev->name, use_carrier);
+ }
+
+ return 0;
+}
+
+int bond_option_arp_interval_set(struct bonding *bond, int arp_interval)
+{
+ if (arp_interval < 0) {
+ pr_err("%s: Invalid arp_interval value %d not in range 0-%d; rejected.\n",
+ bond->dev->name, arp_interval, INT_MAX);
+ return -EINVAL;
+ }
+ if (BOND_NO_USES_ARP(bond->params.mode)) {
+ pr_info("%s: ARP monitoring cannot be used with ALB/TLB/802.3ad. Only MII monitoring is supported on %s.\n",
+ bond->dev->name, bond->dev->name);
+ return -EINVAL;
+ }
+ pr_info("%s: Setting ARP monitoring interval to %d.\n",
+ bond->dev->name, arp_interval);
+ bond->params.arp_interval = arp_interval;
+ if (arp_interval) {
+ if (bond->params.miimon) {
+ pr_info("%s: ARP monitoring cannot be used with MII monitoring. %s Disabling MII monitoring.\n",
+ bond->dev->name, bond->dev->name);
+ bond->params.miimon = 0;
+ }
+ if (!bond->params.arp_targets[0])
+ pr_info("%s: ARP monitoring has been set up, but no ARP targets have been specified.\n",
+ bond->dev->name);
+ }
+ if (bond->dev->flags & IFF_UP) {
+ /* If the interface is up, we may need to fire off
+ * the ARP timer. If the interface is down, the
+ * timer will get fired off when the open function
+ * is called.
+ */
+ if (!arp_interval) {
+ if (bond->params.arp_validate)
+ bond->recv_probe = NULL;
+ cancel_delayed_work_sync(&bond->arp_work);
+ } else {
+ /* arp_validate can be set only in active-backup mode */
+ if (bond->params.arp_validate)
+ bond->recv_probe = bond_arp_rcv;
+ cancel_delayed_work_sync(&bond->mii_work);
+ queue_delayed_work(bond->wq, &bond->arp_work, 0);
+ }
+ }
+
+ return 0;
+}
+
+static void _bond_options_arp_ip_target_set(struct bonding *bond, int slot,
+ __be32 target,
+ unsigned long last_rx)
+{
+ __be32 *targets = bond->params.arp_targets;
+ struct list_head *iter;
+ struct slave *slave;
+
+ if (slot >= 0 && slot < BOND_MAX_ARP_TARGETS) {
+ bond_for_each_slave(bond, slave, iter)
+ slave->target_last_arp_rx[slot] = last_rx;
+ targets[slot] = target;
+ }
+}
+
+static int _bond_option_arp_ip_target_add(struct bonding *bond, __be32 target)
+{
+ __be32 *targets = bond->params.arp_targets;
+ int ind;
+
+ if (IS_IP_TARGET_UNUSABLE_ADDRESS(target)) {
+ pr_err("%s: invalid ARP target %pI4 specified for addition\n",
+ bond->dev->name, &target);
+ return -EINVAL;
+ }
+
+ if (bond_get_targets_ip(targets, target) != -1) { /* dup */
+ pr_err("%s: ARP target %pI4 is already present\n",
+ bond->dev->name, &target);
+ return -EINVAL;
+ }
+
+ ind = bond_get_targets_ip(targets, 0); /* first free slot */
+ if (ind == -1) {
+ pr_err("%s: ARP target table is full!\n",
+ bond->dev->name);
+ return -EINVAL;
+ }
+
+ pr_info("%s: adding ARP target %pI4.\n", bond->dev->name, &target);
+
+ _bond_options_arp_ip_target_set(bond, ind, target, jiffies);
+
+ return 0;
+}
+
+int bond_option_arp_ip_target_add(struct bonding *bond, __be32 target)
+{
+ int ret;
+
+ /* avoid racing with bond_arp_rcv */
+ write_lock_bh(&bond->lock);
+ ret = _bond_option_arp_ip_target_add(bond, target);
+ write_unlock_bh(&bond->lock);
+
+ return ret;
+}
+
+int bond_option_arp_ip_target_rem(struct bonding *bond, __be32 target)
+{
+ __be32 *targets = bond->params.arp_targets;
+ struct list_head *iter;
+ struct slave *slave;
+ unsigned long *targets_rx;
+ int ind, i;
+
+ if (IS_IP_TARGET_UNUSABLE_ADDRESS(target)) {
+ pr_err("%s: invalid ARP target %pI4 specified for removal\n",
+ bond->dev->name, &target);
+ return -EINVAL;
+ }
+
+ ind = bond_get_targets_ip(targets, target);
+ if (ind == -1) {
+ pr_err("%s: unable to remove nonexistent ARP target %pI4.\n",
+ bond->dev->name, &target);
+ return -EINVAL;
+ }
+
+ if (ind == 0 && !targets[1] && bond->params.arp_interval)
+ pr_warn("%s: removing last arp target with arp_interval on\n",
+ bond->dev->name);
+
+ pr_info("%s: removing ARP target %pI4.\n", bond->dev->name,
+ &target);
+
+ /* avoid racing with bond_arp_rcv */
+ write_lock_bh(&bond->lock);
+
+ bond_for_each_slave(bond, slave, iter) {
+ targets_rx = slave->target_last_arp_rx;
+ for (i = ind; (i < BOND_MAX_ARP_TARGETS-1) && targets[i+1]; i++)
+ targets_rx[i] = targets_rx[i+1];
+ targets_rx[i] = 0;
+ }
+ for (i = ind; (i < BOND_MAX_ARP_TARGETS-1) && targets[i+1]; i++)
+ targets[i] = targets[i+1];
+ targets[i] = 0;
+
+ write_unlock_bh(&bond->lock);
+
+ return 0;
+}
+
+int bond_option_arp_ip_targets_set(struct bonding *bond, __be32 *targets,
+ int count)
+{
+ int i, ret = 0;
+
+ /* avoid racing with bond_arp_rcv */
+ write_lock_bh(&bond->lock);
+
+ /* clear table */
+ for (i = 0; i < BOND_MAX_ARP_TARGETS; i++)
+ _bond_options_arp_ip_target_set(bond, i, 0, 0);
+
+ if (count == 0 && bond->params.arp_interval)
+ pr_warn("%s: removing last arp target with arp_interval on\n",
+ bond->dev->name);
+
+ for (i = 0; i < count; i++) {
+ ret = _bond_option_arp_ip_target_add(bond, targets[i]);
+ if (ret)
+ break;
+ }
+
+ write_unlock_bh(&bond->lock);
+ return ret;
+}
+
+int bond_option_arp_validate_set(struct bonding *bond, int arp_validate)
+{
+ if (bond->params.mode != BOND_MODE_ACTIVEBACKUP) {
+ pr_err("%s: arp_validate only supported in active-backup mode.\n",
+ bond->dev->name);
+ return -EINVAL;
+ }
+ pr_info("%s: setting arp_validate to %s (%d).\n",
+ bond->dev->name, arp_validate_tbl[arp_validate].modename,
+ arp_validate);
+
+ if (bond->dev->flags & IFF_UP) {
+ if (!arp_validate)
+ bond->recv_probe = NULL;
+ else if (bond->params.arp_interval)
+ bond->recv_probe = bond_arp_rcv;
+ }
+ bond->params.arp_validate = arp_validate;
+
+ return 0;
+}
+
+int bond_option_arp_all_targets_set(struct bonding *bond, int arp_all_targets)
+{
+ pr_info("%s: setting arp_all_targets to %s (%d).\n",
+ bond->dev->name, arp_all_targets_tbl[arp_all_targets].modename,
+ arp_all_targets);
+
+ bond->params.arp_all_targets = arp_all_targets;
+
+ return 0;
+}
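
The new bond_option_*_set() helpers assume the caller already holds RTNL, which is what lets the sysfs and netlink paths above share them. The calling convention from a sysfs-style entry point, sketched (example_set_miimon is hypothetical):

static int example_set_miimon(struct bonding *bond, int val)
{
	int err;

	if (!rtnl_trylock())
		return restart_syscall();	/* sysfs stores retry this way */

	err = bond_option_miimon_set(bond, val);	/* RTNL is now held */

	rtnl_unlock();
	return err;
}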
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index e46467683e82..6368d299d5a6 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -358,35 +358,21 @@ static ssize_t bonding_store_arp_validate(struct device *d,
const char *buf, size_t count)
{
struct bonding *bond = to_bond(d);
- int new_value, ret = count;
+ int new_value, ret;
- if (!rtnl_trylock())
- return restart_syscall();
new_value = bond_parse_parm(buf, arp_validate_tbl);
if (new_value < 0) {
pr_err("%s: Ignoring invalid arp_validate value %s\n",
bond->dev->name, buf);
- ret = -EINVAL;
- goto out;
- }
- if (bond->params.mode != BOND_MODE_ACTIVEBACKUP) {
- pr_err("%s: arp_validate only supported in active-backup mode.\n",
- bond->dev->name);
- ret = -EINVAL;
- goto out;
+ return -EINVAL;
}
- pr_info("%s: setting arp_validate to %s (%d).\n",
- bond->dev->name, arp_validate_tbl[new_value].modename,
- new_value);
+ if (!rtnl_trylock())
+ return restart_syscall();
+
+ ret = bond_option_arp_validate_set(bond, new_value);
+ if (!ret)
+ ret = count;
- if (bond->dev->flags & IFF_UP) {
- if (!new_value)
- bond->recv_probe = NULL;
- else if (bond->params.arp_interval)
- bond->recv_probe = bond_arp_rcv;
- }
- bond->params.arp_validate = new_value;
-out:
rtnl_unlock();
return ret;
@@ -413,7 +399,7 @@ static ssize_t bonding_store_arp_all_targets(struct device *d,
const char *buf, size_t count)
{
struct bonding *bond = to_bond(d);
- int new_value;
+ int new_value, ret;
new_value = bond_parse_parm(buf, arp_all_targets_tbl);
if (new_value < 0) {
@@ -421,13 +407,17 @@ static ssize_t bonding_store_arp_all_targets(struct device *d,
bond->dev->name, buf);
return -EINVAL;
}
- pr_info("%s: setting arp_all_targets to %s (%d).\n",
- bond->dev->name, arp_all_targets_tbl[new_value].modename,
- new_value);
- bond->params.arp_all_targets = new_value;
+ if (!rtnl_trylock())
+ return restart_syscall();
+
+ ret = bond_option_arp_all_targets_set(bond, new_value);
+ if (!ret)
+ ret = count;
+
+ rtnl_unlock();
- return count;
+ return ret;
}
static DEVICE_ATTR(arp_all_targets, S_IRUGO | S_IWUSR,
@@ -506,60 +496,21 @@ static ssize_t bonding_store_arp_interval(struct device *d,
const char *buf, size_t count)
{
struct bonding *bond = to_bond(d);
- int new_value, ret = count;
+ int new_value, ret;
- if (!rtnl_trylock())
- return restart_syscall();
if (sscanf(buf, "%d", &new_value) != 1) {
pr_err("%s: no arp_interval value specified.\n",
- bond->dev->name);
- ret = -EINVAL;
- goto out;
- }
- if (new_value < 0) {
- pr_err("%s: Invalid arp_interval value %d not in range 0-%d; rejected.\n",
- bond->dev->name, new_value, INT_MAX);
- ret = -EINVAL;
- goto out;
- }
- if (BOND_NO_USES_ARP(bond->params.mode)) {
- pr_info("%s: ARP monitoring cannot be used with ALB/TLB/802.3ad. Only MII monitoring is supported on %s.\n",
- bond->dev->name, bond->dev->name);
- ret = -EINVAL;
- goto out;
- }
- pr_info("%s: Setting ARP monitoring interval to %d.\n",
- bond->dev->name, new_value);
- bond->params.arp_interval = new_value;
- if (new_value) {
- if (bond->params.miimon) {
- pr_info("%s: ARP monitoring cannot be used with MII monitoring. %s Disabling MII monitoring.\n",
- bond->dev->name, bond->dev->name);
- bond->params.miimon = 0;
- }
- if (!bond->params.arp_targets[0])
- pr_info("%s: ARP monitoring has been set up, but no ARP targets have been specified.\n",
- bond->dev->name);
- }
- if (bond->dev->flags & IFF_UP) {
- /* If the interface is up, we may need to fire off
- * the ARP timer. If the interface is down, the
- * timer will get fired off when the open function
- * is called.
- */
- if (!new_value) {
- if (bond->params.arp_validate)
- bond->recv_probe = NULL;
- cancel_delayed_work_sync(&bond->arp_work);
- } else {
- /* arp_validate can be set only in active-backup mode */
- if (bond->params.arp_validate)
- bond->recv_probe = bond_arp_rcv;
- cancel_delayed_work_sync(&bond->mii_work);
- queue_delayed_work(bond->wq, &bond->arp_work, 0);
- }
+ bond->dev->name);
+ return -EINVAL;
}
-out:
+
+ if (!rtnl_trylock())
+ return restart_syscall();
+
+ ret = bond_option_arp_interval_set(bond, new_value);
+ if (!ret)
+ ret = count;
+
rtnl_unlock();
return ret;
}
@@ -591,81 +542,29 @@ static ssize_t bonding_store_arp_targets(struct device *d,
const char *buf, size_t count)
{
struct bonding *bond = to_bond(d);
- struct list_head *iter;
- struct slave *slave;
- __be32 newtarget, *targets;
- unsigned long *targets_rx;
- int ind, i, j, ret = -EINVAL;
-
- if (!rtnl_trylock())
- return restart_syscall();
+ __be32 target;
+ int ret = -EPERM;
- targets = bond->params.arp_targets;
- if (!in4_pton(buf + 1, -1, (u8 *)&newtarget, -1, NULL) ||
- IS_IP_TARGET_UNUSABLE_ADDRESS(newtarget)) {
- pr_err("%s: invalid ARP target %pI4 specified for addition\n",
- bond->dev->name, &newtarget);
- goto out;
+ if (!in4_pton(buf + 1, -1, (u8 *)&target, -1, NULL)) {
+ pr_err("%s: invalid ARP target %pI4 specified\n",
+ bond->dev->name, &target);
+ return -EPERM;
}
- /* look for adds */
- if (buf[0] == '+') {
- if (bond_get_targets_ip(targets, newtarget) != -1) { /* dup */
- pr_err("%s: ARP target %pI4 is already present\n",
- bond->dev->name, &newtarget);
- goto out;
- }
- ind = bond_get_targets_ip(targets, 0); /* first free slot */
- if (ind == -1) {
- pr_err("%s: ARP target table is full!\n",
- bond->dev->name);
- goto out;
- }
-
- pr_info("%s: adding ARP target %pI4.\n", bond->dev->name,
- &newtarget);
- /* not to race with bond_arp_rcv */
- write_lock_bh(&bond->lock);
- bond_for_each_slave(bond, slave, iter)
- slave->target_last_arp_rx[ind] = jiffies;
- targets[ind] = newtarget;
- write_unlock_bh(&bond->lock);
- } else if (buf[0] == '-') {
- ind = bond_get_targets_ip(targets, newtarget);
- if (ind == -1) {
- pr_err("%s: unable to remove nonexistent ARP target %pI4.\n",
- bond->dev->name, &newtarget);
- goto out;
- }
-
- if (ind == 0 && !targets[1] && bond->params.arp_interval)
- pr_warn("%s: removing last arp target with arp_interval on\n",
- bond->dev->name);
-
- pr_info("%s: removing ARP target %pI4.\n", bond->dev->name,
- &newtarget);
+ if (!rtnl_trylock())
+ return restart_syscall();
- write_lock_bh(&bond->lock);
- bond_for_each_slave(bond, slave, iter) {
- targets_rx = slave->target_last_arp_rx;
- j = ind;
- for (; (j < BOND_MAX_ARP_TARGETS-1) && targets[j+1]; j++)
- targets_rx[j] = targets_rx[j+1];
- targets_rx[j] = 0;
- }
- for (i = ind; (i < BOND_MAX_ARP_TARGETS-1) && targets[i+1]; i++)
- targets[i] = targets[i+1];
- targets[i] = 0;
- write_unlock_bh(&bond->lock);
- } else {
+ if (buf[0] == '+')
+ ret = bond_option_arp_ip_target_add(bond, target);
+ else if (buf[0] == '-')
+ ret = bond_option_arp_ip_target_rem(bond, target);
+ else
pr_err("no command found in arp_ip_targets file for bond %s. Use +<addr> or -<addr>.\n",
bond->dev->name);
- ret = -EPERM;
- goto out;
- }
- ret = count;
-out:
+ if (!ret)
+ ret = count;
+
rtnl_unlock();
return ret;
}
@@ -689,44 +588,21 @@ static ssize_t bonding_store_downdelay(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
- int new_value, ret = count;
+ int new_value, ret;
struct bonding *bond = to_bond(d);
- if (!rtnl_trylock())
- return restart_syscall();
- if (!(bond->params.miimon)) {
- pr_err("%s: Unable to set down delay as MII monitoring is disabled\n",
- bond->dev->name);
- ret = -EPERM;
- goto out;
- }
-
if (sscanf(buf, "%d", &new_value) != 1) {
pr_err("%s: no down delay value specified.\n", bond->dev->name);
- ret = -EINVAL;
- goto out;
+ return -EINVAL;
}
- if (new_value < 0) {
- pr_err("%s: Invalid down delay value %d not in range %d-%d; rejected.\n",
- bond->dev->name, new_value, 0, INT_MAX);
- ret = -EINVAL;
- goto out;
- } else {
- if ((new_value % bond->params.miimon) != 0) {
- pr_warning("%s: Warning: down delay (%d) is not a multiple of miimon (%d), delay rounded to %d ms\n",
- bond->dev->name, new_value,
- bond->params.miimon,
- (new_value / bond->params.miimon) *
- bond->params.miimon);
- }
- bond->params.downdelay = new_value / bond->params.miimon;
- pr_info("%s: Setting down delay to %d.\n",
- bond->dev->name,
- bond->params.downdelay * bond->params.miimon);
- }
+ if (!rtnl_trylock())
+ return restart_syscall();
+
+ ret = bond_option_downdelay_set(bond, new_value);
+ if (!ret)
+ ret = count;
-out:
rtnl_unlock();
return ret;
}
@@ -747,44 +623,22 @@ static ssize_t bonding_store_updelay(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
- int new_value, ret = count;
+ int new_value, ret;
struct bonding *bond = to_bond(d);
- if (!rtnl_trylock())
- return restart_syscall();
- if (!(bond->params.miimon)) {
- pr_err("%s: Unable to set up delay as MII monitoring is disabled\n",
- bond->dev->name);
- ret = -EPERM;
- goto out;
- }
-
if (sscanf(buf, "%d", &new_value) != 1) {
pr_err("%s: no up delay value specified.\n",
- bond->dev->name);
- ret = -EINVAL;
- goto out;
- }
- if (new_value < 0) {
- pr_err("%s: Invalid up delay value %d not in range %d-%d; rejected.\n",
- bond->dev->name, new_value, 0, INT_MAX);
- ret = -EINVAL;
- goto out;
- } else {
- if ((new_value % bond->params.miimon) != 0) {
- pr_warning("%s: Warning: up delay (%d) is not a multiple of miimon (%d), updelay rounded to %d ms\n",
- bond->dev->name, new_value,
- bond->params.miimon,
- (new_value / bond->params.miimon) *
- bond->params.miimon);
- }
- bond->params.updelay = new_value / bond->params.miimon;
- pr_info("%s: Setting up delay to %d.\n",
- bond->dev->name,
- bond->params.updelay * bond->params.miimon);
+ bond->dev->name);
+ return -EINVAL;
}
-out:
+ if (!rtnl_trylock())
+ return restart_syscall();
+
+ ret = bond_option_updelay_set(bond, new_value);
+ if (!ret)
+ ret = count;
+
rtnl_unlock();
return ret;
}
@@ -970,55 +824,22 @@ static ssize_t bonding_store_miimon(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
- int new_value, ret = count;
+ int new_value, ret;
struct bonding *bond = to_bond(d);
- if (!rtnl_trylock())
- return restart_syscall();
if (sscanf(buf, "%d", &new_value) != 1) {
pr_err("%s: no miimon value specified.\n",
bond->dev->name);
- ret = -EINVAL;
- goto out;
- }
- if (new_value < 0) {
- pr_err("%s: Invalid miimon value %d not in range %d-%d; rejected.\n",
- bond->dev->name, new_value, 0, INT_MAX);
- ret = -EINVAL;
- goto out;
- }
- pr_info("%s: Setting MII monitoring interval to %d.\n",
- bond->dev->name, new_value);
- bond->params.miimon = new_value;
- if (bond->params.updelay)
- pr_info("%s: Note: Updating updelay (to %d) since it is a multiple of the miimon value.\n",
- bond->dev->name,
- bond->params.updelay * bond->params.miimon);
- if (bond->params.downdelay)
- pr_info("%s: Note: Updating downdelay (to %d) since it is a multiple of the miimon value.\n",
- bond->dev->name,
- bond->params.downdelay * bond->params.miimon);
- if (new_value && bond->params.arp_interval) {
- pr_info("%s: MII monitoring cannot be used with ARP monitoring. Disabling ARP monitoring...\n",
- bond->dev->name);
- bond->params.arp_interval = 0;
- if (bond->params.arp_validate)
- bond->params.arp_validate = BOND_ARP_VALIDATE_NONE;
- }
- if (bond->dev->flags & IFF_UP) {
- /* If the interface is up, we may need to fire off
- * the MII timer. If the interface is down, the
- * timer will get fired off when the open function
- * is called.
- */
- if (!new_value) {
- cancel_delayed_work_sync(&bond->mii_work);
- } else {
- cancel_delayed_work_sync(&bond->arp_work);
- queue_delayed_work(bond->wq, &bond->mii_work, 0);
- }
+ return -EINVAL;
}
-out:
+
+ if (!rtnl_trylock())
+ return restart_syscall();
+
+ ret = bond_option_miimon_set(bond, new_value);
+ if (!ret)
+ ret = count;
+
rtnl_unlock();
return ret;
}
@@ -1057,7 +878,6 @@ static ssize_t bonding_store_primary(struct device *d,
if (!rtnl_trylock())
return restart_syscall();
block_netpoll_tx();
- read_lock(&bond->lock);
write_lock_bh(&bond->curr_slave_lock);
if (!USES_PRIMARY(bond->params.mode)) {
@@ -1097,7 +917,6 @@ static ssize_t bonding_store_primary(struct device *d,
bond->dev->name, ifname, bond->dev->name);
out:
write_unlock_bh(&bond->curr_slave_lock);
- read_unlock(&bond->lock);
unblock_netpoll_tx();
rtnl_unlock();
@@ -1145,11 +964,9 @@ static ssize_t bonding_store_primary_reselect(struct device *d,
new_value);
block_netpoll_tx();
- read_lock(&bond->lock);
write_lock_bh(&bond->curr_slave_lock);
bond_select_active_slave(bond);
write_unlock_bh(&bond->curr_slave_lock);
- read_unlock(&bond->lock);
unblock_netpoll_tx();
out:
rtnl_unlock();
@@ -1175,25 +992,23 @@ static ssize_t bonding_store_carrier(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
- int new_value, ret = count;
+ int new_value, ret;
struct bonding *bond = to_bond(d);
-
if (sscanf(buf, "%d", &new_value) != 1) {
pr_err("%s: no use_carrier value specified.\n",
bond->dev->name);
- ret = -EINVAL;
- goto out;
- }
- if ((new_value == 0) || (new_value == 1)) {
- bond->params.use_carrier = new_value;
- pr_info("%s: Setting use_carrier to %d.\n",
- bond->dev->name, new_value);
- } else {
- pr_info("%s: Ignoring invalid use_carrier value %d.\n",
- bond->dev->name, new_value);
+ return -EINVAL;
}
-out:
+
+ if (!rtnl_trylock())
+ return restart_syscall();
+
+ ret = bond_option_use_carrier_set(bond, new_value);
+ if (!ret)
+ ret = count;
+
+ rtnl_unlock();
return ret;
}
static DEVICE_ATTR(use_carrier, S_IRUGO | S_IWUSR,
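Taken together, the sysfs store conversions above all share one shape: parse the value before taking any lock, take the rtnl via trylock so a blocked writer can restart its syscall, delegate validation and the actual update to the new bond_option_*_set() helper, and map success (0) onto the consumed byte count. A minimal sketch of that shape, for illustration only (bonding_store_example is a hypothetical name, not part of this patch):

static ssize_t bonding_store_example(struct device *d,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct bonding *bond = to_bond(d);
	int new_value, ret;

	/* parse outside of any lock */
	if (sscanf(buf, "%d", &new_value) != 1)
		return -EINVAL;

	/* trylock so the writer can be restarted instead of blocking */
	if (!rtnl_trylock())
		return restart_syscall();

	/* validation and the actual update live in the option helper */
	ret = bond_option_use_carrier_set(bond, new_value);
	if (!ret)
		ret = count;	/* sysfs expects the consumed byte count */

	rtnl_unlock();
	return ret;
}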
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index a74c92c83ead..8f0d6d0c383b 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -101,6 +101,10 @@
netdev_adjacent_get_private(bond_slave_list(bond)->prev) : \
NULL)
+/* Caller must have rcu_read_lock */
+#define bond_first_slave_rcu(bond) \
+ netdev_lower_get_first_private_rcu(bond->dev)
+
#define bond_is_first_slave(bond, pos) (pos == bond_first_slave(bond))
#define bond_is_last_slave(bond, pos) (pos == bond_last_slave(bond))
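For context, a usage sketch for the new bond_first_slave_rcu() accessor; per the comment above, the caller must hold the RCU read lock, and anything derived from the slave must be used before dropping it (bond_print_first_slave is a hypothetical helper, not part of this patch):

static void bond_print_first_slave(struct bonding *bond)
{
	struct slave *slave;

	rcu_read_lock();	/* required by bond_first_slave_rcu() */
	slave = bond_first_slave_rcu(bond);
	if (slave)
		pr_debug("%s: first slave is %s\n",
			 bond->dev->name, slave->dev->name);
	rcu_read_unlock();
}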
@@ -439,6 +443,17 @@ int bond_netlink_init(void);
void bond_netlink_fini(void);
int bond_option_mode_set(struct bonding *bond, int mode);
int bond_option_active_slave_set(struct bonding *bond, struct net_device *slave_dev);
+int bond_option_miimon_set(struct bonding *bond, int miimon);
+int bond_option_updelay_set(struct bonding *bond, int updelay);
+int bond_option_downdelay_set(struct bonding *bond, int downdelay);
+int bond_option_use_carrier_set(struct bonding *bond, int use_carrier);
+int bond_option_arp_interval_set(struct bonding *bond, int arp_interval);
+int bond_option_arp_ip_targets_set(struct bonding *bond, __be32 *targets,
+ int count);
+int bond_option_arp_ip_target_add(struct bonding *bond, __be32 target);
+int bond_option_arp_ip_target_rem(struct bonding *bond, __be32 target);
+int bond_option_arp_validate_set(struct bonding *bond, int arp_validate);
+int bond_option_arp_all_targets_set(struct bonding *bond, int arp_all_targets);
struct net_device *bond_option_active_slave_get_rcu(struct bonding *bond);
struct net_device *bond_option_active_slave_get(struct bonding *bond);
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 001d7cfc9129..40706c15cdc3 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -133,6 +133,7 @@ enum i40e_state_t {
__I40E_GLOBAL_RESET_REQUESTED,
__I40E_EMP_RESET_REQUESTED,
__I40E_FILTER_OVERFLOW_PROMISC,
+ __I40E_SUSPENDED,
};
enum i40e_interrupt_policy {
@@ -199,6 +200,7 @@ struct i40e_pf {
u16 num_tc_qps; /* num queue pairs per TC */
u16 num_lan_qps; /* num lan queues this pf has set up */
u16 num_lan_msix; /* num queue vectors for the base pf vsi */
+ int queues_left; /* queues left unclaimed */
u16 rss_size; /* num queues in the RSS array */
u16 rss_size_max; /* HW defined max RSS queues */
u16 fdir_pf_filter_count; /* num of guaranteed filters for this PF */
@@ -501,6 +503,7 @@ int i40e_up(struct i40e_vsi *vsi);
void i40e_down(struct i40e_vsi *vsi);
extern const char i40e_driver_name[];
extern const char i40e_driver_version_str[];
+void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags);
void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags);
void i40e_update_stats(struct i40e_vsi *vsi);
void i40e_update_eth_stats(struct i40e_vsi *vsi);
@@ -530,6 +533,7 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
int i40e_vsi_release(struct i40e_vsi *vsi);
struct i40e_vsi *i40e_vsi_lookup(struct i40e_pf *pf, enum i40e_vsi_type type,
struct i40e_vsi *start_vsi);
+int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count);
struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags, u16 uplink_seid,
u16 downlink_seid, u8 enabled_tc);
void i40e_veb_release(struct i40e_veb *veb);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 9c675b5f1466..9a59dda6b5ce 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -1462,19 +1462,19 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
}
} else if (strncmp(cmd_buf, "pfr", 3) == 0) {
dev_info(&pf->pdev->dev, "forcing PFR\n");
- i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
+ i40e_do_reset_safe(pf, (1 << __I40E_PF_RESET_REQUESTED));
} else if (strncmp(cmd_buf, "corer", 5) == 0) {
dev_info(&pf->pdev->dev, "forcing CoreR\n");
- i40e_do_reset(pf, (1 << __I40E_CORE_RESET_REQUESTED));
+ i40e_do_reset_safe(pf, (1 << __I40E_CORE_RESET_REQUESTED));
} else if (strncmp(cmd_buf, "globr", 5) == 0) {
dev_info(&pf->pdev->dev, "forcing GlobR\n");
- i40e_do_reset(pf, (1 << __I40E_GLOBAL_RESET_REQUESTED));
+ i40e_do_reset_safe(pf, (1 << __I40E_GLOBAL_RESET_REQUESTED));
} else if (strncmp(cmd_buf, "empr", 4) == 0) {
dev_info(&pf->pdev->dev, "forcing EMPR\n");
- i40e_do_reset(pf, (1 << __I40E_EMP_RESET_REQUESTED));
+ i40e_do_reset_safe(pf, (1 << __I40E_EMP_RESET_REQUESTED));
} else if (strncmp(cmd_buf, "read", 4) == 0) {
u32 address;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.c b/drivers/net/ethernet/intel/i40e/i40e_diag.c
index dc6c41fade9e..98c1ef563bf0 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_diag.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_diag.c
@@ -119,7 +119,7 @@ i40e_status i40e_diag_eeprom_test(struct i40e_hw *hw)
/* read NVM control word and if NVM valid, validate EEPROM checksum*/
ret_code = i40e_read_nvm_word(hw, I40E_SR_NVM_CONTROL_WORD, &reg_val);
- if ((!ret_code) &&
+ if (!ret_code &&
((reg_val & I40E_SR_CONTROL_WORD_1_MASK) ==
(0x01 << I40E_SR_CONTROL_WORD_1_SHIFT))) {
ret_code = i40e_validate_nvm_checksum(hw, NULL);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index fd3e379de6da..d6681f6bf291 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -193,32 +193,48 @@ static int i40e_get_settings(struct net_device *netdev,
ecmd->supported = SUPPORTED_10000baseKR_Full;
ecmd->advertising = ADVERTISED_10000baseKR_Full;
break;
- case I40E_PHY_TYPE_10GBASE_T:
default:
- ecmd->supported = SUPPORTED_10000baseT_Full;
- ecmd->advertising = ADVERTISED_10000baseT_Full;
+ if (i40e_is_40G_device(hw->device_id)) {
+ ecmd->supported = SUPPORTED_40000baseSR4_Full;
+ ecmd->advertising = ADVERTISED_40000baseSR4_Full;
+ } else {
+ ecmd->supported = SUPPORTED_10000baseT_Full;
+ ecmd->advertising = ADVERTISED_10000baseT_Full;
+ }
break;
}
- /* for now just say autoneg all the time */
ecmd->supported |= SUPPORTED_Autoneg;
+ ecmd->advertising |= ADVERTISED_Autoneg;
+ ecmd->autoneg = ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ?
+ AUTONEG_ENABLE : AUTONEG_DISABLE);
- if (hw->phy.media_type == I40E_MEDIA_TYPE_BACKPLANE) {
+ switch (hw->phy.media_type) {
+ case I40E_MEDIA_TYPE_BACKPLANE:
ecmd->supported |= SUPPORTED_Backplane;
ecmd->advertising |= ADVERTISED_Backplane;
ecmd->port = PORT_NONE;
- } else if (hw->phy.media_type == I40E_MEDIA_TYPE_BASET) {
+ break;
+ case I40E_MEDIA_TYPE_BASET:
ecmd->supported |= SUPPORTED_TP;
ecmd->advertising |= ADVERTISED_TP;
ecmd->port = PORT_TP;
- } else if (hw->phy.media_type == I40E_MEDIA_TYPE_DA) {
+ break;
+ case I40E_MEDIA_TYPE_DA:
+ case I40E_MEDIA_TYPE_CX4:
ecmd->supported |= SUPPORTED_FIBRE;
ecmd->advertising |= ADVERTISED_FIBRE;
ecmd->port = PORT_DA;
- } else {
+ break;
+ case I40E_MEDIA_TYPE_FIBER:
ecmd->supported |= SUPPORTED_FIBRE;
ecmd->advertising |= ADVERTISED_FIBRE;
ecmd->port = PORT_FIBRE;
+ break;
+ case I40E_MEDIA_TYPE_UNKNOWN:
+ default:
+ ecmd->port = PORT_OTHER;
+ break;
}
ecmd->transceiver = XCVR_EXTERNAL;
@@ -260,12 +276,14 @@ static void i40e_get_pauseparam(struct net_device *netdev,
((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ?
AUTONEG_ENABLE : AUTONEG_DISABLE);
- pause->rx_pause = 0;
- pause->tx_pause = 0;
- if (hw_link_info->an_info & I40E_AQ_LINK_PAUSE_RX)
+ if (hw->fc.current_mode == I40E_FC_RX_PAUSE) {
+ pause->rx_pause = 1;
+ } else if (hw->fc.current_mode == I40E_FC_TX_PAUSE) {
+ pause->tx_pause = 1;
+ } else if (hw->fc.current_mode == I40E_FC_FULL) {
pause->rx_pause = 1;
- if (hw_link_info->an_info & I40E_AQ_LINK_PAUSE_TX)
pause->tx_pause = 1;
+ }
}
static u32 i40e_get_msglevel(struct net_device *netdev)
@@ -732,7 +750,6 @@ static int i40e_reg_test(struct net_device *netdev, u64 *data)
netif_info(pf, hw, netdev, "register test\n");
*data = i40e_diag_reg_test(&pf->hw);
- i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
return *data;
}
@@ -780,20 +797,18 @@ static void i40e_diag_test(struct net_device *netdev,
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_pf *pf = np->vsi->back;
- set_bit(__I40E_TESTING, &pf->state);
if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
/* Offline tests */
netif_info(pf, drv, netdev, "offline testing starting\n");
+ set_bit(__I40E_TESTING, &pf->state);
+
/* Link test performed before hardware reset
* so autoneg doesn't interfere with test result
*/
if (i40e_link_test(netdev, &data[I40E_ETH_TEST_LINK]))
eth_test->flags |= ETH_TEST_FL_FAILED;
- if (i40e_reg_test(netdev, &data[I40E_ETH_TEST_REG]))
- eth_test->flags |= ETH_TEST_FL_FAILED;
-
if (i40e_eeprom_test(netdev, &data[I40E_ETH_TEST_EEPROM]))
eth_test->flags |= ETH_TEST_FL_FAILED;
@@ -803,6 +818,12 @@ static void i40e_diag_test(struct net_device *netdev,
if (i40e_loopback_test(netdev, &data[I40E_ETH_TEST_LOOPBACK]))
eth_test->flags |= ETH_TEST_FL_FAILED;
+ /* run reg test last, a reset is required after it */
+ if (i40e_reg_test(netdev, &data[I40E_ETH_TEST_REG]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ clear_bit(__I40E_TESTING, &pf->state);
+ i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
} else {
/* Online tests */
netif_info(pf, drv, netdev, "online testing starting\n");
@@ -816,7 +837,6 @@ static void i40e_diag_test(struct net_device *netdev,
data[I40E_ETH_TEST_INTR] = 0;
data[I40E_ETH_TEST_LOOPBACK] = 0;
}
- clear_bit(__I40E_TESTING, &pf->state);
netif_info(pf, drv, netdev, "testing finished\n");
}
@@ -1452,6 +1472,94 @@ static int i40e_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
return ret;
}
+/**
+ * i40e_max_channels - get Max number of combined channels supported
+ * @vsi: vsi pointer
+ **/
+static unsigned int i40e_max_channels(struct i40e_vsi *vsi)
+{
+	/* TODO: This code assumes DCB and FD are disabled for now. */
+ return vsi->alloc_queue_pairs;
+}
+
+/**
+ * i40e_get_channels - Get the currently enabled and max supported channels
+ * @netdev: network interface device structure
+ * @ch: ethtool channels structure
+ *
+ * We don't support separate tx and rx queues as channels. The other count
+ * represents how many queues are being used for control. max_combined counts
+ * how many queue pairs we can support. They may not be mapped 1 to 1 with
+ * q_vectors since we support a lot more queue pairs than q_vectors.
+ **/
+static void i40e_get_channels(struct net_device *dev,
+ struct ethtool_channels *ch)
+{
+ struct i40e_netdev_priv *np = netdev_priv(dev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+
+ /* report maximum channels */
+ ch->max_combined = i40e_max_channels(vsi);
+
+ /* report info for other vector */
+ ch->other_count = (pf->flags & I40E_FLAG_FDIR_ENABLED) ? 1 : 0;
+ ch->max_other = ch->other_count;
+
+ /* Note: This code assumes DCB is disabled for now. */
+ ch->combined_count = vsi->num_queue_pairs;
+}
+
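From userspace, the new get_channels hook is reached through the standard ETHTOOL_GCHANNELS ioctl. A minimal sketch, assuming an interface named eth0 and with error handling trimmed:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_channels ch = { .cmd = ETHTOOL_GCHANNELS };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* assumed name */
	ifr.ifr_data = (void *)&ch;

	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		printf("combined %u of max %u, other %u\n",
		       ch.combined_count, ch.max_combined, ch.other_count);
	close(fd);
	return 0;
}

The same structure, with combined_count filled in and cmd set to ETHTOOL_SCHANNELS, feeds the i40e_set_channels path below (ethtool -L in practice).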
+/**
+ * i40e_set_channels - Set the new channels count.
+ * @netdev: network interface device structure
+ * @ch: ethtool channels structure
+ *
+ * The new channel count may not be the same as requested by the user,
+ * since it gets rounded down to a power-of-2 value.
+ **/
+static int i40e_set_channels(struct net_device *dev,
+ struct ethtool_channels *ch)
+{
+ struct i40e_netdev_priv *np = netdev_priv(dev);
+ unsigned int count = ch->combined_count;
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ int new_count;
+
+ /* We do not support setting channels for any other VSI at present */
+ if (vsi->type != I40E_VSI_MAIN)
+ return -EINVAL;
+
+ /* verify they are not requesting separate vectors */
+ if (!count || ch->rx_count || ch->tx_count)
+ return -EINVAL;
+
+ /* verify other_count has not changed */
+ if (ch->other_count != ((pf->flags & I40E_FLAG_FDIR_ENABLED) ? 1 : 0))
+ return -EINVAL;
+
+ /* verify the number of channels does not exceed hardware limits */
+ if (count > i40e_max_channels(vsi))
+ return -EINVAL;
+
+ /* update feature limits from largest to smallest supported values */
+ /* TODO: Flow director limit, DCB etc */
+
+ /* cap RSS limit */
+ if (count > pf->rss_size_max)
+ count = pf->rss_size_max;
+
+ /* use rss_reconfig to rebuild with new queue count and update traffic
+ * class queue mapping
+ */
+ new_count = i40e_reconfig_rss_queues(pf, count);
+ if (new_count > 1)
+ return 0;
+ else
+ return -EINVAL;
+}
+
static const struct ethtool_ops i40e_ethtool_ops = {
.get_settings = i40e_get_settings,
.get_drvinfo = i40e_get_drvinfo,
@@ -1476,6 +1584,8 @@ static const struct ethtool_ops i40e_ethtool_ops = {
.get_ethtool_stats = i40e_get_ethtool_stats,
.get_coalesce = i40e_get_coalesce,
.set_coalesce = i40e_set_coalesce,
+ .get_channels = i40e_get_channels,
+ .set_channels = i40e_set_channels,
.get_ts_info = i40e_get_ts_info,
};
diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_hmc.c
index 901804af8b0e..bcedf3fb1143 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_hmc.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.c
@@ -47,10 +47,10 @@ i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw,
u64 direct_mode_sz)
{
enum i40e_memory_type mem_type __attribute__((unused));
- i40e_status ret_code = 0;
struct i40e_hmc_sd_entry *sd_entry;
bool dma_mem_alloc_done = false;
struct i40e_dma_mem mem;
+ i40e_status ret_code;
u64 alloc_len;
if (NULL == hmc_info->sd_table.sd_entry) {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index a6291e23fe5f..fba0aada062a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -48,7 +48,7 @@ static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
static void i40e_handle_reset_warning(struct i40e_pf *pf);
static int i40e_add_vsi(struct i40e_vsi *vsi);
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
-static int i40e_setup_pf_switch(struct i40e_pf *pf);
+static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
static int i40e_setup_misc_vector(struct i40e_pf *pf);
static void i40e_determine_queue_usage(struct i40e_pf *pf);
static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
@@ -354,6 +354,9 @@ static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
int i;
+ if (test_bit(__I40E_DOWN, &vsi->state))
+ return stats;
+
rcu_read_lock();
for (i = 0; i < vsi->num_queue_pairs; i++) {
struct i40e_ring *tx_ring, *rx_ring;
@@ -3966,6 +3969,15 @@ static int i40e_open(struct net_device *netdev)
if (err)
goto err_setup_rx;
+ /* Notify the stack of the actual queue counts. */
+ err = netif_set_real_num_tx_queues(netdev, pf->num_tx_queues);
+ if (err)
+ goto err_set_queues;
+
+ err = netif_set_real_num_rx_queues(netdev, pf->num_rx_queues);
+ if (err)
+ goto err_set_queues;
+
err = i40e_up_complete(vsi);
if (err)
goto err_up_complete;
@@ -3982,6 +3994,7 @@ static int i40e_open(struct net_device *netdev)
err_up_complete:
i40e_down(vsi);
+err_set_queues:
i40e_vsi_free_irq(vsi);
err_setup_rx:
i40e_vsi_free_rx_resources(vsi);
@@ -4119,6 +4132,19 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
}
/**
+ * i40e_do_reset_safe - Protected reset path for userland calls.
+ * @pf: board private structure
+ * @reset_flags: which reset is requested
+ *
+ **/
+void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags)
+{
+ rtnl_lock();
+ i40e_do_reset(pf, reset_flags);
+ rtnl_unlock();
+}
+
+/**
* i40e_handle_lan_overflow_event - Handler for LAN queue overflow event
* @pf: board private structure
* @e: event info posted on ARQ
@@ -4363,6 +4389,7 @@ static void i40e_reset_subtask(struct i40e_pf *pf)
{
u32 reset_flags = 0;
+ rtnl_lock();
if (test_bit(__I40E_REINIT_REQUESTED, &pf->state)) {
reset_flags |= (1 << __I40E_REINIT_REQUESTED);
clear_bit(__I40E_REINIT_REQUESTED, &pf->state);
@@ -4385,7 +4412,7 @@ static void i40e_reset_subtask(struct i40e_pf *pf)
*/
if (test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) {
i40e_handle_reset_warning(pf);
- return;
+ goto unlock;
}
/* If we're already down or resetting, just bail */
@@ -4393,6 +4420,9 @@ static void i40e_reset_subtask(struct i40e_pf *pf)
!test_bit(__I40E_DOWN, &pf->state) &&
!test_bit(__I40E_CONFIG_BUSY, &pf->state))
i40e_do_reset(pf, reset_flags);
+
+unlock:
+ rtnl_unlock();
}
/**
@@ -4757,8 +4787,9 @@ static int i40e_prep_for_reset(struct i40e_pf *pf)
/**
 * i40e_reset_and_rebuild - reset and rebuild using a saved config
* @pf: board private structure
+ * @reinit: if the Main VSI needs to be re-initialized.
**/
-static void i40e_reset_and_rebuild(struct i40e_pf *pf)
+static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
{
struct i40e_driver_version dv;
struct i40e_hw *hw = &pf->hw;
@@ -4806,7 +4837,7 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf)
}
/* do basic switch setup */
- ret = i40e_setup_pf_switch(pf);
+ ret = i40e_setup_pf_switch(pf, reinit);
if (ret)
goto end_core_reset;
@@ -4897,7 +4928,7 @@ static void i40e_handle_reset_warning(struct i40e_pf *pf)
ret = i40e_prep_for_reset(pf);
if (!ret)
- i40e_reset_and_rebuild(pf);
+ i40e_reset_and_rebuild(pf, false);
}
/**
@@ -5078,11 +5109,12 @@ static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
/**
* i40e_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the vsi
 * @vsi: VSI pointer
+ * @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
*
* On error: returns error code (negative)
* On success: returns 0
**/
-static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi)
+static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors)
{
int size;
int ret = 0;
@@ -5094,12 +5126,14 @@ static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi)
return -ENOMEM;
vsi->rx_rings = &vsi->tx_rings[vsi->alloc_queue_pairs];
- /* allocate memory for q_vector pointers */
- size = sizeof(struct i40e_q_vectors *) * vsi->num_q_vectors;
- vsi->q_vectors = kzalloc(size, GFP_KERNEL);
- if (!vsi->q_vectors) {
- ret = -ENOMEM;
- goto err_vectors;
+ if (alloc_qvectors) {
+ /* allocate memory for q_vector pointers */
+ size = sizeof(struct i40e_q_vectors *) * vsi->num_q_vectors;
+ vsi->q_vectors = kzalloc(size, GFP_KERNEL);
+ if (!vsi->q_vectors) {
+ ret = -ENOMEM;
+ goto err_vectors;
+ }
}
return ret;
@@ -5169,7 +5203,7 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
if (ret)
goto err_rings;
- ret = i40e_vsi_alloc_arrays(vsi);
+ ret = i40e_vsi_alloc_arrays(vsi, true);
if (ret)
goto err_rings;
@@ -5191,15 +5225,18 @@ unlock_pf:
/**
* i40e_vsi_free_arrays - Free queue and vector pointer arrays for the VSI
 * @vsi: VSI pointer
+ * @free_qvectors: a bool to specify if q_vectors need to be freed.
*
* On error: returns error code (negative)
* On success: returns 0
**/
-static void i40e_vsi_free_arrays(struct i40e_vsi *vsi)
+static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors)
{
/* free the ring and vector containers */
- kfree(vsi->q_vectors);
- vsi->q_vectors = NULL;
+ if (free_qvectors) {
+ kfree(vsi->q_vectors);
+ vsi->q_vectors = NULL;
+ }
kfree(vsi->tx_rings);
vsi->tx_rings = NULL;
vsi->rx_rings = NULL;
@@ -5241,7 +5278,7 @@ static int i40e_vsi_clear(struct i40e_vsi *vsi)
i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
- i40e_vsi_free_arrays(vsi);
+ i40e_vsi_free_arrays(vsi, true);
pf->vsi[vsi->idx] = NULL;
if (vsi->idx < pf->next_vsi)
@@ -5391,15 +5428,18 @@ static int i40e_init_msix(struct i40e_pf *pf)
/* The number of vectors we'll request will be comprised of:
* - Add 1 for "other" cause for Admin Queue events, etc.
* - The number of LAN queue pairs
- * already adjusted for the NUMA node
- * assumes symmetric Tx/Rx pairing
+ * - Queues being used for RSS.
+	 *     We don't need as many as max_rss_size vectors;
+	 *     use rss_size instead in the calculation, since that
+	 *     is governed by the number of CPUs in the system.
+ * - assumes symmetric Tx/Rx pairing
* - The number of VMDq pairs
* Once we count this up, try the request.
*
* If we can't get what we want, we'll simplify to nearly nothing
* and try again. If that still fails, we punt.
*/
- pf->num_lan_msix = pf->num_lan_qps;
+ pf->num_lan_msix = pf->num_lan_qps - (pf->rss_size_max - pf->rss_size);
pf->num_vmdq_msix = pf->num_vmdq_qps;
v_budget = 1 + pf->num_lan_msix;
v_budget += (pf->num_vmdq_vsis * pf->num_vmdq_msix);
@@ -5680,6 +5720,42 @@ static int i40e_config_rss(struct i40e_pf *pf)
}
/**
+ * i40e_reconfig_rss_queues - change number of queues for rss and rebuild
+ * @pf: board private structure
+ * @queue_count: the requested queue count for rss.
+ *
+ * Returns 0 if RSS is not enabled; if enabled, returns the final RSS queue
+ * count, which may differ from the requested queue count.
+ **/
+int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
+{
+ if (!(pf->flags & I40E_FLAG_RSS_ENABLED))
+ return 0;
+
+ queue_count = min_t(int, queue_count, pf->rss_size_max);
+ queue_count = rounddown_pow_of_two(queue_count);
+
+ if (queue_count != pf->rss_size) {
+ if (pf->queues_left < (queue_count - pf->rss_size)) {
+ dev_info(&pf->pdev->dev,
+ "Not enough queues to do RSS on %d queues: remaining queues %d\n",
+ queue_count, pf->queues_left);
+ return pf->rss_size;
+ }
+ i40e_prep_for_reset(pf);
+
+ pf->num_lan_qps += (queue_count - pf->rss_size);
+ pf->queues_left -= (queue_count - pf->rss_size);
+ pf->rss_size = queue_count;
+
+ i40e_reset_and_rebuild(pf, true);
+ i40e_config_rss(pf);
+ }
+ dev_info(&pf->pdev->dev, "RSS count: %d\n", pf->rss_size);
+ return pf->rss_size;
+}
+
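The cap-then-round step above can be modelled in isolation. A standalone sketch; rss_clamp is a stand-in for the min_t()/rounddown_pow_of_two() pair, not a kernel helper:

/* model of the queue-count clamping in i40e_reconfig_rss_queues() */
static int rss_clamp(int requested, int rss_size_max)
{
	int q = requested < rss_size_max ? requested : rss_size_max;

	/* rounddown_pow_of_two(): keep only the highest set bit */
	while (q & (q - 1))
		q &= q - 1;
	return q;
}

/* e.g. rss_clamp(10, 16) == 8, rss_clamp(20, 16) == 16 */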
+/**
* i40e_sw_init - Initialize general software structures (struct i40e_pf)
* @pf: board private structure to initialize
*
@@ -5718,8 +5794,7 @@ static int i40e_sw_init(struct i40e_pf *pf)
pf->hw.func_caps.num_tx_qp);
if (pf->hw.func_caps.rss) {
pf->flags |= I40E_FLAG_RSS_ENABLED;
- pf->rss_size = min_t(int, pf->rss_size_max,
- nr_cpus_node(numa_node_id()));
+ pf->rss_size = min_t(int, pf->rss_size_max, num_online_cpus());
} else {
pf->rss_size = 1;
}
@@ -5861,7 +5936,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
int etherdev_size;
etherdev_size = sizeof(struct i40e_netdev_priv);
- netdev = alloc_etherdev_mq(etherdev_size, vsi->num_queue_pairs);
+ netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs);
if (!netdev)
return -ENOMEM;
@@ -6254,6 +6329,69 @@ vector_setup_out:
}
/**
+ * i40e_vsi_reinit_setup - return and reallocate resources for a VSI
+ * @vsi: pointer to the vsi.
+ *
+ * This re-allocates a vsi's queue resources.
+ *
+ * Returns pointer to the successfully allocated and configured VSI sw struct
+ * on success, or NULL on failure.
+ **/
+static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
+{
+ struct i40e_pf *pf = vsi->back;
+ u8 enabled_tc;
+ int ret;
+
+ i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
+ i40e_vsi_clear_rings(vsi);
+
+ i40e_vsi_free_arrays(vsi, false);
+ i40e_set_num_rings_in_vsi(vsi);
+ ret = i40e_vsi_alloc_arrays(vsi, false);
+ if (ret)
+ goto err_vsi;
+
+ ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx);
+ if (ret < 0) {
+ dev_info(&pf->pdev->dev, "VSI %d get_lump failed %d\n",
+ vsi->seid, ret);
+ goto err_vsi;
+ }
+ vsi->base_queue = ret;
+
+ /* Update the FW view of the VSI. Force a reset of TC and queue
+ * layout configurations.
+ */
+ enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
+ pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
+ pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
+ i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
+
+ /* assign it some queues */
+ ret = i40e_alloc_rings(vsi);
+ if (ret)
+ goto err_rings;
+
+ /* map all of the rings to the q_vectors */
+ i40e_vsi_map_rings_to_vectors(vsi);
+ return vsi;
+
+err_rings:
+ i40e_vsi_free_q_vectors(vsi);
+ if (vsi->netdev_registered) {
+ vsi->netdev_registered = false;
+ unregister_netdev(vsi->netdev);
+ free_netdev(vsi->netdev);
+ vsi->netdev = NULL;
+ }
+ i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
+err_vsi:
+ i40e_vsi_clear(vsi);
+ return NULL;
+}
+
+/**
* i40e_vsi_setup - Set up a VSI by a given type
* @pf: board private structure
* @type: VSI type
@@ -6896,11 +7034,13 @@ int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
/**
* i40e_setup_pf_switch - Setup the HW switch on startup or after reset
* @pf: board private structure
+ * @reinit: if the Main VSI needs to be re-initialized.
*
* Returns 0 on success, negative value on failure
**/
-static int i40e_setup_pf_switch(struct i40e_pf *pf)
+static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
{
+ u32 rxfc = 0, txfc = 0, rxfc_reg;
int ret;
/* find out what's out there already */
@@ -6920,7 +7060,7 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf)
i40e_fdir_setup(pf);
/* first time setup */
- if (pf->lan_vsi == I40E_NO_VSI) {
+ if (pf->lan_vsi == I40E_NO_VSI || reinit) {
struct i40e_vsi *vsi = NULL;
u16 uplink_seid;
@@ -6931,8 +7071,10 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf)
uplink_seid = pf->veb[pf->lan_veb]->seid;
else
uplink_seid = pf->mac_seid;
-
- vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
+ if (pf->lan_vsi == I40E_NO_VSI)
+ vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
+ else if (reinit)
+ vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]);
if (!vsi) {
dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n");
i40e_fdir_teardown(pf);
@@ -6971,20 +7113,65 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf)
i40e_aq_get_link_info(&pf->hw, true, NULL, NULL);
i40e_link_event(pf);
- /* Initialize user-specifics link properties */
+ /* Initialize user-specific link properties */
pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
I40E_AQ_AN_COMPLETED) ? true : false);
- pf->hw.fc.requested_mode = I40E_FC_DEFAULT;
- if (pf->hw.phy.link_info.an_info &
- (I40E_AQ_LINK_PAUSE_TX | I40E_AQ_LINK_PAUSE_RX))
+ /* requested_mode is set in probe or by ethtool */
+ if (!pf->fc_autoneg_status)
+ goto no_autoneg;
+
+ if ((pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX) &&
+ (pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX))
pf->hw.fc.current_mode = I40E_FC_FULL;
else if (pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
pf->hw.fc.current_mode = I40E_FC_TX_PAUSE;
else if (pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
pf->hw.fc.current_mode = I40E_FC_RX_PAUSE;
else
- pf->hw.fc.current_mode = I40E_FC_DEFAULT;
+ pf->hw.fc.current_mode = I40E_FC_NONE;
+
+ /* sync the flow control settings with the auto-neg values */
+ switch (pf->hw.fc.current_mode) {
+ case I40E_FC_FULL:
+ txfc = 1;
+ rxfc = 1;
+ break;
+ case I40E_FC_TX_PAUSE:
+ txfc = 1;
+ rxfc = 0;
+ break;
+ case I40E_FC_RX_PAUSE:
+ txfc = 0;
+ rxfc = 1;
+ break;
+ case I40E_FC_NONE:
+ case I40E_FC_DEFAULT:
+ txfc = 0;
+ rxfc = 0;
+ break;
+ case I40E_FC_PFC:
+ /* TBD */
+ break;
+ /* no default case, we have to handle all possibilities here */
+ }
+
+ wr32(&pf->hw, I40E_PRTDCB_FCCFG, txfc << I40E_PRTDCB_FCCFG_TFCE_SHIFT);
+
+ rxfc_reg = rd32(&pf->hw, I40E_PRTDCB_MFLCN) &
+ ~I40E_PRTDCB_MFLCN_RFCE_MASK;
+ rxfc_reg |= (rxfc << I40E_PRTDCB_MFLCN_RFCE_SHIFT);
+ wr32(&pf->hw, I40E_PRTDCB_MFLCN, rxfc_reg);
+
+ goto fc_complete;
+
+no_autoneg:
+ /* disable L2 flow control, user can turn it on if they wish */
+ wr32(&pf->hw, I40E_PRTDCB_FCCFG, 0);
+ wr32(&pf->hw, I40E_PRTDCB_MFLCN, rd32(&pf->hw, I40E_PRTDCB_MFLCN) &
+ ~I40E_PRTDCB_MFLCN_RFCE_MASK);
+
+fc_complete:
return ret;
}
@@ -6998,7 +7185,7 @@ static u16 i40e_set_rss_size(struct i40e_pf *pf, int queues_left)
int num_tc0;
num_tc0 = min_t(int, queues_left, pf->rss_size_max);
- num_tc0 = min_t(int, num_tc0, nr_cpus_node(numa_node_id()));
+ num_tc0 = min_t(int, num_tc0, num_online_cpus());
num_tc0 = rounddown_pow_of_two(num_tc0);
return num_tc0;
@@ -7049,7 +7236,7 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
pf->rss_size = i40e_set_rss_size(pf, queues_left);
queues_left -= pf->rss_size;
- pf->num_lan_qps = pf->rss_size;
+ pf->num_lan_qps = pf->rss_size_max;
} else if (pf->flags & I40E_FLAG_RSS_ENABLED &&
!(pf->flags & I40E_FLAG_FDIR_ENABLED) &&
@@ -7068,7 +7255,7 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
return;
}
- pf->num_lan_qps = pf->rss_size + accum_tc_size;
+ pf->num_lan_qps = pf->rss_size_max + accum_tc_size;
} else if (pf->flags & I40E_FLAG_RSS_ENABLED &&
(pf->flags & I40E_FLAG_FDIR_ENABLED) &&
@@ -7084,7 +7271,7 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
return;
}
- pf->num_lan_qps = pf->rss_size;
+ pf->num_lan_qps = pf->rss_size_max;
} else if (pf->flags & I40E_FLAG_RSS_ENABLED &&
(pf->flags & I40E_FLAG_FDIR_ENABLED) &&
@@ -7104,7 +7291,7 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
return;
}
- pf->num_lan_qps = pf->rss_size + accum_tc_size;
+ pf->num_lan_qps = pf->rss_size_max + accum_tc_size;
} else {
dev_info(&pf->pdev->dev,
@@ -7126,6 +7313,7 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps);
}
+ pf->queues_left = queues_left;
return;
}
@@ -7278,6 +7466,9 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_pf_reset;
}
+ /* set up a default setting for link flow control */
+ pf->hw.fc.requested_mode = I40E_FC_NONE;
+
err = i40e_init_adminq(hw);
dev_info(&pdev->dev, "%s\n", i40e_fw_version_str(hw));
if (((hw->nvm.version & I40E_NVM_VERSION_HI_MASK)
@@ -7355,7 +7546,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_switch_setup;
}
- err = i40e_setup_pf_switch(pf);
+ err = i40e_setup_pf_switch(pf, false);
if (err) {
dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
goto err_vsis;
@@ -7558,7 +7749,11 @@ static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
dev_info(&pdev->dev, "%s: error %d\n", __func__, error);
/* shutdown all operations */
- i40e_pf_quiesce_all_vsi(pf);
+ if (!test_bit(__I40E_SUSPENDED, &pf->state)) {
+ rtnl_lock();
+ i40e_prep_for_reset(pf);
+ rtnl_unlock();
+ }
/* Request a slot reset */
return PCI_ERS_RESULT_NEED_RESET;
@@ -7621,9 +7816,95 @@ static void i40e_pci_error_resume(struct pci_dev *pdev)
struct i40e_pf *pf = pci_get_drvdata(pdev);
dev_info(&pdev->dev, "%s\n", __func__);
+ if (test_bit(__I40E_SUSPENDED, &pf->state))
+ return;
+
+ rtnl_lock();
i40e_handle_reset_warning(pf);
+	rtnl_unlock();
+}
+
+/**
+ * i40e_shutdown - PCI callback for shutting down
+ * @pdev: PCI device information struct
+ **/
+static void i40e_shutdown(struct pci_dev *pdev)
+{
+ struct i40e_pf *pf = pci_get_drvdata(pdev);
+
+ set_bit(__I40E_SUSPENDED, &pf->state);
+ set_bit(__I40E_DOWN, &pf->state);
+ rtnl_lock();
+ i40e_prep_for_reset(pf);
+ rtnl_unlock();
+
+ if (system_state == SYSTEM_POWER_OFF) {
+ pci_wake_from_d3(pdev, false); /* No WoL support yet */
+ pci_set_power_state(pdev, PCI_D3hot);
+ }
+}
+
+#ifdef CONFIG_PM
+/**
+ * i40e_suspend - PCI callback for moving to D3
+ * @pdev: PCI device information struct
+ **/
+static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct i40e_pf *pf = pci_get_drvdata(pdev);
+
+ set_bit(__I40E_SUSPENDED, &pf->state);
+ set_bit(__I40E_DOWN, &pf->state);
+ rtnl_lock();
+ i40e_prep_for_reset(pf);
+ rtnl_unlock();
+
+ pci_wake_from_d3(pdev, false); /* No WoL support yet */
+ pci_set_power_state(pdev, PCI_D3hot);
+
+ return 0;
+}
+
+/**
+ * i40e_resume - PCI callback for waking up from D3
+ * @pdev: PCI device information struct
+ **/
+static int i40e_resume(struct pci_dev *pdev)
+{
+ struct i40e_pf *pf = pci_get_drvdata(pdev);
+ u32 err;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+	/* pci_restore_state() clears dev->state_saved, so
+ * call pci_save_state() again to restore it.
+ */
+ pci_save_state(pdev);
+
+ err = pci_enable_device_mem(pdev);
+ if (err) {
+ dev_err(&pdev->dev,
+ "%s: Cannot enable PCI device from suspend\n",
+ __func__);
+ return err;
+ }
+ pci_set_master(pdev);
+
+ /* no wakeup events while running */
+ pci_wake_from_d3(pdev, false);
+
+ /* handling the reset will rebuild the device state */
+ if (test_and_clear_bit(__I40E_SUSPENDED, &pf->state)) {
+ clear_bit(__I40E_DOWN, &pf->state);
+ rtnl_lock();
+ i40e_reset_and_rebuild(pf, false);
+ rtnl_unlock();
+ }
+
+ return 0;
}
+#endif
static const struct pci_error_handlers i40e_err_handler = {
.error_detected = i40e_pci_error_detected,
.slot_reset = i40e_pci_error_slot_reset,
@@ -7635,6 +7916,11 @@ static struct pci_driver i40e_driver = {
.id_table = i40e_pci_tbl,
.probe = i40e_probe,
.remove = i40e_remove,
+#ifdef CONFIG_PM
+ .suspend = i40e_suspend,
+ .resume = i40e_resume,
+#endif
+ .shutdown = i40e_shutdown,
.err_handler = &i40e_err_handler,
.sriov_configure = i40e_pci_sriov_configure,
};
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index 8ae644570263..72a6028d24e0 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -49,6 +49,10 @@
#define I40E_VF_DEVICE_ID 0x154C
#define I40E_VF_HV_DEVICE_ID 0x1571
+#define i40e_is_40G_device(d) ((d) == I40E_QSFP_A_DEVICE_ID || \
+ (d) == I40E_QSFP_B_DEVICE_ID || \
+ (d) == I40E_QSFP_C_DEVICE_ID)
+
#define I40E_FW_API_VERSION_MAJOR 0x0001
#define I40E_FW_API_VERSION_MINOR 0x0000
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index e7bdd47bafcf..68e1f8eff9b2 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -521,6 +521,7 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
f = i40e_add_filter(vsi, vf->default_lan_addr.addr,
0, true, false);
}
+
if (!f) {
dev_err(&pf->pdev->dev, "Unable to add ucast filter\n");
ret = -ENOMEM;
@@ -763,6 +764,7 @@ static void i40e_free_vf_res(struct i40e_vf *vf)
vf->lan_vsi_index = 0;
vf->lan_vsi_id = 0;
}
+
 	/* reset some of the state variables keeping
* track of the resources
*/
@@ -1777,30 +1779,6 @@ error_param:
}
/**
- * i40e_vc_fcoe_msg
- * @vf: pointer to the vf info
- * @msg: pointer to the msg buffer
- * @msglen: msg length
- *
- * called from the vf for the fcoe msgs
- **/
-static int i40e_vc_fcoe_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
-{
- i40e_status aq_ret = 0;
-
- if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
- !test_bit(I40E_VF_STAT_FCOEENA, &vf->vf_states)) {
- aq_ret = I40E_ERR_PARAM;
- goto error_param;
- }
- aq_ret = I40E_ERR_NOT_IMPLEMENTED;
-
-error_param:
- /* send the response to the vf */
- return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_FCOE, aq_ret);
-}
-
-/**
* i40e_vc_validate_vf_msg
* @vf: pointer to the vf info
* @msg: pointer to the msg buffer
@@ -1973,9 +1951,6 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
case I40E_VIRTCHNL_OP_GET_STATS:
ret = i40e_vc_get_stats_msg(vf, msg, msglen);
break;
- case I40E_VIRTCHNL_OP_FCOE:
- ret = i40e_vc_fcoe_msg(vf, msg, msglen);
- break;
case I40E_VIRTCHNL_OP_UNKNOWN:
default:
dev_err(&pf->pdev->dev,
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index b3d9327f78b9..a2565ce22b7c 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -2080,6 +2080,7 @@ static void port_start(struct mv643xx_eth_private *mp)
mv643xx_eth_get_settings(mp->dev, &cmd);
phy_init_hw(mp->phy);
mv643xx_eth_set_settings(mp->dev, &cmd);
+ phy_start(mp->phy);
}
/*
@@ -2275,7 +2276,8 @@ static int mv643xx_eth_stop(struct net_device *dev)
del_timer_sync(&mp->rx_oom);
netif_carrier_off(dev);
-
+ if (mp->phy)
+ phy_stop(mp->phy);
free_irq(dev->irq, dev);
port_reset(mp);
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 676c3c057bfb..dfff8e7a55e2 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -14,6 +14,7 @@
#include "mcdi_pcol.h"
#include "nic.h"
#include "workarounds.h"
+#include "selftest.h"
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/wait.h>
@@ -263,6 +264,8 @@ static int efx_ef10_probe(struct efx_nic *efx)
if (rc)
goto fail3;
+ efx_ptp_probe(efx, NULL);
+
return 0;
fail3:
@@ -277,11 +280,17 @@ fail1:
static int efx_ef10_free_vis(struct efx_nic *efx)
{
- int rc = efx_mcdi_rpc(efx, MC_CMD_FREE_VIS, NULL, 0, NULL, 0, NULL);
+ MCDI_DECLARE_BUF_OUT_OR_ERR(outbuf, 0);
+ size_t outlen;
+ int rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FREE_VIS, NULL, 0,
+ outbuf, sizeof(outbuf), &outlen);
/* -EALREADY means nothing to free, so ignore */
if (rc == -EALREADY)
rc = 0;
+ if (rc)
+ efx_mcdi_display_error(efx, MC_CMD_FREE_VIS, 0, outbuf, outlen,
+ rc);
return rc;
}
@@ -465,9 +474,10 @@ static void efx_ef10_remove(struct efx_nic *efx)
struct efx_ef10_nic_data *nic_data = efx->nic_data;
int rc;
+ efx_ptp_remove(efx);
+
efx_mcdi_mon_remove(efx);
- /* This needs to be after efx_ptp_remove_channel() with no filters */
efx_ef10_rx_free_indir_table(efx);
if (nic_data->wc_membase)
@@ -901,6 +911,7 @@ static int efx_ef10_try_update_nic_stats(struct efx_nic *efx)
return -EAGAIN;
/* Update derived statistics */
+ efx_nic_fix_nodesc_drop_stat(efx, &stats[EF10_STAT_rx_nodesc_drops]);
stats[EF10_STAT_rx_good_bytes] =
stats[EF10_STAT_rx_bytes] -
stats[EF10_STAT_rx_bytes_minus_good_bytes];
@@ -1242,7 +1253,6 @@ static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
fail:
WARN_ON(true);
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
}
static void efx_ef10_tx_fini(struct efx_tx_queue *tx_queue)
@@ -1256,7 +1266,7 @@ static void efx_ef10_tx_fini(struct efx_tx_queue *tx_queue)
MCDI_SET_DWORD(inbuf, FINI_TXQ_IN_INSTANCE,
tx_queue->queue);
- rc = efx_mcdi_rpc(efx, MC_CMD_FINI_TXQ, inbuf, sizeof(inbuf),
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_TXQ, inbuf, sizeof(inbuf),
outbuf, sizeof(outbuf), &outlen);
if (rc && rc != -EALREADY)
@@ -1265,7 +1275,8 @@ static void efx_ef10_tx_fini(struct efx_tx_queue *tx_queue)
return;
fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ efx_mcdi_display_error(efx, MC_CMD_FINI_TXQ, MC_CMD_FINI_TXQ_IN_LEN,
+ outbuf, outlen, rc);
}
static void efx_ef10_tx_remove(struct efx_tx_queue *tx_queue)
@@ -1461,8 +1472,9 @@ static void efx_ef10_rx_init(struct efx_rx_queue *rx_queue)
MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_LABEL, efx_rx_queue_index(rx_queue));
MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_INSTANCE,
efx_rx_queue_index(rx_queue));
- MCDI_POPULATE_DWORD_1(inbuf, INIT_RXQ_IN_FLAGS,
- INIT_RXQ_IN_FLAG_PREFIX, 1);
+ MCDI_POPULATE_DWORD_2(inbuf, INIT_RXQ_IN_FLAGS,
+ INIT_RXQ_IN_FLAG_PREFIX, 1,
+ INIT_RXQ_IN_FLAG_TIMESTAMP, 1);
MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_OWNER_ID, 0);
MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
@@ -1480,14 +1492,9 @@ static void efx_ef10_rx_init(struct efx_rx_queue *rx_queue)
rc = efx_mcdi_rpc(efx, MC_CMD_INIT_RXQ, inbuf, inlen,
outbuf, sizeof(outbuf), &outlen);
- if (rc)
- goto fail;
+ WARN_ON(rc);
return;
-
-fail:
- WARN_ON(true);
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
}
static void efx_ef10_rx_fini(struct efx_rx_queue *rx_queue)
@@ -1501,7 +1508,7 @@ static void efx_ef10_rx_fini(struct efx_rx_queue *rx_queue)
MCDI_SET_DWORD(inbuf, FINI_RXQ_IN_INSTANCE,
efx_rx_queue_index(rx_queue));
- rc = efx_mcdi_rpc(efx, MC_CMD_FINI_RXQ, inbuf, sizeof(inbuf),
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_RXQ, inbuf, sizeof(inbuf),
outbuf, sizeof(outbuf), &outlen);
if (rc && rc != -EALREADY)
@@ -1510,7 +1517,8 @@ static void efx_ef10_rx_fini(struct efx_rx_queue *rx_queue)
return;
fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ efx_mcdi_display_error(efx, MC_CMD_FINI_RXQ, MC_CMD_FINI_RXQ_IN_LEN,
+ outbuf, outlen, rc);
}
static void efx_ef10_rx_remove(struct efx_rx_queue *rx_queue)
@@ -1647,15 +1655,7 @@ static int efx_ef10_ev_init(struct efx_channel *channel)
rc = efx_mcdi_rpc(efx, MC_CMD_INIT_EVQ, inbuf, inlen,
outbuf, sizeof(outbuf), &outlen);
- if (rc)
- goto fail;
-
/* IRQ return is ignored */
-
- return 0;
-
-fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
@@ -1669,7 +1669,7 @@ static void efx_ef10_ev_fini(struct efx_channel *channel)
MCDI_SET_DWORD(inbuf, FINI_EVQ_IN_INSTANCE, channel->channel);
- rc = efx_mcdi_rpc(efx, MC_CMD_FINI_EVQ, inbuf, sizeof(inbuf),
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_EVQ, inbuf, sizeof(inbuf),
outbuf, sizeof(outbuf), &outlen);
if (rc && rc != -EALREADY)
@@ -1678,7 +1678,8 @@ static void efx_ef10_ev_fini(struct efx_channel *channel)
return;
fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ efx_mcdi_display_error(efx, MC_CMD_FINI_EVQ, MC_CMD_FINI_EVQ_IN_LEN,
+ outbuf, outlen, rc);
}
static void efx_ef10_ev_remove(struct efx_channel *channel)
@@ -1765,6 +1766,8 @@ static int efx_ef10_handle_rx_event(struct efx_channel *channel,
((1 << ESF_DZ_RX_DSC_PTR_LBITS_WIDTH) - 1));
if (n_descs != rx_queue->scatter_n + 1) {
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
/* detect rx abort */
if (unlikely(n_descs == rx_queue->scatter_n)) {
WARN_ON(rx_bytes != 0);
@@ -1772,10 +1775,13 @@ static int efx_ef10_handle_rx_event(struct efx_channel *channel,
return 0;
}
- if (unlikely(rx_queue->scatter_n != 0)) {
- /* Scattered packet completions cannot be
- * merged, so something has gone wrong.
- */
+ /* Check that RX completion merging is valid, i.e.
+ * the current firmware supports it and this is a
+ * non-scattered packet.
+ */
+ if (!(nic_data->datapath_caps &
+ (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN)) ||
+ rx_queue->scatter_n != 0 || rx_cont) {
efx_ef10_handle_rx_bad_lbits(
rx_queue, next_ptr_lbits,
(rx_queue->removed_count +
@@ -1901,7 +1907,7 @@ static void efx_ef10_handle_driver_generated_event(struct efx_channel *channel,
* events, so efx_process_channel() won't refill the
* queue. Refill it here
*/
- efx_fast_push_rx_descriptors(&channel->rx_queue);
+ efx_fast_push_rx_descriptors(&channel->rx_queue, true);
break;
default:
netif_err(efx, hw, efx->net_dev,
@@ -2257,6 +2263,8 @@ static int efx_ef10_filter_push(struct efx_nic *efx,
outbuf, sizeof(outbuf), NULL);
if (rc == 0)
*handle = MCDI_QWORD(outbuf, FILTER_OP_OUT_HANDLE);
+ if (rc == -ENOSPC)
+ rc = -EBUSY; /* to match efx_farch_filter_insert() */
return rc;
}
@@ -3195,6 +3203,87 @@ static int efx_ef10_mac_reconfigure(struct efx_nic *efx)
return efx_mcdi_set_mac(efx);
}
+static int efx_ef10_start_bist(struct efx_nic *efx, u32 bist_type)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_START_BIST_IN_LEN);
+
+ MCDI_SET_DWORD(inbuf, START_BIST_IN_TYPE, bist_type);
+ return efx_mcdi_rpc(efx, MC_CMD_START_BIST, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+}
+
+/* MC BISTs follow a different poll mechanism to phy BISTs.
+ * The BIST is done in the poll handler on the MC, and the MCDI command
+ * will block until the BIST is done.
+ */
+static int efx_ef10_poll_bist(struct efx_nic *efx)
+{
+ int rc;
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_POLL_BIST_OUT_LEN);
+ size_t outlen;
+ u32 result;
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_POLL_BIST, NULL, 0,
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc != 0)
+ return rc;
+
+ if (outlen < MC_CMD_POLL_BIST_OUT_LEN)
+ return -EIO;
+
+ result = MCDI_DWORD(outbuf, POLL_BIST_OUT_RESULT);
+ switch (result) {
+ case MC_CMD_POLL_BIST_PASSED:
+ netif_dbg(efx, hw, efx->net_dev, "BIST passed.\n");
+ return 0;
+ case MC_CMD_POLL_BIST_TIMEOUT:
+ netif_err(efx, hw, efx->net_dev, "BIST timed out\n");
+ return -EIO;
+ case MC_CMD_POLL_BIST_FAILED:
+ netif_err(efx, hw, efx->net_dev, "BIST failed.\n");
+ return -EIO;
+ default:
+ netif_err(efx, hw, efx->net_dev,
+			  "BIST returned unknown result %u\n", result);
+ return -EIO;
+ }
+}
+
+static int efx_ef10_run_bist(struct efx_nic *efx, u32 bist_type)
+{
+ int rc;
+
+ netif_dbg(efx, drv, efx->net_dev, "starting BIST type %u\n", bist_type);
+
+ rc = efx_ef10_start_bist(efx, bist_type);
+ if (rc != 0)
+ return rc;
+
+ return efx_ef10_poll_bist(efx);
+}
+
+static int
+efx_ef10_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
+{
+ int rc, rc2;
+
+ efx_reset_down(efx, RESET_TYPE_WORLD);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_ENABLE_OFFLINE_BIST,
+ NULL, 0, NULL, 0, NULL);
+ if (rc != 0)
+ goto out;
+
+ tests->memory = efx_ef10_run_bist(efx, MC_CMD_MC_MEM_BIST) ? -1 : 1;
+ tests->registers = efx_ef10_run_bist(efx, MC_CMD_REG_BIST) ? -1 : 1;
+
+ rc = efx_mcdi_reset(efx, RESET_TYPE_WORLD);
+
+out:
+ rc2 = efx_reset_up(efx, RESET_TYPE_WORLD, rc == 0);
+ return rc ? rc : rc2;
+}
+
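The chip test above is driven from userspace through the generic ETHTOOL_TEST ioctl (ethtool -t <iface> offline). A sketch of that trigger; n_tests would normally be discovered via ETHTOOL_GSSET_INFO and is assumed here, and error handling is trimmed:

#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

static int run_offline_selftest(int fd, const char *ifname,
				unsigned int n_tests)
{
	struct ethtool_test *test;
	struct ifreq ifr;
	int rc;

	test = calloc(1, sizeof(*test) + n_tests * sizeof(__u64));
	if (!test)
		return -1;
	test->cmd = ETHTOOL_TEST;
	test->flags = ETH_TEST_FL_OFFLINE;	/* take the BIST path above */
	test->len = n_tests;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)test;

	rc = ioctl(fd, SIOCETHTOOL, &ifr);
	if (rc == 0 && (test->flags & ETH_TEST_FL_FAILED))
		rc = -1;	/* at least one BIST failed */
	free(test);
	return rc;
}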
#ifdef CONFIG_SFC_MTD
struct efx_ef10_nvram_type_info {
@@ -3213,6 +3302,7 @@ static const struct efx_ef10_nvram_type_info efx_ef10_nvram_types[] = {
{ NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT1, 0, 1, "sfc_exp_rom_cfg" },
{ NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT2, 0, 2, "sfc_exp_rom_cfg" },
{ NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3, 0, 3, "sfc_exp_rom_cfg" },
+ { NVRAM_PARTITION_TYPE_LICENSE, 0, 0, "sfc_license" },
{ NVRAM_PARTITION_TYPE_PHY_MIN, 0xff, 0, "sfc_phy_fw" },
};
@@ -3320,6 +3410,119 @@ static void efx_ef10_ptp_write_host_time(struct efx_nic *efx, u32 host_time)
_efx_writed(efx, cpu_to_le32(host_time), ER_DZ_MC_DB_LWRD);
}
+static int efx_ef10_rx_enable_timestamping(struct efx_channel *channel,
+ bool temp)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_LEN);
+ int rc;
+
+ if (channel->sync_events_state == SYNC_EVENTS_REQUESTED ||
+ channel->sync_events_state == SYNC_EVENTS_VALID ||
+ (temp && channel->sync_events_state == SYNC_EVENTS_DISABLED))
+ return 0;
+ channel->sync_events_state = SYNC_EVENTS_REQUESTED;
+
+ MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_TIME_EVENT_SUBSCRIBE);
+ MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
+ MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE,
+ channel->channel);
+
+ rc = efx_mcdi_rpc(channel->efx, MC_CMD_PTP,
+ inbuf, sizeof(inbuf), NULL, 0, NULL);
+
+ if (rc != 0)
+ channel->sync_events_state = temp ? SYNC_EVENTS_QUIESCENT :
+ SYNC_EVENTS_DISABLED;
+
+ return rc;
+}
+
+static int efx_ef10_rx_disable_timestamping(struct efx_channel *channel,
+ bool temp)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_LEN);
+ int rc;
+
+ if (channel->sync_events_state == SYNC_EVENTS_DISABLED ||
+ (temp && channel->sync_events_state == SYNC_EVENTS_QUIESCENT))
+ return 0;
+ if (channel->sync_events_state == SYNC_EVENTS_QUIESCENT) {
+ channel->sync_events_state = SYNC_EVENTS_DISABLED;
+ return 0;
+ }
+ channel->sync_events_state = temp ? SYNC_EVENTS_QUIESCENT :
+ SYNC_EVENTS_DISABLED;
+
+ MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_TIME_EVENT_UNSUBSCRIBE);
+ MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
+ MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_UNSUBSCRIBE_CONTROL,
+ MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_SINGLE);
+ MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_UNSUBSCRIBE_QUEUE,
+ channel->channel);
+
+ rc = efx_mcdi_rpc(channel->efx, MC_CMD_PTP,
+ inbuf, sizeof(inbuf), NULL, 0, NULL);
+
+ return rc;
+}
+
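Read together, the two helpers above implement a small per-channel state machine over sync_events_state, as the names in the code suggest: SYNC_EVENTS_DISABLED (timestamping off by user request), SYNC_EVENTS_QUIESCENT (temporarily off across a datapath stop, eligible to be re-armed), SYNC_EVENTS_REQUESTED (subscribe issued to the MC) and SYNC_EVENTS_VALID (sync events flowing). The temp argument selects whether a disable parks a channel in QUIESCENT rather than DISABLED, which is what lets a datapath restart restore the user's previous setting.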
+static int efx_ef10_ptp_set_ts_sync_events(struct efx_nic *efx, bool en,
+ bool temp)
+{
+ int (*set)(struct efx_channel *channel, bool temp);
+ struct efx_channel *channel;
+
+ set = en ?
+ efx_ef10_rx_enable_timestamping :
+ efx_ef10_rx_disable_timestamping;
+
+ efx_for_each_channel(channel, efx) {
+ int rc = set(channel, temp);
+ if (en && rc != 0) {
+ efx_ef10_ptp_set_ts_sync_events(efx, false, temp);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+static int efx_ef10_ptp_set_ts_config(struct efx_nic *efx,
+ struct hwtstamp_config *init)
+{
+ int rc;
+
+ switch (init->rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ efx_ef10_ptp_set_ts_sync_events(efx, false, false);
+ /* if TX timestamping is still requested then leave PTP on */
+ return efx_ptp_change_mode(efx,
+ init->tx_type != HWTSTAMP_TX_OFF, 0);
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ init->rx_filter = HWTSTAMP_FILTER_ALL;
+ rc = efx_ptp_change_mode(efx, true, 0);
+ if (!rc)
+ rc = efx_ef10_ptp_set_ts_sync_events(efx, true, false);
+ if (rc)
+ efx_ptp_change_mode(efx, false, 0);
+ return rc;
+ default:
+ return -ERANGE;
+ }
+}
+
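The config hook above is reached via the SIOCSHWTSTAMP ioctl; note that any of the accepted PTP filters is coerced to HWTSTAMP_FILTER_ALL, which userspace observes in the structure written back. A minimal caller's-view sketch, error handling trimmed:

#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

static int enable_rx_timestamps(int fd, const char *ifname)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;

	memset(&cfg, 0, sizeof(cfg));
	cfg.tx_type = HWTSTAMP_TX_ON;
	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;

	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
		return -1;

	/* the driver upgraded the filter; expect HWTSTAMP_FILTER_ALL */
	return cfg.rx_filter;
}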
const struct efx_nic_type efx_hunt_a0_nic_type = {
.mem_map_size = efx_ef10_mem_map_size,
.probe = efx_ef10_probe,
@@ -3336,6 +3539,7 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
.describe_stats = efx_ef10_describe_stats,
.update_stats = efx_ef10_update_stats,
.start_stats = efx_mcdi_mac_start_stats,
+ .pull_stats = efx_mcdi_mac_pull_stats,
.stop_stats = efx_mcdi_mac_stop_stats,
.set_id_led = efx_mcdi_set_id_led,
.push_irq_moderation = efx_ef10_push_irq_moderation,
@@ -3345,7 +3549,7 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
.get_wol = efx_ef10_get_wol,
.set_wol = efx_ef10_set_wol,
.resume_wol = efx_port_dummy_op_void,
- /* TODO: test_chip */
+ .test_chip = efx_ef10_test_chip,
.test_nvram = efx_mcdi_nvram_test_all,
.mcdi_request = efx_ef10_mcdi_request,
.mcdi_poll_response = efx_ef10_mcdi_poll_response,
@@ -3397,11 +3601,14 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
.mtd_sync = efx_mcdi_mtd_sync,
#endif
.ptp_write_host_time = efx_ef10_ptp_write_host_time,
+ .ptp_set_ts_sync_events = efx_ef10_ptp_set_ts_sync_events,
+ .ptp_set_ts_config = efx_ef10_ptp_set_ts_config,
.revision = EFX_REV_HUNT_A0,
.max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH),
.rx_prefix_size = ES_DZ_RX_PREFIX_SIZE,
.rx_hash_offset = ES_DZ_RX_PREFIX_HASH_OFST,
+ .rx_ts_offset = ES_DZ_RX_PREFIX_TSTAMP_OFST,
.can_rx_scatter = true,
.always_rx_scatter = true,
.max_interrupt_mode = EFX_INT_MODE_MSIX,
@@ -3410,4 +3617,6 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
NETIF_F_RXHASH | NETIF_F_NTUPLE),
.mcdi_max_ver = 2,
.max_rx_ip_filters = HUNT_FILTER_TBL_ROWS,
+ .hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE |
+ 1 << HWTSTAMP_FILTER_ALL,
};
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 12f60e782868..3b5dca97ec0e 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -83,6 +83,7 @@ const char *const efx_reset_type_names[] = {
[RESET_TYPE_DMA_ERROR] = "DMA_ERROR",
[RESET_TYPE_TX_SKIP] = "TX_SKIP",
[RESET_TYPE_MC_FAILURE] = "MC_FAILURE",
+ [RESET_TYPE_MC_BIST] = "MC_BIST",
};
/* Reset workqueue. If any NIC has a hardware failure then a reset will be
@@ -91,6 +92,12 @@ const char *const efx_reset_type_names[] = {
*/
static struct workqueue_struct *reset_workqueue;
+/* How often and how many times to poll for a reset while waiting for a
+ * BIST that another function started to complete.
+ */
+#define BIST_WAIT_DELAY_MS 100
+#define BIST_WAIT_DELAY_COUNT 100
+
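For reference, these defaults bound the wait at BIST_WAIT_DELAY_COUNT * BIST_WAIT_DELAY_MS = 100 * 100 ms, i.e. roughly ten seconds.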
/**************************************************************************
*
* Configurable values
@@ -246,7 +253,7 @@ static int efx_process_channel(struct efx_channel *channel, int budget)
efx_channel_get_rx_queue(channel);
efx_rx_flush_packet(channel);
- efx_fast_push_rx_descriptors(rx_queue);
+ efx_fast_push_rx_descriptors(rx_queue, true);
}
return spent;
@@ -585,7 +592,7 @@ static void efx_start_datapath(struct efx_nic *efx)
EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
efx->type->rx_buffer_padding);
rx_buf_len = (sizeof(struct efx_rx_page_state) +
- NET_IP_ALIGN + efx->rx_dma_len);
+ efx->rx_ip_align + efx->rx_dma_len);
if (rx_buf_len <= PAGE_SIZE) {
efx->rx_scatter = efx->type->always_rx_scatter;
efx->rx_buffer_order = 0;
@@ -639,12 +646,16 @@ static void efx_start_datapath(struct efx_nic *efx)
efx_for_each_channel_rx_queue(rx_queue, channel) {
efx_init_rx_queue(rx_queue);
atomic_inc(&efx->active_queues);
- efx_nic_generate_fill_event(rx_queue);
+ efx_stop_eventq(channel);
+ efx_fast_push_rx_descriptors(rx_queue, false);
+ efx_start_eventq(channel);
}
WARN_ON(channel->rx_pkt_n_frags);
}
+ efx_ptp_start_datapath(efx);
+
if (netif_device_present(efx->net_dev))
netif_tx_wake_all_queues(efx->net_dev);
}
@@ -659,6 +670,8 @@ static void efx_stop_datapath(struct efx_nic *efx)
EFX_ASSERT_RESET_SERIALISED(efx);
BUG_ON(efx->port_enabled);
+ efx_ptp_stop_datapath(efx);
+
/* Stop RX refill */
efx_for_each_channel(channel, efx) {
efx_for_each_channel_rx_queue(rx_queue, channel)
@@ -1047,18 +1060,23 @@ static void efx_start_port(struct efx_nic *efx)
mutex_lock(&efx->mac_lock);
efx->port_enabled = true;
- /* efx_mac_work() might have been scheduled after efx_stop_port(),
- * and then cancelled by efx_flush_all() */
+ /* Ensure MAC ingress/egress is enabled */
efx->type->reconfigure_mac(efx);
mutex_unlock(&efx->mac_lock);
}
-/* Prevent efx_mac_work() and efx_monitor() from working */
+/* Cancel work for MAC reconfiguration, periodic hardware monitoring
+ * and the async self-test, wait for them to finish and prevent them
+ * being scheduled again. This doesn't cover online resets, which
+ * should only be cancelled when removing the device.
+ */
static void efx_stop_port(struct efx_nic *efx)
{
netif_dbg(efx, ifdown, efx->net_dev, "stop port\n");
+ EFX_ASSERT_RESET_SERIALISED(efx);
+
mutex_lock(&efx->mac_lock);
efx->port_enabled = false;
mutex_unlock(&efx->mac_lock);
@@ -1066,6 +1084,10 @@ static void efx_stop_port(struct efx_nic *efx)
/* Serialise against efx_set_multicast_list() */
netif_addr_lock_bh(efx->net_dev);
netif_addr_unlock_bh(efx->net_dev);
+
+ cancel_delayed_work_sync(&efx->monitor_work);
+ efx_selftest_async_cancel(efx);
+ cancel_work_sync(&efx->mac_work);
}
static void efx_fini_port(struct efx_nic *efx)
@@ -1095,6 +1117,77 @@ static void efx_remove_port(struct efx_nic *efx)
*
**************************************************************************/
+static LIST_HEAD(efx_primary_list);
+static LIST_HEAD(efx_unassociated_list);
+
+static bool efx_same_controller(struct efx_nic *left, struct efx_nic *right)
+{
+ return left->type == right->type &&
+ left->vpd_sn && right->vpd_sn &&
+ !strcmp(left->vpd_sn, right->vpd_sn);
+}
+
+static void efx_associate(struct efx_nic *efx)
+{
+ struct efx_nic *other, *next;
+
+ if (efx->primary == efx) {
+ /* Adding primary function; look for secondaries */
+
+ netif_dbg(efx, probe, efx->net_dev, "adding to primary list\n");
+ list_add_tail(&efx->node, &efx_primary_list);
+
+ list_for_each_entry_safe(other, next, &efx_unassociated_list,
+ node) {
+ if (efx_same_controller(efx, other)) {
+ list_del(&other->node);
+ netif_dbg(other, probe, other->net_dev,
+ "moving to secondary list of %s %s\n",
+ pci_name(efx->pci_dev),
+ efx->net_dev->name);
+ list_add_tail(&other->node,
+ &efx->secondary_list);
+ other->primary = efx;
+ }
+ }
+ } else {
+ /* Adding secondary function; look for primary */
+
+ list_for_each_entry(other, &efx_primary_list, node) {
+ if (efx_same_controller(efx, other)) {
+ netif_dbg(efx, probe, efx->net_dev,
+ "adding to secondary list of %s %s\n",
+ pci_name(other->pci_dev),
+ other->net_dev->name);
+ list_add_tail(&efx->node,
+ &other->secondary_list);
+ efx->primary = other;
+ return;
+ }
+ }
+
+ netif_dbg(efx, probe, efx->net_dev,
+ "adding to unassociated list\n");
+ list_add_tail(&efx->node, &efx_unassociated_list);
+ }
+}
+
+static void efx_dissociate(struct efx_nic *efx)
+{
+ struct efx_nic *other, *next;
+
+ list_del(&efx->node);
+ efx->primary = NULL;
+
+ list_for_each_entry_safe(other, next, &efx->secondary_list, node) {
+ list_del(&other->node);
+ netif_dbg(other, probe, other->net_dev,
+ "moving to unassociated list\n");
+ list_add_tail(&other->node, &efx_unassociated_list);
+ other->primary = NULL;
+ }
+}
+
/* This configures the PCI device to enable I/O and DMA. */
static int efx_init_io(struct efx_nic *efx)
{
@@ -1671,18 +1764,10 @@ static void efx_start_all(struct efx_nic *efx)
}
efx->type->start_stats(efx);
-}
-
-/* Flush all delayed work. Should only be called when no more delayed work
- * will be scheduled. This doesn't flush pending online resets (efx_reset),
- * since we're holding the rtnl_lock at this point. */
-static void efx_flush_all(struct efx_nic *efx)
-{
- /* Make sure the hardware monitor and event self-test are stopped */
- cancel_delayed_work_sync(&efx->monitor_work);
- efx_selftest_async_cancel(efx);
- /* Stop scheduled port reconfigurations */
- cancel_work_sync(&efx->mac_work);
+ efx->type->pull_stats(efx);
+ spin_lock_bh(&efx->stats_lock);
+ efx->type->update_stats(efx, NULL, NULL);
+ spin_unlock_bh(&efx->stats_lock);
}
/* Quiesce the hardware and software data path, and regular activity
@@ -1698,12 +1783,16 @@ static void efx_stop_all(struct efx_nic *efx)
if (!efx->port_enabled)
return;
+ /* update stats before we go down so we can accurately count
+ * rx_nodesc_drops
+ */
+ efx->type->pull_stats(efx);
+ spin_lock_bh(&efx->stats_lock);
+ efx->type->update_stats(efx, NULL, NULL);
+ spin_unlock_bh(&efx->stats_lock);
efx->type->stop_stats(efx);
efx_stop_port(efx);
- /* Flush efx_mac_work(), refill_workqueue, monitor_work */
- efx_flush_all(efx);
-
/* Stop the kernel transmit interface. This is only valid if
* the device is stopped or detached; otherwise the watchdog
* may fire immediately.
@@ -2196,6 +2285,8 @@ static int efx_register_netdev(struct efx_nic *efx)
efx_init_tx_queue_core_txq(tx_queue);
}
+ efx_associate(efx);
+
rtnl_unlock();
rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type);
@@ -2209,6 +2300,7 @@ static int efx_register_netdev(struct efx_nic *efx)
fail_registered:
rtnl_lock();
+ efx_dissociate(efx);
unregister_netdevice(net_dev);
fail_locked:
efx->state = STATE_UNINIT;
@@ -2385,6 +2477,24 @@ int efx_try_recovery(struct efx_nic *efx)
return 0;
}
+static void efx_wait_for_bist_end(struct efx_nic *efx)
+{
+ int i;
+
+ for (i = 0; i < BIST_WAIT_DELAY_COUNT; ++i) {
+ if (efx_mcdi_poll_reboot(efx))
+ goto out;
+ msleep(BIST_WAIT_DELAY_MS);
+ }
+
+ netif_err(efx, drv, efx->net_dev, "Warning: No MC reboot after BIST mode\n");
+out:
+ /* Either way, unset the BIST flag. If we found no reboot we probably
+ * won't recover, but we should try.
+ */
+ efx->mc_bist_for_other_fn = false;
+}
+
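
The constants above bound the polling loop in efx_wait_for_bist_end(): at most BIST_WAIT_DELAY_COUNT polls of BIST_WAIT_DELAY_MS each. A minimal illustrative sketch of the implied budget:

	/* Worst case before warning "No MC reboot after BIST mode":
	 * 100 polls * 100 ms = 10 s.
	 */
	static const unsigned int bist_wait_budget_ms =
		BIST_WAIT_DELAY_COUNT * BIST_WAIT_DELAY_MS;
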
/* The worker thread exists so that code that cannot sleep can
* schedule a reset for later.
*/
@@ -2397,6 +2507,9 @@ static void efx_reset_work(struct work_struct *data)
pending = ACCESS_ONCE(efx->reset_pending);
method = fls(pending) - 1;
+ if (method == RESET_TYPE_MC_BIST)
+ efx_wait_for_bist_end(efx);
+
if ((method == RESET_TYPE_RECOVER_OR_DISABLE ||
method == RESET_TYPE_RECOVER_OR_ALL) &&
efx_try_recovery(efx))
@@ -2435,6 +2548,7 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
case RESET_TYPE_WORLD:
case RESET_TYPE_DISABLE:
case RESET_TYPE_RECOVER_OR_DISABLE:
+ case RESET_TYPE_MC_BIST:
method = type;
netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n",
RESET_TYPE(method));
@@ -2528,6 +2642,8 @@ static int efx_init_struct(struct efx_nic *efx,
int i;
/* Initialise common structures */
+ INIT_LIST_HEAD(&efx->node);
+ INIT_LIST_HEAD(&efx->secondary_list);
spin_lock_init(&efx->biu_lock);
#ifdef CONFIG_SFC_MTD
INIT_LIST_HEAD(&efx->mtd_list);
@@ -2542,8 +2658,12 @@ static int efx_init_struct(struct efx_nic *efx,
efx->net_dev = net_dev;
efx->rx_prefix_size = efx->type->rx_prefix_size;
+ efx->rx_ip_align =
+ NET_IP_ALIGN ? (efx->rx_prefix_size + NET_IP_ALIGN) % 4 : 0;
efx->rx_packet_hash_offset =
efx->type->rx_hash_offset - efx->type->rx_prefix_size;
+ efx->rx_packet_ts_offset =
+ efx->type->rx_ts_offset - efx->type->rx_prefix_size;
spin_lock_init(&efx->stats_lock);
mutex_init(&efx->mac_lock);
efx->phy_op = &efx_dummy_phy_operations;
@@ -2584,6 +2704,8 @@ static void efx_fini_struct(struct efx_nic *efx)
for (i = 0; i < EFX_MAX_CHANNELS; i++)
kfree(efx->channel[i]);
+ kfree(efx->vpd_sn);
+
if (efx->workqueue) {
destroy_workqueue(efx->workqueue);
efx->workqueue = NULL;
@@ -2628,6 +2750,7 @@ static void efx_pci_remove(struct pci_dev *pci_dev)
/* Mark the NIC as fini, then stop the interface */
rtnl_lock();
+ efx_dissociate(efx);
dev_close(efx->net_dev);
efx_disable_interrupts(efx);
rtnl_unlock();
@@ -2654,12 +2777,12 @@ static void efx_pci_remove(struct pci_dev *pci_dev)
* always appear within the first 512 bytes.
*/
#define SFC_VPD_LEN 512
-static void efx_print_product_vpd(struct efx_nic *efx)
+static void efx_probe_vpd_strings(struct efx_nic *efx)
{
struct pci_dev *dev = efx->pci_dev;
char vpd_data[SFC_VPD_LEN];
ssize_t vpd_size;
- int i, j;
+ int ro_start, ro_size, i, j;
/* Get the vpd data from the device */
vpd_size = pci_read_vpd(dev, 0, sizeof(vpd_data), vpd_data);
@@ -2669,14 +2792,15 @@ static void efx_print_product_vpd(struct efx_nic *efx)
}
/* Get the Read only section */
- i = pci_vpd_find_tag(vpd_data, 0, vpd_size, PCI_VPD_LRDT_RO_DATA);
- if (i < 0) {
+ ro_start = pci_vpd_find_tag(vpd_data, 0, vpd_size, PCI_VPD_LRDT_RO_DATA);
+ if (ro_start < 0) {
netif_err(efx, drv, efx->net_dev, "VPD Read-only not found\n");
return;
}
- j = pci_vpd_lrdt_size(&vpd_data[i]);
- i += PCI_VPD_LRDT_TAG_SIZE;
+ ro_size = pci_vpd_lrdt_size(&vpd_data[ro_start]);
+ j = ro_size;
+ i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
if (i + j > vpd_size)
j = vpd_size - i;
@@ -2696,6 +2820,27 @@ static void efx_print_product_vpd(struct efx_nic *efx)
netif_info(efx, drv, efx->net_dev,
"Part Number : %.*s\n", j, &vpd_data[i]);
+
+ i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
+ j = ro_size;
+ i = pci_vpd_find_info_keyword(vpd_data, i, j, "SN");
+ if (i < 0) {
+ netif_err(efx, drv, efx->net_dev, "Serial number not found\n");
+ return;
+ }
+
+ j = pci_vpd_info_field_size(&vpd_data[i]);
+ i += PCI_VPD_INFO_FLD_HDR_SIZE;
+ if (i + j > vpd_size) {
+ netif_err(efx, drv, efx->net_dev, "Incomplete serial number\n");
+ return;
+ }
+
+ efx->vpd_sn = kmalloc(j + 1, GFP_KERNEL);
+ if (!efx->vpd_sn)
+ return;
+
+ snprintf(efx->vpd_sn, j + 1, "%s", &vpd_data[i]);
}
@@ -2792,7 +2937,7 @@ static int efx_pci_probe(struct pci_dev *pci_dev,
netif_info(efx, probe, efx->net_dev,
"Solarflare NIC detected\n");
- efx_print_product_vpd(efx);
+ efx_probe_vpd_strings(efx);
/* Set up basic I/O (BAR mappings etc) */
rc = efx_init_io(efx);
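
The rx_ip_align value set in efx_init_struct() above folds the RX prefix length into the usual NET_IP_ALIGN head room, so the IP header keeps its alignment after the prefix. An illustrative sketch with purely hypothetical sizes:

	/* Hypothetical prefix size, for illustration only */
	unsigned int rx_prefix_size = 14;
	unsigned int rx_ip_align = NET_IP_ALIGN ?
		(rx_prefix_size + NET_IP_ALIGN) % 4 : 0;
	/* With NET_IP_ALIGN == 2: (14 + 2) % 4 == 0, so no extra head
	 * room is needed; a 16-byte prefix gives (16 + 2) % 4 == 2.
	 */
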
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index b8235ee5d7d7..a653786fbbe7 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -37,7 +37,7 @@ int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
void efx_remove_rx_queue(struct efx_rx_queue *rx_queue);
void efx_init_rx_queue(struct efx_rx_queue *rx_queue);
void efx_fini_rx_queue(struct efx_rx_queue *rx_queue);
-void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue);
+void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic);
void efx_rx_slow_fill(unsigned long context);
void __efx_rx_packet(struct efx_channel *channel);
void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
diff --git a/drivers/net/ethernet/sfc/enum.h b/drivers/net/ethernet/sfc/enum.h
index 7fdfee019092..75ef7ef6450b 100644
--- a/drivers/net/ethernet/sfc/enum.h
+++ b/drivers/net/ethernet/sfc/enum.h
@@ -165,6 +165,7 @@ enum reset_type {
RESET_TYPE_DMA_ERROR,
RESET_TYPE_TX_SKIP,
RESET_TYPE_MC_FAILURE,
+ RESET_TYPE_MC_BIST,
RESET_TYPE_MAX,
};
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 1f529fa2edb1..fb8993806167 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -318,6 +318,8 @@ static int efx_ethtool_fill_self_tests(struct efx_nic *efx,
"eventq.int", NULL);
}
+ efx_fill_test(n++, strings, data, &tests->memory,
+ "core", 0, "memory", NULL);
efx_fill_test(n++, strings, data, &tests->registers,
"core", 0, "registers", NULL);
diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c
index ff5d322b9b49..76699f4e6e04 100644
--- a/drivers/net/ethernet/sfc/falcon.c
+++ b/drivers/net/ethernet/sfc/falcon.c
@@ -2247,6 +2247,8 @@ static int falcon_probe_nic(struct efx_nic *efx)
struct falcon_board *board;
int rc;
+ efx->primary = efx; /* only one usable function per controller */
+
/* Allocate storage for hardware specific data */
nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
if (!nic_data)
@@ -2593,6 +2595,14 @@ void falcon_start_nic_stats(struct efx_nic *efx)
spin_unlock_bh(&efx->stats_lock);
}
+/* We don't actually pull stats on Falcon. Wait 10ms so that
+ * they arrive when we call this just after start_stats.
+ */
+void falcon_pull_nic_stats(struct efx_nic *efx)
+{
+ msleep(10);
+}
+
void falcon_stop_nic_stats(struct efx_nic *efx)
{
struct falcon_nic_data *nic_data = efx->nic_data;
@@ -2672,6 +2682,7 @@ const struct efx_nic_type falcon_a1_nic_type = {
.describe_stats = falcon_describe_nic_stats,
.update_stats = falcon_update_nic_stats,
.start_stats = falcon_start_nic_stats,
+ .pull_stats = falcon_pull_nic_stats,
.stop_stats = falcon_stop_nic_stats,
.set_id_led = falcon_set_id_led,
.push_irq_moderation = falcon_push_irq_moderation,
@@ -2765,6 +2776,7 @@ const struct efx_nic_type falcon_b0_nic_type = {
.describe_stats = falcon_describe_nic_stats,
.update_stats = falcon_update_nic_stats,
.start_stats = falcon_start_nic_stats,
+ .pull_stats = falcon_pull_nic_stats,
.stop_stats = falcon_stop_nic_stats,
.set_id_led = falcon_set_id_led,
.push_irq_moderation = falcon_push_irq_moderation,
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
index c0907d884d75..984e85ee76f6 100644
--- a/drivers/net/ethernet/sfc/farch.c
+++ b/drivers/net/ethernet/sfc/farch.c
@@ -1147,7 +1147,7 @@ static void efx_farch_handle_generated_event(struct efx_channel *channel,
/* The queue must be empty, so we won't receive any rx
* events, so efx_process_channel() won't refill the
* queue. Refill it here */
- efx_fast_push_rx_descriptors(rx_queue);
+ efx_fast_push_rx_descriptors(rx_queue, true);
} else if (rx_queue && magic == EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue)) {
efx_farch_handle_drain_event(channel);
} else if (code == _EFX_CHANNEL_MAGIC_TX_DRAIN) {
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index 366c8e3e3784..0d5d7b5325e8 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -42,6 +42,7 @@ struct efx_mcdi_async_param {
unsigned int cmd;
size_t inlen;
size_t outlen;
+ bool quiet;
efx_mcdi_async_completer *complete;
unsigned long cookie;
/* followed by request/response buffer */
@@ -50,6 +51,7 @@ struct efx_mcdi_async_param {
static void efx_mcdi_timeout_async(unsigned long context);
static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
bool *was_attached_out);
+static bool efx_mcdi_poll_once(struct efx_nic *efx);
static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx)
{
@@ -100,6 +102,10 @@ int efx_mcdi_init(struct efx_nic *efx)
netif_err(efx, probe, efx->net_dev,
"Host already registered with MCPU\n");
+ if (efx->mcdi->fn_flags &
+ (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY))
+ efx->primary = efx;
+
return 0;
}
@@ -190,6 +196,8 @@ static int efx_mcdi_errno(unsigned int mcdi_err)
TRANSLATE_ERROR(EALREADY);
TRANSLATE_ERROR(ENOSPC);
#undef TRANSLATE_ERROR
+ case MC_CMD_ERR_ENOTSUP:
+ return -EOPNOTSUPP;
case MC_CMD_ERR_ALLOC_FAIL:
return -ENOBUFS;
case MC_CMD_ERR_MAC_EXIST:
@@ -237,6 +245,21 @@ static void efx_mcdi_read_response_header(struct efx_nic *efx)
}
}
+static bool efx_mcdi_poll_once(struct efx_nic *efx)
+{
+ struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+
+ rmb();
+ if (!efx->type->mcdi_poll_response(efx))
+ return false;
+
+ spin_lock_bh(&mcdi->iface_lock);
+ efx_mcdi_read_response_header(efx);
+ spin_unlock_bh(&mcdi->iface_lock);
+
+ return true;
+}
+
static int efx_mcdi_poll(struct efx_nic *efx)
{
struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
@@ -272,18 +295,13 @@ static int efx_mcdi_poll(struct efx_nic *efx)
time = jiffies;
- rmb();
- if (efx->type->mcdi_poll_response(efx))
+ if (efx_mcdi_poll_once(efx))
break;
if (time_after(time, finish))
return -ETIMEDOUT;
}
- spin_lock_bh(&mcdi->iface_lock);
- efx_mcdi_read_response_header(efx);
- spin_unlock_bh(&mcdi->iface_lock);
-
/* Return rc=0 like wait_event_timeout() */
return 0;
}
@@ -391,8 +409,9 @@ static bool efx_mcdi_complete_async(struct efx_mcdi_iface *mcdi, bool timeout)
{
struct efx_nic *efx = mcdi->efx;
struct efx_mcdi_async_param *async;
- size_t hdr_len, data_len;
+ size_t hdr_len, data_len, err_len;
efx_dword_t *outbuf;
+ MCDI_DECLARE_BUF_OUT_OR_ERR(errbuf, 0);
int rc;
if (cmpxchg(&mcdi->state,
@@ -433,6 +452,13 @@ static bool efx_mcdi_complete_async(struct efx_mcdi_iface *mcdi, bool timeout)
outbuf = (efx_dword_t *)(async + 1);
efx->type->mcdi_read_response(efx, outbuf, hdr_len,
min(async->outlen, data_len));
+ if (!timeout && rc && !async->quiet) {
+ err_len = min(sizeof(errbuf), data_len);
+ efx->type->mcdi_read_response(efx, errbuf, hdr_len,
+ sizeof(errbuf));
+ efx_mcdi_display_error(efx, async->cmd, async->inlen, errbuf,
+ err_len, rc);
+ }
async->complete(efx, async->cookie, rc, outbuf, data_len);
kfree(async);
@@ -508,18 +534,129 @@ efx_mcdi_check_supported(struct efx_nic *efx, unsigned int cmd, size_t inlen)
return 0;
}
+static int _efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
+ efx_dword_t *outbuf, size_t outlen,
+ size_t *outlen_actual, bool quiet)
+{
+ struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+ MCDI_DECLARE_BUF_OUT_OR_ERR(errbuf, 0);
+ int rc;
+
+ if (mcdi->mode == MCDI_MODE_POLL)
+ rc = efx_mcdi_poll(efx);
+ else
+ rc = efx_mcdi_await_completion(efx);
+
+ if (rc != 0) {
+ netif_err(efx, hw, efx->net_dev,
+ "MC command 0x%x inlen %d mode %d timed out\n",
+ cmd, (int)inlen, mcdi->mode);
+
+ if (mcdi->mode == MCDI_MODE_EVENTS && efx_mcdi_poll_once(efx)) {
+ netif_err(efx, hw, efx->net_dev,
+ "MCDI request was completed without an event\n");
+ rc = 0;
+ }
+
+ /* Close the race with efx_mcdi_ev_cpl() executing just too late
+ * and completing a request we've just cancelled, by ensuring
+ * that the seqno check therein fails.
+ */
+ spin_lock_bh(&mcdi->iface_lock);
+ ++mcdi->seqno;
+ ++mcdi->credits;
+ spin_unlock_bh(&mcdi->iface_lock);
+ }
+
+ if (rc != 0) {
+ if (outlen_actual)
+ *outlen_actual = 0;
+ } else {
+ size_t hdr_len, data_len, err_len;
+
+ /* At the very least we need a memory barrier here to ensure
+ * we pick up changes from efx_mcdi_ev_cpl(). Protect against
+ * a spurious efx_mcdi_ev_cpl() running concurrently by
+ * acquiring the iface_lock. */
+ spin_lock_bh(&mcdi->iface_lock);
+ rc = mcdi->resprc;
+ hdr_len = mcdi->resp_hdr_len;
+ data_len = mcdi->resp_data_len;
+ err_len = min(sizeof(errbuf), data_len);
+ spin_unlock_bh(&mcdi->iface_lock);
+
+ BUG_ON(rc > 0);
+
+ efx->type->mcdi_read_response(efx, outbuf, hdr_len,
+ min(outlen, data_len));
+ if (outlen_actual)
+ *outlen_actual = data_len;
+
+ efx->type->mcdi_read_response(efx, errbuf, hdr_len, err_len);
+
+ if (cmd == MC_CMD_REBOOT && rc == -EIO) {
+ /* Don't reset if MC_CMD_REBOOT returns EIO */
+ } else if (rc == -EIO || rc == -EINTR) {
+ netif_err(efx, hw, efx->net_dev, "MC fatal error %d\n",
+ -rc);
+ efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
+ } else if (rc && !quiet) {
+ efx_mcdi_display_error(efx, cmd, inlen, errbuf, err_len,
+ rc);
+ }
+
+ if (rc == -EIO || rc == -EINTR) {
+ msleep(MCDI_STATUS_SLEEP_MS);
+ efx_mcdi_poll_reboot(efx);
+ mcdi->new_epoch = true;
+ }
+ }
+
+ efx_mcdi_release(mcdi);
+ return rc;
+}
+
+static int _efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd,
+ const efx_dword_t *inbuf, size_t inlen,
+ efx_dword_t *outbuf, size_t outlen,
+ size_t *outlen_actual, bool quiet)
+{
+ int rc;
+
+ rc = efx_mcdi_rpc_start(efx, cmd, inbuf, inlen);
+ if (rc) {
+ if (outlen_actual)
+ *outlen_actual = 0;
+ return rc;
+ }
+ return _efx_mcdi_rpc_finish(efx, cmd, inlen, outbuf, outlen,
+ outlen_actual, quiet);
+}
+
int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd,
const efx_dword_t *inbuf, size_t inlen,
efx_dword_t *outbuf, size_t outlen,
size_t *outlen_actual)
{
- int rc;
+ return _efx_mcdi_rpc(efx, cmd, inbuf, inlen, outbuf, outlen,
+ outlen_actual, false);
+}
- rc = efx_mcdi_rpc_start(efx, cmd, inbuf, inlen);
- if (rc)
- return rc;
- return efx_mcdi_rpc_finish(efx, cmd, inlen,
- outbuf, outlen, outlen_actual);
+/* Normally, on receiving an error code in the MCDI response,
+ * efx_mcdi_rpc will log an error message containing (among other
+ * things) the raw error code, by means of efx_mcdi_display_error.
+ * This _quiet version suppresses that; if the caller wishes to log
+ * the error conditionally on the return code, it should call this
+ * function and is then responsible for calling efx_mcdi_display_error
+ * as needed.
+ */
+int efx_mcdi_rpc_quiet(struct efx_nic *efx, unsigned cmd,
+ const efx_dword_t *inbuf, size_t inlen,
+ efx_dword_t *outbuf, size_t outlen,
+ size_t *outlen_actual)
+{
+ return _efx_mcdi_rpc(efx, cmd, inbuf, inlen, outbuf, outlen,
+ outlen_actual, true);
}
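
A minimal sketch of the intended quiet-RPC calling pattern, mirroring the MC_CMD_GET_ASSERTS conversion later in this patch; the surrounding function context is assumed:

	MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_ASSERTS_IN_LEN);
	MCDI_DECLARE_BUF_OUT_OR_ERR(outbuf, MC_CMD_GET_ASSERTS_OUT_LEN);
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, GET_ASSERTS_IN_CLEAR, 1);
	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_GET_ASSERTS,
				inbuf, MC_CMD_GET_ASSERTS_IN_LEN,
				outbuf, sizeof(outbuf), &outlen);
	if (rc)		/* the caller decides what is worth logging */
		efx_mcdi_display_error(efx, MC_CMD_GET_ASSERTS,
				       MC_CMD_GET_ASSERTS_IN_LEN,
				       outbuf, outlen, rc);
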
int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
@@ -532,35 +669,19 @@ int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
if (rc)
return rc;
+ if (efx->mc_bist_for_other_fn)
+ return -ENETDOWN;
+
efx_mcdi_acquire_sync(mcdi);
efx_mcdi_send_request(efx, cmd, inbuf, inlen);
return 0;
}
-/**
- * efx_mcdi_rpc_async - Schedule an MCDI command to run asynchronously
- * @efx: NIC through which to issue the command
- * @cmd: Command type number
- * @inbuf: Command parameters
- * @inlen: Length of command parameters, in bytes
- * @outlen: Length to allocate for response buffer, in bytes
- * @complete: Function to be called on completion or cancellation.
- * @cookie: Arbitrary value to be passed to @complete.
- *
- * This function does not sleep and therefore may be called in atomic
- * context. It will fail if event queues are disabled or if MCDI
- * event completions have been disabled due to an error.
- *
- * If it succeeds, the @complete function will be called exactly once
- * in atomic context, when one of the following occurs:
- * (a) the completion event is received (in NAPI context)
- * (b) event queues are disabled (in the process that disables them)
- * (c) the request times-out (in timer context)
- */
-int
-efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
- const efx_dword_t *inbuf, size_t inlen, size_t outlen,
- efx_mcdi_async_completer *complete, unsigned long cookie)
+static int _efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
+ const efx_dword_t *inbuf, size_t inlen,
+ size_t outlen,
+ efx_mcdi_async_completer *complete,
+ unsigned long cookie, bool quiet)
{
struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
struct efx_mcdi_async_param *async;
@@ -570,6 +691,9 @@ efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
if (rc)
return rc;
+ if (efx->mc_bist_for_other_fn)
+ return -ENETDOWN;
+
async = kmalloc(sizeof(*async) + ALIGN(max(inlen, outlen), 4),
GFP_ATOMIC);
if (!async)
@@ -578,6 +702,7 @@ efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
async->cmd = cmd;
async->inlen = inlen;
async->outlen = outlen;
+ async->quiet = quiet;
async->complete = complete;
async->cookie = cookie;
memcpy(async + 1, inbuf, inlen);
@@ -606,71 +731,73 @@ efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
return rc;
}
+/**
+ * efx_mcdi_rpc_async - Schedule an MCDI command to run asynchronously
+ * @efx: NIC through which to issue the command
+ * @cmd: Command type number
+ * @inbuf: Command parameters
+ * @inlen: Length of command parameters, in bytes
+ * @outlen: Length to allocate for response buffer, in bytes
+ * @complete: Function to be called on completion or cancellation.
+ * @cookie: Arbitrary value to be passed to @complete.
+ *
+ * This function does not sleep and therefore may be called in atomic
+ * context. It will fail if event queues are disabled or if MCDI
+ * event completions have been disabled due to an error.
+ *
+ * If it succeeds, the @complete function will be called exactly once
+ * in atomic context, when one of the following occurs:
+ * (a) the completion event is received (in NAPI context)
+ * (b) event queues are disabled (in the process that disables them)
+ * (c) the request times-out (in timer context)
+ */
+int
+efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
+ const efx_dword_t *inbuf, size_t inlen, size_t outlen,
+ efx_mcdi_async_completer *complete, unsigned long cookie)
+{
+ return _efx_mcdi_rpc_async(efx, cmd, inbuf, inlen, outlen, complete,
+ cookie, false);
+}
+
+int efx_mcdi_rpc_async_quiet(struct efx_nic *efx, unsigned int cmd,
+ const efx_dword_t *inbuf, size_t inlen,
+ size_t outlen, efx_mcdi_async_completer *complete,
+ unsigned long cookie)
+{
+ return _efx_mcdi_rpc_async(efx, cmd, inbuf, inlen, outlen, complete,
+ cookie, true);
+}
+
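
For reference, a hedged sketch of an asynchronous caller; the completer name, command and cookie are hypothetical:

	/* Called exactly once, in atomic context; with the non-quiet
	 * variant any MCDI error has already been logged by the time
	 * this runs.
	 */
	static void example_complete(struct efx_nic *efx,
				     unsigned long cookie, int rc,
				     efx_dword_t *outbuf,
				     size_t outlen_actual)
	{
		if (!rc)
			; /* consume outbuf here */
	}

	rc = efx_mcdi_rpc_async(efx, cmd, inbuf, inlen, outlen,
				example_complete, 0);
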
int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
efx_dword_t *outbuf, size_t outlen,
size_t *outlen_actual)
{
- struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
- int rc;
-
- if (mcdi->mode == MCDI_MODE_POLL)
- rc = efx_mcdi_poll(efx);
- else
- rc = efx_mcdi_await_completion(efx);
-
- if (rc != 0) {
- /* Close the race with efx_mcdi_ev_cpl() executing just too late
- * and completing a request we've just cancelled, by ensuring
- * that the seqno check therein fails.
- */
- spin_lock_bh(&mcdi->iface_lock);
- ++mcdi->seqno;
- ++mcdi->credits;
- spin_unlock_bh(&mcdi->iface_lock);
-
- netif_err(efx, hw, efx->net_dev,
- "MC command 0x%x inlen %d mode %d timed out\n",
- cmd, (int)inlen, mcdi->mode);
- } else {
- size_t hdr_len, data_len;
-
- /* At the very least we need a memory barrier here to ensure
- * we pick up changes from efx_mcdi_ev_cpl(). Protect against
- * a spurious efx_mcdi_ev_cpl() running concurrently by
- * acquiring the iface_lock. */
- spin_lock_bh(&mcdi->iface_lock);
- rc = mcdi->resprc;
- hdr_len = mcdi->resp_hdr_len;
- data_len = mcdi->resp_data_len;
- spin_unlock_bh(&mcdi->iface_lock);
-
- BUG_ON(rc > 0);
+ return _efx_mcdi_rpc_finish(efx, cmd, inlen, outbuf, outlen,
+ outlen_actual, false);
+}
- if (rc == 0) {
- efx->type->mcdi_read_response(efx, outbuf, hdr_len,
- min(outlen, data_len));
- if (outlen_actual != NULL)
- *outlen_actual = data_len;
- } else if (cmd == MC_CMD_REBOOT && rc == -EIO)
- ; /* Don't reset if MC_CMD_REBOOT returns EIO */
- else if (rc == -EIO || rc == -EINTR) {
- netif_err(efx, hw, efx->net_dev, "MC fatal error %d\n",
- -rc);
- efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
- } else
- netif_dbg(efx, hw, efx->net_dev,
- "MC command 0x%x inlen %d failed rc=%d\n",
- cmd, (int)inlen, -rc);
+int efx_mcdi_rpc_finish_quiet(struct efx_nic *efx, unsigned cmd, size_t inlen,
+ efx_dword_t *outbuf, size_t outlen,
+ size_t *outlen_actual)
+{
+ return _efx_mcdi_rpc_finish(efx, cmd, inlen, outbuf, outlen,
+ outlen_actual, true);
+}
- if (rc == -EIO || rc == -EINTR) {
- msleep(MCDI_STATUS_SLEEP_MS);
- efx_mcdi_poll_reboot(efx);
- mcdi->new_epoch = true;
- }
- }
+void efx_mcdi_display_error(struct efx_nic *efx, unsigned cmd,
+ size_t inlen, efx_dword_t *outbuf,
+ size_t outlen, int rc)
+{
+ int code = 0, err_arg = 0;
- efx_mcdi_release(mcdi);
- return rc;
+ if (outlen >= MC_CMD_ERR_CODE_OFST + 4)
+ code = MCDI_DWORD(outbuf, ERR_CODE);
+ if (outlen >= MC_CMD_ERR_ARG_OFST + 4)
+ err_arg = MCDI_DWORD(outbuf, ERR_ARG);
+ netif_err(efx, hw, efx->net_dev,
+ "MC command 0x%x inlen %d failed rc=%d (raw=%d) arg=%d\n",
+ cmd, (int)inlen, rc, code, err_arg);
}
/* Switch to polled MCDI completions. This can be called in various
@@ -815,6 +942,30 @@ static void efx_mcdi_ev_death(struct efx_nic *efx, int rc)
spin_unlock(&mcdi->iface_lock);
}
+/* The MC is going down into BIST mode. Set the BIST flag to block
+ * new MCDI, cancel any outstanding MCDI and schedule a BIST-type reset
+ * (which doesn't actually execute a reset; it waits for the controlling
+ * function to reset it).
+ */
+static void efx_mcdi_ev_bist(struct efx_nic *efx)
+{
+ struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+
+ spin_lock(&mcdi->iface_lock);
+ efx->mc_bist_for_other_fn = true;
+ if (efx_mcdi_complete_sync(mcdi)) {
+ if (mcdi->mode == MCDI_MODE_EVENTS) {
+ mcdi->resprc = -EIO;
+ mcdi->resp_hdr_len = 0;
+ mcdi->resp_data_len = 0;
+ ++mcdi->credits;
+ }
+ }
+ mcdi->new_epoch = true;
+ efx_schedule_reset(efx, RESET_TYPE_MC_BIST);
+ spin_unlock(&mcdi->iface_lock);
+}
+
/* Called from falcon_process_eventq for MCDI events */
void efx_mcdi_process_event(struct efx_channel *channel,
efx_qword_t *event)
@@ -848,14 +999,18 @@ void efx_mcdi_process_event(struct efx_channel *channel,
efx_mcdi_sensor_event(efx, event);
break;
case MCDI_EVENT_CODE_SCHEDERR:
- netif_info(efx, hw, efx->net_dev,
- "MC Scheduler error address=0x%x\n", data);
+ netif_dbg(efx, hw, efx->net_dev,
+ "MC Scheduler alert (0x%x)\n", data);
break;
case MCDI_EVENT_CODE_REBOOT:
case MCDI_EVENT_CODE_MC_REBOOT:
netif_info(efx, hw, efx->net_dev, "MC Reboot\n");
efx_mcdi_ev_death(efx, -EIO);
break;
+ case MCDI_EVENT_CODE_MC_BIST:
+ netif_info(efx, hw, efx->net_dev, "MC entered BIST mode\n");
+ efx_mcdi_ev_bist(efx);
+ break;
case MCDI_EVENT_CODE_MAC_STATS_DMA:
/* MAC stats are gathered lazily. We can ignore this. */
break;
@@ -867,6 +1022,9 @@ void efx_mcdi_process_event(struct efx_channel *channel,
case MCDI_EVENT_CODE_PTP_PPS:
efx_ptp_event(efx, event);
break;
+ case MCDI_EVENT_CODE_PTP_TIME:
+ efx_time_sync_event(channel, event);
+ break;
case MCDI_EVENT_CODE_TX_FLUSH:
case MCDI_EVENT_CODE_RX_FLUSH:
/* Two flush events will be sent: one to the same event
@@ -981,13 +1139,27 @@ static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
goto fail;
}
+ if (driver_operating) {
+ if (outlen >= MC_CMD_DRV_ATTACH_EXT_OUT_LEN) {
+ efx->mcdi->fn_flags =
+ MCDI_DWORD(outbuf,
+ DRV_ATTACH_EXT_OUT_FUNC_FLAGS);
+ } else {
+ /* Synthesise flags for Siena */
+ efx->mcdi->fn_flags =
+ 1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL |
+ 1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED |
+ (efx_port_num(efx) == 0) <<
+ MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY;
+ }
+ }
+
/* We currently assume we have control of the external link
* and are completely trusted by firmware. Abort probing
* if that's not true for this function.
*/
if (driver_operating &&
- outlen >= MC_CMD_DRV_ATTACH_EXT_OUT_LEN &&
- (MCDI_DWORD(outbuf, DRV_ATTACH_EXT_OUT_FUNC_FLAGS) &
+ (efx->mcdi->fn_flags &
(1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL |
1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED)) !=
(1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL |
@@ -1078,13 +1250,6 @@ int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, u32 dest_evq)
rc = efx_mcdi_rpc(efx, MC_CMD_LOG_CTRL, inbuf, sizeof(inbuf),
NULL, 0, NULL);
- if (rc)
- goto fail;
-
- return 0;
-
-fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
@@ -1201,7 +1366,7 @@ fail1:
static int efx_mcdi_read_assertion(struct efx_nic *efx)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_ASSERTS_IN_LEN);
- MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_ASSERTS_OUT_LEN);
+ MCDI_DECLARE_BUF_OUT_OR_ERR(outbuf, MC_CMD_GET_ASSERTS_OUT_LEN);
unsigned int flags, index;
const char *reason;
size_t outlen;
@@ -1216,13 +1381,17 @@ static int efx_mcdi_read_assertion(struct efx_nic *efx)
retry = 2;
do {
MCDI_SET_DWORD(inbuf, GET_ASSERTS_IN_CLEAR, 1);
- rc = efx_mcdi_rpc(efx, MC_CMD_GET_ASSERTS,
- inbuf, MC_CMD_GET_ASSERTS_IN_LEN,
- outbuf, sizeof(outbuf), &outlen);
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_GET_ASSERTS,
+ inbuf, MC_CMD_GET_ASSERTS_IN_LEN,
+ outbuf, sizeof(outbuf), &outlen);
} while ((rc == -EINTR || rc == -EIO) && retry-- > 0);
- if (rc)
+ if (rc) {
+ efx_mcdi_display_error(efx, MC_CMD_GET_ASSERTS,
+ MC_CMD_GET_ASSERTS_IN_LEN, outbuf,
+ outlen, rc);
return rc;
+ }
if (outlen < MC_CMD_GET_ASSERTS_OUT_LEN)
return -EIO;
@@ -1300,18 +1469,11 @@ void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
rc = efx_mcdi_rpc(efx, MC_CMD_SET_ID_LED, inbuf, sizeof(inbuf),
NULL, 0, NULL);
- if (rc)
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
- __func__, rc);
}
static int efx_mcdi_reset_port(struct efx_nic *efx)
{
- int rc = efx_mcdi_rpc(efx, MC_CMD_ENTITY_RESET, NULL, 0, NULL, 0, NULL);
- if (rc)
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
- __func__, rc);
- return rc;
+ return efx_mcdi_rpc(efx, MC_CMD_ENTITY_RESET, NULL, 0, NULL, 0, NULL);
}
static int efx_mcdi_reset_mc(struct efx_nic *efx)
@@ -1328,7 +1490,6 @@ static int efx_mcdi_reset_mc(struct efx_nic *efx)
return 0;
if (rc == 0)
rc = -EIO;
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
@@ -1430,13 +1591,6 @@ int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id)
rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_REMOVE, inbuf, sizeof(inbuf),
NULL, 0, NULL);
- if (rc)
- goto fail;
-
- return 0;
-
-fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
@@ -1477,13 +1631,6 @@ int efx_mcdi_wol_filter_reset(struct efx_nic *efx)
int rc;
rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_RESET, NULL, 0, NULL, 0, NULL);
- if (rc)
- goto fail;
-
- return 0;
-
-fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
@@ -1513,13 +1660,6 @@ static int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type)
rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_START, inbuf, sizeof(inbuf),
NULL, 0, NULL);
- if (rc)
- goto fail;
-
- return 0;
-
-fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
@@ -1539,14 +1679,10 @@ static int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type,
rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_READ, inbuf, sizeof(inbuf),
outbuf, sizeof(outbuf), &outlen);
if (rc)
- goto fail;
+ return rc;
memcpy(buffer, MCDI_PTR(outbuf, NVRAM_READ_OUT_READ_BUFFER), length);
return 0;
-
-fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
- return rc;
}
static int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
@@ -1566,13 +1702,6 @@ static int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf,
ALIGN(MC_CMD_NVRAM_WRITE_IN_LEN(length), 4),
NULL, 0, NULL);
- if (rc)
- goto fail;
-
- return 0;
-
-fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
@@ -1590,13 +1719,6 @@ static int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type,
rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_ERASE, inbuf, sizeof(inbuf),
NULL, 0, NULL);
- if (rc)
- goto fail;
-
- return 0;
-
-fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
@@ -1611,13 +1733,6 @@ static int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type)
rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_FINISH, inbuf, sizeof(inbuf),
NULL, 0, NULL);
- if (rc)
- goto fail;
-
- return 0;
-
-fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h
index 15816cacb548..52931aebf3c3 100644
--- a/drivers/net/ethernet/sfc/mcdi.h
+++ b/drivers/net/ethernet/sfc/mcdi.h
@@ -94,12 +94,14 @@ struct efx_mcdi_mtd_partition {
* struct efx_mcdi_data - extra state for NICs that implement MCDI
* @iface: Interface/protocol state
* @hwmon: Hardware monitor state
+ * @fn_flags: Flags for this function, as returned by %MC_CMD_DRV_ATTACH.
*/
struct efx_mcdi_data {
struct efx_mcdi_iface iface;
#ifdef CONFIG_SFC_MCDI_MON
struct efx_mcdi_mon hwmon;
#endif
+ u32 fn_flags;
};
#ifdef CONFIG_SFC_MCDI_MON
@@ -116,12 +118,19 @@ void efx_mcdi_fini(struct efx_nic *efx);
int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd, const efx_dword_t *inbuf,
size_t inlen, efx_dword_t *outbuf, size_t outlen,
size_t *outlen_actual);
+int efx_mcdi_rpc_quiet(struct efx_nic *efx, unsigned cmd,
+ const efx_dword_t *inbuf, size_t inlen,
+ efx_dword_t *outbuf, size_t outlen,
+ size_t *outlen_actual);
int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
const efx_dword_t *inbuf, size_t inlen);
int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
efx_dword_t *outbuf, size_t outlen,
size_t *outlen_actual);
+int efx_mcdi_rpc_finish_quiet(struct efx_nic *efx, unsigned cmd,
+ size_t inlen, efx_dword_t *outbuf,
+ size_t outlen, size_t *outlen_actual);
typedef void efx_mcdi_async_completer(struct efx_nic *efx,
unsigned long cookie, int rc,
@@ -131,6 +140,15 @@ int efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
const efx_dword_t *inbuf, size_t inlen, size_t outlen,
efx_mcdi_async_completer *complete,
unsigned long cookie);
+int efx_mcdi_rpc_async_quiet(struct efx_nic *efx, unsigned int cmd,
+ const efx_dword_t *inbuf, size_t inlen,
+ size_t outlen,
+ efx_mcdi_async_completer *complete,
+ unsigned long cookie);
+
+void efx_mcdi_display_error(struct efx_nic *efx, unsigned cmd,
+ size_t inlen, efx_dword_t *outbuf,
+ size_t outlen, int rc);
int efx_mcdi_poll_reboot(struct efx_nic *efx);
void efx_mcdi_mode_poll(struct efx_nic *efx);
@@ -147,6 +165,8 @@ void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
*/
#define MCDI_DECLARE_BUF(_name, _len) \
efx_dword_t _name[DIV_ROUND_UP(_len, 4)]
+#define MCDI_DECLARE_BUF_OUT_OR_ERR(_name, _len) \
+ MCDI_DECLARE_BUF(_name, max_t(size_t, _len, 8))
#define _MCDI_PTR(_buf, _offset) \
((u8 *)(_buf) + (_offset))
#define MCDI_PTR(_buf, _field) \
@@ -301,6 +321,7 @@ int efx_mcdi_set_mac(struct efx_nic *efx);
#define EFX_MC_STATS_GENERATION_INVALID ((__force __le64)(-1))
void efx_mcdi_mac_start_stats(struct efx_nic *efx);
void efx_mcdi_mac_stop_stats(struct efx_nic *efx);
+void efx_mcdi_mac_pull_stats(struct efx_nic *efx);
bool efx_mcdi_mac_check_fault(struct efx_nic *efx);
enum reset_type efx_mcdi_map_reset_reason(enum reset_type reason);
int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method);
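
The OUT_OR_ERR buffer variant declared above exists because an MCDI error response occupies two dwords; a sketch of the layout it must accommodate, assuming MC_CMD_ERR_ARG_OFST is 4 as in mcdi_pcol.h:

	/* Error response layout read by efx_mcdi_display_error():
	 *   dword 0 (MC_CMD_ERR_CODE_OFST): raw MC error code
	 *   dword 1 (MC_CMD_ERR_ARG_OFST):  command-specific argument
	 * hence the max_t(size_t, _len, 8) floor in the macro.
	 */
	MCDI_DECLARE_BUF_OUT_OR_ERR(errbuf, 0);	/* still 8 bytes long */
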
diff --git a/drivers/net/ethernet/sfc/mcdi_mon.c b/drivers/net/ethernet/sfc/mcdi_mon.c
index d72ad4fc3617..bc27d5b580f5 100644
--- a/drivers/net/ethernet/sfc/mcdi_mon.c
+++ b/drivers/net/ethernet/sfc/mcdi_mon.c
@@ -24,6 +24,15 @@ enum efx_hwmon_type {
EFX_HWMON_IN, /* voltage */
EFX_HWMON_CURR, /* current */
EFX_HWMON_POWER, /* power */
+ EFX_HWMON_TYPES_COUNT
+};
+
+static const char *const efx_hwmon_unit[EFX_HWMON_TYPES_COUNT] = {
+ [EFX_HWMON_TEMP] = " degC",
+ [EFX_HWMON_COOL] = " rpm", /* though nonsense for a heatsink */
+ [EFX_HWMON_IN] = " mV",
+ [EFX_HWMON_CURR] = " mA",
+ [EFX_HWMON_POWER] = " W",
};
static const struct {
@@ -33,13 +42,13 @@ static const struct {
} efx_mcdi_sensor_type[] = {
#define SENSOR(name, label, hwmon_type, port) \
[MC_CMD_SENSOR_##name] = { label, EFX_HWMON_ ## hwmon_type, port }
- SENSOR(CONTROLLER_TEMP, "Controller ext. temp.", TEMP, -1),
+ SENSOR(CONTROLLER_TEMP, "Controller board temp.", TEMP, -1),
SENSOR(PHY_COMMON_TEMP, "PHY temp.", TEMP, -1),
- SENSOR(CONTROLLER_COOLING, "Controller cooling", COOL, -1),
+ SENSOR(CONTROLLER_COOLING, "Controller heat sink", COOL, -1),
SENSOR(PHY0_TEMP, "PHY temp.", TEMP, 0),
- SENSOR(PHY0_COOLING, "PHY cooling", COOL, 0),
+ SENSOR(PHY0_COOLING, "PHY heat sink", COOL, 0),
SENSOR(PHY1_TEMP, "PHY temp.", TEMP, 1),
- SENSOR(PHY1_COOLING, "PHY cooling", COOL, 1),
+ SENSOR(PHY1_COOLING, "PHY heat sink", COOL, 1),
SENSOR(IN_1V0, "1.0V supply", IN, -1),
SENSOR(IN_1V2, "1.2V supply", IN, -1),
SENSOR(IN_1V8, "1.8V supply", IN, -1),
@@ -47,36 +56,42 @@ static const struct {
SENSOR(IN_3V3, "3.3V supply", IN, -1),
SENSOR(IN_12V0, "12.0V supply", IN, -1),
SENSOR(IN_1V2A, "1.2V analogue supply", IN, -1),
- SENSOR(IN_VREF, "ref. voltage", IN, -1),
- SENSOR(OUT_VAOE, "AOE power supply", IN, -1),
- SENSOR(AOE_TEMP, "AOE temp.", TEMP, -1),
- SENSOR(PSU_AOE_TEMP, "AOE PSU temp.", TEMP, -1),
- SENSOR(PSU_TEMP, "Controller PSU temp.", TEMP, -1),
- SENSOR(FAN_0, NULL, COOL, -1),
- SENSOR(FAN_1, NULL, COOL, -1),
- SENSOR(FAN_2, NULL, COOL, -1),
- SENSOR(FAN_3, NULL, COOL, -1),
- SENSOR(FAN_4, NULL, COOL, -1),
+ SENSOR(IN_VREF, "Ref. voltage", IN, -1),
+ SENSOR(OUT_VAOE, "AOE FPGA supply", IN, -1),
+ SENSOR(AOE_TEMP, "AOE FPGA temp.", TEMP, -1),
+ SENSOR(PSU_AOE_TEMP, "AOE regulator temp.", TEMP, -1),
+ SENSOR(PSU_TEMP, "Controller regulator temp.",
+ TEMP, -1),
+ SENSOR(FAN_0, "Fan 0", COOL, -1),
+ SENSOR(FAN_1, "Fan 1", COOL, -1),
+ SENSOR(FAN_2, "Fan 2", COOL, -1),
+ SENSOR(FAN_3, "Fan 3", COOL, -1),
+ SENSOR(FAN_4, "Fan 4", COOL, -1),
SENSOR(IN_VAOE, "AOE input supply", IN, -1),
SENSOR(OUT_IAOE, "AOE output current", CURR, -1),
SENSOR(IN_IAOE, "AOE input current", CURR, -1),
SENSOR(NIC_POWER, "Board power use", POWER, -1),
SENSOR(IN_0V9, "0.9V supply", IN, -1),
- SENSOR(IN_I0V9, "0.9V input current", CURR, -1),
- SENSOR(IN_I1V2, "1.2V input current", CURR, -1),
- SENSOR(IN_0V9_ADC, "0.9V supply (at ADC)", IN, -1),
- SENSOR(CONTROLLER_2_TEMP, "Controller ext. temp. 2", TEMP, -1),
- SENSOR(VREG_INTERNAL_TEMP, "Voltage regulator temp.", TEMP, -1),
+ SENSOR(IN_I0V9, "0.9V supply current", CURR, -1),
+ SENSOR(IN_I1V2, "1.2V supply current", CURR, -1),
+ SENSOR(IN_0V9_ADC, "0.9V supply (ext. ADC)", IN, -1),
+ SENSOR(CONTROLLER_2_TEMP, "Controller board temp. 2", TEMP, -1),
+ SENSOR(VREG_INTERNAL_TEMP, "Regulator die temp.", TEMP, -1),
SENSOR(VREG_0V9_TEMP, "0.9V regulator temp.", TEMP, -1),
SENSOR(VREG_1V2_TEMP, "1.2V regulator temp.", TEMP, -1),
- SENSOR(CONTROLLER_VPTAT, "Controller int. temp. raw", IN, -1),
- SENSOR(CONTROLLER_INTERNAL_TEMP, "Controller int. temp.", TEMP, -1),
+ SENSOR(CONTROLLER_VPTAT,
+ "Controller PTAT voltage (int. ADC)", IN, -1),
+ SENSOR(CONTROLLER_INTERNAL_TEMP,
+ "Controller die temp. (int. ADC)", TEMP, -1),
SENSOR(CONTROLLER_VPTAT_EXTADC,
- "Controller int. temp. raw (at ADC)", IN, -1),
+ "Controller PTAT voltage (ext. ADC)", IN, -1),
SENSOR(CONTROLLER_INTERNAL_TEMP_EXTADC,
- "Controller int. temp. (via ADC)", TEMP, -1),
+ "Controller die temp. (ext. ADC)", TEMP, -1),
SENSOR(AMBIENT_TEMP, "Ambient temp.", TEMP, -1),
SENSOR(AIRFLOW, "Air flow raw", IN, -1),
+ SENSOR(VDD08D_VSS08D_CSR, "0.9V die (int. ADC)", IN, -1),
+ SENSOR(VDD08D_VSS08D_CSR_EXTADC, "0.9V die (ext. ADC)", IN, -1),
+ SENSOR(HOTPOINT_TEMP, "Controller board temp. (hotpoint)", TEMP, -1),
#undef SENSOR
};
@@ -91,7 +106,8 @@ static const char *const sensor_status_names[] = {
void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev)
{
unsigned int type, state, value;
- const char *name = NULL, *state_txt;
+ enum efx_hwmon_type hwmon_type = EFX_HWMON_UNKNOWN;
+ const char *name = NULL, *state_txt, *unit;
type = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_MONITOR);
state = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_STATE);
@@ -99,16 +115,22 @@ void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev)
/* Deal gracefully with the board having more drivers than we
* know about, but do not expect new sensor states. */
- if (type < ARRAY_SIZE(efx_mcdi_sensor_type))
+ if (type < ARRAY_SIZE(efx_mcdi_sensor_type)) {
name = efx_mcdi_sensor_type[type].label;
+ hwmon_type = efx_mcdi_sensor_type[type].hwmon_type;
+ }
if (!name)
name = "No sensor name available";
EFX_BUG_ON_PARANOID(state >= ARRAY_SIZE(sensor_status_names));
state_txt = sensor_status_names[state];
+ EFX_BUG_ON_PARANOID(hwmon_type >= EFX_HWMON_TYPES_COUNT);
+ unit = efx_hwmon_unit[hwmon_type];
+ if (!unit)
+ unit = "";
netif_err(efx, hw, efx->net_dev,
- "Sensor %d (%s) reports condition '%s' for raw value %d\n",
- type, name, state_txt, value);
+ "Sensor %d (%s) reports condition '%s' for value %d%s\n",
+ type, name, state_txt, value, unit);
}
#ifdef CONFIG_SFC_MCDI_MON
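
With the unit table above, sensor events now log a unit string; an illustrative lookup (the values are made up):

	const char *unit = efx_hwmon_unit[EFX_HWMON_TEMP]; /* " degC" */
	/* A reading of 85 is now reported as "value 85 degC" rather
	 * than the old "raw value 85".
	 */
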
diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h
index e0a63ddb7a6c..a707fb5ef14c 100644
--- a/drivers/net/ethernet/sfc/mcdi_pcol.h
+++ b/drivers/net/ethernet/sfc/mcdi_pcol.h
@@ -224,6 +224,8 @@
#define MC_CMD_ERR_MAC_EXIST 0x1009
/* Slave core not present */
#define MC_CMD_ERR_SLAVE_NOT_PRESENT 0x100a
+/* The datapath is disabled. */
+#define MC_CMD_ERR_DATAPATH_DISABLED 0x100b
#define MC_CMD_ERR_CODE_OFST 0
@@ -390,6 +392,8 @@
* AOE_ERR_DATA)
*/
#define MCDI_EVENT_AOE_BYTEBLASTER 0x9
+/* enum: DDR ECC status update */
+#define MCDI_EVENT_AOE_DDR_ECC_STATUS 0xa
#define MCDI_EVENT_AOE_ERR_DATA_LBN 8
#define MCDI_EVENT_AOE_ERR_DATA_WIDTH 8
#define MCDI_EVENT_RX_ERR_RXQ_LBN 0
@@ -462,6 +466,10 @@
#define MCDI_EVENT_CODE_ECC_CORR_ERR 0x17
/* enum: the MC has detected an uncorrectable error */
#define MCDI_EVENT_CODE_ECC_FATAL_ERR 0x18
+/* enum: The MC has entered offline BIST mode */
+#define MCDI_EVENT_CODE_MC_BIST 0x19
+/* enum: PTP tick event providing current NIC time */
+#define MCDI_EVENT_CODE_PTP_TIME 0x1a
/* enum: Artificial event generated by host and posted via MC for test
* purposes.
*/
@@ -481,15 +489,32 @@
#define MCDI_EVENT_TX_ERR_DATA_OFST 0
#define MCDI_EVENT_TX_ERR_DATA_LBN 0
#define MCDI_EVENT_TX_ERR_DATA_WIDTH 32
-/* Seconds field of timestamp */
+/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the seconds field of
+ * timestamp
+ */
#define MCDI_EVENT_PTP_SECONDS_OFST 0
#define MCDI_EVENT_PTP_SECONDS_LBN 0
#define MCDI_EVENT_PTP_SECONDS_WIDTH 32
-/* Nanoseconds field of timestamp */
+/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the major field of
+ * timestamp
+ */
+#define MCDI_EVENT_PTP_MAJOR_OFST 0
+#define MCDI_EVENT_PTP_MAJOR_LBN 0
+#define MCDI_EVENT_PTP_MAJOR_WIDTH 32
+/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the nanoseconds field
+ * of timestamp
+ */
#define MCDI_EVENT_PTP_NANOSECONDS_OFST 0
#define MCDI_EVENT_PTP_NANOSECONDS_LBN 0
#define MCDI_EVENT_PTP_NANOSECONDS_WIDTH 32
-/* Lowest four bytes of sourceUUID from PTP packet */
+/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the minor field of
+ * timestamp
+ */
+#define MCDI_EVENT_PTP_MINOR_OFST 0
+#define MCDI_EVENT_PTP_MINOR_LBN 0
+#define MCDI_EVENT_PTP_MINOR_WIDTH 32
+/* For CODE_PTP_RX events, the lowest four bytes of sourceUUID from PTP packet
+ */
#define MCDI_EVENT_PTP_UUID_OFST 0
#define MCDI_EVENT_PTP_UUID_LBN 0
#define MCDI_EVENT_PTP_UUID_WIDTH 32
@@ -505,6 +530,13 @@
#define MCDI_EVENT_ECC_FATAL_ERR_DATA_OFST 0
#define MCDI_EVENT_ECC_FATAL_ERR_DATA_LBN 0
#define MCDI_EVENT_ECC_FATAL_ERR_DATA_WIDTH 32
+/* For CODE_PTP_TIME events, the major value of the PTP clock */
+#define MCDI_EVENT_PTP_TIME_MAJOR_OFST 0
+#define MCDI_EVENT_PTP_TIME_MAJOR_LBN 0
+#define MCDI_EVENT_PTP_TIME_MAJOR_WIDTH 32
+/* For CODE_PTP_TIME events, bits 19-26 of the minor value of the PTP clock */
+#define MCDI_EVENT_PTP_TIME_MINOR_26_19_LBN 36
+#define MCDI_EVENT_PTP_TIME_MINOR_26_19_WIDTH 8
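
Because a PTP_TIME event carries only bits 19-26 of the minor value, the host can reconstruct the minor time only to a 2^19-tick granularity. A hedged sketch, following the EFX_QWORD_FIELD convention used elsewhere in the driver:

	/* Approximate minor value: the low 19 bits are unknown, so this
	 * is accurate to within 2^19 ticks of the NIC clock.
	 */
	u32 minor = EFX_QWORD_FIELD(*event,
				    MCDI_EVENT_PTP_TIME_MINOR_26_19) << 19;
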
/* FCDI_EVENT structuredef */
#define FCDI_EVENT_LEN 8
@@ -545,8 +577,10 @@
#define FCDI_EVENT_CODE_TIMED_READ 0x5
/* enum: One or more PPS IN events */
#define FCDI_EVENT_CODE_PPS_IN 0x6
-/* enum: One or more PPS OUT events */
-#define FCDI_EVENT_CODE_PPS_OUT 0x7
+/* enum: Tick event from PTP clock */
+#define FCDI_EVENT_CODE_PTP_TICK 0x7
+/* enum: ECC error counters */
+#define FCDI_EVENT_CODE_DDR_ECC_STATUS 0x8
#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_OFST 0
#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_LBN 0
#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_WIDTH 32
@@ -560,14 +594,21 @@
#define FCDI_EVENT_LINK_STATE_DATA_OFST 0
#define FCDI_EVENT_LINK_STATE_DATA_LBN 0
#define FCDI_EVENT_LINK_STATE_DATA_WIDTH 32
-#define FCDI_EVENT_PPS_COUNT_OFST 0
-#define FCDI_EVENT_PPS_COUNT_LBN 0
-#define FCDI_EVENT_PPS_COUNT_WIDTH 32
-
-/* FCDI_EXTENDED_EVENT structuredef */
-#define FCDI_EXTENDED_EVENT_LENMIN 16
-#define FCDI_EXTENDED_EVENT_LENMAX 248
-#define FCDI_EXTENDED_EVENT_LEN(num) (8+8*(num))
+#define FCDI_EVENT_DDR_ECC_STATUS_BANK_ID_LBN 36
+#define FCDI_EVENT_DDR_ECC_STATUS_BANK_ID_WIDTH 8
+#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_OFST 0
+#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_LBN 0
+#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_WIDTH 32
+
+/* FCDI_EXTENDED_EVENT_PPS structuredef: Extended FCDI event to send PPS events
+ * to the MC. Note that this structure is overlaid on a normal FCDI event
+ * such that bits 32-63 containing the event code, level, source etc. remain
+ * the same. In this case the data field of the header is defined to be the
+ * number of timestamps.
+ */
+#define FCDI_EXTENDED_EVENT_PPS_LENMIN 16
+#define FCDI_EXTENDED_EVENT_PPS_LENMAX 248
+#define FCDI_EXTENDED_EVENT_PPS_LEN(num) (8+8*(num))
/* Number of timestamps following */
#define FCDI_EXTENDED_EVENT_PPS_COUNT_OFST 0
#define FCDI_EXTENDED_EVENT_PPS_COUNT_LBN 0
@@ -581,14 +622,14 @@
#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_LBN 96
#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_WIDTH 32
/* Timestamp records comprising the event */
-#define FCDI_EXTENDED_EVENT_PPS_TIME_OFST 8
-#define FCDI_EXTENDED_EVENT_PPS_TIME_LEN 8
-#define FCDI_EXTENDED_EVENT_PPS_TIME_LO_OFST 8
-#define FCDI_EXTENDED_EVENT_PPS_TIME_HI_OFST 12
-#define FCDI_EXTENDED_EVENT_PPS_TIME_MINNUM 1
-#define FCDI_EXTENDED_EVENT_PPS_TIME_MAXNUM 30
-#define FCDI_EXTENDED_EVENT_PPS_TIME_LBN 64
-#define FCDI_EXTENDED_EVENT_PPS_TIME_WIDTH 64
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_OFST 8
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LEN 8
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LO_OFST 8
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_HI_OFST 12
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_MINNUM 1
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_MAXNUM 30
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LBN 64
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_WIDTH 64
/***********************************/
@@ -642,6 +683,10 @@
#define MC_CMD_COPYCODE_IN_LEN 16
/* Source address */
#define MC_CMD_COPYCODE_IN_SRC_ADDR_OFST 0
+/* enum: The main image should be entered via a copy of a single word from and
+ * to this address when none of the other magic behaviours are required.
+ */
+#define MC_CMD_COPYCODE_HUNT_NO_MAGIC_ADDR 0x10000
/* enum: Entering the main image via a copy of a single word from and to this
* address indicates that it should not attempt to start the datapath CPUs.
* This is useful for certain soft rebooting scenarios. (Huntington only)
@@ -872,8 +917,28 @@
#define MC_CMD_PTP_OP_RST_CLK 0x14
/* enum: Enable the forwarding of PPS events to the host */
#define MC_CMD_PTP_OP_PPS_ENABLE 0x15
+/* enum: Get the time format used by this NIC for PTP operations */
+#define MC_CMD_PTP_OP_GET_TIME_FORMAT 0x16
+/* enum: Get the clock attributes. NOTE- extended version of
+ * MC_CMD_PTP_OP_GET_TIME_FORMAT
+ */
+#define MC_CMD_PTP_OP_GET_ATTRIBUTES 0x16
+/* enum: Get corrections that should be applied to the various different
+ * timestamps
+ */
+#define MC_CMD_PTP_OP_GET_TIMESTAMP_CORRECTIONS 0x17
+/* enum: Subscribe to receive periodic time events indicating the current NIC
+ * time
+ */
+#define MC_CMD_PTP_OP_TIME_EVENT_SUBSCRIBE 0x18
+/* enum: Unsubscribe to stop receiving time events */
+#define MC_CMD_PTP_OP_TIME_EVENT_UNSUBSCRIBE 0x19
+/* enum: PPS-based manufacturing tests. Requires PPS output to be looped to PPS
+ * input on the same NIC.
+ */
+#define MC_CMD_PTP_OP_MANFTEST_PPS 0x1a
/* enum: Above this for future use. */
-#define MC_CMD_PTP_OP_MAX 0x16
+#define MC_CMD_PTP_OP_MAX 0x1b
/* MC_CMD_PTP_IN_ENABLE msgrequest */
#define MC_CMD_PTP_IN_ENABLE_LEN 16
@@ -938,8 +1003,12 @@
#define MC_CMD_PTP_IN_ADJUST_BITS 0x28
/* Time adjustment in seconds */
#define MC_CMD_PTP_IN_ADJUST_SECONDS_OFST 16
+/* Time adjustment major value */
+#define MC_CMD_PTP_IN_ADJUST_MAJOR_OFST 16
/* Time adjustment in nanoseconds */
#define MC_CMD_PTP_IN_ADJUST_NANOSECONDS_OFST 20
+/* Time adjustment minor value */
+#define MC_CMD_PTP_IN_ADJUST_MINOR_OFST 20
/* MC_CMD_PTP_IN_SYNCHRONIZE msgrequest */
#define MC_CMD_PTP_IN_SYNCHRONIZE_LEN 20
@@ -1005,8 +1074,12 @@
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
/* Time adjustment in seconds */
#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_SECONDS_OFST 8
+/* Time adjustment major value */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MAJOR_OFST 8
/* Time adjustment in nanoseconds */
#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_NANOSECONDS_OFST 12
+/* Time adjustment minor value */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MINOR_OFST 12
/* MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST msgrequest */
#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_LEN 16
@@ -1078,9 +1151,51 @@
#define MC_CMD_PTP_ENABLE_PPS 0x0
/* enum: Disable */
#define MC_CMD_PTP_DISABLE_PPS 0x1
-/* Queueid to send events back */
+/* Queue id to send events back */
#define MC_CMD_PTP_IN_PPS_ENABLE_QUEUE_ID_OFST 8
+/* MC_CMD_PTP_IN_GET_TIME_FORMAT msgrequest */
+#define MC_CMD_PTP_IN_GET_TIME_FORMAT_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+
+/* MC_CMD_PTP_IN_GET_ATTRIBUTES msgrequest */
+#define MC_CMD_PTP_IN_GET_ATTRIBUTES_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+
+/* MC_CMD_PTP_IN_GET_TIMESTAMP_CORRECTIONS msgrequest */
+#define MC_CMD_PTP_IN_GET_TIMESTAMP_CORRECTIONS_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+
+/* MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE msgrequest */
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_LEN 12
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Event queue to send PTP time events to */
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_OFST 8
+
+/* MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE msgrequest */
+#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_LEN 16
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Unsubscribe options */
+#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_CONTROL_OFST 8
+/* enum: Unsubscribe a single queue */
+#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_SINGLE 0x0
+/* enum: Unsubscribe all queues */
+#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_ALL 0x1
+/* Event queue ID */
+#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_QUEUE_OFST 12
+
+/* MC_CMD_PTP_IN_MANFTEST_PPS msgrequest */
+#define MC_CMD_PTP_IN_MANFTEST_PPS_LEN 12
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* 1 to enable PPS test mode, 0 to disable and return result. */
+#define MC_CMD_PTP_IN_MANFTEST_PPS_TEST_ENABLE_OFST 8
+
/* MC_CMD_PTP_OUT msgresponse */
#define MC_CMD_PTP_OUT_LEN 0
@@ -1088,15 +1203,29 @@
#define MC_CMD_PTP_OUT_TRANSMIT_LEN 8
/* Value of seconds timestamp */
#define MC_CMD_PTP_OUT_TRANSMIT_SECONDS_OFST 0
+/* Timestamp major value */
+#define MC_CMD_PTP_OUT_TRANSMIT_MAJOR_OFST 0
/* Value of nanoseconds timestamp */
#define MC_CMD_PTP_OUT_TRANSMIT_NANOSECONDS_OFST 4
+/* Timestamp minor value */
+#define MC_CMD_PTP_OUT_TRANSMIT_MINOR_OFST 4
+
+/* MC_CMD_PTP_OUT_TIME_EVENT_SUBSCRIBE msgresponse */
+#define MC_CMD_PTP_OUT_TIME_EVENT_SUBSCRIBE_LEN 0
+
+/* MC_CMD_PTP_OUT_TIME_EVENT_UNSUBSCRIBE msgresponse */
+#define MC_CMD_PTP_OUT_TIME_EVENT_UNSUBSCRIBE_LEN 0
/* MC_CMD_PTP_OUT_READ_NIC_TIME msgresponse */
#define MC_CMD_PTP_OUT_READ_NIC_TIME_LEN 8
/* Value of seconds timestamp */
#define MC_CMD_PTP_OUT_READ_NIC_TIME_SECONDS_OFST 0
+/* Timestamp major value */
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_MAJOR_OFST 0
/* Value of nanoseconds timestamp */
#define MC_CMD_PTP_OUT_READ_NIC_TIME_NANOSECONDS_OFST 4
+/* Timestamp minor value */
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_MINOR_OFST 4
/* MC_CMD_PTP_OUT_STATUS msgresponse */
#define MC_CMD_PTP_OUT_STATUS_LEN 64
@@ -1116,21 +1245,21 @@
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFLOW_OFST 24
/* Number of PPS bad periods */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_BAD_OFST 28
-/* Minimum period of PPS pulse */
+/* Minimum period of PPS pulse in nanoseconds */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MIN_OFST 32
-/* Maximum period of PPS pulse */
+/* Maximum period of PPS pulse in nanoseconds */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MAX_OFST 36
-/* Last period of PPS pulse */
+/* Last period of PPS pulse in nanoseconds */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_LAST_OFST 40
-/* Mean period of PPS pulse */
+/* Mean period of PPS pulse in nanoseconds */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MEAN_OFST 44
-/* Minimum offset of PPS pulse (signed) */
+/* Minimum offset of PPS pulse in nanoseconds (signed) */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MIN_OFST 48
-/* Maximum offset of PPS pulse (signed) */
+/* Maximum offset of PPS pulse in nanoseconds (signed) */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MAX_OFST 52
-/* Last offset of PPS pulse (signed) */
+/* Last offset of PPS pulse in nanoseconds (signed) */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_LAST_OFST 56
-/* Mean offset of PPS pulse (signed) */
+/* Mean offset of PPS pulse in nanoseconds (signed) */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MEAN_OFST 60
/* MC_CMD_PTP_OUT_SYNCHRONIZE msgresponse */
@@ -1146,8 +1275,12 @@
#define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTSTART_OFST 0
/* Value of seconds timestamp */
#define MC_CMD_PTP_OUT_SYNCHRONIZE_SECONDS_OFST 4
+/* Timestamp major value */
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_MAJOR_OFST 4
/* Value of nanoseconds timestamp */
#define MC_CMD_PTP_OUT_SYNCHRONIZE_NANOSECONDS_OFST 8
+/* Timestamp minor value */
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_MINOR_OFST 8
/* Host time immediately after NIC's hardware clock read */
#define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTEND_OFST 12
/* Number of nanoseconds waited after reading NIC's hardware clock */
@@ -1177,6 +1310,16 @@
#define MC_CMD_PTP_MANF_PACKET_ENOUGH 0x8
/* enum: Timestamp trigger GPIO not working */
#define MC_CMD_PTP_MANF_GPIO_TRIGGER 0x9
+/* enum: Insufficient PPS events to perform checks */
+#define MC_CMD_PTP_MANF_PPS_ENOUGH 0xa
+/* enum: PPS time event period not sufficiently close to 1s. */
+#define MC_CMD_PTP_MANF_PPS_PERIOD 0xb
+/* enum: PPS time event ns reading not sufficiently close to zero. */
+#define MC_CMD_PTP_MANF_PPS_NS 0xc
+/* enum: PTP peripheral registers incorrect */
+#define MC_CMD_PTP_MANF_REGISTERS 0xd
+/* enum: Failed to read time from PTP peripheral */
+#define MC_CMD_PTP_MANF_CLOCK_READ 0xe
/* Presence of external oscillator */
#define MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_EXTOSC_OFST 4
@@ -1198,6 +1341,62 @@
#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_MINNUM 1
#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_MAXNUM 252
+/* MC_CMD_PTP_OUT_GET_TIME_FORMAT msgresponse */
+#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_LEN 4
+/* Time format required/used by this NIC. Applies to all PTP MCDI
+ * operations that pass times between the host and firmware. If this operation
+ * is not supported (older firmware) a format of seconds and nanoseconds should
+ * be assumed.
+ */
+#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_FORMAT_OFST 0
+/* enum: Times are in seconds and nanoseconds */
+#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_SECONDS_NANOSECONDS 0x0
+/* enum: Major register has units of 16 seconds per tick, minor 8 ns per tick */
+#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_16SECONDS_8NANOSECONDS 0x1
+/* enum: Major register has units of seconds, minor 2^-27s per tick */
+#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_SECONDS_27FRACTION 0x2
+
+/* MC_CMD_PTP_OUT_GET_ATTRIBUTES msgresponse */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_LEN 8
+/* Time format required/used by this NIC. Applies to all PTP MCDI
+ * operations that pass times between the host and firmware. If this operation
+ * is not supported (older firmware) a format of seconds and nanoseconds should
+ * be assumed.
+ */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_TIME_FORMAT_OFST 0
+/* enum: Times are in seconds and nanoseconds */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_NANOSECONDS 0x0
+/* enum: Major register has units of 16 seconds per tick, minor 8 ns per tick */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_16SECONDS_8NANOSECONDS 0x1
+/* enum: Major register has units of seconds, minor 2^-27s per tick */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_27FRACTION 0x2
+/* Minimum acceptable value for a corrected synchronization timeset. When
+ * comparing host and NIC clock times, the MC returns a set of samples that
+ * contain the host start and end time, the MC time when the host start was
+ * detected and the time the MC waited between reading the time and detecting
+ * the host end. The corrected sync window is the difference between the host
+ * end and start times minus the time that the MC waited for host end.
+ */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SYNC_WINDOW_MIN_OFST 4
+
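To make the corrected-window arithmetic described above concrete, here is a minimal sketch; the variable names are illustrative rather than taken from the driver, and sync_window_min stands for the SYNC_WINDOW_MIN value returned in this response:

	/* Sketch: evaluate one host/MC synchronisation sample */
	u32 window = host_end - host_start;	/* uncorrected sync window */
	u32 corrected = window - mc_wait;	/* remove time MC spent waiting */

	if (corrected >= sync_window_min)
		/* sample is usable for synchronisation */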
+/* MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS msgresponse */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_LEN 16
+/* Uncorrected error on transmit timestamps in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_TRANSMIT_OFST 0
+/* Uncorrected error on receive timestamps in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_RECEIVE_OFST 4
+/* Uncorrected error on PPS output in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_OUT_OFST 8
+/* Uncorrected error on PPS input in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_IN_OFST 12
+
+/* MC_CMD_PTP_OUT_MANFTEST_PPS msgresponse */
+#define MC_CMD_PTP_OUT_MANFTEST_PPS_LEN 4
+/* Results of testing */
+#define MC_CMD_PTP_OUT_MANFTEST_PPS_TEST_RESULT_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_PTP_OUT_MANFTEST_BASIC/TEST_RESULT */
+
/***********************************/
/* MC_CMD_CSR_READ32
@@ -1923,6 +2122,8 @@
#define MC_CMD_MEDIA_SFP_PLUS 0x5
/* enum: 10GBaseT. */
#define MC_CMD_MEDIA_BASE_T 0x6
+/* enum: QSFP+. */
+#define MC_CMD_MEDIA_QSFP_PLUS 0x7
#define MC_CMD_GET_PHY_CFG_OUT_MMD_MASK_OFST 48
/* enum: Native clause 22 */
#define MC_CMD_MMD_CLAUSE22 0x0
@@ -2223,6 +2424,8 @@
#define MC_CMD_LOOPBACK_SD_FEP_WS 0x21
/* enum: KR Serdes Serial Wireside. */
#define MC_CMD_LOOPBACK_SD_FES_WS 0x22
+/* enum: Near side of AOE Siena side port */
+#define MC_CMD_LOOPBACK_AOE_INT_NEAR 0x23
/* Supported loopbacks. */
#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_OFST 8
#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_LEN 8
@@ -2286,6 +2489,10 @@
#define MC_CMD_GET_LINK_OUT_BPX_LINK_WIDTH 1
#define MC_CMD_GET_LINK_OUT_PHY_LINK_LBN 3
#define MC_CMD_GET_LINK_OUT_PHY_LINK_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_LINK_FAULT_RX_LBN 6
+#define MC_CMD_GET_LINK_OUT_LINK_FAULT_RX_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_LINK_FAULT_TX_LBN 7
+#define MC_CMD_GET_LINK_OUT_LINK_FAULT_TX_WIDTH 1
/* This returns the negotiated flow control value. */
#define MC_CMD_GET_LINK_OUT_FCNTL_OFST 20
/* enum: Flow control is off. */
@@ -3175,7 +3382,7 @@
#define MC_CMD_SENSOR_INFO_EXT_IN_PAGE_OFST 0
/* MC_CMD_SENSOR_INFO_OUT msgresponse */
-#define MC_CMD_SENSOR_INFO_OUT_LENMIN 12
+#define MC_CMD_SENSOR_INFO_OUT_LENMIN 4
#define MC_CMD_SENSOR_INFO_OUT_LENMAX 252
#define MC_CMD_SENSOR_INFO_OUT_LEN(num) (4+8*(num))
#define MC_CMD_SENSOR_INFO_OUT_MASK_OFST 0
@@ -3269,16 +3476,18 @@
#define MC_CMD_SENSOR_VDD08D_VSS08D_CSR 0x2b
/* enum: voltage between VSS08D and VSS08D at CSR (external ADC): mV */
#define MC_CMD_SENSOR_VDD08D_VSS08D_CSR_EXTADC 0x2c
+/* enum: Hotpoint temperature: degC */
+#define MC_CMD_SENSOR_HOTPOINT_TEMP 0x2d
/* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF */
#define MC_CMD_SENSOR_ENTRY_OFST 4
#define MC_CMD_SENSOR_ENTRY_LEN 8
#define MC_CMD_SENSOR_ENTRY_LO_OFST 4
#define MC_CMD_SENSOR_ENTRY_HI_OFST 8
-#define MC_CMD_SENSOR_ENTRY_MINNUM 1
+#define MC_CMD_SENSOR_ENTRY_MINNUM 0
#define MC_CMD_SENSOR_ENTRY_MAXNUM 31
/* MC_CMD_SENSOR_INFO_EXT_OUT msgresponse */
-#define MC_CMD_SENSOR_INFO_EXT_OUT_LENMIN 12
+#define MC_CMD_SENSOR_INFO_EXT_OUT_LENMIN 4
#define MC_CMD_SENSOR_INFO_EXT_OUT_LENMAX 252
#define MC_CMD_SENSOR_INFO_EXT_OUT_LEN(num) (4+8*(num))
#define MC_CMD_SENSOR_INFO_EXT_OUT_MASK_OFST 0
@@ -3291,7 +3500,7 @@
/* MC_CMD_SENSOR_ENTRY_LEN 8 */
/* MC_CMD_SENSOR_ENTRY_LO_OFST 4 */
/* MC_CMD_SENSOR_ENTRY_HI_OFST 8 */
-/* MC_CMD_SENSOR_ENTRY_MINNUM 1 */
+/* MC_CMD_SENSOR_ENTRY_MINNUM 0 */
/* MC_CMD_SENSOR_ENTRY_MAXNUM 31 */
/* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF structuredef */
@@ -3864,6 +4073,18 @@
#define NVRAM_PARTITION_TYPE_ID_LBN 0
#define NVRAM_PARTITION_TYPE_ID_WIDTH 16
+/* LICENSED_APP_ID structuredef */
+#define LICENSED_APP_ID_LEN 4
+#define LICENSED_APP_ID_ID_OFST 0
+/* enum: OpenOnload */
+#define LICENSED_APP_ID_ONLOAD 0x1
+/* enum: PTP timestamping */
+#define LICENSED_APP_ID_PTP 0x2
+/* enum: SolarCapture Pro */
+#define LICENSED_APP_ID_SOLARCAPTURE_PRO 0x4
+#define LICENSED_APP_ID_ID_LBN 0
+#define LICENSED_APP_ID_ID_WIDTH 32
+
/***********************************/
/* MC_CMD_READ_REGS
@@ -4021,6 +4242,8 @@
#define MC_CMD_INIT_RXQ_IN_FLAG_CHAIN_WIDTH 1
#define MC_CMD_INIT_RXQ_IN_FLAG_PREFIX_LBN 8
#define MC_CMD_INIT_RXQ_IN_FLAG_PREFIX_WIDTH 1
+#define MC_CMD_INIT_RXQ_IN_FLAG_DISABLE_SCATTER_LBN 9
+#define MC_CMD_INIT_RXQ_IN_FLAG_DISABLE_SCATTER_WIDTH 1
/* Owner ID to use if in buffer mode (zero if physical) */
#define MC_CMD_INIT_RXQ_IN_OWNER_ID_OFST 20
/* The port ID associated with the v-adaptor which should contain this DMAQ. */
@@ -4179,6 +4402,9 @@
#define MC_CMD_PROXY_CMD_IN_TARGET_VF_WIDTH 16
#define MC_CMD_PROXY_CMD_IN_VF_NULL 0xffff /* enum */
+/* MC_CMD_PROXY_CMD_OUT msgresponse */
+#define MC_CMD_PROXY_CMD_OUT_LEN 0
+
/***********************************/
/* MC_CMD_ALLOC_BUFTBL_CHUNK
@@ -4213,7 +4439,7 @@
/* MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN msgrequest */
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMIN 20
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMAX 252
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMAX 268
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LEN(num) (12+8*(num))
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_HANDLE_OFST 0
/* ID */
@@ -4226,7 +4452,7 @@
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LO_OFST 12
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_HI_OFST 16
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MINNUM 1
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MAXNUM 30
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MAXNUM 32
/* MC_CMD_PROGRAM_BUFTBL_ENTRIES_OUT msgresponse */
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_OUT_LEN 0
@@ -6800,6 +7026,30 @@
/***********************************/
+/* MC_CMD_CAP_BLK_READ
+ * Read multiple 64bit words from capture block memory
+ */
+#define MC_CMD_CAP_BLK_READ 0xe7
+
+/* MC_CMD_CAP_BLK_READ_IN msgrequest */
+#define MC_CMD_CAP_BLK_READ_IN_LEN 12
+#define MC_CMD_CAP_BLK_READ_IN_CAP_REG_OFST 0
+#define MC_CMD_CAP_BLK_READ_IN_ADDR_OFST 4
+#define MC_CMD_CAP_BLK_READ_IN_COUNT_OFST 8
+
+/* MC_CMD_CAP_BLK_READ_OUT msgresponse */
+#define MC_CMD_CAP_BLK_READ_OUT_LENMIN 8
+#define MC_CMD_CAP_BLK_READ_OUT_LENMAX 248
+#define MC_CMD_CAP_BLK_READ_OUT_LEN(num) (0+8*(num))
+#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_OFST 0
+#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_LEN 8
+#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_LO_OFST 0
+#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_HI_OFST 4
+#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_MINNUM 1
+#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_MAXNUM 31
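Since LENMAX is 248 and each buffer entry is 8 bytes, one response carries at most 31 words, matching BUFFER_MAXNUM above. A minimal sketch (not part of this patch) of issuing the command with the driver's existing MCDI helpers, where cap_reg, addr and count are assumed to come from the caller:

	MCDI_DECLARE_BUF(inbuf, MC_CMD_CAP_BLK_READ_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_CAP_BLK_READ_OUT_LENMAX);
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, CAP_BLK_READ_IN_CAP_REG, cap_reg);
	MCDI_SET_DWORD(inbuf, CAP_BLK_READ_IN_ADDR, addr);
	MCDI_SET_DWORD(inbuf, CAP_BLK_READ_IN_COUNT, count);	/* <= 31 */
	rc = efx_mcdi_rpc(efx, MC_CMD_CAP_BLK_READ, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);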
+
+
+/***********************************/
/* MC_CMD_DUMP_DO
* Take a dump of the DUT state
*/
@@ -6826,6 +7076,10 @@
#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 20
#define MC_CMD_DUMP_DO_IN_HOST_MEMORY_MLI_MAX_DEPTH 0x2 /* enum */
#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_UART_PORT_OFST 12
+/* enum: The uart port this command was received over (if using a uart
+ * transport)
+ */
+#define MC_CMD_DUMP_DO_IN_UART_PORT_SRC 0xff
#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_SIZE_OFST 24
#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_OFST 28
#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM 0x0 /* enum */
@@ -6942,39 +7196,68 @@
/***********************************/
-/* MC_CMD_START_KR_EYE_PLOT
- * Start KR Serdes Eye diagram plot on a given lane. Lane must have valid
- * signal.
- */
-#define MC_CMD_START_KR_EYE_PLOT 0xee
-
-/* MC_CMD_START_KR_EYE_PLOT_IN msgrequest */
-#define MC_CMD_START_KR_EYE_PLOT_IN_LEN 4
-#define MC_CMD_START_KR_EYE_PLOT_IN_LANE_OFST 0
-
-/* MC_CMD_START_KR_EYE_PLOT_OUT msgresponse */
-#define MC_CMD_START_KR_EYE_PLOT_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_POLL_KR_EYE_PLOT
- * Poll KR Serdes Eye diagram plot. Returns one row of BER data. The caller
- * should call this command repeatedly after starting eye plot, until no more
- * data is returned.
- */
-#define MC_CMD_POLL_KR_EYE_PLOT 0xef
-
-/* MC_CMD_POLL_KR_EYE_PLOT_IN msgrequest */
-#define MC_CMD_POLL_KR_EYE_PLOT_IN_LEN 0
-
-/* MC_CMD_POLL_KR_EYE_PLOT_OUT msgresponse */
-#define MC_CMD_POLL_KR_EYE_PLOT_OUT_LENMIN 0
-#define MC_CMD_POLL_KR_EYE_PLOT_OUT_LENMAX 252
-#define MC_CMD_POLL_KR_EYE_PLOT_OUT_LEN(num) (0+2*(num))
-#define MC_CMD_POLL_KR_EYE_PLOT_OUT_SAMPLES_OFST 0
-#define MC_CMD_POLL_KR_EYE_PLOT_OUT_SAMPLES_LEN 2
-#define MC_CMD_POLL_KR_EYE_PLOT_OUT_SAMPLES_MINNUM 0
-#define MC_CMD_POLL_KR_EYE_PLOT_OUT_SAMPLES_MAXNUM 126
+/* MC_CMD_UART_SEND_DATA
+ * Send checksummed[sic] block of data over the uart. Response is a placeholder
+ * should we wish to make this reliable; currently requests are fire-and-
+ * forget.
+ */
+#define MC_CMD_UART_SEND_DATA 0xee
+
+/* MC_CMD_UART_SEND_DATA_OUT msgrequest */
+#define MC_CMD_UART_SEND_DATA_OUT_LENMIN 16
+#define MC_CMD_UART_SEND_DATA_OUT_LENMAX 252
+#define MC_CMD_UART_SEND_DATA_OUT_LEN(num) (16+1*(num))
+/* CRC32 over OFFSET, LENGTH, RESERVED, DATA */
+#define MC_CMD_UART_SEND_DATA_OUT_CHECKSUM_OFST 0
+/* Offset at which to write the data */
+#define MC_CMD_UART_SEND_DATA_OUT_OFFSET_OFST 4
+/* Length of data */
+#define MC_CMD_UART_SEND_DATA_OUT_LENGTH_OFST 8
+/* Reserved for future use */
+#define MC_CMD_UART_SEND_DATA_OUT_RESERVED_OFST 12
+#define MC_CMD_UART_SEND_DATA_OUT_DATA_OFST 16
+#define MC_CMD_UART_SEND_DATA_OUT_DATA_LEN 1
+#define MC_CMD_UART_SEND_DATA_OUT_DATA_MINNUM 0
+#define MC_CMD_UART_SEND_DATA_OUT_DATA_MAXNUM 236
+
+/* MC_CMD_UART_SEND_DATA_IN msgresponse */
+#define MC_CMD_UART_SEND_DATA_IN_LEN 0
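Note that for this command the _OUT structure is the request and _IN the response. A hypothetical sketch of filling in the checksum field follows; the header only says the CRC32 covers OFFSET, LENGTH, RESERVED and DATA (bytes 4 onwards), so the zero seed and the crc32_le() convention from <linux/crc32.h>, like data_len itself, are assumptions rather than anything this file specifies:

	MCDI_DECLARE_BUF(inbuf, MC_CMD_UART_SEND_DATA_OUT_LEN(64));
	u8 *body = (u8 *)inbuf + MC_CMD_UART_SEND_DATA_OUT_OFFSET_OFST;
	u32 crc;

	/* ... populate OFFSET, LENGTH, RESERVED and up to 64 bytes of DATA ... */
	crc = crc32_le(0, body, 12 + data_len);	/* OFFSET through DATA */
	MCDI_SET_DWORD(inbuf, UART_SEND_DATA_OUT_CHECKSUM, crc);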
+
+
+/***********************************/
+/* MC_CMD_UART_RECV_DATA
+ * Request checksummed[sic] block of data over the uart. Only a placeholder,
+ * subject to change and not currently implemented.
+ */
+#define MC_CMD_UART_RECV_DATA 0xef
+
+/* MC_CMD_UART_RECV_DATA_OUT msgrequest */
+#define MC_CMD_UART_RECV_DATA_OUT_LEN 16
+/* CRC32 over OFFSET, LENGTH, RESERVED */
+#define MC_CMD_UART_RECV_DATA_OUT_CHECKSUM_OFST 0
+/* Offset from which to read the data */
+#define MC_CMD_UART_RECV_DATA_OUT_OFFSET_OFST 4
+/* Length of data */
+#define MC_CMD_UART_RECV_DATA_OUT_LENGTH_OFST 8
+/* Reserved for future use */
+#define MC_CMD_UART_RECV_DATA_OUT_RESERVED_OFST 12
+
+/* MC_CMD_UART_RECV_DATA_IN msgresponse */
+#define MC_CMD_UART_RECV_DATA_IN_LENMIN 16
+#define MC_CMD_UART_RECV_DATA_IN_LENMAX 252
+#define MC_CMD_UART_RECV_DATA_IN_LEN(num) (16+1*(num))
+/* CRC32 over RESERVED1, RESERVED2, RESERVED3, DATA */
+#define MC_CMD_UART_RECV_DATA_IN_CHECKSUM_OFST 0
+/* Reserved for future use */
+#define MC_CMD_UART_RECV_DATA_IN_RESERVED1_OFST 4
+/* Reserved for future use */
+#define MC_CMD_UART_RECV_DATA_IN_RESERVED2_OFST 8
+/* Reserved for future use */
+#define MC_CMD_UART_RECV_DATA_IN_RESERVED3_OFST 12
+#define MC_CMD_UART_RECV_DATA_IN_DATA_OFST 16
+#define MC_CMD_UART_RECV_DATA_IN_DATA_LEN 1
+#define MC_CMD_UART_RECV_DATA_IN_DATA_MINNUM 0
+#define MC_CMD_UART_RECV_DATA_IN_DATA_MAXNUM 236
/***********************************/
@@ -7026,6 +7309,15 @@
#define MC_CMD_KR_TUNE_IN_TXEQ_SET 0x3
/* enum: Force KR Serdes reset / recalibration */
#define MC_CMD_KR_TUNE_IN_RECAL 0x4
+/* enum: Start KR Serdes Eye diagram plot on a given lane. Lane must have valid
+ * signal.
+ */
+#define MC_CMD_KR_TUNE_IN_START_EYE_PLOT 0x5
+/* enum: Poll KR Serdes Eye diagram plot. Returns one row of BER data. The
+ * caller should call this command repeatedly after starting eye plot, until no
+ * more data is returned.
+ */
+#define MC_CMD_KR_TUNE_IN_POLL_EYE_PLOT 0x6
/* Align the arguments to 32 bits */
#define MC_CMD_KR_TUNE_IN_KR_TUNE_RSVD_OFST 1
#define MC_CMD_KR_TUNE_IN_KR_TUNE_RSVD_LEN 3
@@ -7123,6 +7415,91 @@
/* MC_CMD_KR_TUNE_RXEQ_SET_OUT msgresponse */
#define MC_CMD_KR_TUNE_RXEQ_SET_OUT_LEN 0
+/* MC_CMD_KR_TUNE_TXEQ_GET_IN msgrequest */
+#define MC_CMD_KR_TUNE_TXEQ_GET_IN_LEN 4
+/* Requested operation */
+#define MC_CMD_KR_TUNE_TXEQ_GET_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_TXEQ_GET_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_TXEQ_GET_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_TXEQ_GET_IN_KR_TUNE_RSVD_LEN 3
+
+/* MC_CMD_KR_TUNE_TXEQ_GET_OUT msgresponse */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LENMIN 4
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LENMAX 252
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LEN(num) (0+4*(num))
+/* TXEQ Parameter */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_OFST 0
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LEN 4
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_MINNUM 1
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_MAXNUM 63
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_ID_LBN 0
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_ID_WIDTH 8
+/* enum: TX Amplitude */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_LEV 0x0
+/* enum: De-Emphasis Tap1 Magnitude (0-7) */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_MODE 0x1
+/* enum: De-Emphasis Tap1 Fine */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_DTLEV 0x2
+/* enum: De-Emphasis Tap2 Magnitude (0-6) */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_D2 0x3
+/* enum: De-Emphasis Tap2 Fine */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_D2TLEV 0x4
+/* enum: Pre-Emphasis Magnitude */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_E 0x5
+/* enum: Pre-Emphasis Fine */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_ETLEV 0x6
+/* enum: TX Slew Rate Coarse control */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_PREDRV_DLY 0x7
+/* enum: TX Slew Rate Fine control */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_SR_SET 0x8
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_LBN 8
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_WIDTH 3
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_0 0x0 /* enum */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_1 0x1 /* enum */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_2 0x2 /* enum */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_3 0x3 /* enum */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_ALL 0x4 /* enum */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_RESERVED_LBN 11
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_RESERVED_WIDTH 5
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_INITIAL_LBN 16
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_INITIAL_WIDTH 8
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_RESERVED2_LBN 24
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_RESERVED2_WIDTH 8
+
+/* MC_CMD_KR_TUNE_TXEQ_SET_IN msgrequest */
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_LENMIN 8
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_LENMAX 252
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_LEN(num) (4+4*(num))
+/* Requested operation */
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_KR_TUNE_RSVD_LEN 3
+/* TXEQ Parameter */
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_OFST 4
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_LEN 4
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_MINNUM 1
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_MAXNUM 62
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_ID_LBN 0
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_ID_WIDTH 8
+/* Enum values, see field(s): */
+/* MC_CMD_KR_TUNE_TXEQ_GET_OUT/PARAM_ID */
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_LANE_LBN 8
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_LANE_WIDTH 3
+/* Enum values, see field(s): */
+/* MC_CMD_KR_TUNE_TXEQ_GET_OUT/PARAM_LANE */
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_RESERVED_LBN 11
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_RESERVED_WIDTH 5
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_INITIAL_LBN 16
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_INITIAL_WIDTH 8
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_RESERVED2_LBN 24
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_RESERVED2_WIDTH 8
+
+/* MC_CMD_KR_TUNE_TXEQ_SET_OUT msgresponse */
+#define MC_CMD_KR_TUNE_TXEQ_SET_OUT_LEN 0
+
/* MC_CMD_KR_TUNE_RECAL_IN msgrequest */
#define MC_CMD_KR_TUNE_RECAL_IN_LEN 4
/* Requested operation */
@@ -7135,6 +7512,37 @@
/* MC_CMD_KR_TUNE_RECAL_OUT msgresponse */
#define MC_CMD_KR_TUNE_RECAL_OUT_LEN 0
+/* MC_CMD_KR_TUNE_START_EYE_PLOT_IN msgrequest */
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_LEN 8
+/* Requested operation */
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_KR_TUNE_RSVD_LEN 3
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_LANE_OFST 4
+
+/* MC_CMD_KR_TUNE_START_EYE_PLOT_OUT msgresponse */
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_OUT_LEN 0
+
+/* MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN msgrequest */
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN_LEN 4
+/* Requested operation */
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN_KR_TUNE_RSVD_LEN 3
+
+/* MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT msgresponse */
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_LENMIN 0
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_LENMAX 252
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_LEN(num) (0+2*(num))
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_OFST 0
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_LEN 2
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MINNUM 0
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MAXNUM 126
+
/***********************************/
/* MC_CMD_PCIE_TUNE
@@ -7157,6 +7565,13 @@
#define MC_CMD_PCIE_TUNE_IN_TXEQ_GET 0x2
/* enum: Override TX Driver settings */
#define MC_CMD_PCIE_TUNE_IN_TXEQ_SET 0x3
+/* enum: Start PCIe Serdes Eye diagram plot on a given lane. */
+#define MC_CMD_PCIE_TUNE_IN_START_EYE_PLOT 0x5
+/* enum: Poll PCIe Serdes Eye diagram plot. Returns one row of BER data. The
+ * caller should call this command repeatedly after starting eye plot, until no
+ * more data is returned.
+ */
+#define MC_CMD_PCIE_TUNE_IN_POLL_EYE_PLOT 0x6
/* Align the arguments to 32 bits */
#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_RSVD_OFST 1
#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_RSVD_LEN 3
@@ -7258,6 +7673,37 @@
#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_CURRENT_LBN 24
#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_CURRENT_WIDTH 8
+/* MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN msgrequest */
+#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_LEN 8
+/* Requested operation */
+#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_OP_OFST 0
+#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_RSVD_OFST 1
+#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_RSVD_LEN 3
+#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_LANE_OFST 4
+
+/* MC_CMD_PCIE_TUNE_START_EYE_PLOT_OUT msgresponse */
+#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_OUT_LEN 0
+
+/* MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN msgrequest */
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_LEN 4
+/* Requested operation */
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_PCIE_TUNE_OP_OFST 0
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_PCIE_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_PCIE_TUNE_RSVD_OFST 1
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_PCIE_TUNE_RSVD_LEN 3
+
+/* MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT msgresponse */
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_LENMIN 0
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_LENMAX 252
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_LEN(num) (0+2*(num))
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_OFST 0
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_LEN 2
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MINNUM 0
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MAXNUM 126
+
/***********************************/
/* MC_CMD_LICENSING
@@ -7310,5 +7756,152 @@
*/
#define MC_CMD_MC2MC_PROXY 0xf4
+/* MC_CMD_MC2MC_PROXY_IN msgrequest */
+#define MC_CMD_MC2MC_PROXY_IN_LEN 0
+
+/* MC_CMD_MC2MC_PROXY_OUT msgresponse */
+#define MC_CMD_MC2MC_PROXY_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_LICENSED_APP_STATE
+ * Query the state of an individual licensed application. (Note that the actual
+ * state may be invalidated by the MC_CMD_LICENSING OP_UPDATE_LICENSE operation
+ * or a reboot of the MC.)
+ */
+#define MC_CMD_GET_LICENSED_APP_STATE 0xf5
+
+/* MC_CMD_GET_LICENSED_APP_STATE_IN msgrequest */
+#define MC_CMD_GET_LICENSED_APP_STATE_IN_LEN 4
+/* application ID to query (LICENSED_APP_ID_xxx) */
+#define MC_CMD_GET_LICENSED_APP_STATE_IN_APP_ID_OFST 0
+
+/* MC_CMD_GET_LICENSED_APP_STATE_OUT msgresponse */
+#define MC_CMD_GET_LICENSED_APP_STATE_OUT_LEN 4
+/* state of this application */
+#define MC_CMD_GET_LICENSED_APP_STATE_OUT_STATE_OFST 0
+/* enum: no (or invalid) license is present for the application */
+#define MC_CMD_GET_LICENSED_APP_STATE_OUT_NOT_LICENSED 0x0
+/* enum: a valid license is present for the application */
+#define MC_CMD_GET_LICENSED_APP_STATE_OUT_LICENSED 0x1
+
+
+/***********************************/
+/* MC_CMD_LICENSED_APP_OP
+ * Perform an action for an individual licensed application.
+ */
+#define MC_CMD_LICENSED_APP_OP 0xf6
+
+/* MC_CMD_LICENSED_APP_OP_IN msgrequest */
+#define MC_CMD_LICENSED_APP_OP_IN_LENMIN 8
+#define MC_CMD_LICENSED_APP_OP_IN_LENMAX 252
+#define MC_CMD_LICENSED_APP_OP_IN_LEN(num) (8+4*(num))
+/* application ID */
+#define MC_CMD_LICENSED_APP_OP_IN_APP_ID_OFST 0
+/* the type of operation requested */
+#define MC_CMD_LICENSED_APP_OP_IN_OP_OFST 4
+/* enum: validate application */
+#define MC_CMD_LICENSED_APP_OP_IN_OP_VALIDATE 0x0
+/* arguments specific to this particular operation */
+#define MC_CMD_LICENSED_APP_OP_IN_ARGS_OFST 8
+#define MC_CMD_LICENSED_APP_OP_IN_ARGS_LEN 4
+#define MC_CMD_LICENSED_APP_OP_IN_ARGS_MINNUM 0
+#define MC_CMD_LICENSED_APP_OP_IN_ARGS_MAXNUM 61
+
+/* MC_CMD_LICENSED_APP_OP_OUT msgresponse */
+#define MC_CMD_LICENSED_APP_OP_OUT_LENMIN 0
+#define MC_CMD_LICENSED_APP_OP_OUT_LENMAX 252
+#define MC_CMD_LICENSED_APP_OP_OUT_LEN(num) (0+4*(num))
+/* result specific to this particular operation */
+#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_OFST 0
+#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_LEN 4
+#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_MINNUM 0
+#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_MAXNUM 63
+
+/* MC_CMD_LICENSED_APP_OP_VALIDATE_IN msgrequest */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_LEN 72
+/* application ID */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_APP_ID_OFST 0
+/* the type of operation requested */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_OP_OFST 4
+/* validation challenge */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_CHALLENGE_OFST 8
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_CHALLENGE_LEN 64
+
+/* MC_CMD_LICENSED_APP_OP_VALIDATE_OUT msgresponse */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_LEN 68
+/* feature expiry (time_t) */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_EXPIRY_OFST 0
+/* validation response */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_RESPONSE_OFST 4
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_RESPONSE_LEN 64
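A sketch (not code from this patch) of driving the validate operation, where challenge is an assumed 64-byte buffer supplied by the licensing client:

	MCDI_DECLARE_BUF(inbuf, MC_CMD_LICENSED_APP_OP_VALIDATE_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_LEN);
	int rc;

	MCDI_SET_DWORD(inbuf, LICENSED_APP_OP_VALIDATE_IN_APP_ID,
		       LICENSED_APP_ID_PTP);
	MCDI_SET_DWORD(inbuf, LICENSED_APP_OP_VALIDATE_IN_OP,
		       MC_CMD_LICENSED_APP_OP_IN_OP_VALIDATE);
	memcpy(MCDI_PTR(inbuf, LICENSED_APP_OP_VALIDATE_IN_CHALLENGE),
	       challenge, MC_CMD_LICENSED_APP_OP_VALIDATE_IN_CHALLENGE_LEN);
	rc = efx_mcdi_rpc(efx, MC_CMD_LICENSED_APP_OP, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), NULL);
	/* on success, EXPIRY and the 64-byte RESPONSE are in outbuf */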
+
+
+/***********************************/
+/* MC_CMD_SET_PORT_SNIFF_CONFIG
+ * Configure port sniffing for the physical port associated with the calling
+ * function. Only a privileged function may change the port sniffing
+ * configuration. A copy of all traffic delivered to the host (non-promiscuous
+ * mode) or all traffic arriving at the port (promiscuous mode) may be
+ * delivered to a specific queue, or a set of queues with RSS.
+ */
+#define MC_CMD_SET_PORT_SNIFF_CONFIG 0xf7
+
+/* MC_CMD_SET_PORT_SNIFF_CONFIG_IN msgrequest */
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_LEN 16
+/* configuration flags */
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_FLAGS_OFST 0
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_ENABLE_LBN 0
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_ENABLE_WIDTH 1
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS_LBN 1
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS_WIDTH 1
+/* receive queue handle (for RSS mode, this is the base queue) */
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_QUEUE_OFST 4
+/* receive mode */
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_OFST 8
+/* enum: receive to just the specified queue */
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE 0x0
+/* enum: receive to multiple queues using RSS context */
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_RSS 0x1
+/* RSS context (for RX_MODE_RSS) as returned by MC_CMD_RSS_CONTEXT_ALLOC. Note
+ * that these handles should be considered opaque to the host, although a value
+ * of 0xFFFFFFFF is guaranteed never to be a valid handle.
+ */
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_OFST 12
+
+/* MC_CMD_SET_PORT_SNIFF_CONFIG_OUT msgresponse */
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_OUT_LEN 0
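As an illustration (a sketch, not from this patch), a privileged function could enable non-promiscuous sniffing to a single queue as follows; rx_queue_handle is assumed from the caller, and RX_CONTEXT is left at the never-valid 0xffffffff since RX_MODE_SIMPLE does not use it:

	MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_PORT_SNIFF_CONFIG_IN_LEN);
	int rc;

	MCDI_POPULATE_DWORD_2(inbuf, SET_PORT_SNIFF_CONFIG_IN_FLAGS,
			      SET_PORT_SNIFF_CONFIG_IN_ENABLE, 1,
			      SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS, 0);
	MCDI_SET_DWORD(inbuf, SET_PORT_SNIFF_CONFIG_IN_RX_QUEUE,
		       rx_queue_handle);
	MCDI_SET_DWORD(inbuf, SET_PORT_SNIFF_CONFIG_IN_RX_MODE,
		       MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE);
	MCDI_SET_DWORD(inbuf, SET_PORT_SNIFF_CONFIG_IN_RX_CONTEXT, 0xffffffff);
	rc = efx_mcdi_rpc(efx, MC_CMD_SET_PORT_SNIFF_CONFIG,
			  inbuf, sizeof(inbuf), NULL, 0, NULL);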
+
+
+/***********************************/
+/* MC_CMD_GET_PORT_SNIFF_CONFIG
+ * Obtain the current port sniffing configuration for the physical port
+ * associated with the calling function. Only a privileged function may read
+ * the configuration.
+ */
+#define MC_CMD_GET_PORT_SNIFF_CONFIG 0xf8
+
+/* MC_CMD_GET_PORT_SNIFF_CONFIG_IN msgrequest */
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_IN_LEN 0
+
+/* MC_CMD_GET_PORT_SNIFF_CONFIG_OUT msgresponse */
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_LEN 16
+/* configuration flags */
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_FLAGS_OFST 0
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_ENABLE_LBN 0
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_ENABLE_WIDTH 1
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_PROMISCUOUS_LBN 1
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_PROMISCUOUS_WIDTH 1
+/* receiving queue handle (for RSS mode, this is the base queue) */
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_OFST 4
+/* receive mode */
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_OFST 8
+/* enum: receiving to just the specified queue */
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_SIMPLE 0x0
+/* enum: receiving to multiple queues using RSS context */
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_RSS 0x1
+/* RSS context (for RX_MODE_RSS) */
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_OFST 12
+
#endif /* MCDI_PCOL_H */
diff --git a/drivers/net/ethernet/sfc/mcdi_port.c b/drivers/net/ethernet/sfc/mcdi_port.c
index 7b6be61d549f..91d23252f8fa 100644
--- a/drivers/net/ethernet/sfc/mcdi_port.c
+++ b/drivers/net/ethernet/sfc/mcdi_port.c
@@ -90,13 +90,6 @@ static int efx_mcdi_set_link(struct efx_nic *efx, u32 capabilities,
rc = efx_mcdi_rpc(efx, MC_CMD_SET_LINK, inbuf, sizeof(inbuf),
NULL, 0, NULL);
- if (rc)
- goto fail;
-
- return 0;
-
-fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
@@ -143,17 +136,13 @@ static int efx_mcdi_mdio_read(struct net_device *net_dev,
rc = efx_mcdi_rpc(efx, MC_CMD_MDIO_READ, inbuf, sizeof(inbuf),
outbuf, sizeof(outbuf), &outlen);
if (rc)
- goto fail;
+ return rc;
if (MCDI_DWORD(outbuf, MDIO_READ_OUT_STATUS) !=
MC_CMD_MDIO_STATUS_GOOD)
return -EIO;
return (u16)MCDI_DWORD(outbuf, MDIO_READ_OUT_VALUE);
-
-fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
- return rc;
}
static int efx_mcdi_mdio_write(struct net_device *net_dev,
@@ -174,17 +163,13 @@ static int efx_mcdi_mdio_write(struct net_device *net_dev,
rc = efx_mcdi_rpc(efx, MC_CMD_MDIO_WRITE, inbuf, sizeof(inbuf),
outbuf, sizeof(outbuf), &outlen);
if (rc)
- goto fail;
+ return rc;
if (MCDI_DWORD(outbuf, MDIO_WRITE_OUT_STATUS) !=
MC_CMD_MDIO_STATUS_GOOD)
return -EIO;
return 0;
-
-fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
- return rc;
}
static u32 mcdi_to_ethtool_cap(u32 media, u32 cap)
@@ -487,17 +472,14 @@ static bool efx_mcdi_phy_poll(struct efx_nic *efx)
rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
outbuf, sizeof(outbuf), NULL);
- if (rc) {
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
- __func__, rc);
+ if (rc)
efx->link_state.up = false;
- } else {
+ else
efx_mcdi_phy_decode_link(
efx, &efx->link_state,
MCDI_DWORD(outbuf, GET_LINK_OUT_LINK_SPEED),
MCDI_DWORD(outbuf, GET_LINK_OUT_FLAGS),
MCDI_DWORD(outbuf, GET_LINK_OUT_FCNTL));
- }
return !efx_link_state_equal(&efx->link_state, &old_state);
}
@@ -531,11 +513,8 @@ static void efx_mcdi_phy_get_settings(struct efx_nic *efx, struct ethtool_cmd *e
BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
outbuf, sizeof(outbuf), NULL);
- if (rc) {
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
- __func__, rc);
+ if (rc)
return;
- }
ecmd->lp_advertising =
mcdi_to_ethtool_cap(phy_cfg->media,
MCDI_DWORD(outbuf, GET_LINK_OUT_LP_CAP));
@@ -918,21 +897,29 @@ bool efx_mcdi_mac_check_fault(struct efx_nic *efx)
rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
outbuf, sizeof(outbuf), &outlength);
- if (rc) {
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
- __func__, rc);
+ if (rc)
return true;
- }
return MCDI_DWORD(outbuf, GET_LINK_OUT_MAC_FAULT) != 0;
}
-static int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr,
- u32 dma_len, int enable, int clear)
+enum efx_stats_action {
+ EFX_STATS_ENABLE,
+ EFX_STATS_DISABLE,
+ EFX_STATS_PULL,
+};
+
+static int efx_mcdi_mac_stats(struct efx_nic *efx,
+ enum efx_stats_action action, int clear)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_STATS_IN_LEN);
int rc;
- int period = enable ? 1000 : 0;
+ int change = action == EFX_STATS_PULL ? 0 : 1;
+ int enable = action == EFX_STATS_ENABLE ? 1 : 0;
+ int period = action == EFX_STATS_ENABLE ? 1000 : 0;
+ dma_addr_t dma_addr = efx->stats_buffer.dma_addr;
+ u32 dma_len = action != EFX_STATS_DISABLE ?
+ MC_CMD_MAC_NSTATS * sizeof(u64) : 0;
BUILD_BUG_ON(MC_CMD_MAC_STATS_OUT_DMA_LEN != 0);
@@ -940,8 +927,8 @@ static int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr,
MCDI_POPULATE_DWORD_7(inbuf, MAC_STATS_IN_CMD,
MAC_STATS_IN_DMA, !!enable,
MAC_STATS_IN_CLEAR, clear,
- MAC_STATS_IN_PERIODIC_CHANGE, 1,
- MAC_STATS_IN_PERIODIC_ENABLE, !!enable,
+ MAC_STATS_IN_PERIODIC_CHANGE, change,
+ MAC_STATS_IN_PERIODIC_ENABLE, enable,
MAC_STATS_IN_PERIODIC_CLEAR, 0,
MAC_STATS_IN_PERIODIC_NOEVENT, 1,
MAC_STATS_IN_PERIOD_MS, period);
@@ -949,14 +936,6 @@ static int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr,
rc = efx_mcdi_rpc(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf),
NULL, 0, NULL);
- if (rc)
- goto fail;
-
- return 0;
-
-fail:
- netif_err(efx, hw, efx->net_dev, "%s: %s failed rc=%d\n",
- __func__, enable ? "enable" : "disable", rc);
return rc;
}
@@ -966,13 +945,29 @@ void efx_mcdi_mac_start_stats(struct efx_nic *efx)
dma_stats[MC_CMD_MAC_GENERATION_END] = EFX_MC_STATS_GENERATION_INVALID;
- efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr,
- MC_CMD_MAC_NSTATS * sizeof(u64), 1, 0);
+ efx_mcdi_mac_stats(efx, EFX_STATS_ENABLE, 0);
}
void efx_mcdi_mac_stop_stats(struct efx_nic *efx)
{
- efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, 0, 0, 0);
+ efx_mcdi_mac_stats(efx, EFX_STATS_DISABLE, 0);
+}
+
+#define EFX_MAC_STATS_WAIT_US 100
+#define EFX_MAC_STATS_WAIT_ATTEMPTS 10
+
+void efx_mcdi_mac_pull_stats(struct efx_nic *efx)
+{
+ __le64 *dma_stats = efx->stats_buffer.addr;
+ int attempts = EFX_MAC_STATS_WAIT_ATTEMPTS;
+
+ dma_stats[MC_CMD_MAC_GENERATION_END] = EFX_MC_STATS_GENERATION_INVALID;
+ efx_mcdi_mac_stats(efx, EFX_STATS_PULL, 0);
+
+ while (dma_stats[MC_CMD_MAC_GENERATION_END] ==
+ EFX_MC_STATS_GENERATION_INVALID &&
+ attempts-- != 0)
+ udelay(EFX_MAC_STATS_WAIT_US);
}
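Note the busy-wait above is strictly bounded: at most EFX_MAC_STATS_WAIT_ATTEMPTS * EFX_MAC_STATS_WAIT_US = 10 * 100 us = 1 ms elapses before the function returns, whether or not the MC has refreshed the generation marker.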
int efx_mcdi_port_probe(struct efx_nic *efx)
@@ -1003,7 +998,7 @@ int efx_mcdi_port_probe(struct efx_nic *efx)
efx->stats_buffer.addr,
(u64)virt_to_phys(efx->stats_buffer.addr));
- efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, 0, 0, 1);
+ efx_mcdi_mac_stats(efx, EFX_STATS_DISABLE, 1);
return 0;
}
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index b14a717ac3e8..653b8782c956 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -91,6 +91,7 @@
/* Forward declare Precision Time Protocol (PTP) support structure. */
struct efx_ptp_data;
+struct hwtstamp_config;
struct efx_self_tests;
@@ -368,6 +369,13 @@ enum efx_rx_alloc_method {
RX_ALLOC_METHOD_PAGE = 2,
};
+enum efx_sync_events_state {
+ SYNC_EVENTS_DISABLED = 0,
+ SYNC_EVENTS_QUIESCENT,
+ SYNC_EVENTS_REQUESTED,
+ SYNC_EVENTS_VALID,
+};
+
/**
* struct efx_channel - An Efx channel
*
@@ -407,6 +415,9 @@ enum efx_rx_alloc_method {
* by __efx_rx_packet(), if @rx_pkt_n_frags != 0
* @rx_queue: RX queue for this channel
* @tx_queue: TX queues for this channel
+ * @sync_events_state: Current state of sync events on this channel
+ * @sync_timestamp_major: Major part of the last ptp sync event
+ * @sync_timestamp_minor: Minor part of the last ptp sync event
*/
struct efx_channel {
struct efx_nic *efx;
@@ -445,6 +456,10 @@ struct efx_channel {
struct efx_rx_queue rx_queue;
struct efx_tx_queue tx_queue[EFX_TXQ_TYPES];
+
+ enum efx_sync_events_state sync_events_state;
+ u32 sync_timestamp_major;
+ u32 sync_timestamp_minor;
};
/**
@@ -520,15 +535,6 @@ enum nic_state {
STATE_RECOVERY = 3, /* device recovering from PCI error */
};
-/*
- * Alignment of the skb->head which wraps a page-allocated RX buffer
- *
- * The skb allocated to wrap an rx_buffer can have this alignment. Since
- * the data is memcpy'd from the rx_buf, it does not need to be equal to
- * NET_IP_ALIGN.
- */
-#define EFX_PAGE_SKB_ALIGN 2
-
/* Forward declaration */
struct efx_nic;
@@ -651,6 +657,13 @@ struct vfdi_status;
* struct efx_nic - an Efx NIC
* @name: Device name (net device name or bus id before net device registered)
* @pci_dev: The PCI device
+ * @node: List node for maintaining primary/secondary function lists
+ * @primary: &struct efx_nic instance for the primary function of this
+ * controller. May be the same structure, and may be %NULL if no
+ * primary function is bound. Serialised by rtnl_lock.
+ * @secondary_list: List of &struct efx_nic instances for the secondary PCI
+ * functions of the controller, if this is for the primary function.
+ * Serialised by rtnl_lock.
* @type: Controller type attributes
* @legacy_irq: IRQ number
* @workqueue: Workqueue for port reconfigures and the HW monitor.
@@ -683,6 +696,8 @@ struct vfdi_status;
* @n_channels: Number of channels in use
* @n_rx_channels: Number of channels used for RX (= number of RX queues)
* @n_tx_channels: Number of channels used for TX
+ * @rx_ip_align: RX DMA address offset to have IP header aligned in
+ *	accordance with NET_IP_ALIGN
* @rx_dma_len: Current maximum RX DMA length
* @rx_buffer_order: Order (log2) of number of pages for each RX buffer
* @rx_buffer_truesize: Amortised allocation size of an RX buffer,
@@ -692,6 +707,8 @@ struct vfdi_status;
* (valid only if @rx_prefix_size != 0; always negative)
* @rx_packet_len_offset: Offset of RX packet length from start of packet data
* (valid only for NICs that set %EFX_RX_PKT_PREFIX_LEN; always negative)
+ * @rx_packet_ts_offset: Offset of timestamp from start of packet data
+ * (valid only if channel->sync_timestamps_enabled; always negative)
* @rx_hash_key: Toeplitz hash key for RSS
* @rx_indir_table: Indirection table for RSS
* @rx_scatter: Scatter mode enabled for receives
@@ -761,6 +778,7 @@ struct vfdi_status;
* @local_lock: Mutex protecting %local_addr_list and %local_page_list.
* @peer_work: Work item to broadcast peer addresses to VMs.
* @ptp_data: PTP state data
+ * @vpd_sn: Serial number read from VPD
* @monitor_work: Hardware monitor workitem
* @biu_lock: BIU (bus interface unit) lock
* @last_irq_cpu: Last CPU to handle a possible test interrupt. This
@@ -775,6 +793,9 @@ struct efx_nic {
/* The following fields should be written very rarely */
char name[IFNAMSIZ];
+ struct list_head node;
+ struct efx_nic *primary;
+ struct list_head secondary_list;
struct pci_dev *pci_dev;
unsigned int port_num;
const struct efx_nic_type *type;
@@ -816,6 +837,7 @@ struct efx_nic {
unsigned rss_spread;
unsigned tx_channel_offset;
unsigned n_tx_channels;
+ unsigned int rx_ip_align;
unsigned int rx_dma_len;
unsigned int rx_buffer_order;
unsigned int rx_buffer_truesize;
@@ -825,6 +847,7 @@ struct efx_nic {
unsigned int rx_prefix_size;
int rx_packet_hash_offset;
int rx_packet_len_offset;
+ int rx_packet_ts_offset;
u8 rx_hash_key[40];
u32 rx_indir_table[128];
bool rx_scatter;
@@ -849,10 +872,14 @@ struct efx_nic {
struct work_struct mac_work;
bool port_enabled;
+ bool mc_bist_for_other_fn;
bool port_initialized;
struct net_device *net_dev;
struct efx_buffer stats_buffer;
+ u64 rx_nodesc_drops_total;
+ u64 rx_nodesc_drops_while_down;
+ bool rx_nodesc_drops_prev_state;
unsigned int phy_type;
const struct efx_phy_operations *phy_op;
@@ -904,6 +931,8 @@ struct efx_nic {
struct efx_ptp_data *ptp_data;
+ char *vpd_sn;
+
/* The following fields may be written more often */
struct delayed_work monitor_work ____cacheline_aligned_in_smp;
@@ -956,6 +985,7 @@ struct efx_mtd_partition {
* @update_stats: Update statistics not provided by event handling.
* Either argument may be %NULL.
* @start_stats: Start the regular fetching of statistics
+ * @pull_stats: Pull stats from the NIC and wait until they arrive.
* @stop_stats: Stop the regular fetching of statistics
* @set_id_led: Set state of identifying LED or revert to automatic function
* @push_irq_moderation: Apply interrupt moderation value
@@ -1034,6 +1064,12 @@ struct efx_mtd_partition {
* @mtd_sync: Wait for write-back to complete on MTD partition. This
* also notifies the driver that a writer has finished using this
* partition.
+ * @ptp_write_host_time: Send host time to MC as part of sync protocol
+ * @ptp_set_ts_sync_events: Enable or disable sync events for inline RX
+ * timestamping, possibly only temporarily for the purposes of a reset.
+ * @ptp_set_ts_config: Set hardware timestamp configuration. The flags
+ * and tx_type will already have been validated but this operation
+ * must validate and update rx_filter.
* @revision: Hardware architecture revision
* @txd_ptr_tbl_base: TX descriptor ring base address
* @rxd_ptr_tbl_base: RX descriptor ring base address
@@ -1043,6 +1079,7 @@ struct efx_mtd_partition {
* @max_dma_mask: Maximum possible DMA mask
* @rx_prefix_size: Size of RX prefix before packet data
* @rx_hash_offset: Offset of RX flow hash within prefix
+ * @rx_ts_offset: Offset of timestamp within prefix
* @rx_buffer_padding: Size of padding at end of RX packet
* @can_rx_scatter: NIC is able to scatter packets to multiple buffers
* @always_rx_scatter: NIC will always scatter packets to multiple buffers
@@ -1052,6 +1089,7 @@ struct efx_mtd_partition {
* @offload_features: net_device feature flags for protocol offload
* features implemented in hardware
* @mcdi_max_ver: Maximum MCDI version supported
+ * @hwtstamp_filters: Mask of hardware timestamp filter types supported
*/
struct efx_nic_type {
unsigned int (*mem_map_size)(struct efx_nic *efx);
@@ -1074,6 +1112,7 @@ struct efx_nic_type {
size_t (*update_stats)(struct efx_nic *efx, u64 *full_stats,
struct rtnl_link_stats64 *core_stats);
void (*start_stats)(struct efx_nic *efx);
+ void (*pull_stats)(struct efx_nic *efx);
void (*stop_stats)(struct efx_nic *efx);
void (*set_id_led)(struct efx_nic *efx, enum efx_led_mode mode);
void (*push_irq_moderation)(struct efx_channel *channel);
@@ -1152,6 +1191,9 @@ struct efx_nic_type {
int (*mtd_sync)(struct mtd_info *mtd);
#endif
void (*ptp_write_host_time)(struct efx_nic *efx, u32 host_time);
+ int (*ptp_set_ts_sync_events)(struct efx_nic *efx, bool en, bool temp);
+ int (*ptp_set_ts_config)(struct efx_nic *efx,
+ struct hwtstamp_config *init);
int revision;
unsigned int txd_ptr_tbl_base;
@@ -1162,6 +1204,7 @@ struct efx_nic_type {
u64 max_dma_mask;
unsigned int rx_prefix_size;
unsigned int rx_hash_offset;
+ unsigned int rx_ts_offset;
unsigned int rx_buffer_padding;
bool can_rx_scatter;
bool always_rx_scatter;
@@ -1170,6 +1213,7 @@ struct efx_nic_type {
netdev_features_t offload_features;
int mcdi_max_ver;
unsigned int max_rx_ip_filters;
+ u32 hwtstamp_filters;
};
/**************************************************************************
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
index 9c90bf56090f..79226b19e3c4 100644
--- a/drivers/net/ethernet/sfc/nic.c
+++ b/drivers/net/ethernet/sfc/nic.c
@@ -519,3 +519,15 @@ void efx_nic_update_stats(const struct efx_hw_stat_desc *desc, size_t count,
}
}
}
+
+void efx_nic_fix_nodesc_drop_stat(struct efx_nic *efx, u64 *rx_nodesc_drops)
+{
+ /* if down, or this is the first update after coming up */
+ if (!(efx->net_dev->flags & IFF_UP) || !efx->rx_nodesc_drops_prev_state)
+ efx->rx_nodesc_drops_while_down +=
+ *rx_nodesc_drops - efx->rx_nodesc_drops_total;
+ efx->rx_nodesc_drops_total = *rx_nodesc_drops;
+ efx->rx_nodesc_drops_prev_state = !!(efx->net_dev->flags & IFF_UP);
+ *rx_nodesc_drops -= efx->rx_nodesc_drops_while_down;
+}
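A worked example of the bookkeeping above: suppose the counter reads 50 at the last update while the interface is up, 70 at an update while it is down, and 80 at the first update after it comes back up. The two down-time deltas (20, then 10) accumulate in rx_nodesc_drops_while_down, so the value reported to the caller stays at 50 across the outage; a later hardware reading of 90 is then reported as 60, so only drops that accrue while the interface is up are counted.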
+
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index 33852e824f12..5d9e2dc121f7 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -554,13 +554,29 @@ int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf,
bool spoofchk);
struct ethtool_ts_info;
-void efx_ptp_probe(struct efx_nic *efx);
+int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel);
+void efx_ptp_defer_probe_with_channel(struct efx_nic *efx);
+void efx_ptp_remove(struct efx_nic *efx);
int efx_ptp_set_ts_config(struct efx_nic *efx, struct ifreq *ifr);
int efx_ptp_get_ts_config(struct efx_nic *efx, struct ifreq *ifr);
void efx_ptp_get_ts_info(struct efx_nic *efx, struct ethtool_ts_info *ts_info);
bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
+int efx_ptp_get_mode(struct efx_nic *efx);
+int efx_ptp_change_mode(struct efx_nic *efx, bool enable_wanted,
+ unsigned int new_mode);
int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev);
+void efx_time_sync_event(struct efx_channel *channel, efx_qword_t *ev);
+void __efx_rx_skb_attach_timestamp(struct efx_channel *channel,
+ struct sk_buff *skb);
+static inline void efx_rx_skb_attach_timestamp(struct efx_channel *channel,
+ struct sk_buff *skb)
+{
+ if (channel->sync_events_state == SYNC_EVENTS_VALID)
+ __efx_rx_skb_attach_timestamp(channel, skb);
+}
+void efx_ptp_start_datapath(struct efx_nic *efx);
+void efx_ptp_stop_datapath(struct efx_nic *efx);
extern const struct efx_nic_type falcon_a1_nic_type;
extern const struct efx_nic_type falcon_b0_nic_type;
@@ -773,6 +789,7 @@ size_t efx_nic_describe_stats(const struct efx_hw_stat_desc *desc, size_t count,
void efx_nic_update_stats(const struct efx_hw_stat_desc *desc, size_t count,
const unsigned long *mask, u64 *stats,
const void *dma_buf, bool accumulate);
+void efx_nic_fix_nodesc_drop_stat(struct efx_nic *efx, u64 *stat);
#define EFX_MAX_FLUSH_TIME 5000
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index afd4d3a50460..084e2d44790f 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -62,7 +62,7 @@
#define SYNCHRONISATION_GRANULARITY_NS 200
/* Minimum permitted length of a (corrected) synchronisation time */
-#define MIN_SYNCHRONISATION_NS 120
+#define DEFAULT_MIN_SYNCHRONISATION_NS 120
/* Maximum permitted length of a (corrected) synchronisation time */
#define MAX_SYNCHRONISATION_NS 1000
@@ -195,31 +195,35 @@ struct efx_ptp_event_rx {
/**
* struct efx_ptp_timeset - Synchronisation between host and MC
* @host_start: Host time immediately before hardware timestamp taken
- * @seconds: Hardware timestamp, seconds
- * @nanoseconds: Hardware timestamp, nanoseconds
+ * @major: Hardware timestamp, major
+ * @minor: Hardware timestamp, minor
* @host_end: Host time immediately after hardware timestamp taken
- * @waitns: Number of nanoseconds between hardware timestamp being read and
+ * @wait: Number of NIC clock ticks between hardware timestamp being read and
* host end time being seen
* @window: Difference of host_end and host_start
* @valid: Whether this timeset is valid
*/
struct efx_ptp_timeset {
u32 host_start;
- u32 seconds;
- u32 nanoseconds;
+ u32 major;
+ u32 minor;
u32 host_end;
- u32 waitns;
+ u32 wait;
u32 window; /* Derived: end - start, allowing for wrap */
};
/**
* struct efx_ptp_data - Precision Time Protocol (PTP) state
- * @channel: The PTP channel
+ * @efx: The NIC context
+ * @channel: The PTP channel (Siena only)
+ * @rx_ts_inline: Flag for whether RX timestamps are inline (else they are
+ * separate events)
* @rxq: Receive queue (awaiting timestamps)
* @txq: Transmit queue
* @evt_list: List of MC receive events awaiting packets
* @evt_free_list: List of free events
* @evt_lock: Lock for manipulating evt_list and evt_free_list
+ * @evt_overflow: Boolean indicating that event list has overflowed
* @rx_evts: Instantiated events (on evt_list and evt_free_list)
* @workwq: Work queue for processing pending PTP operations
* @work: Work task
@@ -230,46 +234,39 @@ struct efx_ptp_timeset {
* @config: Current timestamp configuration
* @enabled: PTP operation enabled
* @mode: Mode in which PTP operating (PTP version)
+ * @time_format: Time format supported by this NIC
+ * @ns_to_nic_time: Function to convert from scalar nanoseconds to NIC time
+ * @nic_to_kernel_time: Function to convert from NIC to kernel time
+ * @min_synchronisation_ns: Minimum acceptable corrected sync window
+ * @ts_corrections.tx: Required driver correction of transmit timestamps
+ * @ts_corrections.rx: Required driver correction of receive timestamps
+ * @ts_corrections.pps_out: PPS output error (information only)
+ * @ts_corrections.pps_in: Required driver correction of PPS input timestamps
* @evt_frags: Partly assembled PTP events
* @evt_frag_idx: Current fragment number
* @evt_code: Last event code
* @start: Address at which MC indicates ready for synchronisation
* @host_time_pps: Host time at last PPS
- * @last_sync_ns: Last number of nanoseconds between readings when synchronising
- * @base_sync_ns: Number of nanoseconds for last synchronisation.
- * @base_sync_valid: Whether base_sync_time is valid.
* @current_adjfreq: Current ppb adjustment.
- * @phc_clock: Pointer to registered phc device
+ * @phc_clock: Pointer to registered phc device (if primary function)
* @phc_clock_info: Registration structure for phc device
* @pps_work: pps work task for handling pps events
* @pps_workwq: pps work queue
* @nic_ts_enabled: Flag indicating if NIC generated TS events are handled
* @txbuf: Buffer for use when transmitting (PTP) packets to MC (avoids
* allocations in main data path).
- * @debug_ptp_dir: PTP debugfs directory
- * @missed_rx_sync: Number of packets received without syncrhonisation.
- * @good_syncs: Number of successful synchronisations.
- * @no_time_syncs: Number of synchronisations with no good times.
- * @bad_sync_durations: Number of synchronisations with bad durations.
- * @bad_syncs: Number of failed synchronisations.
- * @last_sync_time: Number of nanoseconds for last synchronisation.
- * @sync_timeouts: Number of synchronisation timeouts
- * @fast_syncs: Number of synchronisations requiring short delay
- * @min_sync_delta: Minimum time between event and synchronisation
- * @max_sync_delta: Maximum time between event and synchronisation
- * @average_sync_delta: Average time between event and synchronisation.
- * Modified moving average.
- * @last_sync_delta: Last time between event and synchronisation
- * @mc_stats: Context value for MC statistics
* @timeset: Last set of synchronisation statistics.
*/
struct efx_ptp_data {
+ struct efx_nic *efx;
struct efx_channel *channel;
+ bool rx_ts_inline;
struct sk_buff_head rxq;
struct sk_buff_head txq;
struct list_head evt_list;
struct list_head evt_free_list;
spinlock_t evt_lock;
+ bool evt_overflow;
struct efx_ptp_event_rx rx_evts[MAX_RECEIVE_EVENTS];
struct workqueue_struct *workwq;
struct work_struct work;
@@ -280,14 +277,22 @@ struct efx_ptp_data {
struct hwtstamp_config config;
bool enabled;
unsigned int mode;
+ unsigned int time_format;
+ void (*ns_to_nic_time)(s64 ns, u32 *nic_major, u32 *nic_minor);
+ ktime_t (*nic_to_kernel_time)(u32 nic_major, u32 nic_minor,
+ s32 correction);
+ unsigned int min_synchronisation_ns;
+ struct {
+ s32 tx;
+ s32 rx;
+ s32 pps_out;
+ s32 pps_in;
+ } ts_corrections;
efx_qword_t evt_frags[MAX_EVENT_FRAGS];
int evt_frag_idx;
int evt_code;
struct efx_buffer start;
struct pps_event_time host_time_pps;
- unsigned last_sync_ns;
- unsigned base_sync_ns;
- bool base_sync_valid;
s64 current_adjfreq;
struct ptp_clock *phc_clock;
struct ptp_clock_info phc_clock_info;
@@ -307,19 +312,191 @@ static int efx_phc_settime(struct ptp_clock_info *ptp,
static int efx_phc_enable(struct ptp_clock_info *ptp,
struct ptp_clock_request *request, int on);
+/* For Siena platforms NIC time is s and ns */
+static void efx_ptp_ns_to_s_ns(s64 ns, u32 *nic_major, u32 *nic_minor)
+{
+ struct timespec ts = ns_to_timespec(ns);
+ *nic_major = ts.tv_sec;
+ *nic_minor = ts.tv_nsec;
+}
+
+static ktime_t efx_ptp_s_ns_to_ktime_correction(u32 nic_major, u32 nic_minor,
+ s32 correction)
+{
+ ktime_t kt = ktime_set(nic_major, nic_minor);
+ if (correction >= 0)
+ kt = ktime_add_ns(kt, (u64)correction);
+ else
+ kt = ktime_sub_ns(kt, (u64)-correction);
+ return kt;
+}
+
+/* To convert from s27 format to ns we multiply then divide by a power of 2.
+ * For the conversion from ns to s27, the operation is also converted to a
+ * multiply and shift.
+ */
+#define S27_TO_NS_SHIFT (27)
+#define NS_TO_S27_MULT (((1ULL << 63) + NSEC_PER_SEC / 2) / NSEC_PER_SEC)
+#define NS_TO_S27_SHIFT (63 - S27_TO_NS_SHIFT)
+#define S27_MINOR_MAX (1 << S27_TO_NS_SHIFT)
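As a sanity check on these constants: NS_TO_S27_MULT is approximately 2^63 / NSEC_PER_SEC, so multiplying by it and shifting right by NS_TO_S27_SHIFT = 36 computes ns * 2^27 / 10^9 with rounding. For ns = 500000000 (half a second) this gives 500000000 * 2^27 / 10^9 = 2^26, which is exactly S27_MINOR_MAX / 2, as expected.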
+
+/* For Huntington platforms NIC time is in seconds and fractions of a second
+ * where the minor register only uses 27 bits in units of 2^-27s.
+ */
+static void efx_ptp_ns_to_s27(s64 ns, u32 *nic_major, u32 *nic_minor)
+{
+ struct timespec ts = ns_to_timespec(ns);
+ u32 maj = ts.tv_sec;
+ u32 min = (u32)(((u64)ts.tv_nsec * NS_TO_S27_MULT +
+ (1ULL << (NS_TO_S27_SHIFT - 1))) >> NS_TO_S27_SHIFT);
+
+ /* The conversion can result in the minor value exceeding the maximum.
+ * In this case, round up to the next second.
+ */
+ if (min >= S27_MINOR_MAX) {
+ min -= S27_MINOR_MAX;
+ maj++;
+ }
+
+ *nic_major = maj;
+ *nic_minor = min;
+}
+
+static inline ktime_t efx_ptp_s27_to_ktime(u32 nic_major, u32 nic_minor)
+{
+ u32 ns = (u32)(((u64)nic_minor * NSEC_PER_SEC +
+ (1ULL << (S27_TO_NS_SHIFT - 1))) >> S27_TO_NS_SHIFT);
+ return ktime_set(nic_major, ns);
+}
+
+static ktime_t efx_ptp_s27_to_ktime_correction(u32 nic_major, u32 nic_minor,
+ s32 correction)
+{
+ /* Apply the correction and deal with carry */
+ nic_minor += correction;
+ if ((s32)nic_minor < 0) {
+ nic_minor += S27_MINOR_MAX;
+ nic_major--;
+ } else if (nic_minor >= S27_MINOR_MAX) {
+ nic_minor -= S27_MINOR_MAX;
+ nic_major++;
+ }
+
+ return efx_ptp_s27_to_ktime(nic_major, nic_minor);
+}
+
+/* Get PTP attributes and set up time conversions */
+static int efx_ptp_get_attributes(struct efx_nic *efx)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_GET_ATTRIBUTES_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_PTP_OUT_GET_ATTRIBUTES_LEN);
+ struct efx_ptp_data *ptp = efx->ptp_data;
+ int rc;
+ u32 fmt;
+ size_t out_len;
+
+ /* Get the PTP attributes. If the NIC doesn't support the operation we
+ * use the default format for compatibility with older NICs i.e.
+ * seconds and nanoseconds.
+ */
+ MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_GET_ATTRIBUTES);
+ MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
+ rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &out_len);
+ if (rc == 0)
+ fmt = MCDI_DWORD(outbuf, PTP_OUT_GET_ATTRIBUTES_TIME_FORMAT);
+ else if (rc == -EINVAL)
+ fmt = MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_NANOSECONDS;
+ else
+ return rc;
+
+ if (fmt == MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_27FRACTION) {
+ ptp->ns_to_nic_time = efx_ptp_ns_to_s27;
+ ptp->nic_to_kernel_time = efx_ptp_s27_to_ktime_correction;
+ } else if (fmt == MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_NANOSECONDS) {
+ ptp->ns_to_nic_time = efx_ptp_ns_to_s_ns;
+ ptp->nic_to_kernel_time = efx_ptp_s_ns_to_ktime_correction;
+ } else {
+ return -ERANGE;
+ }
+
+ ptp->time_format = fmt;
+
+ /* MC_CMD_PTP_OP_GET_ATTRIBUTES is an extended version of an older
+ * operation MC_CMD_PTP_OP_GET_TIME_FORMAT that also returns a value
+ * to use for the minimum acceptable corrected synchronization window.
+ * If we have the extra information store it. For older firmware that
+ * does not implement the extended command use the default value.
+ */
+ if (rc == 0 && out_len >= MC_CMD_PTP_OUT_GET_ATTRIBUTES_LEN)
+ ptp->min_synchronisation_ns =
+ MCDI_DWORD(outbuf,
+ PTP_OUT_GET_ATTRIBUTES_SYNC_WINDOW_MIN);
+ else
+ ptp->min_synchronisation_ns = DEFAULT_MIN_SYNCHRONISATION_NS;
+
+ return 0;
+}
+
+/* Get PTP timestamp corrections */
+static int efx_ptp_get_timestamp_corrections(struct efx_nic *efx)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_GET_TIMESTAMP_CORRECTIONS_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_LEN);
+ int rc;
+
+ /* Get the timestamp corrections from the NIC. If this operation is
+ * not supported (older NICs) then no correction is required.
+ */
+ MCDI_SET_DWORD(inbuf, PTP_IN_OP,
+ MC_CMD_PTP_OP_GET_TIMESTAMP_CORRECTIONS);
+ MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), NULL);
+ if (rc == 0) {
+ efx->ptp_data->ts_corrections.tx = MCDI_DWORD(outbuf,
+ PTP_OUT_GET_TIMESTAMP_CORRECTIONS_TRANSMIT);
+ efx->ptp_data->ts_corrections.rx = MCDI_DWORD(outbuf,
+ PTP_OUT_GET_TIMESTAMP_CORRECTIONS_RECEIVE);
+ efx->ptp_data->ts_corrections.pps_out = MCDI_DWORD(outbuf,
+ PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_OUT);
+ efx->ptp_data->ts_corrections.pps_in = MCDI_DWORD(outbuf,
+ PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_IN);
+ } else if (rc == -EINVAL) {
+ efx->ptp_data->ts_corrections.tx = 0;
+ efx->ptp_data->ts_corrections.rx = 0;
+ efx->ptp_data->ts_corrections.pps_out = 0;
+ efx->ptp_data->ts_corrections.pps_in = 0;
+ } else {
+ return rc;
+ }
+
+ return 0;
+}
+
/* Enable MCDI PTP support. */
static int efx_ptp_enable(struct efx_nic *efx)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_ENABLE_LEN);
+ MCDI_DECLARE_BUF_OUT_OR_ERR(outbuf, 0);
+ int rc;
MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_ENABLE);
MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
MCDI_SET_DWORD(inbuf, PTP_IN_ENABLE_QUEUE,
- efx->ptp_data->channel->channel);
+ efx->ptp_data->channel ?
+ efx->ptp_data->channel->channel : 0);
MCDI_SET_DWORD(inbuf, PTP_IN_ENABLE_MODE, efx->ptp_data->mode);
- return efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
- NULL, 0, NULL);
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), NULL);
+ rc = (rc == -EALREADY) ? 0 : rc;
+ if (rc)
+ efx_mcdi_display_error(efx, MC_CMD_PTP,
+ MC_CMD_PTP_IN_ENABLE_LEN,
+ outbuf, sizeof(outbuf), rc);
+ return rc;
}
/* Disable MCDI PTP support.
@@ -330,11 +507,19 @@ static int efx_ptp_enable(struct efx_nic *efx)
static int efx_ptp_disable(struct efx_nic *efx)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_DISABLE_LEN);
+ MCDI_DECLARE_BUF_OUT_OR_ERR(outbuf, 0);
+ int rc;
MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_DISABLE);
MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
- return efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
- NULL, 0, NULL);
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), NULL);
+ rc = (rc == -EALREADY) ? 0 : rc;
+ if (rc)
+ efx_mcdi_display_error(efx, MC_CMD_PTP,
+ MC_CMD_PTP_IN_DISABLE_LEN,
+ outbuf, sizeof(outbuf), rc);
+ return rc;
}
static void efx_ptp_deliver_rx_queue(struct sk_buff_head *q)
@@ -402,11 +587,10 @@ static void efx_ptp_read_timeset(MCDI_DECLARE_STRUCT_PTR(data),
unsigned start_ns, end_ns;
timeset->host_start = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_HOSTSTART);
- timeset->seconds = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_SECONDS);
- timeset->nanoseconds = MCDI_DWORD(data,
- PTP_OUT_SYNCHRONIZE_NANOSECONDS);
+ timeset->major = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_MAJOR);
+ timeset->minor = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_MINOR);
timeset->host_end = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_HOSTEND),
- timeset->waitns = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_WAITNS);
+ timeset->wait = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_WAITNS);
/* Ignore seconds */
start_ns = timeset->host_start & MC_NANOSECOND_MASK;
@@ -435,62 +619,68 @@ efx_ptp_process_times(struct efx_nic *efx, MCDI_DECLARE_STRUCT_PTR(synch_buf),
MCDI_VAR_ARRAY_LEN(response_length,
PTP_OUT_SYNCHRONIZE_TIMESET);
unsigned i;
- unsigned total;
unsigned ngood = 0;
unsigned last_good = 0;
struct efx_ptp_data *ptp = efx->ptp_data;
u32 last_sec;
u32 start_sec;
struct timespec delta;
+ ktime_t mc_time;
if (number_readings == 0)
return -EAGAIN;
- /* Read the set of results and increment stats for any results that
- * appera to be erroneous.
+ /* Read the set of results and find the last good host-MC
+ * synchronization result. The MC times when it finishes reading the
+ * host time so the corrected window time should be fairly constant
+ * for a given platform.
*/
for (i = 0; i < number_readings; i++) {
+ s32 window, corrected;
+ struct timespec wait;
+
efx_ptp_read_timeset(
MCDI_ARRAY_STRUCT_PTR(synch_buf,
PTP_OUT_SYNCHRONIZE_TIMESET, i),
&ptp->timeset[i]);
- }
- /* Find the last good host-MC synchronization result. The MC times
- * when it finishes reading the host time so the corrected window time
- * should be fairly constant for a given platform.
- */
- total = 0;
- for (i = 0; i < number_readings; i++)
- if (ptp->timeset[i].window > ptp->timeset[i].waitns) {
- unsigned win;
-
- win = ptp->timeset[i].window - ptp->timeset[i].waitns;
- if (win >= MIN_SYNCHRONISATION_NS &&
- win < MAX_SYNCHRONISATION_NS) {
- total += ptp->timeset[i].window;
- ngood++;
- last_good = i;
- }
+ wait = ktime_to_timespec(
+ ptp->nic_to_kernel_time(0, ptp->timeset[i].wait, 0));
+ window = ptp->timeset[i].window;
+ corrected = window - wait.tv_nsec;
+
+ /* We expect the uncorrected synchronization window to be at
+ * least as large as the interval between host start and end
+	 * times. If it is smaller than this then it is most likely
+ * to be a consequence of the host's time being adjusted.
+ * Check that the corrected sync window is in a reasonable
+ * range. If it is out of range it is likely to be because an
+ * interrupt or other delay occurred between reading the system
+ * time and writing it to MC memory.
+ */
+ if (window >= SYNCHRONISATION_GRANULARITY_NS &&
+ corrected < MAX_SYNCHRONISATION_NS &&
+ corrected >= ptp->min_synchronisation_ns) {
+ ngood++;
+ last_good = i;
}
+ }
if (ngood == 0) {
netif_warn(efx, drv, efx->net_dev,
- "PTP no suitable synchronisations %dns\n",
- ptp->base_sync_ns);
+ "PTP no suitable synchronisations\n");
return -EAGAIN;
}
- /* Average minimum this synchronisation */
- ptp->last_sync_ns = DIV_ROUND_UP(total, ngood);
- if (!ptp->base_sync_valid || (ptp->last_sync_ns < ptp->base_sync_ns)) {
- ptp->base_sync_valid = true;
- ptp->base_sync_ns = ptp->last_sync_ns;
- }
+	/* Convert the NIC time into kernel time. No correction is required -
+ * this time is the output of a firmware process.
+ */
+ mc_time = ptp->nic_to_kernel_time(ptp->timeset[last_good].major,
+ ptp->timeset[last_good].minor, 0);
/* Calculate delay from actual PPS to last_time */
- delta.tv_nsec =
- ptp->timeset[last_good].nanoseconds +
+ delta = ktime_to_timespec(mc_time);
+ delta.tv_nsec +=
last_time->ts_real.tv_nsec -
(ptp->timeset[last_good].host_start & MC_NANOSECOND_MASK);
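
The acceptance test in the loop above reduces to simple interval arithmetic: the raw window must exceed the timer granularity, and the window minus the MC's wait time must fall inside [min_synchronisation_ns, MAX_SYNCHRONISATION_NS). A hedged sketch with made-up bounds (the driver reads the real minimum from GET_ATTRIBUTES, or uses DEFAULT_MIN_SYNCHRONISATION_NS):

#include <stdbool.h>
#include <stdio.h>

/* Illustrative bounds only, not the driver's constants */
#define GRANULARITY_NS  200
#define MIN_SYNC_NS     120
#define MAX_SYNC_NS     1000

static bool sync_window_ok(int window_ns, int wait_ns)
{
        int corrected = window_ns - wait_ns;

        return window_ns >= GRANULARITY_NS &&
               corrected >= MIN_SYNC_NS &&
               corrected < MAX_SYNC_NS;
}

int main(void)
{
        printf("%d\n", sync_window_ok(500, 150));   /* 1: accepted */
        printf("%d\n", sync_window_ok(5000, 150));  /* 0: host was delayed */
        return 0;
}
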
@@ -600,9 +790,10 @@ static int efx_ptp_xmit_skb(struct efx_nic *efx, struct sk_buff *skb)
goto fail;
memset(&timestamps, 0, sizeof(timestamps));
- timestamps.hwtstamp = ktime_set(
- MCDI_DWORD(txtime, PTP_OUT_TRANSMIT_SECONDS),
- MCDI_DWORD(txtime, PTP_OUT_TRANSMIT_NANOSECONDS));
+ timestamps.hwtstamp = ptp_data->nic_to_kernel_time(
+ MCDI_DWORD(txtime, PTP_OUT_TRANSMIT_MAJOR),
+ MCDI_DWORD(txtime, PTP_OUT_TRANSMIT_MINOR),
+ ptp_data->ts_corrections.tx);
skb_tstamp_tx(skb, &timestamps);
@@ -620,6 +811,9 @@ static void efx_ptp_drop_time_expired_events(struct efx_nic *efx)
struct list_head *cursor;
struct list_head *next;
+ if (ptp->rx_ts_inline)
+ return;
+
/* Drop time-expired events */
spin_lock_bh(&ptp->evt_lock);
if (!list_empty(&ptp->evt_list)) {
@@ -635,6 +829,11 @@ static void efx_ptp_drop_time_expired_events(struct efx_nic *efx)
}
}
}
+ /* If the event overflow flag is set and the event list is now empty
+ * clear the flag to re-enable the overflow warning message.
+ */
+ if (ptp->evt_overflow && list_empty(&ptp->evt_list))
+ ptp->evt_overflow = false;
spin_unlock_bh(&ptp->evt_lock);
}
@@ -648,6 +847,8 @@ static enum ptp_packet_state efx_ptp_match_rx(struct efx_nic *efx,
struct efx_ptp_match *match;
enum ptp_packet_state rc = PTP_PACKET_STATE_UNMATCHED;
+ WARN_ON_ONCE(ptp->rx_ts_inline);
+
spin_lock_bh(&ptp->evt_lock);
evts_waiting = !list_empty(&ptp->evt_list);
spin_unlock_bh(&ptp->evt_lock);
@@ -676,6 +877,11 @@ static enum ptp_packet_state efx_ptp_match_rx(struct efx_nic *efx,
break;
}
}
+ /* If the event overflow flag is set and the event list is now empty
+ * clear the flag to re-enable the overflow warning message.
+ */
+ if (ptp->evt_overflow && list_empty(&ptp->evt_list))
+ ptp->evt_overflow = false;
spin_unlock_bh(&ptp->evt_lock);
return rc;
@@ -684,13 +890,10 @@ static enum ptp_packet_state efx_ptp_match_rx(struct efx_nic *efx,
/* Process any queued receive events and corresponding packets
*
* q is returned with all the packets that are ready for delivery.
- * true is returned if at least one of those packets requires
- * synchronisation.
*/
-static bool efx_ptp_process_events(struct efx_nic *efx, struct sk_buff_head *q)
+static void efx_ptp_process_events(struct efx_nic *efx, struct sk_buff_head *q)
{
struct efx_ptp_data *ptp = efx->ptp_data;
- bool rc = false;
struct sk_buff *skb;
while ((skb = skb_dequeue(&ptp->rxq))) {
@@ -701,12 +904,12 @@ static bool efx_ptp_process_events(struct efx_nic *efx, struct sk_buff_head *q)
__skb_queue_tail(q, skb);
} else if (efx_ptp_match_rx(efx, skb) ==
PTP_PACKET_STATE_MATCHED) {
- rc = true;
__skb_queue_tail(q, skb);
} else if (time_after(jiffies, match->expiry)) {
match->state = PTP_PACKET_STATE_TIMED_OUT;
- netif_warn(efx, rx_err, efx->net_dev,
- "PTP packet - no timestamp seen\n");
+ if (net_ratelimit())
+ netif_warn(efx, rx_err, efx->net_dev,
+ "PTP packet - no timestamp seen\n");
__skb_queue_tail(q, skb);
} else {
/* Replace unprocessed entry and stop */
@@ -714,8 +917,6 @@ static bool efx_ptp_process_events(struct efx_nic *efx, struct sk_buff_head *q)
break;
}
}
-
- return rc;
}
/* Complete processing of a received packet */
@@ -726,13 +927,27 @@ static inline void efx_ptp_process_rx(struct efx_nic *efx, struct sk_buff *skb)
local_bh_enable();
}
-static int efx_ptp_start(struct efx_nic *efx)
+static void efx_ptp_remove_multicast_filters(struct efx_nic *efx)
+{
+ struct efx_ptp_data *ptp = efx->ptp_data;
+
+ if (ptp->rxfilter_installed) {
+ efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
+ ptp->rxfilter_general);
+ efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
+ ptp->rxfilter_event);
+ ptp->rxfilter_installed = false;
+ }
+}
+
+static int efx_ptp_insert_multicast_filters(struct efx_nic *efx)
{
struct efx_ptp_data *ptp = efx->ptp_data;
struct efx_filter_spec rxfilter;
int rc;
- ptp->reset_required = false;
+ if (!ptp->channel || ptp->rxfilter_installed)
+ return 0;
/* Must filter on both event and general ports to ensure
* that there is no packet re-ordering.
@@ -765,40 +980,53 @@ static int efx_ptp_start(struct efx_nic *efx)
goto fail;
ptp->rxfilter_general = rc;
+ ptp->rxfilter_installed = true;
+ return 0;
+
+fail:
+ efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
+ ptp->rxfilter_event);
+ return rc;
+}
+
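The fail path above is the usual unwind idiom: if installing the general-port filter fails, the already-installed event-port filter must be removed so no half-configured state survives. A self-contained sketch of the pattern, with printf stand-ins for the filter calls (the forced second failure is illustrative):

#include <errno.h>
#include <stdio.h>

static int install(const char *name, int fail)
{
        if (fail)
                return -EBUSY;
        printf("installed %s filter\n", name);
        return 0;
}

static void uninstall(const char *name)
{
        printf("removed %s filter\n", name);
}

static int install_filter_pair(int fail_second)
{
        int rc = install("event", 0);

        if (rc)
                return rc;
        rc = install("general", fail_second);
        if (rc)
                goto fail;
        return 0;
fail:
        uninstall("event");     /* undo the half-installed state */
        return rc;
}

int main(void)
{
        printf("rc=%d\n", install_filter_pair(1));
        return 0;
}
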
+static int efx_ptp_start(struct efx_nic *efx)
+{
+ struct efx_ptp_data *ptp = efx->ptp_data;
+ int rc;
+
+ ptp->reset_required = false;
+
+ rc = efx_ptp_insert_multicast_filters(efx);
+ if (rc)
+ return rc;
+
rc = efx_ptp_enable(efx);
if (rc != 0)
- goto fail2;
+ goto fail;
ptp->evt_frag_idx = 0;
ptp->current_adjfreq = 0;
- ptp->rxfilter_installed = true;
return 0;
-fail2:
- efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
- ptp->rxfilter_general);
fail:
- efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
- ptp->rxfilter_event);
-
+ efx_ptp_remove_multicast_filters(efx);
return rc;
}
static int efx_ptp_stop(struct efx_nic *efx)
{
struct efx_ptp_data *ptp = efx->ptp_data;
- int rc = efx_ptp_disable(efx);
struct list_head *cursor;
struct list_head *next;
+ int rc;
- if (ptp->rxfilter_installed) {
- efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
- ptp->rxfilter_general);
- efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
- ptp->rxfilter_event);
- ptp->rxfilter_installed = false;
- }
+ if (ptp == NULL)
+ return 0;
+
+ rc = efx_ptp_disable(efx);
+
+ efx_ptp_remove_multicast_filters(efx);
/* Make sure RX packets are really delivered */
efx_ptp_deliver_rx_queue(&efx->ptp_data->rxq);
@@ -809,16 +1037,24 @@ static int efx_ptp_stop(struct efx_nic *efx)
list_for_each_safe(cursor, next, &efx->ptp_data->evt_list) {
list_move(cursor, &efx->ptp_data->evt_free_list);
}
+ ptp->evt_overflow = false;
spin_unlock_bh(&efx->ptp_data->evt_lock);
return rc;
}
+static int efx_ptp_restart(struct efx_nic *efx)
+{
+ if (efx->ptp_data && efx->ptp_data->enabled)
+ return efx_ptp_start(efx);
+ return 0;
+}
+
static void efx_ptp_pps_worker(struct work_struct *work)
{
struct efx_ptp_data *ptp =
container_of(work, struct efx_ptp_data, pps_work);
- struct efx_nic *efx = ptp->channel->efx;
+ struct efx_nic *efx = ptp->efx;
struct ptp_clock_event ptp_evt;
if (efx_ptp_synchronize(efx, PTP_SYNC_ATTEMPTS))
@@ -829,13 +1065,11 @@ static void efx_ptp_pps_worker(struct work_struct *work)
ptp_clock_event(ptp->phc_clock, &ptp_evt);
}
-/* Process any pending transmissions and timestamp any received packets.
- */
static void efx_ptp_worker(struct work_struct *work)
{
struct efx_ptp_data *ptp_data =
container_of(work, struct efx_ptp_data, work);
- struct efx_nic *efx = ptp_data->channel->efx;
+ struct efx_nic *efx = ptp_data->efx;
struct sk_buff *skb;
struct sk_buff_head tempq;
@@ -848,42 +1082,50 @@ static void efx_ptp_worker(struct work_struct *work)
efx_ptp_drop_time_expired_events(efx);
__skb_queue_head_init(&tempq);
- if (efx_ptp_process_events(efx, &tempq) ||
- !skb_queue_empty(&ptp_data->txq)) {
+ efx_ptp_process_events(efx, &tempq);
- while ((skb = skb_dequeue(&ptp_data->txq)))
- efx_ptp_xmit_skb(efx, skb);
- }
+ while ((skb = skb_dequeue(&ptp_data->txq)))
+ efx_ptp_xmit_skb(efx, skb);
while ((skb = __skb_dequeue(&tempq)))
efx_ptp_process_rx(efx, skb);
}
-/* Initialise PTP channel and state.
- *
- * Setting core_index to zero causes the queue to be initialised and doesn't
- * overlap with 'rxq0' because ptp.c doesn't use skb_record_rx_queue.
- */
-static int efx_ptp_probe_channel(struct efx_channel *channel)
+static const struct ptp_clock_info efx_phc_clock_info = {
+ .owner = THIS_MODULE,
+ .name = "sfc",
+ .max_adj = MAX_PPB,
+ .n_alarm = 0,
+ .n_ext_ts = 0,
+ .n_per_out = 0,
+ .pps = 1,
+ .adjfreq = efx_phc_adjfreq,
+ .adjtime = efx_phc_adjtime,
+ .gettime = efx_phc_gettime,
+ .settime = efx_phc_settime,
+ .enable = efx_phc_enable,
+};
+
+/* Initialise PTP state. */
+int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel)
{
- struct efx_nic *efx = channel->efx;
struct efx_ptp_data *ptp;
int rc = 0;
unsigned int pos;
- channel->irq_moderation = 0;
- channel->rx_queue.core_index = 0;
-
ptp = kzalloc(sizeof(struct efx_ptp_data), GFP_KERNEL);
efx->ptp_data = ptp;
if (!efx->ptp_data)
return -ENOMEM;
+ ptp->efx = efx;
+ ptp->channel = channel;
+ ptp->rx_ts_inline = efx_nic_rev(efx) >= EFX_REV_HUNT_A0;
+
rc = efx_nic_alloc_buffer(efx, &ptp->start, sizeof(int), GFP_KERNEL);
if (rc != 0)
goto fail1;
- ptp->channel = channel;
skb_queue_head_init(&ptp->rxq);
skb_queue_head_init(&ptp->txq);
ptp->workwq = create_singlethread_workqueue("sfc_ptp");
@@ -901,34 +1143,34 @@ static int efx_ptp_probe_channel(struct efx_channel *channel)
spin_lock_init(&ptp->evt_lock);
for (pos = 0; pos < MAX_RECEIVE_EVENTS; pos++)
list_add(&ptp->rx_evts[pos].link, &ptp->evt_free_list);
+ ptp->evt_overflow = false;
- ptp->phc_clock_info.owner = THIS_MODULE;
- snprintf(ptp->phc_clock_info.name,
- sizeof(ptp->phc_clock_info.name),
- "%pm", efx->net_dev->perm_addr);
- ptp->phc_clock_info.max_adj = MAX_PPB;
- ptp->phc_clock_info.n_alarm = 0;
- ptp->phc_clock_info.n_ext_ts = 0;
- ptp->phc_clock_info.n_per_out = 0;
- ptp->phc_clock_info.pps = 1;
- ptp->phc_clock_info.adjfreq = efx_phc_adjfreq;
- ptp->phc_clock_info.adjtime = efx_phc_adjtime;
- ptp->phc_clock_info.gettime = efx_phc_gettime;
- ptp->phc_clock_info.settime = efx_phc_settime;
- ptp->phc_clock_info.enable = efx_phc_enable;
-
- ptp->phc_clock = ptp_clock_register(&ptp->phc_clock_info,
- &efx->pci_dev->dev);
- if (IS_ERR(ptp->phc_clock)) {
- rc = PTR_ERR(ptp->phc_clock);
+ /* Get the NIC PTP attributes and set up time conversions */
+ rc = efx_ptp_get_attributes(efx);
+ if (rc < 0)
goto fail3;
- }
- INIT_WORK(&ptp->pps_work, efx_ptp_pps_worker);
- ptp->pps_workwq = create_singlethread_workqueue("sfc_pps");
- if (!ptp->pps_workwq) {
- rc = -ENOMEM;
- goto fail4;
+ /* Get the timestamp corrections */
+ rc = efx_ptp_get_timestamp_corrections(efx);
+ if (rc < 0)
+ goto fail3;
+
+ if (efx->mcdi->fn_flags &
+ (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY)) {
+ ptp->phc_clock_info = efx_phc_clock_info;
+ ptp->phc_clock = ptp_clock_register(&ptp->phc_clock_info,
+ &efx->pci_dev->dev);
+ if (IS_ERR(ptp->phc_clock)) {
+ rc = PTR_ERR(ptp->phc_clock);
+ goto fail3;
+ }
+
+ INIT_WORK(&ptp->pps_work, efx_ptp_pps_worker);
+ ptp->pps_workwq = create_singlethread_workqueue("sfc_pps");
+ if (!ptp->pps_workwq) {
+ rc = -ENOMEM;
+ goto fail4;
+ }
}
ptp->nic_ts_enabled = false;
@@ -949,14 +1191,27 @@ fail1:
return rc;
}
-static void efx_ptp_remove_channel(struct efx_channel *channel)
+/* Initialise PTP channel.
+ *
+ * Setting core_index to zero causes the queue to be initialised and doesn't
+ * overlap with 'rxq0' because ptp.c doesn't use skb_record_rx_queue.
+ */
+static int efx_ptp_probe_channel(struct efx_channel *channel)
{
struct efx_nic *efx = channel->efx;
+ channel->irq_moderation = 0;
+ channel->rx_queue.core_index = 0;
+
+ return efx_ptp_probe(efx, channel);
+}
+
+void efx_ptp_remove(struct efx_nic *efx)
+{
if (!efx->ptp_data)
return;
- (void)efx_ptp_disable(channel->efx);
+ (void)efx_ptp_disable(efx);
cancel_work_sync(&efx->ptp_data->work);
cancel_work_sync(&efx->ptp_data->pps_work);
@@ -964,15 +1219,22 @@ static void efx_ptp_remove_channel(struct efx_channel *channel)
skb_queue_purge(&efx->ptp_data->rxq);
skb_queue_purge(&efx->ptp_data->txq);
- ptp_clock_unregister(efx->ptp_data->phc_clock);
+ if (efx->ptp_data->phc_clock) {
+ destroy_workqueue(efx->ptp_data->pps_workwq);
+ ptp_clock_unregister(efx->ptp_data->phc_clock);
+ }
destroy_workqueue(efx->ptp_data->workwq);
- destroy_workqueue(efx->ptp_data->pps_workwq);
efx_nic_free_buffer(efx, &efx->ptp_data->start);
kfree(efx->ptp_data);
}
+static void efx_ptp_remove_channel(struct efx_channel *channel)
+{
+ efx_ptp_remove(channel->efx);
+}
+
static void efx_ptp_get_channel_name(struct efx_channel *channel,
char *buf, size_t len)
{
@@ -989,7 +1251,11 @@ bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb)
skb->len >= PTP_MIN_LENGTH &&
skb->len <= MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM &&
likely(skb->protocol == htons(ETH_P_IP)) &&
+ skb_transport_header_was_set(skb) &&
+ skb_network_header_len(skb) >= sizeof(struct iphdr) &&
ip_hdr(skb)->protocol == IPPROTO_UDP &&
+ skb_headlen(skb) >=
+ skb_transport_offset(skb) + sizeof(struct udphdr) &&
udp_hdr(skb)->dest == htons(PTP_EVENT_PORT);
}
@@ -1049,14 +1315,8 @@ static bool efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb)
/* Does this packet require timestamping? */
if (ntohs(*(__be16 *)&skb->data[PTP_DPORT_OFFSET]) == PTP_EVENT_PORT) {
- struct skb_shared_hwtstamps *timestamps;
-
match->state = PTP_PACKET_STATE_UNMATCHED;
- /* Clear all timestamps held: filled in later */
- timestamps = skb_hwtstamps(skb);
- memset(timestamps, 0, sizeof(*timestamps));
-
/* We expect the sequence number to be in the same position in
* the packet for PTP V1 and V2
*/
@@ -1101,12 +1361,17 @@ int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb)
return NETDEV_TX_OK;
}
-static int efx_ptp_change_mode(struct efx_nic *efx, bool enable_wanted,
- unsigned int new_mode)
+int efx_ptp_get_mode(struct efx_nic *efx)
+{
+ return efx->ptp_data->mode;
+}
+
+int efx_ptp_change_mode(struct efx_nic *efx, bool enable_wanted,
+ unsigned int new_mode)
{
if ((enable_wanted != efx->ptp_data->enabled) ||
(enable_wanted && (efx->ptp_data->mode != new_mode))) {
- int rc;
+ int rc = 0;
if (enable_wanted) {
/* Change of mode requires disable */
@@ -1123,7 +1388,8 @@ static int efx_ptp_change_mode(struct efx_nic *efx, bool enable_wanted,
* succeed.
*/
efx->ptp_data->mode = new_mode;
- rc = efx_ptp_start(efx);
+ if (netif_running(efx->net_dev))
+ rc = efx_ptp_start(efx);
if (rc == 0) {
rc = efx_ptp_synchronize(efx,
PTP_SYNC_ATTEMPTS * 2);
@@ -1145,8 +1411,6 @@ static int efx_ptp_change_mode(struct efx_nic *efx, bool enable_wanted,
static int efx_ptp_ts_init(struct efx_nic *efx, struct hwtstamp_config *init)
{
- bool enable_wanted = false;
- unsigned int new_mode;
int rc;
if (init->flags)
@@ -1156,63 +1420,20 @@ static int efx_ptp_ts_init(struct efx_nic *efx, struct hwtstamp_config *init)
(init->tx_type != HWTSTAMP_TX_ON))
return -ERANGE;
- new_mode = efx->ptp_data->mode;
- /* Determine whether any PTP HW operations are required */
- switch (init->rx_filter) {
- case HWTSTAMP_FILTER_NONE:
- break;
- case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
- case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
- case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
- init->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
- new_mode = MC_CMD_PTP_MODE_V1;
- enable_wanted = true;
- break;
- case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
- /* Although these three are accepted only IPV4 packets will be
- * timestamped
- */
- init->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
- new_mode = MC_CMD_PTP_MODE_V2_ENHANCED;
- enable_wanted = true;
- break;
- case HWTSTAMP_FILTER_PTP_V2_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
- case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
- /* Non-IP + IPv6 timestamping not supported */
- return -ERANGE;
- break;
- default:
- return -ERANGE;
- }
-
- if (init->tx_type != HWTSTAMP_TX_OFF)
- enable_wanted = true;
-
- /* Old versions of the firmware do not support the improved
- * UUID filtering option (SF bug 33070). If the firmware does
- * not accept the enhanced mode, fall back to the standard PTP
- * v2 UUID filtering.
- */
- rc = efx_ptp_change_mode(efx, enable_wanted, new_mode);
- if ((rc != 0) && (new_mode == MC_CMD_PTP_MODE_V2_ENHANCED))
- rc = efx_ptp_change_mode(efx, enable_wanted, MC_CMD_PTP_MODE_V2);
- if (rc != 0)
+ rc = efx->type->ptp_set_ts_config(efx, init);
+ if (rc)
return rc;
efx->ptp_data->config = *init;
-
return 0;
}
void efx_ptp_get_ts_info(struct efx_nic *efx, struct ethtool_ts_info *ts_info)
{
struct efx_ptp_data *ptp = efx->ptp_data;
+ struct efx_nic *primary = efx->primary;
+
+ ASSERT_RTNL();
if (!ptp)
return;
@@ -1220,15 +1441,11 @@ void efx_ptp_get_ts_info(struct efx_nic *efx, struct ethtool_ts_info *ts_info)
ts_info->so_timestamping |= (SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE);
- ts_info->phc_index = ptp_clock_index(ptp->phc_clock);
+ if (primary && primary->ptp_data && primary->ptp_data->phc_clock)
+ ts_info->phc_index =
+ ptp_clock_index(primary->ptp_data->phc_clock);
ts_info->tx_types = 1 << HWTSTAMP_TX_OFF | 1 << HWTSTAMP_TX_ON;
- ts_info->rx_filters = (1 << HWTSTAMP_FILTER_NONE |
- 1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT |
- 1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC |
- 1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ |
- 1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT |
- 1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC |
- 1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ);
+ ts_info->rx_filters = ptp->efx->type->hwtstamp_filters;
}
int efx_ptp_set_ts_config(struct efx_nic *efx, struct ifreq *ifr)
@@ -1279,6 +1496,9 @@ static void ptp_event_rx(struct efx_nic *efx, struct efx_ptp_data *ptp)
{
struct efx_ptp_event_rx *evt = NULL;
+ if (WARN_ON_ONCE(ptp->rx_ts_inline))
+ return;
+
if (ptp->evt_frag_idx != 3) {
ptp_event_failure(efx, 3);
return;
@@ -1297,15 +1517,21 @@ static void ptp_event_rx(struct efx_nic *efx, struct efx_ptp_data *ptp)
MCDI_EVENT_SRC) << 8) |
(EFX_QWORD_FIELD(ptp->evt_frags[0],
MCDI_EVENT_SRC) << 16));
- evt->hwtimestamp = ktime_set(
+ evt->hwtimestamp = efx->ptp_data->nic_to_kernel_time(
EFX_QWORD_FIELD(ptp->evt_frags[0], MCDI_EVENT_DATA),
- EFX_QWORD_FIELD(ptp->evt_frags[1], MCDI_EVENT_DATA));
+ EFX_QWORD_FIELD(ptp->evt_frags[1], MCDI_EVENT_DATA),
+ ptp->ts_corrections.rx);
evt->expiry = jiffies + msecs_to_jiffies(PKT_EVENT_LIFETIME_MS);
list_add_tail(&evt->link, &ptp->evt_list);
queue_work(ptp->workwq, &ptp->work);
- } else {
- netif_err(efx, rx_err, efx->net_dev, "No free PTP event");
+ } else if (!ptp->evt_overflow) {
+ /* Log a warning message and set the event overflow flag.
+ * The message won't be logged again until the event queue
+ * becomes empty.
+ */
+ netif_err(efx, rx_err, efx->net_dev, "PTP event queue overflow\n");
+ ptp->evt_overflow = true;
}
spin_unlock_bh(&ptp->evt_lock);
}
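
The evt_overflow flag set above, together with the reset when the event list empties (see efx_ptp_drop_time_expired_events() and efx_ptp_match_rx()), forms a warn-once latch: one message per overflow episode instead of one per dropped event. A minimal sketch of the latch:

#include <stdbool.h>
#include <stdio.h>

static bool overflowed;

static void on_overflow(void)
{
        if (!overflowed) {
                fprintf(stderr, "event queue overflow\n");
                overflowed = true;      /* suppress repeats */
        }
}

static void on_queue_empty(void)
{
        overflowed = false;             /* re-arm the warning */
}

int main(void)
{
        on_overflow();
        on_overflow();          /* suppressed */
        on_queue_empty();
        on_overflow();          /* logged again */
        return 0;
}
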
@@ -1369,12 +1595,99 @@ void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev)
}
}
+void efx_time_sync_event(struct efx_channel *channel, efx_qword_t *ev)
+{
+ channel->sync_timestamp_major = MCDI_EVENT_FIELD(*ev, PTP_TIME_MAJOR);
+ channel->sync_timestamp_minor =
+ MCDI_EVENT_FIELD(*ev, PTP_TIME_MINOR_26_19) << 19;
+ /* if sync events have been disabled then we want to silently ignore
+ * this event, so throw away result.
+ */
+ (void) cmpxchg(&channel->sync_events_state, SYNC_EVENTS_REQUESTED,
+ SYNC_EVENTS_VALID);
+}
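
The cmpxchg above only promotes SYNC_EVENTS_REQUESTED to SYNC_EVENTS_VALID; if a racing disable has already moved the state elsewhere, the event is silently ignored. A small C11 sketch of the same compare-and-swap guard (state values are illustrative):

#include <stdatomic.h>
#include <stdio.h>

enum { SYNC_OFF, SYNC_REQUESTED, SYNC_VALID };

static _Atomic int state = SYNC_REQUESTED;

/* Only promote REQUESTED -> VALID; any other state is left alone. */
static void on_sync_event(void)
{
        int expected = SYNC_REQUESTED;

        atomic_compare_exchange_strong(&state, &expected, SYNC_VALID);
}

int main(void)
{
        on_sync_event();
        printf("state=%d\n", state);    /* 2: SYNC_VALID */
        return 0;
}
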
+
+/* make some assumptions about the time representation rather than abstract it,
+ * since we currently only support one type of inline timestamping and only on
+ * EF10.
+ */
+#define MINOR_TICKS_PER_SECOND 0x8000000
+/* Fuzz factor for sync events to be out of order with RX events */
+#define FUZZ (MINOR_TICKS_PER_SECOND / 10)
+#define EXPECTED_SYNC_EVENTS_PER_SECOND 4
+
+static inline u32 efx_rx_buf_timestamp_minor(struct efx_nic *efx, const u8 *eh)
+{
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+ return __le32_to_cpup((const __le32 *)(eh + efx->rx_packet_ts_offset));
+#else
+ const u8 *data = eh + efx->rx_packet_ts_offset;
+ return (u32)data[0] |
+ (u32)data[1] << 8 |
+ (u32)data[2] << 16 |
+ (u32)data[3] << 24;
+#endif
+}
+
+void __efx_rx_skb_attach_timestamp(struct efx_channel *channel,
+ struct sk_buff *skb)
+{
+ struct efx_nic *efx = channel->efx;
+ u32 pkt_timestamp_major, pkt_timestamp_minor;
+ u32 diff, carry;
+ struct skb_shared_hwtstamps *timestamps;
+
+ pkt_timestamp_minor = (efx_rx_buf_timestamp_minor(efx,
+ skb_mac_header(skb)) +
+ (u32) efx->ptp_data->ts_corrections.rx) &
+ (MINOR_TICKS_PER_SECOND - 1);
+
+ /* get the difference between the packet and sync timestamps,
+ * modulo one second
+ */
+ diff = (pkt_timestamp_minor - channel->sync_timestamp_minor) &
+ (MINOR_TICKS_PER_SECOND - 1);
+ /* do we roll over a second boundary and need to carry the one? */
+ carry = channel->sync_timestamp_minor + diff > MINOR_TICKS_PER_SECOND ?
+ 1 : 0;
+
+ if (diff <= MINOR_TICKS_PER_SECOND / EXPECTED_SYNC_EVENTS_PER_SECOND +
+ FUZZ) {
+ /* packet is ahead of the sync event by a quarter of a second or
+ * less (allowing for fuzz)
+ */
+ pkt_timestamp_major = channel->sync_timestamp_major + carry;
+ } else if (diff >= MINOR_TICKS_PER_SECOND - FUZZ) {
+ /* packet is behind the sync event but within the fuzz factor.
+ * This means the RX packet and sync event crossed as they were
+ * placed on the event queue, which can sometimes happen.
+ */
+ pkt_timestamp_major = channel->sync_timestamp_major - 1 + carry;
+ } else {
+ /* it's outside tolerance in both directions. this might be
+ * indicative of us missing sync events for some reason, so
+ * we'll call it an error rather than risk giving a bogus
+ * timestamp.
+ */
+ netif_vdbg(efx, drv, efx->net_dev,
+ "packet timestamp %x too far from sync event %x:%x\n",
+ pkt_timestamp_minor, channel->sync_timestamp_major,
+ channel->sync_timestamp_minor);
+ return;
+ }
+
+ /* attach the timestamps to the skb */
+ timestamps = skb_hwtstamps(skb);
+ timestamps->hwtstamp =
+ efx_ptp_s27_to_ktime(pkt_timestamp_major, pkt_timestamp_minor);
+}
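
The major/minor reconstruction above is modular arithmetic on a 2^27-ticks-per-second counter: take the packet-minus-sync difference modulo one second, decide whether the second boundary was crossed (the carry), and accept only differences within a quarter second plus fuzz of the sync event. A runnable sketch of the same logic with illustrative names:

#include <stdint.h>
#include <stdio.h>

#define TICKS_PER_SEC   0x8000000u              /* 2^27 minor ticks */
#define FUZZ            (TICKS_PER_SEC / 10)
#define QUARTER         (TICKS_PER_SEC / 4)     /* 4 sync events/second */

/* Recover the packet's major (seconds) count from its minor ticks and
 * the last sync event; returns 0 if out of tolerance. */
static int expand_time(uint32_t sync_major, uint32_t sync_minor,
                       uint32_t pkt_minor, uint32_t *pkt_major)
{
        uint32_t diff = (pkt_minor - sync_minor) & (TICKS_PER_SEC - 1);
        uint32_t carry = sync_minor + diff > TICKS_PER_SEC ? 1 : 0;

        if (diff <= QUARTER + FUZZ)
                *pkt_major = sync_major + carry;        /* packet after sync */
        else if (diff >= TICKS_PER_SEC - FUZZ)
                *pkt_major = sync_major - 1 + carry;    /* events crossed */
        else
                return 0;                               /* missed sync events? */
        return 1;
}

int main(void)
{
        uint32_t major;

        /* packet minor wrapped past the second boundary after the sync */
        if (expand_time(100, TICKS_PER_SEC - 5, 10, &major))
                printf("major=%u\n", major);            /* 101 */
        return 0;
}
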
+
static int efx_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
{
struct efx_ptp_data *ptp_data = container_of(ptp,
struct efx_ptp_data,
phc_clock_info);
- struct efx_nic *efx = ptp_data->channel->efx;
+ struct efx_nic *efx = ptp_data->efx;
MCDI_DECLARE_BUF(inadj, MC_CMD_PTP_IN_ADJUST_LEN);
s64 adjustment_ns;
int rc;
@@ -1398,24 +1711,26 @@ static int efx_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
if (rc != 0)
return rc;
- ptp_data->current_adjfreq = delta;
+ ptp_data->current_adjfreq = adjustment_ns;
return 0;
}
static int efx_phc_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
+ u32 nic_major, nic_minor;
struct efx_ptp_data *ptp_data = container_of(ptp,
struct efx_ptp_data,
phc_clock_info);
- struct efx_nic *efx = ptp_data->channel->efx;
- struct timespec delta_ts = ns_to_timespec(delta);
+ struct efx_nic *efx = ptp_data->efx;
MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_ADJUST_LEN);
+ efx->ptp_data->ns_to_nic_time(delta, &nic_major, &nic_minor);
+
MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_ADJUST);
MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
- MCDI_SET_QWORD(inbuf, PTP_IN_ADJUST_FREQ, 0);
- MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_SECONDS, (u32)delta_ts.tv_sec);
- MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_NANOSECONDS, (u32)delta_ts.tv_nsec);
+ MCDI_SET_QWORD(inbuf, PTP_IN_ADJUST_FREQ, ptp_data->current_adjfreq);
+ MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_MAJOR, nic_major);
+ MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_MINOR, nic_minor);
return efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
NULL, 0, NULL);
}
@@ -1425,10 +1740,11 @@ static int efx_phc_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
struct efx_ptp_data *ptp_data = container_of(ptp,
struct efx_ptp_data,
phc_clock_info);
- struct efx_nic *efx = ptp_data->channel->efx;
+ struct efx_nic *efx = ptp_data->efx;
MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_READ_NIC_TIME_LEN);
MCDI_DECLARE_BUF(outbuf, MC_CMD_PTP_OUT_READ_NIC_TIME_LEN);
int rc;
+ ktime_t kt;
MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_READ_NIC_TIME);
MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
@@ -1438,8 +1754,10 @@ static int efx_phc_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
if (rc != 0)
return rc;
- ts->tv_sec = MCDI_DWORD(outbuf, PTP_OUT_READ_NIC_TIME_SECONDS);
- ts->tv_nsec = MCDI_DWORD(outbuf, PTP_OUT_READ_NIC_TIME_NANOSECONDS);
+ kt = ptp_data->nic_to_kernel_time(
+ MCDI_DWORD(outbuf, PTP_OUT_READ_NIC_TIME_MAJOR),
+ MCDI_DWORD(outbuf, PTP_OUT_READ_NIC_TIME_MINOR), 0);
+ *ts = ktime_to_timespec(kt);
return 0;
}
@@ -1491,7 +1809,7 @@ static const struct efx_channel_type efx_ptp_channel_type = {
.keep_eventq = false,
};
-void efx_ptp_probe(struct efx_nic *efx)
+void efx_ptp_defer_probe_with_channel(struct efx_nic *efx)
{
/* Check whether PTP is implemented on this NIC. The DISABLE
* operation will succeed if and only if it is implemented.
@@ -1500,3 +1818,20 @@ void efx_ptp_probe(struct efx_nic *efx)
efx->extra_channel_type[EFX_EXTRA_CHANNEL_PTP] =
&efx_ptp_channel_type;
}
+
+void efx_ptp_start_datapath(struct efx_nic *efx)
+{
+ if (efx_ptp_restart(efx))
+ netif_err(efx, drv, efx->net_dev, "Failed to restart PTP.\n");
+ /* re-enable timestamping if it was previously enabled */
+ if (efx->type->ptp_set_ts_sync_events)
+ efx->type->ptp_set_ts_sync_events(efx, true, true);
+}
+
+void efx_ptp_stop_datapath(struct efx_nic *efx)
+{
+ /* temporarily disable timestamping */
+ if (efx->type->ptp_set_ts_sync_events)
+ efx->type->ptp_set_ts_sync_events(efx, false, true);
+ efx_ptp_stop(efx);
+}
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index 8f09e686fc23..1fde9b8ac456 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -94,7 +94,7 @@ static inline void efx_sync_rx_buffer(struct efx_nic *efx,
void efx_rx_config_page_split(struct efx_nic *efx)
{
- efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + NET_IP_ALIGN,
+ efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + efx->rx_ip_align,
EFX_RX_BUF_ALIGNMENT);
efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 :
((PAGE_SIZE - sizeof(struct efx_rx_page_state)) /
@@ -149,7 +149,7 @@ static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue)
* 0 on success. If a single page can be used for multiple buffers,
* then the page will either be inserted fully, or not at all.
*/
-static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue)
+static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue, bool atomic)
{
struct efx_nic *efx = rx_queue->efx;
struct efx_rx_buffer *rx_buf;
@@ -163,7 +163,8 @@ static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue)
do {
page = efx_reuse_page(rx_queue);
if (page == NULL) {
- page = alloc_pages(__GFP_COLD | __GFP_COMP | GFP_ATOMIC,
+ page = alloc_pages(__GFP_COLD | __GFP_COMP |
+ (atomic ? GFP_ATOMIC : GFP_KERNEL),
efx->rx_buffer_order);
if (unlikely(page == NULL))
return -ENOMEM;
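
The new atomic argument lets the initial queue fill, which runs in process context, use a sleeping allocation while NAPI-driven refills keep GFP_ATOMIC. A minimal kernel-context sketch of the choice (hypothetical helper, not part of this patch):

static struct page *efx_example_alloc_rx_page(struct efx_nic *efx,
                                              bool atomic)
{
        /* may-sleep callers pass atomic=false and allow reclaim */
        gfp_t gfp = __GFP_COLD | __GFP_COMP |
                    (atomic ? GFP_ATOMIC : GFP_KERNEL);

        return alloc_pages(gfp, efx->rx_buffer_order);
}
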
@@ -189,9 +190,9 @@ static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue)
do {
index = rx_queue->added_count & rx_queue->ptr_mask;
rx_buf = efx_rx_buffer(rx_queue, index);
- rx_buf->dma_addr = dma_addr + NET_IP_ALIGN;
+ rx_buf->dma_addr = dma_addr + efx->rx_ip_align;
rx_buf->page = page;
- rx_buf->page_offset = page_offset + NET_IP_ALIGN;
+ rx_buf->page_offset = page_offset + efx->rx_ip_align;
rx_buf->len = efx->rx_dma_len;
rx_buf->flags = 0;
++rx_queue->added_count;
@@ -321,7 +322,7 @@ static void efx_discard_rx_packet(struct efx_channel *channel,
* this means this function must run from the NAPI handler, or be called
* when NAPI is disabled.
*/
-void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
+void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic)
{
struct efx_nic *efx = rx_queue->efx;
unsigned int fill_level, batch_size;
@@ -354,7 +355,7 @@ void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
do {
- rc = efx_init_rx_buffers(rx_queue);
+ rc = efx_init_rx_buffers(rx_queue, atomic);
if (unlikely(rc)) {
/* Ensure that we don't leave the rx queue empty */
if (rx_queue->added_count == rx_queue->removed_count)
@@ -475,14 +476,18 @@ static struct sk_buff *efx_rx_mk_skb(struct efx_channel *channel,
struct sk_buff *skb;
/* Allocate an SKB to store the headers */
- skb = netdev_alloc_skb(efx->net_dev, hdr_len + EFX_PAGE_SKB_ALIGN);
+ skb = netdev_alloc_skb(efx->net_dev,
+ efx->rx_ip_align + efx->rx_prefix_size +
+ hdr_len);
if (unlikely(skb == NULL))
return NULL;
EFX_BUG_ON_PARANOID(rx_buf->len < hdr_len);
- skb_reserve(skb, EFX_PAGE_SKB_ALIGN);
- memcpy(__skb_put(skb, hdr_len), eh, hdr_len);
+ memcpy(skb->data + efx->rx_ip_align, eh - efx->rx_prefix_size,
+ efx->rx_prefix_size + hdr_len);
+ skb_reserve(skb, efx->rx_ip_align + efx->rx_prefix_size);
+ __skb_put(skb, hdr_len);
/* Append the remaining page(s) onto the frag list */
if (rx_buf->len > hdr_len) {
@@ -619,6 +624,8 @@ static void efx_rx_deliver(struct efx_channel *channel, u8 *eh,
if (likely(rx_buf->flags & EFX_RX_PKT_CSUMMED))
skb->ip_summed = CHECKSUM_UNNECESSARY;
+ efx_rx_skb_attach_timestamp(channel, skb);
+
if (channel->type->receive_skb)
if (channel->type->receive_skb(channel, skb))
return;
diff --git a/drivers/net/ethernet/sfc/selftest.c b/drivers/net/ethernet/sfc/selftest.c
index 144bbff5a4ae..26641817a9c7 100644
--- a/drivers/net/ethernet/sfc/selftest.c
+++ b/drivers/net/ethernet/sfc/selftest.c
@@ -722,7 +722,7 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
return rc_reset;
}
- if ((tests->registers < 0) && !rc_test)
+ if ((tests->memory < 0 || tests->registers < 0) && !rc_test)
rc_test = -EIO;
}
diff --git a/drivers/net/ethernet/sfc/selftest.h b/drivers/net/ethernet/sfc/selftest.h
index a2f4a06ffa4e..009dbe88f3be 100644
--- a/drivers/net/ethernet/sfc/selftest.h
+++ b/drivers/net/ethernet/sfc/selftest.h
@@ -38,6 +38,7 @@ struct efx_self_tests {
int eventq_dma[EFX_MAX_CHANNELS];
int eventq_int[EFX_MAX_CHANNELS];
/* offline tests */
+ int memory;
int registers;
int phy_ext[EFX_MAX_PHY_TESTS];
struct efx_loopback_self_tests loopback[LOOPBACK_TEST_MAX + 1];
diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c
index d034bcd124ef..f65db356fe09 100644
--- a/drivers/net/ethernet/sfc/siena.c
+++ b/drivers/net/ethernet/sfc/siena.c
@@ -118,6 +118,54 @@ out:
/**************************************************************************
*
+ * PTP
+ *
+ **************************************************************************
+ */
+
+static void siena_ptp_write_host_time(struct efx_nic *efx, u32 host_time)
+{
+ _efx_writed(efx, cpu_to_le32(host_time),
+ FR_CZ_MC_TREG_SMEM + MC_SMEM_P0_PTP_TIME_OFST);
+}
+
+static int siena_ptp_set_ts_config(struct efx_nic *efx,
+ struct hwtstamp_config *init)
+{
+ int rc;
+
+ switch (init->rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ /* if TX timestamping is still requested then leave PTP on */
+ return efx_ptp_change_mode(efx,
+ init->tx_type != HWTSTAMP_TX_OFF,
+ efx_ptp_get_mode(efx));
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ init->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+ return efx_ptp_change_mode(efx, true, MC_CMD_PTP_MODE_V1);
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ init->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
+ rc = efx_ptp_change_mode(efx, true,
+ MC_CMD_PTP_MODE_V2_ENHANCED);
+ /* bug 33070 - old versions of the firmware do not support the
+ * improved UUID filtering option. Similarly old versions of the
+ * application do not expect it to be enabled. If the firmware
+ * does not accept the enhanced mode, fall back to the standard
+ * PTP v2 UUID filtering. */
+ if (rc != 0)
+ rc = efx_ptp_change_mode(efx, true, MC_CMD_PTP_MODE_V2);
+ return rc;
+ default:
+ return -ERANGE;
+ }
+}
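
The rx_filter values dispatched above originate from user space via the SIOCSHWTSTAMP ioctl; the driver may widen the granted filter (e.g. any V1 or V2 L4 request becomes the corresponding _EVENT filter) and writes the result back. A hedged user-space sketch, assuming an interface named eth0:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/sockios.h>
#include <linux/net_tstamp.h>

int main(void)
{
        struct hwtstamp_config cfg = {
                .tx_type   = HWTSTAMP_TX_ON,
                .rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC,
        };
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0)
                return 1;
        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);    /* adjust */
        ifr.ifr_data = (void *)&cfg;

        if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
                perror("SIOCSHWTSTAMP");
        else
                printf("granted rx_filter=%d\n", cfg.rx_filter);
        close(fd);
        return 0;
}
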
+
+/**************************************************************************
+ *
* Device reset
*
**************************************************************************
@@ -259,7 +307,7 @@ static int siena_probe_nic(struct efx_nic *efx)
goto fail5;
efx_sriov_probe(efx);
- efx_ptp_probe(efx);
+ efx_ptp_defer_probe_with_channel(efx);
return 0;
@@ -458,6 +506,8 @@ static int siena_try_update_nic_stats(struct efx_nic *efx)
return -EAGAIN;
/* Update derived statistics */
+ efx_nic_fix_nodesc_drop_stat(efx,
+ &stats[SIENA_STAT_rx_nodesc_drop_cnt]);
efx_update_diff_stat(&stats[SIENA_STAT_tx_good_bytes],
stats[SIENA_STAT_tx_bytes] -
stats[SIENA_STAT_tx_bad_bytes]);
@@ -837,19 +887,6 @@ fail:
/**************************************************************************
*
- * PTP
- *
- **************************************************************************
- */
-
-static void siena_ptp_write_host_time(struct efx_nic *efx, u32 host_time)
-{
- _efx_writed(efx, cpu_to_le32(host_time),
- FR_CZ_MC_TREG_SMEM + MC_SMEM_P0_PTP_TIME_OFST);
-}
-
-/**************************************************************************
- *
* Revision-dependent attributes used by efx.c and nic.c
*
**************************************************************************
@@ -878,6 +915,7 @@ const struct efx_nic_type siena_a0_nic_type = {
.describe_stats = siena_describe_nic_stats,
.update_stats = siena_update_nic_stats,
.start_stats = efx_mcdi_mac_start_stats,
+ .pull_stats = efx_mcdi_mac_pull_stats,
.stop_stats = efx_mcdi_mac_stop_stats,
.set_id_led = efx_mcdi_set_id_led,
.push_irq_moderation = siena_push_irq_moderation,
@@ -939,6 +977,7 @@ const struct efx_nic_type siena_a0_nic_type = {
.mtd_sync = efx_mcdi_mtd_sync,
#endif
.ptp_write_host_time = siena_ptp_write_host_time,
+ .ptp_set_ts_config = siena_ptp_set_ts_config,
.revision = EFX_REV_SIENA_A0,
.txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
@@ -957,4 +996,11 @@ const struct efx_nic_type siena_a0_nic_type = {
NETIF_F_RXHASH | NETIF_F_NTUPLE),
.mcdi_max_ver = 1,
.max_rx_ip_filters = FR_BZ_RX_FILTER_TBL0_ROWS,
+ .hwtstamp_filters = (1 << HWTSTAMP_FILTER_NONE |
+ 1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT |
+ 1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC |
+ 1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ |
+ 1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT |
+ 1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC |
+ 1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ),
};
diff --git a/drivers/net/fddi/skfp/fplustm.c b/drivers/net/fddi/skfp/fplustm.c
index f83993590174..d918d8a42667 100644
--- a/drivers/net/fddi/skfp/fplustm.c
+++ b/drivers/net/fddi/skfp/fplustm.c
@@ -55,14 +55,14 @@ static char cam_warning [] = "E_SMT_004: CAM still busy\n";
#define DUMMY_READ() smc->hw.mc_dummy = (u_short) inp(ADDR(B0_RAP))
-#define CHECK_NPP() { unsigned k = 10000 ;\
+#define CHECK_NPP() { unsigned int k = 10000 ;\
while ((inpw(FM_A(FM_STMCHN)) & FM_SNPPND) && k) k--;\
if (!k) { \
SMT_PANIC(smc,SMT_E0130, SMT_E0130_MSG) ; \
} \
}
-#define CHECK_CAM() { unsigned k = 10 ;\
+#define CHECK_CAM() { unsigned int k = 10 ;\
while (!(inpw(FM_A(FM_AFSTAT)) & FM_DONE) && k) k--;\
if (!k) { \
SMT_PANIC(smc,SMT_E0131, SMT_E0131_MSG) ; \
@@ -356,25 +356,25 @@ static void set_formac_addr(struct s_smc *smc)
long t_requ = smc->mib.m[MAC0].fddiMACT_Req ;
outpw(FM_A(FM_SAID),my_said) ; /* set short address */
- outpw(FM_A(FM_LAIL),(unsigned)((smc->hw.fddi_home_addr.a[4]<<8) +
+ outpw(FM_A(FM_LAIL),(unsigned short)((smc->hw.fddi_home_addr.a[4]<<8) +
smc->hw.fddi_home_addr.a[5])) ;
- outpw(FM_A(FM_LAIC),(unsigned)((smc->hw.fddi_home_addr.a[2]<<8) +
+ outpw(FM_A(FM_LAIC),(unsigned short)((smc->hw.fddi_home_addr.a[2]<<8) +
smc->hw.fddi_home_addr.a[3])) ;
- outpw(FM_A(FM_LAIM),(unsigned)((smc->hw.fddi_home_addr.a[0]<<8) +
+ outpw(FM_A(FM_LAIM),(unsigned short)((smc->hw.fddi_home_addr.a[0]<<8) +
smc->hw.fddi_home_addr.a[1])) ;
outpw(FM_A(FM_SAGP),my_sagp) ; /* set short group address */
- outpw(FM_A(FM_LAGL),(unsigned)((smc->hw.fp.group_addr.a[4]<<8) +
+ outpw(FM_A(FM_LAGL),(unsigned short)((smc->hw.fp.group_addr.a[4]<<8) +
smc->hw.fp.group_addr.a[5])) ;
- outpw(FM_A(FM_LAGC),(unsigned)((smc->hw.fp.group_addr.a[2]<<8) +
+ outpw(FM_A(FM_LAGC),(unsigned short)((smc->hw.fp.group_addr.a[2]<<8) +
smc->hw.fp.group_addr.a[3])) ;
- outpw(FM_A(FM_LAGM),(unsigned)((smc->hw.fp.group_addr.a[0]<<8) +
+ outpw(FM_A(FM_LAGM),(unsigned short)((smc->hw.fp.group_addr.a[0]<<8) +
smc->hw.fp.group_addr.a[1])) ;
/* set r_request regs. (MSW & LSW of TRT ) */
- outpw(FM_A(FM_TREQ1),(unsigned)(t_requ>>16)) ;
- outpw(FM_A(FM_TREQ0),(unsigned)t_requ) ;
+ outpw(FM_A(FM_TREQ1),(unsigned short)(t_requ>>16)) ;
+ outpw(FM_A(FM_TREQ0),(unsigned short)t_requ) ;
}
static void set_int(char *p, int l)
@@ -394,10 +394,10 @@ static void set_int(char *p, int l)
* append 'end of chain' pointer
*/
static void copy_tx_mac(struct s_smc *smc, u_long td, struct fddi_mac *mac,
- unsigned off, int len)
+ unsigned int off, int len)
/* u_long td; transmit descriptor */
/* struct fddi_mac *mac; mac frame pointer */
-/* unsigned off; start address within buffer memory */
+/* unsigned int off; start address within buffer memory */
/* int len ; length of the frame including the FC */
{
int i ;
diff --git a/drivers/net/fddi/skfp/h/supern_2.h b/drivers/net/fddi/skfp/h/supern_2.h
index 0b73690280f6..4ee360d2dc62 100644
--- a/drivers/net/fddi/skfp/h/supern_2.h
+++ b/drivers/net/fddi/skfp/h/supern_2.h
@@ -92,33 +92,33 @@
union rx_descr {
struct {
#ifdef LITTLE_ENDIAN
- unsigned rx_length :16 ; /* frame length lower/upper byte */
- unsigned rx_erfbb :2 ; /* received frame byte boundary */
- unsigned rx_reserv2:2 ; /* reserved */
- unsigned rx_sfrmty :3 ; /* frame type bits */
- unsigned rx_sadrrg :1 ; /* DA == MA or broad-/multicast */
- unsigned rx_sfrmerr:1 ; /* received frame not valid */
- unsigned rx_seac0 :1 ; /* frame-copied C-indicator */
- unsigned rx_seac1 :1 ; /* address-match A-indicator */
- unsigned rx_seac2 :1 ; /* frame-error E-indicator */
- unsigned rx_ssrcrtg:1 ; /* == 1 SA has MSB set */
- unsigned rx_reserv1:1 ; /* reserved */
- unsigned rx_msrabt :1 ; /* memory status receive abort */
- unsigned rx_msvalid:1 ; /* memory status valid */
+ unsigned int rx_length :16 ; /* frame length lower/upper byte */
+ unsigned int rx_erfbb :2 ; /* received frame byte boundary */
+ unsigned int rx_reserv2:2 ; /* reserved */
+ unsigned int rx_sfrmty :3 ; /* frame type bits */
+ unsigned int rx_sadrrg :1 ; /* DA == MA or broad-/multicast */
+ unsigned int rx_sfrmerr:1 ; /* received frame not valid */
+ unsigned int rx_seac0 :1 ; /* frame-copied C-indicator */
+ unsigned int rx_seac1 :1 ; /* address-match A-indicator */
+ unsigned int rx_seac2 :1 ; /* frame-error E-indicator */
+ unsigned int rx_ssrcrtg:1 ; /* == 1 SA has MSB set */
+ unsigned int rx_reserv1:1 ; /* reserved */
+ unsigned int rx_msrabt :1 ; /* memory status receive abort */
+ unsigned int rx_msvalid:1 ; /* memory status valid */
#else
- unsigned rx_msvalid:1 ; /* memory status valid */
- unsigned rx_msrabt :1 ; /* memory status receive abort */
- unsigned rx_reserv1:1 ; /* reserved */
- unsigned rx_ssrcrtg:1 ; /* == 1 SA has MSB set */
- unsigned rx_seac2 :1 ; /* frame-error E-indicator */
- unsigned rx_seac1 :1 ; /* address-match A-indicator */
- unsigned rx_seac0 :1 ; /* frame-copied C-indicator */
- unsigned rx_sfrmerr:1 ; /* received frame not valid */
- unsigned rx_sadrrg :1 ; /* DA == MA or broad-/multicast */
- unsigned rx_sfrmty :3 ; /* frame type bits */
- unsigned rx_erfbb :2 ; /* received frame byte boundary */
- unsigned rx_reserv2:2 ; /* reserved */
- unsigned rx_length :16 ; /* frame length lower/upper byte */
+ unsigned int rx_msvalid:1 ; /* memory status valid */
+ unsigned int rx_msrabt :1 ; /* memory status receive abort */
+ unsigned int rx_reserv1:1 ; /* reserved */
+ unsigned int rx_ssrcrtg:1 ; /* == 1 SA has MSB set */
+ unsigned int rx_seac2 :1 ; /* frame-error E-indicator */
+ unsigned int rx_seac1 :1 ; /* address-match A-indicator */
+ unsigned int rx_seac0 :1 ; /* frame-copied C-indicator */
+ unsigned int rx_sfrmerr:1 ; /* received frame not valid */
+ unsigned int rx_sadrrg :1 ; /* DA == MA or broad-/multicast */
+ unsigned int rx_sfrmty :3 ; /* frame type bits */
+ unsigned int rx_erfbb :2 ; /* received frame byte boundary */
+ unsigned int rx_reserv2:2 ; /* reserved */
+ unsigned int rx_length :16 ; /* frame length lower/upper byte */
#endif
} r ;
long i ;
@@ -162,23 +162,23 @@ union rx_descr {
union tx_descr {
struct {
#ifdef LITTLE_ENDIAN
- unsigned tx_length:16 ; /* frame length lower/upper byte */
- unsigned tx_res :8 ; /* reserved (bit 16..23) */
- unsigned tx_xmtabt:1 ; /* transmit abort */
- unsigned tx_nfcs :1 ; /* no frame check sequence */
- unsigned tx_xdone :1 ; /* give up token */
- unsigned tx_rpxm :2 ; /* byte offset */
- unsigned tx_pat1 :2 ; /* must be TXP1 */
- unsigned tx_more :1 ; /* more frame in chain */
+ unsigned int tx_length:16 ; /* frame length lower/upper byte */
+ unsigned int tx_res :8 ; /* reserved (bit 16..23) */
+ unsigned int tx_xmtabt:1 ; /* transmit abort */
+ unsigned int tx_nfcs :1 ; /* no frame check sequence */
+ unsigned int tx_xdone :1 ; /* give up token */
+ unsigned int tx_rpxm :2 ; /* byte offset */
+ unsigned int tx_pat1 :2 ; /* must be TXP1 */
+ unsigned int tx_more :1 ; /* more frame in chain */
#else
- unsigned tx_more :1 ; /* more frame in chain */
- unsigned tx_pat1 :2 ; /* must be TXP1 */
- unsigned tx_rpxm :2 ; /* byte offset */
- unsigned tx_xdone :1 ; /* give up token */
- unsigned tx_nfcs :1 ; /* no frame check sequence */
- unsigned tx_xmtabt:1 ; /* transmit abort */
- unsigned tx_res :8 ; /* reserved (bit 16..23) */
- unsigned tx_length:16 ; /* frame length lower/upper byte */
+ unsigned int tx_more :1 ; /* more frame in chain */
+ unsigned int tx_pat1 :2 ; /* must be TXP1 */
+ unsigned int tx_rpxm :2 ; /* byte offset */
+ unsigned int tx_xdone :1 ; /* give up token */
+ unsigned int tx_nfcs :1 ; /* no frame check sequence */
+ unsigned int tx_xmtabt:1 ; /* transmit abort */
+ unsigned int tx_res :8 ; /* reserved (bit 16..23) */
+ unsigned int tx_length:16 ; /* frame length lower/upper byte */
#endif
} t ;
long i ;
@@ -202,13 +202,13 @@ union tx_descr {
union tx_pointer {
struct t {
#ifdef LITTLE_ENDIAN
- unsigned tp_pointer:16 ; /* pointer to tx_descr (low/high) */
- unsigned tp_res :8 ; /* reserved (bit 16..23) */
- unsigned tp_pattern:8 ; /* fixed pattern (bit 24..31) */
+ unsigned int tp_pointer:16 ; /* pointer to tx_descr (low/high) */
+ unsigned int tp_res :8 ; /* reserved (bit 16..23) */
+ unsigned int tp_pattern:8 ; /* fixed pattern (bit 24..31) */
#else
- unsigned tp_pattern:8 ; /* fixed pattern (bit 24..31) */
- unsigned tp_res :8 ; /* reserved (bit 16..23) */
- unsigned tp_pointer:16 ; /* pointer to tx_descr (low/high) */
+ unsigned int tp_pattern:8 ; /* fixed pattern (bit 24..31) */
+ unsigned int tp_res :8 ; /* reserved (bit 16..23) */
+ unsigned int tp_pointer:16 ; /* pointer to tx_descr (low/high) */
#endif
} t ;
long i ;
diff --git a/drivers/net/fddi/skfp/smt.c b/drivers/net/fddi/skfp/smt.c
index 08d94329c12f..9edada85ed02 100644
--- a/drivers/net/fddi/skfp/smt.c
+++ b/drivers/net/fddi/skfp/smt.c
@@ -900,7 +900,7 @@ static void smt_send_rdf(struct s_smc *smc, SMbuf *rej, int fc, int reason,
rdf->version.v_pad2 = 0 ;
/* set P13 */
- if ((unsigned) frame_len <= SMT_MAX_INFO_LEN - sizeof(*rdf) +
+ if ((unsigned int) frame_len <= SMT_MAX_INFO_LEN - sizeof(*rdf) +
2*sizeof(struct smt_header))
len = frame_len ;
else
diff --git a/drivers/net/fddi/skfp/srf.c b/drivers/net/fddi/skfp/srf.c
index f6f7baf9f27a..cc27dea3414e 100644
--- a/drivers/net/fddi/skfp/srf.c
+++ b/drivers/net/fddi/skfp/srf.c
@@ -73,7 +73,7 @@ void smt_init_evc(struct s_smc *smc)
{
struct s_srf_evc *evc ;
const struct evc_init *init ;
- int i ;
+ unsigned int i ;
int index ;
int offset ;
@@ -84,7 +84,7 @@ void smt_init_evc(struct s_smc *smc)
evc = smc->evcs ;
init = evc_inits ;
- for (i = 0 ; (unsigned) i < MAX_INIT_EVC ; i++) {
+ for (i = 0 ; i < MAX_INIT_EVC ; i++) {
for (index = 0 ; index < init->n ; index++) {
evc->evc_code = init->code ;
evc->evc_para = init->para ;
@@ -98,7 +98,7 @@ void smt_init_evc(struct s_smc *smc)
init++ ;
}
- if ((unsigned) (evc - smc->evcs) > MAX_EVCS) {
+ if ((unsigned int) (evc - smc->evcs) > MAX_EVCS) {
SMT_PANIC(smc,SMT_E0127, SMT_E0127_MSG) ;
}
@@ -139,7 +139,7 @@ void smt_init_evc(struct s_smc *smc)
offset++ ;
}
#ifdef DEBUG
- for (i = 0, evc = smc->evcs ; (unsigned) i < MAX_EVCS ; i++, evc++) {
+ for (i = 0, evc = smc->evcs ; i < MAX_EVCS ; i++, evc++) {
if (SMT_IS_CONDITION(evc->evc_code)) {
if (!evc->evc_cond_state) {
SMT_PANIC(smc,SMT_E0128, SMT_E0128_MSG) ;
@@ -160,10 +160,10 @@ void smt_init_evc(struct s_smc *smc)
static struct s_srf_evc *smt_get_evc(struct s_smc *smc, int code, int index)
{
- int i ;
+ unsigned int i ;
struct s_srf_evc *evc ;
- for (i = 0, evc = smc->evcs ; (unsigned) i < MAX_EVCS ; i++, evc++) {
+ for (i = 0, evc = smc->evcs ; i < MAX_EVCS ; i++, evc++) {
if (evc->evc_code == code && evc->evc_index == index)
return evc;
}
@@ -335,9 +335,9 @@ void smt_srf_event(struct s_smc *smc, int code, int index, int cond)
static void clear_all_rep(struct s_smc *smc)
{
struct s_srf_evc *evc ;
- int i ;
+ unsigned int i ;
- for (i = 0, evc = smc->evcs ; (unsigned) i < MAX_EVCS ; i++, evc++) {
+ for (i = 0, evc = smc->evcs ; i < MAX_EVCS ; i++, evc++) {
evc->evc_rep_required = FALSE ;
if (SMT_IS_CONDITION(evc->evc_code))
*evc->evc_cond_state = FALSE ;
@@ -348,10 +348,10 @@ static void clear_all_rep(struct s_smc *smc)
static void clear_reported(struct s_smc *smc)
{
struct s_srf_evc *evc ;
- int i ;
+ unsigned int i ;
smc->srf.any_report = FALSE ;
- for (i = 0, evc = smc->evcs ; (unsigned) i < MAX_EVCS ; i++, evc++) {
+ for (i = 0, evc = smc->evcs ; i < MAX_EVCS ; i++, evc++) {
if (SMT_IS_CONDITION(evc->evc_code)) {
if (*evc->evc_cond_state == FALSE)
evc->evc_rep_required = FALSE ;
@@ -375,7 +375,7 @@ static void smt_send_srf(struct s_smc *smc)
struct s_srf_evc *evc ;
SK_LOC_DECL(struct s_pcon,pcon) ;
SMbuf *mb ;
- int i ;
+ unsigned int i ;
static const struct fddi_addr SMT_SRF_DA = {
{ 0x80, 0x01, 0x43, 0x00, 0x80, 0x08 }
@@ -405,7 +405,7 @@ static void smt_send_srf(struct s_smc *smc)
smt_add_para(smc,&pcon,(u_short) SMT_P1033,0,0) ;
smt_add_para(smc,&pcon,(u_short) SMT_P1034,0,0) ;
- for (i = 0, evc = smc->evcs ; (unsigned) i < MAX_EVCS ; i++, evc++) {
+ for (i = 0, evc = smc->evcs ; i < MAX_EVCS ; i++, evc++) {
if (evc->evc_rep_required) {
smt_add_para(smc,&pcon,evc->evc_para,
(int)evc->evc_index,0) ;
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 2e3c778ea9bf..bd37e45c89c0 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -894,6 +894,8 @@ static struct phy_driver marvell_drivers[] = {
.read_status = &genphy_read_status,
.ack_interrupt = &marvell_ack_interrupt,
.config_intr = &marvell_config_intr,
+ .resume = &genphy_resume,
+ .suspend = &genphy_suspend,
.driver = { .owner = THIS_MODULE },
},
{
@@ -907,6 +909,8 @@ static struct phy_driver marvell_drivers[] = {
.read_status = &genphy_read_status,
.ack_interrupt = &marvell_ack_interrupt,
.config_intr = &marvell_config_intr,
+ .resume = &genphy_resume,
+ .suspend = &genphy_suspend,
.driver = { .owner = THIS_MODULE },
},
{
@@ -920,6 +924,8 @@ static struct phy_driver marvell_drivers[] = {
.read_status = &marvell_read_status,
.ack_interrupt = &marvell_ack_interrupt,
.config_intr = &marvell_config_intr,
+ .resume = &genphy_resume,
+ .suspend = &genphy_suspend,
.driver = { .owner = THIS_MODULE },
},
{
@@ -933,6 +939,8 @@ static struct phy_driver marvell_drivers[] = {
.read_status = &genphy_read_status,
.ack_interrupt = &marvell_ack_interrupt,
.config_intr = &marvell_config_intr,
+ .resume = &genphy_resume,
+ .suspend = &genphy_suspend,
.driver = {.owner = THIS_MODULE,},
},
{
@@ -946,6 +954,8 @@ static struct phy_driver marvell_drivers[] = {
.ack_interrupt = &marvell_ack_interrupt,
.config_intr = &marvell_config_intr,
.did_interrupt = &m88e1121_did_interrupt,
+ .resume = &genphy_resume,
+ .suspend = &genphy_suspend,
.driver = { .owner = THIS_MODULE },
},
{
@@ -961,6 +971,8 @@ static struct phy_driver marvell_drivers[] = {
.did_interrupt = &m88e1121_did_interrupt,
.get_wol = &m88e1318_get_wol,
.set_wol = &m88e1318_set_wol,
+ .resume = &genphy_resume,
+ .suspend = &genphy_suspend,
.driver = { .owner = THIS_MODULE },
},
{
@@ -974,6 +986,8 @@ static struct phy_driver marvell_drivers[] = {
.read_status = &genphy_read_status,
.ack_interrupt = &marvell_ack_interrupt,
.config_intr = &marvell_config_intr,
+ .resume = &genphy_resume,
+ .suspend = &genphy_suspend,
.driver = { .owner = THIS_MODULE },
},
{
@@ -987,6 +1001,8 @@ static struct phy_driver marvell_drivers[] = {
.read_status = &genphy_read_status,
.ack_interrupt = &marvell_ack_interrupt,
.config_intr = &marvell_config_intr,
+ .resume = &genphy_resume,
+ .suspend = &genphy_suspend,
.driver = { .owner = THIS_MODULE },
},
{
@@ -1000,6 +1016,8 @@ static struct phy_driver marvell_drivers[] = {
.read_status = &genphy_read_status,
.ack_interrupt = &marvell_ack_interrupt,
.config_intr = &marvell_config_intr,
+ .resume = &genphy_resume,
+ .suspend = &genphy_suspend,
.driver = { .owner = THIS_MODULE },
},
{
@@ -1013,6 +1031,8 @@ static struct phy_driver marvell_drivers[] = {
.read_status = &genphy_read_status,
.ack_interrupt = &marvell_ack_interrupt,
.config_intr = &marvell_config_intr,
+ .resume = &genphy_resume,
+ .suspend = &genphy_suspend,
.driver = { .owner = THIS_MODULE },
},
{
@@ -1026,6 +1046,8 @@ static struct phy_driver marvell_drivers[] = {
.ack_interrupt = &marvell_ack_interrupt,
.config_intr = &marvell_config_intr,
.did_interrupt = &m88e1121_did_interrupt,
+ .resume = &genphy_resume,
+ .suspend = &genphy_suspend,
.driver = { .owner = THIS_MODULE },
},
};
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index e3dd69100da8..dea609f86aee 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -739,7 +739,7 @@ void phy_state_machine(struct work_struct *work)
struct delayed_work *dwork = to_delayed_work(work);
struct phy_device *phydev =
container_of(dwork, struct phy_device, state_queue);
- int needs_aneg = 0;
+ int needs_aneg = 0, do_suspend = 0;
int err = 0;
mutex_lock(&phydev->lock);
@@ -854,6 +854,7 @@ void phy_state_machine(struct work_struct *work)
phydev->link = 0;
netif_carrier_off(phydev->attached_dev);
phydev->adjust_link(phydev->attached_dev);
+ do_suspend = 1;
}
break;
case PHY_RESUMING:
@@ -912,6 +913,9 @@ void phy_state_machine(struct work_struct *work)
if (needs_aneg)
err = phy_start_aneg(phydev);
+ if (do_suspend)
+ phy_suspend(phydev);
+
if (err < 0)
phy_error(phydev);
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 5a619f0dcf73..4eb5bba1db5e 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -624,6 +624,8 @@ static int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
if (err)
phy_detach(phydev);
+ phy_resume(phydev);
+
return err;
}
@@ -669,6 +671,7 @@ void phy_detach(struct phy_device *phydev)
{
phydev->attached_dev->phydev = NULL;
phydev->attached_dev = NULL;
+ phy_suspend(phydev);
/* If the device had no specific driver before (i.e. - it
* was using the generic driver), we unbind the device
@@ -679,6 +682,30 @@ void phy_detach(struct phy_device *phydev)
}
EXPORT_SYMBOL(phy_detach);
+int phy_suspend(struct phy_device *phydev)
+{
+ struct phy_driver *phydrv = to_phy_driver(phydev->dev.driver);
+ struct ethtool_wolinfo wol;
+
+ /* If the device has WOL enabled, we cannot suspend the PHY */
+ wol.cmd = ETHTOOL_GWOL;
+ phy_ethtool_get_wol(phydev, &wol);
+ if (wol.wolopts)
+ return -EBUSY;
+
+ if (phydrv->suspend)
+ return phydrv->suspend(phydev);
+ return 0;
+}
+
+int phy_resume(struct phy_device *phydev)
+{
+ struct phy_driver *phydrv = to_phy_driver(phydev->dev.driver);
+
+ if (phydrv->resume)
+ return phydrv->resume(phydev);
+ return 0;
+}
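
phy_suspend() above refuses to power down a PHY whose wake-on-LAN is armed, using the same WOL state that ethtool exposes. A user-space sketch of the equivalent check via ETHTOOL_GWOL (the interface name is an assumption):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
        struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0)
                return 1;
        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);    /* adjust */
        ifr.ifr_data = (void *)&wol;

        if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
                printf("wolopts=0x%x%s\n", wol.wolopts,
                       wol.wolopts ? " (suspend would be refused)" : "");
        close(fd);
        return 0;
}
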
/* Generic PHY support and helper functions */
diff --git a/include/asm-generic/hash.h b/include/asm-generic/hash.h
new file mode 100644
index 000000000000..05cb3421ee7e
--- /dev/null
+++ b/include/asm-generic/hash.h
@@ -0,0 +1,9 @@
+#ifndef __ASM_GENERIC_HASH_H
+#define __ASM_GENERIC_HASH_H
+
+struct arch_hash_ops;
+static inline void setup_arch_fast_hash(struct arch_hash_ops *ops)
+{
+}
+
+#endif /* __ASM_GENERIC_HASH_H */
diff --git a/include/linux/hash.h b/include/linux/hash.h
index f09a0ae4d858..bd1754c7ecef 100644
--- a/include/linux/hash.h
+++ b/include/linux/hash.h
@@ -15,6 +15,7 @@
*/
#include <asm/types.h>
+#include <asm/hash.h>
#include <linux/compiler.h>
/* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */
@@ -78,4 +79,39 @@ static inline u32 hash32_ptr(const void *ptr)
#endif
return (u32)val;
}
+
+struct fast_hash_ops {
+ u32 (*hash)(const void *data, u32 len, u32 seed);
+ u32 (*hash2)(const u32 *data, u32 len, u32 seed);
+};
+
+/**
+ * arch_fast_hash - Calculates a hash over a given buffer that can have
+ * arbitrary size. This function will eventually use an
+ * architecture-optimized hashing implementation if
+ * available, and trades off distribution for speed.
+ *
+ * @data: buffer to hash
+ * @len: length of buffer in bytes
+ * @seed: start seed
+ *
+ * Returns 32bit hash.
+ */
+extern u32 arch_fast_hash(const void *data, u32 len, u32 seed);
+
+/**
+ * arch_fast_hash2 - Calculates a hash over a given buffer that has a
+ * size that is of a multiple of 32bit words. This
+ * function will eventually use an architecture-
+ * optimized hashing implementation if available,
+ * and trades off distribution for speed.
+ *
+ * @data: buffer to hash (must be 32bit padded)
+ * @len: number of 32bit words
+ * @seed: start seed
+ *
+ * Returns 32bit hash.
+ */
+extern u32 arch_fast_hash2(const u32 *data, u32 len, u32 seed);
+
#endif /* _LINUX_HASH_H */
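
For callers whose keys are a whole number of 32-bit words, arch_fast_hash2() skips the byte-tail handling of arch_fast_hash(). A kernel-context sketch of a hypothetical caller (the struct and helper are illustrative, not part of this patch):

#include <linux/hash.h>
#include <linux/kernel.h>

struct example_flow_key {
        u32 words[4];
};

/* len is a count of 32-bit words, not bytes */
static u32 example_flow_hash(const struct example_flow_key *key, u32 seed)
{
        return arch_fast_hash2(key->words, ARRAY_SIZE(key->words), seed);
}
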
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 5260d2eae2e6..2c74d20dad34 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -2907,6 +2907,7 @@ void *netdev_lower_get_next_private_rcu(struct net_device *dev,
priv = netdev_lower_get_next_private_rcu(dev, &(iter)))
void *netdev_adjacent_get_private(struct list_head *adj_list);
+void *netdev_lower_get_first_private_rcu(struct net_device *dev);
struct net_device *netdev_master_upper_dev_get(struct net_device *dev);
struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev);
int netdev_upper_dev_link(struct net_device *dev, struct net_device *upper_dev);
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 90a666e0884b..73384ff3b5e5 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -548,6 +548,8 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id,
struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45);
int phy_device_register(struct phy_device *phy);
int phy_init_hw(struct phy_device *phydev);
+int phy_suspend(struct phy_device *phydev);
+int phy_resume(struct phy_device *phydev);
struct phy_device * phy_attach(struct net_device *dev,
const char *bus_id, phy_interface_t interface);
struct phy_device *phy_find_first(struct mii_bus *bus);
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index e416d6ac9c70..e8b2ff5c2804 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -1360,12 +1360,6 @@ struct sctp_association {
/* This is all information about our peer. */
struct {
- /* rwnd
- *
- * Peer Rwnd : Current calculated value of the peer's rwnd.
- */
- __u32 rwnd;
-
/* transport_addr_list
*
* Peer : A list of SCTP transport addresses that the
@@ -1383,6 +1377,12 @@ struct sctp_association {
*/
struct list_head transport_addr_list;
+ /* rwnd
+ *
+ * Peer Rwnd : Current calculated value of the peer's rwnd.
+ */
+ __u32 rwnd;
+
/* transport_count
*
* Peer : A count of the number of peer addresses
@@ -1465,6 +1465,20 @@ struct sctp_association {
*/
struct sctp_tsnmap tsn_map;
+ /* This mask is used to disable sending the ASCONF chunk
+ * with specified parameter to peer.
+ */
+ __be16 addip_disabled_mask;
+
+ /* These are capabilities which our peer advertised. */
+ __u8 ecn_capable:1, /* Can peer do ECN? */
+ ipv4_address:1, /* Peer understands IPv4 addresses? */
+ ipv6_address:1, /* Peer understands IPv6 addresses? */
+ hostname_address:1, /* Peer understands DNS addresses? */
+ asconf_capable:1, /* Does peer support ADDIP? */
+ prsctp_capable:1, /* Can peer do PR-SCTP? */
+ auth_capable:1; /* Is peer doing SCTP-AUTH? */
+
/* Ack State : This flag indicates if the next received
* : packet is to be responded to with a
 * : SACK. This is initialized to 0. When a packet
@@ -1479,25 +1493,11 @@ struct sctp_association {
__u32 sack_cnt;
__u32 sack_generation;
- /* These are capabilities which our peer advertised. */
- __u8 ecn_capable:1, /* Can peer do ECN? */
- ipv4_address:1, /* Peer understands IPv4 addresses? */
- ipv6_address:1, /* Peer understands IPv6 addresses? */
- hostname_address:1, /* Peer understands DNS addresses? */
- asconf_capable:1, /* Does peer support ADDIP? */
- prsctp_capable:1, /* Can peer do PR-SCTP? */
- auth_capable:1; /* Is peer doing SCTP-AUTH? */
-
__u32 adaptation_ind; /* Adaptation Code point. */
- /* This mask is used to disable sending the ASCONF chunk
- * with specified parameter to peer.
- */
- __be16 addip_disabled_mask;
-
struct sctp_inithdr_host i;
- int cookie_len;
void *cookie;
+ int cookie_len;
/* ADDIP Section 4.2 Upon reception of an ASCONF Chunk.
* C1) ... "Peer-Serial-Number'. This value MUST be initialized to the
@@ -1529,14 +1529,14 @@ struct sctp_association {
*/
sctp_state_t state;
- /* The cookie life I award for any cookie. */
- ktime_t cookie_life;
-
/* Overall : The overall association error count.
* Error Count : [Clear this any time I get something.]
*/
int overall_error_count;
+ /* The cookie life I award for any cookie. */
+ ktime_t cookie_life;
+
/* These are the association's initial, max, and min RTO values.
* These values will be initialized by system defaults, but can
* be modified via the SCTP_RTOINFO socket option.
@@ -1591,10 +1591,9 @@ struct sctp_association {
/* Flags controlling Heartbeat, SACK delay, and Path MTU Discovery. */
__u32 param_flags;
+ __u32 sackfreq;
/* SACK delay timeout */
unsigned long sackdelay;
- __u32 sackfreq;
-
unsigned long timeouts[SCTP_NUM_TIMEOUT_TYPES];
struct timer_list timers[SCTP_NUM_TIMEOUT_TYPES];
@@ -1602,12 +1601,12 @@ struct sctp_association {
/* Transport to which SHUTDOWN chunk was last sent. */
struct sctp_transport *shutdown_last_sent_to;
- /* How many times have we resent a SHUTDOWN */
- int shutdown_retries;
-
/* Transport to which INIT chunk was last sent. */
struct sctp_transport *init_last_sent_to;
+ /* How many times have we resent a SHUTDOWN */
+ int shutdown_retries;
+
/* Next TSN : The next TSN number to be assigned to a new
* : DATA chunk. This is sent in the INIT or INIT
* : ACK chunk to the peer and incremented each
@@ -1818,8 +1817,8 @@ struct sctp_association {
* after reaching 4294967295.
*/
__u32 addip_serial;
- union sctp_addr *asconf_addr_del_pending;
int src_out_of_asoc_ok;
+ union sctp_addr *asconf_addr_del_pending;
struct sctp_transport *new_transport;
/* SCTP AUTH: list of the endpoint shared keys. These
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index 6db460121f84..a897b7e22541 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -331,6 +331,14 @@ enum {
IFLA_BOND_UNSPEC,
IFLA_BOND_MODE,
IFLA_BOND_ACTIVE_SLAVE,
+ IFLA_BOND_MIIMON,
+ IFLA_BOND_UPDELAY,
+ IFLA_BOND_DOWNDELAY,
+ IFLA_BOND_USE_CARRIER,
+ IFLA_BOND_ARP_INTERVAL,
+ IFLA_BOND_ARP_IP_TARGET,
+ IFLA_BOND_ARP_VALIDATE,
+ IFLA_BOND_ARP_ALL_TARGETS,
__IFLA_BOND_MAX,
};
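The eight new IFLA_BOND_* attributes extend bonding's netlink interface. A sketch of emitting two of them from a ->fill_info() style handler, where everything except the attribute names is illustrative:

#include <linux/if_link.h>
#include <net/netlink.h>

/* Illustrative emitter; the miimon and arp_interval values would come
 * from the bond's configuration.
 */
static int bond_fill_info_sketch(struct sk_buff *skb, u32 miimon,
				 u32 arp_interval)
{
	if (nla_put_u32(skb, IFLA_BOND_MIIMON, miimon) ||
	    nla_put_u32(skb, IFLA_BOND_ARP_INTERVAL, arp_interval))
		return -EMSGSIZE;

	return 0;
}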
diff --git a/include/uapi/linux/netconf.h b/include/uapi/linux/netconf.h
index 64804a798b0c..bd969d77ce52 100644
--- a/include/uapi/linux/netconf.h
+++ b/include/uapi/linux/netconf.h
@@ -14,6 +14,7 @@ enum {
NETCONFA_FORWARDING,
NETCONFA_RP_FILTER,
NETCONFA_MC_FORWARDING,
+ NETCONFA_PROXY_ARP,
__NETCONFA_MAX
};
#define NETCONFA_MAX (__NETCONFA_MAX - 1)
diff --git a/lib/Makefile b/lib/Makefile
index a459c31e8c6b..d0f79c547d97 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -26,7 +26,7 @@ obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \
gcd.o lcm.o list_sort.o uuid.o flex_array.o iovec.o clz_ctz.o \
bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o kfifo.o \
- percpu-refcount.o percpu_ida.o
+ percpu-refcount.o percpu_ida.o hash.o
obj-y += string_helpers.o
obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o
obj-y += kstrtox.o
diff --git a/lib/hash.c b/lib/hash.c
new file mode 100644
index 000000000000..b89f06a2d606
--- /dev/null
+++ b/lib/hash.c
@@ -0,0 +1,38 @@
+/* General purpose hashing library
+ *
+ * This is the start of a kernel hashing library, which can be extended
+ * with further algorithms in the future. arch_fast_hash{2,}() will
+ * eventually resolve to an architecture optimized implementation.
+ *
+ * Copyright 2013 Francesco Fusco <ffusco@redhat.com>
+ * Copyright 2013 Daniel Borkmann <dborkman@redhat.com>
+ * Copyright 2013 Thomas Graf <tgraf@redhat.com>
+ * Licensed under the GNU General Public License, version 2.0 (GPLv2)
+ */
+
+#include <linux/jhash.h>
+#include <linux/hash.h>
+
+static struct fast_hash_ops arch_hash_ops __read_mostly = {
+ .hash = jhash,
+ .hash2 = jhash2,
+};
+
+u32 arch_fast_hash(const void *data, u32 len, u32 seed)
+{
+ return arch_hash_ops.hash(data, len, seed);
+}
+EXPORT_SYMBOL_GPL(arch_fast_hash);
+
+u32 arch_fast_hash2(const u32 *data, u32 len, u32 seed)
+{
+ return arch_hash_ops.hash2(data, len, seed);
+}
+EXPORT_SYMBOL_GPL(arch_fast_hash2);
+
+static int __init hashlib_init(void)
+{
+ setup_arch_fast_hash(&arch_hash_ops);
+ return 0;
+}
+early_initcall(hashlib_init);
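An architecture opts in by providing a non-stub setup_arch_fast_hash(), declared in its own asm/hash.h instead of the asm-generic fallback above, which rewrites the ops at early_initcall time. A sketch, where the capability check and the optimized callbacks are hypothetical placeholders:

/* Sketch of an arch-side override; arch_has_fast_hash_insns() and the
 * my_arch_hash* callbacks stand in for whatever optimized primitives
 * the architecture provides.
 */
void setup_arch_fast_hash(struct fast_hash_ops *ops)
{
	if (arch_has_fast_hash_insns()) {
		ops->hash  = my_arch_hash;	/* (const void *, u32, u32) -> u32 */
		ops->hash2 = my_arch_hash2;	/* (const u32 *, u32, u32) -> u32 */
	}
}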
diff --git a/net/core/dev.c b/net/core/dev.c
index c95d664b2b42..9d4369ece679 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4544,6 +4544,27 @@ void *netdev_lower_get_next_private_rcu(struct net_device *dev,
EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);
/**
+ * netdev_lower_get_first_private_rcu - Get the first ->private from the
+ * lower neighbour list, RCU
+ * variant
+ * @dev: device
+ *
+ * Gets the first netdev_adjacent->private from the dev's lower neighbour
+ * list. The caller must hold RCU read lock.
+ */
+void *netdev_lower_get_first_private_rcu(struct net_device *dev)
+{
+ struct netdev_adjacent *lower;
+
+ lower = list_first_or_null_rcu(&dev->adj_list.lower,
+ struct netdev_adjacent, list);
+ if (lower)
+ return lower->private;
+ return NULL;
+}
+EXPORT_SYMBOL(netdev_lower_get_first_private_rcu);
+
+/**
* netdev_master_upper_dev_get_rcu - Get master upper device
* @dev: device
*
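The new accessor is only safe inside an RCU read-side critical section, as its kernel-doc notes; a usage sketch, where consume() is an illustrative stand-in for the caller's work:

#include <linux/netdevice.h>
#include <linux/rcupdate.h>

/* Illustrative caller: fetch and use the first lower device's private
 * pointer without leaking it past the RCU section.
 */
static void with_first_lower_private(struct net_device *dev,
				     void (*consume)(void *priv))
{
	void *priv;

	rcu_read_lock();
	priv = netdev_lower_get_first_private_rcu(dev);
	if (priv)
		consume(priv);	/* only valid inside this RCU section */
	rcu_read_unlock();
}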
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 84956f5f0135..de03fe7002d0 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1696,6 +1696,8 @@ static int inet_netconf_msgsize_devconf(int type)
size += nla_total_size(4);
if (type == -1 || type == NETCONFA_MC_FORWARDING)
size += nla_total_size(4);
+ if (type == -1 || type == NETCONFA_PROXY_ARP)
+ size += nla_total_size(4);
return size;
}
@@ -1732,6 +1734,10 @@ static int inet_netconf_fill_devconf(struct sk_buff *skb, int ifindex,
nla_put_s32(skb, NETCONFA_MC_FORWARDING,
IPV4_DEVCONF(*devconf, MC_FORWARDING)) < 0)
goto nla_put_failure;
+ if ((type == -1 || type == NETCONFA_PROXY_ARP) &&
+ nla_put_s32(skb, NETCONFA_PROXY_ARP,
+ IPV4_DEVCONF(*devconf, PROXY_ARP)) < 0)
+ goto nla_put_failure;
return nlmsg_end(skb, nlh);
@@ -1769,6 +1775,7 @@ static const struct nla_policy devconf_ipv4_policy[NETCONFA_MAX+1] = {
[NETCONFA_IFINDEX] = { .len = sizeof(int) },
[NETCONFA_FORWARDING] = { .len = sizeof(int) },
[NETCONFA_RP_FILTER] = { .len = sizeof(int) },
+ [NETCONFA_PROXY_ARP] = { .len = sizeof(int) },
};
static int inet_netconf_get_devconf(struct sk_buff *in_skb,
@@ -1950,6 +1957,19 @@ static void inet_forward_change(struct net *net)
}
}
+static int devinet_conf_ifindex(struct net *net, struct ipv4_devconf *cnf)
+{
+ if (cnf == net->ipv4.devconf_dflt)
+ return NETCONFA_IFINDEX_DEFAULT;
+ else if (cnf == net->ipv4.devconf_all)
+ return NETCONFA_IFINDEX_ALL;
+ else {
+ struct in_device *idev
+ = container_of(cnf, struct in_device, cnf);
+ return idev->dev->ifindex;
+ }
+}
+
static int devinet_conf_proc(struct ctl_table *ctl, int write,
void __user *buffer,
size_t *lenp, loff_t *ppos)
@@ -1962,6 +1982,7 @@ static int devinet_conf_proc(struct ctl_table *ctl, int write,
struct ipv4_devconf *cnf = ctl->extra1;
struct net *net = ctl->extra2;
int i = (int *)ctl->data - cnf->data;
+ int ifindex;
set_bit(i, cnf->state);
@@ -1971,23 +1992,19 @@ static int devinet_conf_proc(struct ctl_table *ctl, int write,
i == IPV4_DEVCONF_ROUTE_LOCALNET - 1)
if ((new_value == 0) && (old_value != 0))
rt_cache_flush(net);
+
if (i == IPV4_DEVCONF_RP_FILTER - 1 &&
new_value != old_value) {
- int ifindex;
-
- if (cnf == net->ipv4.devconf_dflt)
- ifindex = NETCONFA_IFINDEX_DEFAULT;
- else if (cnf == net->ipv4.devconf_all)
- ifindex = NETCONFA_IFINDEX_ALL;
- else {
- struct in_device *idev =
- container_of(cnf, struct in_device,
- cnf);
- ifindex = idev->dev->ifindex;
- }
+ ifindex = devinet_conf_ifindex(net, cnf);
inet_netconf_notify_devconf(net, NETCONFA_RP_FILTER,
ifindex, cnf);
}
+ if (i == IPV4_DEVCONF_PROXY_ARP - 1 &&
+ new_value != old_value) {
+ ifindex = devinet_conf_ifindex(net, cnf);
+ inet_netconf_notify_devconf(net, NETCONFA_PROXY_ARP,
+ ifindex, cnf);
+ }
}
return ret;
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index 7540a0ed75ae..6fb4162fa785 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -160,8 +160,8 @@ out:
static int ipv6_exthdrs_len(struct ipv6hdr *iph,
const struct net_offload **opps)
{
- struct ipv6_opt_hdr *opth = NULL;
- int len = 0, proto, optlen;
+ struct ipv6_opt_hdr *opth = (void *)iph;
+ int len = 0, proto, optlen = sizeof(*iph);
proto = iph->nexthdr;
for (;;) {
@@ -172,10 +172,7 @@ static int ipv6_exthdrs_len(struct ipv6hdr *iph,
if (!((*opps)->flags & INET6_PROTO_GSO_EXTHDR))
break;
}
- if (opth == NULL)
- opth = (void *)(iph+1);
- else
- opth = (void *)opth + optlen;
+ opth = (void *)opth + optlen;
optlen = ipv6_optlen(opth);
len += optlen;
proto = opth->nexthdr;
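The rewrite above removes the first-iteration special case by seeding the walk with the IPv6 header itself; a short equivalence note:

/* First pass:  opth = (void *)iph + sizeof(*iph) == (void *)(iph + 1),
 * which is exactly what the deleted "opth == NULL" branch computed.
 * Later passes: opth advances by the previous header's ipv6_optlen(),
 * as before. 'len' is unchanged either way: the seed value of optlen
 * is consumed by the pointer advance and never added to 'len', which
 * still accumulates only the extension-header lengths.
 */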
diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
index e42542706087..0e720c316070 100644
--- a/net/openvswitch/flow_table.c
+++ b/net/openvswitch/flow_table.c
@@ -25,7 +25,7 @@
#include <linux/if_vlan.h>
#include <net/llc_pdu.h>
#include <linux/kernel.h>
-#include <linux/jhash.h>
+#include <linux/hash.h>
#include <linux/jiffies.h>
#include <linux/llc.h>
#include <linux/module.h>
@@ -362,7 +362,7 @@ static u32 flow_hash(const struct sw_flow_key *key, int key_start,
/* Make sure the number of hash bytes is a multiple of u32. */
BUILD_BUG_ON(sizeof(long) % sizeof(u32));
- return jhash2(hash_key, hash_u32s, 0);
+ return arch_fast_hash2(hash_key, hash_u32s, 0);
}
static int flow_key_start(const struct sw_flow_key *key)
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 9d70f1349926..cc803c63059a 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -310,7 +310,7 @@ static bool packet_use_direct_xmit(const struct packet_sock *po)
static u16 packet_pick_tx_queue(struct net_device *dev)
{
- return (u16) smp_processor_id() % dev->real_num_tx_queues;
+ return (u16) raw_smp_processor_id() % dev->real_num_tx_queues;
}
/* register_prot_hook must be invoked with the po->bind_lock held,
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 547b4a88ae2a..c31190e29b90 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -273,8 +273,11 @@ static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
void qdisc_list_add(struct Qdisc *q)
{
+ struct Qdisc *root = qdisc_dev(q)->qdisc;
+
+ WARN_ON_ONCE(root == &noop_qdisc);
if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS))
- list_add_tail(&q->list, &qdisc_dev(q)->qdisc->list);
+ list_add_tail(&q->list, &root->list);
}
EXPORT_SYMBOL(qdisc_list_add);
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 6a91d7d48ade..32bb942d2faa 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -718,8 +718,8 @@ static void attach_default_qdiscs(struct net_device *dev)
} else {
qdisc = qdisc_create_dflt(txq, &mq_qdisc_ops, TC_H_ROOT);
if (qdisc) {
- qdisc->ops->attach(qdisc);
dev->qdisc = qdisc;
+ qdisc->ops->attach(qdisc);
}
}
}
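The reordering above pairs with the WARN_ON_ONCE added to qdisc_list_add() in the sch_api.c hunk earlier in this patch; the point of the swap, restated:

/* Ordering note: qdisc_list_add() links a child into
 * qdisc_dev(q)->qdisc->list. If ->attach() ran before dev->qdisc was
 * assigned, children registered from the attach path would be chained
 * onto the shared noop_qdisc, the exact condition the new
 * WARN_ON_ONCE(root == &noop_qdisc) flags.
 */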
diff --git a/net/tipc/core.h b/net/tipc/core.h
index 94895d4e86ab..1ff477b0450d 100644
--- a/net/tipc/core.h
+++ b/net/tipc/core.h
@@ -47,7 +47,7 @@
#include <linux/mm.h>
#include <linux/timer.h>
#include <linux/string.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/interrupt.h>
#include <linux/atomic.h>
#include <asm/hardirq.h>
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
index 09dcd54b04e1..92a1533af4e0 100644
--- a/net/tipc/name_table.c
+++ b/net/tipc/name_table.c
@@ -148,8 +148,7 @@ static struct publication *publ_create(u32 type, u32 lower, u32 upper,
*/
static struct sub_seq *tipc_subseq_alloc(u32 cnt)
{
- struct sub_seq *sseq = kcalloc(cnt, sizeof(struct sub_seq), GFP_ATOMIC);
- return sseq;
+ return kcalloc(cnt, sizeof(struct sub_seq), GFP_ATOMIC);
}
/**
diff --git a/net/tipc/port.c b/net/tipc/port.c
index c081a7632302..5fd4c8cec08e 100644
--- a/net/tipc/port.c
+++ b/net/tipc/port.c
@@ -832,17 +832,14 @@ exit:
*/
int __tipc_disconnect(struct tipc_port *tp_ptr)
{
- int res;
-
if (tp_ptr->connected) {
tp_ptr->connected = 0;
/* let timer expire on its own to avoid deadlock! */
tipc_nodesub_unsubscribe(&tp_ptr->subscription);
- res = 0;
- } else {
- res = -ENOTCONN;
+ return 0;
}
- return res;
+
+ return -ENOTCONN;
}
/*
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 3b61851bb927..83f466e57fea 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -239,7 +239,6 @@ static int tipc_sk_create(struct net *net, struct socket *sock, int protocol,
int tipc_sock_create_local(int type, struct socket **res)
{
int rc;
- struct sock *sk;
rc = sock_create_lite(AF_TIPC, type, 0, res);
if (rc < 0) {
@@ -248,8 +247,6 @@ int tipc_sock_create_local(int type, struct socket **res)
}
tipc_sk_create(&init_net, *res, 0, 1);
- sk = (*res)->sk;
-
return 0;
}
@@ -754,16 +751,14 @@ static int send_stream(struct kiocb *iocb, struct socket *sock,
/* Handle special cases where there is no connection */
if (unlikely(sock->state != SS_CONNECTED)) {
- if (sock->state == SS_UNCONNECTED) {
+ res = -ENOTCONN;
+
+ if (sock->state == SS_UNCONNECTED)
res = send_packet(NULL, sock, m, total_len);
- goto exit;
- } else if (sock->state == SS_DISCONNECTING) {
+ else if (sock->state == SS_DISCONNECTING)
res = -EPIPE;
- goto exit;
- } else {
- res = -ENOTCONN;
- goto exit;
- }
+
+ goto exit;
}
if (unlikely(m->msg_name)) {
@@ -1311,14 +1306,12 @@ static u32 filter_connect(struct tipc_sock *tsock, struct sk_buff **buf)
static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *buf)
{
struct tipc_msg *msg = buf_msg(buf);
- unsigned int limit;
if (msg_connected(msg))
- limit = sysctl_tipc_rmem[2];
- else
- limit = sk->sk_rcvbuf >> TIPC_CRITICAL_IMPORTANCE <<
- msg_importance(msg);
- return limit;
+ return sysctl_tipc_rmem[2];
+
+ return sk->sk_rcvbuf >> TIPC_CRITICAL_IMPORTANCE <<
+ msg_importance(msg);
}
/**
@@ -1514,14 +1507,12 @@ static int connect(struct socket *sock, struct sockaddr *dest, int destlen,
sock->state != SS_CONNECTING,
timeout ? (long)msecs_to_jiffies(timeout)
: MAX_SCHEDULE_TIMEOUT);
- lock_sock(sk);
if (res <= 0) {
if (res == 0)
res = -ETIMEDOUT;
- else
- ; /* leave "res" unchanged */
- goto exit;
+ return res;
}
+ lock_sock(sk);
}
if (unlikely(sock->state == SS_DISCONNECTING))