diff options
30 files changed, 2875 insertions, 441 deletions
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index 09f6b3cc1f66..4daffb284931 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -233,6 +233,35 @@ static void bcm_sf2_eee_enable_set(struct dsa_switch *ds, int port, bool enable) core_writel(priv, reg, CORE_EEE_EN_CTRL); } +static void bcm_sf2_gphy_enable_set(struct dsa_switch *ds, bool enable) +{ + struct bcm_sf2_priv *priv = ds_to_priv(ds); + u32 reg; + + reg = reg_readl(priv, REG_SPHY_CNTRL); + if (enable) { + reg |= PHY_RESET; + reg &= ~(EXT_PWR_DOWN | IDDQ_BIAS | CK25_DIS); + reg_writel(priv, reg, REG_SPHY_CNTRL); + udelay(21); + reg = reg_readl(priv, REG_SPHY_CNTRL); + reg &= ~PHY_RESET; + } else { + reg |= EXT_PWR_DOWN | IDDQ_BIAS | PHY_RESET; + reg_writel(priv, reg, REG_SPHY_CNTRL); + mdelay(1); + reg |= CK25_DIS; + } + reg_writel(priv, reg, REG_SPHY_CNTRL); + + /* Use PHY-driven LED signaling */ + if (!enable) { + reg = reg_readl(priv, REG_LED_CNTRL(0)); + reg |= SPDLNK_SRC_SEL; + reg_writel(priv, reg, REG_LED_CNTRL(0)); + } +} + static int bcm_sf2_port_setup(struct dsa_switch *ds, int port, struct phy_device *phy) { @@ -248,6 +277,24 @@ static int bcm_sf2_port_setup(struct dsa_switch *ds, int port, /* Clear the Rx and Tx disable bits and set to no spanning tree */ core_writel(priv, 0, CORE_G_PCTL_PORT(port)); + /* Re-enable the GPHY and re-apply workarounds */ + if (port == 0 && priv->hw_params.num_gphy == 1) { + bcm_sf2_gphy_enable_set(ds, true); + if (phy) { + /* if phy_stop() has been called before, phy + * will be in halted state, and phy_start() + * will call resume. + * + * the resume path does not configure back + * autoneg settings, and since we hard reset + * the phy manually here, we need to reset the + * state machine also. + */ + phy->state = PHY_READY; + phy_init_hw(phy); + } + } + /* Enable port 7 interrupts to get notified */ if (port == 7) intrl2_1_mask_clear(priv, P_IRQ_MASK(P7_IRQ_OFF)); @@ -281,6 +328,9 @@ static void bcm_sf2_port_disable(struct dsa_switch *ds, int port, intrl2_1_writel(priv, P_IRQ_MASK(P7_IRQ_OFF), INTRL2_CPU_CLEAR); } + if (port == 0 && priv->hw_params.num_gphy == 1) + bcm_sf2_gphy_enable_set(ds, false); + if (dsa_is_cpu_port(ds, port)) off = CORE_IMP_CTL; else @@ -771,7 +821,6 @@ static int bcm_sf2_sw_resume(struct dsa_switch *ds) { struct bcm_sf2_priv *priv = ds_to_priv(ds); unsigned int port; - u32 reg; int ret; ret = bcm_sf2_sw_rst(priv); @@ -780,17 +829,8 @@ static int bcm_sf2_sw_resume(struct dsa_switch *ds) return ret; } - /* Reinitialize the single GPHY */ - if (priv->hw_params.num_gphy == 1) { - reg = reg_readl(priv, REG_SPHY_CNTRL); - reg |= PHY_RESET; - reg &= ~(EXT_PWR_DOWN | IDDQ_BIAS); - reg_writel(priv, reg, REG_SPHY_CNTRL); - udelay(21); - reg = reg_readl(priv, REG_SPHY_CNTRL); - reg &= ~PHY_RESET; - reg_writel(priv, reg, REG_SPHY_CNTRL); - } + if (priv->hw_params.num_gphy == 1) + bcm_sf2_gphy_enable_set(ds, true); for (port = 0; port < DSA_MAX_PORTS; port++) { if ((1 << port) & ds->phys_port_mask) diff --git a/drivers/net/dsa/bcm_sf2_regs.h b/drivers/net/dsa/bcm_sf2_regs.h index 1bb49cb699ab..cabdfa5e217a 100644 --- a/drivers/net/dsa/bcm_sf2_regs.h +++ b/drivers/net/dsa/bcm_sf2_regs.h @@ -61,6 +61,10 @@ #define LPI_COUNT_SHIFT 9 #define LPI_COUNT_MASK 0x3F +#define REG_LED_CNTRL_BASE 0x90 +#define REG_LED_CNTRL(x) (REG_LED_CNTRL_BASE + (x) * 4) +#define SPDLNK_SRC_SEL (1 << 24) + /* Register set relative to 'INTRL2_0' and 'INTRL2_1' */ #define INTRL2_CPU_STATUS 0x00 #define INTRL2_CPU_SET 0x04 diff --git a/drivers/net/usb/r8152.c 
b/drivers/net/usb/r8152.c index b74a272243ae..5980ac6c48dd 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -489,16 +489,16 @@ struct rx_desc { #define RX_LEN_MASK 0x7fff __le32 opts2; -#define RD_UDP_CS (1 << 23) -#define RD_TCP_CS (1 << 22) -#define RD_IPV6_CS (1 << 20) -#define RD_IPV4_CS (1 << 19) +#define RD_UDP_CS BIT(23) +#define RD_TCP_CS BIT(22) +#define RD_IPV6_CS BIT(20) +#define RD_IPV4_CS BIT(19) __le32 opts3; -#define IPF (1 << 23) /* IP checksum fail */ -#define UDPF (1 << 22) /* UDP checksum fail */ -#define TCPF (1 << 21) /* TCP checksum fail */ -#define RX_VLAN_TAG (1 << 16) +#define IPF BIT(23) /* IP checksum fail */ +#define UDPF BIT(22) /* UDP checksum fail */ +#define TCPF BIT(21) /* TCP checksum fail */ +#define RX_VLAN_TAG BIT(16) __le32 opts4; __le32 opts5; @@ -507,24 +507,24 @@ struct rx_desc { struct tx_desc { __le32 opts1; -#define TX_FS (1 << 31) /* First segment of a packet */ -#define TX_LS (1 << 30) /* Final segment of a packet */ -#define GTSENDV4 (1 << 28) -#define GTSENDV6 (1 << 27) +#define TX_FS BIT(31) /* First segment of a packet */ +#define TX_LS BIT(30) /* Final segment of a packet */ +#define GTSENDV4 BIT(28) +#define GTSENDV6 BIT(27) #define GTTCPHO_SHIFT 18 #define GTTCPHO_MAX 0x7fU #define TX_LEN_MAX 0x3ffffU __le32 opts2; -#define UDP_CS (1 << 31) /* Calculate UDP/IP checksum */ -#define TCP_CS (1 << 30) /* Calculate TCP/IP checksum */ -#define IPV4_CS (1 << 29) /* Calculate IPv4 checksum */ -#define IPV6_CS (1 << 28) /* Calculate IPv6 checksum */ +#define UDP_CS BIT(31) /* Calculate UDP/IP checksum */ +#define TCP_CS BIT(30) /* Calculate TCP/IP checksum */ +#define IPV4_CS BIT(29) /* Calculate IPv4 checksum */ +#define IPV6_CS BIT(28) /* Calculate IPv6 checksum */ #define MSS_SHIFT 17 #define MSS_MAX 0x7ffU #define TCPHO_SHIFT 17 #define TCPHO_MAX 0x7ffU -#define TX_VLAN_TAG (1 << 16) +#define TX_VLAN_TAG BIT(16) }; struct r8152; @@ -581,7 +581,6 @@ struct r8152 { u16 ocp_base; u8 *intr_buff; u8 version; - u8 speed; }; enum rtl_version { @@ -1157,12 +1156,12 @@ static void intr_callback(struct urb *urb) d = urb->transfer_buffer; if (INTR_LINK & __le16_to_cpu(d[0])) { - if (!(tp->speed & LINK_STATUS)) { + if (!netif_carrier_ok(tp->netdev)) { set_bit(RTL8152_LINK_CHG, &tp->flags); schedule_delayed_work(&tp->schedule, 0); } } else { - if (tp->speed & LINK_STATUS) { + if (netif_carrier_ok(tp->netdev)) { set_bit(RTL8152_LINK_CHG, &tp->flags); schedule_delayed_work(&tp->schedule, 0); } @@ -1331,18 +1330,6 @@ static struct tx_agg *r8152_get_tx_agg(struct r8152 *tp) return agg; } -static inline __be16 get_protocol(struct sk_buff *skb) -{ - __be16 protocol; - - if (skb->protocol == htons(ETH_P_8021Q)) - protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto; - else - protocol = skb->protocol; - - return protocol; -} - /* r8152_csum_workaround() * The hw limites the value the transport offset. When the offset is out of the * range, calculate the checksum by sw. 
@@ -1448,7 +1435,7 @@ static int r8152_tx_csum(struct r8152 *tp, struct tx_desc *desc, goto unavailable; } - switch (get_protocol(skb)) { + switch (vlan_get_protocol(skb)) { case htons(ETH_P_IP): opts1 |= GTSENDV4; break; @@ -1479,7 +1466,7 @@ static int r8152_tx_csum(struct r8152 *tp, struct tx_desc *desc, goto unavailable; } - switch (get_protocol(skb)) { + switch (vlan_get_protocol(skb)) { case htons(ETH_P_IP): opts2 |= IPV4_CS; ip_protocol = ip_hdr(skb)->protocol; @@ -1643,7 +1630,7 @@ static int rx_bottom(struct r8152 *tp, int budget) { unsigned long flags; struct list_head *cursor, *next, rx_queue; - int work_done = 0; + int ret = 0, work_done = 0; if (!skb_queue_empty(&tp->rx_queue)) { while (work_done < budget) { @@ -1734,7 +1721,18 @@ find_next_rx: } submit: - r8152_submit_rx(tp, agg, GFP_ATOMIC); + if (!ret) { + ret = r8152_submit_rx(tp, agg, GFP_ATOMIC); + } else { + urb->actual_length = 0; + list_add_tail(&agg->list, next); + } + } + + if (!list_empty(&rx_queue)) { + spin_lock_irqsave(&tp->rx_lock, flags); + list_splice_tail(&rx_queue, &tp->rx_done); + spin_unlock_irqrestore(&tp->rx_lock, flags); } out1: @@ -1883,7 +1881,7 @@ static void rtl8152_set_rx_mode(struct net_device *netdev) { struct r8152 *tp = netdev_priv(netdev); - if (tp->speed & LINK_STATUS) { + if (netif_carrier_ok(netdev)) { set_bit(RTL8152_SET_RX_MODE, &tp->flags); schedule_delayed_work(&tp->schedule, 0); } @@ -2907,21 +2905,20 @@ static void set_carrier(struct r8152 *tp) speed = rtl8152_get_speed(tp); if (speed & LINK_STATUS) { - if (!(tp->speed & LINK_STATUS)) { + if (!netif_carrier_ok(netdev)) { tp->rtl_ops.enable(tp); set_bit(RTL8152_SET_RX_MODE, &tp->flags); netif_carrier_on(netdev); rtl_start_rx(tp); } } else { - if (tp->speed & LINK_STATUS) { + if (netif_carrier_ok(netdev)) { netif_carrier_off(netdev); napi_disable(&tp->napi); tp->rtl_ops.disable(tp); napi_enable(&tp->napi); } } - tp->speed = speed; } static void rtl_work_func_t(struct work_struct *work) @@ -2953,7 +2950,7 @@ static void rtl_work_func_t(struct work_struct *work) /* don't schedule napi before linking */ if (test_bit(SCHEDULE_NAPI, &tp->flags) && - (tp->speed & LINK_STATUS)) { + netif_carrier_ok(tp->netdev)) { clear_bit(SCHEDULE_NAPI, &tp->flags); napi_schedule(&tp->napi); } @@ -2976,8 +2973,7 @@ static int rtl8152_open(struct net_device *netdev) if (res) goto out; - /* set speed to 0 to avoid autoresume try to submit rx */ - tp->speed = 0; + netif_carrier_off(netdev); res = usb_autopm_get_interface(tp->intf); if (res < 0) { @@ -2994,7 +2990,7 @@ static int rtl8152_open(struct net_device *netdev) cancel_delayed_work_sync(&tp->schedule); /* disable the tx/rx, if the workqueue has enabled them. */ - if (tp->speed & LINK_STATUS) + if (netif_carrier_ok(netdev)) tp->rtl_ops.disable(tp); } @@ -3003,7 +2999,6 @@ static int rtl8152_open(struct net_device *netdev) rtl8152_set_speed(tp, AUTONEG_ENABLE, tp->mii.supports_gmii ? 
SPEED_1000 : SPEED_100, DUPLEX_FULL); - tp->speed = 0; netif_carrier_off(netdev); netif_start_queue(netdev); set_bit(WORK_ENABLE, &tp->flags); @@ -3039,7 +3034,7 @@ static int rtl8152_close(struct net_device *netdev) netif_stop_queue(netdev); res = usb_autopm_get_interface(tp->intf); - if (res < 0) { + if (res < 0 || test_bit(RTL8152_UNPLUG, &tp->flags)) { rtl_drop_queued_tx(tp); rtl_stop_rx(tp); } else { @@ -3245,10 +3240,10 @@ static void r8153_init(struct r8152 *tp) ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_LPM_CTRL); ocp_data &= ~LPM_TIMER_MASK; - if (tp->udev->speed == USB_SPEED_SUPER) - ocp_data |= LPM_TIMER_500US; - else + if (tp->version == RTL_VER_04 && tp->udev->speed != USB_SPEED_SUPER) ocp_data |= LPM_TIMER_500MS; + else + ocp_data |= LPM_TIMER_500US; ocp_write_byte(tp, MCU_TYPE_USB, USB_LPM_CTRL, ocp_data); ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_AFE_CTRL2); @@ -3329,7 +3324,7 @@ static int rtl8152_resume(struct usb_interface *intf) rtl_runtime_suspend_enable(tp, false); clear_bit(SELECTIVE_SUSPEND, &tp->flags); set_bit(WORK_ENABLE, &tp->flags); - if (tp->speed & LINK_STATUS) + if (netif_carrier_ok(tp->netdev)) rtl_start_rx(tp); } else { tp->rtl_ops.up(tp); @@ -3337,7 +3332,6 @@ static int rtl8152_resume(struct usb_interface *intf) tp->mii.supports_gmii ? SPEED_1000 : SPEED_100, DUPLEX_FULL); - tp->speed = 0; netif_carrier_off(tp->netdev); set_bit(WORK_ENABLE, &tp->flags); } @@ -3902,8 +3896,7 @@ static int rtl8152_probe(struct usb_interface *intf, netdev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO | NETIF_F_FRAGLIST | NETIF_F_IPV6_CSUM | NETIF_F_TSO6 | - NETIF_F_HW_VLAN_CTAG_RX | - NETIF_F_HW_VLAN_CTAG_TX; + NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX; netdev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | NETIF_F_IPV6_CSUM | NETIF_F_TSO6; diff --git a/drivers/nfc/st21nfca/st21nfca_se.c b/drivers/nfc/st21nfca/st21nfca_se.c index 9b93d3904ab5..bd13cac9c66a 100644 --- a/drivers/nfc/st21nfca/st21nfca_se.c +++ b/drivers/nfc/st21nfca/st21nfca_se.c @@ -301,6 +301,8 @@ int st21nfca_connectivity_event_received(struct nfc_hci_dev *hdev, u8 host, u8 event, struct sk_buff *skb) { int r = 0; + struct device *dev = &hdev->ndev->dev; + struct nfc_evt_transaction *transaction; pr_debug("connectivity gate event: %x\n", event); @@ -308,6 +310,25 @@ int st21nfca_connectivity_event_received(struct nfc_hci_dev *hdev, u8 host, case ST21NFCA_EVT_CONNECTIVITY: break; case ST21NFCA_EVT_TRANSACTION: + if (skb->len < NFC_MIN_AID_LENGTH + 2 && + skb->data[0] != NFC_EVT_TRANSACTION_AID_TAG) + return -EPROTO; + + transaction = (struct nfc_evt_transaction *)devm_kzalloc(dev, + skb->len - 2, GFP_KERNEL); + + transaction->aid_len = skb->data[1]; + memcpy(transaction->aid, &skb->data[2], skb->data[1]); + + if (skb->data[transaction->aid_len + 2] != + NFC_EVT_TRANSACTION_PARAMS_TAG) + return -EPROTO; + + transaction->params_len = skb->data[transaction->aid_len + 3]; + memcpy(transaction->params, skb->data + + transaction->aid_len + 4, transaction->params_len); + + r = nfc_se_transaction(hdev->ndev, host, transaction); break; default: return 1; diff --git a/drivers/nfc/st21nfcb/Makefile b/drivers/nfc/st21nfcb/Makefile index f4d835dd15f2..ce659a9e5a1a 100644 --- a/drivers/nfc/st21nfcb/Makefile +++ b/drivers/nfc/st21nfcb/Makefile @@ -2,7 +2,7 @@ # Makefile for ST21NFCB NCI based NFC driver # -st21nfcb_nci-objs = ndlc.o st21nfcb.o +st21nfcb_nci-objs = ndlc.o st21nfcb.o st21nfcb_se.o obj-$(CONFIG_NFC_ST21NFCB) += 
st21nfcb_nci.o st21nfcb_i2c-objs = i2c.o diff --git a/drivers/nfc/st21nfcb/st21nfcb.c b/drivers/nfc/st21nfcb/st21nfcb.c index ea63d5877831..ca9871ab3fb3 100644 --- a/drivers/nfc/st21nfcb/st21nfcb.c +++ b/drivers/nfc/st21nfcb/st21nfcb.c @@ -22,6 +22,7 @@ #include <net/nfc/nci_core.h> #include "st21nfcb.h" +#include "st21nfcb_se.h" #define DRIVER_DESC "NCI NFC driver for ST21NFCB" @@ -78,6 +79,13 @@ static struct nci_ops st21nfcb_nci_ops = { .close = st21nfcb_nci_close, .send = st21nfcb_nci_send, .get_rfprotocol = st21nfcb_nci_get_rfprotocol, + .discover_se = st21nfcb_nci_discover_se, + .enable_se = st21nfcb_nci_enable_se, + .disable_se = st21nfcb_nci_disable_se, + .se_io = st21nfcb_nci_se_io, + .hci_load_session = st21nfcb_hci_load_session, + .hci_event_received = st21nfcb_hci_event_received, + .hci_cmd_received = st21nfcb_hci_cmd_received, }; int st21nfcb_nci_probe(struct llt_ndlc *ndlc, int phy_headroom, @@ -114,9 +122,10 @@ int st21nfcb_nci_probe(struct llt_ndlc *ndlc, int phy_headroom, if (r) { pr_err("Cannot register nfc device to nci core\n"); nci_free_device(ndlc->ndev); + return r; } - return r; + return st21nfcb_se_init(ndlc->ndev); } EXPORT_SYMBOL_GPL(st21nfcb_nci_probe); diff --git a/drivers/nfc/st21nfcb/st21nfcb.h b/drivers/nfc/st21nfcb/st21nfcb.h index ea58a56ad794..5ef8a58c9839 100644 --- a/drivers/nfc/st21nfcb/st21nfcb.h +++ b/drivers/nfc/st21nfcb/st21nfcb.h @@ -19,6 +19,7 @@ #ifndef __LOCAL_ST21NFCB_H_ #define __LOCAL_ST21NFCB_H_ +#include "st21nfcb_se.h" #include "ndlc.h" /* Define private flags: */ @@ -27,6 +28,7 @@ struct st21nfcb_nci_info { struct llt_ndlc *ndlc; unsigned long flags; + struct st21nfcb_se_info se_info; }; void st21nfcb_nci_remove(struct nci_dev *ndev); diff --git a/drivers/nfc/st21nfcb/st21nfcb_se.c b/drivers/nfc/st21nfcb/st21nfcb_se.c new file mode 100644 index 000000000000..7c82e9d87a65 --- /dev/null +++ b/drivers/nfc/st21nfcb/st21nfcb_se.c @@ -0,0 +1,707 @@ +/* + * NCI based Driver for STMicroelectronics NFC Chip + * + * Copyright (C) 2014 STMicroelectronics SAS. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see <http://www.gnu.org/licenses/>. 
+ */ + +#include <linux/module.h> +#include <linux/nfc.h> +#include <linux/delay.h> +#include <net/nfc/nci.h> +#include <net/nfc/nci_core.h> + +#include "st21nfcb.h" +#include "st21nfcb_se.h" + +struct st21nfcb_pipe_info { + u8 pipe_state; + u8 src_host_id; + u8 src_gate_id; + u8 dst_host_id; + u8 dst_gate_id; +} __packed; + +/* Hosts */ +#define ST21NFCB_HOST_CONTROLLER_ID 0x00 +#define ST21NFCB_TERMINAL_HOST_ID 0x01 +#define ST21NFCB_UICC_HOST_ID 0x02 +#define ST21NFCB_ESE_HOST_ID 0xc0 + +/* Gates */ +#define ST21NFCB_DEVICE_MGNT_GATE 0x01 +#define ST21NFCB_APDU_READER_GATE 0xf0 +#define ST21NFCB_CONNECTIVITY_GATE 0x41 + +/* Pipes */ +#define ST21NFCB_DEVICE_MGNT_PIPE 0x02 + +/* Connectivity pipe only */ +#define ST21NFCB_SE_COUNT_PIPE_UICC 0x01 +/* Connectivity + APDU Reader pipe */ +#define ST21NFCB_SE_COUNT_PIPE_EMBEDDED 0x02 + +#define ST21NFCB_SE_TO_HOT_PLUG 1000 /* msecs */ +#define ST21NFCB_SE_TO_PIPES 2000 + +#define ST21NFCB_EVT_HOT_PLUG_IS_INHIBITED(x) (x->data[0] & 0x80) + +#define NCI_HCI_APDU_PARAM_ATR 0x01 +#define NCI_HCI_ADMIN_PARAM_SESSION_IDENTITY 0x01 +#define NCI_HCI_ADMIN_PARAM_WHITELIST 0x03 +#define NCI_HCI_ADMIN_PARAM_HOST_LIST 0x04 + +#define ST21NFCB_EVT_SE_HARD_RESET 0x20 +#define ST21NFCB_EVT_TRANSMIT_DATA 0x10 +#define ST21NFCB_EVT_WTX_REQUEST 0x11 +#define ST21NFCB_EVT_SE_SOFT_RESET 0x11 +#define ST21NFCB_EVT_SE_END_OF_APDU_TRANSFER 0x21 +#define ST21NFCB_EVT_HOT_PLUG 0x03 + +#define ST21NFCB_SE_MODE_OFF 0x00 +#define ST21NFCB_SE_MODE_ON 0x01 + +#define ST21NFCB_EVT_CONNECTIVITY 0x10 +#define ST21NFCB_EVT_TRANSACTION 0x12 + +#define ST21NFCB_DM_GETINFO 0x13 +#define ST21NFCB_DM_GETINFO_PIPE_LIST 0x02 +#define ST21NFCB_DM_GETINFO_PIPE_INFO 0x01 +#define ST21NFCB_DM_PIPE_CREATED 0x02 +#define ST21NFCB_DM_PIPE_OPEN 0x04 +#define ST21NFCB_DM_RF_ACTIVE 0x80 +#define ST21NFCB_DM_DISCONNECT 0x30 + +#define ST21NFCB_DM_IS_PIPE_OPEN(p) \ + ((p & 0x0f) == (ST21NFCB_DM_PIPE_CREATED | ST21NFCB_DM_PIPE_OPEN)) + +#define ST21NFCB_ATR_DEFAULT_BWI 0x04 + +/* + * WT = 2^BWI/10[s], convert into msecs and add a secure + * room by increasing by 2 this timeout + */ +#define ST21NFCB_BWI_TO_TIMEOUT(x) ((1 << x) * 200) +#define ST21NFCB_ATR_GET_Y_FROM_TD(x) (x >> 4) + +/* If TA is present bit 0 is set */ +#define ST21NFCB_ATR_TA_PRESENT(x) (x & 0x01) +/* If TB is present bit 1 is set */ +#define ST21NFCB_ATR_TB_PRESENT(x) (x & 0x02) + +#define ST21NFCB_NUM_DEVICES 256 + +static DECLARE_BITMAP(dev_mask, ST21NFCB_NUM_DEVICES); + +/* Here are the mandatory pipe for st21nfcb */ +static struct nci_hci_gate st21nfcb_gates[] = { + {NCI_HCI_ADMIN_GATE, NCI_HCI_ADMIN_PIPE, + ST21NFCB_HOST_CONTROLLER_ID}, + {NCI_HCI_LINK_MGMT_GATE, NCI_HCI_LINK_MGMT_PIPE, + ST21NFCB_HOST_CONTROLLER_ID}, + {ST21NFCB_DEVICE_MGNT_GATE, ST21NFCB_DEVICE_MGNT_PIPE, + ST21NFCB_HOST_CONTROLLER_ID}, + + /* Secure element pipes are created by secure element host */ + {ST21NFCB_CONNECTIVITY_GATE, NCI_HCI_DO_NOT_OPEN_PIPE, + ST21NFCB_HOST_CONTROLLER_ID}, + {ST21NFCB_APDU_READER_GATE, NCI_HCI_DO_NOT_OPEN_PIPE, + ST21NFCB_HOST_CONTROLLER_ID}, +}; + +static u8 st21nfcb_se_get_bwi(struct nci_dev *ndev) +{ + int i; + u8 td; + struct st21nfcb_nci_info *info = nci_get_drvdata(ndev); + + /* Bits 8 to 5 of the first TB for T=1 encode BWI from zero to nine */ + for (i = 1; i < ST21NFCB_ESE_MAX_LENGTH; i++) { + td = ST21NFCB_ATR_GET_Y_FROM_TD(info->se_info.atr[i]); + if (ST21NFCB_ATR_TA_PRESENT(td)) + i++; + if (ST21NFCB_ATR_TB_PRESENT(td)) { + i++; + return info->se_info.atr[i] >> 4; + } + } + return ST21NFCB_ATR_DEFAULT_BWI; +} 
+ +static void st21nfcb_se_get_atr(struct nci_dev *ndev) +{ + struct st21nfcb_nci_info *info = nci_get_drvdata(ndev); + int r; + struct sk_buff *skb; + + r = nci_hci_get_param(ndev, ST21NFCB_APDU_READER_GATE, + NCI_HCI_APDU_PARAM_ATR, &skb); + if (r < 0) + return; + + if (skb->len <= ST21NFCB_ESE_MAX_LENGTH) { + memcpy(info->se_info.atr, skb->data, skb->len); + + info->se_info.wt_timeout = + ST21NFCB_BWI_TO_TIMEOUT(st21nfcb_se_get_bwi(ndev)); + } + kfree_skb(skb); +} + +int st21nfcb_hci_load_session(struct nci_dev *ndev) +{ + int i, j, r; + struct sk_buff *skb_pipe_list, *skb_pipe_info; + struct st21nfcb_pipe_info *dm_pipe_info; + u8 pipe_list[] = { ST21NFCB_DM_GETINFO_PIPE_LIST, + ST21NFCB_TERMINAL_HOST_ID}; + u8 pipe_info[] = { ST21NFCB_DM_GETINFO_PIPE_INFO, + ST21NFCB_TERMINAL_HOST_ID, 0}; + + /* On ST21NFCB device pipes number are dynamics + * If pipes are already created, hci_dev_up will fail. + * Doing a clear all pipe is a bad idea because: + * - It does useless EEPROM cycling + * - It might cause issue for secure elements support + * (such as removing connectivity or APDU reader pipe) + * A better approach on ST21NFCB is to: + * - get a pipe list for each host. + * (eg: ST21NFCB_HOST_CONTROLLER_ID for now). + * (TODO Later on UICC HOST and eSE HOST) + * - get pipe information + * - match retrieved pipe list in st21nfcb_gates + * ST21NFCB_DEVICE_MGNT_GATE is a proprietary gate + * with ST21NFCB_DEVICE_MGNT_PIPE. + * Pipe can be closed and need to be open. + */ + r = nci_hci_connect_gate(ndev, ST21NFCB_HOST_CONTROLLER_ID, + ST21NFCB_DEVICE_MGNT_GATE, + ST21NFCB_DEVICE_MGNT_PIPE); + if (r < 0) + goto free_info; + + /* Get pipe list */ + r = nci_hci_send_cmd(ndev, ST21NFCB_DEVICE_MGNT_GATE, + ST21NFCB_DM_GETINFO, pipe_list, sizeof(pipe_list), + &skb_pipe_list); + if (r < 0) + goto free_info; + + /* Complete the existing gate_pipe table */ + for (i = 0; i < skb_pipe_list->len; i++) { + pipe_info[2] = skb_pipe_list->data[i]; + r = nci_hci_send_cmd(ndev, ST21NFCB_DEVICE_MGNT_GATE, + ST21NFCB_DM_GETINFO, pipe_info, + sizeof(pipe_info), &skb_pipe_info); + + if (r) + continue; + + /* + * Match pipe ID and gate ID + * Output format from ST21NFC_DM_GETINFO is: + * - pipe state (1byte) + * - source hid (1byte) + * - source gid (1byte) + * - destination hid (1byte) + * - destination gid (1byte) + */ + dm_pipe_info = (struct st21nfcb_pipe_info *)skb_pipe_info->data; + if (dm_pipe_info->dst_gate_id == ST21NFCB_APDU_READER_GATE && + dm_pipe_info->src_host_id != ST21NFCB_ESE_HOST_ID) { + pr_err("Unexpected apdu_reader pipe on host %x\n", + dm_pipe_info->src_host_id); + continue; + } + + for (j = 0; (j < ARRAY_SIZE(st21nfcb_gates)) && + (st21nfcb_gates[j].gate != dm_pipe_info->dst_gate_id); j++) + ; + + if (j < ARRAY_SIZE(st21nfcb_gates) && + st21nfcb_gates[j].gate == dm_pipe_info->dst_gate_id && + ST21NFCB_DM_IS_PIPE_OPEN(dm_pipe_info->pipe_state)) { + st21nfcb_gates[j].pipe = pipe_info[2]; + + ndev->hci_dev->gate2pipe[st21nfcb_gates[j].gate] = + st21nfcb_gates[j].pipe; + ndev->hci_dev->pipes[st21nfcb_gates[j].pipe].gate = + st21nfcb_gates[j].gate; + ndev->hci_dev->pipes[st21nfcb_gates[j].pipe].host = + dm_pipe_info->src_host_id; + } + } + + memcpy(ndev->hci_dev->init_data.gates, st21nfcb_gates, + sizeof(st21nfcb_gates)); + +free_info: + kfree_skb(skb_pipe_info); + kfree_skb(skb_pipe_list); + return r; +} +EXPORT_SYMBOL_GPL(st21nfcb_hci_load_session); + +static void st21nfcb_hci_admin_event_received(struct nci_dev *ndev, + u8 event, struct sk_buff *skb) +{ + struct st21nfcb_nci_info *info = 
nci_get_drvdata(ndev); + + switch (event) { + case ST21NFCB_EVT_HOT_PLUG: + if (info->se_info.se_active) { + if (!ST21NFCB_EVT_HOT_PLUG_IS_INHIBITED(skb)) { + del_timer_sync(&info->se_info.se_active_timer); + info->se_info.se_active = false; + complete(&info->se_info.req_completion); + } else { + mod_timer(&info->se_info.se_active_timer, + jiffies + + msecs_to_jiffies(ST21NFCB_SE_TO_PIPES)); + } + } + break; + } +} + +static int st21nfcb_hci_apdu_reader_event_received(struct nci_dev *ndev, + u8 event, + struct sk_buff *skb) +{ + int r = 0; + struct st21nfcb_nci_info *info = nci_get_drvdata(ndev); + + pr_debug("apdu reader gate event: %x\n", event); + + switch (event) { + case ST21NFCB_EVT_TRANSMIT_DATA: + del_timer_sync(&info->se_info.bwi_timer); + info->se_info.bwi_active = false; + info->se_info.cb(info->se_info.cb_context, + skb->data, skb->len, 0); + break; + case ST21NFCB_EVT_WTX_REQUEST: + mod_timer(&info->se_info.bwi_timer, jiffies + + msecs_to_jiffies(info->se_info.wt_timeout)); + break; + } + + kfree_skb(skb); + return r; +} + +/* + * Returns: + * <= 0: driver handled the event, skb consumed + * 1: driver does not handle the event, please do standard processing + */ +static int st21nfcb_hci_connectivity_event_received(struct nci_dev *ndev, + u8 host, u8 event, + struct sk_buff *skb) +{ + int r = 0; + struct device *dev = &ndev->nfc_dev->dev; + struct nfc_evt_transaction *transaction; + + pr_debug("connectivity gate event: %x\n", event); + + switch (event) { + case ST21NFCB_EVT_CONNECTIVITY: + + break; + case ST21NFCB_EVT_TRANSACTION: + if (skb->len < NFC_MIN_AID_LENGTH + 2 && + skb->data[0] != NFC_EVT_TRANSACTION_AID_TAG) + return -EPROTO; + + transaction = (struct nfc_evt_transaction *)devm_kzalloc(dev, + skb->len - 2, GFP_KERNEL); + + transaction->aid_len = skb->data[1]; + memcpy(transaction->aid, &skb->data[2], skb->data[1]); + + if (skb->data[transaction->aid_len + 2] != + NFC_EVT_TRANSACTION_PARAMS_TAG) + return -EPROTO; + + transaction->params_len = skb->data[transaction->aid_len + 3]; + memcpy(transaction->params, skb->data + + transaction->aid_len + 4, transaction->params_len); + + r = nfc_se_transaction(ndev->nfc_dev, host, transaction); + default: + return 1; + } + kfree_skb(skb); + return r; +} + +void st21nfcb_hci_event_received(struct nci_dev *ndev, u8 pipe, + u8 event, struct sk_buff *skb) +{ + u8 gate = ndev->hci_dev->pipes[pipe].gate; + u8 host = ndev->hci_dev->pipes[pipe].host; + + switch (gate) { + case NCI_HCI_ADMIN_GATE: + st21nfcb_hci_admin_event_received(ndev, event, skb); + break; + case ST21NFCB_APDU_READER_GATE: + st21nfcb_hci_apdu_reader_event_received(ndev, event, skb); + break; + case ST21NFCB_CONNECTIVITY_GATE: + st21nfcb_hci_connectivity_event_received(ndev, host, event, + skb); + break; + } +} +EXPORT_SYMBOL_GPL(st21nfcb_hci_event_received); + + +void st21nfcb_hci_cmd_received(struct nci_dev *ndev, u8 pipe, u8 cmd, + struct sk_buff *skb) +{ + struct st21nfcb_nci_info *info = nci_get_drvdata(ndev); + u8 gate = ndev->hci_dev->pipes[pipe].gate; + + pr_debug("cmd: %x\n", cmd); + + switch (cmd) { + case NCI_HCI_ANY_OPEN_PIPE: + if (gate != ST21NFCB_APDU_READER_GATE && + ndev->hci_dev->pipes[pipe].host != ST21NFCB_UICC_HOST_ID) + ndev->hci_dev->count_pipes++; + + if (ndev->hci_dev->count_pipes == + ndev->hci_dev->expected_pipes) { + del_timer_sync(&info->se_info.se_active_timer); + info->se_info.se_active = false; + ndev->hci_dev->count_pipes = 0; + complete(&info->se_info.req_completion); + } + break; + } +} +EXPORT_SYMBOL_GPL(st21nfcb_hci_cmd_received); 
+ +/* + * Remarks: On some early st21nfcb firmware, nci_nfcee_mode_set(0) + * is rejected + */ +static int st21nfcb_nci_control_se(struct nci_dev *ndev, u8 se_idx, + u8 state) +{ + struct st21nfcb_nci_info *info = nci_get_drvdata(ndev); + int r; + struct sk_buff *sk_host_list; + u8 host_id; + + switch (se_idx) { + case ST21NFCB_UICC_HOST_ID: + ndev->hci_dev->count_pipes = 0; + ndev->hci_dev->expected_pipes = ST21NFCB_SE_COUNT_PIPE_UICC; + break; + case ST21NFCB_ESE_HOST_ID: + ndev->hci_dev->count_pipes = 0; + ndev->hci_dev->expected_pipes = ST21NFCB_SE_COUNT_PIPE_EMBEDDED; + break; + default: + return -EINVAL; + } + + /* + * Wait for an EVT_HOT_PLUG in order to + * retrieve a relevant host list. + */ + reinit_completion(&info->se_info.req_completion); + r = nci_nfcee_mode_set(ndev, se_idx, NCI_NFCEE_ENABLE); + if (r != NCI_STATUS_OK) + return r; + + mod_timer(&info->se_info.se_active_timer, jiffies + + msecs_to_jiffies(ST21NFCB_SE_TO_HOT_PLUG)); + info->se_info.se_active = true; + + /* Ignore return value and check in any case the host_list */ + wait_for_completion_interruptible(&info->se_info.req_completion); + + /* There might be some "collision" after receiving a HOT_PLUG event + * This may cause the CLF to not answer to the next hci command. + * There is no possible synchronization to prevent this. + * Adding a small delay is the only way to solve the issue. + */ + usleep_range(3000, 5000); + + r = nci_hci_get_param(ndev, NCI_HCI_ADMIN_GATE, + NCI_HCI_ADMIN_PARAM_HOST_LIST, &sk_host_list); + if (r != NCI_HCI_ANY_OK) + return r; + + host_id = sk_host_list->data[sk_host_list->len - 1]; + kfree_skb(sk_host_list); + if (state == ST21NFCB_SE_MODE_ON && host_id == se_idx) + return se_idx; + else if (state == ST21NFCB_SE_MODE_OFF && host_id != se_idx) + return se_idx; + + return -1; +} + +int st21nfcb_nci_disable_se(struct nci_dev *ndev, u32 se_idx) +{ + int r; + + pr_debug("st21nfcb_nci_disable_se\n"); + + if (se_idx == NFC_SE_EMBEDDED) { + r = nci_hci_send_event(ndev, ST21NFCB_APDU_READER_GATE, + ST21NFCB_EVT_SE_END_OF_APDU_TRANSFER, NULL, 0); + if (r < 0) + return r; + } + + return 0; +} +EXPORT_SYMBOL_GPL(st21nfcb_nci_disable_se); + +int st21nfcb_nci_enable_se(struct nci_dev *ndev, u32 se_idx) +{ + int r; + + pr_debug("st21nfcb_nci_enable_se\n"); + + if (se_idx == ST21NFCB_HCI_HOST_ID_ESE) { + r = nci_hci_send_event(ndev, ST21NFCB_APDU_READER_GATE, + ST21NFCB_EVT_SE_SOFT_RESET, NULL, 0); + if (r < 0) + return r; + } + + return 0; +} +EXPORT_SYMBOL_GPL(st21nfcb_nci_enable_se); + +static int st21nfcb_hci_network_init(struct nci_dev *ndev) +{ + struct core_conn_create_dest_spec_params *dest_params; + struct dest_spec_params spec_params; + struct nci_conn_info *conn_info; + int r, dev_num; + + dest_params = + kzalloc(sizeof(struct core_conn_create_dest_spec_params) + + sizeof(struct dest_spec_params), GFP_KERNEL); + if (dest_params == NULL) { + r = -ENOMEM; + goto exit; + } + + dest_params->type = NCI_DESTINATION_SPECIFIC_PARAM_NFCEE_TYPE; + dest_params->length = sizeof(struct dest_spec_params); + spec_params.id = ndev->hci_dev->nfcee_id; + spec_params.protocol = NCI_NFCEE_INTERFACE_HCI_ACCESS; + memcpy(dest_params->value, &spec_params, sizeof(struct dest_spec_params)); + r = nci_core_conn_create(ndev, NCI_DESTINATION_NFCEE, 1, + sizeof(struct core_conn_create_dest_spec_params) + + sizeof(struct dest_spec_params), + dest_params); + if (r != NCI_STATUS_OK) + goto free_dest_params; + + conn_info = ndev->hci_dev->conn_info; + if (!conn_info) + goto free_dest_params; + + 
memcpy(ndev->hci_dev->init_data.gates, st21nfcb_gates, + sizeof(st21nfcb_gates)); + + /* + * Session id must include the driver name + i2c bus addr + * persistent info to discriminate 2 identical chips + */ + dev_num = find_first_zero_bit(dev_mask, ST21NFCB_NUM_DEVICES); + if (dev_num >= ST21NFCB_NUM_DEVICES) { + r = -ENODEV; + goto free_dest_params; + } + + scnprintf(ndev->hci_dev->init_data.session_id, + sizeof(ndev->hci_dev->init_data.session_id), + "%s%2x", "ST21BH", dev_num); + + r = nci_hci_dev_session_init(ndev); + if (r != NCI_HCI_ANY_OK) + goto exit; + + r = nci_nfcee_mode_set(ndev, ndev->hci_dev->conn_info->id, + NCI_NFCEE_ENABLE); + if (r != NCI_STATUS_OK) + goto exit; + + return 0; + +free_dest_params: + kfree(dest_params); + +exit: + return r; +} + +int st21nfcb_nci_discover_se(struct nci_dev *ndev) +{ + u8 param[2]; + int r; + int se_count = 0; + + pr_debug("st21nfcb_nci_discover_se\n"); + + r = st21nfcb_hci_network_init(ndev); + if (r != 0) + return r; + + param[0] = ST21NFCB_UICC_HOST_ID; + param[1] = ST21NFCB_HCI_HOST_ID_ESE; + r = nci_hci_set_param(ndev, NCI_HCI_ADMIN_GATE, + NCI_HCI_ADMIN_PARAM_WHITELIST, + param, sizeof(param)); + if (r != NCI_HCI_ANY_OK) + return r; + + r = st21nfcb_nci_control_se(ndev, ST21NFCB_UICC_HOST_ID, + ST21NFCB_SE_MODE_ON); + if (r == ST21NFCB_UICC_HOST_ID) { + nfc_add_se(ndev->nfc_dev, ST21NFCB_UICC_HOST_ID, NFC_SE_UICC); + se_count++; + } + + /* Try to enable eSE in order to check availability */ + r = st21nfcb_nci_control_se(ndev, ST21NFCB_HCI_HOST_ID_ESE, + ST21NFCB_SE_MODE_ON); + if (r == ST21NFCB_HCI_HOST_ID_ESE) { + nfc_add_se(ndev->nfc_dev, ST21NFCB_HCI_HOST_ID_ESE, + NFC_SE_EMBEDDED); + se_count++; + st21nfcb_se_get_atr(ndev); + } + + return !se_count; +} +EXPORT_SYMBOL_GPL(st21nfcb_nci_discover_se); + +int st21nfcb_nci_se_io(struct nci_dev *ndev, u32 se_idx, + u8 *apdu, size_t apdu_length, + se_io_cb_t cb, void *cb_context) +{ + struct st21nfcb_nci_info *info = nci_get_drvdata(ndev); + + pr_debug("\n"); + + switch (se_idx) { + case ST21NFCB_HCI_HOST_ID_ESE: + info->se_info.cb = cb; + info->se_info.cb_context = cb_context; + mod_timer(&info->se_info.bwi_timer, jiffies + + msecs_to_jiffies(info->se_info.wt_timeout)); + info->se_info.bwi_active = true; + return nci_hci_send_event(ndev, ST21NFCB_APDU_READER_GATE, + ST21NFCB_EVT_TRANSMIT_DATA, apdu, + apdu_length); + default: + return -ENODEV; + } +} +EXPORT_SYMBOL(st21nfcb_nci_se_io); + +static void st21nfcb_se_wt_timeout(unsigned long data) +{ + /* + * No answer from the secure element + * within the defined timeout. + * Let's send a reset request as recovery procedure. + * According to the situation, we first try to send a software reset + * to the secure element. If the next command is still not + * answering in time, we send to the CLF a secure element hardware + * reset request. 
+ */ + /* hardware reset managed through VCC_UICC_OUT power supply */ + u8 param = 0x01; + struct st21nfcb_nci_info *info = (struct st21nfcb_nci_info *) data; + + pr_debug("\n"); + + info->se_info.bwi_active = false; + + if (!info->se_info.xch_error) { + info->se_info.xch_error = true; + nci_hci_send_event(info->ndlc->ndev, ST21NFCB_APDU_READER_GATE, + ST21NFCB_EVT_SE_SOFT_RESET, NULL, 0); + } else { + info->se_info.xch_error = false; + nci_hci_send_event(info->ndlc->ndev, ST21NFCB_DEVICE_MGNT_GATE, + ST21NFCB_EVT_SE_HARD_RESET, ¶m, 1); + } + info->se_info.cb(info->se_info.cb_context, NULL, 0, -ETIME); +} + +static void st21nfcb_se_activation_timeout(unsigned long data) +{ + struct st21nfcb_nci_info *info = (struct st21nfcb_nci_info *) data; + + pr_debug("\n"); + + info->se_info.se_active = false; + + complete(&info->se_info.req_completion); +} + +int st21nfcb_se_init(struct nci_dev *ndev) +{ + struct st21nfcb_nci_info *info = nci_get_drvdata(ndev); + + init_completion(&info->se_info.req_completion); + /* initialize timers */ + init_timer(&info->se_info.bwi_timer); + info->se_info.bwi_timer.data = (unsigned long)info; + info->se_info.bwi_timer.function = st21nfcb_se_wt_timeout; + info->se_info.bwi_active = false; + + init_timer(&info->se_info.se_active_timer); + info->se_info.se_active_timer.data = (unsigned long)info; + info->se_info.se_active_timer.function = + st21nfcb_se_activation_timeout; + info->se_info.se_active = false; + + info->se_info.xch_error = false; + + info->se_info.wt_timeout = + ST21NFCB_BWI_TO_TIMEOUT(ST21NFCB_ATR_DEFAULT_BWI); + + return 0; +} +EXPORT_SYMBOL(st21nfcb_se_init); + +void st21nfcb_se_deinit(struct nci_dev *ndev) +{ + struct st21nfcb_nci_info *info = nci_get_drvdata(ndev); + + if (info->se_info.bwi_active) + del_timer_sync(&info->se_info.bwi_timer); + if (info->se_info.se_active) + del_timer_sync(&info->se_info.se_active_timer); + + info->se_info.se_active = false; + info->se_info.bwi_active = false; +} +EXPORT_SYMBOL(st21nfcb_se_deinit); + diff --git a/drivers/nfc/st21nfcb/st21nfcb_se.h b/drivers/nfc/st21nfcb/st21nfcb_se.h new file mode 100644 index 000000000000..52a323872bea --- /dev/null +++ b/drivers/nfc/st21nfcb/st21nfcb_se.h @@ -0,0 +1,61 @@ +/* + * NCI based Driver for STMicroelectronics NFC Chip + * + * Copyright (C) 2014 STMicroelectronics SAS. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see <http://www.gnu.org/licenses/>. + */ +#ifndef __LOCAL_ST21NFCB_SE_H_ +#define __LOCAL_ST21NFCB_SE_H_ + +/* + * ref ISO7816-3 chap 8.1. the initial character TS is followed by a + * sequence of at most 32 characters. 
+ */ +#define ST21NFCB_ESE_MAX_LENGTH 33 +#define ST21NFCB_HCI_HOST_ID_ESE 0xc0 + +struct st21nfcb_se_info { + u8 atr[ST21NFCB_ESE_MAX_LENGTH]; + struct completion req_completion; + + struct timer_list bwi_timer; + int wt_timeout; /* in msecs */ + bool bwi_active; + + struct timer_list se_active_timer; + bool se_active; + + bool xch_error; + + se_io_cb_t cb; + void *cb_context; +}; + +int st21nfcb_se_init(struct nci_dev *ndev); +void st21nfcb_se_deinit(struct nci_dev *ndev); + +int st21nfcb_nci_discover_se(struct nci_dev *ndev); +int st21nfcb_nci_enable_se(struct nci_dev *ndev, u32 se_idx); +int st21nfcb_nci_disable_se(struct nci_dev *ndev, u32 se_idx); +int st21nfcb_nci_se_io(struct nci_dev *ndev, u32 se_idx, + u8 *apdu, size_t apdu_length, + se_io_cb_t cb, void *cb_context); +int st21nfcb_hci_load_session(struct nci_dev *ndev); +void st21nfcb_hci_event_received(struct nci_dev *ndev, u8 pipe, + u8 event, struct sk_buff *skb); +void st21nfcb_hci_cmd_received(struct nci_dev *ndev, u8 pipe, u8 cmd, + struct sk_buff *skb); + + +#endif /* __LOCAL_ST21NFCB_NCI_H_ */ diff --git a/include/net/nfc/nci.h b/include/net/nfc/nci.h index e7257a4653b4..a2f2f3d3196d 100644 --- a/include/net/nfc/nci.h +++ b/include/net/nfc/nci.h @@ -62,6 +62,25 @@ #define NCI_STATUS_NFCEE_PROTOCOL_ERROR 0xc2 #define NCI_STATUS_NFCEE_TIMEOUT_ERROR 0xc3 +/* NFCEE Interface/Protocols */ +#define NCI_NFCEE_INTERFACE_APDU 0x00 +#define NCI_NFCEE_INTERFACE_HCI_ACCESS 0x01 +#define NCI_NFCEE_INTERFACE_TYPE3_CMD_SET 0x02 +#define NCI_NFCEE_INTERFACE_TRANSPARENT 0x03 + +/* Destination type */ +#define NCI_DESTINATION_NFCC_LOOPBACK 0x01 +#define NCI_DESTINATION_REMOTE_NFC_ENDPOINT 0x02 +#define NCI_DESTINATION_NFCEE 0x03 + +/* Destination-specific parameters type */ +#define NCI_DESTINATION_SPECIFIC_PARAM_RF_TYPE 0x00 +#define NCI_DESTINATION_SPECIFIC_PARAM_NFCEE_TYPE 0x01 + +/* NFCEE Discovery Action */ +#define NCI_NFCEE_DISCOVERY_ACTION_DISABLE 0x00 +#define NCI_NFCEE_DISCOVERY_ACTION_ENABLE 0x01 + /* NCI RF Technology and Mode */ #define NCI_NFC_A_PASSIVE_POLL_MODE 0x00 #define NCI_NFC_B_PASSIVE_POLL_MODE 0x01 @@ -224,6 +243,28 @@ struct nci_core_set_config_cmd { struct set_config_param param; /* support 1 param per cmd is enough */ } __packed; +#define NCI_OP_CORE_CONN_CREATE_CMD nci_opcode_pack(NCI_GID_CORE, 0x04) +#define DEST_SPEC_PARAMS_ID_INDEX 0 +#define DEST_SPEC_PARAMS_PROTOCOL_INDEX 1 +struct dest_spec_params { + __u8 id; + __u8 protocol; +} __packed; + +struct core_conn_create_dest_spec_params { + __u8 type; + __u8 length; + __u8 value[0]; +} __packed; + +struct nci_core_conn_create_cmd { + __u8 destination_type; + __u8 number_destination_params; + struct core_conn_create_dest_spec_params params[0]; +} __packed; + +#define NCI_OP_CORE_CONN_CLOSE_CMD nci_opcode_pack(NCI_GID_CORE, 0x05) + #define NCI_OP_RF_DISCOVER_MAP_CMD nci_opcode_pack(NCI_GID_RF_MGMT, 0x00) struct disc_map_config { __u8 rf_protocol; @@ -260,6 +301,19 @@ struct nci_rf_deactivate_cmd { __u8 type; } __packed; +#define NCI_OP_NFCEE_DISCOVER_CMD nci_opcode_pack(NCI_GID_NFCEE_MGMT, 0x00) +struct nci_nfcee_discover_cmd { + __u8 discovery_action; +} __packed; + +#define NCI_OP_NFCEE_MODE_SET_CMD nci_opcode_pack(NCI_GID_NFCEE_MGMT, 0x01) +#define NCI_NFCEE_DISABLE 0x00 +#define NCI_NFCEE_ENABLE 0x01 +struct nci_nfcee_mode_set_cmd { + __u8 nfcee_id; + __u8 nfcee_mode; +} __packed; + /* ----------------------- */ /* ---- NCI Responses ---- */ /* ----------------------- */ @@ -295,6 +349,16 @@ struct nci_core_set_config_rsp { __u8 params_id[0]; /* variable 
size array */ } __packed; +#define NCI_OP_CORE_CONN_CREATE_RSP nci_opcode_pack(NCI_GID_CORE, 0x04) +struct nci_core_conn_create_rsp { + __u8 status; + __u8 max_ctrl_pkt_payload_len; + __u8 credits_cnt; + __u8 conn_id; +} __packed; + +#define NCI_OP_CORE_CONN_CLOSE_RSP nci_opcode_pack(NCI_GID_CORE, 0x05) + #define NCI_OP_RF_DISCOVER_MAP_RSP nci_opcode_pack(NCI_GID_RF_MGMT, 0x00) #define NCI_OP_RF_DISCOVER_RSP nci_opcode_pack(NCI_GID_RF_MGMT, 0x03) @@ -303,6 +367,13 @@ struct nci_core_set_config_rsp { #define NCI_OP_RF_DEACTIVATE_RSP nci_opcode_pack(NCI_GID_RF_MGMT, 0x06) +#define NCI_OP_NFCEE_DISCOVER_RSP nci_opcode_pack(NCI_GID_NFCEE_MGMT, 0x00) +struct nci_nfcee_discover_rsp { + __u8 status; + __u8 num_nfcee; +} __packed; + +#define NCI_OP_NFCEE_MODE_SET_RSP nci_opcode_pack(NCI_GID_NFCEE_MGMT, 0x01) /* --------------------------- */ /* ---- NCI Notifications ---- */ /* --------------------------- */ @@ -430,4 +501,30 @@ struct nci_rf_deactivate_ntf { __u8 reason; } __packed; +#define NCI_OP_RF_NFCEE_ACTION_NTF nci_opcode_pack(NCI_GID_RF_MGMT, 0x09) +struct nci_rf_nfcee_action_ntf { + __u8 nfcee_id; + __u8 trigger; + __u8 supported_data_length; + __u8 supported_data[0]; +} __packed; + +#define NCI_OP_NFCEE_DISCOVER_NTF nci_opcode_pack(NCI_GID_NFCEE_MGMT, 0x00) +struct nci_nfcee_supported_protocol { + __u8 num_protocol; + __u8 supported_protocol[0]; +} __packed; + +struct nci_nfcee_information_tlv { + __u8 num_tlv; + __u8 information_tlv[0]; +} __packed; + +struct nci_nfcee_discover_ntf { + __u8 nfcee_id; + __u8 nfcee_status; + struct nci_nfcee_supported_protocol supported_protocols; + struct nci_nfcee_information_tlv information_tlv; +} __packed; + #endif /* __NCI_H */ diff --git a/include/net/nfc/nci_core.h b/include/net/nfc/nci_core.h index 9e51bb4d841e..ff87f8611fa3 100644 --- a/include/net/nfc/nci_core.h +++ b/include/net/nfc/nci_core.h @@ -78,15 +78,107 @@ struct nci_ops { int (*se_io)(struct nci_dev *ndev, u32 se_idx, u8 *apdu, size_t apdu_length, se_io_cb_t cb, void *cb_context); + int (*hci_load_session)(struct nci_dev *ndev); + void (*hci_event_received)(struct nci_dev *ndev, u8 pipe, u8 event, + struct sk_buff *skb); + void (*hci_cmd_received)(struct nci_dev *ndev, u8 pipe, u8 cmd, + struct sk_buff *skb); }; #define NCI_MAX_SUPPORTED_RF_INTERFACES 4 #define NCI_MAX_DISCOVERED_TARGETS 10 +#define NCI_MAX_NUM_NFCEE 255 +#define NCI_MAX_CONN_ID 7 + +struct nci_conn_info { + struct list_head list; + __u8 id; /* can be an RF Discovery ID or an NFCEE ID */ + __u8 conn_id; + __u8 max_pkt_payload_len; + + atomic_t credits_cnt; + __u8 initial_num_credits; + + data_exchange_cb_t data_exchange_cb; + void *data_exchange_cb_context; + + struct sk_buff *rx_skb; +}; + +#define NCI_INVALID_CONN_ID 0x80 + +#define NCI_HCI_ANY_OPEN_PIPE 0x03 + +/* Gates */ +#define NCI_HCI_ADMIN_GATE 0x00 +#define NCI_HCI_LINK_MGMT_GATE 0x06 + +/* Pipes */ +#define NCI_HCI_LINK_MGMT_PIPE 0x00 +#define NCI_HCI_ADMIN_PIPE 0x01 + +/* Generic responses */ +#define NCI_HCI_ANY_OK 0x00 +#define NCI_HCI_ANY_E_NOT_CONNECTED 0x01 +#define NCI_HCI_ANY_E_CMD_PAR_UNKNOWN 0x02 +#define NCI_HCI_ANY_E_NOK 0x03 +#define NCI_HCI_ANY_E_PIPES_FULL 0x04 +#define NCI_HCI_ANY_E_REG_PAR_UNKNOWN 0x05 +#define NCI_HCI_ANY_E_PIPE_NOT_OPENED 0x06 +#define NCI_HCI_ANY_E_CMD_NOT_SUPPORTED 0x07 +#define NCI_HCI_ANY_E_INHIBITED 0x08 +#define NCI_HCI_ANY_E_TIMEOUT 0x09 +#define NCI_HCI_ANY_E_REG_ACCESS_DENIED 0x0a +#define NCI_HCI_ANY_E_PIPE_ACCESS_DENIED 0x0b + +#define NCI_HCI_DO_NOT_OPEN_PIPE 0x81 +#define NCI_HCI_INVALID_PIPE 0x80 +#define 
NCI_HCI_INVALID_GATE 0xFF +#define NCI_HCI_INVALID_HOST 0x80 + +#define NCI_HCI_MAX_CUSTOM_GATES 50 +#define NCI_HCI_MAX_PIPES 127 + +struct nci_hci_gate { + u8 gate; + u8 pipe; + u8 dest_host; +} __packed; + +struct nci_hci_pipe { + u8 gate; + u8 host; +} __packed; + +struct nci_hci_init_data { + u8 gate_count; + struct nci_hci_gate gates[NCI_HCI_MAX_CUSTOM_GATES]; + char session_id[9]; +}; + +#define NCI_HCI_MAX_GATES 256 + +struct nci_hci_dev { + u8 nfcee_id; + struct nci_dev *ndev; + struct nci_conn_info *conn_info; + + struct nci_hci_init_data init_data; + struct nci_hci_pipe pipes[NCI_HCI_MAX_PIPES]; + u8 gate2pipe[NCI_HCI_MAX_GATES]; + int expected_pipes; + int count_pipes; + + struct sk_buff_head rx_hcp_frags; + struct work_struct msg_rx_work; + struct sk_buff_head msg_rx_queue; +}; /* NCI Core structures */ struct nci_dev { struct nfc_dev *nfc_dev; struct nci_ops *ops; + struct nci_hci_dev *hci_dev; int tx_headroom; int tx_tailroom; @@ -95,7 +187,10 @@ struct nci_dev { unsigned long flags; atomic_t cmd_cnt; - atomic_t credits_cnt; + __u8 cur_conn_id; + + struct list_head conn_info_list; + struct nci_conn_info *rf_conn_info; struct timer_list cmd_timer; struct timer_list data_timer; @@ -141,13 +236,10 @@ struct nci_dev { __u8 manufact_id; __u32 manufact_specific_info; - /* received during NCI_OP_RF_INTF_ACTIVATED_NTF */ - __u8 max_data_pkt_payload_size; - __u8 initial_num_credits; + /* Save RF Discovery ID or NFCEE ID under conn_create */ + __u8 cur_id; /* stored during nci_data_exchange */ - data_exchange_cb_t data_exchange_cb; - void *data_exchange_cb_context; struct sk_buff *rx_data_reassembly; /* stored during intf_activated_ntf */ @@ -163,9 +255,36 @@ struct nci_dev *nci_allocate_device(struct nci_ops *ops, void nci_free_device(struct nci_dev *ndev); int nci_register_device(struct nci_dev *ndev); void nci_unregister_device(struct nci_dev *ndev); +int nci_request(struct nci_dev *ndev, + void (*req)(struct nci_dev *ndev, + unsigned long opt), + unsigned long opt, __u32 timeout); int nci_recv_frame(struct nci_dev *ndev, struct sk_buff *skb); int nci_set_config(struct nci_dev *ndev, __u8 id, size_t len, __u8 *val); +int nci_nfcee_discover(struct nci_dev *ndev, u8 action); +int nci_nfcee_mode_set(struct nci_dev *ndev, u8 nfcee_id, u8 nfcee_mode); +int nci_core_conn_create(struct nci_dev *ndev, u8 destination_type, + u8 number_destination_params, + size_t params_len, + struct core_conn_create_dest_spec_params *params); +int nci_core_conn_close(struct nci_dev *ndev, u8 conn_id); + +struct nci_hci_dev *nci_hci_allocate(struct nci_dev *ndev); +int nci_hci_send_event(struct nci_dev *ndev, u8 gate, u8 event, + const u8 *param, size_t param_len); +int nci_hci_send_cmd(struct nci_dev *ndev, u8 gate, + u8 cmd, const u8 *param, size_t param_len, + struct sk_buff **skb); +int nci_hci_open_pipe(struct nci_dev *ndev, u8 pipe); +int nci_hci_connect_gate(struct nci_dev *ndev, u8 dest_host, + u8 dest_gate, u8 pipe); +int nci_hci_set_param(struct nci_dev *ndev, u8 gate, u8 idx, + const u8 *param, size_t param_len); +int nci_hci_get_param(struct nci_dev *ndev, u8 gate, u8 idx, + struct sk_buff **skb); +int nci_hci_dev_session_init(struct nci_dev *ndev); + static inline struct sk_buff *nci_skb_alloc(struct nci_dev *ndev, unsigned int len, gfp_t how) @@ -200,7 +319,9 @@ void nci_rx_data_packet(struct nci_dev *ndev, struct sk_buff *skb); int nci_send_cmd(struct nci_dev *ndev, __u16 opcode, __u8 plen, void *payload); int nci_send_data(struct nci_dev *ndev, __u8 conn_id, struct sk_buff *skb); void 
nci_data_exchange_complete(struct nci_dev *ndev, struct sk_buff *skb, - int err); + __u8 conn_id, int err); +void nci_hci_data_received_cb(void *context, struct sk_buff *skb, int err); + void nci_clear_target_list(struct nci_dev *ndev); /* ----- NCI requests ----- */ @@ -209,6 +330,8 @@ void nci_clear_target_list(struct nci_dev *ndev); #define NCI_REQ_CANCELED 2 void nci_req_complete(struct nci_dev *ndev, int result); +struct nci_conn_info *nci_get_conn_info_by_conn_id(struct nci_dev *ndev, + int conn_id); /* ----- NCI status code ----- */ int nci_to_errno(__u8 code); diff --git a/include/net/nfc/nfc.h b/include/net/nfc/nfc.h index 12adb817c27a..73190e65d5c1 100644 --- a/include/net/nfc/nfc.h +++ b/include/net/nfc/nfc.h @@ -135,6 +135,31 @@ struct nfc_se { u16 state; }; +/** + * nfc_evt_transaction - A struct for NFC secure element event transaction. + * + * @aid: The application identifier triggering the event + * + * @aid_len: The application identifier length [5:16] + * + * @params: The application parameters transmitted during the transaction + * + * @params_len: The applications parameters length [0:255] + * + */ +#define NFC_MIN_AID_LENGTH 5 +#define NFC_MAX_AID_LENGTH 16 +#define NFC_MAX_PARAMS_LENGTH 255 + +#define NFC_EVT_TRANSACTION_AID_TAG 0x81 +#define NFC_EVT_TRANSACTION_PARAMS_TAG 0x82 +struct nfc_evt_transaction { + u32 aid_len; + u8 aid[NFC_MAX_AID_LENGTH]; + u8 params_len; + u8 params[NFC_MAX_PARAMS_LENGTH]; +} __packed; + struct nfc_genl_data { u32 poll_req_portid; struct mutex genl_data_mutex; @@ -262,6 +287,8 @@ int nfc_tm_data_received(struct nfc_dev *dev, struct sk_buff *skb); void nfc_driver_failure(struct nfc_dev *dev, int err); +int nfc_se_transaction(struct nfc_dev *dev, u8 se_idx, + struct nfc_evt_transaction *evt_transaction); int nfc_add_se(struct nfc_dev *dev, u32 se_idx, u16 type); int nfc_remove_se(struct nfc_dev *dev, u32 se_idx); struct nfc_se *nfc_find_se(struct nfc_dev *dev, u32 se_idx); diff --git a/include/uapi/linux/nfc.h b/include/uapi/linux/nfc.h index 8119255feae4..c1e2e63cf9b5 100644 --- a/include/uapi/linux/nfc.h +++ b/include/uapi/linux/nfc.h @@ -183,6 +183,7 @@ enum nfc_attrs { NFC_ATTR_SE_APDU, NFC_ATTR_TARGET_ISO15693_DSFID, NFC_ATTR_TARGET_ISO15693_UID, + NFC_ATTR_SE_PARAMS, /* private: internal use only */ __NFC_ATTR_AFTER_LAST }; diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h index 7a8785a99243..bbd49a0c46c7 100644 --- a/include/uapi/linux/openvswitch.h +++ b/include/uapi/linux/openvswitch.h @@ -599,6 +599,12 @@ struct ovs_action_hash { * @OVS_ACTION_ATTR_SET: Replaces the contents of an existing header. The * single nested %OVS_KEY_ATTR_* attribute specifies a header to modify and its * value. + * @OVS_ACTION_ATTR_SET_MASKED: Replaces the contents of an existing header. A + * nested %OVS_KEY_ATTR_* attribute specifies a header to modify, its value, + * and a mask. For every bit set in the mask, the corresponding bit value + * is copied from the value to the packet header field, rest of the bits are + * left unchanged. The non-masked value bits must be passed in as zeroes. + * Masking is not supported for the %OVS_KEY_ATTR_TUNNEL attribute. * @OVS_ACTION_ATTR_PUSH_VLAN: Push a new outermost 802.1Q header onto the * packet. * @OVS_ACTION_ATTR_POP_VLAN: Pop the outermost 802.1Q header off the packet. @@ -617,6 +623,9 @@ struct ovs_action_hash { * Only a single header can be set with a single %OVS_ACTION_ATTR_SET. Not all * fields within a header are modifiable, e.g. 
the IPv4 protocol and fragment * type may not be changed. + * + * @OVS_ACTION_ATTR_SET_TO_MASKED: Kernel internal masked set action translated + * from the @OVS_ACTION_ATTR_SET. */ enum ovs_action_attr { @@ -631,8 +640,19 @@ enum ovs_action_attr { OVS_ACTION_ATTR_HASH, /* struct ovs_action_hash. */ OVS_ACTION_ATTR_PUSH_MPLS, /* struct ovs_action_push_mpls. */ OVS_ACTION_ATTR_POP_MPLS, /* __be16 ethertype. */ + OVS_ACTION_ATTR_SET_MASKED, /* One nested OVS_KEY_ATTR_* including + * data immediately followed by a mask. + * The data must be zero for the unmasked + * bits. */ + + __OVS_ACTION_ATTR_MAX, /* Nothing past this will be accepted + * from userspace. */ - __OVS_ACTION_ATTR_MAX +#ifdef __KERNEL__ + OVS_ACTION_ATTR_SET_TO_MASKED, /* Kernel module internal masked + * set action converted from + * OVS_ACTION_ATTR_SET. */ +#endif }; #define OVS_ACTION_ATTR_MAX (__OVS_ACTION_ATTR_MAX - 1) diff --git a/lib/rhashtable.c b/lib/rhashtable.c index 057919164e23..e96fc00208bc 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c @@ -1,7 +1,7 @@ /* * Resizable, Scalable, Concurrent Hash Table * - * Copyright (c) 2014 Thomas Graf <[email protected]> + * Copyright (c) 2014-2015 Thomas Graf <[email protected]> * Copyright (c) 2008-2014 Patrick McHardy <[email protected]> * * Based on the following paper: @@ -34,12 +34,17 @@ enum { RHT_LOCK_NORMAL, RHT_LOCK_NESTED, - RHT_LOCK_NESTED2, }; /* The bucket lock is selected based on the hash and protects mutations * on a group of hash buckets. * + * A maximum of tbl->size/2 bucket locks is allocated. This ensures that + * a single lock always covers both buckets which may both contains + * entries which link to the same bucket of the old table during resizing. + * This allows to simplify the locking as locking the bucket in both + * tables during resize always guarantee protection. + * * IMPORTANT: When holding the bucket lock of both the old and new table * during expansions and shrinking, the old bucket lock must always be * acquired first. @@ -49,26 +54,6 @@ static spinlock_t *bucket_lock(const struct bucket_table *tbl, u32 hash) return &tbl->locks[hash & tbl->locks_mask]; } -#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT)) -#define ASSERT_BUCKET_LOCK(TBL, HASH) \ - BUG_ON(!lockdep_rht_bucket_is_held(TBL, HASH)) - -#ifdef CONFIG_PROVE_LOCKING -int lockdep_rht_mutex_is_held(struct rhashtable *ht) -{ - return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1; -} -EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held); - -int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash) -{ - spinlock_t *lock = bucket_lock(tbl, hash); - - return (debug_locks) ? 
lockdep_is_held(lock) : 1; -} -EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held); -#endif - static void *rht_obj(const struct rhashtable *ht, const struct rhash_head *he) { return (void *) he - ht->p.head_offset; @@ -94,13 +79,7 @@ static u32 obj_raw_hashfn(const struct rhashtable *ht, const void *ptr) static u32 key_hashfn(struct rhashtable *ht, const void *key, u32 len) { - struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht); - u32 hash; - - hash = ht->p.hashfn(key, len, ht->p.hash_rnd); - hash >>= HASH_RESERVED_SPACE; - - return rht_bucket_index(tbl, hash); + return ht->p.hashfn(key, len, ht->p.hash_rnd) >> HASH_RESERVED_SPACE; } static u32 head_hashfn(const struct rhashtable *ht, @@ -110,6 +89,77 @@ static u32 head_hashfn(const struct rhashtable *ht, return rht_bucket_index(tbl, obj_raw_hashfn(ht, rht_obj(ht, he))); } +#ifdef CONFIG_PROVE_LOCKING +static void debug_dump_buckets(const struct rhashtable *ht, + const struct bucket_table *tbl) +{ + struct rhash_head *he; + unsigned int i, hash; + + for (i = 0; i < tbl->size; i++) { + pr_warn(" [Bucket %d] ", i); + rht_for_each_rcu(he, tbl, i) { + hash = head_hashfn(ht, tbl, he); + pr_cont("[hash = %#x, lock = %p] ", + hash, bucket_lock(tbl, hash)); + } + pr_cont("\n"); + } + +} + +static void debug_dump_table(struct rhashtable *ht, + const struct bucket_table *tbl, + unsigned int hash) +{ + struct bucket_table *old_tbl, *future_tbl; + + pr_emerg("BUG: lock for hash %#x in table %p not held\n", + hash, tbl); + + rcu_read_lock(); + future_tbl = rht_dereference_rcu(ht->future_tbl, ht); + old_tbl = rht_dereference_rcu(ht->tbl, ht); + if (future_tbl != old_tbl) { + pr_warn("Future table %p (size: %zd)\n", + future_tbl, future_tbl->size); + debug_dump_buckets(ht, future_tbl); + } + + pr_warn("Table %p (size: %zd)\n", old_tbl, old_tbl->size); + debug_dump_buckets(ht, old_tbl); + + rcu_read_unlock(); +} + +#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT)) +#define ASSERT_BUCKET_LOCK(HT, TBL, HASH) \ + do { \ + if (unlikely(!lockdep_rht_bucket_is_held(TBL, HASH))) { \ + debug_dump_table(HT, TBL, HASH); \ + BUG(); \ + } \ + } while (0) + +int lockdep_rht_mutex_is_held(struct rhashtable *ht) +{ + return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1; +} +EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held); + +int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash) +{ + spinlock_t *lock = bucket_lock(tbl, hash); + + return (debug_locks) ? 
lockdep_is_held(lock) : 1; +} +EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held); +#else +#define ASSERT_RHT_MUTEX(HT) +#define ASSERT_BUCKET_LOCK(HT, TBL, HASH) +#endif + + static struct rhash_head __rcu **bucket_tail(struct bucket_table *tbl, u32 n) { struct rhash_head __rcu **pprev; @@ -134,8 +184,8 @@ static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl) nr_pcpus = min_t(unsigned int, nr_pcpus, 32UL); size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul); - /* Never allocate more than one lock per bucket */ - size = min_t(unsigned int, size, tbl->size); + /* Never allocate more than 0.5 locks per bucket */ + size = min_t(unsigned int, size, tbl->size >> 1); if (sizeof(spinlock_t) != 0) { #ifdef CONFIG_NUMA @@ -217,25 +267,48 @@ bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size) } EXPORT_SYMBOL_GPL(rht_shrink_below_30); -static void hashtable_chain_unzip(const struct rhashtable *ht, +static void lock_buckets(struct bucket_table *new_tbl, + struct bucket_table *old_tbl, unsigned int hash) + __acquires(old_bucket_lock) +{ + spin_lock_bh(bucket_lock(old_tbl, hash)); + if (new_tbl != old_tbl) + spin_lock_bh_nested(bucket_lock(new_tbl, hash), + RHT_LOCK_NESTED); +} + +static void unlock_buckets(struct bucket_table *new_tbl, + struct bucket_table *old_tbl, unsigned int hash) + __releases(old_bucket_lock) +{ + if (new_tbl != old_tbl) + spin_unlock_bh(bucket_lock(new_tbl, hash)); + spin_unlock_bh(bucket_lock(old_tbl, hash)); +} + +/** + * Unlink entries on bucket which hash to different bucket. + * + * Returns true if no more work needs to be performed on the bucket. + */ +static bool hashtable_chain_unzip(struct rhashtable *ht, const struct bucket_table *new_tbl, struct bucket_table *old_tbl, size_t old_hash) { struct rhash_head *he, *p, *next; - spinlock_t *new_bucket_lock, *new_bucket_lock2 = NULL; unsigned int new_hash, new_hash2; - ASSERT_BUCKET_LOCK(old_tbl, old_hash); + ASSERT_BUCKET_LOCK(ht, old_tbl, old_hash); /* Old bucket empty, no work needed. */ p = rht_dereference_bucket(old_tbl->buckets[old_hash], old_tbl, old_hash); if (rht_is_a_nulls(p)) - return; + return false; - new_hash = new_hash2 = head_hashfn(ht, new_tbl, p); - new_bucket_lock = bucket_lock(new_tbl, new_hash); + new_hash = head_hashfn(ht, new_tbl, p); + ASSERT_BUCKET_LOCK(ht, new_tbl, new_hash); /* Advance the old bucket pointer one or more times until it * reaches a node that doesn't hash to the same bucket as the @@ -243,22 +316,14 @@ static void hashtable_chain_unzip(const struct rhashtable *ht, */ rht_for_each_continue(he, p->next, old_tbl, old_hash) { new_hash2 = head_hashfn(ht, new_tbl, he); + ASSERT_BUCKET_LOCK(ht, new_tbl, new_hash2); + if (new_hash != new_hash2) break; p = he; } rcu_assign_pointer(old_tbl->buckets[old_hash], p->next); - spin_lock_bh_nested(new_bucket_lock, RHT_LOCK_NESTED); - - /* If we have encountered an entry that maps to a different bucket in - * the new table, lock down that bucket as well as we might cut off - * the end of the chain. - */ - new_bucket_lock2 = bucket_lock(new_tbl, new_hash); - if (new_bucket_lock != new_bucket_lock2) - spin_lock_bh_nested(new_bucket_lock2, RHT_LOCK_NESTED2); - /* Find the subsequent node which does hash to the same * bucket as node P, or NULL if no such node exists. 
*/ @@ -277,21 +342,18 @@ static void hashtable_chain_unzip(const struct rhashtable *ht, */ rcu_assign_pointer(p->next, next); - if (new_bucket_lock != new_bucket_lock2) - spin_unlock_bh(new_bucket_lock2); - spin_unlock_bh(new_bucket_lock); + p = rht_dereference_bucket(old_tbl->buckets[old_hash], old_tbl, + old_hash); + + return !rht_is_a_nulls(p); } -static void link_old_to_new(struct bucket_table *new_tbl, +static void link_old_to_new(struct rhashtable *ht, struct bucket_table *new_tbl, unsigned int new_hash, struct rhash_head *entry) { - spinlock_t *new_bucket_lock; + ASSERT_BUCKET_LOCK(ht, new_tbl, new_hash); - new_bucket_lock = bucket_lock(new_tbl, new_hash); - - spin_lock_bh_nested(new_bucket_lock, RHT_LOCK_NESTED); rcu_assign_pointer(*bucket_tail(new_tbl, new_hash), entry); - spin_unlock_bh(new_bucket_lock); } /** @@ -314,7 +376,6 @@ int rhashtable_expand(struct rhashtable *ht) { struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht); struct rhash_head *he; - spinlock_t *old_bucket_lock; unsigned int new_hash, old_hash; bool complete = false; @@ -344,24 +405,16 @@ int rhashtable_expand(struct rhashtable *ht) */ for (new_hash = 0; new_hash < new_tbl->size; new_hash++) { old_hash = rht_bucket_index(old_tbl, new_hash); - old_bucket_lock = bucket_lock(old_tbl, old_hash); - - spin_lock_bh(old_bucket_lock); + lock_buckets(new_tbl, old_tbl, new_hash); rht_for_each(he, old_tbl, old_hash) { if (head_hashfn(ht, new_tbl, he) == new_hash) { - link_old_to_new(new_tbl, new_hash, he); + link_old_to_new(ht, new_tbl, new_hash, he); break; } } - spin_unlock_bh(old_bucket_lock); + unlock_buckets(new_tbl, old_tbl, new_hash); } - /* Publish the new table pointer. Lookups may now traverse - * the new table, but they will not benefit from any - * additional efficiency until later steps unzip the buckets. - */ - rcu_assign_pointer(ht->tbl, new_tbl); - /* Unzip interleaved hash chains */ while (!complete && !ht->being_destroyed) { /* Wait for readers. All new readers will see the new @@ -376,21 +429,19 @@ int rhashtable_expand(struct rhashtable *ht) */ complete = true; for (old_hash = 0; old_hash < old_tbl->size; old_hash++) { - struct rhash_head *head; - - old_bucket_lock = bucket_lock(old_tbl, old_hash); - spin_lock_bh(old_bucket_lock); + lock_buckets(new_tbl, old_tbl, old_hash); - hashtable_chain_unzip(ht, new_tbl, old_tbl, old_hash); - head = rht_dereference_bucket(old_tbl->buckets[old_hash], - old_tbl, old_hash); - if (!rht_is_a_nulls(head)) + if (hashtable_chain_unzip(ht, new_tbl, old_tbl, + old_hash)) complete = false; - spin_unlock_bh(old_bucket_lock); + unlock_buckets(new_tbl, old_tbl, old_hash); } } + rcu_assign_pointer(ht->tbl, new_tbl); + synchronize_rcu(); + bucket_table_free(old_tbl); return 0; } @@ -415,7 +466,6 @@ EXPORT_SYMBOL_GPL(rhashtable_expand); int rhashtable_shrink(struct rhashtable *ht) { struct bucket_table *new_tbl, *tbl = rht_dereference(ht->tbl, ht); - spinlock_t *new_bucket_lock, *old_bucket_lock1, *old_bucket_lock2; unsigned int new_hash; ASSERT_RHT_MUTEX(ht); @@ -433,36 +483,17 @@ int rhashtable_shrink(struct rhashtable *ht) * always divide the size in half when shrinking, each bucket * in the new table maps to exactly two buckets in the old * table. - * - * As removals can occur concurrently on the old table, we need - * to lock down both matching buckets in the old table. 
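lock_buckets()/unlock_buckets() above fix a single global order, old-table bucket first, then the new-table bucket, so expand, shrink, insert and remove can never deadlock against one another; the nested annotation only tells lockdep that the second lock is a distinct instance of the same class. A rough pthread analogue of the pattern (the lock-array size and names here are invented for the sketch):

    #include <pthread.h>

    #define NLOCKS 8  /* assumed power of two, as in alloc_bucket_locks() */

    struct table {
        unsigned int size;
        pthread_mutex_t locks[NLOCKS];
    };

    /* Same idea as bucket_lock(): the hash selects one lock from a small
     * array, so a single lock typically guards several buckets. */
    static pthread_mutex_t *bucket_lock(struct table *t, unsigned int hash)
    {
        return &t->locks[hash & (NLOCKS - 1)];
    }

    static void lock_buckets(struct table *new_tbl, struct table *old_tbl,
                             unsigned int hash)
    {
        pthread_mutex_lock(bucket_lock(old_tbl, hash));   /* always first */
        if (new_tbl != old_tbl)
            pthread_mutex_lock(bucket_lock(new_tbl, hash));
    }

    static void unlock_buckets(struct table *new_tbl, struct table *old_tbl,
                               unsigned int hash)
    {
        if (new_tbl != old_tbl)
            pthread_mutex_unlock(bucket_lock(new_tbl, hash));
        pthread_mutex_unlock(bucket_lock(old_tbl, hash)); /* reverse order */
    }

    int main(void)
    {
        struct table a = { .size = 16 }, b = { .size = 32 };
        int i;

        for (i = 0; i < NLOCKS; i++) {
            pthread_mutex_init(&a.locks[i], NULL);
            pthread_mutex_init(&b.locks[i], NULL);
        }
        lock_buckets(&b, &a, 0x2a);
        unlock_buckets(&b, &a, 0x2a);
        return 0;
    }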
*/ for (new_hash = 0; new_hash < new_tbl->size; new_hash++) { - old_bucket_lock1 = bucket_lock(tbl, new_hash); - old_bucket_lock2 = bucket_lock(tbl, new_hash + new_tbl->size); - new_bucket_lock = bucket_lock(new_tbl, new_hash); - - spin_lock_bh(old_bucket_lock1); - - /* Depending on the lock per buckets mapping, the bucket in - * the lower and upper region may map to the same lock. - */ - if (old_bucket_lock1 != old_bucket_lock2) { - spin_lock_bh_nested(old_bucket_lock2, RHT_LOCK_NESTED); - spin_lock_bh_nested(new_bucket_lock, RHT_LOCK_NESTED2); - } else { - spin_lock_bh_nested(new_bucket_lock, RHT_LOCK_NESTED); - } + lock_buckets(new_tbl, tbl, new_hash); rcu_assign_pointer(*bucket_tail(new_tbl, new_hash), tbl->buckets[new_hash]); + ASSERT_BUCKET_LOCK(ht, tbl, new_hash + new_tbl->size); rcu_assign_pointer(*bucket_tail(new_tbl, new_hash), tbl->buckets[new_hash + new_tbl->size]); - spin_unlock_bh(new_bucket_lock); - if (old_bucket_lock1 != old_bucket_lock2) - spin_unlock_bh(old_bucket_lock2); - spin_unlock_bh(old_bucket_lock1); + unlock_buckets(new_tbl, tbl, new_hash); } /* Publish the new, valid hash table */ @@ -521,8 +552,12 @@ static void rhashtable_wakeup_worker(struct rhashtable *ht) static void __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj, struct bucket_table *tbl, u32 hash) { - struct rhash_head *head = rht_dereference_bucket(tbl->buckets[hash], - tbl, hash); + struct rhash_head *head; + + hash = rht_bucket_index(tbl, hash); + head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash); + + ASSERT_BUCKET_LOCK(ht, tbl, hash); if (rht_is_a_nulls(head)) INIT_RHT_NULLS_HEAD(obj->next, ht, hash); @@ -553,19 +588,18 @@ static void __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj, */ void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj) { - struct bucket_table *tbl; - spinlock_t *lock; + struct bucket_table *tbl, *old_tbl; unsigned hash; rcu_read_lock(); tbl = rht_dereference_rcu(ht->future_tbl, ht); - hash = head_hashfn(ht, tbl, obj); - lock = bucket_lock(tbl, hash); + old_tbl = rht_dereference_rcu(ht->tbl, ht); + hash = obj_raw_hashfn(ht, rht_obj(ht, obj)); - spin_lock_bh(lock); + lock_buckets(tbl, old_tbl, hash); __rhashtable_insert(ht, obj, tbl, hash); - spin_unlock_bh(lock); + unlock_buckets(tbl, old_tbl, hash); rcu_read_unlock(); } @@ -588,21 +622,20 @@ EXPORT_SYMBOL_GPL(rhashtable_insert); */ bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj) { - struct bucket_table *tbl; + struct bucket_table *tbl, *new_tbl, *old_tbl; struct rhash_head __rcu **pprev; - struct rhash_head *he; - spinlock_t *lock; - unsigned int hash; + struct rhash_head *he, *he2; + unsigned int hash, new_hash; bool ret = false; rcu_read_lock(); - tbl = rht_dereference_rcu(ht->tbl, ht); - hash = head_hashfn(ht, tbl, obj); - - lock = bucket_lock(tbl, hash); - spin_lock_bh(lock); + old_tbl = rht_dereference_rcu(ht->tbl, ht); + tbl = new_tbl = rht_dereference_rcu(ht->future_tbl, ht); + new_hash = obj_raw_hashfn(ht, rht_obj(ht, obj)); + lock_buckets(new_tbl, old_tbl, new_hash); restart: + hash = rht_bucket_index(tbl, new_hash); pprev = &tbl->buckets[hash]; rht_for_each(he, tbl, hash) { if (he != obj) { @@ -610,8 +643,26 @@ restart: continue; } - rcu_assign_pointer(*pprev, obj->next); + ASSERT_BUCKET_LOCK(ht, tbl, hash); + + if (old_tbl->size > new_tbl->size && tbl == old_tbl && + !rht_is_a_nulls(obj->next) && + head_hashfn(ht, tbl, obj->next) != hash) { + rcu_assign_pointer(*pprev, (struct rhash_head *) rht_marker(ht, hash)); + } else if 
(unlikely(old_tbl->size < new_tbl->size && tbl == new_tbl)) { + rht_for_each_continue(he2, obj->next, tbl, hash) { + if (head_hashfn(ht, tbl, he2) == hash) { + rcu_assign_pointer(*pprev, he2); + goto found; + } + } + rcu_assign_pointer(*pprev, (struct rhash_head *) rht_marker(ht, hash)); + } else { + rcu_assign_pointer(*pprev, obj->next); + } + +found: ret = true; break; } @@ -621,18 +672,12 @@ restart: * resizing. Thus traversing both is fine and the added cost is * very rare. */ - if (tbl != rht_dereference_rcu(ht->future_tbl, ht)) { - spin_unlock_bh(lock); - - tbl = rht_dereference_rcu(ht->future_tbl, ht); - hash = head_hashfn(ht, tbl, obj); - - lock = bucket_lock(tbl, hash); - spin_lock_bh(lock); + if (tbl != old_tbl) { + tbl = old_tbl; goto restart; } - spin_unlock_bh(lock); + unlock_buckets(new_tbl, old_tbl, new_hash); if (ret) { atomic_dec(&ht->nelems); @@ -788,24 +833,17 @@ bool rhashtable_lookup_compare_insert(struct rhashtable *ht, void *arg) { struct bucket_table *new_tbl, *old_tbl; - spinlock_t *new_bucket_lock, *old_bucket_lock; - u32 new_hash, old_hash; + u32 new_hash; bool success = true; BUG_ON(!ht->p.key_len); rcu_read_lock(); - old_tbl = rht_dereference_rcu(ht->tbl, ht); - old_hash = head_hashfn(ht, old_tbl, obj); - old_bucket_lock = bucket_lock(old_tbl, old_hash); - spin_lock_bh(old_bucket_lock); - new_tbl = rht_dereference_rcu(ht->future_tbl, ht); - new_hash = head_hashfn(ht, new_tbl, obj); - new_bucket_lock = bucket_lock(new_tbl, new_hash); - if (unlikely(old_tbl != new_tbl)) - spin_lock_bh_nested(new_bucket_lock, RHT_LOCK_NESTED); + new_hash = obj_raw_hashfn(ht, rht_obj(ht, obj)); + + lock_buckets(new_tbl, old_tbl, new_hash); if (rhashtable_lookup_compare(ht, rht_obj(ht, obj) + ht->p.key_offset, compare, arg)) { @@ -816,10 +854,7 @@ bool rhashtable_lookup_compare_insert(struct rhashtable *ht, __rhashtable_insert(ht, obj, new_tbl, new_hash); exit: - if (unlikely(old_tbl != new_tbl)) - spin_unlock_bh(new_bucket_lock); - spin_unlock_bh(old_bucket_lock); - + unlock_buckets(new_tbl, old_tbl, new_hash); rcu_read_unlock(); return success; diff --git a/net/nfc/core.c b/net/nfc/core.c index 7f1b6351755c..cff3f1614ad4 100644 --- a/net/nfc/core.c +++ b/net/nfc/core.c @@ -932,6 +932,27 @@ int nfc_remove_se(struct nfc_dev *dev, u32 se_idx) } EXPORT_SYMBOL(nfc_remove_se); +int nfc_se_transaction(struct nfc_dev *dev, u8 se_idx, + struct nfc_evt_transaction *evt_transaction) +{ + int rc; + + pr_debug("transaction: %x\n", se_idx); + + device_lock(&dev->dev); + + if (!evt_transaction) { + rc = -EPROTO; + goto out; + } + + rc = nfc_genl_se_transaction(dev, se_idx, evt_transaction); +out: + device_unlock(&dev->dev); + return rc; +} +EXPORT_SYMBOL(nfc_se_transaction); + static void nfc_release(struct device *d) { struct nfc_dev *dev = to_nfc_dev(d); diff --git a/net/nfc/nci/Makefile b/net/nfc/nci/Makefile index 7aeedc43187d..7ed8949266cc 100644 --- a/net/nfc/nci/Makefile +++ b/net/nfc/nci/Makefile @@ -4,6 +4,6 @@ obj-$(CONFIG_NFC_NCI) += nci.o -nci-objs := core.o data.o lib.o ntf.o rsp.o +nci-objs := core.o data.o lib.o ntf.o rsp.o hci.o nci-$(CONFIG_NFC_NCI_SPI) += spi.o diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c index 51feb5e63008..9575a1892607 100644 --- a/net/nfc/nci/core.c +++ b/net/nfc/nci/core.c @@ -41,10 +41,28 @@ #include <net/nfc/nci_core.h> #include <linux/nfc.h> +struct core_conn_create_data { + int length; + struct nci_core_conn_create_cmd *cmd; +}; + static void nci_cmd_work(struct work_struct *work); static void nci_rx_work(struct work_struct *work); static void 
nci_tx_work(struct work_struct *work); +struct nci_conn_info *nci_get_conn_info_by_conn_id(struct nci_dev *ndev, + int conn_id) +{ + struct nci_conn_info *conn_info; + + list_for_each_entry(conn_info, &ndev->conn_info_list, list) { + if (conn_info->conn_id == conn_id) + return conn_info; + } + + return NULL; +} + /* ---- NCI requests ---- */ void nci_req_complete(struct nci_dev *ndev, int result) @@ -109,10 +127,10 @@ static int __nci_request(struct nci_dev *ndev, return rc; } -static inline int nci_request(struct nci_dev *ndev, - void (*req)(struct nci_dev *ndev, - unsigned long opt), - unsigned long opt, __u32 timeout) +inline int nci_request(struct nci_dev *ndev, + void (*req)(struct nci_dev *ndev, + unsigned long opt), + unsigned long opt, __u32 timeout) { int rc; @@ -456,6 +474,95 @@ int nci_set_config(struct nci_dev *ndev, __u8 id, size_t len, __u8 *val) } EXPORT_SYMBOL(nci_set_config); +static void nci_nfcee_discover_req(struct nci_dev *ndev, unsigned long opt) +{ + struct nci_nfcee_discover_cmd cmd; + __u8 action = opt; + + cmd.discovery_action = action; + + nci_send_cmd(ndev, NCI_OP_NFCEE_DISCOVER_CMD, 1, &cmd); +} + +int nci_nfcee_discover(struct nci_dev *ndev, u8 action) +{ + return nci_request(ndev, nci_nfcee_discover_req, action, + msecs_to_jiffies(NCI_CMD_TIMEOUT)); +} +EXPORT_SYMBOL(nci_nfcee_discover); + +static void nci_nfcee_mode_set_req(struct nci_dev *ndev, unsigned long opt) +{ + struct nci_nfcee_mode_set_cmd *cmd = + (struct nci_nfcee_mode_set_cmd *)opt; + + nci_send_cmd(ndev, NCI_OP_NFCEE_MODE_SET_CMD, + sizeof(struct nci_nfcee_mode_set_cmd), cmd); +} + +int nci_nfcee_mode_set(struct nci_dev *ndev, u8 nfcee_id, u8 nfcee_mode) +{ + struct nci_nfcee_mode_set_cmd cmd; + + cmd.nfcee_id = nfcee_id; + cmd.nfcee_mode = nfcee_mode; + + return nci_request(ndev, nci_nfcee_mode_set_req, (unsigned long)&cmd, + msecs_to_jiffies(NCI_CMD_TIMEOUT)); +} +EXPORT_SYMBOL(nci_nfcee_mode_set); + +static void nci_core_conn_create_req(struct nci_dev *ndev, unsigned long opt) +{ + struct core_conn_create_data *data = + (struct core_conn_create_data *)opt; + + nci_send_cmd(ndev, NCI_OP_CORE_CONN_CREATE_CMD, data->length, data->cmd); +} + +int nci_core_conn_create(struct nci_dev *ndev, u8 destination_type, + u8 number_destination_params, + size_t params_len, + struct core_conn_create_dest_spec_params *params) +{ + int r; + struct nci_core_conn_create_cmd *cmd; + struct core_conn_create_data data; + + data.length = params_len + sizeof(struct nci_core_conn_create_cmd); + cmd = kzalloc(data.length, GFP_KERNEL); + if (!cmd) + return -ENOMEM; + + cmd->destination_type = destination_type; + cmd->number_destination_params = number_destination_params; + memcpy(cmd->params, params, params_len); + + data.cmd = cmd; + ndev->cur_id = params->value[DEST_SPEC_PARAMS_ID_INDEX]; + + r = __nci_request(ndev, nci_core_conn_create_req, + (unsigned long)&data, + msecs_to_jiffies(NCI_CMD_TIMEOUT)); + kfree(cmd); + return r; +} +EXPORT_SYMBOL(nci_core_conn_create); + +static void nci_core_conn_close_req(struct nci_dev *ndev, unsigned long opt) +{ + __u8 conn_id = opt; + + nci_send_cmd(ndev, NCI_OP_CORE_CONN_CLOSE_CMD, 1, &conn_id); +} + +int nci_core_conn_close(struct nci_dev *ndev, u8 conn_id) +{ + return nci_request(ndev, nci_core_conn_close_req, conn_id, + msecs_to_jiffies(NCI_CMD_TIMEOUT)); +} +EXPORT_SYMBOL(nci_core_conn_close); + static int nci_set_local_general_bytes(struct nfc_dev *nfc_dev) { struct nci_dev *ndev = nfc_get_drvdata(nfc_dev); @@ -712,6 +819,11 @@ static int nci_transceive(struct nfc_dev 
*nfc_dev, struct nfc_target *target, { struct nci_dev *ndev = nfc_get_drvdata(nfc_dev); int rc; + struct nci_conn_info *conn_info; + + conn_info = ndev->rf_conn_info; + if (!conn_info) + return -EPROTO; pr_debug("target_idx %d, len %d\n", target->idx, skb->len); @@ -724,8 +836,8 @@ static int nci_transceive(struct nfc_dev *nfc_dev, struct nfc_target *target, return -EBUSY; /* store cb and context to be used on receiving data */ - ndev->data_exchange_cb = cb; - ndev->data_exchange_cb_context = cb_context; + conn_info->data_exchange_cb = cb; + conn_info->data_exchange_cb_context = cb_context; rc = nci_send_data(ndev, NCI_STATIC_RF_CONN_ID, skb); if (rc) @@ -768,10 +880,16 @@ static int nci_disable_se(struct nfc_dev *nfc_dev, u32 se_idx) static int nci_discover_se(struct nfc_dev *nfc_dev) { + int r; struct nci_dev *ndev = nfc_get_drvdata(nfc_dev); - if (ndev->ops->discover_se) + if (ndev->ops->discover_se) { + r = nci_nfcee_discover(ndev, NCI_NFCEE_DISCOVERY_ACTION_ENABLE); + if (r != NCI_STATUS_OK) + return -EPROTO; + return ndev->ops->discover_se(ndev); + } return 0; } @@ -807,7 +925,6 @@ static struct nfc_ops nci_nfc_ops = { }; /* ---- Interface to NCI drivers ---- */ - /** * nci_allocate_device - allocate a new nci device * @@ -842,13 +959,20 @@ struct nci_dev *nci_allocate_device(struct nci_ops *ops, tx_headroom + NCI_DATA_HDR_SIZE, tx_tailroom); if (!ndev->nfc_dev) - goto free_exit; + goto free_nci; + + ndev->hci_dev = nci_hci_allocate(ndev); + if (!ndev->hci_dev) + goto free_nfc; nfc_set_drvdata(ndev->nfc_dev, ndev); return ndev; -free_exit: +free_nfc: + kfree(ndev->nfc_dev); + +free_nci: kfree(ndev); return NULL; } @@ -913,6 +1037,7 @@ int nci_register_device(struct nci_dev *ndev) (unsigned long) ndev); mutex_init(&ndev->req_lock); + INIT_LIST_HEAD(&ndev->conn_info_list); rc = nfc_register_device(ndev->nfc_dev); if (rc) @@ -938,12 +1063,19 @@ EXPORT_SYMBOL(nci_register_device); */ void nci_unregister_device(struct nci_dev *ndev) { + struct nci_conn_info *conn_info, *n; + nci_close_device(ndev); destroy_workqueue(ndev->cmd_wq); destroy_workqueue(ndev->rx_wq); destroy_workqueue(ndev->tx_wq); + list_for_each_entry_safe(conn_info, n, &ndev->conn_info_list, list) { + list_del(&conn_info->list); + /* conn_info is allocated with devm_kzalloc */ + } + nfc_unregister_device(ndev->nfc_dev); } EXPORT_SYMBOL(nci_unregister_device); @@ -1027,20 +1159,25 @@ int nci_send_cmd(struct nci_dev *ndev, __u16 opcode, __u8 plen, void *payload) static void nci_tx_work(struct work_struct *work) { struct nci_dev *ndev = container_of(work, struct nci_dev, tx_work); + struct nci_conn_info *conn_info; struct sk_buff *skb; - pr_debug("credits_cnt %d\n", atomic_read(&ndev->credits_cnt)); + conn_info = nci_get_conn_info_by_conn_id(ndev, ndev->cur_conn_id); + if (!conn_info) + return; + + pr_debug("credits_cnt %d\n", atomic_read(&conn_info->credits_cnt)); /* Send queued tx data */ - while (atomic_read(&ndev->credits_cnt)) { + while (atomic_read(&conn_info->credits_cnt)) { skb = skb_dequeue(&ndev->tx_q); if (!skb) return; /* Check if data flow control is used */ - if (atomic_read(&ndev->credits_cnt) != + if (atomic_read(&conn_info->credits_cnt) != NCI_DATA_FLOW_CONTROL_NOT_USED) - atomic_dec(&ndev->credits_cnt); + atomic_dec(&conn_info->credits_cnt); pr_debug("NCI TX: MT=data, PBF=%d, conn_id=%d, plen=%d\n", nci_pbf(skb->data), @@ -1092,7 +1229,9 @@ static void nci_rx_work(struct work_struct *work) if (test_bit(NCI_DATA_EXCHANGE_TO, &ndev->flags)) { /* complete the data exchange transaction, if exists */ if 
(test_bit(NCI_DATA_EXCHANGE, &ndev->flags)) - nci_data_exchange_complete(ndev, NULL, -ETIMEDOUT); + nci_data_exchange_complete(ndev, NULL, + ndev->cur_conn_id, + -ETIMEDOUT); clear_bit(NCI_DATA_EXCHANGE_TO, &ndev->flags); } diff --git a/net/nfc/nci/data.c b/net/nfc/nci/data.c index a2de2a8cb00e..566466d90048 100644 --- a/net/nfc/nci/data.c +++ b/net/nfc/nci/data.c @@ -36,10 +36,20 @@ /* Complete data exchange transaction and forward skb to nfc core */ void nci_data_exchange_complete(struct nci_dev *ndev, struct sk_buff *skb, - int err) + __u8 conn_id, int err) { - data_exchange_cb_t cb = ndev->data_exchange_cb; - void *cb_context = ndev->data_exchange_cb_context; + struct nci_conn_info *conn_info; + data_exchange_cb_t cb; + void *cb_context; + + conn_info = nci_get_conn_info_by_conn_id(ndev, conn_id); + if (!conn_info) { + kfree_skb(skb); + goto exit; + } + + cb = conn_info->data_exchange_cb; + cb_context = conn_info->data_exchange_cb_context; pr_debug("len %d, err %d\n", skb ? skb->len : 0, err); @@ -48,9 +58,6 @@ void nci_data_exchange_complete(struct nci_dev *ndev, struct sk_buff *skb, clear_bit(NCI_DATA_EXCHANGE_TO, &ndev->flags); if (cb) { - ndev->data_exchange_cb = NULL; - ndev->data_exchange_cb_context = NULL; - /* forward skb to nfc core */ cb(cb_context, skb, err); } else if (skb) { @@ -60,6 +67,7 @@ void nci_data_exchange_complete(struct nci_dev *ndev, struct sk_buff *skb, kfree_skb(skb); } +exit: clear_bit(NCI_DATA_EXCHANGE, &ndev->flags); } @@ -85,6 +93,7 @@ static inline void nci_push_data_hdr(struct nci_dev *ndev, static int nci_queue_tx_data_frags(struct nci_dev *ndev, __u8 conn_id, struct sk_buff *skb) { + struct nci_conn_info *conn_info; int total_len = skb->len; unsigned char *data = skb->data; unsigned long flags; @@ -95,11 +104,17 @@ static int nci_queue_tx_data_frags(struct nci_dev *ndev, pr_debug("conn_id 0x%x, total_len %d\n", conn_id, total_len); + conn_info = nci_get_conn_info_by_conn_id(ndev, conn_id); + if (!conn_info) { + rc = -EPROTO; + goto free_exit; + } + __skb_queue_head_init(&frags_q); while (total_len) { frag_len = - min_t(int, total_len, ndev->max_data_pkt_payload_size); + min_t(int, total_len, conn_info->max_pkt_payload_len); skb_frag = nci_skb_alloc(ndev, (NCI_DATA_HDR_SIZE + frag_len), @@ -151,12 +166,19 @@ exit: /* Send NCI data */ int nci_send_data(struct nci_dev *ndev, __u8 conn_id, struct sk_buff *skb) { + struct nci_conn_info *conn_info; int rc = 0; pr_debug("conn_id 0x%x, plen %d\n", conn_id, skb->len); + conn_info = nci_get_conn_info_by_conn_id(ndev, conn_id); + if (!conn_info) { + rc = -EPROTO; + goto free_exit; + } + /* check if the packet need to be fragmented */ - if (skb->len <= ndev->max_data_pkt_payload_size) { + if (skb->len <= conn_info->max_pkt_payload_len) { /* no need to fragment packet */ nci_push_data_hdr(ndev, conn_id, skb, NCI_PBF_LAST); @@ -170,6 +192,7 @@ int nci_send_data(struct nci_dev *ndev, __u8 conn_id, struct sk_buff *skb) } } + ndev->cur_conn_id = conn_id; queue_work(ndev->tx_wq, &ndev->tx_work); goto exit; @@ -185,7 +208,7 @@ exit: static void nci_add_rx_data_frag(struct nci_dev *ndev, struct sk_buff *skb, - __u8 pbf, __u8 status) + __u8 pbf, __u8 conn_id, __u8 status) { int reassembly_len; int err = 0; @@ -229,16 +252,13 @@ static void nci_add_rx_data_frag(struct nci_dev *ndev, } exit: - if (ndev->nfc_dev->rf_mode == NFC_RF_INITIATOR) { - nci_data_exchange_complete(ndev, skb, err); - } else if (ndev->nfc_dev->rf_mode == NFC_RF_TARGET) { + if (ndev->nfc_dev->rf_mode == NFC_RF_TARGET) { /* Data received in Target mode, 
forward to nfc core */ err = nfc_tm_data_received(ndev->nfc_dev, skb); if (err) pr_err("unable to handle received data\n"); } else { - pr_err("rf mode unknown\n"); - kfree_skb(skb); + nci_data_exchange_complete(ndev, skb, conn_id, err); } } @@ -247,6 +267,8 @@ void nci_rx_data_packet(struct nci_dev *ndev, struct sk_buff *skb) { __u8 pbf = nci_pbf(skb->data); __u8 status = 0; + __u8 conn_id = nci_conn_id(skb->data); + struct nci_conn_info *conn_info; pr_debug("len %d\n", skb->len); @@ -255,6 +277,10 @@ void nci_rx_data_packet(struct nci_dev *ndev, struct sk_buff *skb) nci_conn_id(skb->data), nci_plen(skb->data)); + conn_info = nci_get_conn_info_by_conn_id(ndev, nci_conn_id(skb->data)); + if (!conn_info) + return; + /* strip the nci data header */ skb_pull(skb, NCI_DATA_HDR_SIZE); @@ -268,5 +294,5 @@ void nci_rx_data_packet(struct nci_dev *ndev, struct sk_buff *skb) skb_trim(skb, (skb->len - 1)); } - nci_add_rx_data_frag(ndev, skb, pbf, nci_to_errno(status)); + nci_add_rx_data_frag(ndev, skb, pbf, conn_id, nci_to_errno(status)); } diff --git a/net/nfc/nci/hci.c b/net/nfc/nci/hci.c new file mode 100644 index 000000000000..ed54ec533836 --- /dev/null +++ b/net/nfc/nci/hci.c @@ -0,0 +1,694 @@ +/* + * The NFC Controller Interface is the communication protocol between an + * NFC Controller (NFCC) and a Device Host (DH). + * This is the HCI over NCI implementation, as specified in the 10.2 + * section of the NCI 1.1 specification. + * + * Copyright (C) 2014 STMicroelectronics SAS. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see <http://www.gnu.org/licenses/>. 
+ * + */ + +#include <linux/skbuff.h> + +#include "../nfc.h" +#include <net/nfc/nci.h> +#include <net/nfc/nci_core.h> +#include <linux/nfc.h> + +struct nci_data { + u8 conn_id; + u8 pipe; + u8 cmd; + const u8 *data; + u32 data_len; +} __packed; + +struct nci_hci_create_pipe_params { + u8 src_gate; + u8 dest_host; + u8 dest_gate; +} __packed; + +struct nci_hci_create_pipe_resp { + u8 src_host; + u8 src_gate; + u8 dest_host; + u8 dest_gate; + u8 pipe; +} __packed; + +struct nci_hci_delete_pipe_noti { + u8 pipe; +} __packed; + +struct nci_hci_all_pipe_cleared_noti { + u8 host; +} __packed; + +struct nci_hcp_message { + u8 header; /* type -cmd,evt,rsp- + instruction */ + u8 data[]; +} __packed; + +struct nci_hcp_packet { + u8 header; /* cbit+pipe */ + struct nci_hcp_message message; +} __packed; + +#define NCI_HCI_ANY_SET_PARAMETER 0x01 +#define NCI_HCI_ANY_GET_PARAMETER 0x02 +#define NCI_HCI_ANY_CLOSE_PIPE 0x04 + +#define NCI_HFP_NO_CHAINING 0x80 + +#define NCI_NFCEE_ID_HCI 0x80 + +#define NCI_EVT_HOT_PLUG 0x03 + +#define NCI_HCI_ADMIN_PARAM_SESSION_IDENTITY 0x01 + +/* HCP headers */ +#define NCI_HCI_HCP_PACKET_HEADER_LEN 1 +#define NCI_HCI_HCP_MESSAGE_HEADER_LEN 1 +#define NCI_HCI_HCP_HEADER_LEN 2 + +/* HCP types */ +#define NCI_HCI_HCP_COMMAND 0x00 +#define NCI_HCI_HCP_EVENT 0x01 +#define NCI_HCI_HCP_RESPONSE 0x02 + +#define NCI_HCI_ADM_NOTIFY_PIPE_CREATED 0x12 +#define NCI_HCI_ADM_NOTIFY_PIPE_DELETED 0x13 +#define NCI_HCI_ADM_NOTIFY_ALL_PIPE_CLEARED 0x15 + +#define NCI_HCI_FRAGMENT 0x7f +#define NCI_HCP_HEADER(type, instr) ((((type) & 0x03) << 6) |\ + ((instr) & 0x3f)) + +#define NCI_HCP_MSG_GET_TYPE(header) ((header & 0xc0) >> 6) +#define NCI_HCP_MSG_GET_CMD(header) (header & 0x3f) +#define NCI_HCP_MSG_GET_PIPE(header) (header & 0x7f) + +/* HCI core */ +static void nci_hci_reset_pipes(struct nci_hci_dev *hdev) +{ + int i; + + for (i = 0; i < NCI_HCI_MAX_PIPES; i++) { + hdev->pipes[i].gate = NCI_HCI_INVALID_GATE; + hdev->pipes[i].host = NCI_HCI_INVALID_HOST; + } + memset(hdev->gate2pipe, NCI_HCI_INVALID_PIPE, sizeof(hdev->gate2pipe)); +} + +static void nci_hci_reset_pipes_per_host(struct nci_dev *ndev, u8 host) +{ + int i; + + for (i = 0; i < NCI_HCI_MAX_PIPES; i++) { + if (ndev->hci_dev->pipes[i].host == host) { + ndev->hci_dev->pipes[i].gate = NCI_HCI_INVALID_GATE; + ndev->hci_dev->pipes[i].host = NCI_HCI_INVALID_HOST; + } + } +} + +/* Fragment HCI data over NCI packet. + * NFC Forum NCI 10.2.2 Data Exchange: + * The payload of the Data Packets sent on the Logical Connection SHALL be + * valid HCP packets, as defined within [ETSI_102622]. Each Data Packet SHALL + * contain a single HCP packet. NCI Segmentation and Reassembly SHALL NOT be + * applied to Data Messages in either direction. The HCI fragmentation mechanism + * is used if required. 
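Worth noting before the send path that follows: the whole HCP wire format is two header bytes. Bit 7 of the packet byte is the chaining flag and the low seven bits are the pipe; the message byte packs a 2-bit type with a 6-bit instruction. A standalone sanity check of the macros just defined:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define NCI_HFP_NO_CHAINING 0x80
    #define NCI_HCI_FRAGMENT 0x7f
    #define NCI_HCP_HEADER(type, instr) ((((type) & 0x03) << 6) | \
                                         ((instr) & 0x3f))
    #define NCI_HCP_MSG_GET_TYPE(h) (((h) & 0xc0) >> 6)
    #define NCI_HCP_MSG_GET_CMD(h) ((h) & 0x3f)

    int main(void)
    {
        uint8_t pipe = 0x21, cmd = 0x01; /* e.g. ANY_SET_PARAMETER */
        uint8_t pkt_hdr = pipe | NCI_HFP_NO_CHAINING; /* last fragment */
        uint8_t msg_hdr = NCI_HCP_HEADER(0x00 /* command */, cmd);

        assert((pkt_hdr & NCI_HCI_FRAGMENT) == pipe);
        assert(NCI_HCP_MSG_GET_TYPE(msg_hdr) == 0x00);
        assert(NCI_HCP_MSG_GET_CMD(msg_hdr) == cmd);
        printf("pkt %#x msg %#x\n", pkt_hdr, msg_hdr);
        return 0;
    }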
+ */ +static int nci_hci_send_data(struct nci_dev *ndev, u8 pipe, + const u8 data_type, const u8 *data, + size_t data_len) +{ + struct nci_conn_info *conn_info; + struct sk_buff *skb; + int len, i, r; + u8 cb = pipe; + + conn_info = ndev->hci_dev->conn_info; + if (!conn_info) + return -EPROTO; + + skb = nci_skb_alloc(ndev, 2 + conn_info->max_pkt_payload_len + + NCI_DATA_HDR_SIZE, GFP_KERNEL); + if (!skb) + return -ENOMEM; + + skb_reserve(skb, 2 + NCI_DATA_HDR_SIZE); + *skb_push(skb, 1) = data_type; + + i = 0; + len = conn_info->max_pkt_payload_len; + + do { + /* If last packet add NCI_HFP_NO_CHAINING */ + if (i + conn_info->max_pkt_payload_len - + (skb->len + 1) >= data_len) { + cb |= NCI_HFP_NO_CHAINING; + len = data_len - i; + } else { + len = conn_info->max_pkt_payload_len - skb->len - 1; + } + + *skb_push(skb, 1) = cb; + + if (len > 0) + memcpy(skb_put(skb, len), data + i, len); + + r = nci_send_data(ndev, conn_info->conn_id, skb); + if (r < 0) + return r; + + i += len; + if (i < data_len) { + skb_trim(skb, 0); + skb_pull(skb, len); + } + } while (i < data_len); + + return i; +} + +static void nci_hci_send_data_req(struct nci_dev *ndev, unsigned long opt) +{ + struct nci_data *data = (struct nci_data *)opt; + + nci_hci_send_data(ndev, data->pipe, data->cmd, + data->data, data->data_len); +} + +int nci_hci_send_event(struct nci_dev *ndev, u8 gate, u8 event, + const u8 *param, size_t param_len) +{ + u8 pipe = ndev->hci_dev->gate2pipe[gate]; + + if (pipe == NCI_HCI_INVALID_PIPE) + return -EADDRNOTAVAIL; + + return nci_hci_send_data(ndev, pipe, + NCI_HCP_HEADER(NCI_HCI_HCP_EVENT, event), + param, param_len); +} +EXPORT_SYMBOL(nci_hci_send_event); + +int nci_hci_send_cmd(struct nci_dev *ndev, u8 gate, u8 cmd, + const u8 *param, size_t param_len, + struct sk_buff **skb) +{ + struct nci_conn_info *conn_info; + struct nci_data data; + int r; + u8 pipe = ndev->hci_dev->gate2pipe[gate]; + + if (pipe == NCI_HCI_INVALID_PIPE) + return -EADDRNOTAVAIL; + + conn_info = ndev->hci_dev->conn_info; + if (!conn_info) + return -EPROTO; + + data.conn_id = conn_info->conn_id; + data.pipe = pipe; + data.cmd = NCI_HCP_HEADER(NCI_HCI_HCP_COMMAND, cmd); + data.data = param; + data.data_len = param_len; + + r = nci_request(ndev, nci_hci_send_data_req, (unsigned long)&data, + msecs_to_jiffies(NCI_DATA_TIMEOUT)); + + if (r == NCI_STATUS_OK) + *skb = conn_info->rx_skb; + + return r; +} +EXPORT_SYMBOL(nci_hci_send_cmd); + +static void nci_hci_event_received(struct nci_dev *ndev, u8 pipe, + u8 event, struct sk_buff *skb) +{ + if (ndev->ops->hci_event_received) + ndev->ops->hci_event_received(ndev, pipe, event, skb); +} + +static void nci_hci_cmd_received(struct nci_dev *ndev, u8 pipe, + u8 cmd, struct sk_buff *skb) +{ + u8 gate = ndev->hci_dev->pipes[pipe].gate; + u8 status = NCI_HCI_ANY_OK | ~NCI_HCI_FRAGMENT; + u8 dest_gate, new_pipe; + struct nci_hci_create_pipe_resp *create_info; + struct nci_hci_delete_pipe_noti *delete_info; + struct nci_hci_all_pipe_cleared_noti *cleared_info; + + pr_debug("from gate %x pipe %x cmd %x\n", gate, pipe, cmd); + + switch (cmd) { + case NCI_HCI_ADM_NOTIFY_PIPE_CREATED: + if (skb->len != 5) { + status = NCI_HCI_ANY_E_NOK; + goto exit; + } + create_info = (struct nci_hci_create_pipe_resp *)skb->data; + dest_gate = create_info->dest_gate; + new_pipe = create_info->pipe; + + /* Save the new created pipe and bind with local gate, + * the description for skb->data[3] is destination gate id + * but since we received this cmd from host controller, we + * are the destination and it is our 
local gate + */ + ndev->hci_dev->gate2pipe[dest_gate] = new_pipe; + ndev->hci_dev->pipes[new_pipe].gate = dest_gate; + ndev->hci_dev->pipes[new_pipe].host = + create_info->src_host; + break; + case NCI_HCI_ANY_OPEN_PIPE: + /* If the pipe is not created report an error */ + if (gate == NCI_HCI_INVALID_GATE) { + status = NCI_HCI_ANY_E_NOK; + goto exit; + } + break; + case NCI_HCI_ADM_NOTIFY_PIPE_DELETED: + if (skb->len != 1) { + status = NCI_HCI_ANY_E_NOK; + goto exit; + } + delete_info = (struct nci_hci_delete_pipe_noti *)skb->data; + + ndev->hci_dev->pipes[delete_info->pipe].gate = + NCI_HCI_INVALID_GATE; + ndev->hci_dev->pipes[delete_info->pipe].host = + NCI_HCI_INVALID_HOST; + break; + case NCI_HCI_ADM_NOTIFY_ALL_PIPE_CLEARED: + if (skb->len != 1) { + status = NCI_HCI_ANY_E_NOK; + goto exit; + } + + cleared_info = + (struct nci_hci_all_pipe_cleared_noti *)skb->data; + nci_hci_reset_pipes_per_host(ndev, cleared_info->host); + break; + default: + pr_debug("Discarded unknown cmd %x to gate %x\n", cmd, gate); + break; + } + + if (ndev->ops->hci_cmd_received) + ndev->ops->hci_cmd_received(ndev, pipe, cmd, skb); + +exit: + nci_hci_send_data(ndev, pipe, status, NULL, 0); + + kfree_skb(skb); +} + +static void nci_hci_resp_received(struct nci_dev *ndev, u8 pipe, + u8 result, struct sk_buff *skb) +{ + struct nci_conn_info *conn_info; + u8 status = result; + + if (result != NCI_HCI_ANY_OK) + goto exit; + + conn_info = ndev->hci_dev->conn_info; + if (!conn_info) { + status = NCI_STATUS_REJECTED; + goto exit; + } + + conn_info->rx_skb = skb; + +exit: + nci_req_complete(ndev, status); +} + +/* Receive hcp message for pipe, with type and cmd. + * skb contains optional message data only. + */ +static void nci_hci_hcp_message_rx(struct nci_dev *ndev, u8 pipe, + u8 type, u8 instruction, struct sk_buff *skb) +{ + switch (type) { + case NCI_HCI_HCP_RESPONSE: + nci_hci_resp_received(ndev, pipe, instruction, skb); + break; + case NCI_HCI_HCP_COMMAND: + nci_hci_cmd_received(ndev, pipe, instruction, skb); + break; + case NCI_HCI_HCP_EVENT: + nci_hci_event_received(ndev, pipe, instruction, skb); + break; + default: + pr_err("UNKNOWN MSG Type %d, instruction=%d\n", + type, instruction); + kfree_skb(skb); + break; + } + + nci_req_complete(ndev, 0); +} + +static void nci_hci_msg_rx_work(struct work_struct *work) +{ + struct nci_hci_dev *hdev = + container_of(work, struct nci_hci_dev, msg_rx_work); + struct sk_buff *skb; + struct nci_hcp_message *message; + u8 pipe, type, instruction; + + while ((skb = skb_dequeue(&hdev->msg_rx_queue)) != NULL) { + pipe = skb->data[0]; + skb_pull(skb, NCI_HCI_HCP_PACKET_HEADER_LEN); + message = (struct nci_hcp_message *)skb->data; + type = NCI_HCP_MSG_GET_TYPE(message->header); + instruction = NCI_HCP_MSG_GET_CMD(message->header); + skb_pull(skb, NCI_HCI_HCP_MESSAGE_HEADER_LEN); + + nci_hci_hcp_message_rx(hdev->ndev, pipe, + type, instruction, skb); + } +} + +void nci_hci_data_received_cb(void *context, + struct sk_buff *skb, int err) +{ + struct nci_dev *ndev = (struct nci_dev *)context; + struct nci_hcp_packet *packet; + u8 pipe, type, instruction; + struct sk_buff *hcp_skb; + struct sk_buff *frag_skb; + int msg_len; + + pr_debug("\n"); + + if (err) { + nci_req_complete(ndev, err); + return; + } + + packet = (struct nci_hcp_packet *)skb->data; + if ((packet->header & ~NCI_HCI_FRAGMENT) == 0) { + skb_queue_tail(&ndev->hci_dev->rx_hcp_frags, skb); + return; + } + + /* it's the last fragment. Does it need re-aggregation? 
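The reassembly that follows makes two walks over the queued fragments, one to add up payload sizes (each fragment minus its one-byte packet header) and one to copy. The same two-pass shape in plain C, with invented fragment contents; the kernel operates on sk_buff queues instead of arrays:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define HDR_LEN 1 /* NCI_HCI_HCP_PACKET_HEADER_LEN */

    struct frag { const unsigned char *data; size_t len; };

    /* Concatenate payloads, keeping a single copy of the packet header
     * (taken here from the first fragment for simplicity). */
    static unsigned char *reassemble(const struct frag *f, int n,
                                     size_t *out_len)
    {
        size_t msg_len = 0;
        unsigned char *buf;
        int i;

        for (i = 0; i < n; i++)
            msg_len += f[i].len - HDR_LEN;

        buf = malloc(HDR_LEN + msg_len);
        if (!buf)
            return NULL;
        buf[0] = f[0].data[0] & 0x7f; /* pipe, chaining bit cleared */

        *out_len = HDR_LEN;
        for (i = 0; i < n; i++) {
            memcpy(buf + *out_len, f[i].data + HDR_LEN,
                   f[i].len - HDR_LEN);
            *out_len += f[i].len - HDR_LEN;
        }
        return buf;
    }

    int main(void)
    {
        const unsigned char f1[] = { 0x21, 'h', 'e' };
        const unsigned char f2[] = { 0xa1, 'l', 'p' };
        struct frag frags[] = { { f1, sizeof(f1) }, { f2, sizeof(f2) } };
        size_t len;
        unsigned char *msg = reassemble(frags, 2, &len);

        if (msg)
            printf("%zu bytes, pipe %#x\n", len, msg[0]);
        free(msg);
        return 0;
    }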
*/ + if (skb_queue_len(&ndev->hci_dev->rx_hcp_frags)) { + pipe = packet->header & NCI_HCI_FRAGMENT; + skb_queue_tail(&ndev->hci_dev->rx_hcp_frags, skb); + + msg_len = 0; + skb_queue_walk(&ndev->hci_dev->rx_hcp_frags, frag_skb) { + msg_len += (frag_skb->len - + NCI_HCI_HCP_PACKET_HEADER_LEN); + } + + hcp_skb = nfc_alloc_recv_skb(NCI_HCI_HCP_PACKET_HEADER_LEN + + msg_len, GFP_KERNEL); + if (!hcp_skb) { + nci_req_complete(ndev, -ENOMEM); + return; + } + + *skb_put(hcp_skb, NCI_HCI_HCP_PACKET_HEADER_LEN) = pipe; + + skb_queue_walk(&ndev->hci_dev->rx_hcp_frags, frag_skb) { + msg_len = frag_skb->len - NCI_HCI_HCP_PACKET_HEADER_LEN; + memcpy(skb_put(hcp_skb, msg_len), frag_skb->data + + NCI_HCI_HCP_PACKET_HEADER_LEN, msg_len); + } + + skb_queue_purge(&ndev->hci_dev->rx_hcp_frags); + } else { + packet->header &= NCI_HCI_FRAGMENT; + hcp_skb = skb; + } + + /* if this is a response, dispatch immediately to + * unblock waiting cmd context. Otherwise, enqueue to dispatch + * in separate context where handler can also execute command. + */ + packet = (struct nci_hcp_packet *)hcp_skb->data; + type = NCI_HCP_MSG_GET_TYPE(packet->message.header); + if (type == NCI_HCI_HCP_RESPONSE) { + pipe = packet->header; + instruction = NCI_HCP_MSG_GET_CMD(packet->message.header); + skb_pull(hcp_skb, NCI_HCI_HCP_PACKET_HEADER_LEN + + NCI_HCI_HCP_MESSAGE_HEADER_LEN); + nci_hci_hcp_message_rx(ndev, pipe, type, instruction, hcp_skb); + } else { + skb_queue_tail(&ndev->hci_dev->msg_rx_queue, hcp_skb); + schedule_work(&ndev->hci_dev->msg_rx_work); + } +} + +int nci_hci_open_pipe(struct nci_dev *ndev, u8 pipe) +{ + struct nci_data data; + struct nci_conn_info *conn_info; + + conn_info = ndev->hci_dev->conn_info; + if (!conn_info) + return -EPROTO; + + data.conn_id = conn_info->conn_id; + data.pipe = pipe; + data.cmd = NCI_HCP_HEADER(NCI_HCI_HCP_COMMAND, + NCI_HCI_ANY_OPEN_PIPE); + data.data = NULL; + data.data_len = 0; + + return nci_request(ndev, nci_hci_send_data_req, + (unsigned long)&data, + msecs_to_jiffies(NCI_DATA_TIMEOUT)); +} +EXPORT_SYMBOL(nci_hci_open_pipe); + +int nci_hci_set_param(struct nci_dev *ndev, u8 gate, u8 idx, + const u8 *param, size_t param_len) +{ + struct nci_conn_info *conn_info; + struct nci_data data; + int r; + u8 *tmp; + u8 pipe = ndev->hci_dev->gate2pipe[gate]; + + pr_debug("idx=%d to gate %d\n", idx, gate); + + if (pipe == NCI_HCI_INVALID_PIPE) + return -EADDRNOTAVAIL; + + conn_info = ndev->hci_dev->conn_info; + if (!conn_info) + return -EPROTO; + + tmp = kmalloc(1 + param_len, GFP_KERNEL); + if (!tmp) + return -ENOMEM; + + *tmp = idx; + memcpy(tmp + 1, param, param_len); + + data.conn_id = conn_info->conn_id; + data.pipe = pipe; + data.cmd = NCI_HCP_HEADER(NCI_HCI_HCP_COMMAND, + NCI_HCI_ANY_SET_PARAMETER); + data.data = tmp; + data.data_len = param_len + 1; + + r = nci_request(ndev, nci_hci_send_data_req, + (unsigned long)&data, + msecs_to_jiffies(NCI_DATA_TIMEOUT)); + + kfree(tmp); + return r; +} +EXPORT_SYMBOL(nci_hci_set_param); + +int nci_hci_get_param(struct nci_dev *ndev, u8 gate, u8 idx, + struct sk_buff **skb) +{ + struct nci_conn_info *conn_info; + struct nci_data data; + int r; + u8 pipe = ndev->hci_dev->gate2pipe[gate]; + + pr_debug("idx=%d to gate %d\n", idx, gate); + + if (pipe == NCI_HCI_INVALID_PIPE) + return -EADDRNOTAVAIL; + + conn_info = ndev->hci_dev->conn_info; + if (!conn_info) + return -EPROTO; + + data.conn_id = conn_info->conn_id; + data.pipe = pipe; + data.cmd = NCI_HCP_HEADER(NCI_HCI_HCP_COMMAND, + NCI_HCI_ANY_GET_PARAMETER); + data.data = &idx; + data.data_len = 1; 
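All of these helpers funnel through nci_request() with a single unsigned long argument, so multi-field commands pass a pointer to a stack-allocated struct that is guaranteed to outlive the synchronous call. Stripped to its calling convention, the pattern looks like this (everything here is illustrative, not the kernel interface):

    #include <stdio.h>

    struct get_param_args { unsigned char pipe, idx; };

    /* The executor knows nothing about argument shapes; it just runs req(). */
    static int run_request(void (*req)(unsigned long opt), unsigned long opt)
    {
        req(opt);  /* in the kernel: queue the cmd, wait for completion */
        return 0;
    }

    static void get_param_req(unsigned long opt)
    {
        struct get_param_args *a = (struct get_param_args *)opt;

        printf("GET_PARAMETER idx %u on pipe %#x\n", a->idx, a->pipe);
    }

    int main(void)
    {
        struct get_param_args args = { .pipe = 0x01, .idx = 0x01 };

        /* Safe only because run_request() returns before args goes away. */
        return run_request(get_param_req, (unsigned long)&args);
    }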
+ + r = nci_request(ndev, nci_hci_send_data_req, (unsigned long)&data, + msecs_to_jiffies(NCI_DATA_TIMEOUT)); + + if (r == NCI_STATUS_OK) + *skb = conn_info->rx_skb; + + return r; +} +EXPORT_SYMBOL(nci_hci_get_param); + +int nci_hci_connect_gate(struct nci_dev *ndev, + u8 dest_host, u8 dest_gate, u8 pipe) +{ + int r; + + if (pipe == NCI_HCI_DO_NOT_OPEN_PIPE) + return 0; + + if (ndev->hci_dev->gate2pipe[dest_gate] != NCI_HCI_INVALID_PIPE) + return -EADDRINUSE; + + if (pipe != NCI_HCI_INVALID_PIPE) + goto open_pipe; + + switch (dest_gate) { + case NCI_HCI_LINK_MGMT_GATE: + pipe = NCI_HCI_LINK_MGMT_PIPE; + break; + case NCI_HCI_ADMIN_GATE: + pipe = NCI_HCI_ADMIN_PIPE; + break; + } + +open_pipe: + r = nci_hci_open_pipe(ndev, pipe); + if (r < 0) + return r; + + ndev->hci_dev->pipes[pipe].gate = dest_gate; + ndev->hci_dev->pipes[pipe].host = dest_host; + ndev->hci_dev->gate2pipe[dest_gate] = pipe; + + return 0; +} +EXPORT_SYMBOL(nci_hci_connect_gate); + +static int nci_hci_dev_connect_gates(struct nci_dev *ndev, + u8 gate_count, + struct nci_hci_gate *gates) +{ + int r; + + while (gate_count--) { + r = nci_hci_connect_gate(ndev, gates->dest_host, + gates->gate, gates->pipe); + if (r < 0) + return r; + gates++; + } + + return 0; +} + +int nci_hci_dev_session_init(struct nci_dev *ndev) +{ + struct nci_conn_info *conn_info; + struct sk_buff *skb; + int r; + + ndev->hci_dev->count_pipes = 0; + ndev->hci_dev->expected_pipes = 0; + + conn_info = ndev->hci_dev->conn_info; + if (!conn_info) + return -EPROTO; + + conn_info->data_exchange_cb = nci_hci_data_received_cb; + conn_info->data_exchange_cb_context = ndev; + + nci_hci_reset_pipes(ndev->hci_dev); + + if (ndev->hci_dev->init_data.gates[0].gate != NCI_HCI_ADMIN_GATE) + return -EPROTO; + + r = nci_hci_connect_gate(ndev, + ndev->hci_dev->init_data.gates[0].dest_host, + ndev->hci_dev->init_data.gates[0].gate, + ndev->hci_dev->init_data.gates[0].pipe); + if (r < 0) + goto exit; + + r = nci_hci_get_param(ndev, NCI_HCI_ADMIN_GATE, + NCI_HCI_ADMIN_PARAM_SESSION_IDENTITY, &skb); + if (r < 0) + goto exit; + + if (skb->len && + skb->len == strlen(ndev->hci_dev->init_data.session_id) && + memcmp(ndev->hci_dev->init_data.session_id, + skb->data, skb->len) == 0 && + ndev->ops->hci_load_session) { + /* Restore gate<->pipe table from some proprietary location. 
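The branch above only trusts saved pipe state when the identity fetched from the ADMIN gate matches byte-for-byte: non-empty, equal length, equal content, and a driver hook to restore with. The guard order is the whole trick; a tiny distilled version with hypothetical identities:

    #include <stdio.h>
    #include <string.h>

    /* Same shape as the check in nci_hci_dev_session_init(): compare the
     * NFCC's stored identity with ours before trusting saved pipe state. */
    static int session_matches(const unsigned char *dev_id, size_t dev_len,
                               const char *our_id)
    {
        return dev_len && dev_len == strlen(our_id) &&
               memcmp(dev_id, our_id, dev_len) == 0;
    }

    int main(void)
    {
        const unsigned char from_nfcc[] = { 'v', '1', '.', '0' };

        printf("%d\n", session_matches(from_nfcc, sizeof(from_nfcc), "v1.0"));
        printf("%d\n", session_matches(from_nfcc, sizeof(from_nfcc), "v2.0"));
        return 0;
    }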
*/ + r = ndev->ops->hci_load_session(ndev); + if (r < 0) + goto exit; + } else { + r = nci_hci_dev_connect_gates(ndev, + ndev->hci_dev->init_data.gate_count, + ndev->hci_dev->init_data.gates); + if (r < 0) + goto exit; + + r = nci_hci_set_param(ndev, NCI_HCI_ADMIN_GATE, + NCI_HCI_ADMIN_PARAM_SESSION_IDENTITY, + ndev->hci_dev->init_data.session_id, + strlen(ndev->hci_dev->init_data.session_id)); + } + if (r == 0) + goto exit; + +exit: + kfree_skb(skb); + + return r; +} +EXPORT_SYMBOL(nci_hci_dev_session_init); + +struct nci_hci_dev *nci_hci_allocate(struct nci_dev *ndev) +{ + struct nci_hci_dev *hdev; + + hdev = kzalloc(sizeof(*hdev), GFP_KERNEL); + if (!hdev) + return NULL; + + skb_queue_head_init(&hdev->rx_hcp_frags); + INIT_WORK(&hdev->msg_rx_work, nci_hci_msg_rx_work); + skb_queue_head_init(&hdev->msg_rx_queue); + hdev->ndev = ndev; + + return hdev; +} diff --git a/net/nfc/nci/ntf.c b/net/nfc/nci/ntf.c index 22e453cb787d..3218071072ac 100644 --- a/net/nfc/nci/ntf.c +++ b/net/nfc/nci/ntf.c @@ -43,6 +43,7 @@ static void nci_core_conn_credits_ntf_packet(struct nci_dev *ndev, struct sk_buff *skb) { struct nci_core_conn_credit_ntf *ntf = (void *) skb->data; + struct nci_conn_info *conn_info; int i; pr_debug("num_entries %d\n", ntf->num_entries); @@ -59,11 +60,13 @@ static void nci_core_conn_credits_ntf_packet(struct nci_dev *ndev, i, ntf->conn_entries[i].conn_id, ntf->conn_entries[i].credits); - if (ntf->conn_entries[i].conn_id == NCI_STATIC_RF_CONN_ID) { - /* found static rf connection */ - atomic_add(ntf->conn_entries[i].credits, - &ndev->credits_cnt); - } + conn_info = nci_get_conn_info_by_conn_id(ndev, + ntf->conn_entries[i].conn_id); + if (!conn_info) + return; + + atomic_add(ntf->conn_entries[i].credits, + &conn_info->credits_cnt); } /* trigger the next tx */ @@ -96,7 +99,7 @@ static void nci_core_conn_intf_error_ntf_packet(struct nci_dev *ndev, /* complete the data exchange transaction, if exists */ if (test_bit(NCI_DATA_EXCHANGE, &ndev->flags)) - nci_data_exchange_complete(ndev, NULL, -EIO); + nci_data_exchange_complete(ndev, NULL, ntf->conn_id, -EIO); } static __u8 *nci_extract_rf_params_nfca_passive_poll(struct nci_dev *ndev, @@ -513,6 +516,7 @@ static int nci_store_general_bytes_nfc_dep(struct nci_dev *ndev, static void nci_rf_intf_activated_ntf_packet(struct nci_dev *ndev, struct sk_buff *skb) { + struct nci_conn_info *conn_info; struct nci_rf_intf_activated_ntf ntf; __u8 *data = skb->data; int err = NCI_STATUS_OK; @@ -537,6 +541,13 @@ static void nci_rf_intf_activated_ntf_packet(struct nci_dev *ndev, pr_debug("rf_tech_specific_params_len %d\n", ntf.rf_tech_specific_params_len); + /* If this contains a value of 0x00 (NFCEE Direct RF + * Interface) then all following parameters SHALL contain a + * value of 0 and SHALL be ignored. 
+ */ + if (ntf.rf_interface == NCI_RF_INTERFACE_NFCEE_DIRECT) + goto listen; + if (ntf.rf_tech_specific_params_len > 0) { switch (ntf.activation_rf_tech_and_mode) { case NCI_NFC_A_PASSIVE_POLL_MODE: @@ -614,11 +625,16 @@ static void nci_rf_intf_activated_ntf_packet(struct nci_dev *ndev, exit: if (err == NCI_STATUS_OK) { - ndev->max_data_pkt_payload_size = ntf.max_data_pkt_payload_size; - ndev->initial_num_credits = ntf.initial_num_credits; + conn_info = ndev->rf_conn_info; + if (!conn_info) + return; + + conn_info->max_pkt_payload_len = ntf.max_data_pkt_payload_size; + conn_info->initial_num_credits = ntf.initial_num_credits; /* set the available credits to initial value */ - atomic_set(&ndev->credits_cnt, ndev->initial_num_credits); + atomic_set(&conn_info->credits_cnt, + conn_info->initial_num_credits); /* store general bytes to be reported later in dep_link_up */ if (ntf.rf_interface == NCI_RF_INTERFACE_NFC_DEP) { @@ -643,6 +659,7 @@ exit: nci_req_complete(ndev, err); } } else { +listen: /* Listen mode */ atomic_set(&ndev->state, NCI_LISTEN_ACTIVE); if (err == NCI_STATUS_OK && @@ -661,10 +678,15 @@ exit: static void nci_rf_deactivate_ntf_packet(struct nci_dev *ndev, struct sk_buff *skb) { + struct nci_conn_info *conn_info; struct nci_rf_deactivate_ntf *ntf = (void *) skb->data; pr_debug("entry, type 0x%x, reason 0x%x\n", ntf->type, ntf->reason); + conn_info = ndev->rf_conn_info; + if (!conn_info) + return; + /* drop tx data queue */ skb_queue_purge(&ndev->tx_q); @@ -676,7 +698,8 @@ static void nci_rf_deactivate_ntf_packet(struct nci_dev *ndev, /* complete the data exchange transaction, if exists */ if (test_bit(NCI_DATA_EXCHANGE, &ndev->flags)) - nci_data_exchange_complete(ndev, NULL, -EIO); + nci_data_exchange_complete(ndev, NULL, NCI_STATIC_RF_CONN_ID, + -EIO); switch (ntf->type) { case NCI_DEACTIVATE_TYPE_IDLE_MODE: @@ -696,6 +719,32 @@ static void nci_rf_deactivate_ntf_packet(struct nci_dev *ndev, nci_req_complete(ndev, NCI_STATUS_OK); } +static void nci_nfcee_discover_ntf_packet(struct nci_dev *ndev, + struct sk_buff *skb) +{ + u8 status = NCI_STATUS_OK; + struct nci_nfcee_discover_ntf *nfcee_ntf = + (struct nci_nfcee_discover_ntf *)skb->data; + + pr_debug("\n"); + + /* NFCForum NCI 9.2.1 HCI Network Specific Handling + * If the NFCC supports the HCI Network, it SHALL return one, + * and only one, NFCEE_DISCOVER_NTF with a Protocol type of + * “HCI Access”, even if the HCI Network contains multiple NFCEEs. 
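Like the other handlers in these files, the NFCEE discovery code parses by overlaying a packed struct on skb->data after a length guard. A freestanding illustration of that overlay-and-guard pattern with a simplified two-byte payload (the real NCI tables carry more fields, so treat the layout as a sketch):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct nfcee_discover_rsp {
        uint8_t status;
        uint8_t num_nfcee;
    } __attribute__((packed));

    int main(void)
    {
        const uint8_t payload[] = { 0x00 /* STATUS_OK */, 0x01 };
        struct nfcee_discover_rsp rsp;

        /* The kernel casts skb->data in place; memcpy sidesteps alignment
         * questions in portable code and is equivalent here. */
        if (sizeof(payload) != sizeof(rsp))
            return 1; /* mirrors the skb->len != 2 guard */
        memcpy(&rsp, payload, sizeof(rsp));
        printf("status %#x, %u NFCEE(s)\n", rsp.status, rsp.num_nfcee);
        return 0;
    }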
+ */ + ndev->hci_dev->nfcee_id = nfcee_ntf->nfcee_id; + ndev->cur_id = nfcee_ntf->nfcee_id; + + nci_req_complete(ndev, status); +} + +static void nci_nfcee_action_ntf_packet(struct nci_dev *ndev, + struct sk_buff *skb) +{ + pr_debug("\n"); +} + void nci_ntf_packet(struct nci_dev *ndev, struct sk_buff *skb) { __u16 ntf_opcode = nci_opcode(skb->data); @@ -734,6 +783,14 @@ void nci_ntf_packet(struct nci_dev *ndev, struct sk_buff *skb) nci_rf_deactivate_ntf_packet(ndev, skb); break; + case NCI_OP_NFCEE_DISCOVER_NTF: + nci_nfcee_discover_ntf_packet(ndev, skb); + break; + + case NCI_OP_RF_NFCEE_ACTION_NTF: + nci_nfcee_action_ntf_packet(ndev, skb); + break; + default: pr_err("unknown ntf opcode 0x%x\n", ntf_opcode); break; diff --git a/net/nfc/nci/rsp.c b/net/nfc/nci/rsp.c index 041de51ccdbe..02486bc2ceea 100644 --- a/net/nfc/nci/rsp.c +++ b/net/nfc/nci/rsp.c @@ -140,13 +140,31 @@ static void nci_rf_disc_map_rsp_packet(struct nci_dev *ndev, static void nci_rf_disc_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb) { + struct nci_conn_info *conn_info; __u8 status = skb->data[0]; pr_debug("status 0x%x\n", status); - if (status == NCI_STATUS_OK) + if (status == NCI_STATUS_OK) { atomic_set(&ndev->state, NCI_DISCOVERY); + conn_info = ndev->rf_conn_info; + if (!conn_info) { + conn_info = devm_kzalloc(&ndev->nfc_dev->dev, + sizeof(struct nci_conn_info), + GFP_KERNEL); + if (!conn_info) { + status = NCI_STATUS_REJECTED; + goto exit; + } + conn_info->conn_id = NCI_STATIC_RF_CONN_ID; + INIT_LIST_HEAD(&conn_info->list); + list_add(&conn_info->list, &ndev->conn_info_list); + ndev->rf_conn_info = conn_info; + } + } + +exit: nci_req_complete(ndev, status); } @@ -178,6 +196,90 @@ static void nci_rf_deactivate_rsp_packet(struct nci_dev *ndev, } } +static void nci_nfcee_discover_rsp_packet(struct nci_dev *ndev, + struct sk_buff *skb) +{ + struct nci_nfcee_discover_rsp *discover_rsp; + + if (skb->len != 2) { + nci_req_complete(ndev, NCI_STATUS_NFCEE_PROTOCOL_ERROR); + return; + } + + discover_rsp = (struct nci_nfcee_discover_rsp *)skb->data; + + if (discover_rsp->status != NCI_STATUS_OK || + discover_rsp->num_nfcee == 0) + nci_req_complete(ndev, discover_rsp->status); +} + +static void nci_nfcee_mode_set_rsp_packet(struct nci_dev *ndev, + struct sk_buff *skb) +{ + __u8 status = skb->data[0]; + + pr_debug("status 0x%x\n", status); + nci_req_complete(ndev, status); +} + +static void nci_core_conn_create_rsp_packet(struct nci_dev *ndev, + struct sk_buff *skb) +{ + __u8 status = skb->data[0]; + struct nci_conn_info *conn_info; + struct nci_core_conn_create_rsp *rsp; + + pr_debug("status 0x%x\n", status); + + if (status == NCI_STATUS_OK) { + rsp = (struct nci_core_conn_create_rsp *)skb->data; + + conn_info = devm_kzalloc(&ndev->nfc_dev->dev, + sizeof(*conn_info), GFP_KERNEL); + if (!conn_info) { + status = NCI_STATUS_REJECTED; + goto exit; + } + + conn_info->id = ndev->cur_id; + conn_info->conn_id = rsp->conn_id; + + /* Note: data_exchange_cb and data_exchange_cb_context need to + * be specified outside of nci_core_conn_create_rsp_packet + */ + + INIT_LIST_HEAD(&conn_info->list); + list_add(&conn_info->list, &ndev->conn_info_list); + + if (ndev->cur_id == ndev->hci_dev->nfcee_id) + ndev->hci_dev->conn_info = conn_info; + + conn_info->conn_id = rsp->conn_id; + conn_info->max_pkt_payload_len = rsp->max_ctrl_pkt_payload_len; + atomic_set(&conn_info->credits_cnt, rsp->credits_cnt); + } + +exit: + nci_req_complete(ndev, status); +} + +static void nci_core_conn_close_rsp_packet(struct nci_dev *ndev, + struct sk_buff *skb) +{ +
struct nci_conn_info *conn_info; + __u8 status = skb->data[0]; + + pr_debug("status 0x%x\n", status); + if (status == NCI_STATUS_OK) { + conn_info = nci_get_conn_info_by_conn_id(ndev, ndev->cur_id); + if (conn_info) { + list_del(&conn_info->list); + devm_kfree(&ndev->nfc_dev->dev, conn_info); + } + } + nci_req_complete(ndev, status); +} + void nci_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb) { __u16 rsp_opcode = nci_opcode(skb->data); @@ -207,6 +309,14 @@ void nci_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb) nci_core_set_config_rsp_packet(ndev, skb); break; + case NCI_OP_CORE_CONN_CREATE_RSP: + nci_core_conn_create_rsp_packet(ndev, skb); + break; + + case NCI_OP_CORE_CONN_CLOSE_RSP: + nci_core_conn_close_rsp_packet(ndev, skb); + break; + case NCI_OP_RF_DISCOVER_MAP_RSP: nci_rf_disc_map_rsp_packet(ndev, skb); break; @@ -223,6 +333,14 @@ void nci_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb) nci_rf_deactivate_rsp_packet(ndev, skb); break; + case NCI_OP_NFCEE_DISCOVER_RSP: + nci_nfcee_discover_rsp_packet(ndev, skb); + break; + + case NCI_OP_NFCEE_MODE_SET_RSP: + nci_nfcee_mode_set_rsp_packet(ndev, skb); + break; + default: pr_err("unknown rsp opcode 0x%x\n", rsp_opcode); break; diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c index be387e6219a0..14a2d11581da 100644 --- a/net/nfc/netlink.c +++ b/net/nfc/netlink.c @@ -497,6 +497,53 @@ free_msg: return -EMSGSIZE; } +int nfc_genl_se_transaction(struct nfc_dev *dev, u8 se_idx, + struct nfc_evt_transaction *evt_transaction) +{ + struct nfc_se *se; + struct sk_buff *msg; + void *hdr; + + msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0, + NFC_EVENT_SE_TRANSACTION); + if (!hdr) + goto free_msg; + + se = nfc_find_se(dev, se_idx); + if (!se) + goto free_msg; + + if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx) || + nla_put_u32(msg, NFC_ATTR_SE_INDEX, se_idx) || + nla_put_u8(msg, NFC_ATTR_SE_TYPE, se->type) || + nla_put(msg, NFC_ATTR_SE_AID, evt_transaction->aid_len, + evt_transaction->aid) || + nla_put(msg, NFC_ATTR_SE_PARAMS, evt_transaction->params_len, + evt_transaction->params)) + goto nla_put_failure; + + /* evt_transaction is no longer used */ + devm_kfree(&dev->dev, evt_transaction); + + genlmsg_end(msg, hdr); + + genlmsg_multicast(&nfc_genl_family, msg, 0, 0, GFP_KERNEL); + + return 0; + +nla_put_failure: + genlmsg_cancel(msg, hdr); +free_msg: + /* evt_transaction is no longer used */ + devm_kfree(&dev->dev, evt_transaction); + nlmsg_free(msg); + return -EMSGSIZE; +} + static int nfc_genl_send_device(struct sk_buff *msg, struct nfc_dev *dev, u32 portid, u32 seq, struct netlink_callback *cb, diff --git a/net/nfc/nfc.h b/net/nfc/nfc.h index 88d60064890e..a8ce80b47720 100644 --- a/net/nfc/nfc.h +++ b/net/nfc/nfc.h @@ -100,6 +100,8 @@ int nfc_genl_llc_send_sdres(struct nfc_dev *dev, struct hlist_head *sdres_list); int nfc_genl_se_added(struct nfc_dev *dev, u32 se_idx, u16 type); int nfc_genl_se_removed(struct nfc_dev *dev, u32 se_idx); +int nfc_genl_se_transaction(struct nfc_dev *dev, u8 se_idx, + struct nfc_evt_transaction *evt_transaction); struct nfc_dev *nfc_get_device(unsigned int idx); diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c index b4cffe686126..b491c1c296fe 100644 --- a/net/openvswitch/actions.c +++ b/net/openvswitch/actions.c @@ -185,10 +185,15 @@ static int pop_mpls(struct sk_buff *skb, struct sw_flow_key *key, return 0; } -static int set_mpls(struct sk_buff *skb, struct sw_flow_key *key, - const __be32
*mpls_lse) +/* 'KEY' must not have any bits set outside of the 'MASK' */ +#define MASKED(OLD, KEY, MASK) ((KEY) | ((OLD) & ~(MASK))) +#define SET_MASKED(OLD, KEY, MASK) ((OLD) = MASKED(OLD, KEY, MASK)) + +static int set_mpls(struct sk_buff *skb, struct sw_flow_key *flow_key, + const __be32 *mpls_lse, const __be32 *mask) { __be32 *stack; + __be32 lse; int err; err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN); @@ -196,14 +201,16 @@ static int set_mpls(struct sk_buff *skb, struct sw_flow_key *key, return err; stack = (__be32 *)skb_mpls_header(skb); + lse = MASKED(*stack, *mpls_lse, *mask); if (skb->ip_summed == CHECKSUM_COMPLETE) { - __be32 diff[] = { ~(*stack), *mpls_lse }; + __be32 diff[] = { ~(*stack), lse }; + skb->csum = ~csum_partial((char *)diff, sizeof(diff), ~skb->csum); } - *stack = *mpls_lse; - key->mpls.top_lse = *mpls_lse; + *stack = lse; + flow_key->mpls.top_lse = lse; return 0; } @@ -230,23 +237,39 @@ static int push_vlan(struct sk_buff *skb, struct sw_flow_key *key, ntohs(vlan->vlan_tci) & ~VLAN_TAG_PRESENT); } -static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *key, - const struct ovs_key_ethernet *eth_key) +/* 'src' is already properly masked. */ +static void ether_addr_copy_masked(u8 *dst_, const u8 *src_, const u8 *mask_) +{ + u16 *dst = (u16 *)dst_; + const u16 *src = (const u16 *)src_; + const u16 *mask = (const u16 *)mask_; + + SET_MASKED(dst[0], src[0], mask[0]); + SET_MASKED(dst[1], src[1], mask[1]); + SET_MASKED(dst[2], src[2], mask[2]); +} + +static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *flow_key, + const struct ovs_key_ethernet *key, + const struct ovs_key_ethernet *mask) { int err; + err = skb_ensure_writable(skb, ETH_HLEN); if (unlikely(err)) return err; skb_postpull_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2); - ether_addr_copy(eth_hdr(skb)->h_source, eth_key->eth_src); - ether_addr_copy(eth_hdr(skb)->h_dest, eth_key->eth_dst); + ether_addr_copy_masked(eth_hdr(skb)->h_source, key->eth_src, + mask->eth_src); + ether_addr_copy_masked(eth_hdr(skb)->h_dest, key->eth_dst, + mask->eth_dst); ovs_skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2); - ether_addr_copy(key->eth.src, eth_key->eth_src); - ether_addr_copy(key->eth.dst, eth_key->eth_dst); + ether_addr_copy(flow_key->eth.src, eth_hdr(skb)->h_source); + ether_addr_copy(flow_key->eth.dst, eth_hdr(skb)->h_dest); return 0; } @@ -304,6 +327,15 @@ static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto, } } +static void mask_ipv6_addr(const __be32 old[4], const __be32 addr[4], + const __be32 mask[4], __be32 masked[4]) +{ + masked[0] = MASKED(old[0], addr[0], mask[0]); + masked[1] = MASKED(old[1], addr[1], mask[1]); + masked[2] = MASKED(old[2], addr[2], mask[2]); + masked[3] = MASKED(old[3], addr[3], mask[3]); +} + static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto, __be32 addr[4], const __be32 new_addr[4], bool recalculate_csum) @@ -315,29 +347,29 @@ static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto, memcpy(addr, new_addr, sizeof(__be32[4])); } -static void set_ipv6_tc(struct ipv6hdr *nh, u8 tc) +static void set_ipv6_fl(struct ipv6hdr *nh, u32 fl, u32 mask) { - nh->priority = tc >> 4; - nh->flow_lbl[0] = (nh->flow_lbl[0] & 0x0F) | ((tc & 0x0F) << 4); + /* Bits 21-24 are always unmasked, so this retains their values. 
*/ + SET_MASKED(nh->flow_lbl[0], (u8)(fl >> 16), (u8)(mask >> 16)); + SET_MASKED(nh->flow_lbl[1], (u8)(fl >> 8), (u8)(mask >> 8)); + SET_MASKED(nh->flow_lbl[2], (u8)fl, (u8)mask); } -static void set_ipv6_fl(struct ipv6hdr *nh, u32 fl) +static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl, + u8 mask) { - nh->flow_lbl[0] = (nh->flow_lbl[0] & 0xF0) | (fl & 0x000F0000) >> 16; - nh->flow_lbl[1] = (fl & 0x0000FF00) >> 8; - nh->flow_lbl[2] = fl & 0x000000FF; -} + new_ttl = MASKED(nh->ttl, new_ttl, mask); -static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl) -{ csum_replace2(&nh->check, htons(nh->ttl << 8), htons(new_ttl << 8)); nh->ttl = new_ttl; } -static int set_ipv4(struct sk_buff *skb, struct sw_flow_key *key, - const struct ovs_key_ipv4 *ipv4_key) +static int set_ipv4(struct sk_buff *skb, struct sw_flow_key *flow_key, + const struct ovs_key_ipv4 *key, + const struct ovs_key_ipv4 *mask) { struct iphdr *nh; + __be32 new_addr; int err; err = skb_ensure_writable(skb, skb_network_offset(skb) + @@ -347,36 +379,49 @@ static int set_ipv4(struct sk_buff *skb, struct sw_flow_key *key, nh = ip_hdr(skb); - if (ipv4_key->ipv4_src != nh->saddr) { - set_ip_addr(skb, nh, &nh->saddr, ipv4_key->ipv4_src); - key->ipv4.addr.src = ipv4_key->ipv4_src; - } + /* Setting IP addresses is typically only a side effect of + * matching on them in the current userspace implementation, so it + * makes sense to check if the value actually changed. + */ + if (mask->ipv4_src) { + new_addr = MASKED(nh->saddr, key->ipv4_src, mask->ipv4_src); - if (ipv4_key->ipv4_dst != nh->daddr) { - set_ip_addr(skb, nh, &nh->daddr, ipv4_key->ipv4_dst); - key->ipv4.addr.dst = ipv4_key->ipv4_dst; + if (unlikely(new_addr != nh->saddr)) { + set_ip_addr(skb, nh, &nh->saddr, new_addr); + flow_key->ipv4.addr.src = new_addr; + } } + if (mask->ipv4_dst) { + new_addr = MASKED(nh->daddr, key->ipv4_dst, mask->ipv4_dst); - if (ipv4_key->ipv4_tos != nh->tos) { - ipv4_change_dsfield(nh, 0, ipv4_key->ipv4_tos); - key->ip.tos = nh->tos; + if (unlikely(new_addr != nh->daddr)) { + set_ip_addr(skb, nh, &nh->daddr, new_addr); + flow_key->ipv4.addr.dst = new_addr; + } } - - if (ipv4_key->ipv4_ttl != nh->ttl) { - set_ip_ttl(skb, nh, ipv4_key->ipv4_ttl); - key->ip.ttl = ipv4_key->ipv4_ttl; + if (mask->ipv4_tos) { + ipv4_change_dsfield(nh, ~mask->ipv4_tos, key->ipv4_tos); + flow_key->ip.tos = nh->tos; + } + if (mask->ipv4_ttl) { + set_ip_ttl(skb, nh, key->ipv4_ttl, mask->ipv4_ttl); + flow_key->ip.ttl = nh->ttl; } return 0; } -static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *key, - const struct ovs_key_ipv6 *ipv6_key) +static bool is_ipv6_mask_nonzero(const __be32 addr[4]) +{ + return !!(addr[0] | addr[1] | addr[2] | addr[3]); +} + +static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key, + const struct ovs_key_ipv6 *key, + const struct ovs_key_ipv6 *mask) { struct ipv6hdr *nh; int err; - __be32 *saddr; - __be32 *daddr; err = skb_ensure_writable(skb, skb_network_offset(skb) + sizeof(struct ipv6hdr)); @@ -384,71 +429,77 @@ static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *key, return err; nh = ipv6_hdr(skb); - saddr = (__be32 *)&nh->saddr; - daddr = (__be32 *)&nh->daddr; - - if (memcmp(ipv6_key->ipv6_src, saddr, sizeof(ipv6_key->ipv6_src))) { - set_ipv6_addr(skb, ipv6_key->ipv6_proto, saddr, - ipv6_key->ipv6_src, true); - memcpy(&key->ipv6.addr.src, ipv6_key->ipv6_src, - sizeof(ipv6_key->ipv6_src)); - } - if (memcmp(ipv6_key->ipv6_dst, daddr, sizeof(ipv6_key->ipv6_dst))) { + /* Setting
-static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *key,
-            const struct ovs_key_ipv6 *ipv6_key)
+static bool is_ipv6_mask_nonzero(const __be32 addr[4])
+{
+    return !!(addr[0] | addr[1] | addr[2] | addr[3]);
+}
+
+static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
+            const struct ovs_key_ipv6 *key,
+            const struct ovs_key_ipv6 *mask)
 {
     struct ipv6hdr *nh;
     int err;
-    __be32 *saddr;
-    __be32 *daddr;

     err = skb_ensure_writable(skb, skb_network_offset(skb) +
                   sizeof(struct ipv6hdr));
@@ -384,71 +429,77 @@ static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *key,
         return err;

     nh = ipv6_hdr(skb);
-    saddr = (__be32 *)&nh->saddr;
-    daddr = (__be32 *)&nh->daddr;
-
-    if (memcmp(ipv6_key->ipv6_src, saddr, sizeof(ipv6_key->ipv6_src))) {
-        set_ipv6_addr(skb, ipv6_key->ipv6_proto, saddr,
-                  ipv6_key->ipv6_src, true);
-        memcpy(&key->ipv6.addr.src, ipv6_key->ipv6_src,
-               sizeof(ipv6_key->ipv6_src));
-    }

-    if (memcmp(ipv6_key->ipv6_dst, daddr, sizeof(ipv6_key->ipv6_dst))) {
+    /* Setting an IP address is typically only a side effect of
+     * matching on it in the current userspace implementation, so it
+     * makes sense to check if the value actually changed.
+     */
+    if (is_ipv6_mask_nonzero(mask->ipv6_src)) {
+        __be32 *saddr = (__be32 *)&nh->saddr;
+        __be32 masked[4];
+
+        mask_ipv6_addr(saddr, key->ipv6_src, mask->ipv6_src, masked);
+
+        if (unlikely(memcmp(saddr, masked, sizeof(masked)))) {
+            set_ipv6_addr(skb, key->ipv6_proto, saddr, masked,
+                      true);
+            memcpy(&flow_key->ipv6.addr.src, masked,
+                   sizeof(flow_key->ipv6.addr.src));
+        }
+    }
+    if (is_ipv6_mask_nonzero(mask->ipv6_dst)) {
         unsigned int offset = 0;
         int flags = IP6_FH_F_SKIP_RH;
         bool recalc_csum = true;
-
-        if (ipv6_ext_hdr(nh->nexthdr))
-            recalc_csum = ipv6_find_hdr(skb, &offset,
-                            NEXTHDR_ROUTING, NULL,
-                            &flags) != NEXTHDR_ROUTING;
-
-        set_ipv6_addr(skb, ipv6_key->ipv6_proto, daddr,
-                  ipv6_key->ipv6_dst, recalc_csum);
-        memcpy(&key->ipv6.addr.dst, ipv6_key->ipv6_dst,
-               sizeof(ipv6_key->ipv6_dst));
+        __be32 *daddr = (__be32 *)&nh->daddr;
+        __be32 masked[4];
+
+        mask_ipv6_addr(daddr, key->ipv6_dst, mask->ipv6_dst, masked);
+
+        if (unlikely(memcmp(daddr, masked, sizeof(masked)))) {
+            if (ipv6_ext_hdr(nh->nexthdr))
+                recalc_csum = (ipv6_find_hdr(skb, &offset,
+                                 NEXTHDR_ROUTING,
+                                 NULL, &flags)
+                           != NEXTHDR_ROUTING);
+
+            set_ipv6_addr(skb, key->ipv6_proto, daddr, masked,
+                      recalc_csum);
+            memcpy(&flow_key->ipv6.addr.dst, masked,
+                   sizeof(flow_key->ipv6.addr.dst));
+        }
+    }
+    if (mask->ipv6_tclass) {
+        ipv6_change_dsfield(nh, ~mask->ipv6_tclass, key->ipv6_tclass);
+        flow_key->ip.tos = ipv6_get_dsfield(nh);
+    }
+    if (mask->ipv6_label) {
+        set_ipv6_fl(nh, ntohl(key->ipv6_label),
+                ntohl(mask->ipv6_label));
+        flow_key->ipv6.label =
+            *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
+    }
+    if (mask->ipv6_hlimit) {
+        SET_MASKED(nh->hop_limit, key->ipv6_hlimit, mask->ipv6_hlimit);
+        flow_key->ip.ttl = nh->hop_limit;
     }
-
-    set_ipv6_tc(nh, ipv6_key->ipv6_tclass);
-    key->ip.tos = ipv6_get_dsfield(nh);
-
-    set_ipv6_fl(nh, ntohl(ipv6_key->ipv6_label));
-    key->ipv6.label = *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
-
-    nh->hop_limit = ipv6_key->ipv6_hlimit;
-    key->ip.ttl = ipv6_key->ipv6_hlimit;
     return 0;
 }

 /* Must follow skb_ensure_writable() since that can move the skb data. */
 static void set_tp_port(struct sk_buff *skb, __be16 *port,
-             __be16 new_port, __sum16 *check)
+            __be16 new_port, __sum16 *check)
 {
     inet_proto_csum_replace2(check, skb, *port, new_port, 0);
     *port = new_port;
-
-    skb_clear_hash(skb);
-}
-
-static void set_udp_port(struct sk_buff *skb, __be16 *port, __be16 new_port)
-{
-    struct udphdr *uh = udp_hdr(skb);
-
-    if (uh->check && skb->ip_summed != CHECKSUM_PARTIAL) {
-        set_tp_port(skb, port, new_port, &uh->check);
-
-        if (!uh->check)
-            uh->check = CSUM_MANGLED_0;
-    } else {
-        *port = new_port;
-        skb_clear_hash(skb);
-    }
 }

-static int set_udp(struct sk_buff *skb, struct sw_flow_key *key,
-           const struct ovs_key_udp *udp_port_key)
+static int set_udp(struct sk_buff *skb, struct sw_flow_key *flow_key,
+           const struct ovs_key_udp *key,
+           const struct ovs_key_udp *mask)
 {
     struct udphdr *uh;
+    __be16 src, dst;
     int err;

     err = skb_ensure_writable(skb, skb_transport_offset(skb) +
@@ -457,23 +508,40 @@ static int set_udp(struct sk_buff *skb, struct sw_flow_key *key,
         return err;

     uh = udp_hdr(skb);
-    if (udp_port_key->udp_src != uh->source) {
-        set_udp_port(skb, &uh->source, udp_port_key->udp_src);
-        key->tp.src = udp_port_key->udp_src;
-    }
+    /* Either of the masks is non-zero, so do not bother checking them. */
+    src = MASKED(uh->source, key->udp_src, mask->udp_src);
+    dst = MASKED(uh->dest, key->udp_dst, mask->udp_dst);

-    if (udp_port_key->udp_dst != uh->dest) {
-        set_udp_port(skb, &uh->dest, udp_port_key->udp_dst);
-        key->tp.dst = udp_port_key->udp_dst;
+    if (uh->check && skb->ip_summed != CHECKSUM_PARTIAL) {
+        if (likely(src != uh->source)) {
+            set_tp_port(skb, &uh->source, src, &uh->check);
+            flow_key->tp.src = src;
+        }
+        if (likely(dst != uh->dest)) {
+            set_tp_port(skb, &uh->dest, dst, &uh->check);
+            flow_key->tp.dst = dst;
+        }
+
+        if (unlikely(!uh->check))
+            uh->check = CSUM_MANGLED_0;
+    } else {
+        uh->source = src;
+        uh->dest = dst;
+        flow_key->tp.src = src;
+        flow_key->tp.dst = dst;
     }

+    skb_clear_hash(skb);
+
     return 0;
 }
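
set_udp() above only does incremental checksum updates when a checksum is in
use, and re-mangles a resulting zero to CSUM_MANGLED_0 (all-ones), since a
zero checksum in a UDP-over-IPv4 header means "no checksum". A plain-C sketch
of that folding rule (udp_fold_csum() is a hypothetical helper, not a kernel
function):

    #include <assert.h>
    #include <stdint.h>

    /* Fold a one's-complement sum and apply the UDP rule: a computed
     * checksum of 0 must be transmitted as 0xffff. */
    static uint16_t udp_fold_csum(uint32_t sum)
    {
        while (sum >> 16)
            sum = (sum & 0xffff) + (sum >> 16);
        sum = ~sum & 0xffff;
        return sum == 0 ? 0xffff : (uint16_t)sum;
    }

    int main(void)
    {
        /* A total of 0xffff complements to 0 and must be mangled. */
        assert(udp_fold_csum(0xffff) == 0xffff);
        assert(udp_fold_csum(0x1234) != 0);
        return 0;
    }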
-static int set_tcp(struct sk_buff *skb, struct sw_flow_key *key,
-           const struct ovs_key_tcp *tcp_port_key)
+static int set_tcp(struct sk_buff *skb, struct sw_flow_key *flow_key,
+           const struct ovs_key_tcp *key,
+           const struct ovs_key_tcp *mask)
 {
     struct tcphdr *th;
+    __be16 src, dst;
     int err;

     err = skb_ensure_writable(skb, skb_transport_offset(skb) +
@@ -482,50 +550,49 @@ static int set_tcp(struct sk_buff *skb, struct sw_flow_key *key,
         return err;

     th = tcp_hdr(skb);
-    if (tcp_port_key->tcp_src != th->source) {
-        set_tp_port(skb, &th->source, tcp_port_key->tcp_src, &th->check);
-        key->tp.src = tcp_port_key->tcp_src;
+    src = MASKED(th->source, key->tcp_src, mask->tcp_src);
+    if (likely(src != th->source)) {
+        set_tp_port(skb, &th->source, src, &th->check);
+        flow_key->tp.src = src;
     }
-
-    if (tcp_port_key->tcp_dst != th->dest) {
-        set_tp_port(skb, &th->dest, tcp_port_key->tcp_dst, &th->check);
-        key->tp.dst = tcp_port_key->tcp_dst;
+    dst = MASKED(th->dest, key->tcp_dst, mask->tcp_dst);
+    if (likely(dst != th->dest)) {
+        set_tp_port(skb, &th->dest, dst, &th->check);
+        flow_key->tp.dst = dst;
     }
+    skb_clear_hash(skb);

     return 0;
 }

-static int set_sctp(struct sk_buff *skb, struct sw_flow_key *key,
-            const struct ovs_key_sctp *sctp_port_key)
+static int set_sctp(struct sk_buff *skb, struct sw_flow_key *flow_key,
+            const struct ovs_key_sctp *key,
+            const struct ovs_key_sctp *mask)
 {
+    unsigned int sctphoff = skb_transport_offset(skb);
     struct sctphdr *sh;
+    __le32 old_correct_csum, new_csum, old_csum;
     int err;
-    unsigned int sctphoff = skb_transport_offset(skb);

     err = skb_ensure_writable(skb, sctphoff + sizeof(struct sctphdr));
     if (unlikely(err))
         return err;

     sh = sctp_hdr(skb);
-    if (sctp_port_key->sctp_src != sh->source ||
-        sctp_port_key->sctp_dst != sh->dest) {
-        __le32 old_correct_csum, new_csum, old_csum;
+    old_csum = sh->checksum;
+    old_correct_csum = sctp_compute_cksum(skb, sctphoff);

-        old_csum = sh->checksum;
-        old_correct_csum = sctp_compute_cksum(skb, sctphoff);
+    sh->source = MASKED(sh->source, key->sctp_src, mask->sctp_src);
+    sh->dest = MASKED(sh->dest, key->sctp_dst, mask->sctp_dst);

-        sh->source = sctp_port_key->sctp_src;
-        sh->dest = sctp_port_key->sctp_dst;
+    new_csum = sctp_compute_cksum(skb, sctphoff);

-        new_csum = sctp_compute_cksum(skb, sctphoff);
+    /* Carry any checksum errors through. */
+    sh->checksum = old_csum ^ old_correct_csum ^ new_csum;

-        /* Carry any checksum errors through. */
-        sh->checksum = old_csum ^ old_correct_csum ^ new_csum;
-
-        skb_clear_hash(skb);
-        key->tp.src = sctp_port_key->sctp_src;
-        key->tp.dst = sctp_port_key->sctp_dst;
-    }
+    skb_clear_hash(skb);
+    flow_key->tp.src = sh->source;
+    flow_key->tp.dst = sh->dest;

     return 0;
 }
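
The "carry any checksum errors through" XOR works because XOR cancels: a
packet that arrived with checksum old_correct ^ E leaves with new_correct ^ E,
so pre-existing corruption is preserved rather than laundered into a valid
checksum. A standalone demonstration of the identity with stand-in values
(not a real CRC32c computation):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t old_correct = 0xdeadbeef; /* stand-in CRC, old ports */
        uint32_t new_correct = 0xcafef00d; /* stand-in CRC, new ports */
        uint32_t err = 0x00000100;         /* pre-existing corruption */

        uint32_t old_csum = old_correct ^ err; /* what actually arrived */
        uint32_t new_csum = old_csum ^ old_correct ^ new_correct;

        /* The same error term carries through the rewrite... */
        assert(new_csum == (new_correct ^ err));
        /* ...and a clean packet (err == 0) stays clean. */
        assert((old_correct ^ old_correct ^ new_correct) == new_correct);
        return 0;
    }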
@@ -653,52 +720,77 @@ static void execute_hash(struct sk_buff *skb, struct sw_flow_key *key,
     key->ovs_flow_hash = hash;
 }

-static int execute_set_action(struct sk_buff *skb, struct sw_flow_key *key,
-                  const struct nlattr *nested_attr)
+static int execute_set_action(struct sk_buff *skb,
+                  struct sw_flow_key *flow_key,
+                  const struct nlattr *a)
+{
+    /* Only tunnel set execution is supported without a mask. */
+    if (nla_type(a) == OVS_KEY_ATTR_TUNNEL_INFO) {
+        OVS_CB(skb)->egress_tun_info = nla_data(a);
+        return 0;
+    }
+
+    return -EINVAL;
+}
+
+/* Mask is at the midpoint of the data. */
+#define get_mask(a, type) ((const type)nla_data(a) + 1)
+
+static int execute_masked_set_action(struct sk_buff *skb,
+                     struct sw_flow_key *flow_key,
+                     const struct nlattr *a)
 {
     int err = 0;

-    switch (nla_type(nested_attr)) {
+    switch (nla_type(a)) {
     case OVS_KEY_ATTR_PRIORITY:
-        skb->priority = nla_get_u32(nested_attr);
-        key->phy.priority = skb->priority;
+        SET_MASKED(skb->priority, nla_get_u32(a), *get_mask(a, u32 *));
+        flow_key->phy.priority = skb->priority;
         break;

     case OVS_KEY_ATTR_SKB_MARK:
-        skb->mark = nla_get_u32(nested_attr);
-        key->phy.skb_mark = skb->mark;
+        SET_MASKED(skb->mark, nla_get_u32(a), *get_mask(a, u32 *));
+        flow_key->phy.skb_mark = skb->mark;
         break;

     case OVS_KEY_ATTR_TUNNEL_INFO:
-        OVS_CB(skb)->egress_tun_info = nla_data(nested_attr);
+        /* Masked data not supported for tunnel. */
+        err = -EINVAL;
         break;

     case OVS_KEY_ATTR_ETHERNET:
-        err = set_eth_addr(skb, key, nla_data(nested_attr));
+        err = set_eth_addr(skb, flow_key, nla_data(a),
+                   get_mask(a, struct ovs_key_ethernet *));
         break;

     case OVS_KEY_ATTR_IPV4:
-        err = set_ipv4(skb, key, nla_data(nested_attr));
+        err = set_ipv4(skb, flow_key, nla_data(a),
+                   get_mask(a, struct ovs_key_ipv4 *));
         break;

     case OVS_KEY_ATTR_IPV6:
-        err = set_ipv6(skb, key, nla_data(nested_attr));
+        err = set_ipv6(skb, flow_key, nla_data(a),
+                   get_mask(a, struct ovs_key_ipv6 *));
         break;

     case OVS_KEY_ATTR_TCP:
-        err = set_tcp(skb, key, nla_data(nested_attr));
+        err = set_tcp(skb, flow_key, nla_data(a),
+                  get_mask(a, struct ovs_key_tcp *));
         break;

     case OVS_KEY_ATTR_UDP:
-        err = set_udp(skb, key, nla_data(nested_attr));
+        err = set_udp(skb, flow_key, nla_data(a),
+                  get_mask(a, struct ovs_key_udp *));
         break;

     case OVS_KEY_ATTR_SCTP:
-        err = set_sctp(skb, key, nla_data(nested_attr));
+        err = set_sctp(skb, flow_key, nla_data(a),
+                   get_mask(a, struct ovs_key_sctp *));
         break;

     case OVS_KEY_ATTR_MPLS:
-        err = set_mpls(skb, key, nla_data(nested_attr));
+        err = set_mpls(skb, flow_key, nla_data(a), get_mask(a,
+                                __be32 *));
         break;
     }

@@ -818,6 +910,11 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
             err = execute_set_action(skb, key, nla_data(a));
             break;

+        case OVS_ACTION_ATTR_SET_MASKED:
+        case OVS_ACTION_ATTR_SET_TO_MASKED:
+            err = execute_masked_set_action(skb, key, nla_data(a));
+            break;
+
         case OVS_ACTION_ATTR_SAMPLE:
             err = sample(dp, skb, key, a);
             break;
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
index 8b9a612b39d1..993281e6278d 100644
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@ -1695,16 +1695,6 @@ static int validate_and_copy_sample(const struct nlattr *attr,
     return 0;
 }

-static int validate_tp_port(const struct sw_flow_key *flow_key,
-                __be16 eth_type)
-{
-    if ((eth_type == htons(ETH_P_IP) || eth_type == htons(ETH_P_IPV6)) &&
-        (flow_key->tp.src || flow_key->tp.dst))
-        return 0;
-
-    return -EINVAL;
-}
-
 void ovs_match_init(struct sw_flow_match *match,
             struct sw_flow_key *key,
             struct sw_flow_mask *mask)
@@ -1805,23 +1795,45 @@ static int validate_and_copy_set_tun(const struct nlattr *attr,
     return err;
 }

+/* Return false if there are any non-masked bits set.
+ * Mask follows data immediately, before any netlink padding.
+ */
+static bool validate_masked(u8 *data, int len)
+{
+    u8 *mask = data + len;
+
+    while (len--)
+        if (*data++ & ~*mask++)
+            return false;
+
+    return true;
+}
+
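
validate_masked() is what makes the MASKED() shortcut in actions.c safe: it
rejects any set action whose key has bits set outside its mask, with the mask
stored immediately after the key. The same check in standalone C
(check_masked() mirrors the patch's logic; the buffers are illustrative):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* Return false if any key byte has bits outside its mask byte.
     * The mask bytes follow the key bytes, as in the patch. */
    static bool check_masked(const uint8_t *data, int len)
    {
        const uint8_t *mask = data + len;

        while (len--)
            if (*data++ & ~*mask++)
                return false;

        return true;
    }

    int main(void)
    {
        /* Two-byte key followed by its two-byte mask. */
        uint8_t ok[]  = { 0x12, 0x00, 0xff, 0x0f }; /* within mask */
        uint8_t bad[] = { 0x12, 0x10, 0xff, 0x0f }; /* 0x10 outside 0x0f */

        assert(check_masked(ok, 2));
        assert(!check_masked(bad, 2));
        return 0;
    }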
 static int validate_set(const struct nlattr *a,
             const struct sw_flow_key *flow_key,
             struct sw_flow_actions **sfa,
-            bool *set_tun, __be16 eth_type, bool log)
+            bool *skip_copy, __be16 eth_type, bool masked, bool log)
 {
     const struct nlattr *ovs_key = nla_data(a);
     int key_type = nla_type(ovs_key);
+    size_t key_len;

     /* There can be only one key in an action */
     if (nla_total_size(nla_len(ovs_key)) != nla_len(a))
         return -EINVAL;

+    key_len = nla_len(ovs_key);
+    if (masked)
+        key_len /= 2;
+
     if (key_type > OVS_KEY_ATTR_MAX ||
-        (ovs_key_lens[key_type].len != nla_len(ovs_key) &&
+        (ovs_key_lens[key_type].len != key_len &&
          ovs_key_lens[key_type].len != OVS_ATTR_NESTED))
         return -EINVAL;

+    if (masked && !validate_masked(nla_data(ovs_key), key_len))
+        return -EINVAL;
+
     switch (key_type) {
     const struct ovs_key_ipv4 *ipv4_key;
     const struct ovs_key_ipv6 *ipv6_key;
@@ -1836,7 +1848,10 @@ static int validate_set(const struct nlattr *a,
         if (eth_p_mpls(eth_type))
             return -EINVAL;

-        *set_tun = true;
+        if (masked)
+            return -EINVAL; /* Masked tunnel set not supported. */
+
+        *skip_copy = true;
         err = validate_and_copy_set_tun(a, sfa, log);
         if (err)
             return err;
@@ -1846,48 +1861,66 @@ static int validate_set(const struct nlattr *a,
         if (eth_type != htons(ETH_P_IP))
             return -EINVAL;

-        if (!flow_key->ip.proto)
-            return -EINVAL;
-
         ipv4_key = nla_data(ovs_key);
-        if (ipv4_key->ipv4_proto != flow_key->ip.proto)
-            return -EINVAL;

-        if (ipv4_key->ipv4_frag != flow_key->ip.frag)
-            return -EINVAL;
+        if (masked) {
+            const struct ovs_key_ipv4 *mask = ipv4_key + 1;

+            /* Non-writeable fields. */
+            if (mask->ipv4_proto || mask->ipv4_frag)
+                return -EINVAL;
+        } else {
+            if (ipv4_key->ipv4_proto != flow_key->ip.proto)
+                return -EINVAL;
+
+            if (ipv4_key->ipv4_frag != flow_key->ip.frag)
+                return -EINVAL;
+        }
         break;

     case OVS_KEY_ATTR_IPV6:
         if (eth_type != htons(ETH_P_IPV6))
             return -EINVAL;

-        if (!flow_key->ip.proto)
-            return -EINVAL;
-
         ipv6_key = nla_data(ovs_key);
-        if (ipv6_key->ipv6_proto != flow_key->ip.proto)
-            return -EINVAL;

-        if (ipv6_key->ipv6_frag != flow_key->ip.frag)
-            return -EINVAL;
+        if (masked) {
+            const struct ovs_key_ipv6 *mask = ipv6_key + 1;
+
+            /* Non-writeable fields. */
+            if (mask->ipv6_proto || mask->ipv6_frag)
+                return -EINVAL;
+
+            /* Invalid bits in the flow label mask? */
+            if (ntohl(mask->ipv6_label) & 0xFFF00000)
+                return -EINVAL;
+        } else {
+            if (ipv6_key->ipv6_proto != flow_key->ip.proto)
+                return -EINVAL;

+            if (ipv6_key->ipv6_frag != flow_key->ip.frag)
+                return -EINVAL;
+        }
         if (ntohl(ipv6_key->ipv6_label) & 0xFFF00000)
             return -EINVAL;

         break;

     case OVS_KEY_ATTR_TCP:
-        if (flow_key->ip.proto != IPPROTO_TCP)
+        if ((eth_type != htons(ETH_P_IP) &&
+             eth_type != htons(ETH_P_IPV6)) ||
+            flow_key->ip.proto != IPPROTO_TCP)
             return -EINVAL;

-        return validate_tp_port(flow_key, eth_type);
+        break;

     case OVS_KEY_ATTR_UDP:
-        if (flow_key->ip.proto != IPPROTO_UDP)
+        if ((eth_type != htons(ETH_P_IP) &&
+             eth_type != htons(ETH_P_IPV6)) ||
+            flow_key->ip.proto != IPPROTO_UDP)
             return -EINVAL;

-        return validate_tp_port(flow_key, eth_type);
+        break;

     case OVS_KEY_ATTR_MPLS:
         if (!eth_p_mpls(eth_type))
@@ -1895,15 +1928,45 @@ static int validate_set(const struct nlattr *a,
         break;

     case OVS_KEY_ATTR_SCTP:
-        if (flow_key->ip.proto != IPPROTO_SCTP)
+        if ((eth_type != htons(ETH_P_IP) &&
+             eth_type != htons(ETH_P_IPV6)) ||
+            flow_key->ip.proto != IPPROTO_SCTP)
             return -EINVAL;

-        return validate_tp_port(flow_key, eth_type);
+        break;

     default:
         return -EINVAL;
     }

+    /* Convert non-masked non-tunnel set actions to masked set actions. */
+    if (!masked && key_type != OVS_KEY_ATTR_TUNNEL) {
+        int start, len = key_len * 2;
+        struct nlattr *at;
+
+        *skip_copy = true;
+
+        start = add_nested_action_start(sfa,
+                        OVS_ACTION_ATTR_SET_TO_MASKED,
+                        log);
+        if (start < 0)
+            return start;
+
+        at = __add_action(sfa, key_type, NULL, len, log);
+        if (IS_ERR(at))
+            return PTR_ERR(at);
+
+        memcpy(nla_data(at), nla_data(ovs_key), key_len); /* Key. */
+        memset(nla_data(at) + key_len, 0xff, key_len);    /* Mask. */
+        /* Clear non-writeable bits from otherwise writeable fields. */
+        if (key_type == OVS_KEY_ATTR_IPV6) {
+            struct ovs_key_ipv6 *mask = nla_data(at) + key_len;
+
+            mask->ipv6_label &= htonl(0x000FFFFF);
+        }
+        add_nested_action_end(*sfa, start);
+    }
+
     return 0;
 }

@@ -1965,6 +2028,7 @@ static int __ovs_nla_copy_actions(const struct nlattr *attr,
             [OVS_ACTION_ATTR_PUSH_VLAN] = sizeof(struct ovs_action_push_vlan),
             [OVS_ACTION_ATTR_POP_VLAN] = 0,
             [OVS_ACTION_ATTR_SET] = (u32)-1,
+            [OVS_ACTION_ATTR_SET_MASKED] = (u32)-1,
             [OVS_ACTION_ATTR_SAMPLE] = (u32)-1,
             [OVS_ACTION_ATTR_HASH] = sizeof(struct ovs_action_hash)
         };
@@ -2060,7 +2124,14 @@ static int __ovs_nla_copy_actions(const struct nlattr *attr,

         case OVS_ACTION_ATTR_SET:
             err = validate_set(a, key, sfa,
-                       &skip_copy, eth_type, log);
+                       &skip_copy, eth_type, false, log);
+            if (err)
+                return err;
+            break;
+
+        case OVS_ACTION_ATTR_SET_MASKED:
+            err = validate_set(a, key, sfa,
+                       &skip_copy, eth_type, true, log);
             if (err)
                 return err;
             break;
@@ -2090,6 +2161,7 @@ static int __ovs_nla_copy_actions(const struct nlattr *attr,
     return 0;
 }

+/* 'key' must be the masked key. */
 int ovs_nla_copy_actions(const struct nlattr *attr,
              const struct sw_flow_key *key,
              struct sw_flow_actions **sfa, bool log)
@@ -2177,6 +2249,21 @@ static int set_action_to_attr(const struct nlattr *a, struct sk_buff *skb)
     return 0;
 }

+static int masked_set_action_to_set_action_attr(const struct nlattr *a,
+                        struct sk_buff *skb)
+{
+    const struct nlattr *ovs_key = nla_data(a);
+    size_t key_len = nla_len(ovs_key) / 2;
+
+    /* Revert the conversion we did from a non-masked set action to
+     * masked set action.
+     */
+    if (nla_put(skb, OVS_ACTION_ATTR_SET, nla_len(a) - key_len, ovs_key))
+        return -EMSGSIZE;
+
+    return 0;
+}
+
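
The conversion is symmetric: on input, validate_set() stores key||all-ones
mask under OVS_ACTION_ATTR_SET_TO_MASKED; on dump, the function above
re-emits only the first half as the original OVS_ACTION_ATTR_SET, since
nla_len(a) - key_len covers the nested header plus the key. A simplified
sketch of the length bookkeeping, with plain buffers standing in for netlink
attributes (no header, so the halves split evenly):

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    int main(void)
    {
        /* Pretend this is the original set-action key payload. */
        uint8_t key[6] = { 1, 2, 3, 4, 5, 6 };
        size_t key_len = sizeof(key);

        /* Kernel-side conversion: key followed by an all-ones mask. */
        uint8_t masked[12];
        memcpy(masked, key, key_len);            /* Key. */
        memset(masked + key_len, 0xff, key_len); /* Mask. */

        /* Dump-side reversion: half the stored payload is the key. */
        size_t stored_len = sizeof(masked);
        size_t dump_len = stored_len - stored_len / 2;

        assert(dump_len == key_len);
        assert(memcmp(masked, key, dump_len) == 0);
        return 0;
    }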
 int ovs_nla_put_actions(const struct nlattr *attr, int len, struct sk_buff *skb)
 {
     const struct nlattr *a;
@@ -2192,6 +2279,12 @@ int ovs_nla_put_actions(const struct nlattr *attr, int len, struct sk_buff *skb)
                 return err;
             break;

+        case OVS_ACTION_ATTR_SET_TO_MASKED:
+            err = masked_set_action_to_set_action_attr(a, skb);
+            if (err)
+                return err;
+            break;
+
         case OVS_ACTION_ATTR_SAMPLE:
             err = sample_action_to_attr(a, skb);
             if (err)
diff --git a/net/rds/ib_send.c b/net/rds/ib_send.c
index 1dde91e3dc70..bd3825d38abc 100644
--- a/net/rds/ib_send.c
+++ b/net/rds/ib_send.c
@@ -409,7 +409,7 @@ try_again:
     posted = IB_GET_POST_CREDITS(oldval);
     avail = IB_GET_SEND_CREDITS(oldval);

-    rdsdebug("rds_ib_send_grab_credits(%u): credits=%u posted=%u\n",
+    rdsdebug("wanted=%u credits=%u posted=%u\n",
             wanted, avail, posted);

     /* The last credit must be used to send a credit update. */
@@ -453,7 +453,7 @@ void rds_ib_send_add_credits(struct rds_connection *conn, unsigned int credits)
     if (credits == 0)
         return;

-    rdsdebug("rds_ib_send_add_credits(%u): current=%u%s\n",
+    rdsdebug("credits=%u current=%u%s\n",
             credits,
             IB_GET_SEND_CREDITS(atomic_read(&ic->i_credits)),
             test_bit(RDS_LL_SEND_FULL, &conn->c_flags) ? ", ll_send_full" : "");
diff --git a/net/rds/iw_cm.c b/net/rds/iw_cm.c
index a91e1db62ee6..a6c2bea9f8f9 100644
--- a/net/rds/iw_cm.c
+++ b/net/rds/iw_cm.c
@@ -590,8 +590,8 @@ void rds_iw_conn_shutdown(struct rds_connection *conn)
             /* Actually this may happen quite frequently, when
              * an outgoing connect raced with an incoming connect.
              */
-            rdsdebug("rds_iw_conn_shutdown: failed to disconnect,"
-                 " cm: %p err %d\n", ic->i_cm_id, err);
+            rdsdebug("failed to disconnect, cm: %p err %d\n",
+                 ic->i_cm_id, err);
         }

         if (ic->i_cm_id->qp) {
diff --git a/net/rds/iw_send.c b/net/rds/iw_send.c
index 9105ea03aec5..13834780a308 100644
--- a/net/rds/iw_send.c
+++ b/net/rds/iw_send.c
@@ -361,7 +361,7 @@ try_again:
     posted = IB_GET_POST_CREDITS(oldval);
     avail = IB_GET_SEND_CREDITS(oldval);

-    rdsdebug("rds_iw_send_grab_credits(%u): credits=%u posted=%u\n",
+    rdsdebug("wanted=%u credits=%u posted=%u\n",
             wanted, avail, posted);

     /* The last credit must be used to send a credit update. */
@@ -405,7 +405,7 @@ void rds_iw_send_add_credits(struct rds_connection *conn, unsigned int credits)
     if (credits == 0)
         return;

-    rdsdebug("rds_iw_send_add_credits(%u): current=%u%s\n",
+    rdsdebug("credits=%u current=%u%s\n",
             credits,
             IB_GET_SEND_CREDITS(atomic_read(&ic->i_credits)),
             test_bit(RDS_LL_SEND_FULL, &conn->c_flags) ? ", ll_send_full" : "");
diff --git a/net/rds/message.c b/net/rds/message.c
index 5a21e6f5986f..756c73729126 100644
--- a/net/rds/message.c
+++ b/net/rds/message.c
@@ -266,7 +266,7 @@ struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned in

 int rds_message_copy_from_user(struct rds_message *rm, struct iov_iter *from)
 {
-    unsigned long to_copy;
+    unsigned long to_copy, nbytes;
     unsigned long sg_off;
     struct scatterlist *sg;
     int ret = 0;
@@ -293,9 +293,9 @@ int rds_message_copy_from_user(struct rds_message *rm, struct iov_iter *from)
             sg->length - sg_off);

         rds_stats_add(s_copy_from_user, to_copy);
-        ret = copy_page_from_iter(sg_page(sg), sg->offset + sg_off,
-                      to_copy, from);
-        if (ret != to_copy)
+        nbytes = copy_page_from_iter(sg_page(sg), sg->offset + sg_off,
+                         to_copy, from);
+        if (nbytes != to_copy)
             return -EFAULT;

         sg_off += to_copy;
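
The rds_message_copy_from_user() change keeps the byte count out of the int
return variable: copy_page_from_iter() returns the number of bytes actually
copied, and a short copy should surface as -EFAULT rather than leak into the
function's return value. A userspace sketch of the pattern (copy_stub() is a
hypothetical stand-in for the iterator copy):

    #include <assert.h>
    #include <errno.h>
    #include <stddef.h>
    #include <string.h>

    /* Hypothetical stand-in for copy_page_from_iter(): returns how
     * many bytes it managed to copy, which may be short. */
    static size_t copy_stub(void *dst, const void *src, size_t want,
                            size_t avail)
    {
        size_t n = want < avail ? want : avail;

        memcpy(dst, src, n);
        return n;
    }

    int main(void)
    {
        char src[8] = "payload", dst[8];
        size_t to_copy = sizeof(src), nbytes;
        int ret = 0;

        /* Track the byte count in its own variable instead of
         * reusing 'ret', so a short copy maps cleanly to -EFAULT. */
        nbytes = copy_stub(dst, src, to_copy, 4 /* short on purpose */);
        if (nbytes != to_copy)
            ret = -EFAULT;

        assert(ret == -EFAULT);
        return 0;
    }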