 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c |  10
 drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h         |   4
 drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c            |   9
 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c                 |   2
 drivers/net/ethernet/intel/iavf/iavf.h                       |   1
 drivers/net/ethernet/intel/iavf/iavf_ethtool.c               |  30
 drivers/net/ethernet/intel/iavf/iavf_main.c                  |  55
 drivers/net/ethernet/lantiq_etop.c                           |  20
 drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c          |  24
 drivers/net/ipa/ipa_endpoint.c                               |   5
 drivers/net/tun.c                                            |   5
 include/linux/bpf.h                                          |   3
 include/linux/skbuff.h                                       |   2
 kernel/bpf/cgroup.c                                          |   2
 kernel/bpf/helpers.c                                         |   2
 kernel/bpf/syscall.c                                         |  57
 kernel/bpf/verifier.c                                        |  27
 kernel/trace/bpf_trace.c                                     |   2
 net/core/filter.c                                            |   6
 net/core/sock.c                                              |   6
 net/ipv4/bpf_tcp_ca.c                                        |   2
 net/ipv4/devinet.c                                           |   2
 net/ipv4/udp.c                                               |  11
 net/smc/af_smc.c                                             |  14
 net/smc/smc_core.c                                           |   3
 net/tipc/crypto.c                                            |   8
 net/tipc/link.c                                              |   7
 net/xdp/xsk_buff_pool.c                                      |   7
 samples/bpf/hbm_kern.h                                       |   2
 samples/bpf/xdp_redirect_cpu_user.c                          |   5
 samples/bpf/xdp_sample_user.c                                |  28
 tools/bpf/runqslower/Makefile                                |   3
 tools/lib/bpf/bpf_gen_internal.h                             |   4
 tools/lib/bpf/gen_loader.c                                   |  47
 tools/lib/bpf/libbpf.c                                       |   4
 tools/testing/selftests/bpf/Makefile                         |   2
 tools/testing/selftests/bpf/prog_tests/helper_restricted.c   |  33
 tools/testing/selftests/bpf/progs/test_helper_restricted.c   | 123
 tools/testing/selftests/bpf/test_verifier.c                  |  46
 tools/testing/selftests/bpf/verifier/helper_restricted.c     | 196
 tools/testing/selftests/bpf/verifier/map_in_map.c            |  34
 41 files changed, 735 insertions(+), 118 deletions(-)
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
index fc0e66006644..3f1704cbe1cb 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
@@ -559,6 +559,11 @@ int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self,
goto err_exit;
if (fw.len == 0xFFFFU) {
+ if (sw.len > sizeof(self->rpc)) {
+ printk(KERN_INFO "Invalid sw len: %x\n", sw.len);
+ err = -EINVAL;
+ goto err_exit;
+ }
err = hw_atl_utils_fw_rpc_call(self, sw.len);
if (err < 0)
goto err_exit;
@@ -567,6 +572,11 @@ int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self,
if (rpc) {
if (fw.len) {
+ if (fw.len > sizeof(self->rpc)) {
+ printk(KERN_INFO "Invalid fw len: %x\n", fw.len);
+ err = -EINVAL;
+ goto err_exit;
+ }
err =
hw_atl_utils_fw_downld_dwords(self,
self->rpc_addr,
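
The two hunks above apply the same rule: a length reported back by firmware is untrusted input and must be checked against the fixed-size destination before any copy. A minimal user-space sketch of that guard (RPC_BUF_SIZE and copy_fw_payload are illustrative names, not the driver's):

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    #define RPC_BUF_SIZE 1024

    /* Copy a firmware-reported payload only if its claimed length
     * fits the fixed-size destination; otherwise reject it. */
    static int copy_fw_payload(char *dst, const char *src, unsigned int fw_len)
    {
        if (fw_len > RPC_BUF_SIZE)
            return -EINVAL;    /* out-of-bounds length from the device */
        memcpy(dst, src, fw_len);
        return 0;
    }

    int main(void)
    {
        char rpc[RPC_BUF_SIZE];
        char payload[16] = "hello";

        if (copy_fw_payload(rpc, payload, 0xFFFFu) == -EINVAL)
            puts("rejected oversized length");
        if (copy_fw_payload(rpc, payload, sizeof(payload)) == 0)
            puts("copied payload");
        return 0;
    }

The driver's version feeds -EINVAL through its usual err_exit path; the sketch just returns it directly.
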
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h
index 1835d2e451c0..fc7fce642666 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h
@@ -635,11 +635,13 @@ static int bnx2x_ilt_client_mem_op(struct bnx2x *bp, int cli_num,
{
int i, rc;
struct bnx2x_ilt *ilt = BP_ILT(bp);
- struct ilt_client_info *ilt_cli = &ilt->clients[cli_num];
+ struct ilt_client_info *ilt_cli;
if (!ilt || !ilt->lines)
return -1;
+ ilt_cli = &ilt->clients[cli_num];
+
if (ilt_cli->flags & (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM))
return 0;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index 5c464ea73576..6fe9e9b59f83 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -360,7 +360,7 @@ bnxt_dl_livepatch_report_err(struct bnxt *bp, struct netlink_ext_ack *extack,
NL_SET_ERR_MSG_MOD(extack, "Live patch already applied");
break;
default:
- netdev_err(bp->dev, "Unexpected live patch error: %hhd\n", err);
+ netdev_err(bp->dev, "Unexpected live patch error: %d\n", err);
NL_SET_ERR_MSG_MOD(extack, "Failed to activate live patch");
break;
}
@@ -441,12 +441,13 @@ static int bnxt_dl_reload_down(struct devlink *dl, bool netns_change,
switch (action) {
case DEVLINK_RELOAD_ACTION_DRIVER_REINIT: {
- if (BNXT_PF(bp) && bp->pf.active_vfs) {
+ rtnl_lock();
+ if (BNXT_PF(bp) && (bp->pf.active_vfs || bp->sriov_cfg)) {
NL_SET_ERR_MSG_MOD(extack,
- "reload is unsupported when VFs are allocated");
+ "reload is unsupported while VFs are allocated or being configured");
+ rtnl_unlock();
return -EOPNOTSUPP;
}
- rtnl_lock();
if (bp->dev->reg_state == NETREG_UNREGISTERED) {
rtnl_unlock();
return -ENODEV;
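
The reload_down change is a check-then-act race fix: active_vfs was tested before rtnl_lock() was taken, so VFs could be allocated between the test and the locked reinit. The general shape of the fix, sketched with pthreads (all names here are illustrative):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t cfg_lock = PTHREAD_MUTEX_INITIALIZER;
    static int active_vfs;

    /* Racy shape (before the fix): the test runs unlocked, so VFs can
     * appear between the check and the locked reinit. */
    static int reload_racy(void)
    {
        if (active_vfs)
            return -1;
        pthread_mutex_lock(&cfg_lock);
        /* ... reinit, possibly with VFs allocated meanwhile ... */
        pthread_mutex_unlock(&cfg_lock);
        return 0;
    }

    /* Fixed shape: take the lock first, then test and act under it. */
    static int reload_safe(void)
    {
        pthread_mutex_lock(&cfg_lock);
        if (active_vfs) {
            pthread_mutex_unlock(&cfg_lock);
            return -1;
        }
        /* ... reinit ... */
        pthread_mutex_unlock(&cfg_lock);
        return 0;
    }

    int main(void)
    {
        active_vfs = 1;
        printf("racy=%d safe=%d\n", reload_racy(), reload_safe());
        return 0;
    }
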
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
index e6a4a768b10b..1471b6130a2b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -1868,7 +1868,7 @@ static int bnxt_tc_setup_indr_block_cb(enum tc_setup_type type,
struct flow_cls_offload *flower = type_data;
struct bnxt *bp = priv->bp;
- if (flower->common.chain_index)
+ if (!tc_cls_can_offload_and_chain0(bp->dev, type_data))
return -EOPNOTSUPP;
switch (type) {
diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
index e6e7c1da47fb..75635bd57cf6 100644
--- a/drivers/net/ethernet/intel/iavf/iavf.h
+++ b/drivers/net/ethernet/intel/iavf/iavf.h
@@ -39,6 +39,7 @@
#include "iavf_txrx.h"
#include "iavf_fdir.h"
#include "iavf_adv_rss.h"
+#include <linux/bitmap.h>
#define DEFAULT_DEBUG_LEVEL_SHIFT 3
#define PFX "iavf: "
diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
index 5a359a0a20ec..144a77679359 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
@@ -1776,6 +1776,7 @@ static int iavf_set_channels(struct net_device *netdev,
{
struct iavf_adapter *adapter = netdev_priv(netdev);
u32 num_req = ch->combined_count;
+ int i;
if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
adapter->num_tc) {
@@ -1786,7 +1787,7 @@ static int iavf_set_channels(struct net_device *netdev,
/* All of these should have already been checked by ethtool before this
* even gets to us, but just to be sure.
*/
- if (num_req > adapter->vsi_res->num_queue_pairs)
+ if (num_req == 0 || num_req > adapter->vsi_res->num_queue_pairs)
return -EINVAL;
if (num_req == adapter->num_active_queues)
@@ -1798,6 +1799,20 @@ static int iavf_set_channels(struct net_device *netdev,
adapter->num_req_queues = num_req;
adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED;
iavf_schedule_reset(adapter);
+
+ /* wait until the reset is done */
+ for (i = 0; i < IAVF_RESET_WAIT_COMPLETE_COUNT; i++) {
+ msleep(IAVF_RESET_WAIT_MS);
+ if (adapter->flags & IAVF_FLAG_RESET_PENDING)
+ continue;
+ break;
+ }
+ if (i == IAVF_RESET_WAIT_COMPLETE_COUNT) {
+ adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
+ adapter->num_active_queues = num_req;
+ return -EOPNOTSUPP;
+ }
+
return 0;
}
@@ -1844,14 +1859,13 @@ static int iavf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
if (hfunc)
*hfunc = ETH_RSS_HASH_TOP;
- if (!indir)
- return 0;
-
- memcpy(key, adapter->rss_key, adapter->rss_key_size);
+ if (key)
+ memcpy(key, adapter->rss_key, adapter->rss_key_size);
- /* Each 32 bits pointed by 'indir' is stored with a lut entry */
- for (i = 0; i < adapter->rss_lut_size; i++)
- indir[i] = (u32)adapter->rss_lut[i];
+ if (indir)
+ /* Each 32 bits pointed by 'indir' is stored with a lut entry */
+ for (i = 0; i < adapter->rss_lut_size; i++)
+ indir[i] = (u32)adapter->rss_lut[i];
return 0;
}
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 847d67e32a54..336e6bf95e48 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -697,6 +697,23 @@ static void iavf_del_vlan(struct iavf_adapter *adapter, u16 vlan)
}
/**
+ * iavf_restore_filters
+ * @adapter: board private structure
+ *
+ * Restore existing non-MAC filters when the VF netdev comes back up
+ **/
+static void iavf_restore_filters(struct iavf_adapter *adapter)
+{
+ /* re-add all VLAN filters */
+ if (VLAN_ALLOWED(adapter)) {
+ u16 vid;
+
+ for_each_set_bit(vid, adapter->vsi.active_vlans, VLAN_N_VID)
+ iavf_add_vlan(adapter, vid);
+ }
+}
+
+/**
* iavf_vlan_rx_add_vid - Add a VLAN filter to a device
* @netdev: network device struct
* @proto: unused protocol data
@@ -709,8 +726,11 @@ static int iavf_vlan_rx_add_vid(struct net_device *netdev,
if (!VLAN_ALLOWED(adapter))
return -EIO;
+
if (iavf_add_vlan(adapter, vid) == NULL)
return -ENOMEM;
+
+ set_bit(vid, adapter->vsi.active_vlans);
return 0;
}
@@ -725,11 +745,13 @@ static int iavf_vlan_rx_kill_vid(struct net_device *netdev,
{
struct iavf_adapter *adapter = netdev_priv(netdev);
- if (VLAN_ALLOWED(adapter)) {
- iavf_del_vlan(adapter, vid);
- return 0;
- }
- return -EIO;
+ if (!VLAN_ALLOWED(adapter))
+ return -EIO;
+
+ iavf_del_vlan(adapter, vid);
+ clear_bit(vid, adapter->vsi.active_vlans);
+
+ return 0;
}
/**
@@ -1639,8 +1661,7 @@ static int iavf_process_aq_command(struct iavf_adapter *adapter)
iavf_set_promiscuous(adapter, FLAG_VF_MULTICAST_PROMISC);
return 0;
}
-
- if ((adapter->aq_required & IAVF_FLAG_AQ_RELEASE_PROMISC) &&
+ if ((adapter->aq_required & IAVF_FLAG_AQ_RELEASE_PROMISC) ||
(adapter->aq_required & IAVF_FLAG_AQ_RELEASE_ALLMULTI)) {
iavf_set_promiscuous(adapter, 0);
return 0;
@@ -2123,8 +2144,8 @@ static void iavf_disable_vf(struct iavf_adapter *adapter)
iavf_free_misc_irq(adapter);
iavf_reset_interrupt_capability(adapter);
- iavf_free_queues(adapter);
iavf_free_q_vectors(adapter);
+ iavf_free_queues(adapter);
memset(adapter->vf_res, 0, IAVF_VIRTCHNL_VF_RESOURCE_SIZE);
iavf_shutdown_adminq(&adapter->hw);
adapter->netdev->flags &= ~IFF_UP;
@@ -2410,7 +2431,7 @@ static void iavf_adminq_task(struct work_struct *work)
/* check for error indications */
val = rd32(hw, hw->aq.arq.len);
- if (val == 0xdeadbeef) /* indicates device in reset */
+ if (val == 0xdeadbeef || val == 0xffffffff) /* device in reset */
goto freedom;
oldval = val;
if (val & IAVF_VF_ARQLEN1_ARQVFE_MASK) {
@@ -3095,8 +3116,10 @@ static int iavf_configure_clsflower(struct iavf_adapter *adapter,
return -ENOMEM;
while (!mutex_trylock(&adapter->crit_lock)) {
- if (--count == 0)
- goto err;
+ if (--count == 0) {
+ kfree(filter);
+ return err;
+ }
udelay(1);
}
@@ -3107,11 +3130,11 @@ static int iavf_configure_clsflower(struct iavf_adapter *adapter,
/* start out with flow type and eth type IPv4 to begin with */
filter->f.flow_type = VIRTCHNL_TCP_V4_FLOW;
err = iavf_parse_cls_flower(adapter, cls_flower, filter);
- if (err < 0)
+ if (err)
goto err;
err = iavf_handle_tclass(adapter, tc, filter);
- if (err < 0)
+ if (err)
goto err;
/* add filter to the list */
@@ -3308,6 +3331,9 @@ static int iavf_open(struct net_device *netdev)
spin_unlock_bh(&adapter->mac_vlan_list_lock);
+ /* Restore VLAN filters that were removed with IFF_DOWN */
+ iavf_restore_filters(adapter);
+
iavf_configure(adapter);
iavf_up_complete(adapter);
@@ -3503,7 +3529,8 @@ static netdev_features_t iavf_fix_features(struct net_device *netdev,
{
struct iavf_adapter *adapter = netdev_priv(netdev);
- if (!(adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN))
+ if (adapter->vf_res &&
+ !(adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN))
features &= ~(NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_CTAG_FILTER);
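
The iavf changes record VLAN IDs in a bitmap (set_bit/clear_bit in the add/kill handlers above) so iavf_open() can replay them with for_each_set_bit() after the interface has been down. A user-space sketch of the same record-and-replay idea, with a hand-rolled bitmap standing in for the kernel helpers:

    #include <stdio.h>

    #define VLAN_N_VID 4096
    #define BITS_PER_LONG (8 * sizeof(unsigned long))

    static unsigned long active_vlans[VLAN_N_VID / BITS_PER_LONG];

    static void vlan_set(unsigned int vid)
    {
        active_vlans[vid / BITS_PER_LONG] |= 1UL << (vid % BITS_PER_LONG);
    }

    static void vlan_clear(unsigned int vid)
    {
        active_vlans[vid / BITS_PER_LONG] &= ~(1UL << (vid % BITS_PER_LONG));
    }

    /* Replay every recorded VLAN filter, as iavf_restore_filters()
     * does with for_each_set_bit(). */
    static void restore_filters(void)
    {
        for (unsigned int vid = 0; vid < VLAN_N_VID; vid++)
            if (active_vlans[vid / BITS_PER_LONG] & (1UL << (vid % BITS_PER_LONG)))
                printf("re-adding VLAN %u\n", vid);
    }

    int main(void)
    {
        vlan_set(10);
        vlan_set(100);
        vlan_clear(10);
        restore_filters();    /* prints only VLAN 100 */
        return 0;
    }
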
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index 6433c909c6b2..072391c494ce 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -25,6 +25,7 @@
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
+#include <linux/property.h>
#include <asm/checksum.h>
@@ -239,6 +240,7 @@ ltq_etop_hw_init(struct net_device *dev)
{
struct ltq_etop_priv *priv = netdev_priv(dev);
int i;
+ int err;
ltq_pmu_enable(PMU_PPE);
@@ -273,7 +275,13 @@ ltq_etop_hw_init(struct net_device *dev)
if (IS_TX(i)) {
ltq_dma_alloc_tx(&ch->dma);
- request_irq(irq, ltq_etop_dma_irq, 0, "etop_tx", priv);
+ err = request_irq(irq, ltq_etop_dma_irq, 0, "etop_tx", priv);
+ if (err) {
+ netdev_err(dev,
+ "Unable to get Tx DMA IRQ %d\n",
+ irq);
+ return err;
+ }
} else if (IS_RX(i)) {
ltq_dma_alloc_rx(&ch->dma);
for (ch->dma.desc = 0; ch->dma.desc < LTQ_DESC_NUM;
@@ -281,7 +289,13 @@ ltq_etop_hw_init(struct net_device *dev)
if (ltq_etop_alloc_skb(ch))
return -ENOMEM;
ch->dma.desc = 0;
- request_irq(irq, ltq_etop_dma_irq, 0, "etop_rx", priv);
+ err = request_irq(irq, ltq_etop_dma_irq, 0, "etop_rx", priv);
+ if (err) {
+ netdev_err(dev,
+ "Unable to get Rx DMA IRQ %d\n",
+ irq);
+ return err;
+ }
}
ch->dma.irq = irq;
}
@@ -726,7 +740,7 @@ static struct platform_driver ltq_mii_driver = {
},
};
-int __init
+static int __init
init_ltq_etop(void)
{
int ret = platform_driver_probe(&ltq_mii_driver, ltq_etop_probe);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
index 85208128f135..b7c2579c963b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
@@ -485,8 +485,28 @@ static int socfpga_dwmac_resume(struct device *dev)
}
#endif /* CONFIG_PM_SLEEP */
-static SIMPLE_DEV_PM_OPS(socfpga_dwmac_pm_ops, stmmac_suspend,
- socfpga_dwmac_resume);
+static int __maybe_unused socfpga_dwmac_runtime_suspend(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+
+ stmmac_bus_clks_config(priv, false);
+
+ return 0;
+}
+
+static int __maybe_unused socfpga_dwmac_runtime_resume(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+
+ return stmmac_bus_clks_config(priv, true);
+}
+
+static const struct dev_pm_ops socfpga_dwmac_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(stmmac_suspend, socfpga_dwmac_resume)
+ SET_RUNTIME_PM_OPS(socfpga_dwmac_runtime_suspend, socfpga_dwmac_runtime_resume, NULL)
+};
static const struct socfpga_dwmac_ops socfpga_gen5_ops = {
.set_phy_mode = socfpga_gen5_set_phy_mode,
diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c
index 5528d97110d5..ef790fd0ab56 100644
--- a/drivers/net/ipa/ipa_endpoint.c
+++ b/drivers/net/ipa/ipa_endpoint.c
@@ -853,6 +853,7 @@ static void ipa_endpoint_init_hol_block_timer(struct ipa_endpoint *endpoint,
u32 offset;
u32 val;
+ /* This should only be changed when HOL_BLOCK_EN is disabled */
offset = IPA_REG_ENDP_INIT_HOL_BLOCK_TIMER_N_OFFSET(endpoint_id);
val = hol_block_timer_val(ipa, microseconds);
iowrite32(val, ipa->reg_virt + offset);
@@ -868,6 +869,9 @@ ipa_endpoint_init_hol_block_enable(struct ipa_endpoint *endpoint, bool enable)
val = enable ? HOL_BLOCK_EN_FMASK : 0;
offset = IPA_REG_ENDP_INIT_HOL_BLOCK_EN_N_OFFSET(endpoint_id);
iowrite32(val, endpoint->ipa->reg_virt + offset);
+ /* When enabling, the register must be written twice for IPA v4.5+ */
+ if (enable && endpoint->ipa->version >= IPA_VERSION_4_5)
+ iowrite32(val, endpoint->ipa->reg_virt + offset);
}
void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa)
@@ -880,6 +884,7 @@ void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa)
if (endpoint->toward_ipa || endpoint->ee_id != GSI_EE_MODEM)
continue;
+ ipa_endpoint_init_hol_block_enable(endpoint, false);
ipa_endpoint_init_hol_block_timer(endpoint, 0);
ipa_endpoint_init_hol_block_enable(endpoint, true);
}
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index fecc9a1d293a..1572878c3403 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1010,6 +1010,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct tun_struct *tun = netdev_priv(dev);
int txq = skb->queue_mapping;
+ struct netdev_queue *queue;
struct tun_file *tfile;
int len = skb->len;
@@ -1054,6 +1055,10 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
if (ptr_ring_produce(&tfile->tx_ring, skb))
goto drop;
+ /* NETIF_F_LLTX requires us to do our own update of trans_start */
+ queue = netdev_get_tx_queue(dev, txq);
+ queue->trans_start = jiffies;
+
/* Notify and wake up reader process */
if (tfile->flags & TUN_FASYNC)
kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index f715e8863f4d..e7a163a3146b 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -193,7 +193,7 @@ struct bpf_map {
atomic64_t usercnt;
struct work_struct work;
struct mutex freeze_mutex;
- u64 writecnt; /* writable mmap cnt; protected by freeze_mutex */
+ atomic64_t writecnt;
};
static inline bool map_value_has_spin_lock(const struct bpf_map *map)
@@ -1419,6 +1419,7 @@ void bpf_map_put(struct bpf_map *map);
void *bpf_map_area_alloc(u64 size, int numa_node);
void *bpf_map_area_mmapable_alloc(u64 size, int numa_node);
void bpf_map_area_free(void *base);
+bool bpf_map_write_active(const struct bpf_map *map);
void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);
int generic_map_lookup_batch(struct bpf_map *map,
const union bpf_attr *attr,
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 686a666d073d..c8cb7e697d47 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -4226,7 +4226,7 @@ static inline void skb_remcsum_process(struct sk_buff *skb, void *ptr,
return;
}
- if (unlikely(skb->ip_summed != CHECKSUM_COMPLETE)) {
+ if (unlikely(skb->ip_summed != CHECKSUM_COMPLETE)) {
__skb_checksum_complete(skb);
skb_postpull_rcsum(skb, skb->data, ptr - (void *)skb->data);
}
diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c
index 2ca643af9a54..43eb3501721b 100644
--- a/kernel/bpf/cgroup.c
+++ b/kernel/bpf/cgroup.c
@@ -1809,6 +1809,8 @@ sysctl_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_sysctl_get_new_value_proto;
case BPF_FUNC_sysctl_set_new_value:
return &bpf_sysctl_set_new_value_proto;
+ case BPF_FUNC_ktime_get_coarse_ns:
+ return &bpf_ktime_get_coarse_ns_proto;
default:
return cgroup_base_func_proto(func_id, prog);
}
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index 1ffd469c217f..649f07623df6 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -1364,8 +1364,6 @@ bpf_base_func_proto(enum bpf_func_id func_id)
return &bpf_ktime_get_ns_proto;
case BPF_FUNC_ktime_get_boot_ns:
return &bpf_ktime_get_boot_ns_proto;
- case BPF_FUNC_ktime_get_coarse_ns:
- return &bpf_ktime_get_coarse_ns_proto;
case BPF_FUNC_ringbuf_output:
return &bpf_ringbuf_output_proto;
case BPF_FUNC_ringbuf_reserve:
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 50f96ea4452a..1033ee8c0caf 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -132,6 +132,21 @@ static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
return map;
}
+static void bpf_map_write_active_inc(struct bpf_map *map)
+{
+ atomic64_inc(&map->writecnt);
+}
+
+static void bpf_map_write_active_dec(struct bpf_map *map)
+{
+ atomic64_dec(&map->writecnt);
+}
+
+bool bpf_map_write_active(const struct bpf_map *map)
+{
+ return atomic64_read(&map->writecnt) != 0;
+}
+
static u32 bpf_map_value_size(const struct bpf_map *map)
{
if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
@@ -601,11 +616,8 @@ static void bpf_map_mmap_open(struct vm_area_struct *vma)
{
struct bpf_map *map = vma->vm_file->private_data;
- if (vma->vm_flags & VM_MAYWRITE) {
- mutex_lock(&map->freeze_mutex);
- map->writecnt++;
- mutex_unlock(&map->freeze_mutex);
- }
+ if (vma->vm_flags & VM_MAYWRITE)
+ bpf_map_write_active_inc(map);
}
/* called for all unmapped memory region (including initial) */
@@ -613,11 +625,8 @@ static void bpf_map_mmap_close(struct vm_area_struct *vma)
{
struct bpf_map *map = vma->vm_file->private_data;
- if (vma->vm_flags & VM_MAYWRITE) {
- mutex_lock(&map->freeze_mutex);
- map->writecnt--;
- mutex_unlock(&map->freeze_mutex);
- }
+ if (vma->vm_flags & VM_MAYWRITE)
+ bpf_map_write_active_dec(map);
}
static const struct vm_operations_struct bpf_map_default_vmops = {
@@ -668,7 +677,7 @@ static int bpf_map_mmap(struct file *filp, struct vm_area_struct *vma)
goto out;
if (vma->vm_flags & VM_MAYWRITE)
- map->writecnt++;
+ bpf_map_write_active_inc(map);
out:
mutex_unlock(&map->freeze_mutex);
return err;
@@ -1139,6 +1148,7 @@ static int map_update_elem(union bpf_attr *attr, bpfptr_t uattr)
map = __bpf_map_get(f);
if (IS_ERR(map))
return PTR_ERR(map);
+ bpf_map_write_active_inc(map);
if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
err = -EPERM;
goto err_put;
@@ -1174,6 +1184,7 @@ free_value:
free_key:
kvfree(key);
err_put:
+ bpf_map_write_active_dec(map);
fdput(f);
return err;
}
@@ -1196,6 +1207,7 @@ static int map_delete_elem(union bpf_attr *attr)
map = __bpf_map_get(f);
if (IS_ERR(map))
return PTR_ERR(map);
+ bpf_map_write_active_inc(map);
if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
err = -EPERM;
goto err_put;
@@ -1226,6 +1238,7 @@ static int map_delete_elem(union bpf_attr *attr)
out:
kvfree(key);
err_put:
+ bpf_map_write_active_dec(map);
fdput(f);
return err;
}
@@ -1533,6 +1546,7 @@ static int map_lookup_and_delete_elem(union bpf_attr *attr)
map = __bpf_map_get(f);
if (IS_ERR(map))
return PTR_ERR(map);
+ bpf_map_write_active_inc(map);
if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ) ||
!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
err = -EPERM;
@@ -1597,6 +1611,7 @@ free_value:
free_key:
kvfree(key);
err_put:
+ bpf_map_write_active_dec(map);
fdput(f);
return err;
}
@@ -1624,8 +1639,7 @@ static int map_freeze(const union bpf_attr *attr)
}
mutex_lock(&map->freeze_mutex);
-
- if (map->writecnt) {
+ if (bpf_map_write_active(map)) {
err = -EBUSY;
goto err_put;
}
@@ -4171,6 +4185,9 @@ static int bpf_map_do_batch(const union bpf_attr *attr,
union bpf_attr __user *uattr,
int cmd)
{
+ bool has_read = cmd == BPF_MAP_LOOKUP_BATCH ||
+ cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH;
+ bool has_write = cmd != BPF_MAP_LOOKUP_BATCH;
struct bpf_map *map;
int err, ufd;
struct fd f;
@@ -4183,16 +4200,13 @@ static int bpf_map_do_batch(const union bpf_attr *attr,
map = __bpf_map_get(f);
if (IS_ERR(map))
return PTR_ERR(map);
-
- if ((cmd == BPF_MAP_LOOKUP_BATCH ||
- cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH) &&
- !(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
+ if (has_write)
+ bpf_map_write_active_inc(map);
+ if (has_read && !(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
err = -EPERM;
goto err_put;
}
-
- if (cmd != BPF_MAP_LOOKUP_BATCH &&
- !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
+ if (has_write && !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
err = -EPERM;
goto err_put;
}
@@ -4205,8 +4219,9 @@ static int bpf_map_do_batch(const union bpf_attr *attr,
BPF_DO_BATCH(map->ops->map_update_batch);
else
BPF_DO_BATCH(map->ops->map_delete_batch);
-
err_put:
+ if (has_write)
+ bpf_map_write_active_dec(map);
fdput(f);
return err;
}
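
The syscall.c changes replace the mutex-protected writecnt with an atomic counter held across each whole write operation, so map_freeze() and the verifier can ask "is any write in flight?" without taking freeze_mutex. A C11 sketch of the idea (the kernel uses atomic64_t; try_freeze stands in for map_freeze):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_llong writecnt = 0;

    static void write_begin(void) { atomic_fetch_add(&writecnt, 1); }
    static void write_end(void)   { atomic_fetch_sub(&writecnt, 1); }
    static bool write_active(void) { return atomic_load(&writecnt) != 0; }

    static int try_freeze(void)
    {
        if (write_active())
            return -1;    /* the kernel returns -EBUSY here */
        puts("frozen");
        return 0;
    }

    int main(void)
    {
        write_begin();    /* a map_update_elem() in flight */
        if (try_freeze() < 0)
            puts("freeze refused: write in flight");
        write_end();      /* update finished */
        return try_freeze();    /* succeeds now */
    }

The check alone is still racy against a writer that starts immediately afterwards; the kernel compensates by re-testing bpf_map_write_active() in bpf_map_is_rdonly(), which is exactly the verifier.c hunk below.
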
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 890b3ec375a3..50efda51515b 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -1151,7 +1151,8 @@ static void mark_ptr_not_null_reg(struct bpf_reg_state *reg)
/* transfer reg's id which is unique for every map_lookup_elem
* as UID of the inner map.
*/
- reg->map_uid = reg->id;
+ if (map_value_has_timer(map->inner_map_meta))
+ reg->map_uid = reg->id;
} else if (map->map_type == BPF_MAP_TYPE_XSKMAP) {
reg->type = PTR_TO_XDP_SOCK;
} else if (map->map_type == BPF_MAP_TYPE_SOCKMAP ||
@@ -4055,7 +4056,22 @@ static void coerce_reg_to_size(struct bpf_reg_state *reg, int size)
static bool bpf_map_is_rdonly(const struct bpf_map *map)
{
- return (map->map_flags & BPF_F_RDONLY_PROG) && map->frozen;
+ /* A map is considered read-only if the following conditions are true:
+ *
+ * 1) BPF program side cannot change any of the map content. The
+ * BPF_F_RDONLY_PROG flag is set at map creation time and holds
+ * for the whole lifetime of the map.
+ * 2) The map value(s) have been initialized from user space by a
+ * loader and then "frozen", such that no new map update/delete
+ * operations from syscall side are possible for the rest of
+ * the map's lifetime from that point onwards.
+ * 3) Any parallel/pending map update/delete operations from syscall
+ * side have been completed. Only after that point, it's safe to
+ * assume that map value(s) are immutable.
+ */
+ return (map->map_flags & BPF_F_RDONLY_PROG) &&
+ READ_ONCE(map->frozen) &&
+ !bpf_map_write_active(map);
}
static int bpf_map_direct_read(struct bpf_map *map, int off, int size, u64 *val)
@@ -11631,6 +11647,13 @@ static int check_map_prog_compatibility(struct bpf_verifier_env *env,
}
}
+ if (map_value_has_timer(map)) {
+ if (is_tracing_prog_type(prog_type)) {
+ verbose(env, "tracing progs cannot use bpf_timer yet\n");
+ return -EINVAL;
+ }
+ }
+
if ((bpf_prog_is_dev_bound(prog->aux) || bpf_map_is_dev_bound(map)) &&
!bpf_offload_prog_map_match(prog, map)) {
verbose(env, "offload device mismatch between prog and map\n");
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 7396488793ff..ae9755037b7e 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -1111,8 +1111,6 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_ktime_get_ns_proto;
case BPF_FUNC_ktime_get_boot_ns:
return &bpf_ktime_get_boot_ns_proto;
- case BPF_FUNC_ktime_get_coarse_ns:
- return &bpf_ktime_get_coarse_ns_proto;
case BPF_FUNC_tail_call:
return &bpf_tail_call_proto;
case BPF_FUNC_get_current_pid_tgid:
diff --git a/net/core/filter.c b/net/core/filter.c
index e471c9b09670..6102f093d59a 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -7162,6 +7162,8 @@ sock_filter_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
#endif
case BPF_FUNC_sk_storage_get:
return &bpf_sk_storage_get_cg_sock_proto;
+ case BPF_FUNC_ktime_get_coarse_ns:
+ return &bpf_ktime_get_coarse_ns_proto;
default:
return bpf_base_func_proto(func_id);
}
@@ -10327,6 +10329,8 @@ sk_reuseport_func_proto(enum bpf_func_id func_id,
return &sk_reuseport_load_bytes_relative_proto;
case BPF_FUNC_get_socket_cookie:
return &bpf_get_socket_ptr_cookie_proto;
+ case BPF_FUNC_ktime_get_coarse_ns:
+ return &bpf_ktime_get_coarse_ns_proto;
default:
return bpf_base_func_proto(func_id);
}
@@ -10833,6 +10837,8 @@ bpf_sk_base_func_proto(enum bpf_func_id func_id)
case BPF_FUNC_skc_to_unix_sock:
func = &bpf_skc_to_unix_sock_proto;
break;
+ case BPF_FUNC_ktime_get_coarse_ns:
+ return &bpf_ktime_get_coarse_ns_proto;
default:
return bpf_base_func_proto(func_id);
}
diff --git a/net/core/sock.c b/net/core/sock.c
index 8f2b2f2c0e7b..41e91d0f7061 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -2124,8 +2124,10 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
newsk->sk_prot_creator = prot;
/* SANITY */
- if (likely(newsk->sk_net_refcnt))
+ if (likely(newsk->sk_net_refcnt)) {
get_net(sock_net(newsk));
+ sock_inuse_add(sock_net(newsk), 1);
+ }
sk_node_init(&newsk->sk_node);
sock_lock_init(newsk);
bh_lock_sock(newsk);
@@ -2197,8 +2199,6 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
newsk->sk_err_soft = 0;
newsk->sk_priority = 0;
newsk->sk_incoming_cpu = raw_smp_processor_id();
- if (likely(newsk->sk_net_refcnt))
- sock_inuse_add(sock_net(newsk), 1);
/* Before updating sk_refcnt, we must commit prior changes to memory
* (Documentation/RCU/rculist_nulls.rst for details)
diff --git a/net/ipv4/bpf_tcp_ca.c b/net/ipv4/bpf_tcp_ca.c
index 2cf02b4d77fb..4bb9401b0a3f 100644
--- a/net/ipv4/bpf_tcp_ca.c
+++ b/net/ipv4/bpf_tcp_ca.c
@@ -205,6 +205,8 @@ bpf_tcp_ca_get_func_proto(enum bpf_func_id func_id,
offsetof(struct tcp_congestion_ops, release))
return &bpf_sk_getsockopt_proto;
return NULL;
+ case BPF_FUNC_ktime_get_coarse_ns:
+ return &bpf_ktime_get_coarse_ns_proto;
default:
return bpf_base_func_proto(func_id);
}
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index ec73a0d52d3e..323e622ff9b7 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -2591,7 +2591,7 @@ static int __devinet_sysctl_register(struct net *net, char *dev_name,
free:
kfree(t);
out:
- return -ENOBUFS;
+ return -ENOMEM;
}
static void __devinet_sysctl_unregister(struct net *net,
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 319dd7bbfe33..8bcecdd6aeda 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1807,6 +1807,17 @@ int udp_read_sock(struct sock *sk, read_descriptor_t *desc,
skb = skb_recv_udp(sk, 0, 1, &err);
if (!skb)
return err;
+
+ if (udp_lib_checksum_complete(skb)) {
+ __UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS,
+ IS_UDPLITE(sk));
+ __UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS,
+ IS_UDPLITE(sk));
+ atomic_inc(&sk->sk_drops);
+ kfree_skb(skb);
+ continue;
+ }
+
used = recv_actor(desc, skb, 0, skb->len);
if (used <= 0) {
if (!copied)
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index 59284da9116d..b61c802e3bf3 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -566,6 +566,10 @@ static void smc_stat_fallback(struct smc_sock *smc)
static void smc_switch_to_fallback(struct smc_sock *smc, int reason_code)
{
+ wait_queue_head_t *smc_wait = sk_sleep(&smc->sk);
+ wait_queue_head_t *clc_wait = sk_sleep(smc->clcsock->sk);
+ unsigned long flags;
+
smc->use_fallback = true;
smc->fallback_rsn = reason_code;
smc_stat_fallback(smc);
@@ -575,6 +579,16 @@ static void smc_switch_to_fallback(struct smc_sock *smc, int reason_code)
smc->clcsock->file->private_data = smc->clcsock;
smc->clcsock->wq.fasync_list =
smc->sk.sk_socket->wq.fasync_list;
+
+ /* There may be some entries remaining in
+ * smc socket->wq, which should be moved
+ * to clcsock->wq during the fallback.
+ */
+ spin_lock_irqsave(&smc_wait->lock, flags);
+ spin_lock(&clc_wait->lock);
+ list_splice_init(&smc_wait->head, &clc_wait->head);
+ spin_unlock(&clc_wait->lock);
+ spin_unlock_irqrestore(&smc_wait->lock, flags);
}
}
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
index 49b8ba3bb683..25ebd30feecd 100644
--- a/net/smc/smc_core.c
+++ b/net/smc/smc_core.c
@@ -708,13 +708,14 @@ static u8 smcr_next_link_id(struct smc_link_group *lgr)
int i;
while (1) {
+again:
link_id = ++lgr->next_link_id;
if (!link_id) /* skip zero as link_id */
link_id = ++lgr->next_link_id;
for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
if (smc_link_usable(&lgr->lnk[i]) &&
lgr->lnk[i].link_id == link_id)
- continue;
+ goto again;
}
break;
}
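
The smc_core.c hunk fixes a classic nested-loop bug: `continue` only advanced the inner scan over existing links, so a clashing link_id was still returned; `goto again` restarts the outer loop with a fresh id. A compact demonstration of the pattern (values are made up):

    #include <stdio.h>

    /* Pick the next ID, skipping zero and any ID already in use.
     * 'continue' inside the inner scan would only move to the next
     * in-use slot; 'goto again' restarts the search with a new ID. */
    static unsigned char next_id(unsigned char *cursor,
                                 const unsigned char *used, int n)
    {
        unsigned char id;
        int i;

        while (1) {
    again:
            id = ++(*cursor);
            if (!id)    /* skip zero as an ID */
                id = ++(*cursor);
            for (i = 0; i < n; i++) {
                if (used[i] == id)
                    goto again;    /* clash: try the next ID */
            }
            break;
        }
        return id;
    }

    int main(void)
    {
        unsigned char used[] = { 1, 2, 3 };
        unsigned char cursor = 0;

        printf("next free id: %u\n", next_id(&cursor, used, 3));    /* 4 */
        return 0;
    }
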
diff --git a/net/tipc/crypto.c b/net/tipc/crypto.c
index dc60c32bb70d..e701651f6533 100644
--- a/net/tipc/crypto.c
+++ b/net/tipc/crypto.c
@@ -524,7 +524,7 @@ static int tipc_aead_init(struct tipc_aead **aead, struct tipc_aead_key *ukey,
return -EEXIST;
/* Allocate a new AEAD */
- tmp = kzalloc(sizeof(*tmp), GFP_ATOMIC);
+ tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
if (unlikely(!tmp))
return -ENOMEM;
@@ -1470,7 +1470,7 @@ int tipc_crypto_start(struct tipc_crypto **crypto, struct net *net,
return -EEXIST;
/* Allocate crypto */
- c = kzalloc(sizeof(*c), GFP_ATOMIC);
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
if (!c)
return -ENOMEM;
@@ -1484,7 +1484,7 @@ int tipc_crypto_start(struct tipc_crypto **crypto, struct net *net,
}
/* Allocate statistic structure */
- c->stats = alloc_percpu_gfp(struct tipc_crypto_stats, GFP_ATOMIC);
+ c->stats = alloc_percpu(struct tipc_crypto_stats);
if (!c->stats) {
if (c->wq)
destroy_workqueue(c->wq);
@@ -2457,7 +2457,7 @@ static void tipc_crypto_work_tx(struct work_struct *work)
}
/* Lets duplicate it first */
- skey = kmemdup(aead->key, tipc_aead_key_size(aead->key), GFP_ATOMIC);
+ skey = kmemdup(aead->key, tipc_aead_key_size(aead->key), GFP_KERNEL);
rcu_read_unlock();
/* Now, generate new key, initiate & distribute it */
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 1b7a487c8841..09ae8448f394 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -1298,8 +1298,11 @@ static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb,
return false;
#ifdef CONFIG_TIPC_CRYPTO
case MSG_CRYPTO:
- tipc_crypto_msg_rcv(l->net, skb);
- return true;
+ if (TIPC_SKB_CB(skb)->decrypted) {
+ tipc_crypto_msg_rcv(l->net, skb);
+ return true;
+ }
+ fallthrough;
#endif
default:
pr_warn("Dropping received illegal msg type\n");
diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c
index 90c4e1e819d3..bc4ad48ea4f0 100644
--- a/net/xdp/xsk_buff_pool.c
+++ b/net/xdp/xsk_buff_pool.c
@@ -500,7 +500,7 @@ struct xdp_buff *xp_alloc(struct xsk_buff_pool *pool)
pool->free_list_cnt--;
xskb = list_first_entry(&pool->free_list, struct xdp_buff_xsk,
free_list_node);
- list_del(&xskb->free_list_node);
+ list_del_init(&xskb->free_list_node);
}
xskb->xdp.data = xskb->xdp.data_hard_start + XDP_PACKET_HEADROOM;
@@ -568,7 +568,7 @@ static u32 xp_alloc_reused(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u3
i = nb_entries;
while (i--) {
xskb = list_first_entry(&pool->free_list, struct xdp_buff_xsk, free_list_node);
- list_del(&xskb->free_list_node);
+ list_del_init(&xskb->free_list_node);
*xdp = &xskb->xdp;
xdp++;
@@ -615,6 +615,9 @@ EXPORT_SYMBOL(xp_can_alloc);
void xp_free(struct xdp_buff_xsk *xskb)
{
+ if (!list_empty(&xskb->free_list_node))
+ return;
+
xskb->pool->free_list_cnt++;
list_add(&xskb->free_list_node, &xskb->pool->free_list);
}
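
list_del_init() leaves the removed node pointing at itself, so xp_free() can use list_empty() on the node as a cheap "already on the free list?" test and bail out instead of double-adding. A self-contained sketch of the idiom with a minimal circular list:

    #include <stdio.h>

    struct list_node { struct list_node *next, *prev; };

    static void list_init(struct list_node *n) { n->next = n->prev = n; }

    static void list_add(struct list_node *n, struct list_node *head)
    {
        n->next = head->next;
        n->prev = head;
        head->next->prev = n;
        head->next = n;
    }

    /* Unlink the node and point it back at itself, so that
     * "n->next == n" later answers "is this node unlinked?". */
    static void list_del_init(struct list_node *n)
    {
        n->prev->next = n->next;
        n->next->prev = n->prev;
        list_init(n);
    }

    static int on_a_list(const struct list_node *n) { return n->next != n; }

    int main(void)
    {
        struct list_node free_list, buf;

        list_init(&free_list);
        list_init(&buf);

        list_add(&buf, &free_list);    /* first xp_free(): buffer freed */
        if (on_a_list(&buf))
            puts("second free skipped: already on free list");

        list_del_init(&buf);           /* xp_alloc(): buffer taken back */
        printf("linked after alloc: %d\n", on_a_list(&buf));    /* 0 */
        return 0;
    }
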
diff --git a/samples/bpf/hbm_kern.h b/samples/bpf/hbm_kern.h
index 722b3fadb467..1752a46a2b05 100644
--- a/samples/bpf/hbm_kern.h
+++ b/samples/bpf/hbm_kern.h
@@ -9,8 +9,6 @@
* Include file for sample Host Bandwidth Manager (HBM) BPF programs
*/
#define KBUILD_MODNAME "foo"
-#include <stddef.h>
-#include <stdbool.h>
#include <uapi/linux/bpf.h>
#include <uapi/linux/if_ether.h>
#include <uapi/linux/if_packet.h>
diff --git a/samples/bpf/xdp_redirect_cpu_user.c b/samples/bpf/xdp_redirect_cpu_user.c
index d84e6949007c..a81704d3317b 100644
--- a/samples/bpf/xdp_redirect_cpu_user.c
+++ b/samples/bpf/xdp_redirect_cpu_user.c
@@ -309,7 +309,6 @@ int main(int argc, char **argv)
const char *mprog_filename = NULL, *mprog_name = NULL;
struct xdp_redirect_cpu *skel;
struct bpf_map_info info = {};
- char ifname_buf[IF_NAMESIZE];
struct bpf_cpumap_val value;
__u32 infosz = sizeof(info);
int ret = EXIT_FAIL_OPTION;
@@ -390,10 +389,10 @@ int main(int argc, char **argv)
case 'd':
if (strlen(optarg) >= IF_NAMESIZE) {
fprintf(stderr, "-d/--dev name too long\n");
+ usage(argv, long_options, __doc__, mask, true, skel->obj);
goto end_cpu;
}
- safe_strncpy(ifname_buf, optarg, strlen(ifname_buf));
- ifindex = if_nametoindex(ifname_buf);
+ ifindex = if_nametoindex(optarg);
if (!ifindex)
ifindex = strtoul(optarg, NULL, 0);
if (!ifindex) {
diff --git a/samples/bpf/xdp_sample_user.c b/samples/bpf/xdp_sample_user.c
index b32d82178199..8740838e7767 100644
--- a/samples/bpf/xdp_sample_user.c
+++ b/samples/bpf/xdp_sample_user.c
@@ -120,7 +120,10 @@ struct sample_output {
__u64 xmit;
} totals;
struct {
- __u64 pps;
+ union {
+ __u64 pps;
+ __u64 num;
+ };
__u64 drop;
__u64 err;
} rx_cnt;
@@ -1322,7 +1325,7 @@ int sample_install_xdp(struct bpf_program *xdp_prog, int ifindex, bool generic,
static void sample_summary_print(void)
{
- double period = sample_out.rx_cnt.pps;
+ double num = sample_out.rx_cnt.num;
if (sample_out.totals.rx) {
double pkts = sample_out.totals.rx;
@@ -1330,7 +1333,7 @@ static void sample_summary_print(void)
print_always(" Packets received : %'-10llu\n",
sample_out.totals.rx);
print_always(" Average packets/s : %'-10.0f\n",
- sample_round(pkts / period));
+ sample_round(pkts / num));
}
if (sample_out.totals.redir) {
double pkts = sample_out.totals.redir;
@@ -1338,7 +1341,7 @@ static void sample_summary_print(void)
print_always(" Packets redirected : %'-10llu\n",
sample_out.totals.redir);
print_always(" Average redir/s : %'-10.0f\n",
- sample_round(pkts / period));
+ sample_round(pkts / num));
}
if (sample_out.totals.drop)
print_always(" Rx dropped : %'-10llu\n",
@@ -1355,7 +1358,7 @@ static void sample_summary_print(void)
print_always(" Packets transmitted : %'-10llu\n",
sample_out.totals.xmit);
print_always(" Average transmit/s : %'-10.0f\n",
- sample_round(pkts / period));
+ sample_round(pkts / num));
}
}
@@ -1422,7 +1425,7 @@ static int sample_stats_collect(struct stats_record *rec)
return 0;
}
-static void sample_summary_update(struct sample_output *out, int interval)
+static void sample_summary_update(struct sample_output *out)
{
sample_out.totals.rx += out->totals.rx;
sample_out.totals.redir += out->totals.redir;
@@ -1430,12 +1433,11 @@ static void sample_summary_update(struct sample_output *out, int interval)
sample_out.totals.drop_xmit += out->totals.drop_xmit;
sample_out.totals.err += out->totals.err;
sample_out.totals.xmit += out->totals.xmit;
- sample_out.rx_cnt.pps += interval;
+ sample_out.rx_cnt.num++;
}
static void sample_stats_print(int mask, struct stats_record *cur,
- struct stats_record *prev, char *prog_name,
- int interval)
+ struct stats_record *prev, char *prog_name)
{
struct sample_output out = {};
@@ -1452,7 +1454,7 @@ static void sample_stats_print(int mask, struct stats_record *cur,
else if (mask & SAMPLE_DEVMAP_XMIT_CNT_MULTI)
stats_get_devmap_xmit_multi(cur, prev, 0, &out,
mask & SAMPLE_DEVMAP_XMIT_CNT);
- sample_summary_update(&out, interval);
+ sample_summary_update(&out);
stats_print(prog_name, mask, cur, prev, &out);
}
@@ -1495,7 +1497,7 @@ static void swap(struct stats_record **a, struct stats_record **b)
}
static int sample_timer_cb(int timerfd, struct stats_record **rec,
- struct stats_record **prev, int interval)
+ struct stats_record **prev)
{
char line[64] = "Summary";
int ret;
@@ -1524,7 +1526,7 @@ static int sample_timer_cb(int timerfd, struct stats_record **rec,
snprintf(line, sizeof(line), "%s->%s", f ?: "?", t ?: "?");
}
- sample_stats_print(sample_mask, *rec, *prev, line, interval);
+ sample_stats_print(sample_mask, *rec, *prev, line);
return 0;
}
@@ -1579,7 +1581,7 @@ int sample_run(int interval, void (*post_cb)(void *), void *ctx)
if (pfd[0].revents & POLLIN)
ret = sample_signal_cb();
else if (pfd[1].revents & POLLIN)
- ret = sample_timer_cb(timerfd, &rec, &prev, interval);
+ ret = sample_timer_cb(timerfd, &rec, &prev);
if (ret)
break;
diff --git a/tools/bpf/runqslower/Makefile b/tools/bpf/runqslower/Makefile
index bbd1150578f7..8791d0e2762b 100644
--- a/tools/bpf/runqslower/Makefile
+++ b/tools/bpf/runqslower/Makefile
@@ -88,5 +88,4 @@ $(BPFOBJ): $(wildcard $(LIBBPF_SRC)/*.[ch] $(LIBBPF_SRC)/Makefile) | $(BPFOBJ_OU
$(DEFAULT_BPFTOOL): $(BPFOBJ) | $(BPFTOOL_OUTPUT)
$(Q)$(MAKE) $(submake_extras) -C ../bpftool OUTPUT=$(BPFTOOL_OUTPUT) \
- LIBBPF_OUTPUT=$(BPFOBJ_OUTPUT) \
- LIBBPF_DESTDIR=$(BPF_DESTDIR) CC=$(HOSTCC) LD=$(HOSTLD)
+ CC=$(HOSTCC) LD=$(HOSTLD)
diff --git a/tools/lib/bpf/bpf_gen_internal.h b/tools/lib/bpf/bpf_gen_internal.h
index d26e5472fe50..6f3df004479b 100644
--- a/tools/lib/bpf/bpf_gen_internal.h
+++ b/tools/lib/bpf/bpf_gen_internal.h
@@ -45,8 +45,8 @@ struct bpf_gen {
int nr_fd_array;
};
-void bpf_gen__init(struct bpf_gen *gen, int log_level);
-int bpf_gen__finish(struct bpf_gen *gen);
+void bpf_gen__init(struct bpf_gen *gen, int log_level, int nr_progs, int nr_maps);
+int bpf_gen__finish(struct bpf_gen *gen, int nr_progs, int nr_maps);
void bpf_gen__free(struct bpf_gen *gen);
void bpf_gen__load_btf(struct bpf_gen *gen, const void *raw_data, __u32 raw_size);
void bpf_gen__map_create(struct bpf_gen *gen, struct bpf_create_map_params *map_attr, int map_idx);
diff --git a/tools/lib/bpf/gen_loader.c b/tools/lib/bpf/gen_loader.c
index 502dea53a742..9934851ccde7 100644
--- a/tools/lib/bpf/gen_loader.c
+++ b/tools/lib/bpf/gen_loader.c
@@ -18,7 +18,7 @@
#define MAX_USED_MAPS 64
#define MAX_USED_PROGS 32
#define MAX_KFUNC_DESCS 256
-#define MAX_FD_ARRAY_SZ (MAX_USED_PROGS + MAX_KFUNC_DESCS)
+#define MAX_FD_ARRAY_SZ (MAX_USED_MAPS + MAX_KFUNC_DESCS)
/* The following structure describes the stack layout of the loader program.
* In addition R6 contains the pointer to context.
@@ -33,8 +33,8 @@
*/
struct loader_stack {
__u32 btf_fd;
- __u32 prog_fd[MAX_USED_PROGS];
__u32 inner_map_fd;
+ __u32 prog_fd[MAX_USED_PROGS];
};
#define stack_off(field) \
@@ -42,6 +42,11 @@ struct loader_stack {
#define attr_field(attr, field) (attr + offsetof(union bpf_attr, field))
+static int blob_fd_array_off(struct bpf_gen *gen, int index)
+{
+ return gen->fd_array + index * sizeof(int);
+}
+
static int realloc_insn_buf(struct bpf_gen *gen, __u32 size)
{
size_t off = gen->insn_cur - gen->insn_start;
@@ -102,11 +107,15 @@ static void emit2(struct bpf_gen *gen, struct bpf_insn insn1, struct bpf_insn in
emit(gen, insn2);
}
-void bpf_gen__init(struct bpf_gen *gen, int log_level)
+static int add_data(struct bpf_gen *gen, const void *data, __u32 size);
+static void emit_sys_close_blob(struct bpf_gen *gen, int blob_off);
+
+void bpf_gen__init(struct bpf_gen *gen, int log_level, int nr_progs, int nr_maps)
{
- size_t stack_sz = sizeof(struct loader_stack);
+ size_t stack_sz = sizeof(struct loader_stack), nr_progs_sz;
int i;
+ gen->fd_array = add_data(gen, NULL, MAX_FD_ARRAY_SZ * sizeof(int));
gen->log_level = log_level;
/* save ctx pointer into R6 */
emit(gen, BPF_MOV64_REG(BPF_REG_6, BPF_REG_1));
@@ -118,19 +127,27 @@ void bpf_gen__init(struct bpf_gen *gen, int log_level)
emit(gen, BPF_MOV64_IMM(BPF_REG_3, 0));
emit(gen, BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel));
+ /* amount of stack actually used, only used to calculate iterations, not stack offset */
+ nr_progs_sz = offsetof(struct loader_stack, prog_fd[nr_progs]);
/* jump over cleanup code */
emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0,
- /* size of cleanup code below */
- (stack_sz / 4) * 3 + 2));
+ /* size of cleanup code below (including map fd cleanup) */
+ (nr_progs_sz / 4) * 3 + 2 +
+ /* 6 insns for emit_sys_close_blob,
+ * 6 insns for debug_regs in emit_sys_close_blob
+ */
+ nr_maps * (6 + (gen->log_level ? 6 : 0))));
/* remember the label where all error branches will jump to */
gen->cleanup_label = gen->insn_cur - gen->insn_start;
/* emit cleanup code: close all temp FDs */
- for (i = 0; i < stack_sz; i += 4) {
+ for (i = 0; i < nr_progs_sz; i += 4) {
emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_10, -stack_sz + i));
emit(gen, BPF_JMP_IMM(BPF_JSLE, BPF_REG_1, 0, 1));
emit(gen, BPF_EMIT_CALL(BPF_FUNC_sys_close));
}
+ for (i = 0; i < nr_maps; i++)
+ emit_sys_close_blob(gen, blob_fd_array_off(gen, i));
/* R7 contains the error code from sys_bpf. Copy it into R0 and exit. */
emit(gen, BPF_MOV64_REG(BPF_REG_0, BPF_REG_7));
emit(gen, BPF_EXIT_INSN());
@@ -160,8 +177,6 @@ static int add_data(struct bpf_gen *gen, const void *data, __u32 size)
*/
static int add_map_fd(struct bpf_gen *gen)
{
- if (!gen->fd_array)
- gen->fd_array = add_data(gen, NULL, MAX_FD_ARRAY_SZ * sizeof(int));
if (gen->nr_maps == MAX_USED_MAPS) {
pr_warn("Total maps exceeds %d\n", MAX_USED_MAPS);
gen->error = -E2BIG;
@@ -174,8 +189,6 @@ static int add_kfunc_btf_fd(struct bpf_gen *gen)
{
int cur;
- if (!gen->fd_array)
- gen->fd_array = add_data(gen, NULL, MAX_FD_ARRAY_SZ * sizeof(int));
if (gen->nr_fd_array == MAX_KFUNC_DESCS) {
cur = add_data(gen, NULL, sizeof(int));
return (cur - gen->fd_array) / sizeof(int);
@@ -183,11 +196,6 @@ static int add_kfunc_btf_fd(struct bpf_gen *gen)
return MAX_USED_MAPS + gen->nr_fd_array++;
}
-static int blob_fd_array_off(struct bpf_gen *gen, int index)
-{
- return gen->fd_array + index * sizeof(int);
-}
-
static int insn_bytes_to_bpf_size(__u32 sz)
{
switch (sz) {
@@ -359,10 +367,15 @@ static void emit_sys_close_blob(struct bpf_gen *gen, int blob_off)
__emit_sys_close(gen);
}
-int bpf_gen__finish(struct bpf_gen *gen)
+int bpf_gen__finish(struct bpf_gen *gen, int nr_progs, int nr_maps)
{
int i;
+ if (nr_progs != gen->nr_progs || nr_maps != gen->nr_maps) {
+ pr_warn("progs/maps mismatch\n");
+ gen->error = -EFAULT;
+ return gen->error;
+ }
emit_sys_close_stack(gen, stack_off(btf_fd));
for (i = 0; i < gen->nr_progs; i++)
move_stack2ctx(gen,
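
The tricky part of the gen_loader change is the jump that skips the cleanup code: its offset must now cover nr_progs FD slots (3 insns each), the 2 trailing insns, and the per-map emit_sys_close_blob() expansion. A worked instance of that arithmetic with made-up counts (struct layout taken from the reordered loader_stack above, prog_fd bound from MAX_USED_PROGS):

    #include <stddef.h>
    #include <stdio.h>

    struct loader_stack {
        unsigned int btf_fd;
        unsigned int inner_map_fd;
        unsigned int prog_fd[32];    /* MAX_USED_PROGS */
    };

    int main(void)
    {
        int nr_progs = 2, nr_maps = 3, log_level = 0;
        size_t nr_progs_sz = offsetof(struct loader_stack, prog_fd) +
                             nr_progs * sizeof(unsigned int);
        size_t skip = (nr_progs_sz / 4) * 3 + 2 +
                      nr_maps * (6 + (log_level ? 6 : 0));

        /* 2 progs, 3 maps, no debug: jump over 32 cleanup insns */
        printf("cleanup is %zu insns\n", skip);
        return 0;
    }
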
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index a1bea1953df6..7c74342bb668 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -7258,7 +7258,7 @@ int bpf_object__load_xattr(struct bpf_object_load_attr *attr)
}
if (obj->gen_loader)
- bpf_gen__init(obj->gen_loader, attr->log_level);
+ bpf_gen__init(obj->gen_loader, attr->log_level, obj->nr_programs, obj->nr_maps);
err = bpf_object__probe_loading(obj);
err = err ? : bpf_object__load_vmlinux_btf(obj, false);
@@ -7277,7 +7277,7 @@ int bpf_object__load_xattr(struct bpf_object_load_attr *attr)
for (i = 0; i < obj->nr_maps; i++)
obj->maps[i].fd = -1;
if (!err)
- err = bpf_gen__finish(obj->gen_loader);
+ err = bpf_gen__finish(obj->gen_loader, obj->nr_programs, obj->nr_maps);
}
/* clean up fd_array */
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index 54b0a41a3775..62fafbeb4672 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -187,7 +187,7 @@ DEFAULT_BPFTOOL := $(HOST_SCRATCH_DIR)/sbin/bpftool
$(OUTPUT)/runqslower: $(BPFOBJ) | $(DEFAULT_BPFTOOL) $(RUNQSLOWER_OUTPUT)
$(Q)$(MAKE) $(submake_extras) -C $(TOOLSDIR)/bpf/runqslower \
OUTPUT=$(RUNQSLOWER_OUTPUT) VMLINUX_BTF=$(VMLINUX_BTF) \
- BPFTOOL_OUTPUT=$(BUILD_DIR)/bpftool/ \
+ BPFTOOL_OUTPUT=$(HOST_BUILD_DIR)/bpftool/ \
BPFOBJ_OUTPUT=$(BUILD_DIR)/libbpf \
BPFOBJ=$(BPFOBJ) BPF_INCLUDE=$(INCLUDE_DIR) && \
cp $(RUNQSLOWER_OUTPUT)runqslower $@
diff --git a/tools/testing/selftests/bpf/prog_tests/helper_restricted.c b/tools/testing/selftests/bpf/prog_tests/helper_restricted.c
new file mode 100644
index 000000000000..e1de5f80c3b2
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/helper_restricted.c
@@ -0,0 +1,33 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <test_progs.h>
+#include "test_helper_restricted.skel.h"
+
+void test_helper_restricted(void)
+{
+ int prog_i = 0, prog_cnt;
+ int duration = 0;
+
+ do {
+ struct test_helper_restricted *test;
+ int maybeOK;
+
+ test = test_helper_restricted__open();
+ if (!ASSERT_OK_PTR(test, "open"))
+ return;
+
+ prog_cnt = test->skeleton->prog_cnt;
+
+ for (int j = 0; j < prog_cnt; ++j) {
+ struct bpf_program *prog = *test->skeleton->progs[j].prog;
+
+ maybeOK = bpf_program__set_autoload(prog, prog_i == j);
+ ASSERT_OK(maybeOK, "set autoload");
+ }
+
+ maybeOK = test_helper_restricted__load(test);
+ CHECK(!maybeOK, test->skeleton->progs[prog_i].name, "helper isn't restricted");
+
+ test_helper_restricted__destroy(test);
+ } while (++prog_i < prog_cnt);
+}
diff --git a/tools/testing/selftests/bpf/progs/test_helper_restricted.c b/tools/testing/selftests/bpf/progs/test_helper_restricted.c
new file mode 100644
index 000000000000..68d64c365f90
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_helper_restricted.c
@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <time.h>
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+struct timer {
+ struct bpf_timer t;
+};
+
+struct lock {
+ struct bpf_spin_lock l;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, struct timer);
+} timers SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, struct lock);
+} locks SEC(".maps");
+
+static int timer_cb(void *map, int *key, struct timer *timer)
+{
+ return 0;
+}
+
+static void timer_work(void)
+{
+ struct timer *timer;
+ const int key = 0;
+
+ timer = bpf_map_lookup_elem(&timers, &key);
+ if (timer) {
+ bpf_timer_init(&timer->t, &timers, CLOCK_MONOTONIC);
+ bpf_timer_set_callback(&timer->t, timer_cb);
+ bpf_timer_start(&timer->t, 10E9, 0);
+ bpf_timer_cancel(&timer->t);
+ }
+}
+
+static void spin_lock_work(void)
+{
+ const int key = 0;
+ struct lock *lock;
+
+ lock = bpf_map_lookup_elem(&locks, &key);
+ if (lock) {
+ bpf_spin_lock(&lock->l);
+ bpf_spin_unlock(&lock->l);
+ }
+}
+
+SEC("raw_tp/sys_enter")
+int raw_tp_timer(void *ctx)
+{
+ timer_work();
+
+ return 0;
+}
+
+SEC("tp/syscalls/sys_enter_nanosleep")
+int tp_timer(void *ctx)
+{
+ timer_work();
+
+ return 0;
+}
+
+SEC("kprobe/sys_nanosleep")
+int kprobe_timer(void *ctx)
+{
+ timer_work();
+
+ return 0;
+}
+
+SEC("perf_event")
+int perf_event_timer(void *ctx)
+{
+ timer_work();
+
+ return 0;
+}
+
+SEC("raw_tp/sys_enter")
+int raw_tp_spin_lock(void *ctx)
+{
+ spin_lock_work();
+
+ return 0;
+}
+
+SEC("tp/syscalls/sys_enter_nanosleep")
+int tp_spin_lock(void *ctx)
+{
+ spin_lock_work();
+
+ return 0;
+}
+
+SEC("kprobe/sys_nanosleep")
+int kprobe_spin_lock(void *ctx)
+{
+ spin_lock_work();
+
+ return 0;
+}
+
+SEC("perf_event")
+int perf_event_spin_lock(void *ctx)
+{
+ spin_lock_work();
+
+ return 0;
+}
+
+const char LICENSE[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index 25afe423b3f0..465ef3f112c0 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -92,6 +92,7 @@ struct bpf_test {
int fixup_map_event_output[MAX_FIXUPS];
int fixup_map_reuseport_array[MAX_FIXUPS];
int fixup_map_ringbuf[MAX_FIXUPS];
+ int fixup_map_timer[MAX_FIXUPS];
/* Expected verifier log output for result REJECT or VERBOSE_ACCEPT.
* Can be a tab-separated sequence of expected strings. An empty string
* means no log verification.
@@ -604,8 +605,15 @@ static int create_cgroup_storage(bool percpu)
* int cnt;
* struct bpf_spin_lock l;
* };
+ * struct bpf_timer {
+ * __u64 :64;
+ * __u64 :64;
+ * } __attribute__((aligned(8)));
+ * struct timer {
+ * struct bpf_timer t;
+ * };
*/
-static const char btf_str_sec[] = "\0bpf_spin_lock\0val\0cnt\0l";
+static const char btf_str_sec[] = "\0bpf_spin_lock\0val\0cnt\0l\0bpf_timer\0timer\0t";
static __u32 btf_raw_types[] = {
/* int */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
@@ -616,6 +624,11 @@ static __u32 btf_raw_types[] = {
BTF_TYPE_ENC(15, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 2), 8),
BTF_MEMBER_ENC(19, 1, 0), /* int cnt; */
BTF_MEMBER_ENC(23, 2, 32),/* struct bpf_spin_lock l; */
+ /* struct bpf_timer */ /* [4] */
+ BTF_TYPE_ENC(25, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 0), 16),
+ /* struct timer */ /* [5] */
+ BTF_TYPE_ENC(35, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 16),
+ BTF_MEMBER_ENC(41, 4, 0), /* struct bpf_timer t; */
};
static int load_btf(void)
@@ -696,6 +709,29 @@ static int create_sk_storage_map(void)
return fd;
}
+static int create_map_timer(void)
+{
+ struct bpf_create_map_attr attr = {
+ .name = "test_map",
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .key_size = 4,
+ .value_size = 16,
+ .max_entries = 1,
+ .btf_key_type_id = 1,
+ .btf_value_type_id = 5,
+ };
+ int fd, btf_fd;
+
+ btf_fd = load_btf();
+ if (btf_fd < 0)
+ return -1;
+ attr.btf_fd = btf_fd;
+ fd = bpf_create_map_xattr(&attr);
+ if (fd < 0)
+ printf("Failed to create map with timer\n");
+ return fd;
+}
+
static char bpf_vlog[UINT_MAX >> 8];
static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
@@ -722,6 +758,7 @@ static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
int *fixup_map_event_output = test->fixup_map_event_output;
int *fixup_map_reuseport_array = test->fixup_map_reuseport_array;
int *fixup_map_ringbuf = test->fixup_map_ringbuf;
+ int *fixup_map_timer = test->fixup_map_timer;
if (test->fill_helper) {
test->fill_insns = calloc(MAX_TEST_INSNS, sizeof(struct bpf_insn));
@@ -907,6 +944,13 @@ static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
fixup_map_ringbuf++;
} while (*fixup_map_ringbuf);
}
+ if (*fixup_map_timer) {
+ map_fds[21] = create_map_timer();
+ do {
+ prog[*fixup_map_timer].imm = map_fds[21];
+ fixup_map_timer++;
+ } while (*fixup_map_timer);
+ }
}
struct libcap {
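
The magic numbers in the btf_raw_types hunk (25, 35, 41) are byte offsets into btf_str_sec: each name starts one past the NUL that ends the previous one. A quick way to check them (string literal copied from the test):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        static const char btf_str_sec[] =
            "\0bpf_spin_lock\0val\0cnt\0l\0bpf_timer\0timer\0t";
        size_t off = 1;    /* offset 0 is the leading NUL */

        while (off < sizeof(btf_str_sec) - 1) {
            printf("%2zu: %s\n", off, &btf_str_sec[off]);
            off += strlen(&btf_str_sec[off]) + 1;
        }
        return 0;
    }

Running it prints offsets 1, 15, 19, 23, 25, 35 and 41, matching the name arguments in the BTF_TYPE_ENC/BTF_MEMBER_ENC entries.
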
diff --git a/tools/testing/selftests/bpf/verifier/helper_restricted.c b/tools/testing/selftests/bpf/verifier/helper_restricted.c
new file mode 100644
index 000000000000..a067b7098b97
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/helper_restricted.c
@@ -0,0 +1,196 @@
+{
+ "bpf_ktime_get_coarse_ns is forbidden in BPF_PROG_TYPE_KPROBE",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ktime_get_coarse_ns),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "unknown func bpf_ktime_get_coarse_ns",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_KPROBE,
+},
+{
+ "bpf_ktime_get_coarse_ns is forbidden in BPF_PROG_TYPE_TRACEPOINT",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ktime_get_coarse_ns),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "unknown func bpf_ktime_get_coarse_ns",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "bpf_ktime_get_coarse_ns is forbidden in BPF_PROG_TYPE_PERF_EVENT",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ktime_get_coarse_ns),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "unknown func bpf_ktime_get_coarse_ns",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_PERF_EVENT,
+},
+{
+ "bpf_ktime_get_coarse_ns is forbidden in BPF_PROG_TYPE_RAW_TRACEPOINT",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ktime_get_coarse_ns),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "unknown func bpf_ktime_get_coarse_ns",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_RAW_TRACEPOINT,
+},
+{
+ "bpf_timer_init isn restricted in BPF_PROG_TYPE_KPROBE",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_LD_MAP_FD(BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_3, 1),
+ BPF_EMIT_CALL(BPF_FUNC_timer_init),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_timer = { 3, 8 },
+ .errstr = "tracing progs cannot use bpf_timer yet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_KPROBE,
+},
+{
+ "bpf_timer_init is forbidden in BPF_PROG_TYPE_PERF_EVENT",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_LD_MAP_FD(BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_3, 1),
+ BPF_EMIT_CALL(BPF_FUNC_timer_init),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_timer = { 3, 8 },
+ .errstr = "tracing progs cannot use bpf_timer yet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_PERF_EVENT,
+},
+{
+ "bpf_timer_init is forbidden in BPF_PROG_TYPE_TRACEPOINT",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_LD_MAP_FD(BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_3, 1),
+ BPF_EMIT_CALL(BPF_FUNC_timer_init),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_timer = { 3, 8 },
+ .errstr = "tracing progs cannot use bpf_timer yet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "bpf_timer_init is forbidden in BPF_PROG_TYPE_RAW_TRACEPOINT",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_LD_MAP_FD(BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_3, 1),
+ BPF_EMIT_CALL(BPF_FUNC_timer_init),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_timer = { 3, 8 },
+ .errstr = "tracing progs cannot use bpf_timer yet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_RAW_TRACEPOINT,
+},
+{
+ "bpf_spin_lock is forbidden in BPF_PROG_TYPE_KPROBE",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_EMIT_CALL(BPF_FUNC_spin_lock),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_spin_lock = { 3 },
+ .errstr = "tracing progs cannot use bpf_spin_lock yet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_KPROBE,
+},
+{
+ "bpf_spin_lock is forbidden in BPF_PROG_TYPE_TRACEPOINT",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_EMIT_CALL(BPF_FUNC_spin_lock),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_spin_lock = { 3 },
+ .errstr = "tracing progs cannot use bpf_spin_lock yet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "bpf_spin_lock is forbidden in BPF_PROG_TYPE_PERF_EVENT",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_EMIT_CALL(BPF_FUNC_spin_lock),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_spin_lock = { 3 },
+ .errstr = "tracing progs cannot use bpf_spin_lock yet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_PERF_EVENT,
+},
+{
+ "bpf_spin_lock is forbidden in BPF_PROG_TYPE_RAW_TRACEPOINT",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_EMIT_CALL(BPF_FUNC_spin_lock),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_spin_lock = { 3 },
+ .errstr = "tracing progs cannot use bpf_spin_lock yet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_RAW_TRACEPOINT,
+},
diff --git a/tools/testing/selftests/bpf/verifier/map_in_map.c b/tools/testing/selftests/bpf/verifier/map_in_map.c
index 2798927ee9ff..128a348b762d 100644
--- a/tools/testing/selftests/bpf/verifier/map_in_map.c
+++ b/tools/testing/selftests/bpf/verifier/map_in_map.c
@@ -19,6 +19,40 @@
.result = ACCEPT,
},
{
+ "map in map state pruning",
+ .insns = {
+ BPF_ST_MEM(0, BPF_REG_10, -4, 0),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -4),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_6),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_6),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 11),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_6),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_6),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_in_map = { 4, 14 },
+ .flags = BPF_F_TEST_STATE_FREQ,
+ .result = VERBOSE_ACCEPT,
+ .errstr = "processed 25 insns",
+ .prog_type = BPF_PROG_TYPE_XDP,
+},
+{
"invalid inner map pointer",
.insns = {
BPF_ST_MEM(0, BPF_REG_10, -4, 0),