aboutsummaryrefslogtreecommitdiff
path: root/include
diff options
context:
space:
mode:
authorDaniel Borkmann <[email protected]>2019-08-31 01:08:27 +0200
committerDaniel Borkmann <[email protected]>2019-08-31 01:08:27 +0200
commitbdb15a29cc28f8155e20f7fb58b60ffc452f2d1b (patch)
tree6d1552142231d509d5a13ebdd05d69ca1230c518 /include
parent1c6d6e021c452ac15ce034e6762f0477f1cf7f19 (diff)
parentd57f172c99bd252fbfc93a9b38d8d42642a4b377 (diff)
Merge branch 'bpf-xdp-unaligned-chunk'
Kevin Laatz says: ==================== This patch set adds the ability to use unaligned chunks in the XDP umem. Currently, all chunk addresses passed to the umem are masked to be chunk size aligned (max is PAGE_SIZE). This limits where we can place chunks within the umem as well as limiting the packet sizes that are supported. The changes in this patch set remove these restrictions, allowing XDP to be more flexible in where it can place a chunk within a umem. By relaxing where the chunks can be placed, it allows us to use an arbitrary buffer size and place that wherever we have a free address in the umem. These changes add the ability to support arbitrary frame sizes up to 4k (PAGE_SIZE) and make it easy to integrate with other existing frameworks that have their own memory management systems, such as DPDK. In DPDK, for example, there is already support for AF_XDP with zero-copy. However, with this patch set the integration will be much more seamless. You can find the DPDK AF_XDP driver at: https://git.dpdk.org/dpdk/tree/drivers/net/af_xdp Since we are now dealing with arbitrary frame sizes, we also need to update how we pass around addresses. Currently, the addresses can simply be masked to 2k to get back to the original address. This becomes less trivial when using frame sizes that are not a 'power of 2' size. This patch set modifies the Rx/Tx descriptor format to use the upper 16-bits of the addr field for an offset value, leaving the lower 48-bits for the address (this leaves us with 256 Terabytes, which should be enough!). We only need to use the upper 16-bits to store the offset when running in unaligned mode. Rather than adding the offset (headroom etc) to the address, we will store it in the upper 16-bits of the address field. 
This way, we can easily add the offset to the address where we need it, using some bit manipulation and addition, and we can also easily get the original address wherever we need it (for example in i40e_zca_free) by simply masking to get the lower 48-bits of the address field. The patch set was tested with the following set up: - Intel(R) Xeon(R) Gold 6140 CPU @ 2.30GHz - Intel Corporation Ethernet Controller XXV710 for 25GbE SFP28 (rev 02) - Driver: i40e - Application: xdpsock with l2fwd (single interface) - Turbo disabled in BIOS There are no changes to performance before and after these patches for SKB mode and Copy mode. Zero-copy mode saw a performance degradation of ~1.5%. This patch set has been applied against commit 0bb52b0dfc88 ("tools: bpftool: add 'bpftool map freeze' subcommand") Structure of the patch set: Patch 1: - Remove unnecessary masking and headroom addition during zero-copy Rx buffer recycling in i40e. This change is required in order for the buffer recycling to work in the unaligned chunk mode. Patch 2: - Remove unnecessary masking and headroom addition during zero-copy Rx buffer recycling in ixgbe. This change is required in order for the buffer recycling to work in the unaligned chunk mode. Patch 3: - Add infrastructure for unaligned chunks. Since we are dealing with unaligned chunks that could potentially cross a physical page boundary, we add checks to keep track of that information. We can later use this information to correctly handle buffers that are placed at an address where they cross a page boundary. This patch also modifies the existing Rx and Tx functions to use the new descriptor format. To handle addresses correctly, we need to mask appropriately based on whether we are in aligned or unaligned mode. Patch 4: - This patch updates the i40e driver to make use of the new descriptor format. Patch 5: - This patch updates the ixgbe driver to make use of the new descriptor format. 
Patch 6: - This patch updates the mlx5e driver to make use of the new descriptor format. These changes are required to handle the new descriptor format and for unaligned chunks support. Patch 7: - This patch allows XSK frames smaller than page size in the mlx5e driver. Relax the requirements to the XSK frame size to allow it to be smaller than a page and even not a power of two. The current implementation can work in this mode, both with Striding RQ and without it. Patch 8: - Add flags for umem configuration to libbpf. Since we increase the size of the struct by adding flags, we also need to add the ABI versioning in this patch. Patch 9: - Modify xdpsock application to add a command line option for unaligned chunks Patch 10: - Since we can now run the application in unaligned chunk mode, we need to make sure we recycle the buffers appropriately. Patch 11: - Adds hugepage support to the xdpsock application Patch 12: - Documentation update to include the unaligned chunk scenario. We need to explicitly state that the incoming addresses are only masked in the aligned chunk mode and not the unaligned chunk mode. v2: - fixed checkpatch issues - fixed Rx buffer recycling for unaligned chunks in xdpsock - removed unused defines - fixed how chunk_size is calculated in xsk_diag.c - added some performance numbers to cover letter - modified descriptor format to make it easier to retrieve original address - removed patch adding off_t off to the zero copy allocator. This is no longer needed with the new descriptor format. v3: - added patch for mlx5 driver changes needed for unaligned chunks - moved offset handling to new helper function - changed value used for the umem chunk_mask. Now using the new descriptor format to save us doing the calculations in a number of places meaning more of the code is left unchanged while adding unaligned chunk support. v4: - reworked the next_pg_contig field in the xdp_umem_page struct. 
We now use the low 12 bits of the addr for flags rather than adding an extra field in the struct. - modified unaligned chunks flag define - fixed page_start calculation in __xsk_rcv_memcpy(). - move offset handling to the xdp_umem_get_* functions - modified the len field in xdp_umem_reg struct. We now use 16 bits from this for the flags field. - fixed headroom addition to handle in the mlx5e driver - other minor changes based on review comments v5: - Added ABI versioning in the libbpf patch - Removed bitfields in the xdp_umem_reg struct. Adding new flags field. - Added accessors for getting addr and offset. - Added helper function for adding the offset to the addr. - Fixed conflicts with 'bpf-af-xdp-wakeup' which was merged recently. - Fixed typo in mlx driver patch. - Moved libbpf patch to later in the set (7/11, just before the sample app changes) v6: - Added support for XSK frames smaller than page in mlx5e driver (Maxim Mikityanskiy <[email protected]>). - Fixed offset handling in xsk_generic_rcv. - Added check for base address in xskq_is_valid_addr_unaligned. ==================== Signed-off-by: Daniel Borkmann <[email protected]>
Diffstat (limited to 'include')
-rw-r--r--include/net/xdp_sock.h75
-rw-r--r--include/uapi/linux/if_xdp.h9
2 files changed, 80 insertions, 4 deletions
diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h
index f023b9940d64..c9398ce7960f 100644
--- a/include/net/xdp_sock.h
+++ b/include/net/xdp_sock.h
@@ -16,6 +16,13 @@
struct net_device;
struct xsk_queue;
+/* Masks for xdp_umem_page flags.
+ * The low 12-bits of the addr will be 0 since this is the page address, so we
+ * can use them for flags.
+ */
+#define XSK_NEXT_PG_CONTIG_SHIFT 0
+#define XSK_NEXT_PG_CONTIG_MASK (1ULL << XSK_NEXT_PG_CONTIG_SHIFT)
+
struct xdp_umem_page {
void *addr;
dma_addr_t dma;
@@ -27,8 +34,12 @@ struct xdp_umem_fq_reuse {
u64 handles[];
};
-/* Flags for the umem flags field. */
-#define XDP_UMEM_USES_NEED_WAKEUP (1 << 0)
+/* Flags for the umem flags field.
+ *
+ * The NEED_WAKEUP flag is 1 due to the reuse of the flags field for public
+ * flags. See inlude/uapi/include/linux/if_xdp.h.
+ */
+#define XDP_UMEM_USES_NEED_WAKEUP (1 << 1)
struct xdp_umem {
struct xsk_queue *fq;
@@ -124,14 +135,36 @@ void xsk_map_try_sock_delete(struct xsk_map *map, struct xdp_sock *xs,
int xsk_map_inc(struct xsk_map *map);
void xsk_map_put(struct xsk_map *map);
+static inline u64 xsk_umem_extract_addr(u64 addr)
+{
+ return addr & XSK_UNALIGNED_BUF_ADDR_MASK;
+}
+
+static inline u64 xsk_umem_extract_offset(u64 addr)
+{
+ return addr >> XSK_UNALIGNED_BUF_OFFSET_SHIFT;
+}
+
+static inline u64 xsk_umem_add_offset_to_addr(u64 addr)
+{
+ return xsk_umem_extract_addr(addr) + xsk_umem_extract_offset(addr);
+}
+
static inline char *xdp_umem_get_data(struct xdp_umem *umem, u64 addr)
{
- return umem->pages[addr >> PAGE_SHIFT].addr + (addr & (PAGE_SIZE - 1));
+ unsigned long page_addr;
+
+ addr = xsk_umem_add_offset_to_addr(addr);
+ page_addr = (unsigned long)umem->pages[addr >> PAGE_SHIFT].addr;
+
+ return (char *)(page_addr & PAGE_MASK) + (addr & ~PAGE_MASK);
}
static inline dma_addr_t xdp_umem_get_dma(struct xdp_umem *umem, u64 addr)
{
- return umem->pages[addr >> PAGE_SHIFT].dma + (addr & (PAGE_SIZE - 1));
+ addr = xsk_umem_add_offset_to_addr(addr);
+
+ return umem->pages[addr >> PAGE_SHIFT].dma + (addr & ~PAGE_MASK);
}
/* Reuse-queue aware version of FILL queue helpers */
@@ -172,6 +205,19 @@ static inline void xsk_umem_fq_reuse(struct xdp_umem *umem, u64 addr)
rq->handles[rq->length++] = addr;
}
+
+/* Handle the offset appropriately depending on aligned or unaligned mode.
+ * For unaligned mode, we store the offset in the upper 16-bits of the address.
+ * For aligned mode, we simply add the offset to the address.
+ */
+static inline u64 xsk_umem_adjust_offset(struct xdp_umem *umem, u64 address,
+ u64 offset)
+{
+ if (umem->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG)
+ return address + (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT);
+ else
+ return address + offset;
+}
#else
static inline int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
@@ -241,6 +287,21 @@ static inline struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev,
return NULL;
}
+static inline u64 xsk_umem_extract_addr(u64 addr)
+{
+ return 0;
+}
+
+static inline u64 xsk_umem_extract_offset(u64 addr)
+{
+ return 0;
+}
+
+static inline u64 xsk_umem_add_offset_to_addr(u64 addr)
+{
+ return 0;
+}
+
static inline char *xdp_umem_get_data(struct xdp_umem *umem, u64 addr)
{
return NULL;
@@ -290,6 +351,12 @@ static inline bool xsk_umem_uses_need_wakeup(struct xdp_umem *umem)
return false;
}
+static inline u64 xsk_umem_adjust_offset(struct xdp_umem *umem, u64 handle,
+ u64 offset)
+{
+ return 0;
+}
+
#endif /* CONFIG_XDP_SOCKETS */
#endif /* _LINUX_XDP_SOCK_H */
diff --git a/include/uapi/linux/if_xdp.h b/include/uapi/linux/if_xdp.h
index 62b80d57b72a..be328c59389d 100644
--- a/include/uapi/linux/if_xdp.h
+++ b/include/uapi/linux/if_xdp.h
@@ -26,6 +26,9 @@
*/
#define XDP_USE_NEED_WAKEUP (1 << 3)
+/* Flags for xsk_umem_config flags */
+#define XDP_UMEM_UNALIGNED_CHUNK_FLAG (1 << 0)
+
struct sockaddr_xdp {
__u16 sxdp_family;
__u16 sxdp_flags;
@@ -66,6 +69,7 @@ struct xdp_umem_reg {
__u64 len; /* Length of packet data area */
__u32 chunk_size;
__u32 headroom;
+ __u32 flags;
};
struct xdp_statistics {
@@ -87,6 +91,11 @@ struct xdp_options {
#define XDP_UMEM_PGOFF_FILL_RING 0x100000000ULL
#define XDP_UMEM_PGOFF_COMPLETION_RING 0x180000000ULL
+/* Masks for unaligned chunks mode */
+#define XSK_UNALIGNED_BUF_OFFSET_SHIFT 48
+#define XSK_UNALIGNED_BUF_ADDR_MASK \
+ ((1ULL << XSK_UNALIGNED_BUF_OFFSET_SHIFT) - 1)
+
/* Rx/Tx descriptor */
struct xdp_desc {
__u64 addr;