author    Dmitry Torokhov <dmitry.torokhov@gmail.com>    2016-01-11 17:47:25 -0800
committer Dmitry Torokhov <dmitry.torokhov@gmail.com>    2016-01-11 17:47:25 -0800
commit    009f773836513960d3982e80c86e266d25528563 (patch)
tree      7315e5666698997dee34a80c1a1db801d3f8879b /drivers/net/xen-netback/common.h
parent    dd0d0d4de582a6a61c032332c91f4f4cb2bab569 (diff)
parent    6544a1df11c48c8413071aac3316792e4678fbfb (diff)
Merge branch 'next' into for-linus
Prepare first round of input updates for 4.5 merge window.
Diffstat (limited to 'drivers/net/xen-netback/common.h')
-rw-r--r--    drivers/net/xen-netback/common.h    16
1 file changed, 11 insertions, 5 deletions
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index a7bf74727116..0333ab0fd926 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -44,6 +44,7 @@
 #include <xen/interface/grant_table.h>
 #include <xen/grant_table.h>
 #include <xen/xenbus.h>
+#include <xen/page.h>
 #include <linux/debugfs.h>
 
 typedef unsigned int pending_ring_idx_t;
@@ -64,8 +65,8 @@ struct pending_tx_info {
 	struct ubuf_info callback_struct;
 };
 
-#define XEN_NETIF_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, PAGE_SIZE)
-#define XEN_NETIF_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, PAGE_SIZE)
+#define XEN_NETIF_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, XEN_PAGE_SIZE)
+#define XEN_NETIF_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, XEN_PAGE_SIZE)
 
 struct xenvif_rx_meta {
 	int id;
@@ -80,16 +81,21 @@ struct xenvif_rx_meta {
 /* Discriminate from any valid pending_idx value. */
 #define INVALID_PENDING_IDX 0xFFFF
 
-#define MAX_BUFFER_OFFSET PAGE_SIZE
+#define MAX_BUFFER_OFFSET XEN_PAGE_SIZE
 
 #define MAX_PENDING_REQS XEN_NETIF_TX_RING_SIZE
 
+/* The maximum number of frags is derived from the size of a grant (same
+ * as a Xen page size for now).
+ */
+#define MAX_XEN_SKB_FRAGS (65536 / XEN_PAGE_SIZE + 1)
+
 /* It's possible for an skb to have a maximal number of frags
  * but still be less than MAX_BUFFER_OFFSET in size. Thus the
- * worst-case number of copy operations is MAX_SKB_FRAGS per
+ * worst-case number of copy operations is MAX_XEN_SKB_FRAGS per
  * ring slot.
  */
-#define MAX_GRANT_COPY_OPS (MAX_SKB_FRAGS * XEN_NETIF_RX_RING_SIZE)
+#define MAX_GRANT_COPY_OPS (MAX_XEN_SKB_FRAGS * XEN_NETIF_RX_RING_SIZE)
 
 #define NETBACK_INVALID_HANDLE -1
 
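For illustration only, a small standalone C sketch (not part of the patch) of the arithmetic behind the new MAX_XEN_SKB_FRAGS bound: assuming the usual 4 KiB grant/page size, a 64 KiB skb can span 65536 / 4096 + 1 = 17 grants, one more than the raw page count to allow for a payload that does not start on a grant boundary. The XEN_PAGE_SIZE value and the ring size used below are assumptions for the example, not values taken from a kernel build.

/* Standalone illustration of the MAX_XEN_SKB_FRAGS arithmetic used in the
 * patch above. The 4096-byte XEN_PAGE_SIZE is an assumption for this
 * example; in the kernel it comes from <xen/page.h>.
 */
#include <stdio.h>

#define XEN_PAGE_SIZE 4096u                             /* assumed grant/page size */
#define MAX_XEN_SKB_FRAGS (65536u / XEN_PAGE_SIZE + 1)  /* as defined in the patch */

int main(void)
{
	/* A 64 KiB skb that is not grant-aligned touches one extra grant,
	 * hence the "+ 1" in the bound: 65536 / 4096 + 1 = 17.
	 */
	printf("XEN_PAGE_SIZE     = %u\n", XEN_PAGE_SIZE);
	printf("MAX_XEN_SKB_FRAGS = %lu\n", (unsigned long)MAX_XEN_SKB_FRAGS);

	/* Worst case: every RX ring slot needs MAX_XEN_SKB_FRAGS grant-copy
	 * operations, which is what MAX_GRANT_COPY_OPS scales by in the patch
	 * (a hypothetical ring size is used here instead of __CONST_RING_SIZE()).
	 */
	unsigned long ring_size = 256;                   /* hypothetical */
	printf("copy ops for %lu slots = %lu\n",
	       ring_size, ring_size * MAX_XEN_SKB_FRAGS);
	return 0;
}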