path: root/drivers/net/xen-netback/common.h
author     Chris Zankel <[email protected]>  2014-02-24 00:34:36 -0800
committer  Chris Zankel <[email protected]>  2014-02-24 00:34:36 -0800
commit     b3fdfc1b4b641d372e35ced98814289bc60bc5d1
tree       5f11d5ba885031dde45690745646519fb887f447  /drivers/net/xen-netback/common.h
parent     c0e50d41126e4786d9cf1105bdf783e55c99f915
parent     f63b6d7555cd4064554b39da4d44c4cbbc9d6a4a
Merge tag 'xtensa-for-next-20140221-1' into for_next
Xtensa fixes for 3.14:

- allow booting xtfpga on boards with new uBoot and >128MBytes memory;
- drop nonexistent GPIO32 support from fsf variant;
- don't select USE_GENERIC_SMP_HELPERS;
- enable common clock framework support, set up ethoc clock on xtfpga;
- wire up sched_setattr and sched_getattr syscalls.

Signed-off-by: Chris Zankel <[email protected]>
Diffstat (limited to 'drivers/net/xen-netback/common.h')
-rw-r--r--  drivers/net/xen-netback/common.h  43
1 file changed, 21 insertions, 22 deletions
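
The central sizing change in this diff replaces two statically sized worst-case arrays (2*XEN_NETIF_RX_RING_SIZE entries each) with a single grant-copy array bounded by the new MAX_GRANT_COPY_OPS and allocated separately from struct xenvif. A minimal sketch of how such an allocation might look, assuming the MAX_GRANT_COPY_OPS definition added below; the helper name xenvif_alloc_copy_ops and the use of vzalloc are illustrative assumptions, not code from this patch:

/* Hypothetical helper, for illustration only; needs <linux/vmalloc.h>.
 * Worst case per the comment in the hunk below: every slot on the rx
 * ring holds an skb with MAX_SKB_FRAGS frags, each frag needing its
 * own copy operation, hence MAX_SKB_FRAGS ops per ring slot.
 */
static int xenvif_alloc_copy_ops(struct xenvif *vif)
{
	vif->grant_copy_op = vzalloc(sizeof(struct gnttab_copy) *
				     MAX_GRANT_COPY_OPS);
	return vif->grant_copy_op ? 0 : -ENOMEM;
}

Allocating the array separately keeps struct xenvif itself small, which is what the "This array is allocated separately as it is large" comment below is getting at.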
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 08ae01b41c83..ae413a2cbee7 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -101,6 +101,13 @@ struct xenvif_rx_meta {
#define MAX_PENDING_REQS 256
+/* It's possible for an skb to have a maximal number of frags
+ * but still be less than MAX_BUFFER_OFFSET in size. Thus the
+ * worst-case number of copy operations is MAX_SKB_FRAGS per
+ * ring slot.
+ */
+#define MAX_GRANT_COPY_OPS (MAX_SKB_FRAGS * XEN_NETIF_RX_RING_SIZE)
+
struct xenvif {
/* Unique identifier for this interface. */
domid_t domid;
@@ -136,20 +143,15 @@ struct xenvif {
char rx_irq_name[IFNAMSIZ+4]; /* DEVNAME-rx */
struct xen_netif_rx_back_ring rx;
struct sk_buff_head rx_queue;
+ RING_IDX rx_last_skb_slots;
- /* Allow xenvif_start_xmit() to peek ahead in the rx request
- * ring. This is a prediction of what rx_req_cons will be
- * once all queued skbs are put on the ring.
- */
- RING_IDX rx_req_cons_peek;
+ /* This array is allocated separately as it is large */
+ struct gnttab_copy *grant_copy_op;
- /* Given MAX_BUFFER_OFFSET of 4096 the worst case is that each
- * head/fragment page uses 2 copy operations because it
- * straddles two buffers in the frontend.
+ /* We create one meta structure per ring request we consume, so
+ * the maximum number is the same as the ring size.
*/
- struct gnttab_copy grant_copy_op[2*XEN_NETIF_RX_RING_SIZE];
- struct xenvif_rx_meta meta[2*XEN_NETIF_RX_RING_SIZE];
-
+ struct xenvif_rx_meta meta[XEN_NETIF_RX_RING_SIZE];
u8 fe_dev_addr[6];
@@ -198,8 +200,6 @@ void xenvif_xenbus_fini(void);
int xenvif_schedulable(struct xenvif *vif);
-int xenvif_rx_ring_full(struct xenvif *vif);
-
int xenvif_must_stop_queue(struct xenvif *vif);
/* (Un)Map communication rings. */
@@ -211,21 +211,20 @@ int xenvif_map_frontend_rings(struct xenvif *vif,
/* Check for SKBs from frontend and schedule backend processing */
void xenvif_check_rx_xenvif(struct xenvif *vif);
-/* Queue an SKB for transmission to the frontend */
-void xenvif_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb);
-/* Notify xenvif that ring now has space to send an skb to the frontend */
-void xenvif_notify_tx_completion(struct xenvif *vif);
-
/* Prevent the device from generating any further traffic. */
void xenvif_carrier_off(struct xenvif *vif);
-/* Returns number of ring slots required to send an skb to the frontend */
-unsigned int xenvif_count_skb_slots(struct xenvif *vif, struct sk_buff *skb);
-
int xenvif_tx_action(struct xenvif *vif, int budget);
-void xenvif_rx_action(struct xenvif *vif);
int xenvif_kthread(void *data);
+void xenvif_kick_thread(struct xenvif *vif);
+
+/* Determine whether the needed number of slots (req) are available,
+ * and set req_event if not.
+ */
+bool xenvif_rx_ring_slots_available(struct xenvif *vif, int needed);
+
+void xenvif_stop_queue(struct xenvif *vif);
extern bool separate_tx_rx_irq;
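
For orientation, the flow-control entry points added above replace the old rx_req_cons_peek prediction: rather than forecasting rx_req_cons ahead of time, the backend now asks on demand whether enough slots are available and stops the queue if not. A minimal sketch of how a caller might combine them, using only the declarations above; the surrounding control flow is an illustrative assumption, not this patch's actual logic:

/* Hypothetical caller, for illustration only. 'needed' would come from
 * inspecting the skb at the head of rx_queue (cf. rx_last_skb_slots).
 */
static void xenvif_rx_flow_control(struct xenvif *vif, int needed)
{
	if (!xenvif_rx_ring_slots_available(vif, needed)) {
		/* Not enough slots; per the comment above, req_event has
		 * been set, so the frontend will notify us when it posts
		 * more rx requests.
		 */
		xenvif_stop_queue(vif);
		return;
	}

	/* Enough slots: wake the per-vif kthread to push queued skbs
	 * onto the rx ring.
	 */
	xenvif_kick_thread(vif);
}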