Diffstat (limited to 'drivers/net/xen-netback/rx.c')
-rw-r--r--  drivers/net/xen-netback/rx.c  83
1 file changed, 58 insertions, 25 deletions
diff --git a/drivers/net/xen-netback/rx.c b/drivers/net/xen-netback/rx.c
index b1cf7c6f407a..29c7645f5780 100644
--- a/drivers/net/xen-netback/rx.c
+++ b/drivers/net/xen-netback/rx.c
@@ -33,22 +33,37 @@
#include <xen/xen.h>
#include <xen/events.h>
+/*
+ * Update the needed ring page slots for the first SKB queued.
+ * Note that any call sequence outside the RX thread calling this function
+ * needs to wake up the RX thread via a call to xenvif_kick_thread()
+ * afterwards in order to avoid a race with putting the thread to sleep.
+ */
+static void xenvif_update_needed_slots(struct xenvif_queue *queue,
+ const struct sk_buff *skb)
+{
+ unsigned int needed = 0;
+
+ if (skb) {
+ needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE);
+ if (skb_is_gso(skb))
+ needed++;
+ if (skb->sw_hash)
+ needed++;
+ }
+
+ WRITE_ONCE(queue->rx_slots_needed, needed);
+}
+
static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
{
RING_IDX prod, cons;
- struct sk_buff *skb;
- int needed;
+ unsigned int needed;
- skb = skb_peek(&queue->rx_queue);
- if (!skb)
+ needed = READ_ONCE(queue->rx_slots_needed);
+ if (!needed)
return false;
- needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE);
- if (skb_is_gso(skb))
- needed++;
- if (skb->sw_hash)
- needed++;
-
do {
prod = queue->rx.sring->req_prod;
cons = queue->rx.req_cons;
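(Worked example, not part of the patch: with XEN_PAGE_SIZE of 4096, a 64KiB GSO skb that also carries a socket hash makes xenvif_update_needed_slots() record rx_slots_needed = DIV_ROUND_UP(65536, 4096) + 1 + 1 = 18, i.e. 16 page slots plus one extra ring slot each for the GSO and hash metadata; xenvif_rx_ring_slots_available() then only reports room once req_prod - req_cons covers those 18 slots.)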
@@ -73,13 +88,19 @@ void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
spin_lock_irqsave(&queue->rx_queue.lock, flags);
- __skb_queue_tail(&queue->rx_queue, skb);
-
- queue->rx_queue_len += skb->len;
- if (queue->rx_queue_len > queue->rx_queue_max) {
+ if (queue->rx_queue_len >= queue->rx_queue_max) {
struct net_device *dev = queue->vif->dev;
netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
+ kfree_skb(skb);
+ queue->vif->dev->stats.rx_dropped++;
+ } else {
+ if (skb_queue_empty(&queue->rx_queue))
+ xenvif_update_needed_slots(queue, skb);
+
+ __skb_queue_tail(&queue->rx_queue, skb);
+
+ queue->rx_queue_len += skb->len;
}
spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
@@ -93,6 +114,8 @@ static struct sk_buff *xenvif_rx_dequeue(struct xenvif_queue *queue)
skb = __skb_dequeue(&queue->rx_queue);
if (skb) {
+ xenvif_update_needed_slots(queue, skb_peek(&queue->rx_queue));
+
queue->rx_queue_len -= skb->len;
if (queue->rx_queue_len < queue->rx_queue_max) {
struct netdev_queue *txq;
@@ -127,6 +150,7 @@ static void xenvif_rx_queue_drop_expired(struct xenvif_queue *queue)
break;
xenvif_rx_dequeue(queue);
kfree_skb(skb);
+ queue->vif->dev->stats.rx_dropped++;
}
}
@@ -467,36 +491,40 @@ void xenvif_rx_action(struct xenvif_queue *queue)
xenvif_rx_copy_flush(queue);
}
-static bool xenvif_rx_queue_stalled(struct xenvif_queue *queue)
+static RING_IDX xenvif_rx_queue_slots(const struct xenvif_queue *queue)
{
RING_IDX prod, cons;
prod = queue->rx.sring->req_prod;
cons = queue->rx.req_cons;
+ return prod - cons;
+}
+
+static bool xenvif_rx_queue_stalled(const struct xenvif_queue *queue)
+{
+ unsigned int needed = READ_ONCE(queue->rx_slots_needed);
+
return !queue->stalled &&
- prod - cons < 1 &&
+ xenvif_rx_queue_slots(queue) < needed &&
time_after(jiffies,
queue->last_rx_time + queue->vif->stall_timeout);
}
static bool xenvif_rx_queue_ready(struct xenvif_queue *queue)
{
- RING_IDX prod, cons;
+ unsigned int needed = READ_ONCE(queue->rx_slots_needed);
- prod = queue->rx.sring->req_prod;
- cons = queue->rx.req_cons;
-
- return queue->stalled && prod - cons >= 1;
+ return queue->stalled && xenvif_rx_queue_slots(queue) >= needed;
}
-static bool xenvif_have_rx_work(struct xenvif_queue *queue)
+static bool xenvif_have_rx_work(struct xenvif_queue *queue, bool test_kthread)
{
return xenvif_rx_ring_slots_available(queue) ||
(queue->vif->stall_timeout &&
(xenvif_rx_queue_stalled(queue) ||
xenvif_rx_queue_ready(queue))) ||
- kthread_should_stop() ||
+ (test_kthread && kthread_should_stop()) ||
queue->vif->disabled;
}
@@ -527,15 +555,20 @@ static void xenvif_wait_for_rx_work(struct xenvif_queue *queue)
{
DEFINE_WAIT(wait);
- if (xenvif_have_rx_work(queue))
+ if (xenvif_have_rx_work(queue, true))
return;
for (;;) {
long ret;
prepare_to_wait(&queue->wq, &wait, TASK_INTERRUPTIBLE);
- if (xenvif_have_rx_work(queue))
+ if (xenvif_have_rx_work(queue, true))
break;
+ if (atomic_fetch_andnot(NETBK_RX_EOI | NETBK_COMMON_EOI,
+ &queue->eoi_pending) &
+ (NETBK_RX_EOI | NETBK_COMMON_EOI))
+ xen_irq_lateeoi(queue->rx_irq, 0);
+
ret = schedule_timeout(xenvif_rx_queue_timeout(queue));
if (!ret)
break;
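
For context (not part of the patch): the new comment above xenvif_update_needed_slots() obliges any caller outside the RX kthread to wake that thread after queueing a packet. Below is a minimal sketch of that calling pattern, assuming the driver's existing xenvif_rx_queue_tail() and xenvif_kick_thread() helpers as declared in xen-netback's common.h; the wrapper name example_queue_rx_skb() is purely illustrative and does not exist in the driver.

static void example_queue_rx_skb(struct xenvif_queue *queue,
				 struct sk_buff *skb)
{
	/*
	 * Queue the packet; with this patch the SKB is dropped (and
	 * rx_dropped incremented) if rx_queue_len is already at
	 * rx_queue_max, and rx_slots_needed is recorded when the SKB
	 * lands on an empty queue.
	 */
	xenvif_rx_queue_tail(queue, skb);

	/*
	 * Callers outside the RX thread must kick the thread afterwards,
	 * otherwise it may have just sampled the old (zero)
	 * rx_slots_needed value and gone to sleep, missing the packet
	 * queued above.
	 */
	xenvif_kick_thread(queue);
}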