Merge in changes that allow the mctp-usb driver to use a receive URB queue.

Based on this: https://github.com/torvalds/linux/compare/master...CodeConstruct:linux:test/mctp-usb
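
In short: instead of a single rx_urb, the driver keeps up to n_rx_queue IN
urbs in flight, all held on a usb_anchor so mctp_usb_stop() can kill them in
one go. Rough sketch of the refill loop (names are those used in the patch
below; this is a summary of the approach, not a drop-in snippet):

  while (atomic_read(&mctp_usb->rx_qlen) < n_rx_queue) {
          struct urb *urb = usb_alloc_urb(0, GFP_KERNEL);

          if (!urb)
                  break;
          usb_anchor_urb(urb, &mctp_usb->rx_anchor);
          /* allocates an skb, submits the urb, bumps rx_qlen */
          if (mctp_usb_rx_queue(mctp_usb, urb, GFP_KERNEL)) {
                  usb_unanchor_urb(urb);
                  usb_free_urb(urb);
                  break;
          }
  }

The completion handler re-anchors and resubmits its urb; if that fails it
frees the urb and falls back to rx_retry_work.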

Change-Id: I2c2c69d8b566833ed7053a3bf79b49b73d1ad4e9
diff --git a/drivers/net/mctp/mctp-usb.c b/drivers/net/mctp/mctp-usb.c
index c5ac44f..3a91327 100644
--- a/drivers/net/mctp/mctp-usb.c
+++ b/drivers/net/mctp/mctp-usb.c
@@ -25,6 +25,9 @@
  */
 static const unsigned long RX_RETRY_DELAY = HZ/4;
 
+/* number of IN urbs to queue */
+static const unsigned int n_rx_queue = 8;
+
 struct mctp_usb {
 	struct usb_device *usbdev;
 	struct usb_interface *intf;
@@ -36,8 +39,9 @@ struct mctp_usb {
 	u8 ep_out;
 
 	struct urb *tx_urb;
-	struct urb *rx_urb;
-
+	struct usb_anchor rx_anchor;
+	/* number of urbs currently queued */
+	atomic_t rx_qlen;
 	struct delayed_work rx_retry_work;
 };
 
@@ -119,35 +123,35 @@ static netdev_tx_t mctp_usb_start_xmit(struct sk_buff *skb,
 
 static void mctp_usb_in_complete(struct urb *urb);
 
-static int mctp_usb_rx_queue(struct mctp_usb *mctp_usb, gfp_t gfp)
+static int mctp_usb_rx_queue(struct mctp_usb *mctp_usb, struct urb *urb,
+			     gfp_t gfp)
 {
 	struct sk_buff *skb;
 	int rc;
 
-	skb = __netdev_alloc_skb(mctp_usb->netdev, MCTP_USB_XFER_SIZE, gfp);
-	if (!skb) {
-		rc = -ENOMEM;
-		goto err_retry;
-	}
+	/* no point allocating if the submission is going to be rejected */
+	if (READ_ONCE(mctp_usb->stopped))
+		return 0;
 
-	usb_fill_bulk_urb(mctp_usb->rx_urb, mctp_usb->usbdev,
+	skb = __netdev_alloc_skb(mctp_usb->netdev, MCTP_USB_XFER_SIZE, gfp);
+	if (!skb)
+		return -ENOMEM;
+
+	usb_fill_bulk_urb(urb, mctp_usb->usbdev,
 			  usb_rcvbulkpipe(mctp_usb->usbdev, mctp_usb->ep_in),
 			  skb->data, MCTP_USB_XFER_SIZE,
 			  mctp_usb_in_complete, skb);
 
-	rc = usb_submit_urb(mctp_usb->rx_urb, gfp);
+	rc = usb_submit_urb(urb, gfp);
 	if (rc) {
 		netdev_dbg(mctp_usb->netdev, "rx urb submit failure: %d\n", rc);
 		kfree_skb(skb);
-		if (rc == -ENOMEM)
-			goto err_retry;
+		return rc;
 	}
 
-	return rc;
+	atomic_inc(&mctp_usb->rx_qlen);
 
-err_retry:
-	schedule_delayed_work(&mctp_usb->rx_retry_work, RX_RETRY_DELAY);
-	return rc;
+	return 0;
 }
 
 static void mctp_usb_in_complete(struct urb *urb)
@@ -157,15 +161,18 @@ static void mctp_usb_in_complete(struct urb *urb)
 	struct mctp_usb *mctp_usb = netdev_priv(netdev);
 	struct mctp_skb_cb *cb;
 	unsigned int len;
-	int status;
+	int status, rc;
 
 	status = urb->status;
+	atomic_dec(&mctp_usb->rx_qlen);
 
 	switch (status) {
 	case -ENOENT:
 	case -ECONNRESET:
 	case -ESHUTDOWN:
 	case -EPROTO:
+		usb_unanchor_urb(urb);
+		usb_free_urb(urb);
 		kfree_skb(skb);
 		return;
 	case 0:
@@ -239,18 +246,57 @@ static void mctp_usb_in_complete(struct urb *urb)
 	if (skb)
 		kfree_skb(skb);
 
-	mctp_usb_rx_queue(mctp_usb, GFP_ATOMIC);
+	/* re-anchor: the core unanchors urbs before calling complete() */
+	usb_anchor_urb(urb, &mctp_usb->rx_anchor);
+	rc = mctp_usb_rx_queue(mctp_usb, urb, GFP_ATOMIC);
+	if (rc) {
+		usb_unanchor_urb(urb);
+		usb_free_urb(urb);
+		schedule_delayed_work(&mctp_usb->rx_retry_work, RX_RETRY_DELAY);
+	}
+}
+
+static int mctp_usb_rx_queue_fill(struct mctp_usb *mctp_usb)
+{
+	int i, qlen, rc = 0;
+
+	qlen = atomic_read(&mctp_usb->rx_qlen);
+	if (qlen < 0 || qlen >= n_rx_queue)
+		return 0;
+
+	for (i = 0; i < n_rx_queue - qlen; i++) {
+		struct urb *urb = usb_alloc_urb(0, GFP_KERNEL);
+
+		if (!urb) {
+			rc = -ENOMEM;
+			break;
+		}
+
+		usb_anchor_urb(urb, &mctp_usb->rx_anchor);
+
+		rc = mctp_usb_rx_queue(mctp_usb, urb, GFP_KERNEL);
+		if (rc) {
+			usb_unanchor_urb(urb);
+			usb_free_urb(urb);
+			break;
+		}
+	}
+
+	return rc;
 }
 
 static void mctp_usb_rx_retry_work(struct work_struct *work)
 {
 	struct mctp_usb *mctp_usb = container_of(work, struct mctp_usb,
 						 rx_retry_work.work);
+	int rc;
 
 	if (READ_ONCE(mctp_usb->stopped))
 		return;
 
-	mctp_usb_rx_queue(mctp_usb, GFP_KERNEL);
+	rc = mctp_usb_rx_queue_fill(mctp_usb);
+	if (rc)
+		schedule_delayed_work(&mctp_usb->rx_retry_work, RX_RETRY_DELAY);
 }
 
 static int mctp_usb_open(struct net_device *dev)
@@ -259,7 +302,7 @@ static int mctp_usb_open(struct net_device *dev)
 
 	WRITE_ONCE(mctp_usb->stopped, false);
 
-	return mctp_usb_rx_queue(mctp_usb, GFP_KERNEL);
+	return mctp_usb_rx_queue_fill(mctp_usb);
 }
 
 static int mctp_usb_stop(struct net_device *dev)
@@ -271,7 +314,7 @@ static int mctp_usb_stop(struct net_device *dev)
 	/* prevent RX submission retry */
 	WRITE_ONCE(mctp_usb->stopped, true);
 
-	usb_kill_urb(mctp_usb->rx_urb);
+	usb_kill_anchored_urbs(&mctp_usb->rx_anchor);
 	usb_kill_urb(mctp_usb->tx_urb);
 
 	cancel_delayed_work_sync(&mctp_usb->rx_retry_work);
@@ -333,24 +376,25 @@ static int mctp_usb_probe(struct usb_interface *intf,
 	dev->ep_in = ep_in->bEndpointAddress;
 	dev->ep_out = ep_out->bEndpointAddress;
 
+	rc = -ENOMEM;
 	dev->tx_urb = usb_alloc_urb(0, GFP_KERNEL);
-	dev->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
-	if (!dev->tx_urb || !dev->rx_urb) {
+	if (!dev->tx_urb) {
 		rc = -ENOMEM;
-		goto err_free_urbs;
+		goto err_free_tx_urb;
 	}
 
+	init_usb_anchor(&dev->rx_anchor);
+
 	INIT_DELAYED_WORK(&dev->rx_retry_work, mctp_usb_rx_retry_work);
 
 	rc = mctp_register_netdev(netdev, NULL, MCTP_PHYS_BINDING_USB);
 	if (rc)
-		goto err_free_urbs;
+		goto err_free_tx_urb;
 
 	return 0;
 
-err_free_urbs:
+err_free_tx_urb:
 	usb_free_urb(dev->tx_urb);
-	usb_free_urb(dev->rx_urb);
 	free_netdev(netdev);
 	return rc;
 }
@@ -361,7 +405,6 @@ static void mctp_usb_disconnect(struct usb_interface *intf)
 
 	mctp_unregister_netdev(dev->netdev);
 	usb_free_urb(dev->tx_urb);
-	usb_free_urb(dev->rx_urb);
 	usb_put_dev(dev->usbdev);
 	free_netdev(dev->netdev);
 }
diff --git a/net/mctp/route.c b/net/mctp/route.c
index 09b1613..416b6ba 100644
--- a/net/mctp/route.c
+++ b/net/mctp/route.c
@@ -165,6 +165,7 @@ void mctp_key_unref(struct mctp_sk_key *key)
 	mctp_dev_release_key(key->dev, key);
 	spin_unlock_irqrestore(&key->lock, flags);
 
+	kfree_skb(key->reasm_head);
 	kfree(key);
 }
 
@@ -282,6 +283,7 @@ static void mctp_skb_set_flow(struct sk_buff *skb, struct mctp_sk_key *key) {}
 static void mctp_flow_prepare_output(struct sk_buff *skb, struct mctp_dev *dev) {}
 #endif
 
+/* takes ownership of skb, both in success and failure cases */
 static int mctp_frag_queue(struct mctp_sk_key *key, struct sk_buff *skb)
 {
 	struct mctp_hdr *hdr = mctp_hdr(skb);
@@ -291,8 +293,10 @@ static int mctp_frag_queue(struct mctp_sk_key *key, struct sk_buff *skb)
 		& MCTP_HDR_SEQ_MASK;
 
 	if (!key->reasm_head) {
-		/* Since we're manipulating the shared frag_list, ensure it isn't
-		 * shared with any other SKBs.
+		/* Since we're manipulating the shared frag_list, ensure it
+		 * isn't shared with any other SKBs. In the cloned case,
+		 * this will free the skb; callers can no longer access it
+		 * safely.
 		 */
 		key->reasm_head = skb_unshare(skb, GFP_ATOMIC);
 		if (!key->reasm_head)
@@ -306,10 +310,10 @@ static int mctp_frag_queue(struct mctp_sk_key *key, struct sk_buff *skb)
 	exp_seq = (key->last_seq + 1) & MCTP_HDR_SEQ_MASK;
 
 	if (this_seq != exp_seq)
-		return -EINVAL;
+		goto err_free;
 
 	if (key->reasm_head->len + skb->len > mctp_message_maxlen)
-		return -EINVAL;
+		goto err_free;
 
 	skb->next = NULL;
 	skb->sk = NULL;
@@ -323,6 +327,10 @@ static int mctp_frag_queue(struct mctp_sk_key *key, struct sk_buff *skb)
 	key->reasm_head->truesize += skb->truesize;
 
 	return 0;
+
+err_free:
+	kfree_skb(skb);
+	return -EINVAL;
 }
 
 static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
@@ -423,19 +431,19 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
 			 * key isn't observable yet
 			 */
 			mctp_frag_queue(key, skb);
+			skb = NULL;
 
 			/* if the key_add fails, we've raced with another
 			 * SOM packet with the same src, dest and tag. There's
 			 * no way to distinguish future packets, so all we
-			 * can do is drop; we'll free the skb on exit from
-			 * this function.
+			 * can do is drop.
 			 */
 			rc = mctp_key_add(key, msk);
 			if (rc) {
 				kfree(key);
 			} else {
 				trace_mctp_key_acquire(key);
-
+				skb = NULL;
 				/* we don't need to release key->lock on exit */
 				mctp_key_unref(key);
 			}
@@ -459,10 +467,15 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
 		 */
 
 		/* we need to be continuing an existing reassembly... */
-		if (!key->reasm_head)
+		if (!key->reasm_head) {
 			rc = -EINVAL;
-		else
+		} else {
 			rc = mctp_frag_queue(key, skb);
+			skb = NULL;
+		}
+
+		if (rc)
+			goto out_unlock;
 
 		/* end of message? deliver to socket, and we're done with
 		 * the reassembly/response key