Diffstat (limited to 'drivers/usb/dwc3/gadget.c')
-rw-r--r--	drivers/usb/dwc3/gadget.c	166
1 file changed, 125 insertions(+), 41 deletions(-)
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index caf8f8edb77a..7a28048faa3e 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -1068,7 +1068,14 @@ static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep,
unsigned int rem = length % maxp;
unsigned chain = true;
- if (sg_is_last(s))
+ /*
+ * The IOMMU driver may coalesce sgs which share a page boundary
+ * into one mapping before handing the list to the USB driver.
+ * With this the number of sgs mapped is not equal to the number
+ * of sgs passed, so mark the chain bit false if this is the last
+ * mapped sg.
+ */
+ if (i == remaining - 1)
chain = false;
if (rem && usb_endpoint_dir_out(dep->endpoint.desc) && !chain) {
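A minimal host-side sketch (not kernel code; the entry counts are made up) of why the chain bit has to key off the number of mapped entries rather than sg_is_last() on the original list:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	int nents = 4;    /* entries handed to dma_map_sg()              */
	int mapped = 2;   /* entries actually mapped after IOMMU merging */

	/*
	 * sg_is_last() would only fire on entry nents - 1 = 3, which the
	 * loop below never reaches; bounding on the mapped count clears
	 * the chain bit on the real final TRB instead.
	 */
	for (int i = 0; i < mapped; i++) {
		bool chain = (i != mapped - 1);
		printf("TRB %d of %d: chain=%d\n", i, nents, chain);
	}
	return 0;
}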
@@ -1210,6 +1217,8 @@ static void dwc3_prepare_trbs(struct dwc3_ep *dep)
}
}
+static void dwc3_gadget_ep_cleanup_cancelled_requests(struct dwc3_ep *dep);
+
static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep)
{
struct dwc3_gadget_ep_cmd_params params;
@@ -1249,14 +1258,20 @@ static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep)
ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
if (ret < 0) {
- /*
- * FIXME we need to iterate over the list of requests
- * here and stop, unmap, free and del each of the linked
- * requests instead of what we do now.
- */
- if (req->trb)
- memset(req->trb, 0, sizeof(struct dwc3_trb));
- dwc3_gadget_del_and_unmap_request(dep, req, ret);
+ struct dwc3_request *tmp;
+
+ if (ret == -EAGAIN)
+ return ret;
+
+ dwc3_stop_active_transfer(dep, true, true);
+
+ list_for_each_entry_safe(req, tmp, &dep->started_list, list)
+ dwc3_gadget_move_cancelled_request(req);
+
+ /* If ep isn't started, then there's no end transfer pending */
+ if (!(dep->flags & DWC3_EP_END_TRANSFER_PENDING))
+ dwc3_gadget_ep_cleanup_cancelled_requests(dep);
+
return ret;
}
@@ -1447,6 +1462,12 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
list_add_tail(&req->list, &dep->pending_list);
req->status = DWC3_REQUEST_STATUS_QUEUED;
+ /* Start the transfer only after the END_TRANSFER is completed */
+ if (dep->flags & DWC3_EP_END_TRANSFER_PENDING) {
+ dep->flags |= DWC3_EP_DELAY_START;
+ return 0;
+ }
+
/*
* NOTICE: Isochronous endpoints should NEVER be prestarted. We must
* wait for a XferNotReady event so we will know what's the current
@@ -1492,6 +1513,10 @@ static void dwc3_gadget_ep_skip_trbs(struct dwc3_ep *dep, struct dwc3_request *r
{
int i;
+ /* If req->trb is not set, then the request has not started */
+ if (!req->trb)
+ return;
+
/*
* If request was already started, this means we had to
* stop the transfer. With that we also need to ignore
@@ -1505,7 +1530,7 @@ static void dwc3_gadget_ep_skip_trbs(struct dwc3_ep *dep, struct dwc3_request *r
for (i = 0; i < req->num_trbs; i++) {
struct dwc3_trb *trb;
- trb = req->trb + i;
+ trb = &dep->trb_pool[dep->trb_dequeue];
trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
dwc3_ep_inc_deq(dep);
}
@@ -1582,6 +1607,8 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol)
{
struct dwc3_gadget_ep_cmd_params params;
struct dwc3 *dwc = dep->dwc;
+ struct dwc3_request *req;
+ struct dwc3_request *tmp;
int ret;
if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
@@ -1618,13 +1645,37 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol)
else
dep->flags |= DWC3_EP_STALL;
} else {
+ /*
+ * Don't issue CLEAR_STALL command to control endpoints. The
+ * controller automatically clears the STALL when it receives
+ * the SETUP token.
+ */
+ if (dep->number <= 1) {
+ dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE);
+ return 0;
+ }
ret = dwc3_send_clear_stall_ep_cmd(dep);
- if (ret)
+ if (ret) {
dev_err(dwc->dev, "failed to clear STALL on %s\n",
dep->name);
- else
- dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE);
+ return ret;
+ }
+
+ dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE);
+
+ dwc3_stop_active_transfer(dep, true, true);
+
+ list_for_each_entry_safe(req, tmp, &dep->started_list, list)
+ dwc3_gadget_move_cancelled_request(req);
+
+ list_for_each_entry_safe(req, tmp, &dep->pending_list, list)
+ dwc3_gadget_move_cancelled_request(req);
+
+ if (!(dep->flags & DWC3_EP_END_TRANSFER_PENDING)) {
+ dep->flags &= ~DWC3_EP_DELAY_START;
+ dwc3_gadget_ep_cleanup_cancelled_requests(dep);
+ }
}
return ret;
@@ -1712,7 +1763,6 @@ static int __dwc3_gadget_wakeup(struct dwc3 *dwc)
u32 reg;
u8 link_state;
- u8 speed;
/*
* According to the Databook Remote wakeup request should
@@ -1722,16 +1772,13 @@ static int __dwc3_gadget_wakeup(struct dwc3 *dwc)
*/
reg = dwc3_readl(dwc->regs, DWC3_DSTS);
- speed = reg & DWC3_DSTS_CONNECTSPD;
- if ((speed == DWC3_DSTS_SUPERSPEED) ||
- (speed == DWC3_DSTS_SUPERSPEED_PLUS))
- return 0;
-
link_state = DWC3_DSTS_USBLNKST(reg);
switch (link_state) {
+ case DWC3_LINK_STATE_RESET:
case DWC3_LINK_STATE_RX_DET: /* in HS, means Early Suspend */
case DWC3_LINK_STATE_U3: /* in HS, means SUSPEND */
+ case DWC3_LINK_STATE_RESUME:
break;
default:
return -EINVAL;
@@ -2171,7 +2218,6 @@ static int dwc3_gadget_init_in_endpoint(struct dwc3_ep *dep)
{
struct dwc3 *dwc = dep->dwc;
int mdwidth;
- int kbytes;
int size;
mdwidth = DWC3_MDWIDTH(dwc->hwparams.hwparams0);
@@ -2187,17 +2233,17 @@ static int dwc3_gadget_init_in_endpoint(struct dwc3_ep *dep)
/* FIFO depth is in MDWIDTH bytes. Multiply */
size *= mdwidth;
- kbytes = size / 1024;
- if (kbytes == 0)
- kbytes = 1;
-
/*
- * FIFO sizes account an extra MDWIDTH * (kbytes + 1) bytes for
- * internal overhead. We don't really know how these are used,
- * but documentation say it exists.
+ * To meet performance requirements, a minimum TxFIFO size of 3x
+ * MaxPacketSize is recommended for endpoints that support burst and
+ * a minimum TxFIFO size of 2x MaxPacketSize for endpoints that
+ * don't. Using those numbers, we can calculate the max packet limit
+ * as below.
*/
- size -= mdwidth * (kbytes + 1);
- size /= kbytes;
+ if (dwc->maximum_speed >= USB_SPEED_SUPER)
+ size /= 3;
+ else
+ size /= 2;
usb_ep_set_maxpacket_limit(&dep->endpoint, size);
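A worked example of the new TxFIFO-based limit, using made-up hardware values (a 64-bit bus and a TxFIFO depth of 384 mdwidth-words; real values come from GTXFIFOSIZn and HWPARAMS0):

#include <stdio.h>

int main(void)
{
	int mdwidth = 64 / 8;        /* assumed 64-bit bus, in bytes    */
	int depth = 384;             /* assumed TXFDEP in mdwidth words */
	int size = depth * mdwidth;  /* 3072 bytes of TxFIFO            */
	int super = 1;               /* assume maximum_speed >= SUPER   */

	/* 3x MaxPacketSize for bursting endpoints, 2x otherwise */
	size = super ? size / 3 : size / 2;
	printf("maxpacket limit = %d\n", size);   /* 1024 */
	return 0;
}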
@@ -2215,8 +2261,39 @@ static int dwc3_gadget_init_in_endpoint(struct dwc3_ep *dep)
static int dwc3_gadget_init_out_endpoint(struct dwc3_ep *dep)
{
struct dwc3 *dwc = dep->dwc;
+ int mdwidth;
+ int size;
+
+ mdwidth = DWC3_MDWIDTH(dwc->hwparams.hwparams0);
+
+ /* MDWIDTH is represented in bits, convert to bytes */
+ mdwidth /= 8;
- usb_ep_set_maxpacket_limit(&dep->endpoint, 1024);
+ /* All OUT endpoints share a single RxFIFO space */
+ size = dwc3_readl(dwc->regs, DWC3_GRXFIFOSIZ(0));
+ if (dwc3_is_usb31(dwc))
+ size = DWC31_GRXFIFOSIZ_RXFDEP(size);
+ else
+ size = DWC3_GRXFIFOSIZ_RXFDEP(size);
+
+ /* FIFO depth is in MDWIDTH bytes */
+ size *= mdwidth;
+
+ /*
+ * To meet performance requirements, a minimum recommended RxFIFO
+ * size is defined as follows:
+ * RxFIFO size >= (3 x MaxPacketSize) +
+ * (3 x 8 bytes setup packets size) + (16 bytes clock crossing margin)
+ *
+ * Then calculate the max packet limit as below.
+ */
+ size -= (3 * 8) + 16;
+ if (size < 0)
+ size = 0;
+ else
+ size /= 3;
+
+ usb_ep_set_maxpacket_limit(&dep->endpoint, size);
dep->endpoint.max_streams = 15;
dep->endpoint.ops = &dwc3_gadget_ep_ops;
list_add_tail(&dep->endpoint.ep_list,
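The same arithmetic for the shared RxFIFO, again with made-up values (64-bit bus, RXFDEP of 389 words) to show how the setup-packet and clock-crossing reservation feeds into the limit:

#include <stdio.h>

int main(void)
{
	int mdwidth = 64 / 8;        /* assumed 64-bit bus, in bytes    */
	int depth = 389;             /* assumed RXFDEP in mdwidth words */
	int size = depth * mdwidth;  /* 3112 bytes of RxFIFO            */

	size -= (3 * 8) + 16;        /* 3 setup packets + clock margin  */
	size = size < 0 ? 0 : size / 3;
	printf("maxpacket limit = %d\n", size);   /* 1024 */
	return 0;
}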
@@ -2382,7 +2459,8 @@ static int dwc3_gadget_ep_reclaim_completed_trb(struct dwc3_ep *dep,
if (event->status & DEPEVT_STATUS_SHORT && !chain)
return 1;
- if (event->status & DEPEVT_STATUS_IOC)
+ if ((trb->ctrl & DWC3_TRB_CTRL_IOC) ||
+ (trb->ctrl & DWC3_TRB_CTRL_LST))
return 1;
return 0;
@@ -2402,9 +2480,6 @@ static int dwc3_gadget_ep_reclaim_trb_sg(struct dwc3_ep *dep,
for_each_sg(sg, s, pending, i) {
trb = &dep->trb_pool[dep->trb_dequeue];
- if (trb->ctrl & DWC3_TRB_CTRL_HWO)
- break;
-
req->sg = sg_next(s);
req->num_pending_sgs--;
@@ -2429,7 +2504,7 @@ static int dwc3_gadget_ep_reclaim_trb_linear(struct dwc3_ep *dep,
static bool dwc3_gadget_ep_request_completed(struct dwc3_request *req)
{
- return req->request.actual == req->request.length;
+ return req->num_pending_sgs == 0;
}
static int dwc3_gadget_ep_cleanup_completed_request(struct dwc3_ep *dep,
@@ -2453,8 +2528,7 @@ static int dwc3_gadget_ep_cleanup_completed_request(struct dwc3_ep *dep,
req->request.actual = req->request.length - req->remaining;
- if (!dwc3_gadget_ep_request_completed(req) &&
- req->num_pending_sgs) {
+ if (!dwc3_gadget_ep_request_completed(req)) {
__dwc3_gadget_kick_transfer(dep);
goto out;
}
@@ -2508,10 +2582,8 @@ static void dwc3_gadget_endpoint_transfer_in_progress(struct dwc3_ep *dep,
dwc3_gadget_ep_cleanup_completed_requests(dep, event, status);
- if (stop) {
+ if (stop)
dwc3_stop_active_transfer(dep, true, true);
- dep->flags = DWC3_EP_ENABLED;
- }
/*
* WORKAROUND: This is the 2nd half of U1/U2 -> U0 workaround.
@@ -2580,8 +2652,14 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
cmd = DEPEVT_PARAMETER_CMD(event->parameters);
if (cmd == DWC3_DEPCMD_ENDTRANSFER) {
+ dep->flags &= ~DWC3_EP_END_TRANSFER_PENDING;
dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
dwc3_gadget_ep_cleanup_cancelled_requests(dep);
+ if ((dep->flags & DWC3_EP_DELAY_START) &&
+ !usb_endpoint_xfer_isoc(dep->endpoint.desc))
+ __dwc3_gadget_kick_transfer(dep);
+
+ dep->flags &= ~DWC3_EP_DELAY_START;
}
break;
case DWC3_DEPEVT_STREAMEVT:
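A toy model of the deferred-start handshake added here and in __dwc3_gadget_ep_queue() (flag values and function names are illustrative, not the kernel's): queuing while an End Transfer command is outstanding only records a pending start, and the command-complete event performs the deferred kick.

#include <stdio.h>

#define EP_END_TRANSFER_PENDING	(1 << 0)
#define EP_DELAY_START		(1 << 1)

static unsigned int ep_flags = EP_END_TRANSFER_PENDING;

static void queue_request(void)
{
	if (ep_flags & EP_END_TRANSFER_PENDING) {
		ep_flags |= EP_DELAY_START;     /* defer the kick */
		return;
	}
	printf("kick transfer now\n");
}

static void end_transfer_complete(void)
{
	ep_flags &= ~EP_END_TRANSFER_PENDING;
	if (ep_flags & EP_DELAY_START)
		printf("kick deferred transfer\n");
	ep_flags &= ~EP_DELAY_START;
}

int main(void)
{
	queue_request();           /* deferred: end transfer pending */
	end_transfer_complete();   /* clears pending, kicks transfer */
	return 0;
}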
@@ -2638,7 +2716,8 @@ static void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force,
u32 cmd;
int ret;
- if (!(dep->flags & DWC3_EP_TRANSFER_STARTED))
+ if (!(dep->flags & DWC3_EP_TRANSFER_STARTED) ||
+ (dep->flags & DWC3_EP_END_TRANSFER_PENDING))
return;
/*
@@ -2681,6 +2760,11 @@ static void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force,
WARN_ON_ONCE(ret);
dep->resource_index = 0;
+ if (!interrupt)
+ dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
+ else
+ dep->flags |= DWC3_EP_END_TRANSFER_PENDING;
+
if (dwc3_is_usb31(dwc) || dwc->revision < DWC3_REVISION_310A)
udelay(100);
}