Diffstat (limited to 'drivers/usb/host/xhci-ring.c')
 drivers/usb/host/xhci-ring.c | 112 ++++++++++++++++++++++++++++++-----------
 1 file changed, 84 insertions(+), 28 deletions(-)
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 9828c1eff9a5..681d5bb99d99 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -350,16 +350,29 @@ static void xhci_handle_stopped_cmd_ring(struct xhci_hcd *xhci,
/* Must be called with xhci->lock held; releases and re-acquires the lock */
static int xhci_abort_cmd_ring(struct xhci_hcd *xhci, unsigned long flags)
{
- u64 temp_64;
+ struct xhci_segment *new_seg = xhci->cmd_ring->deq_seg;
+ union xhci_trb *new_deq = xhci->cmd_ring->dequeue;
+ u64 crcr;
int ret;
xhci_dbg(xhci, "Abort command ring\n");
reinit_completion(&xhci->cmd_ring_stop_completion);
- temp_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
- xhci_write_64(xhci, temp_64 | CMD_RING_ABORT,
- &xhci->op_regs->cmd_ring);
+ /*
+ * Control bits such as command stop and abort live in the lower
+ * dword of the command ring control register. Some controllers
+ * require all 64 bits to be written to abort the ring.
+ * Make sure the upper dword is valid and points to the next command,
+ * so the command ring pointer isn't corrupted if the ring has already
+ * stopped by the time the upper dword is written.
+ */
+ next_trb(xhci, NULL, &new_seg, &new_deq);
+ if (trb_is_link(new_deq))
+ next_trb(xhci, NULL, &new_seg, &new_deq);
+
+ crcr = xhci_trb_virt_to_dma(new_seg, new_deq);
+ xhci_write_64(xhci, crcr | CMD_RING_ABORT, &xhci->op_regs->cmd_ring);
/* Section 4.6.1.2 of xHCI 1.0 spec says software should also time the
* completion of the Command Abort operation. If CRR is not negated in 5
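The comment above (cut off by the hunk boundary) refers to the poll that follows this write: the driver waits for the Command Ring Running (CRR) bit to be negated. A minimal sketch of that wait, assuming the driver's usual xhci_handshake() poll helper and a 5 second timeout:

	ret = xhci_handshake(&xhci->op_regs->cmd_ring,
			     CMD_RING_RUNNING, 0, 5 * 1000 * 1000);
	if (ret < 0) {
		xhci_err(xhci, "Abort failed to stop command ring: %d\n", ret);
		/* on timeout the driver gives up on the host (assumption) */
	}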
@@ -444,6 +457,26 @@ static void ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
}
}
+static struct xhci_virt_ep *xhci_get_virt_ep(struct xhci_hcd *xhci,
+ unsigned int slot_id,
+ unsigned int ep_index)
+{
+ if (slot_id == 0 || slot_id >= MAX_HC_SLOTS) {
+ xhci_warn(xhci, "Invalid slot_id %u\n", slot_id);
+ return NULL;
+ }
+ if (ep_index >= EP_CTX_PER_DEV) {
+ xhci_warn(xhci, "Invalid endpoint index %u\n", ep_index);
+ return NULL;
+ }
+ if (!xhci->devs[slot_id]) {
+ xhci_warn(xhci, "No xhci virt device for slot_id %u\n", slot_id);
+ return NULL;
+ }
+
+ return &xhci->devs[slot_id]->eps[ep_index];
+}
+
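This helper centralizes the slot and endpoint sanity checks that the command and transfer event handlers below previously skipped. Every converted call site follows the same pattern; a minimal sketch, mirroring the hunks that follow:

	ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
	if (!ep)
		return;	/* bogus slot_id/ep_index from the controller; drop the event */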
/* Get the right ring for the given slot_id, ep_index and stream_id.
* If the endpoint supports streams, boundary check the URB's stream ID.
* If the endpoint doesn't support streams, return the singular endpoint ring.
@@ -454,7 +487,10 @@ struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci,
{
struct xhci_virt_ep *ep;
- ep = &xhci->devs[slot_id]->eps[ep_index];
+ ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
+ if (!ep)
+ return NULL;
+
/* Common case: no streams */
if (!(ep->ep_state & EP_HAS_STREAMS))
return ep->ring;
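The streams branch that follows this context (outside the hunk) boundary-checks the URB's stream ID before indexing the per-stream ring table. A rough sketch, assuming ep->stream_info carries a num_streams count and a stream_rings[] array:

	if (stream_id == 0 || stream_id >= ep->stream_info->num_streams) {
		xhci_warn(xhci, "Invalid stream ID %u\n", stream_id);
		return NULL;
	}
	return ep->stream_info->stream_rings[stream_id];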
@@ -681,11 +717,16 @@ static void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci,
dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len,
DMA_FROM_DEVICE);
/* for IN transfers we need to copy the data from bounce to sg */
- len = sg_pcopy_from_buffer(urb->sg, urb->num_sgs, seg->bounce_buf,
- seg->bounce_len, seg->bounce_offs);
- if (len != seg->bounce_len)
- xhci_warn(xhci, "WARN Wrong bounce buffer read length: %zu != %d\n",
- len, seg->bounce_len);
+ if (urb->num_sgs) {
+ len = sg_pcopy_from_buffer(urb->sg, urb->num_sgs, seg->bounce_buf,
+ seg->bounce_len, seg->bounce_offs);
+ if (len != seg->bounce_len)
+ xhci_warn(xhci, "WARN Wrong bounce buffer read length: %zu != %d\n",
+ len, seg->bounce_len);
+ } else {
+ memcpy(urb->transfer_buffer + seg->bounce_offs, seg->bounce_buf,
+ seg->bounce_len);
+ }
seg->bounce_len = 0;
seg->bounce_offs = 0;
}
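The new else branch covers URBs submitted with a contiguous buffer and no scatter-gather list: urb->num_sgs is 0 and the data lives in urb->transfer_buffer, so sg_pcopy_from_buffer() must not be called. A minimal sketch of such a submission (buffer, endpoint and completion names are hypothetical):

	buf = kmalloc(len, GFP_KERNEL);
	usb_fill_bulk_urb(urb, udev, usb_rcvbulkpipe(udev, ep_addr),
			  buf, len, my_complete, my_ctx);
	ret = usb_submit_urb(urb, GFP_KERNEL);	/* urb->num_sgs == 0 here */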
@@ -724,11 +765,14 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
memset(&deq_state, 0, sizeof(deq_state));
ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
+ ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
+ if (!ep)
+ return;
+
vdev = xhci->devs[slot_id];
ep_ctx = xhci_get_ep_ctx(xhci, vdev->out_ctx, ep_index);
trace_xhci_handle_cmd_stop_ep(ep_ctx);
- ep = &xhci->devs[slot_id]->eps[ep_index];
last_unlinked_td = list_last_entry(&ep->cancelled_td_list,
struct xhci_td, cancelled_td_list);
@@ -1052,9 +1096,11 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2]));
- dev = xhci->devs[slot_id];
- ep = &dev->eps[ep_index];
+ ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
+ if (!ep)
+ return;
+ dev = xhci->devs[slot_id];
ep_ring = xhci_stream_id_to_ring(dev, ep_index, stream_id);
if (!ep_ring) {
xhci_warn(xhci, "WARN Set TR deq ptr command for freed stream ID %u\n",
@@ -1127,9 +1173,9 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
}
cleanup:
- dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
- dev->eps[ep_index].queued_deq_seg = NULL;
- dev->eps[ep_index].queued_deq_ptr = NULL;
+ ep->ep_state &= ~SET_DEQ_PENDING;
+ ep->queued_deq_seg = NULL;
+ ep->queued_deq_ptr = NULL;
/* Restart any rings with pending URBs */
ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
}
@@ -1138,10 +1184,15 @@ static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
union xhci_trb *trb, u32 cmd_comp_code)
{
struct xhci_virt_device *vdev;
+ struct xhci_virt_ep *ep;
struct xhci_ep_ctx *ep_ctx;
unsigned int ep_index;
ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
+ ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
+ if (!ep)
+ return;
+
vdev = xhci->devs[slot_id];
ep_ctx = xhci_get_ep_ctx(xhci, vdev->out_ctx, ep_index);
trace_xhci_handle_cmd_reset_ep(ep_ctx);
@@ -1171,7 +1222,7 @@ static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
xhci_ring_cmd_db(xhci);
} else {
/* Clear our internal halted state */
- xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_HALTED;
+ ep->ep_state &= ~EP_HALTED;
}
}
@@ -2347,14 +2398,13 @@ static int handle_tx_event(struct xhci_hcd *xhci,
trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
ep_trb_dma = le64_to_cpu(event->buffer);
- xdev = xhci->devs[slot_id];
- if (!xdev) {
- xhci_err(xhci, "ERROR Transfer event pointed to bad slot %u\n",
- slot_id);
+ ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
+ if (!ep) {
+ xhci_err(xhci, "ERROR Invalid Transfer event\n");
goto err_out;
}
- ep = &xdev->eps[ep_index];
+ xdev = xhci->devs[slot_id];
ep_ring = xhci_dma_to_transfer_ring(ep, ep_trb_dma);
ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
@@ -2907,6 +2957,8 @@ static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
trb->field[0] = cpu_to_le32(field1);
trb->field[1] = cpu_to_le32(field2);
trb->field[2] = cpu_to_le32(field3);
+ /* make sure TRB is fully written before giving it to the controller */
+ wmb();
trb->field[3] = cpu_to_le32(field4);
trace_xhci_queue_trb(ring, trb);
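field[3] holds the TRB's cycle bit, which hands ownership of the TRB to the controller, so the barrier ensures the first three dwords are visible before the handoff. The same producer-side ordering applies to any device-shared descriptor ring; a generic sketch with hypothetical descriptor fields:

	desc->addr  = cpu_to_le64(dma_addr);	/* payload dwords first */
	desc->len   = cpu_to_le32(len);
	wmb();					/* order payload before the ownership flip */
	desc->flags = cpu_to_le32(flags | OWN_BIT);	/* device may read it now */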
@@ -3250,12 +3302,16 @@ static int xhci_align_td(struct xhci_hcd *xhci, struct urb *urb, u32 enqd_len,
/* create a max_pkt sized bounce buffer pointed to by the last trb */
if (usb_urb_dir_out(urb)) {
- len = sg_pcopy_to_buffer(urb->sg, urb->num_sgs,
- seg->bounce_buf, new_buff_len, enqd_len);
- if (len != new_buff_len)
- xhci_warn(xhci,
- "WARN Wrong bounce buffer write length: %zu != %d\n",
- len, new_buff_len);
+ if (urb->num_sgs) {
+ len = sg_pcopy_to_buffer(urb->sg, urb->num_sgs,
+ seg->bounce_buf, new_buff_len, enqd_len);
+ if (len != new_buff_len)
+ xhci_warn(xhci, "WARN Wrong bounce buffer write length: %zu != %d\n",
+ len, new_buff_len);
+ } else {
+ memcpy(seg->bounce_buf, urb->transfer_buffer + enqd_len, new_buff_len);
+ }
+
seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
max_pkt, DMA_TO_DEVICE);
} else {
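The context cuts off inside xhci_align_td(); after either mapping, a streaming DMA mapping can fail, and the usual follow-up (assumed here, not shown in the hunk) is to check the handle before handing the buffer to hardware:

	if (dma_mapping_error(dev, seg->bounce_dma)) {
		xhci_warn(xhci, "Failed mapping bounce buffer, not aligning\n");
		return 0;	/* fall back to a shorter, unaligned TD */
	}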