Diffstat (limited to 'drivers')
-rw-r--r--  drivers/usb/host/xhci-mem.c   32
-rw-r--r--  drivers/usb/host/xhci-pci.c    3
-rw-r--r--  drivers/usb/host/xhci-ring.c  53
-rw-r--r--  drivers/usb/host/xhci.h        1
4 files changed, 51 insertions, 38 deletions
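In short, this change adds an XHCI_AMD_0x96_HOST quirk (set for AMD hosts reporting hci_version 0x96) and threads a bool isoc flag through xhci_ring_alloc(), xhci_link_segments(), xhci_reinit_cached_ring(), prepare_ring(), prepare_transfer(), inc_enq() and queue_trb(), so that link TRBs on isochronous rings always carry the chain bit on such hosts, just as xhci_link_trb_quirk() already forces for 0.95 hardware. The standalone sketch below models only that chain-bit decision; the flag names and helper are simplified stand-ins, not the kernel's own API.

/* chain_bit_sketch.c - minimal model of the link-TRB chain-bit rule
 * introduced by this patch.  Simplified flags, not kernel code. */
#include <stdbool.h>
#include <stdio.h>

#define QUIRK_LINK_TRB      (1 << 0)  /* 0.95 host: always chain link TRBs   */
#define QUIRK_AMD_0x96_HOST (1 << 1)  /* AMD 0.96 host: chain isoc link TRBs */

/* Returns true when a ring's link TRB must carry the chain bit. */
static bool link_trb_needs_chain(unsigned int quirks, bool isoc)
{
	return (quirks & QUIRK_LINK_TRB) ||
	       (isoc && (quirks & QUIRK_AMD_0x96_HOST));
}

int main(void)
{
	/* Bulk ring on an AMD 0.96 host: chain bit is carried over as before. */
	printf("bulk on AMD 0.96 host: chain=%d\n",
	       link_trb_needs_chain(QUIRK_AMD_0x96_HOST, false));
	/* Isoc ring on the same host: chain bit is now forced on. */
	printf("isoc on AMD 0.96 host: chain=%d\n",
	       link_trb_needs_chain(QUIRK_AMD_0x96_HOST, true));
	return 0;
}

Compiled and run, this prints chain=0 for the bulk case and chain=1 for the isoc case.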
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 3ec2ac9636fe..a6ff8252699e 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -79,7 +79,7 @@ static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
* related flags, such as End TRB, Toggle Cycle, and no snoop.
*/
static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
- struct xhci_segment *next, bool link_trbs)
+ struct xhci_segment *next, bool link_trbs, bool isoc)
{
u32 val;
@@ -95,7 +95,9 @@ static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
val &= ~TRB_TYPE_BITMASK;
val |= TRB_TYPE(TRB_LINK);
/* Always set the chain bit with 0.95 hardware */
- if (xhci_link_trb_quirk(xhci))
+ /* Set chain bit for isoc rings on AMD 0.96 host */
+ if (xhci_link_trb_quirk(xhci) ||
+ (isoc && (xhci->quirks & XHCI_AMD_0x96_HOST)))
val |= TRB_CHAIN;
prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val);
}
@@ -152,7 +154,7 @@ static void xhci_initialize_ring_info(struct xhci_ring *ring)
* See section 4.9.1 and figures 15 and 16.
*/
static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
- unsigned int num_segs, bool link_trbs, gfp_t flags)
+ unsigned int num_segs, bool link_trbs, bool isoc, gfp_t flags)
{
struct xhci_ring *ring;
struct xhci_segment *prev;
@@ -178,12 +180,12 @@ static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
next = xhci_segment_alloc(xhci, flags);
if (!next)
goto fail;
- xhci_link_segments(xhci, prev, next, link_trbs);
+ xhci_link_segments(xhci, prev, next, link_trbs, isoc);
prev = next;
num_segs--;
}
- xhci_link_segments(xhci, prev, ring->first_seg, link_trbs);
+ xhci_link_segments(xhci, prev, ring->first_seg, link_trbs, isoc);
if (link_trbs) {
/* See section 4.9.2.1 and 6.4.4.1 */
@@ -229,14 +231,14 @@ void xhci_free_or_cache_endpoint_ring(struct xhci_hcd *xhci,
* pointers to the beginning of the ring.
*/
static void xhci_reinit_cached_ring(struct xhci_hcd *xhci,
- struct xhci_ring *ring)
+ struct xhci_ring *ring, bool isoc)
{
struct xhci_segment *seg = ring->first_seg;
do {
memset(seg->trbs, 0,
sizeof(union xhci_trb)*TRBS_PER_SEGMENT);
/* All endpoint rings have link TRBs */
- xhci_link_segments(xhci, seg, seg->next, 1);
+ xhci_link_segments(xhci, seg, seg->next, 1, isoc);
seg = seg->next;
} while (seg != ring->first_seg);
xhci_initialize_ring_info(ring);
@@ -540,7 +542,7 @@ struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
*/
for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
stream_info->stream_rings[cur_stream] =
- xhci_ring_alloc(xhci, 1, true, mem_flags);
+ xhci_ring_alloc(xhci, 1, true, false, mem_flags);
cur_ring = stream_info->stream_rings[cur_stream];
if (!cur_ring)
goto cleanup_rings;
@@ -874,7 +876,7 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
}
/* Allocate endpoint 0 ring */
- dev->eps[0].ring = xhci_ring_alloc(xhci, 1, true, flags);
+ dev->eps[0].ring = xhci_ring_alloc(xhci, 1, true, false, flags);
if (!dev->eps[0].ring)
goto fail;
@@ -1315,10 +1317,10 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
*/
if (usb_endpoint_xfer_isoc(&ep->desc))
virt_dev->eps[ep_index].new_ring =
- xhci_ring_alloc(xhci, 8, true, mem_flags);
+ xhci_ring_alloc(xhci, 8, true, true, mem_flags);
else
virt_dev->eps[ep_index].new_ring =
- xhci_ring_alloc(xhci, 1, true, mem_flags);
+ xhci_ring_alloc(xhci, 1, true, false, mem_flags);
if (!virt_dev->eps[ep_index].new_ring) {
/* Attempt to use the ring cache */
if (virt_dev->num_rings_cached == 0)
@@ -1327,7 +1329,8 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
virt_dev->ring_cache[virt_dev->num_rings_cached];
virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL;
virt_dev->num_rings_cached--;
- xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring);
+ xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring,
+ usb_endpoint_xfer_isoc(&ep->desc) ? true : false);
}
virt_dev->eps[ep_index].skip = false;
ep_ring = virt_dev->eps[ep_index].new_ring;
@@ -2236,7 +2239,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
goto fail;
/* Set up the command ring to have one segments for now. */
- xhci->cmd_ring = xhci_ring_alloc(xhci, 1, true, flags);
+ xhci->cmd_ring = xhci_ring_alloc(xhci, 1, true, false, flags);
if (!xhci->cmd_ring)
goto fail;
xhci_dbg(xhci, "Allocated command ring at %p\n", xhci->cmd_ring);
@@ -2267,7 +2270,8 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
* the event ring segment table (ERST). Section 4.9.3.
*/
xhci_dbg(xhci, "// Allocating event ring\n");
- xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, false, flags);
+ xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, false, false,
+ flags);
if (!xhci->event_ring)
goto fail;
if (xhci_check_trb_in_td_math(xhci, flags) < 0)
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index e66e2b03fbbe..732837eafabe 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -128,6 +128,9 @@ static int xhci_pci_setup(struct usb_hcd *hcd)
if (pdev->vendor == PCI_VENDOR_ID_NEC)
xhci->quirks |= XHCI_NEC_HOST;
+ if (pdev->vendor == PCI_VENDOR_ID_AMD && xhci->hci_version == 0x96)
+ xhci->quirks |= XHCI_AMD_0x96_HOST;
+
/* AMD PLL quirk */
if (pdev->vendor == PCI_VENDOR_ID_AMD && usb_amd_find_chipset_info())
xhci->quirks |= XHCI_AMD_PLL_FIX;
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index a3679635382a..e4b7f003d702 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -185,7 +185,7 @@ static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer
* prepare_transfer()?
*/
static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
- bool consumer, bool more_trbs_coming)
+ bool consumer, bool more_trbs_coming, bool isoc)
{
u32 chain;
union xhci_trb *next;
@@ -212,11 +212,13 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
if (!chain && !more_trbs_coming)
break;
- /* If we're not dealing with 0.95 hardware,
+ /* If we're not dealing with 0.95 hardware or
+ * isoc rings on AMD 0.96 host,
* carry over the chain bit of the previous TRB
* (which may mean the chain bit is cleared).
*/
- if (!xhci_link_trb_quirk(xhci)) {
+ if (!(isoc && (xhci->quirks & XHCI_AMD_0x96_HOST))
+ && !xhci_link_trb_quirk(xhci)) {
next->link.control &=
cpu_to_le32(~TRB_CHAIN);
next->link.control |=
@@ -2391,7 +2393,7 @@ irqreturn_t xhci_msi_irq(int irq, struct usb_hcd *hcd)
* prepare_transfer()?
*/
static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
- bool consumer, bool more_trbs_coming,
+ bool consumer, bool more_trbs_coming, bool isoc,
u32 field1, u32 field2, u32 field3, u32 field4)
{
struct xhci_generic_trb *trb;
@@ -2401,7 +2403,7 @@ static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
trb->field[1] = cpu_to_le32(field2);
trb->field[2] = cpu_to_le32(field3);
trb->field[3] = cpu_to_le32(field4);
- inc_enq(xhci, ring, consumer, more_trbs_coming);
+ inc_enq(xhci, ring, consumer, more_trbs_coming, isoc);
}
/*
@@ -2409,7 +2411,7 @@ static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
* FIXME allocate segments if the ring is full.
*/
static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
- u32 ep_state, unsigned int num_trbs, gfp_t mem_flags)
+ u32 ep_state, unsigned int num_trbs, bool isoc, gfp_t mem_flags)
{
/* Make sure the endpoint has been added to xHC schedule */
switch (ep_state) {
@@ -2451,10 +2453,11 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
next = ring->enqueue;
while (last_trb(xhci, ring, ring->enq_seg, next)) {
- /* If we're not dealing with 0.95 hardware,
- * clear the chain bit.
+ /* If we're not dealing with 0.95 hardware or isoc rings
+ * on AMD 0.96 host, clear the chain bit.
*/
- if (!xhci_link_trb_quirk(xhci))
+ if (!xhci_link_trb_quirk(xhci) && !(isoc &&
+ (xhci->quirks & XHCI_AMD_0x96_HOST)))
next->link.control &= cpu_to_le32(~TRB_CHAIN);
else
next->link.control |= cpu_to_le32(TRB_CHAIN);
@@ -2487,6 +2490,7 @@ static int prepare_transfer(struct xhci_hcd *xhci,
unsigned int num_trbs,
struct urb *urb,
unsigned int td_index,
+ bool isoc,
gfp_t mem_flags)
{
int ret;
@@ -2504,7 +2508,7 @@ static int prepare_transfer(struct xhci_hcd *xhci,
ret = prepare_ring(xhci, ep_ring,
le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
- num_trbs, mem_flags);
+ num_trbs, isoc, mem_flags);
if (ret)
return ret;
@@ -2727,7 +2731,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
trb_buff_len = prepare_transfer(xhci, xhci->devs[slot_id],
ep_index, urb->stream_id,
- num_trbs, urb, 0, mem_flags);
+ num_trbs, urb, 0, false, mem_flags);
if (trb_buff_len < 0)
return trb_buff_len;
@@ -2822,7 +2826,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
more_trbs_coming = true;
else
more_trbs_coming = false;
- queue_trb(xhci, ep_ring, false, more_trbs_coming,
+ queue_trb(xhci, ep_ring, false, more_trbs_coming, false,
lower_32_bits(addr),
upper_32_bits(addr),
length_field,
@@ -2913,7 +2917,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
ret = prepare_transfer(xhci, xhci->devs[slot_id],
ep_index, urb->stream_id,
- num_trbs, urb, 0, mem_flags);
+ num_trbs, urb, 0, false, mem_flags);
if (ret < 0)
return ret;
@@ -2985,7 +2989,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
more_trbs_coming = true;
else
more_trbs_coming = false;
- queue_trb(xhci, ep_ring, false, more_trbs_coming,
+ queue_trb(xhci, ep_ring, false, more_trbs_coming, false,
lower_32_bits(addr),
upper_32_bits(addr),
length_field,
@@ -3045,7 +3049,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
num_trbs++;
ret = prepare_transfer(xhci, xhci->devs[slot_id],
ep_index, urb->stream_id,
- num_trbs, urb, 0, mem_flags);
+ num_trbs, urb, 0, false, mem_flags);
if (ret < 0)
return ret;
@@ -3078,7 +3082,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
}
}
- queue_trb(xhci, ep_ring, false, true,
+ queue_trb(xhci, ep_ring, false, true, false,
setup->bRequestType | setup->bRequest << 8 | le16_to_cpu(setup->wValue) << 16,
le16_to_cpu(setup->wIndex) | le16_to_cpu(setup->wLength) << 16,
TRB_LEN(8) | TRB_INTR_TARGET(0),
@@ -3098,7 +3102,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
if (urb->transfer_buffer_length > 0) {
if (setup->bRequestType & USB_DIR_IN)
field |= TRB_DIR_IN;
- queue_trb(xhci, ep_ring, false, true,
+ queue_trb(xhci, ep_ring, false, true, false,
lower_32_bits(urb->transfer_dma),
upper_32_bits(urb->transfer_dma),
length_field,
@@ -3114,7 +3118,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
field = 0;
else
field = TRB_DIR_IN;
- queue_trb(xhci, ep_ring, false, false,
+ queue_trb(xhci, ep_ring, false, false, false,
0,
0,
TRB_INTR_TARGET(0),
@@ -3263,7 +3267,8 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
trbs_per_td = count_isoc_trbs_needed(xhci, urb, i);
ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
- urb->stream_id, trbs_per_td, urb, i, mem_flags);
+ urb->stream_id, trbs_per_td, urb, i, true,
+ mem_flags);
if (ret < 0) {
if (i == 0)
return ret;
@@ -3333,7 +3338,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
remainder |
TRB_INTR_TARGET(0);
- queue_trb(xhci, ep_ring, false, more_trbs_coming,
+ queue_trb(xhci, ep_ring, false, more_trbs_coming, true,
lower_32_bits(addr),
upper_32_bits(addr),
length_field,
@@ -3415,7 +3420,7 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
* Do not insert any td of the urb to the ring if the check failed.
*/
ret = prepare_ring(xhci, ep_ring, le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
- num_trbs, mem_flags);
+ num_trbs, true, mem_flags);
if (ret)
return ret;
@@ -3474,7 +3479,7 @@ static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2,
reserved_trbs++;
ret = prepare_ring(xhci, xhci->cmd_ring, EP_STATE_RUNNING,
- reserved_trbs, GFP_ATOMIC);
+ reserved_trbs, false, GFP_ATOMIC);
if (ret < 0) {
xhci_err(xhci, "ERR: No room for command on command ring\n");
if (command_must_succeed)
@@ -3482,8 +3487,8 @@ static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2,
"unfailable commands failed.\n");
return ret;
}
- queue_trb(xhci, xhci->cmd_ring, false, false, field1, field2, field3,
- field4 | xhci->cmd_ring->cycle_state);
+ queue_trb(xhci, xhci->cmd_ring, false, false, false, field1, field2,
+ field3, field4 | xhci->cmd_ring->cycle_state);
return 0;
}
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index e738466703a5..4050656bffed 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1462,6 +1462,7 @@ struct xhci_hcd {
#define XHCI_BROKEN_MSI (1 << 6)
#define XHCI_RESET_ON_RESUME (1 << 7)
#define XHCI_SW_BW_CHECKING (1 << 8)
+#define XHCI_AMD_0x96_HOST (1 << 9)
unsigned int num_active_eps;
unsigned int limit_active_eps;
/* There are two roothubs to keep track of bus suspend info for */
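Note that the same quirk test ends up open-coded in three places above (xhci_link_segments(), inc_enq() and prepare_ring()). A hypothetical consolidation, assuming the kernel context of this patch and not something the diff itself introduces, could read:

/* Hypothetical helper, not part of this patch: one predicate for the
 * "must this link TRB carry the chain bit?" question. */
static bool xhci_link_chain_quirk(struct xhci_hcd *xhci, bool isoc)
{
	return xhci_link_trb_quirk(xhci) ||
	       (isoc && (xhci->quirks & XHCI_AMD_0x96_HOST));
}

Behaviourally this is identical to the checks the patch adds; it only keeps the three call sites from drifting apart.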