usb: xhci: Extract secondary ring functions to a separate module

In preparation for an upcoming change to enable CONFIG_USB_XHCI_HCD=y
in gki_defconfig, the downstream modifications need to be removed
so that the driver can be realigned with upstream.

Move the secondary ring handling functions into a separate file
that will be compiled as part of the usb_audio_qmi module which
is the primary (and only) user. Unfortunately some miscellaneous
xhci APIs (xhci_ring_alloc et al) which are referenced are not
currently exported from the XHCI module, so we will either need
to upstream a change to export them or duplicate them. Until then
the snd-usb-audio-qmi module is temporarily broken.

Change-Id: Ib06ccaacf43840a92ad0c71c6b09882763e061b4
Signed-off-by: Jack Pham <jackp@codeaurora.org>
This commit is contained in:
Jack Pham 2021-02-08 01:05:26 -08:00
parent 529e0792b5
commit 4cc0e9d430
7 changed files with 475 additions and 510 deletions

View File

@ -1833,147 +1833,6 @@ void xhci_free_erst(struct xhci_hcd *xhci, struct xhci_erst *erst)
erst->entries = NULL;
}
/*
 * Quiesce secondary interrupter @intr_num and acknowledge every event
 * still pending on its event ring so the ring can be torn down.
 *
 * Walks the ring from the last-acknowledged TRB (read back from the
 * ERDP register) until a cycle-bit mismatch marks the enqueue position,
 * then writes the final dequeue pointer with EHB set to clear the
 * event-handler-busy flag.
 */
static void xhci_handle_sec_intr_events(struct xhci_hcd *xhci, int intr_num)
{
union xhci_trb *erdp_trb, *current_trb;
struct xhci_segment *seg;
u64 erdp_reg;
u32 iman_reg;
dma_addr_t deq;
unsigned long segment_offset;
/* disable irq, ack pending interrupt and ack all pending events */
iman_reg =
readl_relaxed(&xhci->sec_ir_set[intr_num]->irq_pending);
iman_reg &= ~IMAN_IE;
writel_relaxed(iman_reg,
&xhci->sec_ir_set[intr_num]->irq_pending);
/* IMAN_IP is write-1-to-clear: writing back the read value acks it */
iman_reg =
readl_relaxed(&xhci->sec_ir_set[intr_num]->irq_pending);
if (iman_reg & IMAN_IP)
writel_relaxed(iman_reg,
&xhci->sec_ir_set[intr_num]->irq_pending);
/* last acked event trb is in erdp reg */
erdp_reg =
xhci_read_64(xhci, &xhci->sec_ir_set[intr_num]->erst_dequeue);
deq = (dma_addr_t)(erdp_reg & ~ERST_PTR_MASK);
if (!deq) {
pr_debug("%s: event ring handling not required\n", __func__);
return;
}
/* NOTE(review): offset math assumes the dequeue pointer lies within
 * first_seg, i.e. a single-segment event ring -- confirm ERST_NUM_SEGS
 */
seg = xhci->sec_event_ring[intr_num]->first_seg;
segment_offset = deq - seg->dma;
/* find out virtual address of the last acked event trb */
erdp_trb = current_trb = &seg->trbs[0] +
(segment_offset/sizeof(*current_trb));
/* read cycle state of the last acked trb to find out CCS */
xhci->sec_event_ring[intr_num]->cycle_state =
le32_to_cpu(current_trb->event_cmd.flags) & TRB_CYCLE;
while (1) {
/* last trb of the event ring: toggle cycle state */
if (current_trb == &seg->trbs[TRBS_PER_SEGMENT - 1]) {
xhci->sec_event_ring[intr_num]->cycle_state ^= 1;
current_trb = &seg->trbs[0];
} else {
current_trb++;
}
/* cycle state transition */
if ((le32_to_cpu(current_trb->event_cmd.flags) & TRB_CYCLE) !=
xhci->sec_event_ring[intr_num]->cycle_state)
break;
}
/* only advance ERDP if there were unacked events behind it */
if (erdp_trb != current_trb) {
deq =
xhci_trb_virt_to_dma(xhci->sec_event_ring[intr_num]->deq_seg,
current_trb);
if (deq == 0)
xhci_warn(xhci,
"WARN invalid SW event ring dequeue ptr.\n");
/* Update HC event ring dequeue pointer */
erdp_reg &= ERST_PTR_MASK;
erdp_reg |= ((u64) deq & (u64) ~ERST_PTR_MASK);
}
/* Clear the event handler busy flag (RW1C); event ring is empty. */
erdp_reg |= ERST_EHB;
xhci_write_64(xhci, erdp_reg,
&xhci->sec_ir_set[intr_num]->erst_dequeue);
}
static int sec_event_ring_cleanup(struct xhci_hcd *xhci, unsigned int intr_num)
{
int size;
struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
if (intr_num >= xhci->max_interrupters) {
xhci_err(xhci, "invalid secondary interrupter num %d\n",
intr_num);
return -EINVAL;
}
size =
sizeof(struct xhci_erst_entry)*(xhci->sec_erst[intr_num].num_entries);
if (xhci->sec_erst[intr_num].entries) {
xhci_handle_sec_intr_events(xhci, intr_num);
dma_free_coherent(dev, size, xhci->sec_erst[intr_num].entries,
xhci->sec_erst[intr_num].erst_dma_addr);
xhci->sec_erst[intr_num].entries = NULL;
}
xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed SEC ERST#%d",
intr_num);
if (xhci->sec_event_ring[intr_num])
xhci_ring_free(xhci, xhci->sec_event_ring[intr_num]);
xhci->sec_event_ring[intr_num] = NULL;
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"Freed sec event ring");
return 0;
}
/*
 * Public wrapper: clean up secondary event ring @intr_num on the host
 * controller driving @udev.  A non-running root hub means there is
 * nothing to do (returns 0).
 */
int xhci_sec_event_ring_cleanup(struct usb_device *udev, unsigned int intr_num)
{
	struct usb_hcd *hcd = bus_to_hcd(udev->bus);

	return HCD_RH_RUNNING(hcd) ?
		sec_event_ring_cleanup(hcd_to_xhci(hcd), intr_num) : 0;
}
EXPORT_SYMBOL(xhci_sec_event_ring_cleanup);
/*
 * Free all event rings: every secondary ring (interrupters 1..max-1)
 * plus the bookkeeping arrays, then the primary ERST and event ring.
 *
 * Fix: the final trace message read "Freed priamry event ring";
 * corrected the typo to "primary".
 */
static void xhci_event_ring_cleanup(struct xhci_hcd *xhci)
{
	unsigned int i;

	/* sec event ring clean up (interrupter 0 is the primary) */
	for (i = 1; i < xhci->max_interrupters; i++)
		sec_event_ring_cleanup(xhci, i);

	kfree(xhci->sec_ir_set);
	xhci->sec_ir_set = NULL;
	kfree(xhci->sec_erst);
	xhci->sec_erst = NULL;
	kfree(xhci->sec_event_ring);
	xhci->sec_event_ring = NULL;

	/* primary event ring clean up */
	xhci_free_erst(xhci, &xhci->erst);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed primary ERST");
	if (xhci->event_ring)
		xhci_ring_free(xhci, xhci->event_ring);
	xhci->event_ring = NULL;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed primary event ring");
}
void xhci_mem_cleanup(struct xhci_hcd *xhci)
{
struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
@ -1981,7 +1840,12 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
cancel_delayed_work_sync(&xhci->cmd_timer);
xhci_event_ring_cleanup(xhci);
xhci_free_erst(xhci, &xhci->erst);
if (xhci->event_ring)
xhci_ring_free(xhci, xhci->event_ring);
xhci->event_ring = NULL;
xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed event ring");
if (xhci->lpm_command)
xhci_free_command(xhci, xhci->lpm_command);
@ -2226,6 +2090,30 @@ static int xhci_check_trb_in_td_math(struct xhci_hcd *xhci)
return 0;
}
/*
 * Program the primary interrupter's event ring dequeue pointer (ERDP)
 * register from the driver's software dequeue position.  The EHB
 * (event handler busy) bit is RW1C and deliberately left unwritten,
 * since more events may still need servicing.
 */
static void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
{
u64 temp;
dma_addr_t deq;
deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
xhci->event_ring->dequeue);
if (deq == 0 && !in_interrupt())
xhci_warn(xhci, "WARN something wrong with SW event ring "
"dequeue ptr.\n");
/* Update HC event ring dequeue pointer */
temp = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
/* keep the low flag bits, replace only the pointer field */
temp &= ERST_PTR_MASK;
/* Don't clear the EHB bit (which is RW1C) because
 * there might be more events to service.
 */
temp &= ~ERST_EHB;
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"// Write event ring dequeue pointer, "
"preserving EHB bit");
xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp,
&xhci->ir_set->erst_dequeue);
}
static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
__le32 __iomem *addr, int max_caps)
{
@ -2477,159 +2365,6 @@ static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
return 0;
}
/*
 * Allocate and program one event ring plus its ERST for interrupter
 * @intr_num.  Used for the primary interrupter (0) and all secondary
 * interrupters.  On success *@er holds the new ring, *@erst the table,
 * and the interrupter's ERSTSZ, ERSTBA and ERDP registers are set.
 * Returns 0 or a negative errno.
 *
 * NOTE(review): if xhci_alloc_erst() fails, the ring allocated just
 * above is not freed here -- presumably the caller's cleanup path
 * releases it; confirm.
 */
static int xhci_event_ring_setup(struct xhci_hcd *xhci, struct xhci_ring **er,
struct xhci_intr_reg __iomem *ir_set, struct xhci_erst *erst,
unsigned int intr_num, gfp_t flags)
{
dma_addr_t deq;
u64 val_64;
unsigned int val;
int ret;
*er = xhci_ring_alloc(xhci, ERST_NUM_SEGS, 1, TYPE_EVENT, 0, flags);
if (!*er)
return -ENOMEM;
ret = xhci_alloc_erst(xhci, *er, erst, flags);
if (ret)
return ret;
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"intr# %d: num segs = %i, virt addr = %pK, dma addr = 0x%llx",
intr_num,
erst->num_entries,
erst->entries,
(unsigned long long)erst->erst_dma_addr);
/* set ERST count with the number of entries in the segment table */
val = readl_relaxed(&ir_set->erst_size);
val &= ERST_SIZE_MASK;
val |= ERST_NUM_SEGS;
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"Write ERST size = %i to ir_set %d (some bits preserved)", val,
intr_num);
writel_relaxed(val, &ir_set->erst_size);
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"intr# %d: Set ERST entries to point to event ring.",
intr_num);
/* set the segment table base address */
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"Set ERST base address for ir_set %d = 0x%llx",
intr_num,
(unsigned long long)erst->erst_dma_addr);
val_64 = xhci_read_64(xhci, &ir_set->erst_base);
val_64 &= ERST_PTR_MASK;
val_64 |= (erst->erst_dma_addr & (u64) ~ERST_PTR_MASK);
xhci_write_64(xhci, val_64, &ir_set->erst_base);
/* Set the event ring dequeue address */
deq = xhci_trb_virt_to_dma((*er)->deq_seg, (*er)->dequeue);
if (deq == 0 && !in_interrupt())
xhci_warn(xhci,
"intr# %d:WARN something wrong with SW event ring deq ptr.\n",
intr_num);
/* Update HC event ring dequeue pointer */
val_64 = xhci_read_64(xhci, &ir_set->erst_dequeue);
val_64 &= ERST_PTR_MASK;
/* Don't clear the EHB bit (which is RW1C) because
 * there might be more events to service.
 */
val_64 &= ~ERST_EHB;
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"intr# %d:Write event ring dequeue pointer, preserving EHB bit",
intr_num);
xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | val_64,
&ir_set->erst_dequeue);
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"Wrote ERST address to ir_set %d.", intr_num);
return 0;
}
/*
 * Set up (or reuse) the secondary event ring for interrupter @intr_num
 * on the host controller driving @udev.  Returns 0 on success or when
 * the ring already exists, -EINVAL on bad state/arguments, or the
 * errno from the ring setup itself.
 */
int xhci_sec_event_ring_setup(struct usb_device *udev, unsigned int intr_num)
{
	struct usb_hcd *hcd = bus_to_hcd(udev->bus);
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	int ret;

	if (udev->state == USB_STATE_NOTATTACHED || !HCD_RH_RUNNING(hcd))
		return 0;

	if ((xhci->xhc_state & XHCI_STATE_HALTED) || !xhci->sec_ir_set
		|| !xhci->sec_event_ring || !xhci->sec_erst ||
		intr_num >= xhci->max_interrupters) {
		xhci_err(xhci,
			"%s:state %x ir_set %pK evt_ring %pK erst %pK intr# %d\n",
			__func__, xhci->xhc_state, xhci->sec_ir_set,
			xhci->sec_event_ring, xhci->sec_erst, intr_num);
		return -EINVAL;
	}

	/* ring already created: nothing to do */
	if (xhci->sec_event_ring && xhci->sec_event_ring[intr_num]
		&& xhci->sec_event_ring[intr_num]->first_seg)
		return 0;

	xhci->sec_ir_set[intr_num] = &xhci->run_regs->ir_set[intr_num];
	ret = xhci_event_ring_setup(xhci,
				&xhci->sec_event_ring[intr_num],
				xhci->sec_ir_set[intr_num],
				&xhci->sec_erst[intr_num],
				intr_num, GFP_KERNEL);
	if (ret)
		xhci_err(xhci, "sec event ring setup failed inter#%d\n",
				intr_num);

	return ret;
}
EXPORT_SYMBOL(xhci_sec_event_ring_setup);
/*
 * Allocate and program the primary event ring, then allocate the
 * per-interrupter bookkeeping arrays for secondary event rings.
 * Partially-allocated state left behind on failure is released later
 * by the memory cleanup path.
 */
static int xhci_event_ring_init(struct xhci_hcd *xhci, gfp_t flags)
{
	int ret;

	/* primary + secondary */
	xhci->max_interrupters = HCS_MAX_INTRS(xhci->hcs_params1);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Allocating primary event ring");

	/* Set ir_set to interrupt register set 0 */
	xhci->ir_set = &xhci->run_regs->ir_set[0];
	ret = xhci_event_ring_setup(xhci, &xhci->event_ring, xhci->ir_set,
			&xhci->erst, 0, flags);
	if (ret) {
		xhci_err(xhci, "failed to setup primary event ring\n");
		return ret;
	}

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Allocating sec event ring related pointers");

	xhci->sec_ir_set = kcalloc(xhci->max_interrupters,
			sizeof(*xhci->sec_ir_set), flags);
	if (!xhci->sec_ir_set)
		return -ENOMEM;

	xhci->sec_event_ring = kcalloc(xhci->max_interrupters,
			sizeof(*xhci->sec_event_ring), flags);
	if (!xhci->sec_event_ring)
		return -ENOMEM;

	xhci->sec_erst = kcalloc(xhci->max_interrupters,
			sizeof(*xhci->sec_erst), flags);
	if (!xhci->sec_erst)
		return -ENOMEM;

	return 0;
}
int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
{
dma_addr_t dma;
@ -2637,7 +2372,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
unsigned int val, val2;
u64 val_64;
u32 page_size, temp;
int i;
int i, ret;
INIT_LIST_HEAD(&xhci->cmd_list);
@ -2757,17 +2492,50 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
"// Doorbell array is located at offset 0x%x"
" from cap regs base addr", val);
xhci->dba = (void __iomem *) xhci->cap_regs + val;
/* Set ir_set to interrupt register set 0 */
xhci->ir_set = &xhci->run_regs->ir_set[0];
/*
* Event ring setup: Allocate a normal ring, but also setup
* the event ring segment table (ERST). Section 4.9.3.
*/
if (xhci_event_ring_init(xhci, GFP_KERNEL))
xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Allocating event ring");
xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, 1, TYPE_EVENT,
0, flags);
if (!xhci->event_ring)
goto fail;
if (xhci_check_trb_in_td_math(xhci) < 0)
goto fail;
ret = xhci_alloc_erst(xhci, xhci->event_ring, &xhci->erst, flags);
if (ret)
goto fail;
/* set ERST count with the number of entries in the segment table */
val = readl(&xhci->ir_set->erst_size);
val &= ERST_SIZE_MASK;
val |= ERST_NUM_SEGS;
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"// Write ERST size = %i to ir_set 0 (some bits preserved)",
val);
writel(val, &xhci->ir_set->erst_size);
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"// Set ERST entries to point to event ring.");
/* set the segment table base address */
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"// Set ERST base address for ir_set 0 = 0x%llx",
(unsigned long long)xhci->erst.erst_dma_addr);
val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base);
val_64 &= ERST_PTR_MASK;
val_64 |= (xhci->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK);
xhci_write_64(xhci, val_64, &xhci->ir_set->erst_base);
/* Set the event ring dequeue address */
xhci_set_hc_event_deq(xhci);
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"Wrote ERST address to ir_set 0.");
/*
* XXX: Might need to set the Interrupter Moderation Register to
* something other than the default (~1ms minimum between interrupts).

View File

@ -5252,147 +5252,6 @@ static void xhci_clear_tt_buffer_complete(struct usb_hcd *hcd,
spin_unlock_irqrestore(&xhci->lock, flags);
}
/*
 * xhci_get_sec_event_ring_phys_addr - physical address of a secondary
 * event ring's first segment.
 * @udev:     device whose host controller owns the ring
 * @intr_num: secondary interrupter number
 * @dma:      out: DMA address of the first segment
 *
 * Returns the physical address, or 0 if the controller is not running,
 * @intr_num is out of range, the ring does not exist, or the
 * scatterlist lookup fails.
 *
 * Fix: dma_get_sgtable() can fail (e.g. -ENOMEM); the previous code
 * ignored its return value and then read an uninitialized sg_table.
 */
phys_addr_t xhci_get_sec_event_ring_phys_addr(struct usb_device *udev,
	unsigned int intr_num, dma_addr_t *dma)
{
	struct usb_hcd *hcd = bus_to_hcd(udev->bus);
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct device *dev = hcd->self.sysdev;
	struct sg_table sgt;
	phys_addr_t pa;

	if (udev->state == USB_STATE_NOTATTACHED || !HCD_RH_RUNNING(hcd))
		return 0;

	if (intr_num >= xhci->max_interrupters) {
		xhci_err(xhci, "intr num %d >= max intrs %d\n", intr_num,
			xhci->max_interrupters);
		return 0;
	}

	if (!(xhci->xhc_state & XHCI_STATE_HALTED) &&
		xhci->sec_event_ring && xhci->sec_event_ring[intr_num]
		&& xhci->sec_event_ring[intr_num]->first_seg) {
		if (dma_get_sgtable(dev, &sgt,
			xhci->sec_event_ring[intr_num]->first_seg->trbs,
			xhci->sec_event_ring[intr_num]->first_seg->dma,
			TRB_SEGMENT_SIZE))
			return 0;

		*dma = xhci->sec_event_ring[intr_num]->first_seg->dma;
		pa = page_to_phys(sg_page(sgt.sgl));
		sg_free_table(&sgt);
		return pa;
	}
	return 0;
}
EXPORT_SYMBOL(xhci_get_sec_event_ring_phys_addr);
/*
 * xhci_get_xfer_ring_phys_addr - physical address of an endpoint's
 * transfer ring first segment.
 * @udev: device owning the endpoint
 * @ep:   endpoint whose ring is queried
 * @dma:  out: DMA address of the first segment
 *
 * Returns the physical address, or 0 if the controller is not running,
 * the arguments are invalid, the ring does not exist, or the
 * scatterlist lookup fails.
 *
 * Fix: dma_get_sgtable() can fail; the previous code ignored its
 * return value and then read an uninitialized sg_table.
 */
phys_addr_t xhci_get_xfer_ring_phys_addr(struct usb_device *udev,
	struct usb_host_endpoint *ep, dma_addr_t *dma)
{
	int ret;
	unsigned int ep_index;
	struct xhci_virt_device *virt_dev;
	struct usb_hcd *hcd = bus_to_hcd(udev->bus);
	struct device *dev = hcd->self.sysdev;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct sg_table sgt;
	phys_addr_t pa;

	if (udev->state == USB_STATE_NOTATTACHED || !HCD_RH_RUNNING(hcd))
		return 0;

	ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
	if (ret <= 0) {
		xhci_err(xhci, "%s: invalid args\n", __func__);
		return 0;
	}

	virt_dev = xhci->devs[udev->slot_id];
	ep_index = xhci_get_endpoint_index(&ep->desc);
	if (virt_dev->eps[ep_index].ring &&
		virt_dev->eps[ep_index].ring->first_seg) {
		if (dma_get_sgtable(dev, &sgt,
			virt_dev->eps[ep_index].ring->first_seg->trbs,
			virt_dev->eps[ep_index].ring->first_seg->dma,
			TRB_SEGMENT_SIZE))
			return 0;

		*dma = virt_dev->eps[ep_index].ring->first_seg->dma;
		pa = page_to_phys(sg_page(sgt.sgl));
		sg_free_table(&sgt);
		return pa;
	}
	return 0;
}
EXPORT_SYMBOL(xhci_get_xfer_ring_phys_addr);
/*
 * xhci_stop_endpoint - issue a Stop Endpoint command and wait for it.
 * @udev: target device
 * @ep:   endpoint to stop
 *
 * Returns 0 on success (or if the ring has no work queued), -ENOMEM if
 * the command cannot be allocated, -ENODEV if the device context is
 * gone, -ETIME if the command was aborted/stopped, or the errno from
 * queueing the command.
 *
 * Locking: xhci->lock is dropped before sleeping in
 * wait_for_completion().  The "err" label unlocks; "free_cmd" does not
 * -- every path reaches each label with the matching lock state.
 */
int xhci_stop_endpoint(struct usb_device *udev, struct usb_host_endpoint *ep)
{
struct usb_hcd *hcd = bus_to_hcd(udev->bus);
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
unsigned int ep_index;
struct xhci_virt_device *virt_dev;
struct xhci_command *cmd;
unsigned long flags;
int ret = 0;
ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
if (ret <= 0)
return ret;
cmd = xhci_alloc_command(xhci, true, GFP_NOIO);
if (!cmd)
return -ENOMEM;
spin_lock_irqsave(&xhci->lock, flags);
virt_dev = xhci->devs[udev->slot_id];
if (!virt_dev) {
ret = -ENODEV;
goto err;
}
ep_index = xhci_get_endpoint_index(&ep->desc);
/* only stop an endpoint that actually has a ring with queued TRBs;
 * otherwise fall through to err: (unlock) with ret == 0 */
if (virt_dev->eps[ep_index].ring &&
virt_dev->eps[ep_index].ring->dequeue) {
ret = xhci_queue_stop_endpoint(xhci, cmd, udev->slot_id,
ep_index, 0);
if (ret)
goto err;
xhci_ring_cmd_db(xhci);
spin_unlock_irqrestore(&xhci->lock, flags);
/* Wait for stop endpoint command to finish */
wait_for_completion(cmd->completion);
if (cmd->status == COMP_COMMAND_ABORTED ||
cmd->status == COMP_STOPPED) {
xhci_warn(xhci,
"stop endpoint command timeout for ep%d%s\n",
usb_endpoint_num(&ep->desc),
usb_endpoint_dir_in(&ep->desc) ? "in" : "out");
ret = -ETIME;
}
goto free_cmd;
}
err:
spin_unlock_irqrestore(&xhci->lock, flags);
free_cmd:
xhci_free_command(xhci, cmd);
return ret;
}
EXPORT_SYMBOL(xhci_stop_endpoint);
static const struct hc_driver xhci_hc_driver = {
.description = "xhci-hcd",
.product_desc = "xHCI Host Controller",

View File

@ -16,7 +16,6 @@
#include <linux/timer.h>
#include <linux/kernel.h>
#include <linux/usb/hcd.h>
#include <linux/usb/xhci-sec.h>
#include <linux/io-64-nonatomic-lo-hi.h>
/* Code sharing between pci-quirks and xhci hcd */
@ -1750,8 +1749,6 @@ struct xhci_hcd {
struct xhci_doorbell_array __iomem *dba;
/* Our HCD's current interrupter register set */
struct xhci_intr_reg __iomem *ir_set;
/* secondary interrupter */
struct xhci_intr_reg __iomem **sec_ir_set;
/* Cached register copies of read-only HC data */
__u32 hcs_params1;
@ -1797,11 +1794,6 @@ struct xhci_hcd {
struct xhci_command *current_cmd;
struct xhci_ring *event_ring;
struct xhci_erst erst;
/* secondary event ring and erst */
struct xhci_ring **sec_event_ring;
struct xhci_erst *sec_erst;
/* Scratchpad */
struct xhci_scratchpad *scratchpad;
/* Store LPM test failed devices' information */

View File

@ -1,54 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * xHCI secondary ring APIs
 *
 * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
 */
#ifndef __LINUX_XHCI_SEC_H
#define __LINUX_XHCI_SEC_H
#include <linux/usb.h>
#if IS_ENABLED(CONFIG_USB_XHCI_HCD)
/* Real implementations are provided by the xHCI host driver. */
int xhci_sec_event_ring_setup(struct usb_device *udev, unsigned int intr_num);
int xhci_sec_event_ring_cleanup(struct usb_device *udev, unsigned int intr_num);
/* Return 0 when the address cannot be resolved. */
phys_addr_t xhci_get_sec_event_ring_phys_addr(struct usb_device *udev,
unsigned int intr_num, dma_addr_t *dma);
phys_addr_t xhci_get_xfer_ring_phys_addr(struct usb_device *udev,
struct usb_host_endpoint *ep, dma_addr_t *dma);
int xhci_stop_endpoint(struct usb_device *udev, struct usb_host_endpoint *ep);
#else
/*
 * Stubs for kernels built without xHCI: setup/cleanup/stop report
 * -ENODEV, the address lookups return 0 (not found).
 */
static inline int xhci_sec_event_ring_setup(struct usb_device *udev,
unsigned int intr_num)
{
return -ENODEV;
}
static inline int xhci_sec_event_ring_cleanup(struct usb_device *udev,
unsigned int intr_num)
{
return -ENODEV;
}
static inline phys_addr_t xhci_get_sec_event_ring_phys_addr(
struct usb_device *udev, unsigned int intr_num,
dma_addr_t *dma)
{
return 0;
}
static inline phys_addr_t xhci_get_xfer_ring_phys_addr(struct usb_device *udev,
struct usb_host_endpoint *ep, dma_addr_t *dma)
{
return 0;
}
static inline int xhci_stop_endpoint(struct usb_device *udev,
struct usb_host_endpoint *ep)
{
return -ENODEV;
}
#endif
#endif /* __LINUX_XHCI_SEC_H */

View File

@ -35,5 +35,6 @@ obj-$(CONFIG_SND_USB_US122L) += snd-usbmidi-lib.o
obj-$(CONFIG_SND) += misc/ usx2y/ caiaq/ 6fire/ hiface/ bcd2000/
obj-$(CONFIG_SND_USB_LINE6) += line6/
snd-usb-audio-qmi-objs := usb_audio_qmi_v01.o usb_audio_qmi_svc.o
CFLAGS_xhci-sec.o := -I$(srctree)/drivers/usb/host
snd-usb-audio-qmi-objs := usb_audio_qmi_v01.o usb_audio_qmi_svc.o xhci-sec.o
obj-$(CONFIG_SND_USB_AUDIO_QMI) += snd-usb-audio-qmi.o

View File

@ -21,7 +21,6 @@
#include <linux/dma-map-ops.h>
#include <linux/platform_device.h>
#include <linux/usb/audio-v3.h>
#include <linux/usb/xhci-sec.h>
#include <linux/ipc_logging.h>
#include "usbaudio.h"
@ -52,6 +51,17 @@
#define MAX_XFER_BUFF_LEN (24 * PAGE_SIZE)
/*
 * Prototypes for the secondary-ring helpers now compiled into this
 * module (xhci-sec.c); they replace the removed <linux/usb/xhci-sec.h>
 * interface.
 */
struct xhci_ring;
/* Returns the ring on success or an ERR_PTR() on failure. */
struct xhci_ring *xhci_sec_event_ring_setup(struct usb_device *udev,
unsigned int intr_num);
int xhci_sec_event_ring_cleanup(struct usb_device *udev, struct xhci_ring *ring);
/* Both address lookups return 0 when the address cannot be resolved. */
phys_addr_t xhci_get_sec_event_ring_phys_addr(struct usb_device *udev,
struct xhci_ring *ring, dma_addr_t *dma);
phys_addr_t xhci_get_xfer_ring_phys_addr(struct usb_device *udev,
struct usb_host_endpoint *ep, dma_addr_t *dma);
int xhci_stop_endpoint(struct usb_device *udev, struct usb_host_endpoint *ep);
struct iova_info {
struct list_head list;
unsigned long start_iova;
@ -98,6 +108,7 @@ struct uaudio_qmi_dev {
struct device *dev;
u32 sid;
u32 intr_num;
struct xhci_ring *sec_ring;
struct iommu_domain *domain;
/* list to keep track of available iova */
@ -671,25 +682,26 @@ static int prepare_qmi_response(struct snd_usb_substream *subs,
dma_coherent = dev_is_dma_coherent(subs->dev->bus->sysdev);
/* event ring */
ret = xhci_sec_event_ring_setup(subs->dev, resp->interrupter_num);
if (ret) {
uaudio_qdev->sec_ring = xhci_sec_event_ring_setup(subs->dev, resp->interrupter_num);
if (IS_ERR(uaudio_qdev->sec_ring)) {
ret = PTR_ERR(uaudio_qdev->sec_ring);
uaudio_err("failed to setup sec event ring ret %d\n", ret);
goto err;
}
xhci_pa = xhci_get_sec_event_ring_phys_addr(subs->dev,
resp->interrupter_num, &dma);
uaudio_qdev->sec_ring, &dma);
if (!xhci_pa) {
uaudio_err("failed to get sec event ring dma address\n");
ret = -ENODEV;
goto err;
goto free_sec_ring;
}
va = uaudio_iommu_map(MEM_EVENT_RING, dma_coherent, xhci_pa, PAGE_SIZE,
NULL);
if (!va) {
ret = -ENOMEM;
goto err;
goto free_sec_ring;
}
resp->xhci_mem_info.evt_ring.va = PREPEND_SID_TO_IOVA(va,
@ -825,6 +837,8 @@ static int prepare_qmi_response(struct snd_usb_substream *subs,
uaudio_iommu_unmap(MEM_XFER_RING, tr_data_va, PAGE_SIZE, PAGE_SIZE);
unmap_er:
uaudio_iommu_unmap(MEM_EVENT_RING, IOVA_BASE, PAGE_SIZE, PAGE_SIZE);
free_sec_ring:
xhci_sec_event_ring_cleanup(subs->dev, uaudio_qdev->sec_ring);
err:
return ret;
}
@ -886,7 +900,7 @@ static void uaudio_dev_cleanup(struct uaudio_dev *dev)
if (!uaudio_qdev->card_slot) {
uaudio_iommu_unmap(MEM_EVENT_RING, IOVA_BASE, PAGE_SIZE,
PAGE_SIZE);
xhci_sec_event_ring_cleanup(dev->udev, uaudio_qdev->intr_num);
xhci_sec_event_ring_cleanup(dev->udev, uaudio_qdev->sec_ring);
uaudio_dbg("all audio devices disconnected\n");
}
@ -966,7 +980,7 @@ static void uaudio_dev_release(struct kref *kref)
/* all audio devices are disconnected */
if (!uaudio_qdev->card_slot) {
xhci_sec_event_ring_cleanup(dev->udev, uaudio_qdev->intr_num);
xhci_sec_event_ring_cleanup(dev->udev, uaudio_qdev->sec_ring);
uaudio_iommu_unmap(MEM_EVENT_RING, IOVA_BASE, PAGE_SIZE,
PAGE_SIZE);
uaudio_dbg("all audio devices disconnected\n");

385
sound/usb/xhci-sec.c Normal file
View File

@ -0,0 +1,385 @@
// SPDX-License-Identifier: GPL-2.0
/*
* xHCI secondary ring APIs
*
* Copyright (c) 2019,2021 The Linux Foundation. All rights reserved.
* Copyright (C) 2008 Intel Corp.
*/
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include "xhci-trace.h"
#include "xhci.h"
/*
 * Per-interrupter bookkeeping for one secondary event ring, looked up
 * by (xhci, intr_num) pair on the global xhci_sec list.
 *
 * NOTE(review): the list is traversed and modified without any lock;
 * presumably all callers run from the same QMI service context --
 * confirm before adding concurrent callers.
 */
struct xhci_sec {
struct xhci_ring *event_ring;
struct xhci_erst erst;
/* secondary interrupter */
struct xhci_intr_reg __iomem *ir_set;
struct xhci_hcd *xhci;
int intr_num;
struct list_head list;
};
/* all secondary rings created by this module, across all controllers */
static LIST_HEAD(xhci_sec);
/*
 * Allocate and program one event ring plus its ERST for secondary
 * interrupter @intr_num.  On success *@er holds the new ring, *@erst
 * the table, and the interrupter's ERSTSZ, ERSTBA and ERDP registers
 * are programmed.  Returns 0 or a negative errno.
 *
 * NOTE(review): if xhci_alloc_erst() fails, the ring allocated just
 * above is not freed here -- presumably the caller cleans up; confirm.
 */
static int xhci_event_ring_setup(struct xhci_hcd *xhci, struct xhci_ring **er,
struct xhci_intr_reg __iomem *ir_set, struct xhci_erst *erst,
unsigned int intr_num, gfp_t flags)
{
dma_addr_t deq;
u64 val_64;
unsigned int val;
int ret;
*er = xhci_ring_alloc(xhci, ERST_NUM_SEGS, 1, TYPE_EVENT, 0, flags);
if (!*er)
return -ENOMEM;
ret = xhci_alloc_erst(xhci, *er, erst, flags);
if (ret)
return ret;
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"intr# %d: num segs = %i, virt addr = %pK, dma addr = 0x%llx",
intr_num,
erst->num_entries,
erst->entries,
(unsigned long long)erst->erst_dma_addr);
/* set ERST count with the number of entries in the segment table */
val = readl_relaxed(&ir_set->erst_size);
val &= ERST_SIZE_MASK;
val |= ERST_NUM_SEGS;
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"Write ERST size = %i to ir_set %d (some bits preserved)", val,
intr_num);
writel_relaxed(val, &ir_set->erst_size);
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"intr# %d: Set ERST entries to point to event ring.",
intr_num);
/* set the segment table base address */
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"Set ERST base address for ir_set %d = 0x%llx",
intr_num,
(unsigned long long)erst->erst_dma_addr);
val_64 = xhci_read_64(xhci, &ir_set->erst_base);
val_64 &= ERST_PTR_MASK;
val_64 |= (erst->erst_dma_addr & (u64) ~ERST_PTR_MASK);
xhci_write_64(xhci, val_64, &ir_set->erst_base);
/* Set the event ring dequeue address */
deq = trb_virt_to_dma((*er)->deq_seg, (*er)->dequeue);
if (deq == 0 && !in_interrupt())
xhci_warn(xhci,
"intr# %d:WARN something wrong with SW event ring deq ptr.\n",
intr_num);
/* Update HC event ring dequeue pointer */
val_64 = xhci_read_64(xhci, &ir_set->erst_dequeue);
val_64 &= ERST_PTR_MASK;
/* Don't clear the EHB bit (which is RW1C) because
 * there might be more events to service.
 */
val_64 &= ~ERST_EHB;
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"intr# %d:Write event ring dequeue pointer, preserving EHB bit",
intr_num);
xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | val_64,
&ir_set->erst_dequeue);
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"Wrote ERST address to ir_set %d.", intr_num);
return 0;
}
struct xhci_ring *xhci_sec_event_ring_setup(struct usb_device *udev, unsigned int intr_num)
{
int ret;
struct usb_hcd *hcd = bus_to_hcd(udev->bus);
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
struct xhci_sec *sec;
if (udev->state == USB_STATE_NOTATTACHED || !HCD_RH_RUNNING(hcd))
return ERR_PTR(-ENODEV);
if (!xhci->max_interrupters)
xhci->max_interrupters = HCS_MAX_INTRS(xhci->hcs_params1);
if ((xhci->xhc_state & XHCI_STATE_HALTED) ||
intr_num >= xhci->max_interrupters) {
xhci_err(xhci, "%s:state %x intr# %d\n", __func__,
xhci->xhc_state, intr_num);
return ERR_PTR(-EINVAL);
}
list_for_each_entry(sec, &xhci_sec, list) {
if (sec->xhci == xhci && sec->intr_num == intr_num)
goto done;
}
sec = kzalloc(sizeof(*sec), GFP_KERNEL);
if (!sec)
return ERR_PTR(-ENOMEM);
sec->intr_num = intr_num;
sec->xhci = xhci;
sec->ir_set = &xhci->run_regs->ir_set[intr_num];
ret = xhci_event_ring_setup(xhci, &sec->event_ring, sec->ir_set,
&sec->erst, intr_num, GFP_KERNEL);
if (ret) {
xhci_err(xhci, "sec event ring setup failed inter#%d\n",
intr_num);
kfree(sec);
return ERR_PTR(ret);
}
list_add_tail(&sec->list, &xhci_sec);
done:
return sec->event_ring;
}
/*
 * Quiesce the secondary interrupter behind @ir_set and acknowledge all
 * events still pending on @ring so it can be torn down.
 *
 * Walks the ring from the last-acknowledged TRB (read back from ERDP)
 * until a cycle-bit mismatch marks the enqueue position, then writes
 * the final dequeue pointer with EHB set to clear the busy flag.
 */
static void xhci_handle_sec_intr_events(struct xhci_hcd *xhci,
struct xhci_ring *ring, struct xhci_intr_reg __iomem *ir_set)
{
union xhci_trb *erdp_trb, *current_trb;
struct xhci_segment *seg;
u64 erdp_reg;
u32 iman_reg;
dma_addr_t deq;
unsigned long segment_offset;
/* disable irq, ack pending interrupt and ack all pending events */
iman_reg = readl_relaxed(&ir_set->irq_pending);
iman_reg &= ~IMAN_IE;
writel_relaxed(iman_reg, &ir_set->irq_pending);
/* IMAN_IP is write-1-to-clear: writing back the read value acks it */
iman_reg = readl_relaxed(&ir_set->irq_pending);
if (iman_reg & IMAN_IP)
writel_relaxed(iman_reg, &ir_set->irq_pending);
/* last acked event trb is in erdp reg */
erdp_reg = xhci_read_64(xhci, &ir_set->erst_dequeue);
deq = (dma_addr_t)(erdp_reg & ~ERST_PTR_MASK);
if (!deq) {
pr_debug("%s: event ring handling not required\n", __func__);
return;
}
/* NOTE(review): offset math assumes the dequeue pointer lies within
 * first_seg, i.e. a single-segment event ring -- confirm ERST_NUM_SEGS
 */
seg = ring->first_seg;
segment_offset = deq - seg->dma;
/* find out virtual address of the last acked event trb */
erdp_trb = current_trb = &seg->trbs[0] +
(segment_offset/sizeof(*current_trb));
/* read cycle state of the last acked trb to find out CCS */
ring->cycle_state = le32_to_cpu(current_trb->event_cmd.flags) & TRB_CYCLE;
while (1) {
/* last trb of the event ring: toggle cycle state */
if (current_trb == &seg->trbs[TRBS_PER_SEGMENT - 1]) {
ring->cycle_state ^= 1;
current_trb = &seg->trbs[0];
} else {
current_trb++;
}
/* cycle state transition */
if ((le32_to_cpu(current_trb->event_cmd.flags) & TRB_CYCLE) !=
ring->cycle_state)
break;
}
/* only advance ERDP if there were unacked events behind it */
if (erdp_trb != current_trb) {
deq = trb_virt_to_dma(ring->deq_seg, current_trb);
if (deq == 0)
xhci_warn(xhci,
"WARN invalid SW event ring dequeue ptr.\n");
/* Update HC event ring dequeue pointer */
erdp_reg &= ERST_PTR_MASK;
erdp_reg |= ((u64) deq & (u64) ~ERST_PTR_MASK);
}
/* Clear the event handler busy flag (RW1C); event ring is empty. */
erdp_reg |= ERST_EHB;
xhci_write_64(xhci, erdp_reg, &ir_set->erst_dequeue);
}
/*
 * Free one secondary event ring plus its ERST.  Pending events are
 * acknowledged first so the HC no longer references the memory.
 * Always returns 0.
 *
 * NOTE(review): when the root hub is not running this returns early
 * WITHOUT freeing the ERST or the ring, yet the caller still deletes
 * and kfree()s the bookkeeping entry -- that memory is then never
 * reclaimed.  Presumably intentional (registers untouchable while
 * suspended), but the coherent buffer could still be freed; confirm.
 */
static int sec_event_ring_cleanup(struct xhci_hcd *xhci, struct xhci_ring *ring,
struct xhci_intr_reg __iomem *ir_set, struct xhci_erst *erst)
{
int size;
struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
if (!HCD_RH_RUNNING(xhci_to_hcd(xhci)))
return 0;
size = sizeof(struct xhci_erst_entry)*(erst->num_entries);
if (erst->entries) {
/* drain/ack outstanding events before releasing the table */
xhci_handle_sec_intr_events(xhci, ring, ir_set);
dma_free_coherent(dev, size, erst->entries,
erst->erst_dma_addr);
erst->entries = NULL;
}
xhci_ring_free(xhci, ring);
xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed sec event ring");
return 0;
}
/*
 * Release the bookkeeping entry owning @ring (if any) and free the
 * ring and its ERST.  Unknown rings are silently ignored; always
 * returns 0.
 */
int xhci_sec_event_ring_cleanup(struct usb_device *udev, struct xhci_ring *ring)
{
	struct usb_hcd *hcd = bus_to_hcd(udev->bus);
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_sec *sec, *tmp;

	list_for_each_entry_safe(sec, tmp, &xhci_sec, list) {
		if (sec->event_ring != ring)
			continue;
		sec_event_ring_cleanup(xhci, ring, sec->ir_set, &sec->erst);
		list_del(&sec->list);
		kfree(sec);
		break;
	}
	return 0;
}
/*
 * xhci_get_sec_event_ring_phys_addr - physical address of a secondary
 * event ring's first segment.
 * @udev: device whose host controller owns the ring
 * @ring: ring previously returned by xhci_sec_event_ring_setup()
 * @dma:  out: DMA address of the first segment
 *
 * Returns the physical address, or 0 if the controller is halted/not
 * running, the ring is not tracked, or the scatterlist lookup fails.
 *
 * Fix: dma_get_sgtable() can fail (e.g. -ENOMEM); the previous code
 * ignored its return value and then read an uninitialized sg_table.
 */
phys_addr_t xhci_get_sec_event_ring_phys_addr(struct usb_device *udev,
	struct xhci_ring *ring, dma_addr_t *dma)
{
	struct usb_hcd *hcd = bus_to_hcd(udev->bus);
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct device *dev = hcd->self.sysdev;
	struct sg_table sgt;
	phys_addr_t pa;
	struct xhci_sec *sec;

	if (udev->state == USB_STATE_NOTATTACHED || !HCD_RH_RUNNING(hcd) ||
		(xhci->xhc_state & XHCI_STATE_HALTED))
		return 0;

	list_for_each_entry(sec, &xhci_sec, list) {
		if (sec->event_ring != ring)
			continue;

		if (dma_get_sgtable(dev, &sgt, ring->first_seg->trbs,
				ring->first_seg->dma, TRB_SEGMENT_SIZE))
			return 0;

		*dma = ring->first_seg->dma;
		pa = page_to_phys(sg_page(sgt.sgl));
		sg_free_table(&sgt);
		return pa;
	}
	return 0;
}
/*
 * xhci_get_xfer_ring_phys_addr - physical address of an endpoint's
 * transfer ring first segment.
 * @udev: device owning the endpoint
 * @ep:   endpoint whose ring is queried
 * @dma:  out: DMA address of the first segment
 *
 * Returns the physical address, or 0 if the controller is not running,
 * the arguments are invalid, the ring does not exist, or the
 * scatterlist lookup fails.
 *
 * Fix: dma_get_sgtable() can fail; the previous code ignored its
 * return value and then read an uninitialized sg_table.
 */
phys_addr_t xhci_get_xfer_ring_phys_addr(struct usb_device *udev,
	struct usb_host_endpoint *ep, dma_addr_t *dma)
{
	int ret;
	unsigned int ep_index;
	struct xhci_virt_device *virt_dev;
	struct usb_hcd *hcd = bus_to_hcd(udev->bus);
	struct device *dev = hcd->self.sysdev;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct sg_table sgt;
	phys_addr_t pa;

	if (udev->state == USB_STATE_NOTATTACHED || !HCD_RH_RUNNING(hcd))
		return 0;

	ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
	if (ret <= 0) {
		xhci_err(xhci, "%s: invalid args\n", __func__);
		return 0;
	}

	virt_dev = xhci->devs[udev->slot_id];
	ep_index = xhci_get_endpoint_index(&ep->desc);
	if (virt_dev->eps[ep_index].ring &&
		virt_dev->eps[ep_index].ring->first_seg) {
		if (dma_get_sgtable(dev, &sgt,
			virt_dev->eps[ep_index].ring->first_seg->trbs,
			virt_dev->eps[ep_index].ring->first_seg->dma,
			TRB_SEGMENT_SIZE))
			return 0;

		*dma = virt_dev->eps[ep_index].ring->first_seg->dma;
		pa = page_to_phys(sg_page(sgt.sgl));
		sg_free_table(&sgt);
		return pa;
	}
	return 0;
}
/*
 * xhci_stop_endpoint - issue a Stop Endpoint command and wait for it.
 * @udev: target device
 * @ep:   endpoint to stop
 *
 * Returns 0 on success (or if the ring has no work queued), -ENOMEM if
 * the command cannot be allocated, -ENODEV if the device context is
 * gone, -ETIME if the command was aborted/stopped, or the errno from
 * queueing the command.
 *
 * Locking: xhci->lock is dropped before sleeping in
 * wait_for_completion().  The "err" label unlocks; "free_cmd" does not
 * -- every path reaches each label with the matching lock state.
 */
int xhci_stop_endpoint(struct usb_device *udev, struct usb_host_endpoint *ep)
{
struct usb_hcd *hcd = bus_to_hcd(udev->bus);
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
unsigned int ep_index;
struct xhci_virt_device *virt_dev;
struct xhci_command *cmd;
unsigned long flags;
int ret = 0;
ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
if (ret <= 0)
return ret;
cmd = xhci_alloc_command(xhci, true, GFP_NOIO);
if (!cmd)
return -ENOMEM;
spin_lock_irqsave(&xhci->lock, flags);
virt_dev = xhci->devs[udev->slot_id];
if (!virt_dev) {
ret = -ENODEV;
goto err;
}
ep_index = xhci_get_endpoint_index(&ep->desc);
/* only stop an endpoint that actually has a ring with queued TRBs;
 * otherwise fall through to err: (unlock) with ret == 0 */
if (virt_dev->eps[ep_index].ring &&
virt_dev->eps[ep_index].ring->dequeue) {
ret = xhci_queue_stop_endpoint(xhci, cmd, udev->slot_id,
ep_index, 0);
if (ret)
goto err;
ring_cmd_db(xhci);
spin_unlock_irqrestore(&xhci->lock, flags);
/* Wait for stop endpoint command to finish */
wait_for_completion(cmd->completion);
if (cmd->status == COMP_COMMAND_ABORTED ||
cmd->status == COMP_STOPPED) {
xhci_warn(xhci,
"stop endpoint command timeout for ep%d%s\n",
usb_endpoint_num(&ep->desc),
usb_endpoint_dir_in(&ep->desc) ? "in" : "out");
ret = -ETIME;
}
goto free_cmd;
}
err:
spin_unlock_irqrestore(&xhci->lock, flags);
free_cmd:
xhci_free_command(xhci, cmd);
return ret;
}