author    Linus Torvalds <torvalds@linux-foundation.org>    2009-06-16 13:06:10 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>    2009-06-16 13:06:10 -0700
commit    e1f5b94fd0c93c3e27ede88b7ab652d086dc960f (patch)
tree      e8de7a132eb88521dd1c19e128eba2d5349bdf4f /drivers/usb/host
parent    6fd03301d76bc439382710e449f58efbb233df1b (diff)
parent    1b6ed69f974f6f32c8be0d9a7fc952822eb83b6f (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb-2.6: (143 commits)
  USB: xhci depends on PCI.
  USB: xhci: Add Makefile, MAINTAINERS, and Kconfig entries.
  USB: xhci: Respect critical sections.
  USB: xHCI: Fix interrupt moderation.
  USB: xhci: Remove packed attribute from structures.
  usb; xhci: Fix TRB offset calculations.
  USB: xhci: replace if-elseif-else with switch-case
  USB: xhci: Make xhci-mem.c include linux/dmapool.h
  USB: xhci: drop spinlock in xhci_urb_enqueue() error path.
  USB: Change names of SuperSpeed ep companion descriptor structs.
  USB: xhci: Avoid compiler reordering in Link TRB giveback.
  USB: xhci: Clean up xhci_irq() function.
  USB: xhci: Avoid global namespace pollution.
  USB: xhci: Fix Link TRB handoff bit twiddling.
  USB: xhci: Fix register write order.
  USB: xhci: fix some compiler warnings in xhci.h
  USB: xhci: fix lots of compiler warnings.
  USB: xhci: use xhci_handle_event instead of handle_event
  USB: xhci: URB cancellation support.
  USB: xhci: Scatter gather list support for bulk transfers.
  ...
Diffstat (limited to 'drivers/usb/host')
-rw-r--r--  drivers/usb/host/Kconfig          |   20
-rw-r--r--  drivers/usb/host/Makefile         |    2
-rw-r--r--  drivers/usb/host/ehci-au1xxx.c    |    1
-rw-r--r--  drivers/usb/host/ehci-fsl.c       |    1
-rw-r--r--  drivers/usb/host/ehci-hcd.c       |   47
-rw-r--r--  drivers/usb/host/ehci-hub.c       |    4
-rw-r--r--  drivers/usb/host/ehci-ixp4xx.c    |    1
-rw-r--r--  drivers/usb/host/ehci-orion.c     |    3
-rw-r--r--  drivers/usb/host/ehci-pci.c       |   27
-rw-r--r--  drivers/usb/host/ehci-ppc-of.c    |    1
-rw-r--r--  drivers/usb/host/ehci-ps3.c       |    1
-rw-r--r--  drivers/usb/host/ehci-q.c         |   19
-rw-r--r--  drivers/usb/host/ehci-sched.c     |    8
-rw-r--r--  drivers/usb/host/ehci.h           |    1
-rw-r--r--  drivers/usb/host/fhci-dbg.c       |    2
-rw-r--r--  drivers/usb/host/hwa-hc.c         |   21
-rw-r--r--  drivers/usb/host/ohci-dbg.c       |   31
-rw-r--r--  drivers/usb/host/ohci-hcd.c       |   38
-rw-r--r--  drivers/usb/host/ohci-pci.c       |   24
-rw-r--r--  drivers/usb/host/pci-quirks.c     |  123
-rw-r--r--  drivers/usb/host/r8a66597-hcd.c   |   62
-rw-r--r--  drivers/usb/host/r8a66597.h       |   38
-rw-r--r--  drivers/usb/host/uhci-hcd.c       |   23
-rw-r--r--  drivers/usb/host/uhci-q.c         |    2
-rw-r--r--  drivers/usb/host/xhci-dbg.c       |  485
-rw-r--r--  drivers/usb/host/xhci-ext-caps.h  |  145
-rw-r--r--  drivers/usb/host/xhci-hcd.c       | 1274
-rw-r--r--  drivers/usb/host/xhci-hub.c       |  308
-rw-r--r--  drivers/usb/host/xhci-mem.c       |  769
-rw-r--r--  drivers/usb/host/xhci-pci.c       |  166
-rw-r--r--  drivers/usb/host/xhci-ring.c      | 1648
-rw-r--r--  drivers/usb/host/xhci.h           | 1157
32 files changed, 6275 insertions, 177 deletions
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 845479f7c70..1576a0520ad 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -17,6 +17,26 @@ config USB_C67X00_HCD
To compile this driver as a module, choose M here: the
module will be called c67x00.
+config USB_XHCI_HCD
+ tristate "xHCI HCD (USB 3.0) support (EXPERIMENTAL)"
+ depends on USB && PCI && EXPERIMENTAL
+ ---help---
+ The eXtensible Host Controller Interface (xHCI) is the standard for USB 3.0
+ "SuperSpeed" host controller hardware.
+
+ To compile this driver as a module, choose M here: the
+ module will be called xhci.
+
+config USB_XHCI_HCD_DEBUGGING
+ bool "Debugging for the xHCI host controller"
+ depends on USB_XHCI_HCD
+ ---help---
+ Say 'Y' to turn on debugging for the xHCI host controller driver.
+ This will spew debugging output, even in interrupt context.
+ This should only be used for debugging xHCI driver bugs.
+
+ If unsure, say N.
+
config USB_EHCI_HCD
tristate "EHCI HCD (USB 2.0) support"
depends on USB && USB_ARCH_HAS_EHCI
diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile
index f163571e33d..289d748bb41 100644
--- a/drivers/usb/host/Makefile
+++ b/drivers/usb/host/Makefile
@@ -12,6 +12,7 @@ fhci-objs := fhci-hcd.o fhci-hub.o fhci-q.o fhci-mem.o \
ifeq ($(CONFIG_FHCI_DEBUG),y)
fhci-objs += fhci-dbg.o
endif
+xhci-objs := xhci-hcd.o xhci-mem.o xhci-pci.o xhci-ring.o xhci-hub.o xhci-dbg.o
obj-$(CONFIG_USB_WHCI_HCD) += whci/
@@ -23,6 +24,7 @@ obj-$(CONFIG_USB_ISP116X_HCD) += isp116x-hcd.o
obj-$(CONFIG_USB_OHCI_HCD) += ohci-hcd.o
obj-$(CONFIG_USB_UHCI_HCD) += uhci-hcd.o
obj-$(CONFIG_USB_FHCI_HCD) += fhci.o
+obj-$(CONFIG_USB_XHCI_HCD) += xhci.o
obj-$(CONFIG_USB_SL811_HCD) += sl811-hcd.o
obj-$(CONFIG_USB_SL811_CS) += sl811_cs.o
obj-$(CONFIG_USB_U132_HCD) += u132-hcd.o
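With both hunks applied the new driver builds like any other HCD: the xhci-objs line names the objects linked into one composite module, and the obj-$(CONFIG_USB_XHCI_HCD) rule emits it as xhci.ko when the option is set to M. A minimal sketch of the resulting .config fragment (option names taken from the Kconfig hunk above):

    CONFIG_USB_XHCI_HCD=m
    # CONFIG_USB_XHCI_HCD_DEBUGGING is not set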
diff --git a/drivers/usb/host/ehci-au1xxx.c b/drivers/usb/host/ehci-au1xxx.c
index bf69f473910..c3a778bd359 100644
--- a/drivers/usb/host/ehci-au1xxx.c
+++ b/drivers/usb/host/ehci-au1xxx.c
@@ -97,6 +97,7 @@ static const struct hc_driver ehci_au1xxx_hc_driver = {
.urb_enqueue = ehci_urb_enqueue,
.urb_dequeue = ehci_urb_dequeue,
.endpoint_disable = ehci_endpoint_disable,
+ .endpoint_reset = ehci_endpoint_reset,
/*
* scheduling support
diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c
index 01c3da34f67..bf86809c512 100644
--- a/drivers/usb/host/ehci-fsl.c
+++ b/drivers/usb/host/ehci-fsl.c
@@ -309,6 +309,7 @@ static const struct hc_driver ehci_fsl_hc_driver = {
.urb_enqueue = ehci_urb_enqueue,
.urb_dequeue = ehci_urb_dequeue,
.endpoint_disable = ehci_endpoint_disable,
+ .endpoint_reset = ehci_endpoint_reset,
/*
* scheduling support
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index c637207a1c8..2b72473544d 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -1024,6 +1024,51 @@ done:
return;
}
+static void
+ehci_endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep)
+{
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+ struct ehci_qh *qh;
+ int eptype = usb_endpoint_type(&ep->desc);
+
+ if (eptype != USB_ENDPOINT_XFER_BULK && eptype != USB_ENDPOINT_XFER_INT)
+ return;
+
+ rescan:
+ spin_lock_irq(&ehci->lock);
+ qh = ep->hcpriv;
+
+ /* For Bulk and Interrupt endpoints we maintain the toggle state
+ * in the hardware; the toggle bits in udev aren't used at all.
+ * When an endpoint is reset by usb_clear_halt() we must reset
+ * the toggle bit in the QH.
+ */
+ if (qh) {
+ if (!list_empty(&qh->qtd_list)) {
+ WARN_ONCE(1, "clear_halt for a busy endpoint\n");
+ } else if (qh->qh_state == QH_STATE_IDLE) {
+ qh->hw_token &= ~cpu_to_hc32(ehci, QTD_TOGGLE);
+ } else {
+ /* It's not safe to write into the overlay area
+ * while the QH is active. Unlink it first and
+ * wait for the unlink to complete.
+ */
+ if (qh->qh_state == QH_STATE_LINKED) {
+ if (eptype == USB_ENDPOINT_XFER_BULK) {
+ unlink_async(ehci, qh);
+ } else {
+ intr_deschedule(ehci, qh);
+ (void) qh_schedule(ehci, qh);
+ }
+ }
+ spin_unlock_irq(&ehci->lock);
+ schedule_timeout_uninterruptible(1);
+ goto rescan;
+ }
+ }
+ spin_unlock_irq(&ehci->lock);
+}
+
static int ehci_get_frame (struct usb_hcd *hcd)
{
struct ehci_hcd *ehci = hcd_to_ehci (hcd);
@@ -1097,7 +1142,7 @@ static int __init ehci_hcd_init(void)
sizeof(struct ehci_itd), sizeof(struct ehci_sitd));
#ifdef DEBUG
- ehci_debug_root = debugfs_create_dir("ehci", NULL);
+ ehci_debug_root = debugfs_create_dir("ehci", usb_debug_root);
if (!ehci_debug_root) {
retval = -ENOENT;
goto err_debug;
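The new ehci_endpoint_reset() op above is not called by device drivers directly; usbcore dispatches to it when a driver clears a halt. A hedged sketch of that dispatch, modeled on usb_hcd_reset_endpoint() in core/hcd.c from this same development cycle (the fallback branch keeps the old behaviour, resetting the software toggle copies in udev, for HCDs that don't implement the hook):

    void usb_hcd_reset_endpoint(struct usb_device *udev,
    		struct usb_host_endpoint *ep)
    {
    	struct usb_hcd *hcd = bus_to_hcd(udev->bus);

    	if (hcd->driver->endpoint_reset)
    		hcd->driver->endpoint_reset(hcd, ep);
    	else {
    		int epnum = usb_endpoint_num(&ep->desc);
    		int is_out = usb_endpoint_dir_out(&ep->desc);
    		int is_control = usb_endpoint_xfer_control(&ep->desc);

    		/* no HCD hook: clear the toggles kept in udev */
    		usb_settoggle(udev, epnum, is_out, 0);
    		if (is_control)
    			usb_settoggle(udev, epnum, !is_out, 0);
    	}
    }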
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index 97a53a48a3d..f46ad27c9a9 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -391,7 +391,7 @@ static inline void create_companion_file(struct ehci_hcd *ehci)
/* with integrated TT there is no companion! */
if (!ehci_is_TDI(ehci))
- i = device_create_file(ehci_to_hcd(ehci)->self.dev,
+ i = device_create_file(ehci_to_hcd(ehci)->self.controller,
&dev_attr_companion);
}
@@ -399,7 +399,7 @@ static inline void remove_companion_file(struct ehci_hcd *ehci)
{
/* with integrated TT there is no companion! */
if (!ehci_is_TDI(ehci))
- device_remove_file(ehci_to_hcd(ehci)->self.dev,
+ device_remove_file(ehci_to_hcd(ehci)->self.controller,
&dev_attr_companion);
}
diff --git a/drivers/usb/host/ehci-ixp4xx.c b/drivers/usb/host/ehci-ixp4xx.c
index 9c32063a0c2..a44bb4a9495 100644
--- a/drivers/usb/host/ehci-ixp4xx.c
+++ b/drivers/usb/host/ehci-ixp4xx.c
@@ -51,6 +51,7 @@ static const struct hc_driver ixp4xx_ehci_hc_driver = {
.urb_enqueue = ehci_urb_enqueue,
.urb_dequeue = ehci_urb_dequeue,
.endpoint_disable = ehci_endpoint_disable,
+ .endpoint_reset = ehci_endpoint_reset,
.get_frame_number = ehci_get_frame,
.hub_status_data = ehci_hub_status_data,
.hub_control = ehci_hub_control,
diff --git a/drivers/usb/host/ehci-orion.c b/drivers/usb/host/ehci-orion.c
index 9d487908012..770dd9aba62 100644
--- a/drivers/usb/host/ehci-orion.c
+++ b/drivers/usb/host/ehci-orion.c
@@ -149,6 +149,7 @@ static const struct hc_driver ehci_orion_hc_driver = {
.urb_enqueue = ehci_urb_enqueue,
.urb_dequeue = ehci_urb_dequeue,
.endpoint_disable = ehci_endpoint_disable,
+ .endpoint_reset = ehci_endpoint_reset,
/*
* scheduling support
@@ -187,7 +188,7 @@ ehci_orion_conf_mbus_windows(struct usb_hcd *hcd,
}
}
-static int __init ehci_orion_drv_probe(struct platform_device *pdev)
+static int __devinit ehci_orion_drv_probe(struct platform_device *pdev)
{
struct orion_ehci_data *pd = pdev->dev.platform_data;
struct resource *res;
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
index 5aa8bce90e1..f3683e1da16 100644
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
@@ -268,7 +268,7 @@ done:
* Also they depend on separate root hub suspend/resume.
*/
-static int ehci_pci_suspend(struct usb_hcd *hcd, pm_message_t message)
+static int ehci_pci_suspend(struct usb_hcd *hcd)
{
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
unsigned long flags;
@@ -293,12 +293,6 @@ static int ehci_pci_suspend(struct usb_hcd *hcd, pm_message_t message)
ehci_writel(ehci, 0, &ehci->regs->intr_enable);
(void)ehci_readl(ehci, &ehci->regs->intr_enable);
- /* make sure snapshot being resumed re-enumerates everything */
- if (message.event == PM_EVENT_PRETHAW) {
- ehci_halt(ehci);
- ehci_reset(ehci);
- }
-
clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
bail:
spin_unlock_irqrestore (&ehci->lock, flags);
@@ -309,7 +303,7 @@ static int ehci_pci_suspend(struct usb_hcd *hcd, pm_message_t message)
return rc;
}
-static int ehci_pci_resume(struct usb_hcd *hcd)
+static int ehci_pci_resume(struct usb_hcd *hcd, bool hibernated)
{
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
@@ -322,10 +316,12 @@ static int ehci_pci_resume(struct usb_hcd *hcd)
/* Mark hardware accessible again as we are out of D3 state by now */
set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
- /* If CF is still set, we maintained PCI Vaux power.
+ /* If CF is still set and we aren't resuming from hibernation
+ * then we maintained PCI Vaux power.
* Just undo the effect of ehci_pci_suspend().
*/
- if (ehci_readl(ehci, &ehci->regs->configured_flag) == FLAG_CF) {
+ if (ehci_readl(ehci, &ehci->regs->configured_flag) == FLAG_CF &&
+ !hibernated) {
int mask = INTR_MASK;
if (!hcd->self.root_hub->do_remote_wakeup)
@@ -335,7 +331,6 @@ static int ehci_pci_resume(struct usb_hcd *hcd)
return 0;
}
- ehci_dbg(ehci, "lost power, restarting\n");
usb_root_hub_lost_power(hcd->self.root_hub);
/* Else reset, to cope with power loss or flush-to-storage
@@ -393,6 +388,7 @@ static const struct hc_driver ehci_pci_hc_driver = {
.urb_enqueue = ehci_urb_enqueue,
.urb_dequeue = ehci_urb_dequeue,
.endpoint_disable = ehci_endpoint_disable,
+ .endpoint_reset = ehci_endpoint_reset,
/*
* scheduling support
@@ -429,10 +425,11 @@ static struct pci_driver ehci_pci_driver = {
.probe = usb_hcd_pci_probe,
.remove = usb_hcd_pci_remove,
+ .shutdown = usb_hcd_pci_shutdown,
-#ifdef CONFIG_PM
- .suspend = usb_hcd_pci_suspend,
- .resume = usb_hcd_pci_resume,
+#ifdef CONFIG_PM_SLEEP
+ .driver = {
+ .pm = &usb_hcd_pci_pm_ops
+ },
#endif
- .shutdown = usb_hcd_pci_shutdown,
};
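The driver-model change at the bottom of this hunk repeats for OHCI and UHCI below: the legacy pci_driver suspend/resume methods give way to a shared dev_pm_ops table. A hedged sketch of how that glue plausibly forwards to the reworked hooks (usb_hcd_pci_pm_ops lives in the USB core's hcd-pci.c; details are simplified and hcd_pci_restore is an illustrative name):

    static int hcd_pci_suspend(struct device *dev)
    {
    	struct usb_hcd *hcd = pci_get_drvdata(to_pci_dev(dev));

    	if (hcd->driver->pci_suspend)
    		return hcd->driver->pci_suspend(hcd);
    	return 0;
    }

    static int hcd_pci_restore(struct device *dev)
    {
    	struct usb_hcd *hcd = pci_get_drvdata(to_pci_dev(dev));

    	/* restoring from a hibernation image: hibernated == true,
    	 * so resume paths like ehci_pci_resume() re-enumerate */
    	if (hcd->driver->pci_resume)
    		return hcd->driver->pci_resume(hcd, true);
    	return 0;
    }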
diff --git a/drivers/usb/host/ehci-ppc-of.c b/drivers/usb/host/ehci-ppc-of.c
index ef732b704f5..fbd272288fc 100644
--- a/drivers/usb/host/ehci-ppc-of.c
+++ b/drivers/usb/host/ehci-ppc-of.c
@@ -61,6 +61,7 @@ static const struct hc_driver ehci_ppc_of_hc_driver = {
.urb_enqueue = ehci_urb_enqueue,
.urb_dequeue = ehci_urb_dequeue,
.endpoint_disable = ehci_endpoint_disable,
+ .endpoint_reset = ehci_endpoint_reset,
/*
* scheduling support
diff --git a/drivers/usb/host/ehci-ps3.c b/drivers/usb/host/ehci-ps3.c
index bb870b8f81b..eecd2a0680a 100644
--- a/drivers/usb/host/ehci-ps3.c
+++ b/drivers/usb/host/ehci-ps3.c
@@ -65,6 +65,7 @@ static const struct hc_driver ps3_ehci_hc_driver = {
.urb_enqueue = ehci_urb_enqueue,
.urb_dequeue = ehci_urb_dequeue,
.endpoint_disable = ehci_endpoint_disable,
+ .endpoint_reset = ehci_endpoint_reset,
.get_frame_number = ehci_get_frame,
.hub_status_data = ehci_hub_status_data,
.hub_control = ehci_hub_control,
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
index 1976b1b3778..3192f683f80 100644
--- a/drivers/usb/host/ehci-q.c
+++ b/drivers/usb/host/ehci-q.c
@@ -93,22 +93,6 @@ qh_update (struct ehci_hcd *ehci, struct ehci_qh *qh, struct ehci_qtd *qtd)
qh->hw_qtd_next = QTD_NEXT(ehci, qtd->qtd_dma);
qh->hw_alt_next = EHCI_LIST_END(ehci);
- /* Except for control endpoints, we make hardware maintain data
- * toggle (like OHCI) ... here (re)initialize the toggle in the QH,
- * and set the pseudo-toggle in udev. Only usb_clear_halt() will
- * ever clear it.
- */
- if (!(qh->hw_info1 & cpu_to_hc32(ehci, 1 << 14))) {
- unsigned is_out, epnum;
-
- is_out = !(qtd->hw_token & cpu_to_hc32(ehci, 1 << 8));
- epnum = (hc32_to_cpup(ehci, &qh->hw_info1) >> 8) & 0x0f;
- if (unlikely (!usb_gettoggle (qh->dev, epnum, is_out))) {
- qh->hw_token &= ~cpu_to_hc32(ehci, QTD_TOGGLE);
- usb_settoggle (qh->dev, epnum, is_out, 1);
- }
- }
-
/* HC must see latest qtd and qh data before we clear ACTIVE+HALT */
wmb ();
qh->hw_token &= cpu_to_hc32(ehci, QTD_TOGGLE | QTD_STS_PING);
@@ -850,7 +834,6 @@ done:
qh->qh_state = QH_STATE_IDLE;
qh->hw_info1 = cpu_to_hc32(ehci, info1);
qh->hw_info2 = cpu_to_hc32(ehci, info2);
- usb_settoggle (urb->dev, usb_pipeendpoint (urb->pipe), !is_input, 1);
qh_refresh (ehci, qh);
return qh;
}
@@ -881,7 +864,7 @@ static void qh_link_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
}
}
- /* clear halt and/or toggle; and maybe recover from silicon quirk */
+ /* clear halt and maybe recover from silicon quirk */
if (qh->qh_state == QH_STATE_IDLE)
qh_refresh (ehci, qh);
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
index 556d0ec0c1f..9d1babc7ff6 100644
--- a/drivers/usb/host/ehci-sched.c
+++ b/drivers/usb/host/ehci-sched.c
@@ -760,8 +760,10 @@ static int qh_schedule(struct ehci_hcd *ehci, struct ehci_qh *qh)
if (status) {
/* "normal" case, uframing flexible except with splits */
if (qh->period) {
- frame = qh->period - 1;
- do {
+ int i;
+
+ for (i = qh->period; status && i > 0; --i) {
+ frame = ++ehci->random_frame % qh->period;
for (uframe = 0; uframe < 8; uframe++) {
status = check_intr_schedule (ehci,
frame, uframe, qh,
@@ -769,7 +771,7 @@ static int qh_schedule(struct ehci_hcd *ehci, struct ehci_qh *qh)
if (status == 0)
break;
}
- } while (status && frame--);
+ }
/* qh->period == 0 means every uframe */
} else {
diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
index 6cff195e1a3..90ad3395bb2 100644
--- a/drivers/usb/host/ehci.h
+++ b/drivers/usb/host/ehci.h
@@ -116,6 +116,7 @@ struct ehci_hcd { /* one per controller */
struct timer_list watchdog;
unsigned long actions;
unsigned stamp;
+ unsigned random_frame;
unsigned long next_statechange;
u32 command;
diff --git a/drivers/usb/host/fhci-dbg.c b/drivers/usb/host/fhci-dbg.c
index ea8a4255c5d..e799f86dab1 100644
--- a/drivers/usb/host/fhci-dbg.c
+++ b/drivers/usb/host/fhci-dbg.c
@@ -108,7 +108,7 @@ void fhci_dfs_create(struct fhci_hcd *fhci)
{
struct device *dev = fhci_to_hcd(fhci)->self.controller;
- fhci->dfs_root = debugfs_create_dir(dev_name(dev), NULL);
+ fhci->dfs_root = debugfs_create_dir(dev_name(dev), usb_debug_root);
if (!fhci->dfs_root) {
WARN_ON(1);
return;
diff --git a/drivers/usb/host/hwa-hc.c b/drivers/usb/host/hwa-hc.c
index cbf30e515f2..88b03214622 100644
--- a/drivers/usb/host/hwa-hc.c
+++ b/drivers/usb/host/hwa-hc.c
@@ -172,25 +172,6 @@ error_cluster_id_get:
}
-static int hwahc_op_suspend(struct usb_hcd *usb_hcd, pm_message_t msg)
-{
- struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
- struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
- dev_err(wusbhc->dev, "%s (%p [%p], 0x%lx) UNIMPLEMENTED\n", __func__,
- usb_hcd, hwahc, *(unsigned long *) &msg);
- return -ENOSYS;
-}
-
-static int hwahc_op_resume(struct usb_hcd *usb_hcd)
-{
- struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
- struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
-
- dev_err(wusbhc->dev, "%s (%p [%p]) UNIMPLEMENTED\n", __func__,
- usb_hcd, hwahc);
- return -ENOSYS;
-}
-
/*
* No need to abort pipes, as when this is called, all the children
have been disconnected and that has done it [through
@@ -598,8 +579,6 @@ static struct hc_driver hwahc_hc_driver = {
.flags = HCD_USB2, /* FIXME */
.reset = hwahc_op_reset,
.start = hwahc_op_start,
- .pci_suspend = hwahc_op_suspend,
- .pci_resume = hwahc_op_resume,
.stop = hwahc_op_stop,
.get_frame_number = hwahc_op_get_frame_number,
.urb_enqueue = hwahc_op_urb_enqueue,
diff --git a/drivers/usb/host/ohci-dbg.c b/drivers/usb/host/ohci-dbg.c
index d3269656aa4..811f5dfdc58 100644
--- a/drivers/usb/host/ohci-dbg.c
+++ b/drivers/usb/host/ohci-dbg.c
@@ -431,7 +431,7 @@ static struct dentry *ohci_debug_root;
struct debug_buffer {
ssize_t (*fill_func)(struct debug_buffer *); /* fill method */
- struct device *dev;
+ struct ohci_hcd *ohci;
struct mutex mutex; /* protect filling of buffer */
size_t count; /* number of characters filled into buffer */
char *page;
@@ -505,15 +505,11 @@ show_list (struct ohci_hcd *ohci, char *buf, size_t count, struct ed *ed)
static ssize_t fill_async_buffer(struct debug_buffer *buf)
{
- struct usb_bus *bus;
- struct usb_hcd *hcd;
struct ohci_hcd *ohci;
size_t temp;
unsigned long flags;
- bus = dev_get_drvdata(buf->dev);
- hcd = bus_to_hcd(bus);
- ohci = hcd_to_ohci(hcd);
+ ohci = buf->ohci;
/* display control and bulk lists together, for simplicity */
spin_lock_irqsave (&ohci->lock, flags);
@@ -529,8 +525,6 @@ static ssize_t fill_async_buffer(struct debug_buffer *buf)
static ssize_t fill_periodic_buffer(struct debug_buffer *buf)
{
- struct usb_bus *bus;
- struct usb_hcd *hcd;
struct ohci_hcd *ohci;
struct ed **seen, *ed;
unsigned long flags;
@@ -542,9 +536,7 @@ static ssize_t fill_periodic_buffer(struct debug_buffer *buf)
return 0;
seen_count = 0;
- bus = (struct usb_bus *)dev_get_drvdata(buf->dev);
- hcd = bus_to_hcd(bus);
- ohci = hcd_to_ohci(hcd);
+ ohci = buf->ohci;
next = buf->page;
size = PAGE_SIZE;
@@ -626,7 +618,6 @@ static ssize_t fill_periodic_buffer(struct debug_buffer *buf)
static ssize_t fill_registers_buffer(struct debug_buffer *buf)
{
- struct usb_bus *bus;
struct usb_hcd *hcd;
struct ohci_hcd *ohci;
struct ohci_regs __iomem *regs;
@@ -635,9 +626,8 @@ static ssize_t fill_registers_buffer(struct debug_buffer *buf)
char *next;
u32 rdata;
- bus = (struct usb_bus *)dev_get_drvdata(buf->dev);
- hcd = bus_to_hcd(bus);
- ohci = hcd_to_ohci(hcd);
+ ohci = buf->ohci;
+ hcd = ohci_to_hcd(ohci);
regs = ohci->regs;
next = buf->page;
size = PAGE_SIZE;
@@ -710,7 +700,7 @@ done:
return PAGE_SIZE - size;
}
-static struct debug_buffer *alloc_buffer(struct device *dev,
+static struct debug_buffer *alloc_buffer(struct ohci_hcd *ohci,
ssize_t (*fill_func)(struct debug_buffer *))
{
struct debug_buffer *buf;
@@ -718,7 +708,7 @@ static struct debug_buffer *alloc_buffer(struct device *dev,
buf = kzalloc(sizeof(struct debug_buffer), GFP_KERNEL);
if (buf) {
- buf->dev = dev;
+ buf->ohci = ohci;
buf->fill_func = fill_func;
mutex_init(&buf->mutex);
}
@@ -810,26 +800,25 @@ static int debug_registers_open(struct inode *inode, struct file *file)
static inline void create_debug_files (struct ohci_hcd *ohci)
{
struct usb_bus *bus = &ohci_to_hcd(ohci)->self;
- struct device *dev = bus->dev;
ohci->debug_dir = debugfs_create_dir(bus->bus_name, ohci_debug_root);
if (!ohci->debug_dir)
goto dir_error;
ohci->debug_async = debugfs_create_file("async", S_IRUGO,
- ohci->debug_dir, dev,
+ ohci->debug_dir, ohci,
&debug_async_fops);
if (!ohci->debug_async)
goto async_error;
ohci->debug_periodic = debugfs_create_file("periodic", S_IRUGO,
- ohci->debug_dir, dev,
+ ohci->debug_dir, ohci,
&debug_periodic_fops);
if (!ohci->debug_periodic)
goto periodic_error;
ohci->debug_registers = debugfs_create_file("registers", S_IRUGO,
- ohci->debug_dir, dev,
+ ohci->debug_dir, ohci,
&debug_registers_fops);
if (!ohci->debug_registers)
goto registers_error;
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 25db704f3a2..58151687d35 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -571,7 +571,7 @@ static int ohci_init (struct ohci_hcd *ohci)
*/
static int ohci_run (struct ohci_hcd *ohci)
{
- u32 mask, temp;
+ u32 mask, val;
int first = ohci->fminterval == 0;
struct usb_hcd *hcd = ohci_to_hcd(ohci);
@@ -580,8 +580,8 @@ static int ohci_run (struct ohci_hcd *ohci)
/* boot firmware should have set this up (5.1.1.3.1) */
if (first) {
- temp = ohci_readl (ohci, &ohci->regs->fminterval);
- ohci->fminterval = temp & 0x3fff;
+ val = ohci_readl (ohci, &ohci->regs->fminterval);
+ ohci->fminterval = val & 0x3fff;
if (ohci->fminterval != FI)
ohci_dbg (ohci, "fminterval delta %d\n",
ohci->fminterval - FI);
@@ -600,25 +600,25 @@ static int ohci_run (struct ohci_hcd *ohci)
switch (ohci->hc_control & OHCI_CTRL_HCFS) {
case OHCI_USB_OPER:
- temp = 0;
+ val = 0;
break;
case OHCI_USB_SUSPEND:
case OHCI_USB_RESUME:
ohci->hc_control &= OHCI_CTRL_RWC;
ohci->hc_control |= OHCI_USB_RESUME;
- temp = 10 /* msec wait */;
+ val = 10 /* msec wait */;
break;
// case OHCI_USB_RESET:
default:
ohci->hc_control &= OHCI_CTRL_RWC;
ohci->hc_control |= OHCI_USB_RESET;
- temp = 50 /* msec wait */;
+ val = 50 /* msec wait */;
break;
}
ohci_writel (ohci, ohci->hc_control, &ohci->regs->control);
// flush the writes
(void) ohci_readl (ohci, &ohci->regs->control);
- msleep(temp);
+ msleep(val);
memset (ohci->hcca, 0, sizeof (struct ohci_hcca));
@@ -628,9 +628,9 @@ static int ohci_run (struct ohci_hcd *ohci)
retry:
/* HC Reset requires max 10 us delay */
ohci_writel (ohci, OHCI_HCR, &ohci->regs->cmdstatus);
- temp = 30; /* ... allow extra time */
+ val = 30; /* ... allow extra time */
while ((ohci_readl (ohci, &ohci->regs->cmdstatus) & OHCI_HCR) != 0) {
- if (--temp == 0) {
+ if (--val == 0) {
spin_unlock_irq (&ohci->lock);
ohci_err (ohci, "USB HC reset timed out!\n");
return -1;
@@ -699,23 +699,23 @@ retry:
ohci_writel (ohci, mask, &ohci->regs->intrenable);
/* handle root hub init quirks ... */
- temp = roothub_a (ohci);
- temp &= ~(RH_A_PSM | RH_A_OCPM);
+ val = roothub_a (ohci);
+ val &= ~(RH_A_PSM | RH_A_OCPM);
if (ohci->flags & OHCI_QUIRK_SUPERIO) {
/* NSC 87560 and maybe others */
- temp |= RH_A_NOCP;
- temp &= ~(RH_A_POTPGT | RH_A_NPS);
- ohci_writel (ohci, temp, &ohci->regs->roothub.a);
+ val |= RH_A_NOCP;
+ val &= ~(RH_A_POTPGT | RH_A_NPS);
+ ohci_writel (ohci, val, &ohci->regs->roothub.a);
} else if ((ohci->flags & OHCI_QUIRK_AMD756) ||
(ohci->flags & OHCI_QUIRK_HUB_POWER)) {
/* hub power always on; required for AMD-756 and some
* Mac platforms. ganged overcurrent reporting, if any.
*/
- temp |= RH_A_NPS;
- ohci_writel (ohci, temp, &ohci->regs->roothub.a);
+ val |= RH_A_NPS;
+ ohci_writel (ohci, val, &ohci->regs->roothub.a);
}
ohci_writel (ohci, RH_HS_LPSC, &ohci->regs->roothub.status);
- ohci_writel (ohci, (temp & RH_A_NPS) ? 0 : RH_B_PPCM,
+ ohci_writel (ohci, (val & RH_A_NPS) ? 0 : RH_B_PPCM,
&ohci->regs->roothub.b);
// flush those writes
(void) ohci_readl (ohci, &ohci->regs->control);
@@ -724,7 +724,7 @@ retry:
spin_unlock_irq (&ohci->lock);
// POTPGT delay is bits 24-31, in 2 ms units.
- mdelay ((temp >> 23) & 0x1fe);
+ mdelay ((val >> 23) & 0x1fe);
hcd->state = HC_STATE_RUNNING;
if (quirk_zfmicro(ohci)) {
@@ -1105,7 +1105,7 @@ static int __init ohci_hcd_mod_init(void)
set_bit(USB_OHCI_LOADED, &usb_hcds_loaded);
#ifdef DEBUG
- ohci_debug_root = debugfs_create_dir("ohci", NULL);
+ ohci_debug_root = debugfs_create_dir("ohci", usb_debug_root);
if (!ohci_debug_root) {
retval = -ENOENT;
goto error_debug;
diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c
index f9961b4c0da..d2ba04dd785 100644
--- a/drivers/usb/host/ohci-pci.c
+++ b/drivers/usb/host/ohci-pci.c
@@ -372,7 +372,7 @@ static int __devinit ohci_pci_start (struct usb_hcd *hcd)
#ifdef CONFIG_PM
-static int ohci_pci_suspend (struct usb_hcd *hcd, pm_message_t message)
+static int ohci_pci_suspend(struct usb_hcd *hcd)
{
struct ohci_hcd *ohci = hcd_to_ohci (hcd);
unsigned long flags;
@@ -394,10 +394,6 @@ static int ohci_pci_suspend (struct usb_hcd *hcd, pm_message_t message)
ohci_writel(ohci, OHCI_INTR_MIE, &ohci->regs->intrdisable);
(void)ohci_readl(ohci, &ohci->regs->intrdisable);
- /* make sure snapshot being resumed re-enumerates everything */
- if (message.event == PM_EVENT_PRETHAW)
- ohci_usb_reset(ohci);
-
clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
bail:
spin_unlock_irqrestore (&ohci->lock, flags);
@@ -406,9 +402,14 @@ static int ohci_pci_suspend (struct usb_hcd *hcd, pm_message_t message)
}
-static int ohci_pci_resume (struct usb_hcd *hcd)
+static int ohci_pci_resume(struct usb_hcd *hcd, bool hibernated)
{
set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+
+ /* Make sure resume from hibernation re-enumerates everything */
+ if (hibernated)
+ ohci_usb_reset(hcd_to_ohci(hcd));
+
ohci_finish_controller_resume(hcd);
return 0;
}
@@ -484,12 +485,11 @@ static struct pci_driver ohci_pci_driver = {
.probe = usb_hcd_pci_probe,
.remove = usb_hcd_pci_remove,
+ .shutdown = usb_hcd_pci_shutdown,
-#ifdef CONFIG_PM
- .suspend = usb_hcd_pci_suspend,
- .resume = usb_hcd_pci_resume,
+#ifdef CONFIG_PM_SLEEP
+ .driver = {
+ .pm = &usb_hcd_pci_pm_ops
+ },
#endif
-
- .shutdown = usb_hcd_pci_shutdown,
};
-
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index 033c2846ce5..83b5f9cea85 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -15,6 +15,7 @@
#include <linux/delay.h>
#include <linux/acpi.h>
#include "pci-quirks.h"
+#include "xhci-ext-caps.h"
#define UHCI_USBLEGSUP 0xc0 /* legacy support */
@@ -341,7 +342,127 @@ static void __devinit quirk_usb_disable_ehci(struct pci_dev *pdev)
return;
}
+/*
+ * handshake - spin reading a register until handshake completes
+ * @ptr: address of hc register to be read
+ * @mask: bits to look at in result of read
+ * @done: value of those bits when handshake succeeds
+ * @wait_usec: timeout in microseconds
+ * @delay_usec: delay in microseconds to wait between polling
+ *
+ * Polls a register every delay_usec microseconds.
+ * Returns 0 when the mask bits have the value done.
+ * Returns -ETIMEDOUT if this condition is not true after
+ * wait_usec microseconds have passed.
+ */
+static int handshake(void __iomem *ptr, u32 mask, u32 done,
+ int wait_usec, int delay_usec)
+{
+ u32 result;
+
+ do {
+ result = readl(ptr);
+ result &= mask;
+ if (result == done)
+ return 0;
+ udelay(delay_usec);
+ wait_usec -= delay_usec;
+ } while (wait_usec > 0);
+ return -ETIMEDOUT;
+}
+
+/**
+ * PCI Quirks for xHCI.
+ *
+ * Takes care of the handoff between the Pre-OS (i.e. BIOS) and the OS.
+ * It signals to the BIOS that the OS wants control of the host controller,
+ * and then waits 5 seconds for the BIOS to hand over control.
+ * If we timeout, assume the BIOS is broken and take control anyway.
+ */
+static void __devinit quirk_usb_handoff_xhci(struct pci_dev *pdev)
+{
+ void __iomem *base;
+ int ext_cap_offset;
+ void __iomem *op_reg_base;
+ u32 val;
+ int timeout;
+
+ if (!mmio_resource_enabled(pdev, 0))
+ return;
+
+ base = ioremap_nocache(pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
+ if (base == NULL)
+ return;
+ /*
+ * Find the Legacy Support Capability register -
+ * this is optional for xHCI host controllers.
+ */
+ ext_cap_offset = xhci_find_next_cap_offset(base, XHCI_HCC_PARAMS_OFFSET);
+ do {
+ if (!ext_cap_offset)
+ /* We've reached the end of the extended capabilities */
+ goto hc_init;
+ val = readl(base + ext_cap_offset);
+ if (XHCI_EXT_CAPS_ID(val) == XHCI_EXT_CAPS_LEGACY)
+ break;
+ ext_cap_offset = xhci_find_next_cap_offset(base, ext_cap_offset);
+ } while (1);
+
+ /* If the BIOS owns the HC, signal that the OS wants it, and wait */
+ if (val & XHCI_HC_BIOS_OWNED) {
+ writel(val | XHCI_HC_OS_OWNED, base + ext_cap_offset);
+
+ /* Wait for 5 seconds with 10 microsecond polling interval */
+ timeout = handshake(base + ext_cap_offset, XHCI_HC_BIOS_OWNED,
+ 0, 5000000, 10);
+
+ /* Assume a buggy BIOS and take HC ownership anyway */
+ if (timeout) {
+ dev_warn(&pdev->dev, "xHCI BIOS handoff failed"
+ " (BIOS bug ?) %08x\n", val);
+ writel(val & ~XHCI_HC_BIOS_OWNED, base + ext_cap_offset);
+ }
+ }
+
+ /* Disable any BIOS SMIs */
+ writel(XHCI_LEGACY_DISABLE_SMI,
+ base + ext_cap_offset + XHCI_LEGACY_CONTROL_OFFSET);
+
+hc_init:
+ op_reg_base = base + XHCI_HC_LENGTH(readl(base));
+
+ /* Wait for the host controller to be ready before writing any
+ * operational or runtime registers. Wait 5 seconds and no more.
+ */
+ timeout = handshake(op_reg_base + XHCI_STS_OFFSET, XHCI_STS_CNR, 0,
+ 5000000, 10);
+ /* Assume a buggy HC and start HC initialization anyway */
+ if (timeout) {
+ val = readl(op_reg_base + XHCI_STS_OFFSET);
+ dev_warn(&pdev->dev,
+ "xHCI HW not ready after 5 sec (HC bug?) "
+ "status = 0x%x\n", val);
+ }
+
+ /* Send the halt and disable interrupts command */
+ val = readl(op_reg_base + XHCI_CMD_OFFSET);
+ val &= ~(XHCI_CMD_RUN | XHCI_IRQS);
+ writel(val, op_reg_base + XHCI_CMD_OFFSET);
+
+ /* Wait for the HC to halt - poll every 125 usec (one microframe). */
+ timeout = handshake(op_reg_base + XHCI_STS_OFFSET, XHCI_STS_HALT, 1,
+ XHCI_MAX_HALT_USEC, 125);
+ if (timeout) {
+ val = readl(op_reg_base + XHCI_STS_OFFSET);
+ dev_warn(&pdev->dev,
+ "xHCI HW did not halt within %d usec "
+ "status = 0x%x\n", XHCI_MAX_HALT_USEC, val);
+ }
+
+ iounmap(base);
+}
static void __devinit quirk_usb_early_handoff(struct pci_dev *pdev)
{
@@ -351,5 +472,7 @@ static void __devinit quirk_usb_early_handoff(struct pci_dev *pdev)
quirk_usb_handoff_ohci(pdev);
else if (pdev->class == PCI_CLASS_SERIAL_USB_EHCI)
quirk_usb_disable_ehci(pdev);
+ else if (pdev->class == PCI_CLASS_SERIAL_USB_XHCI)
+ quirk_usb_handoff_xhci(pdev);
}
DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, quirk_usb_early_handoff);
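For reference, the new else-if above keys off the xHCI programming interface. A sketch of the constant it assumes, as added to <linux/pci_ids.h> (the value follows the PCI class-code layout: base class 0x0c serial bus, subclass 0x03 USB, prog-if 0x30 for xHCI):

    #define PCI_CLASS_SERIAL_USB_XHCI	0x0c0330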
diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c
index f1626e58c14..56976cc0352 100644
--- a/drivers/usb/host/r8a66597-hcd.c
+++ b/drivers/usb/host/r8a66597-hcd.c
@@ -46,31 +46,10 @@ MODULE_LICENSE("GPL");
MODULE_AUTHOR("Yoshihiro Shimoda");
MODULE_ALIAS("platform:r8a66597_hcd");
-#define DRIVER_VERSION "10 Apr 2008"
+#define DRIVER_VERSION "2009-05-26"
static const char hcd_name[] = "r8a66597_hcd";
-/* module parameters */
-#if !defined(CONFIG_SUPERH_ON_CHIP_R8A66597)
-static unsigned short clock = XTAL12;
-module_param(clock, ushort, 0644);
-MODULE_PARM_DESC(clock, "input clock: 48MHz=32768, 24MHz=16384, 12MHz=0 "
- "(default=0)");
-#endif
-
-static unsigned short vif = LDRV;
-module_param(vif, ushort, 0644);
-MODULE_PARM_DESC(vif, "input VIF: 3.3V=32768, 1.5V=0(default=32768)");
-
-static unsigned short endian;
-module_param(endian, ushort, 0644);
-MODULE_PARM_DESC(endian, "data endian: big=256, little=0 (default=0)");
-
-static unsigned short irq_sense = 0xff;
-module_param(irq_sense, ushort, 0644);
-MODULE_PARM_DESC(irq_sense, "IRQ sense: low level=32, falling edge=0 "
- "(default=32)");
-
static void packet_write(struct r8a66597 *r8a66597, u16 pipenum);
static int r8a66597_get_frame(struct usb_hcd *hcd);
@@ -136,7 +115,8 @@ static int r8a66597_clock_enable(struct r8a66597 *r8a66597)
}
} while ((tmp & USBE) != USBE);
r8a66597_bclr(r8a66597, USBE, SYSCFG0);
- r8a66597_mdfy(r8a66597, clock, XTAL, SYSCFG0);
+ r8a66597_mdfy(r8a66597, get_xtal_from_pdata(r8a66597->pdata), XTAL,
+ SYSCFG0);
i = 0;
r8a66597_bset(r8a66597, XCKE, SYSCFG0);
@@ -203,6 +183,9 @@ static void r8a66597_disable_port(struct r8a66597 *r8a66597, int port)
static int enable_controller(struct r8a66597 *r8a66597)
{
int ret, port;
+ u16 vif = r8a66597->pdata->vif ? LDRV : 0;
+ u16 irq_sense = r8a66597->irq_sense_low ? INTL : 0;
+ u16 endian = r8a66597->pdata->endian ? BIGEND : 0;
ret = r8a66597_clock_enable(r8a66597);
if (ret < 0)
@@ -2373,7 +2356,7 @@ static int __init_or_module r8a66597_remove(struct platform_device *pdev)
return 0;
}
-static int __init r8a66597_probe(struct platform_device *pdev)
+static int __devinit r8a66597_probe(struct platform_device *pdev)
{
#if defined(CONFIG_SUPERH_ON_CHIP_R8A66597) && defined(CONFIG_HAVE_CLK)
char clk_name[8];
@@ -2418,6 +2401,12 @@ static int __init r8a66597_probe(struct platform_device *pdev)
goto clean_up;
}
+ if (pdev->dev.platform_data == NULL) {
+ dev_err(&pdev->dev, "no platform data\n");
+ ret = -ENODEV;
+ goto clean_up;
+ }
+
/* initialize hcd */
hcd = usb_create_hcd(&r8a66597_hc_driver, &pdev->dev, (char *)hcd_name);
if (!hcd) {
@@ -2428,6 +2417,8 @@ static int __init r8a66597_probe(struct platform_device *pdev)
r8a66597 = hcd_to_r8a66597(hcd);
memset(r8a66597, 0, sizeof(struct r8a66597));
dev_set_drvdata(&pdev->dev, r8a66597);
+ r8a66597->pdata = pdev->dev.platform_data;
+ r8a66597->irq_sense_low = irq_trigger == IRQF_TRIGGER_LOW;
#if defined(CONFIG_SUPERH_ON_CHIP_R8A66597) && defined(CONFIG_HAVE_CLK)
snprintf(clk_name, sizeof(clk_name), "usb%d", pdev->id);
@@ -2458,29 +2449,6 @@ static int __init r8a66597_probe(struct platform_device *pdev)
hcd->rsrc_start = res->start;
- /* irq_sense setting on cmdline takes precedence over resource
- * settings, so the introduction of irqflags in IRQ resourse
- * won't disturb existing setups */
- switch (irq_sense) {
- case INTL:
- irq_trigger = IRQF_TRIGGER_LOW;
- break;
- case 0:
- irq_trigger = IRQF_TRIGGER_FALLING;
- break;
- case 0xff:
- if (irq_trigger)
- irq_sense = (irq_trigger & IRQF_TRIGGER_LOW) ?
- INTL : 0;
- else {
- irq_sense = INTL;
- irq_trigger = IRQF_TRIGGER_LOW;
- }
- break;
- default:
- dev_err(&pdev->dev, "Unknown irq_sense value.\n");
- }
-
ret = usb_add_hcd(hcd, irq, IRQF_DISABLED | irq_trigger);
if (ret != 0) {
dev_err(&pdev->dev, "Failed to add hcd\n");
diff --git a/drivers/usb/host/r8a66597.h b/drivers/usb/host/r8a66597.h
index f49208f1bb7..d72680b433f 100644
--- a/drivers/usb/host/r8a66597.h
+++ b/drivers/usb/host/r8a66597.h
@@ -30,6 +30,8 @@
#include <linux/clk.h>
#endif
+#include <linux/usb/r8a66597.h>
+
#define SYSCFG0 0x00
#define SYSCFG1 0x02
#define SYSSTS0 0x04
@@ -488,6 +490,7 @@ struct r8a66597 {
#if defined(CONFIG_SUPERH_ON_CHIP_R8A66597) && defined(CONFIG_HAVE_CLK)
struct clk *clk;
#endif
+ struct r8a66597_platdata *pdata;
struct r8a66597_device device0;
struct r8a66597_root_hub root_hub[R8A66597_MAX_ROOT_HUB];
struct list_head pipe_queue[R8A66597_MAX_NUM_PIPE];
@@ -506,6 +509,7 @@ struct r8a66597 {
unsigned long child_connect_map[4];
unsigned bus_suspended:1;
+ unsigned irq_sense_low:1;
};
static inline struct r8a66597 *hcd_to_r8a66597(struct usb_hcd *hcd)
@@ -660,10 +664,36 @@ static inline void r8a66597_port_power(struct r8a66597 *r8a66597, int port,
{
unsigned long dvstctr_reg = get_dvstctr_reg(port);
- if (power)
- r8a66597_bset(r8a66597, VBOUT, dvstctr_reg);
- else
- r8a66597_bclr(r8a66597, VBOUT, dvstctr_reg);
+ if (r8a66597->pdata->port_power) {
+ r8a66597->pdata->port_power(port, power);
+ } else {
+ if (power)
+ r8a66597_bset(r8a66597, VBOUT, dvstctr_reg);
+ else
+ r8a66597_bclr(r8a66597, VBOUT, dvstctr_reg);
+ }
+}
+
+static inline u16 get_xtal_from_pdata(struct r8a66597_platdata *pdata)
+{
+ u16 clock = 0;
+
+ switch (pdata->xtal) {
+ case R8A66597_PLATDATA_XTAL_12MHZ:
+ clock = XTAL12;
+ break;
+ case R8A66597_PLATDATA_XTAL_24MHZ:
+ clock = XTAL24;
+ break;
+ case R8A66597_PLATDATA_XTAL_48MHZ:
+ clock = XTAL48;
+ break;
+ default:
+ printk(KERN_ERR "r8a66597: platdata clock is wrong.\n");
+ break;
+ }
+
+ return clock;
}
#define get_pipectr_addr(pipenum) (PIPE1CTR + (pipenum - 1) * 2)
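With the module parameters gone, boards must pass these settings through platform data instead. A hedged sketch of what a board file might supply (field names follow the pdata accesses above; the values are illustrative):

    #include <linux/platform_device.h>
    #include <linux/usb/r8a66597.h>

    static struct r8a66597_platdata usb_host_pdata = {
    	.xtal	= R8A66597_PLATDATA_XTAL_12MHZ,	/* 12 MHz crystal */
    	.vif	= 1,				/* 3.3 V VIF */
    	.endian	= 0,				/* little endian */
    };

    static struct platform_device usb_host_device = {
    	.name	= "r8a66597_hcd",
    	.id	= 0,
    	.dev	= {
    		.platform_data	= &usb_host_pdata,
    	},
    };

IRQ sense likewise now comes from the IRQ resource's trigger flags (IRQF_TRIGGER_LOW vs. IRQF_TRIGGER_FALLING), as the probe changes above show.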
diff --git a/drivers/usb/host/uhci-hcd.c b/drivers/usb/host/uhci-hcd.c
index cf5e4cf7ea4..274751b4409 100644
--- a/drivers/usb/host/uhci-hcd.c
+++ b/drivers/usb/host/uhci-hcd.c
@@ -769,7 +769,7 @@ static int uhci_rh_resume(struct usb_hcd *hcd)
return rc;
}
-static int uhci_pci_suspend(struct usb_hcd *hcd, pm_message_t message)
+static int uhci_pci_suspend(struct usb_hcd *hcd)
{
struct uhci_hcd *uhci = hcd_to_uhci(hcd);
int rc = 0;
@@ -795,10 +795,6 @@ static int uhci_pci_suspend(struct usb_hcd *hcd, pm_message_t message)
/* FIXME: Enable non-PME# remote wakeup? */
- /* make sure snapshot being resumed re-enumerates everything */
- if (message.event == PM_EVENT_PRETHAW)
- uhci_hc_died(uhci);
-
done_okay:
clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
done:
@@ -806,7 +802,7 @@ done:
return rc;
}
-static int uhci_pci_resume(struct usb_hcd *hcd)
+static int uhci_pci_resume(struct usb_hcd *hcd, bool hibernated)
{
struct uhci_hcd *uhci = hcd_to_uhci(hcd);
@@ -820,6 +816,10 @@ static int uhci_pci_resume(struct usb_hcd *hcd)
spin_lock_irq(&uhci->lock);
+ /* Make sure resume from hibernation re-enumerates everything */
+ if (hibernated)
+ uhci_hc_died(uhci);
+
/* FIXME: Disable non-PME# remote wakeup? */
/* The firmware or a boot kernel may have changed the controller
@@ -940,10 +940,11 @@ static struct pci_driver uhci_pci_driver = {
.remove = usb_hcd_pci_remove,
.shutdown = uhci_shutdown,
-#ifdef CONFIG_PM
- .suspend = usb_hcd_pci_suspend,
- .resume = usb_hcd_pci_resume,
-#endif /* PM */
+#ifdef CONFIG_PM_SLEEP
+ .driver = {
+ .pm = &usb_hcd_pci_pm_ops
+ },
+#endif
};
static int __init uhci_hcd_init(void)
@@ -961,7 +962,7 @@ static int __init uhci_hcd_init(void)
errbuf = kmalloc(ERRBUF_LEN, GFP_KERNEL);
if (!errbuf)
goto errbuf_failed;
- uhci_debugfs_root = debugfs_create_dir("uhci", NULL);
+ uhci_debugfs_root = debugfs_create_dir("uhci", usb_debug_root);
if (!uhci_debugfs_root)
goto debug_failed;
}
diff --git a/drivers/usb/host/uhci-q.c b/drivers/usb/host/uhci-q.c
index 3e5807d14ff..64e57bfe236 100644
--- a/drivers/usb/host/uhci-q.c
+++ b/drivers/usb/host/uhci-q.c
@@ -260,7 +260,7 @@ static struct uhci_qh *uhci_alloc_qh(struct uhci_hcd *uhci,
INIT_LIST_HEAD(&qh->node);
if (udev) { /* Normal QH */
- qh->type = hep->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
+ qh->type = usb_endpoint_type(&hep->desc);
if (qh->type != USB_ENDPOINT_XFER_ISOC) {
qh->dummy_td = uhci_alloc_td(uhci);
if (!qh->dummy_td) {
diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c
new file mode 100644
index 00000000000..2501c571f85
--- /dev/null
+++ b/drivers/usb/host/xhci-dbg.c
@@ -0,0 +1,485 @@
+/*
+ * xHCI host controller driver
+ *
+ * Copyright (C) 2008 Intel Corp.
+ *
+ * Author: Sarah Sharp
+ * Some code borrowed from the Linux EHCI driver.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include "xhci.h"
+
+#define XHCI_INIT_VALUE 0x0
+
+/* Add verbose debugging later, just print everything for now */
+
+void xhci_dbg_regs(struct xhci_hcd *xhci)
+{
+ u32 temp;
+
+ xhci_dbg(xhci, "// xHCI capability registers at %p:\n",
+ xhci->cap_regs);
+ temp = xhci_readl(xhci, &xhci->cap_regs->hc_capbase);
+ xhci_dbg(xhci, "// @%p = 0x%x (CAPLENGTH AND HCIVERSION)\n",
+ &xhci->cap_regs->hc_capbase, temp);
+ xhci_dbg(xhci, "// CAPLENGTH: 0x%x\n",
+ (unsigned int) HC_LENGTH(temp));
+#if 0
+ xhci_dbg(xhci, "// HCIVERSION: 0x%x\n",
+ (unsigned int) HC_VERSION(temp));
+#endif
+
+ xhci_dbg(xhci, "// xHCI operational registers at %p:\n", xhci->op_regs);
+
+ temp = xhci_readl(xhci, &xhci->cap_regs->run_regs_off);
+ xhci_dbg(xhci, "// @%p = 0x%x RTSOFF\n",
+ &xhci->cap_regs->run_regs_off,
+ (unsigned int) temp & RTSOFF_MASK);
+ xhci_dbg(xhci, "// xHCI runtime registers at %p:\n", xhci->run_regs);
+
+ temp = xhci_readl(xhci, &xhci->cap_regs->db_off);
+ xhci_dbg(xhci, "// @%p = 0x%x DBOFF\n", &xhci->cap_regs->db_off, temp);
+ xhci_dbg(xhci, "// Doorbell array at %p:\n", xhci->dba);
+}
+
+static void xhci_print_cap_regs(struct xhci_hcd *xhci)
+{
+ u32 temp;
+
+ xhci_dbg(xhci, "xHCI capability registers at %p:\n", xhci->cap_regs);
+
+ temp = xhci_readl(xhci, &xhci->cap_regs->hc_capbase);
+ xhci_dbg(xhci, "CAPLENGTH AND HCIVERSION 0x%x:\n",
+ (unsigned int) temp);
+ xhci_dbg(xhci, "CAPLENGTH: 0x%x\n",
+ (unsigned int) HC_LENGTH(temp));
+ xhci_dbg(xhci, "HCIVERSION: 0x%x\n",
+ (unsigned int) HC_VERSION(temp));
+
+ temp = xhci_readl(xhci, &xhci->cap_regs->hcs_params1);
+ xhci_dbg(xhci, "HCSPARAMS 1: 0x%x\n",
+ (unsigned int) temp);
+ xhci_dbg(xhci, " Max device slots: %u\n",
+ (unsigned int) HCS_MAX_SLOTS(temp));
+ xhci_dbg(xhci, " Max interrupters: %u\n",
+ (unsigned int) HCS_MAX_INTRS(temp));
+ xhci_dbg(xhci, " Max ports: %u\n",
+ (unsigned int) HCS_MAX_PORTS(temp));
+
+ temp = xhci_readl(xhci, &xhci->cap_regs->hcs_params2);
+ xhci_dbg(xhci, "HCSPARAMS 2: 0x%x\n",
+ (unsigned int) temp);
+ xhci_dbg(xhci, " Isoc scheduling threshold: %u\n",
+ (unsigned int) HCS_IST(temp));
+ xhci_dbg(xhci, " Maximum allowed segments in event ring: %u\n",
+ (unsigned int) HCS_ERST_MAX(temp));
+
+ temp = xhci_readl(xhci, &xhci->cap_regs->hcs_params3);
+ xhci_dbg(xhci, "HCSPARAMS 3 0x%x:\n",
+ (unsigned int) temp);
+ xhci_dbg(xhci, " Worst case U1 device exit latency: %u\n",
+ (unsigned int) HCS_U1_LATENCY(temp));
+ xhci_dbg(xhci, " Worst case U2 device exit latency: %u\n",
+ (unsigned int) HCS_U2_LATENCY(temp));
+
+ temp = xhci_readl(xhci, &xhci->cap_regs->hcc_params);
+ xhci_dbg(xhci, "HCC PARAMS 0x%x:\n", (unsigned int) temp);
+ xhci_dbg(xhci, " HC generates %s bit addresses\n",
+ HCC_64BIT_ADDR(temp) ? "64" : "32");
+ /* FIXME */
+ xhci_dbg(xhci, " FIXME: more HCCPARAMS debugging\n");
+
+ temp = xhci_readl(xhci, &xhci->cap_regs->run_regs_off);
+ xhci_dbg(xhci, "RTSOFF 0x%x:\n", temp & RTSOFF_MASK);
+}
+
+static void xhci_print_command_reg(struct xhci_hcd *xhci)
+{
+ u32 temp;
+
+ temp = xhci_readl(xhci, &xhci->op_regs->command);
+ xhci_dbg(xhci, "USBCMD 0x%x:\n", temp);
+ xhci_dbg(xhci, " HC is %s\n",
+ (temp & CMD_RUN) ? "running" : "being stopped");
+ xhci_dbg(xhci, " HC has %sfinished hard reset\n",
+ (temp & CMD_RESET) ? "not " : "");
+ xhci_dbg(xhci, " Event Interrupts %s\n",
+ (temp & CMD_EIE) ? "enabled " : "disabled");
+ xhci_dbg(xhci, " Host System Error Interrupts %s\n",
+ (temp & CMD_HSEIE) ? "enabled " : "disabled");
+ xhci_dbg(xhci, " HC has %sfinished light reset\n",
+ (temp & CMD_LRESET) ? "not " : "");
+}
+
+static void xhci_print_status(struct xhci_hcd *xhci)
+{
+ u32 temp;
+
+ temp = xhci_readl(xhci, &xhci->op_regs->status);
+ xhci_dbg(xhci, "USBSTS 0x%x:\n", temp);
+ xhci_dbg(xhci, " Event ring is %sempty\n",
+ (temp & STS_EINT) ? "not " : "");
+ xhci_dbg(xhci, " %sHost System Error\n",
+ (temp & STS_FATAL) ? "WARNING: " : "No ");
+ xhci_dbg(xhci, " HC is %s\n",
+ (temp & STS_HALT) ? "halted" : "running");
+}
+
+static void xhci_print_op_regs(struct xhci_hcd *xhci)
+{
+ xhci_dbg(xhci, "xHCI operational registers at %p:\n", xhci->op_regs);
+ xhci_print_command_reg(xhci);
+ xhci_print_status(xhci);
+}
+
+static void xhci_print_ports(struct xhci_hcd *xhci)
+{
+ u32 __iomem *addr;
+ int i, j;
+ int ports;
+ char *names[NUM_PORT_REGS] = {
+ "status",
+ "power",
+ "link",
+ "reserved",
+ };
+
+ ports = HCS_MAX_PORTS(xhci->hcs_params1);
+ addr = &xhci->op_regs->port_status_base;
+ for (i = 0; i < ports; i++) {
+ for (j = 0; j < NUM_PORT_REGS; ++j) {
+ xhci_dbg(xhci, "%p port %s reg = 0x%x\n",
+ addr, names[j],
+ (unsigned int) xhci_readl(xhci, addr));
+ addr++;
+ }
+ }
+}
+
+void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int set_num)
+{
+ void *addr;
+ u32 temp;
+
+ addr = &ir_set->irq_pending;
+ temp = xhci_readl(xhci, addr);
+ if (temp == XHCI_INIT_VALUE)
+ return;
+
+ xhci_dbg(xhci, " %p: ir_set[%i]\n", ir_set, set_num);
+
+ xhci_dbg(xhci, " %p: ir_set.pending = 0x%x\n", addr,
+ (unsigned int)temp);
+
+ addr = &ir_set->irq_control;
+ temp = xhci_readl(xhci, addr);
+ xhci_dbg(xhci, " %p: ir_set.control = 0x%x\n", addr,
+ (unsigned int)temp);
+
+ addr = &ir_set->erst_size;
+ temp = xhci_readl(xhci, addr);
+ xhci_dbg(xhci, " %p: ir_set.erst_size = 0x%x\n", addr,
+ (unsigned int)temp);
+
+ addr = &ir_set->rsvd;
+ temp = xhci_readl(xhci, addr);
+ if (temp != XHCI_INIT_VALUE)
+ xhci_dbg(xhci, " WARN: %p: ir_set.rsvd = 0x%x\n",
+ addr, (unsigned int)temp);
+
+ addr = &ir_set->erst_base[0];
+ temp = xhci_readl(xhci, addr);
+ xhci_dbg(xhci, " %p: ir_set.erst_base[0] = 0x%x\n",
+ addr, (unsigned int) temp);
+
+ addr = &ir_set->erst_base[1];
+ temp = xhci_readl(xhci, addr);
+ xhci_dbg(xhci, " %p: ir_set.erst_base[1] = 0x%x\n",
+ addr, (unsigned int) temp);
+
+ addr = &ir_set->erst_dequeue[0];
+ temp = xhci_readl(xhci, addr);
+ xhci_dbg(xhci, " %p: ir_set.erst_dequeue[0] = 0x%x\n",
+ addr, (unsigned int) temp);
+
+ addr = &ir_set->erst_dequeue[1];
+ temp = xhci_readl(xhci, addr);
+ xhci_dbg(xhci, " %p: ir_set.erst_dequeue[1] = 0x%x\n",
+ addr, (unsigned int) temp);
+}
+
+void xhci_print_run_regs(struct xhci_hcd *xhci)
+{
+ u32 temp;
+ int i;
+
+ xhci_dbg(xhci, "xHCI runtime registers at %p:\n", xhci->run_regs);
+ temp = xhci_readl(xhci, &xhci->run_regs->microframe_index);
+ xhci_dbg(xhci, " %p: Microframe index = 0x%x\n",
+ &xhci->run_regs->microframe_index,
+ (unsigned int) temp);
+ for (i = 0; i < 7; ++i) {
+ temp = xhci_readl(xhci, &xhci->run_regs->rsvd[i]);
+ if (temp != XHCI_INIT_VALUE)
+ xhci_dbg(xhci, " WARN: %p: Rsvd[%i] = 0x%x\n",
+ &xhci->run_regs->rsvd[i],
+ i, (unsigned int) temp);
+ }
+}
+
+void xhci_print_registers(struct xhci_hcd *xhci)
+{
+ xhci_print_cap_regs(xhci);
+ xhci_print_op_regs(xhci);
+ xhci_print_ports(xhci);
+}
+
+void xhci_print_trb_offsets(struct xhci_hcd *xhci, union xhci_trb *trb)
+{
+ int i;
+ for (i = 0; i < 4; ++i)
+ xhci_dbg(xhci, "Offset 0x%x = 0x%x\n",
+ i*4, trb->generic.field[i]);
+}
+
+/**
+ * Debug a transfer request block (TRB).
+ */
+void xhci_debug_trb(struct xhci_hcd *xhci, union xhci_trb *trb)
+{
+ u64 address;
+ u32 type = xhci_readl(xhci, &trb->link.control) & TRB_TYPE_BITMASK;
+
+ switch (type) {
+ case TRB_TYPE(TRB_LINK):
+ xhci_dbg(xhci, "Link TRB:\n");
+ xhci_print_trb_offsets(xhci, trb);
+
+ address = trb->link.segment_ptr[0] +
+ (((u64) trb->link.segment_ptr[1]) << 32);
+ xhci_dbg(xhci, "Next ring segment DMA address = 0x%llx\n", address);
+
+ xhci_dbg(xhci, "Interrupter target = 0x%x\n",
+ GET_INTR_TARGET(trb->link.intr_target));
+ xhci_dbg(xhci, "Cycle bit = %u\n",
+ (unsigned int) (trb->link.control & TRB_CYCLE));
+ xhci_dbg(xhci, "Toggle cycle bit = %u\n",
+ (unsigned int) (trb->link.control & LINK_TOGGLE));
+ xhci_dbg(xhci, "No Snoop bit = %u\n",
+ (unsigned int) (trb->link.control & TRB_NO_SNOOP));
+ break;
+ case TRB_TYPE(TRB_TRANSFER):
+ address = trb->trans_event.buffer[0] +
+ (((u64) trb->trans_event.buffer[1]) << 32);
+ /*
+ * FIXME: look at flags to figure out if it's an address or if
+ * the data is directly in the buffer field.
+ */
+ xhci_dbg(xhci, "DMA address or buffer contents= %llu\n", address);
+ break;
+ case TRB_TYPE(TRB_COMPLETION):
+ address = trb->event_cmd.cmd_trb[0] +
+ (((u64) trb->event_cmd.cmd_trb[1]) << 32);
+ xhci_dbg(xhci, "Command TRB pointer = %llu\n", address);
+ xhci_dbg(xhci, "Completion status = %u\n",
+ (unsigned int) GET_COMP_CODE(trb->event_cmd.status));
+ xhci_dbg(xhci, "Flags = 0x%x\n", (unsigned int) trb->event_cmd.flags);
+ break;
+ default:
+ xhci_dbg(xhci, "Unknown TRB with TRB type ID %u\n",
+ (unsigned int) type>>10);
+ xhci_print_trb_offsets(xhci, trb);
+ break;
+ }
+}
+
+/**
+ * Debug a segment with an xHCI ring.
+ *
+ * @return The Link TRB of the segment, or NULL if there is no Link TRB
+ * (which is a bug, since all segments must have a Link TRB).
+ *
+ * Prints out all TRBs in the segment, even those after the Link TRB.
+ *
+ * XXX: should we print out TRBs that the HC owns? As long as we don't
+ * write, that should be fine... We shouldn't expect that the memory pointed to
+ * by the TRB is valid at all. Do we care about ones the HC owns? Probably,
+ * for HC debugging.
+ */
+void xhci_debug_segment(struct xhci_hcd *xhci, struct xhci_segment *seg)
+{
+ int i;
+ u32 addr = (u32) seg->dma;
+ union xhci_trb *trb = seg->trbs;
+
+ for (i = 0; i < TRBS_PER_SEGMENT; ++i) {
+ trb = &seg->trbs[i];
+ xhci_dbg(xhci, "@%08x %08x %08x %08x %08x\n", addr,
+ (unsigned int) trb->link.segment_ptr[0],
+ (unsigned int) trb->link.segment_ptr[1],
+ (unsigned int) trb->link.intr_target,
+ (unsigned int) trb->link.control);
+ addr += sizeof(*trb);
+ }
+}
+
+void xhci_dbg_ring_ptrs(struct xhci_hcd *xhci, struct xhci_ring *ring)
+{
+ xhci_dbg(xhci, "Ring deq = %p (virt), 0x%llx (dma)\n",
+ ring->dequeue,
+ (unsigned long long)xhci_trb_virt_to_dma(ring->deq_seg,
+ ring->dequeue));
+ xhci_dbg(xhci, "Ring deq updated %u times\n",
+ ring->deq_updates);
+ xhci_dbg(xhci, "Ring enq = %p (virt), 0x%llx (dma)\n",
+ ring->enqueue,
+ (unsigned long long)xhci_trb_virt_to_dma(ring->enq_seg,
+ ring->enqueue));
+ xhci_dbg(xhci, "Ring enq updated %u times\n",
+ ring->enq_updates);
+}
+
+/**
+ * Debugging for an xHCI ring, which is a queue broken into multiple segments.
+ *
+ * Print out each segment in the ring. Check that the DMA address in
+ * each link segment actually matches the segment's stored DMA address.
+ * Check that the link end bit is only set at the end of the ring.
+ * Check that the dequeue and enqueue pointers point to real data in this ring
+ * (not some other ring).
+ */
+void xhci_debug_ring(struct xhci_hcd *xhci, struct xhci_ring *ring)
+{
+ /* FIXME: Throw an error if any segment doesn't have a Link TRB */
+ struct xhci_segment *seg;
+ struct xhci_segment *first_seg = ring->first_seg;
+ xhci_debug_segment(xhci, first_seg);
+
+ if (!ring->enq_updates && !ring->deq_updates) {
+ xhci_dbg(xhci, " Ring has not been updated\n");
+ return;
+ }
+ for (seg = first_seg->next; seg != first_seg; seg = seg->next)
+ xhci_debug_segment(xhci, seg);
+}
+
+void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst)
+{
+ u32 addr = (u32) erst->erst_dma_addr;
+ int i;
+ struct xhci_erst_entry *entry;
+
+ for (i = 0; i < erst->num_entries; ++i) {
+ entry = &erst->entries[i];
+ xhci_dbg(xhci, "@%08x %08x %08x %08x %08x\n",
+ (unsigned int) addr,
+ (unsigned int) entry->seg_addr[0],
+ (unsigned int) entry->seg_addr[1],
+ (unsigned int) entry->seg_size,
+ (unsigned int) entry->rsvd);
+ addr += sizeof(*entry);
+ }
+}
+
+void xhci_dbg_cmd_ptrs(struct xhci_hcd *xhci)
+{
+ u32 val;
+
+ val = xhci_readl(xhci, &xhci->op_regs->cmd_ring[0]);
+ xhci_dbg(xhci, "// xHC command ring deq ptr low bits + flags = 0x%x\n", val);
+ val = xhci_readl(xhci, &xhci->op_regs->cmd_ring[1]);
+ xhci_dbg(xhci, "// xHC command ring deq ptr high bits = 0x%x\n", val);
+}
+
+void xhci_dbg_ctx(struct xhci_hcd *xhci, struct xhci_device_control *ctx, dma_addr_t dma, unsigned int last_ep)
+{
+ int i, j;
+ int last_ep_ctx = 31;
+ /* Fields are 32 bits wide, DMA addresses are in bytes */
+ int field_size = 32 / 8;
+
+ xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - drop flags\n",
+ &ctx->drop_flags, (unsigned long long)dma,
+ ctx->drop_flags);
+ dma += field_size;
+ xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - add flags\n",
+ &ctx->add_flags, (unsigned long long)dma,
+ ctx->add_flags);
+ dma += field_size;
+ for (i = 0; i < 6; ++i) {
+ xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd[%d]\n",
+ &ctx->rsvd[i], (unsigned long long)dma,
+ ctx->rsvd[i], i);
+ dma += field_size;
+ }
+
+ xhci_dbg(xhci, "Slot Context:\n");
+ xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_info\n",
+ &ctx->slot.dev_info,
+ (unsigned long long)dma, ctx->slot.dev_info);
+ dma += field_size;
+ xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_info2\n",
+ &ctx->slot.dev_info2,
+ (unsigned long long)dma, ctx->slot.dev_info2);
+ dma += field_size;
+ xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - tt_info\n",
+ &ctx->slot.tt_info,
+ (unsigned long long)dma, ctx->slot.tt_info);
+ dma += field_size;
+ xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_state\n",
+ &ctx->slot.dev_state,
+ (unsigned long long)dma, ctx->slot.dev_state);
+ dma += field_size;
+ for (i = 0; i < 4; ++i) {
+ xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd[%d]\n",
+ &ctx->slot.reserved[i], (unsigned long long)dma,
+ ctx->slot.reserved[i], i);
+ dma += field_size;
+ }
+
+ if (last_ep < 31)
+ last_ep_ctx = last_ep + 1;
+ for (i = 0; i < last_ep_ctx; ++i) {
+ xhci_dbg(xhci, "Endpoint %02d Context:\n", i);
+ xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - ep_info\n",
+ &ctx->ep[i].ep_info,
+ (unsigned long long)dma, ctx->ep[i].ep_info);
+ dma += field_size;
+ xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - ep_info2\n",
+ &ctx->ep[i].ep_info2,
+ (unsigned long long)dma, ctx->ep[i].ep_info2);
+ dma += field_size;
+ xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - deq[0]\n",
+ &ctx->ep[i].deq[0],
+ (unsigned long long)dma, ctx->ep[i].deq[0]);
+ dma += field_size;
+ xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - deq[1]\n",
+ &ctx->ep[i].deq[1],
+ (unsigned long long)dma, ctx->ep[i].deq[1]);
+ dma += field_size;
+ xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - tx_info\n",
+ &ctx->ep[i].tx_info,
+ (unsigned long long)dma, ctx->ep[i].tx_info);
+ dma += field_size;
+ for (j = 0; j < 3; ++j) {
+ xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd[%d]\n",
+ &ctx->ep[i].reserved[j],
+ (unsigned long long)dma,
+ ctx->ep[i].reserved[j], j);
+ dma += field_size;
+ }
+ }
+}
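These dumpers are bring-up aids rather than a stable interface; a hedged sketch of typical call sites elsewhere in the driver (all output goes through xhci_dbg(), so it only appears when CONFIG_USB_XHCI_HCD_DEBUGGING is enabled):

    xhci_dbg_regs(xhci);			/* capability/runtime register map */
    xhci_print_registers(xhci);			/* cap + op registers, port array */
    xhci_debug_ring(xhci, xhci->cmd_ring);	/* walk every segment of a ring */
    xhci_dbg_cmd_ptrs(xhci);			/* command ring dequeue pointer */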
diff --git a/drivers/usb/host/xhci-ext-caps.h b/drivers/usb/host/xhci-ext-caps.h
new file mode 100644
index 00000000000..ecc131c3fe3
--- /dev/null
+++ b/drivers/usb/host/xhci-ext-caps.h
@@ -0,0 +1,145 @@
+/*
+ * xHCI host controller driver
+ *
+ * Copyright (C) 2008 Intel Corp.
+ *
+ * Author: Sarah Sharp
+ * Some code borrowed from the Linux EHCI driver.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+/* Up to 16 microframes to halt an HC - one microframe is 125 microseconds */
+#define XHCI_MAX_HALT_USEC (16*125)
+/* HC not running - set to 1 when run/stop bit is cleared. */
+#define XHCI_STS_HALT (1<<0)
+
+/* HCCPARAMS offset from PCI base address */
+#define XHCI_HCC_PARAMS_OFFSET 0x10
+/* HCCPARAMS contains the first extended capability pointer */
+#define XHCI_HCC_EXT_CAPS(p) (((p)>>16)&0xffff)
+
+/* Command and Status registers offset from the Operational Registers address */
+#define XHCI_CMD_OFFSET 0x00
+#define XHCI_STS_OFFSET 0x04
+
+#define XHCI_MAX_EXT_CAPS 50
+
+/* Capability Register */
+/* bits 7:0 - how long is the Capabilities register */
+#define XHCI_HC_LENGTH(p) (((p)>>00)&0x00ff)
+
+/* Extended capability register fields */
+#define XHCI_EXT_CAPS_ID(p) (((p)>>0)&0xff)
+#define XHCI_EXT_CAPS_NEXT(p) (((p)>>8)&0xff)
+#define XHCI_EXT_CAPS_VAL(p) ((p)>>16)
+/* Extended capability IDs - ID 0 reserved */
+#define XHCI_EXT_CAPS_LEGACY 1
+#define XHCI_EXT_CAPS_PROTOCOL 2
+#define XHCI_EXT_CAPS_PM 3
+#define XHCI_EXT_CAPS_VIRT 4
+#define XHCI_EXT_CAPS_ROUTE 5
+/* IDs 6-9 reserved */
+#define XHCI_EXT_CAPS_DEBUG 10
+/* USB Legacy Support Capability - section 7.1.1 */
+#define XHCI_HC_BIOS_OWNED (1 << 16)
+#define XHCI_HC_OS_OWNED (1 << 24)
+
+/* USB Legacy Support Capability - section 7.1.1 */
+/* Add this offset, plus the value of xECP in HCCPARAMS to the base address */
+#define XHCI_LEGACY_SUPPORT_OFFSET (0x00)
+
+/* USB Legacy Support Control and Status Register - section 7.1.2 */
+/* Add this offset, plus the value of xECP in HCCPARAMS to the base address */
+#define XHCI_LEGACY_CONTROL_OFFSET (0x04)
+/* bits 1:2, 5:12, and 17:19 need to be preserved; bits 21:28 should be zero */
+#define XHCI_LEGACY_DISABLE_SMI ((0x3 << 1) + (0xff << 5) + (0x7 << 17))
+
+/* command register values to disable interrupts and halt the HC */
+/* start/stop HC execution - do not write unless HC is halted */
+#define XHCI_CMD_RUN (1 << 0)
+/* Event Interrupt Enable - get irq when EINT bit is set in USBSTS register */
+#define XHCI_CMD_EIE (1 << 2)
+/* Host System Error Interrupt Enable - get irq when HSEIE bit set in USBSTS */
+#define XHCI_CMD_HSEIE (1 << 3)
+/* Enable Wrap Event - '1' means xHC generates an event when MFINDEX wraps. */
+#define XHCI_CMD_EWE (1 << 10)
+
+#define XHCI_IRQS (XHCI_CMD_EIE | XHCI_CMD_HSEIE | XHCI_CMD_EWE)
+
+/* true: Controller Not Ready to accept doorbell or op reg writes after reset */
+#define XHCI_STS_CNR (1 << 11)
+
+#include <linux/io.h>
+
+/**
+ * Return the next extended capability pointer register.
+ *
+ * @base PCI register base address.
+ *
+ * @ext_offset Offset of the 32-bit register that contains the extended
+ * capabilities pointer. If searching for the first extended capability, pass
+ * in XHCI_HCC_PARAMS_OFFSET. If searching for the next extended capability,
+ * pass in the offset of the current extended capability register.
+ *
+ * Returns 0 if there is no next extended capability register, or returns the
+ * register offset from the PCI registers base address.
+ */
+static inline int xhci_find_next_cap_offset(void __iomem *base, int ext_offset)
+{
+ u32 next;
+
+ next = readl(base + ext_offset);
+
+ if (ext_offset == XHCI_HCC_PARAMS_OFFSET)
+ /* Find the first extended capability */
+ next = XHCI_HCC_EXT_CAPS(next);
+ else
+ /* Find the next extended capability */
+ next = XHCI_EXT_CAPS_NEXT(next);
+ if (!next)
+ return 0;
+ /*
+ * Address calculation from offset of extended capabilities
+ * (or HCCPARAMS) register - see section 5.3.6 and section 7.
+ */
+ return ext_offset + (next << 2);
+}
+
+/**
+ * Find the offset of the extended capabilities with capability ID id.
+ *
+ * @base PCI MMIO registers base address.
+ * @ext_offset Offset from base of the first extended capability to look at,
+ * or the address of HCCPARAMS.
+ * @id Extended capability ID to search for.
+ *
+ * This uses an arbitrary limit of XHCI_MAX_EXT_CAPS extended capabilities
+ * to make sure that the list doesn't contain a loop.
+ */
+static inline int xhci_find_ext_cap_by_id(void __iomem *base, int ext_offset, int id)
+{
+ u32 val;
+ int limit = XHCI_MAX_EXT_CAPS;
+
+ while (ext_offset && limit > 0) {
+ val = readl(base + ext_offset);
+ if (XHCI_EXT_CAPS_ID(val) == id)
+ break;
+ ext_offset = xhci_find_next_cap_offset(base, ext_offset);
+ limit--;
+ }
+ if (limit > 0)
+ return ext_offset;
+ return 0;
+}
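+
+/*
+ * Usage sketch (illustrative, not part of this header): finding the USB
+ * Legacy Support capability from an ioremap'd register base 'base':
+ *
+ *	u32 legsup;
+ *	int offset = xhci_find_ext_cap_by_id(base, XHCI_HCC_PARAMS_OFFSET,
+ *			XHCI_EXT_CAPS_LEGACY);
+ *	if (offset)
+ *		legsup = readl(base + offset);
+ */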
diff --git a/drivers/usb/host/xhci-hcd.c b/drivers/usb/host/xhci-hcd.c
new file mode 100644
index 00000000000..dba3e07ccd0
--- /dev/null
+++ b/drivers/usb/host/xhci-hcd.c
@@ -0,0 +1,1274 @@
+/*
+ * xHCI host controller driver
+ *
+ * Copyright (C) 2008 Intel Corp.
+ *
+ * Author: Sarah Sharp
+ * Some code borrowed from the Linux EHCI driver.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/irq.h>
+#include <linux/module.h>
+
+#include "xhci.h"
+
+#define DRIVER_AUTHOR "Sarah Sharp"
+#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"
+
+/* TODO: copied from ehci-hcd.c - can this be refactored? */
+/*
+ * handshake - spin reading hc until handshake completes or fails
+ * @ptr: address of hc register to be read
+ * @mask: bits to look at in result of read
+ * @done: value of those bits when handshake succeeds
+ * @usec: timeout in microseconds
+ *
+ * Returns negative errno, or zero on success
+ *
+ * Success happens when the "mask" bits have the specified value (hardware
+ * handshake done). There are two failure modes: "usec" microseconds have
+ * passed (major hardware flakeout), or the register reads as all-ones
+ * (hardware removed).
+ */
+static int handshake(struct xhci_hcd *xhci, void __iomem *ptr,
+ u32 mask, u32 done, int usec)
+{
+ u32 result;
+
+ do {
+ result = xhci_readl(xhci, ptr);
+ if (result == ~(u32)0) /* card removed */
+ return -ENODEV;
+ result &= mask;
+ if (result == done)
+ return 0;
+ udelay(1);
+ usec--;
+ } while (usec > 0);
+ return -ETIMEDOUT;
+}
+
+/*
+ * Force HC into halt state.
+ *
+ * Disable any IRQs and clear the run/stop bit.
+ * HC will complete any current and actively pipelined transactions, and
+ * should halt within 16 microframes of the run/stop bit being cleared.
+ * Read HC Halted bit in the status register to see when the HC is finished.
+ * XXX: shouldn't we set HC_STATE_HALT here somewhere?
+ */
+int xhci_halt(struct xhci_hcd *xhci)
+{
+ u32 halted;
+ u32 cmd;
+ u32 mask;
+
+ xhci_dbg(xhci, "// Halt the HC\n");
+ /* Disable all interrupts from the host controller */
+ mask = ~(XHCI_IRQS);
+ halted = xhci_readl(xhci, &xhci->op_regs->status) & STS_HALT;
+ if (!halted)
+ mask &= ~CMD_RUN;
+
+ cmd = xhci_readl(xhci, &xhci->op_regs->command);
+ cmd &= mask;
+ xhci_writel(xhci, cmd, &xhci->op_regs->command);
+
+ return handshake(xhci, &xhci->op_regs->status,
+ STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
+}
+
+/*
+ * Reset a halted HC, and set the internal HC state to HC_STATE_HALT.
+ *
+ * This resets pipelines, timers, counters, state machines, etc.
+ * Transactions will be terminated immediately, and operational registers
+ * will be set to their defaults.
+ */
+int xhci_reset(struct xhci_hcd *xhci)
+{
+ u32 command;
+ u32 state;
+
+ state = xhci_readl(xhci, &xhci->op_regs->status);
+ BUG_ON((state & STS_HALT) == 0);
+
+ xhci_dbg(xhci, "// Reset the HC\n");
+ command = xhci_readl(xhci, &xhci->op_regs->command);
+ command |= CMD_RESET;
+ xhci_writel(xhci, command, &xhci->op_regs->command);
+ /* XXX: Why does EHCI set this here? Shouldn't other code do this? */
+ xhci_to_hcd(xhci)->state = HC_STATE_HALT;
+
+ return handshake(xhci, &xhci->op_regs->command, CMD_RESET, 0, 250 * 1000);
+}
+
+/*
+ * Stop the HC from processing the endpoint queues.
+ */
+static void xhci_quiesce(struct xhci_hcd *xhci)
+{
+ /*
+ * Queues are per endpoint, so we need to disable an endpoint or slot.
+ *
+ * To disable a slot, we need to insert a disable slot command on the
+ * command ring and ring the doorbell. This will also free any internal
+ * resources associated with the slot (which might not be what we want).
+ *
+ * A Release Endpoint command sounds better - doesn't free internal HC
+ * memory, but removes the endpoints from the schedule and releases the
+ * bandwidth, disables the doorbells, and clears the endpoint enable
+ * flag. Usually used prior to a set interface command.
+ *
+ * TODO: Implement after command ring code is done.
+ */
+ BUG_ON(!HC_IS_RUNNING(xhci_to_hcd(xhci)->state));
+ xhci_dbg(xhci, "Finished quiescing -- code not written yet\n");
+}
+
+#if 0
+/* Set up MSI-X table for entry 0 (may claim other entries later) */
+static int xhci_setup_msix(struct xhci_hcd *xhci)
+{
+ int ret;
+ struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
+
+ xhci->msix_count = 0;
+ /* XXX: did I do this right? ixgbe does kcalloc for more than one */
+ xhci->msix_entries = kmalloc(sizeof(struct msix_entry), GFP_KERNEL);
+ if (!xhci->msix_entries) {
+ xhci_err(xhci, "Failed to allocate MSI-X entries\n");
+ return -ENOMEM;
+ }
+ xhci->msix_entries[0].entry = 0;
+
+ ret = pci_enable_msix(pdev, xhci->msix_entries, xhci->msix_count);
+ if (ret) {
+ xhci_err(xhci, "Failed to enable MSI-X\n");
+ goto free_entries;
+ }
+
+ /*
+ * Pass the xhci pointer value as the request_irq "cookie".
+ * If more irqs are added, this will need to be unique for each one.
+ */
+ ret = request_irq(xhci->msix_entries[0].vector, &xhci_irq, 0,
+ "xHCI", xhci_to_hcd(xhci));
+ if (ret) {
+ xhci_err(xhci, "Failed to allocate MSI-X interrupt\n");
+ goto disable_msix;
+ }
+ xhci_dbg(xhci, "Finished setting up MSI-X\n");
+ return 0;
+
+disable_msix:
+ pci_disable_msix(pdev);
+free_entries:
+ kfree(xhci->msix_entries);
+ xhci->msix_entries = NULL;
+ return ret;
+}
+
+/* XXX: code duplication; can xhci_setup_msix call this? */
+/* Free any IRQs and disable MSI-X */
+static void xhci_cleanup_msix(struct xhci_hcd *xhci)
+{
+ struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
+ if (!xhci->msix_entries)
+ return;
+
+ free_irq(xhci->msix_entries[0].vector, xhci);
+ pci_disable_msix(pdev);
+ kfree(xhci->msix_entries);
+ xhci->msix_entries = NULL;
+ xhci_dbg(xhci, "Finished cleaning up MSI-X\n");
+}
+#endif
+
+/*
+ * Initialize memory for HCD and xHC (one-time init).
+ *
+ * Program the PAGESIZE register, initialize the device context array, create
+ * device contexts (?), set up a command ring segment (or two?), create event
+ * ring (one for now).
+ */
+int xhci_init(struct usb_hcd *hcd)
+{
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ int retval = 0;
+
+ xhci_dbg(xhci, "xhci_init\n");
+ spin_lock_init(&xhci->lock);
+ retval = xhci_mem_init(xhci, GFP_KERNEL);
+ xhci_dbg(xhci, "Finished xhci_init\n");
+
+ return retval;
+}
+
+/*
+ * Called in interrupt context when there might be work
+ * queued on the event ring
+ *
+ * xhci->lock must be held by caller.
+ */
+static void xhci_work(struct xhci_hcd *xhci)
+{
+ u32 temp;
+
+ /*
+ * Clear the op reg interrupt status first,
+ * so we can receive interrupts from other MSI-X interrupters.
+ * Write 1 to clear the interrupt status.
+ */
+ temp = xhci_readl(xhci, &xhci->op_regs->status);
+ temp |= STS_EINT;
+ xhci_writel(xhci, temp, &xhci->op_regs->status);
+ /* FIXME when MSI-X is supported and there are multiple vectors */
+ /* Clear the MSI-X event interrupt status */
+
+ /* Acknowledge the interrupt */
+ temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
+ temp |= 0x3;
+ xhci_writel(xhci, temp, &xhci->ir_set->irq_pending);
+ /* Flush posted writes */
+ xhci_readl(xhci, &xhci->ir_set->irq_pending);
+
+ /* FIXME this should be a delayed service routine that clears the EHB */
+ xhci_handle_event(xhci);
+
+ /* Clear the event handler busy flag; the event ring should be empty. */
+ temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[0]);
+ xhci_writel(xhci, temp & ~ERST_EHB, &xhci->ir_set->erst_dequeue[0]);
+ /* Flush posted writes -- FIXME is this necessary? */
+ xhci_readl(xhci, &xhci->ir_set->irq_pending);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * xHCI spec says we can get an interrupt, and if the HC has an error condition,
+ * we might get bad data out of the event ring. Section 4.10.2.7 has a list of
+ * indicators of an event TRB error, but we check the status *first* to be safe.
+ */
+irqreturn_t xhci_irq(struct usb_hcd *hcd)
+{
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ u32 temp, temp2;
+
+ spin_lock(&xhci->lock);
+ /* Check if the xHC generated the interrupt, or the irq is shared */
+ temp = xhci_readl(xhci, &xhci->op_regs->status);
+ temp2 = xhci_readl(xhci, &xhci->ir_set->irq_pending);
+ if (!(temp & STS_EINT) && !ER_IRQ_PENDING(temp2)) {
+ spin_unlock(&xhci->lock);
+ return IRQ_NONE;
+ }
+
+ if (temp & STS_FATAL) {
+ xhci_warn(xhci, "WARNING: Host System Error\n");
+ xhci_halt(xhci);
+ xhci_to_hcd(xhci)->state = HC_STATE_HALT;
+ spin_unlock(&xhci->lock);
+		return IRQ_HANDLED;
+ }
+
+ xhci_work(xhci);
+ spin_unlock(&xhci->lock);
+
+ return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
+void xhci_event_ring_work(unsigned long arg)
+{
+ unsigned long flags;
+ int temp;
+ struct xhci_hcd *xhci = (struct xhci_hcd *) arg;
+ int i, j;
+
+ xhci_dbg(xhci, "Poll event ring: %lu\n", jiffies);
+
+ spin_lock_irqsave(&xhci->lock, flags);
+ temp = xhci_readl(xhci, &xhci->op_regs->status);
+ xhci_dbg(xhci, "op reg status = 0x%x\n", temp);
+ temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
+ xhci_dbg(xhci, "ir_set 0 pending = 0x%x\n", temp);
+ xhci_dbg(xhci, "No-op commands handled = %d\n", xhci->noops_handled);
+ xhci_dbg(xhci, "HC error bitmask = 0x%x\n", xhci->error_bitmask);
+ xhci->error_bitmask = 0;
+ xhci_dbg(xhci, "Event ring:\n");
+ xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
+ xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
+ temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[0]);
+ temp &= ERST_PTR_MASK;
+ xhci_dbg(xhci, "ERST deq = 0x%x\n", temp);
+ xhci_dbg(xhci, "Command ring:\n");
+ xhci_debug_segment(xhci, xhci->cmd_ring->deq_seg);
+ xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
+ xhci_dbg_cmd_ptrs(xhci);
+ for (i = 0; i < MAX_HC_SLOTS; ++i) {
+ if (xhci->devs[i]) {
+ for (j = 0; j < 31; ++j) {
+ if (xhci->devs[i]->ep_rings[j]) {
+ xhci_dbg(xhci, "Dev %d endpoint ring %d:\n", i, j);
+ xhci_debug_segment(xhci, xhci->devs[i]->ep_rings[j]->deq_seg);
+ }
+ }
+ }
+ }
+
+ if (xhci->noops_submitted != NUM_TEST_NOOPS)
+ if (xhci_setup_one_noop(xhci))
+ xhci_ring_cmd_db(xhci);
+ spin_unlock_irqrestore(&xhci->lock, flags);
+
+ if (!xhci->zombie)
+ mod_timer(&xhci->event_ring_timer, jiffies + POLL_TIMEOUT * HZ);
+ else
+ xhci_dbg(xhci, "Quit polling the event ring.\n");
+}
+#endif
+
+/*
+ * Start the HC after it was halted.
+ *
+ * This function is called by the USB core when the HC driver is added.
+ * Its opposite is xhci_stop().
+ *
+ * xhci_init() must be called once before this function can be called.
+ * Reset the HC, enable device slot contexts, program DCBAAP, and
+ * set command ring pointer and event ring pointer.
+ *
+ * Setup MSI-X vectors and enable interrupts.
+ */
+int xhci_run(struct usb_hcd *hcd)
+{
+ u32 temp;
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ void (*doorbell)(struct xhci_hcd *) = NULL;
+
+ hcd->uses_new_polling = 1;
+ hcd->poll_rh = 0;
+
+ xhci_dbg(xhci, "xhci_run\n");
+#if 0 /* FIXME: MSI not setup yet */
+ /* Do this at the very last minute */
+ ret = xhci_setup_msix(xhci);
+ if (!ret)
+ return ret;
+
+ return -ENOSYS;
+#endif
+#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
+ init_timer(&xhci->event_ring_timer);
+ xhci->event_ring_timer.data = (unsigned long) xhci;
+ xhci->event_ring_timer.function = xhci_event_ring_work;
+ /* Poll the event ring */
+ xhci->event_ring_timer.expires = jiffies + POLL_TIMEOUT * HZ;
+ xhci->zombie = 0;
+ xhci_dbg(xhci, "Setting event ring polling timer\n");
+ add_timer(&xhci->event_ring_timer);
+#endif
+
+ xhci_dbg(xhci, "// Set the interrupt modulation register\n");
+ temp = xhci_readl(xhci, &xhci->ir_set->irq_control);
+ temp &= ~ER_IRQ_INTERVAL_MASK;
+ temp |= (u32) 160;
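+	/* The IMOD interval field counts 250ns units, so 160 is ~40us */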
+ xhci_writel(xhci, temp, &xhci->ir_set->irq_control);
+
+ /* Set the HCD state before we enable the irqs */
+ hcd->state = HC_STATE_RUNNING;
+ temp = xhci_readl(xhci, &xhci->op_regs->command);
+ temp |= (CMD_EIE);
+ xhci_dbg(xhci, "// Enable interrupts, cmd = 0x%x.\n",
+ temp);
+ xhci_writel(xhci, temp, &xhci->op_regs->command);
+
+ temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
+ xhci_dbg(xhci, "// Enabling event ring interrupter %p by writing 0x%x to irq_pending\n",
+ xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
+ xhci_writel(xhci, ER_IRQ_ENABLE(temp),
+ &xhci->ir_set->irq_pending);
+ xhci_print_ir_set(xhci, xhci->ir_set, 0);
+
+ if (NUM_TEST_NOOPS > 0)
+ doorbell = xhci_setup_one_noop(xhci);
+
+ xhci_dbg(xhci, "Command ring memory map follows:\n");
+ xhci_debug_ring(xhci, xhci->cmd_ring);
+ xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
+ xhci_dbg_cmd_ptrs(xhci);
+
+ xhci_dbg(xhci, "ERST memory map follows:\n");
+ xhci_dbg_erst(xhci, &xhci->erst);
+ xhci_dbg(xhci, "Event ring:\n");
+ xhci_debug_ring(xhci, xhci->event_ring);
+ xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
+ temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[0]);
+ temp &= ERST_PTR_MASK;
+ xhci_dbg(xhci, "ERST deq = 0x%x\n", temp);
+ temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[1]);
+ xhci_dbg(xhci, "ERST deq upper = 0x%x\n", temp);
+
+ temp = xhci_readl(xhci, &xhci->op_regs->command);
+ temp |= (CMD_RUN);
+ xhci_dbg(xhci, "// Turn on HC, cmd = 0x%x.\n",
+ temp);
+ xhci_writel(xhci, temp, &xhci->op_regs->command);
+ /* Flush PCI posted writes */
+ temp = xhci_readl(xhci, &xhci->op_regs->command);
+ xhci_dbg(xhci, "// @%p = 0x%x\n", &xhci->op_regs->command, temp);
+ if (doorbell)
+ (*doorbell)(xhci);
+
+ xhci_dbg(xhci, "Finished xhci_run\n");
+ return 0;
+}
+
+/*
+ * Stop xHCI driver.
+ *
+ * This function is called by the USB core when the HC driver is removed.
+ * Its opposite is xhci_run().
+ *
+ * Disable device contexts, disable IRQs, and quiesce the HC.
+ * Reset the HC, finish any completed transactions, and cleanup memory.
+ */
+void xhci_stop(struct usb_hcd *hcd)
+{
+ u32 temp;
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+
+ spin_lock_irq(&xhci->lock);
+ if (HC_IS_RUNNING(hcd->state))
+ xhci_quiesce(xhci);
+ xhci_halt(xhci);
+ xhci_reset(xhci);
+ spin_unlock_irq(&xhci->lock);
+
+#if 0 /* No MSI yet */
+ xhci_cleanup_msix(xhci);
+#endif
+#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
+ /* Tell the event ring poll function not to reschedule */
+ xhci->zombie = 1;
+ del_timer_sync(&xhci->event_ring_timer);
+#endif
+
+ xhci_dbg(xhci, "// Disabling event ring interrupts\n");
+ temp = xhci_readl(xhci, &xhci->op_regs->status);
+ xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
+ temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
+ xhci_writel(xhci, ER_IRQ_DISABLE(temp),
+ &xhci->ir_set->irq_pending);
+ xhci_print_ir_set(xhci, xhci->ir_set, 0);
+
+ xhci_dbg(xhci, "cleaning up memory\n");
+ xhci_mem_cleanup(xhci);
+ xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
+ xhci_readl(xhci, &xhci->op_regs->status));
+}
+
+/*
+ * Shutdown HC (not bus-specific)
+ *
+ * This is called when the machine is rebooting or halting. We assume that the
+ * machine will be powered off, and the HC's internal state will be reset.
+ * Don't bother to free memory.
+ */
+void xhci_shutdown(struct usb_hcd *hcd)
+{
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+
+ spin_lock_irq(&xhci->lock);
+ xhci_halt(xhci);
+ spin_unlock_irq(&xhci->lock);
+
+#if 0
+ xhci_cleanup_msix(xhci);
+#endif
+
+ xhci_dbg(xhci, "xhci_shutdown completed - status = %x\n",
+ xhci_readl(xhci, &xhci->op_regs->status));
+}
+
+/*-------------------------------------------------------------------------*/
+
+/**
+ * xhci_get_endpoint_index - Used for passing endpoint bitmasks between the core and
+ * HCDs. Find the index for an endpoint given its descriptor. Use the return
+ * value to build the endpoint bitmask: flag = 1 << (index + 1).
+ *
+ * Index = (epnum * 2) + direction - 1,
+ * where direction = 0 for OUT, 1 for IN.
+ * For control endpoints, the IN index is used (OUT index is unused), so
+ * index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2)
+ */
+unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc)
+{
+ unsigned int index;
+ if (usb_endpoint_xfer_control(desc))
+ index = (unsigned int) (usb_endpoint_num(desc)*2);
+ else
+ index = (unsigned int) (usb_endpoint_num(desc)*2) +
+ (usb_endpoint_dir_in(desc) ? 1 : 0) - 1;
+ return index;
+}
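+
+/*
+ * Worked examples for the index math above (illustrative): ep 0x81
+ * (ep 1 IN) gives (1 * 2) + 1 - 1 = 2; ep 0x02 (ep 2 OUT) gives
+ * (2 * 2) + 0 - 1 = 3; control ep 0x00 gives (0 * 2) = 0.
+ */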
+
+/* Find the flag for this endpoint (for use in the control context). Use the
+ * endpoint index to create a bitmask. The slot context is bit 0, endpoint 0 is
+ * bit 1, etc.
+ */
+unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc)
+{
+ return 1 << (xhci_get_endpoint_index(desc) + 1);
+}
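+
+/*
+ * Example (illustrative): ep 1 IN has endpoint index 2 (see above), so its
+ * flag is 1 << (2 + 1) = 0x8; bit 0 is reserved for the slot context.
+ */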
+
+/* Compute the last valid endpoint context index. Basically, this is the
+ * endpoint index plus one. For slot contexts with more than one valid endpoint,
+ * we find the most significant bit set in the added contexts flags.
+ * e.g. ep 1 IN (with epnum 0x81) => added_ctxs = 0b1000
+ * fls(0b1000) = 4, but the endpoint context index is 3, so subtract one.
+ */
+static inline unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
+{
+ return fls(added_ctxs) - 1;
+}
+
+/* Returns 1 if the arguments are OK;
+ * returns 0 if this is a root hub; returns -EINVAL for NULL pointers.
+ */
+int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
+ struct usb_host_endpoint *ep, int check_ep, const char *func) {
+ if (!hcd || (check_ep && !ep) || !udev) {
+ printk(KERN_DEBUG "xHCI %s called with invalid args\n",
+ func);
+ return -EINVAL;
+ }
+ if (!udev->parent) {
+ printk(KERN_DEBUG "xHCI %s called for root hub\n",
+ func);
+ return 0;
+ }
+ if (!udev->slot_id) {
+ printk(KERN_DEBUG "xHCI %s called with unaddressed device\n",
+ func);
+ return -EINVAL;
+ }
+ return 1;
+}
+
+/*
+ * non-error returns are a promise to giveback() the urb later
+ * we drop ownership so next owner (or urb unlink) can get it
+ */
+int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
+{
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ unsigned long flags;
+ int ret = 0;
+ unsigned int slot_id, ep_index;
+
+ if (!urb || xhci_check_args(hcd, urb->dev, urb->ep, true, __func__) <= 0)
+ return -EINVAL;
+
+ slot_id = urb->dev->slot_id;
+ ep_index = xhci_get_endpoint_index(&urb->ep->desc);
+
+ spin_lock_irqsave(&xhci->lock, flags);
+ if (!xhci->devs || !xhci->devs[slot_id]) {
+ if (!in_interrupt())
+ dev_warn(&urb->dev->dev, "WARN: urb submitted for dev with no Slot ID\n");
+ ret = -EINVAL;
+ goto exit;
+ }
+ if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags)) {
+ if (!in_interrupt())
+ xhci_dbg(xhci, "urb submitted during PCI suspend\n");
+ ret = -ESHUTDOWN;
+ goto exit;
+ }
+ if (usb_endpoint_xfer_control(&urb->ep->desc))
+ ret = xhci_queue_ctrl_tx(xhci, mem_flags, urb,
+ slot_id, ep_index);
+ else if (usb_endpoint_xfer_bulk(&urb->ep->desc))
+ ret = xhci_queue_bulk_tx(xhci, mem_flags, urb,
+ slot_id, ep_index);
+ else
+ ret = -EINVAL;
+exit:
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ return ret;
+}
+
+/*
+ * Remove the URB's TD from the endpoint ring. This may cause the HC to stop
+ * USB transfers, potentially stopping in the middle of a TRB buffer. The HC
+ * should pick up where it left off in the TD, unless a Set Transfer Ring
+ * Dequeue Pointer is issued.
+ *
+ * The TRBs that make up the buffers for the canceled URB will be "removed" from
+ * the ring. Since the ring is a contiguous structure, they can't be physically
+ * removed. Instead, there are two options:
+ *
+ * 1) If the HC is in the middle of processing the URB to be canceled, we
+ * simply move the ring's dequeue pointer past those TRBs using the Set
+ * Transfer Ring Dequeue Pointer command. This will be the common case,
+ * when drivers timeout on the last submitted URB and attempt to cancel.
+ *
+ * 2) If the HC is in the middle of a different TD, we turn the TRBs into a
+ * series of 1-TRB transfer no-op TDs. (No-ops shouldn't be chained.) The
+ *    HC will need to invalidate any TRBs it has cached after the stop
+ * endpoint command, as noted in the xHCI 0.95 errata.
+ *
+ * 3) The TD may have completed by the time the Stop Endpoint Command
+ * completes, so software needs to handle that case too.
+ *
+ * This function should protect against the TD enqueueing code ringing the
+ * doorbell while this code is waiting for a Stop Endpoint command to complete.
+ * It also needs to account for multiple cancellations happening at the same
+ * time for the same endpoint.
+ *
+ * Note that this function can be called in any context, or so says
+ * usb_hcd_unlink_urb()
+ */
+int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
+{
+ unsigned long flags;
+ int ret;
+ struct xhci_hcd *xhci;
+ struct xhci_td *td;
+ unsigned int ep_index;
+ struct xhci_ring *ep_ring;
+
+ xhci = hcd_to_xhci(hcd);
+ spin_lock_irqsave(&xhci->lock, flags);
+ /* Make sure the URB hasn't completed or been unlinked already */
+ ret = usb_hcd_check_unlink_urb(hcd, urb, status);
+ if (ret || !urb->hcpriv)
+ goto done;
+
+ xhci_dbg(xhci, "Cancel URB %p\n", urb);
+ ep_index = xhci_get_endpoint_index(&urb->ep->desc);
+ ep_ring = xhci->devs[urb->dev->slot_id]->ep_rings[ep_index];
+ td = (struct xhci_td *) urb->hcpriv;
+
+ ep_ring->cancels_pending++;
+ list_add_tail(&td->cancelled_td_list, &ep_ring->cancelled_td_list);
+ /* Queue a stop endpoint command, but only if this is
+ * the first cancellation to be handled.
+ */
+ if (ep_ring->cancels_pending == 1) {
+ xhci_queue_stop_endpoint(xhci, urb->dev->slot_id, ep_index);
+ xhci_ring_cmd_db(xhci);
+ }
+done:
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ return ret;
+}
+
+/* Drop an endpoint from a new bandwidth configuration for this device.
+ * Only one call to this function is allowed per endpoint before
+ * check_bandwidth() or reset_bandwidth() must be called.
+ * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
+ * add the endpoint to the schedule with possibly new parameters denoted by a
+ * different endpoint descriptor in usb_host_endpoint.
+ * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
+ * not allowed.
+ *
+ * The USB core will not allow URBs to be queued to an endpoint that is being
+ * disabled, so there's no need for mutual exclusion to protect
+ * the xhci->devs[slot_id] structure.
+ */
+int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
+ struct usb_host_endpoint *ep)
+{
+ struct xhci_hcd *xhci;
+ struct xhci_device_control *in_ctx;
+ unsigned int last_ctx;
+ unsigned int ep_index;
+ struct xhci_ep_ctx *ep_ctx;
+ u32 drop_flag;
+ u32 new_add_flags, new_drop_flags, new_slot_info;
+ int ret;
+
+ ret = xhci_check_args(hcd, udev, ep, 1, __func__);
+ if (ret <= 0)
+ return ret;
+ xhci = hcd_to_xhci(hcd);
+ xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
+
+ drop_flag = xhci_get_endpoint_flag(&ep->desc);
+ if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) {
+ xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n",
+ __func__, drop_flag);
+ return 0;
+ }
+
+ if (!xhci->devs || !xhci->devs[udev->slot_id]) {
+ xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ in_ctx = xhci->devs[udev->slot_id]->in_ctx;
+ ep_index = xhci_get_endpoint_index(&ep->desc);
+ ep_ctx = &xhci->devs[udev->slot_id]->out_ctx->ep[ep_index];
+ /* If the HC already knows the endpoint is disabled,
+ * or the HCD has noted it is disabled, ignore this request
+ */
+ if ((ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED ||
+ in_ctx->drop_flags & xhci_get_endpoint_flag(&ep->desc)) {
+ xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
+ __func__, ep);
+ return 0;
+ }
+
+ in_ctx->drop_flags |= drop_flag;
+ new_drop_flags = in_ctx->drop_flags;
+
+	in_ctx->add_flags &= ~drop_flag;
+ new_add_flags = in_ctx->add_flags;
+
+ last_ctx = xhci_last_valid_endpoint(in_ctx->add_flags);
+ /* Update the last valid endpoint context, if we deleted the last one */
+ if ((in_ctx->slot.dev_info & LAST_CTX_MASK) > LAST_CTX(last_ctx)) {
+ in_ctx->slot.dev_info &= ~LAST_CTX_MASK;
+ in_ctx->slot.dev_info |= LAST_CTX(last_ctx);
+ }
+ new_slot_info = in_ctx->slot.dev_info;
+
+ xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);
+
+ xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
+ (unsigned int) ep->desc.bEndpointAddress,
+ udev->slot_id,
+ (unsigned int) new_drop_flags,
+ (unsigned int) new_add_flags,
+ (unsigned int) new_slot_info);
+ return 0;
+}
+
+/* Add an endpoint to a new possible bandwidth configuration for this device.
+ * Only one call to this function is allowed per endpoint before
+ * check_bandwidth() or reset_bandwidth() must be called.
+ * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
+ * add the endpoint to the schedule with possibly new parameters denoted by a
+ * different endpoint descriptor in usb_host_endpoint.
+ * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
+ * not allowed.
+ *
+ * The USB core will not allow URBs to be queued to an endpoint until the
+ * configuration or alt setting is installed in the device, so there's no need
+ * for mutual exclusion to protect the xhci->devs[slot_id] structure.
+ */
+int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
+ struct usb_host_endpoint *ep)
+{
+ struct xhci_hcd *xhci;
+ struct xhci_device_control *in_ctx;
+ unsigned int ep_index;
+ struct xhci_ep_ctx *ep_ctx;
+ u32 added_ctxs;
+ unsigned int last_ctx;
+ u32 new_add_flags, new_drop_flags, new_slot_info;
+ int ret = 0;
+
+ ret = xhci_check_args(hcd, udev, ep, 1, __func__);
+ if (ret <= 0)
+ return ret;
+ xhci = hcd_to_xhci(hcd);
+
+ added_ctxs = xhci_get_endpoint_flag(&ep->desc);
+ last_ctx = xhci_last_valid_endpoint(added_ctxs);
+ if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) {
+ /* FIXME when we have to issue an evaluate endpoint command to
+ * deal with ep0 max packet size changing once we get the
+ * descriptors
+ */
+ xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n",
+ __func__, added_ctxs);
+ return 0;
+ }
+
+ if (!xhci->devs || !xhci->devs[udev->slot_id]) {
+ xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ in_ctx = xhci->devs[udev->slot_id]->in_ctx;
+ ep_index = xhci_get_endpoint_index(&ep->desc);
+ ep_ctx = &xhci->devs[udev->slot_id]->out_ctx->ep[ep_index];
+ /* If the HCD has already noted the endpoint is enabled,
+ * ignore this request.
+ */
+ if (in_ctx->add_flags & xhci_get_endpoint_flag(&ep->desc)) {
+ xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",
+ __func__, ep);
+ return 0;
+ }
+
+ /*
+ * Configuration and alternate setting changes must be done in
+	 * process context, not interrupt context (or so the documentation
+	 * for usb_set_interface() and usb_set_configuration() claims).
+ */
+ if (xhci_endpoint_init(xhci, xhci->devs[udev->slot_id],
+ udev, ep, GFP_KERNEL) < 0) {
+ dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n",
+ __func__, ep->desc.bEndpointAddress);
+ return -ENOMEM;
+ }
+
+ in_ctx->add_flags |= added_ctxs;
+ new_add_flags = in_ctx->add_flags;
+
+ /* If xhci_endpoint_disable() was called for this endpoint, but the
+ * xHC hasn't been notified yet through the check_bandwidth() call,
+ * this re-adds a new state for the endpoint from the new endpoint
+ * descriptors. We must drop and re-add this endpoint, so we leave the
+ * drop flags alone.
+ */
+ new_drop_flags = in_ctx->drop_flags;
+
+	/* Update the last valid endpoint context, if we just added one past it */
+ if ((in_ctx->slot.dev_info & LAST_CTX_MASK) < LAST_CTX(last_ctx)) {
+ in_ctx->slot.dev_info &= ~LAST_CTX_MASK;
+ in_ctx->slot.dev_info |= LAST_CTX(last_ctx);
+ }
+ new_slot_info = in_ctx->slot.dev_info;
+
+ xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
+ (unsigned int) ep->desc.bEndpointAddress,
+ udev->slot_id,
+ (unsigned int) new_drop_flags,
+ (unsigned int) new_add_flags,
+ (unsigned int) new_slot_info);
+ return 0;
+}
+
+static void xhci_zero_in_ctx(struct xhci_virt_device *virt_dev)
+{
+ struct xhci_ep_ctx *ep_ctx;
+ int i;
+
+ /* When a device's add flag and drop flag are zero, any subsequent
+ * configure endpoint command will leave that endpoint's state
+ * untouched. Make sure we don't leave any old state in the input
+ * endpoint contexts.
+ */
+ virt_dev->in_ctx->drop_flags = 0;
+ virt_dev->in_ctx->add_flags = 0;
+ virt_dev->in_ctx->slot.dev_info &= ~LAST_CTX_MASK;
+ /* Endpoint 0 is always valid */
+ virt_dev->in_ctx->slot.dev_info |= LAST_CTX(1);
+ for (i = 1; i < 31; ++i) {
+ ep_ctx = &virt_dev->in_ctx->ep[i];
+ ep_ctx->ep_info = 0;
+ ep_ctx->ep_info2 = 0;
+ ep_ctx->deq[0] = 0;
+ ep_ctx->deq[1] = 0;
+ ep_ctx->tx_info = 0;
+ }
+}
+
+/* Called after one or more calls to xhci_add_endpoint() or
+ * xhci_drop_endpoint(). If this call fails, the USB core is expected
+ * to call xhci_reset_bandwidth().
+ *
+ * Since we are in the middle of changing either configuration or
+ * installing a new alt setting, the USB core won't allow URBs to be
+ * enqueued for any endpoint on the old config or interface. Nothing
+ * else should be touching the xhci->devs[slot_id] structure, so we
+ * don't need to take the xhci->lock for manipulating that.
+ */
+int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
+{
+ int i;
+ int ret = 0;
+ int timeleft;
+ unsigned long flags;
+ struct xhci_hcd *xhci;
+ struct xhci_virt_device *virt_dev;
+
+ ret = xhci_check_args(hcd, udev, NULL, 0, __func__);
+ if (ret <= 0)
+ return ret;
+ xhci = hcd_to_xhci(hcd);
+
+ if (!udev->slot_id || !xhci->devs || !xhci->devs[udev->slot_id]) {
+ xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
+ __func__);
+ return -EINVAL;
+ }
+ xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
+ virt_dev = xhci->devs[udev->slot_id];
+
+ /* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */
+ virt_dev->in_ctx->add_flags |= SLOT_FLAG;
+ virt_dev->in_ctx->add_flags &= ~EP0_FLAG;
+ virt_dev->in_ctx->drop_flags &= ~SLOT_FLAG;
+ virt_dev->in_ctx->drop_flags &= ~EP0_FLAG;
+ xhci_dbg(xhci, "New Input Control Context:\n");
+ xhci_dbg_ctx(xhci, virt_dev->in_ctx, virt_dev->in_ctx_dma,
+ LAST_CTX_TO_EP_NUM(virt_dev->in_ctx->slot.dev_info));
+
+ spin_lock_irqsave(&xhci->lock, flags);
+ ret = xhci_queue_configure_endpoint(xhci, virt_dev->in_ctx_dma,
+ udev->slot_id);
+ if (ret < 0) {
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ xhci_dbg(xhci, "FIXME allocate a new ring segment\n");
+ return -ENOMEM;
+ }
+ xhci_ring_cmd_db(xhci);
+ spin_unlock_irqrestore(&xhci->lock, flags);
+
+ /* Wait for the configure endpoint command to complete */
+ timeleft = wait_for_completion_interruptible_timeout(
+ &virt_dev->cmd_completion,
+ USB_CTRL_SET_TIMEOUT);
+ if (timeleft <= 0) {
+ xhci_warn(xhci, "%s while waiting for configure endpoint command\n",
+ timeleft == 0 ? "Timeout" : "Signal");
+ /* FIXME cancel the configure endpoint command */
+ return -ETIME;
+ }
+
+ switch (virt_dev->cmd_status) {
+ case COMP_ENOMEM:
+ dev_warn(&udev->dev, "Not enough host controller resources "
+ "for new device state.\n");
+ ret = -ENOMEM;
+ /* FIXME: can we allocate more resources for the HC? */
+ break;
+ case COMP_BW_ERR:
+ dev_warn(&udev->dev, "Not enough bandwidth "
+ "for new device state.\n");
+ ret = -ENOSPC;
+ /* FIXME: can we go back to the old state? */
+ break;
+ case COMP_TRB_ERR:
+ /* the HCD set up something wrong */
+ dev_warn(&udev->dev, "ERROR: Endpoint drop flag = 0, add flag = 1, "
+ "and endpoint is not disabled.\n");
+ ret = -EINVAL;
+ break;
+ case COMP_SUCCESS:
+ dev_dbg(&udev->dev, "Successful Endpoint Configure command\n");
+ break;
+ default:
+ xhci_err(xhci, "ERROR: unexpected command completion "
+ "code 0x%x.\n", virt_dev->cmd_status);
+ ret = -EINVAL;
+ break;
+ }
+ if (ret) {
+ /* Callee should call reset_bandwidth() */
+ return ret;
+ }
+
+ xhci_dbg(xhci, "Output context after successful config ep cmd:\n");
+ xhci_dbg_ctx(xhci, virt_dev->out_ctx, virt_dev->out_ctx_dma,
+ LAST_CTX_TO_EP_NUM(virt_dev->in_ctx->slot.dev_info));
+
+ xhci_zero_in_ctx(virt_dev);
+ /* Free any old rings */
+ for (i = 1; i < 31; ++i) {
+ if (virt_dev->new_ep_rings[i]) {
+ xhci_ring_free(xhci, virt_dev->ep_rings[i]);
+ virt_dev->ep_rings[i] = virt_dev->new_ep_rings[i];
+ virt_dev->new_ep_rings[i] = NULL;
+ }
+ }
+
+ return ret;
+}
+
+void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
+{
+ struct xhci_hcd *xhci;
+ struct xhci_virt_device *virt_dev;
+ int i, ret;
+
+ ret = xhci_check_args(hcd, udev, NULL, 0, __func__);
+ if (ret <= 0)
+ return;
+ xhci = hcd_to_xhci(hcd);
+
+ if (!xhci->devs || !xhci->devs[udev->slot_id]) {
+ xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
+ __func__);
+ return;
+ }
+ xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
+ virt_dev = xhci->devs[udev->slot_id];
+ /* Free any rings allocated for added endpoints */
+ for (i = 0; i < 31; ++i) {
+ if (virt_dev->new_ep_rings[i]) {
+ xhci_ring_free(xhci, virt_dev->new_ep_rings[i]);
+ virt_dev->new_ep_rings[i] = NULL;
+ }
+ }
+ xhci_zero_in_ctx(virt_dev);
+}
+
+/*
+ * At this point, the struct usb_device is about to go away, the device has
+ * disconnected, and all traffic has been stopped and the endpoints have been
+ * disabled. Free any HC data structures associated with that device.
+ */
+void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
+{
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ unsigned long flags;
+
+ if (udev->slot_id == 0)
+ return;
+
+ spin_lock_irqsave(&xhci->lock, flags);
+ if (xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id)) {
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
+ return;
+ }
+ xhci_ring_cmd_db(xhci);
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ /*
+ * Event command completion handler will free any data structures
+ * associated with the slot. XXX Can free sleep?
+ */
+}
+
+/*
+ * Returns 0 if the xHC ran out of device slots, the Enable Slot command
+ * timed out, or allocating memory failed. Returns 1 on success.
+ */
+int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
+{
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ unsigned long flags;
+ int timeleft;
+ int ret;
+
+ spin_lock_irqsave(&xhci->lock, flags);
+ ret = xhci_queue_slot_control(xhci, TRB_ENABLE_SLOT, 0);
+ if (ret) {
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
+ return 0;
+ }
+ xhci_ring_cmd_db(xhci);
+ spin_unlock_irqrestore(&xhci->lock, flags);
+
+ /* XXX: how much time for xHC slot assignment? */
+ timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
+ USB_CTRL_SET_TIMEOUT);
+ if (timeleft <= 0) {
+ xhci_warn(xhci, "%s while waiting for a slot\n",
+ timeleft == 0 ? "Timeout" : "Signal");
+ /* FIXME cancel the enable slot request */
+ return 0;
+ }
+
+ if (!xhci->slot_id) {
+ xhci_err(xhci, "Error while assigning device slot ID\n");
+ return 0;
+ }
+ /* xhci_alloc_virt_device() does not touch rings; no need to lock */
+ if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_KERNEL)) {
+ /* Disable slot, if we can do it without mem alloc */
+ xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
+ spin_lock_irqsave(&xhci->lock, flags);
+ if (!xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id))
+ xhci_ring_cmd_db(xhci);
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ return 0;
+ }
+ udev->slot_id = xhci->slot_id;
+ /* Is this a LS or FS device under a HS hub? */
+	/* Hub or peripheral? */
+ return 1;
+}
+
+/*
+ * Issue an Address Device command (which will issue a SetAddress request to
+ * the device).
+ * We should be protected by the usb_address0_mutex in khubd's hub_port_init, so
+ * we should only issue and wait on one address command at a time.
+ *
+ * We add one to the device address issued by the hardware because the USB core
+ * uses address 1 for the root hubs (even though they're not really devices).
+ */
+int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
+{
+ unsigned long flags;
+ int timeleft;
+ struct xhci_virt_device *virt_dev;
+ int ret = 0;
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ u32 temp;
+
+ if (!udev->slot_id) {
+ xhci_dbg(xhci, "Bad Slot ID %d\n", udev->slot_id);
+ return -EINVAL;
+ }
+
+ virt_dev = xhci->devs[udev->slot_id];
+
+ /* If this is a Set Address to an unconfigured device, setup ep 0 */
+ if (!udev->config)
+ xhci_setup_addressable_virt_dev(xhci, udev);
+ /* Otherwise, assume the core has the device configured how it wants */
+
+ spin_lock_irqsave(&xhci->lock, flags);
+ ret = xhci_queue_address_device(xhci, virt_dev->in_ctx_dma,
+ udev->slot_id);
+ if (ret) {
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
+ return ret;
+ }
+ xhci_ring_cmd_db(xhci);
+ spin_unlock_irqrestore(&xhci->lock, flags);
+
+ /* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */
+ timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
+ USB_CTRL_SET_TIMEOUT);
+	/* FIXME: From section 4.3.4: "Software shall be responsible for timing
+	 * the SetAddress() 'recovery interval' required by USB and aborting the
+	 * command on a timeout."
+	 */
+ if (timeleft <= 0) {
+		xhci_warn(xhci, "%s while waiting for address device command\n",
+ timeleft == 0 ? "Timeout" : "Signal");
+ /* FIXME cancel the address device command */
+ return -ETIME;
+ }
+
+ switch (virt_dev->cmd_status) {
+ case COMP_CTX_STATE:
+ case COMP_EBADSLT:
+ xhci_err(xhci, "Setup ERROR: address device command for slot %d.\n",
+ udev->slot_id);
+ ret = -EINVAL;
+ break;
+ case COMP_TX_ERR:
+ dev_warn(&udev->dev, "Device not responding to set address.\n");
+ ret = -EPROTO;
+ break;
+ case COMP_SUCCESS:
+ xhci_dbg(xhci, "Successful Address Device command\n");
+ break;
+ default:
+ xhci_err(xhci, "ERROR: unexpected command completion "
+ "code 0x%x.\n", virt_dev->cmd_status);
+ ret = -EINVAL;
+ break;
+ }
+ if (ret) {
+ return ret;
+ }
+ temp = xhci_readl(xhci, &xhci->op_regs->dcbaa_ptr[0]);
+ xhci_dbg(xhci, "Op regs DCBAA ptr[0] = %#08x\n", temp);
+ temp = xhci_readl(xhci, &xhci->op_regs->dcbaa_ptr[1]);
+ xhci_dbg(xhci, "Op regs DCBAA ptr[1] = %#08x\n", temp);
+ xhci_dbg(xhci, "Slot ID %d dcbaa entry[0] @%p = %#08x\n",
+ udev->slot_id,
+ &xhci->dcbaa->dev_context_ptrs[2*udev->slot_id],
+ xhci->dcbaa->dev_context_ptrs[2*udev->slot_id]);
+ xhci_dbg(xhci, "Slot ID %d dcbaa entry[1] @%p = %#08x\n",
+ udev->slot_id,
+ &xhci->dcbaa->dev_context_ptrs[2*udev->slot_id+1],
+ xhci->dcbaa->dev_context_ptrs[2*udev->slot_id+1]);
+ xhci_dbg(xhci, "Output Context DMA address = %#08llx\n",
+ (unsigned long long)virt_dev->out_ctx_dma);
+ xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
+ xhci_dbg_ctx(xhci, virt_dev->in_ctx, virt_dev->in_ctx_dma, 2);
+ xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
+ xhci_dbg_ctx(xhci, virt_dev->out_ctx, virt_dev->out_ctx_dma, 2);
+ /*
+ * USB core uses address 1 for the roothubs, so we add one to the
+ * address given back to us by the HC.
+ */
+ udev->devnum = (virt_dev->out_ctx->slot.dev_state & DEV_ADDR_MASK) + 1;
+ /* Zero the input context control for later use */
+ virt_dev->in_ctx->add_flags = 0;
+ virt_dev->in_ctx->drop_flags = 0;
+ /* Mirror flags in the output context for future ep enable/disable */
+ virt_dev->out_ctx->add_flags = SLOT_FLAG | EP0_FLAG;
+ virt_dev->out_ctx->drop_flags = 0;
+
+ xhci_dbg(xhci, "Device address = %d\n", udev->devnum);
+ /* XXX Meh, not sure if anyone else but choose_address uses this. */
+ set_bit(udev->devnum, udev->bus->devmap.devicemap);
+
+ return 0;
+}
+
+int xhci_get_frame(struct usb_hcd *hcd)
+{
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ /* EHCI mods by the periodic size. Why? */
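+	/* MFINDEX counts 125us microframes, so shifting by 3 yields 1ms frames */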
+ return xhci_readl(xhci, &xhci->run_regs->microframe_index) >> 3;
+}
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_LICENSE("GPL");
+
+static int __init xhci_hcd_init(void)
+{
+#ifdef CONFIG_PCI
+ int retval = 0;
+
+ retval = xhci_register_pci();
+
+ if (retval < 0) {
+		printk(KERN_DEBUG "Problem registering PCI driver.\n");
+ return retval;
+ }
+#endif
+ /*
+ * Check the compiler generated sizes of structures that must be laid
+ * out in specific ways for hardware access.
+ */
+ BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8);
+ BUILD_BUG_ON(sizeof(struct xhci_slot_ctx) != 8*32/8);
+ BUILD_BUG_ON(sizeof(struct xhci_ep_ctx) != 8*32/8);
+ /* xhci_device_control has eight fields, and also
+ * embeds one xhci_slot_ctx and 31 xhci_ep_ctx
+ */
+ BUILD_BUG_ON(sizeof(struct xhci_device_control) != (8+8+8*31)*32/8);
+ BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8);
+ BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8);
+ BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8);
+ BUILD_BUG_ON(sizeof(struct xhci_cap_regs) != 7*32/8);
+ BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8);
+ /* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */
+ BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8);
+ return 0;
+}
+module_init(xhci_hcd_init);
+
+static void __exit xhci_hcd_cleanup(void)
+{
+#ifdef CONFIG_PCI
+ xhci_unregister_pci();
+#endif
+}
+module_exit(xhci_hcd_cleanup);
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
new file mode 100644
index 00000000000..eac5b53aa9e
--- /dev/null
+++ b/drivers/usb/host/xhci-hub.c
@@ -0,0 +1,308 @@
+/*
+ * xHCI host controller driver
+ *
+ * Copyright (C) 2008 Intel Corp.
+ *
+ * Author: Sarah Sharp
+ * Some code borrowed from the Linux EHCI driver.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <asm/unaligned.h>
+
+#include "xhci.h"
+
+static void xhci_hub_descriptor(struct xhci_hcd *xhci,
+ struct usb_hub_descriptor *desc)
+{
+ int ports;
+ u16 temp;
+
+ ports = HCS_MAX_PORTS(xhci->hcs_params1);
+
+ /* USB 3.0 hubs have a different descriptor, but we fake this for now */
+ desc->bDescriptorType = 0x29;
+ desc->bPwrOn2PwrGood = 10; /* xhci section 5.4.9 says 20ms max */
+ desc->bHubContrCurrent = 0;
+
+ desc->bNbrPorts = ports;
+ temp = 1 + (ports / 8);
+ desc->bDescLength = 7 + 2 * temp;
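+	/* e.g. 15 ports: temp = 1 + 15/8 = 2, so bDescLength = 7 + 2*2 = 11 */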
+
+ /* Why does core/hcd.h define bitmap? It's just confusing. */
+ memset(&desc->DeviceRemovable[0], 0, temp);
+ memset(&desc->DeviceRemovable[temp], 0xff, temp);
+
+ /* Ugh, these should be #defines, FIXME */
+ /* Using table 11-13 in USB 2.0 spec. */
+ temp = 0;
+ /* Bits 1:0 - support port power switching, or power always on */
+ if (HCC_PPC(xhci->hcc_params))
+ temp |= 0x0001;
+ else
+ temp |= 0x0002;
+ /* Bit 2 - root hubs are not part of a compound device */
+ /* Bits 4:3 - individual port over current protection */
+ temp |= 0x0008;
+ /* Bits 6:5 - no TTs in root ports */
+ /* Bit 7 - no port indicators */
+ desc->wHubCharacteristics = (__force __u16) cpu_to_le16(temp);
+}
+
+static unsigned int xhci_port_speed(unsigned int port_status)
+{
+ if (DEV_LOWSPEED(port_status))
+ return 1 << USB_PORT_FEAT_LOWSPEED;
+ if (DEV_HIGHSPEED(port_status))
+ return 1 << USB_PORT_FEAT_HIGHSPEED;
+ if (DEV_SUPERSPEED(port_status))
+ return 1 << USB_PORT_FEAT_SUPERSPEED;
+ /*
+ * FIXME: Yes, we should check for full speed, but the core uses that as
+ * a default in portspeed() in usb/core/hub.c (which is the only place
+ * USB_PORT_FEAT_*SPEED is used).
+ */
+ return 0;
+}
+
+/*
+ * These bits are Read Only (RO) and should be saved and written to the
+ * registers: 0, 3, 10:13, 30
+ * connect status, over-current status, port speed, and device removable.
+ * connect status and port speed are also sticky - meaning they're in
+ * the AUX well and they aren't changed by a hot, warm, or cold reset.
+ */
+#define XHCI_PORT_RO ((1<<0) | (1<<3) | (0xf<<10) | (1<<30))
+/*
+ * These bits are RW; writing a 0 clears the bit, writing a 1 sets the bit:
+ * bits 5:8, 9, 14:15, 25:27
+ * link state, port power, port indicator state, "wake on" enable state
+ */
+#define XHCI_PORT_RWS ((0xf<<5) | (1<<9) | (0x3<<14) | (0x7<<25))
+/*
+ * These bits are RW; writing a 1 sets the bit, writing a 0 has no effect:
+ * bit 4 (port reset)
+ */
+#define XHCI_PORT_RW1S ((1<<4))
+/*
+ * These bits are RW; writing a 1 clears the bit, writing a 0 has no effect:
+ * bits 1, 17, 18, 19, 20, 21, 22, 23
+ * port enable/disable, and
+ * change bits: connect, PED, warm port reset changed (reserved zero for USB 2.0 ports),
+ * over-current, reset, link state, and L1 change
+ */
+#define XHCI_PORT_RW1CS ((1<<1) | (0x7f<<17))
+/*
+ * Bit 16 is RW, and writing a '1' to it causes the link state control to be
+ * latched in
+ */
+#define XHCI_PORT_RW ((1<<16))
+/*
+ * These bits are Reserved Zero (RsvdZ) and zero should be written to them:
+ * bits 2, 24, 28:31
+ */
+#define XHCI_PORT_RZ ((1<<2) | (1<<24) | (0xf<<28))
+
+/*
+ * Given a port state, this function returns a value that would result in the
+ * port being in the same state, if the value was written to the port status
+ * control register.
+ * Save Read Only (RO) bits and save read/write bits where
+ * writing a 0 clears the bit and writing a 1 sets the bit (RWS).
+ * For all other types (RW1S, RW1CS, RW, and RZ), writing a '0' has no effect.
+ */
+static u32 xhci_port_state_to_neutral(u32 state)
+{
+ /* Save read-only status and port state */
+ return (state & XHCI_PORT_RO) | (state & XHCI_PORT_RWS);
+}
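+
+/*
+ * Example (illustrative): PORT_CSC (bit 17) is RW1CS, so writing back a raw
+ * port status read would clear a pending connect status change.  Writing
+ * 'xhci_port_state_to_neutral(temp) | PORT_POWER' instead leaves the change
+ * bits alone.
+ */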
+
+int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
+ u16 wIndex, char *buf, u16 wLength)
+{
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ int ports;
+ unsigned long flags;
+ u32 temp, status;
+ int retval = 0;
+ u32 __iomem *addr;
+ char *port_change_bit;
+
+ ports = HCS_MAX_PORTS(xhci->hcs_params1);
+
+ spin_lock_irqsave(&xhci->lock, flags);
+ switch (typeReq) {
+ case GetHubStatus:
+ /* No power source, over-current reported per port */
+ memset(buf, 0, 4);
+ break;
+ case GetHubDescriptor:
+ xhci_hub_descriptor(xhci, (struct usb_hub_descriptor *) buf);
+ break;
+ case GetPortStatus:
+ if (!wIndex || wIndex > ports)
+ goto error;
+ wIndex--;
+ status = 0;
+ addr = &xhci->op_regs->port_status_base + NUM_PORT_REGS*(wIndex & 0xff);
+ temp = xhci_readl(xhci, addr);
+ xhci_dbg(xhci, "get port status, actual port %d status = 0x%x\n", wIndex, temp);
+
+ /* wPortChange bits */
+ if (temp & PORT_CSC)
+ status |= 1 << USB_PORT_FEAT_C_CONNECTION;
+ if (temp & PORT_PEC)
+ status |= 1 << USB_PORT_FEAT_C_ENABLE;
+ if ((temp & PORT_OCC))
+ status |= 1 << USB_PORT_FEAT_C_OVER_CURRENT;
+ /*
+ * FIXME ignoring suspend, reset, and USB 2.1/3.0 specific
+ * changes
+ */
+ if (temp & PORT_CONNECT) {
+ status |= 1 << USB_PORT_FEAT_CONNECTION;
+ status |= xhci_port_speed(temp);
+ }
+ if (temp & PORT_PE)
+ status |= 1 << USB_PORT_FEAT_ENABLE;
+ if (temp & PORT_OC)
+ status |= 1 << USB_PORT_FEAT_OVER_CURRENT;
+ if (temp & PORT_RESET)
+ status |= 1 << USB_PORT_FEAT_RESET;
+ if (temp & PORT_POWER)
+ status |= 1 << USB_PORT_FEAT_POWER;
+ xhci_dbg(xhci, "Get port status returned 0x%x\n", status);
+ put_unaligned(cpu_to_le32(status), (__le32 *) buf);
+ break;
+ case SetPortFeature:
+ wIndex &= 0xff;
+ if (!wIndex || wIndex > ports)
+ goto error;
+ wIndex--;
+ addr = &xhci->op_regs->port_status_base + NUM_PORT_REGS*(wIndex & 0xff);
+ temp = xhci_readl(xhci, addr);
+ temp = xhci_port_state_to_neutral(temp);
+ switch (wValue) {
+ case USB_PORT_FEAT_POWER:
+ /*
+ * Turn on ports, even if there isn't per-port switching.
+ * HC will report connect events even before this is set.
+ * However, khubd will ignore the roothub events until
+ * the roothub is registered.
+ */
+ xhci_writel(xhci, temp | PORT_POWER, addr);
+
+ temp = xhci_readl(xhci, addr);
+ xhci_dbg(xhci, "set port power, actual port %d status = 0x%x\n", wIndex, temp);
+ break;
+ case USB_PORT_FEAT_RESET:
+ temp = (temp | PORT_RESET);
+ xhci_writel(xhci, temp, addr);
+
+ temp = xhci_readl(xhci, addr);
+ xhci_dbg(xhci, "set port reset, actual port %d status = 0x%x\n", wIndex, temp);
+ break;
+ default:
+ goto error;
+ }
+ temp = xhci_readl(xhci, addr); /* unblock any posted writes */
+ break;
+ case ClearPortFeature:
+ if (!wIndex || wIndex > ports)
+ goto error;
+ wIndex--;
+ addr = &xhci->op_regs->port_status_base +
+ NUM_PORT_REGS*(wIndex & 0xff);
+ temp = xhci_readl(xhci, addr);
+ temp = xhci_port_state_to_neutral(temp);
+ switch (wValue) {
+ case USB_PORT_FEAT_C_RESET:
+ status = PORT_RC;
+ port_change_bit = "reset";
+ break;
+ case USB_PORT_FEAT_C_CONNECTION:
+ status = PORT_CSC;
+ port_change_bit = "connect";
+ break;
+ case USB_PORT_FEAT_C_OVER_CURRENT:
+ status = PORT_OCC;
+ port_change_bit = "over-current";
+ break;
+ default:
+ goto error;
+ }
+ /* Change bits are all write 1 to clear */
+ xhci_writel(xhci, temp | status, addr);
+ temp = xhci_readl(xhci, addr);
+ xhci_dbg(xhci, "clear port %s change, actual port %d status = 0x%x\n",
+ port_change_bit, wIndex, temp);
+ temp = xhci_readl(xhci, addr); /* unblock any posted writes */
+ break;
+ default:
+error:
+ /* "stall" on error */
+ retval = -EPIPE;
+ }
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ return retval;
+}
+
+/*
+ * Returns 0 if the status hasn't changed, or the number of bytes in buf.
+ * Ports are 0-indexed from the HCD point of view,
+ * and 1-indexed from the USB core point of view.
+ * xHCI instances can have up to 127 ports, so FIXME if you see more than 15.
+ *
+ * Note that the status change bits will be cleared as soon as a port status
+ * change event is generated, so we use the saved status from that event.
+ */
+int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)
+{
+ unsigned long flags;
+ u32 temp, status;
+ int i, retval;
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ int ports;
+ u32 __iomem *addr;
+
+ ports = HCS_MAX_PORTS(xhci->hcs_params1);
+
+ /* Initial status is no changes */
+ buf[0] = 0;
+ status = 0;
+ if (ports > 7) {
+ buf[1] = 0;
+ retval = 2;
+ } else {
+ retval = 1;
+ }
+
+ spin_lock_irqsave(&xhci->lock, flags);
+ /* For each port, did anything change? If so, set that bit in buf. */
+ for (i = 0; i < ports; i++) {
+ addr = &xhci->op_regs->port_status_base +
+ NUM_PORT_REGS*i;
+ temp = xhci_readl(xhci, addr);
+ if (temp & (PORT_CSC | PORT_PEC | PORT_OCC)) {
+ if (i < 7)
+ buf[0] |= 1 << (i + 1);
+ else
+ buf[1] |= 1 << (i - 7);
+ status = 1;
+ }
+ }
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ return status ? retval : 0;
+}
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
new file mode 100644
index 00000000000..c8a72de1c50
--- /dev/null
+++ b/drivers/usb/host/xhci-mem.c
@@ -0,0 +1,769 @@
+/*
+ * xHCI host controller driver
+ *
+ * Copyright (C) 2008 Intel Corp.
+ *
+ * Author: Sarah Sharp
+ * Some code borrowed from the Linux EHCI driver.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/usb.h>
+#include <linux/pci.h>
+#include <linux/dmapool.h>
+
+#include "xhci.h"
+
+/*
+ * Allocates a generic ring segment from the ring pool, sets the dma address,
+ * initializes the segment to zero, and sets the private next pointer to NULL.
+ *
+ * Section 4.11.1.1:
+ * "All components of all Command and Transfer TRBs shall be initialized to '0'"
+ */
+static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci, gfp_t flags)
+{
+ struct xhci_segment *seg;
+ dma_addr_t dma;
+
+ seg = kzalloc(sizeof *seg, flags);
+ if (!seg)
+		return NULL;
+ xhci_dbg(xhci, "Allocating priv segment structure at %p\n", seg);
+
+ seg->trbs = dma_pool_alloc(xhci->segment_pool, flags, &dma);
+ if (!seg->trbs) {
+ kfree(seg);
+		return NULL;
+ }
+ xhci_dbg(xhci, "// Allocating segment at %p (virtual) 0x%llx (DMA)\n",
+ seg->trbs, (unsigned long long)dma);
+
+ memset(seg->trbs, 0, SEGMENT_SIZE);
+ seg->dma = dma;
+ seg->next = NULL;
+
+ return seg;
+}
+
+static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
+{
+ if (!seg)
+ return;
+ if (seg->trbs) {
+ xhci_dbg(xhci, "Freeing DMA segment at %p (virtual) 0x%llx (DMA)\n",
+ seg->trbs, (unsigned long long)seg->dma);
+ dma_pool_free(xhci->segment_pool, seg->trbs, seg->dma);
+ seg->trbs = NULL;
+ }
+ xhci_dbg(xhci, "Freeing priv segment structure at %p\n", seg);
+ kfree(seg);
+}
+
+/*
+ * Make the prev segment point to the next segment.
+ *
+ * Change the last TRB in the prev segment to be a Link TRB which points to the
+ * DMA address of the next segment. The caller needs to set any Link TRB
+ * related flags, such as End TRB, Toggle Cycle, and no snoop.
+ */
+static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
+ struct xhci_segment *next, bool link_trbs)
+{
+ u32 val;
+
+ if (!prev || !next)
+ return;
+ prev->next = next;
+ if (link_trbs) {
+ prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr[0] = next->dma;
+
+ /* Set the last TRB in the segment to have a TRB type ID of Link TRB */
+ val = prev->trbs[TRBS_PER_SEGMENT-1].link.control;
+ val &= ~TRB_TYPE_BITMASK;
+ val |= TRB_TYPE(TRB_LINK);
+ prev->trbs[TRBS_PER_SEGMENT-1].link.control = val;
+ }
+ xhci_dbg(xhci, "Linking segment 0x%llx to segment 0x%llx (DMA)\n",
+ (unsigned long long)prev->dma,
+ (unsigned long long)next->dma);
+}
+
+/* XXX: Do we need the hcd structure in all these functions? */
+void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
+{
+ struct xhci_segment *seg;
+ struct xhci_segment *first_seg;
+
+ if (!ring || !ring->first_seg)
+ return;
+ first_seg = ring->first_seg;
+ seg = first_seg->next;
+ xhci_dbg(xhci, "Freeing ring at %p\n", ring);
+ while (seg != first_seg) {
+ struct xhci_segment *next = seg->next;
+ xhci_segment_free(xhci, seg);
+ seg = next;
+ }
+ xhci_segment_free(xhci, first_seg);
+ ring->first_seg = NULL;
+ kfree(ring);
+}
+
+/*
+ * Create a new ring with zero or more segments.
+ *
+ * Link each segment together into a ring.
+ * Set the end flag and the cycle toggle bit on the last segment.
+ * See section 4.9.1 and figures 15 and 16.
+ */
+static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
+ unsigned int num_segs, bool link_trbs, gfp_t flags)
+{
+ struct xhci_ring *ring;
+ struct xhci_segment *prev;
+
+ ring = kzalloc(sizeof *(ring), flags);
+ xhci_dbg(xhci, "Allocating ring at %p\n", ring);
+ if (!ring)
+ return 0;
+
+ INIT_LIST_HEAD(&ring->td_list);
+ INIT_LIST_HEAD(&ring->cancelled_td_list);
+ if (num_segs == 0)
+ return ring;
+
+ ring->first_seg = xhci_segment_alloc(xhci, flags);
+ if (!ring->first_seg)
+ goto fail;
+ num_segs--;
+
+ prev = ring->first_seg;
+ while (num_segs > 0) {
+ struct xhci_segment *next;
+
+ next = xhci_segment_alloc(xhci, flags);
+ if (!next)
+ goto fail;
+ xhci_link_segments(xhci, prev, next, link_trbs);
+
+ prev = next;
+ num_segs--;
+ }
+ xhci_link_segments(xhci, prev, ring->first_seg, link_trbs);
+
+ if (link_trbs) {
+ /* See section 4.9.2.1 and 6.4.4.1 */
+ prev->trbs[TRBS_PER_SEGMENT-1].link.control |= (LINK_TOGGLE);
+ xhci_dbg(xhci, "Wrote link toggle flag to"
+ " segment %p (virtual), 0x%llx (DMA)\n",
+ prev, (unsigned long long)prev->dma);
+ }
+ /* The ring is empty, so the enqueue pointer == dequeue pointer */
+ ring->enqueue = ring->first_seg->trbs;
+ ring->enq_seg = ring->first_seg;
+ ring->dequeue = ring->enqueue;
+ ring->deq_seg = ring->first_seg;
+ /* The ring is initialized to 0. The producer must write 1 to the cycle
+ * bit to handover ownership of the TRB, so PCS = 1. The consumer must
+ * compare CCS to the cycle bit to check ownership, so CCS = 1.
+ */
+ ring->cycle_state = 1;
+
+ return ring;
+
+fail:
+ xhci_ring_free(xhci, ring);
+ return 0;
+}
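+
+/*
+ * Worked example of the cycle state initialization above (illustrative
+ * only): a fresh ring has every TRB cycle bit at 0 and cycle_state at 1,
+ * so the consumer sees no TRB it owns.  The producer hands a TRB over by
+ * writing cycle = 1; once the toggle in the link TRB wraps the ring,
+ * ownership is handed over by writing cycle = 0 instead.
+ */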
+
+/* All the xhci_tds in the ring's TD list should be freed at this point */
+void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
+{
+ struct xhci_virt_device *dev;
+ int i;
+
+ /* Slot ID 0 is reserved */
+ if (slot_id == 0 || !xhci->devs[slot_id])
+ return;
+
+ dev = xhci->devs[slot_id];
+ xhci->dcbaa->dev_context_ptrs[2*slot_id] = 0;
+ xhci->dcbaa->dev_context_ptrs[2*slot_id + 1] = 0;
+ if (!dev)
+ return;
+
+ for (i = 0; i < 31; ++i)
+ if (dev->ep_rings[i])
+ xhci_ring_free(xhci, dev->ep_rings[i]);
+
+ if (dev->in_ctx)
+ dma_pool_free(xhci->device_pool,
+ dev->in_ctx, dev->in_ctx_dma);
+ if (dev->out_ctx)
+ dma_pool_free(xhci->device_pool,
+ dev->out_ctx, dev->out_ctx_dma);
+ kfree(xhci->devs[slot_id]);
+ xhci->devs[slot_id] = 0;
+}
+
+int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
+ struct usb_device *udev, gfp_t flags)
+{
+ dma_addr_t dma;
+ struct xhci_virt_device *dev;
+
+ /* Slot ID 0 is reserved */
+ if (slot_id == 0 || xhci->devs[slot_id]) {
+ xhci_warn(xhci, "Bad Slot ID %d\n", slot_id);
+ return 0;
+ }
+
+ xhci->devs[slot_id] = kzalloc(sizeof(*xhci->devs[slot_id]), flags);
+ if (!xhci->devs[slot_id])
+ return 0;
+ dev = xhci->devs[slot_id];
+
+ /* Allocate the (output) device context that will be used in the HC */
+ dev->out_ctx = dma_pool_alloc(xhci->device_pool, flags, &dma);
+ if (!dev->out_ctx)
+ goto fail;
+ dev->out_ctx_dma = dma;
+ xhci_dbg(xhci, "Slot %d output ctx = 0x%llx (dma)\n", slot_id,
+ (unsigned long long)dma);
+ memset(dev->out_ctx, 0, sizeof(*dev->out_ctx));
+
+ /* Allocate the (input) device context for address device command */
+ dev->in_ctx = dma_pool_alloc(xhci->device_pool, flags, &dma);
+ if (!dev->in_ctx)
+ goto fail;
+ dev->in_ctx_dma = dma;
+ xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id,
+ (unsigned long long)dma);
+ memset(dev->in_ctx, 0, sizeof(*dev->in_ctx));
+
+ /* Allocate endpoint 0 ring */
+ dev->ep_rings[0] = xhci_ring_alloc(xhci, 1, true, flags);
+ if (!dev->ep_rings[0])
+ goto fail;
+
+ init_completion(&dev->cmd_completion);
+
+ /*
+ * Point to output device context in dcbaa; skip the output control
+ * context, which is eight 32 bit fields (or 32 bytes long)
+ */
+ xhci->dcbaa->dev_context_ptrs[2*slot_id] =
+ (u32) dev->out_ctx_dma + (32);
+ xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n",
+ slot_id,
+ &xhci->dcbaa->dev_context_ptrs[2*slot_id],
+ (unsigned long long)dev->out_ctx_dma);
+ xhci->dcbaa->dev_context_ptrs[2*slot_id + 1] = 0;
+
+ return 1;
+fail:
+ xhci_free_virt_device(xhci, slot_id);
+ return 0;
+}
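+
+/*
+ * Note on the indexing above (illustrative): dev_context_ptrs[] holds
+ * 32-bit words while each DCBAA entry is a 64-bit pointer, so entry N
+ * occupies words [2*N] (low half) and [2*N + 1] (high half); this driver
+ * only writes the low half and zeroes the high half.
+ */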
+
+/* Setup an xHCI virtual device for a Set Address command */
+int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev)
+{
+ struct xhci_virt_device *dev;
+ struct xhci_ep_ctx *ep0_ctx;
+ struct usb_device *top_dev;
+
+ dev = xhci->devs[udev->slot_id];
+ /* Slot ID 0 is reserved */
+ if (udev->slot_id == 0 || !dev) {
+ xhci_warn(xhci, "Slot ID %d is not assigned to this device\n",
+ udev->slot_id);
+ return -EINVAL;
+ }
+ ep0_ctx = &dev->in_ctx->ep[0];
+
+	/* 2) New slot context and endpoint 0 context are valid */
+ dev->in_ctx->add_flags = SLOT_FLAG | EP0_FLAG;
+
+ /* 3) Only the control endpoint is valid - one endpoint context */
+ dev->in_ctx->slot.dev_info |= LAST_CTX(1);
+
+ switch (udev->speed) {
+ case USB_SPEED_SUPER:
+ dev->in_ctx->slot.dev_info |= (u32) udev->route;
+ dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_SS;
+ break;
+ case USB_SPEED_HIGH:
+ dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_HS;
+ break;
+ case USB_SPEED_FULL:
+ dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_FS;
+ break;
+ case USB_SPEED_LOW:
+ dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_LS;
+ break;
+ case USB_SPEED_VARIABLE:
+ xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
+ return -EINVAL;
+ default:
+ /* Speed was set earlier, this shouldn't happen. */
+ BUG();
+ }
+ /* Find the root hub port this device is under */
+ for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
+ top_dev = top_dev->parent)
+ /* Found device below root hub */;
+ dev->in_ctx->slot.dev_info2 |= (u32) ROOT_HUB_PORT(top_dev->portnum);
+ xhci_dbg(xhci, "Set root hub portnum to %d\n", top_dev->portnum);
+
+ /* Is this a LS/FS device under a HS hub? */
+ /*
+ * FIXME: I don't think this is right, where does the TT info for the
+ * roothub or parent hub come from?
+ */
+ if ((udev->speed == USB_SPEED_LOW || udev->speed == USB_SPEED_FULL) &&
+ udev->tt) {
+ dev->in_ctx->slot.tt_info = udev->tt->hub->slot_id;
+ dev->in_ctx->slot.tt_info |= udev->ttport << 8;
+ }
+ xhci_dbg(xhci, "udev->tt = %p\n", udev->tt);
+ xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport);
+
+ /* Step 4 - ring already allocated */
+ /* Step 5 */
+ ep0_ctx->ep_info2 = EP_TYPE(CTRL_EP);
+ /*
+ * See section 4.3 bullet 6:
+ * The default Max Packet size for ep0 is "8 bytes for a USB2
+ * LS/FS/HS device or 512 bytes for a USB3 SS device"
+ * XXX: Not sure about wireless USB devices.
+ */
+ if (udev->speed == USB_SPEED_SUPER)
+ ep0_ctx->ep_info2 |= MAX_PACKET(512);
+ else
+ ep0_ctx->ep_info2 |= MAX_PACKET(8);
+ /* EP 0 can handle "burst" sizes of 1, so Max Burst Size field is 0 */
+ ep0_ctx->ep_info2 |= MAX_BURST(0);
+ ep0_ctx->ep_info2 |= ERROR_COUNT(3);
+
+ ep0_ctx->deq[0] =
+ dev->ep_rings[0]->first_seg->dma;
+ ep0_ctx->deq[0] |= dev->ep_rings[0]->cycle_state;
+ ep0_ctx->deq[1] = 0;
+
+ /* Steps 7 and 8 were done in xhci_alloc_virt_device() */
+
+ return 0;
+}
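+
+/*
+ * Note on the dequeue pointer encoding above (illustrative): ring
+ * segments are 64-byte aligned (see the dma_pool setup in this file),
+ * so the low bits of the DMA address are zero and bit 0 can carry the
+ * dequeue cycle state (DCS), here the ring's initial cycle state of 1.
+ */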
+
+/* Return the polling or NAK interval.
+ *
+ * The polling interval is expressed in "microframes". If xHCI's Interval field
+ * is set to N, it will service the endpoint every 2^(Interval)*125us.
+ *
+ * The NAK interval is one NAK per 1 to 255 microframes, or no NAKs if interval
+ * is set to 0.
+ */
+static inline unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
+ struct usb_host_endpoint *ep)
+{
+ unsigned int interval = 0;
+
+ switch (udev->speed) {
+ case USB_SPEED_HIGH:
+ /* Max NAK rate */
+ if (usb_endpoint_xfer_control(&ep->desc) ||
+ usb_endpoint_xfer_bulk(&ep->desc))
+ interval = ep->desc.bInterval;
+ /* Fall through - SS and HS isoc/int have same decoding */
+ case USB_SPEED_SUPER:
+ if (usb_endpoint_xfer_int(&ep->desc) ||
+ usb_endpoint_xfer_isoc(&ep->desc)) {
+ if (ep->desc.bInterval == 0)
+ interval = 0;
+ else
+ interval = ep->desc.bInterval - 1;
+ if (interval > 15)
+ interval = 15;
+			if (interval != ep->desc.bInterval - 1)
+ dev_warn(&udev->dev, "ep %#x - rounding interval to %d microframes\n",
+ ep->desc.bEndpointAddress, 1 << interval);
+ }
+ break;
+ /* Convert bInterval (in 1-255 frames) to microframes and round down to
+ * nearest power of 2.
+ */
+ case USB_SPEED_FULL:
+ case USB_SPEED_LOW:
+ if (usb_endpoint_xfer_int(&ep->desc) ||
+ usb_endpoint_xfer_isoc(&ep->desc)) {
+ interval = fls(8*ep->desc.bInterval) - 1;
+ if (interval > 10)
+ interval = 10;
+ if (interval < 3)
+ interval = 3;
+ if ((1 << interval) != 8*ep->desc.bInterval)
+ dev_warn(&udev->dev, "ep %#x - rounding interval to %d microframes\n",
+ ep->desc.bEndpointAddress, 1 << interval);
+ }
+ break;
+ default:
+ BUG();
+ }
+ return EP_INTERVAL(interval);
+}
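+
+/*
+ * Worked example for the full/low-speed conversion above (illustrative
+ * only): an interrupt endpoint with bInterval = 9 frames gives
+ * fls(8 * 9) - 1 = fls(72) - 1 = 6, i.e. 2^6 = 64 microframes (8ms),
+ * rounded down from the requested 9ms.
+ */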
+
+static inline u32 xhci_get_endpoint_type(struct usb_device *udev,
+ struct usb_host_endpoint *ep)
+{
+ int in;
+ u32 type;
+
+ in = usb_endpoint_dir_in(&ep->desc);
+ if (usb_endpoint_xfer_control(&ep->desc)) {
+ type = EP_TYPE(CTRL_EP);
+ } else if (usb_endpoint_xfer_bulk(&ep->desc)) {
+ if (in)
+ type = EP_TYPE(BULK_IN_EP);
+ else
+ type = EP_TYPE(BULK_OUT_EP);
+ } else if (usb_endpoint_xfer_isoc(&ep->desc)) {
+ if (in)
+ type = EP_TYPE(ISOC_IN_EP);
+ else
+ type = EP_TYPE(ISOC_OUT_EP);
+ } else if (usb_endpoint_xfer_int(&ep->desc)) {
+ if (in)
+ type = EP_TYPE(INT_IN_EP);
+ else
+ type = EP_TYPE(INT_OUT_EP);
+ } else {
+ BUG();
+ }
+ return type;
+}
+
+int xhci_endpoint_init(struct xhci_hcd *xhci,
+ struct xhci_virt_device *virt_dev,
+ struct usb_device *udev,
+ struct usb_host_endpoint *ep,
+ gfp_t mem_flags)
+{
+ unsigned int ep_index;
+ struct xhci_ep_ctx *ep_ctx;
+ struct xhci_ring *ep_ring;
+ unsigned int max_packet;
+ unsigned int max_burst;
+
+ ep_index = xhci_get_endpoint_index(&ep->desc);
+ ep_ctx = &virt_dev->in_ctx->ep[ep_index];
+
+ /* Set up the endpoint ring */
+ virt_dev->new_ep_rings[ep_index] = xhci_ring_alloc(xhci, 1, true, mem_flags);
+ if (!virt_dev->new_ep_rings[ep_index])
+ return -ENOMEM;
+ ep_ring = virt_dev->new_ep_rings[ep_index];
+ ep_ctx->deq[0] = ep_ring->first_seg->dma | ep_ring->cycle_state;
+ ep_ctx->deq[1] = 0;
+
+ ep_ctx->ep_info = xhci_get_endpoint_interval(udev, ep);
+
+ /* FIXME dig Mult and streams info out of ep companion desc */
+
+ /* Allow 3 retries for everything but isoc */
+ if (!usb_endpoint_xfer_isoc(&ep->desc))
+ ep_ctx->ep_info2 = ERROR_COUNT(3);
+ else
+ ep_ctx->ep_info2 = ERROR_COUNT(0);
+
+ ep_ctx->ep_info2 |= xhci_get_endpoint_type(udev, ep);
+
+ /* Set the max packet size and max burst */
+ switch (udev->speed) {
+ case USB_SPEED_SUPER:
+ max_packet = ep->desc.wMaxPacketSize;
+ ep_ctx->ep_info2 |= MAX_PACKET(max_packet);
+ /* dig out max burst from ep companion desc */
+ max_packet = ep->ss_ep_comp->desc.bMaxBurst;
+ ep_ctx->ep_info2 |= MAX_BURST(max_packet);
+ break;
+ case USB_SPEED_HIGH:
+ /* bits 11:12 specify the number of additional transaction
+ * opportunities per microframe (USB 2.0, section 9.6.6)
+ */
+ if (usb_endpoint_xfer_isoc(&ep->desc) ||
+ usb_endpoint_xfer_int(&ep->desc)) {
+ max_burst = (ep->desc.wMaxPacketSize & 0x1800) >> 11;
+ ep_ctx->ep_info2 |= MAX_BURST(max_burst);
+ }
+ /* Fall through */
+ case USB_SPEED_FULL:
+ case USB_SPEED_LOW:
+ max_packet = ep->desc.wMaxPacketSize & 0x3ff;
+ ep_ctx->ep_info2 |= MAX_PACKET(max_packet);
+ break;
+ default:
+ BUG();
+ }
+ /* FIXME Debug endpoint context */
+ return 0;
+}
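+
+/*
+ * Worked example for the high-speed decoding above (illustrative only):
+ * a high-bandwidth isoc endpoint with wMaxPacketSize = 0x1300 yields
+ * max_burst = (0x1300 & 0x1800) >> 11 = 2 (two additional transaction
+ * opportunities per microframe) and max_packet = 0x1300 & 0x3ff = 768.
+ */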
+
+void xhci_endpoint_zero(struct xhci_hcd *xhci,
+ struct xhci_virt_device *virt_dev,
+ struct usb_host_endpoint *ep)
+{
+ unsigned int ep_index;
+ struct xhci_ep_ctx *ep_ctx;
+
+ ep_index = xhci_get_endpoint_index(&ep->desc);
+ ep_ctx = &virt_dev->in_ctx->ep[ep_index];
+
+ ep_ctx->ep_info = 0;
+ ep_ctx->ep_info2 = 0;
+ ep_ctx->deq[0] = 0;
+ ep_ctx->deq[1] = 0;
+ ep_ctx->tx_info = 0;
+ /* Don't free the endpoint ring until the set interface or configuration
+ * request succeeds.
+ */
+}
+
+void xhci_mem_cleanup(struct xhci_hcd *xhci)
+{
+ struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
+ int size;
+ int i;
+
+ /* Free the Event Ring Segment Table and the actual Event Ring */
+ xhci_writel(xhci, 0, &xhci->ir_set->erst_size);
+ xhci_writel(xhci, 0, &xhci->ir_set->erst_base[0]);
+ xhci_writel(xhci, 0, &xhci->ir_set->erst_base[1]);
+ xhci_writel(xhci, 0, &xhci->ir_set->erst_dequeue[0]);
+ xhci_writel(xhci, 0, &xhci->ir_set->erst_dequeue[1]);
+ size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
+ if (xhci->erst.entries)
+ pci_free_consistent(pdev, size,
+ xhci->erst.entries, xhci->erst.erst_dma_addr);
+ xhci->erst.entries = NULL;
+ xhci_dbg(xhci, "Freed ERST\n");
+ if (xhci->event_ring)
+ xhci_ring_free(xhci, xhci->event_ring);
+ xhci->event_ring = NULL;
+ xhci_dbg(xhci, "Freed event ring\n");
+
+ xhci_writel(xhci, 0, &xhci->op_regs->cmd_ring[0]);
+ xhci_writel(xhci, 0, &xhci->op_regs->cmd_ring[1]);
+ if (xhci->cmd_ring)
+ xhci_ring_free(xhci, xhci->cmd_ring);
+ xhci->cmd_ring = NULL;
+ xhci_dbg(xhci, "Freed command ring\n");
+
+ for (i = 1; i < MAX_HC_SLOTS; ++i)
+ xhci_free_virt_device(xhci, i);
+
+ if (xhci->segment_pool)
+ dma_pool_destroy(xhci->segment_pool);
+ xhci->segment_pool = NULL;
+ xhci_dbg(xhci, "Freed segment pool\n");
+
+ if (xhci->device_pool)
+ dma_pool_destroy(xhci->device_pool);
+ xhci->device_pool = NULL;
+ xhci_dbg(xhci, "Freed device context pool\n");
+
+ xhci_writel(xhci, 0, &xhci->op_regs->dcbaa_ptr[0]);
+ xhci_writel(xhci, 0, &xhci->op_regs->dcbaa_ptr[1]);
+ if (xhci->dcbaa)
+ pci_free_consistent(pdev, sizeof(*xhci->dcbaa),
+ xhci->dcbaa, xhci->dcbaa->dma);
+ xhci->dcbaa = NULL;
+
+ xhci->page_size = 0;
+ xhci->page_shift = 0;
+}
+
+int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
+{
+ dma_addr_t dma;
+ struct device *dev = xhci_to_hcd(xhci)->self.controller;
+ unsigned int val, val2;
+ struct xhci_segment *seg;
+ u32 page_size;
+ int i;
+
+ page_size = xhci_readl(xhci, &xhci->op_regs->page_size);
+ xhci_dbg(xhci, "Supported page size register = 0x%x\n", page_size);
+ for (i = 0; i < 16; i++) {
+ if ((0x1 & page_size) != 0)
+ break;
+ page_size = page_size >> 1;
+ }
+ if (i < 16)
+ xhci_dbg(xhci, "Supported page size of %iK\n", (1 << (i+12)) / 1024);
+ else
+ xhci_warn(xhci, "WARN: no supported page size\n");
+ /* Use 4K pages, since that's common and the minimum the HC supports */
+ xhci->page_shift = 12;
+ xhci->page_size = 1 << xhci->page_shift;
+ xhci_dbg(xhci, "HCD page size set to %iK\n", xhci->page_size / 1024);
+
+ /*
+ * Program the Number of Device Slots Enabled field in the CONFIG
+ * register with the max value of slots the HC can handle.
+ */
+ val = HCS_MAX_SLOTS(xhci_readl(xhci, &xhci->cap_regs->hcs_params1));
+ xhci_dbg(xhci, "// xHC can handle at most %d device slots.\n",
+ (unsigned int) val);
+ val2 = xhci_readl(xhci, &xhci->op_regs->config_reg);
+ val |= (val2 & ~HCS_SLOTS_MASK);
+ xhci_dbg(xhci, "// Setting Max device slots reg = 0x%x.\n",
+ (unsigned int) val);
+ xhci_writel(xhci, val, &xhci->op_regs->config_reg);
+
+ /*
+	 * Section 6.1 - the device context base address array must be
+	 * physically contiguous and 64-byte (cache line) aligned.
+ */
+ xhci->dcbaa = pci_alloc_consistent(to_pci_dev(dev),
+ sizeof(*xhci->dcbaa), &dma);
+ if (!xhci->dcbaa)
+ goto fail;
+ memset(xhci->dcbaa, 0, sizeof *(xhci->dcbaa));
+ xhci->dcbaa->dma = dma;
+ xhci_dbg(xhci, "// Device context base array address = 0x%llx (DMA), %p (virt)\n",
+ (unsigned long long)xhci->dcbaa->dma, xhci->dcbaa);
+ xhci_writel(xhci, dma, &xhci->op_regs->dcbaa_ptr[0]);
+ xhci_writel(xhci, (u32) 0, &xhci->op_regs->dcbaa_ptr[1]);
+
+ /*
+ * Initialize the ring segment pool. The ring must be a contiguous
+ * structure comprised of TRBs. The TRBs must be 16 byte aligned,
+ * however, the command ring segment needs 64-byte aligned segments,
+ * so we pick the greater alignment need.
+ */
+ xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
+ SEGMENT_SIZE, 64, xhci->page_size);
+ /* See Table 46 and Note on Figure 55 */
+ /* FIXME support 64-byte contexts */
+ xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev,
+ sizeof(struct xhci_device_control),
+ 64, xhci->page_size);
+ if (!xhci->segment_pool || !xhci->device_pool)
+ goto fail;
+
+	/* Set up the command ring to have one segment for now. */
+ xhci->cmd_ring = xhci_ring_alloc(xhci, 1, true, flags);
+ if (!xhci->cmd_ring)
+ goto fail;
+ xhci_dbg(xhci, "Allocated command ring at %p\n", xhci->cmd_ring);
+ xhci_dbg(xhci, "First segment DMA is 0x%llx\n",
+ (unsigned long long)xhci->cmd_ring->first_seg->dma);
+
+ /* Set the address in the Command Ring Control register */
+ val = xhci_readl(xhci, &xhci->op_regs->cmd_ring[0]);
+ val = (val & ~CMD_RING_ADDR_MASK) |
+ (xhci->cmd_ring->first_seg->dma & CMD_RING_ADDR_MASK) |
+ xhci->cmd_ring->cycle_state;
+ xhci_dbg(xhci, "// Setting command ring address low bits to 0x%x\n", val);
+ xhci_writel(xhci, val, &xhci->op_regs->cmd_ring[0]);
+ xhci_dbg(xhci, "// Setting command ring address high bits to 0x0\n");
+ xhci_writel(xhci, (u32) 0, &xhci->op_regs->cmd_ring[1]);
+ xhci_dbg_cmd_ptrs(xhci);
+
+ val = xhci_readl(xhci, &xhci->cap_regs->db_off);
+ val &= DBOFF_MASK;
+ xhci_dbg(xhci, "// Doorbell array is located at offset 0x%x"
+ " from cap regs base addr\n", val);
+ xhci->dba = (void *) xhci->cap_regs + val;
+ xhci_dbg_regs(xhci);
+ xhci_print_run_regs(xhci);
+ /* Set ir_set to interrupt register set 0 */
+ xhci->ir_set = (void *) xhci->run_regs->ir_set;
+
+ /*
+ * Event ring setup: Allocate a normal ring, but also setup
+ * the event ring segment table (ERST). Section 4.9.3.
+ */
+ xhci_dbg(xhci, "// Allocating event ring\n");
+ xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, false, flags);
+ if (!xhci->event_ring)
+ goto fail;
+
+ xhci->erst.entries = pci_alloc_consistent(to_pci_dev(dev),
+ sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS, &dma);
+ if (!xhci->erst.entries)
+ goto fail;
+ xhci_dbg(xhci, "// Allocated event ring segment table at 0x%llx\n",
+ (unsigned long long)dma);
+
+ memset(xhci->erst.entries, 0, sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS);
+ xhci->erst.num_entries = ERST_NUM_SEGS;
+ xhci->erst.erst_dma_addr = dma;
+ xhci_dbg(xhci, "Set ERST to 0; private num segs = %i, virt addr = %p, dma addr = 0x%llx\n",
+ xhci->erst.num_entries,
+ xhci->erst.entries,
+ (unsigned long long)xhci->erst.erst_dma_addr);
+
+ /* set ring base address and size for each segment table entry */
+ for (val = 0, seg = xhci->event_ring->first_seg; val < ERST_NUM_SEGS; val++) {
+ struct xhci_erst_entry *entry = &xhci->erst.entries[val];
+ entry->seg_addr[0] = seg->dma;
+ entry->seg_addr[1] = 0;
+ entry->seg_size = TRBS_PER_SEGMENT;
+ entry->rsvd = 0;
+ seg = seg->next;
+ }
+
+ /* set ERST count with the number of entries in the segment table */
+ val = xhci_readl(xhci, &xhci->ir_set->erst_size);
+ val &= ERST_SIZE_MASK;
+ val |= ERST_NUM_SEGS;
+ xhci_dbg(xhci, "// Write ERST size = %i to ir_set 0 (some bits preserved)\n",
+ val);
+ xhci_writel(xhci, val, &xhci->ir_set->erst_size);
+
+ xhci_dbg(xhci, "// Set ERST entries to point to event ring.\n");
+ /* set the segment table base address */
+ xhci_dbg(xhci, "// Set ERST base address for ir_set 0 = 0x%llx\n",
+ (unsigned long long)xhci->erst.erst_dma_addr);
+ val = xhci_readl(xhci, &xhci->ir_set->erst_base[0]);
+ val &= ERST_PTR_MASK;
+ val |= (xhci->erst.erst_dma_addr & ~ERST_PTR_MASK);
+ xhci_writel(xhci, val, &xhci->ir_set->erst_base[0]);
+ xhci_writel(xhci, 0, &xhci->ir_set->erst_base[1]);
+
+ /* Set the event ring dequeue address */
+ xhci_set_hc_event_deq(xhci);
+ xhci_dbg(xhci, "Wrote ERST address to ir_set 0.\n");
+ xhci_print_ir_set(xhci, xhci->ir_set, 0);
+
+ /*
+ * XXX: Might need to set the Interrupter Moderation Register to
+ * something other than the default (~1ms minimum between interrupts).
+ * See section 5.5.1.2.
+ */
+ init_completion(&xhci->addr_dev);
+ for (i = 0; i < MAX_HC_SLOTS; ++i)
+ xhci->devs[i] = 0;
+
+ return 0;
+fail:
+ xhci_warn(xhci, "Couldn't initialize memory\n");
+ xhci_mem_cleanup(xhci);
+ return -ENOMEM;
+}
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
new file mode 100644
index 00000000000..1462709e26c
--- /dev/null
+++ b/drivers/usb/host/xhci-pci.c
@@ -0,0 +1,166 @@
+/*
+ * xHCI host controller driver PCI Bus Glue.
+ *
+ * Copyright (C) 2008 Intel Corp.
+ *
+ * Author: Sarah Sharp
+ * Some code borrowed from the Linux EHCI driver.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/pci.h>
+
+#include "xhci.h"
+
+static const char hcd_name[] = "xhci_hcd";
+
+/* called after powerup, by probe or system-pm "wakeup" */
+static int xhci_pci_reinit(struct xhci_hcd *xhci, struct pci_dev *pdev)
+{
+ /*
+ * TODO: Implement finding debug ports later.
+ * TODO: see if there are any quirks that need to be added to handle
+ * new extended capabilities.
+ */
+
+ /* PCI Memory-Write-Invalidate cycle support is optional (uncommon) */
+ if (!pci_set_mwi(pdev))
+ xhci_dbg(xhci, "MWI active\n");
+
+ xhci_dbg(xhci, "Finished xhci_pci_reinit\n");
+ return 0;
+}
+
+/* called during probe() after chip reset completes */
+static int xhci_pci_setup(struct usb_hcd *hcd)
+{
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
+ int retval;
+
+ xhci->cap_regs = hcd->regs;
+ xhci->op_regs = hcd->regs +
+ HC_LENGTH(xhci_readl(xhci, &xhci->cap_regs->hc_capbase));
+ xhci->run_regs = hcd->regs +
+ (xhci_readl(xhci, &xhci->cap_regs->run_regs_off) & RTSOFF_MASK);
+ /* Cache read-only capability registers */
+ xhci->hcs_params1 = xhci_readl(xhci, &xhci->cap_regs->hcs_params1);
+ xhci->hcs_params2 = xhci_readl(xhci, &xhci->cap_regs->hcs_params2);
+ xhci->hcs_params3 = xhci_readl(xhci, &xhci->cap_regs->hcs_params3);
+ xhci->hcc_params = xhci_readl(xhci, &xhci->cap_regs->hcc_params);
+ xhci_print_registers(xhci);
+
+ /* Make sure the HC is halted. */
+ retval = xhci_halt(xhci);
+ if (retval)
+ return retval;
+
+ xhci_dbg(xhci, "Resetting HCD\n");
+ /* Reset the internal HC memory state and registers. */
+ retval = xhci_reset(xhci);
+ if (retval)
+ return retval;
+ xhci_dbg(xhci, "Reset complete\n");
+
+ xhci_dbg(xhci, "Calling HCD init\n");
+ /* Initialize HCD and host controller data structures. */
+ retval = xhci_init(hcd);
+ if (retval)
+ return retval;
+ xhci_dbg(xhci, "Called HCD init\n");
+
+ pci_read_config_byte(pdev, XHCI_SBRN_OFFSET, &xhci->sbrn);
+ xhci_dbg(xhci, "Got SBRN %u\n", (unsigned int) xhci->sbrn);
+
+ /* Find any debug ports */
+ return xhci_pci_reinit(xhci, pdev);
+}
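+
+/*
+ * Illustrative register layout implied by the setup above: the capability
+ * registers start at the beginning of the MMIO region, the operational
+ * registers follow CAPLENGTH bytes in (HC_LENGTH of the first capability
+ * word), and the runtime registers sit at the offset held in RTSOFF.
+ */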
+
+static const struct hc_driver xhci_pci_hc_driver = {
+ .description = hcd_name,
+ .product_desc = "xHCI Host Controller",
+ .hcd_priv_size = sizeof(struct xhci_hcd),
+
+ /*
+ * generic hardware linkage
+ */
+ .irq = xhci_irq,
+ .flags = HCD_MEMORY | HCD_USB3,
+
+ /*
+ * basic lifecycle operations
+ */
+ .reset = xhci_pci_setup,
+ .start = xhci_run,
+ /* suspend and resume implemented later */
+ .stop = xhci_stop,
+ .shutdown = xhci_shutdown,
+
+ /*
+ * managing i/o requests and associated device resources
+ */
+ .urb_enqueue = xhci_urb_enqueue,
+ .urb_dequeue = xhci_urb_dequeue,
+ .alloc_dev = xhci_alloc_dev,
+ .free_dev = xhci_free_dev,
+ .add_endpoint = xhci_add_endpoint,
+ .drop_endpoint = xhci_drop_endpoint,
+ .check_bandwidth = xhci_check_bandwidth,
+ .reset_bandwidth = xhci_reset_bandwidth,
+ .address_device = xhci_address_device,
+
+ /*
+ * scheduling support
+ */
+ .get_frame_number = xhci_get_frame,
+
+ /* Root hub support */
+ .hub_control = xhci_hub_control,
+ .hub_status_data = xhci_hub_status_data,
+};
+
+/*-------------------------------------------------------------------------*/
+
+/* PCI driver selection metadata; PCI hotplugging uses this */
+static const struct pci_device_id pci_ids[] = { {
+ /* handle any USB 3.0 xHCI controller */
+ PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_XHCI, ~0),
+ .driver_data = (unsigned long) &xhci_pci_hc_driver,
+ },
+ { /* end: all zeroes */ }
+};
+MODULE_DEVICE_TABLE(pci, pci_ids);
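+
+/*
+ * Note (illustrative): PCI_CLASS_SERIAL_USB_XHCI is class code 0x0c0330
+ * (base class 0x0c serial bus, subclass 0x03 USB, programming interface
+ * 0x30 for xHCI), and the ~0 mask demands an exact match, so this driver
+ * binds to any vendor's xHCI controller.
+ */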
+
+/* pci driver glue; this is a "new style" PCI driver module */
+static struct pci_driver xhci_pci_driver = {
+ .name = (char *) hcd_name,
+ .id_table = pci_ids,
+
+ .probe = usb_hcd_pci_probe,
+ .remove = usb_hcd_pci_remove,
+ /* suspend and resume implemented later */
+
+ .shutdown = usb_hcd_pci_shutdown,
+};
+
+int xhci_register_pci(void)
+{
+ return pci_register_driver(&xhci_pci_driver);
+}
+
+void xhci_unregister_pci(void)
+{
+ pci_unregister_driver(&xhci_pci_driver);
+}
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
new file mode 100644
index 00000000000..02d81985c45
--- /dev/null
+++ b/drivers/usb/host/xhci-ring.c
@@ -0,0 +1,1648 @@
+/*
+ * xHCI host controller driver
+ *
+ * Copyright (C) 2008 Intel Corp.
+ *
+ * Author: Sarah Sharp
+ * Some code borrowed from the Linux EHCI driver.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/*
+ * Ring initialization rules:
+ * 1. Each segment is initialized to zero, except for link TRBs.
+ * 2. Ring cycle state = 1 (see the note in xhci_ring_alloc()). This
+ *    represents Producer Cycle State (PCS) or
+ * Consumer Cycle State (CCS), depending on ring function.
+ * 3. Enqueue pointer = dequeue pointer = address of first TRB in the segment.
+ *
+ * Ring behavior rules:
+ * 1. A ring is empty if enqueue == dequeue. This means there will always be at
+ * least one free TRB in the ring. This is useful if you want to turn that
+ * into a link TRB and expand the ring.
+ * 2. When incrementing an enqueue or dequeue pointer, if the next TRB is a
+ * link TRB, then load the pointer with the address in the link TRB. If the
+ * link TRB had its toggle bit set, you may need to update the ring cycle
+ * state (see cycle bit rules). You may have to do this multiple times
+ * until you reach a non-link TRB.
+ * 3. A ring is full if enqueue++ (for the definition of increment above)
+ * equals the dequeue pointer.
+ *
+ * Cycle bit rules:
+ * 1. When a consumer increments a dequeue pointer and encounters a toggle bit
+ * in a link TRB, it must toggle the ring cycle state.
+ * 2. When a producer increments an enqueue pointer and encounters a toggle bit
+ * in a link TRB, it must toggle the ring cycle state.
+ *
+ * Producer rules:
+ * 1. Check if ring is full before you enqueue.
+ * 2. Write the ring cycle state to the cycle bit in the TRB you're enqueuing.
+ * Update enqueue pointer between each write (which may update the ring
+ * cycle state).
+ * 3. Notify consumer. If SW is producer, it rings the doorbell for command
+ *    and endpoint rings. If the HC is the producer for the event ring, it
+ *    generates an interrupt according to interrupt moderation rules.
+ *
+ * Consumer rules:
+ * 1. Check if TRB belongs to you. If the cycle bit == your ring cycle state,
+ * the TRB is owned by the consumer.
+ * 2. Update dequeue pointer (which may update the ring cycle state) and
+ * continue processing TRBs until you reach a TRB which is not owned by you.
+ * 3. Notify the producer. SW is the consumer for the event ring, and it
+ * updates event ring dequeue pointer. HC is the consumer for the command and
+ * endpoint rings; it generates events on the event ring for these.
+ */
+
+#include <linux/scatterlist.h>
+#include "xhci.h"
+
+/*
+ * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
+ * address of the TRB.
+ */
+dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
+ union xhci_trb *trb)
+{
+ unsigned long segment_offset;
+
+ if (!seg || !trb || trb < seg->trbs)
+ return 0;
+ /* offset in TRBs */
+ segment_offset = trb - seg->trbs;
+	if (segment_offset >= TRBS_PER_SEGMENT)
+ return 0;
+ return seg->dma + (segment_offset * sizeof(*trb));
+}
+
+/* Does this link TRB point to the first segment in a ring,
+ * or was the previous TRB the last TRB on the last segment in the ERST?
+ */
+static inline bool last_trb_on_last_seg(struct xhci_hcd *xhci, struct xhci_ring *ring,
+ struct xhci_segment *seg, union xhci_trb *trb)
+{
+ if (ring == xhci->event_ring)
+ return (trb == &seg->trbs[TRBS_PER_SEGMENT]) &&
+ (seg->next == xhci->event_ring->first_seg);
+ else
+ return trb->link.control & LINK_TOGGLE;
+}
+
+/* Is this TRB a link TRB or was the last TRB the last TRB in this event ring
+ * segment? I.e. would the updated event TRB pointer step off the end of the
+ * event seg?
+ */
+static inline int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
+ struct xhci_segment *seg, union xhci_trb *trb)
+{
+ if (ring == xhci->event_ring)
+ return trb == &seg->trbs[TRBS_PER_SEGMENT];
+ else
+ return (trb->link.control & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK);
+}
+
+/* Updates trb to point to the next TRB in the ring, and updates seg if the next
+ * TRB is in a new segment. This does not skip over link TRBs, and it does not
+ * affect the ring dequeue or enqueue pointers.
+ */
+static void next_trb(struct xhci_hcd *xhci,
+ struct xhci_ring *ring,
+ struct xhci_segment **seg,
+ union xhci_trb **trb)
+{
+ if (last_trb(xhci, ring, *seg, *trb)) {
+ *seg = (*seg)->next;
+ *trb = ((*seg)->trbs);
+ } else {
+		(*trb)++;
+ }
+}
+
+/*
+ * See Cycle bit rules. SW is the consumer for the event ring only.
+ * Don't make a ring full of link TRBs. That would be dumb and this would loop.
+ */
+static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer)
+{
+ union xhci_trb *next = ++(ring->dequeue);
+
+ ring->deq_updates++;
+ /* Update the dequeue pointer further if that was a link TRB or we're at
+	 * the end of an event ring segment (which doesn't have link TRBs)
+ */
+ while (last_trb(xhci, ring, ring->deq_seg, next)) {
+ if (consumer && last_trb_on_last_seg(xhci, ring, ring->deq_seg, next)) {
+ ring->cycle_state = (ring->cycle_state ? 0 : 1);
+ if (!in_interrupt())
+ xhci_dbg(xhci, "Toggle cycle state for ring %p = %i\n",
+ ring,
+ (unsigned int) ring->cycle_state);
+ }
+ ring->deq_seg = ring->deq_seg->next;
+ ring->dequeue = ring->deq_seg->trbs;
+ next = ring->dequeue;
+ }
+}
+
+/*
+ * See Cycle bit rules. SW is the consumer for the event ring only.
+ * Don't make a ring full of link TRBs. That would be dumb and this would loop.
+ *
+ * If we've just enqueued a TRB that is in the middle of a TD (meaning the
+ * chain bit is set), then set the chain bit in all the following link TRBs.
+ * If we've enqueued the last TRB in a TD, make sure the following link TRBs
+ * have their chain bit cleared (so that each Link TRB is a separate TD).
+ *
+ * Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have the chain bit
+ * set, but other sections talk about dealing with the chain bit set.
+ * Assume section 6.4.4.1 is wrong, and the chain bit can be set in a Link TRB.
+ */
+static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer)
+{
+ u32 chain;
+ union xhci_trb *next;
+
+ chain = ring->enqueue->generic.field[3] & TRB_CHAIN;
+ next = ++(ring->enqueue);
+
+ ring->enq_updates++;
+	/* Update the enqueue pointer further if that was a link TRB or we're at
+	 * the end of an event ring segment (which doesn't have link TRBs)
+ */
+ while (last_trb(xhci, ring, ring->enq_seg, next)) {
+ if (!consumer) {
+ if (ring != xhci->event_ring) {
+ next->link.control &= ~TRB_CHAIN;
+ next->link.control |= chain;
+ /* Give this link TRB to the hardware */
+ wmb();
+ if (next->link.control & TRB_CYCLE)
+ next->link.control &= (u32) ~TRB_CYCLE;
+ else
+ next->link.control |= (u32) TRB_CYCLE;
+ }
+ /* Toggle the cycle bit after the last ring segment. */
+ if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
+ ring->cycle_state = (ring->cycle_state ? 0 : 1);
+ if (!in_interrupt())
+ xhci_dbg(xhci, "Toggle cycle state for ring %p = %i\n",
+ ring,
+ (unsigned int) ring->cycle_state);
+ }
+ }
+ ring->enq_seg = ring->enq_seg->next;
+ ring->enqueue = ring->enq_seg->trbs;
+ next = ring->enqueue;
+ }
+}
+
+/*
+ * Check to see if there's room to enqueue num_trbs on the ring. See rules
+ * above.
+ * FIXME: this would be simpler and faster if we just kept track of the number
+ * of free TRBs in a ring.
+ */
+static int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
+ unsigned int num_trbs)
+{
+ int i;
+ union xhci_trb *enq = ring->enqueue;
+ struct xhci_segment *enq_seg = ring->enq_seg;
+
+ /* Check if ring is empty */
+ if (enq == ring->dequeue)
+ return 1;
+ /* Make sure there's an extra empty TRB available */
+ for (i = 0; i <= num_trbs; ++i) {
+ if (enq == ring->dequeue)
+ return 0;
+ enq++;
+ while (last_trb(xhci, ring, enq_seg, enq)) {
+ enq_seg = enq_seg->next;
+ enq = enq_seg->trbs;
+ }
+ }
+ return 1;
+}
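+
+/*
+ * The FIXME above suggests tracking free TRBs directly; a hedged sketch
+ * of what that could look like (num_trbs_free is a hypothetical field,
+ * not present in this driver):
+ */
+#if 0	/* example only */
+static int room_on_ring_counted(struct xhci_ring *ring, unsigned int num_trbs)
+{
+	/* Decremented on enqueue, incremented as the consumer retires TRBs */
+	return ring->num_trbs_free >= num_trbs;
+}
+#endif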
+
+void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
+{
+ u32 temp;
+ dma_addr_t deq;
+
+ deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
+ xhci->event_ring->dequeue);
+ if (deq == 0 && !in_interrupt())
+ xhci_warn(xhci, "WARN something wrong with SW event ring "
+ "dequeue ptr.\n");
+ /* Update HC event ring dequeue pointer */
+ temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[0]);
+ temp &= ERST_PTR_MASK;
+ if (!in_interrupt())
+ xhci_dbg(xhci, "// Write event ring dequeue pointer\n");
+ xhci_writel(xhci, 0, &xhci->ir_set->erst_dequeue[1]);
+ xhci_writel(xhci, (deq & ~ERST_PTR_MASK) | temp,
+ &xhci->ir_set->erst_dequeue[0]);
+}
+
+/* Ring the host controller doorbell after placing a command on the ring */
+void xhci_ring_cmd_db(struct xhci_hcd *xhci)
+{
+ u32 temp;
+
+ xhci_dbg(xhci, "// Ding dong!\n");
+ temp = xhci_readl(xhci, &xhci->dba->doorbell[0]) & DB_MASK;
+ xhci_writel(xhci, temp | DB_TARGET_HOST, &xhci->dba->doorbell[0]);
+ /* Flush PCI posted writes */
+ xhci_readl(xhci, &xhci->dba->doorbell[0]);
+}
+
+static void ring_ep_doorbell(struct xhci_hcd *xhci,
+ unsigned int slot_id,
+ unsigned int ep_index)
+{
+ struct xhci_ring *ep_ring;
+ u32 field;
+ __u32 __iomem *db_addr = &xhci->dba->doorbell[slot_id];
+	u32 __iomem *db_addr = &xhci->dba->doorbell[slot_id];
+ ep_ring = xhci->devs[slot_id]->ep_rings[ep_index];
+ /* Don't ring the doorbell for this endpoint if there are pending
+	 * cancellations, because we don't want to interrupt processing.
+ */
+ if (!ep_ring->cancels_pending && !(ep_ring->state & SET_DEQ_PENDING)) {
+ field = xhci_readl(xhci, db_addr) & DB_MASK;
+ xhci_writel(xhci, field | EPI_TO_DB(ep_index), db_addr);
+ /* Flush PCI posted writes - FIXME Matthew Wilcox says this
+ * isn't time-critical and we shouldn't make the CPU wait for
+ * the flush.
+ */
+ xhci_readl(xhci, db_addr);
+ }
+}
+
+/*
+ * Find the segment that trb is in. Start searching in start_seg.
+ * If we must move past a segment that has a link TRB with a toggle cycle state
+ * bit set, then we will toggle the value pointed at by cycle_state.
+ */
+static struct xhci_segment *find_trb_seg(
+ struct xhci_segment *start_seg,
+ union xhci_trb *trb, int *cycle_state)
+{
+ struct xhci_segment *cur_seg = start_seg;
+ struct xhci_generic_trb *generic_trb;
+
+ while (cur_seg->trbs > trb ||
+ &cur_seg->trbs[TRBS_PER_SEGMENT - 1] < trb) {
+ generic_trb = &cur_seg->trbs[TRBS_PER_SEGMENT - 1].generic;
+		if ((generic_trb->field[3] & TRB_TYPE_BITMASK) ==
+				TRB_TYPE(TRB_LINK) &&
+				(generic_trb->field[3] & LINK_TOGGLE))
+ *cycle_state = ~(*cycle_state) & 0x1;
+ cur_seg = cur_seg->next;
+ if (cur_seg == start_seg)
+ /* Looped over the entire list. Oops! */
+ return 0;
+ }
+ return cur_seg;
+}
+
+struct dequeue_state {
+ struct xhci_segment *new_deq_seg;
+ union xhci_trb *new_deq_ptr;
+ int new_cycle_state;
+};
+
+/*
+ * Move the xHC's endpoint ring dequeue pointer past cur_td.
+ * Record the new state of the xHC's endpoint ring dequeue segment,
+ * dequeue pointer, and new consumer cycle state in state.
+ * Update our internal representation of the ring's dequeue pointer.
+ *
+ * We do this in three jumps:
+ * - First we update our new ring state to be the same as when the xHC stopped.
+ * - Then we traverse the ring to find the segment that contains
+ * the last TRB in the TD. We toggle the xHC's new cycle state when we pass
+ * any link TRBs with the toggle cycle bit set.
+ * - Finally we move the dequeue state one TRB further, toggling the cycle bit
+ * if we've moved it past a link TRB with the toggle cycle bit set.
+ */
+static void find_new_dequeue_state(struct xhci_hcd *xhci,
+ unsigned int slot_id, unsigned int ep_index,
+ struct xhci_td *cur_td, struct dequeue_state *state)
+{
+ struct xhci_virt_device *dev = xhci->devs[slot_id];
+ struct xhci_ring *ep_ring = dev->ep_rings[ep_index];
+ struct xhci_generic_trb *trb;
+
+ state->new_cycle_state = 0;
+ state->new_deq_seg = find_trb_seg(cur_td->start_seg,
+ ep_ring->stopped_trb,
+ &state->new_cycle_state);
+ if (!state->new_deq_seg)
+ BUG();
+ /* Dig out the cycle state saved by the xHC during the stop ep cmd */
+ state->new_cycle_state = 0x1 & dev->out_ctx->ep[ep_index].deq[0];
+
+ state->new_deq_ptr = cur_td->last_trb;
+ state->new_deq_seg = find_trb_seg(state->new_deq_seg,
+ state->new_deq_ptr,
+ &state->new_cycle_state);
+ if (!state->new_deq_seg)
+ BUG();
+
+ trb = &state->new_deq_ptr->generic;
+	if ((trb->field[3] & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK) &&
+			(trb->field[3] & LINK_TOGGLE))
+ state->new_cycle_state = ~(state->new_cycle_state) & 0x1;
+ next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr);
+
+ /* Don't update the ring cycle state for the producer (us). */
+ ep_ring->dequeue = state->new_deq_ptr;
+ ep_ring->deq_seg = state->new_deq_seg;
+}
+
+static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
+ struct xhci_td *cur_td)
+{
+ struct xhci_segment *cur_seg;
+ union xhci_trb *cur_trb;
+
+ for (cur_seg = cur_td->start_seg, cur_trb = cur_td->first_trb;
+ true;
+ next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
+ if ((cur_trb->generic.field[3] & TRB_TYPE_BITMASK) ==
+ TRB_TYPE(TRB_LINK)) {
+ /* Unchain any chained Link TRBs, but
+ * leave the pointers intact.
+ */
+ cur_trb->generic.field[3] &= ~TRB_CHAIN;
+ xhci_dbg(xhci, "Cancel (unchain) link TRB\n");
+ xhci_dbg(xhci, "Address = %p (0x%llx dma); "
+ "in seg %p (0x%llx dma)\n",
+ cur_trb,
+ (unsigned long long)xhci_trb_virt_to_dma(cur_seg, cur_trb),
+ cur_seg,
+ (unsigned long long)cur_seg->dma);
+ } else {
+ cur_trb->generic.field[0] = 0;
+ cur_trb->generic.field[1] = 0;
+ cur_trb->generic.field[2] = 0;
+ /* Preserve only the cycle bit of this TRB */
+ cur_trb->generic.field[3] &= TRB_CYCLE;
+ cur_trb->generic.field[3] |= TRB_TYPE(TRB_TR_NOOP);
+ xhci_dbg(xhci, "Cancel TRB %p (0x%llx dma) "
+ "in seg %p (0x%llx dma)\n",
+ cur_trb,
+ (unsigned long long)xhci_trb_virt_to_dma(cur_seg, cur_trb),
+ cur_seg,
+ (unsigned long long)cur_seg->dma);
+ }
+ if (cur_trb == cur_td->last_trb)
+ break;
+ }
+}
+
+static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
+ unsigned int ep_index, struct xhci_segment *deq_seg,
+ union xhci_trb *deq_ptr, u32 cycle_state);
+
+/*
+ * When we get a command completion for a Stop Endpoint Command, we need to
+ * unlink any cancelled TDs from the ring. There are two ways to do that:
+ *
+ * 1. If the HW was in the middle of processing the TD that needs to be
+ * cancelled, then we must move the ring's dequeue pointer past the last TRB
+ * in the TD with a Set Dequeue Pointer Command.
+ * 2. Otherwise, we turn all the TRBs in the TD into No-op TRBs (with the chain
+ * bit cleared) so that the HW will skip over them.
+ */
+static void handle_stopped_endpoint(struct xhci_hcd *xhci,
+ union xhci_trb *trb)
+{
+ unsigned int slot_id;
+ unsigned int ep_index;
+ struct xhci_ring *ep_ring;
+ struct list_head *entry;
+ struct xhci_td *cur_td = 0;
+ struct xhci_td *last_unlinked_td;
+
+ struct dequeue_state deq_state;
+#ifdef CONFIG_USB_HCD_STAT
+ ktime_t stop_time = ktime_get();
+#endif
+
+ memset(&deq_state, 0, sizeof(deq_state));
+ slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
+ ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]);
+ ep_ring = xhci->devs[slot_id]->ep_rings[ep_index];
+
+ if (list_empty(&ep_ring->cancelled_td_list))
+ return;
+
+ /* Fix up the ep ring first, so HW stops executing cancelled TDs.
+ * We have the xHCI lock, so nothing can modify this list until we drop
+ * it. We're also in the event handler, so we can't get re-interrupted
+	 * if another Stop Endpoint command completes.
+ */
+ list_for_each(entry, &ep_ring->cancelled_td_list) {
+ cur_td = list_entry(entry, struct xhci_td, cancelled_td_list);
+ xhci_dbg(xhci, "Cancelling TD starting at %p, 0x%llx (dma).\n",
+ cur_td->first_trb,
+ (unsigned long long)xhci_trb_virt_to_dma(cur_td->start_seg, cur_td->first_trb));
+ /*
+ * If we stopped on the TD we need to cancel, then we have to
+ * move the xHC endpoint ring dequeue pointer past this TD.
+ */
+ if (cur_td == ep_ring->stopped_td)
+ find_new_dequeue_state(xhci, slot_id, ep_index, cur_td,
+ &deq_state);
+ else
+ td_to_noop(xhci, ep_ring, cur_td);
+ /*
+ * The event handler won't see a completion for this TD anymore,
+ * so remove it from the endpoint ring's TD list. Keep it in
+ * the cancelled TD list for URB completion later.
+ */
+ list_del(&cur_td->td_list);
+ ep_ring->cancels_pending--;
+ }
+ last_unlinked_td = cur_td;
+
+ /* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
+ if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
+ xhci_dbg(xhci, "Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), "
+ "new deq ptr = %p (0x%llx dma), new cycle = %u\n",
+ deq_state.new_deq_seg,
+ (unsigned long long)deq_state.new_deq_seg->dma,
+ deq_state.new_deq_ptr,
+ (unsigned long long)xhci_trb_virt_to_dma(deq_state.new_deq_seg, deq_state.new_deq_ptr),
+ deq_state.new_cycle_state);
+ queue_set_tr_deq(xhci, slot_id, ep_index,
+ deq_state.new_deq_seg,
+ deq_state.new_deq_ptr,
+ (u32) deq_state.new_cycle_state);
+ /* Stop the TD queueing code from ringing the doorbell until
+ * this command completes. The HC won't set the dequeue pointer
+ * if the ring is running, and ringing the doorbell starts the
+ * ring running.
+ */
+ ep_ring->state |= SET_DEQ_PENDING;
+ xhci_ring_cmd_db(xhci);
+ } else {
+ /* Otherwise just ring the doorbell to restart the ring */
+ ring_ep_doorbell(xhci, slot_id, ep_index);
+ }
+
+ /*
+ * Drop the lock and complete the URBs in the cancelled TD list.
+ * New TDs to be cancelled might be added to the end of the list before
+ * we can complete all the URBs for the TDs we already unlinked.
+ * So stop when we've completed the URB for the last TD we unlinked.
+ */
+ do {
+ cur_td = list_entry(ep_ring->cancelled_td_list.next,
+ struct xhci_td, cancelled_td_list);
+ list_del(&cur_td->cancelled_td_list);
+
+ /* Clean up the cancelled URB */
+#ifdef CONFIG_USB_HCD_STAT
+ hcd_stat_update(xhci->tp_stat, cur_td->urb->actual_length,
+ ktime_sub(stop_time, cur_td->start_time));
+#endif
+ cur_td->urb->hcpriv = NULL;
+ usb_hcd_unlink_urb_from_ep(xhci_to_hcd(xhci), cur_td->urb);
+
+ xhci_dbg(xhci, "Giveback cancelled URB %p\n", cur_td->urb);
+ spin_unlock(&xhci->lock);
+ /* Doesn't matter what we pass for status, since the core will
+ * just overwrite it (because the URB has been unlinked).
+ */
+ usb_hcd_giveback_urb(xhci_to_hcd(xhci), cur_td->urb, 0);
+ kfree(cur_td);
+
+ spin_lock(&xhci->lock);
+ } while (cur_td != last_unlinked_td);
+
+ /* Return to the event handler with xhci->lock re-acquired */
+}
+
+/*
+ * When we get a completion for a Set Transfer Ring Dequeue Pointer command,
+ * we need to clear the set deq pending flag in the endpoint ring state, so that
+ * the TD queueing code can ring the doorbell again. We also need to ring the
+ * endpoint doorbell to restart the ring, but only if there aren't more
+ * cancellations pending.
+ */
+static void handle_set_deq_completion(struct xhci_hcd *xhci,
+ struct xhci_event_cmd *event,
+ union xhci_trb *trb)
+{
+ unsigned int slot_id;
+ unsigned int ep_index;
+ struct xhci_ring *ep_ring;
+ struct xhci_virt_device *dev;
+
+ slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
+ ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]);
+ dev = xhci->devs[slot_id];
+ ep_ring = dev->ep_rings[ep_index];
+
+ if (GET_COMP_CODE(event->status) != COMP_SUCCESS) {
+ unsigned int ep_state;
+ unsigned int slot_state;
+
+ switch (GET_COMP_CODE(event->status)) {
+ case COMP_TRB_ERR:
+ xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because "
+ "of stream ID configuration\n");
+ break;
+ case COMP_CTX_STATE:
+ xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due "
+ "to incorrect slot or ep state.\n");
+ ep_state = dev->out_ctx->ep[ep_index].ep_info;
+ ep_state &= EP_STATE_MASK;
+ slot_state = dev->out_ctx->slot.dev_state;
+ slot_state = GET_SLOT_STATE(slot_state);
+ xhci_dbg(xhci, "Slot state = %u, EP state = %u\n",
+ slot_state, ep_state);
+ break;
+ case COMP_EBADSLT:
+ xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed because "
+ "slot %u was not enabled.\n", slot_id);
+ break;
+ default:
+ xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown "
+ "completion code of %u.\n",
+ GET_COMP_CODE(event->status));
+ break;
+ }
+ /* OK what do we do now? The endpoint state is hosed, and we
+	 * should never get to this point if the synchronization between
+	 * queueing and endpoint state changes is correct. This might happen
+ * if the device gets disconnected after we've finished
+ * cancelling URBs, which might not be an error...
+ */
+ } else {
+ xhci_dbg(xhci, "Successful Set TR Deq Ptr cmd, deq[0] = 0x%x, "
+ "deq[1] = 0x%x.\n",
+ dev->out_ctx->ep[ep_index].deq[0],
+ dev->out_ctx->ep[ep_index].deq[1]);
+ }
+
+ ep_ring->state &= ~SET_DEQ_PENDING;
+ ring_ep_doorbell(xhci, slot_id, ep_index);
+}
+
+
+static void handle_cmd_completion(struct xhci_hcd *xhci,
+ struct xhci_event_cmd *event)
+{
+ int slot_id = TRB_TO_SLOT_ID(event->flags);
+ u64 cmd_dma;
+ dma_addr_t cmd_dequeue_dma;
+
+ cmd_dma = (((u64) event->cmd_trb[1]) << 32) + event->cmd_trb[0];
+ cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
+ xhci->cmd_ring->dequeue);
+ /* Is the command ring deq ptr out of sync with the deq seg ptr? */
+ if (cmd_dequeue_dma == 0) {
+ xhci->error_bitmask |= 1 << 4;
+ return;
+ }
+ /* Does the DMA address match our internal dequeue pointer address? */
+ if (cmd_dma != (u64) cmd_dequeue_dma) {
+ xhci->error_bitmask |= 1 << 5;
+ return;
+ }
+ switch (xhci->cmd_ring->dequeue->generic.field[3] & TRB_TYPE_BITMASK) {
+ case TRB_TYPE(TRB_ENABLE_SLOT):
+ if (GET_COMP_CODE(event->status) == COMP_SUCCESS)
+ xhci->slot_id = slot_id;
+ else
+ xhci->slot_id = 0;
+ complete(&xhci->addr_dev);
+ break;
+ case TRB_TYPE(TRB_DISABLE_SLOT):
+ if (xhci->devs[slot_id])
+ xhci_free_virt_device(xhci, slot_id);
+ break;
+ case TRB_TYPE(TRB_CONFIG_EP):
+ xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(event->status);
+ complete(&xhci->devs[slot_id]->cmd_completion);
+ break;
+ case TRB_TYPE(TRB_ADDR_DEV):
+ xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(event->status);
+ complete(&xhci->addr_dev);
+ break;
+ case TRB_TYPE(TRB_STOP_RING):
+ handle_stopped_endpoint(xhci, xhci->cmd_ring->dequeue);
+ break;
+ case TRB_TYPE(TRB_SET_DEQ):
+ handle_set_deq_completion(xhci, event, xhci->cmd_ring->dequeue);
+ break;
+ case TRB_TYPE(TRB_CMD_NOOP):
+ ++xhci->noops_handled;
+ break;
+ default:
+ /* Skip over unknown commands on the event ring */
+ xhci->error_bitmask |= 1 << 6;
+ break;
+ }
+ inc_deq(xhci, xhci->cmd_ring, false);
+}
+
+static void handle_port_status(struct xhci_hcd *xhci,
+ union xhci_trb *event)
+{
+ u32 port_id;
+
+ /* Port status change events always have a successful completion code */
+ if (GET_COMP_CODE(event->generic.field[2]) != COMP_SUCCESS) {
+ xhci_warn(xhci, "WARN: xHC returned failed port status event\n");
+ xhci->error_bitmask |= 1 << 8;
+ }
+ /* FIXME: core doesn't care about all port link state changes yet */
+ port_id = GET_PORT_ID(event->generic.field[0]);
+ xhci_dbg(xhci, "Port Status Change Event for port %d\n", port_id);
+
+ /* Update event ring dequeue pointer before dropping the lock */
+ inc_deq(xhci, xhci->event_ring, true);
+ xhci_set_hc_event_deq(xhci);
+
+ spin_unlock(&xhci->lock);
+ /* Pass this up to the core */
+ usb_hcd_poll_rh_status(xhci_to_hcd(xhci));
+ spin_lock(&xhci->lock);
+}
+
+/*
+ * This TD is defined by the TRBs starting at start_trb in start_seg and ending
+ * at end_trb, which may be in another segment. If the suspect DMA address is a
+ * TRB in this TD, this function returns that TRB's segment. Otherwise it
+ * returns 0.
+ */
+static struct xhci_segment *trb_in_td(
+ struct xhci_segment *start_seg,
+ union xhci_trb *start_trb,
+ union xhci_trb *end_trb,
+ dma_addr_t suspect_dma)
+{
+ dma_addr_t start_dma;
+ dma_addr_t end_seg_dma;
+ dma_addr_t end_trb_dma;
+ struct xhci_segment *cur_seg;
+
+ start_dma = xhci_trb_virt_to_dma(start_seg, start_trb);
+ cur_seg = start_seg;
+
+ do {
+ /* We may get an event for a Link TRB in the middle of a TD */
+ end_seg_dma = xhci_trb_virt_to_dma(cur_seg,
+				&cur_seg->trbs[TRBS_PER_SEGMENT - 1]);
+ /* If the end TRB isn't in this segment, this is set to 0 */
+ end_trb_dma = xhci_trb_virt_to_dma(cur_seg, end_trb);
+
+ if (end_trb_dma > 0) {
+ /* The end TRB is in this segment, so suspect should be here */
+ if (start_dma <= end_trb_dma) {
+ if (suspect_dma >= start_dma && suspect_dma <= end_trb_dma)
+ return cur_seg;
+ } else {
+ /* Case for one segment with
+ * a TD wrapped around to the top
+ */
+ if ((suspect_dma >= start_dma &&
+ suspect_dma <= end_seg_dma) ||
+ (suspect_dma >= cur_seg->dma &&
+ suspect_dma <= end_trb_dma))
+ return cur_seg;
+ }
+ return 0;
+ } else {
+ /* Might still be somewhere in this segment */
+ if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma)
+ return cur_seg;
+ }
+ cur_seg = cur_seg->next;
+ start_dma = xhci_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
+ } while (1);
+
+}
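+
+/*
+ * Worked example for the wrap-around case above (illustrative only): in a
+ * one-segment ring a TD may start near the end of the segment and wrap
+ * through the link TRB back to the top, so end_trb_dma < start_dma; the
+ * suspect address then matches if it falls in either [start_dma,
+ * end_seg_dma] or [cur_seg->dma, end_trb_dma].
+ */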
+
+/*
+ * If this function returns an error condition, it means it got a Transfer
+ * event with a corrupted Slot ID, Endpoint ID, or TRB DMA address.
+ * At this point, the host controller is probably hosed and should be reset.
+ */
+static int handle_tx_event(struct xhci_hcd *xhci,
+ struct xhci_transfer_event *event)
+{
+ struct xhci_virt_device *xdev;
+ struct xhci_ring *ep_ring;
+ int ep_index;
+ struct xhci_td *td = 0;
+ dma_addr_t event_dma;
+ struct xhci_segment *event_seg;
+ union xhci_trb *event_trb;
+ struct urb *urb = 0;
+ int status = -EINPROGRESS;
+
+ xdev = xhci->devs[TRB_TO_SLOT_ID(event->flags)];
+ if (!xdev) {
+ xhci_err(xhci, "ERROR Transfer event pointed to bad slot\n");
+ return -ENODEV;
+ }
+
+ /* Endpoint ID is 1 based, our index is zero based */
+ ep_index = TRB_TO_EP_ID(event->flags) - 1;
+ ep_ring = xdev->ep_rings[ep_index];
+ if (!ep_ring || (xdev->out_ctx->ep[ep_index].ep_info & EP_STATE_MASK) == EP_STATE_DISABLED) {
+ xhci_err(xhci, "ERROR Transfer event pointed to disabled endpoint\n");
+ return -ENODEV;
+ }
+
+ event_dma = event->buffer[0];
+ if (event->buffer[1] != 0)
+ xhci_warn(xhci, "WARN ignoring upper 32-bits of 64-bit TRB dma address\n");
+
+ /* This TRB should be in the TD at the head of this ring's TD list */
+ if (list_empty(&ep_ring->td_list)) {
+ xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n",
+ TRB_TO_SLOT_ID(event->flags), ep_index);
+ xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
+ (unsigned int) (event->flags & TRB_TYPE_BITMASK)>>10);
+ xhci_print_trb_offsets(xhci, (union xhci_trb *) event);
+ urb = NULL;
+ goto cleanup;
+ }
+ td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list);
+
+ /* Is this a TRB in the currently executing TD? */
+ event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue,
+ td->last_trb, event_dma);
+ if (!event_seg) {
+ /* HC is busted, give up! */
+ xhci_err(xhci, "ERROR Transfer event TRB DMA ptr not part of current TD\n");
+ return -ESHUTDOWN;
+ }
+ event_trb = &event_seg->trbs[(event_dma - event_seg->dma) / sizeof(*event_trb)];
+ xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
+ (unsigned int) (event->flags & TRB_TYPE_BITMASK)>>10);
+ xhci_dbg(xhci, "Offset 0x00 (buffer[0]) = 0x%x\n",
+ (unsigned int) event->buffer[0]);
+	xhci_dbg(xhci, "Offset 0x04 (buffer[1]) = 0x%x\n",
+ (unsigned int) event->buffer[1]);
+ xhci_dbg(xhci, "Offset 0x08 (transfer length) = 0x%x\n",
+ (unsigned int) event->transfer_len);
+ xhci_dbg(xhci, "Offset 0x0C (flags) = 0x%x\n",
+ (unsigned int) event->flags);
+
+ /* Look for common error cases */
+ switch (GET_COMP_CODE(event->transfer_len)) {
+ /* Skip codes that require special handling depending on
+ * transfer type
+ */
+ case COMP_SUCCESS:
+ case COMP_SHORT_TX:
+ break;
+ case COMP_STOP:
+ xhci_dbg(xhci, "Stopped on Transfer TRB\n");
+ break;
+ case COMP_STOP_INVAL:
+ xhci_dbg(xhci, "Stopped on No-op or Link TRB\n");
+ break;
+ case COMP_STALL:
+ xhci_warn(xhci, "WARN: Stalled endpoint\n");
+ status = -EPIPE;
+ break;
+ case COMP_TRB_ERR:
+ xhci_warn(xhci, "WARN: TRB error on endpoint\n");
+ status = -EILSEQ;
+ break;
+ case COMP_TX_ERR:
+ xhci_warn(xhci, "WARN: transfer error on endpoint\n");
+ status = -EPROTO;
+ break;
+ case COMP_DB_ERR:
+ xhci_warn(xhci, "WARN: HC couldn't access mem fast enough\n");
+ status = -ENOSR;
+ break;
+ default:
+ xhci_warn(xhci, "ERROR Unknown event condition, HC probably busted\n");
+ urb = NULL;
+ goto cleanup;
+ }
+ /* Now update the urb's actual_length and give back to the core */
+ /* Was this a control transfer? */
+ if (usb_endpoint_xfer_control(&td->urb->ep->desc)) {
+ xhci_debug_trb(xhci, xhci->event_ring->dequeue);
+ switch (GET_COMP_CODE(event->transfer_len)) {
+ case COMP_SUCCESS:
+ if (event_trb == ep_ring->dequeue) {
+ xhci_warn(xhci, "WARN: Success on ctrl setup TRB without IOC set??\n");
+ status = -ESHUTDOWN;
+ } else if (event_trb != td->last_trb) {
+ xhci_warn(xhci, "WARN: Success on ctrl data TRB without IOC set??\n");
+ status = -ESHUTDOWN;
+ } else {
+ xhci_dbg(xhci, "Successful control transfer!\n");
+ status = 0;
+ }
+ break;
+ case COMP_SHORT_TX:
+ xhci_warn(xhci, "WARN: short transfer on control ep\n");
+ status = -EREMOTEIO;
+ break;
+ default:
+ /* Others already handled above */
+ break;
+ }
+ /*
+ * Did we transfer any data, despite the errors that might have
+ * happened? I.e. did we get past the setup stage?
+ */
+ if (event_trb != ep_ring->dequeue) {
+ /* The event was for the status stage */
+ if (event_trb == td->last_trb) {
+ td->urb->actual_length =
+ td->urb->transfer_buffer_length;
+ } else {
+ /* Maybe the event was for the data stage? */
+ if (GET_COMP_CODE(event->transfer_len) != COMP_STOP_INVAL)
+ /* We didn't stop on a link TRB in the middle */
+ td->urb->actual_length =
+ td->urb->transfer_buffer_length -
+ TRB_LEN(event->transfer_len);
+ }
+ }
+ } else {
+ switch (GET_COMP_CODE(event->transfer_len)) {
+ case COMP_SUCCESS:
+ /* Double check that the HW transferred everything. */
+ if (event_trb != td->last_trb) {
+ xhci_warn(xhci, "WARN Successful completion "
+ "on short TX\n");
+ if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
+ status = -EREMOTEIO;
+ else
+ status = 0;
+ } else {
+ xhci_dbg(xhci, "Successful bulk transfer!\n");
+ status = 0;
+ }
+ break;
+ case COMP_SHORT_TX:
+ if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
+ status = -EREMOTEIO;
+ else
+ status = 0;
+ break;
+ default:
+ /* Others already handled above */
+ break;
+ }
+ dev_dbg(&td->urb->dev->dev,
+ "ep %#x - asked for %d bytes, "
+ "%d bytes untransferred\n",
+ td->urb->ep->desc.bEndpointAddress,
+ td->urb->transfer_buffer_length,
+ TRB_LEN(event->transfer_len));
+ /* Fast path - was this the last TRB in the TD for this URB? */
+ if (event_trb == td->last_trb) {
+ if (TRB_LEN(event->transfer_len) != 0) {
+ /* urb->actual_length is unsigned, so check for
+ * underflow before subtracting.
+ */
+ if (TRB_LEN(event->transfer_len) >
+ td->urb->transfer_buffer_length) {
+ xhci_warn(xhci, "HC gave bad length "
+ "of %d bytes left\n",
+ TRB_LEN(event->transfer_len));
+ td->urb->actual_length = 0;
+ } else {
+ td->urb->actual_length =
+ td->urb->transfer_buffer_length -
+ TRB_LEN(event->transfer_len);
+ }
+ if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
+ status = -EREMOTEIO;
+ else
+ status = 0;
+ } else {
+ td->urb->actual_length = td->urb->transfer_buffer_length;
+ /* Ignore a short packet completion if the
+ * untransferred length was zero.
+ */
+ status = 0;
+ }
+ } else {
+ /* Slow path - walk the list, starting from the dequeue
+ * pointer, to get the actual length transferred.
+ */
+ union xhci_trb *cur_trb;
+ struct xhci_segment *cur_seg;
+
+ td->urb->actual_length = 0;
+ for (cur_trb = ep_ring->dequeue, cur_seg = ep_ring->deq_seg;
+ cur_trb != event_trb;
+ next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
+ if (TRB_TYPE(cur_trb->generic.field[3]) != TRB_TR_NOOP &&
+ TRB_TYPE(cur_trb->generic.field[3]) != TRB_LINK)
+ td->urb->actual_length +=
+ TRB_LEN(cur_trb->generic.field[2]);
+ }
+ /* If the ring didn't stop on a Link or No-op TRB, add
+ * in the actual bytes transferred from the Normal TRB
+ */
+ if (GET_COMP_CODE(event->transfer_len) != COMP_STOP_INVAL)
+ td->urb->actual_length +=
+ TRB_LEN(cur_trb->generic.field[2]) -
+ TRB_LEN(event->transfer_len);
+ }
+ }
+ /* The Endpoint Stop Command completion will take care of
+ * any stopped TDs. A stopped TD may be restarted, so don't update the
+ * ring dequeue pointer or take this TD off any lists yet.
+ */
+ if (GET_COMP_CODE(event->transfer_len) == COMP_STOP_INVAL ||
+ GET_COMP_CODE(event->transfer_len) == COMP_STOP) {
+ ep_ring->stopped_td = td;
+ ep_ring->stopped_trb = event_trb;
+ } else {
+ /* Update ring dequeue pointer */
+ while (ep_ring->dequeue != td->last_trb)
+ inc_deq(xhci, ep_ring, false);
+ inc_deq(xhci, ep_ring, false);
+
+ /* Clean up the endpoint's TD list */
+ urb = td->urb;
+ list_del(&td->td_list);
+ /* Was this TD slated to be cancelled but completed anyway? */
+ if (!list_empty(&td->cancelled_td_list)) {
+ list_del(&td->cancelled_td_list);
+ ep_ring->cancels_pending--;
+ }
+ kfree(td);
+ urb->hcpriv = NULL;
+ }
+cleanup:
+ inc_deq(xhci, xhci->event_ring, true);
+ xhci_set_hc_event_deq(xhci);
+
+ /* FIXME for multi-TD URBs (which have buffers bigger than 64MB) */
+ if (urb) {
+ usb_hcd_unlink_urb_from_ep(xhci_to_hcd(xhci), urb);
+ spin_unlock(&xhci->lock);
+ usb_hcd_giveback_urb(xhci_to_hcd(xhci), urb, status);
+ spin_lock(&xhci->lock);
+ }
+ return 0;
+}
+
+/*
+ * This function handles all OS-owned events on the event ring. It may drop
+ * xhci->lock between event processing (e.g. to pass up port status changes).
+ */
+void xhci_handle_event(struct xhci_hcd *xhci)
+{
+ union xhci_trb *event;
+ int update_ptrs = 1;
+ int ret;
+
+ if (!xhci->event_ring || !xhci->event_ring->dequeue) {
+ xhci->error_bitmask |= 1 << 1;
+ return;
+ }
+
+ event = xhci->event_ring->dequeue;
+ /* Does the HC or OS own the TRB? */
+ if ((event->event_cmd.flags & TRB_CYCLE) !=
+ xhci->event_ring->cycle_state) {
+ xhci->error_bitmask |= 1 << 2;
+ return;
+ }
+
+ /* FIXME: Handle more event types. */
+ switch ((event->event_cmd.flags & TRB_TYPE_BITMASK)) {
+ case TRB_TYPE(TRB_COMPLETION):
+ handle_cmd_completion(xhci, &event->event_cmd);
+ break;
+ case TRB_TYPE(TRB_PORT_STATUS):
+ handle_port_status(xhci, event);
+ update_ptrs = 0;
+ break;
+ case TRB_TYPE(TRB_TRANSFER):
+ ret = handle_tx_event(xhci, &event->trans_event);
+ if (ret < 0)
+ xhci->error_bitmask |= 1 << 9;
+ else
+ update_ptrs = 0;
+ break;
+ default:
+ xhci->error_bitmask |= 1 << 3;
+ }
+
+ if (update_ptrs) {
+ /* Update SW and HC event ring dequeue pointer */
+ inc_deq(xhci, xhci->event_ring, true);
+ xhci_set_hc_event_deq(xhci);
+ }
+ /* Are there more items on the event ring? */
+ xhci_handle_event(xhci);
+}
+
+/**** Endpoint Ring Operations ****/
+
+/*
+ * Generic function for queueing a TRB on a ring.
+ * The caller must have checked to make sure there's room on the ring.
+ */
+static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
+ bool consumer,
+ u32 field1, u32 field2, u32 field3, u32 field4)
+{
+ struct xhci_generic_trb *trb;
+
+ trb = &ring->enqueue->generic;
+ trb->field[0] = field1;
+ trb->field[1] = field2;
+ trb->field[2] = field3;
+ trb->field[3] = field4;
+ inc_enq(xhci, ring, consumer);
+}
+
+/*
+ * Does various checks on the endpoint ring, and makes it ready to queue num_trbs.
+ * FIXME allocate segments if the ring is full.
+ */
+static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
+ u32 ep_state, unsigned int num_trbs, gfp_t mem_flags)
+{
+ /* Make sure the endpoint has been added to xHC schedule */
+ xhci_dbg(xhci, "Endpoint state = 0x%x\n", ep_state);
+ switch (ep_state) {
+ case EP_STATE_DISABLED:
+ /*
+ * USB core changed config/interfaces without notifying us,
+ * or hardware is reporting the wrong state.
+ */
+ xhci_warn(xhci, "WARN urb submitted to disabled ep\n");
+ return -ENOENT;
+ case EP_STATE_HALTED:
+ case EP_STATE_ERROR:
+ xhci_warn(xhci, "WARN waiting for halt or error on ep "
+ "to be cleared\n");
+ /* FIXME event handling code for error needs to clear it */
+ /* XXX not sure if this should be -ENOENT or not */
+ return -EINVAL;
+ case EP_STATE_STOPPED:
+ case EP_STATE_RUNNING:
+ break;
+ default:
+ xhci_err(xhci, "ERROR unknown endpoint state for ep\n");
+ /*
+ * FIXME issue Configure Endpoint command to try to get the HC
+ * back into a known state.
+ */
+ return -EINVAL;
+ }
+ if (!room_on_ring(xhci, ep_ring, num_trbs)) {
+ /* FIXME allocate more room */
+ xhci_err(xhci, "ERROR no room on ep ring\n");
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static int prepare_transfer(struct xhci_hcd *xhci,
+ struct xhci_virt_device *xdev,
+ unsigned int ep_index,
+ unsigned int num_trbs,
+ struct urb *urb,
+ struct xhci_td **td,
+ gfp_t mem_flags)
+{
+ int ret;
+
+ ret = prepare_ring(xhci, xdev->ep_rings[ep_index],
+ xdev->out_ctx->ep[ep_index].ep_info & EP_STATE_MASK,
+ num_trbs, mem_flags);
+ if (ret)
+ return ret;
+ *td = kzalloc(sizeof(struct xhci_td), mem_flags);
+ if (!*td)
+ return -ENOMEM;
+ INIT_LIST_HEAD(&(*td)->td_list);
+ INIT_LIST_HEAD(&(*td)->cancelled_td_list);
+
+ ret = usb_hcd_link_urb_to_ep(xhci_to_hcd(xhci), urb);
+ if (unlikely(ret)) {
+ kfree(*td);
+ return ret;
+ }
+
+ (*td)->urb = urb;
+ urb->hcpriv = (void *) (*td);
+ /* Add this TD to the tail of the endpoint ring's TD list */
+ list_add_tail(&(*td)->td_list, &xdev->ep_rings[ep_index]->td_list);
+ (*td)->start_seg = xdev->ep_rings[ep_index]->enq_seg;
+ (*td)->first_trb = xdev->ep_rings[ep_index]->enqueue;
+
+ return 0;
+}
+
+static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
+{
+ int num_sgs, num_trbs, running_total, temp, i;
+ struct scatterlist *sg;
+
+ sg = NULL;
+ num_sgs = urb->num_sgs;
+ temp = urb->transfer_buffer_length;
+
+ xhci_dbg(xhci, "count sg list trbs: \n");
+ num_trbs = 0;
+ for_each_sg(urb->sg->sg, sg, num_sgs, i) {
+ unsigned int previous_total_trbs = num_trbs;
+ unsigned int len = sg_dma_len(sg);
+
+ /* Scatter gather list entries may cross 64KB boundaries */
+ running_total = TRB_MAX_BUFF_SIZE -
+ (sg_dma_address(sg) & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+ if (running_total != 0)
+ num_trbs++;
+
+ /* How many more 64KB chunks to transfer, how many more TRBs? */
+ while (running_total < sg_dma_len(sg)) {
+ num_trbs++;
+ running_total += TRB_MAX_BUFF_SIZE;
+ }
+ xhci_dbg(xhci, " sg #%d: dma = %#llx, len = %#x (%d), num_trbs = %d\n",
+ i, (unsigned long long)sg_dma_address(sg),
+ len, len, num_trbs - previous_total_trbs);
+
+ len = min_t(int, len, temp);
+ temp -= len;
+ if (temp == 0)
+ break;
+ }
+ xhci_dbg(xhci, "\n");
+ if (!in_interrupt())
+ dev_dbg(&urb->dev->dev, "ep %#x - urb len = %d, sglist used, num_trbs = %d\n",
+ urb->ep->desc.bEndpointAddress,
+ urb->transfer_buffer_length,
+ num_trbs);
+ return num_trbs;
+}
+
+static void check_trb_math(struct urb *urb, int num_trbs, int running_total)
+{
+ if (num_trbs != 0)
+ dev_dbg(&urb->dev->dev, "%s - ep %#x - Miscalculated number of "
+ "TRBs, %d left\n", __func__,
+ urb->ep->desc.bEndpointAddress, num_trbs);
+ if (running_total != urb->transfer_buffer_length)
+ dev_dbg(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
+ "queued %#x (%d), asked for %#x (%d)\n",
+ __func__,
+ urb->ep->desc.bEndpointAddress,
+ running_total, running_total,
+ urb->transfer_buffer_length,
+ urb->transfer_buffer_length);
+}
+
+static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
+ unsigned int ep_index, int start_cycle,
+ struct xhci_generic_trb *start_trb, struct xhci_td *td)
+{
+ /*
+ * Pass all the TRBs to the hardware at once and make sure this write
+ * isn't reordered.
+ */
+ wmb();
+ start_trb->field[3] |= start_cycle;
+ ring_ep_doorbell(xhci, slot_id, ep_index);
+}
+
+static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+ struct urb *urb, int slot_id, unsigned int ep_index)
+{
+ struct xhci_ring *ep_ring;
+ unsigned int num_trbs;
+ struct xhci_td *td;
+ struct scatterlist *sg;
+ int num_sgs;
+ int trb_buff_len, this_sg_len, running_total;
+ bool first_trb;
+ u64 addr;
+
+ struct xhci_generic_trb *start_trb;
+ int start_cycle;
+
+ ep_ring = xhci->devs[slot_id]->ep_rings[ep_index];
+ num_trbs = count_sg_trbs_needed(xhci, urb);
+ num_sgs = urb->num_sgs;
+
+ trb_buff_len = prepare_transfer(xhci, xhci->devs[slot_id],
+ ep_index, num_trbs, urb, &td, mem_flags);
+ if (trb_buff_len < 0)
+ return trb_buff_len;
+ /*
+ * Don't give the first TRB to the hardware (by toggling the cycle bit)
+ * until we've finished creating all the other TRBs. The ring's cycle
+ * state may change as we enqueue the other TRBs, so save it too.
+ */
+ start_trb = &ep_ring->enqueue->generic;
+ start_cycle = ep_ring->cycle_state;
+
+ running_total = 0;
+ /*
+ * How much data is in the first TRB?
+ *
+ * There are three forces at work for TRB buffer pointers and lengths:
+ * 1. We don't want to walk off the end of this sg-list entry buffer.
+ * 2. The transfer length that the driver requested may be smaller than
+ * the amount of memory allocated for this scatter-gather list.
+ * 3. TRBs buffers can't cross 64KB boundaries.
+ */
+ sg = urb->sg->sg;
+ addr = (u64) sg_dma_address(sg);
+ this_sg_len = sg_dma_len(sg);
+ trb_buff_len = TRB_MAX_BUFF_SIZE -
+ (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+ trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
+ if (trb_buff_len > urb->transfer_buffer_length)
+ trb_buff_len = urb->transfer_buffer_length;
+ xhci_dbg(xhci, "First length to xfer from 1st sglist entry = %u\n",
+ trb_buff_len);
+
+ first_trb = true;
+ /* Queue the first TRB, even if it's zero-length */
+ do {
+ u32 field = 0;
+
+ /* Don't change the cycle bit of the first TRB until later */
+ if (first_trb)
+ first_trb = false;
+ else
+ field |= ep_ring->cycle_state;
+
+ /* Chain all the TRBs together; clear the chain bit in the last
+ * TRB to indicate it's the last TRB in the chain.
+ */
+ if (num_trbs > 1) {
+ field |= TRB_CHAIN;
+ } else {
+ /* FIXME - add check for ZERO_PACKET flag before this */
+ td->last_trb = ep_ring->enqueue;
+ field |= TRB_IOC;
+ }
+ xhci_dbg(xhci, " sg entry: dma = %#x, len = %#x (%d), "
+ "64KB boundary at %#x, end dma = %#x\n",
+ (unsigned int) addr, trb_buff_len, trb_buff_len,
+ (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
+ (unsigned int) addr + trb_buff_len);
+ if (TRB_MAX_BUFF_SIZE -
+ (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1)) < trb_buff_len) {
+ xhci_warn(xhci, "WARN: sg dma xfer crosses 64KB boundaries!\n");
+ xhci_dbg(xhci, "Next boundary at %#x, end dma = %#x\n",
+ (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
+ (unsigned int) addr + trb_buff_len);
+ }
+ queue_trb(xhci, ep_ring, false,
+ (u32) addr,
+ (u32) ((u64) addr >> 32),
+ TRB_LEN(trb_buff_len) | TRB_INTR_TARGET(0),
+ /* We always want to know if the TRB was short,
+ * or we won't get an event when it completes.
+ * (Unless we use event data TRBs, which are a
+ * waste of space and HC resources.)
+ */
+ field | TRB_ISP | TRB_TYPE(TRB_NORMAL));
+ --num_trbs;
+ running_total += trb_buff_len;
+
+ /* Calculate length for next transfer --
+ * Are we done queueing all the TRBs for this sg entry?
+ */
+ this_sg_len -= trb_buff_len;
+ if (this_sg_len == 0) {
+ --num_sgs;
+ if (num_sgs == 0)
+ break;
+ sg = sg_next(sg);
+ addr = (u64) sg_dma_address(sg);
+ this_sg_len = sg_dma_len(sg);
+ } else {
+ addr += trb_buff_len;
+ }
+
+ trb_buff_len = TRB_MAX_BUFF_SIZE -
+ (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+ trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
+ if (running_total + trb_buff_len > urb->transfer_buffer_length)
+ trb_buff_len =
+ urb->transfer_buffer_length - running_total;
+ } while (running_total < urb->transfer_buffer_length);
+
+ check_trb_math(urb, num_trbs, running_total);
+ giveback_first_trb(xhci, slot_id, ep_index, start_cycle, start_trb, td);
+ return 0;
+}
+
+/* This is very similar to what ehci-q.c qtd_fill() does */
+int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+ struct urb *urb, int slot_id, unsigned int ep_index)
+{
+ struct xhci_ring *ep_ring;
+ struct xhci_td *td;
+ int num_trbs;
+ struct xhci_generic_trb *start_trb;
+ bool first_trb;
+ int start_cycle;
+ u32 field;
+
+ int running_total, trb_buff_len, ret;
+ u64 addr;
+
+ if (urb->sg)
+ return queue_bulk_sg_tx(xhci, mem_flags, urb, slot_id, ep_index);
+
+ ep_ring = xhci->devs[slot_id]->ep_rings[ep_index];
+
+ num_trbs = 0;
+ /* How much data is (potentially) left before the 64KB boundary? */
+ running_total = TRB_MAX_BUFF_SIZE -
+ (urb->transfer_dma & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+
+ /* If there's some data on this 64KB chunk, or we have to send a
+ * zero-length transfer, we need at least one TRB
+ */
+ if (running_total != 0 || urb->transfer_buffer_length == 0)
+ num_trbs++;
+ /* How many more 64KB chunks to transfer, how many more TRBs? */
+ while (running_total < urb->transfer_buffer_length) {
+ num_trbs++;
+ running_total += TRB_MAX_BUFF_SIZE;
+ }
+ /* FIXME: this doesn't deal with URB_ZERO_PACKET - need one more */
+
+ if (!in_interrupt())
+ dev_dbg(&urb->dev->dev, "ep %#x - urb len = %#x (%d), addr = %#llx, num_trbs = %d\n",
+ urb->ep->desc.bEndpointAddress,
+ urb->transfer_buffer_length,
+ urb->transfer_buffer_length,
+ (unsigned long long)urb->transfer_dma,
+ num_trbs);
+
+ ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
+ num_trbs, urb, &td, mem_flags);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Don't give the first TRB to the hardware (by toggling the cycle bit)
+ * until we've finished creating all the other TRBs. The ring's cycle
+ * state may change as we enqueue the other TRBs, so save it too.
+ */
+ start_trb = &ep_ring->enqueue->generic;
+ start_cycle = ep_ring->cycle_state;
+
+ running_total = 0;
+ /* How much data is in the first TRB? */
+ addr = (u64) urb->transfer_dma;
+ trb_buff_len = TRB_MAX_BUFF_SIZE -
+ (urb->transfer_dma & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+ if (urb->transfer_buffer_length < trb_buff_len)
+ trb_buff_len = urb->transfer_buffer_length;
+
+ first_trb = true;
+
+ /* Queue the first TRB, even if it's zero-length */
+ do {
+ field = 0;
+
+ /* Don't change the cycle bit of the first TRB until later */
+ if (first_trb)
+ first_trb = false;
+ else
+ field |= ep_ring->cycle_state;
+
+ /* Chain all the TRBs together; clear the chain bit in the last
+ * TRB to indicate it's the last TRB in the chain.
+ */
+ if (num_trbs > 1) {
+ field |= TRB_CHAIN;
+ } else {
+ /* FIXME - add check for ZERO_PACKET flag before this */
+ td->last_trb = ep_ring->enqueue;
+ field |= TRB_IOC;
+ }
+ queue_trb(xhci, ep_ring, false,
+ (u32) addr,
+ (u32) ((u64) addr >> 32),
+ TRB_LEN(trb_buff_len) | TRB_INTR_TARGET(0),
+ /* We always want to know if the TRB was short,
+ * or we won't get an event when it completes.
+ * (Unless we use event data TRBs, which are a
+ * waste of space and HC resources.)
+ */
+ field | TRB_ISP | TRB_TYPE(TRB_NORMAL));
+ --num_trbs;
+ running_total += trb_buff_len;
+
+ /* Calculate length for next transfer */
+ addr += trb_buff_len;
+ trb_buff_len = urb->transfer_buffer_length - running_total;
+ if (trb_buff_len > TRB_MAX_BUFF_SIZE)
+ trb_buff_len = TRB_MAX_BUFF_SIZE;
+ } while (running_total < urb->transfer_buffer_length);
+
+ check_trb_math(urb, num_trbs, running_total);
+ giveback_first_trb(xhci, slot_id, ep_index, start_cycle, start_trb, td);
+ return 0;
+}
+
+/* Caller must have locked xhci->lock */
+int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+ struct urb *urb, int slot_id, unsigned int ep_index)
+{
+ struct xhci_ring *ep_ring;
+ int num_trbs;
+ int ret;
+ struct usb_ctrlrequest *setup;
+ struct xhci_generic_trb *start_trb;
+ int start_cycle;
+ u32 field;
+ struct xhci_td *td;
+
+ ep_ring = xhci->devs[slot_id]->ep_rings[ep_index];
+
+ /*
+ * Need to copy setup packet into setup TRB, so we can't use the setup
+ * DMA address.
+ */
+ if (!urb->setup_packet)
+ return -EINVAL;
+
+ if (!in_interrupt())
+ xhci_dbg(xhci, "Queueing ctrl tx for slot id %d, ep %d\n",
+ slot_id, ep_index);
+ /* 1 TRB for setup, 1 for status */
+ num_trbs = 2;
+ /*
+ * Don't need to check if we need additional event data and normal TRBs,
+ * since data in control transfers will never get bigger than 64KB
+ * (wLength is a 16-bit field).
+ * XXX: can we get a buffer that crosses 64KB boundaries?
+ */
+ if (urb->transfer_buffer_length > 0)
+ num_trbs++;
+ ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index, num_trbs,
+ urb, &td, mem_flags);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Don't give the first TRB to the hardware (by toggling the cycle bit)
+ * until we've finished creating all the other TRBs. The ring's cycle
+ * state may change as we enqueue the other TRBs, so save it too.
+ */
+ start_trb = &ep_ring->enqueue->generic;
+ start_cycle = ep_ring->cycle_state;
+
+ /* Queue setup TRB - see section 6.4.1.2.1 */
+ /* FIXME better way to translate setup_packet into two u32 fields? */
+ setup = (struct usb_ctrlrequest *) urb->setup_packet;
+ queue_trb(xhci, ep_ring, false,
+ /* FIXME endianness is probably going to bite my ass here. */
+ setup->bRequestType | setup->bRequest << 8 | setup->wValue << 16,
+ setup->wIndex | setup->wLength << 16,
+ TRB_LEN(8) | TRB_INTR_TARGET(0),
+ /* Immediate data in pointer */
+ TRB_IDT | TRB_TYPE(TRB_SETUP));
+
+ /* If there's data, queue data TRBs */
+ field = 0;
+ if (urb->transfer_buffer_length > 0) {
+ if (setup->bRequestType & USB_DIR_IN)
+ field |= TRB_DIR_IN;
+ queue_trb(xhci, ep_ring, false,
+ lower_32_bits(urb->transfer_dma),
+ upper_32_bits(urb->transfer_dma),
+ TRB_LEN(urb->transfer_buffer_length) | TRB_INTR_TARGET(0),
+ /* Event on short tx */
+ field | TRB_ISP | TRB_TYPE(TRB_DATA) | ep_ring->cycle_state);
+ }
+
+ /* Save the DMA address of the last TRB in the TD */
+ td->last_trb = ep_ring->enqueue;
+
+ /* Queue status TRB - see Table 7 and sections 4.11.2.2 and 6.4.1.2.3 */
+ /* If the device sent data, the status stage is an OUT transfer */
+ if (urb->transfer_buffer_length > 0 && setup->bRequestType & USB_DIR_IN)
+ field = 0;
+ else
+ field = TRB_DIR_IN;
+ queue_trb(xhci, ep_ring, false,
+ 0,
+ 0,
+ TRB_INTR_TARGET(0),
+ /* Event on completion */
+ field | TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state);
+
+ giveback_first_trb(xhci, slot_id, ep_index, start_cycle, start_trb, td);
+ return 0;
+}
+
+/**** Command Ring Operations ****/
+
+/* Generic function for queueing a command TRB on the command ring */
+static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2, u32 field3, u32 field4)
+{
+ if (!room_on_ring(xhci, xhci->cmd_ring, 1)) {
+ if (!in_interrupt())
+ xhci_err(xhci, "ERR: No room for command on command ring\n");
+ return -ENOMEM;
+ }
+ queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3,
+ field4 | xhci->cmd_ring->cycle_state);
+ return 0;
+}
+
+/* Queue a no-op command on the command ring */
+static int queue_cmd_noop(struct xhci_hcd *xhci)
+{
+ return queue_command(xhci, 0, 0, 0, TRB_TYPE(TRB_CMD_NOOP));
+}
+
+/*
+ * Place a no-op command on the command ring to test the command and
+ * event ring.
+ */
+void *xhci_setup_one_noop(struct xhci_hcd *xhci)
+{
+ if (queue_cmd_noop(xhci) < 0)
+ return NULL;
+ xhci->noops_submitted++;
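+ /* Hand back xhci_ring_cmd_db so the caller can ring the
+ * command ring doorbell once it's ready for the no-op to run.
+ */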
+ return xhci_ring_cmd_db;
+}
+
+/* Queue a slot enable or disable request on the command ring */
+int xhci_queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id)
+{
+ return queue_command(xhci, 0, 0, 0,
+ TRB_TYPE(trb_type) | SLOT_ID_FOR_TRB(slot_id));
+}
+
+/* Queue an address device command TRB */
+int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
+ u32 slot_id)
+{
+ return queue_command(xhci, in_ctx_ptr, 0, 0,
+ TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id));
+}
+
+/* Queue a configure endpoint command TRB */
+int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
+ u32 slot_id)
+{
+ return queue_command(xhci, in_ctx_ptr, 0, 0,
+ TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id));
+}
+
+int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, int slot_id,
+ unsigned int ep_index)
+{
+ u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
+ u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
+ u32 type = TRB_TYPE(TRB_STOP_RING);
+
+ return queue_command(xhci, 0, 0, 0,
+ trb_slot_id | trb_ep_index | type);
+}
+
+/* Set Transfer Ring Dequeue Pointer command.
+ * This should not be used for endpoints that have streams enabled.
+ */
+static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
+ unsigned int ep_index, struct xhci_segment *deq_seg,
+ union xhci_trb *deq_ptr, u32 cycle_state)
+{
+ dma_addr_t addr;
+ u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
+ u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
+ u32 type = TRB_TYPE(TRB_SET_DEQ);
+
+ addr = xhci_trb_virt_to_dma(deq_seg, deq_ptr);
+ if (addr == 0) {
+ xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
+ xhci_warn(xhci, "WARN deq seg = %p, deq pt = %p\n",
+ deq_seg, deq_ptr);
+ }
+ return queue_command(xhci, (u32) addr | cycle_state, 0, 0,
+ trb_slot_id | trb_ep_index | type);
+}
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
new file mode 100644
index 00000000000..8936eeb5588
--- /dev/null
+++ b/drivers/usb/host/xhci.h
@@ -0,0 +1,1157 @@
+/*
+ * xHCI host controller driver
+ *
+ * Copyright (C) 2008 Intel Corp.
+ *
+ * Author: Sarah Sharp
+ * Some code borrowed from the Linux EHCI driver.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef __LINUX_XHCI_HCD_H
+#define __LINUX_XHCI_HCD_H
+
+#include <linux/usb.h>
+#include <linux/timer.h>
+
+#include "../core/hcd.h"
+/* Code sharing between pci-quirks and xhci hcd */
+#include "xhci-ext-caps.h"
+
+/* xHCI PCI Configuration Registers */
+#define XHCI_SBRN_OFFSET (0x60)
+
+/* Max number of USB devices for any host controller - limit in section 6.1 */
+#define MAX_HC_SLOTS 256
+/* Section 5.3.3 - MaxPorts */
+#define MAX_HC_PORTS 127
+
+/*
+ * xHCI register interface.
+ * This corresponds to the eXtensible Host Controller Interface (xHCI)
+ * Revision 0.95 specification
+ *
+ * Registers should always be accessed with double word or quad word accesses.
+ *
+ * Some xHCI implementations may support 64-bit address pointers. Registers
+ * with 64-bit address pointers should be written to with dword accesses by
+ * writing the low dword first (ptr[0]), then the high dword (ptr[1]) second.
+ * xHCI implementations that do not support 64-bit address pointers will ignore
+ * the high dword, and write order is irrelevant.
+ */
+
+/**
+ * struct xhci_cap_regs - xHCI Host Controller Capability Registers.
+ * @hc_capbase: length of the capabilities register and HC version number
+ * @hcs_params1: HCSPARAMS1 - Structural Parameters 1
+ * @hcs_params2: HCSPARAMS2 - Structural Parameters 2
+ * @hcs_params3: HCSPARAMS3 - Structural Parameters 3
+ * @hcc_params: HCCPARAMS - Capability Parameters
+ * @db_off: DBOFF - Doorbell array offset
+ * @run_regs_off: RTSOFF - Runtime register space offset
+ */
+struct xhci_cap_regs {
+ u32 hc_capbase;
+ u32 hcs_params1;
+ u32 hcs_params2;
+ u32 hcs_params3;
+ u32 hcc_params;
+ u32 db_off;
+ u32 run_regs_off;
+ /* Reserved up to (CAPLENGTH - 0x1C) */
+};
+
+/* hc_capbase bitmasks */
+/* bits 7:0 - how long is the Capabilities register */
+#define HC_LENGTH(p) XHCI_HC_LENGTH(p)
+/* bits 31:16 */
+#define HC_VERSION(p) (((p) >> 16) & 0xffff)
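+
+/*
+ * Illustrative sketch (an assumption, not part of the driver): the
+ * operational register set starts CAPLENGTH bytes past the start of the
+ * capability registers, so it could be located like this:
+ */
+static inline void __iomem *xhci_find_op_regs_sketch(void __iomem *cap_base)
+{
+ return cap_base + HC_LENGTH(readl(cap_base));
+}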
+
+/* HCSPARAMS1 - hcs_params1 - bitmasks */
+/* bits 0:7, Max Device Slots */
+#define HCS_MAX_SLOTS(p) (((p) >> 0) & 0xff)
+#define HCS_SLOTS_MASK 0xff
+/* bits 8:18, Max Interrupters */
+#define HCS_MAX_INTRS(p) (((p) >> 8) & 0x7ff)
+/* bits 24:31, Max Ports - max value is 0x7F = 127 ports */
+#define HCS_MAX_PORTS(p) (((p) >> 24) & 0x7f)
+
+/* HCSPARAMS2 - hcs_params2 - bitmasks */
+/* bits 0:3, frames or uframes that SW needs to queue transactions
+ * ahead of the HW to meet periodic deadlines */
+#define HCS_IST(p) (((p) >> 0) & 0xf)
+/* bits 4:7, max number of Event Ring segments */
+#define HCS_ERST_MAX(p) (((p) >> 4) & 0xf)
+/* bit 26 Scratchpad restore - for save/restore HW state - not used yet */
+/* bits 27:31 number of Scratchpad buffers SW must allocate for the HW */
+
+/* HCSPARAMS3 - hcs_params3 - bitmasks */
+/* bits 0:7, Max U1 to U0 latency for the roothub ports */
+#define HCS_U1_LATENCY(p) (((p) >> 0) & 0xff)
+/* bits 16:31, Max U2 to U0 latency for the roothub ports */
+#define HCS_U2_LATENCY(p) (((p) >> 16) & 0xffff)
+
+/* HCCPARAMS - hcc_params - bitmasks */
+/* true: HC can use 64-bit address pointers */
+#define HCC_64BIT_ADDR(p) ((p) & (1 << 0))
+/* true: HC can do bandwidth negotiation */
+#define HCC_BANDWIDTH_NEG(p) ((p) & (1 << 1))
+/* true: HC uses 64-byte Device Context structures
+ * FIXME 64-byte context structures aren't supported yet.
+ */
+#define HCC_64BYTE_CONTEXT(p) ((p) & (1 << 2))
+/* true: HC has port power switches */
+#define HCC_PPC(p) ((p) & (1 << 3))
+/* true: HC has port indicators */
+#define HCS_INDICATOR(p) ((p) & (1 << 4))
+/* true: HC has Light HC Reset Capability */
+#define HCC_LIGHT_RESET(p) ((p) & (1 << 5))
+/* true: HC supports latency tolerance messaging */
+#define HCC_LTC(p) ((p) & (1 << 6))
+/* true: no secondary Stream ID Support */
+#define HCC_NSS(p) ((p) & (1 << 7))
+/* Max size for Primary Stream Arrays - 2^(n+1), where n is bits 12:15 */
+#define HCC_MAX_PSA(p) (1 << ((((p) >> 12) & 0xf) + 1))
+/* Extended Capabilities pointer from PCI base - section 5.3.6 */
+#define HCC_EXT_CAPS(p) XHCI_HCC_EXT_CAPS(p)
+
+/* db_off bitmask - bits 0:1 reserved */
+#define DBOFF_MASK (~0x3)
+
+/* run_regs_off bitmask - bits 0:4 reserved */
+#define RTSOFF_MASK (~0x1f)
+
+
+/* Number of registers per port */
+#define NUM_PORT_REGS 4
+
+/**
+ * struct xhci_op_regs - xHCI Host Controller Operational Registers.
+ * @command: USBCMD - xHC command register
+ * @status: USBSTS - xHC status register
+ * @page_size: This indicates the page size that the host controller
+ * supports. If bit n is set, the HC supports a page size
+ * of 2^(n+12), up to a 128MB page size.
+ * 4K is the minimum page size.
+ * @cmd_ring: CRP - 64-bit Command Ring Pointer
+ * @dcbaa_ptr: DCBAAP - 64-bit Device Context Base Address Array Pointer
+ * @config_reg: CONFIG - Configure Register
+ * @port_status_base: PORTSCn - base address for Port Status and Control
+ * Each port has a Port Status and Control register,
+ * followed by a Port Power Management Status and Control
+ * register, a Port Link Info register, and a reserved
+ * register.
+ * @port_power_base: PORTPMSCn - base address for
+ * Port Power Management Status and Control
+ * @port_link_base: PORTLIn - base address for Port Link Info (current
+ * Link PM state and control) for USB 2.1 and USB 3.0
+ * devices.
+ */
+struct xhci_op_regs {
+ u32 command;
+ u32 status;
+ u32 page_size;
+ u32 reserved1;
+ u32 reserved2;
+ u32 dev_notification;
+ u32 cmd_ring[2];
+ /* rsvd: offset 0x20-2F */
+ u32 reserved3[4];
+ u32 dcbaa_ptr[2];
+ u32 config_reg;
+ /* rsvd: offset 0x3C-3FF */
+ u32 reserved4[241];
+ /* port 1 registers, which serve as a base address for other ports */
+ u32 port_status_base;
+ u32 port_power_base;
+ u32 port_link_base;
+ u32 reserved5;
+ /* registers for ports 2-255 */
+ u32 reserved6[NUM_PORT_REGS*254];
+};
+
+/* USBCMD - USB command - command bitmasks */
+/* start/stop HC execution - do not write unless HC is halted*/
+#define CMD_RUN XHCI_CMD_RUN
+/* Reset HC - resets internal HC state machine and all registers (except
+ * PCI config regs). HC does NOT drive a USB reset on the downstream ports.
+ * The xHCI driver must reinitialize the xHC after setting this bit.
+ */
+#define CMD_RESET (1 << 1)
+/* Event Interrupt Enable - a '1' allows interrupts from the host controller */
+#define CMD_EIE XHCI_CMD_EIE
+/* Host System Error Interrupt Enable - get out-of-band signal for HC errors */
+#define CMD_HSEIE XHCI_CMD_HSEIE
+/* bits 4:6 are reserved (and should be preserved on writes). */
+/* light reset (port status stays unchanged) - reset completed when this is 0 */
+#define CMD_LRESET (1 << 7)
+/* FIXME: ignoring host controller save/restore state for now. */
+#define CMD_CSS (1 << 8)
+#define CMD_CRS (1 << 9)
+/* Enable Wrap Event - '1' means xHC generates an event when MFINDEX wraps. */
+#define CMD_EWE XHCI_CMD_EWE
+/* MFINDEX power management - '1' means xHC can stop MFINDEX counter if all root
+ * hubs are in U3 (selective suspend), disconnect, disabled, or powered-off.
+ * '0' means the xHC can power it off if all ports are in the disconnect,
+ * disabled, or powered-off state.
+ */
+#define CMD_PM_INDEX (1 << 11)
+/* bits 12:31 are reserved (and should be preserved on writes). */
+
+/* USBSTS - USB status - status bitmasks */
+/* HC not running - set to 1 when run/stop bit is cleared. */
+#define STS_HALT XHCI_STS_HALT
+/* serious error, e.g. PCI parity error. The HC will clear the run/stop bit. */
+#define STS_FATAL (1 << 2)
+/* event interrupt - clear this prior to clearing any IP flags in IR set*/
+#define STS_EINT (1 << 3)
+/* port change detect */
+#define STS_PORT (1 << 4)
+/* bits 5:7 reserved and zeroed */
+/* save state status - '1' means xHC is saving state */
+#define STS_SAVE (1 << 8)
+/* restore state status - '1' means xHC is restoring state */
+#define STS_RESTORE (1 << 9)
+/* true: save or restore error */
+#define STS_SRE (1 << 10)
+/* true: Controller Not Ready to accept doorbell or op reg writes after reset */
+#define STS_CNR XHCI_STS_CNR
+/* true: internal Host Controller Error - SW needs to reset and reinitialize */
+#define STS_HCE (1 << 12)
+/* bits 13:31 reserved and should be preserved */
+
+/*
+ * DNCTRL - Device Notification Control Register - dev_notification bitmasks
+ * Generate a device notification event when the HC sees a transaction with a
+ * notification type that matches a bit set in this bit field.
+ */
+#define DEV_NOTE_MASK (0xffff)
+#define ENABLE_DEV_NOTE(x) (1 << (x))
+/* Most of the device notification types should only be used for debug.
+ * SW does need to pay attention to function wake notifications.
+ */
+#define DEV_NOTE_FWAKE ENABLE_DEV_NOTE(1)
+
+/* CRCR - Command Ring Control Register - cmd_ring bitmasks */
+/* bit 0 is the command ring cycle state */
+/* stop ring operation after completion of the currently executing command */
+#define CMD_RING_PAUSE (1 << 1)
+/* stop ring immediately - abort the currently executing command */
+#define CMD_RING_ABORT (1 << 2)
+/* true: command ring is running */
+#define CMD_RING_RUNNING (1 << 3)
+/* bits 4:5 reserved and should be preserved */
+/* Command Ring pointer - bit mask for the lower 32 bits. */
+#define CMD_RING_ADDR_MASK (0xffffffc0)
+
+/* CONFIG - Configure Register - config_reg bitmasks */
+/* bits 0:7 - maximum number of device slots enabled (NumSlotsEn) */
+#define MAX_DEVS(p) ((p) & 0xff)
+/* bits 8:31 - reserved and should be preserved */
+
+/* PORTSC - Port Status and Control Register - port_status_base bitmasks */
+/* true: device connected */
+#define PORT_CONNECT (1 << 0)
+/* true: port enabled */
+#define PORT_PE (1 << 1)
+/* bit 2 reserved and zeroed */
+/* true: port has an over-current condition */
+#define PORT_OC (1 << 3)
+/* true: port reset signaling asserted */
+#define PORT_RESET (1 << 4)
+/* Port Link State - bits 5:8
+ * A read gives the current link PM state of the port,
+ * a write with Link State Write Strobe set sets the link state.
+ */
+/* true: port has power (see HCC_PPC) */
+#define PORT_POWER (1 << 9)
+/* bits 10:13 indicate device speed:
+ * 0 - undefined speed - port hasn't been initialized by a reset yet
+ * 1 - full speed
+ * 2 - low speed
+ * 3 - high speed
+ * 4 - super speed
+ * 5-15 reserved
+ */
+#define DEV_SPEED_MASK (0xf << 10)
+#define XDEV_FS (0x1 << 10)
+#define XDEV_LS (0x2 << 10)
+#define XDEV_HS (0x3 << 10)
+#define XDEV_SS (0x4 << 10)
+#define DEV_UNDEFSPEED(p) (((p) & DEV_SPEED_MASK) == (0x0<<10))
+#define DEV_FULLSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_FS)
+#define DEV_LOWSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_LS)
+#define DEV_HIGHSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_HS)
+#define DEV_SUPERSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_SS)
+/* Bits 20:23 in the Slot Context are the speed for the device */
+#define SLOT_SPEED_FS (XDEV_FS << 10)
+#define SLOT_SPEED_LS (XDEV_LS << 10)
+#define SLOT_SPEED_HS (XDEV_HS << 10)
+#define SLOT_SPEED_SS (XDEV_SS << 10)
+/* Port Indicator Control */
+#define PORT_LED_OFF (0 << 14)
+#define PORT_LED_AMBER (1 << 14)
+#define PORT_LED_GREEN (2 << 14)
+#define PORT_LED_MASK (3 << 14)
+/* Port Link State Write Strobe - set this when changing link state */
+#define PORT_LINK_STROBE (1 << 16)
+/* true: connect status change */
+#define PORT_CSC (1 << 17)
+/* true: port enable change */
+#define PORT_PEC (1 << 18)
+/* true: warm reset for a USB 3.0 device is done. A "hot" reset puts the port
+ * into an enabled state, and the device into the default state. A "warm" reset
+ * also resets the link, forcing the device through the link training sequence.
+ * SW can also look at the Port Reset register to see when warm reset is done.
+ */
+#define PORT_WRC (1 << 19)
+/* true: over-current change */
+#define PORT_OCC (1 << 20)
+/* true: reset change - 1 to 0 transition of PORT_RESET */
+#define PORT_RC (1 << 21)
+/* port link status change - set on some port link state transitions:
+ * Transition                    Reason
+ * ----------------------------------------------------------------
+ * - U3 to Resume                Wakeup signaling from a device
+ * - Resume to Recovery to U0    USB 3.0 device resume
+ * - Resume to U0                USB 2.0 device resume
+ * - U3 to Recovery to U0        Software resume of USB 3.0 device complete
+ * - U3 to U0                    Software resume of USB 2.0 device complete
+ * - U2 to U0                    L1 resume of USB 2.1 device complete
+ * - U0 to U0 (???)              L1 entry rejection by USB 2.1 device
+ * - U0 to disabled              L1 entry error with USB 2.1 device
+ * - Any state to inactive       Error on USB 3.0 port
+ */
+#define PORT_PLC (1 << 22)
+/* port configure error change - port failed to configure its link partner */
+#define PORT_CEC (1 << 23)
+/* bit 24 reserved */
+/* wake on connect (enable) */
+#define PORT_WKCONN_E (1 << 25)
+/* wake on disconnect (enable) */
+#define PORT_WKDISC_E (1 << 26)
+/* wake on over-current (enable) */
+#define PORT_WKOC_E (1 << 27)
+/* bits 28:29 reserved */
+/* true: device is removable - for USB 3.0 roothub emulation */
+#define PORT_DEV_REMOVE (1 << 30)
+/* Initiate a warm port reset - complete when PORT_WRC is '1' */
+#define PORT_WR (1 << 31)
+
+/* Port Power Management Status and Control - port_power_base bitmasks */
+/* Inactivity timer value for transitions into U1, in microseconds.
+ * Timeout can be up to 127us. 0xFF means an infinite timeout.
+ */
+#define PORT_U1_TIMEOUT(p) ((p) & 0xff)
+/* Inactivity timer value for transitions into U2 */
+#define PORT_U2_TIMEOUT(p) (((p) & 0xff) << 8)
+/* Bits 24:31 for port testing */
+
+
+/**
+ * struct xhci_intr_reg - Interrupt Register Set
+ * @irq_pending: IMAN - Interrupt Management Register. Used to enable
+ * interrupts and check for pending interrupts.
+ * @irq_control: IMOD - Interrupt Moderation Register.
+ * Used to throttle interrupts.
+ * @erst_size: Number of segments in the Event Ring Segment Table (ERST).
+ * @erst_base: ERST base address.
+ * @erst_dequeue: Event ring dequeue pointer.
+ *
+ * Each interrupter (defined by a MSI-X vector) has an event ring and an Event
+ * Ring Segment Table (ERST) associated with it. The event ring is comprised of
+ * multiple segments of the same size. The HC places events on the ring and
+ * "updates the Cycle bit in the TRBs to indicate to software the current
+ * position of the Enqueue Pointer." The HCD (Linux) processes those events and
+ * updates the dequeue pointer.
+ */
+struct xhci_intr_reg {
+ u32 irq_pending;
+ u32 irq_control;
+ u32 erst_size;
+ u32 rsvd;
+ u32 erst_base[2];
+ u32 erst_dequeue[2];
+};
+
+/* irq_pending bitmasks */
+#define ER_IRQ_PENDING(p) ((p) & 0x1)
+/* bits 2:31 need to be preserved */
+/* THIS IS BUGGY - FIXME - IP IS WRITE 1 TO CLEAR */
+#define ER_IRQ_CLEAR(p) ((p) & 0xfffffffe)
+#define ER_IRQ_ENABLE(p) ((ER_IRQ_CLEAR(p)) | 0x2)
+#define ER_IRQ_DISABLE(p) ((ER_IRQ_CLEAR(p)) & ~(0x2))
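+
+/*
+ * Illustrative sketch (an assumption): enabling event ring interrupts is a
+ * read-modify-write of IMAN using the macros above, so that bits 2:31 are
+ * preserved as required.
+ */
+static inline void xhci_er_irq_enable_sketch(u32 __iomem *irq_pending)
+{
+ u32 temp = readl(irq_pending);
+
+ writel(ER_IRQ_ENABLE(temp), irq_pending);
+}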
+
+/* irq_control bitmasks */
+/* Minimum interval between interrupts (in 250ns intervals). The interval
+ * between interrupts will be longer if there are no events on the event ring.
+ * Default is 4000 (1 ms).
+ */
+#define ER_IRQ_INTERVAL_MASK (0xffff)
+/* Counter used to count down the time to the next interrupt - HW use only */
+#define ER_IRQ_COUNTER_MASK (0xffff << 16)
+
+/* erst_size bitmasks */
+/* Preserve bits 16:31 of erst_size */
+#define ERST_SIZE_MASK (0xffff << 16)
+
+/* erst_dequeue bitmasks */
+/* Dequeue ERST Segment Index (DESI) - Segment number (or alias)
+ * where the current dequeue pointer lies. This is an optional HW hint.
+ */
+#define ERST_DESI_MASK (0x7)
+/* Event Handler Busy (EHB) - is the event ring scheduled to be serviced by
+ * a work queue (or delayed service routine)?
+ */
+#define ERST_EHB (1 << 3)
+#define ERST_PTR_MASK (0xf)
+
+/**
+ * struct xhci_run_regs
+ * @microframe_index:
+ * MFINDEX - current microframe number
+ *
+ * Section 5.5 Host Controller Runtime Registers:
+ * "Software should read and write these registers using only Dword (32 bit)
+ * or larger accesses"
+ */
+struct xhci_run_regs {
+ u32 microframe_index;
+ u32 rsvd[7];
+ struct xhci_intr_reg ir_set[128];
+};
+
+/**
+ * struct doorbell_array
+ *
+ * Section 5.6
+ */
+struct xhci_doorbell_array {
+ u32 doorbell[256];
+};
+
+#define DB_TARGET_MASK 0xFFFFFF00
+#define DB_STREAM_ID_MASK 0x0000FFFF
+#define DB_TARGET_HOST 0x0
+#define DB_STREAM_ID_HOST 0x0
+#define DB_MASK (0xff << 8)
+
+/* Endpoint Target - bits 0:7 */
+#define EPI_TO_DB(p) (((p) + 1) & 0xff)
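+
+/*
+ * Illustrative sketch (an assumption, mirroring ring_ep_doorbell() in
+ * xhci-ring.c): ring a device slot's doorbell for one endpoint, preserving
+ * the DB_MASK bits and writing the endpoint target from EPI_TO_DB().
+ */
+static inline void xhci_ring_db_sketch(u32 __iomem *db_addr,
+ unsigned int ep_index)
+{
+ u32 field = readl(db_addr) & DB_MASK;
+
+ writel(field | EPI_TO_DB(ep_index), db_addr);
+}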
+
+
+/**
+ * struct xhci_slot_ctx
+ * @dev_info: Route string, device speed, hub info, and last valid endpoint
+ * @dev_info2: Max exit latency for device number, root hub port number
+ * @tt_info: tt_info is used to construct split transaction tokens
+ * @dev_state: slot state and device address
+ *
+ * Slot Context - section 6.2.1.1. This assumes the HC uses 32-byte context
+ * structures. If the HC uses 64-byte contexts, there is an additional 32 bytes
+ * reserved at the end of the slot context for HC internal use.
+ */
+struct xhci_slot_ctx {
+ u32 dev_info;
+ u32 dev_info2;
+ u32 tt_info;
+ u32 dev_state;
+ /* offset 0x10 to 0x1f reserved for HC internal use */
+ u32 reserved[4];
+};
+
+/* dev_info bitmasks */
+/* Route String - 0:19 */
+#define ROUTE_STRING_MASK (0xfffff)
+/* Device speed - values defined by PORTSC Device Speed field - 20:23 */
+#define DEV_SPEED (0xf << 20)
+/* bit 24 reserved */
+/* Is this LS/FS device connected through a HS hub? - bit 25 */
+#define DEV_MTT (0x1 << 25)
+/* Set if the device is a hub - bit 26 */
+#define DEV_HUB (0x1 << 26)
+/* Index of the last valid endpoint context in this device context - 27:31 */
+#define LAST_CTX_MASK (0x1f << 27)
+#define LAST_CTX(p) ((p) << 27)
+#define LAST_CTX_TO_EP_NUM(p) (((p) >> 27) - 1)
+#define SLOT_FLAG (1 << 0)
+#define EP0_FLAG (1 << 1)
+
+/* dev_info2 bitmasks */
+/* Max Exit Latency (ms) - worst case time to wake up all links in dev path */
+#define MAX_EXIT (0xffff)
+/* Root hub port number that is needed to access the USB device */
+#define ROOT_HUB_PORT(p) (((p) & 0xff) << 16)
+
+/* tt_info bitmasks */
+/*
+ * TT Hub Slot ID - for low or full speed devices attached to a high-speed hub
+ * The Slot ID of the hub that isolates the high speed signaling from
+ * this low or full-speed device. '0' if attached to root hub port.
+ */
+#define TT_SLOT (0xff)
+/*
+ * The number of the downstream facing port of the high-speed hub
+ * '0' if the device is not low or full speed.
+ */
+#define TT_PORT (0xff << 8)
+
+/* dev_state bitmasks */
+/* USB device address - assigned by the HC */
+#define DEV_ADDR_MASK (0xff)
+/* bits 8:26 reserved */
+/* Slot state */
+#define SLOT_STATE (0x1f << 27)
+#define GET_SLOT_STATE(p) (((p) & (0x1f << 27)) >> 27)
+
+
+/**
+ * struct xhci_ep_ctx
+ * @ep_info: endpoint state, streams, mult, and interval information.
+ * @ep_info2: information on endpoint type, max packet size, max burst size,
+ * error count, and whether the HC will force an event for all
+ * transactions.
+ * @deq: 64-bit ring dequeue pointer address. If the endpoint only
+ * defines one stream, this points to the endpoint transfer ring.
+ * Otherwise, it points to a stream context array, which has a
+ * ring pointer for each flow.
+ * @tx_info:
+ * Average TRB lengths for the endpoint ring and
+ * max payload within an Endpoint Service Interval Time (ESIT).
+ *
+ * Endpoint Context - section 6.2.1.2. This assumes the HC uses 32-byte context
+ * structures. If the HC uses 64-byte contexts, there is an additional 32 bytes
+ * reserved at the end of the endpoint context for HC internal use.
+ */
+struct xhci_ep_ctx {
+ u32 ep_info;
+ u32 ep_info2;
+ u32 deq[2];
+ u32 tx_info;
+ /* offset 0x14 - 0x1f reserved for HC internal use */
+ u32 reserved[3];
+};
+
+/* ep_info bitmasks */
+/*
+ * Endpoint State - bits 0:2
+ * 0 - disabled
+ * 1 - running
+ * 2 - halted due to halt condition - ok to manipulate endpoint ring
+ * 3 - stopped
+ * 4 - TRB error
+ * 5-7 - reserved
+ */
+#define EP_STATE_MASK (0xf)
+#define EP_STATE_DISABLED 0
+#define EP_STATE_RUNNING 1
+#define EP_STATE_HALTED 2
+#define EP_STATE_STOPPED 3
+#define EP_STATE_ERROR 4
+/* Mult - max number of bursts within an interval, in EP companion desc. */
+#define EP_MULT(p) (((p) & 0x3) << 8)
+/* bits 10:14 are Max Primary Streams */
+/* bit 15 is Linear Stream Array */
+/* Interval - period between requests to an endpoint - 125us increments. */
+#define EP_INTERVAL(p) (((p) & 0xff) << 16)
+
+/* ep_info2 bitmasks */
+/*
+ * Force Event - generate transfer events for all TRBs for this endpoint
+ * This will tell the HC to ignore the IOC and ISP flags (for debugging only).
+ */
+#define FORCE_EVENT (0x1)
+#define ERROR_COUNT(p) (((p) & 0x3) << 1)
+#define EP_TYPE(p) ((p) << 3)
+#define ISOC_OUT_EP 1
+#define BULK_OUT_EP 2
+#define INT_OUT_EP 3
+#define CTRL_EP 4
+#define ISOC_IN_EP 5
+#define BULK_IN_EP 6
+#define INT_IN_EP 7
+/* bit 6 reserved */
+/* bit 7 is Host Initiate Disable - for disabling stream selection */
+#define MAX_BURST(p) (((p)&0xff) << 8)
+#define MAX_PACKET(p) (((p)&0xffff) << 16)
+
+
+/**
+ * struct xhci_device_control
+ * Input/Output context; see section 6.2.5.
+ *
+ * @drop_flags: set the bit of the endpoint context you want to disable
+ * @add_flags: set the bit of the endpoint context you want to enable
+ */
+struct xhci_device_control {
+ u32 drop_flags;
+ u32 add_flags;
+ u32 rsvd[6];
+ struct xhci_slot_ctx slot;
+ struct xhci_ep_ctx ep[31];
+};
+
+/* drop context bitmasks */
+#define DROP_EP(x) (0x1 << (x))
+/* add context bitmasks */
+#define ADD_EP(x) (0x1 << (x))
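+
+/*
+ * Illustrative sketch (an assumption about typical usage): before an Address
+ * Device command, the input context would flag the slot context and the EP0
+ * context as valid, and drop nothing.
+ */
+static inline void xhci_addr_dev_flags_sketch(struct xhci_device_control *ctrl)
+{
+ ctrl->add_flags = SLOT_FLAG | EP0_FLAG;
+ ctrl->drop_flags = 0;
+}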
+
+
+struct xhci_virt_device {
+ /*
+ * Commands to the hardware are passed an "input context" that
+ * tells the hardware what to change in its data structures.
+ * The hardware will return changes in an "output context" that
+ * software must allocate for the hardware. We need to keep
+ * track of input and output contexts separately because
+ * these commands might fail and we don't trust the hardware.
+ */
+ struct xhci_device_control *out_ctx;
+ dma_addr_t out_ctx_dma;
+ /* Used for addressing devices and configuration changes */
+ struct xhci_device_control *in_ctx;
+ dma_addr_t in_ctx_dma;
+ /* FIXME when stream support is added */
+ struct xhci_ring *ep_rings[31];
+ /* Temporary storage in case the configure endpoint command fails and we
+ * have to restore the device state to the previous state
+ */
+ struct xhci_ring *new_ep_rings[31];
+ struct completion cmd_completion;
+ /* Status of the last command issued for this device */
+ u32 cmd_status;
+};
+
+
+/**
+ * struct xhci_device_context_array
+ * @dev_context_ptrs: array of 64-bit DMA addresses for device contexts
+ */
+struct xhci_device_context_array {
+ /* 64-bit device addresses; we only write 32-bit addresses */
+ u32 dev_context_ptrs[2*MAX_HC_SLOTS];
+ /* private xHCD pointers */
+ dma_addr_t dma;
+};
+/* TODO: write function to set the 64-bit device DMA address */
+/*
+ * TODO: change this to be dynamically sized at HC mem init time since the HC
+ * might not be able to handle the maximum number of devices possible.
+ */
+
+
+struct xhci_stream_ctx {
+ /* 64-bit stream ring address, cycle state, and stream type */
+ u32 stream_ring[2];
+ /* offset 0x08 - 0x0f reserved for HC internal use */
+ u32 reserved[2];
+};
+
+
+struct xhci_transfer_event {
+ /* 64-bit buffer address, or immediate data */
+ u32 buffer[2];
+ u32 transfer_len;
+ /* This field is interpreted differently based on the type of TRB */
+ u32 flags;
+};
+
+/** Transfer Event bit fields **/
+#define TRB_TO_EP_ID(p) (((p) >> 16) & 0x1f)
+
+/* Completion Code - only applicable for some types of TRBs */
+#define COMP_CODE_MASK (0xff << 24)
+#define GET_COMP_CODE(p) (((p) & COMP_CODE_MASK) >> 24)
+#define COMP_SUCCESS 1
+/* Data Buffer Error */
+#define COMP_DB_ERR 2
+/* Babble Detected Error */
+#define COMP_BABBLE 3
+/* USB Transaction Error */
+#define COMP_TX_ERR 4
+/* TRB Error - some TRB field is invalid */
+#define COMP_TRB_ERR 5
+/* Stall Error - USB device is stalled */
+#define COMP_STALL 6
+/* Resource Error - HC doesn't have memory for that device configuration */
+#define COMP_ENOMEM 7
+/* Bandwidth Error - not enough room in schedule for this dev config */
+#define COMP_BW_ERR 8
+/* No Slots Available Error - HC ran out of device slots */
+#define COMP_ENOSLOTS 9
+/* Invalid Stream Type Error */
+#define COMP_STREAM_ERR 10
+/* Slot Not Enabled Error - doorbell rung for disabled device slot */
+#define COMP_EBADSLT 11
+/* Endpoint Not Enabled Error */
+#define COMP_EBADEP 12
+/* Short Packet */
+#define COMP_SHORT_TX 13
+/* Ring Underrun - doorbell rung for an empty isoc OUT ep ring */
+#define COMP_UNDERRUN 14
+/* Ring Overrun - isoc IN ep ring is empty when ep is scheduled to RX */
+#define COMP_OVERRUN 15
+/* Virtual Function Event Ring Full Error */
+#define COMP_VF_FULL 16
+/* Parameter Error - Context parameter is invalid */
+#define COMP_EINVAL 17
+/* Bandwidth Overrun Error - isoc ep exceeded its allocated bandwidth */
+#define COMP_BW_OVER 18
+/* Context State Error - illegal context state transition requested */
+#define COMP_CTX_STATE 19
+/* No Ping Response Error - HC didn't get PING_RESPONSE in time to TX */
+#define COMP_PING_ERR 20
+/* Event Ring is full */
+#define COMP_ER_FULL 21
+/* Missed Service Error - HC couldn't service an isoc ep within interval */
+#define COMP_MISSED_INT 23
+/* Successfully stopped command ring */
+#define COMP_CMD_STOP 24
+/* Successfully aborted current command and stopped command ring */
+#define COMP_CMD_ABORT 25
+/* Stopped - transfer was terminated by a stop endpoint command */
+#define COMP_STOP 26
+/* Same as COMP_STOP, but the transferred length in the event is invalid */
+#define COMP_STOP_INVAL 27
+/* Control Abort Error - Debug Capability - control pipe aborted */
+#define COMP_DBG_ABORT 28
+/* Completion codes 29 and 30 reserved */
+/* Isoc Buffer Overrun - an isoc IN ep sent more data than could fit in TD */
+#define COMP_BUFF_OVER 31
+/* Event Lost Error - xHC has an "internal event overrun condition" */
+#define COMP_ISSUES 32
+/* Undefined Error - reported when other error codes don't apply */
+#define COMP_UNKNOWN 33
+/* Invalid Stream ID Error */
+#define COMP_STRID_ERR 34
+/* Secondary Bandwidth Error - may be returned by a Configure Endpoint cmd */
+/* FIXME - check for this */
+#define COMP_2ND_BW_ERR 35
+/* Split Transaction Error */
+#define COMP_SPLIT_ERR 36
+
+struct xhci_link_trb {
+ /* 64-bit segment pointer*/
+ u32 segment_ptr[2];
+ u32 intr_target;
+ u32 control;
+};
+
+/* control bitfields */
+#define LINK_TOGGLE (0x1<<1)
+
+/* Command completion event TRB */
+struct xhci_event_cmd {
+ /* Pointer to command TRB, or the value passed by the event data trb */
+ u32 cmd_trb[2];
+ u32 status;
+ u32 flags;
+};
+
+/* flags bitmasks */
+/* bits 16:23 are the virtual function ID */
+/* bits 24:31 are the slot ID */
+#define TRB_TO_SLOT_ID(p) (((p) & (0xff<<24)) >> 24)
+#define SLOT_ID_FOR_TRB(p) (((p) & 0xff) << 24)
+
+/* Stop Endpoint TRB - ep_index to endpoint ID for this TRB */
+#define TRB_TO_EP_INDEX(p) ((((p) & (0x1f << 16)) >> 16) - 1)
+#define EP_ID_FOR_TRB(p) ((((p) + 1) & 0x1f) << 16)
+
+
+/* Port Status Change Event TRB fields */
+/* Port ID - bits 31:24 */
+#define GET_PORT_ID(p) (((p) & (0xff << 24)) >> 24)
+
+/* Normal TRB fields */
+/* transfer_len bitmasks - bits 0:16 */
+#define TRB_LEN(p) ((p) & 0x1ffff)
+/* TD size - number of bytes remaining in the TD (including this TRB):
+ * bits 17 - 21. Shift the number of bytes by 10. */
+#define TD_REMAINDER(p) ((((p) >> 10) & 0x1f) << 17)
+/* Interrupter Target - which MSI-X vector to target the completion event at */
+#define TRB_INTR_TARGET(p) (((p) & 0x3ff) << 22)
+#define GET_INTR_TARGET(p) (((p) >> 22) & 0x3ff)
+
+/* Cycle bit - indicates TRB ownership by HC or HCD */
+#define TRB_CYCLE (1<<0)
+/*
+ * Force next event data TRB to be evaluated before task switch.
+ * Used to pass OS data back after a TD completes.
+ */
+#define TRB_ENT (1<<1)
+/* Interrupt on short packet */
+#define TRB_ISP (1<<2)
+/* Set PCIe no snoop attribute */
+#define TRB_NO_SNOOP (1<<3)
+/* Chain multiple TRBs into a TD */
+#define TRB_CHAIN (1<<4)
+/* Interrupt on completion */
+#define TRB_IOC (1<<5)
+/* The buffer pointer contains immediate data */
+#define TRB_IDT (1<<6)
+
+
+/* Control transfer TRB specific fields */
+#define TRB_DIR_IN (1<<16)
+
+struct xhci_generic_trb {
+ u32 field[4];
+};
+
+union xhci_trb {
+ struct xhci_link_trb link;
+ struct xhci_transfer_event trans_event;
+ struct xhci_event_cmd event_cmd;
+ struct xhci_generic_trb generic;
+};
+
+/* TRB bit mask */
+#define TRB_TYPE_BITMASK (0xfc00)
+#define TRB_TYPE(p) ((p) << 10)
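+/* Illustrative sketch (an assumption): the inverse of TRB_TYPE(), as the
+ * event handlers do when they pull the type ID back out of a flags word.
+ */
+#define TRB_FIELD_TO_TYPE_SKETCH(p) (((p) & TRB_TYPE_BITMASK) >> 10)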
+/* TRB type IDs */
+/* bulk, interrupt, isoc scatter/gather, and control data stage */
+#define TRB_NORMAL 1
+/* setup stage for control transfers */
+#define TRB_SETUP 2
+/* data stage for control transfers */
+#define TRB_DATA 3
+/* status stage for control transfers */
+#define TRB_STATUS 4
+/* isoc transfers */
+#define TRB_ISOC 5
+/* TRB for linking ring segments */
+#define TRB_LINK 6
+#define TRB_EVENT_DATA 7
+/* Transfer Ring No-op (not for the command ring) */
+#define TRB_TR_NOOP 8
+/* Command TRBs */
+/* Enable Slot Command */
+#define TRB_ENABLE_SLOT 9
+/* Disable Slot Command */
+#define TRB_DISABLE_SLOT 10
+/* Address Device Command */
+#define TRB_ADDR_DEV 11
+/* Configure Endpoint Command */
+#define TRB_CONFIG_EP 12
+/* Evaluate Context Command */
+#define TRB_EVAL_CONTEXT 13
+/* Reset Transfer Ring Command */
+#define TRB_RESET_RING 14
+/* Stop Transfer Ring Command */
+#define TRB_STOP_RING 15
+/* Set Transfer Ring Dequeue Pointer Command */
+#define TRB_SET_DEQ 16
+/* Reset Device Command */
+#define TRB_RESET_DEV 17
+/* Force Event Command (opt) */
+#define TRB_FORCE_EVENT 18
+/* Negotiate Bandwidth Command (opt) */
+#define TRB_NEG_BANDWIDTH 19
+/* Set Latency Tolerance Value Command (opt) */
+#define TRB_SET_LT 20
+/* Get port bandwidth Command */
+#define TRB_GET_BW 21
+/* Force Header Command - generate a transaction or link management packet */
+#define TRB_FORCE_HEADER 22
+/* No-op Command - not for transfer rings */
+#define TRB_CMD_NOOP 23
+/* TRB IDs 24-31 reserved */
+/* Event TRBS */
+/* Transfer Event */
+#define TRB_TRANSFER 32
+/* Command Completion Event */
+#define TRB_COMPLETION 33
+/* Port Status Change Event */
+#define TRB_PORT_STATUS 34
+/* Bandwidth Request Event (opt) */
+#define TRB_BANDWIDTH_EVENT 35
+/* Doorbell Event (opt) */
+#define TRB_DOORBELL 36
+/* Host Controller Event */
+#define TRB_HC_EVENT 37
+/* Device Notification Event - device sent function wake notification */
+#define TRB_DEV_NOTE 38
+/* MFINDEX Wrap Event - microframe counter wrapped */
+#define TRB_MFINDEX_WRAP 39
+/* TRB IDs 40-47 reserved, 48-63 is vendor-defined */
+
+/*
+ * TRBS_PER_SEGMENT must be a multiple of 4,
+ * since the command ring is 64-byte aligned.
+ * It must also be greater than 16.
+ */
+#define TRBS_PER_SEGMENT 64
+#define SEGMENT_SIZE (TRBS_PER_SEGMENT*16)
+/* TRB buffer pointers can't cross 64KB boundaries */
+#define TRB_MAX_BUFF_SHIFT 16
+#define TRB_MAX_BUFF_SIZE (1 << TRB_MAX_BUFF_SHIFT)
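+
+/*
+ * Illustrative sketch (an assumption, following the counting loop in
+ * xhci_queue_bulk_tx()): how many TRBs a transfer needs, given that a
+ * single TRB's buffer may not cross a 64KB boundary.
+ */
+static inline unsigned int xhci_count_trbs_sketch(u64 addr, u32 len)
+{
+ unsigned int num_trbs = 0;
+ u32 running_total;
+
+ /* Bytes that fit before the first 64KB boundary */
+ running_total = TRB_MAX_BUFF_SIZE -
+ (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+ /* Zero-length transfers still need one TRB */
+ if (running_total != 0 || len == 0)
+ num_trbs++;
+ /* One more TRB per additional 64KB chunk */
+ while (running_total < len) {
+ num_trbs++;
+ running_total += TRB_MAX_BUFF_SIZE;
+ }
+ return num_trbs;
+}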
+
+struct xhci_segment {
+ union xhci_trb *trbs;
+ /* private to HCD */
+ struct xhci_segment *next;
+ dma_addr_t dma;
+};
+
+struct xhci_td {
+ struct list_head td_list;
+ struct list_head cancelled_td_list;
+ struct urb *urb;
+ struct xhci_segment *start_seg;
+ union xhci_trb *first_trb;
+ union xhci_trb *last_trb;
+};
+
+struct xhci_ring {
+ struct xhci_segment *first_seg;
+ union xhci_trb *enqueue;
+ struct xhci_segment *enq_seg;
+ unsigned int enq_updates;
+ union xhci_trb *dequeue;
+ struct xhci_segment *deq_seg;
+ unsigned int deq_updates;
+ struct list_head td_list;
+ /* ---- Related to URB cancellation ---- */
+ struct list_head cancelled_td_list;
+ unsigned int cancels_pending;
+ unsigned int state;
+#define SET_DEQ_PENDING (1 << 0)
+ /* The TRB that was last reported in a stopped endpoint ring */
+ union xhci_trb *stopped_trb;
+ struct xhci_td *stopped_td;
+ /*
+ * Write the cycle state into the TRB cycle field to give ownership of
+ * the TRB to the host controller (if we are the producer), or to check
+ * if we own the TRB (if we are the consumer). See section 4.9.1.
+ */
+ u32 cycle_state;
+};
+
+struct xhci_erst_entry {
+ /* 64-bit event ring segment address */
+ u32 seg_addr[2];
+ u32 seg_size;
+ /* Set to zero */
+ u32 rsvd;
+};
+
+struct xhci_erst {
+ struct xhci_erst_entry *entries;
+ unsigned int num_entries;
+ /* xhci->event_ring keeps track of segment dma addresses */
+ dma_addr_t erst_dma_addr;
+ /* Num entries the ERST can contain */
+ unsigned int erst_size;
+};
+
+/*
+ * Each segment table entry is 4*32 bits long.  1K seems like an ok size:
+ * (1K bytes * 8 bits/byte) / (4*32 bits per entry) = 64 segment entries in
+ * the table, meaning the table can describe up to 64 ring segments; the
+ * arithmetic is spelled out in the sketch after these defines.
+ */
+/* Initial number of event ring segments allocated */
+#define ERST_NUM_SEGS 1
+/* Allocated size of the ERST, in number of entries */
+#define ERST_SIZE 64
+/* Initial number of entries in the ERST */
+#define ERST_ENTRIES 1
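+
+/*
+ * Illustrative arithmetic for the sizing comment above (hypothetical helper,
+ * not used by the driver): each ERST entry is four 32-bit words, so a 64
+ * entry table occupies 64 * 16 = 1024 bytes.
+ */
+static inline unsigned long xhci_example_erst_bytes(void)
+{
+	return ERST_SIZE * sizeof(struct xhci_erst_entry);
+}
+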
+/* Poll every 60 seconds */
+#define POLL_TIMEOUT 60
+/* XXX: Make these module parameters */
+
+
+/* There is one xhci_hcd structure per controller */
+struct xhci_hcd {
+ /* glue to PCI and HCD framework */
+ struct xhci_cap_regs __iomem *cap_regs;
+ struct xhci_op_regs __iomem *op_regs;
+ struct xhci_run_regs __iomem *run_regs;
+ struct xhci_doorbell_array __iomem *dba;
+ /* Our HCD's current interrupter register set */
+ struct xhci_intr_reg __iomem *ir_set;
+
+ /* Cached register copies of read-only HC data */
+ __u32 hcs_params1;
+ __u32 hcs_params2;
+ __u32 hcs_params3;
+ __u32 hcc_params;
+
+ spinlock_t lock;
+
+ /* packed release number */
+ u8 sbrn;
+ u16 hci_version;
+ u8 max_slots;
+ u8 max_interrupters;
+ u8 max_ports;
+ u8 isoc_threshold;
+ int event_ring_max;
+ int addr_64;
+ /* 4KB min, 128MB max */
+ int page_size;
+ /* Valid values are 12 to 20, inclusive */
+ int page_shift;
+	/* only one MSI-X vector for now, but might need more later */
+ int msix_count;
+ struct msix_entry *msix_entries;
+ /* data structures */
+ struct xhci_device_context_array *dcbaa;
+ struct xhci_ring *cmd_ring;
+ struct xhci_ring *event_ring;
+ struct xhci_erst erst;
+ /* slot enabling and address device helpers */
+ struct completion addr_dev;
+ int slot_id;
+ /* Internal mirror of the HW's dcbaa */
+ struct xhci_virt_device *devs[MAX_HC_SLOTS];
+
+ /* DMA pools */
+ struct dma_pool *device_pool;
+ struct dma_pool *segment_pool;
+
+#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
+ /* Poll the rings - for debugging */
+ struct timer_list event_ring_timer;
+ int zombie;
+#endif
+ /* Statistics */
+ int noops_submitted;
+ int noops_handled;
+ int error_bitmask;
+};
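+
+/*
+ * Illustrative sketch, not part of the driver: the hcs_params* fields above
+ * cache read-only capability registers, so limits such as the device slot
+ * count can be decoded without touching the hardware again.  HCS_MAX_SLOTS
+ * is defined earlier in this header; the helper name is hypothetical.
+ */
+static inline unsigned int xhci_example_max_slots(struct xhci_hcd *xhci)
+{
+	/* bits 7:0 of HCSPARAMS1 hold the number of device slots */
+	return HCS_MAX_SLOTS(xhci->hcs_params1);
+}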
+
+/* For testing purposes */
+#define NUM_TEST_NOOPS 0
+
+/* convert between an HCD pointer and the corresponding xhci_hcd */
+static inline struct xhci_hcd *hcd_to_xhci(struct usb_hcd *hcd)
+{
+ return (struct xhci_hcd *) (hcd->hcd_priv);
+}
+
+static inline struct usb_hcd *xhci_to_hcd(struct xhci_hcd *xhci)
+{
+ return container_of((void *) xhci, struct usb_hcd, hcd_priv);
+}
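+
+/*
+ * Illustrative usage, not part of the interface: the two helpers above are
+ * inverses, since hcd_to_xhci() reads hcd_priv and xhci_to_hcd() undoes the
+ * indirection with container_of().  The function name is hypothetical.
+ */
+static inline int xhci_example_roundtrip(struct usb_hcd *hcd)
+{
+	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+
+	return xhci_to_hcd(xhci) == hcd;	/* always true */
+}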
+
+#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
+#define XHCI_DEBUG 1
+#else
+#define XHCI_DEBUG 0
+#endif
+
+#define xhci_dbg(xhci, fmt, args...) \
+ do { if (XHCI_DEBUG) dev_dbg(xhci_to_hcd(xhci)->self.controller , fmt , ## args); } while (0)
+#define xhci_info(xhci, fmt, args...) \
+ do { if (XHCI_DEBUG) dev_info(xhci_to_hcd(xhci)->self.controller , fmt , ## args); } while (0)
+#define xhci_err(xhci, fmt, args...) \
+ dev_err(xhci_to_hcd(xhci)->self.controller , fmt , ## args)
+#define xhci_warn(xhci, fmt, args...) \
+ dev_warn(xhci_to_hcd(xhci)->self.controller , fmt , ## args)
+
+/* TODO: copied from ehci.h - can be refactored? */
+/* xHCI spec says all registers are little endian */
+static inline unsigned int xhci_readl(const struct xhci_hcd *xhci,
+ __u32 __iomem *regs)
+{
+ return readl(regs);
+}
+static inline void xhci_writel(struct xhci_hcd *xhci,
+ const unsigned int val, __u32 __iomem *regs)
+{
+ if (!in_interrupt())
+ xhci_dbg(xhci,
+ "`MEM_WRITE_DWORD(3'b000, 32'h%p, 32'h%0x, 4'hf);\n",
+ regs, val);
+ writel(val, regs);
+}
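+
+/*
+ * Illustrative sketch of the accessors above: a read-modify-write of the
+ * run/stop bit in the operational command register.  CMD_RUN and the
+ * op_regs command field are defined earlier in this header; the helper
+ * name is hypothetical, and real code must respect the halted-HC rules.
+ */
+static inline void xhci_example_set_run(struct xhci_hcd *xhci)
+{
+	u32 cmd = xhci_readl(xhci, &xhci->op_regs->command);
+
+	cmd |= CMD_RUN;
+	xhci_writel(xhci, cmd, &xhci->op_regs->command);
+}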
+
+/* xHCI debugging */
+void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int set_num);
+void xhci_print_registers(struct xhci_hcd *xhci);
+void xhci_dbg_regs(struct xhci_hcd *xhci);
+void xhci_print_run_regs(struct xhci_hcd *xhci);
+void xhci_print_trb_offsets(struct xhci_hcd *xhci, union xhci_trb *trb);
+void xhci_debug_trb(struct xhci_hcd *xhci, union xhci_trb *trb);
+void xhci_debug_segment(struct xhci_hcd *xhci, struct xhci_segment *seg);
+void xhci_debug_ring(struct xhci_hcd *xhci, struct xhci_ring *ring);
+void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst);
+void xhci_dbg_cmd_ptrs(struct xhci_hcd *xhci);
+void xhci_dbg_ring_ptrs(struct xhci_hcd *xhci, struct xhci_ring *ring);
+void xhci_dbg_ctx(struct xhci_hcd *xhci, struct xhci_device_control *ctx, dma_addr_t dma, unsigned int last_ep);
+
+/* xHCI memory management */
+void xhci_mem_cleanup(struct xhci_hcd *xhci);
+int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags);
+void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id);
+int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, struct usb_device *udev, gfp_t flags);
+int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev);
+unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc);
+unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc);
+void xhci_endpoint_zero(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev, struct usb_host_endpoint *ep);
+int xhci_endpoint_init(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev,
+ struct usb_device *udev, struct usb_host_endpoint *ep,
+ gfp_t mem_flags);
+void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring);
+
+#ifdef CONFIG_PCI
+/* xHCI PCI glue */
+int xhci_register_pci(void);
+void xhci_unregister_pci(void);
+#endif
+
+/* xHCI host controller glue */
+int xhci_halt(struct xhci_hcd *xhci);
+int xhci_reset(struct xhci_hcd *xhci);
+int xhci_init(struct usb_hcd *hcd);
+int xhci_run(struct usb_hcd *hcd);
+void xhci_stop(struct usb_hcd *hcd);
+void xhci_shutdown(struct usb_hcd *hcd);
+int xhci_get_frame(struct usb_hcd *hcd);
+irqreturn_t xhci_irq(struct usb_hcd *hcd);
+int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev);
+void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev);
+int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev);
+int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags);
+int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status);
+int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *ep);
+int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *ep);
+int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
+void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
+
+/* xHCI ring, segment, TRB, and TD functions */
+dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg, union xhci_trb *trb);
+void xhci_ring_cmd_db(struct xhci_hcd *xhci);
+void *xhci_setup_one_noop(struct xhci_hcd *xhci);
+void xhci_handle_event(struct xhci_hcd *xhci);
+void xhci_set_hc_event_deq(struct xhci_hcd *xhci);
+int xhci_queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id);
+int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
+ u32 slot_id);
+int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, int slot_id,
+ unsigned int ep_index);
+int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb,
+ int slot_id, unsigned int ep_index);
+int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb,
+ int slot_id, unsigned int ep_index);
+int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
+ u32 slot_id);
+
+/* xHCI roothub code */
+int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex,
+ char *buf, u16 wLength);
+int xhci_hub_status_data(struct usb_hcd *hcd, char *buf);
+
+#endif /* __LINUX_XHCI_HCD_H */