aboutsummaryrefslogtreecommitdiffstats
path: root/usb/usb-musb-gadget-unmapping-the-dma-buffer-when-switching-to-pio-mode.patch
diff options
Diffstat (limited to 'usb/usb-musb-gadget-unmapping-the-dma-buffer-when-switching-to-pio-mode.patch')
-rw-r--r--usb/usb-musb-gadget-unmapping-the-dma-buffer-when-switching-to-pio-mode.patch190
1 file changed, 190 insertions, 0 deletions
diff --git a/usb/usb-musb-gadget-unmapping-the-dma-buffer-when-switching-to-pio-mode.patch b/usb/usb-musb-gadget-unmapping-the-dma-buffer-when-switching-to-pio-mode.patch
new file mode 100644
index 00000000000000..2b44fc1afdcb62
--- /dev/null
+++ b/usb/usb-musb-gadget-unmapping-the-dma-buffer-when-switching-to-pio-mode.patch
@@ -0,0 +1,190 @@
+From balbi@ti.com Tue Oct 5 13:29:53 2010
+From: Felipe Balbi <balbi@ti.com>
+To: Greg KH <greg@kroah.com>
+Cc: Linux USB Mailing List <linux-usb@vger.kernel.org>,
+ Hema HK <hemahk@ti.com>, Felipe Balbi <balbi@ti.com>
+Subject: usb: musb: gadget: Unmapping the dma buffer when switching to PIO mode
+Date: Fri, 24 Sep 2010 13:44:08 +0300
+Message-Id: <1285325055-1247-8-git-send-email-balbi@ti.com>
+
+From: Hema HK <hemahk@ti.com>
+
+Buffer is mapped to dma when dma channel is allocated. buffer needs
+to be unmapped when fallback to PIO mode if dma channel_program
+fails.
+
+Signed-off-by: Hema HK <hemahk@ti.com>
+Signed-off-by: Felipe Balbi <balbi@ti.com>
+Reviewed-by: Ming Lei <tom.leiming@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+---
+ drivers/usb/musb/musb_gadget.c | 122 +++++++++++++++++++++++++++--------------
+ 1 file changed, 82 insertions(+), 40 deletions(-)
+
+--- a/drivers/usb/musb/musb_gadget.c
++++ b/drivers/usb/musb/musb_gadget.c
+@@ -92,6 +92,60 @@
+
+ /* ----------------------------------------------------------------------- */
+
++/* Maps the buffer to dma */
++
++static inline void map_dma_buffer(struct musb_request *request,
++ struct musb *musb)
++{
++ if (request->request.dma == DMA_ADDR_INVALID) {
++ request->request.dma = dma_map_single(
++ musb->controller,
++ request->request.buf,
++ request->request.length,
++ request->tx
++ ? DMA_TO_DEVICE
++ : DMA_FROM_DEVICE);
++ request->mapped = 1;
++ } else {
++ dma_sync_single_for_device(musb->controller,
++ request->request.dma,
++ request->request.length,
++ request->tx
++ ? DMA_TO_DEVICE
++ : DMA_FROM_DEVICE);
++ request->mapped = 0;
++ }
++}
++
++/* Unmap the buffer from dma and maps it back to cpu */
++static inline void unmap_dma_buffer(struct musb_request *request,
++ struct musb *musb)
++{
++ if (request->request.dma == DMA_ADDR_INVALID) {
++ DBG(20, "not unmapping a never mapped buffer\n");
++ return;
++ }
++
++ if (request->mapped) {
++ dma_unmap_single(musb->controller,
++ request->request.dma,
++ request->request.length,
++ request->tx
++ ? DMA_TO_DEVICE
++ : DMA_FROM_DEVICE);
++ request->request.dma = DMA_ADDR_INVALID;
++ request->mapped = 0;
++ } else {
++ dma_sync_single_for_cpu(musb->controller,
++ request->request.dma,
++ request->request.length,
++ request->tx
++ ? DMA_TO_DEVICE
++ : DMA_FROM_DEVICE);
++
++ }
++}
++
+ /*
+ * Immediately complete a request.
+ *
+@@ -119,24 +173,8 @@ __acquires(ep->musb->lock)
+
+ ep->busy = 1;
+ spin_unlock(&musb->lock);
+- if (is_dma_capable()) {
+- if (req->mapped) {
+- dma_unmap_single(musb->controller,
+- req->request.dma,
+- req->request.length,
+- req->tx
+- ? DMA_TO_DEVICE
+- : DMA_FROM_DEVICE);
+- req->request.dma = DMA_ADDR_INVALID;
+- req->mapped = 0;
+- } else if (req->request.dma != DMA_ADDR_INVALID)
+- dma_sync_single_for_cpu(musb->controller,
+- req->request.dma,
+- req->request.length,
+- req->tx
+- ? DMA_TO_DEVICE
+- : DMA_FROM_DEVICE);
+- }
++ if (is_dma_capable() && ep->dma)
++ unmap_dma_buffer(req, musb);
+ if (request->status == 0)
+ DBG(5, "%s done request %p, %d/%d\n",
+ ep->end_point.name, request,
+@@ -298,7 +336,7 @@ static void txstate(struct musb *musb, s
+ csr);
+
+ #ifndef CONFIG_MUSB_PIO_ONLY
+- if (is_dma_capable() && musb_ep->dma) {
++ if (is_dma_capable() && !musb_ep->dma && musb->dma_controller) {
+ struct dma_controller *c = musb->dma_controller;
+ size_t request_size;
+
+@@ -395,6 +433,13 @@ static void txstate(struct musb *musb, s
+ #endif
+
+ if (!use_dma) {
++ /*
++ * Unmap the dma buffer back to cpu if dma channel
++ * programming fails
++ */
++ if (is_dma_capable() && musb_ep->dma)
++ unmap_dma_buffer(req, musb);
++
+ musb_write_fifo(musb_ep->hw_ep, fifo_count,
+ (u8 *) (request->buf + request->actual));
+ request->actual += fifo_count;
+@@ -711,6 +756,20 @@ static void rxstate(struct musb *musb, s
+ return;
+ }
+ #endif
++ /*
++ * Unmap the dma buffer back to cpu if dma channel
++ * programming fails. This buffer is mapped if the
++ * channel allocation is successful
++ */
++ if (is_dma_capable() && musb_ep->dma) {
++ unmap_dma_buffer(req, musb);
++
++ /* Clear DMAENAB for the
++ * PIO mode transfer
++ */
++ csr &= ~MUSB_RXCSR_DMAENAB;
++ musb_writew(epio, MUSB_RXCSR, csr);
++ }
+
+ musb_read_fifo(musb_ep->hw_ep, fifo_count, (u8 *)
+ (request->buf + request->actual));
+@@ -1153,28 +1212,11 @@ static int musb_gadget_queue(struct usb_
+ request->epnum = musb_ep->current_epnum;
+ request->tx = musb_ep->is_in;
+
+- if (is_dma_capable() && musb_ep->dma) {
+- if (request->request.dma == DMA_ADDR_INVALID) {
+- request->request.dma = dma_map_single(
+- musb->controller,
+- request->request.buf,
+- request->request.length,
+- request->tx
+- ? DMA_TO_DEVICE
+- : DMA_FROM_DEVICE);
+- request->mapped = 1;
+- } else {
+- dma_sync_single_for_device(musb->controller,
+- request->request.dma,
+- request->request.length,
+- request->tx
+- ? DMA_TO_DEVICE
+- : DMA_FROM_DEVICE);
+- request->mapped = 0;
+- }
+- } else if (!req->buf) {
++ if (is_dma_capable() && musb_ep->dma)
++ map_dma_buffer(request, musb);
++ else if (!req->buf)
+ return -ENODATA;
+- } else
++ else
+ request->mapped = 0;
+
+ spin_lock_irqsave(&musb->lock, lockflags);