diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index d5c7949f4fd1f..2b59ebf6cf878 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -136,7 +136,7 @@ config BCM_SBA_RAID
 
 config DMA_BCM2835
 	tristate "BCM2835 DMA engine support"
-	depends on ARCH_BCM2835 || ARCH_BCM2708 || ARCH_BCM2709
+	depends on ARCH_BCM2835
 	select DMA_ENGINE
 	select DMA_VIRTUAL_CHANNELS
 
@@ -683,6 +683,10 @@ config UNIPHIER_XDMAC
 	  UniPhier platform. This DMA controller can transfer data from
 	  memory to memory, memory to peripheral and peripheral to memory.
 
+config DMA_BCM2708
+	tristate "BCM2708 DMA legacy API support"
+	depends on DMA_BCM2835
+
 config XGENE_DMA
 	tristate "APM X-Gene DMA support"
 	depends on ARCH_XGENE || COMPILE_TEST
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 802ca916f05f5..fdb1e9a0ebaa4 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -22,6 +22,7 @@ obj-$(CONFIG_AT_HDMAC) += at_hdmac.o
 obj-$(CONFIG_AT_XDMAC) += at_xdmac.o
 obj-$(CONFIG_AXI_DMAC) += dma-axi-dmac.o
 obj-$(CONFIG_BCM_SBA_RAID) += bcm-sba-raid.o
+obj-$(CONFIG_DMA_BCM2708) += bcm2708-dmaengine.o
 obj-$(CONFIG_DMA_BCM2835) += bcm2835-dma.o
 obj-$(CONFIG_DMA_JZ4780) += dma-jz4780.o
 obj-$(CONFIG_DMA_SA11X0) += sa11x0-dma.o
diff --git a/drivers/dma/bcm2708-dmaengine.c b/drivers/dma/bcm2708-dmaengine.c
new file mode 100644
index 0000000000000..a9a7f92584c8c
--- /dev/null
+++ b/drivers/dma/bcm2708-dmaengine.c
@@ -0,0 +1,281 @@
+/*
+ * BCM2708 legacy DMA API
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/mutex.h>
+#include <linux/scatterlist.h>
+#include <linux/platform_data/dma-bcm2708.h>
+
+#include "virt-dma.h"
+
+#define CACHE_LINE_MASK 31
+#define DEFAULT_DMACHAN_BITMAP 0x10	/* channel 4 only */
+
+/* valid only for channels 0 - 14, 15 has its own base address */
+#define BCM2708_DMA_CHAN(n)	((n) << 8) /* base address */
+#define BCM2708_DMA_CHANIO(dma_base, n) \
+	((void __iomem *)((char *)(dma_base) + BCM2708_DMA_CHAN(n)))
+
+struct vc_dmaman {
+	void __iomem *dma_base;
+	u32 chan_available; /* bitmap of available channels */
+	u32 has_feature[BCM_DMA_FEATURE_COUNT]; /* bitmap of feature presence */
+	struct mutex lock;
+};
+
+static struct device *dmaman_dev;	/* we assume there's only one! */
+static struct vc_dmaman *g_dmaman;	/* DMA manager */
+
+/* DMA Auxiliary Functions */
+
+/* A DMA buffer on an arbitrary boundary may separate a cache line into a
+   section inside the DMA buffer and another section outside it.
+   Even if we flush DMA buffers from the cache there is always the chance that
+   during a DMA someone will access the part of a cache line that is outside
+   the DMA buffer - which will then bring in unwelcome data.
+   Without being able to dictate our own buffer pools we must insist that
+   DMA buffers consist of a whole number of cache lines.
+*/
+extern int bcm_sg_suitable_for_dma(struct scatterlist *sg_ptr, int sg_len)
+{
+	int i;
+
+	for (i = 0; i < sg_len; i++) {
+		if (sg_ptr[i].offset & CACHE_LINE_MASK ||
+		    sg_ptr[i].length & CACHE_LINE_MASK)
+			return 0;
+	}
+
+	return 1;
+}
+EXPORT_SYMBOL_GPL(bcm_sg_suitable_for_dma);
+
+extern void bcm_dma_start(void __iomem *dma_chan_base,
+			  dma_addr_t control_block)
+{
+	dsb(sy);	/* ARM data synchronization (push) operation */
+
+	writel(control_block, dma_chan_base + BCM2708_DMA_ADDR);
+	writel(BCM2708_DMA_ACTIVE, dma_chan_base + BCM2708_DMA_CS);
+}
+EXPORT_SYMBOL_GPL(bcm_dma_start);
+
+extern void bcm_dma_wait_idle(void __iomem *dma_chan_base)
+{
+	dsb(sy);
+
+	/* ugly busy wait only option for now */
+	while (readl(dma_chan_base + BCM2708_DMA_CS) & BCM2708_DMA_ACTIVE)
+		cpu_relax();
+}
+EXPORT_SYMBOL_GPL(bcm_dma_wait_idle);
+
+extern bool bcm_dma_is_busy(void __iomem *dma_chan_base)
+{
+	dsb(sy);
+
+	return readl(dma_chan_base + BCM2708_DMA_CS) & BCM2708_DMA_ACTIVE;
+}
+EXPORT_SYMBOL_GPL(bcm_dma_is_busy);
+
+/* Complete an ongoing DMA (assuming its results are to be ignored)
+   Does nothing if there is no DMA in progress.
+   This routine waits for the current AXI transfer to complete before
+   terminating the current DMA. If the current transfer is hung on a DREQ used
+   by an uncooperative peripheral the AXI transfer may never complete. In this
+   case the routine times out and returns a non-zero error code.
+   Use of this routine doesn't guarantee that the ongoing or aborted DMA
+   does not produce an interrupt.
+*/
+extern int bcm_dma_abort(void __iomem *dma_chan_base)
+{
+	unsigned long int cs;
+	int rc = 0;
+
+	cs = readl(dma_chan_base + BCM2708_DMA_CS);
+
+	if (BCM2708_DMA_ACTIVE & cs) {
+		long int timeout = 10000;
+
+		/* write 0 to the active bit - pause the DMA */
+		writel(0, dma_chan_base + BCM2708_DMA_CS);
+
+		/* wait for any current AXI transfer to complete */
+		while (0 != (cs & BCM2708_DMA_ISPAUSED) && --timeout >= 0)
+			cs = readl(dma_chan_base + BCM2708_DMA_CS);
+
+		if (0 != (cs & BCM2708_DMA_ISPAUSED)) {
+			/* we'll un-pause when we set off our next DMA */
+			rc = -ETIMEDOUT;
+
+		} else if (BCM2708_DMA_ACTIVE & cs) {
+			/* terminate the control block chain */
+			writel(0, dma_chan_base + BCM2708_DMA_NEXTCB);
+
+			/* abort the whole DMA */
+			writel(BCM2708_DMA_ABORT | BCM2708_DMA_ACTIVE,
+			       dma_chan_base + BCM2708_DMA_CS);
+		}
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL_GPL(bcm_dma_abort);
+
+/* DMA Manager Device Methods */
+
+static void vc_dmaman_init(struct vc_dmaman *dmaman, void __iomem *dma_base,
+			   u32 chans_available)
+{
+	dmaman->dma_base = dma_base;
+	dmaman->chan_available = chans_available;
+	dmaman->has_feature[BCM_DMA_FEATURE_FAST_ORD] = 0x0c;	/* 2 & 3 */
+	dmaman->has_feature[BCM_DMA_FEATURE_BULK_ORD] = 0x01;	/* 0 */
+	dmaman->has_feature[BCM_DMA_FEATURE_NORMAL_ORD] = 0xfe;	/* 1 to 7 */
+	dmaman->has_feature[BCM_DMA_FEATURE_LITE_ORD] = 0x7f00;	/* 8 to 14 */
+}
+
+static int vc_dmaman_chan_alloc(struct vc_dmaman *dmaman,
+				unsigned required_feature_set)
+{
+	u32 chans;
+	int chan = 0;
+	int feature;
+
+	chans = dmaman->chan_available;
+	for (feature = 0; feature < BCM_DMA_FEATURE_COUNT; feature++)
+		/* select the subset of available channels with the desired
+		   features */
+		if (required_feature_set & (1 << feature))
+			chans &= dmaman->has_feature[feature];
+
+	if (!chans)
+		return -ENOENT;
+
+	/* return the ordinal of the first channel in the bitmap */
+	while (chans != 0 && (chans & 1) == 0) {
+		chans >>= 1;
+		chan++;
+	}
+	/* claim the channel */
+	dmaman->chan_available &= ~(1 << chan);
+
+	return chan;
+}
+
+static int vc_dmaman_chan_free(struct vc_dmaman *dmaman, int chan)
+{
+	if (chan < 0)
+		return -EINVAL;
+
+	if ((1 << chan) & dmaman->chan_available)
+		return -EIDRM;
+
+	dmaman->chan_available |= (1 << chan);
+
+	return 0;
+}
+
+/* DMA Manager Monitor */
+
+extern int bcm_dma_chan_alloc(unsigned required_feature_set,
+			      void __iomem **out_dma_base, int *out_dma_irq)
+{
+	struct vc_dmaman *dmaman = g_dmaman;
+	struct platform_device *pdev = to_platform_device(dmaman_dev);
+	int chan;
+	int irq;
+
+	if (!dmaman_dev)
+		return -ENODEV;
+
+	mutex_lock(&dmaman->lock);
+	chan = vc_dmaman_chan_alloc(dmaman, required_feature_set);
+	if (chan < 0)
+		goto out;
+
+	irq = platform_get_irq(pdev, (unsigned int)chan);
+	if (irq < 0) {
+		dev_err(dmaman_dev, "failed to get irq for DMA channel %d\n",
+			chan);
+		vc_dmaman_chan_free(dmaman, chan);
+		chan = -ENOENT;
+		goto out;
+	}
+
+	*out_dma_base = BCM2708_DMA_CHANIO(dmaman->dma_base, chan);
+	*out_dma_irq = irq;
+	dev_dbg(dmaman_dev,
+		"Legacy API allocated channel=%d, base=%p, irq=%i\n",
+		chan, *out_dma_base, *out_dma_irq);
+
+out:
+	mutex_unlock(&dmaman->lock);
+
+	return chan;
+}
+EXPORT_SYMBOL_GPL(bcm_dma_chan_alloc);
+
+extern int bcm_dma_chan_free(int channel)
+{
+	struct vc_dmaman *dmaman = g_dmaman;
+	int rc;
+
+	if (!dmaman_dev)
+		return -ENODEV;
+
+	mutex_lock(&dmaman->lock);
+	rc = vc_dmaman_chan_free(dmaman, channel);
+	mutex_unlock(&dmaman->lock);
+
+	return rc;
+}
+EXPORT_SYMBOL_GPL(bcm_dma_chan_free);
+
+int bcm_dmaman_probe(struct platform_device *pdev, void __iomem *base,
+		     u32 chans_available)
+{
+	struct device *dev = &pdev->dev;
+	struct vc_dmaman *dmaman;
+
+	dmaman = devm_kzalloc(dev, sizeof(*dmaman), GFP_KERNEL);
+	if (!dmaman)
+		return -ENOMEM;
+
+	mutex_init(&dmaman->lock);
+	vc_dmaman_init(dmaman, base, chans_available);
+	g_dmaman = dmaman;
+	dmaman_dev = dev;
+
+	dev_info(dev, "DMA legacy API manager, dmachans=0x%x\n",
+		 chans_available);
+
+	return 0;
+}
+EXPORT_SYMBOL(bcm_dmaman_probe);
+
+int bcm_dmaman_remove(struct platform_device *pdev)
+{
+	dmaman_dev = NULL;
+
+	return 0;
+}
+EXPORT_SYMBOL(bcm_dmaman_remove);
+
+MODULE_LICENSE("GPL");
diff --git a/include/linux/platform_data/dma-bcm2708.h b/include/linux/platform_data/dma-bcm2708.h
new file mode 100644
index 0000000000000..6ca874d332a8b
--- /dev/null
+++ b/include/linux/platform_data/dma-bcm2708.h
@@ -0,0 +1,143 @@
+/*
+ * Copyright (C) 2010 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _PLAT_BCM2708_DMA_H
+#define _PLAT_BCM2708_DMA_H
+
+/* DMA CS Control and Status bits */
+#define BCM2708_DMA_ACTIVE	BIT(0)
+#define BCM2708_DMA_INT		BIT(2)
+#define BCM2708_DMA_ISPAUSED	BIT(4)	/* Pause requested or not active */
+#define BCM2708_DMA_ISHELD	BIT(5)	/* Is held by DREQ flow control */
+#define BCM2708_DMA_ERR		BIT(8)
+#define BCM2708_DMA_ABORT	BIT(30)	/* stop current CB, go to next, WO */
+#define BCM2708_DMA_RESET	BIT(31)	/* WO, self clearing */
+
+/* DMA control block "info" field bits */
+#define BCM2708_DMA_INT_EN	BIT(0)
+#define BCM2708_DMA_TDMODE	BIT(1)
+#define BCM2708_DMA_WAIT_RESP	BIT(3)
+#define BCM2708_DMA_D_INC	BIT(4)
+#define BCM2708_DMA_D_WIDTH	BIT(5)
+#define BCM2708_DMA_D_DREQ	BIT(6)
+#define BCM2708_DMA_S_INC	BIT(8)
+#define BCM2708_DMA_S_WIDTH	BIT(9)
+#define BCM2708_DMA_S_DREQ	BIT(10)
+
+#define BCM2708_DMA_BURST(x)	(((x) & 0xf) << 12)
+#define BCM2708_DMA_PER_MAP(x)	((x) << 16)
+#define BCM2708_DMA_WAITS(x)	(((x) & 0x1f) << 21)
+
+#define BCM2708_DMA_DREQ_EMMC	11
+#define BCM2708_DMA_DREQ_SDHOST	13
+
+#define BCM2708_DMA_CS		0x00	/* Control and Status */
+#define BCM2708_DMA_ADDR	0x04
+/* the current control block appears in the following registers - read only */
+#define BCM2708_DMA_INFO	0x08
+#define BCM2708_DMA_SOURCE_AD	0x0c
+#define BCM2708_DMA_DEST_AD	0x10
+#define BCM2708_DMA_NEXTCB	0x1C
+#define BCM2708_DMA_DEBUG	0x20
+
+#define BCM2708_DMA4_CS		(BCM2708_DMA_CHAN(4) + BCM2708_DMA_CS)
+#define BCM2708_DMA4_ADDR	(BCM2708_DMA_CHAN(4) + BCM2708_DMA_ADDR)
+
+#define BCM2708_DMA_TDMODE_LEN(w, h)	((h) << 16 | (w))
+
+/* When listing the features we can ask for when allocating DMA channels,
+   give those with higher priority smaller ordinal numbers */
+#define BCM_DMA_FEATURE_FAST_ORD	0
+#define BCM_DMA_FEATURE_BULK_ORD	1
+#define BCM_DMA_FEATURE_NORMAL_ORD	2
+#define BCM_DMA_FEATURE_LITE_ORD	3
+#define BCM_DMA_FEATURE_FAST		BIT(BCM_DMA_FEATURE_FAST_ORD)
+#define BCM_DMA_FEATURE_BULK		BIT(BCM_DMA_FEATURE_BULK_ORD)
+#define BCM_DMA_FEATURE_NORMAL		BIT(BCM_DMA_FEATURE_NORMAL_ORD)
+#define BCM_DMA_FEATURE_LITE		BIT(BCM_DMA_FEATURE_LITE_ORD)
+#define BCM_DMA_FEATURE_COUNT		4
+
+struct bcm2708_dma_cb {
+	u32 info;
+	u32 src;
+	u32 dst;
+	u32 length;
+	u32 stride;
+	u32 next;
+	u32 pad[2];
+};
+
+struct scatterlist;
+struct platform_device;
+
+#if defined(CONFIG_DMA_BCM2708) || defined(CONFIG_DMA_BCM2708_MODULE)
+
+int bcm_sg_suitable_for_dma(struct scatterlist *sg_ptr, int sg_len);
+void bcm_dma_start(void __iomem *dma_chan_base, dma_addr_t control_block);
+void bcm_dma_wait_idle(void __iomem *dma_chan_base);
+bool bcm_dma_is_busy(void __iomem *dma_chan_base);
+int bcm_dma_abort(void __iomem *dma_chan_base);
+
+/* return channel no or -ve error */
+int bcm_dma_chan_alloc(unsigned preferred_feature_set,
+		       void __iomem **out_dma_base, int *out_dma_irq);
+int bcm_dma_chan_free(int channel);
+
+int bcm_dmaman_probe(struct platform_device *pdev, void __iomem *base,
+		     u32 chans_available);
+int bcm_dmaman_remove(struct platform_device *pdev);
+
+#else /* CONFIG_DMA_BCM2708 */
+
+static inline int bcm_sg_suitable_for_dma(struct scatterlist *sg_ptr,
+					  int sg_len)
+{
+	return 0;
+}
+
+static inline void bcm_dma_start(void __iomem *dma_chan_base,
+				 dma_addr_t control_block) { }
+
+static inline void bcm_dma_wait_idle(void __iomem *dma_chan_base) { }
+
+static inline bool bcm_dma_is_busy(void __iomem *dma_chan_base)
+{
+	return false;
+}
+
+static inline int bcm_dma_abort(void __iomem *dma_chan_base)
+{
+	return -EINVAL;
+}
+
+static inline int bcm_dma_chan_alloc(unsigned preferred_feature_set,
+				     void __iomem **out_dma_base,
+				     int *out_dma_irq)
+{
+	return -EINVAL;
+}
+
+static inline int bcm_dma_chan_free(int channel)
+{
+	return -EINVAL;
+}
+
+static inline int bcm_dmaman_probe(struct platform_device *pdev,
+				   void __iomem *base, u32 chans_available)
+{
+	return 0;
+}
+
+static inline int bcm_dmaman_remove(struct platform_device *pdev)
+{
+	return 0;
+}
+
+#endif /* CONFIG_DMA_BCM2708 || CONFIG_DMA_BCM2708_MODULE */
+
+#endif /* _PLAT_BCM2708_DMA_H */
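
Usage note (not part of the patch): the sketch below illustrates how a client driver might consume this legacy API once bcm_dmaman_probe() has registered the controller. It is a minimal illustration only; the example_* helpers, the static channel variables and the caller-supplied bus addresses are hypothetical and not defined by this interface.

#include <linux/io.h>
#include <linux/platform_data/dma-bcm2708.h>

static void __iomem *chan_base;	/* channel register block from the manager */
static int chan, chan_irq;

/* Fill one control block for a plain memory-to-memory copy: increment both
 * addresses, wait for AXI write responses, no DREQ pacing, no 2D mode.
 * The CB itself must live in DMA-visible memory at a 32-byte aligned bus
 * address; src/dst/len are bus addresses and byte count as seen by the DMA. */
static void example_fill_memcpy_cb(struct bcm2708_dma_cb *cb,
				   u32 src, u32 dst, u32 len)
{
	cb->info = BCM2708_DMA_S_INC | BCM2708_DMA_D_INC |
		   BCM2708_DMA_WAIT_RESP;
	cb->src = src;
	cb->dst = dst;
	cb->length = len;
	cb->stride = 0;
	cb->next = 0;		/* end of chain */
	cb->pad[0] = 0;
	cb->pad[1] = 0;
}

/* Ask the manager for any channel with the NORMAL capability. */
static int example_acquire_channel(void)
{
	chan = bcm_dma_chan_alloc(BCM_DMA_FEATURE_NORMAL,
				  &chan_base, &chan_irq);
	return (chan < 0) ? chan : 0;
}

/* Kick a pre-built, DMA-mapped control block chain and busy-wait for it. */
static void example_run_transfer(dma_addr_t cb_handle)
{
	bcm_dma_start(chan_base, cb_handle);
	bcm_dma_wait_idle(chan_base);
}

static void example_release_channel(void)
{
	bcm_dma_abort(chan_base);	/* does nothing if no DMA is running */
	bcm_dma_chan_free(chan);
}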