From 7df4a91a2ab2792b88dd2eb4fc573bd5ff87bf12 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Thu, 27 May 2021 19:57:35 +0200 Subject: [PATCH 01/54] dt-bindings: usb: cdns,usb3: Fix interrupts order Correct the order of the descriptions for the "interrupts" property to match the order of the "interrupt-names" property. Fixes: 68989fe1c39d9b32 ("dt-bindings: usb: Convert cdns-usb3.txt to YAML schema") Signed-off-by: Geert Uytterhoeven --- Documentation/devicetree/bindings/usb/cdns,usb3.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Documentation/devicetree/bindings/usb/cdns,usb3.yaml b/Documentation/devicetree/bindings/usb/cdns,usb3.yaml index a407e1143cf42..8dedfa16c9929 100644 --- a/Documentation/devicetree/bindings/usb/cdns,usb3.yaml +++ b/Documentation/devicetree/bindings/usb/cdns,usb3.yaml @@ -28,9 +28,9 @@ properties: interrupts: minItems: 3 items: - - description: OTG/DRD controller interrupt - description: XHCI host controller interrupt - description: Device controller interrupt + - description: OTG/DRD controller interrupt - description: interrupt used to wake up core, e.g when usbcmd.rs is cleared by xhci core, this interrupt is optional From b6e46948f518f18018f54a5ab6c94ff86f31087f Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Thu, 27 May 2021 20:03:18 +0200 Subject: [PATCH 02/54] mmc: dw_mmc-pltfm: Remove unused As of commit 4cdc2ec1da322776 ("mmc: dw_mmc: move rockchip related code to a separate file"), dw_mmc-pltfm.c no longer uses the clock API. Signed-off-by: Geert Uytterhoeven --- drivers/mmc/host/dw_mmc-pltfm.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/mmc/host/dw_mmc-pltfm.c b/drivers/mmc/host/dw_mmc-pltfm.c index 73731cd3ba231..9901208be7973 100644 --- a/drivers/mmc/host/dw_mmc-pltfm.c +++ b/drivers/mmc/host/dw_mmc-pltfm.c @@ -17,7 +17,6 @@ #include #include #include -#include #include "dw_mmc.h" #include "dw_mmc-pltfm.h" From 2302a757777d7e46917abb70cc054e64075a8ee6 Mon Sep 17 00:00:00 2001 From: Jisheng Zhang Date: Sun, 16 May 2021 17:00:38 +0800 Subject: [PATCH 03/54] riscv: mm: Fix W+X mappings at boot When the kernel mapping was moved to the last 2GB of the address space, (__va(PFN_PHYS(max_low_pfn))) is much smaller than the .data section start address, so the last set_memory_nx() in protect_kernel_text_data() fails and the .data section is still mapped as W+X. This results in the W+X mapping warning below at boot. Fix it by passing the correct .data section page count to set_memory_nx().
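For clarity, a standalone sketch of the page-count arithmetic described above; the addresses are illustrative (not taken from a real boot) and only mirror the kernel symbols mentioned in the message:

/* Illustrative only: shows why the old computation misbehaves once the
 * kernel image lives in the last 2GB while max_low_pfn still describes
 * the linear mapping.
 */
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	unsigned long data_start = 0xffffffff80c00000UL; /* example _data */
	unsigned long end_va     = 0xffffffff81000000UL; /* example kernel_virt_addr + load_sz */
	unsigned long max_low    = 0xffffffe07fe00000UL; /* example __va(PFN_PHYS(max_low_pfn)) */

	/* Fixed computation: a sane page count covering .data up to the image end. */
	printf("pages via end_va:  %lu\n", (end_va - data_start) >> PAGE_SHIFT);

	/* Old computation: max_low < data_start, so the unsigned subtraction
	 * wraps around, set_memory_nx() is asked to change an absurd number of
	 * pages and fails, leaving .data mapped W+X.
	 */
	printf("pages via max_low: %lu\n", (max_low - data_start) >> PAGE_SHIFT);

	return 0;
}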
[ 0.396516] ------------[ cut here ]------------ [ 0.396889] riscv/mm: Found insecure W+X mapping at address (____ptrval____)/0xffffffff80c00000 [ 0.398347] WARNING: CPU: 0 PID: 1 at arch/riscv/mm/ptdump.c:258 note_page+0x244/0x24a [ 0.398964] Modules linked in: [ 0.399459] CPU: 0 PID: 1 Comm: swapper/0 Not tainted 5.13.0-rc1+ #14 [ 0.400003] Hardware name: riscv-virtio,qemu (DT) [ 0.400591] epc : note_page+0x244/0x24a [ 0.401368] ra : note_page+0x244/0x24a [ 0.401772] epc : ffffffff80007c86 ra : ffffffff80007c86 sp : ffffffe000e7bc30 [ 0.402304] gp : ffffffff80caae88 tp : ffffffe000e70000 t0 : ffffffff80cb80cf [ 0.402800] t1 : ffffffff80cb80c0 t2 : 0000000000000000 s0 : ffffffe000e7bc80 [ 0.403310] s1 : ffffffe000e7bde8 a0 : 0000000000000053 a1 : ffffffff80c83ff0 [ 0.403805] a2 : 0000000000000010 a3 : 0000000000000000 a4 : 6c7e7a5137233100 [ 0.404298] a5 : 6c7e7a5137233100 a6 : 0000000000000030 a7 : ffffffffffffffff [ 0.404849] s2 : ffffffff80e00000 s3 : 0000000040000000 s4 : 0000000000000000 [ 0.405393] s5 : 0000000000000000 s6 : 0000000000000003 s7 : ffffffe000e7bd48 [ 0.405935] s8 : ffffffff81000000 s9 : ffffffffc0000000 s10: ffffffe000e7bd48 [ 0.406476] s11: 0000000000001000 t3 : 0000000000000072 t4 : ffffffffffffffff [ 0.407016] t5 : 0000000000000002 t6 : ffffffe000e7b978 [ 0.407435] status: 0000000000000120 badaddr: 0000000000000000 cause: 0000000000000003 [ 0.408052] Call Trace: [ 0.408343] [] note_page+0x244/0x24a [ 0.408855] [] ptdump_hole+0x14/0x1e [ 0.409263] [] walk_pgd_range+0x2a0/0x376 [ 0.409690] [] walk_page_range_novma+0x4e/0x6e [ 0.410146] [] ptdump_walk_pgd+0x48/0x78 [ 0.410570] [] ptdump_check_wx+0xb4/0xf8 [ 0.410990] [] mark_rodata_ro+0x26/0x2e [ 0.411407] [] kernel_init+0x44/0x108 [ 0.411814] [] ret_from_exception+0x0/0xc [ 0.412309] ---[ end trace 7ec3459f2547ea83 ]--- [ 0.413141] Checked W+X mappings: failed, 512 W+X pages found Fixes: 2bfc6cd81bd17e43 ("riscv: Move kernel mapping outside of linear mapping") Signed-off-by: Jisheng Zhang --- arch/riscv/mm/init.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c index 4faf8bd157eaa..4c4c92ce0bb81 100644 --- a/arch/riscv/mm/init.c +++ b/arch/riscv/mm/init.c @@ -746,14 +746,18 @@ void __init protect_kernel_text_data(void) unsigned long init_data_start = (unsigned long)__init_data_begin; unsigned long rodata_start = (unsigned long)__start_rodata; unsigned long data_start = (unsigned long)_data; - unsigned long max_low = (unsigned long)(__va(PFN_PHYS(max_low_pfn))); +#if defined(CONFIG_64BIT) && defined(CONFIG_MMU) + unsigned long end_va = kernel_virt_addr + load_sz; +#else + unsigned long end_va = (unsigned long)(__va(PFN_PHYS(max_low_pfn))); +#endif set_memory_ro(text_start, (init_text_start - text_start) >> PAGE_SHIFT); set_memory_ro(init_text_start, (init_data_start - init_text_start) >> PAGE_SHIFT); set_memory_nx(init_data_start, (rodata_start - init_data_start) >> PAGE_SHIFT); /* rodata section is marked readonly in mark_rodata_ro */ set_memory_nx(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT); - set_memory_nx(data_start, (max_low - data_start) >> PAGE_SHIFT); + set_memory_nx(data_start, (end_va - data_start) >> PAGE_SHIFT); } void mark_rodata_ro(void) From 0d8671d9182fb10497766cd613f730e239ef60e6 Mon Sep 17 00:00:00 2001 From: Matteo Croce Date: Wed, 3 Mar 2021 15:28:37 +0100 Subject: [PATCH 04/54] mm: add a signature in struct page This is needed by the page_pool to avoid recycling a page not allocated via page_pool. 
The page->signature field is aliased to page->lru.next and page->compound_head, but it can't be set by mistake because the signature value is a bad pointer, and can't trigger a false positive in PageTail() because the last bit is 0. Co-developed-by: Matthew Wilcox (Oracle) Signed-off-by: Matthew Wilcox (Oracle) Signed-off-by: Matteo Croce --- include/linux/mm.h | 12 +++++++----- include/linux/mm_types.h | 12 +++++++++++- include/linux/poison.h | 3 +++ net/core/page_pool.c | 5 +++++ 4 files changed, 26 insertions(+), 6 deletions(-) diff --git a/include/linux/mm.h b/include/linux/mm.h index c274f75efcf97..b71074a5e82b7 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1668,10 +1668,12 @@ struct address_space *page_mapping(struct page *page); static inline bool page_is_pfmemalloc(const struct page *page) { /* - * Page index cannot be this large so this must be - * a pfmemalloc page. + * This is not a tail page; compound_head of a head page is unused + * at return from the page allocator, and will be overwritten + * by callers who do not care whether the page came from the + * reserves. */ - return page->index == -1UL; + return page->compound_head & BIT(1); } /* @@ -1680,12 +1682,12 @@ static inline bool page_is_pfmemalloc(const struct page *page) */ static inline void set_page_pfmemalloc(struct page *page) { - page->index = -1UL; + page->compound_head = BIT(1); } static inline void clear_page_pfmemalloc(struct page *page) { - page->index = 0; + page->compound_head = 0; } /* diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 5aacc1c10a45a..09f90598ff633 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -96,6 +96,13 @@ struct page { unsigned long private; }; struct { /* page_pool used by netstack */ + /** + * @pp_magic: magic value to avoid recycling non + * page_pool allocated pages. + */ + unsigned long pp_magic; + struct page_pool *pp; + unsigned long _pp_mapping_pad; /** * @dma_addr: might require a 64-bit value on * 32-bit architectures. @@ -130,7 +137,10 @@ struct page { }; }; struct { /* Tail pages of compound page */ - unsigned long compound_head; /* Bit zero is set */ + /* Bit zero is set + * Bit one if pfmemalloc page + */ + unsigned long compound_head; /* First tail page only */ unsigned char compound_dtor; diff --git a/include/linux/poison.h b/include/linux/poison.h index aff1c9250c821..d62ef5a6b4e9c 100644 --- a/include/linux/poison.h +++ b/include/linux/poison.h @@ -78,4 +78,7 @@ /********** security/ **********/ #define KEY_DESTROY 0xbd +/********** net/core/page_pool.c **********/ +#define PP_SIGNATURE (0x40 + POISON_POINTER_DELTA) + #endif diff --git a/net/core/page_pool.c b/net/core/page_pool.c index 3c4c4c7a04022..e698adf4eb399 100644 --- a/net/core/page_pool.c +++ b/net/core/page_pool.c @@ -17,6 +17,7 @@ #include #include #include /* for __put_page() */ +#include #include @@ -221,6 +222,8 @@ static struct page *__page_pool_alloc_page_order(struct page_pool *pool, return NULL; } + page->pp_magic |= PP_SIGNATURE; + /* Track how many pages are held 'in-flight' */ pool->pages_state_hold_cnt++; trace_page_pool_state_hold(pool, page, pool->pages_state_hold_cnt); @@ -341,6 +344,8 @@ void page_pool_release_page(struct page_pool *pool, struct page *page) DMA_ATTR_SKIP_CPU_SYNC); page_pool_set_dma_addr(page, 0); skip_dma_unmap: + page->pp_magic = 0; + /* This may be the last page returned, releasing the pool, so * it is not safe to reference pool afterwards. 
*/ From f9193c977920c3eb39c28b06c8f12ec0df4f6eb3 Mon Sep 17 00:00:00 2001 From: Matteo Croce Date: Fri, 4 Oct 2019 13:27:44 +0300 Subject: [PATCH 05/54] skbuff: add a parameter to __skb_frag_unref This is a prerequisite patch; the next one enables recycling of skbs and fragments. Add an extra argument to __skb_frag_unref() to handle recycling, and update the current users of the function accordingly. Signed-off-by: Matteo Croce --- drivers/net/ethernet/marvell/sky2.c | 2 +- drivers/net/ethernet/mellanox/mlx4/en_rx.c | 2 +- include/linux/skbuff.h | 8 +++++--- net/core/skbuff.c | 4 ++-- net/tls/tls_device.c | 2 +- 5 files changed, 10 insertions(+), 8 deletions(-) diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index 222c32367b2c1..aa0cde1dc5c0d 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -2503,7 +2503,7 @@ static void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space, if (length == 0) { /* don't need this page */ - __skb_frag_unref(frag); + __skb_frag_unref(frag, false); --skb_shinfo(skb)->nr_frags; } else { size = min(length, (unsigned) PAGE_SIZE); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index e35e4d7ef4d1d..cea62b8f554cb 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -526,7 +526,7 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv, fail: while (nr > 0) { nr--; - __skb_frag_unref(skb_shinfo(skb)->frags + nr); + __skb_frag_unref(skb_shinfo(skb)->frags + nr, false); } return 0; } diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index dbf820a50a390..7fcfea7e7b211 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -3081,10 +3081,12 @@ static inline void skb_frag_ref(struct sk_buff *skb, int f) /** * __skb_frag_unref - release a reference on a paged fragment. * @frag: the paged fragment + * @recycle: recycle the page if allocated via page_pool * - * Releases a reference on the paged fragment @frag. + * Releases a reference on the paged fragment @frag + * or recycles the page via the page_pool API.
*/ -static inline void __skb_frag_unref(skb_frag_t *frag) +static inline void __skb_frag_unref(skb_frag_t *frag, bool recycle) { put_page(skb_frag_page(frag)); } @@ -3098,7 +3100,7 @@ static inline void __skb_frag_unref(skb_frag_t *frag) */ static inline void skb_frag_unref(struct sk_buff *skb, int f) { - __skb_frag_unref(&skb_shinfo(skb)->frags[f]); + __skb_frag_unref(&skb_shinfo(skb)->frags[f], false); } /** diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 3ad22870298c9..12b7e90dd2b5c 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -664,7 +664,7 @@ static void skb_release_data(struct sk_buff *skb) skb_zcopy_clear(skb, true); for (i = 0; i < shinfo->nr_frags; i++) - __skb_frag_unref(&shinfo->frags[i]); + __skb_frag_unref(&shinfo->frags[i], false); if (shinfo->frag_list) kfree_skb_list(shinfo->frag_list); @@ -3495,7 +3495,7 @@ int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen) fragto = &skb_shinfo(tgt)->frags[merge]; skb_frag_size_add(fragto, skb_frag_size(fragfrom)); - __skb_frag_unref(fragfrom); + __skb_frag_unref(fragfrom, false); } /* Reposition in the original skb */ diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c index 76a6f8c2eec4b..ad11db2c4f638 100644 --- a/net/tls/tls_device.c +++ b/net/tls/tls_device.c @@ -127,7 +127,7 @@ static void destroy_record(struct tls_record_info *record) int i; for (i = 0; i < record->num_frags; i++) - __skb_frag_unref(&record->frags[i]); + __skb_frag_unref(&record->frags[i], false); kfree(record); } From dc06f569452c5fcdd6dec00677ccb7c0c948e79c Mon Sep 17 00:00:00 2001 From: Ilias Apalodimas Date: Fri, 4 Oct 2019 13:27:44 +0300 Subject: [PATCH 06/54] page_pool: Allow drivers to hint on SKB recycling Up to now several high speed NICs have custom mechanisms of recycling the allocated memory they use for their payloads. Our page_pool API already has recycling capabilities that are always used when we are running in 'XDP mode'. So let's tweak the API and the kernel network stack slightly and allow the recycling to happen even during standard operation. The API doesn't take into account 'split page' policies used by those drivers currently, but can be extended once we have users for that. The idea is to be able to intercept the packet on skb_release_data(). If it's a buffer coming from our page_pool API, recycle it back to the pool for further usage; otherwise just release the packet entirely. To achieve that we introduce a bit in struct sk_buff (pp_recycle:1) and a field in struct page (page->pp) to store the page_pool pointer. Storing the information in page->pp allows us to recycle both SKBs and their fragments. We could have skipped the skb bit entirely, since identical information can be derived from struct page. However, in an effort to affect the free path as little as possible, reading a single bit in the skb, which is already in cache, is better than trying to derive the same information from the data stored in struct page. The driver or page_pool has to take care of the sync operations on its own during the buffer recycling, since the buffer is, after opting in to the recycling, never unmapped. Since the gain on the drivers depends on the architecture, we are not enabling recycling by default if the page_pool API is used by a driver. In order to enable recycling, the driver must call skb_mark_for_recycle() to store the information we need for recycling in page->pp and set the recycling bit, or page_pool_store_mem_info() for a fragment.
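To make the opt-in concrete, a minimal sketch of an RX completion path using the new helper; the ring and descriptor types (my_rx_ring, my_desc, my_rx_build_skb) are made up for illustration, only skb_mark_for_recycle() and the generic skb helpers are real:

/* Hypothetical driver structures, for illustration only. */
struct my_rx_ring { struct page_pool *page_pool; };
struct my_desc { struct page *page; unsigned int headroom, len; };

static struct sk_buff *my_rx_build_skb(struct my_rx_ring *ring,
				       struct my_desc *desc)
{
	/* desc->page was allocated from ring->page_pool on the refill path. */
	struct sk_buff *skb = build_skb(page_address(desc->page), PAGE_SIZE);

	if (!skb)
		return NULL;

	skb_reserve(skb, desc->headroom);
	skb_put(skb, desc->len);

	/* Opt in: record the pool in page->pp and set skb->pp_recycle so that
	 * skb_release_data()/__skb_frag_unref() return the page to the pool
	 * instead of calling put_page().
	 */
	skb_mark_for_recycle(skb, desc->page, ring->page_pool);

	return skb;
}

Patch 07 below converts stmmac along exactly these lines, replacing page_pool_release_page() with skb_mark_for_recycle().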
Co-developed-by: Jesper Dangaard Brouer Signed-off-by: Jesper Dangaard Brouer Co-developed-by: Matteo Croce Signed-off-by: Matteo Croce Signed-off-by: Ilias Apalodimas --- include/linux/skbuff.h | 28 +++++++++++++++++++++++++--- include/net/page_pool.h | 9 +++++++++ net/core/page_pool.c | 24 ++++++++++++++++++++++++ net/core/skbuff.c | 24 ++++++++++++++++++++---- 4 files changed, 78 insertions(+), 7 deletions(-) diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 7fcfea7e7b211..057b40ad29bd2 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -40,6 +40,9 @@ #if IS_ENABLED(CONFIG_NF_CONNTRACK) #include #endif +#ifdef CONFIG_PAGE_POOL +#include +#endif /* The interface for checksum offload between the stack and networking drivers * is as follows... @@ -667,6 +670,8 @@ typedef unsigned char *sk_buff_data_t; * @head_frag: skb was allocated from page fragments, * not allocated by kmalloc() or vmalloc(). * @pfmemalloc: skbuff was allocated from PFMEMALLOC reserves + * @pp_recycle: mark the packet for recycling instead of freeing (implies + * page_pool support on driver) * @active_extensions: active extensions (skb_ext_id types) * @ndisc_nodetype: router type (from link layer) * @ooo_okay: allow the mapping of a socket to a queue to be changed @@ -791,10 +796,12 @@ struct sk_buff { fclone:2, peeked:1, head_frag:1, - pfmemalloc:1; + pfmemalloc:1, + pp_recycle:1; /* page_pool recycle indicator */ #ifdef CONFIG_SKB_EXTENSIONS __u8 active_extensions; #endif + /* fields enclosed in headers_start/headers_end are copied * using a single memcpy() in __copy_skb_header() */ @@ -3088,7 +3095,13 @@ static inline void skb_frag_ref(struct sk_buff *skb, int f) */ static inline void __skb_frag_unref(skb_frag_t *frag, bool recycle) { - put_page(skb_frag_page(frag)); + struct page *page = skb_frag_page(frag); + +#ifdef CONFIG_PAGE_POOL + if (recycle && page_pool_return_skb_page(page_address(page))) + return; +#endif + put_page(page); } /** @@ -3100,7 +3113,7 @@ static inline void __skb_frag_unref(skb_frag_t *frag, bool recycle) */ static inline void skb_frag_unref(struct sk_buff *skb, int f) { - __skb_frag_unref(&skb_shinfo(skb)->frags[f], false); + __skb_frag_unref(&skb_shinfo(skb)->frags[f], skb->pp_recycle); } /** @@ -4699,5 +4712,14 @@ static inline u64 skb_get_kcov_handle(struct sk_buff *skb) #endif } +#ifdef CONFIG_PAGE_POOL +static inline void skb_mark_for_recycle(struct sk_buff *skb, struct page *page, + struct page_pool *pp) +{ + skb->pp_recycle = 1; + page_pool_store_mem_info(page, pp); +} +#endif + #endif /* __KERNEL__ */ #endif /* _LINUX_SKBUFF_H */ diff --git a/include/net/page_pool.h b/include/net/page_pool.h index b4b6de909c934..7b9b6a1c61f5e 100644 --- a/include/net/page_pool.h +++ b/include/net/page_pool.h @@ -146,6 +146,8 @@ inline enum dma_data_direction page_pool_get_dma_dir(struct page_pool *pool) return pool->p.dma_dir; } +bool page_pool_return_skb_page(void *data); + struct page_pool *page_pool_create(const struct page_pool_params *params); #ifdef CONFIG_PAGE_POOL @@ -251,4 +253,11 @@ static inline void page_pool_ring_unlock(struct page_pool *pool) spin_unlock_bh(&pool->ring.producer_lock); } +/* Store mem_info on struct page and use it while recycling skb frags */ +static inline +void page_pool_store_mem_info(struct page *page, struct page_pool *pp) +{ + page->pp = pp; +} + #endif /* _NET_PAGE_POOL_H */ diff --git a/net/core/page_pool.c b/net/core/page_pool.c index e698adf4eb399..2a020cca489fe 100644 --- a/net/core/page_pool.c +++ b/net/core/page_pool.c @@ 
-266,6 +266,7 @@ static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool, put_page(page); continue; } + page->pp_magic |= PP_SIGNATURE; pool->alloc.cache[pool->alloc.count++] = page; /* Track how many pages are held 'in-flight' */ pool->pages_state_hold_cnt++; @@ -627,3 +628,26 @@ void page_pool_update_nid(struct page_pool *pool, int new_nid) } } EXPORT_SYMBOL(page_pool_update_nid); + +bool page_pool_return_skb_page(void *data) +{ + struct page_pool *pp; + struct page *page; + + page = virt_to_head_page(data); + if (unlikely(page->pp_magic != PP_SIGNATURE)) + return false; + + pp = (struct page_pool *)page->pp; + + /* Driver set this to memory recycling info. Reset it on recycle. + * This will *not* work for NIC using a split-page memory model. + * The page will be returned to the pool here regardless of the + * 'flipped' fragment being in use or not. + */ + page->pp = NULL; + page_pool_put_full_page(pp, virt_to_head_page(data), false); + + return true; +} +EXPORT_SYMBOL(page_pool_return_skb_page); diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 12b7e90dd2b5c..f769f08e7b32e 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -70,6 +70,9 @@ #include #include #include +#ifdef CONFIG_PAGE_POOL +#include +#endif #include #include @@ -645,10 +648,15 @@ static void skb_free_head(struct sk_buff *skb) { unsigned char *head = skb->head; - if (skb->head_frag) + if (skb->head_frag) { +#ifdef CONFIG_PAGE_POOL + if (skb->pp_recycle && page_pool_return_skb_page(head)) + return; +#endif skb_free_frag(head); - else + } else { kfree(head); + } } static void skb_release_data(struct sk_buff *skb) @@ -664,7 +672,7 @@ static void skb_release_data(struct sk_buff *skb) skb_zcopy_clear(skb, true); for (i = 0; i < shinfo->nr_frags; i++) - __skb_frag_unref(&shinfo->frags[i], false); + __skb_frag_unref(&shinfo->frags[i], skb->pp_recycle); if (shinfo->frag_list) kfree_skb_list(shinfo->frag_list); @@ -1046,6 +1054,7 @@ static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb) n->nohdr = 0; n->peeked = 0; C(pfmemalloc); + C(pp_recycle); n->destructor = NULL; C(tail); C(end); @@ -3495,7 +3504,7 @@ int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen) fragto = &skb_shinfo(tgt)->frags[merge]; skb_frag_size_add(fragto, skb_frag_size(fragfrom)); - __skb_frag_unref(fragfrom, false); + __skb_frag_unref(fragfrom, skb->pp_recycle); } /* Reposition in the original skb */ @@ -5285,6 +5294,13 @@ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from, if (skb_cloned(to)) return false; + /* The page pool signature of struct page will eventually figure out + * which pages can be recycled or not but for now let's prohibit slab + * allocated and page_pool allocated SKBs from being coalesced. 
+ */ + if (to->pp_recycle != from->pp_recycle) + return false; + if (len <= skb_tailroom(to)) { if (len) BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len)); From 0bed5fc187da5864f66ddaaa2efc60ea42513544 Mon Sep 17 00:00:00 2001 From: Matteo Croce Date: Fri, 21 May 2021 03:05:25 +0200 Subject: [PATCH 07/54] stmmac: recycle buffers Signed-off-by: Matteo Croce --- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 5d956a5534345..d7d0f410bef14 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -5245,7 +5245,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) priv->dma_buf_sz); /* Data payload appended into SKB */ - page_pool_release_page(rx_q->page_pool, buf->page); + skb_mark_for_recycle(skb, buf->page, rx_q->page_pool); buf->page = NULL; } @@ -5257,7 +5257,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) priv->dma_buf_sz); /* Data payload appended into SKB */ - page_pool_release_page(rx_q->page_pool, buf->sec_page); + skb_mark_for_recycle(skb, buf->sec_page, rx_q->page_pool); buf->sec_page = NULL; } From 20f367831372e11e35a13a87e708a99ac5de2b88 Mon Sep 17 00:00:00 2001 From: Matteo Croce Date: Fri, 21 May 2021 03:26:38 +0200 Subject: [PATCH 08/54] stmmac: use GFP_DMA32 Signed-off-by: Matteo Croce --- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index d7d0f410bef14..3491adaa8d0ea 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -1430,14 +1430,16 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p, struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i]; if (!buf->page) { - buf->page = page_pool_dev_alloc_pages(rx_q->page_pool); + buf->page = page_pool_alloc_pages(rx_q->page_pool, + GFP_ATOMIC | __GFP_NOWARN | GFP_DMA32); if (!buf->page) return -ENOMEM; buf->page_offset = stmmac_rx_offset(priv); } if (priv->sph && !buf->sec_page) { - buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool); + buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, + GFP_ATOMIC | __GFP_NOWARN | GFP_DMA32); if (!buf->sec_page) return -ENOMEM; @@ -4451,13 +4453,15 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue) p = rx_q->dma_rx + entry; if (!buf->page) { - buf->page = page_pool_dev_alloc_pages(rx_q->page_pool); + buf->page = page_pool_alloc_pages(rx_q->page_pool, + GFP_ATOMIC | __GFP_NOWARN | GFP_DMA32); if (!buf->page) break; } if (priv->sph && !buf->sec_page) { - buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool); + buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, + GFP_ATOMIC | __GFP_NOWARN | GFP_DMA32); if (!buf->sec_page) break; From 76ffc62e090e762b96fa30ef8948b89f3b83a04d Mon Sep 17 00:00:00 2001 From: Huan Feng Date: Fri, 8 Jan 2021 03:19:19 +0800 Subject: [PATCH 09/54] gpio: starfive-vic: Add StarFive VIC GPIO driver Note: this includes the GPIO driver update [v0.9->v1.0], which improves the module init and exit functions to make sure that the driver is initialized earlier than other drivers which need to use GPIO.
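The "initialized earlier" part comes down to the initcall level used for registration; a minimal sketch of the mechanism, mirroring the registration at the end of the new file below (the driver name is the one added by this patch):

/* subsys_initcall() runs before the device_initcall() level that
 * module_init() expands to for built-in code, so the gpiochip already
 * exists when consumer drivers probe and request their GPIOs.
 */
static int __init starfive_gpio_init(void)
{
	return platform_driver_register(&starfive_gpio_driver);
}
subsys_initcall(starfive_gpio_init);	/* instead of module_init() */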
Signed-off-by: Huan Feng Signed-off-by: Emil Renner Berthing --- drivers/gpio/Kconfig | 8 + drivers/gpio/Makefile | 1 + drivers/gpio/gpio-starfive-vic.c | 563 ++++++++++++++++++++++++++++++ include/linux/gpio-starfive-vic.h | 384 ++++++++++++++++++++ 4 files changed, 956 insertions(+) create mode 100755 drivers/gpio/gpio-starfive-vic.c create mode 100644 include/linux/gpio-starfive-vic.h diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index 1dd0ec6727fde..32f3a4a2eeefa 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig @@ -542,6 +542,14 @@ config GPIO_SIFIVE help Say yes here to support the GPIO device on SiFive SoCs. +config GPIO_STARFIVE_VIC + bool "Starfive VIC GPIO support" + depends on OF_GPIO + select GPIOLIB_IRQCHIP + default y if SOC_STARFIVE_VIC7100 + help + Say yes here to support the GPIO device on Starfive VIC SoCs. + config GPIO_SIOX tristate "SIOX GPIO support" depends on SIOX diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile index d7c81e1611a4d..cb5dd75e61f5c 100644 --- a/drivers/gpio/Makefile +++ b/drivers/gpio/Makefile @@ -132,6 +132,7 @@ obj-$(CONFIG_GPIO_SAMA5D2_PIOBU) += gpio-sama5d2-piobu.o obj-$(CONFIG_GPIO_SCH311X) += gpio-sch311x.o obj-$(CONFIG_GPIO_SCH) += gpio-sch.o obj-$(CONFIG_GPIO_SIFIVE) += gpio-sifive.o +obj-$(CONFIG_GPIO_STARFIVE_VIC) += gpio-starfive-vic.o obj-$(CONFIG_GPIO_SIOX) += gpio-siox.o obj-$(CONFIG_GPIO_SL28CPLD) += gpio-sl28cpld.o obj-$(CONFIG_GPIO_SODAVILLE) += gpio-sodaville.o diff --git a/drivers/gpio/gpio-starfive-vic.c b/drivers/gpio/gpio-starfive-vic.c new file mode 100755 index 0000000000000..484677c1eb43b --- /dev/null +++ b/drivers/gpio/gpio-starfive-vic.c @@ -0,0 +1,563 @@ +/* + ****************************************************************************** + * @file gpio-starfive-vic.c + * @author StarFive Technology + * @version V1.0 + * @date 08/13/2020 + * @brief + ****************************************************************************** + * @copy + * + * THE PRESENT SOFTWARE WHICH IS FOR GUIDANCE ONLY AIMS AT PROVIDING CUSTOMERS + * WITH CODING INFORMATION REGARDING THEIR PRODUCTS IN ORDER FOR THEM TO SAVE + * TIME. AS A RESULT, STARFIVE SHALL NOT BE HELD LIABLE FOR ANY + * DIRECT, INDIRECT OR CONSEQUENTIAL DAMAGES WITH RESPECT TO ANY CLAIMS ARISING + * FROM THE CONTENT OF SUCH SOFTWARE AND/OR THE USE MADE BY CUSTOMERS OF THE + * CODING INFORMATION CONTAINED HEREIN IN CONNECTION WITH THEIR PRODUCTS. + * + * COPYRIGHT 2020 Shanghai StarFive Technology Co., Ltd. 
+ */ + +#include +#include +#include +#include + +#define GPIO_EN 0x0 +#define GPIO_IS_LOW 0x10 +#define GPIO_IS_HIGH 0x14 +#define GPIO_IBE_LOW 0x18 +#define GPIO_IBE_HIGH 0x1c +#define GPIO_IEV_LOW 0x20 +#define GPIO_IEV_HIGH 0x24 +#define GPIO_IE_LOW 0x28 +#define GPIO_IE_HIGH 0x2c +#define GPIO_IC_LOW 0x30 +#define GPIO_IC_HIGH 0x34 +//read only +#define GPIO_RIS_LOW 0x38 +#define GPIO_RIS_HIGH 0x3c +#define GPIO_MIS_LOW 0x40 +#define GPIO_MIS_HIGH 0x44 +#define GPIO_DIN_LOW 0x48 +#define GPIO_DIN_HIGH 0x4c + +#define GPIO_DOUT_X_REG 0x50 +#define GPIO_DOEN_X_REG 0x54 + +#define MAX_GPIO 64 + +#define PROC_VIC "vic_gpio" + +struct starfive_gpio { + raw_spinlock_t lock; + void __iomem *base; + struct gpio_chip gc; + unsigned long enabled; + unsigned trigger[MAX_GPIO]; + unsigned int irq_parent[MAX_GPIO]; +}; + +static DEFINE_SPINLOCK(sfg_lock); + +static void __iomem *gpio_base = NULL; + +static int starfive_direction_input(struct gpio_chip *gc, unsigned offset) +{ + struct starfive_gpio *chip = gpiochip_get_data(gc); + unsigned long flags; + + if (offset >= gc->ngpio) + return -EINVAL; + + raw_spin_lock_irqsave(&chip->lock, flags); + writel_relaxed(0x1, chip->base + GPIO_DOEN_X_REG + offset * 8); + raw_spin_unlock_irqrestore(&chip->lock, flags); + + return 0; +} + +static int starfive_direction_output(struct gpio_chip *gc, unsigned offset, int value) +{ + struct starfive_gpio *chip = gpiochip_get_data(gc); + unsigned long flags; + + if (offset >= gc->ngpio) + return -EINVAL; + + raw_spin_lock_irqsave(&chip->lock, flags); + writel_relaxed(0x0, chip->base + GPIO_DOEN_X_REG + offset * 8); + writel_relaxed(value, chip->base + GPIO_DOUT_X_REG + offset * 8); + raw_spin_unlock_irqrestore(&chip->lock, flags); + + return 0; +} + +static int starfive_get_direction(struct gpio_chip *gc, unsigned offset) +{ + struct starfive_gpio *chip = gpiochip_get_data(gc); + + if (offset >= gc->ngpio) + return -EINVAL; + + return readl_relaxed(chip->base + GPIO_DOEN_X_REG + offset * 8) & 0x1; +} + +static int starfive_get_value(struct gpio_chip *gc, unsigned offset) +{ + struct starfive_gpio *chip = gpiochip_get_data(gc); + int value; + + if (offset >= gc->ngpio) + return -EINVAL; + + if (offset < 32) { + value = readl_relaxed(chip->base + GPIO_DIN_LOW); + return (value >> offset) & 0x1; + } else { + value = readl_relaxed(chip->base + GPIO_DIN_HIGH); + return (value >> (offset - 32)) & 0x1; + } +} + +static void starfive_set_value(struct gpio_chip *gc, unsigned offset, int value) +{ + struct starfive_gpio *chip = gpiochip_get_data(gc); + unsigned long flags; + + if (offset >= gc->ngpio) + return; + + raw_spin_lock_irqsave(&chip->lock, flags); + writel_relaxed(value, chip->base + GPIO_DOUT_X_REG + offset * 8); + raw_spin_unlock_irqrestore(&chip->lock, flags); +} + +static void starfive_set_ie(struct starfive_gpio *chip, int offset) +{ + unsigned long flags; + int old_value, new_value; + int reg_offset, index; + + if (offset < 32) { + reg_offset = 0; + index = offset; + } else { + reg_offset = 4; + index = offset - 32; + } + raw_spin_lock_irqsave(&chip->lock, flags); + old_value = readl_relaxed(chip->base + GPIO_IE_LOW + reg_offset); + new_value = old_value | ( 1 << index); + writel_relaxed(new_value, chip->base + GPIO_IE_LOW + reg_offset); + raw_spin_unlock_irqrestore(&chip->lock, flags); +} + +static int starfive_irq_set_type(struct irq_data *d, unsigned trigger) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct starfive_gpio *chip = gpiochip_get_data(gc); + int offset = irqd_to_hwirq(d); + 
unsigned int reg_is, reg_ibe, reg_iev; + int reg_offset, index; + + if (offset < 0 || offset >= gc->ngpio) + return -EINVAL; + + if (offset < 32) { + reg_offset = 0; + index = offset; + } else { + reg_offset = 4; + index = offset - 32; + } + switch (trigger) { + case IRQ_TYPE_LEVEL_HIGH: + reg_is = readl_relaxed(chip->base + GPIO_IS_LOW + reg_offset); + reg_ibe = readl_relaxed(chip->base + GPIO_IBE_LOW + reg_offset); + reg_iev = readl_relaxed(chip->base + GPIO_IEV_LOW + reg_offset); + reg_is &= ~(1 << index); + reg_ibe &= ~(1 << index); + reg_iev |= (1 << index); + writel_relaxed(reg_is, chip->base + GPIO_IS_LOW + reg_offset); + writel_relaxed(reg_is, chip->base + GPIO_IS_LOW + reg_offset); + writel_relaxed(reg_is, chip->base + GPIO_IS_LOW + reg_offset); + break; + case IRQ_TYPE_LEVEL_LOW: + reg_is = readl_relaxed(chip->base + GPIO_IS_LOW + reg_offset); + reg_ibe = readl_relaxed(chip->base + GPIO_IBE_LOW + reg_offset); + reg_iev = readl_relaxed(chip->base + GPIO_IEV_LOW + reg_offset); + reg_is &= ~(1 << index); + reg_ibe &= ~(1 << index); + reg_iev &= (1 << index); + writel_relaxed(reg_is, chip->base + GPIO_IS_LOW + reg_offset); + writel_relaxed(reg_is, chip->base + GPIO_IS_LOW + reg_offset); + writel_relaxed(reg_is, chip->base + GPIO_IS_LOW + reg_offset); + break; + case IRQ_TYPE_EDGE_BOTH: + reg_is = readl_relaxed(chip->base + GPIO_IS_LOW + reg_offset); + reg_ibe = readl_relaxed(chip->base + GPIO_IBE_LOW + reg_offset); + //reg_iev = readl_relaxed(chip->base + GPIO_IEV_LOW + reg_offset); + reg_is |= ~(1 << index); + reg_ibe |= ~(1 << index); + //reg_iev |= (1 << index); + writel_relaxed(reg_is, chip->base + GPIO_IS_LOW + reg_offset); + writel_relaxed(reg_is, chip->base + GPIO_IS_LOW + reg_offset); + //writel_relaxed(reg_is, chip->base + GPIO_IS_LOW + reg_offset); + break; + case IRQ_TYPE_EDGE_RISING: + reg_is = readl_relaxed(chip->base + GPIO_IS_LOW + reg_offset); + reg_ibe = readl_relaxed(chip->base + GPIO_IBE_LOW + reg_offset); + reg_iev = readl_relaxed(chip->base + GPIO_IEV_LOW + reg_offset); + reg_is |= ~(1 << index); + reg_ibe &= ~(1 << index); + reg_iev |= (1 << index); + writel_relaxed(reg_is, chip->base + GPIO_IS_LOW + reg_offset); + writel_relaxed(reg_is, chip->base + GPIO_IS_LOW + reg_offset); + writel_relaxed(reg_is, chip->base + GPIO_IS_LOW + reg_offset); + break; + case IRQ_TYPE_EDGE_FALLING: + reg_is = readl_relaxed(chip->base + GPIO_IS_LOW + reg_offset); + reg_ibe = readl_relaxed(chip->base + GPIO_IBE_LOW + reg_offset); + reg_iev = readl_relaxed(chip->base + GPIO_IEV_LOW + reg_offset); + reg_is |= ~(1 << index); + reg_ibe &= ~(1 << index); + reg_iev &= (1 << index); + writel_relaxed(reg_is, chip->base + GPIO_IS_LOW + reg_offset); + writel_relaxed(reg_is, chip->base + GPIO_IS_LOW + reg_offset); + writel_relaxed(reg_is, chip->base + GPIO_IS_LOW + reg_offset); + break; + } + + chip->trigger[offset] = trigger; + starfive_set_ie(chip, offset); + return 0; +} + +/* chained_irq_{enter,exit} already mask the parent */ +static void starfive_irq_mask(struct irq_data *d) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct starfive_gpio *chip = gpiochip_get_data(gc); + unsigned int value; + int offset = irqd_to_hwirq(d); + int reg_offset, index; + + if (offset < 0 || offset >= gc->ngpio) + return; + + if (offset < 32) { + reg_offset = 0; + index = offset; + } else { + reg_offset = 4; + index = offset - 32; + } + + value = readl_relaxed(chip->base + GPIO_IE_LOW + reg_offset); + value &= ~(0x1 << index); + writel_relaxed(value, chip->base + GPIO_IE_LOW + reg_offset); +} + 
+static void starfive_irq_unmask(struct irq_data *d) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct starfive_gpio *chip = gpiochip_get_data(gc); + unsigned int value; + int offset = irqd_to_hwirq(d); + int reg_offset, index; + + if (offset < 0 || offset >= gc->ngpio) + return; + + if (offset < 32) { + reg_offset = 0; + index = offset; + } else { + reg_offset = 4; + index = offset - 32; + } + + value = readl_relaxed(chip->base + GPIO_IE_LOW + reg_offset); + value |= (0x1 << index); + writel_relaxed(value, chip->base + GPIO_IE_LOW + reg_offset); +} + +static void starfive_irq_enable(struct irq_data *d) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct starfive_gpio *chip = gpiochip_get_data(gc); + int offset = irqd_to_hwirq(d); + + starfive_irq_unmask(d); + assign_bit(offset, &chip->enabled, 1); +} + +static void starfive_irq_disable(struct irq_data *d) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct starfive_gpio *chip = gpiochip_get_data(gc); + int offset = irqd_to_hwirq(d) % MAX_GPIO; // must not fail + + assign_bit(offset, &chip->enabled, 0); + starfive_set_ie(chip, offset); +} + +static struct irq_chip starfive_irqchip = { + .name = "starfive-gpio", + .irq_set_type = starfive_irq_set_type, + .irq_mask = starfive_irq_mask, + .irq_unmask = starfive_irq_unmask, + .irq_enable = starfive_irq_enable, + .irq_disable = starfive_irq_disable, +}; + +static irqreturn_t starfive_irq_handler(int irq, void *gc) +{ + int offset; + int reg_offset, index; + unsigned int value; + unsigned long flags; + struct starfive_gpio *chip = gc; + + for (offset = 0; offset < 64; offset++) { + if (offset < 32) { + reg_offset = 0; + index = offset; + } else { + reg_offset = 4; + index = offset - 32; + } + + raw_spin_lock_irqsave(&chip->lock, flags); + value = readl_relaxed(chip->base + GPIO_MIS_LOW + reg_offset); + if (value & BIT(index)) + writel_relaxed(BIT(index), chip->base + GPIO_IC_LOW + + reg_offset); + + /* + generic_handle_irq(irq_find_mapping(chip->gc.irq.domain, + offset)); + */ + raw_spin_unlock_irqrestore(&chip->lock, flags); + } + + return IRQ_HANDLED; +} + +void sf_vic_gpio_dout_reverse(int gpio, int en) +{ + unsigned int value; + int offset; + + if (!gpio_base) + return; + + offset = gpio * 8 + GPIO_DOUT_X_REG; + + spin_lock(&sfg_lock); + value = ioread32(gpio_base + offset); + value &= ~(0x1 << 31); + value |= (en & 0x1) << 31; + iowrite32(value, gpio_base + offset); + spin_unlock(&sfg_lock); +} +EXPORT_SYMBOL_GPL(sf_vic_gpio_dout_reverse); + +void sf_vic_gpio_dout_value(int gpio, int v) +{ + unsigned int value; + int offset; + + if (!gpio_base) + return; + + offset = gpio * 8 + GPIO_DOUT_X_REG; + spin_lock(&sfg_lock); + value = ioread32(gpio_base + offset); + value &= ~(0xFF); + value |= (v&0xFF); + iowrite32(value, gpio_base + offset); + spin_unlock(&sfg_lock); +} +EXPORT_SYMBOL_GPL(sf_vic_gpio_dout_value); + +void sf_vic_gpio_dout_low(int gpio) +{ + sf_vic_gpio_dout_value(gpio, 0); +} +EXPORT_SYMBOL_GPL(sf_vic_gpio_dout_low); + +void sf_vic_gpio_dout_high(int gpio) +{ + sf_vic_gpio_dout_value(gpio, 1); +} +EXPORT_SYMBOL_GPL(sf_vic_gpio_dout_high); + +void sf_vic_gpio_doen_reverse(int gpio, int en) +{ + unsigned int value; + int offset; + + if (!gpio_base) + return; + + offset = gpio * 8 + GPIO_DOEN_X_REG; + + spin_lock(&sfg_lock); + value = ioread32(gpio_base + offset); + value &= ~(0x1 << 31); + value |= (en & 0x1) << 31; + iowrite32(value, gpio_base + offset); + spin_unlock(&sfg_lock); +} +EXPORT_SYMBOL_GPL(sf_vic_gpio_doen_reverse); + 
+void sf_vic_gpio_doen_value(int gpio, int v) +{ + unsigned int value; + int offset; + + if (!gpio_base) + return; + + offset = gpio * 8 + GPIO_DOEN_X_REG; + + spin_lock(&sfg_lock); + value = ioread32(gpio_base + offset); + value &= ~(0xFF); + value |= (v&0xFF); + iowrite32(value, gpio_base + offset); + spin_unlock(&sfg_lock); +} +EXPORT_SYMBOL_GPL(sf_vic_gpio_doen_value); + +void sf_vic_gpio_doen_low(int gpio) +{ + sf_vic_gpio_doen_value(gpio, 0); +} +EXPORT_SYMBOL_GPL(sf_vic_gpio_doen_low); + +void sf_vic_gpio_doen_high(int gpio) +{ + sf_vic_gpio_doen_value(gpio, 1); +} +EXPORT_SYMBOL_GPL(sf_vic_gpio_doen_high); + +void sf_vic_gpio_manual(int offset, int v) +{ + unsigned int value; + + if (!gpio_base) + return ; + + spin_lock(&sfg_lock); + value = ioread32(gpio_base + offset); + value &= ~(0xFF); + value |= (v&0xFF); + iowrite32(value, gpio_base + offset); + spin_unlock(&sfg_lock); +} +EXPORT_SYMBOL_GPL(sf_vic_gpio_manual); + +static int starfive_gpio_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct starfive_gpio *chip; + struct gpio_irq_chip *girq; + struct resource *res; + int irq, ret, ngpio; + + chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL); + if (!chip) { + dev_err(dev, "out of memory\n"); + return -ENOMEM; + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + chip->base = devm_ioremap_resource(dev, res); + if (IS_ERR(chip->base)) { + dev_err(dev, "failed to allocate device memory\n"); + return PTR_ERR(chip->base); + } + gpio_base = chip->base ; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(dev, "Cannot get IRQ resource\n"); + return irq; + } + + raw_spin_lock_init(&chip->lock); + chip->gc.direction_input = starfive_direction_input; + chip->gc.direction_output = starfive_direction_output; + chip->gc.get_direction = starfive_get_direction; + chip->gc.get = starfive_get_value; + chip->gc.set = starfive_set_value; + chip->gc.base = 0; + chip->gc.ngpio = 64; + chip->gc.label = dev_name(dev); + chip->gc.parent = dev; + chip->gc.owner = THIS_MODULE; + + girq = &chip->gc.irq; + girq->chip = &starfive_irqchip; + girq->parent_handler = NULL; + girq->num_parents = 0; + girq->parents = NULL; + girq->default_type = IRQ_TYPE_NONE; + girq->handler = handle_simple_irq; + + ret = gpiochip_add_data(&chip->gc, chip); + if (ret) { + dev_err(dev, "gpiochip_add_data ret=%d!\n", ret); + return ret; + } + + /* Disable all GPIO interrupts before enabling parent interrupts */ + iowrite32(0, chip->base + GPIO_IE_HIGH); + iowrite32(0, chip->base + GPIO_IE_LOW); + chip->enabled = 0; + + ret = devm_request_irq(dev, irq, starfive_irq_handler, IRQF_SHARED, + dev_name(dev), chip); + if (ret) { + dev_err(dev, "IRQ handler registering failed (%d)\n", ret); + return ret; + } + + writel_relaxed(1, chip->base + GPIO_EN); + + dev_info(dev, "StarFive GPIO chip registered %d GPIOs\n", ngpio); + + return 0; +} + +static const struct of_device_id starfive_gpio_match[] = { + { .compatible = "starfive,gpio0", }, + { }, +}; + +static struct platform_driver starfive_gpio_driver = { + .probe = starfive_gpio_probe, + .driver = { + .name = "starfive_gpio", + .of_match_table = of_match_ptr(starfive_gpio_match), + }, +}; + +static int __init starfive_gpio_init(void) +{ + return platform_driver_register(&starfive_gpio_driver); +} +subsys_initcall(starfive_gpio_init); + +static void __exit starfive_gpio_exit(void) +{ + platform_driver_unregister(&starfive_gpio_driver); +} +module_exit(starfive_gpio_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Huan Feng "); 
+MODULE_DESCRIPTION("Starfive VIC GPIO generator driver"); diff --git a/include/linux/gpio-starfive-vic.h b/include/linux/gpio-starfive-vic.h new file mode 100644 index 0000000000000..0afcaf1876fbe --- /dev/null +++ b/include/linux/gpio-starfive-vic.h @@ -0,0 +1,384 @@ +#ifndef __GPIO_STARFIVE_VIC_H +#define __GPIO_STARFIVE_VIC_H + +extern void sf_vic_gpio_dout_reverse(int gpio, int en); +/* + * #define SET_GPIO_0_dout_cpu_jtag_tdo { \ + * uint32_t _ezchip_macro_read_value_=MA_INW(gpio_0_dout_REG_ADDR); \ + * _ezchip_macro_read_value_ &= ~(0xFF); \ + * _ezchip_macro_read_value_ |= (0x3&0xFF); \ + * MA_OUTW(gpio_0_dout_REG_ADDR,_ezchip_macro_read_value_); \ + * } + * in this example gpio is: 0, and v is: 0x3 + */ +extern void sf_vic_gpio_dout_value(int gpio, int v); +extern void sf_vic_gpio_dout_low(int gpio); +extern void sf_vic_gpio_dout_high(int gpio); + +extern void sf_vic_gpio_doen_reverse(int gpio, int en); +/* + * the same as sf_vic_gpio_dout_value + */ +extern void sf_vic_gpio_doen_value(int gpio, int v); +extern void sf_vic_gpio_doen_low(int gpio); +extern void sf_vic_gpio_doen_high(int gpio); + +/* + *#define SET_GPIO_uart2_pad_sin(gpio) { \ + * uint32_t _ezchip_macro_read_value_=MA_INW(gpio_uart2_pad_sin_REG_ADDR); \ + * _ezchip_macro_read_value_ &= ~(0xFF); \ + * _ezchip_macro_read_value_ |= ((gpio+2)&0xFF); \ + * MA_OUTW(gpio_uart2_pad_sin_REG_ADDR,_ezchip_macro_read_value_); \ + *} + * in this example offset is: 0x370, the offset of gpio_uart2_pad_sin_REG_ADDR + * and v is: gpio + 2 + */ +extern void sf_vic_gpio_manual(int offset, int v); + +#define SET_GPIO_dout_reverse_(gpionum, en) sf_vic_gpio_dout_reverse(gpionum, en) +#define SET_GPIO_dout_LOW(gpionum) sf_vic_gpio_dout_value(gpionum, 0x0) +#define SET_GPIO_dout_HIGH(gpionum) sf_vic_gpio_dout_value(gpionum, 0x1) +#define SET_GPIO_dout_clk_gmac_tophyref(gpionum) sf_vic_gpio_dout_value(gpionum, 0x2) +#define SET_GPIO_dout_cpu_jtag_tdo(gpionum) sf_vic_gpio_dout_value(gpionum, 0x3) +#define SET_GPIO_dout_cpu_jtag_tdo_oen(gpionum) sf_vic_gpio_dout_value(gpionum, 0x4) +#define SET_GPIO_dout_dmic_clk_out(gpionum) sf_vic_gpio_dout_value(gpionum, 0x5) +#define SET_GPIO_dout_dsp_JTDOEn_pad(gpionum) sf_vic_gpio_dout_value(gpionum, 0x6) +#define SET_GPIO_dout_dsp_JTDO_pad(gpionum) sf_vic_gpio_dout_value(gpionum, 0x7) +#define SET_GPIO_dout_i2c0_pad_sck_oe(gpionum) sf_vic_gpio_dout_value(gpionum, 0x8) +#define SET_GPIO_dout_i2c0_pad_sda_oe(gpionum) sf_vic_gpio_dout_value(gpionum, 0x9) +#define SET_GPIO_dout_i2c1_pad_sck_oe(gpionum) sf_vic_gpio_dout_value(gpionum, 0xa) +#define SET_GPIO_dout_i2c1_pad_sda_oe(gpionum) sf_vic_gpio_dout_value(gpionum, 0xb) +#define SET_GPIO_dout_i2c2_pad_sck_oe(gpionum) sf_vic_gpio_dout_value(gpionum, 0xc) +#define SET_GPIO_dout_i2c2_pad_sda_oe(gpionum) sf_vic_gpio_dout_value(gpionum, 0xd) +#define SET_GPIO_dout_i2c3_pad_sck_oe(gpionum) sf_vic_gpio_dout_value(gpionum, 0xe) +#define SET_GPIO_dout_i2c3_pad_sda_oe(gpionum) sf_vic_gpio_dout_value(gpionum, 0xf) +#define SET_GPIO_dout_i2srx_bclk_out(gpionum) sf_vic_gpio_dout_value(gpionum, 0x10) +#define SET_GPIO_dout_i2srx_bclk_out_oen(gpionum) sf_vic_gpio_dout_value(gpionum, 0x11) +#define SET_GPIO_dout_i2srx_lrck_out(gpionum) sf_vic_gpio_dout_value(gpionum, 0x12) +#define SET_GPIO_dout_i2srx_lrck_out_oen(gpionum) sf_vic_gpio_dout_value(gpionum, 0x13) +#define SET_GPIO_dout_i2srx_mclk_out(gpionum) sf_vic_gpio_dout_value(gpionum, 0x14) +#define SET_GPIO_dout_i2stx_bclk_out(gpionum) sf_vic_gpio_dout_value(gpionum, 0x15) +#define 
SET_GPIO_dout_i2stx_bclk_out_oen(gpionum) sf_vic_gpio_dout_value(gpionum, 0x16) +#define SET_GPIO_dout_i2stx_lrck_out(gpionum) sf_vic_gpio_dout_value(gpionum, 0x17) +#define SET_GPIO_dout_i2stx_lrckout_oen(gpionum) sf_vic_gpio_dout_value(gpionum, 0x18) +#define SET_GPIO_dout_i2stx_mclk_out(gpionum) sf_vic_gpio_dout_value(gpionum, 0x19) +#define SET_GPIO_dout_i2stx_sdout0(gpionum) sf_vic_gpio_dout_value(gpionum, 0x1a) +#define SET_GPIO_dout_i2stx_sdout1(gpionum) sf_vic_gpio_dout_value(gpionum, 0x1b) +#define SET_GPIO_dout_lcd_pad_csm_n(gpionum) sf_vic_gpio_dout_value(gpionum, 0x1c) +#define SET_GPIO_dout_pwm_pad_oe_n_bit0(gpionum) sf_vic_gpio_dout_value(gpionum, 0x1d) +#define SET_GPIO_dout_pwm_pad_oe_n_bit1(gpionum) sf_vic_gpio_dout_value(gpionum, 0x1e) +#define SET_GPIO_dout_pwm_pad_oe_n_bit2(gpionum) sf_vic_gpio_dout_value(gpionum, 0x1f) +#define SET_GPIO_dout_pwm_pad_oe_n_bit3(gpionum) sf_vic_gpio_dout_value(gpionum, 0x20) +#define SET_GPIO_dout_pwm_pad_oe_n_bit4(gpionum) sf_vic_gpio_dout_value(gpionum, 0x21) +#define SET_GPIO_dout_pwm_pad_oe_n_bit5(gpionum) sf_vic_gpio_dout_value(gpionum, 0x22) +#define SET_GPIO_dout_pwm_pad_oe_n_bit6(gpionum) sf_vic_gpio_dout_value(gpionum, 0x23) +#define SET_GPIO_dout_pwm_pad_oe_n_bit7(gpionum) sf_vic_gpio_dout_value(gpionum, 0x24) +#define SET_GPIO_dout_pwm_pad_out_bit0(gpionum) sf_vic_gpio_dout_value(gpionum, 0x25) +#define SET_GPIO_dout_pwm_pad_out_bit1(gpionum) sf_vic_gpio_dout_value(gpionum, 0x26) +#define SET_GPIO_dout_pwm_pad_out_bit2(gpionum) sf_vic_gpio_dout_value(gpionum, 0x27) +#define SET_GPIO_dout_pwm_pad_out_bit3(gpionum) sf_vic_gpio_dout_value(gpionum, 0x28) +#define SET_GPIO_dout_pwm_pad_out_bit4(gpionum) sf_vic_gpio_dout_value(gpionum, 0x29) +#define SET_GPIO_dout_pwm_pad_out_bit5(gpionum) sf_vic_gpio_dout_value(gpionum, 0x2a) +#define SET_GPIO_dout_pwm_pad_out_bit6(gpionum) sf_vic_gpio_dout_value(gpionum, 0x2b) +#define SET_GPIO_dout_pwm_pad_out_bit7(gpionum) sf_vic_gpio_dout_value(gpionum, 0x2c) +#define SET_GPIO_dout_pwmdac_left_out(gpionum) sf_vic_gpio_dout_value(gpionum, 0x2d) +#define SET_GPIO_dout_pwmdac_right_out(gpionum) sf_vic_gpio_dout_value(gpionum, 0x2e) +#define SET_GPIO_dout_qspi_csn1_out(gpionum) sf_vic_gpio_dout_value(gpionum, 0x2f) +#define SET_GPIO_dout_qspi_csn2_out(gpionum) sf_vic_gpio_dout_value(gpionum, 0x30) +#define SET_GPIO_dout_qspi_csn3_out(gpionum) sf_vic_gpio_dout_value(gpionum, 0x31) +#define SET_GPIO_dout_register23_SCFG_cmsensor_rst0(gpionum) sf_vic_gpio_dout_value(gpionum, 0x32) +#define SET_GPIO_dout_register23_SCFG_cmsensor_rst1(gpionum) sf_vic_gpio_dout_value(gpionum, 0x33) +#define SET_GPIO_dout_register32_SCFG_gmac_phy_rstn(gpionum) sf_vic_gpio_dout_value(gpionum, 0x34) +#define SET_GPIO_dout_sdio0_pad_card_power_en(gpionum) sf_vic_gpio_dout_value(gpionum, 0x35) +#define SET_GPIO_dout_sdio0_pad_cclk_out(gpionum) sf_vic_gpio_dout_value(gpionum, 0x36) +#define SET_GPIO_dout_sdio0_pad_ccmd_oe(gpionum) sf_vic_gpio_dout_value(gpionum, 0x37) +#define SET_GPIO_dout_sdio0_pad_ccmd_out(gpionum) sf_vic_gpio_dout_value(gpionum, 0x38) +#define SET_GPIO_dout_sdio0_pad_cdata_oe_bit0(gpionum) sf_vic_gpio_dout_value(gpionum, 0x39) +#define SET_GPIO_dout_sdio0_pad_cdata_oe_bit1(gpionum) sf_vic_gpio_dout_value(gpionum, 0x3a) +#define SET_GPIO_dout_sdio0_pad_cdata_oe_bit2(gpionum) sf_vic_gpio_dout_value(gpionum, 0x3b) +#define SET_GPIO_dout_sdio0_pad_cdata_oe_bit3(gpionum) sf_vic_gpio_dout_value(gpionum, 0x3c) +#define SET_GPIO_dout_sdio0_pad_cdata_oe_bit4(gpionum) sf_vic_gpio_dout_value(gpionum, 0x3d) +#define 
SET_GPIO_dout_sdio0_pad_cdata_oe_bit5(gpionum) sf_vic_gpio_dout_value(gpionum, 0x3e) +#define SET_GPIO_dout_sdio0_pad_cdata_oe_bit6(gpionum) sf_vic_gpio_dout_value(gpionum, 0x3f) +#define SET_GPIO_dout_sdio0_pad_cdata_oe_bit7(gpionum) sf_vic_gpio_dout_value(gpionum, 0x40) +#define SET_GPIO_dout_sdio0_pad_cdata_out_bit0(gpionum) sf_vic_gpio_dout_value(gpionum, 0x41) +#define SET_GPIO_dout_sdio0_pad_cdata_out_bit1(gpionum) sf_vic_gpio_dout_value(gpionum, 0x42) +#define SET_GPIO_dout_sdio0_pad_cdata_out_bit2(gpionum) sf_vic_gpio_dout_value(gpionum, 0x43) +#define SET_GPIO_dout_sdio0_pad_cdata_out_bit3(gpionum) sf_vic_gpio_dout_value(gpionum, 0x44) +#define SET_GPIO_dout_sdio0_pad_cdata_out_bit4(gpionum) sf_vic_gpio_dout_value(gpionum, 0x45) +#define SET_GPIO_dout_sdio0_pad_cdata_out_bit5(gpionum) sf_vic_gpio_dout_value(gpionum, 0x46) +#define SET_GPIO_dout_sdio0_pad_cdata_out_bit6(gpionum) sf_vic_gpio_dout_value(gpionum, 0x47) +#define SET_GPIO_dout_sdio0_pad_cdata_out_bit7(gpionum) sf_vic_gpio_dout_value(gpionum, 0x48) +#define SET_GPIO_dout_sdio0_pad_rst_n(gpionum) sf_vic_gpio_dout_value(gpionum, 0x49) +#define SET_GPIO_dout_sdio1_pad_card_power_en(gpionum) sf_vic_gpio_dout_value(gpionum, 0x4a) +#define SET_GPIO_dout_sdio1_pad_cclk_out(gpionum) sf_vic_gpio_dout_value(gpionum, 0x4b) +#define SET_GPIO_dout_sdio1_pad_ccmd_oe(gpionum) sf_vic_gpio_dout_value(gpionum, 0x4c) +#define SET_GPIO_dout_sdio1_pad_ccmd_out(gpionum) sf_vic_gpio_dout_value(gpionum, 0x4d) +#define SET_GPIO_dout_sdio1_pad_cdata_oe_bit0(gpionum) sf_vic_gpio_dout_value(gpionum, 0x4e) +#define SET_GPIO_dout_sdio1_pad_cdata_oe_bit1(gpionum) sf_vic_gpio_dout_value(gpionum, 0x4f) +#define SET_GPIO_dout_sdio1_pad_cdata_oe_bit2(gpionum) sf_vic_gpio_dout_value(gpionum, 0x50) +#define SET_GPIO_dout_sdio1_pad_cdata_oe_bit3(gpionum) sf_vic_gpio_dout_value(gpionum, 0x51) +#define SET_GPIO_dout_sdio1_pad_cdata_oe_bit4(gpionum) sf_vic_gpio_dout_value(gpionum, 0x52) +#define SET_GPIO_dout_sdio1_pad_cdata_oe_bit5(gpionum) sf_vic_gpio_dout_value(gpionum, 0x53) +#define SET_GPIO_dout_sdio1_pad_cdata_oe_bit6(gpionum) sf_vic_gpio_dout_value(gpionum, 0x54) +#define SET_GPIO_dout_sdio1_pad_cdata_oe_bit7(gpionum) sf_vic_gpio_dout_value(gpionum, 0x55) +#define SET_GPIO_dout_sdio1_pad_cdata_out_bit0(gpionum) sf_vic_gpio_dout_value(gpionum, 0x56) +#define SET_GPIO_dout_sdio1_pad_cdata_out_bit1(gpionum) sf_vic_gpio_dout_value(gpionum, 0x57) +#define SET_GPIO_dout_sdio1_pad_cdata_out_bit2(gpionum) sf_vic_gpio_dout_value(gpionum, 0x58) +#define SET_GPIO_dout_sdio1_pad_cdata_out_bit3(gpionum) sf_vic_gpio_dout_value(gpionum, 0x59) +#define SET_GPIO_dout_sdio1_pad_cdata_out_bit4(gpionum) sf_vic_gpio_dout_value(gpionum, 0x5a) +#define SET_GPIO_dout_sdio1_pad_cdata_out_bit5(gpionum) sf_vic_gpio_dout_value(gpionum, 0x5b) +#define SET_GPIO_dout_sdio1_pad_cdata_out_bit6(gpionum) sf_vic_gpio_dout_value(gpionum, 0x5c) +#define SET_GPIO_dout_sdio1_pad_cdata_out_bit7(gpionum) sf_vic_gpio_dout_value(gpionum, 0x5d) +#define SET_GPIO_dout_sdio1_pad_rst_n(gpionum) sf_vic_gpio_dout_value(gpionum, 0x5e) +#define SET_GPIO_dout_spdif_tx_sdout(gpionum) sf_vic_gpio_dout_value(gpionum, 0x5f) +#define SET_GPIO_dout_spdif_tx_sdout_oen(gpionum) sf_vic_gpio_dout_value(gpionum, 0x60) +#define SET_GPIO_dout_spi0_pad_oe_n(gpionum) sf_vic_gpio_dout_value(gpionum, 0x61) +#define SET_GPIO_dout_spi0_pad_sck_out(gpionum) sf_vic_gpio_dout_value(gpionum, 0x62) +#define SET_GPIO_dout_spi0_pad_ss_0_n(gpionum) sf_vic_gpio_dout_value(gpionum, 0x63) +#define 
SET_GPIO_dout_spi0_pad_ss_1_n(gpionum) sf_vic_gpio_dout_value(gpionum, 0x64) +#define SET_GPIO_dout_spi0_pad_txd(gpionum) sf_vic_gpio_dout_value(gpionum, 0x65) +#define SET_GPIO_dout_spi1_pad_oe_n(gpionum) sf_vic_gpio_dout_value(gpionum, 0x66) +#define SET_GPIO_dout_spi1_pad_sck_out(gpionum) sf_vic_gpio_dout_value(gpionum, 0x67) +#define SET_GPIO_dout_spi1_pad_ss_0_n(gpionum) sf_vic_gpio_dout_value(gpionum, 0x68) +#define SET_GPIO_dout_spi1_pad_ss_1_n(gpionum) sf_vic_gpio_dout_value(gpionum, 0x69) +#define SET_GPIO_dout_spi1_pad_txd(gpionum) sf_vic_gpio_dout_value(gpionum, 0x6a) +#define SET_GPIO_dout_spi2_pad_oe_n(gpionum) sf_vic_gpio_dout_value(gpionum, 0x6b) +#define SET_GPIO_dout_spi2_pad_sck_out(gpionum) sf_vic_gpio_dout_value(gpionum, 0x6c) +#define SET_GPIO_dout_spi2_pad_ss_0_n(gpionum) sf_vic_gpio_dout_value(gpionum, 0x6d) +#define SET_GPIO_dout_spi2_pad_ss_1_n(gpionum) sf_vic_gpio_dout_value(gpionum, 0x6e) +#define SET_GPIO_dout_spi2_pad_txd(gpionum) sf_vic_gpio_dout_value(gpionum, 0x6f) +#define SET_GPIO_dout_spi2ahb_pad_oe_n_bit0(gpionum) sf_vic_gpio_dout_value(gpionum, 0x70) +#define SET_GPIO_dout_spi2ahb_pad_oe_n_bit1(gpionum) sf_vic_gpio_dout_value(gpionum, 0x71) +#define SET_GPIO_dout_spi2ahb_pad_oe_n_bit2(gpionum) sf_vic_gpio_dout_value(gpionum, 0x72) +#define SET_GPIO_dout_spi2ahb_pad_oe_n_bit3(gpionum) sf_vic_gpio_dout_value(gpionum, 0x73) +#define SET_GPIO_dout_spi2ahb_pad_txd_bit0(gpionum) sf_vic_gpio_dout_value(gpionum, 0x74) +#define SET_GPIO_dout_spi2ahb_pad_txd_bit1(gpionum) sf_vic_gpio_dout_value(gpionum, 0x75) +#define SET_GPIO_dout_spi2ahb_pad_txd_bit2(gpionum) sf_vic_gpio_dout_value(gpionum, 0x76) +#define SET_GPIO_dout_spi2ahb_pad_txd_bit3(gpionum) sf_vic_gpio_dout_value(gpionum, 0x77) +#define SET_GPIO_dout_spi3_pad_oe_n(gpionum) sf_vic_gpio_dout_value(gpionum, 0x78) +#define SET_GPIO_dout_spi3_pad_sck_out(gpionum) sf_vic_gpio_dout_value(gpionum, 0x79) +#define SET_GPIO_dout_spi3_pad_ss_0_n(gpionum) sf_vic_gpio_dout_value(gpionum, 0x7a) +#define SET_GPIO_dout_spi3_pad_ss_1_n(gpionum) sf_vic_gpio_dout_value(gpionum, 0x7b) +#define SET_GPIO_dout_spi3_pad_txd(gpionum) sf_vic_gpio_dout_value(gpionum, 0x7c) +#define SET_GPIO_dout_uart0_pad_dtrn(gpionum) sf_vic_gpio_dout_value(gpionum, 0x7d) +#define SET_GPIO_dout_uart0_pad_rtsn(gpionum) sf_vic_gpio_dout_value(gpionum, 0x7e) +#define SET_GPIO_dout_uart0_pad_sout(gpionum) sf_vic_gpio_dout_value(gpionum, 0x7f) +#define SET_GPIO_dout_uart1_pad_sout(gpionum) sf_vic_gpio_dout_value(gpionum, 0x80) +#define SET_GPIO_dout_uart2_pad_dtr_n(gpionum) sf_vic_gpio_dout_value(gpionum, 0x81) +#define SET_GPIO_dout_uart2_pad_rts_n(gpionum) sf_vic_gpio_dout_value(gpionum, 0x82) +#define SET_GPIO_dout_uart2_pad_sout(gpionum) sf_vic_gpio_dout_value(gpionum, 0x83) +#define SET_GPIO_dout_uart3_pad_sout(gpionum) sf_vic_gpio_dout_value(gpionum, 0x84) +#define SET_GPIO_dout_usb_drv_bus(gpionum) sf_vic_gpio_dout_value(gpionum, 0x85) +#define SET_GPIO_doen_reverse_(gpionum, en) sf_vic_gpio_doen_reverse(gpionum, en) +#define SET_GPIO_doen_LOW(gpionum) sf_vic_gpio_doen_value(gpionum, 0x0) +#define SET_GPIO_doen_HIGH(gpionum) sf_vic_gpio_doen_value(gpionum, 0x1) +#define SET_GPIO_doen_clk_gmac_tophyref(gpionum) sf_vic_gpio_doen_value(gpionum, 0x2) +#define SET_GPIO_doen_cpu_jtag_tdo(gpionum) sf_vic_gpio_doen_value(gpionum, 0x3) +#define SET_GPIO_doen_cpu_jtag_tdo_oen(gpionum) sf_vic_gpio_doen_value(gpionum, 0x4) +#define SET_GPIO_doen_dmic_clk_out(gpionum) sf_vic_gpio_doen_value(gpionum, 0x5) +#define SET_GPIO_doen_dsp_JTDOEn_pad(gpionum) 
sf_vic_gpio_doen_value(gpionum, 0x6) +#define SET_GPIO_doen_dsp_JTDO_pad(gpionum) sf_vic_gpio_doen_value(gpionum, 0x7) +#define SET_GPIO_doen_i2c0_pad_sck_oe(gpionum) sf_vic_gpio_doen_value(gpionum, 0x8) +#define SET_GPIO_doen_i2c0_pad_sda_oe(gpionum) sf_vic_gpio_doen_value(gpionum, 0x9) +#define SET_GPIO_doen_i2c1_pad_sck_oe(gpionum) sf_vic_gpio_doen_value(gpionum, 0xa) +#define SET_GPIO_doen_i2c1_pad_sda_oe(gpionum) sf_vic_gpio_doen_value(gpionum, 0xb) +#define SET_GPIO_doen_i2c2_pad_sck_oe(gpionum) sf_vic_gpio_doen_value(gpionum, 0xc) +#define SET_GPIO_doen_i2c2_pad_sda_oe(gpionum) sf_vic_gpio_doen_value(gpionum, 0xd) +#define SET_GPIO_doen_i2c3_pad_sck_oe(gpionum) sf_vic_gpio_doen_value(gpionum, 0xe) +#define SET_GPIO_doen_i2c3_pad_sda_oe(gpionum) sf_vic_gpio_doen_value(gpionum, 0xf) +#define SET_GPIO_doen_i2srx_bclk_out(gpionum) sf_vic_gpio_doen_value(gpionum, 0x10) +#define SET_GPIO_doen_i2srx_bclk_out_oen(gpionum) sf_vic_gpio_doen_value(gpionum, 0x11) +#define SET_GPIO_doen_i2srx_lrck_out(gpionum) sf_vic_gpio_doen_value(gpionum, 0x12) +#define SET_GPIO_doen_i2srx_lrck_out_oen(gpionum) sf_vic_gpio_doen_value(gpionum, 0x13) +#define SET_GPIO_doen_i2srx_mclk_out(gpionum) sf_vic_gpio_doen_value(gpionum, 0x14) +#define SET_GPIO_doen_i2stx_bclk_out(gpionum) sf_vic_gpio_doen_value(gpionum, 0x15) +#define SET_GPIO_doen_i2stx_bclk_out_oen(gpionum) sf_vic_gpio_doen_value(gpionum, 0x16) +#define SET_GPIO_doen_i2stx_lrck_out(gpionum) sf_vic_gpio_doen_value(gpionum, 0x17) +#define SET_GPIO_doen_i2stx_lrckout_oen(gpionum) sf_vic_gpio_doen_value(gpionum, 0x18) +#define SET_GPIO_doen_i2stx_mclk_out(gpionum) sf_vic_gpio_doen_value(gpionum, 0x19) +#define SET_GPIO_doen_i2stx_sdout0(gpionum) sf_vic_gpio_doen_value(gpionum, 0x1a) +#define SET_GPIO_doen_i2stx_sdout1(gpionum) sf_vic_gpio_doen_value(gpionum, 0x1b) +#define SET_GPIO_doen_lcd_pad_csm_n(gpionum) sf_vic_gpio_doen_value(gpionum, 0x1c) +#define SET_GPIO_doen_pwm_pad_oe_n_bit0(gpionum) sf_vic_gpio_doen_value(gpionum, 0x1d) +#define SET_GPIO_doen_pwm_pad_oe_n_bit1(gpionum) sf_vic_gpio_doen_value(gpionum, 0x1e) +#define SET_GPIO_doen_pwm_pad_oe_n_bit2(gpionum) sf_vic_gpio_doen_value(gpionum, 0x1f) +#define SET_GPIO_doen_pwm_pad_oe_n_bit3(gpionum) sf_vic_gpio_doen_value(gpionum, 0x20) +#define SET_GPIO_doen_pwm_pad_oe_n_bit4(gpionum) sf_vic_gpio_doen_value(gpionum, 0x21) +#define SET_GPIO_doen_pwm_pad_oe_n_bit5(gpionum) sf_vic_gpio_doen_value(gpionum, 0x22) +#define SET_GPIO_doen_pwm_pad_oe_n_bit6(gpionum) sf_vic_gpio_doen_value(gpionum, 0x23) +#define SET_GPIO_doen_pwm_pad_oe_n_bit7(gpionum) sf_vic_gpio_doen_value(gpionum, 0x24) +#define SET_GPIO_doen_pwm_pad_out_bit0(gpionum) sf_vic_gpio_doen_value(gpionum, 0x25) +#define SET_GPIO_doen_pwm_pad_out_bit1(gpionum) sf_vic_gpio_doen_value(gpionum, 0x26) +#define SET_GPIO_doen_pwm_pad_out_bit2(gpionum) sf_vic_gpio_doen_value(gpionum, 0x27) +#define SET_GPIO_doen_pwm_pad_out_bit3(gpionum) sf_vic_gpio_doen_value(gpionum, 0x28) +#define SET_GPIO_doen_pwm_pad_out_bit4(gpionum) sf_vic_gpio_doen_value(gpionum, 0x29) +#define SET_GPIO_doen_pwm_pad_out_bit5(gpionum) sf_vic_gpio_doen_value(gpionum, 0x2a) +#define SET_GPIO_doen_pwm_pad_out_bit6(gpionum) sf_vic_gpio_doen_value(gpionum, 0x2b) +#define SET_GPIO_doen_pwm_pad_out_bit7(gpionum) sf_vic_gpio_doen_value(gpionum, 0x2c) +#define SET_GPIO_doen_pwmdac_left_out(gpionum) sf_vic_gpio_doen_value(gpionum, 0x2d) +#define SET_GPIO_doen_pwmdac_right_out(gpionum) sf_vic_gpio_doen_value(gpionum, 0x2e) +#define SET_GPIO_doen_qspi_csn1_out(gpionum) 
sf_vic_gpio_doen_value(gpionum, 0x2f) +#define SET_GPIO_doen_qspi_csn2_out(gpionum) sf_vic_gpio_doen_value(gpionum, 0x30) +#define SET_GPIO_doen_qspi_csn3_out(gpionum) sf_vic_gpio_doen_value(gpionum, 0x31) +#define SET_GPIO_doen_register23_SCFG_cmsensor_rst0(gpionum) sf_vic_gpio_doen_value(gpionum, 0x32) +#define SET_GPIO_doen_register23_SCFG_cmsensor_rst1(gpionum) sf_vic_gpio_doen_value(gpionum, 0x33) +#define SET_GPIO_doen_register32_SCFG_gmac_phy_rstn(gpionum) sf_vic_gpio_doen_value(gpionum, 0x34) +#define SET_GPIO_doen_sdio0_pad_card_power_en(gpionum) sf_vic_gpio_doen_value(gpionum, 0x35) +#define SET_GPIO_doen_sdio0_pad_cclk_out(gpionum) sf_vic_gpio_doen_value(gpionum, 0x36) +#define SET_GPIO_doen_sdio0_pad_ccmd_oe(gpionum) sf_vic_gpio_doen_value(gpionum, 0x37) +#define SET_GPIO_doen_sdio0_pad_ccmd_out(gpionum) sf_vic_gpio_doen_value(gpionum, 0x38) +#define SET_GPIO_doen_sdio0_pad_cdata_oe_bit0(gpionum) sf_vic_gpio_doen_value(gpionum, 0x39) +#define SET_GPIO_doen_sdio0_pad_cdata_oe_bit1(gpionum) sf_vic_gpio_doen_value(gpionum, 0x3a) +#define SET_GPIO_doen_sdio0_pad_cdata_oe_bit2(gpionum) sf_vic_gpio_doen_value(gpionum, 0x3b) +#define SET_GPIO_doen_sdio0_pad_cdata_oe_bit3(gpionum) sf_vic_gpio_doen_value(gpionum, 0x3c) +#define SET_GPIO_doen_sdio0_pad_cdata_oe_bit4(gpionum) sf_vic_gpio_doen_value(gpionum, 0x3d) +#define SET_GPIO_doen_sdio0_pad_cdata_oe_bit5(gpionum) sf_vic_gpio_doen_value(gpionum, 0x3e) +#define SET_GPIO_doen_sdio0_pad_cdata_oe_bit6(gpionum) sf_vic_gpio_doen_value(gpionum, 0x3f) +#define SET_GPIO_doen_sdio0_pad_cdata_oe_bit7(gpionum) sf_vic_gpio_doen_value(gpionum, 0x40) +#define SET_GPIO_doen_sdio0_pad_cdata_out_bit0(gpionum) sf_vic_gpio_doen_value(gpionum, 0x41) +#define SET_GPIO_doen_sdio0_pad_cdata_out_bit1(gpionum) sf_vic_gpio_doen_value(gpionum, 0x42) +#define SET_GPIO_doen_sdio0_pad_cdata_out_bit2(gpionum) sf_vic_gpio_doen_value(gpionum, 0x43) +#define SET_GPIO_doen_sdio0_pad_cdata_out_bit3(gpionum) sf_vic_gpio_doen_value(gpionum, 0x44) +#define SET_GPIO_doen_sdio0_pad_cdata_out_bit4(gpionum) sf_vic_gpio_doen_value(gpionum, 0x45) +#define SET_GPIO_doen_sdio0_pad_cdata_out_bit5(gpionum) sf_vic_gpio_doen_value(gpionum, 0x46) +#define SET_GPIO_doen_sdio0_pad_cdata_out_bit6(gpionum) sf_vic_gpio_doen_value(gpionum, 0x47) +#define SET_GPIO_doen_sdio0_pad_cdata_out_bit7(gpionum) sf_vic_gpio_doen_value(gpionum, 0x48) +#define SET_GPIO_doen_sdio0_pad_rst_n(gpionum) sf_vic_gpio_doen_value(gpionum, 0x49) +#define SET_GPIO_doen_sdio1_pad_card_power_en(gpionum) sf_vic_gpio_doen_value(gpionum, 0x4a) +#define SET_GPIO_doen_sdio1_pad_cclk_out(gpionum) sf_vic_gpio_doen_value(gpionum, 0x4b) +#define SET_GPIO_doen_sdio1_pad_ccmd_oe(gpionum) sf_vic_gpio_doen_value(gpionum, 0x4c) +#define SET_GPIO_doen_sdio1_pad_ccmd_out(gpionum) sf_vic_gpio_doen_value(gpionum, 0x4d) +#define SET_GPIO_doen_sdio1_pad_cdata_oe_bit0(gpionum) sf_vic_gpio_doen_value(gpionum, 0x4e) +#define SET_GPIO_doen_sdio1_pad_cdata_oe_bit1(gpionum) sf_vic_gpio_doen_value(gpionum, 0x4f) +#define SET_GPIO_doen_sdio1_pad_cdata_oe_bit2(gpionum) sf_vic_gpio_doen_value(gpionum, 0x50) +#define SET_GPIO_doen_sdio1_pad_cdata_oe_bit3(gpionum) sf_vic_gpio_doen_value(gpionum, 0x51) +#define SET_GPIO_doen_sdio1_pad_cdata_oe_bit4(gpionum) sf_vic_gpio_doen_value(gpionum, 0x52) +#define SET_GPIO_doen_sdio1_pad_cdata_oe_bit5(gpionum) sf_vic_gpio_doen_value(gpionum, 0x53) +#define SET_GPIO_doen_sdio1_pad_cdata_oe_bit6(gpionum) sf_vic_gpio_doen_value(gpionum, 0x54) +#define SET_GPIO_doen_sdio1_pad_cdata_oe_bit7(gpionum) 
sf_vic_gpio_doen_value(gpionum, 0x55) +#define SET_GPIO_doen_sdio1_pad_cdata_out_bit0(gpionum) sf_vic_gpio_doen_value(gpionum, 0x56) +#define SET_GPIO_doen_sdio1_pad_cdata_out_bit1(gpionum) sf_vic_gpio_doen_value(gpionum, 0x57) +#define SET_GPIO_doen_sdio1_pad_cdata_out_bit2(gpionum) sf_vic_gpio_doen_value(gpionum, 0x58) +#define SET_GPIO_doen_sdio1_pad_cdata_out_bit3(gpionum) sf_vic_gpio_doen_value(gpionum, 0x59) +#define SET_GPIO_doen_sdio1_pad_cdata_out_bit4(gpionum) sf_vic_gpio_doen_value(gpionum, 0x5a) +#define SET_GPIO_doen_sdio1_pad_cdata_out_bit5(gpionum) sf_vic_gpio_doen_value(gpionum, 0x5b) +#define SET_GPIO_doen_sdio1_pad_cdata_out_bit6(gpionum) sf_vic_gpio_doen_value(gpionum, 0x5c) +#define SET_GPIO_doen_sdio1_pad_cdata_out_bit7(gpionum) sf_vic_gpio_doen_value(gpionum, 0x5d) +#define SET_GPIO_doen_sdio1_pad_rst_n(gpionum) sf_vic_gpio_doen_value(gpionum, 0x5e) +#define SET_GPIO_doen_spdif_tx_sdout(gpionum) sf_vic_gpio_doen_value(gpionum, 0x5f) +#define SET_GPIO_doen_spdif_tx_sdout_oen(gpionum) sf_vic_gpio_doen_value(gpionum, 0x60) +#define SET_GPIO_doen_spi0_pad_oe_n(gpionum) sf_vic_gpio_doen_value(gpionum, 0x61) +#define SET_GPIO_doen_spi0_pad_sck_out(gpionum) sf_vic_gpio_doen_value(gpionum, 0x62) +#define SET_GPIO_doen_spi0_pad_ss_0_n(gpionum) sf_vic_gpio_doen_value(gpionum, 0x63) +#define SET_GPIO_doen_spi0_pad_ss_1_n(gpionum) sf_vic_gpio_doen_value(gpionum, 0x64) +#define SET_GPIO_doen_spi0_pad_txd(gpionum) sf_vic_gpio_doen_value(gpionum, 0x65) +#define SET_GPIO_doen_spi1_pad_oe_n(gpionum) sf_vic_gpio_doen_value(gpionum, 0x66) +#define SET_GPIO_doen_spi1_pad_sck_out(gpionum) sf_vic_gpio_doen_value(gpionum, 0x67) +#define SET_GPIO_doen_spi1_pad_ss_0_n(gpionum) sf_vic_gpio_doen_value(gpionum, 0x68) +#define SET_GPIO_doen_spi1_pad_ss_1_n(gpionum) sf_vic_gpio_doen_value(gpionum, 0x69) +#define SET_GPIO_doen_spi1_pad_txd(gpionum) sf_vic_gpio_doen_value(gpionum, 0x6a) +#define SET_GPIO_doen_spi2_pad_oe_n(gpionum) sf_vic_gpio_doen_value(gpionum, 0x6b) +#define SET_GPIO_doen_spi2_pad_sck_out(gpionum) sf_vic_gpio_doen_value(gpionum, 0x6c) +#define SET_GPIO_doen_spi2_pad_ss_0_n(gpionum) sf_vic_gpio_doen_value(gpionum, 0x6d) +#define SET_GPIO_doen_spi2_pad_ss_1_n(gpionum) sf_vic_gpio_doen_value(gpionum, 0x6e) +#define SET_GPIO_doen_spi2_pad_txd(gpionum) sf_vic_gpio_doen_value(gpionum, 0x6f) +#define SET_GPIO_doen_spi2ahb_pad_oe_n_bit0(gpionum) sf_vic_gpio_doen_value(gpionum, 0x70) +#define SET_GPIO_doen_spi2ahb_pad_oe_n_bit1(gpionum) sf_vic_gpio_doen_value(gpionum, 0x71) +#define SET_GPIO_doen_spi2ahb_pad_oe_n_bit2(gpionum) sf_vic_gpio_doen_value(gpionum, 0x72) +#define SET_GPIO_doen_spi2ahb_pad_oe_n_bit3(gpionum) sf_vic_gpio_doen_value(gpionum, 0x73) +#define SET_GPIO_doen_spi2ahb_pad_txd_bit0(gpionum) sf_vic_gpio_doen_value(gpionum, 0x74) +#define SET_GPIO_doen_spi2ahb_pad_txd_bit1(gpionum) sf_vic_gpio_doen_value(gpionum, 0x75) +#define SET_GPIO_doen_spi2ahb_pad_txd_bit2(gpionum) sf_vic_gpio_doen_value(gpionum, 0x76) +#define SET_GPIO_doen_spi2ahb_pad_txd_bit3(gpionum) sf_vic_gpio_doen_value(gpionum, 0x77) +#define SET_GPIO_doen_spi3_pad_oe_n(gpionum) sf_vic_gpio_doen_value(gpionum, 0x78) +#define SET_GPIO_doen_spi3_pad_sck_out(gpionum) sf_vic_gpio_doen_value(gpionum, 0x79) +#define SET_GPIO_doen_spi3_pad_ss_0_n(gpionum) sf_vic_gpio_doen_value(gpionum, 0x7a) +#define SET_GPIO_doen_spi3_pad_ss_1_n(gpionum) sf_vic_gpio_doen_value(gpionum, 0x7b) +#define SET_GPIO_doen_spi3_pad_txd(gpionum) sf_vic_gpio_doen_value(gpionum, 0x7c) +#define SET_GPIO_doen_uart0_pad_dtrn(gpionum) 
sf_vic_gpio_doen_value(gpionum, 0x7d) +#define SET_GPIO_doen_uart0_pad_rtsn(gpionum) sf_vic_gpio_doen_value(gpionum, 0x7e) +#define SET_GPIO_doen_uart0_pad_sout(gpionum) sf_vic_gpio_doen_value(gpionum, 0x7f) +#define SET_GPIO_doen_uart1_pad_sout(gpionum) sf_vic_gpio_doen_value(gpionum, 0x80) +#define SET_GPIO_doen_uart2_pad_dtr_n(gpionum) sf_vic_gpio_doen_value(gpionum, 0x81) +#define SET_GPIO_doen_uart2_pad_rts_n(gpionum) sf_vic_gpio_doen_value(gpionum, 0x82) +#define SET_GPIO_doen_uart2_pad_sout(gpionum) sf_vic_gpio_doen_value(gpionum, 0x83) +#define SET_GPIO_doen_uart3_pad_sout(gpionum) sf_vic_gpio_doen_value(gpionum, 0x84) +#define SET_GPIO_doen_usb_drv_bus(gpionum) sf_vic_gpio_doen_value(gpionum, 0x85) +#define SET_GPIO_cpu_jtag_tck(gpionum) sf_vic_gpio_manual(0x250, gpionum + 2) +#define SET_GPIO_cpu_jtag_tdi(gpionum) sf_vic_gpio_manual(0x254, gpionum + 2) +#define SET_GPIO_cpu_jtag_tms(gpionum) sf_vic_gpio_manual(0x258, gpionum + 2) +#define SET_GPIO_cpu_jtag_trst(gpionum) sf_vic_gpio_manual(0x25c, gpionum + 2) +#define SET_GPIO_dmic_sdin_bit0(gpionum) sf_vic_gpio_manual(0x260, gpionum + 2) +#define SET_GPIO_dmic_sdin_bit1(gpionum) sf_vic_gpio_manual(0x264, gpionum + 2) +#define SET_GPIO_dsp_JTCK_pad(gpionum) sf_vic_gpio_manual(0x268, gpionum + 2) +#define SET_GPIO_dsp_JTDI_pad(gpionum) sf_vic_gpio_manual(0x26c, gpionum + 2) +#define SET_GPIO_dsp_JTMS_pad(gpionum) sf_vic_gpio_manual(0x270, gpionum + 2) +#define SET_GPIO_dsp_TRST_pad(gpionum) sf_vic_gpio_manual(0x274, gpionum + 2) +#define SET_GPIO_i2c0_pad_sck_in(gpionum) sf_vic_gpio_manual(0x278, gpionum + 2) +#define SET_GPIO_i2c0_pad_sda_in(gpionum) sf_vic_gpio_manual(0x27c, gpionum + 2) +#define SET_GPIO_i2c1_pad_sck_in(gpionum) sf_vic_gpio_manual(0x280, gpionum + 2) +#define SET_GPIO_i2c1_pad_sda_in(gpionum) sf_vic_gpio_manual(0x284, gpionum + 2) +#define SET_GPIO_i2c2_pad_sck_in(gpionum) sf_vic_gpio_manual(0x288, gpionum + 2) +#define SET_GPIO_i2c2_pad_sda_in(gpionum) sf_vic_gpio_manual(0x28c, gpionum + 2) +#define SET_GPIO_i2c3_pad_sck_in(gpionum) sf_vic_gpio_manual(0x290, gpionum + 2) +#define SET_GPIO_i2c3_pad_sda_in(gpionum) sf_vic_gpio_manual(0x294, gpionum + 2) +#define SET_GPIO_i2srx_bclk_in(gpionum) sf_vic_gpio_manual(0x298, gpionum + 2) +#define SET_GPIO_i2srx_lrck_in(gpionum) sf_vic_gpio_manual(0x29c, gpionum + 2) +#define SET_GPIO_i2srx_sdin_bit0(gpionum) sf_vic_gpio_manual(0x2a0, gpionum + 2) +#define SET_GPIO_i2srx_sdin_bit1(gpionum) sf_vic_gpio_manual(0x2a4, gpionum + 2) +#define SET_GPIO_i2srx_sdin_bit2(gpionum) sf_vic_gpio_manual(0x2a8, gpionum + 2) +#define SET_GPIO_i2stx_bclk_in(gpionum) sf_vic_gpio_manual(0x2ac, gpionum + 2) +#define SET_GPIO_i2stx_lrck_in(gpionum) sf_vic_gpio_manual(0x2b0, gpionum + 2) +#define SET_GPIO_sdio0_pad_card_detect_n(gpionum) sf_vic_gpio_manual(0x2b4, gpionum + 2) +#define SET_GPIO_sdio0_pad_card_write_prt(gpionum) sf_vic_gpio_manual(0x2b8, gpionum + 2) +#define SET_GPIO_sdio0_pad_ccmd_in(gpionum) sf_vic_gpio_manual(0x2bc, gpionum + 2) +#define SET_GPIO_sdio0_pad_cdata_in_bit0(gpionum) sf_vic_gpio_manual(0x2c0, gpionum + 2) +#define SET_GPIO_sdio0_pad_cdata_in_bit1(gpionum) sf_vic_gpio_manual(0x2c4, gpionum + 2) +#define SET_GPIO_sdio0_pad_cdata_in_bit2(gpionum) sf_vic_gpio_manual(0x2c8, gpionum + 2) +#define SET_GPIO_sdio0_pad_cdata_in_bit3(gpionum) sf_vic_gpio_manual(0x2cc, gpionum + 2) +#define SET_GPIO_sdio0_pad_cdata_in_bit4(gpionum) sf_vic_gpio_manual(0x2d0, gpionum + 2) +#define SET_GPIO_sdio0_pad_cdata_in_bit5(gpionum) sf_vic_gpio_manual(0x2d4, gpionum + 2) +#define 
SET_GPIO_sdio0_pad_cdata_in_bit6(gpionum) sf_vic_gpio_manual(0x2d8, gpionum + 2) +#define SET_GPIO_sdio0_pad_cdata_in_bit7(gpionum) sf_vic_gpio_manual(0x2dc, gpionum + 2) +#define SET_GPIO_sdio1_pad_card_detect_n(gpionum) sf_vic_gpio_manual(0x2e0, gpionum + 2) +#define SET_GPIO_sdio1_pad_card_write_prt(gpionum) sf_vic_gpio_manual(0x2e4, gpionum + 2) +#define SET_GPIO_sdio1_pad_ccmd_in(gpionum) sf_vic_gpio_manual(0x2e8, gpionum + 2) +#define SET_GPIO_sdio1_pad_cdata_in_bit0(gpionum) sf_vic_gpio_manual(0x2ec, gpionum + 2) +#define SET_GPIO_sdio1_pad_cdata_in_bit1(gpionum) sf_vic_gpio_manual(0x2f0, gpionum + 2) +#define SET_GPIO_sdio1_pad_cdata_in_bit2(gpionum) sf_vic_gpio_manual(0x2f4, gpionum + 2) +#define SET_GPIO_sdio1_pad_cdata_in_bit3(gpionum) sf_vic_gpio_manual(0x2f8, gpionum + 2) +#define SET_GPIO_sdio1_pad_cdata_in_bit4(gpionum) sf_vic_gpio_manual(0x2fc, gpionum + 2) +#define SET_GPIO_sdio1_pad_cdata_in_bit5(gpionum) sf_vic_gpio_manual(0x300, gpionum + 2) +#define SET_GPIO_sdio1_pad_cdata_in_bit6(gpionum) sf_vic_gpio_manual(0x304, gpionum + 2) +#define SET_GPIO_sdio1_pad_cdata_in_bit7(gpionum) sf_vic_gpio_manual(0x308, gpionum + 2) +#define SET_GPIO_spdif_rx_sdin(gpionum) sf_vic_gpio_manual(0x30c, gpionum + 2) +#define SET_GPIO_spi0_pad_rxd(gpionum) sf_vic_gpio_manual(0x310, gpionum + 2) +#define SET_GPIO_spi0_pad_ss_in_n(gpionum) sf_vic_gpio_manual(0x314, gpionum + 2) +#define SET_GPIO_spi1_pad_rxd(gpionum) sf_vic_gpio_manual(0x318, gpionum + 2) +#define SET_GPIO_spi1_pad_ss_in_n(gpionum) sf_vic_gpio_manual(0x31c, gpionum + 2) +#define SET_GPIO_spi2_pad_rxd(gpionum) sf_vic_gpio_manual(0x320, gpionum + 2) +#define SET_GPIO_spi2_pad_ss_in_n(gpionum) sf_vic_gpio_manual(0x324, gpionum + 2) +#define SET_GPIO_spi2ahb_pad_rxd_bit0(gpionum) sf_vic_gpio_manual(0x328, gpionum + 2) +#define SET_GPIO_spi2ahb_pad_rxd_bit1(gpionum) sf_vic_gpio_manual(0x32c, gpionum + 2) +#define SET_GPIO_spi2ahb_pad_rxd_bit2(gpionum) sf_vic_gpio_manual(0x330, gpionum + 2) +#define SET_GPIO_spi2ahb_pad_rxd_bit3(gpionum) sf_vic_gpio_manual(0x334, gpionum + 2) +#define SET_GPIO_spi2ahb_pad_ss_n(gpionum) sf_vic_gpio_manual(0x338, gpionum + 2) +#define SET_GPIO_spi2ahb_slv_sclkin(gpionum) sf_vic_gpio_manual(0x33c, gpionum + 2) +#define SET_GPIO_spi3_pad_rxd(gpionum) sf_vic_gpio_manual(0x340, gpionum + 2) +#define SET_GPIO_spi3_pad_ss_in_n(gpionum) sf_vic_gpio_manual(0x344, gpionum + 2) +#define SET_GPIO_uart0_pad_ctsn(gpionum) sf_vic_gpio_manual(0x348, gpionum + 2) +#define SET_GPIO_uart0_pad_dcdn(gpionum) sf_vic_gpio_manual(0x34c, gpionum + 2) +#define SET_GPIO_uart0_pad_dsrn(gpionum) sf_vic_gpio_manual(0x350, gpionum + 2) +#define SET_GPIO_uart0_pad_rin(gpionum) sf_vic_gpio_manual(0x354, gpionum + 2) +#define SET_GPIO_uart0_pad_sin(gpionum) sf_vic_gpio_manual(0x358, gpionum + 2) +#define SET_GPIO_uart1_pad_sin(gpionum) sf_vic_gpio_manual(0x35c, gpionum + 2) +#define SET_GPIO_uart2_pad_cts_n(gpionum) sf_vic_gpio_manual(0x360, gpionum + 2) +#define SET_GPIO_uart2_pad_dcd_n(gpionum) sf_vic_gpio_manual(0x364, gpionum + 2) +#define SET_GPIO_uart2_pad_dsr_n(gpionum) sf_vic_gpio_manual(0x368, gpionum + 2) +#define SET_GPIO_uart2_pad_ri_n(gpionum) sf_vic_gpio_manual(0x36c, gpionum + 2) +#define SET_GPIO_uart2_pad_sin(gpionum) sf_vic_gpio_manual(0x370, gpionum + 2) +#define SET_GPIO_uart3_pad_sin(gpionum) sf_vic_gpio_manual(0x374, gpionum + 2) +#define SET_GPIO_usb_over_current(gpionum) sf_vic_gpio_manual(0x378, gpionum + 2) + +#endif /* __GPIO_PXA_H */ From fcf37216b017ca71c6bd595beba60ed20a008065 Mon Sep 17 00:00:00 2001 
From: Samin Guo Date: Fri, 8 Jan 2021 03:11:04 +0800 Subject: [PATCH 10/54] drivers/tty/serial/8250: update driver for VIC7100 --- drivers/tty/serial/8250/8250_port.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c index fc5ab20322821..4fd09d3dd6df7 100644 --- a/drivers/tty/serial/8250/8250_port.c +++ b/drivers/tty/serial/8250/8250_port.c @@ -73,8 +73,16 @@ static const struct serial8250_config uart_config[] = { }, [PORT_16550] = { .name = "16550", +#ifdef CONFIG_SOC_STARFIVE_VIC7100 + .fifo_size = 16, + .tx_loadsz = 16, + .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_00, + .rxtrig_bytes = {1, 4, 8, 14}, + .flags = UART_CAP_FIFO, +#else .fifo_size = 1, .tx_loadsz = 1, +#endif }, [PORT_16550A] = { .name = "16550A", From 453d751d645b54788df1855d058be0c1785ba700 Mon Sep 17 00:00:00 2001 From: Huan Feng Date: Fri, 8 Jan 2021 03:35:42 +0800 Subject: [PATCH 11/54] drivers/hw_random: Add Starfive VIC Random Number Generator driver --- drivers/char/hw_random/Kconfig | 13 ++ drivers/char/hw_random/Makefile | 1 + drivers/char/hw_random/starfive-vic-rng.c | 256 ++++++++++++++++++++++ drivers/char/hw_random/starfive-vic-rng.h | 167 ++++++++++++++ 4 files changed, 437 insertions(+) create mode 100644 drivers/char/hw_random/starfive-vic-rng.c create mode 100644 drivers/char/hw_random/starfive-vic-rng.h diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig index 1fe006f3f12fa..b21b7d33357e2 100644 --- a/drivers/char/hw_random/Kconfig +++ b/drivers/char/hw_random/Kconfig @@ -335,6 +335,19 @@ config HW_RANDOM_POWERNV If unsure, say Y. +config HW_RANDOM_STARFIVE_VIC + tristate "Starfive VIC Random Number Generator support" + depends on HW_RANDOM + default y if SOC_STARFIVE_VIC7100 + help + This driver provides kernel-side support for the Random Number + Generator hardware found on the Starfive VIC SoC. + + To compile this driver as a module, choose M here: the + module will be called starfive-vic-rng. + + If unsure, say Y. + config HW_RANDOM_HISI tristate "Hisilicon Random Number Generator support" depends on HW_RANDOM && ARCH_HISI diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile index 8933fada74f2f..9b959cfc1b308 100644 --- a/drivers/char/hw_random/Makefile +++ b/drivers/char/hw_random/Makefile @@ -29,6 +29,7 @@ obj-$(CONFIG_HW_RANDOM_OCTEON) += octeon-rng.o obj-$(CONFIG_HW_RANDOM_NOMADIK) += nomadik-rng.o obj-$(CONFIG_HW_RANDOM_PSERIES) += pseries-rng.o obj-$(CONFIG_HW_RANDOM_POWERNV) += powernv-rng.o +obj-$(CONFIG_HW_RANDOM_STARFIVE_VIC) += starfive-vic-rng.o obj-$(CONFIG_HW_RANDOM_HISI) += hisi-rng.o obj-$(CONFIG_HW_RANDOM_BCM2835) += bcm2835-rng.o obj-$(CONFIG_HW_RANDOM_IPROC_RNG200) += iproc-rng200.o diff --git a/drivers/char/hw_random/starfive-vic-rng.c b/drivers/char/hw_random/starfive-vic-rng.c new file mode 100644 index 0000000000000..6142b6a7ace6b --- /dev/null +++ b/drivers/char/hw_random/starfive-vic-rng.c @@ -0,0 +1,256 @@ +/* + ****************************************************************************** + * @file starfive-vic-rng.c + * @author StarFive Technology + * @version V1.0 + * @date 08/13/2020 + * @brief + ****************************************************************************** + * @copy + * + * THE PRESENT SOFTWARE WHICH IS FOR GUIDANCE ONLY AIMS AT PROVIDING CUSTOMERS + * WITH CODING INFORMATION REGARDING THEIR PRODUCTS IN ORDER FOR THEM TO SAVE + * TIME.
AS A RESULT, STARFIVE SHALL NOT BE HELD LIABLE FOR ANY + * DIRECT, INDIRECT OR CONSEQUENTIAL DAMAGES WITH RESPECT TO ANY CLAIMS ARISING + * FROM THE CONTENT OF SUCH SOFTWARE AND/OR THE USE MADE BY CUSTOMERS OF THE + * CODING INFORMATION CONTAINED HEREIN IN CONNECTION WITH THEIR PRODUCTS. + * + * COPYRIGHT 2020 Shanghai StarFive Technology Co., Ltd. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "starfive-vic-rng.h" + +#define to_vic_rng(p) container_of(p, struct vic_rng, rng) + +struct vic_rng { + struct device *dev; + void __iomem *base; + struct hwrng rng; +}; + +static inline void vic_wait_till_idle(struct vic_rng *hrng) +{ + while(readl(hrng->base + VIC_STAT) & VIC_STAT_BUSY) + ; +} + +static inline void vic_rng_irq_mask_clear(struct vic_rng *hrng) +{ + // clear register: ISTAT + u32 data = readl(hrng->base + VIC_ISTAT); + writel(data, hrng->base + VIC_ISTAT); + writel(0, hrng->base + VIC_ALARM); +} + +static int vic_trng_cmd(struct vic_rng *hrng, u32 cmd) { + int res = 0; + // wait till idle + vic_wait_till_idle(hrng); + switch (cmd) { + case VIC_CTRL_CMD_NOP: + case VIC_CTRL_CMD_GEN_NOISE: + case VIC_CTRL_CMD_GEN_NONCE: + case VIC_CTRL_CMD_CREATE_STATE: + case VIC_CTRL_CMD_RENEW_STATE: + case VIC_CTRL_CMD_REFRESH_ADDIN: + case VIC_CTRL_CMD_GEN_RANDOM: + case VIC_CTRL_CMD_ADVANCE_STATE: + case VIC_CTRL_CMD_KAT: + case VIC_CTRL_CMD_ZEROIZE: + writel(cmd, hrng->base + VIC_CTRL); + break; + default: + res = -1; + break; + } + + return res; +} + +static int vic_rng_init(struct hwrng *rng) +{ + struct vic_rng *hrng = to_vic_rng(rng); + + // wait till idle + + // clear register: ISTAT + vic_rng_irq_mask_clear(hrng); + + // set mission mode + writel(VIC_SMODE_SECURE_EN(1), hrng->base + VIC_SMODE); + + vic_trng_cmd(hrng, VIC_CTRL_CMD_GEN_NOISE); + vic_wait_till_idle(hrng); + + // set interrupt + writel(VIC_IE_ALL, hrng->base + VIC_IE); + + // zeroize + vic_trng_cmd(hrng, VIC_CTRL_CMD_ZEROIZE); + + vic_wait_till_idle(hrng); + + return 0; +} + +static irqreturn_t vic_rng_irq(int irq, void *priv) +{ + u32 status, val; + struct vic_rng *hrng = (struct vic_rng *)priv; + + /* + * clearing the interrupt will also clear the error register + * read error and status before clearing + */ + status = readl(hrng->base + VIC_ISTAT); + + if (status & VIC_ISTAT_ALARMS) { + writel(VIC_ISTAT_ALARMS, hrng->base + VIC_ISTAT); + val = readl(hrng->base + VIC_ALARM); + if (val & VIC_ALARM_ILLEGAL_CMD_SEQ) { + writel(VIC_ALARM_ILLEGAL_CMD_SEQ, hrng->base + VIC_ALARM); + //dev_info(hrng->dev, "ILLEGAL CMD SEQ: LAST_CMD=0x%x\r\n", + //VIC_STAT_LAST_CMD(readl(hrng->base + VIC_STAT))); + } else { + dev_info(hrng->dev, "Failed test: %x\r\n", val); + } + } + + if (status & VIC_ISTAT_ZEROIZE) { + writel(VIC_ISTAT_ZEROIZE, hrng->base + VIC_ISTAT); + //dev_info(hrng->dev, "zeroized\r\n"); + } + + if (status & VIC_ISTAT_KAT_COMPLETE) { + writel(VIC_ISTAT_KAT_COMPLETE, hrng->base + VIC_ISTAT); + //dev_info(hrng->dev, "kat_completed\r\n"); + } + + if (status & VIC_ISTAT_NOISE_RDY) { + writel(VIC_ISTAT_NOISE_RDY, hrng->base + VIC_ISTAT); + //dev_info(hrng->dev, "noise_rdy\r\n"); + } + + if (status & VIC_ISTAT_DONE) { + writel(VIC_ISTAT_DONE, hrng->base + VIC_ISTAT); + //dev_info(hrng->dev, "done\r\n"); + /* + if (VIC_STAT_LAST_CMD(readl(hrng->base + VIC_STAT)) == + VIC_CTRL_CMD_GEN_RANDOM) { + dev_info(hrng->dev, "Need Update Buffer\r\n"); + } + */ + } + vic_rng_irq_mask_clear(hrng); + + return IRQ_HANDLED; +} + +static void vic_rng_cleanup(struct hwrng *rng) +{ + struct 
vic_rng *hrng = to_vic_rng(rng); + + writel(0, hrng->base + VIC_CTRL); +} + +static int vic_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait) +{ + struct vic_rng *hrng = to_vic_rng(rng); + + vic_trng_cmd(hrng, VIC_CTRL_CMD_ZEROIZE); + vic_trng_cmd(hrng, VIC_CTRL_CMD_GEN_NOISE); + vic_trng_cmd(hrng, VIC_CTRL_CMD_CREATE_STATE); + + vic_wait_till_idle(hrng); + max = min_t(size_t, max, (VIC_RAND_LEN * 4)); + + writel(0x0, hrng->base + VIC_MODE); + vic_trng_cmd(hrng, VIC_CTRL_CMD_GEN_RANDOM); + + vic_wait_till_idle(hrng); + memcpy_fromio(buf, hrng->base + VIC_RAND0, max); + vic_trng_cmd(hrng, VIC_CTRL_CMD_ZEROIZE); + + vic_wait_till_idle(hrng); + return max; +} + +static int vic_rng_probe(struct platform_device *pdev) +{ + int ret; + int irq; + struct vic_rng *rng; + struct resource *res; + + rng = devm_kzalloc(&pdev->dev, sizeof(*rng), GFP_KERNEL); + if (!rng){ + return -ENOMEM; + } + + platform_set_drvdata(pdev, rng); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + rng->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(rng->base)){ + return PTR_ERR(rng->base); + } + + irq = platform_get_irq(pdev, 0); + if (irq <= 0) { + dev_err(&pdev->dev, "Couldn't get irq %d\n", irq); + return irq; + } + + ret = devm_request_irq(&pdev->dev, irq, vic_rng_irq, 0, pdev->name, + (void *)rng); + if (ret) { + dev_err(&pdev->dev, "Can't get interrupt working.\n"); + return ret; + } + + rng->rng.name = pdev->name; + rng->rng.init = vic_rng_init; + rng->rng.cleanup = vic_rng_cleanup; + rng->rng.read = vic_rng_read; + + rng->dev = &pdev->dev; + + ret = devm_hwrng_register(&pdev->dev, &rng->rng); + if (ret) { + dev_err(&pdev->dev, "failed to register hwrng\n"); + return ret; + } + + dev_info(&pdev->dev, "Initialized\n"); + + return 0; +} + +static const struct of_device_id vic_rng_dt_ids[] = { + { .compatible = "starfive,vic-rng" }, + { } +}; +MODULE_DEVICE_TABLE(of, vic_rng_dt_ids); + +static struct platform_driver vic_rng_driver = { + .probe = vic_rng_probe, + .driver = { + .name = "vic-rng", + .of_match_table = of_match_ptr(vic_rng_dt_ids), + }, +}; + +module_platform_driver(vic_rng_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Huan Feng "); +MODULE_DESCRIPTION("Starfive VIC random number generator driver"); diff --git a/drivers/char/hw_random/starfive-vic-rng.h b/drivers/char/hw_random/starfive-vic-rng.h new file mode 100644 index 0000000000000..b3bbabde0cfb1 --- /dev/null +++ b/drivers/char/hw_random/starfive-vic-rng.h @@ -0,0 +1,167 @@ +/* + ****************************************************************************** + * @file starfive-vic-rng.h + * @author StarFive Technology + * @version V1.0 + * @date 08/13/2020 + * @brief + ****************************************************************************** + * @copy + * + * THE PRESENT SOFTWARE WHICH IS FOR GUIDANCE ONLY AIMS AT PROVIDING CUSTOMERS + * WITH CODING INFORMATION REGARDING THEIR PRODUCTS IN ORDER FOR THEM TO SAVE + * TIME. AS A RESULT, STARFIVE SHALL NOT BE HELD LIABLE FOR ANY + * DIRECT, INDIRECT OR CONSEQUENTIAL DAMAGES WITH RESPECT TO ANY CLAIMS ARISING + * FROM THE CONTENT OF SUCH SOFTWARE AND/OR THE USE MADE BY CUSTOMERS OF THE + * CODING INFORMATION CONTAINED HEREIN IN CONNECTION WITH THEIR PRODUCTS. + * + * COPYRIGHT 2020 Shanghai StarFive Technology Co., Ltd. 
+ */ + +#define VIC_CTRL 0x00 +#define VIC_MODE 0x04 +#define VIC_SMODE 0x08 +#define VIC_STAT 0x0C +#define VIC_IE 0x10 +#define VIC_ISTAT 0x14 +#define VIC_ALARM 0x18 +#define VIC_BUILD_ID 0x1C +#define VIC_FEATURES 0x20 +#define VIC_RAND0 0x24 +#define VIC_NPA_DATA0 0x34 +#define VIC_SEED0 0x74 +#define VIC_IA_RDATA 0xA4 +#define VIC_IA_WDATA 0xA8 +#define VIC_IA_ADDR 0xAC +#define VIC_IA_CMD 0xB0 + +/* CTRL */ +#define VIC_CTRL_CMD_NOP 0 +#define VIC_CTRL_CMD_GEN_NOISE 1 +#define VIC_CTRL_CMD_GEN_NONCE 2 +#define VIC_CTRL_CMD_CREATE_STATE 3 +#define VIC_CTRL_CMD_RENEW_STATE 4 +#define VIC_CTRL_CMD_REFRESH_ADDIN 5 +#define VIC_CTRL_CMD_GEN_RANDOM 6 +#define VIC_CTRL_CMD_ADVANCE_STATE 7 +#define VIC_CTRL_CMD_KAT 8 +#define VIC_CTRL_CMD_ZEROIZE 15 + +/* MODE */ +#define _VIC_MODE_ADDIN_PRESENT 4 +#define _VIC_MODE_PRED_RESIST 3 +#define _VIC_MODE_KAT_SEL 2 +#define _VIC_MODE_KAT_VEC 1 +#define _VIC_MODE_SEC_ALG 0 + +#define VIC_MODE_ADDIN_PRESENT (1UL << _VIC_MODE_ADDIN_PRESENT) +#define VIC_MODE_PRED_RESIST (1UL << _VIC_MODE_PRED_RESIST) +#define VIC_MODE_KAT_SEL (1UL << _VIC_MODE_KAT_SEL) +#define VIC_MODE_KAT_VEC (1UL << _VIC_MODE_KAT_VEC) +#define VIC_MODE_SEC_ALG (1UL << _VIC_MODE_SEC_ALG) + +/* SMODE */ +#define _VIC_SMODE_MAX_REJECTS 2 +#define _VIC_SMODE_SECURE_EN 1 +#define _VIC_SMODE_NONCE 0 + +#define VIC_SMODE_MAX_REJECTS(x) ((x) << _VIC_SMODE_MAX_REJECTS) +#define VIC_SMODE_SECURE_EN(x) ((x) << _VIC_SMODE_SECURE_EN) +#define VIC_SMODE_NONCE (1UL << _VIC_SMODE_NONCE) + +/* STAT */ +#define _VIC_STAT_BUSY 31 +#define _VIC_STAT_DRBG_STATE 7 +#define _VIC_STAT_SECURE 6 +#define _VIC_STAT_NONCE_MODE 5 +#define _VIC_STAT_SEC_ALG 4 +#define _VIC_STAT_LAST_CMD 0 + +#define VIC_STAT_BUSY (1UL << _VIC_STAT_BUSY) +#define VIC_STAT_DRBG_STATE (1UL << _VIC_STAT_DRBG_STATE) +#define VIC_STAT_SECURE (1UL << _VIC_STAT_SECURE) +#define VIC_STAT_NONCE_MODE (1UL << _VIC_STAT_NONCE_MODE) +#define VIC_STAT_SEC_ALG (1UL << _VIC_STAT_SEC_ALG) +#define VIC_STAT_LAST_CMD(x) (((x) >> _VIC_STAT_LAST_CMD) & 0xF) + +/* IE */ +#define _VIC_IE_GLBL 31 +#define _VIC_IE_DONE 4 +#define _VIC_IE_ALARMS 3 +#define _VIC_IE_NOISE_RDY 2 +#define _VIC_IE_KAT_COMPLETE 1 +#define _VIC_IE_ZEROIZE 0 + +#define VIC_IE_GLBL (1UL << _VIC_IE_GLBL) +#define VIC_IE_DONE (1UL << _VIC_IE_DONE) +#define VIC_IE_ALARMS (1UL << _VIC_IE_ALARMS) +#define VIC_IE_NOISE_RDY (1UL << _VIC_IE_NOISE_RDY) +#define VIC_IE_KAT_COMPLETE (1UL << _VIC_IE_KAT_COMPLETE) +#define VIC_IE_ZEROIZE (1UL << _VIC_IE_ZEROIZE) +#define VIC_IE_ALL (VIC_IE_GLBL | VIC_IE_DONE | VIC_IE_ALARMS | \ + VIC_IE_NOISE_RDY | VIC_IE_KAT_COMPLETE | VIC_IE_ZEROIZE) + +/* ISTAT */ +#define _VIC_ISTAT_DONE 4 +#define _VIC_ISTAT_ALARMS 3 +#define _VIC_ISTAT_NOISE_RDY 2 +#define _VIC_ISTAT_KAT_COMPLETE 1 +#define _VIC_ISTAT_ZEROIZE 0 + +#define VIC_ISTAT_DONE (1UL << _VIC_ISTAT_DONE) +#define VIC_ISTAT_ALARMS (1UL << _VIC_ISTAT_ALARMS) +#define VIC_ISTAT_NOISE_RDY (1UL << _VIC_ISTAT_NOISE_RDY) +#define VIC_ISTAT_KAT_COMPLETE (1UL << _VIC_ISTAT_KAT_COMPLETE) +#define VIC_ISTAT_ZEROIZE (1UL << _VIC_ISTAT_ZEROIZE) + +/* ALARMS */ +#define VIC_ALARM_ILLEGAL_CMD_SEQ (1UL << 4) +#define VIC_ALARM_FAILED_TEST_ID_OK 0 +#define VIC_ALARM_FAILED_TEST_ID_KAT_STAT 1 +#define VIC_ALARM_FAILED_TEST_ID_KAT 2 +#define VIC_ALARM_FAILED_TEST_ID_MONOBIT 3 +#define VIC_ALARM_FAILED_TEST_ID_RUN 4 +#define VIC_ALARM_FAILED_TEST_ID_LONGRUN 5 +#define VIC_ALARM_FAILED_TEST_ID_AUTOCORRELATION 6 +#define VIC_ALARM_FAILED_TEST_ID_POKER 7 +#define VIC_ALARM_FAILED_TEST_ID_REPETITION_COUNT 8 +#define 
VIC_ALARM_FAILED_TEST_ID_ADAPATIVE_PROPORTION 9 + +/* BUILD_ID */ +#define VIC_BUILD_ID_STEPPING(x) (((x) >> 28) & 0xF) +#define VIC_BUILD_ID_EPN(x) ((x) & 0xFFFF) + +/* FEATURES */ +#define VIC_FEATURES_AES_256(x) (((x) >> 9) & 1) +#define VIC_FEATURES_EXTRA_PS_PRESENT(x) (((x) >> 8) & 1) +#define VIC_FEATURES_DIAG_LEVEL_NS(x) (((x) >> 7) & 1) +#define VIC_FEATURES_DIAG_LEVEL_CLP800(x) (((x) >> 4) & 7) +#define VIC_FEATURES_DIAG_LEVEL_ST_HLT(x) (((x) >> 1) & 7) +#define VIC_FEATURES_SECURE_RST_STATE(x) ((x) & 1) + +/* IA_CMD */ +#define VIC_IA_CMD_GO (1UL << 31) +#define VIC_IA_CMD_WR (1) + +#define _VIC_SMODE_MAX_REJECTS_MASK 255UL +#define _VIC_SMODE_SECURE_EN_MASK 1UL +#define _VIC_SMODE_NONCE_MASK 1UL +#define _VIC_MODE_SEC_ALG_MASK 1UL +#define _VIC_MODE_ADDIN_PRESENT_MASK 1UL +#define _VIC_MODE_PRED_RESIST_MASK 1UL + +#define VIC_SMODE_SET_MAX_REJECTS(y, x) (((y) & ~(_VIC_SMODE_MAX_REJECTS_MASK << _VIC_SMODE_MAX_REJECTS)) | ((x) << _VIC_SMODE_MAX_REJECTS)) +#define VIC_SMODE_SET_SECURE_EN(y, x) (((y) & ~(_VIC_SMODE_SECURE_EN_MASK << _VIC_SMODE_SECURE_EN)) | ((x) << _VIC_SMODE_SECURE_EN)) +#define VIC_SMODE_SET_NONCE(y, x) (((y) & ~(_VIC_SMODE_NONCE_MASK << _VIC_SMODE_NONCE)) | ((x) << _VIC_SMODE_NONCE)) +#define VIC_SMODE_GET_MAX_REJECTS(x) (((x) >> _VIC_SMODE_MAX_REJECTS) & _VIC_SMODE_MAX_REJECTS_MASK) +#define VIC_SMODE_GET_SECURE_EN(x) (((x) >> _VIC_SMODE_SECURE_EN) & _VIC_SMODE_SECURE_EN_MASK) +#define VIC_SMODE_GET_NONCE(x) (((x) >> _VIC_SMODE_NONCE) & _VIC_SMODE_NONCE_MASK) + +#define VIC_MODE_SET_SEC_ALG(y, x) (((y) & ~(_VIC_MODE_SEC_ALG_MASK << _VIC_MODE_SEC_ALG)) | ((x) << _VIC_MODE_SEC_ALG)) +#define VIC_MODE_SET_PRED_RESIST(y, x) (((y) & ~(_VIC_MODE_PRED_RESIST_MASK << _VIC_MODE_PRED_RESIST)) | ((x) << _VIC_MODE_PRED_RESIST)) +#define VIC_MODE_SET_ADDIN_PRESENT(y, x) (((y) & ~(_VIC_MODE_ADDIN_PRESENT_MASK << _VIC_MODE_ADDIN_PRESENT)) | ((x) << _VIC_MODE_ADDIN_PRESENT)) +#define VIC_MODE_GET_SEC_ALG(x) (((x) >> _VIC_MODE_SEC_ALG) & _VIC_MODE_SEC_ALG_MASK) +#define VIC_MODE_GET_PRED_RESIST(x) (((x) >> _VIC_MODE_PRED_RESIST) & _VIC_MODE_PRED_RESIST_MASK) +#define VIC_MODE_GET_ADDIN_PRESENT(x) (((x) >> _VIC_MODE_ADDIN_PRESENT) & _VIC_MODE_ADDIN_PRESENT_MASK) + +#define VIC_RAND_LEN 4 From ff732cbce1153c83367b0d3a4adc74f6c23357d6 Mon Sep 17 00:00:00 2001 From: Samin Guo Date: Fri, 5 Feb 2021 11:29:44 +0800 Subject: [PATCH 12/54] hwmon: (sfctemp) Add Starfive JH7100 temperature sensor Signed-off-by: Emil Renner Berthing --- drivers/hwmon/Kconfig | 9 ++ drivers/hwmon/Makefile | 1 + drivers/hwmon/sfctemp.c | 322 ++++++++++++++++++++++++++++++++++++++++ 3 files changed, 332 insertions(+) create mode 100644 drivers/hwmon/sfctemp.c diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig index 87624902ea809..410dbb7661b51 100644 --- a/drivers/hwmon/Kconfig +++ b/drivers/hwmon/Kconfig @@ -1751,6 +1751,15 @@ config SENSORS_STTS751 This driver can also be built as a module. If so, the module will be called stts751. +config SENSORS_SFCTEMP + tristate "Starfive JH7100 temperature sensor" + help + If you say yes here you get support for the temperature sensor + on the Starfive JH7100 SoC. + + This driver can also be built as a module. If so, the module + will be called sfctemp.
+ config SENSORS_SMM665 tristate "Summit Microelectronics SMM665" depends on I2C diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile index 59e78bc212cf3..3723eb580bf3e 100644 --- a/drivers/hwmon/Makefile +++ b/drivers/hwmon/Makefile @@ -167,6 +167,7 @@ obj-$(CONFIG_SENSORS_SBTSI) += sbtsi_temp.o obj-$(CONFIG_SENSORS_SCH56XX_COMMON)+= sch56xx-common.o obj-$(CONFIG_SENSORS_SCH5627) += sch5627.o obj-$(CONFIG_SENSORS_SCH5636) += sch5636.o +obj-$(CONFIG_SENSORS_SFCTEMP) += sfctemp.o obj-$(CONFIG_SENSORS_SL28CPLD) += sl28cpld-hwmon.o obj-$(CONFIG_SENSORS_SHT15) += sht15.o obj-$(CONFIG_SENSORS_SHT21) += sht21.o diff --git a/drivers/hwmon/sfctemp.c b/drivers/hwmon/sfctemp.c new file mode 100644 index 0000000000000..1a6ad39909aa5 --- /dev/null +++ b/drivers/hwmon/sfctemp.c @@ -0,0 +1,322 @@ +/* + * Copyright (C) 2021 Samin Guo + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#include +#include +#include +#include +#include +#include +#include +#include + +/* TempSensor reset. The RSTN can be de-asserted once the analog core has + * powered up. Trst(min 100ns) + * 0:reset 1:de-assert */ +#define SFCTEMP_RSTN BIT(0) + +/* TempSensor analog core power down. The analog core will be powered up + * Tpu(min 50us) after PD is de-asserted. RSTN should be held low until the + * analog core is powered up. + * 0:power up 1:power down */ +#define SFCTEMP_PD BIT(1) + +/* TempSensor start conversion enable. + * 0:disable 1:enable */ +#define SFCTEMP_RUN BIT(2) + +/* TempSensor calibration mode enable. + * 0:disable 1:enable */ +#define SFCTEMP_CAL BIT(4) + +/* TempSensor signature enable. Generate a toggle value outputting on DOUT for + * test purpose. + * 0:disable 1:enable */ +#define SFCTEMP_SGN BIT(5) + +/* TempSensor test access control. + * 0000:normal 0001:Test1 0010:Test2 0011:Test3 + * 0100:Test4 1000:Test8 1001:Test9 */ +#define SFCTEMP_TM_Pos 12 +#define SFCTEMP_TM_Msk GENMASK(15, 12) + +/* TempSensor conversion value output. + * Temp(c)=DOUT*Y/4094 - K */ +#define SFCTEMP_DOUT_Pos 16 +#define SFCTEMP_DOUT_Msk GENMASK(27, 16) + +/* TempSensor digital test output. 
*/ +#define SFCTEMP_DIGO BIT(31) + +/* DOUT to Celsius conversion constants */ +#define SFCTEMP_Y1000 237500L +#define SFCTEMP_Z 4094L +#define SFCTEMP_K1000 81100L + +struct sfctemp { + struct mutex lock; + struct completion conversion_done; + void __iomem *regs; + u32 dout; + bool enabled; +}; + +static irqreturn_t sfctemp_isr(int irq, void *data) +{ + struct sfctemp *sfctemp = data; + + sfctemp->dout = readl(sfctemp->regs); + writel(SFCTEMP_RSTN, sfctemp->regs); + complete(&sfctemp->conversion_done); + return IRQ_HANDLED; +} + +static void sfctemp_power_up(struct sfctemp *sfctemp) +{ + writel(SFCTEMP_PD, sfctemp->regs); + udelay(1); + + writel(0, sfctemp->regs); + /* wait t_pu(50us) + t_rst(100ns) */ + usleep_range(60, 200); + + writel(SFCTEMP_RSTN, sfctemp->regs); + /* wait t_su(500ps) */ + udelay(1); +} + +static void sfctemp_power_down(struct sfctemp *sfctemp) +{ + writel(SFCTEMP_RSTN, sfctemp->regs); + udelay(1); + + writel(SFCTEMP_PD, sfctemp->regs); + udelay(1); +} + +static void sfctemp_run(struct sfctemp *sfctemp) +{ + writel(SFCTEMP_RSTN | SFCTEMP_RUN, sfctemp->regs); +} + +static int sfctemp_enable(struct sfctemp *sfctemp) +{ + mutex_lock(&sfctemp->lock); + if (sfctemp->enabled) + goto done; + + sfctemp_power_up(sfctemp); + sfctemp->enabled = true; +done: + mutex_unlock(&sfctemp->lock); + return 0; +} + +static int sfctemp_disable(struct sfctemp *sfctemp) +{ + mutex_lock(&sfctemp->lock); + if (!sfctemp->enabled) + goto done; + + sfctemp_power_down(sfctemp); + sfctemp->enabled = false; +done: + mutex_unlock(&sfctemp->lock); + return 0; +} + +static int sfctemp_convert(struct sfctemp *sfctemp, long *val) +{ + long ret; + + mutex_lock(&sfctemp->lock); + if (!sfctemp->enabled) { + ret = -ENODATA; + goto out; + } + + sfctemp_run(sfctemp); + + ret = wait_for_completion_interruptible_timeout(&sfctemp->conversion_done, + msecs_to_jiffies(10)); + if (ret < 0) + goto out; + + /* calculate temperature in milli Celsius */ + *val = (long)((sfctemp->dout & SFCTEMP_DOUT_Msk) >> SFCTEMP_DOUT_Pos) + * SFCTEMP_Y1000 / SFCTEMP_Z - SFCTEMP_K1000; + + ret = 0; +out: + mutex_unlock(&sfctemp->lock); + return ret; +} + +static umode_t sfctemp_is_visible(const void *data, enum hwmon_sensor_types type, + u32 attr, int channel) +{ + switch (type) { + case hwmon_temp: + switch (attr) { + case hwmon_temp_enable: + return 0644; + case hwmon_temp_input: + return 0444; + } + return 0; + default: + return 0; + } +} + +static int sfctemp_read(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, long *val) +{ + struct sfctemp *sfctemp = dev_get_drvdata(dev); + + switch (type) { + case hwmon_temp: + switch (attr) { + case hwmon_temp_enable: + *val = sfctemp->enabled; + return 0; + case hwmon_temp_input: + return sfctemp_convert(sfctemp, val); + } + return -EINVAL; + default: + return -EINVAL; + } +} + +static int sfctemp_write(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, long val) +{ + struct sfctemp *sfctemp = dev_get_drvdata(dev); + + switch (type) { + case hwmon_temp: + switch (attr) { + case hwmon_temp_enable: + if (val == 0) + return sfctemp_disable(sfctemp); + if (val == 1) + return sfctemp_enable(sfctemp); + break; + } + return -EINVAL; + default: + return -EINVAL; + } +} + +static const struct hwmon_channel_info *sfctemp_info[] = { + HWMON_CHANNEL_INFO(chip, HWMON_C_REGISTER_TZ), + HWMON_CHANNEL_INFO(temp, HWMON_T_ENABLE | HWMON_T_INPUT), + NULL +}; + +static const struct hwmon_ops sfctemp_hwmon_ops = { + .is_visible = sfctemp_is_visible, + .read =
sfctemp_read, + .write = sfctemp_write, +}; + +static const struct hwmon_chip_info sfctemp_chip_info = { + .ops = &sfctemp_hwmon_ops, + .info = sfctemp_info, +}; + +static int sfctemp_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device *hwmon_dev; + struct resource *mem; + struct sfctemp *sfctemp; + long val; + int ret; + + sfctemp = devm_kzalloc(dev, sizeof(*sfctemp), GFP_KERNEL); + if (!sfctemp) + return -ENOMEM; + + dev_set_drvdata(dev, sfctemp); + + mutex_init(&sfctemp->lock); + init_completion(&sfctemp->conversion_done); + + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + sfctemp->regs = devm_ioremap_resource(dev, mem); + if (IS_ERR(sfctemp->regs)) + return PTR_ERR(sfctemp->regs); + + ret = platform_get_irq(pdev, 0); + if (ret < 0) + return ret; + + ret = devm_request_irq(dev, ret, sfctemp_isr, + IRQF_SHARED, pdev->name, sfctemp); + if (ret) { + dev_err(dev, "request irq failed: %d\n", ret); + return ret; + } + + ret = sfctemp_enable(sfctemp); + if (ret) + return ret; + + hwmon_dev = hwmon_device_register_with_info(dev, pdev->name, sfctemp, + &sfctemp_chip_info, NULL); + if (IS_ERR(hwmon_dev)) + return PTR_ERR(hwmon_dev); + + /* do a conversion to check everything works */ + ret = sfctemp_convert(sfctemp, &val); + if (ret) { + hwmon_device_unregister(hwmon_dev); + return ret; + } + + dev_info(dev, "%ld.%03ld C\n", val / 1000, val % 1000); + return 0; +} + +static int sfctemp_remove(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct sfctemp *sfctemp = dev_get_drvdata(dev); + + hwmon_device_unregister(dev); + return sfctemp_disable(sfctemp); +} + +static const struct of_device_id sfctemp_of_match[] = { + { .compatible = "sfc,tempsensor" }, + { /* sentinel */ } +}; + +MODULE_DEVICE_TABLE(of, sfctemp_of_match); + +static struct platform_driver sfctemp_driver = { + .driver = { + .name = "sfctemp", + .of_match_table = of_match_ptr(sfctemp_of_match), + }, + .probe = sfctemp_probe, + .remove = sfctemp_remove, +}; +module_platform_driver(sfctemp_driver); + +MODULE_AUTHOR("Samin Guo"); +MODULE_DESCRIPTION("Starfive JH7100 temperature sensor driver"); +MODULE_LICENSE("GPL"); From 4ea4f7ab73636e84c89eb2277d0796a71bb416be Mon Sep 17 00:00:00 2001 From: Tom Date: Fri, 8 Jan 2021 02:54:51 +0800 Subject: [PATCH 13/54] sifive/sifive_l2_cache: Add sifive_l2_flush64_range function --- drivers/soc/sifive/Kconfig | 15 ++++++++++ drivers/soc/sifive/sifive_l2_cache.c | 41 +++++++++++++++++++++++++++- include/soc/sifive/sifive_l2_cache.h | 4 +++ 3 files changed, 59 insertions(+), 1 deletion(-) diff --git a/drivers/soc/sifive/Kconfig b/drivers/soc/sifive/Kconfig index 58cf8c40d08d5..4d0fdab56e81a 100644 --- a/drivers/soc/sifive/Kconfig +++ b/drivers/soc/sifive/Kconfig @@ -7,4 +7,19 @@ config SIFIVE_L2 help Support for the L2 cache controller on SiFive platforms. 
+config SIFIVE_L2_FLUSH + bool "Support Level 2 Cache Controller Flush operation of SiFive Soc" + +if SIFIVE_L2_FLUSH + +config SIFIVE_L2_FLUSH_START + hex "Level 2 Cache Flush operation start" + default 0x80000000 + +config SIFIVE_L2_FLUSH_SIZE + hex "Level 2 Cache Flush operation size" + default 0x800000000 + +endif # SIFIVE_L2_FLUSH + endif diff --git a/drivers/soc/sifive/sifive_l2_cache.c b/drivers/soc/sifive/sifive_l2_cache.c index 59640a1d0b28a..0b9e9e852ee44 100644 --- a/drivers/soc/sifive/sifive_l2_cache.c +++ b/drivers/soc/sifive/sifive_l2_cache.c @@ -29,13 +29,17 @@ #define SIFIVE_L2_DATECCFAIL_HIGH 0x164 #define SIFIVE_L2_DATECCFAIL_COUNT 0x168 +#define SIFIVE_L2_FLUSH64 0x200 + #define SIFIVE_L2_CONFIG 0x00 #define SIFIVE_L2_WAYENABLE 0x08 #define SIFIVE_L2_ECCINJECTERR 0x40 #define SIFIVE_L2_MAX_ECCINTR 4 -static void __iomem *l2_base; +#define SIFIVE_L2_FLUSH64_LINE_LEN 64 + +static void __iomem *l2_base = NULL; static int g_irq[SIFIVE_L2_MAX_ECCINTR]; static struct riscv_cacheinfo_ops l2_cache_ops; @@ -116,6 +120,41 @@ int unregister_sifive_l2_error_notifier(struct notifier_block *nb) } EXPORT_SYMBOL_GPL(unregister_sifive_l2_error_notifier); +#ifdef CONFIG_SIFIVE_L2_FLUSH +void sifive_l2_flush64_range(unsigned long start, unsigned long len) +{ + unsigned long line; + + if(!l2_base) { + pr_warn("L2CACHE: base addr invalid, skipping flush\n"); + return; + } + + /* TODO: if (len == 0), skipping flush or going on? */ + if(!len) { + pr_debug("L2CACHE: flush64 range @ 0x%lx(len:0)\n", start); + return; + } + + /* make sure the address is in the range */ + if(start < CONFIG_SIFIVE_L2_FLUSH_START || + (start + len) > (CONFIG_SIFIVE_L2_FLUSH_START + + CONFIG_SIFIVE_L2_FLUSH_SIZE)) { + pr_warn("L2CACHE: flush64 out of range: %lx(%lx), skip flush\n", + start, len); + return; + } + + mb(); /* sync */ + for (line = start; line < start + len; + line += SIFIVE_L2_FLUSH64_LINE_LEN) { + writeq(line, l2_base + SIFIVE_L2_FLUSH64); + mb(); + } +} +EXPORT_SYMBOL_GPL(sifive_l2_flush64_range); +#endif + static int l2_largest_wayenabled(void) { return readl(l2_base + SIFIVE_L2_WAYENABLE) & 0xFF; diff --git a/include/soc/sifive/sifive_l2_cache.h b/include/soc/sifive/sifive_l2_cache.h index 92ade10ed67e9..dd3e56787d316 100644 --- a/include/soc/sifive/sifive_l2_cache.h +++ b/include/soc/sifive/sifive_l2_cache.h @@ -7,6 +7,10 @@ #ifndef __SOC_SIFIVE_L2_CACHE_H #define __SOC_SIFIVE_L2_CACHE_H +#ifdef CONFIG_SIFIVE_L2_FLUSH +extern void sifive_l2_flush64_range(unsigned long start, unsigned long len); +#endif + extern int register_sifive_l2_error_notifier(struct notifier_block *nb); extern int unregister_sifive_l2_error_notifier(struct notifier_block *nb); From 841ff8c53a26e92247f80a58f1f69690105649dc Mon Sep 17 00:00:00 2001 From: Tom Date: Mon, 15 Feb 2021 23:59:46 +0800 Subject: [PATCH 14/54] sifive/sifive_l2_cache: Add Starfive support --- drivers/soc/sifive/sifive_l2_cache.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/soc/sifive/sifive_l2_cache.c b/drivers/soc/sifive/sifive_l2_cache.c index 0b9e9e852ee44..5f2b295fc5efd 100644 --- a/drivers/soc/sifive/sifive_l2_cache.c +++ b/drivers/soc/sifive/sifive_l2_cache.c @@ -103,6 +103,7 @@ static void l2_config_read(void) static const struct of_device_id sifive_l2_ids[] = { { .compatible = "sifive,fu540-c000-ccache" }, { .compatible = "sifive,fu740-c000-ccache" }, + { .compatible = "starfive,ccache0" }, { /* end of table */ }, }; From a9e28051a3cb25bf031b6956ae2eb5f7f3d6516e Mon Sep 17 00:00:00 2001 From: Tom Date: Sat, 13 Feb 2021 22:25:17 +0800 
Subject: [PATCH 15/54] sifive/sifive_l2_cache: Add disabling IRQ option (workaround) --- drivers/irqchip/irq-sifive-plic.c | 41 ++++++++++++++++++++++++++++ drivers/soc/sifive/Kconfig | 4 +++ drivers/soc/sifive/sifive_l2_cache.c | 8 ++++++ 3 files changed, 53 insertions(+) diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c index 97d4d04b0a80e..63a8dea3fae5d 100644 --- a/drivers/irqchip/irq-sifive-plic.c +++ b/drivers/irqchip/irq-sifive-plic.c @@ -273,6 +273,44 @@ static int plic_starting_cpu(unsigned int cpu) return 0; } +#if IS_ENABLED(CONFIG_SIFIVE_L2_IRQ_DISABLE) +#ifdef CONFIG_SOC_STARFIVE_VIC7100 +#define SIFIVE_L2_MAX_ECCINTR 4 +#else +#define SIFIVE_L2_MAX_ECCINTR 3 +#endif +static const struct of_device_id sifive_l2_ids[] = { + { .compatible = "sifive,fu540-c000-ccache" }, + { .compatible = "starfive,ccache0" }, + { /* end of table */ }, +}; + +static void sifive_l2_irq_disable(struct plic_handler *handler) +{ + int i, irq; + struct of_phandle_args oirq; + + struct device_node *np = of_find_matching_node(NULL, sifive_l2_ids); + if (!np) { + pr_err("Can't get L2 cache device node.\n"); + return; + } + + for (i = 0; i < SIFIVE_L2_MAX_ECCINTR; i++) { + if (!of_irq_parse_one(np, i, &oirq)) { + irq = *oirq.args; + if (irq) { + pr_info("disable L2 cache irq %d in plic\n", irq); + plic_toggle(handler, irq, 0); + continue; + } + } + pr_err("Can't get L2 cache irq(#%d).\n", i); + } +} +#endif + + static int __init plic_init(struct device_node *node, struct device_node *parent) { @@ -366,6 +404,9 @@ static int __init plic_init(struct device_node *node, done: for (hwirq = 1; hwirq <= nr_irqs; hwirq++) plic_toggle(handler, hwirq, 0); +#if IS_ENABLED(CONFIG_SIFIVE_L2_IRQ_DISABLE) + sifive_l2_irq_disable(handler); +#endif nr_handlers++; } diff --git a/drivers/soc/sifive/Kconfig b/drivers/soc/sifive/Kconfig index 4d0fdab56e81a..4cccaad9e943b 100644 --- a/drivers/soc/sifive/Kconfig +++ b/drivers/soc/sifive/Kconfig @@ -22,4 +22,8 @@ config SIFIVE_L2_FLUSH_SIZE endif # SIFIVE_L2_FLUSH +config SIFIVE_L2_IRQ_DISABLE + bool "Disable Level 2 Cache Controller interrupts" + default y if SOC_STARFIVE_VIC7100 + endif diff --git a/drivers/soc/sifive/sifive_l2_cache.c b/drivers/soc/sifive/sifive_l2_cache.c index 5f2b295fc5efd..be4e141f5a0ea 100644 --- a/drivers/soc/sifive/sifive_l2_cache.c +++ b/drivers/soc/sifive/sifive_l2_cache.c @@ -40,7 +40,9 @@ #define SIFIVE_L2_FLUSH64_LINE_LEN 64 static void __iomem *l2_base = NULL; +#if !IS_ENABLED(CONFIG_SIFIVE_L2_IRQ_DISABLE) static int g_irq[SIFIVE_L2_MAX_ECCINTR]; +#endif static struct riscv_cacheinfo_ops l2_cache_ops; enum { @@ -188,6 +190,7 @@ static const struct attribute_group *l2_get_priv_group(struct cacheinfo *this_le return NULL; } +#if !IS_ENABLED(CONFIG_SIFIVE_L2_IRQ_DISABLE) static irqreturn_t l2_int_handler(int irq, void *device) { unsigned int add_h, add_l; @@ -231,12 +234,15 @@ static irqreturn_t l2_int_handler(int irq, void *device) return IRQ_HANDLED; } +#endif static int __init sifive_l2_init(void) { struct device_node *np; struct resource res; +#if !IS_ENABLED(CONFIG_SIFIVE_L2_IRQ_DISABLE) int i, rc, intr_num; +#endif np = of_find_matching_node(NULL, sifive_l2_ids); if (!np) @@ -249,6 +255,7 @@ static int __init sifive_l2_init(void) if (!l2_base) return -ENOMEM; +#if !IS_ENABLED(CONFIG_SIFIVE_L2_IRQ_DISABLE) intr_num = of_property_count_u32_elems(np, "interrupts"); if (!intr_num) { pr_err("L2CACHE: no interrupts property\n"); @@ -263,6 +270,7 @@ static int __init sifive_l2_init(void) return rc; } } +#endif 
l2_config_read(); From b71b73fdab28419957e422c3f41d7a6e74a02945 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Fri, 21 May 2021 08:35:33 +0200 Subject: [PATCH 16/54] sifive/sifive_l2_cache: Print a backtrace on out-of-range flushes This makes it easier to find out which driver passes a wrong address range. Signed-off-by: Geert Uytterhoeven --- drivers/soc/sifive/sifive_l2_cache.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/soc/sifive/sifive_l2_cache.c b/drivers/soc/sifive/sifive_l2_cache.c index be4e141f5a0ea..626b664547e51 100644 --- a/drivers/soc/sifive/sifive_l2_cache.c +++ b/drivers/soc/sifive/sifive_l2_cache.c @@ -143,8 +143,8 @@ void sifive_l2_flush64_range(unsigned long start, unsigned long len) if(start < CONFIG_SIFIVE_L2_FLUSH_START || (start + len) > (CONFIG_SIFIVE_L2_FLUSH_START + CONFIG_SIFIVE_L2_FLUSH_SIZE)) { - pr_warn("L2CACHE: flush64 out of range: %lx(%lx), skip flush\n", - start, len); + WARN(1, "L2CACHE: flush64 out of range: %lx(%lx), skip flush\n", + start, len); return; } From 9c32e9e346590df9697b7d7a0a0d42b73962facc Mon Sep 17 00:00:00 2001 From: Chenjieqin Date: Fri, 8 Jan 2021 03:56:54 +0800 Subject: [PATCH 17/54] drivers/pwm: Add SiFive PWM PTC driver --- drivers/pwm/Kconfig | 10 ++ drivers/pwm/Makefile | 1 + drivers/pwm/pwm-sifive-ptc.c | 290 +++++++++++++++++++++++++++++++++++ 3 files changed, 301 insertions(+) create mode 100644 drivers/pwm/pwm-sifive-ptc.c diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig index c76adedd58c9f..e25c22b3ff195 100644 --- a/drivers/pwm/Kconfig +++ b/drivers/pwm/Kconfig @@ -491,6 +491,16 @@ config PWM_SIFIVE To compile this driver as a module, choose M here: the module will be called pwm-sifive. +config PWM_SIFIVE_PTC + tristate "SiFive PWM PTC support" + depends on OF + depends on COMMON_CLK + help + Generic PWM framework driver for SiFive SoCs. + + To compile this driver as a module, choose M here: the module + will be called pwm-sifive-ptc. + config PWM_SL28CPLD tristate "Kontron sl28cpld PWM support" depends on MFD_SL28CPLD || COMPILE_TEST diff --git a/drivers/pwm/Makefile b/drivers/pwm/Makefile index 708840b7fba8d..3ad7903fd7fe2 100644 --- a/drivers/pwm/Makefile +++ b/drivers/pwm/Makefile @@ -45,6 +45,7 @@ obj-$(CONFIG_PWM_RENESAS_TPU) += pwm-renesas-tpu.o obj-$(CONFIG_PWM_ROCKCHIP) += pwm-rockchip.o obj-$(CONFIG_PWM_SAMSUNG) += pwm-samsung.o obj-$(CONFIG_PWM_SIFIVE) += pwm-sifive.o +obj-$(CONFIG_PWM_SIFIVE_PTC) += pwm-sifive-ptc.o obj-$(CONFIG_PWM_SL28CPLD) += pwm-sl28cpld.o obj-$(CONFIG_PWM_SPEAR) += pwm-spear.o obj-$(CONFIG_PWM_SPRD) += pwm-sprd.o diff --git a/drivers/pwm/pwm-sifive-ptc.c b/drivers/pwm/pwm-sifive-ptc.c new file mode 100644 index 0000000000000..e510181b32bb2 --- /dev/null +++ b/drivers/pwm/pwm-sifive-ptc.c @@ -0,0 +1,290 @@ +/* + * Copyright (C) 2018 SiFive, Inc + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2, as published by + * the Free Software Foundation. 
+ */ + +#include +#include +#include +#include +#include +#include +#include + +#define PTC_DEBUG 0 + +/* max channel of pwm */ +#define MAX_PWM 8 + +/* PTC Register offsets */ +#define REG_RPTC_CNTR 0x0 +#define REG_RPTC_HRC 0x4 +#define REG_RPTC_LRC 0x8 +#define REG_RPTC_CTRL 0xC + +/* Bit for PWM clock */ +#define BIT_PWM_CLOCK_EN 31 + +/* Bit for clock gen soft reset */ +#define BIT_CLK_GEN_SOFT_RESET 13 + +#define NS_1 1000000000 + +/* Access PTC register (cntr hrc lrc and ctrl) ,need to replace PWM_BASE_ADDR */ +#define REG_PTC_BASE_ADDR_SUB(base, N) ((base) + ((N>3)?((N-4)*0x10+(1<<15)):(N*0x10))) +#define REG_PTC_RPTC_CNTR(base,N) (REG_PTC_BASE_ADDR_SUB(base,N)) +#define REG_PTC_RPTC_HRC(base,N) (REG_PTC_BASE_ADDR_SUB(base,N) + 0x4) +#define REG_PTC_RPTC_LRC(base,N) (REG_PTC_BASE_ADDR_SUB(base,N) + 0x8) +#define REG_PTC_RPTC_CTRL(base,N) (REG_PTC_BASE_ADDR_SUB(base,N) + 0xC) + +/* pwm ptc device */ +struct sifive_pwm_ptc_device { + struct pwm_chip chip; + struct clk *clk; + void __iomem *regs; + int irq; + /* apb clock frequency , from dts */ + unsigned int approx_period; +}; + +static inline struct sifive_pwm_ptc_device *chip_to_sifive_ptc(struct pwm_chip *c) +{ + return container_of(c, struct sifive_pwm_ptc_device, chip); +} + + +static void sifive_pwm_ptc_get_state(struct pwm_chip *chip, struct pwm_device *dev, struct pwm_state *state) +{ + struct sifive_pwm_ptc_device *pwm = chip_to_sifive_ptc(chip); + uint32_t data_lrc; + uint32_t data_hrc; + uint32_t pwm_clk_ns = 0; + + /* get lrc and hrc data from registe*/ + data_lrc = ioread32(REG_PTC_RPTC_LRC(pwm->regs, dev->hwpwm)); + data_hrc = ioread32(REG_PTC_RPTC_HRC(pwm->regs, dev->hwpwm)); + + /* how many ns does apb clock elapse */ + pwm_clk_ns = NS_1 / pwm->approx_period; + + /* pwm period(ns) */ + state->period = data_lrc*pwm_clk_ns; + + /* duty cycle(ns) ,means high level eclapse ns if it is normal polarity */ + state->duty_cycle = data_hrc*pwm_clk_ns; + + /* polarity,we don't use it now because it is not in dts */ + state->polarity = PWM_POLARITY_NORMAL; + + /* enabled or not */ + state->enabled = 1; +#ifdef PTC_DEBUG + printk("sifive_pwm_ptc_get_state in,no:%d....\r\n",dev->hwpwm); + printk("data_hrc:0x%x 0x%x \n", data_hrc, data_lrc); + printk("period:%llu\r\n",state->period); + printk("duty_cycle:%llu\r\n",state->duty_cycle); + printk("polarity:%d\r\n",state->polarity); + printk("enabled:%d\r\n",state->enabled); +#endif +} + + +static int sifive_pwm_ptc_apply(struct pwm_chip *chip, struct pwm_device *dev, struct pwm_state *state) +{ + struct sifive_pwm_ptc_device *pwm = chip_to_sifive_ptc(chip); + uint32_t pwm_clk_ns = 0; + uint32_t data_hrc = 0; + uint32_t data_lrc = 0; + uint32_t period_data = 0; + uint32_t duty_data = 0; + void __iomem* reg_addr; + +#if PTC_DEBUG + printk("sifive_pwm_ptc_apply in,no:%d....\r\n",dev->hwpwm); + printk("set parameter......\r\n"); + printk("period:%d\r\n",state->period); + printk("duty_cycle:%d\r\n",state->duty_cycle); + printk("polarity:%d\r\n",state->polarity); + printk("enabled:%d\r\n",state->enabled); +#endif + /* duty_cycle should be less or equal than period */ + if(state->duty_cycle > state->period) + state->duty_cycle = state->period; + + /* calculate pwm real period (ns) */ + pwm_clk_ns = NS_1 / pwm->approx_period; + +#if PTC_DEBUG + printk("approx_period,:%d,pwm_clk_ns:%d\r\n",pwm->approx_period,pwm_clk_ns); +#endif + + /* calculate period count */ + period_data = state->period / pwm_clk_ns; + + if (!state->enabled) + /* if is unenable,just set duty_dat to 0 , means low level 
always */ + duty_data = 0; + else + /* calculate duty count*/ + duty_data = state->duty_cycle / pwm_clk_ns; + +#if PTC_DEBUG + printk("period_data:%d,duty_data:%d\r\n",period_data,duty_data); +#endif + + if(state->polarity == PWM_POLARITY_NORMAL) + /* calculate data_hrc */ + data_hrc = period_data - duty_data; + else + /* calculate data_hrc */ + data_hrc = duty_data; + + data_lrc = period_data; + + /* set hrc */ + reg_addr = REG_PTC_RPTC_HRC(pwm->regs, dev->hwpwm); +#if PTC_DEBUG + printk("[sifive_pwm_ptc_config]reg_addr:0x%lx,data:%d....\n",reg_addr,data_hrc); +#endif + iowrite32(data_hrc, reg_addr); + +#if PTC_DEBUG + printk("[sifive_pwm_ptc_config]hrc ok....\n"); +#endif + + /* set lrc */ + reg_addr = REG_PTC_RPTC_LRC(pwm->regs, dev->hwpwm); +#if PTC_DEBUG + printk("[sifive_pwm_ptc_config]reg_addr:0x%lx,data:%d....\n",reg_addr,data_lrc); +#endif + + iowrite32(data_lrc, reg_addr); + +#if PTC_DEBUG + printk("[sifive_pwm_ptc_config]lrc ok....\n"); +#endif + + return 0; +} + + + +static const struct pwm_ops sifive_pwm_ptc_ops = { + .get_state = sifive_pwm_ptc_get_state, + .apply = (void *)sifive_pwm_ptc_apply, + .owner = THIS_MODULE, +}; + + + + +static int sifive_pwm_ptc_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *node = pdev->dev.of_node; + struct sifive_pwm_ptc_device *pwm; + struct pwm_chip *chip; + struct resource *res; + int ret; + +#if PTC_DEBUG + printk("sifive_pwm_ptc_probe in....\r\n"); +#endif + pwm = devm_kzalloc(dev, sizeof(*pwm), GFP_KERNEL); + if (!pwm) { + dev_err(dev, "Out of memory\n"); + return -ENOMEM; + } + + chip = &pwm->chip; + chip->dev = dev; + chip->ops = &sifive_pwm_ptc_ops; + + /* how many parameters can be transfered to ptc,need to fix */ + chip->of_pwm_n_cells = 3; + chip->base = -1; + + /* get pwm channels count, max value is 8 */ + ret = of_property_read_u32(node, "starfive,npwm", &chip->npwm); + if (ret < 0 || chip->npwm > MAX_PWM) + chip->npwm = MAX_PWM; + +#if PTC_DEBUG + printk("[sifive_pwm_ptc_probe] npwm:0x%lx....\r\n",chip->npwm); +#endif + /* get apb clock frequency */ + ret = of_property_read_u32(node, "sifive,approx-period", &pwm->approx_period); + +#if PTC_DEBUG + printk("[sifive_pwm_ptc_probe] approx_period:%d....\r\n",pwm->approx_period); +#endif + /* get IO base address*/ + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + +#if PTC_DEBUG + printk("[sifive_pwm_ptc_probe] res start:0x%lx,end:0x%lx....\r\n",res->start,res->end); +#endif + pwm->regs = devm_ioremap_resource(dev, res); + if (IS_ERR(pwm->regs)) + { + dev_err(dev, "Unable to map IO resources\n"); + return PTR_ERR(pwm->regs); + } + +#if PTC_DEBUG + printk("[sifive_pwm_ptc_probe] regs:0x%lx....\r\n",pwm->regs); +#endif + + pwm->clk = devm_clk_get(dev, NULL); + if (IS_ERR(pwm->clk)) { + dev_err(dev, "Unable to find controller clock\n"); + return PTR_ERR(pwm->clk); + } + + /* after add,it will display as /sys/class/pwm/pwmchip0,0 is chip->base + * after execute echo 0 > export in , pwm0 can be seen */ + ret = pwmchip_add(chip); + if (ret < 0) { + dev_err(dev, "cannot register PTC: %d\n", ret); + return ret; + } + + platform_set_drvdata(pdev, pwm); + +#if PTC_DEBUG + printk("SiFive PWM PTC chip registered %d PWMs\n", chip->npwm); +#endif + + return 0; +} + +static int sifive_pwm_ptc_remove(struct platform_device *dev) +{ + struct sifive_pwm_ptc_device *pwm = platform_get_drvdata(dev); + struct pwm_chip *chip = &pwm->chip; + + return pwmchip_remove(chip); +} + +static const struct of_device_id sifive_pwm_ptc_of_match[] = { + { 
.compatible = "sifive,pwm0" }, + { .compatible = "starfive,pwm0" }, + { }, +}; +MODULE_DEVICE_TABLE(of, sifive_pwm_ptc_of_match); + +static struct platform_driver sifive_pwm_ptc_driver = { + .probe = sifive_pwm_ptc_probe, + .remove = sifive_pwm_ptc_remove, + .driver = { + .name = "pwm-sifive-ptc", + .of_match_table = of_match_ptr(sifive_pwm_ptc_of_match), + }, +}; +module_platform_driver(sifive_pwm_ptc_driver); + +MODULE_DESCRIPTION("SiFive PWM PTC driver"); +MODULE_LICENSE("GPL v2"); From e2244153a123bb5e0111cb7bdd365f37dacdcc15 Mon Sep 17 00:00:00 2001 From: "yiming.li" Date: Tue, 16 Mar 2021 01:45:19 +0800 Subject: [PATCH 18/54] drivers/pwm/pwm-sifive-ptc: Clear PWM CNTR Clear CNTR of PWM after setting period & duty_cycle --- drivers/pwm/pwm-sifive-ptc.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/pwm/pwm-sifive-ptc.c b/drivers/pwm/pwm-sifive-ptc.c index e510181b32bb2..9f149a064ed86 100644 --- a/drivers/pwm/pwm-sifive-ptc.c +++ b/drivers/pwm/pwm-sifive-ptc.c @@ -167,6 +167,10 @@ static int sifive_pwm_ptc_apply(struct pwm_chip *chip, struct pwm_device *dev, s printk("[sifive_pwm_ptc_config]lrc ok....\n"); #endif + /* Clear REG_RPTC_CNTR after setting period & duty_cycle*/ + reg_addr = REG_PTC_RPTC_CNTR(pwm->regs, dev->hwpwm); + iowrite32(0, reg_addr); + return 0; } From 80969dd3e73409e456ffd8238d0d0701700253f6 Mon Sep 17 00:00:00 2001 From: Tom Date: Fri, 8 Jan 2021 02:57:50 +0800 Subject: [PATCH 19/54] drivers/dma: Add dw-axi-dmac-starfive driver for VIC7100 --- drivers/dma/Kconfig | 7 + drivers/dma/Makefile | 1 + drivers/dma/dw-axi-dmac-starfive/Makefile | 2 + .../dw-axi-dmac-starfive-misc.c | 322 ++++++++++++++++++ .../starfive_dmaengine_memcpy.c | 287 ++++++++++++++++ .../dma/dw-axi-dmac/dw-axi-dmac-platform.c | 103 +++++- drivers/dma/dw-axi-dmac/dw-axi-dmac.h | 36 +- 7 files changed, 738 insertions(+), 20 deletions(-) create mode 100644 drivers/dma/dw-axi-dmac-starfive/Makefile create mode 100644 drivers/dma/dw-axi-dmac-starfive/dw-axi-dmac-starfive-misc.c create mode 100644 drivers/dma/dw-axi-dmac-starfive/starfive_dmaengine_memcpy.c diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 6ab9d9a488a6e..60f4e80b23f48 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -180,6 +180,13 @@ config DW_AXI_DMAC NOTE: This driver wasn't tested on 64 bit platform because of lack 64 bit platform with Synopsys DW AXI DMAC. +config DW_AXI_DMAC_STARFIVE + tristate "Synopsys DesignWare AXI DMA support for StarFive SOC" + depends on SOC_STARFIVE_VIC7100 + help + Enable support for Synopsys DesignWare AXI DMA controller. + NOTE: It's for StarFive SOC. 
+ config EP93XX_DMA bool "Cirrus Logic EP93xx DMA support" depends on ARCH_EP93XX || COMPILE_TEST diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile index aa69094e35470..7d332af8b96c6 100644 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile @@ -26,6 +26,7 @@ obj-$(CONFIG_DMA_SA11X0) += sa11x0-dma.o obj-$(CONFIG_DMA_SUN4I) += sun4i-dma.o obj-$(CONFIG_DMA_SUN6I) += sun6i-dma.o obj-$(CONFIG_DW_AXI_DMAC) += dw-axi-dmac/ +obj-$(CONFIG_DW_AXI_DMAC_STARFIVE) += dw-axi-dmac-starfive/ obj-$(CONFIG_DW_DMAC_CORE) += dw/ obj-$(CONFIG_DW_EDMA) += dw-edma/ obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o diff --git a/drivers/dma/dw-axi-dmac-starfive/Makefile b/drivers/dma/dw-axi-dmac-starfive/Makefile new file mode 100644 index 0000000000000..c30fd928982f9 --- /dev/null +++ b/drivers/dma/dw-axi-dmac-starfive/Makefile @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_DW_AXI_DMAC_STARFIVE) += starfive_dmaengine_memcpy.o dw-axi-dmac-starfive-misc.o \ No newline at end of file diff --git a/drivers/dma/dw-axi-dmac-starfive/dw-axi-dmac-starfive-misc.c b/drivers/dma/dw-axi-dmac-starfive/dw-axi-dmac-starfive-misc.c new file mode 100644 index 0000000000000..a1189bbe1e5ba --- /dev/null +++ b/drivers/dma/dw-axi-dmac-starfive/dw-axi-dmac-starfive-misc.c @@ -0,0 +1,322 @@ +/* + * Copyright 2020 StarFive, Inc + * + * DW AXI dma driver for StarFive SoC VIC7100. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation, version 2. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#define DRIVER_NAME "dwaxidma" +#define AXIDMA_IOC_MAGIC 'A' +#define AXIDMA_IOCGETCHN _IO(AXIDMA_IOC_MAGIC, 0) +#define AXIDMA_IOCCFGANDSTART _IO(AXIDMA_IOC_MAGIC, 1) +#define AXIDMA_IOCGETSTATUS _IO(AXIDMA_IOC_MAGIC, 2) +#define AXIDMA_IOCRELEASECHN _IO(AXIDMA_IOC_MAGIC, 3) + +#define AXI_DMA_MAX_CHANS 20 + +#define DMA_CHN_UNUSED 0 +#define DMA_CHN_USED 1 +#define DMA_STATUS_UNFINISHED 0 +#define DMA_STATUS_FINISHED 1 + +/* for DEBUG*/ +//#define DW_DMA_CHECK_RESULTS +//#define DW_DMA_PRINT_MEM +//#define DW_DMA_FLUSH_DESC + +struct axidma_chncfg { + unsigned long src_addr; /*dma addr*/ + unsigned long dst_addr; /*dma addr*/ + unsigned long virt_src; /*mmap src addr*/ + unsigned long virt_dst; /*mmap dst addr*/ + unsigned long phys; /*desc phys addr*/ + unsigned int len; /*transport lenth*/ + int mem_fd; /*fd*/ + unsigned char chn_num; /*dma channels number*/ + unsigned char status; /*dma transport status*/ +}; + +struct axidma_chns { + struct dma_chan *dma_chan; + unsigned char used; + unsigned char status; + unsigned char reserve[2]; +}; + +struct axidma_chns channels[AXI_DMA_MAX_CHANS]; +#ifdef DW_DMA_PRINT_MEM +void print_in_line_u64(u8 *p_name, u64 *p_buf, u32 len) +{ + u32 i, j; + u32 line; + u32* ptmp; + u32 len_tmp; + u32 rest = len / 4; + + printk("%s: 0x%#llx, 0x%x\n", + p_name, dw_virt_to_phys((void *)p_buf), len); + + if(len >= 0x1000) + len_tmp = 0x1000 / 32; //print 128 size of memory. + else + len_tmp = len / 8; //print real 100% size of memory. 
+ + rest = len / 4; //one line print 8 u32 + + for (i = 0; i < len_tmp; i += 4, rest -= line) { + if (!(i % 4)) + printk(KERN_CONT KERN_INFO" %#llx: ", + dw_virt_to_phys((void *)(p_buf + i))); + + ptmp = (u32*)(p_buf + i); + line = (rest > 8) ? 8 : rest; + + for (j = 0; j < line; j++) + printk(KERN_CONT KERN_INFO "%08x ", *(ptmp + j)); + + printk(KERN_CONT KERN_INFO"\n"); + } +} +#endif + +static int axidma_open(struct inode *inode, struct file *file) +{ + /*Open: do nothing*/ + return 0; +} + +static int axidma_release(struct inode *inode, struct file *file) +{ + /* Release: do nothing */ + return 0; +} + +static ssize_t axidma_write(struct file *file, const char __user *data, + size_t len, loff_t *ppos) +{ + /* Write: do nothing */ + return 0; +} + +static void dma_complete_func(void *status) +{ + *(char *)status = DMA_STATUS_FINISHED; +} + +static long axidma_unlocked_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + int i, ret; + dma_cap_mask_t mask; + dma_cookie_t cookie; + struct dma_device *dma_dev; + struct axidma_chncfg chncfg; + struct dma_async_tx_descriptor *tx; + +#ifdef DW_DMA_FLUSH_DESC + void *des_chncfg = &chncfg; + chncfg.phys = dw_virt_to_phys(des_chncfg); +#endif + memset(&chncfg, 0, sizeof(struct axidma_chncfg)); + + switch(cmd) { + case AXIDMA_IOCGETCHN: + for(i = 0; i < AXI_DMA_MAX_CHANS; i++) { + if(DMA_CHN_UNUSED == channels[i].used) + break; + } + if(AXI_DMA_MAX_CHANS == i) { + printk("Get dma chn failed, because no idle channel\n"); + goto error; + } else { + channels[i].used = DMA_CHN_USED; + channels[i].status = DMA_STATUS_UNFINISHED; + chncfg.status = DMA_STATUS_UNFINISHED; + chncfg.chn_num = i; + } + dma_cap_zero(mask); + dma_cap_set(DMA_MEMCPY, mask); + channels[i].dma_chan = dma_request_channel(mask, NULL, NULL); + if(!channels[i].dma_chan) { + printk("dma request channel failed\n"); + channels[i].used = DMA_CHN_UNUSED; + goto error; + } + ret = copy_to_user((void __user *)arg, &chncfg, + sizeof(struct axidma_chncfg)); + if(ret) { + printk("Copy to user failed\n"); + goto error; + } + break; + case AXIDMA_IOCCFGANDSTART: +#ifdef DW_DMA_CHECK_RESULTS + void *src,*dst; +#endif + ret = copy_from_user(&chncfg, (void __user *)arg, + sizeof(struct axidma_chncfg)); + if(ret) { + printk("Copy from user failed\n"); + goto error; + } + + if((chncfg.chn_num >= AXI_DMA_MAX_CHANS) || + (!channels[chncfg.chn_num].dma_chan)) { + printk("chn_num[%d] is invalid\n", chncfg.chn_num); + goto error; + } + dma_dev = channels[chncfg.chn_num].dma_chan->device; +#ifdef DW_DMA_FLUSH_DESC + starfive_flush_dcache(chncfg.phys,sizeof(chncfg)); +#endif +#ifdef DW_DMA_CHECK_RESULTS + src = dw_phys_to_virt(chncfg.src_addr); + dst = dw_phys_to_virt(chncfg.dst_addr); +#endif + starfive_flush_dcache(chncfg.src_addr, chncfg.len); + + tx = dma_dev->device_prep_dma_memcpy( + channels[chncfg.chn_num].dma_chan, + chncfg.dst_addr, chncfg.src_addr, chncfg.len, + DMA_CTRL_ACK | DMA_PREP_INTERRUPT); + if(!tx){ + printk("Failed to prepare DMA memcpy\n"); + goto error; + } + channels[chncfg.chn_num].status = DMA_STATUS_UNFINISHED; + tx->callback_param = &channels[chncfg.chn_num].status; + tx->callback = dma_complete_func; + cookie = tx->tx_submit(tx); + if(dma_submit_error(cookie)) { + printk("Failed to dma tx_submit\n"); + goto error; + } + dma_async_issue_pending(channels[chncfg.chn_num].dma_chan); + /*flush dcache*/ + starfive_flush_dcache(chncfg.dst_addr, chncfg.len); +#ifdef DW_DMA_PRINT_MEM + print_in_line_u64((u8 *)"src", (u64 *)src, chncfg.len); + print_in_line_u64((u8 
*)"dst", (u64 *)dst, chncfg.len); +#endif +#ifdef DW_DMA_CHECK_RESULTS + if(memcmp(src, dst, chncfg.len)) + printk("check data faild.\n"); + else + printk("check data ok.\n"); +#endif + break; + + case AXIDMA_IOCGETSTATUS: + ret = copy_from_user(&chncfg, (void __user *)arg, + sizeof(struct axidma_chncfg)); + if(ret) { + printk("Copy from user failed\n"); + goto error; + } + + if(chncfg.chn_num >= AXI_DMA_MAX_CHANS) { + printk("chn_num[%d] is invalid\n", chncfg.chn_num); + goto error; + } + + chncfg.status = channels[chncfg.chn_num].status; + + ret = copy_to_user((void __user *)arg, &chncfg, + sizeof(struct axidma_chncfg)); + if(ret) { + printk("Copy to user failed\n"); + goto error; + } + break; + + case AXIDMA_IOCRELEASECHN: + ret = copy_from_user(&chncfg, (void __user *)arg, + sizeof(struct axidma_chncfg)); + if(ret) { + printk("Copy from user failed\n"); + goto error; + } + + if((chncfg.chn_num >= AXI_DMA_MAX_CHANS) || + (!channels[chncfg.chn_num].dma_chan)) { + printk("chn_num[%d] is invalid\n", chncfg.chn_num); + goto error; + } + + dma_release_channel(channels[chncfg.chn_num].dma_chan); + channels[chncfg.chn_num].used = DMA_CHN_UNUSED; + channels[chncfg.chn_num].status = DMA_STATUS_UNFINISHED; + break; + + default: + printk("Don't support cmd [%d]\n", cmd); + break; + } + return 0; + +error: + return -EFAULT; +} + +/* + * Kernel Interfaces + */ +static struct file_operations axidma_fops = { + .owner = THIS_MODULE, + .llseek = no_llseek, + .write = axidma_write, + .unlocked_ioctl = axidma_unlocked_ioctl, + .open = axidma_open, + .release = axidma_release, +}; + +static struct miscdevice axidma_miscdev = { + .minor = MISC_DYNAMIC_MINOR, + .name = DRIVER_NAME, + .fops = &axidma_fops, +}; + +static int __init axidma_init(void) +{ + int ret = misc_register(&axidma_miscdev); + if(ret) { + printk (KERN_ERR "cannot register miscdev (err=%d)\n", ret); + return ret; + } + + memset(&channels, 0, sizeof(channels)); + + return 0; +} + +static void __exit axidma_exit(void) +{ + misc_deregister(&axidma_miscdev); +} + +module_init(axidma_init); +module_exit(axidma_exit); + +MODULE_AUTHOR("samin.guo"); +MODULE_DESCRIPTION("DW Axi Dmac Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/dma/dw-axi-dmac-starfive/starfive_dmaengine_memcpy.c b/drivers/dma/dw-axi-dmac-starfive/starfive_dmaengine_memcpy.c new file mode 100644 index 0000000000000..aee72c10d77fa --- /dev/null +++ b/drivers/dma/dw-axi-dmac-starfive/starfive_dmaengine_memcpy.c @@ -0,0 +1,287 @@ +/* + * Copyright 2020 StarFive, Inc + * + * API for dma mem2mem. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation, version 2. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +static volatile int dma_finished = 0; +static DECLARE_WAIT_QUEUE_HEAD(wq); + +u64 dw_virt_to_phys(void *vaddr) +{ + u64 pfn_offset = ((u64)vaddr) & 0xfff; + + return _dw_virt_to_phys((u64 *)vaddr) + pfn_offset; +} +EXPORT_SYMBOL(dw_virt_to_phys); + +void *dw_phys_to_virt(u64 phys) +{ + u64 pfn_offset = phys & 0xfff; + + return (void *)(_dw_phys_to_virt(phys) + pfn_offset); +} +EXPORT_SYMBOL(dw_phys_to_virt); + +static void tx_callback(void *dma_async_param) +{ + dma_finished = 1; + wake_up_interruptible(&wq); +} + +static int _dma_async_alloc_buf(struct device *dma_dev, + void **src, void **dst, size_t size, + dma_addr_t *src_dma, dma_addr_t *dst_dma) +{ + *src = dma_alloc_coherent(dma_dev, size, src_dma, GFP_KERNEL); + if(!(*src)) { + DMA_DEBUG("src alloc err.\n"); + goto _FAILED_ALLOC_SRC; + } + + *dst = dma_alloc_coherent(dma_dev, size, dst_dma, GFP_KERNEL); + if(!(*dst)) { + DMA_DEBUG("dst alloc err.\n"); + goto _FAILED_ALLOC_DST; + } + + return 0; + +_FAILED_ALLOC_DST: + dma_free_coherent(dma_dev, size, *src, *src_dma); + +_FAILED_ALLOC_SRC: + dma_free_coherent(dma_dev, size, *dst, *dst_dma); + + return -1; +} + +static int _dma_async_prebuf(void *src, void *dst, size_t size) +{ + memset((u8 *)src, 0xff, size); + memset((u8 *)dst, 0x00, size); + return 0; +} + +static int _dma_async_check_data(void *src, void *dst, size_t size) +{ + return memcmp(src, dst, size); +} + +static void _dma_async_release(struct dma_chan *chan) +{ + dma_release_channel(chan); +} + +static struct dma_chan *_dma_get_channel(enum dma_transaction_type tx_type) +{ + dma_cap_mask_t dma_mask; + + dma_cap_zero(dma_mask); + dma_cap_set(tx_type, dma_mask); + + return dma_request_channel(dma_mask, NULL, NULL); +} + +static struct dma_async_tx_descriptor *_dma_async_get_desc( + struct dma_chan *chan, + dma_addr_t src_dma, dma_addr_t dst_dma, + size_t size) +{ + dma_finished = 0; + return dmaengine_prep_dma_memcpy(chan, dst_dma, src_dma, size, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); +} + +static void _dma_async_do_start(struct dma_async_tx_descriptor *desc, + struct dma_chan *chan) +{ + dma_cookie_t dma_cookie = dmaengine_submit(desc); + if (dma_submit_error(dma_cookie)) + DMA_DEBUG("Failed to do DMA tx_submit\n"); + + dma_async_issue_pending(chan); + wait_event_interruptible(wq, dma_finished); +} + +int dw_dma_async_do_memcpy(void *src, void *dst, size_t size) +{ + int ret; + struct device *dma_dev; + struct dma_chan *chan; + dma_addr_t src_dma, dst_dma; + struct dma_async_tx_descriptor *desc; + + const struct iommu_ops *iommu; + u64 dma_addr = 0, dma_size = 0; + + dma_dev = kzalloc(sizeof(*dma_dev), GFP_KERNEL); + if(!dma_dev){ + dev_err(dma_dev, "kmalloc error.\n"); + return -ENOMEM; + } + + dma_dev->bus = NULL; + dma_dev->coherent_dma_mask = 0xffffffff; + + iort_dma_setup(dma_dev, &dma_addr, &dma_size); + iommu = iort_iommu_configure_id(dma_dev, NULL); + if (PTR_ERR(iommu) == -EPROBE_DEFER) + return -EPROBE_DEFER; + + arch_setup_dma_ops(dma_dev, dst_dma, dma_size, iommu, true); + + if(_dma_async_alloc_buf(dma_dev, &src, &dst, size, &src_dma, &dst_dma)) { + dev_err(dma_dev, "Err alloc.\n"); + return -ENOMEM; + } + + DMA_DEBUG("src=%#llx, dst=%#llx\n", (u64)src, (u64)dst); + DMA_DEBUG("dma_src=%#x dma_dst=%#x\n", (u32)src_dma, (u32)dst_dma); + + _dma_async_prebuf(src, dst, size); + + chan = _dma_get_channel(DMA_MEMCPY); + if(!chan ){ + DMA_PRINTK("Err get chan.\n"); + 
return -EBUSY; + } + DMA_DEBUG("get chan ok.\n"); + + desc = _dma_async_get_desc(chan, src_dma, dst_dma, size); + if(!desc){ + DMA_PRINTK("Err get desc.\n"); + dma_release_channel(chan); + return -ENOMEM; + } + DMA_DEBUG("get desc ok.\n"); + + desc->callback = tx_callback; + + starfive_flush_dcache(src_dma, size); + starfive_flush_dcache(dst_dma, size); + + _dma_async_do_start(desc, chan); + _dma_async_release(chan); + + ret = _dma_async_check_data(src, dst, size); + + dma_free_coherent(dma_dev, size, src, src_dma); + dma_free_coherent(dma_dev, size, dst, dst_dma); + + return ret; +} +EXPORT_SYMBOL(dw_dma_async_do_memcpy); + +/* +* phys addr for dma. +*/ +int dw_dma_memcpy_raw(dma_addr_t src_dma, dma_addr_t dst_dma, size_t size) +{ + struct dma_chan *chan; + struct device *dma_dev; + struct dma_async_tx_descriptor *desc; + + const struct iommu_ops *iommu; + u64 dma_addr = 0, dma_size = 0; + + dma_dev = kzalloc(sizeof(*dma_dev), GFP_KERNEL); + if(!dma_dev){ + DMA_PRINTK("kmalloc error.\n"); + return -ENOMEM; + } + + dma_dev->bus = NULL; + dma_dev->coherent_dma_mask = 0xffffffff; + + iort_dma_setup(dma_dev, &dma_addr, &dma_size); + iommu = iort_iommu_configure_id(dma_dev, NULL); + if (PTR_ERR(iommu) == -EPROBE_DEFER) + return -EPROBE_DEFER; + + arch_setup_dma_ops(dma_dev, dst_dma, dma_size, iommu, true); + + chan = _dma_get_channel(DMA_MEMCPY); + if(!chan){ + DMA_PRINTK("Error get chan.\n"); + return -EBUSY; + } + DMA_DEBUG("get chan ok.\n"); + + DMA_DEBUG("src_dma=%#llx, dst_dma=%#llx \n", src_dma, dst_dma); + desc = _dma_async_get_desc(chan, src_dma, dst_dma, size); + if(!desc){ + DMA_PRINTK("Error get desc.\n"); + dma_release_channel(chan); + return -ENOMEM; + } + DMA_DEBUG("get desc ok.\n"); + + desc->callback = tx_callback; + + starfive_flush_dcache(src_dma, size); + starfive_flush_dcache(dst_dma, size); + + _dma_async_do_start(desc, chan); + _dma_async_release(chan); + + return 0; +} +EXPORT_SYMBOL(dw_dma_memcpy_raw); + +/* +*virtl addr for cpu. +*/ +int dw_dma_memcpy(void *src, void *dst, size_t size) +{ + dma_addr_t src_dma, dst_dma; + + src_dma = dw_virt_to_phys(src); + dst_dma = dw_virt_to_phys(dst); + + dw_dma_memcpy_raw(src_dma, dst_dma, size); + return 0; +} +EXPORT_SYMBOL(dw_dma_memcpy); + +int dw_dma_mem2mem_test(void) +{ + int ret; + void *src = NULL; + void *dst = NULL; + size_t size = 256; + + ret = dw_dma_async_do_memcpy(src, dst, size); + if(ret){ + DMA_PRINTK("memcpy failed.\n"); + } else { + DMA_PRINTK("memcpy ok.\n"); + } + + return ret; +} diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c index d9e4ac3edb4ea..fd1939eb821ec 100644 --- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c +++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c @@ -32,6 +32,8 @@ #include "../dmaengine.h" #include "../virt-dma.h" +#include + /* * The set of bus widths supported by the DMA controller. 
DW AXI DMAC supports * master data bus width up to 512 bits (for both AXI master interfaces), but @@ -148,24 +150,43 @@ static inline u32 axi_chan_irq_read(struct axi_dma_chan *chan) return axi_chan_ioread32(chan, CH_INTSTATUS); } +static inline bool axi_chan_get_nr8(struct axi_dma_chan *chan) +{ + return chan->chip->flag->nr_chan_8; +} + static inline void axi_chan_disable(struct axi_dma_chan *chan) { u32 val; - val = axi_dma_ioread32(chan->chip, DMAC_CHEN); - val &= ~(BIT(chan->id) << DMAC_CHAN_EN_SHIFT); - val |= BIT(chan->id) << DMAC_CHAN_EN_WE_SHIFT; - axi_dma_iowrite32(chan->chip, DMAC_CHEN, val); + if(axi_chan_get_nr8(chan)) { + val = axi_dma_ioread32(chan->chip, DMAC_CHEN_8); + val &= ~(BIT(chan->id) << DMAC_CHAN_EN_SHIFT_8); + val |= BIT(chan->id) << DMAC_CHAN_EN_WE_SHIFT_8; + axi_dma_iowrite32(chan->chip, DMAC_CHEN_8, val); + } else { + val = axi_dma_ioread32(chan->chip, DMAC_CHEN); + val &= ~(BIT(chan->id) << DMAC_CHAN_EN_SHIFT); + val |= BIT(chan->id) << DMAC_CHAN_EN_WE_SHIFT; + axi_dma_iowrite32(chan->chip, DMAC_CHEN, val); + } } static inline void axi_chan_enable(struct axi_dma_chan *chan) { u32 val; - val = axi_dma_ioread32(chan->chip, DMAC_CHEN); - val |= BIT(chan->id) << DMAC_CHAN_EN_SHIFT | - BIT(chan->id) << DMAC_CHAN_EN_WE_SHIFT; - axi_dma_iowrite32(chan->chip, DMAC_CHEN, val); + if(axi_chan_get_nr8(chan)) { + val = axi_dma_ioread32(chan->chip, DMAC_CHEN_8); + val |= BIT(chan->id) << DMAC_CHAN_EN_SHIFT_8 | + BIT(chan->id) << DMAC_CHAN_EN_WE_SHIFT_8; + axi_dma_iowrite32(chan->chip, DMAC_CHEN_8, val); + } else { + val = axi_dma_ioread32(chan->chip, DMAC_CHEN); + val |= BIT(chan->id) << DMAC_CHAN_EN_SHIFT | + BIT(chan->id) << DMAC_CHAN_EN_WE_SHIFT; + axi_dma_iowrite32(chan->chip, DMAC_CHEN, val); + } } static inline bool axi_chan_is_hw_enable(struct axi_dma_chan *chan) @@ -335,6 +356,7 @@ static void dw_axi_dma_set_byte_halfword(struct axi_dma_chan *chan, bool set) static void axi_chan_block_xfer_start(struct axi_dma_chan *chan, struct axi_dma_desc *first) { + struct axi_dma_desc *desc; u32 priority = chan->chip->dw->hdata->priority[chan->id]; u32 reg, irq_mask; u8 lms = 0; /* Select AXI0 master for LLI fetching */ @@ -384,6 +406,23 @@ static void axi_chan_block_xfer_start(struct axi_dma_chan *chan, irq_mask |= DWAXIDMAC_IRQ_SUSPENDED; axi_chan_irq_set(chan, irq_mask); + /*flush all the desc */ +#ifdef CONFIG_SOC_STARFIVE_VIC7100 + if(chan->chip->flag->need_flush) { + /*flush fisrt desc*/ + starfive_flush_dcache(first->vd.tx.phys, sizeof(*first)); + + list_for_each_entry(desc, &first->xfer_list, xfer_list) { + starfive_flush_dcache(desc->vd.tx.phys, sizeof(*desc)); + + dev_dbg(chan->chip->dev, + "sar:%#llx dar:%#llx llp:%#llx ctl:0x%x:%08x\n", + desc->lli.sar, desc->lli.dar, desc->lli.llp, + desc->lli.ctl_hi, desc->lli.ctl_lo); + } + } +#endif + axi_chan_enable(chan); } @@ -1070,8 +1109,10 @@ static irqreturn_t dw_axi_dma_interrupt(int irq, void *dev_id) if (status & DWAXIDMAC_IRQ_ALL_ERR) axi_chan_handle_err(chan, status); - else if (status & DWAXIDMAC_IRQ_DMA_TRF) + else if (status & DWAXIDMAC_IRQ_DMA_TRF) { axi_chan_block_xfer_complete(chan); + dev_dbg(chip->dev, "axi_chan_block_xfer_complete.\n"); + } } /* Re-enable interrupts */ @@ -1126,10 +1167,17 @@ static int dma_chan_pause(struct dma_chan *dchan) spin_lock_irqsave(&chan->vc.lock, flags); - val = axi_dma_ioread32(chan->chip, DMAC_CHEN); - val |= BIT(chan->id) << DMAC_CHAN_SUSP_SHIFT | - BIT(chan->id) << DMAC_CHAN_SUSP_WE_SHIFT; - axi_dma_iowrite32(chan->chip, DMAC_CHEN, val); + if(axi_chan_get_nr8(chan)){ + val = 
axi_dma_ioread32(chan->chip, DMAC_CHSUSP_8); + val |= BIT(chan->id) << DMAC_CHAN_SUSP_SHIFT_8 | + BIT(chan->id) << DMAC_CHAN_SUSP_WE_SHIFT_8; + axi_dma_iowrite32(chan->chip, DMAC_CHSUSP_8, val); + } else { + val = axi_dma_ioread32(chan->chip, DMAC_CHSUSP); + val |= BIT(chan->id) << DMAC_CHAN_SUSP_SHIFT | + BIT(chan->id) << DMAC_CHAN_SUSP_WE_SHIFT; + axi_dma_iowrite32(chan->chip, DMAC_CHSUSP, val); + } do { if (axi_chan_irq_read(chan) & DWAXIDMAC_IRQ_SUSPENDED) @@ -1152,11 +1200,17 @@ static inline void axi_chan_resume(struct axi_dma_chan *chan) { u32 val; - val = axi_dma_ioread32(chan->chip, DMAC_CHEN); - val &= ~(BIT(chan->id) << DMAC_CHAN_SUSP_SHIFT); - val |= (BIT(chan->id) << DMAC_CHAN_SUSP_WE_SHIFT); - axi_dma_iowrite32(chan->chip, DMAC_CHEN, val); - + if(axi_chan_get_nr8(chan)){ + val = axi_dma_ioread32(chan->chip, DMAC_CHSUSP_8); + val &= ~(BIT(chan->id) << DMAC_CHAN_SUSP_SHIFT_8); + val |= (BIT(chan->id) << DMAC_CHAN_SUSP_WE_SHIFT_8); + axi_dma_iowrite32(chan->chip, DMAC_CHSUSP_8, val); + } else { + val = axi_dma_ioread32(chan->chip, DMAC_CHSUSP); + val &= ~(BIT(chan->id) << DMAC_CHAN_SUSP_SHIFT); + val |= (BIT(chan->id) << DMAC_CHAN_SUSP_WE_SHIFT); + axi_dma_iowrite32(chan->chip, DMAC_CHSUSP, val); + } chan->is_paused = false; } @@ -1248,6 +1302,13 @@ static int parse_device_properties(struct axi_dma_chip *chip) chip->dw->hdata->nr_channels = tmp; + if(chip->dw->hdata->nr_channels > 8){ + chip->flag->nr_chan_8 = true; +#ifdef CONFIG_SOC_STARFIVE_VIC7100 + chip->flag->need_flush = true; +#endif + } + ret = device_property_read_u32(dev, "snps,dma-masters", &tmp); if (ret) return ret; @@ -1309,6 +1370,7 @@ static int dw_probe(struct platform_device *pdev) struct resource *mem; struct dw_axi_dma *dw; struct dw_axi_dma_hcfg *hdata; + struct dw_dma_flag *flag; u32 i; int ret; @@ -1324,9 +1386,14 @@ static int dw_probe(struct platform_device *pdev) if (!hdata) return -ENOMEM; + flag = devm_kzalloc(&pdev->dev, sizeof(*flag), GFP_KERNEL); + if (!flag) + return -ENOMEM; + chip->dw = dw; chip->dev = &pdev->dev; chip->dw->hdata = hdata; + chip->flag = flag; chip->irq = platform_get_irq(pdev, 0); if (chip->irq < 0) diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac.h b/drivers/dma/dw-axi-dmac/dw-axi-dmac.h index b69897887c765..0e454a926a82c 100644 --- a/drivers/dma/dw-axi-dmac/dw-axi-dmac.h +++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac.h @@ -5,6 +5,8 @@ * Synopsys DesignWare AXI DMA Controller driver. * * Author: Eugeniy Paltsev + * Samin.guo + * add support for (channels > 8). 2020. 
*/ #ifndef _AXI_DMA_PLATFORM_H @@ -18,10 +20,17 @@ #include "../virt-dma.h" -#define DMAC_MAX_CHANNELS 8 +#define DMAC_MAX_CHANNELS 16 #define DMAC_MAX_MASTERS 2 #define DMAC_MAX_BLK_SIZE 0x200000 +struct dw_dma_flag { + bool nr_chan_8; +#ifdef CONFIG_SOC_STARFIVE_VIC7100 + bool need_flush; +#endif +}; + struct dw_axi_dma_hcfg { u32 nr_channels; u32 nr_masters; @@ -68,6 +77,7 @@ struct axi_dma_chip { struct clk *core_clk; struct clk *cfgr_clk; struct dw_axi_dma *dw; + struct dw_dma_flag *flag; }; /* LLI == Linked List Item */ @@ -139,6 +149,15 @@ static inline struct axi_dma_chan *dchan_to_axi_dma_chan(struct dma_chan *dchan) #define DMAC_CHEN 0x018 /* R/W DMAC Channel Enable */ #define DMAC_CHEN_L 0x018 /* R/W DMAC Channel Enable 00-31 */ #define DMAC_CHEN_H 0x01C /* R/W DMAC Channel Enable 32-63 */ +#define DMAC_CHSUSP 0x018 /* R/W DMAC Channel suspend */ +#define DMAC_CHABORT 0x018 /* R/W DMAC Channel Abort */ + +#define DMAC_CHEN_8 0x018 /* R/W DMAC Channel Enable */ +#define DMAC_CHEN_L_8 0x018 /* R/W DMAC Channel Enable */ +#define DMAC_CHEN_H_8 0x01C /* R/W DMAC Channel Enable */ +#define DMAC_CHSUSP_8 0x020 /* R/W DMAC Channel Suspend */ +#define DMAC_CHABORT_8 0x028 /* R/W DMAC Channel Abort */ + #define DMAC_INTSTATUS 0x030 /* R DMAC Interrupt Status */ #define DMAC_COMMON_INTCLEAR 0x038 /* W DMAC Interrupt Clear */ #define DMAC_COMMON_INTSTATUS_ENA 0x040 /* R DMAC Interrupt Status Enable */ @@ -199,6 +218,19 @@ static inline struct axi_dma_chan *dchan_to_axi_dma_chan(struct dma_chan *dchan) #define DMAC_CHAN_SUSP_SHIFT 16 #define DMAC_CHAN_SUSP_WE_SHIFT 24 +#define DMAC_CHAN_ABORT_SHIFT 32 +#define DMAC_CHAN_ABORT_WE_SHIFT 40 + + +#define DMAC_CHAN_EN_SHIFT_8 0 +#define DMAC_CHAN_EN_WE_SHIFT_8 16 + +#define DMAC_CHAN_SUSP_SHIFT_8 0 +#define DMAC_CHAN_SUSP_WE_SHIFT_8 16 + +#define DMAC_CHAN_ABORT_SHIFT_8 0 +#define DMAC_CHAN_ABORT_WE_SHIFT_8 16 + /* CH_CTL_H */ #define CH_CTL_H_ARLEN_EN BIT(6) #define CH_CTL_H_ARLEN_POS 7 @@ -255,7 +287,7 @@ enum { #define CH_CTL_L_SRC_MAST BIT(0) /* CH_CFG_H */ -#define CH_CFG_H_PRIORITY_POS 17 +#define CH_CFG_H_PRIORITY_POS 15 #define CH_CFG_H_HS_SEL_DST_POS 4 #define CH_CFG_H_HS_SEL_SRC_POS 3 enum { From a7766666931ff0972a3ebca2fc19756ab865d76c Mon Sep 17 00:00:00 2001 From: Tom Date: Fri, 8 Jan 2021 03:25:24 +0800 Subject: [PATCH 20/54] drivers/i2c: Improve Synopsys DesignWare I2C adapter driver for StarFive VIC7100 --- drivers/i2c/busses/i2c-designware-platdrv.c | 27 +++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c index 4b37f28ec0c6c..904694a19e512 100644 --- a/drivers/i2c/busses/i2c-designware-platdrv.c +++ b/drivers/i2c/busses/i2c-designware-platdrv.c @@ -39,6 +39,21 @@ static u32 i2c_dw_get_clk_rate_khz(struct dw_i2c_dev *dev) return clk_get_rate(dev->clk)/1000; } +#ifdef CONFIG_SOC_STARFIVE_VIC7100 +static u32 starfive_i2c_dw_get_clk_rate_khz(struct dw_i2c_dev *dev) +{ + u32 val; + + if(!device_property_read_u32(dev->dev, "clocks", &val)) { + dev_info(dev->dev, "Using 'clocks' : %u / 1000", val); + return (val / 1000); + } else { + dev_info(dev->dev, "Using the static setting value: 49500"); + return 49500; + } +} +#endif + #ifdef CONFIG_ACPI static const struct acpi_device_id dw_i2c_acpi_match[] = { { "INT33C2", 0 }, @@ -271,6 +286,18 @@ static int dw_i2c_plat_probe(struct platform_device *pdev) if (!dev->sda_hold_time && t->sda_hold_ns) dev->sda_hold_time = div_u64(clk_khz * t->sda_hold_ns + 500000, 1000000); +#ifdef 
CONFIG_SOC_STARFIVE_VIC7100 + } else { + u64 clk_khz; + + dev->get_clk_rate_khz = starfive_i2c_dw_get_clk_rate_khz; + clk_khz = dev->get_clk_rate_khz(dev); + + if (!dev->sda_hold_time && t->sda_hold_ns) + dev->sda_hold_time = + div_u64(clk_khz * t->sda_hold_ns + 500000, + 1000000); +#endif } adap = &dev->adapter; From 1586e04b801cebb5ff12244798ba5b6ff53ff179 Mon Sep 17 00:00:00 2001 From: Tom Date: Sat, 13 Mar 2021 15:22:38 +0800 Subject: [PATCH 21/54] drivers/i2c: Add GPIO configuration for VIC7100. [FIXME] why we can not do it in U-boot? [geert: Rebase to v5.13-rc1] --- drivers/i2c/busses/i2c-designware-core.h | 2 + drivers/i2c/busses/i2c-designware-master.c | 44 +++++++++++++++++++++ drivers/i2c/busses/i2c-designware-platdrv.c | 5 +++ 3 files changed, 51 insertions(+) diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h index 6a53f75abf7c7..c8e2c59bc29d5 100644 --- a/drivers/i2c/busses/i2c-designware-core.h +++ b/drivers/i2c/busses/i2c-designware-core.h @@ -286,6 +286,8 @@ struct dw_i2c_dev { int (*init)(struct dw_i2c_dev *dev); int (*set_sda_hold_time)(struct dw_i2c_dev *dev); int mode; + int scl_gpio; + int sda_gpio; struct i2c_bus_recovery_info rinfo; bool suspended; }; diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c index 9b08bb5df38d2..85d2cddfcdf6c 100644 --- a/drivers/i2c/busses/i2c-designware-master.c +++ b/drivers/i2c/busses/i2c-designware-master.c @@ -20,6 +20,7 @@ #include #include #include +#include #include "i2c-designware-core.h" @@ -164,6 +165,48 @@ static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev) return 0; } +static void i2c_dw_configure_gpio(struct dw_i2c_dev *dev) +{ +#ifdef CONFIG_SOC_STARFIVE_VIC7100_I2C_GPIO + if((dev->scl_gpio > 0) && (dev->sda_gpio > 0)) { + SET_GPIO_dout_LOW(dev->scl_gpio); + SET_GPIO_dout_LOW(dev->sda_gpio); + SET_GPIO_doen_reverse_(dev->scl_gpio,1); + SET_GPIO_doen_reverse_(dev->sda_gpio,1); + switch(dev->adapter.nr) { + case 0: + SET_GPIO_doen_i2c0_pad_sck_oe(dev->scl_gpio); + SET_GPIO_doen_i2c0_pad_sda_oe(dev->sda_gpio); + SET_GPIO_i2c0_pad_sck_in(dev->scl_gpio); + SET_GPIO_i2c0_pad_sda_in(dev->sda_gpio); + break; + case 1: + SET_GPIO_doen_i2c1_pad_sck_oe(dev->scl_gpio); + SET_GPIO_doen_i2c1_pad_sda_oe(dev->sda_gpio); + SET_GPIO_i2c1_pad_sck_in(dev->scl_gpio); + SET_GPIO_i2c1_pad_sda_in(dev->sda_gpio); + break; + case 2: + SET_GPIO_doen_i2c2_pad_sck_oe(dev->scl_gpio); + SET_GPIO_doen_i2c2_pad_sda_oe(dev->sda_gpio); + SET_GPIO_i2c2_pad_sck_in(dev->scl_gpio); + SET_GPIO_i2c2_pad_sda_in(dev->sda_gpio); + break; + case 3: + SET_GPIO_doen_i2c3_pad_sck_oe(dev->scl_gpio); + SET_GPIO_doen_i2c3_pad_sda_oe(dev->sda_gpio); + SET_GPIO_i2c3_pad_sck_in(dev->scl_gpio); + SET_GPIO_i2c3_pad_sda_in(dev->sda_gpio); + break; + default: + dev_err(dev->dev, "i2c adapter number is invalid\n"); + } + } else + dev_err(dev->dev, "scl/sda gpio number is invalid !\n"); +#endif + return; +} + /** * i2c_dw_init_master() - Initialize the designware I2C master hardware * @dev: device private data @@ -927,6 +970,7 @@ int i2c_dw_probe_master(struct dw_i2c_dev *dev) dev_err(dev->dev, "failure adding adapter: %d\n", ret); pm_runtime_put_noidle(dev->dev); + i2c_dw_configure_gpio(dev); return ret; } EXPORT_SYMBOL_GPL(i2c_dw_probe_master); diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c index 904694a19e512..1388e79bd9ef7 100644 --- a/drivers/i2c/busses/i2c-designware-platdrv.c +++ 
b/drivers/i2c/busses/i2c-designware-platdrv.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include #include @@ -31,6 +32,7 @@ #include #include #include +#include #include "i2c-designware-core.h" @@ -220,6 +222,7 @@ static const struct dmi_system_id dw_i2c_hwmon_class_dmi[] = { static int dw_i2c_plat_probe(struct platform_device *pdev) { + struct device_node *np = pdev->dev.of_node; struct i2c_adapter *adap; struct dw_i2c_dev *dev; struct i2c_timings *t; @@ -236,6 +239,8 @@ static int dw_i2c_plat_probe(struct platform_device *pdev) dev->flags = (uintptr_t)device_get_match_data(&pdev->dev); dev->dev = &pdev->dev; dev->irq = irq; + dev->scl_gpio = of_get_named_gpio(np, "scl-gpio", 0); + dev->sda_gpio = of_get_named_gpio(np, "sda-gpio", 0); platform_set_drvdata(pdev, dev); ret = dw_i2c_plat_request_regs(dev); From e2418d16bebcc9506e9a100cc1a46b1854cd6dc5 Mon Sep 17 00:00:00 2001 From: Tom Date: Fri, 8 Jan 2021 04:01:19 +0800 Subject: [PATCH 22/54] net: stmmac: Add dcache flush functions for JH7100 Note: including uSDK v0.9->v1.0 patch [geert: Rebase to v5.13-rc1] Warnings fixed by Matteo. Signed-off-by: Matteo Croce --- drivers/net/ethernet/stmicro/stmmac/descs.h | 3 + .../net/ethernet/stmicro/stmmac/stmmac_main.c | 239 +++++++++++++++++- 2 files changed, 238 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/descs.h b/drivers/net/ethernet/stmicro/stmmac/descs.h index 49d6a866244f4..1bf6506ff7788 100644 --- a/drivers/net/ethernet/stmicro/stmmac/descs.h +++ b/drivers/net/ethernet/stmicro/stmmac/descs.h @@ -169,6 +169,9 @@ struct dma_extended_desc { __le32 des5; /* Reserved */ __le32 des6; /* Tx/Rx Timestamp Low */ __le32 des7; /* Tx/Rx Timestamp High */ +#if defined(CONFIG_FPGA_GMAC_FLUSH_DDR) + __le32 pad[8]; +#endif }; /* Enhanced descriptor for TBS */ diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 3491adaa8d0ea..690021175fc6e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -166,6 +166,20 @@ int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled) } EXPORT_SYMBOL_GPL(stmmac_bus_clks_config); +#ifdef CONFIG_FPGA_GMAC_FLUSH_DDR +#define FLUSH_RX_DESC_ENABLE +#define FLUSH_RX_BUF_ENABLE + +#define FLUSH_TX_DESC_ENABLE +#define FLUSH_TX_BUF_ENABLE + +#include +static inline void stmmac_flush_dcache(unsigned long start, unsigned long len) +{ + starfive_flush_dcache(_ALIGN_DOWN(start, 64), len + start % 64); +} +#endif + /** * stmmac_verify_args - verify the driver parameters. 
* Description: it checks the driver parameters and set a default in case of @@ -1362,6 +1376,19 @@ static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue) priv->use_riwt, priv->mode, (i == priv->dma_rx_size - 1), priv->dma_buf_sz); + +#ifdef FLUSH_RX_DESC_ENABLE + { + unsigned long len; + + if (priv->extend_desc) + len = DMA_DEFAULT_RX_SIZE * sizeof(struct dma_extended_desc); + else + len = DMA_DEFAULT_RX_SIZE * sizeof(struct dma_desc); + + stmmac_flush_dcache(rx_q->dma_rx_phy, len); + } +#endif } /** @@ -1390,6 +1417,19 @@ static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue) stmmac_init_tx_desc(priv, p, priv->mode, last); } + +#ifdef FLUSH_TX_DESC_ENABLE + { + unsigned long len; + + if (priv->extend_desc) + len = DMA_DEFAULT_TX_SIZE * sizeof(struct dma_extended_desc); + else + len = DMA_DEFAULT_TX_SIZE * sizeof(struct dma_desc); + + stmmac_flush_dcache(tx_q->dma_tx_phy, len); + } +#endif } /** @@ -1453,6 +1493,9 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p, buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset; stmmac_set_desc_addr(priv, p, buf->addr); +#ifdef FLUSH_RX_BUF_ENABLE + stmmac_flush_dcache(buf->addr, priv->dma_buf_sz); +#endif if (priv->dma_buf_sz == BUF_SIZE_16KiB) stmmac_init_desc3(priv, p); @@ -1779,6 +1822,18 @@ static int __init_dma_tx_desc_rings(struct stmmac_priv *priv, u32 queue) tx_q->tx_skbuff_dma[i].last_segment = false; tx_q->tx_skbuff[i] = NULL; } +#ifdef FLUSH_TX_DESC_ENABLE + { + unsigned long len; + + if (priv->extend_desc) + len = DMA_DEFAULT_TX_SIZE * sizeof(struct dma_extended_desc); + else + len = DMA_DEFAULT_TX_SIZE * sizeof(struct dma_desc); + + stmmac_flush_dcache(tx_q->dma_tx_phy, len); + } +#endif tx_q->dirty_tx = 0; tx_q->cur_tx = 0; @@ -2493,8 +2548,22 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue) status = stmmac_tx_status(priv, &priv->dev->stats, &priv->xstats, p, priv->ioaddr); /* Check if the descriptor is owned by the DMA */ - if (unlikely(status & tx_dma_own)) + if (unlikely(status & tx_dma_own)) { +#ifdef FLUSH_TX_DESC_ENABLE + unsigned long start, len; + + if (priv->extend_desc) { + start = tx_q->dma_tx_phy + entry * sizeof(struct dma_extended_desc); + len = sizeof(struct dma_extended_desc); + } else { + start = tx_q->dma_tx_phy + entry * sizeof(struct dma_desc); + len = sizeof(struct dma_desc); + } + + stmmac_flush_dcache(start, len); +#endif break; + } count++; @@ -2563,6 +2632,22 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue) } stmmac_release_tx_desc(priv, p, priv->mode); +#ifdef FLUSH_TX_DESC_ENABLE + { + /* wangyh for test,flush description */ + unsigned long start, len; + + if (priv->extend_desc) { + start = tx_q->dma_tx_phy + entry * sizeof(struct dma_extended_desc); + len = sizeof(struct dma_extended_desc); + } else { + start = tx_q->dma_tx_phy + entry * sizeof(struct dma_desc); + len = sizeof(struct dma_desc); + } + + stmmac_flush_dcache(start, len); + } +#endif entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size); } @@ -2636,6 +2721,19 @@ static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan) stmmac_stop_tx_dma(priv, chan); dma_free_tx_skbufs(priv, chan); stmmac_clear_tx_descriptors(priv, chan); + +#ifdef FLUSH_TX_DESC_ENABLE + { + unsigned long len; + + if (priv->extend_desc) + len = DMA_DEFAULT_TX_SIZE * sizeof(struct dma_extended_desc); + else + len = DMA_DEFAULT_TX_SIZE * sizeof(struct dma_desc); + + stmmac_flush_dcache(tx_q->dma_tx_phy, len); + } +#endif tx_q->dirty_tx 
= 0; tx_q->cur_tx = 0; tx_q->mss = 0; @@ -3881,6 +3979,21 @@ static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des, (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE), 0, 0); +#ifdef FLUSH_TX_DESC_ENABLE + { + unsigned long start, len; + + if (priv->extend_desc) { + start = tx_q->dma_tx_phy + tx_q->cur_tx * sizeof(struct dma_extended_desc); + len = sizeof(struct dma_extended_desc); + } else { + start = tx_q->dma_tx_phy + tx_q->cur_tx * sizeof(struct dma_desc); + len = sizeof(struct dma_desc); + } + + stmmac_flush_dcache(start, len); + } +#endif tmp_len -= TSO_MAX_BUFF_SIZE; } } @@ -3948,6 +4061,10 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) u32 pay_len, mss; dma_addr_t des; int i; +#ifdef FLUSH_TX_DESC_ENABLE + unsigned int mss_entry; + unsigned long start, len; +#endif tx_q = &priv->tx_queue[queue]; first_tx = tx_q->cur_tx; @@ -3987,6 +4104,9 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) mss_desc = &tx_q->dma_tx[tx_q->cur_tx]; stmmac_set_mss(priv, mss_desc, mss); +#ifdef FLUSH_TX_DESC_ENABLE + mss_entry = tx_q->cur_tx; +#endif tx_q->mss = mss; tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size); @@ -4021,6 +4141,10 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) if (dma_mapping_error(priv->device, des)) goto dma_map_err; +#ifdef FLUSH_TX_BUF_ENABLE + stmmac_flush_dcache(des, skb_headlen(skb)); +#endif + tx_q->tx_skbuff_dma[first_entry].buf = des; tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb); tx_q->tx_skbuff_dma[first_entry].map_as_page = false; @@ -4054,6 +4178,9 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) if (dma_mapping_error(priv->device, des)) goto dma_map_err; +#ifdef FLUSH_TX_BUF_ENABLE + stmmac_flush_dcache(des, skb_frag_size(frag)); +#endif stmmac_tso_allocator(priv, des, skb_frag_size(frag), (i == nfrags - 1), queue); @@ -4101,7 +4228,6 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) * ndo_start_xmit will fill this descriptor the next time it's * called and stmmac_tx_clean may clean up to this descriptor. 
*/ - tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size); if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) { netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n", @@ -4132,6 +4258,17 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) 1, tx_q->tx_skbuff_dma[first_entry].last_segment, hdr / 4, (skb->len - proto_hdr_len)); +#ifdef FLUSH_TX_DESC_ENABLE + if (priv->extend_desc) { + start = tx_q->dma_tx_phy + first_entry * sizeof(struct dma_extended_desc); + len = sizeof(struct dma_extended_desc); + } else { + start = tx_q->dma_tx_phy + first_entry * sizeof(struct dma_desc); + len = sizeof(struct dma_desc); + } + + stmmac_flush_dcache(start, len); +#endif /* If context desc is used to change MSS */ if (mss_desc) { /* Make sure that first descriptor has been completely @@ -4141,6 +4278,17 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) */ dma_wmb(); stmmac_set_tx_owner(priv, mss_desc); +#ifdef FLUSH_TX_DESC_ENABLE + if (priv->extend_desc) { + start = tx_q->dma_tx_phy + mss_entry * sizeof(struct dma_extended_desc); + len = sizeof(struct dma_extended_desc); + } else { + start = tx_q->dma_tx_phy + mss_entry * sizeof(struct dma_desc); + len = sizeof(struct dma_desc); + } + + stmmac_flush_dcache(start, len); +#endif } if (netif_msg_pktdata(priv)) { @@ -4188,6 +4336,9 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) bool has_vlan, set_ic; int entry, first_tx; dma_addr_t des; +#ifdef FLUSH_TX_DESC_ENABLE + unsigned long start, len; +#endif tx_q = &priv->tx_queue[queue]; first_tx = tx_q->cur_tx; @@ -4267,6 +4418,9 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) if (dma_mapping_error(priv->device, des)) goto dma_map_err; /* should reuse desc w/o issues */ +#ifdef FLUSH_TX_BUF_ENABLE + stmmac_flush_dcache(des, len); +#endif tx_q->tx_skbuff_dma[entry].buf = des; stmmac_set_desc_addr(priv, desc, des); @@ -4279,6 +4433,17 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) /* Prepare the descriptor and set the own bit too */ stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion, priv->mode, 1, last_segment, skb->len); +#ifdef FLUSH_TX_DESC_ENABLE + if (priv->extend_desc) { + start = tx_q->dma_tx_phy + entry * sizeof(struct dma_extended_desc); + len = sizeof(struct dma_extended_desc); + } else { + start = tx_q->dma_tx_phy + entry * sizeof(struct dma_desc); + len = sizeof(struct dma_desc); + } + + stmmac_flush_dcache(start, len); +#endif } /* Only the last descriptor gets to point to the skb. 
*/ @@ -4315,6 +4480,17 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) tx_q->tx_count_frames = 0; stmmac_set_tx_ic(priv, desc); +#ifdef FLUSH_TX_DESC_ENABLE + if (priv->extend_desc) { + start = tx_q->dma_tx_phy + entry * sizeof(struct dma_extended_desc); + len = sizeof(struct dma_extended_desc); + } else { + start = tx_q->dma_tx_phy + entry * sizeof(struct dma_desc); + len = sizeof(struct dma_desc); + } + + stmmac_flush_dcache(start, len); +#endif priv->xstats.tx_set_ic_bit++; } @@ -4394,6 +4570,22 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len); +#ifdef FLUSH_TX_BUF_ENABLE + stmmac_flush_dcache(des, nopaged_len); +#endif + +#ifdef FLUSH_TX_DESC_ENABLE + if (priv->extend_desc) { + start = tx_q->dma_tx_phy + first_entry * sizeof(struct dma_extended_desc); + len = sizeof(struct dma_extended_desc); + } else { + start = tx_q->dma_tx_phy + first_entry * sizeof(struct dma_desc); + len = sizeof(struct dma_desc); + } + + stmmac_flush_dcache(start, len); +#endif + stmmac_enable_dma_transmission(priv, priv->ioaddr); stmmac_flush_tx_descriptors(priv, queue); @@ -4487,9 +4679,25 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue) if (!priv->use_riwt) use_rx_wd = false; - dma_wmb(); stmmac_set_rx_owner(priv, p, use_rx_wd); +#ifdef FLUSH_RX_DESC_ENABLE + { + unsigned long start, len; + + if (priv->extend_desc) { + start = rx_q->dma_rx_phy + entry * sizeof(struct dma_extended_desc); + len = sizeof(struct dma_extended_desc); + } else { + start = rx_q->dma_rx_phy + entry * sizeof(struct dma_desc); + len = sizeof(struct dma_desc); + } + + stmmac_flush_dcache(start, len); + } +#endif + dma_wmb(); + entry = STMMAC_GET_ENTRY(entry, priv->dma_rx_size); } rx_q->dirty_rx = entry; @@ -5105,8 +5313,22 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) status = stmmac_rx_status(priv, &priv->dev->stats, &priv->xstats, p); /* check if managed by the DMA otherwise go ahead */ - if (unlikely(status & dma_own)) + if (unlikely(status & dma_own)) { +#ifdef FLUSH_RX_DESC_ENABLE + unsigned long start, len; + + if (priv->extend_desc) { + start = rx_q->dma_rx_phy + entry * sizeof(struct dma_extended_desc); + len = sizeof(struct dma_extended_desc); + } else { + start = rx_q->dma_rx_phy + entry * sizeof(struct dma_desc); + len = sizeof(struct dma_desc); + } + + stmmac_flush_dcache(start, len); +#endif break; + } rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, priv->dma_rx_size); @@ -5173,6 +5395,9 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) dma_sync_single_for_cpu(priv->device, buf->addr, buf1_len, dma_dir); +#ifdef FLUSH_RX_BUF_ENABLE + stmmac_flush_dcache(buf->addr, buf1_len); +#endif xdp.data = page_address(buf->page) + buf->page_offset; xdp.data_end = xdp.data + buf1_len; @@ -5244,6 +5469,9 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) } else if (buf1_len) { dma_sync_single_for_cpu(priv->device, buf->addr, buf1_len, dma_dir); +#ifdef FLUSH_RX_BUF_ENABLE + stmmac_flush_dcache(buf->addr, buf1_len); +#endif skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, buf->page, buf->page_offset, buf1_len, priv->dma_buf_sz); @@ -5256,6 +5484,9 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) if (buf2_len) { dma_sync_single_for_cpu(priv->device, buf->sec_addr, buf2_len, dma_dir); +#ifdef FLUSH_RX_BUF_ENABLE + stmmac_flush_dcache(buf->sec_addr, buf2_len); +#endif skb_add_rx_frag(skb, 
skb_shinfo(skb)->nr_frags, buf->sec_page, 0, buf2_len, priv->dma_buf_sz); From abe43efb1e956885ea36c3e0a570e466d988a549 Mon Sep 17 00:00:00 2001 From: Tom Date: Tue, 6 Apr 2021 13:30:26 +0800 Subject: [PATCH 23/54] net: stmmac: Configure gtxclk based on speed --- .../ethernet/stmicro/stmmac/dwmac-generic.c | 47 +++++++++++++++++++ drivers/net/phy/micrel.c | 0 2 files changed, 47 insertions(+) mode change 100644 => 100755 drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c mode change 100644 => 100755 drivers/net/phy/micrel.c diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c old mode 100644 new mode 100755 index fbfda55b4c526..8b6b1bfad35e6 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c @@ -16,6 +16,50 @@ #include "stmmac.h" #include "stmmac_platform.h" +/* + * GMAC_GTXCLK 为 gmac 的时钟分频寄存器,低8位为分频值 + * bit name access default descript + * [31] clk_gmac_gtxclk enable RW 0x0 "1:enable; 0:disable" + * [30] reserved - 0x0 reserved + * [29:8] reserved - 0x0 reserved + * [7:0] clk_gmac_gtxclk divide ratio RW 0x4 divide value + * + * gmac 的 root 时钟为500M, gtxclk 需求的时钟如下: + * 1000M: gtxclk为125M,分频值为500/125 = 0x4 + * 100M: gtxclk为25M, 分频值为500/25 = 0x14 + * 10M: gtxclk为2.5M,分频值为500/2.5 = 0xc8 + */ +#ifdef CONFIG_SOC_STARFIVE_VIC7100 +#define CLKGEN_BASE 0x11800000 +#define CLKGEN_GMAC_GTXCLK_OFFSET 0x1EC +#define CLKGEN_GMAC_GTXCLK_ADDR (CLKGEN_BASE + CLKGEN_GMAC_GTXCLK_OFFSET) + +#define CLKGEN_125M_DIV 0x4 +#define CLKGEN_25M_DIV 0x14 +#define CLKGEN_2_5M_DIV 0xc8 + +static void dwmac_fixed_speed(void *priv, unsigned int speed) +{ + u32 value; + void *addr = ioremap(CLKGEN_GMAC_GTXCLK_ADDR, sizeof(value)); + if (!addr) { + pr_err("%s can't remap CLKGEN_GMAC_GTXCLK_ADDR\n", __func__); + return; + } + + value = readl(addr) & (~0x000000FF); + + switch (speed) { + case SPEED_1000: value |= CLKGEN_125M_DIV; break; + case SPEED_100: value |= CLKGEN_25M_DIV; break; + case SPEED_10: value |= CLKGEN_2_5M_DIV; break; + default: iounmap(addr); return; + } + writel(value, addr); /*set gmac gtxclk*/ + iounmap(addr); +} +#endif + static int dwmac_generic_probe(struct platform_device *pdev) { struct plat_stmmacenet_data *plat_dat; @@ -52,6 +96,9 @@ static int dwmac_generic_probe(struct platform_device *pdev) if (ret) goto err_remove_config_dt; } +#ifdef CONFIG_SOC_STARFIVE_VIC7100 + plat_dat->fix_mac_speed = dwmac_fixed_speed; +#endif ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); if (ret) diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c old mode 100644 new mode 100755 From 3d848a59c8e70b2a10b833bc01c40c678990814e Mon Sep 17 00:00:00 2001 From: Tom Date: Wed, 6 Jan 2021 20:31:08 +0800 Subject: [PATCH 24/54] drivers/mmc/host/dw_mmc: Add dcache flush(VIC7100 ONLY). 
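The VIC7100 flush hooks in this series all follow the same pattern as the stmmac helper earlier and the dw_mmc helper below: DMA on this SoC is not coherent with the CPU data cache, so every descriptor or buffer handed to (or received from) the controller is flushed explicitly, with the start address rounded down to a 64-byte boundary and the length widened by the dropped offset. A minimal sketch of that arithmetic is shown here; the 64-byte figure is taken from the _ALIGN_DOWN(start, 64) usage in these patches and is assumed to be the cache-line size.

/*
 * Illustration only: the same flush-range arithmetic the series wraps around
 * starfive_flush_dcache().  64 is assumed to be the D-cache line size.
 */
#define VIC7100_CACHE_LINE	64UL

static inline void example_flush_range(unsigned long start, unsigned long len)
{
	unsigned long aligned = start & ~(VIC7100_CACHE_LINE - 1);

	/*
	 * e.g. start = 0x80241234, len = 0x40:
	 * aligned = 0x80241200, flushed length = 0x40 + 0x34 = 0x74,
	 * so the whole buffer lies inside the flushed window.
	 */
	starfive_flush_dcache(aligned, len + (start - aligned));
}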
--- drivers/mmc/host/dw_mmc.c | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c index d333130d15315..360373de4e074 100644 --- a/drivers/mmc/host/dw_mmc.c +++ b/drivers/mmc/host/dw_mmc.c @@ -145,6 +145,14 @@ static int dw_mci_req_show(struct seq_file *s, void *v) } DEFINE_SHOW_ATTRIBUTE(dw_mci_req); +#ifdef CONFIG_MMC_DW_FLUSH_DDR +#include +static inline void dw_mci_flush_dcache(unsigned long start, unsigned long len) +{ + starfive_flush_dcache(_ALIGN_DOWN(start, 64), len + start % 64); +} +#endif + static int dw_mci_regs_show(struct seq_file *s, void *v) { struct dw_mci *host = s->private; @@ -691,6 +699,10 @@ static inline int dw_mci_prepare_desc32(struct dw_mci *host, /* Physical address to DMA to/from */ desc->des2 = cpu_to_le32(mem_addr); +#ifdef CONFIG_MMC_DW_FLUSH_DDR + dw_mci_flush_dcache((unsigned long)mem_addr, + (unsigned long)desc_len); +#endif /* Update physical address for the next desc */ mem_addr += desc_len; @@ -707,6 +719,10 @@ static inline int dw_mci_prepare_desc32(struct dw_mci *host, IDMAC_DES0_DIC)); desc_last->des0 |= cpu_to_le32(IDMAC_DES0_LD); +#ifdef CONFIG_MMC_DW_FLUSH_DDR + dw_mci_flush_dcache((unsigned long)(host->sg_dma), + (unsigned long)(sg_len * sizeof(struct idmac_desc))); +#endif return 0; err_own_bit: /* restore the descriptor chain as it's polluted */ From 4442499a35cfcc0260a1126ef8e52b2a824fde97 Mon Sep 17 00:00:00 2001 From: Tom Date: Fri, 8 Jan 2021 19:51:05 +0800 Subject: [PATCH 25/54] drivers/usb: Add dcache flush(VIC7100 ONLY) drivers/usb/cdns3/ drivers/usb/core/ drivers/usb/host/ include/linux/usb.h [geert: Rebase to v5.13-rc1] --- drivers/usb/cdns3/cdns3-debug.h | 3 + drivers/usb/cdns3/cdns3-ep0.c | 129 ++++++++++-- drivers/usb/cdns3/cdns3-gadget.c | 204 +++++++++++++++++- drivers/usb/cdns3/cdns3-gadget.h | 8 + drivers/usb/cdns3/cdns3-trace.h | 7 + drivers/usb/core/devio.c | 22 ++ drivers/usb/core/hcd.c | 81 +++++++- drivers/usb/core/urb.c | 4 + drivers/usb/core/usb.c | 10 + drivers/usb/host/xhci-dbg.c | 3 + drivers/usb/host/xhci-dbgcap.c | 28 +++ drivers/usb/host/xhci-debugfs.c | 12 ++ drivers/usb/host/xhci-hub.c | 9 +- drivers/usb/host/xhci-mem.c | 141 ++++++++++++- drivers/usb/host/xhci-ring.c | 346 +++++++++++++++++++++++++++++-- drivers/usb/host/xhci.c | 230 +++++++++++++++++++- include/linux/usb.h | 23 ++ 17 files changed, 1210 insertions(+), 50 deletions(-) diff --git a/drivers/usb/cdns3/cdns3-debug.h b/drivers/usb/cdns3/cdns3-debug.h index a5c6a29e13406..ba4143280a231 100644 --- a/drivers/usb/cdns3/cdns3-debug.h +++ b/drivers/usb/cdns3/cdns3-debug.h @@ -152,6 +152,9 @@ static inline char *cdns3_dbg_ring(struct cdns3_endpoint *priv_ep, le32_to_cpu(trb->buffer), le32_to_cpu(trb->length), le32_to_cpu(trb->control)); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(trb, sizeof(struct cdns3_trb)); +#endif addr += sizeof(*trb); } diff --git a/drivers/usb/cdns3/cdns3-ep0.c b/drivers/usb/cdns3/cdns3-ep0.c index 9a17802275d51..2e0ad2df53d3e 100644 --- a/drivers/usb/cdns3/cdns3-ep0.c +++ b/drivers/usb/cdns3/cdns3-ep0.c @@ -53,6 +53,11 @@ static void cdns3_ep0_run_transfer(struct cdns3_device *priv_dev, priv_ep->trb_pool[1].control = 0; } +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(EP_TRADDR_TRADDR(priv_ep->trb_pool_dma), + 2 * TRB_SIZE); +#endif + trace_cdns3_prepare_trb(priv_ep, priv_ep->trb_pool); cdns3_select_ep(priv_dev, priv_dev->ep0_data_dir); @@ -88,6 +93,9 @@ static int cdns3_ep0_delegate_req(struct cdns3_device *priv_dev, 
spin_unlock(&priv_dev->lock); priv_dev->setup_pending = 1; ret = priv_dev->gadget_driver->setup(&priv_dev->gadget, ctrl_req); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl_req, sizeof(struct usb_ctrlrequest)); +#endif priv_dev->setup_pending = 0; spin_lock(&priv_dev->lock); return ret; @@ -97,6 +105,12 @@ static void cdns3_prepare_setup_packet(struct cdns3_device *priv_dev) { priv_dev->ep0_data_dir = 0; priv_dev->ep0_stage = CDNS3_SETUP_STAGE; + +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + gadget_flush_dcache(priv_dev->setup_dma, + sizeof(struct usb_ctrlrequest)); +#endif + cdns3_ep0_run_transfer(priv_dev, priv_dev->setup_dma, sizeof(struct usb_ctrlrequest), 0, 0); } @@ -140,6 +154,9 @@ static int cdns3_req_ep0_set_configuration(struct cdns3_device *priv_dev, u32 config = le16_to_cpu(ctrl_req->wValue); int result = 0; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl_req, sizeof(struct usb_ctrlrequest)); +#endif switch (device_state) { case USB_STATE_ADDRESS: result = cdns3_ep0_delegate_req(priv_dev, ctrl_req); @@ -185,7 +202,9 @@ static int cdns3_req_ep0_set_address(struct cdns3_device *priv_dev, u32 addr; addr = le16_to_cpu(ctrl_req->wValue); - +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl_req, sizeof(struct usb_ctrlrequest)); +#endif if (addr > USB_DEVICE_MAX_ADDRESS) { dev_err(priv_dev->dev, "Device address (%d) cannot be greater than %d\n", @@ -225,9 +244,14 @@ static int cdns3_req_ep0_get_status(struct cdns3_device *priv_dev, u16 usb_status = 0; u32 recip; u8 index; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + u32 tmp_ind; +#endif recip = ctrl->bRequestType & USB_RECIP_MASK; - +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl, sizeof(struct usb_ctrlrequest)); +#endif switch (recip) { case USB_RECIP_DEVICE: /* self powered */ @@ -253,8 +277,17 @@ static int cdns3_req_ep0_get_status(struct cdns3_device *priv_dev, index = cdns3_ep_addr_to_index(le16_to_cpu(ctrl->wIndex)); priv_ep = priv_dev->eps[index]; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + tmp_ind = ctrl->wIndex; + cdns_virt_flush_dcache(ctrl, sizeof(struct usb_ctrlrequest)); + + /* check if endpoint is stalled or stall is pending */ + cdns3_select_ep(priv_dev, tmp_ind); +#else + /* check if endpoint is stalled or stall is pending */ cdns3_select_ep(priv_dev, le16_to_cpu(ctrl->wIndex)); +#endif if (EP_STS_STALL(readl(&priv_dev->regs->ep_sts)) || (priv_ep->flags & EP_STALL_PENDING)) usb_status = BIT(USB_ENDPOINT_HALT); @@ -266,6 +299,10 @@ static int cdns3_req_ep0_get_status(struct cdns3_device *priv_dev, response_pkt = (__le16 *)priv_dev->setup_buf; *response_pkt = cpu_to_le16(usb_status); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(priv_dev->setup_dma, sizeof(*response_pkt)); +#endif + cdns3_ep0_run_transfer(priv_dev, priv_dev->setup_dma, sizeof(*response_pkt), 1, 0); return 0; @@ -282,6 +319,9 @@ static int cdns3_ep0_feature_handle_device(struct cdns3_device *priv_dev, u16 tmode; wValue = le16_to_cpu(ctrl->wValue); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl, sizeof(struct usb_ctrlrequest)); +#endif state = priv_dev->gadget.state; speed = priv_dev->gadget.speed; @@ -309,7 +349,9 @@ static int cdns3_ep0_feature_handle_device(struct cdns3_device *priv_dev, return -EINVAL; tmode = le16_to_cpu(ctrl->wIndex); - +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl, sizeof(struct usb_ctrlrequest)); +#endif if (!set || (tmode & 0xff) != 0) return -EINVAL; @@ -342,7 +384,9 @@ static int 
cdns3_ep0_feature_handle_intf(struct cdns3_device *priv_dev, int ret = 0; wValue = le16_to_cpu(ctrl->wValue); - +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl, sizeof(struct usb_ctrlrequest)); +#endif switch (wValue) { case USB_INTRF_FUNC_SUSPEND: break; @@ -360,17 +404,38 @@ static int cdns3_ep0_feature_handle_endpoint(struct cdns3_device *priv_dev, struct cdns3_endpoint *priv_ep; int ret = 0; u8 index; - - if (le16_to_cpu(ctrl->wValue) != USB_ENDPOINT_HALT) +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + u32 tmp_ind; +#endif + + if (le16_to_cpu(ctrl->wValue) != USB_ENDPOINT_HALT) { +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl, sizeof(struct usb_ctrlrequest)); +#endif return -EINVAL; + } - if (!(le16_to_cpu(ctrl->wIndex) & ~USB_DIR_IN)) + if (!(le16_to_cpu(ctrl->wIndex) & ~USB_DIR_IN)) { +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl, sizeof(struct usb_ctrlrequest)); +#endif return 0; + } + +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl, sizeof(struct usb_ctrlrequest)); +#endif index = cdns3_ep_addr_to_index(le16_to_cpu(ctrl->wIndex)); priv_ep = priv_dev->eps[index]; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + tmp_ind = ctrl->wIndex; + cdns_virt_flush_dcache(ctrl, sizeof(struct usb_ctrlrequest)); + cdns3_select_ep(priv_dev, tmp_ind); +#else cdns3_select_ep(priv_dev, le16_to_cpu(ctrl->wIndex)); +#endif if (set) __cdns3_gadget_ep_set_halt(priv_ep); @@ -400,7 +465,9 @@ static int cdns3_req_ep0_handle_feature(struct cdns3_device *priv_dev, u32 recip; recip = ctrl->bRequestType & USB_RECIP_MASK; - +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl, sizeof(struct usb_ctrlrequest)); +#endif switch (recip) { case USB_RECIP_DEVICE: ret = cdns3_ep0_feature_handle_device(priv_dev, ctrl, set); @@ -434,9 +501,17 @@ static int cdns3_req_ep0_set_sel(struct cdns3_device *priv_dev, if (le16_to_cpu(ctrl_req->wLength) != 6) { dev_err(priv_dev->dev, "Set SEL should be 6 bytes, got %d\n", ctrl_req->wLength); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl_req, sizeof(struct usb_ctrlrequest)); +#endif return -EINVAL; } +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl_req, sizeof(struct usb_ctrlrequest)); + cdns_flush_dcache(priv_dev->setup_dma, 6); +#endif + cdns3_ep0_run_transfer(priv_dev, priv_dev->setup_dma, 6, 1, 0); return 0; } @@ -452,11 +527,19 @@ static int cdns3_req_ep0_set_sel(struct cdns3_device *priv_dev, static int cdns3_req_ep0_set_isoch_delay(struct cdns3_device *priv_dev, struct usb_ctrlrequest *ctrl_req) { - if (ctrl_req->wIndex || ctrl_req->wLength) + if (ctrl_req->wIndex || ctrl_req->wLength) { +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl_req, sizeof(struct usb_ctrlrequest)); +#endif return -EINVAL; + } priv_dev->isoch_delay = le16_to_cpu(ctrl_req->wValue); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl_req, sizeof(struct usb_ctrlrequest)); +#endif + return 0; } @@ -472,7 +555,13 @@ static int cdns3_ep0_standard_request(struct cdns3_device *priv_dev, { int ret; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + u8 bReq = ctrl_req->bRequest; + cdns_virt_flush_dcache(ctrl_req, sizeof(struct usb_ctrlrequest)); + switch (bReq) { +#else switch (ctrl_req->bRequest) { +#endif case USB_REQ_SET_ADDRESS: ret = cdns3_req_ep0_set_address(priv_dev, ctrl_req); break; @@ -535,7 +624,9 @@ static void cdns3_ep0_setup_phase(struct cdns3_device *priv_dev) int result; priv_dev->ep0_data_dir = ctrl->bRequestType & USB_DIR_IN; - 
+#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl, sizeof(struct usb_ctrlrequest)); +#endif trace_cdns3_ctrl_req(ctrl); if (!list_empty(&priv_ep->pending_req_list)) { @@ -552,10 +643,17 @@ static void cdns3_ep0_setup_phase(struct cdns3_device *priv_dev) else priv_dev->ep0_stage = CDNS3_STATUS_STAGE; - if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) + if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) { +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl, sizeof(struct usb_ctrlrequest)); +#endif result = cdns3_ep0_standard_request(priv_dev, ctrl); - else + } else { +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl, sizeof(struct usb_ctrlrequest)); +#endif result = cdns3_ep0_delegate_req(priv_dev, ctrl); + } if (result == USB_GADGET_DELAYED_STATUS) return; @@ -579,6 +677,10 @@ static void cdns3_transfer_completed(struct cdns3_device *priv_dev) request->actual = TRB_LEN(le32_to_cpu(priv_ep->trb_pool->length)); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(EP_TRADDR_TRADDR(priv_ep->trb_pool_dma), + sizeof(struct cdns3_trb)); +#endif priv_ep->dir = priv_dev->ep0_data_dir; cdns3_gadget_giveback(priv_ep, to_cdns3_request(request), 0); } @@ -764,6 +866,9 @@ static int cdns3_gadget_ep0_queue(struct usb_ep *ep, (request->length % ep->maxpacket == 0)) zlp = 1; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + gadget_flush_dcache(request->dma, request->length); +#endif cdns3_ep0_run_transfer(priv_dev, request->dma, request->length, 1, zlp); spin_unlock_irqrestore(&priv_dev->lock, flags); diff --git a/drivers/usb/cdns3/cdns3-gadget.c b/drivers/usb/cdns3/cdns3-gadget.c index a8b7b50abf645..9d254f2a2b945 100644 --- a/drivers/usb/cdns3/cdns3-gadget.c +++ b/drivers/usb/cdns3/cdns3-gadget.c @@ -230,6 +230,9 @@ int cdns3_allocate_trb_pool(struct cdns3_endpoint *priv_ep) } memset(priv_ep->trb_pool, 0, ring_size); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(priv_ep->trb_pool_dma, ring_size); +#endif priv_ep->num_trbs = num_trbs; @@ -249,6 +252,11 @@ int cdns3_allocate_trb_pool(struct cdns3_endpoint *priv_ep) link_trb->buffer = cpu_to_le32(TRB_BUFFER(priv_ep->trb_pool_dma)); link_trb->control = cpu_to_le32(TRB_CYCLE | TRB_TYPE(TRB_LINK) | TRB_TOGGLE); } +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache( + EP_TRADDR_TRADDR(cdns3_trb_virt_to_dma(priv_ep, link_trb)), + TRB_SIZE); +#endif return 0; } @@ -464,6 +472,11 @@ static void __cdns3_descmiss_copy_data(struct usb_request *request, memcpy(&((u8 *)request->buf)[request->actual], descmiss_req->buf, descmiss_req->actual); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache( + &((u8 *)request->buf)[request->actual], + descmiss_req->actual); +#endif request->actual = length; } else { /* It should never occures */ @@ -827,6 +840,10 @@ void cdns3_gadget_giveback(struct cdns3_endpoint *priv_ep, priv_req->aligned_buf->dir); memcpy(request->buf, priv_req->aligned_buf->buf, request->length); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(priv_req->aligned_buf->buf, + request->length); +#endif } priv_req->flags &= ~(REQUEST_PENDING | REQUEST_UNALIGNED); @@ -930,6 +947,10 @@ static int cdns3_prepare_aligned_request_buf(struct cdns3_request *priv_req) return -ENOMEM; } +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(buf->dma, buf->size); +#endif + if (priv_req->aligned_buf) { trace_cdns3_free_aligned_request(priv_req); priv_req->aligned_buf->in_use = 0; @@ -950,6 +971,10 @@ static int 
cdns3_prepare_aligned_request_buf(struct cdns3_request *priv_req) buf->dma, buf->size, buf->dir); memcpy(buf->buf, priv_req->request.buf, priv_req->request.length); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(buf->dma, buf->size); + cdns_virt_flush_dcache(priv_req->request.buf, buf->size); +#endif } /* Transfer DMA buffer ownership back to device */ @@ -1016,10 +1041,18 @@ static int cdns3_ep_run_stream_transfer(struct cdns3_endpoint *priv_ep, priv_ep->flags |= EP_PENDING_REQUEST; /* must allocate buffer aligned to 8 */ - if (priv_req->flags & REQUEST_UNALIGNED) + if (priv_req->flags & REQUEST_UNALIGNED){ trb_dma = priv_req->aligned_buf->dma; - else +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + gadget_flush_dcache(priv_req->aligned_buf->dma, + priv_req->aligned_buf->size); +#endif + }else{ trb_dma = request->dma; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + gadget_flush_dcache(request->dma, request->length); +#endif + } /* For stream capable endpoints driver use only single TD. */ trb = priv_ep->trb_pool + priv_ep->enqueue; @@ -1035,15 +1068,34 @@ static int cdns3_ep_run_stream_transfer(struct cdns3_endpoint *priv_ep, if (!request->num_sgs) { trb->buffer = cpu_to_le32(TRB_BUFFER(trb_dma)); length = request->length; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache( + EP_TRADDR_TRADDR(cdns3_trb_virt_to_dma(priv_ep, trb)), + sizeof(struct cdns3_trb)); +#endif } else { trb->buffer = cpu_to_le32(TRB_BUFFER(request->sg[sg_idx].dma_address)); length = request->sg[sg_idx].length; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(TRB_BUFFER(request->sg[sg_idx].dma_address), + request->sg[sg_idx].length); +#endif +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + gadget_flush_dcache(TRB_BUFFER(request->sg[sg_idx].dma_address), + request->sg[sg_idx].length); +#endif } tdl = DIV_ROUND_UP(length, priv_ep->endpoint.maxpacket); trb->length = cpu_to_le32(TRB_BURST_LEN(16) | TRB_LEN(length)); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(EP_TRADDR_TRADDR(cdns3_trb_virt_to_dma(priv_ep, + trb)), + sizeof(struct cdns3_trb)); +#endif + /* * For DEV_VER_V2 controller version we have enabled * USB_CONF2_EN_TDL_TRB in DMULT configuration. @@ -1056,6 +1108,11 @@ static int cdns3_ep_run_stream_transfer(struct cdns3_endpoint *priv_ep, priv_req->flags |= REQUEST_PENDING; trb->control = cpu_to_le32(control); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(EP_TRADDR_TRADDR(cdns3_trb_virt_to_dma(priv_ep, + trb)), + sizeof(struct cdns3_trb)); +#endif trace_cdns3_prepare_trb(priv_ep, priv_req->trb); @@ -1063,6 +1120,10 @@ static int cdns3_ep_run_stream_transfer(struct cdns3_endpoint *priv_ep, * Memory barrier - Cycle Bit must be set before trb->length and * trb->buffer fields. 
*/ +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + gadget_flush_dcache(cdns3_trb_virt_to_dma(priv_ep, trb), + sizeof(struct cdns3_trb)); +#endif wmb(); /* always first element */ @@ -1124,6 +1185,9 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep, u32 control; int pcs; u16 total_tdl = 0; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + int number = 0; +#endif struct scatterlist *s = NULL; bool sg_supported = !!(request->num_mapped_sgs); @@ -1143,10 +1207,18 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep, priv_ep->flags |= EP_PENDING_REQUEST; /* must allocate buffer aligned to 8 */ - if (priv_req->flags & REQUEST_UNALIGNED) + if (priv_req->flags & REQUEST_UNALIGNED){ trb_dma = priv_req->aligned_buf->dma; - else +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + gadget_flush_dcache(priv_req->aligned_buf->dma, + priv_req->aligned_buf->size); +#endif + }else{ trb_dma = request->dma; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + gadget_flush_dcache(request->dma, request->length); +#endif + } trb = priv_ep->trb_pool + priv_ep->enqueue; priv_req->start_trb = priv_ep->enqueue; @@ -1184,6 +1256,12 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep, link_trb->control = cpu_to_le32(((priv_ep->pcs) ? TRB_CYCLE : 0) | TRB_TYPE(TRB_LINK) | TRB_TOGGLE | ch_bit); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache( + EP_TRADDR_TRADDR(cdns3_trb_virt_to_dma(priv_ep, + link_trb)), + sizeof(struct cdns3_trb)); +#endif } if (priv_dev->dev_ver <= DEV_VER_V2) @@ -1219,12 +1297,26 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep, length = request->length; } +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache( + EP_TRADDR_TRADDR(cdns3_trb_virt_to_dma(priv_ep, trb)), + sizeof(struct cdns3_trb)); + if(request->num_sgs) + gadget_flush_dcache(request->sg[sg_iter].dma_address, + request->sg[sg_iter].length); +#endif + if (priv_ep->flags & EP_TDLCHK_EN) total_tdl += DIV_ROUND_UP(length, priv_ep->endpoint.maxpacket); trb->length |= cpu_to_le32(TRB_BURST_LEN(priv_ep->trb_burst_size) | TRB_LEN(length)); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache( + EP_TRADDR_TRADDR(cdns3_trb_virt_to_dma(priv_ep, trb)), + sizeof(struct cdns3_trb)); +#endif pcs = priv_ep->pcs ? 
TRB_CYCLE : 0; /* @@ -1256,12 +1348,23 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep, s = sg_next(s); } +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache( + EP_TRADDR_TRADDR(cdns3_trb_virt_to_dma(priv_ep, trb)), + sizeof(struct cdns3_trb)); +#endif + control = 0; ++sg_iter; priv_req->end_trb = priv_ep->enqueue; cdns3_ep_inc_enq(priv_ep); trb = priv_ep->trb_pool + priv_ep->enqueue; trb->length = 0; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache( + EP_TRADDR_TRADDR(cdns3_trb_virt_to_dma(priv_ep, trb)), + sizeof(struct cdns3_trb)); +#endif } while (sg_iter < num_trb); trb = priv_req->trb; @@ -1271,6 +1374,11 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep, if (sg_iter == 1) trb->control |= cpu_to_le32(TRB_IOC | TRB_ISP); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache( + EP_TRADDR_TRADDR(cdns3_trb_virt_to_dma(priv_ep, trb)), + sizeof(struct cdns3_trb)); +#endif if (priv_dev->dev_ver < DEV_VER_V2 && (priv_ep->flags & EP_TDLCHK_EN)) { @@ -1295,8 +1403,14 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep, wmb(); /* give the TD to the consumer*/ - if (togle_pcs) + if (togle_pcs) { trb->control = trb->control ^ cpu_to_le32(1); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache( + EP_TRADDR_TRADDR(cdns3_trb_virt_to_dma(priv_ep, trb)), + sizeof(struct cdns3_trb)); +#endif + } if (priv_dev->dev_ver <= DEV_VER_V2) cdns3_wa1_tray_restore_cycle_bit(priv_dev, priv_ep); @@ -1324,6 +1438,22 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep, */ wmb(); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + if((priv_req->start_trb + num_trb) > (priv_ep->num_trbs - 1)) { + number = priv_ep->num_trbs - 1 - priv_req->start_trb; + gadget_flush_dcache(priv_ep->trb_pool_dma + + (priv_req->start_trb * TRB_SIZE), + (number + 1) * TRB_SIZE); + gadget_flush_dcache(priv_ep->trb_pool_dma, + (num_trb - number)* TRB_SIZE); + } else { + gadget_flush_dcache(EP_TRADDR_TRADDR(priv_ep->trb_pool_dma + + priv_req->start_trb * + TRB_SIZE), + num_trb * TRB_SIZE); + } +#endif + /* * For DMULT mode we can set address to transfer ring only once after * enabling endpoint. @@ -1508,9 +1638,18 @@ static void cdns3_transfer_completed(struct cdns3_device *priv_dev, /* Request was dequeued and TRB was changed to TRB_LINK. */ if (TRB_FIELD_TO_TYPE(le32_to_cpu(trb->control)) == TRB_LINK) { trace_cdns3_complete_trb(priv_ep, trb); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + gadget_flush_dcache(EP_TRADDR_TRADDR(cdns3_trb_virt_to_dma(priv_ep, trb)), + sizeof(struct cdns3_trb)); +#endif cdns3_move_deq_to_next_trb(priv_req); } +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache( + EP_TRADDR_TRADDR(cdns3_trb_virt_to_dma(priv_ep, trb)), + sizeof(struct cdns3_trb)); +#endif if (!request->stream_id) { /* Re-select endpoint. It could be changed by other CPU * during handling usb_gadget_giveback_request. 
@@ -1554,6 +1693,11 @@ static void cdns3_transfer_completed(struct cdns3_device *priv_dev, cdns3_select_ep(priv_dev, priv_ep->endpoint.address); trb = priv_ep->trb_pool; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache( + EP_TRADDR_TRADDR(cdns3_trb_virt_to_dma(priv_ep, trb)), + sizeof(struct cdns3_trb)); +#endif trace_cdns3_complete_trb(priv_ep, trb); if (trb != priv_req->trb) @@ -1562,6 +1706,12 @@ static void cdns3_transfer_completed(struct cdns3_device *priv_dev, priv_req->trb, trb); request->actual += TRB_LEN(le32_to_cpu(trb->length)); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache( + EP_TRADDR_TRADDR(cdns3_trb_virt_to_dma(priv_ep, + trb)), + sizeof(struct cdns3_trb)); +#endif if (!request->num_sgs || (request->num_sgs == (priv_ep->stream_sg_idx + 1))) { @@ -1769,6 +1919,10 @@ static void cdns3_check_usb_interrupt_proceed(struct cdns3_device *priv_dev, __must_hold(&priv_dev->lock) { int speed = 0; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + int i; + struct cdns3_endpoint *priv_ep; +#endif trace_cdns3_usb_irq(priv_dev, usb_ists); if (usb_ists & USB_ISTS_L1ENTI) { @@ -1797,6 +1951,18 @@ __must_hold(&priv_dev->lock) priv_dev->gadget.speed = USB_SPEED_UNKNOWN; usb_gadget_set_state(&priv_dev->gadget, USB_STATE_NOTATTACHED); cdns3_hw_reset_eps_config(priv_dev); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + /* clean TRB*/ + for(i = 0;i < CDNS3_ENDPOINTS_MAX_COUNT; i++){ + priv_ep = priv_dev->eps[i]; + if(priv_ep && priv_ep->trb_pool){ + memset(priv_ep->trb_pool, 0, + priv_ep->alloc_ring_size); + gadget_flush_dcache(EP_TRADDR_TRADDR(priv_ep->trb_pool_dma), + priv_ep->alloc_ring_size); + } + } +#endif } if (usb_ists & (USB_ISTS_L2ENTI | USB_ISTS_U3ENTI)) { @@ -2642,6 +2808,12 @@ int cdns3_gadget_ep_dequeue(struct usb_ep *ep, ((priv_req->end_trb + 1) * TRB_SIZE))); link_trb->control = cpu_to_le32((le32_to_cpu(link_trb->control) & TRB_CYCLE) | TRB_TYPE(TRB_LINK) | TRB_CHAIN); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache( + EP_TRADDR_TRADDR(cdns3_trb_virt_to_dma(priv_ep, + link_trb)), + sizeof(struct cdns3_trb)); +#endif if (priv_ep->wa1_trb == priv_req->trb) cdns3_wa1_restore_cycle_bit(priv_ep); @@ -2695,8 +2867,15 @@ int __cdns3_gadget_ep_clear_halt(struct cdns3_endpoint *priv_ep) if (request) { priv_req = to_cdns3_request(request); trb = priv_req->trb; - if (trb) + if (trb) { trb->control = trb->control ^ cpu_to_le32(TRB_CYCLE); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache( + EP_TRADDR_TRADDR(cdns3_trb_virt_to_dma(priv_ep, + trb)), + sizeof(struct cdns3_trb)); +#endif + } } writel(EP_CMD_CSTALL | EP_CMD_EPRST, &priv_dev->regs->ep_cmd); @@ -2710,9 +2889,16 @@ int __cdns3_gadget_ep_clear_halt(struct cdns3_endpoint *priv_ep) priv_ep->flags &= ~(EP_STALLED | EP_STALL_PENDING); if (request) { - if (trb) + if (trb) { trb->control = trb->control ^ cpu_to_le32(TRB_CYCLE); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache( + EP_TRADDR_TRADDR(cdns3_trb_virt_to_dma(priv_ep, + trb)), + sizeof(struct cdns3_trb)); +#endif + } cdns3_rearm_transfer(priv_ep, 1); } @@ -3210,7 +3396,9 @@ static int cdns3_gadget_start(struct cdns *cdns) ret = -ENOMEM; goto err2; } - +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(priv_dev->setup_dma, 8); +#endif priv_dev->dev_ver = readl(&priv_dev->regs->usb_cap6); dev_dbg(priv_dev->dev, "Device Controller version: %08x\n", diff --git a/drivers/usb/cdns3/cdns3-gadget.h b/drivers/usb/cdns3/cdns3-gadget.h index c5660f2c4293f..a1805ba41d38b 100644 --- a/drivers/usb/cdns3/cdns3-gadget.h +++ 
b/drivers/usb/cdns3/cdns3-gadget.h
@@ -1368,4 +1368,12 @@ int cdns3_ep_config(struct cdns3_endpoint *priv_ep, bool enable);
 void cdns3_check_ep0_interrupt_proceed(struct cdns3_device *priv_dev, int dir);
 int __cdns3_gadget_wakeup(struct cdns3_device *priv_dev);
 
+#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA
+#include
+static inline void gadget_flush_dcache(unsigned long start, unsigned long len)
+{
+	starfive_flush_dcache(_ALIGN_DOWN(start, 64), len + start % 64);
+}
+#endif
+
 #endif /* __LINUX_CDNS3_GADGET */
diff --git a/drivers/usb/cdns3/cdns3-trace.h b/drivers/usb/cdns3/cdns3-trace.h
index 7574b4a628132..45b95b545515e 100644
--- a/drivers/usb/cdns3/cdns3-trace.h
+++ b/drivers/usb/cdns3/cdns3-trace.h
@@ -187,6 +187,9 @@ DECLARE_EVENT_CLASS(cdns3_log_ctrl,
 		__entry->wIndex = le16_to_cpu(ctrl->wIndex);
 		__entry->wLength = le16_to_cpu(ctrl->wLength);
 	),
+#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA
+	cdns_virt_flush_dcache(ctrl, sizeof(struct usb_ctrlrequest));
+#endif
 	TP_printk("%s", usb_decode_ctrl(__get_str(str), CDNS3_MSG_MAX,
 					__entry->bRequestType,
 					__entry->bRequest, __entry->wValue,
@@ -407,6 +410,10 @@ DECLARE_EVENT_CLASS(cdns3_log_trb,
 		__entry->type = usb_endpoint_type(priv_ep->endpoint.desc);
 		__entry->last_stream_id = priv_ep->last_stream_id;
 	),
+#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA
+	cdns_flush_dcache(EP_TRADDR_TRADDR(cdns3_trb_virt_to_dma(priv_ep, trb)),
+			  sizeof(struct cdns3_trb));
+#endif
 	TP_printk("%s: trb %p, dma buf: 0x%08x, size: %ld, burst: %d ctrl: 0x%08x (%s%s%s%s%s%s%s) SID:%lu LAST_SID:%u",
 		  __get_str(name), __entry->trb, __entry->buffer,
 		  TRB_LEN(__entry->length),
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index 2218941d35a3f..24f6d20fd7d49 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -251,6 +251,10 @@ static int usbdev_mmap(struct file *file, struct vm_area_struct *vma)
 	usbm->vma_use_count = 1;
 	INIT_LIST_HEAD(&usbm->memlist);
 
+#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA
+	cdns_flush_dcache(dma_handle, size);
+#endif
+
 	if (hcd->localmem_pool || !hcd_uses_dma(hcd)) {
 		if (remap_pfn_range(vma, vma->vm_start,
 				    virt_to_phys(usbm->mem) >> PAGE_SHIFT,
@@ -262,6 +266,9 @@ static int usbdev_mmap(struct file *file, struct vm_area_struct *vma)
 		if (dma_mmap_coherent(hcd->self.sysdev, vma, mem, dma_handle,
 				      size)) {
 			dec_usb_memory_use_count(usbm, &usbm->vma_use_count);
+#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA
+			cdns_flush_dcache(dma_handle, size);
+#endif
 			return -EAGAIN;
 		}
 	}
@@ -542,6 +549,9 @@ static int copy_urb_data_to_user(u8 __user *userbuffer, struct urb *urb)
 	if (urb->num_sgs == 0) {
 		if (copy_to_user(userbuffer, urb->transfer_buffer, len))
 			return -EFAULT;
+#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA
+		cdns_virt_flush_dcache(urb->transfer_buffer, len);
+#endif
 		return 0;
 	}
 
@@ -1734,6 +1744,12 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
 			as->urb->transfer_buffer = as->usbm->mem +
 				(uurb_start - as->usbm->vm_start);
+#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA
+			cdns_flush_dcache(as->usbm->dma_handle +
+					  (uurb_start - as->usbm->vm_start),
+					  as->usbm->size -
+					  (uurb_start - as->usbm->vm_start));
+#endif
 		} else {
 			as->urb->transfer_buffer = kmalloc(uurb->buffer_length,
 					GFP_KERNEL | __GFP_NOWARN);
@@ -1820,6 +1836,12 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
 		as->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
 		as->urb->transfer_dma = as->usbm->dma_handle +
 				(uurb_start - as->usbm->vm_start);
+#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA
+		cdns_flush_dcache(as->usbm->dma_handle +
+				  (uurb_start -
as->usbm->vm_start), + as->usbm->size - + (uurb_start - as->usbm->vm_start)); +#endif } else if (is_in && uurb->buffer_length > 0) as->userbuffer = uurb->buffer; as->signr = uurb->signr; diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index 6119fb41d7365..4cd2f3376dfb5 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c @@ -419,6 +419,9 @@ ascii2desc(char const *s, u8 *buf, unsigned len) *buf++ = t >> 8; t = (unsigned char)*s++; } +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(buf, len); +#endif return len; } @@ -450,6 +453,9 @@ rh_string(int id, struct usb_hcd const *hcd, u8 *data, unsigned len) if (len > 4) len = 4; memcpy(data, langids, len); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(data, len); +#endif return len; case 1: /* Serial number */ @@ -502,6 +508,9 @@ static int rh_call_control (struct usb_hcd *hcd, struct urb *urb) wValue = le16_to_cpu (cmd->wValue); wIndex = le16_to_cpu (cmd->wIndex); wLength = le16_to_cpu (cmd->wLength); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(cmd, sizeof(struct usb_ctrlrequest)); +#endif if (wLength > urb->transfer_buffer_length) goto error; @@ -727,6 +736,9 @@ static int rh_call_control (struct usb_hcd *hcd, struct urb *urb) bDeviceProtocol)) ((struct usb_device_descriptor *) ubuf)-> bDeviceProtocol = USB_HUB_PR_HS_SINGLE_TT; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ubuf, len); +#endif } kfree(tbuf); @@ -773,6 +785,9 @@ void usb_hcd_poll_rh_status(struct usb_hcd *hcd) urb->actual_length = length; memcpy(urb->transfer_buffer, buffer, length); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(urb->transfer_buffer, length); +#endif usb_hcd_unlink_urb_from_ep(hcd, urb); usb_hcd_giveback_urb(hcd, urb, 0); } else { @@ -1301,6 +1316,9 @@ static int hcd_alloc_coherent(struct usb_bus *bus, memcpy(vaddr, *vaddr_handle, size); *vaddr_handle = vaddr; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(*dma_handle, size + sizeof(vaddr)); +#endif return 0; } @@ -1312,9 +1330,13 @@ static void hcd_free_coherent(struct usb_bus *bus, dma_addr_t *dma_handle, vaddr = (void *)get_unaligned((unsigned long *)(vaddr + size)); - if (dir == DMA_FROM_DEVICE) + if (dir == DMA_FROM_DEVICE) { memcpy(vaddr, *vaddr_handle, size); - +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(vaddr, size); + cdns_virt_flush_dcache(*vaddr_handle, size); +#endif + } hcd_buffer_free(bus, size + sizeof(vaddr), *vaddr_handle, *dma_handle); *vaddr_handle = vaddr; @@ -1324,12 +1346,16 @@ static void hcd_free_coherent(struct usb_bus *bus, dma_addr_t *dma_handle, void usb_hcd_unmap_urb_setup_for_dma(struct usb_hcd *hcd, struct urb *urb) { if (IS_ENABLED(CONFIG_HAS_DMA) && - (urb->transfer_flags & URB_SETUP_MAP_SINGLE)) + (urb->transfer_flags & URB_SETUP_MAP_SINGLE)) { +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(urb->setup_dma, + sizeof(struct usb_ctrlrequest)); +#endif dma_unmap_single(hcd->self.sysdev, urb->setup_dma, sizeof(struct usb_ctrlrequest), DMA_TO_DEVICE); - else if (urb->transfer_flags & URB_SETUP_MAP_LOCAL) + } else if (urb->transfer_flags & URB_SETUP_MAP_LOCAL) hcd_free_coherent(urb->dev->bus, &urb->setup_dma, (void **) &urb->setup_packet, @@ -1363,23 +1389,36 @@ void usb_hcd_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb) urb->num_sgs, dir); else if (IS_ENABLED(CONFIG_HAS_DMA) && - (urb->transfer_flags & URB_DMA_MAP_PAGE)) + (urb->transfer_flags & URB_DMA_MAP_PAGE)) { +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + 
cdns_flush_dcache(urb->transfer_dma, + urb->transfer_buffer_length); +#endif dma_unmap_page(hcd->self.sysdev, urb->transfer_dma, urb->transfer_buffer_length, dir); - else if (IS_ENABLED(CONFIG_HAS_DMA) && - (urb->transfer_flags & URB_DMA_MAP_SINGLE)) + } else if (IS_ENABLED(CONFIG_HAS_DMA) && + (urb->transfer_flags & URB_DMA_MAP_SINGLE)) { +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(urb->transfer_dma, + urb->transfer_buffer_length); +#endif dma_unmap_single(hcd->self.sysdev, urb->transfer_dma, urb->transfer_buffer_length, dir); - else if (urb->transfer_flags & URB_MAP_LOCAL) + } else if (urb->transfer_flags & URB_MAP_LOCAL) { +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(urb->transfer_dma, + urb->transfer_buffer_length); +#endif hcd_free_coherent(urb->dev->bus, &urb->transfer_dma, &urb->transfer_buffer, urb->transfer_buffer_length, dir); + } /* Make it safe to call this routine more than once */ urb->transfer_flags &= ~(URB_DMA_MAP_SG | URB_DMA_MAP_PAGE | @@ -1418,6 +1457,10 @@ int usb_hcd_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb, (void **)&urb->setup_packet, sizeof(struct usb_ctrlrequest), DMA_TO_DEVICE); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(urb->setup_dma, + sizeof(struct usb_ctrlrequest)); +#endif if (ret) return ret; urb->transfer_flags |= URB_SETUP_MAP_LOCAL; @@ -1435,6 +1478,10 @@ int usb_hcd_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb, if (dma_mapping_error(hcd->self.sysdev, urb->setup_dma)) return -EAGAIN; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(urb->setup_dma, + sizeof(struct usb_ctrlrequest)); +#endif urb->transfer_flags |= URB_SETUP_MAP_SINGLE; } } @@ -1449,6 +1496,10 @@ int usb_hcd_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb, &urb->transfer_buffer, urb->transfer_buffer_length, dir); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(urb->transfer_dma, + urb->transfer_buffer_length + 8); +#endif if (ret == 0) urb->transfer_flags |= URB_MAP_LOCAL; } else if (hcd_uses_dma(hcd)) { @@ -1487,6 +1538,10 @@ int usb_hcd_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb, ret = -EAGAIN; else urb->transfer_flags |= URB_DMA_MAP_PAGE; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(urb->transfer_dma, + urb->transfer_buffer_length); +#endif } else if (object_is_on_stack(urb->transfer_buffer)) { WARN_ONCE(1, "transfer buffer is on stack\n"); ret = -EAGAIN; @@ -1501,6 +1556,10 @@ int usb_hcd_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb, ret = -EAGAIN; else urb->transfer_flags |= URB_DMA_MAP_SINGLE; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(urb->transfer_dma, + urb->transfer_buffer_length); +#endif } } if (ret && (urb->transfer_flags & (URB_SETUP_MAP_SINGLE | @@ -2949,6 +3008,9 @@ int usb_hcd_setup_local_mem(struct usb_hcd *hcd, phys_addr_t phys_addr, if (IS_ERR(local_mem)) return PTR_ERR(local_mem); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(phys_addr,size); +#endif /* * Here we pass a dma_addr_t but the arg type is a phys_addr_t. 
* It's not backed by system memory and thus there's no kernel mapping @@ -2962,6 +3024,9 @@ int usb_hcd_setup_local_mem(struct usb_hcd *hcd, phys_addr_t phys_addr, return err; } +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(dma,size); +#endif return 0; } EXPORT_SYMBOL_GPL(usb_hcd_setup_local_mem); diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c index 357b149b20d3a..3df361946a143 100644 --- a/drivers/usb/core/urb.c +++ b/drivers/usb/core/urb.c @@ -407,6 +407,10 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags) return -ENOEXEC; is_out = !(setup->bRequestType & USB_DIR_IN) || !setup->wLength; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(setup, + sizeof(struct usb_ctrlrequest)); +#endif } else { is_out = usb_endpoint_dir_out(&ep->desc); } diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c index 62368c4ed37af..c5b441d2d5625 100644 --- a/drivers/usb/core/usb.c +++ b/drivers/usb/core/usb.c @@ -970,9 +970,19 @@ EXPORT_SYMBOL_GPL(__usb_get_extra_descriptor); void *usb_alloc_coherent(struct usb_device *dev, size_t size, gfp_t mem_flags, dma_addr_t *dma) { +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + void *ret; +#endif if (!dev || !dev->bus) return NULL; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + ret = hcd_buffer_alloc(dev->bus, size, mem_flags, dma); + if(ret) + cdns_flush_dcache(*dma, size); + return ret; +#else return hcd_buffer_alloc(dev->bus, size, mem_flags, dma); +#endif } EXPORT_SYMBOL_GPL(usb_alloc_coherent); diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c index 386abf26641d2..5a840db12aa89 100644 --- a/drivers/usb/host/xhci-dbg.c +++ b/drivers/usb/host/xhci-dbg.c @@ -16,6 +16,9 @@ char *xhci_get_slot_state(struct xhci_hcd *xhci, struct xhci_slot_ctx *slot_ctx = xhci_get_slot_ctx(xhci, ctx); int state = GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(slot_ctx, sizeof(*slot_ctx)); +#endif return xhci_slot_state_string(state); } diff --git a/drivers/usb/host/xhci-dbgcap.c b/drivers/usb/host/xhci-dbgcap.c index ccb0156fcebeb..6837581908693 100644 --- a/drivers/usb/host/xhci-dbgcap.c +++ b/drivers/usb/host/xhci-dbgcap.c @@ -105,6 +105,9 @@ static void xhci_dbc_init_contexts(struct xhci_dbc *dbc, u32 string_length) info->product = cpu_to_le64(dma + DBC_MAX_STRING_LENGTH * 2); info->serial = cpu_to_le64(dma + DBC_MAX_STRING_LENGTH * 3); info->length = cpu_to_le32(string_length); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(dma, string_length); +#endif /* Populate bulk out endpoint context: */ ep_ctx = dbc_bulkout_ctx(dbc); @@ -113,6 +116,9 @@ static void xhci_dbc_init_contexts(struct xhci_dbc *dbc, u32 string_length) ep_ctx->ep_info = 0; ep_ctx->ep_info2 = dbc_epctx_info2(BULK_OUT_EP, 1024, max_burst); ep_ctx->deq = cpu_to_le64(deq | dbc->ring_out->cycle_state); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(deq, sizeof(union xhci_trb) * TRBS_PER_SEGMENT); +#endif /* Populate bulk in endpoint context: */ ep_ctx = dbc_bulkin_ctx(dbc); @@ -120,6 +126,9 @@ static void xhci_dbc_init_contexts(struct xhci_dbc *dbc, u32 string_length) ep_ctx->ep_info = 0; ep_ctx->ep_info2 = dbc_epctx_info2(BULK_IN_EP, 1024, max_burst); ep_ctx->deq = cpu_to_le64(deq | dbc->ring_in->cycle_state); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(deq, sizeof(union xhci_trb) * TRBS_PER_SEGMENT); +#endif /* Set DbC context and info registers: */ lo_hi_writeq(dbc->ctx->dma, &dbc->regs->dccp); @@ -279,6 +288,11 @@ static int 
xhci_dbc_queue_bulk_tx(struct dbc_ep *dep, * Add a barrier between writes of trb fields and flipping * the cycle bit: */ +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(req->dma, req->length); + cdns_flush_dcache(req->trb_dma, + sizeof(union xhci_trb) * TRBS_PER_SEGMENT); +#endif wmb(); if (cycle) @@ -286,6 +300,10 @@ static int xhci_dbc_queue_bulk_tx(struct dbc_ep *dep, else trb->generic.field[3] &= cpu_to_le32(~TRB_CYCLE); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(req->trb_dma, + sizeof(union xhci_trb) * TRBS_PER_SEGMENT); +#endif writel(DBC_DOOR_BELL_TARGET(dep->direction), &dbc->regs->doorbell); return 0; @@ -501,12 +519,19 @@ static int xhci_dbc_mem_init(struct xhci_dbc *dbc, gfp_t flags) if (!dbc->string) goto string_fail; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(dbc->string_dma, dbc->string_size); +#endif + /* Setup ERST register: */ writel(dbc->erst.erst_size, &dbc->regs->ersts); lo_hi_writeq(dbc->erst.erst_dma_addr, &dbc->regs->erstba); deq = xhci_trb_virt_to_dma(dbc->ring_evt->deq_seg, dbc->ring_evt->dequeue); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(deq, sizeof(union xhci_trb) * TRBS_PER_SEGMENT); +#endif lo_hi_writeq(deq, &dbc->regs->erdp); /* Setup strings and contexts: */ @@ -877,6 +902,9 @@ static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc) if (update_erdp) { deq = xhci_trb_virt_to_dma(dbc->ring_evt->deq_seg, dbc->ring_evt->dequeue); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(deq, sizeof(union xhci_trb)); +#endif lo_hi_writeq(deq, &dbc->regs->erdp); } diff --git a/drivers/usb/host/xhci-debugfs.c b/drivers/usb/host/xhci-debugfs.c index 2c0fda57869e4..0a6a351c1b322 100644 --- a/drivers/usb/host/xhci-debugfs.c +++ b/drivers/usb/host/xhci-debugfs.c @@ -207,6 +207,9 @@ static void xhci_ring_dump_segment(struct seq_file *s, le32_to_cpu(trb->generic.field[1]), le32_to_cpu(trb->generic.field[2]), le32_to_cpu(trb->generic.field[3]))); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(dma,sizeof(*trb)); +#endif } } @@ -268,6 +271,9 @@ static int xhci_slot_context_show(struct seq_file *s, void *unused) le32_to_cpu(slot_ctx->dev_info2), le32_to_cpu(slot_ctx->tt_info), le32_to_cpu(slot_ctx->dev_state))); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(slot_ctx, sizeof(*slot_ctx)); +#endif return 0; } @@ -291,6 +297,9 @@ static int xhci_endpoint_context_show(struct seq_file *s, void *unused) le32_to_cpu(ep_ctx->ep_info2), le64_to_cpu(ep_ctx->deq), le32_to_cpu(ep_ctx->tx_info))); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ep_ctx, sizeof(*ep_ctx)); +#endif } return 0; @@ -551,6 +560,9 @@ static int xhci_stream_context_array_show(struct seq_file *s, void *unused) else seq_printf(s, "%pad stream context entry not used deq %016llx\n", &dma, le64_to_cpu(stream_ctx->stream_ring)); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(dma,16); +#endif } return 0; diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index e9b18fc176172..dc9ddbaf5eefd 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c @@ -499,8 +499,15 @@ static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend) ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->out_ctx, i); /* Check ep is running, required by AMD SNPS 3.1 xHC */ - if (GET_EP_CTX_STATE(ep_ctx) != EP_STATE_RUNNING) + if (GET_EP_CTX_STATE(ep_ctx) != EP_STATE_RUNNING) { +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ep_ctx, sizeof(*ep_ctx)); 
+#endif continue; + } +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ep_ctx, sizeof(*ep_ctx)); +#endif command = xhci_alloc_command(xhci, false, GFP_NOWAIT); if (!command) { diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index f66815fe84822..82e06d75b88b4 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c @@ -44,6 +44,9 @@ static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci, kfree(seg); return NULL; } +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(dma, sizeof(union xhci_trb) * TRBS_PER_SEGMENT); +#endif if (max_packet) { seg->bounce_buf = kzalloc_node(max_packet, flags, @@ -56,8 +59,13 @@ static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci, } /* If the cycle state is 0, set the cycle bit to 1 for all the TRBs */ if (cycle_state == 0) { - for (i = 0; i < TRBS_PER_SEGMENT; i++) + for (i = 0; i < TRBS_PER_SEGMENT; i++) { seg->trbs[i].link.control |= cpu_to_le32(TRB_CYCLE); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(&seg->trbs[i], + sizeof(union xhci_trb)); +#endif + } } seg->dma = dma; seg->next = NULL; @@ -68,6 +76,9 @@ static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci, static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg) { if (seg->trbs) { +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(seg->trbs, sizeof(union xhci_trb)); +#endif dma_pool_free(xhci->segment_pool, seg->trbs, seg->dma); seg->trbs = NULL; } @@ -111,11 +122,19 @@ static void xhci_link_segments(struct xhci_segment *prev, /* Set the last TRB in the segment to have a TRB type ID of Link TRB */ val = le32_to_cpu(prev->trbs[TRBS_PER_SEGMENT-1].link.control); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(&prev->trbs[TRBS_PER_SEGMENT - 1], + sizeof(union xhci_trb)); +#endif val &= ~TRB_TYPE_BITMASK; val |= TRB_TYPE(TRB_LINK); if (chain_links) val |= TRB_CHAIN; prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(&prev->trbs[TRBS_PER_SEGMENT - 1], + sizeof(union xhci_trb)); +#endif } } @@ -149,7 +168,15 @@ static void xhci_link_rings(struct xhci_hcd *xhci, struct xhci_ring *ring, &= ~cpu_to_le32(LINK_TOGGLE); last->trbs[TRBS_PER_SEGMENT-1].link.control |= cpu_to_le32(LINK_TOGGLE); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(&ring->last_seg->trbs[TRBS_PER_SEGMENT - 1], + sizeof(union xhci_trb)); +#endif ring->last_seg = last; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(&last->trbs[TRBS_PER_SEGMENT - 1], + sizeof(union xhci_trb)); +#endif } } @@ -265,6 +292,10 @@ static void xhci_remove_stream_mapping(struct xhci_ring *ring) seg = ring->first_seg; do { +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(seg->dma, + sizeof(union xhci_trb) * TRBS_PER_SEGMENT); +#endif xhci_remove_segment_mapping(ring->trb_address_map, seg); seg = seg->next; } while (seg != ring->first_seg); @@ -398,6 +429,10 @@ struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci, /* See section 4.9.2.1 and 6.4.4.1 */ ring->last_seg->trbs[TRBS_PER_SEGMENT - 1].link.control |= cpu_to_le32(LINK_TOGGLE); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(&ring->last_seg->trbs[TRBS_PER_SEGMENT - 1], + sizeof(union xhci_trb)); +#endif } xhci_initialize_ring_info(ring, cycle_state); trace_xhci_ring_alloc(ring); @@ -489,6 +524,9 @@ struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci, kfree(ctx); return 
NULL; } +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(ctx->dma, ctx->size); +#endif return ctx; } @@ -645,6 +683,10 @@ struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci, goto cleanup_ctx; memset(stream_info->stream_ctx_array, 0, sizeof(struct xhci_stream_ctx)*num_stream_ctxs); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(stream_info->ctx_array_dma, + sizeof(struct xhci_stream_ctx) * num_stream_ctxs); +#endif /* Allocate everything needed to free the stream rings later */ stream_info->free_streams_command = @@ -674,6 +716,10 @@ struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci, cur_ring->cycle_state; stream_info->stream_ctx_array[cur_stream].stream_ring = cpu_to_le64(addr); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(&stream_info->stream_ctx_array[cur_stream], + sizeof(struct xhci_stream_ctx)); +#endif xhci_dbg(xhci, "Setting stream %d ring ptr to 0x%08llx\n", cur_stream, (unsigned long long) addr); @@ -731,6 +777,9 @@ void xhci_setup_streams_ep_input_ctx(struct xhci_hcd *xhci, ep_ctx->ep_info |= cpu_to_le32(EP_MAXPSTREAMS(max_primary_streams) | EP_HAS_LSA); ep_ctx->deq = cpu_to_le64(stream_info->ctx_array_dma); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ep_ctx, sizeof(*ep_ctx)); +#endif } /* @@ -745,6 +794,9 @@ void xhci_setup_no_streams_ep_input_ctx(struct xhci_ep_ctx *ep_ctx, ep_ctx->ep_info &= cpu_to_le32(~(EP_MAXPSTREAMS_MASK | EP_HAS_LSA)); addr = xhci_trb_virt_to_dma(ep->ring->deq_seg, ep->ring->dequeue); ep_ctx->deq = cpu_to_le64(addr | ep->ring->cycle_state); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ep_ctx,sizeof(*ep_ctx)); +#endif } /* Frees all stream contexts associated with the endpoint, @@ -1011,12 +1063,19 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, dev->udev = udev; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(dev->out_ctx->dma, dev->out_ctx->size); +#endif /* Point to output device context in dcbaa. 
*/ xhci->dcbaa->dev_context_ptrs[slot_id] = cpu_to_le64(dev->out_ctx->dma); xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n", slot_id, &xhci->dcbaa->dev_context_ptrs[slot_id], le64_to_cpu(xhci->dcbaa->dev_context_ptrs[slot_id])); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(xhci->dcbaa->dma, + sizeof(struct xhci_device_context_array)); +#endif trace_xhci_alloc_virt_device(dev); @@ -1054,6 +1113,9 @@ void xhci_copy_ep0_dequeue_into_input_ctx(struct xhci_hcd *xhci, ep0_ctx->deq = cpu_to_le64(xhci_trb_virt_to_dma(ep_ring->enq_seg, ep_ring->enqueue) | ep_ring->cycle_state); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ep0_ctx, sizeof(*ep0_ctx)); +#endif } /* @@ -1106,6 +1168,9 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud /* 3) Only the control endpoint is valid - one endpoint context */ slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1) | udev->route); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(slot_ctx,sizeof(*slot_ctx)); +#endif switch (udev->speed) { case USB_SPEED_SUPER_PLUS: slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SSP); @@ -1136,10 +1201,16 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud return -EINVAL; } /* Find the root hub port this device is under */ +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(slot_ctx, sizeof(*slot_ctx)); +#endif port_num = xhci_find_real_port_number(xhci, udev); if (!port_num) return -EINVAL; slot_ctx->dev_info2 |= cpu_to_le32(ROOT_HUB_PORT(port_num)); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(slot_ctx, sizeof(*slot_ctx)); +#endif /* Set the port number in the virtual_device to the faked port number */ for (top_dev = udev; top_dev->parent && top_dev->parent->parent; top_dev = top_dev->parent) @@ -1185,6 +1256,9 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud (udev->ttport << 8)); if (udev->tt->multi) slot_ctx->dev_info |= cpu_to_le32(DEV_MTT); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(slot_ctx, sizeof(*slot_ctx)); +#endif } xhci_dbg(xhci, "udev->tt = %p\n", udev->tt); xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport); @@ -1199,6 +1273,9 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud ep0_ctx->deq = cpu_to_le64(dev->eps[0].ring->first_seg->dma | dev->eps[0].ring->cycle_state); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ep0_ctx, sizeof(*ep0_ctx)); +#endif trace_xhci_setup_addressable_virt_device(dev); @@ -1508,6 +1585,9 @@ int xhci_endpoint_init(struct xhci_hcd *xhci, ep_ctx->tx_info = cpu_to_le32(EP_MAX_ESIT_PAYLOAD_LO(max_esit_payload) | EP_AVG_TRB_LENGTH(avg_trb_len)); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ep_ctx, sizeof(*ep_ctx)); +#endif return 0; } @@ -1529,6 +1609,9 @@ void xhci_endpoint_zero(struct xhci_hcd *xhci, /* Don't free the endpoint ring until the set interface or configuration * request succeeds. */ +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ep_ctx, sizeof(*ep_ctx)); +#endif } void xhci_clear_endpoint_bw_info(struct xhci_bw_info *bw_info) @@ -1560,14 +1643,29 @@ void xhci_update_bw_info(struct xhci_hcd *xhci, * set in the first place. 
*/ if (!EP_IS_ADDED(ctrl_ctx, i) && EP_IS_DROPPED(ctrl_ctx, i)) { +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl_ctx, + sizeof(struct xhci_input_control_ctx)); +#endif /* Dropped endpoint */ xhci_clear_endpoint_bw_info(bw_info); continue; } +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl_ctx, + sizeof(struct xhci_input_control_ctx)); +#endif if (EP_IS_ADDED(ctrl_ctx, i)) { +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl_ctx, + sizeof(struct xhci_input_control_ctx)); +#endif ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, i); ep_type = CTX_TO_EP_TYPE(le32_to_cpu(ep_ctx->ep_info2)); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ep_ctx, sizeof(*ep_ctx)); +#endif /* Ignore non-periodic endpoints */ if (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP && @@ -1591,7 +1689,14 @@ void xhci_update_bw_info(struct xhci_hcd *xhci, bw_info->type = ep_type; bw_info->max_esit_payload = CTX_TO_MAX_ESIT_PAYLOAD( le32_to_cpu(ep_ctx->tx_info)); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ep_ctx, sizeof(*ep_ctx)); +#endif } +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + else + cdns_virt_flush_dcache(ctrl_ctx, sizeof(struct xhci_input_control_ctx)); +#endif } } @@ -1618,6 +1723,10 @@ void xhci_endpoint_copy(struct xhci_hcd *xhci, in_ep_ctx->reserved[0] = out_ep_ctx->reserved[0]; in_ep_ctx->reserved[1] = out_ep_ctx->reserved[1]; } +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(in_ep_ctx, sizeof(*in_ep_ctx)); + cdns_virt_flush_dcache(out_ep_ctx, sizeof(*out_ep_ctx)); +#endif } /* Copy output xhci_slot_ctx to the input xhci_slot_ctx. @@ -1639,6 +1748,10 @@ void xhci_slot_copy(struct xhci_hcd *xhci, in_slot_ctx->dev_info2 = out_slot_ctx->dev_info2; in_slot_ctx->tt_info = out_slot_ctx->tt_info; in_slot_ctx->dev_state = out_slot_ctx->dev_state; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(in_slot_ctx, sizeof(*in_slot_ctx)); + cdns_virt_flush_dcache(out_slot_ctx, sizeof(*out_slot_ctx)); +#endif } /* Set up the scratchpad buffer array and scratchpad buffers, if needed. 
*/ @@ -1664,6 +1777,9 @@ static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags) &xhci->scratchpad->sp_dma, flags); if (!xhci->scratchpad->sp_array) goto fail_sp2; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(xhci->scratchpad->sp_dma, num_sp * sizeof(u64)); +#endif xhci->scratchpad->sp_buffers = kcalloc_node(num_sp, sizeof(void *), flags, dev_to_node(dev)); @@ -1680,7 +1796,13 @@ static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags) xhci->scratchpad->sp_array[i] = dma; xhci->scratchpad->sp_buffers[i] = buf; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(dma, xhci->page_size); +#endif } +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(xhci->scratchpad->sp_dma, num_sp * sizeof(u64)); +#endif return 0; @@ -1804,6 +1926,9 @@ int xhci_alloc_erst(struct xhci_hcd *xhci, size, &erst->erst_dma_addr, flags); if (!erst->entries) return -ENOMEM; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(erst->erst_dma_addr, size); +#endif erst->num_entries = evt_ring->num_segs; @@ -1815,6 +1940,9 @@ int xhci_alloc_erst(struct xhci_hcd *xhci, entry->rsvd = 0; seg = seg->next; } +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(erst->erst_dma_addr, size); +#endif return 0; } @@ -2109,6 +2237,9 @@ static void xhci_set_hc_event_deq(struct xhci_hcd *xhci) xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Write event ring dequeue pointer, " "preserving EHB bit"); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(deq, sizeof(union xhci_trb)); +#endif xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp, &xhci->ir_set->erst_dequeue); } @@ -2432,6 +2563,9 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) if (!xhci->dcbaa) goto fail; xhci->dcbaa->dma = dma; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(xhci->dcbaa->dma, sizeof(*xhci->dcbaa)); +#endif xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Device context base array address = 0x%llx (DMA), %p (virt)", (unsigned long long)xhci->dcbaa->dma, xhci->dcbaa); @@ -2540,6 +2674,11 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base); val_64 &= ERST_PTR_MASK; val_64 |= (xhci->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(xhci->erst.erst_dma_addr, + xhci->event_ring->num_segs * + sizeof(struct xhci_erst_entry)); +#endif xhci_write_64(xhci, val_64, &xhci->ir_set->erst_base); /* Set the event ring dequeue address */ diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 6acd2329e08d4..709c14ce479e5 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -82,12 +82,26 @@ dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg, static bool trb_is_noop(union xhci_trb *trb) { +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + bool ret; + ret = TRB_TYPE_NOOP_LE32(trb->generic.field[3]); + cdns_virt_flush_dcache(trb, sizeof(union xhci_trb)); + return ret; +#else return TRB_TYPE_NOOP_LE32(trb->generic.field[3]); +#endif } static bool trb_is_link(union xhci_trb *trb) { +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + bool ret; + ret = TRB_TYPE_LINK_LE32(trb->link.control); + cdns_virt_flush_dcache(trb, sizeof(union xhci_trb)); + return ret; +#else return TRB_TYPE_LINK_LE32(trb->link.control); +#endif } static bool last_trb_on_seg(struct xhci_segment *seg, union xhci_trb *trb) @@ -103,7 +117,14 @@ static bool last_trb_on_ring(struct xhci_ring *ring, static bool link_trb_toggles_cycle(union xhci_trb *trb) { +#ifdef 
CONFIG_USB_CDNS3_HOST_FLUSH_DMA + bool ret; + ret = le32_to_cpu(trb->link.control) & LINK_TOGGLE; + cdns_virt_flush_dcache(trb, sizeof(union xhci_trb)); + return ret; +#else return le32_to_cpu(trb->link.control) & LINK_TOGGLE; +#endif } static bool last_td_in_urb(struct xhci_td *td) @@ -133,6 +154,9 @@ static void trb_to_noop(union xhci_trb *trb, u32 noop_type) trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE); trb->generic.field[3] |= cpu_to_le32(TRB_TYPE(noop_type)); } +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(trb, sizeof(union xhci_trb)); +#endif } /* Updates trb to point to the next TRB in the ring, and updates seg if the next @@ -224,6 +248,9 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring, /* If this is not event ring, there is one less usable TRB */ if (!trb_is_link(ring->enqueue)) ring->num_trbs_free--; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ring->enqueue, sizeof(union xhci_trb)); +#endif if (last_trb_on_seg(ring->enq_seg, ring->enqueue)) { xhci_err(xhci, "Tried to move enqueue past ring segment\n"); @@ -255,6 +282,9 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring, next->link.control &= cpu_to_le32(~TRB_CHAIN); next->link.control |= cpu_to_le32(chain); } +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(next,sizeof(union xhci_trb)); +#endif /* Give this link TRB to the hardware */ wmb(); next->link.control ^= cpu_to_le32(TRB_CYCLE); @@ -262,6 +292,9 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring, /* Toggle the cycle bit after the last ring segment. */ if (link_trb_toggles_cycle(next)) ring->cycle_state ^= 1; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(next,sizeof(union xhci_trb)); +#endif ring->enq_seg = ring->enq_seg->next; ring->enqueue = ring->enq_seg->trbs; @@ -539,15 +572,30 @@ static u64 xhci_get_hw_deq(struct xhci_hcd *xhci, struct xhci_virt_device *vdev, struct xhci_ep_ctx *ep_ctx; struct xhci_stream_ctx *st_ctx; struct xhci_virt_ep *ep; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + u64 ret; +#endif ep = &vdev->eps[ep_index]; if (ep->ep_state & EP_HAS_STREAMS) { st_ctx = &ep->stream_info->stream_ctx_array[stream_id]; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + ret = le64_to_cpu(st_ctx->stream_ring); + cdns_virt_flush_dcache(st_ctx, sizeof(*st_ctx)); + return ret; +#else return le64_to_cpu(st_ctx->stream_ring); +#endif } ep_ctx = xhci_get_ep_ctx(xhci, vdev->out_ctx, ep_index); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + ret = le64_to_cpu(ep_ctx->deq); + cdns_virt_flush_dcache(ep_ctx, sizeof(*ep_ctx)); + return ret; +#else return le64_to_cpu(ep_ctx->deq); +#endif } static int xhci_move_dequeue_past_td(struct xhci_hcd *xhci, @@ -694,8 +742,12 @@ static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring, trb_to_noop(trb, TRB_TR_NOOP); /* flip cycle if asked to */ - if (flip_cycle && trb != td->first_trb && trb != td->last_trb) + if (flip_cycle && trb != td->first_trb && trb != td->last_trb) { trb->generic.field[3] ^= cpu_to_le32(TRB_CYCLE); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(trb, sizeof(union xhci_trb)); +#endif + } if (trb == td->last_trb) break; @@ -748,17 +800,26 @@ static void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci, return; if (usb_urb_dir_out(urb)) { +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(seg->bounce_dma, ring->bounce_buf_len); +#endif dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len, DMA_TO_DEVICE); return; } +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + 
cdns_flush_dcache(seg->bounce_dma, ring->bounce_buf_len); +#endif dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len, DMA_FROM_DEVICE); /* for in tranfers we need to copy the data from bounce to sg */ if (urb->num_sgs) { len = sg_pcopy_from_buffer(urb->sg, urb->num_sgs, seg->bounce_buf, seg->bounce_len, seg->bounce_offs); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(seg->bounce_dma, ring->bounce_buf_len); +#endif if (len != seg->bounce_len) xhci_warn(xhci, "WARN Wrong bounce buffer read length: %zu != %d\n", len, seg->bounce_len); @@ -1019,6 +1080,9 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id, int err; if (unlikely(TRB_TO_SUSPEND_PORT(le32_to_cpu(trb->generic.field[3])))) { +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(trb, sizeof(union xhci_trb)); +#endif if (!xhci->devs[slot_id]) xhci_warn(xhci, "Stop endpoint command completion for disabled slot %u\n", slot_id); @@ -1026,6 +1090,9 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id, } ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3])); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(trb, sizeof(union xhci_trb)); +#endif ep = xhci_get_virt_ep(xhci, slot_id, ep_index); if (!ep) return; @@ -1033,6 +1100,9 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id, ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep_index); trace_xhci_handle_cmd_stop_ep(ep_ctx); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ep_ctx, sizeof(*ep_ctx)); +#endif if (comp_code == COMP_CONTEXT_STATE_ERROR) { /* @@ -1309,6 +1379,9 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id, ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3])); stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2])); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(trb, sizeof(union xhci_trb)); +#endif ep = xhci_get_virt_ep(xhci, slot_id, ep_index); if (!ep) return; @@ -1325,6 +1398,10 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id, slot_ctx = xhci_get_slot_ctx(xhci, ep->vdev->out_ctx); trace_xhci_handle_cmd_set_deq(slot_ctx); trace_xhci_handle_cmd_set_deq_ep(ep_ctx); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(slot_ctx, sizeof(*slot_ctx)); + cdns_virt_flush_dcache(ep_ctx, sizeof(*ep_ctx)); +#endif if (cmd_comp_code != COMP_SUCCESS) { unsigned int ep_state; @@ -1339,6 +1416,10 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id, ep_state = GET_EP_CTX_STATE(ep_ctx); slot_state = le32_to_cpu(slot_ctx->dev_state); slot_state = GET_SLOT_STATE(slot_state); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ep_ctx, sizeof(*ep_ctx)); + cdns_virt_flush_dcache(slot_ctx, sizeof(*slot_ctx)); +#endif xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, "Slot state = %u, EP state = %u", slot_state, ep_state); @@ -1365,8 +1446,14 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id, struct xhci_stream_ctx *ctx = &ep->stream_info->stream_ctx_array[stream_id]; deq = le64_to_cpu(ctx->stream_ring) & SCTX_DEQ_MASK; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctx, sizeof(*ctx)); +#endif } else { deq = le64_to_cpu(ep_ctx->deq) & ~EP_CTX_CYCLE_MASK; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ep_ctx, sizeof(*ep_ctx)); +#endif } xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, "Successful Set TR Deq Ptr cmd, deq = @%08llx", deq); @@ -1408,12 +1495,18 @@ static 
void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id, unsigned int ep_index; ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3])); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(trb, sizeof(union xhci_trb)); +#endif ep = xhci_get_virt_ep(xhci, slot_id, ep_index); if (!ep) return; ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep_index); trace_xhci_handle_cmd_reset_ep(ep_ctx); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ep_ctx, sizeof(*ep_ctx)); +#endif /* This command will only fail if the endpoint wasn't halted, * but we don't care. @@ -1432,8 +1525,16 @@ static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id, xhci_giveback_invalidated_tds(ep); /* if this was a soft reset, then restart */ - if ((le32_to_cpu(trb->generic.field[3])) & TRB_TSP) + if ((le32_to_cpu(trb->generic.field[3])) & TRB_TSP) { +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(trb, sizeof(union xhci_trb)); +#endif ring_doorbell_for_active_rings(xhci, slot_id, ep_index); + } +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + else + cdns_virt_flush_dcache(trb, sizeof(union xhci_trb)); +#endif } static void xhci_handle_cmd_enable_slot(struct xhci_hcd *xhci, int slot_id, @@ -1456,6 +1557,9 @@ static void xhci_handle_cmd_disable_slot(struct xhci_hcd *xhci, int slot_id) slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); trace_xhci_handle_cmd_disable_slot(slot_ctx); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(slot_ctx, sizeof(*slot_ctx)); +#endif if (xhci->quirks & XHCI_EP_LIMIT_QUIRK) /* Delete default control endpoint resources */ @@ -1492,11 +1596,17 @@ static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id, add_flags = le32_to_cpu(ctrl_ctx->add_flags); drop_flags = le32_to_cpu(ctrl_ctx->drop_flags); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl_ctx, sizeof(*ctrl_ctx)); +#endif /* Input ctx add_flags are the endpoint index plus one */ ep_index = xhci_last_valid_endpoint(add_flags) - 1; ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->out_ctx, ep_index); trace_xhci_handle_cmd_config_ep(ep_ctx); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ep_ctx, sizeof(*ep_ctx)); +#endif /* A usb_set_interface() call directly after clearing a halted * condition may race on this quirky hardware. 
Not worth @@ -1532,6 +1642,9 @@ static void xhci_handle_cmd_addr_dev(struct xhci_hcd *xhci, int slot_id) return; slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx); trace_xhci_handle_cmd_addr_dev(slot_ctx); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(slot_ctx, sizeof(*slot_ctx)); +#endif } static void xhci_handle_cmd_reset_dev(struct xhci_hcd *xhci, int slot_id) @@ -1547,6 +1660,9 @@ static void xhci_handle_cmd_reset_dev(struct xhci_hcd *xhci, int slot_id) } slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx); trace_xhci_handle_cmd_reset_dev(slot_ctx); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(slot_ctx, sizeof(*slot_ctx)); +#endif xhci_dbg(xhci, "Completed reset device command.\n"); } @@ -1562,6 +1678,9 @@ static void xhci_handle_cmd_nec_get_fw(struct xhci_hcd *xhci, "NEC firmware version %2x.%02x", NEC_FW_MAJOR(le32_to_cpu(event->status)), NEC_FW_MINOR(le32_to_cpu(event->status))); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(event, sizeof(struct xhci_event_cmd)); +#endif } static void xhci_complete_del_and_free_cmd(struct xhci_command *cmd, u32 status) @@ -1649,12 +1768,19 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, struct xhci_command *cmd; u32 cmd_type; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(event, sizeof(struct xhci_event_cmd)); +#endif + if (slot_id >= MAX_HC_SLOTS) { xhci_warn(xhci, "Invalid slot_id %u\n", slot_id); return; } cmd_dma = le64_to_cpu(event->cmd_trb); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(event, sizeof(struct xhci_event_cmd)); +#endif cmd_trb = xhci->cmd_ring->dequeue; trace_xhci_handle_command(xhci->cmd_ring, &cmd_trb->generic); @@ -1676,6 +1802,9 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, cancel_delayed_work(&xhci->cmd_timer); cmd_comp_code = GET_COMP_CODE(le32_to_cpu(event->status)); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(event, sizeof(struct xhci_event_cmd)); +#endif /* If CMD ring stopped we own the trbs between enqueue and dequeue */ if (cmd_comp_code == COMP_COMMAND_RING_STOPPED) { @@ -1705,6 +1834,9 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, } cmd_type = TRB_FIELD_TO_TYPE(le32_to_cpu(cmd_trb->generic.field[3])); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(cmd_trb, sizeof(union xhci_trb)); +#endif switch (cmd_type) { case TRB_ENABLE_SLOT: xhci_handle_cmd_enable_slot(xhci, slot_id, cmd, cmd_comp_code); @@ -1724,6 +1856,9 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, case TRB_STOP_RING: WARN_ON(slot_id != TRB_TO_SLOT_ID( le32_to_cpu(cmd_trb->generic.field[3]))); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(cmd_trb, sizeof(union xhci_trb)); +#endif if (!cmd->completion) xhci_handle_cmd_stop_ep(xhci, slot_id, cmd_trb, cmd_comp_code); @@ -1731,6 +1866,9 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, case TRB_SET_DEQ: WARN_ON(slot_id != TRB_TO_SLOT_ID( le32_to_cpu(cmd_trb->generic.field[3]))); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(cmd_trb, sizeof(union xhci_trb)); +#endif xhci_handle_cmd_set_deq(xhci, slot_id, cmd_trb, cmd_comp_code); break; case TRB_CMD_NOOP: @@ -1741,6 +1879,9 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, case TRB_RESET_EP: WARN_ON(slot_id != TRB_TO_SLOT_ID( le32_to_cpu(cmd_trb->generic.field[3]))); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(cmd_trb, sizeof(union xhci_trb)); +#endif xhci_handle_cmd_reset_ep(xhci, slot_id, cmd_trb, 
cmd_comp_code); break; case TRB_RESET_DEV: @@ -1749,6 +1890,9 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, */ slot_id = TRB_TO_SLOT_ID( le32_to_cpu(cmd_trb->generic.field[3])); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(cmd_trb, sizeof(union xhci_trb)); +#endif xhci_handle_cmd_reset_dev(xhci, slot_id); break; case TRB_NEC_GET_FW: @@ -1790,6 +1934,9 @@ static void handle_device_notification(struct xhci_hcd *xhci, struct usb_device *udev; slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->generic.field[3])); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(event, sizeof(union xhci_trb)); +#endif if (!xhci->devs[slot_id]) { xhci_warn(xhci, "Device Notification event for " "unused slot %u\n", slot_id); @@ -1846,12 +1993,19 @@ static void handle_port_status(struct xhci_hcd *xhci, struct xhci_port *port; /* Port status change events always have a successful completion code */ - if (GET_COMP_CODE(le32_to_cpu(event->generic.field[2])) != COMP_SUCCESS) + if (GET_COMP_CODE(le32_to_cpu(event->generic.field[2])) != COMP_SUCCESS) { +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(event, sizeof(union xhci_trb)); +#endif xhci_warn(xhci, "WARN: xHC returned failed port status event\n"); + } port_id = GET_PORT_ID(le32_to_cpu(event->generic.field[0])); max_ports = HCS_MAX_PORTS(xhci->hcs_params1); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(event, sizeof(union xhci_trb)); +#endif if ((port_id <= 0) || (port_id > max_ports)) { xhci_warn(xhci, "Port change event with invalid port ID %d\n", @@ -2107,15 +2261,24 @@ static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci, /* TRB completion codes that may require a manual halt cleanup */ if (trb_comp_code == COMP_USB_TRANSACTION_ERROR || trb_comp_code == COMP_BABBLE_DETECTED_ERROR || - trb_comp_code == COMP_SPLIT_TRANSACTION_ERROR) + trb_comp_code == COMP_SPLIT_TRANSACTION_ERROR) { /* The 0.95 spec says a babbling control endpoint * is not halted. The 0.96 spec says it is. Some HW * claims to be 0.95 compliant, but it halts the control * endpoint anyway. Check if a babble halted the * endpoint. 
*/ - if (GET_EP_CTX_STATE(ep_ctx) == EP_STATE_HALTED) + if (GET_EP_CTX_STATE(ep_ctx) == EP_STATE_HALTED) { +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ep_ctx, sizeof(*ep_ctx)); +#endif return 1; + } +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + else + cdns_virt_flush_dcache(ep_ctx, sizeof(*ep_ctx)); +#endif + } return 0; } @@ -2229,6 +2392,9 @@ static int sum_trb_lengths(struct xhci_hcd *xhci, struct xhci_ring *ring, for (sum = 0; trb != stop_trb; next_trb(xhci, ring, &seg, &trb)) { if (!trb_is_noop(trb) && !trb_is_link(trb)) sum += TRB_LEN(le32_to_cpu(trb->generic.field[2])); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(trb, sizeof(union xhci_trb)); +#endif } return sum; } @@ -2246,10 +2412,16 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, u32 trb_type; trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(ep_trb->generic.field[3])); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ep_trb, sizeof(union xhci_trb)); +#endif ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep->ep_index); trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); requested = td->urb->transfer_buffer_length; remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(event, sizeof(union xhci_trb)); +#endif switch (trb_comp_code) { case COMP_SUCCESS: @@ -2351,6 +2523,10 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, requested = frame->length; remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); ep_trb_len = TRB_LEN(le32_to_cpu(ep_trb->generic.field[2])); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(event, sizeof(union xhci_trb)); + cdns_virt_flush_dcache(ep_trb, sizeof(union xhci_trb)); +#endif short_framestatus = td->urb->transfer_flags & URB_SHORT_NOT_OK ? 
-EREMOTEIO : 0; @@ -2452,9 +2628,16 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, u32 remaining, requested, ep_trb_len; slot_ctx = xhci_get_slot_ctx(xhci, ep->vdev->out_ctx); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(event, sizeof(union xhci_trb)); +#endif trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); ep_trb_len = TRB_LEN(le32_to_cpu(ep_trb->generic.field[2])); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(event, sizeof(union xhci_trb)); + cdns_virt_flush_dcache(ep_trb, sizeof(union xhci_trb)); +#endif requested = td->urb->transfer_buffer_length; switch (trb_comp_code) { @@ -2486,8 +2669,15 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, case COMP_USB_TRANSACTION_ERROR: if (xhci->quirks & XHCI_NO_SOFT_RETRY || (ep_ring->err_count++ > MAX_SOFT_RETRY) || - le32_to_cpu(slot_ctx->tt_info) & TT_SLOT) + le32_to_cpu(slot_ctx->tt_info) & TT_SLOT) { +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(slot_ctx, sizeof(*slot_ctx)); +#endif break; + } +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(slot_ctx, sizeof(*slot_ctx)); +#endif td->status = 0; @@ -2542,6 +2732,9 @@ static int handle_tx_event(struct xhci_hcd *xhci, ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1; trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); ep_trb_dma = le64_to_cpu(event->buffer); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(event, sizeof(union xhci_trb)); +#endif ep = xhci_get_virt_ep(xhci, slot_id, ep_index); if (!ep) { @@ -2556,8 +2749,14 @@ static int handle_tx_event(struct xhci_hcd *xhci, xhci_err(xhci, "ERROR Transfer event for disabled endpoint slot %u ep %u\n", slot_id, ep_index); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ep_ctx, sizeof(*ep_ctx)); +#endif goto err_out; } +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ep_ctx, sizeof(*ep_ctx)); +#endif /* Some transfer events don't always point to a trb, see xhci 4.17.4 */ if (!ep_ring) { @@ -2592,8 +2791,16 @@ static int handle_tx_event(struct xhci_hcd *xhci, * transfer type */ case COMP_SUCCESS: - if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0) + if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0) { +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(event, sizeof(union xhci_trb)); +#endif break; + } +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + else + cdns_virt_flush_dcache(event, sizeof(union xhci_trb)); +#endif if (xhci->quirks & XHCI_TRUST_TX_LENGTH || ep_ring->last_td_was_short) trb_comp_code = COMP_SHORT_PACKET; @@ -2671,19 +2878,27 @@ static int handle_tx_event(struct xhci_hcd *xhci, * Underrun Event for OUT Isoch endpoint. 
*/ xhci_dbg(xhci, "underrun event on endpoint\n"); - if (!list_empty(&ep_ring->td_list)) + if (!list_empty(&ep_ring->td_list)) { xhci_dbg(xhci, "Underrun Event for slot %d ep %d " "still with TDs queued?\n", TRB_TO_SLOT_ID(le32_to_cpu(event->flags)), ep_index); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(event, sizeof(union xhci_trb)); +#endif + } goto cleanup; case COMP_RING_OVERRUN: xhci_dbg(xhci, "overrun event on endpoint\n"); - if (!list_empty(&ep_ring->td_list)) + if (!list_empty(&ep_ring->td_list)) { xhci_dbg(xhci, "Overrun Event for slot %d ep %d " "still with TDs queued?\n", TRB_TO_SLOT_ID(le32_to_cpu(event->flags)), ep_index); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(event, sizeof(union xhci_trb)); +#endif + } goto cleanup; case COMP_MISSED_SERVICE_ERROR: /* @@ -2741,6 +2956,9 @@ static int handle_tx_event(struct xhci_hcd *xhci, xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n", TRB_TO_SLOT_ID(le32_to_cpu(event->flags)), ep_index); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(event, sizeof(union xhci_trb)); +#endif } if (ep->skip) { ep->skip = false; @@ -2832,6 +3050,9 @@ static int handle_tx_event(struct xhci_hcd *xhci, trace_xhci_handle_transfer(ep_ring, (struct xhci_generic_trb *) ep_trb); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ep_trb, sizeof(union xhci_trb)); +#endif /* * No-op TRB could trigger interrupts in a case where @@ -2916,9 +3137,16 @@ static int xhci_handle_event(struct xhci_hcd *xhci) event = xhci->event_ring->dequeue; /* Does the HC or OS own the TRB? */ if ((le32_to_cpu(event->event_cmd.flags) & TRB_CYCLE) != - xhci->event_ring->cycle_state) + xhci->event_ring->cycle_state) { +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(event, sizeof(union xhci_trb)); +#endif return 0; - + } +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + else + cdns_virt_flush_dcache(event, sizeof(union xhci_trb)); +#endif trace_xhci_handle_event(xhci->event_ring, &event->generic); /* @@ -2927,6 +3155,9 @@ static int xhci_handle_event(struct xhci_hcd *xhci) */ rmb(); trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(event->event_cmd.flags)); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(event, sizeof(union xhci_trb)); +#endif /* FIXME: Handle more event types. 
*/ switch (trb_type) { @@ -2999,6 +3230,9 @@ static void xhci_update_erst_dequeue(struct xhci_hcd *xhci, /* Update HC event ring dequeue pointer */ temp_64 &= ERST_PTR_MASK; temp_64 |= ((u64) deq & (u64) ~ERST_PTR_MASK); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(deq, sizeof(union xhci_trb)); +#endif } /* Clear the event handler busy flag (RW1C) */ @@ -3115,8 +3349,14 @@ static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring, /* make sure TRB is fully written before giving it to the controller */ wmb(); trb->field[3] = cpu_to_le32(field4); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(trb, sizeof(union xhci_trb)); +#endif trace_xhci_queue_trb(ring, trb); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(trb, sizeof(union xhci_trb)); +#endif inc_enq(xhci, ring, more_trbs_coming); } @@ -3191,10 +3431,16 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring, else ep_ring->enqueue->link.control |= cpu_to_le32(TRB_CHAIN); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(&ep_ring->enqueue->link, sizeof(union xhci_trb)); +#endif wmb(); ep_ring->enqueue->link.control ^= cpu_to_le32(TRB_CYCLE); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(&ep_ring->enqueue->link, sizeof(union xhci_trb)); +#endif /* Toggle the cycle bit after the last ring segment. */ if (link_trb_toggles_cycle(ep_ring->enqueue)) ep_ring->cycle_state ^= 1; @@ -3231,6 +3477,9 @@ static int prepare_transfer(struct xhci_hcd *xhci, struct xhci_td *td; struct xhci_ring *ep_ring; struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + u32 ep_state; +#endif ep_ring = xhci_triad_to_transfer_ring(xhci, xdev->slot_id, ep_index, stream_id); @@ -3240,7 +3489,13 @@ static int prepare_transfer(struct xhci_hcd *xhci, return -EINVAL; } +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + ep_state = GET_EP_CTX_STATE(ep_ctx); + cdns_virt_flush_dcache(ep_ctx, sizeof(*ep_ctx)); + ret = prepare_ring(xhci, ep_ring, ep_state, +#else ret = prepare_ring(xhci, ep_ring, GET_EP_CTX_STATE(ep_ctx), +#endif num_trbs, mem_flags); if (ret) return ret; @@ -3337,6 +3592,9 @@ static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id, start_trb->field[3] |= cpu_to_le32(start_cycle); else start_trb->field[3] &= cpu_to_le32(~TRB_CYCLE); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(start_trb, sizeof(union xhci_trb)); +#endif xhci_ring_ep_doorbell(xhci, slot_id, ep_index, stream_id); } @@ -3347,6 +3605,9 @@ static void check_interval(struct xhci_hcd *xhci, struct urb *urb, int ep_interval; xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info)); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ep_ctx, sizeof(*ep_ctx)); +#endif ep_interval = urb->interval; /* Convert to microframes */ @@ -3484,6 +3745,9 @@ static int xhci_align_td(struct xhci_hcd *xhci, struct urb *urb, u32 enqd_len, seg->bounce_dma = dma_map_single(dev, seg->bounce_buf, max_pkt, DMA_TO_DEVICE); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(seg->bounce_buf, new_buff_len); +#endif } else { seg->bounce_dma = dma_map_single(dev, seg->bounce_buf, max_pkt, DMA_FROM_DEVICE); @@ -3494,6 +3758,9 @@ static int xhci_align_td(struct xhci_hcd *xhci, struct urb *urb, u32 enqd_len, xhci_warn(xhci, "Failed mapping bounce buffer, not aligning\n"); return 0; } +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(seg->bounce_dma, max_pkt); +#endif *trb_buff_len = 
new_buff_len; seg->bounce_len = new_buff_len; seg->bounce_offs = enqd_len; @@ -3539,6 +3806,9 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, addr = (u64) urb->transfer_dma; block_len = full_len; } +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(addr, block_len); +#endif ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index, urb->stream_id, num_trbs, urb, 0, mem_flags); @@ -3608,6 +3878,10 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, trb_buff_len); le64_to_cpus(&send_addr); field |= TRB_IDT; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(urb->transfer_buffer, trb_buff_len); + cdns_flush_dcache(send_addr, trb_buff_len); +#endif } } @@ -3622,6 +3896,9 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, length_field = TRB_LEN(trb_buff_len) | TRB_TD_SIZE(remainder) | TRB_INTR_TARGET(0); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(send_addr, trb_buff_len); +#endif queue_trb(xhci, ring, more_trbs_coming | need_zero_pkt, lower_32_bits(send_addr), @@ -3731,6 +4008,9 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, field |= TRB_TX_TYPE(TRB_DATA_IN); else field |= TRB_TX_TYPE(TRB_DATA_OUT); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(setup, sizeof(struct usb_ctrlrequest)); +#endif } } @@ -3740,6 +4020,9 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, TRB_LEN(8) | TRB_INTR_TARGET(0), /* Immediate data in pointer */ field); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(setup, sizeof(struct usb_ctrlrequest)); +#endif /* If there's data, queue data TRBs */ /* Only set interrupt on short packet for IN endpoints */ @@ -3757,6 +4040,12 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, urb->transfer_buffer_length); le64_to_cpus(&addr); field |= TRB_IDT; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(urb->transfer_buffer, + urb->transfer_buffer_length); + cdns_flush_dcache(addr, + urb->transfer_buffer_length); +#endif } else { addr = (u64) urb->transfer_dma; } @@ -3770,6 +4059,10 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, TRB_INTR_TARGET(0); if (setup->bRequestType & USB_DIR_IN) field |= TRB_DIR_IN; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(setup, sizeof(struct usb_ctrlrequest)); + cdns_flush_dcache(addr, urb->transfer_buffer_length); +#endif queue_trb(xhci, ep_ring, true, lower_32_bits(addr), upper_32_bits(addr), @@ -3787,6 +4080,9 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, field = 0; else field = TRB_DIR_IN; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(setup, sizeof(struct usb_ctrlrequest)); +#endif queue_trb(xhci, ep_ring, false, 0, 0, @@ -4091,7 +4387,9 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags, else length_field |= TRB_TD_SIZE(remainder); first_trb = false; - +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(addr, trb_buff_len); +#endif queue_trb(xhci, ep_ring, more_trbs_coming, lower_32_bits(addr), upper_32_bits(addr), @@ -4166,6 +4464,9 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags, int ret; struct xhci_virt_ep *xep; int ist; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + u32 ep_state; +#endif xdev = xhci->devs[slot_id]; xep = &xhci->devs[slot_id]->eps[ep_index]; @@ -4180,8 +4481,14 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags, /* Check the ring to guarantee there is enough room for the whole urb. 
* Do not insert any td of the urb to the ring if the check failed. */ +#if defined(CONFIG_USB_CDNS3_HOST_FLUSH_DMA) + ep_state = GET_EP_CTX_STATE(ep_ctx); + cdns_virt_flush_dcache(ep_ctx, sizeof(*ep_ctx)); + ret = prepare_ring(xhci, ep_ring, ep_state, num_trbs, mem_flags); +#else ret = prepare_ring(xhci, ep_ring, GET_EP_CTX_STATE(ep_ctx), num_trbs, mem_flags); +#endif if (ret) return ret; @@ -4194,9 +4501,15 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags, /* Calculate the start frame and put it in urb->start_frame. */ if (HCC_CFC(xhci->hcc_params) && !list_empty(&ep_ring->td_list)) { if (GET_EP_CTX_STATE(ep_ctx) == EP_STATE_RUNNING) { +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ep_ctx, sizeof(*ep_ctx)); +#endif urb->start_frame = xep->next_frame_id; goto skip_start_over; } +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ep_ctx, sizeof(*ep_ctx)); +#endif } start_frame = readl(&xhci->run_regs->microframe_index); @@ -4293,6 +4606,9 @@ int xhci_queue_slot_control(struct xhci_hcd *xhci, struct xhci_command *cmd, int xhci_queue_address_device(struct xhci_hcd *xhci, struct xhci_command *cmd, dma_addr_t in_ctx_ptr, u32 slot_id, enum xhci_setup_dev setup) { +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(in_ctx_ptr, sizeof(struct xhci_ep_ctx)); +#endif return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr), upper_32_bits(in_ctx_ptr), 0, TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id) @@ -4319,6 +4635,9 @@ int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, struct xhci_command *cmd, dma_addr_t in_ctx_ptr, u32 slot_id, bool command_must_succeed) { +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(in_ctx_ptr, sizeof(struct xhci_ep_ctx)); +#endif return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr), upper_32_bits(in_ctx_ptr), 0, TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id), @@ -4329,6 +4648,9 @@ int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, int xhci_queue_evaluate_context(struct xhci_hcd *xhci, struct xhci_command *cmd, dma_addr_t in_ctx_ptr, u32 slot_id, bool command_must_succeed) { +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(in_ctx_ptr, sizeof(struct xhci_ep_ctx)); +#endif return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr), upper_32_bits(in_ctx_ptr), 0, TRB_TYPE(TRB_EVAL_CONTEXT) | SLOT_ID_FOR_TRB(slot_id), diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 27283654ca080..b1c0f0c21e93a 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -860,6 +860,11 @@ static void xhci_clear_command_ring(struct xhci_hcd *xhci) sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1)); seg->trbs[TRBS_PER_SEGMENT - 1].link.control &= cpu_to_le32(~TRB_CYCLE); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(seg->trbs, + sizeof(union xhci_trb) * + TRBS_PER_SEGMENT); +#endif seg = seg->next; } while (seg != ring->deq_seg); @@ -1527,6 +1532,9 @@ static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id, out_ctx = xhci->devs[slot_id]->out_ctx; ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index); hw_max_packet_size = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2)); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ep_ctx, sizeof(*ep_ctx)); +#endif max_packet_size = usb_endpoint_maxp(&urb->dev->ep0.desc); if (hw_max_packet_size != max_packet_size) { xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, @@ -1566,8 +1574,14 @@ static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id, 
ep_ctx->ep_info2 &= cpu_to_le32(~MAX_PACKET_MASK); ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet_size)); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ep_ctx, sizeof(*ep_ctx)); +#endif ctrl_ctx->add_flags = cpu_to_le32(EP0_FLAG); ctrl_ctx->drop_flags = 0; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl_ctx, sizeof(*ctrl_ctx)); +#endif ret = xhci_configure_endpoint(xhci, urb->dev, command, true, false); @@ -1576,6 +1590,9 @@ static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id, * functions. */ ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl_ctx, sizeof(*ctrl_ctx)); +#endif command_cleanup: kfree(command->completion); kfree(command); @@ -1908,18 +1925,29 @@ int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev, if ((GET_EP_CTX_STATE(ep_ctx) == EP_STATE_DISABLED) || le32_to_cpu(ctrl_ctx->drop_flags) & xhci_get_endpoint_flag(&ep->desc)) { +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl_ctx, sizeof(*ctrl_ctx)); + cdns_virt_flush_dcache(ep_ctx, sizeof(*ep_ctx)); +#endif /* Do not warn when called after a usb_device_reset */ if (xhci->devs[udev->slot_id]->eps[ep_index].ring != NULL) xhci_warn(xhci, "xHCI %s called with disabled ep %p\n", __func__, ep); return 0; } +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl_ctx, sizeof(*ctrl_ctx)); + cdns_virt_flush_dcache(ep_ctx, sizeof(*ep_ctx)); +#endif ctrl_ctx->drop_flags |= cpu_to_le32(drop_flag); new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags); ctrl_ctx->add_flags &= cpu_to_le32(~drop_flag); new_add_flags = le32_to_cpu(ctrl_ctx->add_flags); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl_ctx, sizeof(*ctrl_ctx)); +#endif xhci_debugfs_remove_endpoint(xhci, xhci->devs[udev->slot_id], ep_index); @@ -1996,20 +2024,32 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, */ if (virt_dev->eps[ep_index].ring && !(le32_to_cpu(ctrl_ctx->drop_flags) & added_ctxs)) { +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl_ctx, sizeof(*ctrl_ctx)); +#endif xhci_warn(xhci, "Trying to add endpoint 0x%x " "without dropping it.\n", (unsigned int) ep->desc.bEndpointAddress); return -EINVAL; } +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl_ctx, sizeof(*ctrl_ctx)); +#endif /* If the HCD has already noted the endpoint is enabled, * ignore this request. */ if (le32_to_cpu(ctrl_ctx->add_flags) & added_ctxs) { +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl_ctx, sizeof(*ctrl_ctx)); +#endif xhci_warn(xhci, "xHCI %s called with enabled ep %p\n", __func__, ep); return 0; } +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl_ctx, sizeof(*ctrl_ctx)); +#endif /* * Configuration and alternate setting changes must be done in @@ -2032,12 +2072,18 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, * drop flags alone. 
*/ new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl_ctx, sizeof(*ctrl_ctx)); +#endif /* Store the usb_device pointer for later use */ ep->hcpriv = udev; ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index); trace_xhci_add_endpoint(ep_ctx); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ep_ctx, sizeof(*ep_ctx)); +#endif xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n", (unsigned int) ep->desc.bEndpointAddress, @@ -2069,16 +2115,25 @@ static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *vir */ ctrl_ctx->drop_flags = 0; ctrl_ctx->add_flags = 0; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl_ctx, sizeof(*ctrl_ctx)); +#endif slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK); /* Endpoint 0 is always valid */ slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1)); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(slot_ctx, sizeof(*slot_ctx)); +#endif for (i = 1; i < 31; i++) { ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i); ep_ctx->ep_info = 0; ep_ctx->ep_info2 = 0; ep_ctx->deq = 0; ep_ctx->tx_info = 0; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ep_ctx, sizeof(*ep_ctx)); +#endif } } @@ -2194,6 +2249,9 @@ static u32 xhci_count_num_new_endpoints(struct xhci_hcd *xhci, */ valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2; valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl_ctx, sizeof(*ctrl_ctx)); +#endif /* Use hweight32 to count the number of ones in the add flags, or * number of endpoints added. Don't count endpoints that are changed @@ -2211,6 +2269,9 @@ static unsigned int xhci_count_num_dropped_endpoints(struct xhci_hcd *xhci, valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2; valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl_ctx, sizeof(*ctrl_ctx)); +#endif return hweight32(valid_drop_flags) - hweight32(valid_add_flags & valid_drop_flags); @@ -2790,8 +2851,17 @@ static int xhci_reserve_bandwidth(struct xhci_hcd *xhci, } for (i = 0; i < 31; i++) { - if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i)) + if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i)) { +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl_ctx, + sizeof(struct xhci_input_control_ctx)); +#endif continue; + } +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl_ctx, + sizeof(struct xhci_input_control_ctx)); +#endif /* Make a copy of the BW info in case we need to revert this */ memcpy(&ep_bw_info[i], &virt_dev->eps[i].bw_info, @@ -2799,25 +2869,45 @@ static int xhci_reserve_bandwidth(struct xhci_hcd *xhci, /* Drop the endpoint from the interval table if the endpoint is * being dropped or changed. 
*/ - if (EP_IS_DROPPED(ctrl_ctx, i)) + if (EP_IS_DROPPED(ctrl_ctx, i)){ +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl_ctx, + sizeof(struct xhci_input_control_ctx)); +#endif xhci_drop_ep_from_interval_table(xhci, &virt_dev->eps[i].bw_info, virt_dev->bw_table, virt_dev->udev, &virt_dev->eps[i], virt_dev->tt_info); + } +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + else + cdns_virt_flush_dcache(ctrl_ctx, + sizeof(struct xhci_input_control_ctx)); +#endif } /* Overwrite the information stored in the endpoints' bw_info */ xhci_update_bw_info(xhci, virt_dev->in_ctx, ctrl_ctx, virt_dev); for (i = 0; i < 31; i++) { /* Add any changed or added endpoints to the interval table */ - if (EP_IS_ADDED(ctrl_ctx, i)) + if (EP_IS_ADDED(ctrl_ctx, i)){ +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl_ctx, + sizeof(struct xhci_input_control_ctx)); +#endif xhci_add_ep_to_interval_table(xhci, &virt_dev->eps[i].bw_info, virt_dev->bw_table, virt_dev->udev, &virt_dev->eps[i], virt_dev->tt_info); + } +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + else + cdns_virt_flush_dcache(ctrl_ctx, + sizeof(struct xhci_input_control_ctx)); +#endif } if (!xhci_check_bw_table(xhci, virt_dev, old_active_eps)) { @@ -2830,13 +2920,26 @@ static int xhci_reserve_bandwidth(struct xhci_hcd *xhci, /* We don't have enough bandwidth for this, revert the stored info. */ for (i = 0; i < 31; i++) { - if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i)) + if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i)) { +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl_ctx, + sizeof(struct xhci_input_control_ctx)); +#endif continue; + } +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl_ctx, + sizeof(struct xhci_input_control_ctx)); +#endif /* Drop the new copies of any added or changed endpoints from * the interval table. 
*/ if (EP_IS_ADDED(ctrl_ctx, i)) { +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl_ctx, + sizeof(struct xhci_input_control_ctx)); +#endif xhci_drop_ep_from_interval_table(xhci, &virt_dev->eps[i].bw_info, virt_dev->bw_table, @@ -2844,18 +2947,36 @@ static int xhci_reserve_bandwidth(struct xhci_hcd *xhci, &virt_dev->eps[i], virt_dev->tt_info); } +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + else + cdns_virt_flush_dcache(ctrl_ctx, + sizeof(struct xhci_input_control_ctx)); +#endif /* Revert the endpoint back to its old information */ memcpy(&virt_dev->eps[i].bw_info, &ep_bw_info[i], sizeof(ep_bw_info[i])); /* Add any changed or dropped endpoints back into the table */ - if (EP_IS_DROPPED(ctrl_ctx, i)) + if (EP_IS_DROPPED(ctrl_ctx, i)) { +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl_ctx, + sizeof(struct xhci_input_control_ctx)); +#endif xhci_add_ep_to_interval_table(xhci, &virt_dev->eps[i].bw_info, virt_dev->bw_table, virt_dev->udev, &virt_dev->eps[i], virt_dev->tt_info); + } +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + else + cdns_virt_flush_dcache(ctrl_ctx, + sizeof(struct xhci_input_control_ctx)); +#endif } +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl_ctx, sizeof(*ctrl_ctx)); +#endif return -ENOMEM; } @@ -2915,6 +3036,10 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci, trace_xhci_configure_endpoint_ctrl_ctx(ctrl_ctx); trace_xhci_configure_endpoint(slot_ctx); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl_ctx, sizeof(*ctrl_ctx)); + cdns_virt_flush_dcache(slot_ctx, sizeof(*slot_ctx)); +#endif if (!ctx_change) ret = xhci_queue_configure_endpoint(xhci, command, @@ -3021,13 +3146,22 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG); ctrl_ctx->add_flags &= cpu_to_le32(~EP0_FLAG); ctrl_ctx->drop_flags &= cpu_to_le32(~(SLOT_FLAG | EP0_FLAG)); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl_ctx, sizeof(*ctrl_ctx)); +#endif /* Don't issue the command if there's no endpoints to update. */ if (ctrl_ctx->add_flags == cpu_to_le32(SLOT_FLAG) && ctrl_ctx->drop_flags == 0) { ret = 0; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl_ctx, sizeof(*ctrl_ctx)); +#endif goto command_cleanup; } +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl_ctx, sizeof(*ctrl_ctx)); +#endif /* Fix up Context Entries field. Minimum value is EP0 == BIT(1). 
*/ slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); for (i = 31; i >= 1; i--) { @@ -3035,10 +3169,19 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) if ((virt_dev->eps[i-1].ring && !(ctrl_ctx->drop_flags & le32)) || (ctrl_ctx->add_flags & le32) || i == 1) { +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl_ctx, sizeof(*ctrl_ctx)); +#endif slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK); slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(i)); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(slot_ctx, sizeof(*slot_ctx)); +#endif break; } +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl_ctx, sizeof(*ctrl_ctx)); +#endif } ret = xhci_configure_endpoint(xhci, udev, command, @@ -3051,9 +3194,16 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) for (i = 1; i < 31; i++) { if ((le32_to_cpu(ctrl_ctx->drop_flags) & (1 << (i + 1))) && !(le32_to_cpu(ctrl_ctx->add_flags) & (1 << (i + 1)))) { +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl_ctx, sizeof(*ctrl_ctx)); +#endif xhci_free_endpoint_ring(xhci, virt_dev, i); xhci_check_bw_drop_ep_streams(xhci, virt_dev, i); } +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + else + cdns_virt_flush_dcache(ctrl_ctx, sizeof(*ctrl_ctx)); +#endif } xhci_zero_in_ctx(xhci, virt_dev); /* @@ -3117,6 +3267,9 @@ static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci, ctrl_ctx->drop_flags = cpu_to_le32(drop_flags); xhci_slot_copy(xhci, in_ctx, out_ctx); ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl_ctx, sizeof(*ctrl_ctx)); +#endif } static void xhci_endpoint_disable(struct usb_hcd *hcd, @@ -3802,10 +3955,17 @@ static int xhci_discover_or_reset_device(struct usb_hcd *hcd, /* If device is not setup, there is no point in resetting it */ slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) == - SLOT_STATE_DISABLED) + SLOT_STATE_DISABLED) { +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(slot_ctx, sizeof(*slot_ctx)); +#endif return 0; + } trace_xhci_discover_or_reset_device(slot_ctx); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(slot_ctx, sizeof(*slot_ctx)); +#endif xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id); /* Allocate the command structure that holds the struct completion. 
@@ -3942,6 +4102,9 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev) virt_dev = xhci->devs[udev->slot_id]; slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); trace_xhci_free_dev(slot_ctx); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(slot_ctx, sizeof(*slot_ctx)); +#endif /* Stop any wayward timer functions (which may grab the lock) */ for (i = 0; i < 31; i++) { @@ -4077,6 +4240,9 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev) vdev = xhci->devs[slot_id]; slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx); trace_xhci_alloc_dev(slot_ctx); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(slot_ctx, sizeof(*slot_ctx)); +#endif udev->slot_id = slot_id; @@ -4153,9 +4319,16 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev, if (setup == SETUP_CONTEXT_ONLY) { if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) == SLOT_STATE_DEFAULT) { +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(slot_ctx, sizeof(*slot_ctx)); +#endif xhci_dbg(xhci, "Slot already in default state\n"); goto out; } +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + else + cdns_virt_flush_dcache(slot_ctx, sizeof(*slot_ctx)); +#endif } command = xhci_alloc_command(xhci, true, GFP_KERNEL); @@ -4179,18 +4352,35 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev, * virt_device realloaction after a resume with an xHCI power loss, * then set up the slot context. */ - if (!slot_ctx->dev_info) + if (!slot_ctx->dev_info) { +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(slot_ctx, sizeof(*slot_ctx)); +#endif xhci_setup_addressable_virt_dev(xhci, udev); /* Otherwise, update the control endpoint ring enqueue pointer. */ - else + } else { +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(slot_ctx, sizeof(*slot_ctx)); +#endif xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev); + } ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG); ctrl_ctx->drop_flags = 0; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl_ctx, sizeof(*ctrl_ctx)); +#endif trace_xhci_address_ctx(xhci, virt_dev->in_ctx, le32_to_cpu(slot_ctx->dev_info) >> 27); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(slot_ctx, sizeof(*slot_ctx)); +#endif trace_xhci_address_ctrl_ctx(ctrl_ctx); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl_ctx, sizeof(*ctrl_ctx)); +#endif + spin_lock_irqsave(&xhci->lock, flags); trace_xhci_setup_device(virt_dev); ret = xhci_queue_address_device(xhci, command, virt_dev->in_ctx->dma, @@ -4261,6 +4451,9 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev, &xhci->dcbaa->dev_context_ptrs[udev->slot_id], (unsigned long long) le64_to_cpu(xhci->dcbaa->dev_context_ptrs[udev->slot_id])); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(xhci->dcbaa->dma, sizeof(*xhci->dcbaa)); +#endif xhci_dbg_trace(xhci, trace_xhci_dbg_address, "Output Context DMA address = %#08llx", (unsigned long long)virt_dev->out_ctx->dma); @@ -4272,9 +4465,15 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev, */ trace_xhci_address_ctx(xhci, virt_dev->out_ctx, le32_to_cpu(slot_ctx->dev_info) >> 27); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(slot_ctx, sizeof(*slot_ctx)); +#endif /* Zero the input context control for later use */ ctrl_ctx->add_flags = 0; ctrl_ctx->drop_flags = 0; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl_ctx, sizeof(*ctrl_ctx)); +#endif 
slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); udev->devaddr = (u8)(le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK); @@ -4282,6 +4481,9 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev, "Internal device address = %d", le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK); out: +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(slot_ctx, sizeof(*slot_ctx)); +#endif mutex_unlock(&xhci->mutex); if (command) { kfree(command->completion); @@ -4357,10 +4559,16 @@ static int __maybe_unused xhci_change_max_exit_latency(struct xhci_hcd *xhci, spin_unlock_irqrestore(&xhci->lock, flags); ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl_ctx, sizeof(*ctrl_ctx)); +#endif slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx); slot_ctx->dev_info2 &= cpu_to_le32(~((u32) MAX_EXIT)); slot_ctx->dev_info2 |= cpu_to_le32(max_exit_latency); slot_ctx->dev_state = 0; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(slot_ctx, sizeof(*slot_ctx)); +#endif xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, "Set up evaluate context for LPM MEL change."); @@ -5118,6 +5326,9 @@ static int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev, xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx); ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl_ctx, sizeof(*ctrl_ctx)); +#endif slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx); slot_ctx->dev_info |= cpu_to_le32(DEV_HUB); /* @@ -5154,6 +5365,9 @@ static int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev, (unsigned int) xhci->hci_version); } slot_ctx->dev_state = 0; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(slot_ctx, sizeof(*slot_ctx)); +#endif spin_unlock_irqrestore(&xhci->lock, flags); xhci_dbg(xhci, "Set up %s for hub device.\n", diff --git a/include/linux/usb.h b/include/linux/usb.h index eaae24217e8a2..4cc4e39a3ac9f 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h @@ -1608,6 +1608,20 @@ struct urb { /* (in) ISO ONLY */ }; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA +#include +static inline void cdns_flush_dcache(unsigned long start, unsigned long len) +{ + starfive_flush_dcache(_ALIGN_DOWN(start, 64), len + (start & 63)); +} + +static inline void cdns_virt_flush_dcache(void *virt_start, unsigned long len) +{ + unsigned long start = dw_virt_to_phys(virt_start); + + starfive_flush_dcache(_ALIGN_DOWN(start, 64), len + (start & 63)); +} +#endif /* ----------------------------------------------------------------------- */ /** @@ -1640,6 +1654,9 @@ static inline void usb_fill_control_urb(struct urb *urb, urb->transfer_buffer_length = buffer_length; urb->complete = complete_fn; urb->context = context; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(transfer_buffer, buffer_length); +#endif } /** @@ -1669,6 +1686,9 @@ static inline void usb_fill_bulk_urb(struct urb *urb, urb->transfer_buffer_length = buffer_length; urb->complete = complete_fn; urb->context = context; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(transfer_buffer, buffer_length); +#endif } /** @@ -1712,6 +1732,9 @@ static inline void usb_fill_int_urb(struct urb *urb, urb->complete = complete_fn; urb->context = context; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(transfer_buffer, buffer_length); +#endif if (dev->speed == USB_SPEED_HIGH || dev->speed >= USB_SPEED_SUPER) { /* make sure interval 
is within allowed range */ interval = clamp(interval, 1, 16); From cd9bd8121c279a01eda439e3287054af5dc6268e Mon Sep 17 00:00:00 2001 From: Stafford Horne Date: Fri, 21 May 2021 17:32:06 +0900 Subject: [PATCH 26/54] drivers/usb: Don't flush NULL values When plugging in USB storage we get many warning reports such as: L2CACHE: flush64 out of range: 2080200000(20000), skip flush L2CACHE: flush64 out of range: 2080200000(20000), skip flush Adding some debug statements points to the following stack trace: Call Trace: [] sifive_l2_flush64_range+0x8e/0xaa [] uas_alloc_data_urb.constprop.0+0x72/0xb2 [] uas_submit_urbs+0x142/0x3ce [] uas_queuecommand+0xe2/0x1f6 [] scsi_queue_rq+0x2f2/0x7de [] blk_mq_dispatch_rq_list+0xd8/0x6fa [] __blk_mq_sched_dispatch_requests+0xd4/0x146 [] blk_mq_sched_dispatch_requests+0x2c/0x56 [] __blk_mq_run_hw_queue+0x4c/0x74 [] __blk_mq_delay_run_hw_queue+0x188/0x18e [] blk_mq_run_hw_queue+0x6c/0xa8 ... The issue is that uas_alloc_data_urb() calls usb_fill_bulk_urb() with a NULL buffer pointer and the dcache flush code tries to flush it causing the above warnings. This patch adds a check to ignore flush requests for NULL and 0 values. Fixes: 1ab9c4c0f8e3 ("drivers/usb: Add dcache flush(VIC7100 ONLY)") Signed-off-by: Stafford Horne --- include/linux/usb.h | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/include/linux/usb.h b/include/linux/usb.h index 4cc4e39a3ac9f..6cbb23aff7adb 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h @@ -1617,9 +1617,8 @@ static inline void cdns_flush_dcache(unsigned long start, unsigned long len) static inline void cdns_virt_flush_dcache(void *virt_start, unsigned long len) { - unsigned long start = dw_virt_to_phys(virt_start); - - starfive_flush_dcache(_ALIGN_DOWN(start, 64), len + (start & 63)); + if (virt_start) + cdns_flush_dcache(dw_virt_to_phys(virt_start), len); } #endif /* ----------------------------------------------------------------------- */ From 98818244cfe1544c094ca396260ac0ee1614db33 Mon Sep 17 00:00:00 2001 From: Tom Date: Fri, 8 Jan 2021 02:59:40 +0800 Subject: [PATCH 27/54] riscv/starfive: Add VIC7100 support --- arch/riscv/Kconfig.socs | 50 ++++++++++++++++++++++++++++++++++ include/soc/starfive/vic7100.h | 36 ++++++++++++++++++++++++ 2 files changed, 86 insertions(+) create mode 100644 include/soc/starfive/vic7100.h diff --git a/arch/riscv/Kconfig.socs b/arch/riscv/Kconfig.socs index ed963761fbd2f..81cd35b02d555 100644 --- a/arch/riscv/Kconfig.socs +++ b/arch/riscv/Kconfig.socs @@ -18,6 +18,56 @@ config SOC_SIFIVE help This enables support for SiFive SoC platform hardware. +config SOC_STARFIVE_VIC7100 + bool "StarFive VIC7100 SoC" + select SOC_SIFIVE + select OF_RESERVED_MEM + select SIFIVE_L2 + select SIFIVE_L2_FLUSH + select DW_AXI_DMAC_STARFIVE + select GPIO_STARFIVE_VIC + select HW_RANDOM_STARFIVE_VIC + help + This enables support for StarFive VIC7100 SoC Platform Hardware. 
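As an aside, purely for illustration and not part of the series: the cdns_flush_dcache()/cdns_virt_flush_dcache() helpers added to include/linux/usb.h round every flush request out to whole 64-byte L2 cache lines (starfive_flush_dcache(_ALIGN_DOWN(start, 64), len + (start & 63))), and the fix above makes cdns_virt_flush_dcache() skip NULL buffers, so callers such as uas_alloc_data_urb() no longer trigger the "flush64 out of range" warnings. The small standalone C program below mirrors only that arithmetic and that guard; flush_range(), CACHE_LINE and the sample addresses are invented for the demonstration and are not kernel code.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define CACHE_LINE 64UL

/* Mirrors the cdns_flush_dcache() math: align the start down to a cache
 * line and extend the length by the bytes dropped from the head.  The
 * NULL check mirrors the guard added in the patch above. */
static void flush_range(uintptr_t start, size_t len)
{
	uintptr_t aligned;
	size_t total;

	if (!start)
		return;		/* NULL/zero address: nothing to flush */

	aligned = start & ~(CACHE_LINE - 1);		/* _ALIGN_DOWN(start, 64) */
	total = len + (start & (CACHE_LINE - 1));	/* len + (start & 63) */

	printf("flush %#lx..%#lx (%zu bytes requested, %zu submitted)\n",
	       (unsigned long)aligned,
	       (unsigned long)(aligned + total), len, total);
}

int main(void)
{
	flush_range(0x80200010UL, 0x20);	/* unaligned start gets rounded down */
	flush_range(0, 0x20000);		/* NULL buffer, as in the uas report: skipped */
	return 0;
}

A related observation on the xhci changes earlier in the series: because cdns_virt_flush_dcache() only exists when CONFIG_USB_CDNS3_HOST_FLUSH_DMA is enabled, every call site has to be wrapped in #ifdef; providing an empty static inline stub for the disabled case would be the usual kernel idiom for keeping those call sites unconditional.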
+
+menu "StarFive VIC7100 SoC Debug Option"
+	depends on SOC_STARFIVE_VIC7100
+
+config FPGA_GMAC_FLUSH_DDR
+	bool "VIC7100 SoC GMAC descriptor and packet buffer flush"
+	depends on SOC_STARFIVE_VIC7100
+	depends on STMMAC_ETH
+	default y if SOC_STARFIVE_VIC7100
+	help
+	  Enable VIC7100 GMAC descriptor and packet buffer flushing.
+
+config MMC_DW_FLUSH_DDR
+	bool "VIC7100 SoC DW MMC buffer flush"
+	depends on SOC_STARFIVE_VIC7100
+	depends on MMC_DW
+	default y if SOC_STARFIVE_VIC7100
+	help
+	  Enable VIC7100 DW MMC descriptor and data buffer flushing.
+
+config USB_CDNS3_HOST_FLUSH_DMA
+	bool "Cadence USB3 host controller DMA memory flush"
+	depends on USB
+	depends on USB_CDNS3
+	depends on SOC_STARFIVE_VIC7100
+	default y if SOC_STARFIVE_VIC7100
+	help
+	  Enable VIC7100 Cadence USB3 (CDNS3) host driver data buffer flushing.
+
+config SOC_STARFIVE_VIC7100_I2C_GPIO
+	bool "VIC7100 SoC I2C GPIO init"
+	depends on I2C_DESIGNWARE_CORE
+	depends on SOC_STARFIVE_VIC7100
+	default y if SOC_STARFIVE_VIC7100
+	help
+	  Enable VIC7100 I2C GPIO init in the Synopsys DesignWare master driver.
+
+endmenu
+
 config SOC_VIRT
 	bool "QEMU Virt Machine"
 	select CLINT_TIMER if RISCV_M_MODE
diff --git a/include/soc/starfive/vic7100.h b/include/soc/starfive/vic7100.h
new file mode 100644
index 0000000000000..a850f4cd11bf7
--- /dev/null
+++ b/include/soc/starfive/vic7100.h
@@ -0,0 +1,36 @@
+#ifndef STARFIVE_VIC7100_H
+#define STARFIVE_VIC7100_H
+#include
+#include
+
+/*cache.c*/
+#define starfive_flush_dcache(start, len) \
+	sifive_l2_flush64_range(start, len)
+
+/*dma*/
+#define CONFIG_DW_DEBUG
+
+#define DMA_PRINTK(fmt,...) \
+	printk("[DW_DMA] %s():%d \n" fmt, __func__, __LINE__, ##__VA_ARGS__)
+
+#ifdef CONFIG_DW_DEBUG
+#define DMA_DEBUG(fmt,...) \
+	printk("[DW_DMA_DEBUG] %s():%d \n" fmt, __func__, __LINE__, ##__VA_ARGS__)
+#else
+#define DMA_DEBUG(fmt,...)
+#endif + +#define _dw_virt_to_phys(vaddr) (pfn_to_phys(virt_to_pfn(vaddr))) +#define _dw_phys_to_virt(paddr) (page_to_virt(phys_to_page(paddr))) + +void *dw_phys_to_virt(u64 phys); +u64 dw_virt_to_phys(void *vaddr); + +int dw_dma_async_do_memcpy(void *src, void *dst, size_t size); +int dw_dma_memcpy_raw(dma_addr_t src_dma, dma_addr_t dst_dma, size_t size); +int dw_dma_memcpy(void *src, void *dst, size_t size); + +int dw_dma_mem2mem_arry(void); +int dw_dma_mem2mem_test(void); + +#endif /*STARFIVE_VIC7100_H*/ \ No newline at end of file From e7a711edca60ea5d69342f79f60b9df81b11275c Mon Sep 17 00:00:00 2001 From: "jack.zhu" Date: Mon, 11 Jan 2021 04:06:22 +0800 Subject: [PATCH 28/54] drivers/video/fbdev and drivers/media/platform: starfive drivers added 1, add ov5640&sc2235 drivers, update stf_isp 2, add MIPI/CSI/DSI drivers for VIC7100 --- drivers/media/platform/Kconfig | 1 + drivers/media/platform/Makefile | 2 + drivers/media/platform/starfive/Kconfig | 32 + drivers/media/platform/starfive/Makefile | 10 + drivers/media/platform/starfive/imx219_mipi.c | 425 +++ drivers/media/platform/starfive/ov5640_dvp.c | 456 ++++ drivers/media/platform/starfive/sc2235.c | 424 +++ drivers/media/platform/starfive/stf_csi.c | 210 ++ drivers/media/platform/starfive/stf_csi.h | 135 + drivers/media/platform/starfive/stf_event.c | 39 + drivers/media/platform/starfive/stf_isp.c | 441 ++++ drivers/media/platform/starfive/stf_isp.h | 16 + drivers/media/platform/starfive/stf_vin.c | 935 +++++++ drivers/video/fbdev/Kconfig | 10 + drivers/video/fbdev/Makefile | 1 + drivers/video/fbdev/starfive/Kconfig | 35 + drivers/video/fbdev/starfive/Makefile | 11 + drivers/video/fbdev/starfive/adv7513.c | 268 ++ drivers/video/fbdev/starfive/adv7513.h | 22 + drivers/video/fbdev/starfive/seeed5inch.c | 242 ++ .../video/fbdev/starfive/starfive_comm_regs.h | 95 + .../fbdev/starfive/starfive_display_dev.c | 135 + .../fbdev/starfive/starfive_display_dev.h | 273 ++ .../video/fbdev/starfive/starfive_displayer.c | 912 +++++++ drivers/video/fbdev/starfive/starfive_fb.c | 1245 +++++++++ drivers/video/fbdev/starfive/starfive_fb.h | 138 + drivers/video/fbdev/starfive/starfive_lcdc.c | 364 +++ drivers/video/fbdev/starfive/starfive_lcdc.h | 152 ++ .../video/fbdev/starfive/starfive_mipi_tx.c | 665 +++++ .../video/fbdev/starfive/starfive_mipi_tx.h | 203 ++ drivers/video/fbdev/starfive/starfive_vpp.c | 588 +++++ drivers/video/fbdev/starfive/starfive_vpp.h | 194 ++ drivers/video/fbdev/starfive/tda998x.c | 2279 +++++++++++++++++ include/dt-bindings/starfive_fb.h | 47 + include/video/stf-vin.h | 307 +++ 35 files changed, 11312 insertions(+) mode change 100644 => 100755 drivers/media/platform/Kconfig create mode 100644 drivers/media/platform/starfive/Kconfig create mode 100644 drivers/media/platform/starfive/Makefile create mode 100644 drivers/media/platform/starfive/imx219_mipi.c create mode 100755 drivers/media/platform/starfive/ov5640_dvp.c create mode 100755 drivers/media/platform/starfive/sc2235.c create mode 100644 drivers/media/platform/starfive/stf_csi.c create mode 100644 drivers/media/platform/starfive/stf_csi.h create mode 100644 drivers/media/platform/starfive/stf_event.c create mode 100644 drivers/media/platform/starfive/stf_isp.c create mode 100644 drivers/media/platform/starfive/stf_isp.h create mode 100644 drivers/media/platform/starfive/stf_vin.c mode change 100644 => 100755 drivers/video/fbdev/Kconfig mode change 100644 => 100755 drivers/video/fbdev/Makefile create mode 100644 drivers/video/fbdev/starfive/Kconfig create mode 100755 
drivers/video/fbdev/starfive/Makefile
 create mode 100644 drivers/video/fbdev/starfive/adv7513.c
 create mode 100644 drivers/video/fbdev/starfive/adv7513.h
 create mode 100644 drivers/video/fbdev/starfive/seeed5inch.c
 create mode 100644 drivers/video/fbdev/starfive/starfive_comm_regs.h
 create mode 100644 drivers/video/fbdev/starfive/starfive_display_dev.c
 create mode 100644 drivers/video/fbdev/starfive/starfive_display_dev.h
 create mode 100644 drivers/video/fbdev/starfive/starfive_displayer.c
 create mode 100644 drivers/video/fbdev/starfive/starfive_fb.c
 create mode 100644 drivers/video/fbdev/starfive/starfive_fb.h
 create mode 100644 drivers/video/fbdev/starfive/starfive_lcdc.c
 create mode 100644 drivers/video/fbdev/starfive/starfive_lcdc.h
 create mode 100644 drivers/video/fbdev/starfive/starfive_mipi_tx.c
 create mode 100644 drivers/video/fbdev/starfive/starfive_mipi_tx.h
 create mode 100644 drivers/video/fbdev/starfive/starfive_vpp.c
 create mode 100644 drivers/video/fbdev/starfive/starfive_vpp.h
 create mode 100755 drivers/video/fbdev/starfive/tda998x.c
 create mode 100755 include/dt-bindings/starfive_fb.h
 create mode 100755 include/video/stf-vin.h
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
old mode 100644
new mode 100755
index 157c924686e4b..3cd87484d8abd
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -171,6 +171,7 @@ source "drivers/media/platform/xilinx/Kconfig"
 source "drivers/media/platform/rcar-vin/Kconfig"
 source "drivers/media/platform/atmel/Kconfig"
 source "drivers/media/platform/sunxi/Kconfig"
+source "drivers/media/platform/starfive/Kconfig"
 
 config VIDEO_TI_CAL
 	tristate "TI CAL (Camera Adaptation Layer) driver"
diff --git a/drivers/media/platform/Makefile b/drivers/media/platform/Makefile
index eedc14aafb32c..23141e53e53ef 100644
--- a/drivers/media/platform/Makefile
+++ b/drivers/media/platform/Makefile
@@ -43,6 +43,8 @@ obj-$(CONFIG_VIDEO_STI_DELTA) += sti/delta/
 
 obj-y += stm32/
 
+obj-y += starfive/
+
 obj-y += davinci/
 
 obj-$(CONFIG_VIDEO_SH_VOU) += sh_vou.o
diff --git a/drivers/media/platform/starfive/Kconfig b/drivers/media/platform/starfive/Kconfig
new file mode 100644
index 0000000000000..a79d7e1802d85
--- /dev/null
+++ b/drivers/media/platform/starfive/Kconfig
@@ -0,0 +1,32 @@
+#
+# VIN sensor driver configuration
+#
+config VIDEO_STARFIVE_VIN
+	bool "StarFive VIC video input support"
+	depends on OF
+	help
+	  Say Y here to enable support for the StarFive VIC video input
+	  (VIN) driver, stf-vin.
+
+choice
+	prompt "Image Sensor for VIC board"
+	default VIDEO_STARFIVE_VIN_SENSOR_IMX219
+	depends on VIDEO_STARFIVE_VIN
+	optional
+
+config VIDEO_STARFIVE_VIN_SENSOR_OV5640
+	bool "OmniVision OV5640 5MP MIPI Camera Module"
+	help
+	  Say Y here if you want to have support for the VIN sensor OV5640.
+
+config VIDEO_STARFIVE_VIN_SENSOR_IMX219
+	bool "Sony IMX219 CMOS Image Sensor"
+	help
+	  Say Y here if you want to have support for the VIN sensor IMX219.
+
+config VIDEO_STARFIVE_VIN_SENSOR_SC2235
+	bool "SmartSens Technology SC2235 CMOS Image Sensor"
+	help
+	  Say Y here if you want to have support for the VIN sensor SC2235.
+
+endchoice
diff --git a/drivers/media/platform/starfive/Makefile b/drivers/media/platform/starfive/Makefile
new file mode 100644
index 0000000000000..4585213935e65
--- /dev/null
+++ b/drivers/media/platform/starfive/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the StarFive VIC video input (VIN) drivers.
+# + +obj-$(CONFIG_VIDEO_STARFIVE_VIN_SENSOR_OV5640) += ov5640_dvp.o +obj-$(CONFIG_VIDEO_STARFIVE_VIN_SENSOR_SC2235) += sc2235.o +obj-$(CONFIG_VIDEO_STARFIVE_VIN_SENSOR_IMX219) += imx219_mipi.o +obj-$(CONFIG_VIDEO_STARFIVE_VIN) += video_stf_vin.o +video_stf_vin-objs += stf_vin.o stf_event.o stf_isp.o stf_csi.o diff --git a/drivers/media/platform/starfive/imx219_mipi.c b/drivers/media/platform/starfive/imx219_mipi.c new file mode 100644 index 0000000000000..2bbc2abefbd40 --- /dev/null +++ b/drivers/media/platform/starfive/imx219_mipi.c @@ -0,0 +1,425 @@ +/* + * Copyright (C) 2011-2013 StarFive Technology Co., Ltd. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include