/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/dma-noncoherent.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>

/*
 * ARCH specific callbacks for generic noncoherent DMA ops (dma/noncoherent.c)
 * - hardware IOC not available (or "dma-coherent" not set for device in DT)
 * - But still handle both coherent and non-coherent requests from caller
 *
 * For DMA coherent hardware (IOC) generic code suffices
 */
void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	unsigned long order = get_order(size);
	struct page *page;
	phys_addr_t paddr;
	void *kvaddr;
	bool need_coh = !(attrs & DMA_ATTR_NON_CONSISTENT);

	/*
	 * __GFP_HIGHMEM flag is cleared by upper layer functions
	 * (in include/linux/dma-mapping.h) so we should never get a
	 * __GFP_HIGHMEM here.
	 */
	BUG_ON(gfp & __GFP_HIGHMEM);

	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;

	/* This is linear addr (0x8000_0000 based) */
	paddr = page_to_phys(page);

	*dma_handle = paddr;

	/*
	 * A coherent buffer needs MMU mapping to enforce non-cacheability.
	 * kvaddr is kernel Virtual address (0x7000_0000 based).
	 */
	if (need_coh) {
		kvaddr = ioremap_nocache(paddr, size);
		if (kvaddr == NULL) {
			__free_pages(page, order);
			return NULL;
		}
	} else {
		kvaddr = (void *)(u32)paddr;
	}

	/*
	 * Evict any existing L1 and/or L2 lines for the backing page
	 * in case it was used earlier as a normal "cached" page.
	 * Yeah this bit us - STAR 9000898266
	 *
	 * Although core does call flush_cache_vmap(), it gets kvaddr hence
	 * can't be used to efficiently flush L1 and/or L2 which need paddr
	 * Currently flush_cache_vmap nukes the L1 cache completely which
	 * will be optimized as a separate commit
	 */
	if (need_coh)
		dma_cache_wback_inv(paddr, size);

	return kvaddr;
}
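
/*
 * Undo arch_dma_alloc(): for coherent requests (DMA_ATTR_NON_CONSISTENT not
 * set) tear down the uncached ioremap mapping, then return the backing
 * pages to the page allocator.
 */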
void arch_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	phys_addr_t paddr = dma_handle;
	struct page *page = virt_to_page(paddr);

	if (!(attrs & DMA_ATTR_NON_CONSISTENT))
		iounmap((void __force __iomem *)vaddr);

	__free_pages(page, get_order(size));
}
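
/*
 * Map a DMA buffer into userspace: the VMA is made uncached via
 * pgprot_noncached() and backed by remap_pfn_range(), after giving any
 * per-device coherent memory pool first shot at servicing the mmap.
 */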
int arch_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	unsigned long user_count = vma_pages(vma);
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn = __phys_to_pfn(dma_addr);
	unsigned long off = vma->vm_pgoff;
	int ret = -ENXIO;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off < count && user_count <= (count - off)) {
		ret = remap_pfn_range(vma, vma->vm_start,
				      pfn + off,
				      user_count << PAGE_SHIFT,
				      vma->vm_page_prot);
	}

	return ret;
}

/*
 * Cache operations depending on function and direction argument, inspired by
 * https://lkml.org/lkml/2018/5/18/979
 * "dma_sync_*_for_cpu and direction=TO_DEVICE (was Re: [PATCH 02/20]
 * dma-mapping: provide a generic dma-noncoherent implementation)"
 *
 *          |   map          ==  for_device     |   unmap     ==  for_cpu
 *          |----------------------------------------------------------------
 * TO_DEV   |   writeback        writeback      |   none          none
 * FROM_DEV |   invalidate       invalidate     |   invalidate*   invalidate*
 * BIDIR    |   writeback+inv    writeback+inv  |   invalidate    invalidate
 *
 *     [*] needed for CPU speculative prefetches
 *
 * NOTE: we don't check the validity of direction argument as it is done in
 *       upper layer functions (in include/linux/dma-mapping.h)
 */
void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		dma_cache_wback(paddr, size);
		break;

	case DMA_FROM_DEVICE:
		dma_cache_inv(paddr, size);
		break;

	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv(paddr, size);
		break;

	default:
		break;
	}
}

void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		break;

	/* FROM_DEVICE invalidate needed if speculative CPU prefetch only */
	case DMA_FROM_DEVICE:
	case DMA_BIDIRECTIONAL:
		dma_cache_inv(paddr, size);
		break;

	default:
		break;
	}
}

/*
 * Plug in coherent or noncoherent dma ops
 */
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
			const struct iommu_ops *iommu, bool coherent)
{
	/*
	 * IOC hardware snoops all DMA traffic keeping the caches consistent
	 * with memory - eliding need for any explicit cache maintenance of
	 * DMA buffers - so we can use dma_direct cache ops.
	 */
	if (is_isa_arcv2() && ioc_enable && coherent) {
		set_dma_ops(dev, &dma_direct_ops);
		dev_info(dev, "use dma_direct_ops cache ops\n");
	} else {
		set_dma_ops(dev, &dma_noncoherent_ops);
		dev_info(dev, "use dma_noncoherent_ops cache ops\n");
	}
}