#include <linux/dma-map-ops.h>
#include <linux/pagewalk.h>

#include <asm/cpuinfo.h>
#include <asm/cacheflush.h>
#include <asm/spr_defs.h>
#include <asm/tlbflush.h>
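
/*
 * DMA mapping callbacks for OpenRISC: "coherent" memory here simply means
 * uncached memory, i.e. pages whose PTEs carry the cache-inhibit
 * (_PAGE_CI) bit.
 */

/* Page-table walker callback: mark one kernel page as cache-inhibited. */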
static int
page_set_nocache(pte_t *pte, unsigned long addr,
		 unsigned long next, struct mm_walk *walk)
{
	pte_val(*pte) |= _PAGE_CI;

	/*
	 * Flush the page out of the TLB so that the new page flags get
	 * picked up next time there's an access.
	 */
	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);

	/* Flush the page out of the dcache before it is used uncached */
	local_dcache_range_flush(__pa(addr), __pa(next));

	return 0;
}
static const struct mm_walk_ops set_nocache_walk_ops = {
	.pte_entry = page_set_nocache,
};
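
/*
 * Page-table walker callback: make one kernel page cachable again.  No
 * dcache maintenance is needed here; the page was uncached while mapped,
 * so there are no dirty lines to write back.
 */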
static int
page_clear_nocache(pte_t *pte, unsigned long addr,
		   unsigned long next, struct mm_walk *walk)
{
	pte_val(*pte) &= ~_PAGE_CI;

	/*
	 * Flush the page out of the TLB so that the new page flags get
	 * picked up next time there's an access.
	 */
	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);

	return 0;
}
static const struct mm_walk_ops clear_nocache_walk_ops = {
	.pte_entry = page_clear_nocache,
};
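
/*
 * Remap an existing kernel range as uncached: walk every PTE backing
 * [cpu_addr, cpu_addr + size) and set the cache-inhibit bit.  The generic
 * dma-direct allocator calls this on freshly allocated pages.
 */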
void *arch_dma_set_uncached(void *cpu_addr, size_t size)
{
	unsigned long va = (unsigned long)cpu_addr;
	int error;

	/* Walking init_mm's kernel page tables requires its mmap lock */
	mmap_write_lock(&init_mm);
	error = walk_kernel_page_table_range(va, va + size,
			&set_nocache_walk_ops, NULL, NULL);
	mmap_write_unlock(&init_mm);

	if (error)
		return ERR_PTR(error);
	return cpu_addr;
}
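
/* Undo arch_dma_set_uncached(): make the range cachable again on free. */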
void arch_dma_clear_uncached(void *cpu_addr, size_t size)
{
	unsigned long va = (unsigned long)cpu_addr;

	mmap_write_lock(&init_mm);
	/* The walk shouldn't be able to fail for a range we previously set */
	WARN_ON(walk_kernel_page_table_range(va, va + size,
			&clear_nocache_walk_ops, NULL, NULL));
	mmap_write_unlock(&init_mm);
}
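
/*
 * Streaming-DMA cache maintenance: write dirty lines back before the
 * device reads from memory, invalidate before the CPU reads what the
 * device wrote.
 */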
void arch_sync_dma_for_device(phys_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		/* Flush the dcache for the requested range */
		local_dcache_range_flush(addr, addr + size);
		break;
	case DMA_FROM_DEVICE:
		/* Invalidate the dcache for the requested range */
		local_dcache_range_inv(addr, addr + size);
		break;
	default:
		/*
		 * DMA_BIDIRECTIONAL needs no maintenance here; such
		 * buffers have to be synced explicitly in both
		 * directions anyway.
		 */
		break;
	}
}
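
/*
 * Usage sketch (illustrative only, not part of this file): drivers never
 * call the hooks above directly.  A coherent allocation such as
 *
 *	dma_addr_t handle;
 *	void *vaddr = dma_alloc_coherent(dev, PAGE_SIZE, &handle, GFP_KERNEL);
 *
 * goes through the dma-direct core, which invokes arch_dma_set_uncached()
 * on the freshly allocated pages; dma_free_coherent() reverses it through
 * arch_dma_clear_uncached().  Streaming mappings (dma_map_single() and
 * friends) reach arch_sync_dma_for_device() instead.
 */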