linux/arch/openrisc/kernel/dma.c
Sahil Siddiq 0c4a6e79ef openrisc: Introduce new utility functions to flush and invalidate caches
According to the OpenRISC architecture manual, the dcache and icache may
not be present. When these caches are present, the invalidate and flush
registers may be absent. The current implementation does not check for
their presence before using the cache registers or before invalidating
and flushing cache blocks.

Introduce new functions to detect the presence of cache components and
related special-purpose registers.
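
For illustration only (the helpers actually added by this commit are not
shown on this page), such a presence check could read the unit-present and
data cache configuration SPRs before any cache-block register is written.
The SPR names and mfspr() below come from the OpenRISC headers
(<asm/spr_defs.h>, <asm/spr.h>); the function name itself is a hypothetical
sketch, not the committed interface:

/*
 * Illustrative sketch, not the committed helper: report whether the data
 * cache and its block-flush register (DCBFR) can safely be used.
 */
static inline bool dcache_flush_possible(void)
{
	unsigned long upr = mfspr(SPR_UPR);

	/* No UPR information or no data cache at all */
	if (!(upr & SPR_UPR_UP) || !(upr & SPR_UPR_DCP))
		return false;

	/* Is the cache block flush register implemented? */
	return mfspr(SPR_DCCFGR) & SPR_DCCFGR_CBFRI;
}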

There are a few places where a range of addresses has to be flushed or
invalidated, and the implementation is duplicated. Introduce new utility
functions and macros that generalize this implementation and reduce the
duplication.
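
As a rough sketch of what such a range helper can look like (the loop below
is illustrative; only mtspr() and the SPR_DCBFR/SPR_DCBIR names are taken
from the OpenRISC headers, while the helper name and block_size parameter
are assumptions), the duplicated pattern boils down to stepping through the
range one cache block at a time:

/*
 * Illustrative sketch of a generalized range operation: write every
 * cache-block-aligned address in [start, end) to the given block
 * maintenance register, e.g. SPR_DCBFR to flush or SPR_DCBIR to
 * invalidate the data cache block holding that address.
 */
static void cache_op_range(unsigned long start, unsigned long end,
			   unsigned long reg, unsigned int block_size)
{
	unsigned long addr = start & ~((unsigned long)block_size - 1);

	while (addr < end) {
		mtspr(reg, addr);
		addr += block_size;
	}
}

A call such as local_dcache_range_flush(pa, pa + size) would then reduce to
something like cache_op_range(pa, pa + size, SPR_DCBFR, block_size) once the
presence checks above have passed.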

Signed-off-by: Sahil Siddiq <sahilcdq0@gmail.com>
Signed-off-by: Stafford Horne <shorne@gmail.com>
2025-04-20 07:06:54 +01:00

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * OpenRISC Linux
 *
 * Linux architectural port borrowing liberally from similar works of
 * others. All original copyrights apply as per the original source
 * declaration.
 *
 * Modifications for the OpenRISC architecture:
 * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
 * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
 *
 * DMA mapping callbacks...
 */
#include <linux/dma-map-ops.h>
#include <linux/pagewalk.h>

#include <asm/cpuinfo.h>
#include <asm/cacheflush.h>
#include <asm/spr_defs.h>
#include <asm/tlbflush.h>

static int
page_set_nocache(pte_t *pte, unsigned long addr,
		 unsigned long next, struct mm_walk *walk)
{
	pte_val(*pte) |= _PAGE_CI;

	/*
	 * Flush the page out of the TLB so that the new page flags get
	 * picked up next time there's an access
	 */
	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);

	/* Flush page out of dcache */
	local_dcache_range_flush(__pa(addr), __pa(next));

	return 0;
}

static const struct mm_walk_ops set_nocache_walk_ops = {
	.pte_entry = page_set_nocache,
};

static int
page_clear_nocache(pte_t *pte, unsigned long addr,
		   unsigned long next, struct mm_walk *walk)
{
	pte_val(*pte) &= ~_PAGE_CI;

	/*
	 * Flush the page out of the TLB so that the new page flags get
	 * picked up next time there's an access
	 */
	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);

	return 0;
}

static const struct mm_walk_ops clear_nocache_walk_ops = {
	.pte_entry = page_clear_nocache,
};

void *arch_dma_set_uncached(void *cpu_addr, size_t size)
{
	unsigned long va = (unsigned long)cpu_addr;
	int error;

	/*
	 * We need to iterate through the pages, clearing the dcache for
	 * them and setting the cache-inhibit bit.
	 */
	mmap_write_lock(&init_mm);
	error = walk_page_range_novma(&init_mm, va, va + size,
			&set_nocache_walk_ops, NULL, NULL);
	mmap_write_unlock(&init_mm);

	if (error)
		return ERR_PTR(error);

	return cpu_addr;
}

void arch_dma_clear_uncached(void *cpu_addr, size_t size)
{
	unsigned long va = (unsigned long)cpu_addr;

	mmap_write_lock(&init_mm);
	/* walk_page_range shouldn't be able to fail here */
	WARN_ON(walk_page_range_novma(&init_mm, va, va + size,
			&clear_nocache_walk_ops, NULL, NULL));
	mmap_write_unlock(&init_mm);
}

void arch_sync_dma_for_device(phys_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		/* Flush the dcache for the requested range */
		local_dcache_range_flush(addr, addr + size);
		break;
	case DMA_FROM_DEVICE:
		/* Invalidate the dcache for the requested range */
		local_dcache_range_inv(addr, addr + size);
		break;
	default:
		/*
		 * NOTE: If dir == DMA_BIDIRECTIONAL then there's no need to
		 * flush nor invalidate the cache here as the area will need
		 * to be manually synced anyway.
		 */
		break;
	}
}