/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  arch/arm/include/asm/cache.h
 */
#ifndef __ASMARM_CACHE_H
#define __ASMARM_CACHE_H

#define L1_CACHE_SHIFT		CONFIG_ARM_L1_CACHE_SHIFT
#define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)

/*
 * Memory returned by kmalloc() may be used for DMA, so we must make
 * sure that all such allocations are cache aligned. Otherwise,
 * unrelated code may cause parts of the buffer to be read into the
 * cache before the transfer is done, causing old data to be seen by
 * the CPU.
 */
#define ARCH_DMA_MINALIGN	L1_CACHE_BYTES

/*
 * With EABI on ARMv5 and above we must have 64-bit aligned slab pointers.
 */
#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
#define ARCH_SLAB_MINALIGN	8
#endif

#define __read_mostly __section(".data..read_mostly")

#ifndef __ASSEMBLY__
#ifdef CONFIG_ARCH_HAS_CACHE_LINE_SIZE
int cache_line_size(void);
#endif
#endif

#endif
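
/*
 * Example usage of ARCH_DMA_MINALIGN (an illustrative sketch, not part of
 * the upstream header; the function name, device pointer, and buffer length
 * below are hypothetical): because kmalloc() aligns allocations to at least
 * ARCH_DMA_MINALIGN, such a buffer never shares a cache line with unrelated
 * data and can be handed straight to the streaming DMA API.
 *
 *	#include <linux/slab.h>
 *	#include <linux/dma-mapping.h>
 *
 *	static void *my_alloc_rx_buf(struct device *dev, size_t len,
 *				     dma_addr_t *handle)
 *	{
 *		void *buf = kmalloc(len, GFP_KERNEL);
 *
 *		if (!buf)
 *			return NULL;
 *
 *		*handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
 *		if (dma_mapping_error(dev, *handle)) {
 *			kfree(buf);
 *			return NULL;
 *		}
 *		return buf;
 *	}
 */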
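
/*
 * Example usage of __read_mostly (an illustrative sketch; the variable is
 * hypothetical): annotating a rarely written, frequently read variable
 * places it in .data..read_mostly, keeping it off cache lines that take
 * frequent writes and so avoiding false sharing on the readers' hot path.
 *
 *	static unsigned int my_poll_interval __read_mostly = 100;
 */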
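
/*
 * Example usage of cache_line_size() (an illustrative sketch): with
 * CONFIG_ARCH_HAS_CACHE_LINE_SIZE enabled, code that wants the cache line
 * size detected at runtime, rather than the compile-time L1_CACHE_BYTES
 * upper bound, can query it directly.
 *
 *	size_t stride = cache_line_size();
 *
 * Without that config option, the generic fallback in <linux/cache.h>
 * simply evaluates to L1_CACHE_BYTES.
 */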