GitHub Repository: torvalds/linux
Path: blob/master/arch/xtensa/include/asm/page.h
/*
 * include/asm-xtensa/page.h
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Copyright (C) 2001 - 2007 Tensilica Inc.
 */

#ifndef _XTENSA_PAGE_H
#define _XTENSA_PAGE_H

#include <linux/const.h>

#include <asm/processor.h>
#include <asm/types.h>
#include <asm/cache.h>
#include <asm/kmem_layout.h>

#include <vdso/page.h>

#ifdef CONFIG_MMU
#define PAGE_OFFSET	XCHAL_KSEG_CACHED_VADDR
#define PHYS_OFFSET	XCHAL_KSEG_PADDR
#define MAX_LOW_PFN	(PHYS_PFN(XCHAL_KSEG_PADDR) + \
			 PHYS_PFN(XCHAL_KSEG_SIZE))
#else
#define PAGE_OFFSET	_AC(CONFIG_DEFAULT_MEM_START, UL)
#define PHYS_OFFSET	_AC(CONFIG_DEFAULT_MEM_START, UL)
#define MAX_LOW_PFN	PHYS_PFN(0xfffffffful)
#endif
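
/*
 * Illustrative sketch (not part of the original header): on a common MMU
 * configuration with XCHAL_KSEG_CACHED_VADDR == 0xd0000000,
 * XCHAL_KSEG_PADDR == 0x00000000 and XCHAL_KSEG_SIZE == 0x08000000 (128 MiB),
 * the definitions above evaluate to:
 *
 *	PAGE_OFFSET = 0xd0000000
 *	PHYS_OFFSET = 0x00000000
 *	MAX_LOW_PFN = PHYS_PFN(0) + PHYS_PFN(0x08000000) = 0x0 + 0x8000 = 0x8000
 *
 * i.e. at most 128 MiB of low memory is directly mapped through KSEG.
 * The XCHAL_* values above are assumptions for illustration; the real
 * values come from the configured kernel memory layout (<asm/kmem_layout.h>).
 */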

/*
 * Cache aliasing:
 *
 * If the cache size for one way is greater than the page size, we have to
 * deal with cache aliasing. The cache index is wider than the page size:
 *
 * |    |cache| cache index
 * | pfn  |off|	virtual address
 * |xxxx:X|zzz|
 * |    : |   |
 * | \  / |   |
 * |trans.|   |
 * | /  \ |   |
 * |yyyy:Y|zzz|	physical address
 *
 * When the page number is translated to the physical page address, the lowest
 * bit(s) (X) that are part of the cache index are also translated (Y).
 * If this translation changes bit(s) (X), the cache index is also affected,
 * thus resulting in a different cache line than before.
 * The kernel does not provide a mechanism to ensure that the page color
 * (represented by this bit) remains the same when allocated or when pages
 * are remapped. When user pages are mapped into kernel space, the color of
 * the page might also change.
 *
 * We use the address space VMALLOC_END ... VMALLOC_END + DCACHE_WAY_SIZE * 2
 * to temporarily map a page so we can match the color.
 */

#if DCACHE_WAY_SIZE > PAGE_SIZE
# define DCACHE_ALIAS_ORDER	(DCACHE_WAY_SHIFT - PAGE_SHIFT)
# define DCACHE_ALIAS_MASK	(PAGE_MASK & (DCACHE_WAY_SIZE - 1))
# define DCACHE_ALIAS(a)	(((a) & DCACHE_ALIAS_MASK) >> PAGE_SHIFT)
# define DCACHE_ALIAS_EQ(a,b)	((((a) ^ (b)) & DCACHE_ALIAS_MASK) == 0)
#else
# define DCACHE_ALIAS_ORDER	0
# define DCACHE_ALIAS(a)	((void)(a), 0)
#endif
#define DCACHE_N_COLORS		(1 << DCACHE_ALIAS_ORDER)
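
/*
 * Illustrative sketch (not part of the original header), assuming
 * PAGE_SIZE == 4 KiB (PAGE_SHIFT == 12) and DCACHE_WAY_SIZE == 16 KiB
 * (DCACHE_WAY_SHIFT == 14):
 *
 *	DCACHE_ALIAS_ORDER = 14 - 12 = 2
 *	DCACHE_N_COLORS    = 1 << 2  = 4
 *	DCACHE_ALIAS_MASK  = 0xfffff000 & 0x3fff = 0x3000
 *
 *	DCACHE_ALIAS(0xd0003000) = (0xd0003000 & 0x3000) >> 12 = 3
 *	DCACHE_ALIAS(0xd0007000) = (0xd0007000 & 0x3000) >> 12 = 3
 *	DCACHE_ALIAS_EQ(0xd0003000, 0xd0007000) == 1
 *
 * i.e. two addresses with the same color (bits [13:12] here) index the same
 * cache lines even though they belong to different pages. The cache and page
 * sizes above are assumptions for illustration; the real values come from
 * <asm/cache.h> and the page-size configuration.
 */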

#if ICACHE_WAY_SIZE > PAGE_SIZE
# define ICACHE_ALIAS_ORDER	(ICACHE_WAY_SHIFT - PAGE_SHIFT)
# define ICACHE_ALIAS_MASK	(PAGE_MASK & (ICACHE_WAY_SIZE - 1))
# define ICACHE_ALIAS(a)	(((a) & ICACHE_ALIAS_MASK) >> PAGE_SHIFT)
# define ICACHE_ALIAS_EQ(a,b)	((((a) ^ (b)) & ICACHE_ALIAS_MASK) == 0)
#else
# define ICACHE_ALIAS_ORDER	0
#endif


#ifdef __ASSEMBLER__

#define __pgprot(x)	(x)

#else

/*
 * These are used to make use of C type-checking.
 */

typedef struct { unsigned long pte; } pte_t;		/* page table entry */
typedef struct { unsigned long pgd; } pgd_t;		/* PGD table entry */
typedef struct { unsigned long pgprot; } pgprot_t;
typedef struct page *pgtable_t;

#define pte_val(x)	((x).pte)
#define pgd_val(x)	((x).pgd)
#define pgprot_val(x)	((x).pgprot)

#define __pte(x)	((pte_t) { (x) } )
#define __pgd(x)	((pgd_t) { (x) } )
#define __pgprot(x)	((pgprot_t) { (x) } )
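
/*
 * Illustrative sketch (not part of the original header): wrapping the raw
 * values in single-member structs lets the compiler reject accidental
 * mixing of page-table values and plain integers, e.g.:
 *
 *	pte_t pte = __pte(0x12345000 | 0x7);	// OK: explicit construction
 *	unsigned long raw = pte_val(pte);	// OK: explicit extraction
 *	// pte_t bad = 0x12345000;		// would not compile
 *
 * The numeric values are made up for illustration only.
 */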

# include <asm-generic/getorder.h>

struct page;
struct vm_area_struct;
extern void clear_page(void *page);
extern void copy_page(void *to, void *from);

/*
 * If we have cache aliasing and writeback caches, we might have to do
 * some extra work.
 */

#if defined(CONFIG_MMU) && DCACHE_WAY_SIZE > PAGE_SIZE
extern void clear_page_alias(void *vaddr, unsigned long paddr);
extern void copy_page_alias(void *to, void *from,
			    unsigned long to_paddr, unsigned long from_paddr);

#define clear_user_highpage clear_user_highpage
void clear_user_highpage(struct page *page, unsigned long vaddr);
#define __HAVE_ARCH_COPY_USER_HIGHPAGE
void copy_user_highpage(struct page *to, struct page *from,
			unsigned long vaddr, struct vm_area_struct *vma);
#else
# define clear_user_page(page, vaddr, pg)	clear_page(page)
# define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)
#endif

/*
 * This handles the memory map. We handle pages at
 * XCHAL_KSEG_CACHED_VADDR for kernels with a 32-bit address space.
 * These macros are for conversion of kernel addresses, not user
 * addresses.
 */

#define ARCH_PFN_OFFSET		(PHYS_OFFSET >> PAGE_SHIFT)

#ifdef CONFIG_MMU
static inline unsigned long ___pa(unsigned long va)
{
	unsigned long off = va - PAGE_OFFSET;

	if (off >= XCHAL_KSEG_SIZE)
		off -= XCHAL_KSEG_SIZE;

#ifndef CONFIG_XIP_KERNEL
	return off + PHYS_OFFSET;
#else
	if (off < XCHAL_KSEG_SIZE)
		return off + PHYS_OFFSET;

	off -= XCHAL_KSEG_SIZE;
	if (off >= XCHAL_KIO_SIZE)
		off -= XCHAL_KIO_SIZE;

	return off + XCHAL_KIO_PADDR;
#endif
}
#define __pa(x)	___pa((unsigned long)(x))
#else
#define __pa(x)	\
	((unsigned long) (x) - PAGE_OFFSET + PHYS_OFFSET)
#endif
#define __va(x)	\
	((void *)((unsigned long) (x) - PHYS_OFFSET + PAGE_OFFSET))

#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define page_to_virt(page)	__va(page_to_pfn(page) << PAGE_SHIFT)
#define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
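
/*
 * Illustrative sketch (not part of the original header): with the example
 * MMU configuration used above (PAGE_OFFSET == 0xd0000000,
 * PHYS_OFFSET == 0x00000000, XCHAL_KSEG_SIZE == 0x08000000) and no XIP:
 *
 *	__pa(0xd0001000): off = 0xd0001000 - 0xd0000000 = 0x1000,
 *	                  off < XCHAL_KSEG_SIZE, so __pa = 0x1000 + 0 = 0x1000
 *	__va(0x1000)     = (void *)(0x1000 - 0 + 0xd0000000) = 0xd0001000
 *	virt_to_page(0xd0001000) == pfn_to_page(0x1)
 *
 * The address and configuration values are assumptions for illustration
 * only; the real ones depend on the configured kernel memory layout.
 */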

#endif /* __ASSEMBLER__ */

#include <asm-generic/memory_model.h>
#endif /* _XTENSA_PAGE_H */