GitHub Repository: awilliam/linux-vfio
Path: blob/master/arch/blackfin/kernel/cplb-mpu/cplbmgr.c
/*
 * Blackfin CPLB exception handling for when the MPU is on
 *
 * Copyright 2008-2009 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 */

#include <linux/module.h>
#include <linux/mm.h>

#include <asm/blackfin.h>
#include <asm/cacheflush.h>
#include <asm/cplb.h>
#include <asm/cplbinit.h>
#include <asm/mmu_context.h>

/*
 * WARNING
 *
 * This file is compiled with certain -ffixed-reg options.  We have to
 * make sure not to call any functions here that could clobber these
 * registers.
 */

int page_mask_nelts;
int page_mask_order;
unsigned long *current_rwx_mask[NR_CPUS];
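/*
 * current_rwx_mask[cpu] points at the current process's page-permission
 * bitmaps: three consecutive arrays of page_mask_nelts words (read, write,
 * execute), indexed by page number in the miss handlers below.
 */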

int nr_dcplb_miss[NR_CPUS], nr_icplb_miss[NR_CPUS];
int nr_icplb_supv_miss[NR_CPUS], nr_dcplb_prot[NR_CPUS];
int nr_cplb_flush[NR_CPUS];

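/*
 * When exception handling is configured to run from L1 SRAM, place the
 * CPLB managers in L1 instruction memory as well.
 */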
#ifdef CONFIG_EXCPT_IRQ_SYSC_L1
#define MGR_ATTR __attribute__((l1_text))
#else
#define MGR_ATTR
#endif

/*
 * Given the contents of the status register, return the index of the
 * CPLB that caused the fault.
 */
static inline int faulting_cplb_index(int status)
{
	int signbits = __builtin_bfin_norm_fr1x32(status & 0xFFFF);
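	/*
	 * The low 16 status bits have a single bit set identifying the
	 * faulting entry; norm_fr1x32() returns the number of redundant
	 * sign bits, so 30 - signbits is the position of that bit.
	 */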
	return 30 - signbits;
}

/*
 * Given the contents of the status register and the DCPLB_DATA contents,
 * return true if a write access should be permitted.
 */
static inline int write_permitted(int status, unsigned long data)
{
	if (status & FAULT_USERSUPV)
		return !!(data & CPLB_SUPV_WR);
	else
		return !!(data & CPLB_USER_WR);
}

/* Counters to implement round-robin replacement.  */
static int icplb_rr_index[NR_CPUS], dcplb_rr_index[NR_CPUS];

/*
 * Find an ICPLB entry to be evicted and return its index.
 */
MGR_ATTR static int evict_one_icplb(unsigned int cpu)
{
	int i;
	for (i = first_switched_icplb; i < MAX_CPLBS; i++)
		if ((icplb_tbl[cpu][i].data & CPLB_VALID) == 0)
			return i;
	i = first_switched_icplb + icplb_rr_index[cpu];
	if (i >= MAX_CPLBS) {
		i -= MAX_CPLBS - first_switched_icplb;
		icplb_rr_index[cpu] -= MAX_CPLBS - first_switched_icplb;
	}
	icplb_rr_index[cpu]++;
	return i;
}

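/*
 * Find a DCPLB entry to be evicted and return its index; as above, prefer
 * an invalid entry and fall back to round-robin replacement.
 */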
MGR_ATTR static int evict_one_dcplb(unsigned int cpu)
{
	int i;
	for (i = first_switched_dcplb; i < MAX_CPLBS; i++)
		if ((dcplb_tbl[cpu][i].data & CPLB_VALID) == 0)
			return i;
	i = first_switched_dcplb + dcplb_rr_index[cpu];
	if (i >= MAX_CPLBS) {
		i -= MAX_CPLBS - first_switched_dcplb;
		dcplb_rr_index[cpu] -= MAX_CPLBS - first_switched_dcplb;
	}
	dcplb_rr_index[cpu]++;
	return i;
}

MGR_ATTR static noinline int dcplb_miss(unsigned int cpu)
{
	unsigned long addr = bfin_read_DCPLB_FAULT_ADDR();
	int status = bfin_read_DCPLB_STATUS();
	unsigned long *mask;
	int idx;
	unsigned long d_data;

	nr_dcplb_miss[cpu]++;

	d_data = CPLB_SUPV_WR | CPLB_VALID | CPLB_DIRTY | PAGE_SIZE_4KB;
#ifdef CONFIG_BFIN_EXTMEM_DCACHEABLE
	if (bfin_addr_dcacheable(addr)) {
		d_data |= CPLB_L1_CHBL | ANOMALY_05000158_WORKAROUND;
# ifdef CONFIG_BFIN_EXTMEM_WRITETHROUGH
		d_data |= CPLB_L1_AOW | CPLB_WT;
# endif
	}
#endif

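	/*
	 * Classify the faulting address: L2 SRAM, the async banks or boot
	 * ROM above physical_mem_end, reserved memory between _ramend and
	 * physical_mem_end, or normal RAM checked against the current
	 * process's permission bitmaps.
	 */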
	if (L2_LENGTH && addr >= L2_START && addr < L2_START + L2_LENGTH) {
		addr = L2_START;
		d_data = L2_DMEMORY;
	} else if (addr >= physical_mem_end) {
		if (addr >= ASYNC_BANK0_BASE && addr < ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE) {
			mask = current_rwx_mask[cpu];
			if (mask) {
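				/*
				 * The async banks are tracked in the bitmaps
				 * as if they immediately followed RAM, hence
				 * the (ASYNC_BANK0_BASE - _ramend) offset.
				 */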
				int page = (addr - (ASYNC_BANK0_BASE - _ramend)) >> PAGE_SHIFT;
				int idx = page >> 5;
				int bit = 1 << (page & 31);

				if (mask[idx] & bit)
					d_data |= CPLB_USER_RD;
			}
		} else if (addr >= BOOT_ROM_START && addr < BOOT_ROM_START + BOOT_ROM_LENGTH
			   && (status & (FAULT_RW | FAULT_USERSUPV)) == FAULT_USERSUPV) {
			addr &= ~(1 * 1024 * 1024 - 1);
			d_data &= ~PAGE_SIZE_4KB;
			d_data |= PAGE_SIZE_1MB;
		} else
			return CPLB_PROT_VIOL;
	} else if (addr >= _ramend) {
		d_data |= CPLB_USER_RD | CPLB_USER_WR;
		if (reserved_mem_dcache_on)
			d_data |= CPLB_L1_CHBL;
	} else {
		mask = current_rwx_mask[cpu];
		if (mask) {
			int page = addr >> PAGE_SHIFT;
			int idx = page >> 5;
			int bit = 1 << (page & 31);

			if (mask[idx] & bit)
				d_data |= CPLB_USER_RD;

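			/* Advance to the write bitmap. */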
			mask += page_mask_nelts;
			if (mask[idx] & bit)
				d_data |= CPLB_USER_WR;
		}
	}
	idx = evict_one_dcplb(cpu);

	addr &= PAGE_MASK;
	dcplb_tbl[cpu][idx].addr = addr;
	dcplb_tbl[cpu][idx].data = d_data;

	_disable_dcplb();
	bfin_write32(DCPLB_DATA0 + idx * 4, d_data);
	bfin_write32(DCPLB_ADDR0 + idx * 4, addr);
	_enable_dcplb();

	return 0;
}

MGR_ATTR static noinline int icplb_miss(unsigned int cpu)
{
	unsigned long addr = bfin_read_ICPLB_FAULT_ADDR();
	int status = bfin_read_ICPLB_STATUS();
	int idx;
	unsigned long i_data;

	nr_icplb_miss[cpu]++;

	/* If inside the uncached DMA region, fault.  */
	if (addr >= _ramend - DMA_UNCACHED_REGION && addr < _ramend)
		return CPLB_PROT_VIOL;

	if (status & FAULT_USERSUPV)
		nr_icplb_supv_miss[cpu]++;

	/*
	 * First, try to find a CPLB that matches this address.  If we
	 * find one, then the fact that we're in the miss handler means
	 * that the instruction crosses a page boundary.
	 */
	for (idx = first_switched_icplb; idx < MAX_CPLBS; idx++) {
		if (icplb_tbl[cpu][idx].data & CPLB_VALID) {
			unsigned long this_addr = icplb_tbl[cpu][idx].addr;
			if (this_addr <= addr && this_addr + PAGE_SIZE > addr) {
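				/* The instruction spills into the next page; map that one. */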
				addr += PAGE_SIZE;
				break;
			}
		}
	}

	i_data = CPLB_VALID | CPLB_PORTPRIO | PAGE_SIZE_4KB;

#ifdef CONFIG_BFIN_EXTMEM_ICACHEABLE
	/*
	 * Normal RAM, and possibly the reserved memory area, are
	 * cacheable.
	 */
	if (addr < _ramend ||
	    (addr < physical_mem_end && reserved_mem_icache_on))
		i_data |= CPLB_L1_CHBL | ANOMALY_05000158_WORKAROUND;
#endif

	if (L2_LENGTH && addr >= L2_START && addr < L2_START + L2_LENGTH) {
		addr = L2_START;
		i_data = L2_IMEMORY;
	} else if (addr >= physical_mem_end) {
		if (addr >= ASYNC_BANK0_BASE && addr < ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE) {
			if (!(status & FAULT_USERSUPV)) {
				unsigned long *mask = current_rwx_mask[cpu];

				if (mask) {
					int page = (addr - (ASYNC_BANK0_BASE - _ramend)) >> PAGE_SHIFT;
					int idx = page >> 5;
					int bit = 1 << (page & 31);

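					/* Skip to the execute bitmap. */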
					mask += 2 * page_mask_nelts;
					if (mask[idx] & bit)
						i_data |= CPLB_USER_RD;
				}
			}
		} else if (addr >= BOOT_ROM_START && addr < BOOT_ROM_START + BOOT_ROM_LENGTH
			   && (status & FAULT_USERSUPV)) {
			addr &= ~(1 * 1024 * 1024 - 1);
			i_data &= ~PAGE_SIZE_4KB;
			i_data |= PAGE_SIZE_1MB;
		} else
			return CPLB_PROT_VIOL;
	} else if (addr >= _ramend) {
		i_data |= CPLB_USER_RD;
		if (reserved_mem_icache_on)
			i_data |= CPLB_L1_CHBL;
	} else {
		/*
		 * Two cases to distinguish - a supervisor access must
		 * necessarily be for a module page; we grant it
		 * unconditionally (could do better here in the future).
		 * Otherwise, check the x bitmap of the current process.
		 */
		if (!(status & FAULT_USERSUPV)) {
			unsigned long *mask = current_rwx_mask[cpu];

			if (mask) {
				int page = addr >> PAGE_SHIFT;
				int idx = page >> 5;
				int bit = 1 << (page & 31);

				mask += 2 * page_mask_nelts;
				if (mask[idx] & bit)
					i_data |= CPLB_USER_RD;
			}
		}
	}
	idx = evict_one_icplb(cpu);
	addr &= PAGE_MASK;
	icplb_tbl[cpu][idx].addr = addr;
	icplb_tbl[cpu][idx].data = i_data;

	_disable_icplb();
	bfin_write32(ICPLB_DATA0 + idx * 4, i_data);
	bfin_write32(ICPLB_ADDR0 + idx * 4, addr);
	_enable_icplb();

	return 0;
}

MGR_ATTR static noinline int dcplb_protection_fault(unsigned int cpu)
{
	int status = bfin_read_DCPLB_STATUS();

	nr_dcplb_prot[cpu]++;

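	/*
	 * A write fault on an entry that is write-permitted but not yet
	 * marked dirty is how the first write to a page is noticed; just
	 * set CPLB_DIRTY and retry.  Anything else is a real violation.
	 */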
	if (status & FAULT_RW) {
		int idx = faulting_cplb_index(status);
		unsigned long data = dcplb_tbl[cpu][idx].data;
		if (!(data & CPLB_WT) && !(data & CPLB_DIRTY) &&
		    write_permitted(status, data)) {
			data |= CPLB_DIRTY;
			dcplb_tbl[cpu][idx].data = data;
			bfin_write32(DCPLB_DATA0 + idx * 4, data);
			return 0;
		}
	}
	return CPLB_PROT_VIOL;
}

MGR_ATTR int cplb_hdr(int seqstat, struct pt_regs *regs)
{
	int cause = seqstat & 0x3f;
	unsigned int cpu = raw_smp_processor_id();
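	/*
	 * Dispatch on the EXCAUSE code: 0x23 is a DCPLB protection
	 * violation, 0x2C an ICPLB miss and 0x26 a DCPLB miss.
	 */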
	switch (cause) {
	case 0x23:
		return dcplb_protection_fault(cpu);
	case 0x2C:
		return icplb_miss(cpu);
	case 0x26:
		return dcplb_miss(cpu);
	default:
		return 1;
	}
}

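/*
 * Invalidate every switched (per-process) CPLB entry on this CPU so that
 * stale translations are not carried over to another address space.
 */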
void flush_switched_cplbs(unsigned int cpu)
{
	int i;
	unsigned long flags;

	nr_cplb_flush[cpu]++;

	flags = hard_local_irq_save();
	_disable_icplb();
	for (i = first_switched_icplb; i < MAX_CPLBS; i++) {
		icplb_tbl[cpu][i].data = 0;
		bfin_write32(ICPLB_DATA0 + i * 4, 0);
	}
	_enable_icplb();

	_disable_dcplb();
	for (i = first_switched_dcplb; i < MAX_CPLBS; i++) {
		dcplb_tbl[cpu][i].data = 0;
		bfin_write32(DCPLB_DATA0 + i * 4, 0);
	}
	_enable_dcplb();
	hard_local_irq_restore(flags);
}

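/*
 * Install the data CPLBs that cover the new process's permission bitmaps
 * themselves, so the miss handlers can read them without faulting; a NULL
 * masks pointer just clears current_rwx_mask for this CPU.
 */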
void set_mask_dcplbs(unsigned long *masks, unsigned int cpu)
{
	int i;
	unsigned long addr = (unsigned long)masks;
	unsigned long d_data;
	unsigned long flags;

	if (!masks) {
		current_rwx_mask[cpu] = masks;
		return;
	}

	flags = hard_local_irq_save();
	current_rwx_mask[cpu] = masks;

	if (L2_LENGTH && addr >= L2_START && addr < L2_START + L2_LENGTH) {
		addr = L2_START;
		d_data = L2_DMEMORY;
	} else {
		d_data = CPLB_SUPV_WR | CPLB_VALID | CPLB_DIRTY | PAGE_SIZE_4KB;
#ifdef CONFIG_BFIN_EXTMEM_DCACHEABLE
		d_data |= CPLB_L1_CHBL;
# ifdef CONFIG_BFIN_EXTMEM_WRITETHROUGH
		d_data |= CPLB_L1_AOW | CPLB_WT;
# endif
#endif
	}

	_disable_dcplb();
	for (i = first_mask_dcplb; i < first_switched_dcplb; i++) {
		dcplb_tbl[cpu][i].addr = addr;
		dcplb_tbl[cpu][i].data = d_data;
		bfin_write32(DCPLB_DATA0 + i * 4, d_data);
		bfin_write32(DCPLB_ADDR0 + i * 4, addr);
		addr += PAGE_SIZE;
	}
	_enable_dcplb();
	hard_local_irq_restore(flags);
}