GitHub Repository: torvalds/linux
Path: blob/master/arch/s390/mm/maccess.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Access kernel memory without faulting -- s390 specific implementation.
 *
 * Copyright IBM Corp. 2009, 2015
 *
 */

#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/cpu.h>
#include <linux/uio.h>
#include <linux/io.h>
#include <asm/asm-extable.h>
#include <asm/abs_lowcore.h>
#include <asm/stacktrace.h>
#include <asm/sections.h>
#include <asm/maccess.h>
#include <asm/ctlreg.h>

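/*
 * Virtual base of the memcpy-real window and a pointer to its page table
 * entry; both are set up by the early boot code (hence __bootdata_preserved).
 */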
unsigned long __bootdata_preserved(__memcpy_real_area);
pte_t *__bootdata_preserved(memcpy_real_ptep);
static DEFINE_MUTEX(memcpy_real_mutex);

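/*
 * Write up to "size" bytes, but never across an eight byte boundary: the
 * helper returns how many bytes it actually wrote, so __s390_kernel_write()
 * below can loop over larger ranges.
 */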
static notrace long s390_kernel_write_odd(void *dst, const void *src, size_t size)
{
	unsigned long aligned, offset, count;
	char tmp[8];

	aligned = (unsigned long) dst & ~7UL;
	offset = (unsigned long) dst & 7UL;
	size = min(8UL - offset, size);
	count = size - 1;
	asm volatile(
		/*
		 * Read the doubleword at the aligned address into tmp,
		 * merge the requested bytes from src at the right offset
		 * (via "ex" on the one byte mvc template), then store the
		 * result back with sturg, which operates on the real
		 * address and thus bypasses DAT and write protection.
		 */
		"	bras	1,0f\n"
		"	mvc	0(1,%4),0(%5)\n"
		"0:	mvc	0(8,%3),0(%0)\n"
		"	ex	%1,0(1)\n"
		"	lg	%1,0(%3)\n"
		"	lra	%0,0(%0)\n"
		"	sturg	%1,%0\n"
		: "+&a" (aligned), "+&a" (count), "=m" (tmp)
		: "a" (&tmp), "a" (&tmp[offset]), "a" (src)
		: "cc", "memory", "1");
	return size;
}

/*
 * __s390_kernel_write - write to kernel memory bypassing DAT
 * @dst: destination address
 * @src: source address
 * @size: number of bytes to copy
 *
 * This function writes to kernel memory bypassing DAT and possible page table
 * write protection. It writes to the destination using the sturg instruction.
 * Therefore we have a read-modify-write sequence: the function reads eight
 * bytes from the destination at an eight byte boundary, modifies the bytes
 * requested and writes the result back in a loop.
 */
static DEFINE_SPINLOCK(s390_kernel_write_lock);

notrace void *__s390_kernel_write(void *dst, const void *src, size_t size)
{
	void *tmp = dst;
	unsigned long flags;
	long copied;

	spin_lock_irqsave(&s390_kernel_write_lock, flags);
	while (size) {
		copied = s390_kernel_write_odd(tmp, src, size);
		tmp += copied;
		src += copied;
		size -= copied;
	}
	spin_unlock_irqrestore(&s390_kernel_write_lock, flags);

	return dst;
}
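
/*
 * Illustrative sketch (not part of the original file): how a caller might
 * use __s390_kernel_write() to patch a write-protected kernel object. The
 * function and its arguments are hypothetical; the actual entry point used
 * by in-tree callers depends on the kernel version.
 */
#if 0
static void example_patch(u32 *target, u32 new_value)
{
	/* Copies four bytes even if *target lives in read-only memory. */
	__s390_kernel_write(target, &new_value, sizeof(new_value));
}
#endif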
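/*
 * memcpy_real_iter - copy from real (absolute) memory into an iov_iter
 *
 * Real memory is accessed through a dedicated virtual window:
 * __memcpy_real_area is a page-sized area whose single PTE
 * (memcpy_real_ptep) is repointed read-only at the physical page to be
 * read, after which the data can be copied with normal instructions. The
 * mutex serializes users of the shared window.
 */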
size_t memcpy_real_iter(struct iov_iter *iter, unsigned long src, size_t count)
{
	size_t len, copied, res = 0;
	unsigned long phys, offset;
	void *chunk;
	pte_t pte;

	BUILD_BUG_ON(MEMCPY_REAL_SIZE != PAGE_SIZE);
	while (count) {
		phys = src & MEMCPY_REAL_MASK;
		offset = src & ~MEMCPY_REAL_MASK;
		chunk = (void *)(__memcpy_real_area + offset);
		len = min(count, MEMCPY_REAL_SIZE - offset);
		pte = mk_pte_phys(phys, PAGE_KERNEL_RO);

		mutex_lock(&memcpy_real_mutex);
		if (pte_val(pte) != pte_val(*memcpy_real_ptep)) {
			__ptep_ipte(__memcpy_real_area, memcpy_real_ptep, 0, 0, IPTE_GLOBAL);
			set_pte(memcpy_real_ptep, pte);
		}
		copied = copy_to_iter(chunk, len, iter);
		mutex_unlock(&memcpy_real_mutex);

		count -= copied;
		src += copied;
		res += copied;
		if (copied < len)
			break;
	}
	return res;
}

int memcpy_real(void *dest, unsigned long src, size_t count)
{
	struct iov_iter iter;
	struct kvec kvec;

	kvec.iov_base = dest;
	kvec.iov_len = count;
	iov_iter_kvec(&iter, ITER_DEST, &kvec, 1, count);
	if (memcpy_real_iter(&iter, src, count) < count)
		return -EFAULT;
	return 0;
}
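
/*
 * Illustrative sketch (not part of the original file): reading the first
 * bytes of absolute memory with memcpy_real(). The buffer and the chosen
 * address are hypothetical; memcpy_real() returns 0 on success and
 * -EFAULT if fewer than count bytes could be copied.
 */
#if 0
static int example_read_real(void)
{
	unsigned char buf[64];

	return memcpy_real(buf, 0x10000UL, sizeof(buf));
}
#endif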
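/*
 * Background: each CPU's prefix register swaps two ranges of memory.
 * CPU-local accesses to [0, sizeof(struct lowcore)) are redirected to the
 * CPU's lowcore area, and accesses to that lowcore area are redirected to
 * [0, sizeof(struct lowcore)). /dev/mem is expected to show absolute
 * memory, so prefix pages need the special handling below.
 */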
/*
 * Find the CPU that owns the swapped prefix page at the given address
 */
static int get_swapped_owner(phys_addr_t addr)
{
	phys_addr_t lc;
	int cpu;

	for_each_online_cpu(cpu) {
		lc = virt_to_phys(lowcore_ptr[cpu]);
		if (addr > lc + sizeof(struct lowcore) - 1 || addr < lc)
			continue;
		return cpu;
	}
	return -1;
}

/*
 * Convert a physical pointer for /dev/mem access
 *
 * For swapped prefix pages a new buffer is returned that contains a copy of
 * the absolute memory. The buffer is at most one page in size.
 */
void *xlate_dev_mem_ptr(phys_addr_t addr)
{
	void *ptr = phys_to_virt(addr);
	void *bounce = ptr;
	struct lowcore *abs_lc;
	unsigned long size;
	int this_cpu, cpu;

	cpus_read_lock();
	this_cpu = get_cpu();
	if (addr >= sizeof(struct lowcore)) {
		cpu = get_swapped_owner(addr);
		if (cpu < 0)
			goto out;	/* Not a prefix page: return the identity mapping. */
	}
	bounce = (void *)__get_free_page(GFP_ATOMIC);
	if (!bounce)
		goto out;
	size = PAGE_SIZE - (addr & ~PAGE_MASK);
	if (addr < sizeof(struct lowcore)) {
		/* Absolute lowcore: read it through the absolute lowcore mapping. */
		abs_lc = get_abs_lowcore();
		ptr = (void *)abs_lc + addr;
		memcpy(bounce, ptr, size);
		put_abs_lowcore(abs_lc);
	} else if (cpu == this_cpu) {
		/*
		 * Prefix area of this CPU: access the matching offset in
		 * [0, sizeof(struct lowcore)), which the prefix register
		 * forwards to this range.
		 */
		ptr = (void *)(addr - virt_to_phys(lowcore_ptr[cpu]));
		memcpy(bounce, ptr, size);
	} else {
		/* Prefix area of another CPU: a plain absolute access works. */
		memcpy(bounce, ptr, size);
	}
out:
	put_cpu();
	cpus_read_unlock();
	return bounce;
}

/*
 * Free the converted buffer for /dev/mem access (if necessary)
 */
void unxlate_dev_mem_ptr(phys_addr_t addr, void *ptr)
{
	if (addr != virt_to_phys(ptr))
		free_page((unsigned long)ptr);
}
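
/*
 * Illustrative sketch (not part of the original file): how a /dev/mem style
 * reader would pair the two helpers above. The function and its arguments
 * are hypothetical; the real user is the kernel's /dev/mem implementation.
 */
#if 0
static ssize_t example_dev_mem_read(phys_addr_t addr, void *dst, size_t len)
{
	void *ptr = xlate_dev_mem_ptr(addr);

	if (!ptr)
		return -EFAULT;
	/* The translated buffer covers at most up to the end of the page. */
	len = min(len, (size_t)(PAGE_SIZE - (addr & ~PAGE_MASK)));
	memcpy(dst, ptr, len);
	unxlate_dev_mem_ptr(addr, ptr);
	return len;
}
#endif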