Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
torvalds
GitHub Repository: torvalds/linux
Path: blob/master/arch/parisc/include/asm/futex.h
26298 views
1
/* SPDX-License-Identifier: GPL-2.0 */
2
#ifndef _ASM_PARISC_FUTEX_H
3
#define _ASM_PARISC_FUTEX_H
4
5
#include <linux/futex.h>
6
#include <linux/uaccess.h>
7
#include <asm/atomic.h>
8
#include <asm/errno.h>
9
10
/* The following has to match the LWS code in syscall.S. We have
11
* 256 four-word locks. We use bits 20-27 of the futex virtual
12
* address for the hash index.
13
*/
14
15
/*
 * Map a futex user-space address to a byte offset into the LWS lock
 * array.  Eight address bits select one of 256 locks; the result is
 * pre-scaled by 4 (mask 0x3fc) because each lock occupies four words.
 */
static inline unsigned long _futex_hash_index(unsigned long ua)
{
	unsigned long idx = ua >> 2;

	return idx & 0x3fc;
}
19
20
/*
 * Acquire one of the LWS hash locks with interrupts disabled on the
 * local CPU.  Interrupts are turned off *before* the lock is taken so
 * the critical section cannot be preempted by an interrupt on this CPU;
 * the previous interrupt state is saved through *flags for the matching
 * _futex_spin_unlock_irqrestore() call.
 */
static inline void
_futex_spin_lock_irqsave(arch_spinlock_t *s, unsigned long *flags)
{
	local_irq_save(*flags);
	arch_spin_lock(s);
}
26
27
/*
 * Release an LWS hash lock and restore the interrupt state saved by the
 * paired _futex_spin_lock_irqsave().  The lock is dropped first, then
 * interrupts are restored — the exact inverse of the acquire order.
 */
static inline void
_futex_spin_unlock_irqrestore(arch_spinlock_t *s, unsigned long *flags)
{
	arch_spin_unlock(s);
	local_irq_restore(*flags);
}
33
34
static inline int
35
arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
36
{
37
extern u32 lws_lock_start[];
38
unsigned long ua = (unsigned long)uaddr;
39
arch_spinlock_t *s;
40
unsigned long flags;
41
int oldval, ret;
42
u32 tmp;
43
44
s = (arch_spinlock_t *)&lws_lock_start[_futex_hash_index(ua)];
45
_futex_spin_lock_irqsave(s, &flags);
46
47
/* Return -EFAULT if we encounter a page fault or COW break */
48
if (unlikely(get_user(oldval, uaddr) != 0)) {
49
ret = -EFAULT;
50
goto out_pagefault_enable;
51
}
52
53
ret = 0;
54
tmp = oldval;
55
56
switch (op) {
57
case FUTEX_OP_SET:
58
tmp = oparg;
59
break;
60
case FUTEX_OP_ADD:
61
tmp += oparg;
62
break;
63
case FUTEX_OP_OR:
64
tmp |= oparg;
65
break;
66
case FUTEX_OP_ANDN:
67
tmp &= ~oparg;
68
break;
69
case FUTEX_OP_XOR:
70
tmp ^= oparg;
71
break;
72
default:
73
ret = -ENOSYS;
74
goto out_pagefault_enable;
75
}
76
77
if (unlikely(put_user(tmp, uaddr) != 0))
78
ret = -EFAULT;
79
80
out_pagefault_enable:
81
_futex_spin_unlock_irqrestore(s, &flags);
82
83
if (!ret)
84
*oval = oldval;
85
86
return ret;
87
}
88
89
static inline int
90
futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
91
u32 oldval, u32 newval)
92
{
93
extern u32 lws_lock_start[];
94
unsigned long ua = (unsigned long)uaddr;
95
arch_spinlock_t *s;
96
u32 val;
97
unsigned long flags;
98
99
if (!access_ok(uaddr, sizeof(u32)))
100
return -EFAULT;
101
102
/* HPPA has no cmpxchg in hardware and therefore the
103
* best we can do here is use an array of locks. The
104
* lock selected is based on a hash of the virtual
105
* address of the futex. This should scale to a couple
106
* of CPUs.
107
*/
108
109
s = (arch_spinlock_t *)&lws_lock_start[_futex_hash_index(ua)];
110
_futex_spin_lock_irqsave(s, &flags);
111
if (unlikely(get_user(val, uaddr) != 0)) {
112
_futex_spin_unlock_irqrestore(s, &flags);
113
return -EFAULT;
114
}
115
116
if (val == oldval && unlikely(put_user(newval, uaddr) != 0)) {
117
_futex_spin_unlock_irqrestore(s, &flags);
118
return -EFAULT;
119
}
120
121
*uval = val;
122
_futex_spin_unlock_irqrestore(s, &flags);
123
124
return 0;
125
}
126
127
#endif /*_ASM_PARISC_FUTEX_H*/
128
129