GitHub Repository: awilliam/linux-vfio
Path: blob/master/arch/tile/include/asm/bitops_64.h

/*
 * Copyright 2011 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_BITOPS_64_H
#define _ASM_TILE_BITOPS_64_H

#include <linux/compiler.h>
#include <asm/atomic.h>
#include <asm/system.h>

/* See <asm/bitops.h> for API comments. */

static inline void set_bit(unsigned nr, volatile unsigned long *addr)
{
        unsigned long mask = (1UL << (nr % BITS_PER_LONG));
        __insn_fetchor((void *)(addr + nr / BITS_PER_LONG), mask);
}

static inline void clear_bit(unsigned nr, volatile unsigned long *addr)
{
        unsigned long mask = (1UL << (nr % BITS_PER_LONG));
        __insn_fetchand((void *)(addr + nr / BITS_PER_LONG), ~mask);
}
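
/*
 * Illustrative sketch (not part of this header): __insn_fetchor() and
 * __insn_fetchand() are TILE-Gx compiler intrinsics that perform an
 * atomic fetch-OR / fetch-AND on the addressed word.  A portable
 * approximation, assuming the GCC __atomic builtins, is sketched
 * below; the portable_* names and EX_BITS_PER_LONG are hypothetical.
 * Relaxed ordering matches the non-test bitops, which imply no barrier.
 */
#if 0
#include <limits.h>

#define EX_BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

static inline void portable_set_bit(unsigned nr, volatile unsigned long *addr)
{
        unsigned long mask = 1UL << (nr % EX_BITS_PER_LONG);
        __atomic_fetch_or(addr + nr / EX_BITS_PER_LONG, mask,
                          __ATOMIC_RELAXED);
}

static inline void portable_clear_bit(unsigned nr, volatile unsigned long *addr)
{
        unsigned long mask = 1UL << (nr % EX_BITS_PER_LONG);
        __atomic_fetch_and(addr + nr / EX_BITS_PER_LONG, ~mask,
                           __ATOMIC_RELAXED);
}
#endif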

#define smp_mb__before_clear_bit()      smp_mb()
#define smp_mb__after_clear_bit()       smp_mb()
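
/*
 * Illustrative usage (not part of this header): clear_bit() implies no
 * memory barrier, so code that publishes data by clearing a flag bit
 * must order the data stores first.  The struct, field, and bit names
 * below are hypothetical.
 */
#if 0
struct ex_work {
        unsigned long flags;
        int result;
};
#define EX_PENDING 0

static void ex_complete(struct ex_work *work, int result)
{
        work->result = result;          /* store the payload ... */
        smp_mb__before_clear_bit();     /* ... order it before the clear ... */
        clear_bit(EX_PENDING, &work->flags);    /* ... then publish */
}
#endif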

static inline void change_bit(unsigned nr, volatile unsigned long *addr)
{
        unsigned long mask = (1UL << (nr % BITS_PER_LONG));
        long guess, oldval;
        addr += nr / BITS_PER_LONG;
        oldval = *addr;
        do {
                guess = oldval;
                oldval = atomic64_cmpxchg((atomic64_t *)addr,
                                          guess, guess ^ mask);
        } while (guess != oldval);
}
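
/*
 * Illustrative sketch (not part of this header): the guess/oldval loop
 * above is the classic compare-and-swap retry idiom.  A hypothetical
 * C11 rendering of the same technique, operating on a single word for
 * brevity:
 */
#if 0
#include <stdatomic.h>

static void c11_change_bit(unsigned nr, _Atomic unsigned long *word)
{
        unsigned long mask = 1UL << nr;
        unsigned long guess = atomic_load_explicit(word,
                                                   memory_order_relaxed);
        /* Retry until no other thread modified the word between our read
         * and the compare-exchange; on failure 'guess' is reloaded with
         * the current value. */
        while (!atomic_compare_exchange_weak(word, &guess, guess ^ mask))
                ;
}
#endif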

/*
 * The test_and_xxx_bit() routines require a memory fence before we
 * start the operation, and after the operation completes.  We use
 * smp_mb() before, and rely on the "!= 0" comparison, plus a compiler
 * barrier(), to block until the atomic op is complete.
 */

static inline int test_and_set_bit(unsigned nr, volatile unsigned long *addr)
{
        int val;
        unsigned long mask = (1UL << (nr % BITS_PER_LONG));
        smp_mb();  /* barrier for proper semantics */
        val = (__insn_fetchor((void *)(addr + nr / BITS_PER_LONG), mask)
               & mask) != 0;
        barrier();
        return val;
}
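
/*
 * Illustrative usage (not part of this header): because
 * test_and_set_bit() returns the previous bit value and acts as a full
 * barrier, it can serve as a simple trylock.  The ex_* names are
 * hypothetical.
 */
#if 0
static unsigned long ex_lock_word;
#define EX_LOCK_BIT 0

static int ex_trylock(void)
{
        /* Previous bit 0: we took the lock; previous bit 1: busy. */
        return !test_and_set_bit(EX_LOCK_BIT, &ex_lock_word);
}

static void ex_unlock(void)
{
        smp_mb__before_clear_bit();     /* order the critical section first */
        clear_bit(EX_LOCK_BIT, &ex_lock_word);
}
#endif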

static inline int test_and_clear_bit(unsigned nr, volatile unsigned long *addr)
{
        int val;
        unsigned long mask = (1UL << (nr % BITS_PER_LONG));
        smp_mb();  /* barrier for proper semantics */
        val = (__insn_fetchand((void *)(addr + nr / BITS_PER_LONG), ~mask)
               & mask) != 0;
        barrier();
        return val;
}

static inline int test_and_change_bit(unsigned nr,
                                      volatile unsigned long *addr)
{
        unsigned long mask = (1UL << (nr % BITS_PER_LONG));
        long guess, oldval;
        addr += nr / BITS_PER_LONG;
        oldval = *addr;
        do {
                guess = oldval;
                oldval = atomic64_cmpxchg((atomic64_t *)addr,
                                          guess, guess ^ mask);
        } while (guess != oldval);
        return (oldval & mask) != 0;
}
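
/*
 * Illustrative sketch (not part of this header): on targets with a
 * native atomic fetch-XOR, the whole cmpxchg loop above collapses to a
 * single operation that also returns the old value.  A hypothetical
 * C11 rendering, single-word for brevity:
 */
#if 0
#include <stdatomic.h>

static int c11_test_and_change_bit(unsigned nr, _Atomic unsigned long *word)
{
        unsigned long mask = 1UL << nr;
        /* atomic_fetch_xor() returns the value held before the XOR. */
        return (atomic_fetch_xor(word, mask) & mask) != 0;
}
#endif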

#define ext2_set_bit_atomic(lock, nr, addr)                     \
        test_and_set_bit((nr), (unsigned long *)(addr))
#define ext2_clear_bit_atomic(lock, nr, addr)                   \
        test_and_clear_bit((nr), (unsigned long *)(addr))

#endif /* _ASM_TILE_BITOPS_64_H */