GitHub Repository: awilliam/linux-vfio
Path: blob/master/arch/mn10300/include/asm/div64.h
/* MN10300 64-bit division
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells ([email protected])
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#ifndef _ASM_DIV64
#define _ASM_DIV64

#include <linux/types.h>

extern void ____unhandled_size_in_do_div___(void);

/*
 * Beginning with gcc 4.6, the MDR register is represented explicitly.  We
 * must, therefore, at least explicitly clobber the register when we make
 * changes to it.  The following assembly fragments *could* be rearranged in
 * order to leave the moves to/from the MDR register to the compiler, but the
 * gains would be minimal at best.
 */
#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)
# define CLOBBER_MDR_CC		"mdr", "cc"
#else
# define CLOBBER_MDR_CC		"cc"
#endif

/*
 * divide n by base, leaving the result in n and returning the remainder
 * - we can do this quite efficiently on the MN10300 by cascading the divides
 *   through the MDR register
 */
#define do_div(n, base)						\
({								\
	unsigned __rem = 0;					\
	if (sizeof(n) <= 4) {					\
		asm("mov	%1,mdr	\n"			\
		    "divu	%2,%0	\n"			\
		    "mov	mdr,%1	\n"			\
		    : "+r"(n), "=d"(__rem)			\
		    : "r"(base), "1"(__rem)			\
		    : CLOBBER_MDR_CC				\
		    );						\
	} else if (sizeof(n) <= 8) {				\
		union {						\
			unsigned long long l;			\
			u32 w[2];				\
		} __quot;					\
		__quot.l = n;					\
		asm("mov	%0,mdr	\n"	/* MDR = 0 */	\
		    "divu	%3,%1	\n"			\
		    /* __quot.MSL = __div.MSL / base, */	\
		    /* MDR = MDR:__div.MSL % base */		\
		    "divu	%3,%2	\n"			\
		    /* __quot.LSL = MDR:__div.LSL / base, */	\
		    /* MDR = MDR:__div.LSL % base */		\
		    "mov	mdr,%0	\n"			\
		    : "=d"(__rem), "=r"(__quot.w[1]), "=r"(__quot.w[0]) \
		    : "r"(base), "0"(__rem), "1"(__quot.w[1]),	\
		      "2"(__quot.w[0])				\
		    : CLOBBER_MDR_CC				\
		    );						\
		n = __quot.l;					\
	} else {						\
		____unhandled_size_in_do_div___();		\
	}							\
	__rem;							\
})
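
/*
 * Illustrative sketch, not part of the original header: a typical use of
 * do_div().  The (up to 64-bit) dividend is updated in place with the
 * quotient and the 32-bit remainder is returned.  The function and
 * variable names below are hypothetical.
 */
#if 0	/* example only */
static inline u32 example_split_ns(u64 ns, u64 *sec)
{
	u32 rem;

	rem = do_div(ns, 1000000000);	/* ns now holds whole seconds */
	*sec = ns;
	return rem;			/* leftover nanoseconds */
}
#endif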

/*
 * do an unsigned 32-bit multiply and divide with intermediate 64-bit product
 * so as not to lose accuracy
 * - we use the MDR register to hold the MSW of the product
 */
static inline __attribute__((const))
unsigned __muldiv64u(unsigned val, unsigned mult, unsigned div)
{
	unsigned result;

	asm("mulu	%2,%0	\n"	/* MDR:val = val*mult */
	    "divu	%3,%0	\n"	/* val = MDR:val/div;
					 * MDR = MDR:val%div */
	    : "=r"(result)
	    : "0"(val), "ir"(mult), "r"(div)
	    : CLOBBER_MDR_CC
	    );

	return result;
}
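
/*
 * Illustrative sketch, not part of the original header: __muldiv64u()
 * computes (val * mult) / div without first truncating the product to
 * 32 bits, so a scaling ratio can be applied to values near UINT_MAX.
 * The names below are hypothetical.
 */
#if 0	/* example only */
static inline unsigned example_scale_ticks(unsigned ticks)
{
	/* ticks * 1000 may overflow 32 bits, but the 64-bit intermediate
	 * product in __muldiv64u() keeps the high bits for the divide */
	return __muldiv64u(ticks, 1000, 4096);
}
#endif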

/*
 * do a signed 32-bit multiply and divide with intermediate 64-bit product so
 * as not to lose accuracy
 * - we use the MDR register to hold the MSW of the product
 */
static inline __attribute__((const))
signed __muldiv64s(signed val, signed mult, signed div)
{
	signed result;

	asm("mul	%2,%0	\n"	/* MDR:val = val*mult */
	    "div	%3,%0	\n"	/* val = MDR:val/div;
					 * MDR = MDR:val%div */
	    : "=r"(result)
	    : "0"(val), "ir"(mult), "r"(div)
	    : CLOBBER_MDR_CC
	    );

	return result;
}
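
/*
 * Illustrative sketch, not part of the original header: __muldiv64s() is
 * the signed counterpart, for use when the value or the scaling ratio may
 * be negative.  The names below are hypothetical.
 */
#if 0	/* example only */
static inline signed example_scale_offset(signed off)
{
	return __muldiv64s(off, -3, 10);	/* (off * -3) / 10 */
}
#endif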

#endif /* _ASM_DIV64 */