GitHub Repository: freebsd/freebsd-src
Path: blob/main/lib/libc/gmon/mcount.c
/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1983, 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/gmon.h>
#ifdef _KERNEL
#include <sys/systm.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
void	bintr(void);
void	btrap(void);
void	eintr(void);
void	user(void);
#endif
#include <machine/atomic.h>

/*
 * mcount is called on entry to each function compiled with the profiling
 * switch set.  _mcount(), which is declared in a machine-dependent way
 * with _MCOUNT_DECL, does the actual work and is either inlined into a
 * C routine or called by an assembly stub.  In any case, this magic is
 * taken care of by the MCOUNT definition in <machine/profile.h>.
 *
 * _mcount updates data structures that represent traversals of the
 * program's call graph edges.  frompc and selfpc are the return
 * address and function address that represents the given call graph edge.
 *
 * Note: the original BSD code used the same variable (frompcindex) for
 * both frompcindex and frompc.  Any reasonable, modern compiler will
 * perform this optimization.
 */
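/*
 * Illustrative sketch (an assumption for exposition, not part of the
 * original file): on typical targets, compiling with -pg makes every
 * function prologue call the mcount glue, which behaves roughly like
 *
 *	_mcount((uintfptr_t)__builtin_return_address(0),
 *	    (uintfptr_t)&profiled_function);
 *
 * where the first argument is frompc (the call site's return address)
 * and the second is selfpc (the profiled function itself);
 * profiled_function is a hypothetical stand-in.  The real glue is the
 * machine-dependent MCOUNT macro from <machine/profile.h>.
 */
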
/* _mcount; may be static, inline, etc */
_MCOUNT_DECL(uintfptr_t frompc, uintfptr_t selfpc)
{
#ifdef GUPROF
	u_int delta;
#endif
	fptrdiff_t frompci;
	u_short *frompcindex;
	struct tostruct *top, *prevtop;
	struct gmonparam *p;
	long toindex;
#ifdef _KERNEL
	MCOUNT_DECL(s)
#endif

	p = &_gmonparam;
#ifndef GUPROF			/* XXX */
	/*
	 * check that we are profiling
	 * and that we aren't recursively invoked.
	 */
	if (p->state != GMON_PROF_ON)
		return;
#endif
#ifdef _KERNEL
	MCOUNT_ENTER(s);
#else
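	/*
	 * Illustrative note: atomic_cmpset_acq_int() returns nonzero
	 * only when it atomically swaps p->state from GMON_PROF_ON to
	 * GMON_PROF_BUSY, so a concurrent or re-entrant caller simply
	 * drops its sample instead of corrupting froms[] and tos[].
	 */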
	if (!atomic_cmpset_acq_int(&p->state, GMON_PROF_ON, GMON_PROF_BUSY))
		return;
#endif
	frompci = frompc - p->lowpc;

#ifdef _KERNEL
	/*
	 * When we are called from an exception handler, frompci may be
	 * for a user address.  Convert such frompci's to the index of
	 * user() to merge all user counts.
	 */
	if (frompci >= p->textsize) {
		if (frompci + p->lowpc
		    >= (uintfptr_t)(VM_MAXUSER_ADDRESS + UPAGES * PAGE_SIZE))
			goto done;
		frompci = (uintfptr_t)user - p->lowpc;
		if (frompci >= p->textsize)
			goto done;
	}
#endif

#ifdef GUPROF
	if (p->state != GMON_PROF_HIRES)
		goto skip_guprof_stuff;
	/*
	 * Look at the clock and add the count of clock cycles since the
	 * clock was last looked at to a counter for frompc.  This
	 * solidifies the count for the function containing frompc and
	 * effectively starts another clock for the current function.
	 * The count for the new clock will be solidified when another
	 * function call is made or the function returns.
	 *
	 * We use the usual sampling counters since they can be located
	 * efficiently.  4-byte counters are usually necessary.
	 *
	 * There are many complications for subtracting the profiling
	 * overheads from the counts for normal functions and adding
	 * them to the counts for mcount(), mexitcount() and cputime().
	 * We attempt to handle fractional cycles, but the overheads
	 * are usually underestimated because they are calibrated for
	 * a simpler than usual setup.
	 */
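	/*
	 * Worked example with illustrative numbers: a fractional
	 * overhead of 0.25 cycle is recorded as an _frac value of
	 * CALIB_SCALE / 4.  The _resid accumulators below then cross
	 * CALIB_SCALE once every four calls on average, and on such a
	 * call one whole cycle is taken out of delta and charged to
	 * the overhead counter instead, accounting for fractional
	 * cycles without floating point.
	 */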
	delta = cputime() - p->mcount_overhead;
	p->cputime_overhead_resid += p->cputime_overhead_frac;
	p->mcount_overhead_resid += p->mcount_overhead_frac;
	if ((int)delta < 0)
		*p->mcount_count += delta + p->mcount_overhead
		    - p->cputime_overhead;
	else if (delta != 0) {
		if (p->cputime_overhead_resid >= CALIB_SCALE) {
			p->cputime_overhead_resid -= CALIB_SCALE;
			++*p->cputime_count;
			--delta;
		}
		if (delta != 0) {
			if (p->mcount_overhead_resid >= CALIB_SCALE) {
				p->mcount_overhead_resid -= CALIB_SCALE;
				++*p->mcount_count;
				--delta;
			}
			KCOUNT(p, frompci) += delta;
		}
		*p->mcount_count += p->mcount_overhead_sub;
	}
	*p->cputime_count += p->cputime_overhead;
skip_guprof_stuff:
#endif /* GUPROF */

#ifdef _KERNEL
	/*
	 * When we are called from an exception handler, frompc is faked
	 * to be for where the exception occurred.  We've just solidified
	 * the count for there.  Now convert frompci to the index of btrap()
	 * for trap handlers and bintr() for interrupt handlers to make
	 * exceptions appear in the call graph as calls from btrap() and
	 * bintr() instead of calls from all over.
	 */
	if ((uintfptr_t)selfpc >= (uintfptr_t)btrap
	    && (uintfptr_t)selfpc < (uintfptr_t)eintr) {
		if ((uintfptr_t)selfpc >= (uintfptr_t)bintr)
			frompci = (uintfptr_t)bintr - p->lowpc;
		else
			frompci = (uintfptr_t)btrap - p->lowpc;
	}
#endif

	/*
	 * check that frompc is a reasonable pc value.
	 * for example:	signal catchers get called from the stack,
	 *		not from text space.  too bad.
	 */
	if (frompci >= p->textsize)
		goto done;

	frompcindex = &p->froms[frompci / (p->hashfraction * sizeof(*p->froms))];
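	/*
	 * Illustrative note: with the historical HASHFRACTION of 2 and
	 * 2-byte froms[] entries, the division above dedicates one
	 * froms[] slot to every 4 bytes of text, so call sites less
	 * than 4 bytes apart share a slot and hence a tos[] chain.
	 */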
	toindex = *frompcindex;
	if (toindex == 0) {
		/*
		 *	first time traversing this arc
		 */
		toindex = ++p->tos[0].link;
		if (toindex >= p->tolimit)
			/* halt further profiling */
			goto overflow;

		*frompcindex = toindex;
		top = &p->tos[toindex];
		top->selfpc = selfpc;
		top->count = 1;
		top->link = 0;
		goto done;
	}
	top = &p->tos[toindex];
	if (top->selfpc == selfpc) {
		/*
		 * arc at front of chain; usual case.
		 */
		top->count++;
		goto done;
	}
	/*
	 * have to go looking down chain for it.
	 * top points to what we are looking at,
	 * prevtop points to previous top.
	 * we know it is not at the head of the chain.
	 */
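	/*
	 * Illustrative layout: each froms[] slot heads a singly linked
	 * chain of tos[] entries, one per distinct callee seen from
	 * that call site, most recently used arc first:
	 *
	 *	froms[i] -> tos[a] -> tos[b] -> ... -> 0
	 *
	 * tos[0].link doubles as the allocation cursor (see
	 * ++p->tos[0].link above), so index 0 terminates every chain.
	 */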
	for (; /* goto done */; ) {
		if (top->link == 0) {
			/*
			 * top is end of the chain and none of the chain
			 * had top->selfpc == selfpc.
			 * so we allocate a new tostruct
			 * and link it to the head of the chain.
			 */
			toindex = ++p->tos[0].link;
			if (toindex >= p->tolimit)
				goto overflow;

			top = &p->tos[toindex];
			top->selfpc = selfpc;
			top->count = 1;
			top->link = *frompcindex;
			*frompcindex = toindex;
			goto done;
		}
		/*
		 * otherwise, check the next arc on the chain.
		 */
		prevtop = top;
		top = &p->tos[top->link];
		if (top->selfpc == selfpc) {
			/*
			 * there it is.
			 * increment its count
			 * move it to the head of the chain.
			 */
			top->count++;
			toindex = prevtop->link;
			prevtop->link = top->link;
			top->link = *frompcindex;
			*frompcindex = toindex;
			goto done;
		}

	}
done:
#ifdef _KERNEL
	MCOUNT_EXIT(s);
#else
	atomic_store_rel_int(&p->state, GMON_PROF_ON);
#endif
	return;
overflow:
	atomic_store_rel_int(&p->state, GMON_PROF_ERROR);
#ifdef _KERNEL
	MCOUNT_EXIT(s);
#endif
	return;
}

/*
 * Actual definition of mcount function.  Defined in <machine/profile.h>,
 * which is included by <sys/gmon.h>.
 */
MCOUNT

#ifdef GUPROF
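/*
 * mexitcount() is the exit-side counterpart of _mcount() under GUPROF:
 * the machine-dependent glue calls it on function return, and it
 * charges the cycles accumulated since the last cputime() reading to
 * the returning function, using the same fractional-overhead scheme
 * as _mcount() above.
 */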
void
mexitcount(uintfptr_t selfpc)
{
	struct gmonparam *p;
	uintfptr_t selfpcdiff;

	p = &_gmonparam;
	selfpcdiff = selfpc - (uintfptr_t)p->lowpc;
	if (selfpcdiff < p->textsize) {
		u_int delta;

		/*
		 * Solidify the count for the current function.
		 */
		delta = cputime() - p->mexitcount_overhead;
		p->cputime_overhead_resid += p->cputime_overhead_frac;
		p->mexitcount_overhead_resid += p->mexitcount_overhead_frac;
		if ((int)delta < 0)
			*p->mexitcount_count += delta + p->mexitcount_overhead
			    - p->cputime_overhead;
		else if (delta != 0) {
			if (p->cputime_overhead_resid >= CALIB_SCALE) {
				p->cputime_overhead_resid -= CALIB_SCALE;
				++*p->cputime_count;
				--delta;
			}
			if (delta != 0) {
				if (p->mexitcount_overhead_resid
				    >= CALIB_SCALE) {
					p->mexitcount_overhead_resid
					    -= CALIB_SCALE;
					++*p->mexitcount_count;
					--delta;
				}
				KCOUNT(p, selfpcdiff) += delta;
			}
			*p->mexitcount_count += p->mexitcount_overhead_sub;
		}
		*p->cputime_count += p->cputime_overhead;
	}
}
#endif /* GUPROF */
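
/*
 * Usage sketch (illustrative, not part of the library): a program
 * built with -pg exercises _mcount() automatically and writes
 * gmon.out on normal exit, which gprof(1) then interprets:
 *
 *	static void leaf(void) { }
 *
 *	int
 *	main(void)
 *	{
 *		for (int i = 0; i < 1000000; i++)
 *			leaf();
 *		return (0);
 *	}
 *
 *	$ cc -pg -o demo demo.c
 *	$ ./demo
 *	$ gprof demo gmon.out
 *
 * moncontrol(3) can pause and resume collection at run time.
 */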