GitHub Repository: freebsd/freebsd-src
Path: blob/main/contrib/llvm-project/compiler-rt/lib/xray/xray_fdr_log_writer.h
//===-- xray_fdr_log_writer.h ---------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of XRay, a function call tracing system.
//
//===----------------------------------------------------------------------===//
#ifndef COMPILER_RT_LIB_XRAY_XRAY_FDR_LOG_WRITER_H_
#define COMPILER_RT_LIB_XRAY_XRAY_FDR_LOG_WRITER_H_

#include "xray_buffer_queue.h"
#include "xray_fdr_log_records.h"
#include <functional>
#include <tuple>
#include <type_traits>
#include <utility>

namespace __xray {

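// SerializerImpl copies each element of a tuple, byte for byte, into a raw
// character buffer, advancing by sizeof(element) after every copy. The two
// serializeTo overloads are selected through SFINAE: the first is enabled
// while Index still names a tuple element and recurses on Index + 1; the
// second is enabled once Index runs past the end and terminates the
// recursion.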
template <size_t Index> struct SerializerImpl {
  template <class Tuple,
            typename std::enable_if<
                Index<std::tuple_size<
                          typename std::remove_reference<Tuple>::type>::value,
                      int>::type = 0>
  static void serializeTo(char *Buffer, Tuple &&T) {
    auto P = reinterpret_cast<const char *>(&std::get<Index>(T));
    constexpr auto Size = sizeof(std::get<Index>(T));
    internal_memcpy(Buffer, P, Size);
    SerializerImpl<Index + 1>::serializeTo(Buffer + Size,
                                           std::forward<Tuple>(T));
  }

  template <class Tuple,
            typename std::enable_if<
                Index >= std::tuple_size<typename std::remove_reference<
                             Tuple>::type>::value,
                int>::type = 0>
  static void serializeTo(char *, Tuple &&) {}
};

using Serializer = SerializerImpl<0>;

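// AggregateSizesImpl and AggregateSizes compute, at compile time, the total
// number of bytes occupied by the elements of a tuple type by summing
// sizeof(...) across all elements. createMetadataRecord() uses this to
// statically reject payloads that cannot fit in a MetadataRecord.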
template <class Tuple, size_t Index> struct AggregateSizesImpl {
  static constexpr size_t value =
      sizeof(typename std::tuple_element<Index, Tuple>::type) +
      AggregateSizesImpl<Tuple, Index - 1>::value;
};

template <class Tuple> struct AggregateSizesImpl<Tuple, 0> {
  static constexpr size_t value =
      sizeof(typename std::tuple_element<0, Tuple>::type);
};

template <class Tuple> struct AggregateSizes {
  static constexpr size_t value =
      AggregateSizesImpl<Tuple, std::tuple_size<Tuple>::value - 1>::value;
};

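// Builds a MetadataRecord of the given kind, packing the arguments
// byte-for-byte into the record's data area. The static_assert guarantees at
// compile time that the packed arguments fit in the space available after the
// record's one-byte type/kind header.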
template <MetadataRecord::RecordKinds Kind, class... DataTypes>
MetadataRecord createMetadataRecord(DataTypes &&... Ds) {
  static_assert(AggregateSizes<std::tuple<DataTypes...>>::value <=
                    sizeof(MetadataRecord) - 1,
                "Metadata payload longer than metadata buffer!");
  MetadataRecord R;
  R.Type = 1;
  R.RecordKind = static_cast<uint8_t>(Kind);
  Serializer::serializeTo(R.Data,
                          std::make_tuple(std::forward<DataTypes>(Ds)...));
  return R;
}

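// FDRLogWriter appends FDR-mode records to a single BufferQueue::Buffer. Each
// write first copies its bytes into the buffer and only then publishes them
// by bumping Buffer.Extents behind a release fence, so concurrent readers
// that synchronise on the extents never observe a partially written record.
//
// A minimal usage sketch (assuming B is a valid buffer handed out by a
// BufferQueue):
//
//   FDRLogWriter Writer(B);
//   Writer.writeFunction(FDRLogWriter::FunctionRecordKind::Enter,
//                        /*FuncId=*/42, /*Delta=*/0);
//   Writer.writeFunction(FDRLogWriter::FunctionRecordKind::Exit,
//                        /*FuncId=*/42, /*Delta=*/100);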
class FDRLogWriter {
  BufferQueue::Buffer &Buffer;
  char *NextRecord = nullptr;

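  // Private helper: copies one fixed-size record into the buffer, then
  // publishes it by advancing the buffer extents.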
  template <class T> void writeRecord(const T &R) {
    internal_memcpy(NextRecord, reinterpret_cast<const char *>(&R), sizeof(T));
    NextRecord += sizeof(T);
    // We need this atomic fence here to ensure that other threads attempting
    // to read the bytes in the buffer will see the writes committed before
    // the extents are updated.
    atomic_thread_fence(memory_order_release);
    atomic_fetch_add(Buffer.Extents, sizeof(T), memory_order_acq_rel);
  }

public:
  explicit FDRLogWriter(BufferQueue::Buffer &B, char *P)
      : Buffer(B), NextRecord(P) {
    DCHECK_NE(Buffer.Data, nullptr);
    DCHECK_NE(NextRecord, nullptr);
  }

  explicit FDRLogWriter(BufferQueue::Buffer &B)
      : FDRLogWriter(B, static_cast<char *>(B.Data)) {}

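  // Builds a metadata record of the given kind from Ds and appends it to the
  // buffer.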
  template <MetadataRecord::RecordKinds Kind, class... Data>
  bool writeMetadata(Data &&... Ds) {
    // TODO: Check boundary conditions:
    // 1) Buffer is full, and cannot handle one metadata record.
    // 2) Buffer queue is finalising.
    writeRecord(createMetadataRecord<Kind>(std::forward<Data>(Ds)...));
    return true;
  }

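  // Appends an array of pre-built metadata records with a single memcpy and
  // publishes them with one extents update. Returns the number of bytes
  // written.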
  template <size_t N> size_t writeMetadataRecords(MetadataRecord (&Recs)[N]) {
    constexpr auto Size = sizeof(MetadataRecord) * N;
    internal_memcpy(NextRecord, reinterpret_cast<const char *>(Recs), Size);
    NextRecord += Size;
    // We need this atomic fence here to ensure that other threads attempting
    // to read the bytes in the buffer will see the writes committed before
    // the extents are updated.
    atomic_thread_fence(memory_order_release);
    atomic_fetch_add(Buffer.Extents, Size, memory_order_acq_rel);
    return Size;
  }

  enum class FunctionRecordKind : uint8_t {
    Enter = 0x00,
    Exit = 0x01,
    TailExit = 0x02,
    EnterArg = 0x03,
  };

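  // Appends a single function record for FuncId with the given TSC delta.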
  bool writeFunction(FunctionRecordKind Kind, int32_t FuncId, int32_t Delta) {
    FunctionRecord R;
    R.Type = 0;
    R.RecordKind = uint8_t(Kind);
    R.FuncId = FuncId;
    R.TSCDelta = Delta;
    writeRecord(R);
    return true;
  }

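  // Appends a function record immediately followed by a CallArgument metadata
  // record carrying Arg, publishing both with one extents update.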
  bool writeFunctionWithArg(FunctionRecordKind Kind, int32_t FuncId,
                            int32_t Delta, uint64_t Arg) {
    // We write the function record and its argument into the buffer, and then
    // atomically update the buffer extents. This ensures that any reads
    // synchronised on the buffer extents record will always see the writes
    // that happen before the atomic update.
    FunctionRecord R;
    R.Type = 0;
    R.RecordKind = uint8_t(Kind);
    R.FuncId = FuncId;
    R.TSCDelta = Delta;
    MetadataRecord A =
        createMetadataRecord<MetadataRecord::RecordKinds::CallArgument>(Arg);
    NextRecord = reinterpret_cast<char *>(internal_memcpy(
                     NextRecord, reinterpret_cast<char *>(&R), sizeof(R))) +
                 sizeof(R);
    NextRecord = reinterpret_cast<char *>(internal_memcpy(
                     NextRecord, reinterpret_cast<char *>(&A), sizeof(A))) +
                 sizeof(A);
    // We need this atomic fence here to ensure that other threads attempting
    // to read the bytes in the buffer will see the writes committed before
    // the extents are updated.
    atomic_thread_fence(memory_order_release);
    atomic_fetch_add(Buffer.Extents, sizeof(R) + sizeof(A),
                     memory_order_acq_rel);
    return true;
  }

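  // Appends a CustomEventMarker metadata record followed by EventSize bytes
  // of opaque event payload.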
  bool writeCustomEvent(int32_t Delta, const void *Event, int32_t EventSize) {
    // We write the metadata record and the custom event data into the buffer
    // first, before we atomically update the extents for the buffer. This
    // ensures that any threads reading the extents of the buffer will only
    // ever see the full metadata and custom event payload accounted for
    // (never a partial write).
    MetadataRecord R =
        createMetadataRecord<MetadataRecord::RecordKinds::CustomEventMarker>(
            EventSize, Delta);
    NextRecord = reinterpret_cast<char *>(internal_memcpy(
                     NextRecord, reinterpret_cast<char *>(&R), sizeof(R))) +
                 sizeof(R);
    NextRecord = reinterpret_cast<char *>(
                     internal_memcpy(NextRecord, Event, EventSize)) +
                 EventSize;
    // We need this atomic fence here to ensure that other threads attempting
    // to read the bytes in the buffer will see the writes committed before
    // the extents are updated.
    atomic_thread_fence(memory_order_release);
    atomic_fetch_add(Buffer.Extents, sizeof(R) + EventSize,
                     memory_order_acq_rel);
    return true;
  }

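  // Appends a TypedEventMarker metadata record followed by EventSize bytes of
  // payload, tagged with the caller-supplied EventType.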
  bool writeTypedEvent(int32_t Delta, uint16_t EventType, const void *Event,
                       int32_t EventSize) {
    // We do something similar when writing out typed events; see
    // writeCustomEvent(...) above for details.
    MetadataRecord R =
        createMetadataRecord<MetadataRecord::RecordKinds::TypedEventMarker>(
            EventSize, Delta, EventType);
    NextRecord = reinterpret_cast<char *>(internal_memcpy(
                     NextRecord, reinterpret_cast<char *>(&R), sizeof(R))) +
                 sizeof(R);
    NextRecord = reinterpret_cast<char *>(
                     internal_memcpy(NextRecord, Event, EventSize)) +
                 EventSize;
    // We need this atomic fence here to ensure that other threads attempting
    // to read the bytes in the buffer will see the writes committed before
    // the extents are updated.
    atomic_thread_fence(memory_order_release);
    // Account for both the metadata record and the event payload, matching
    // the bytes actually written above.
    atomic_fetch_add(Buffer.Extents, sizeof(R) + EventSize,
                     memory_order_acq_rel);
    return true;
  }

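  // Returns the position at which the next record will be written.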
  char *getNextRecord() const { return NextRecord; }

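  // Rewinds the writer to the start of the buffer and zeroes the published
  // extents.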
  void resetRecord() {
    NextRecord = reinterpret_cast<char *>(Buffer.Data);
    atomic_store(Buffer.Extents, 0, memory_order_release);
  }

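  // Rolls back the last B bytes written, for when a partially composed record
  // must be abandoned.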
  void undoWrites(size_t B) {
    DCHECK_GE(NextRecord - B, reinterpret_cast<char *>(Buffer.Data));
    NextRecord -= B;
    atomic_fetch_sub(Buffer.Extents, B, memory_order_acq_rel);
  }

}; // class FDRLogWriter

} // namespace __xray

#endif // COMPILER_RT_LIB_XRAY_XRAY_FDR_LOG_WRITER_H_