GitHub Repository: torvalds/linux
Path: blob/master/mm/kfence/kfence.h
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Kernel Electric-Fence (KFENCE). For more info please see
 * Documentation/dev-tools/kfence.rst.
 *
 * Copyright (C) 2020, Google LLC.
 */

#ifndef MM_KFENCE_KFENCE_H
#define MM_KFENCE_KFENCE_H

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#include "../slab.h" /* for struct kmem_cache */

/*
 * Get the canary byte pattern for @addr. Use a pattern that varies based on
 * the lower 3 bits of the address, so that corruptions which overwrite memory
 * with similar, repeating constants are still detected with high probability.
 */
#define KFENCE_CANARY_PATTERN_U8(addr) ((u8)0xaa ^ (u8)((unsigned long)(addr) & 0x7))
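
/*
 * Illustrative sketch, not part of the upstream header: the hypothetical
 * helper below shows how a stored byte would be checked against the expected
 * canary. Because only the low 3 address bits feed the XOR, the pattern
 * repeats every 8 bytes: 0xaa 0xab 0xa8 0xa9 0xae 0xaf 0xac 0xad starting
 * from an 8-byte-aligned address.
 */
static inline bool kfence_canary_byte_ok(unsigned long addr, u8 byte)
{
        /* True iff @byte still holds the canary expected at @addr. */
        return byte == KFENCE_CANARY_PATTERN_U8(addr);
}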

/*
 * Define a continuous 8-byte canary starting from a multiple of 8. Each
 * byte's canary depends only on the lowest three bits of its address, so
 * every aligned 8-byte group shares the same canary; memory can therefore be
 * filled and checked 64 bits at a time instead of byte by byte, which
 * improves performance.
 */
#define KFENCE_CANARY_PATTERN_U64 ((u64)0xaaaaaaaaaaaaaaaa ^ (u64)(le64_to_cpu(0x0706050403020100)))
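
/*
 * Illustrative sketch, not part of the upstream header: on a little-endian
 * system, KFENCE_CANARY_PATTERN_U64 is exactly the eight per-byte canaries of
 * an 8-byte-aligned word packed together, since byte i of the word holds
 * 0xaa ^ i. The hypothetical helper below reconstructs the word byte by byte.
 */
static inline u64 kfence_canary_word_pattern(void)
{
        u64 word = 0;
        int i;

        /* Pack KFENCE_CANARY_PATTERN_U8(0)..(7) into one 64-bit value. */
        for (i = 0; i < 8; i++)
                word |= (u64)KFENCE_CANARY_PATTERN_U8(i) << (8 * i);

        return word; /* Equals KFENCE_CANARY_PATTERN_U64 on little-endian. */
}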

/* Maximum stack depth for reports. */
#define KFENCE_STACK_DEPTH 64

extern raw_spinlock_t kfence_freelist_lock;

/* KFENCE object states. */
enum kfence_object_state {
        KFENCE_OBJECT_UNUSED,      /* Object is unused. */
        KFENCE_OBJECT_ALLOCATED,   /* Object is currently allocated. */
        KFENCE_OBJECT_RCU_FREEING, /* Object was allocated, and is now being freed via RCU. */
        KFENCE_OBJECT_FREED,       /* Object was allocated, and then freed. */
};
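
/*
 * Illustrative state machine, derived from the comments above and not part
 * of the upstream header: a metadata slot cycles through
 *
 *   UNUSED -> ALLOCATED -> FREED -> ALLOCATED -> ...
 *
 * with the detour ALLOCATED -> RCU_FREEING -> FREED when the free is delayed
 * via RCU (see the rcu_head member in struct kfence_metadata below).
 */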

/* Alloc/free tracking information. */
struct kfence_track {
        pid_t pid;
        int cpu;
        u64 ts_nsec;
        int num_stack_entries;
        unsigned long stack_entries[KFENCE_STACK_DEPTH];
};

/* KFENCE metadata per guarded allocation. */
struct kfence_metadata {
        struct list_head list __guarded_by(&kfence_freelist_lock); /* Freelist node. */
        struct rcu_head rcu_head; /* For delayed freeing. */

        /*
         * Lock protecting the data below, which must stay consistent because
         * __kfence_alloc(), __kfence_free() and kfence_handle_page_fault()
         * may execute concurrently. Note, however, that the same metadata
         * cannot be taken off the freelist twice, so multiple
         * __kfence_alloc() calls can never run concurrently on the same
         * metadata.
         */
        raw_spinlock_t lock;

        /* The current state of the object; see above. */
        enum kfence_object_state state;

        /*
         * Allocated object address; cannot be calculated from size, because of
         * alignment requirements.
         *
         * Invariant: ALIGN_DOWN(addr, PAGE_SIZE) is constant.
         */
        unsigned long addr;

        /* The size of the original allocation. */
        size_t size;

        /*
         * The kmem_cache cache of the last allocation; NULL if never allocated
         * or the cache has already been destroyed.
         */
        struct kmem_cache *cache;

        /*
         * In case of an invalid access, the page that was unprotected; we
         * optimistically only store one address.
         */
        unsigned long unprotected_page __guarded_by(&lock);

        /* Allocation and free stack information. */
        struct kfence_track alloc_track __guarded_by(&lock);
        struct kfence_track free_track __guarded_by(&lock);
        /* For updating alloc_covered on frees. */
        u32 alloc_stack_hash __guarded_by(&lock);
#ifdef CONFIG_MEMCG
        struct slabobj_ext obj_exts;
#endif
};
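
/*
 * Illustrative sketch, not part of the upstream header: per the lock comment
 * above, the __guarded_by(&lock) fields are only accessed under meta->lock
 * with interrupts disabled, since __kfence_alloc(), __kfence_free() and
 * kfence_handle_page_fault() can race on the same slot. A hypothetical
 * accessor following that pattern:
 */
static inline enum kfence_object_state kfence_read_state(struct kfence_metadata *meta)
{
        enum kfence_object_state state;
        unsigned long flags;

        raw_spin_lock_irqsave(&meta->lock, flags);
        state = meta->state;
        raw_spin_unlock_irqrestore(&meta->lock, flags);

        return state;
}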

#define KFENCE_METADATA_SIZE PAGE_ALIGN(sizeof(struct kfence_metadata) * \
                                        CONFIG_KFENCE_NUM_OBJECTS)

extern struct kfence_metadata *kfence_metadata;

static inline struct kfence_metadata *addr_to_metadata(unsigned long addr)
{
        long index;

        /* The checks do not affect performance; only called from slow-paths. */

        if (!is_kfence_address((void *)addr))
                return NULL;

        /*
         * May be an invalid index if called with an address at the edge of
         * __kfence_pool, in which case we would report an "invalid access"
         * error.
         */
        index = (addr - (unsigned long)__kfence_pool) / (PAGE_SIZE * 2) - 1;
        if (index < 0 || index >= CONFIG_KFENCE_NUM_OBJECTS)
                return NULL;

        return &kfence_metadata[index];
}
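
/*
 * Worked example, illustrative only: the pool alternates object pages with
 * guard pages and starts with two guard pages, so object i's data page
 * begins at __kfence_pool + (2 + 2 * i) * PAGE_SIZE. For an address inside
 * object 1's page, at offset 4 * PAGE_SIZE:
 *
 *   index = (4 * PAGE_SIZE) / (PAGE_SIZE * 2) - 1 = 2 - 1 = 1
 *
 * An access to the guard page right after it, at offset 5 * PAGE_SIZE, also
 * maps to index 1, which is what allows an out-of-bounds hit on a guard page
 * to be attributed to a neighbouring object.
 */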

/* KFENCE error types for report generation. */
enum kfence_error_type {
        KFENCE_ERROR_OOB,          /* Detected an out-of-bounds access. */
        KFENCE_ERROR_UAF,          /* Detected a use-after-free access. */
        KFENCE_ERROR_CORRUPTION,   /* Detected a memory corruption on free. */
        KFENCE_ERROR_INVALID,      /* Invalid access of unknown type. */
        KFENCE_ERROR_INVALID_FREE, /* Invalid free. */
};

void kfence_report_error(unsigned long address, bool is_write, struct pt_regs *regs,
                         const struct kfence_metadata *meta, enum kfence_error_type type);

void kfence_print_object(struct seq_file *seq, const struct kfence_metadata *meta) __must_hold(&meta->lock);

#endif /* MM_KFENCE_KFENCE_H */