GitHub Repository: torvalds/linux
Path: blob/master/drivers/cxl/mem.c

// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2022 Intel Corporation. All rights reserved. */
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "cxlmem.h"
#include "cxlpci.h"

/**
 * DOC: cxl mem
 *
 * CXL memory endpoint devices and switches are CXL capable devices that
 * participate in the CXL.mem protocol. Their functionality builds on top of
 * the CXL.io protocol that allows enumerating and configuring components via
 * standard PCI mechanisms.
 *
 * The cxl_mem driver owns kicking off the enumeration of this CXL.mem
 * capability. With the detection of a CXL capable endpoint, the driver will
 * walk up to find the platform specific port it is connected to, and determine
 * if there are intervening switches in the path. If there are switches, a
 * secondary action is to enumerate those (implemented in cxl_core). Finally,
 * the cxl_mem driver adds the device it is bound to as a CXL endpoint-port
 * for use in higher level operations.
 */
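
/*
 * An illustrative (not exhaustive) topology that the enumeration described
 * above walks, from a CXL root down through an optional switch to the
 * endpoint this driver binds to:
 *
 *   platform host bridge (CXL root)
 *        └── switch upstream port           (enumerated via cxl_core)
 *             └── switch downstream port
 *                  └── memory expander      (endpoint, bound to cxl_mem)
 */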

/* devm action: re-enable suspend by dropping the count taken in cxl_mem_probe() */
static void enable_suspend(void *data)
{
	cxl_mem_active_dec();
}

/* devm action: tear down this memdev's debugfs directory on unbind */
static void remove_debugfs(void *dentry)
{
	debugfs_remove_recursive(dentry);
}

/* seq_file show handler for the "dpamem" debugfs entry: dump device physical address (DPA) state */
static int cxl_mem_dpa_show(struct seq_file *file, void *data)
{
	struct device *dev = file->private;
	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);

	cxl_dpa_debug(file, cxlmd->cxlds);

	return 0;
}
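
/*
 * devm_cxl_add_endpoint() - register @cxlmd as an endpoint port
 * @host: device that owns the devm lifetime of the new endpoint port
 * @cxlmd: the memory device to register
 * @parent_dport: downstream port in the parent CXL port that @cxlmd attaches to
 *
 * Records the chain of intervening ports back to the CXL root, registers the
 * endpoint port, arranges for its automatic removal, and verifies that the
 * port driver attached to it. Returns 0 on success or a negative errno.
 */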
static int devm_cxl_add_endpoint(struct device *host, struct cxl_memdev *cxlmd,
				 struct cxl_dport *parent_dport)
{
	struct cxl_port *parent_port = parent_dport->port;
	struct cxl_port *endpoint, *iter, *down;
	int rc;

	/*
	 * Now that the path to the root is established record all the
	 * intervening ports in the chain.
	 */
	for (iter = parent_port, down = NULL; !is_cxl_root(iter);
	     down = iter, iter = to_cxl_port(iter->dev.parent)) {
		struct cxl_ep *ep;

		ep = cxl_ep_load(iter, cxlmd);
		ep->next = down;
	}

	/* Note: endpoint port component registers are derived from @cxlds */
	endpoint = devm_cxl_add_port(host, &cxlmd->dev, CXL_RESOURCE_NONE,
				     parent_dport);
	if (IS_ERR(endpoint))
		return PTR_ERR(endpoint);

	rc = cxl_endpoint_autoremove(cxlmd, endpoint);
	if (rc)
		return rc;

	if (!endpoint->dev.driver) {
		dev_err(&cxlmd->dev, "%s failed probe\n",
			dev_name(&endpoint->dev));
		return -ENXIO;
	}

	return 0;
}

/* Write-only debugfs hooks: inject/clear poison at a device physical address */
static int cxl_debugfs_poison_inject(void *data, u64 dpa)
{
	struct cxl_memdev *cxlmd = data;

	return cxl_inject_poison(cxlmd, dpa);
}

DEFINE_DEBUGFS_ATTRIBUTE(cxl_poison_inject_fops, NULL,
			 cxl_debugfs_poison_inject, "%llx\n");

static int cxl_debugfs_poison_clear(void *data, u64 dpa)
{
	struct cxl_memdev *cxlmd = data;

	return cxl_clear_poison(cxlmd, dpa);
}

DEFINE_DEBUGFS_ATTRIBUTE(cxl_poison_clear_fops, NULL,
			 cxl_debugfs_poison_clear, "%llx\n");
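
/*
 * Illustrative usage, assuming debugfs is mounted at /sys/kernel/debug and
 * the directory created in cxl_mem_probe() below (the exact path depends on
 * the memdev name):
 *
 *   echo 0x40000000 > /sys/kernel/debug/cxl/mem0/inject_poison
 *   echo 0x40000000 > /sys/kernel/debug/cxl/mem0/clear_poison
 */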

static int cxl_mem_probe(struct device *dev)
{
	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct device *endpoint_parent;
	struct cxl_dport *dport;
	struct dentry *dentry;
	int rc;

	if (!cxlds->media_ready)
		return -EBUSY;

	/*
	 * Someone is trying to reattach this device after it lost its port
	 * connection (an endpoint port previously registered by this memdev was
	 * disabled). This racy check is ok because if the port is still gone,
	 * no harm done, and if the port hierarchy comes back it will re-trigger
	 * this probe. Port rescan and memdev detach work share the same
	 * single-threaded workqueue.
	 */
	if (work_pending(&cxlmd->detach_work))
		return -EBUSY;

	dentry = cxl_debugfs_create_dir(dev_name(dev));
	debugfs_create_devm_seqfile(dev, "dpamem", dentry, cxl_mem_dpa_show);

	if (test_bit(CXL_POISON_ENABLED_INJECT, mds->poison.enabled_cmds))
		debugfs_create_file("inject_poison", 0200, dentry, cxlmd,
				    &cxl_poison_inject_fops);
	if (test_bit(CXL_POISON_ENABLED_CLEAR, mds->poison.enabled_cmds))
		debugfs_create_file("clear_poison", 0200, dentry, cxlmd,
				    &cxl_poison_clear_fops);

	rc = devm_add_action_or_reset(dev, remove_debugfs, dentry);
	if (rc)
		return rc;

	rc = devm_cxl_enumerate_ports(cxlmd);
	if (rc)
		return rc;
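
	/*
	 * __free(put_cxl_port) arranges for the reference obtained below to
	 * be dropped automatically when parent_port goes out of scope,
	 * covering every return path in the remainder of this function.
	 */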
	struct cxl_port *parent_port __free(put_cxl_port) =
		cxl_mem_find_port(cxlmd, &dport);
	if (!parent_port) {
		dev_err(dev, "CXL port topology not found\n");
		return -ENXIO;
	}

	if (cxl_pmem_size(cxlds) && IS_ENABLED(CONFIG_CXL_PMEM)) {
		rc = devm_cxl_add_nvdimm(parent_port, cxlmd);
		if (rc) {
			if (rc == -ENODEV)
				dev_info(dev, "PMEM disabled by platform\n");
			return rc;
		}
	}

	if (dport->rch)
		endpoint_parent = parent_port->uport_dev;
	else
		endpoint_parent = &parent_port->dev;

	cxl_dport_init_ras_reporting(dport, dev);
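
	/*
	 * scoped_guard(device, ...) holds the device lock for the duration
	 * of the block, so the endpoint parent cannot be unbound while the
	 * endpoint port is being added.
	 */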
	scoped_guard(device, endpoint_parent) {
		if (!endpoint_parent->driver) {
			dev_err(dev, "CXL port topology %s not enabled\n",
				dev_name(endpoint_parent));
			return -ENXIO;
		}

		rc = devm_cxl_add_endpoint(endpoint_parent, cxlmd, dport);
		if (rc)
			return rc;
	}

	rc = devm_cxl_memdev_edac_register(cxlmd);
	if (rc)
		dev_dbg(dev, "CXL memdev EDAC registration failed rc=%d\n", rc);

	/*
	 * The kernel may be operating out of CXL memory on this device;
	 * there is no spec-defined way to determine whether this device
	 * preserves contents over suspend, and there is no simple way
	 * to arrange for the suspend image to avoid CXL memory, which
	 * would set up a circular dependency between PCI resume and save
	 * state restoration.
	 *
	 * TODO: support suspend when all the regions this device is
	 * hosting are locked and covered by the system address map,
	 * i.e. platform firmware owns restoring the HDM configuration
	 * that it locked.
	 */
	cxl_mem_active_inc();
	return devm_add_action_or_reset(dev, enable_suspend, NULL);
}

/* sysfs: write "1"/"true" to kick off retrieval of the device's poison list */
static ssize_t trigger_poison_list_store(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t len)
{
	bool trigger;
	int rc;

	if (kstrtobool(buf, &trigger) || !trigger)
		return -EINVAL;

	rc = cxl_trigger_poison_list(to_cxl_memdev(dev));

	return rc ? rc : len;
}
static DEVICE_ATTR_WO(trigger_poison_list);
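
/*
 * Illustrative usage (the sysfs path follows the usual /sys/bus/cxl layout;
 * adjust for the actual memdev name):
 *
 *   echo 1 > /sys/bus/cxl/devices/mem0/trigger_poison_list
 */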

/* Hide trigger_poison_list when the device does not support the command */
static umode_t cxl_mem_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);

	if (a == &dev_attr_trigger_poison_list.attr)
		if (!test_bit(CXL_POISON_ENABLED_LIST,
			      mds->poison.enabled_cmds))
			return 0;

	return a->mode;
}

static struct attribute *cxl_mem_attrs[] = {
	&dev_attr_trigger_poison_list.attr,
	NULL
};

static struct attribute_group cxl_mem_group = {
	.attrs = cxl_mem_attrs,
	.is_visible = cxl_mem_visible,
};

__ATTRIBUTE_GROUPS(cxl_mem);
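
/*
 * __ATTRIBUTE_GROUPS(cxl_mem) expands to the NULL-terminated
 * cxl_mem_groups[] array referenced by .dev_groups below.
 */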

static struct cxl_driver cxl_mem_driver = {
	.name = "cxl_mem",
	.probe = cxl_mem_probe,
	.id = CXL_DEVICE_MEMORY_EXPANDER,
	.drv = {
		.dev_groups = cxl_mem_groups,
	},
};

module_cxl_driver(cxl_mem_driver);

MODULE_DESCRIPTION("CXL: Memory Expansion");
MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS("CXL");
MODULE_ALIAS_CXL(CXL_DEVICE_MEMORY_EXPANDER);
/*
 * create_endpoint() wants to validate port driver attach immediately after
 * endpoint registration.
 */
MODULE_SOFTDEP("pre: cxl_port");