GitHub Repository: torvalds/linux
Path: blob/master/drivers/cxl/core/mbox.c
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2020 Intel Corporation. All rights reserved. */
#include <linux/security.h>
#include <linux/debugfs.h>
#include <linux/ktime.h>
#include <linux/mutex.h>
#include <linux/unaligned.h>
#include <cxlpci.h>
#include <cxlmem.h>
#include <cxl.h>

#include "core.h"
#include "trace.h"
#include "mce.h"

static bool cxl_raw_allow_all;

/**
 * DOC: cxl mbox
 *
 * Core implementation of the CXL 2.0 Type-3 Memory Device Mailbox. The
 * implementation is used by the cxl_pci driver to initialize the device
 * and implement the cxl_mem.h IOCTL UAPI. It also implements the
 * backend of the cxl_pmem_ctl() transport for LIBNVDIMM.
 */

#define cxl_for_each_cmd(cmd) \
	for ((cmd) = &cxl_mem_commands[0]; \
	     ((cmd) - cxl_mem_commands) < ARRAY_SIZE(cxl_mem_commands); (cmd)++)

#define CXL_CMD(_id, sin, sout, _flags) \
	[CXL_MEM_COMMAND_ID_##_id] = { \
		.info = { \
			.id = CXL_MEM_COMMAND_ID_##_id, \
			.size_in = sin, \
			.size_out = sout, \
		}, \
		.opcode = CXL_MBOX_OP_##_id, \
		.flags = _flags, \
	}

#define CXL_VARIABLE_PAYLOAD ~0U
/*
 * This table defines the supported mailbox commands for the driver. This table
 * is made up of a UAPI structure. Non-negative values as parameters in the
 * table will be validated against the user's input. For example, if size_in is
 * 0, and the user passed in 1, it is an error.
 */
static struct cxl_mem_command cxl_mem_commands[CXL_MEM_COMMAND_ID_MAX] = {
	CXL_CMD(IDENTIFY, 0, 0x43, CXL_CMD_FLAG_FORCE_ENABLE),
#ifdef CONFIG_CXL_MEM_RAW_COMMANDS
	CXL_CMD(RAW, CXL_VARIABLE_PAYLOAD, CXL_VARIABLE_PAYLOAD, 0),
#endif
	CXL_CMD(GET_SUPPORTED_LOGS, 0, CXL_VARIABLE_PAYLOAD, CXL_CMD_FLAG_FORCE_ENABLE),
	CXL_CMD(GET_FW_INFO, 0, 0x50, 0),
	CXL_CMD(GET_PARTITION_INFO, 0, 0x20, 0),
	CXL_CMD(GET_LSA, 0x8, CXL_VARIABLE_PAYLOAD, 0),
	CXL_CMD(GET_HEALTH_INFO, 0, 0x12, 0),
	CXL_CMD(GET_LOG, 0x18, CXL_VARIABLE_PAYLOAD, CXL_CMD_FLAG_FORCE_ENABLE),
	CXL_CMD(GET_LOG_CAPS, 0x10, 0x4, 0),
	CXL_CMD(CLEAR_LOG, 0x10, 0, 0),
	CXL_CMD(GET_SUP_LOG_SUBLIST, 0x2, CXL_VARIABLE_PAYLOAD, 0),
	CXL_CMD(SET_PARTITION_INFO, 0x0a, 0, 0),
	CXL_CMD(SET_LSA, CXL_VARIABLE_PAYLOAD, 0, 0),
	CXL_CMD(GET_ALERT_CONFIG, 0, 0x10, 0),
	CXL_CMD(SET_ALERT_CONFIG, 0xc, 0, 0),
	CXL_CMD(GET_SHUTDOWN_STATE, 0, 0x1, 0),
	CXL_CMD(SET_SHUTDOWN_STATE, 0x1, 0, 0),
	CXL_CMD(GET_SCAN_MEDIA_CAPS, 0x10, 0x4, 0),
	CXL_CMD(GET_TIMESTAMP, 0, 0x8, 0),
};

/*
 * Commands that RAW doesn't permit. The rationale for each:
 *
 * CXL_MBOX_OP_ACTIVATE_FW: Firmware activation requires adjustment /
 * coordination of transaction timeout values at the root bridge level.
 *
 * CXL_MBOX_OP_SET_PARTITION_INFO: The device memory map may change live
 * and needs to be coordinated with HDM updates.
 *
 * CXL_MBOX_OP_SET_LSA: The label storage area may be cached by the
 * driver and any writes from userspace invalidate those contents.
 *
 * CXL_MBOX_OP_SET_SHUTDOWN_STATE: Set shutdown state assumes no writes
 * to the device after it is marked clean, userspace cannot make that
 * assertion.
 *
 * CXL_MBOX_OP_[GET_]SCAN_MEDIA: The kernel provides a native error list that
 * is kept up to date with patrol notifications and error management.
 *
 * CXL_MBOX_OP_[GET_,INJECT_,CLEAR_]POISON: These commands require kernel
 * driver orchestration for safety.
 */
static u16 cxl_disabled_raw_commands[] = {
	CXL_MBOX_OP_ACTIVATE_FW,
	CXL_MBOX_OP_SET_PARTITION_INFO,
	CXL_MBOX_OP_SET_LSA,
	CXL_MBOX_OP_SET_SHUTDOWN_STATE,
	CXL_MBOX_OP_SCAN_MEDIA,
	CXL_MBOX_OP_GET_SCAN_MEDIA,
	CXL_MBOX_OP_GET_POISON,
	CXL_MBOX_OP_INJECT_POISON,
	CXL_MBOX_OP_CLEAR_POISON,
};

/*
 * Command sets that RAW doesn't permit. All opcodes in this set are
 * disabled because they pass plain text security payloads over the
 * user/kernel boundary. This functionality is intended to be wrapped
 * behind the keys ABI which allows for encrypted payloads in the UAPI.
 */
static u8 security_command_sets[] = {
	0x44, /* Sanitize */
	0x45, /* Persistent Memory Data-at-rest Security */
	0x46, /* Security Passthrough */
};

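/*
 * Security commands are identified by command set: the upper byte of the
 * opcode is matched against the security command sets listed above.
 */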
static bool cxl_is_security_command(u16 opcode)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(security_command_sets); i++)
		if (security_command_sets[i] == (opcode >> 8))
			return true;
	return false;
}

static void cxl_set_security_cmd_enabled(struct cxl_security_state *security,
					 u16 opcode)
{
	switch (opcode) {
	case CXL_MBOX_OP_SANITIZE:
		set_bit(CXL_SEC_ENABLED_SANITIZE, security->enabled_cmds);
		break;
	case CXL_MBOX_OP_SECURE_ERASE:
		set_bit(CXL_SEC_ENABLED_SECURE_ERASE,
			security->enabled_cmds);
		break;
	case CXL_MBOX_OP_GET_SECURITY_STATE:
		set_bit(CXL_SEC_ENABLED_GET_SECURITY_STATE,
			security->enabled_cmds);
		break;
	case CXL_MBOX_OP_SET_PASSPHRASE:
		set_bit(CXL_SEC_ENABLED_SET_PASSPHRASE,
			security->enabled_cmds);
		break;
	case CXL_MBOX_OP_DISABLE_PASSPHRASE:
		set_bit(CXL_SEC_ENABLED_DISABLE_PASSPHRASE,
			security->enabled_cmds);
		break;
	case CXL_MBOX_OP_UNLOCK:
		set_bit(CXL_SEC_ENABLED_UNLOCK, security->enabled_cmds);
		break;
	case CXL_MBOX_OP_FREEZE_SECURITY:
		set_bit(CXL_SEC_ENABLED_FREEZE_SECURITY,
			security->enabled_cmds);
		break;
	case CXL_MBOX_OP_PASSPHRASE_SECURE_ERASE:
		set_bit(CXL_SEC_ENABLED_PASSPHRASE_SECURE_ERASE,
			security->enabled_cmds);
		break;
	default:
		break;
	}
}

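/* Poison management commands all share command set 0x43 */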
static bool cxl_is_poison_command(u16 opcode)
{
#define CXL_MBOX_OP_POISON_CMDS 0x43

	if ((opcode >> 8) == CXL_MBOX_OP_POISON_CMDS)
		return true;

	return false;
}

static void cxl_set_poison_cmd_enabled(struct cxl_poison_state *poison,
				       u16 opcode)
{
	switch (opcode) {
	case CXL_MBOX_OP_GET_POISON:
		set_bit(CXL_POISON_ENABLED_LIST, poison->enabled_cmds);
		break;
	case CXL_MBOX_OP_INJECT_POISON:
		set_bit(CXL_POISON_ENABLED_INJECT, poison->enabled_cmds);
		break;
	case CXL_MBOX_OP_CLEAR_POISON:
		set_bit(CXL_POISON_ENABLED_CLEAR, poison->enabled_cmds);
		break;
	case CXL_MBOX_OP_GET_SCAN_MEDIA_CAPS:
		set_bit(CXL_POISON_ENABLED_SCAN_CAPS, poison->enabled_cmds);
		break;
	case CXL_MBOX_OP_SCAN_MEDIA:
		set_bit(CXL_POISON_ENABLED_SCAN_MEDIA, poison->enabled_cmds);
		break;
	case CXL_MBOX_OP_GET_SCAN_MEDIA:
		set_bit(CXL_POISON_ENABLED_SCAN_RESULTS, poison->enabled_cmds);
		break;
	default:
		break;
	}
}

static struct cxl_mem_command *cxl_mem_find_command(u16 opcode)
{
	struct cxl_mem_command *c;

	cxl_for_each_cmd(c)
		if (c->opcode == opcode)
			return c;

	return NULL;
}

static const char *cxl_mem_opcode_to_name(u16 opcode)
{
	struct cxl_mem_command *c;

	c = cxl_mem_find_command(opcode);
	if (!c)
		return NULL;

	return cxl_command_names[c->info.id].name;
}

/**
 * cxl_internal_send_cmd() - Kernel internal interface to send a mailbox command
 * @cxl_mbox: CXL mailbox context
 * @mbox_cmd: initialized command to execute
 *
 * Context: Any context.
 * Return:
 *  * %>=0	- Number of bytes returned in @out.
 *  * %-E2BIG	- Payload is too large for hardware.
 *  * %-EBUSY	- Couldn't acquire exclusive mailbox access.
 *  * %-EFAULT	- Hardware error occurred.
 *  * %-ENXIO	- Command completed, but device reported an error.
 *  * %-EIO	- Unexpected output size.
 *
 * Mailbox commands may execute successfully yet the device itself reported an
 * error. While this distinction can be useful for commands from userspace, the
 * kernel will only be able to use results when both are successful.
 */
int cxl_internal_send_cmd(struct cxl_mailbox *cxl_mbox,
			  struct cxl_mbox_cmd *mbox_cmd)
{
	size_t out_size, min_out;
	int rc;

	if (mbox_cmd->size_in > cxl_mbox->payload_size ||
	    mbox_cmd->size_out > cxl_mbox->payload_size)
		return -E2BIG;

	out_size = mbox_cmd->size_out;
	min_out = mbox_cmd->min_out;
	rc = cxl_mbox->mbox_send(cxl_mbox, mbox_cmd);
	/*
	 * EIO is reserved for a payload size mismatch and mbox_send()
	 * may not return this error.
	 */
	if (WARN_ONCE(rc == -EIO, "Bad return code: -EIO"))
		return -ENXIO;
	if (rc)
		return rc;

	if (mbox_cmd->return_code != CXL_MBOX_CMD_RC_SUCCESS &&
	    mbox_cmd->return_code != CXL_MBOX_CMD_RC_BACKGROUND)
		return cxl_mbox_cmd_rc2errno(mbox_cmd);

	if (!out_size)
		return 0;

	/*
	 * Variable sized output needs to at least satisfy the caller's
	 * minimum if not the fully requested size.
	 */
	if (min_out == 0)
		min_out = out_size;

	if (mbox_cmd->size_out < min_out)
		return -EIO;
	return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_internal_send_cmd, "CXL");

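/*
 * Gate the RAW command path: it is only available when
 * CONFIG_CXL_MEM_RAW_COMMANDS is enabled, it is blocked under kernel
 * lockdown, and, unless the raw_allow_all debugfs override is set, security
 * opcodes and the opcodes in cxl_disabled_raw_commands[] are rejected.
 */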
static bool cxl_mem_raw_command_allowed(u16 opcode)
{
	int i;

	if (!IS_ENABLED(CONFIG_CXL_MEM_RAW_COMMANDS))
		return false;

	if (security_locked_down(LOCKDOWN_PCI_ACCESS))
		return false;

	if (cxl_raw_allow_all)
		return true;

	if (cxl_is_security_command(opcode))
		return false;

	for (i = 0; i < ARRAY_SIZE(cxl_disabled_raw_commands); i++)
		if (cxl_disabled_raw_commands[i] == opcode)
			return false;

	return true;
}

/**
 * cxl_payload_from_user_allowed() - Check contents of in_payload.
 * @opcode: The mailbox command opcode.
 * @payload_in: Pointer to the input payload passed in from user space.
 *
 * Return:
 *  * true	- payload_in passes check for @opcode.
 *  * false	- payload_in contains invalid or unsupported values.
 *
 * The driver may inspect payload contents before sending a mailbox
 * command from user space to the device. The intent is to reject
 * commands with input payloads that are known to be unsafe. This
 * check is not intended to replace the user's careful selection of
 * mailbox command parameters and makes no guarantee that the user
 * command will succeed, nor that it is appropriate.
 *
 * The specific checks are determined by the opcode.
 */
static bool cxl_payload_from_user_allowed(u16 opcode, void *payload_in)
{
	switch (opcode) {
	case CXL_MBOX_OP_SET_PARTITION_INFO: {
		struct cxl_mbox_set_partition_info *pi = payload_in;

		if (pi->flags & CXL_SET_PARTITION_IMMEDIATE_FLAG)
			return false;
		break;
	}
	case CXL_MBOX_OP_CLEAR_LOG: {
		const uuid_t *uuid = (uuid_t *)payload_in;

		/*
		 * Restrict the 'Clear Log' action to only apply to
		 * Vendor debug logs.
		 */
		return uuid_equal(uuid, &DEFINE_CXL_VENDOR_DEBUG_UUID);
	}
	default:
		break;
	}
	return true;
}

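/*
 * Build a kernel-side cxl_mbox_cmd from a validated userspace request: copy
 * in and screen the input payload, then size and allocate the output bounce
 * buffer.
 */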
static int cxl_mbox_cmd_ctor(struct cxl_mbox_cmd *mbox_cmd,
			     struct cxl_mailbox *cxl_mbox, u16 opcode,
			     size_t in_size, size_t out_size, u64 in_payload)
{
	*mbox_cmd = (struct cxl_mbox_cmd) {
		.opcode = opcode,
		.size_in = in_size,
	};

	if (in_size) {
		mbox_cmd->payload_in = vmemdup_user(u64_to_user_ptr(in_payload),
						    in_size);
		if (IS_ERR(mbox_cmd->payload_in))
			return PTR_ERR(mbox_cmd->payload_in);

		if (!cxl_payload_from_user_allowed(opcode, mbox_cmd->payload_in)) {
			dev_dbg(cxl_mbox->host, "%s: input payload not allowed\n",
				cxl_mem_opcode_to_name(opcode));
			kvfree(mbox_cmd->payload_in);
			return -EBUSY;
		}
	}

	/* Prepare to handle a full payload for variable sized output */
	if (out_size == CXL_VARIABLE_PAYLOAD)
		mbox_cmd->size_out = cxl_mbox->payload_size;
	else
		mbox_cmd->size_out = out_size;

	if (mbox_cmd->size_out) {
		mbox_cmd->payload_out = kvzalloc(mbox_cmd->size_out, GFP_KERNEL);
		if (!mbox_cmd->payload_out) {
			kvfree(mbox_cmd->payload_in);
			return -ENOMEM;
		}
	}
	return 0;
}

static void cxl_mbox_cmd_dtor(struct cxl_mbox_cmd *mbox)
{
	kvfree(mbox->payload_in);
	kvfree(mbox->payload_out);
}

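/*
 * Construct a cxl_mem_command for the RAW pass-through path after checking
 * reserved fields, the requested output size, and the RAW allow policy.
 */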
static int cxl_to_mem_cmd_raw(struct cxl_mem_command *mem_cmd,
			      const struct cxl_send_command *send_cmd,
			      struct cxl_mailbox *cxl_mbox)
{
	if (send_cmd->raw.rsvd)
		return -EINVAL;

	/*
	 * Unlike supported commands, the output size of RAW commands
	 * gets passed along without further checking, so it must be
	 * validated here.
	 */
	if (send_cmd->out.size > cxl_mbox->payload_size)
		return -EINVAL;

	if (!cxl_mem_raw_command_allowed(send_cmd->raw.opcode))
		return -EPERM;

	dev_WARN_ONCE(cxl_mbox->host, true, "raw command path used\n");

	*mem_cmd = (struct cxl_mem_command) {
		.info = {
			.id = CXL_MEM_COMMAND_ID_RAW,
			.size_in = send_cmd->in.size,
			.size_out = send_cmd->out.size,
		},
		.opcode = send_cmd->raw.opcode
	};

	return 0;
}

static int cxl_to_mem_cmd(struct cxl_mem_command *mem_cmd,
			  const struct cxl_send_command *send_cmd,
			  struct cxl_mailbox *cxl_mbox)
{
	struct cxl_mem_command *c = &cxl_mem_commands[send_cmd->id];
	const struct cxl_command_info *info = &c->info;

	if (send_cmd->flags & ~CXL_MEM_COMMAND_FLAG_MASK)
		return -EINVAL;

	if (send_cmd->rsvd)
		return -EINVAL;

	if (send_cmd->in.rsvd || send_cmd->out.rsvd)
		return -EINVAL;

	/* Check that the command is enabled for hardware */
	if (!test_bit(info->id, cxl_mbox->enabled_cmds))
		return -ENOTTY;

	/* Check that the command is not claimed for exclusive kernel use */
	if (test_bit(info->id, cxl_mbox->exclusive_cmds))
		return -EBUSY;

	/* Check the input buffer is the expected size */
	if ((info->size_in != CXL_VARIABLE_PAYLOAD) &&
	    (info->size_in != send_cmd->in.size))
		return -ENOMEM;

	/* Check the output buffer is at least large enough */
	if ((info->size_out != CXL_VARIABLE_PAYLOAD) &&
	    (send_cmd->out.size < info->size_out))
		return -ENOMEM;

	*mem_cmd = (struct cxl_mem_command) {
		.info = {
			.id = info->id,
			.flags = info->flags,
			.size_in = send_cmd->in.size,
			.size_out = send_cmd->out.size,
		},
		.opcode = c->opcode
	};

	return 0;
}

/**
 * cxl_validate_cmd_from_user() - Check fields for CXL_MEM_SEND_COMMAND.
 * @mbox_cmd: Sanitized and populated &struct cxl_mbox_cmd.
 * @cxl_mbox: CXL mailbox context
 * @send_cmd: &struct cxl_send_command copied in from userspace.
 *
 * Return:
 *  * %0	- @mbox_cmd is ready to send.
 *  * %-ENOTTY	- Invalid command specified.
 *  * %-EINVAL	- Reserved fields or invalid values were used.
 *  * %-ENOMEM	- Input or output buffer wasn't sized properly.
 *  * %-EPERM	- Attempted to use a protected command.
 *  * %-EBUSY	- Kernel has claimed exclusive access to this opcode.
 *
 * The result of this command is a fully validated command in @mbox_cmd that is
 * safe to send to the hardware.
 */
static int cxl_validate_cmd_from_user(struct cxl_mbox_cmd *mbox_cmd,
				      struct cxl_mailbox *cxl_mbox,
				      const struct cxl_send_command *send_cmd)
{
	struct cxl_mem_command mem_cmd;
	int rc;

	if (send_cmd->id == 0 || send_cmd->id >= CXL_MEM_COMMAND_ID_MAX)
		return -ENOTTY;

	/*
	 * The user can never specify an input payload larger than what hardware
	 * supports, but output can be arbitrarily large (simply write out as
	 * much data as the hardware provides).
	 */
	if (send_cmd->in.size > cxl_mbox->payload_size)
		return -EINVAL;

	/* Sanitize and construct a cxl_mem_command */
	if (send_cmd->id == CXL_MEM_COMMAND_ID_RAW)
		rc = cxl_to_mem_cmd_raw(&mem_cmd, send_cmd, cxl_mbox);
	else
		rc = cxl_to_mem_cmd(&mem_cmd, send_cmd, cxl_mbox);

	if (rc)
		return rc;

	/* Sanitize and construct a cxl_mbox_cmd */
	return cxl_mbox_cmd_ctor(mbox_cmd, cxl_mbox, mem_cmd.opcode,
				 mem_cmd.info.size_in, mem_cmd.info.size_out,
				 send_cmd->in.payload);
}

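/*
 * Back the query-commands ioctl: report the total number of commands when
 * zero entries are requested, otherwise copy out per-command info annotated
 * with the enabled/exclusive flags.
 */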
int cxl_query_cmd(struct cxl_mailbox *cxl_mbox,
		  struct cxl_mem_query_commands __user *q)
{
	struct device *dev = cxl_mbox->host;
	struct cxl_mem_command *cmd;
	u32 n_commands;
	int j = 0;

	dev_dbg(dev, "Query IOCTL\n");

	if (get_user(n_commands, &q->n_commands))
		return -EFAULT;

	/* returns the total number if 0 elements are requested. */
	if (n_commands == 0)
		return put_user(ARRAY_SIZE(cxl_mem_commands), &q->n_commands);

	/*
	 * otherwise, return min(n_commands, total commands) cxl_command_info
	 * structures.
	 */
	cxl_for_each_cmd(cmd) {
		struct cxl_command_info info = cmd->info;

		if (test_bit(info.id, cxl_mbox->enabled_cmds))
			info.flags |= CXL_MEM_COMMAND_FLAG_ENABLED;
		if (test_bit(info.id, cxl_mbox->exclusive_cmds))
			info.flags |= CXL_MEM_COMMAND_FLAG_EXCLUSIVE;

		if (copy_to_user(&q->commands[j++], &info, sizeof(info)))
			return -EFAULT;

		if (j == n_commands)
			break;
	}

	return 0;
}

/**
 * handle_mailbox_cmd_from_user() - Dispatch a mailbox command for userspace.
 * @cxl_mbox: The mailbox context for the operation.
 * @mbox_cmd: The validated mailbox command.
 * @out_payload: Pointer to userspace's output payload.
 * @size_out: (Input) Max payload size to copy out.
 *            (Output) Payload size hardware generated.
 * @retval: Hardware generated return code from the operation.
 *
 * Return:
 *  * %0	- Mailbox transaction succeeded. This implies the mailbox
 *		  protocol completed successfully, not that the operation itself
 *		  was successful.
 *  * %-ENOMEM	- Couldn't allocate a bounce buffer.
 *  * %-EFAULT	- Something happened with copy_to/from_user.
 *  * %-EINTR	- Mailbox acquisition interrupted.
 *  * %-EXXX	- Transaction level failures.
 *
 * Dispatches a mailbox command on behalf of a userspace request.
 * The output payload is copied to userspace.
 *
 * See cxl_send_cmd().
 */
static int handle_mailbox_cmd_from_user(struct cxl_mailbox *cxl_mbox,
					struct cxl_mbox_cmd *mbox_cmd,
					u64 out_payload, s32 *size_out,
					u32 *retval)
{
	struct device *dev = cxl_mbox->host;
	int rc;

	dev_dbg(dev,
		"Submitting %s command for user\n"
		"\topcode: %x\n"
		"\tsize: %zx\n",
		cxl_mem_opcode_to_name(mbox_cmd->opcode),
		mbox_cmd->opcode, mbox_cmd->size_in);

	rc = cxl_mbox->mbox_send(cxl_mbox, mbox_cmd);
	if (rc)
		goto out;

	/*
	 * @size_out contains the max size that's allowed to be written back out
	 * to userspace. While the payload may have written more output than
	 * this, it will have to be ignored.
	 */
	if (mbox_cmd->size_out) {
		dev_WARN_ONCE(dev, mbox_cmd->size_out > *size_out,
			      "Invalid return size\n");
		if (copy_to_user(u64_to_user_ptr(out_payload),
				 mbox_cmd->payload_out, mbox_cmd->size_out)) {
			rc = -EFAULT;
			goto out;
		}
	}

	*size_out = mbox_cmd->size_out;
	*retval = mbox_cmd->return_code;

out:
	cxl_mbox_cmd_dtor(mbox_cmd);
	return rc;
}

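/*
 * Back the CXL_MEM_SEND_COMMAND ioctl: copy in the request, validate it,
 * dispatch it, and copy the updated output size and return value back to
 * userspace.
 */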
int cxl_send_cmd(struct cxl_mailbox *cxl_mbox, struct cxl_send_command __user *s)
{
	struct device *dev = cxl_mbox->host;
	struct cxl_send_command send;
	struct cxl_mbox_cmd mbox_cmd;
	int rc;

	dev_dbg(dev, "Send IOCTL\n");

	if (copy_from_user(&send, s, sizeof(send)))
		return -EFAULT;

	rc = cxl_validate_cmd_from_user(&mbox_cmd, cxl_mbox, &send);
	if (rc)
		return rc;

	rc = handle_mailbox_cmd_from_user(cxl_mbox, &mbox_cmd, send.out.payload,
					  &send.out.size, &send.retval);
	if (rc)
		return rc;

	if (copy_to_user(s, &send, sizeof(send)))
		return -EFAULT;

	return 0;
}

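/*
 * Read a log in payload_size chunks via Get Log, accumulating into @out.
 * On return, @size is updated to the number of bytes actually transferred.
 */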
static int cxl_xfer_log(struct cxl_memdev_state *mds, uuid_t *uuid,
			u32 *size, u8 *out)
{
	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
	u32 remaining = *size;
	u32 offset = 0;

	while (remaining) {
		u32 xfer_size = min_t(u32, remaining, cxl_mbox->payload_size);
		struct cxl_mbox_cmd mbox_cmd;
		struct cxl_mbox_get_log log;
		int rc;

		log = (struct cxl_mbox_get_log) {
			.uuid = *uuid,
			.offset = cpu_to_le32(offset),
			.length = cpu_to_le32(xfer_size),
		};

		mbox_cmd = (struct cxl_mbox_cmd) {
			.opcode = CXL_MBOX_OP_GET_LOG,
			.size_in = sizeof(log),
			.payload_in = &log,
			.size_out = xfer_size,
			.payload_out = out,
		};

		rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);

		/*
		 * The output payload length that indicates the number
		 * of valid bytes can be smaller than the Log buffer
		 * size.
		 */
		if (rc == -EIO && mbox_cmd.size_out < xfer_size) {
			offset += mbox_cmd.size_out;
			break;
		}

		if (rc < 0)
			return rc;

		out += xfer_size;
		remaining -= xfer_size;
		offset += xfer_size;
	}

	*size = offset;

	return 0;
}

static int check_features_opcodes(u16 opcode, int *ro_cmds, int *wr_cmds)
{
	switch (opcode) {
	case CXL_MBOX_OP_GET_SUPPORTED_FEATURES:
	case CXL_MBOX_OP_GET_FEATURE:
		(*ro_cmds)++;
		return 1;
	case CXL_MBOX_OP_SET_FEATURE:
		(*wr_cmds)++;
		return 1;
	default:
		return 0;
	}
}

/* 'Get Supported Features' and 'Get Feature' */
#define MAX_FEATURES_READ_CMDS 2
static void set_features_cap(struct cxl_mailbox *cxl_mbox,
			     int ro_cmds, int wr_cmds)
{
	/* Setting up Features capability while walking the CEL */
	if (ro_cmds == MAX_FEATURES_READ_CMDS) {
		if (wr_cmds)
			cxl_mbox->feat_cap = CXL_FEATURES_RW;
		else
			cxl_mbox->feat_cap = CXL_FEATURES_RO;
	}
}

/**
 * cxl_walk_cel() - Walk through the Command Effects Log.
 * @mds: The driver data for the operation
 * @size: Length of the Command Effects Log.
 * @cel: CEL
 *
 * Iterate over each entry in the CEL and determine if the driver supports the
 * command. If so, the command is enabled for the device and can be used later.
 */
static void cxl_walk_cel(struct cxl_memdev_state *mds, size_t size, u8 *cel)
{
	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
	struct cxl_cel_entry *cel_entry;
	const int cel_entries = size / sizeof(*cel_entry);
	struct device *dev = mds->cxlds.dev;
	int i, ro_cmds = 0, wr_cmds = 0;

	cel_entry = (struct cxl_cel_entry *) cel;

	for (i = 0; i < cel_entries; i++) {
		u16 opcode = le16_to_cpu(cel_entry[i].opcode);
		struct cxl_mem_command *cmd = cxl_mem_find_command(opcode);
		int enabled = 0;

		if (cmd) {
			set_bit(cmd->info.id, cxl_mbox->enabled_cmds);
			enabled++;
		}

		enabled += check_features_opcodes(opcode, &ro_cmds,
						  &wr_cmds);

		if (cxl_is_poison_command(opcode)) {
			cxl_set_poison_cmd_enabled(&mds->poison, opcode);
			enabled++;
		}

		if (cxl_is_security_command(opcode)) {
			cxl_set_security_cmd_enabled(&mds->security, opcode);
			enabled++;
		}

		dev_dbg(dev, "Opcode 0x%04x %s\n", opcode,
			enabled ? "enabled" : "unsupported by driver");
	}

	set_features_cap(cxl_mbox, ro_cmds, wr_cmds);
}

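/*
 * Retrieve the Get Supported Logs payload. The caller owns the returned
 * buffer and is responsible for freeing it with kvfree().
 */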
static struct cxl_mbox_get_supported_logs *cxl_get_gsl(struct cxl_memdev_state *mds)
{
	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
	struct cxl_mbox_get_supported_logs *ret;
	struct cxl_mbox_cmd mbox_cmd;
	int rc;

	ret = kvmalloc(cxl_mbox->payload_size, GFP_KERNEL);
	if (!ret)
		return ERR_PTR(-ENOMEM);

	mbox_cmd = (struct cxl_mbox_cmd) {
		.opcode = CXL_MBOX_OP_GET_SUPPORTED_LOGS,
		.size_out = cxl_mbox->payload_size,
		.payload_out = ret,
		/* At least the record number field must be valid */
		.min_out = 2,
	};
	rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
	if (rc < 0) {
		kvfree(ret);
		return ERR_PTR(rc);
	}

	return ret;
}

enum {
	CEL_UUID,
	VENDOR_DEBUG_UUID,
};

/* See CXL 2.0 Table 170. Get Log Input Payload */
static const uuid_t log_uuid[] = {
	[CEL_UUID] = DEFINE_CXL_CEL_UUID,
	[VENDOR_DEBUG_UUID] = DEFINE_CXL_VENDOR_DEBUG_UUID,
};

/**
 * cxl_enumerate_cmds() - Enumerate commands for a device.
 * @mds: The driver data for the operation
 *
 * Returns 0 if enumeration completed successfully.
 *
 * CXL devices have optional support for certain commands. This function will
 * determine the set of supported commands for the hardware and update the
 * enabled_cmds bitmap in the @mds.
 */
int cxl_enumerate_cmds(struct cxl_memdev_state *mds)
{
	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
	struct cxl_mbox_get_supported_logs *gsl;
	struct device *dev = mds->cxlds.dev;
	struct cxl_mem_command *cmd;
	int i, rc;

	gsl = cxl_get_gsl(mds);
	if (IS_ERR(gsl))
		return PTR_ERR(gsl);

	rc = -ENOENT;
	for (i = 0; i < le16_to_cpu(gsl->entries); i++) {
		u32 size = le32_to_cpu(gsl->entry[i].size);
		uuid_t uuid = gsl->entry[i].uuid;
		u8 *log;

		dev_dbg(dev, "Found LOG type %pU of size %d", &uuid, size);

		if (!uuid_equal(&uuid, &log_uuid[CEL_UUID]))
			continue;

		log = kvmalloc(size, GFP_KERNEL);
		if (!log) {
			rc = -ENOMEM;
			goto out;
		}

		rc = cxl_xfer_log(mds, &uuid, &size, log);
		if (rc) {
			kvfree(log);
			goto out;
		}

		cxl_walk_cel(mds, size, log);
		kvfree(log);

		/* In case CEL was bogus, enable some default commands. */
		cxl_for_each_cmd(cmd)
			if (cmd->flags & CXL_CMD_FLAG_FORCE_ENABLE)
				set_bit(cmd->info.id, cxl_mbox->enabled_cmds);

		/* Found the required CEL */
		rc = 0;
	}
out:
	kvfree(gsl);
	return rc;
}
EXPORT_SYMBOL_NS_GPL(cxl_enumerate_cmds, "CXL");

void cxl_event_trace_record(const struct cxl_memdev *cxlmd,
			    enum cxl_event_log_type type,
			    enum cxl_event_type event_type,
			    const uuid_t *uuid, union cxl_event *evt)
{
	if (event_type == CXL_CPER_EVENT_MEM_MODULE) {
		trace_cxl_memory_module(cxlmd, type, &evt->mem_module);
		return;
	}
	if (event_type == CXL_CPER_EVENT_GENERIC) {
		trace_cxl_generic_event(cxlmd, type, uuid, &evt->generic);
		return;
	}
	if (event_type == CXL_CPER_EVENT_MEM_SPARING) {
		trace_cxl_memory_sparing(cxlmd, type, &evt->mem_sparing);
		return;
	}

	if (trace_cxl_general_media_enabled() || trace_cxl_dram_enabled()) {
		u64 dpa, hpa = ULLONG_MAX, hpa_alias = ULLONG_MAX;
		struct cxl_region *cxlr;

		/*
		 * These trace points are annotated with HPA and region
		 * translations. Take topology mutation locks and lookup
		 * { HPA, REGION } from { DPA, MEMDEV } in the event record.
		 */
		guard(rwsem_read)(&cxl_rwsem.region);
		guard(rwsem_read)(&cxl_rwsem.dpa);

		dpa = le64_to_cpu(evt->media_hdr.phys_addr) & CXL_DPA_MASK;
		cxlr = cxl_dpa_to_region(cxlmd, dpa);
		if (cxlr) {
			u64 cache_size = cxlr->params.cache_size;

			hpa = cxl_dpa_to_hpa(cxlr, cxlmd, dpa);
			if (cache_size)
				hpa_alias = hpa - cache_size;
		}

		if (event_type == CXL_CPER_EVENT_GEN_MEDIA) {
			if (cxl_store_rec_gen_media((struct cxl_memdev *)cxlmd, evt))
				dev_dbg(&cxlmd->dev, "CXL store rec_gen_media failed\n");

			if (evt->gen_media.media_hdr.descriptor &
			    CXL_GMER_EVT_DESC_THRESHOLD_EVENT)
				WARN_ON_ONCE((evt->gen_media.media_hdr.type &
					      CXL_GMER_MEM_EVT_TYPE_AP_CME_COUNTER_EXPIRE) &&
					     !get_unaligned_le24(evt->gen_media.cme_count));
			else
				WARN_ON_ONCE(evt->gen_media.media_hdr.type &
					     CXL_GMER_MEM_EVT_TYPE_AP_CME_COUNTER_EXPIRE);

			trace_cxl_general_media(cxlmd, type, cxlr, hpa,
						hpa_alias, &evt->gen_media);
		} else if (event_type == CXL_CPER_EVENT_DRAM) {
			if (cxl_store_rec_dram((struct cxl_memdev *)cxlmd, evt))
				dev_dbg(&cxlmd->dev, "CXL store rec_dram failed\n");

			if (evt->dram.media_hdr.descriptor &
			    CXL_GMER_EVT_DESC_THRESHOLD_EVENT)
				WARN_ON_ONCE((evt->dram.media_hdr.type &
					      CXL_DER_MEM_EVT_TYPE_AP_CME_COUNTER_EXPIRE) &&
					     !get_unaligned_le24(evt->dram.cvme_count));
			else
				WARN_ON_ONCE(evt->dram.media_hdr.type &
					     CXL_DER_MEM_EVT_TYPE_AP_CME_COUNTER_EXPIRE);

			trace_cxl_dram(cxlmd, type, cxlr, hpa, hpa_alias,
				       &evt->dram);
		}
	}
}
EXPORT_SYMBOL_NS_GPL(cxl_event_trace_record, "CXL");

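/* Map a raw event record's UUID to an event type and emit its trace event */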
static void __cxl_event_trace_record(const struct cxl_memdev *cxlmd,
				     enum cxl_event_log_type type,
				     struct cxl_event_record_raw *record)
{
	enum cxl_event_type ev_type = CXL_CPER_EVENT_GENERIC;
	const uuid_t *uuid = &record->id;

	if (uuid_equal(uuid, &CXL_EVENT_GEN_MEDIA_UUID))
		ev_type = CXL_CPER_EVENT_GEN_MEDIA;
	else if (uuid_equal(uuid, &CXL_EVENT_DRAM_UUID))
		ev_type = CXL_CPER_EVENT_DRAM;
	else if (uuid_equal(uuid, &CXL_EVENT_MEM_MODULE_UUID))
		ev_type = CXL_CPER_EVENT_MEM_MODULE;
	else if (uuid_equal(uuid, &CXL_EVENT_MEM_SPARING_UUID))
		ev_type = CXL_CPER_EVENT_MEM_SPARING;

	cxl_event_trace_record(cxlmd, type, ev_type, uuid, &record->event);
}

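/*
 * Clear the records returned by a Get Event Records pass, batching handles
 * into as many Clear Event Records commands as the payload size allows.
 */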
static int cxl_clear_event_record(struct cxl_memdev_state *mds,
				  enum cxl_event_log_type log,
				  struct cxl_get_event_payload *get_pl)
{
	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
	struct cxl_mbox_clear_event_payload *payload;
	u16 total = le16_to_cpu(get_pl->record_count);
	u8 max_handles = CXL_CLEAR_EVENT_MAX_HANDLES;
	size_t pl_size = struct_size(payload, handles, max_handles);
	struct cxl_mbox_cmd mbox_cmd;
	u16 cnt;
	int rc = 0;
	int i;

	/* Payload size may limit the max handles */
	if (pl_size > cxl_mbox->payload_size) {
		max_handles = (cxl_mbox->payload_size - sizeof(*payload)) /
			      sizeof(__le16);
		pl_size = struct_size(payload, handles, max_handles);
	}

	payload = kvzalloc(pl_size, GFP_KERNEL);
	if (!payload)
		return -ENOMEM;

	*payload = (struct cxl_mbox_clear_event_payload) {
		.event_log = log,
	};

	mbox_cmd = (struct cxl_mbox_cmd) {
		.opcode = CXL_MBOX_OP_CLEAR_EVENT_RECORD,
		.payload_in = payload,
		.size_in = pl_size,
	};

	/*
	 * Clear Event Records uses u8 for the handle cnt while Get Event
	 * Record can return up to 0xffff records.
	 */
	i = 0;
	for (cnt = 0; cnt < total; cnt++) {
		struct cxl_event_record_raw *raw = &get_pl->records[cnt];
		struct cxl_event_generic *gen = &raw->event.generic;

		payload->handles[i++] = gen->hdr.handle;
		dev_dbg(mds->cxlds.dev, "Event log '%d': Clearing %u\n", log,
			le16_to_cpu(payload->handles[i - 1]));

		if (i == max_handles) {
			payload->nr_recs = i;
			rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
			if (rc)
				goto free_pl;
			i = 0;
		}
	}

	/* Clear what is left if any */
	if (i) {
		payload->nr_recs = i;
		mbox_cmd.size_in = struct_size(payload, handles, i);
		rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
		if (rc)
			goto free_pl;
	}

free_pl:
	kvfree(payload);
	return rc;
}

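/*
 * Drain one event log: repeatedly Get Event Records, emit a trace event for
 * each record, and clear them, until the log reports no more records.
 */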
static void cxl_mem_get_records_log(struct cxl_memdev_state *mds,
				    enum cxl_event_log_type type)
{
	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
	struct cxl_memdev *cxlmd = mds->cxlds.cxlmd;
	struct device *dev = mds->cxlds.dev;
	struct cxl_get_event_payload *payload;
	u8 log_type = type;
	u16 nr_rec;

	mutex_lock(&mds->event.log_lock);
	payload = mds->event.buf;

	do {
		int rc, i;
		struct cxl_mbox_cmd mbox_cmd = (struct cxl_mbox_cmd) {
			.opcode = CXL_MBOX_OP_GET_EVENT_RECORD,
			.payload_in = &log_type,
			.size_in = sizeof(log_type),
			.payload_out = payload,
			.size_out = cxl_mbox->payload_size,
			.min_out = struct_size(payload, records, 0),
		};

		rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
		if (rc) {
			dev_err_ratelimited(dev,
				"Event log '%d': Failed to query event records : %d",
				type, rc);
			break;
		}

		nr_rec = le16_to_cpu(payload->record_count);
		if (!nr_rec)
			break;

		for (i = 0; i < nr_rec; i++)
			__cxl_event_trace_record(cxlmd, type,
						 &payload->records[i]);

		if (payload->flags & CXL_GET_EVENT_FLAG_OVERFLOW)
			trace_cxl_overflow(cxlmd, type, payload);

		rc = cxl_clear_event_record(mds, type, payload);
		if (rc) {
			dev_err_ratelimited(dev,
				"Event log '%d': Failed to clear events : %d",
				type, rc);
			break;
		}
	} while (nr_rec);

	mutex_unlock(&mds->event.log_lock);
}

/**
 * cxl_mem_get_event_records - Get Event Records from the device
 * @mds: The driver data for the operation
 * @status: Event Status register value identifying which events are available.
 *
 * Retrieve all event records available on the device, report them as trace
 * events, and clear them.
 *
 * See CXL rev 3.0 @8.2.9.2.2 Get Event Records
 * See CXL rev 3.0 @8.2.9.2.3 Clear Event Records
 */
void cxl_mem_get_event_records(struct cxl_memdev_state *mds, u32 status)
{
	dev_dbg(mds->cxlds.dev, "Reading event logs: %x\n", status);

	if (status & CXLDEV_EVENT_STATUS_FATAL)
		cxl_mem_get_records_log(mds, CXL_EVENT_TYPE_FATAL);
	if (status & CXLDEV_EVENT_STATUS_FAIL)
		cxl_mem_get_records_log(mds, CXL_EVENT_TYPE_FAIL);
	if (status & CXLDEV_EVENT_STATUS_WARN)
		cxl_mem_get_records_log(mds, CXL_EVENT_TYPE_WARN);
	if (status & CXLDEV_EVENT_STATUS_INFO)
		cxl_mem_get_records_log(mds, CXL_EVENT_TYPE_INFO);
}
EXPORT_SYMBOL_NS_GPL(cxl_mem_get_event_records, "CXL");

/**
 * cxl_mem_get_partition_info - Get partition info
 * @mds: The driver data for the operation
 *
 * Retrieve the current partition info for the device specified. The active
 * values are the current capacity in bytes. If not 0, the 'next' values are
 * the pending values, in bytes, which take effect on next cold reset.
 *
 * Return: 0 if no error, or the result of the mailbox command.
 *
 * See CXL @8.2.9.5.2.1 Get Partition Info
 */
static int cxl_mem_get_partition_info(struct cxl_memdev_state *mds)
{
	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
	struct cxl_mbox_get_partition_info pi;
	struct cxl_mbox_cmd mbox_cmd;
	int rc;

	mbox_cmd = (struct cxl_mbox_cmd) {
		.opcode = CXL_MBOX_OP_GET_PARTITION_INFO,
		.size_out = sizeof(pi),
		.payload_out = &pi,
	};
	rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
	if (rc)
		return rc;

	mds->active_volatile_bytes =
		le64_to_cpu(pi.active_volatile_cap) * CXL_CAPACITY_MULTIPLIER;
	mds->active_persistent_bytes =
		le64_to_cpu(pi.active_persistent_cap) * CXL_CAPACITY_MULTIPLIER;

	return 0;
}

/**
 * cxl_dev_state_identify() - Send the IDENTIFY command to the device.
 * @mds: The driver data for the operation
 *
 * Return: 0 if identify was executed successfully or media not ready.
 *
 * This will dispatch the identify command to the device and on success populate
 * structures to be exported to sysfs.
 */
int cxl_dev_state_identify(struct cxl_memdev_state *mds)
{
	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
	/* See CXL 2.0 Table 175 Identify Memory Device Output Payload */
	struct cxl_mbox_identify id;
	struct cxl_mbox_cmd mbox_cmd;
	u32 val;
	int rc;

	if (!mds->cxlds.media_ready)
		return 0;

	mbox_cmd = (struct cxl_mbox_cmd) {
		.opcode = CXL_MBOX_OP_IDENTIFY,
		.size_out = sizeof(id),
		.payload_out = &id,
	};
	rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
	if (rc < 0)
		return rc;

	mds->total_bytes =
		le64_to_cpu(id.total_capacity) * CXL_CAPACITY_MULTIPLIER;
	mds->volatile_only_bytes =
		le64_to_cpu(id.volatile_capacity) * CXL_CAPACITY_MULTIPLIER;
	mds->persistent_only_bytes =
		le64_to_cpu(id.persistent_capacity) * CXL_CAPACITY_MULTIPLIER;
	mds->partition_align_bytes =
		le64_to_cpu(id.partition_align) * CXL_CAPACITY_MULTIPLIER;

	mds->lsa_size = le32_to_cpu(id.lsa_size);
	memcpy(mds->firmware_version, id.fw_revision,
	       sizeof(id.fw_revision));

	if (test_bit(CXL_POISON_ENABLED_LIST, mds->poison.enabled_cmds)) {
		val = get_unaligned_le24(id.poison_list_max_mer);
		mds->poison.max_errors = min_t(u32, val, CXL_POISON_LIST_MAX);
	}

	return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_dev_state_identify, "CXL");

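/*
 * Issue Sanitize or Secure Erase after checking that the device's security
 * state permits it (no user passphrase set, and not locked for the secure
 * erase case).
 */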
static int __cxl_mem_sanitize(struct cxl_memdev_state *mds, u16 cmd)
{
	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
	int rc;
	u32 sec_out = 0;
	struct cxl_get_security_output {
		__le32 flags;
	} out;
	struct cxl_mbox_cmd sec_cmd = {
		.opcode = CXL_MBOX_OP_GET_SECURITY_STATE,
		.payload_out = &out,
		.size_out = sizeof(out),
	};
	struct cxl_mbox_cmd mbox_cmd = { .opcode = cmd };

	if (cmd != CXL_MBOX_OP_SANITIZE && cmd != CXL_MBOX_OP_SECURE_ERASE)
		return -EINVAL;

	rc = cxl_internal_send_cmd(cxl_mbox, &sec_cmd);
	if (rc < 0) {
		dev_err(cxl_mbox->host, "Failed to get security state : %d", rc);
		return rc;
	}

	/*
	 * Prior to using these commands, any security applied to
	 * the user data areas of the device shall be DISABLED (or
	 * UNLOCKED for secure erase case).
	 */
	sec_out = le32_to_cpu(out.flags);
	if (sec_out & CXL_PMEM_SEC_STATE_USER_PASS_SET)
		return -EINVAL;

	if (cmd == CXL_MBOX_OP_SECURE_ERASE &&
	    sec_out & CXL_PMEM_SEC_STATE_LOCKED)
		return -EINVAL;

	rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
	if (rc < 0) {
		dev_err(cxl_mbox->host, "Failed to sanitize device : %d", rc);
		return rc;
	}

	return 0;
}

/**
 * cxl_mem_sanitize() - Send a sanitization command to the device.
 * @cxlmd: The device for the operation
 * @cmd: The specific sanitization command opcode
 *
 * Return: 0 if the command was executed successfully, regardless of
 * whether or not the actual security operation is done in the background,
 * such as for the Sanitize case.
 * Error return values can be the result of the mailbox command, -EINVAL
 * when security requirements are not met or invalid contexts, or -EBUSY
 * if the sanitize operation is already in flight.
 *
 * See CXL 3.0 @8.2.9.8.5.1 Sanitize and @8.2.9.8.5.2 Secure Erase.
 */
int cxl_mem_sanitize(struct cxl_memdev *cxlmd, u16 cmd)
{
	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
	struct cxl_port *endpoint;

	/* synchronize with cxl_mem_probe() and decoder write operations */
	guard(device)(&cxlmd->dev);
	endpoint = cxlmd->endpoint;
	guard(rwsem_read)(&cxl_rwsem.region);
	/*
	 * Require an endpoint to be safe, otherwise the driver cannot
	 * be sure that the device is unmapped.
	 */
	if (endpoint && cxl_num_decoders_committed(endpoint) == 0)
		return __cxl_mem_sanitize(mds, cmd);

	return -EBUSY;
}

static void add_part(struct cxl_dpa_info *info, u64 start, u64 size, enum cxl_partition_mode mode)
{
	int i = info->nr_partitions;

	if (size == 0)
		return;

	info->part[i].range = (struct range) {
		.start = start,
		.end = start + size - 1,
	};
	info->part[i].mode = mode;
	info->nr_partitions++;
}

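/*
 * Populate @info with the device's DPA partitions: the static
 * volatile/persistent split when no partition alignment is advertised,
 * otherwise the active values from Get Partition Info.
 */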
int cxl_mem_dpa_fetch(struct cxl_memdev_state *mds, struct cxl_dpa_info *info)
{
	struct cxl_dev_state *cxlds = &mds->cxlds;
	struct device *dev = cxlds->dev;
	int rc;

	if (!cxlds->media_ready) {
		info->size = 0;
		return 0;
	}

	info->size = mds->total_bytes;

	if (mds->partition_align_bytes == 0) {
		add_part(info, 0, mds->volatile_only_bytes, CXL_PARTMODE_RAM);
		add_part(info, mds->volatile_only_bytes,
			 mds->persistent_only_bytes, CXL_PARTMODE_PMEM);
		return 0;
	}

	rc = cxl_mem_get_partition_info(mds);
	if (rc) {
		dev_err(dev, "Failed to query partition information\n");
		return rc;
	}

	add_part(info, 0, mds->active_volatile_bytes, CXL_PARTMODE_RAM);
	add_part(info, mds->active_volatile_bytes, mds->active_persistent_bytes,
		 CXL_PARTMODE_PMEM);

	return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_mem_dpa_fetch, "CXL");

int cxl_get_dirty_count(struct cxl_memdev_state *mds, u32 *count)
{
	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
	struct cxl_mbox_get_health_info_out hi;
	struct cxl_mbox_cmd mbox_cmd;
	int rc;

	mbox_cmd = (struct cxl_mbox_cmd) {
		.opcode = CXL_MBOX_OP_GET_HEALTH_INFO,
		.size_out = sizeof(hi),
		.payload_out = &hi,
	};

	rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
	if (!rc)
		*count = le32_to_cpu(hi.dirty_shutdown_cnt);

	return rc;
}
EXPORT_SYMBOL_NS_GPL(cxl_get_dirty_count, "CXL");

int cxl_arm_dirty_shutdown(struct cxl_memdev_state *mds)
{
	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
	struct cxl_mbox_cmd mbox_cmd;
	struct cxl_mbox_set_shutdown_state_in in = {
		.state = 1
	};

	mbox_cmd = (struct cxl_mbox_cmd) {
		.opcode = CXL_MBOX_OP_SET_SHUTDOWN_STATE,
		.size_in = sizeof(in),
		.payload_in = &in,
	};

	return cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
}
EXPORT_SYMBOL_NS_GPL(cxl_arm_dirty_shutdown, "CXL");

int cxl_set_timestamp(struct cxl_memdev_state *mds)
{
	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
	struct cxl_mbox_cmd mbox_cmd;
	struct cxl_mbox_set_timestamp_in pi;
	int rc;

	pi.timestamp = cpu_to_le64(ktime_get_real_ns());
	mbox_cmd = (struct cxl_mbox_cmd) {
		.opcode = CXL_MBOX_OP_SET_TIMESTAMP,
		.size_in = sizeof(pi),
		.payload_in = &pi,
	};

	rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
	/*
	 * Command is optional. Devices may have another way of providing
	 * a timestamp, or may return all 0s in timestamp fields.
	 * Don't report an error if this command isn't supported
	 */
	if (rc && (mbox_cmd.return_code != CXL_MBOX_CMD_RC_UNSUPPORTED))
		return rc;

	return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_set_timestamp, "CXL");

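/*
 * Walk the device poison list for the given DPA range and emit a trace event
 * per record, looping while the device reports more records and capping the
 * total at poison.max_errors to guard against an uncleared "more" flag.
 */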
int cxl_mem_get_poison(struct cxl_memdev *cxlmd, u64 offset, u64 len,
		       struct cxl_region *cxlr)
{
	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
	struct cxl_mailbox *cxl_mbox = &cxlmd->cxlds->cxl_mbox;
	struct cxl_mbox_poison_out *po;
	struct cxl_mbox_poison_in pi;
	int nr_records = 0;
	int rc;

	ACQUIRE(mutex_intr, lock)(&mds->poison.mutex);
	if ((rc = ACQUIRE_ERR(mutex_intr, &lock)))
		return rc;

	po = mds->poison.list_out;
	pi.offset = cpu_to_le64(offset);
	pi.length = cpu_to_le64(len / CXL_POISON_LEN_MULT);

	do {
		struct cxl_mbox_cmd mbox_cmd = (struct cxl_mbox_cmd){
			.opcode = CXL_MBOX_OP_GET_POISON,
			.size_in = sizeof(pi),
			.payload_in = &pi,
			.size_out = cxl_mbox->payload_size,
			.payload_out = po,
			.min_out = struct_size(po, record, 0),
		};

		rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
		if (rc)
			break;

		for (int i = 0; i < le16_to_cpu(po->count); i++)
			trace_cxl_poison(cxlmd, cxlr, &po->record[i],
					 po->flags, po->overflow_ts,
					 CXL_POISON_TRACE_LIST);

		/* Protect against an uncleared _FLAG_MORE */
		nr_records = nr_records + le16_to_cpu(po->count);
		if (nr_records >= mds->poison.max_errors) {
			dev_dbg(&cxlmd->dev, "Max Error Records reached: %d\n",
				nr_records);
			break;
		}
	} while (po->flags & CXL_POISON_FLAG_MORE);

	return rc;
}
EXPORT_SYMBOL_NS_GPL(cxl_mem_get_poison, "CXL");

static void free_poison_buf(void *buf)
{
	kvfree(buf);
}

/* Get Poison List output buffer is protected by mds->poison.mutex */
static int cxl_poison_alloc_buf(struct cxl_memdev_state *mds)
{
	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;

	mds->poison.list_out = kvmalloc(cxl_mbox->payload_size, GFP_KERNEL);
	if (!mds->poison.list_out)
		return -ENOMEM;

	return devm_add_action_or_reset(mds->cxlds.dev, free_poison_buf,
					mds->poison.list_out);
}

int cxl_poison_state_init(struct cxl_memdev_state *mds)
{
	int rc;

	if (!test_bit(CXL_POISON_ENABLED_LIST, mds->poison.enabled_cmds))
		return 0;

	rc = cxl_poison_alloc_buf(mds);
	if (rc) {
		clear_bit(CXL_POISON_ENABLED_LIST, mds->poison.enabled_cmds);
		return rc;
	}

	mutex_init(&mds->poison.mutex);
	return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_poison_state_init, "CXL");

int cxl_mailbox_init(struct cxl_mailbox *cxl_mbox, struct device *host)
{
	if (!cxl_mbox || !host)
		return -EINVAL;

	cxl_mbox->host = host;
	mutex_init(&cxl_mbox->mbox_mutex);
	rcuwait_init(&cxl_mbox->mbox_wait);

	return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_mailbox_init, "CXL");

struct cxl_memdev_state *cxl_memdev_state_create(struct device *dev)
{
	struct cxl_memdev_state *mds;
	int rc;

	mds = devm_kzalloc(dev, sizeof(*mds), GFP_KERNEL);
	if (!mds) {
		dev_err(dev, "No memory available\n");
		return ERR_PTR(-ENOMEM);
	}

	mutex_init(&mds->event.log_lock);
	mds->cxlds.dev = dev;
	mds->cxlds.reg_map.host = dev;
	mds->cxlds.cxl_mbox.host = dev;
	mds->cxlds.reg_map.resource = CXL_RESOURCE_NONE;
	mds->cxlds.type = CXL_DEVTYPE_CLASSMEM;

	rc = devm_cxl_register_mce_notifier(dev, &mds->mce_notifier);
	if (rc == -EOPNOTSUPP)
		dev_warn(dev, "CXL MCE unsupported\n");
	else if (rc)
		return ERR_PTR(rc);

	return mds;
}
EXPORT_SYMBOL_NS_GPL(cxl_memdev_state_create, "CXL");

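/* Set up the mbox debugfs directory and the raw_allow_all override knob */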
void __init cxl_mbox_init(void)
{
	struct dentry *mbox_debugfs;

	mbox_debugfs = cxl_debugfs_create_dir("mbox");
	debugfs_create_bool("raw_allow_all", 0600, mbox_debugfs,
			    &cxl_raw_allow_all);
}