GitHub Repository: torvalds/linux
Path: blob/master/drivers/cxl/core/features.c

// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2024-2025 Intel Corporation. All rights reserved. */
#include <linux/fwctl.h>
#include <linux/device.h>
#include <cxl/mailbox.h>
#include <cxl/features.h>
#include <uapi/fwctl/cxl.h>
#include "cxl.h"
#include "core.h"
#include "cxlmem.h"

/**
 * DOC: cxl features
 *
 * CXL Features:
 * A CXL device that includes a mailbox supports commands that allow
 * listing, getting, and setting of optionally defined features, such
 * as memory sparing or post-package sparing. Vendors may define custom
 * features for the device.
 */
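
/*
 * Illustrative usage sketch (not compiled): a memdev driver sets up the
 * features context once, then uses the helpers below to query or update a
 * Feature through the mailbox. The selection constant is assumed to come
 * from <cxl/features.h>; buf, feature_uuid, and return_code are
 * placeholders.
 *
 *	rc = devm_cxl_setup_features(cxlds);
 *	if (rc)
 *		return rc;
 *
 *	feat = cxl_feature_info(to_cxlfs(cxlds), &feature_uuid);
 *	if (!IS_ERR(feat))
 *		size = cxl_get_feature(&cxlds->cxl_mbox, &feat->uuid,
 *				       CXL_GET_FEAT_SEL_CURRENT_VALUE, buf,
 *				       sizeof(buf), 0, &return_code);
 */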

/* All the features below are exclusive to the kernel */
static const uuid_t cxl_exclusive_feats[] = {
	CXL_FEAT_PATROL_SCRUB_UUID,
	CXL_FEAT_ECS_UUID,
	CXL_FEAT_SPPR_UUID,
	CXL_FEAT_HPPR_UUID,
	CXL_FEAT_CACHELINE_SPARING_UUID,
	CXL_FEAT_ROW_SPARING_UUID,
	CXL_FEAT_BANK_SPARING_UUID,
	CXL_FEAT_RANK_SPARING_UUID,
};

static bool is_cxl_feature_exclusive_by_uuid(const uuid_t *uuid)
{
	for (int i = 0; i < ARRAY_SIZE(cxl_exclusive_feats); i++) {
		if (uuid_equal(uuid, &cxl_exclusive_feats[i]))
			return true;
	}

	return false;
}

static bool is_cxl_feature_exclusive(struct cxl_feat_entry *entry)
{
	return is_cxl_feature_exclusive_by_uuid(&entry->uuid);
}

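/**
 * to_cxlfs() - Retrieve the features state for a CXL device
 * @cxlds: CXL device context
 *
 * Return: the context set up by devm_cxl_setup_features(), or NULL if no
 * features context has been set up for the device.
 */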
struct cxl_features_state *to_cxlfs(struct cxl_dev_state *cxlds)
{
	return cxlds->cxlfs;
}
EXPORT_SYMBOL_NS_GPL(to_cxlfs, "CXL");

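/* Query the device for the total number of supported Feature entries. */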
static int cxl_get_supported_features_count(struct cxl_mailbox *cxl_mbox)
{
	struct cxl_mbox_get_sup_feats_out mbox_out;
	struct cxl_mbox_get_sup_feats_in mbox_in;
	struct cxl_mbox_cmd mbox_cmd;
	int rc;

	memset(&mbox_in, 0, sizeof(mbox_in));
	mbox_in.count = cpu_to_le32(sizeof(mbox_out));
	memset(&mbox_out, 0, sizeof(mbox_out));
	mbox_cmd = (struct cxl_mbox_cmd) {
		.opcode = CXL_MBOX_OP_GET_SUPPORTED_FEATURES,
		.size_in = sizeof(mbox_in),
		.payload_in = &mbox_in,
		.size_out = sizeof(mbox_out),
		.payload_out = &mbox_out,
		.min_out = sizeof(mbox_out),
	};
	rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
	if (rc < 0)
		return rc;

	return le16_to_cpu(mbox_out.supported_feats);
}

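/*
 * Retrieve all supported Feature entries from the device, batching the Get
 * Supported Features command so that each response fits in the mailbox
 * payload, and count how many entries are visible to userspace (i.e. not
 * kernel-exclusive).
 */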
static struct cxl_feat_entries *
get_supported_features(struct cxl_features_state *cxlfs)
{
	int remain_feats, max_size, max_feats, start, rc, hdr_size;
	struct cxl_mailbox *cxl_mbox = &cxlfs->cxlds->cxl_mbox;
	int feat_size = sizeof(struct cxl_feat_entry);
	struct cxl_mbox_get_sup_feats_in mbox_in;
	struct cxl_feat_entry *entry;
	struct cxl_mbox_cmd mbox_cmd;
	int user_feats = 0;
	int count;

	count = cxl_get_supported_features_count(cxl_mbox);
	if (count <= 0)
		return NULL;

	struct cxl_feat_entries *entries __free(kvfree) =
		kvmalloc(struct_size(entries, ent, count), GFP_KERNEL);
	if (!entries)
		return NULL;

	struct cxl_mbox_get_sup_feats_out *mbox_out __free(kvfree) =
		kvmalloc(cxl_mbox->payload_size, GFP_KERNEL);
	if (!mbox_out)
		return NULL;

	hdr_size = struct_size(mbox_out, ents, 0);
	max_size = cxl_mbox->payload_size - hdr_size;
	/* Max feature entries that can fit in the mailbox max payload size */
	max_feats = max_size / feat_size;
	entry = entries->ent;

	start = 0;
	remain_feats = count;
	do {
		int retrieved, alloc_size, copy_feats;
		int num_entries;

		if (remain_feats > max_feats) {
			alloc_size = struct_size(mbox_out, ents, max_feats);
			remain_feats = remain_feats - max_feats;
			copy_feats = max_feats;
		} else {
			alloc_size = struct_size(mbox_out, ents, remain_feats);
			copy_feats = remain_feats;
			remain_feats = 0;
		}

		memset(&mbox_in, 0, sizeof(mbox_in));
		mbox_in.count = cpu_to_le32(alloc_size);
		mbox_in.start_idx = cpu_to_le16(start);
		memset(mbox_out, 0, alloc_size);
		mbox_cmd = (struct cxl_mbox_cmd) {
			.opcode = CXL_MBOX_OP_GET_SUPPORTED_FEATURES,
			.size_in = sizeof(mbox_in),
			.payload_in = &mbox_in,
			.size_out = alloc_size,
			.payload_out = mbox_out,
			.min_out = hdr_size,
		};
		rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
		if (rc < 0)
			return NULL;

		if (mbox_cmd.size_out <= hdr_size)
			return NULL;

		/*
		 * Make sure the retrieved output buffer is a whole multiple
		 * of the feature entry size.
		 */
		retrieved = mbox_cmd.size_out - hdr_size;
		if (retrieved % feat_size)
			return NULL;

		num_entries = le16_to_cpu(mbox_out->num_entries);
		/*
		 * If the reported number of entries times the defined entry
		 * size does not equal the number of retrieved bytes, the
		 * output payload is malformed.
		 */
		if (num_entries * feat_size != retrieved)
			return NULL;

		memcpy(entry, mbox_out->ents, retrieved);
		for (int i = 0; i < num_entries; i++) {
			if (!is_cxl_feature_exclusive(entry + i))
				user_feats++;
		}
		entry += num_entries;
		/*
		 * If the number of output entries is less than expected, add
		 * the remaining entries to the next batch.
		 */
		remain_feats += copy_feats - num_entries;
		start += num_entries;
	} while (remain_feats);

	entries->num_features = count;
	entries->num_user_features = user_feats;

	return no_free_ptr(entries);
}

static void free_cxlfs(void *_cxlfs)
{
	struct cxl_features_state *cxlfs = _cxlfs;
	struct cxl_dev_state *cxlds = cxlfs->cxlds;

	cxlds->cxlfs = NULL;
	kvfree(cxlfs->entries);
	kfree(cxlfs);
}

/**
 * devm_cxl_setup_features() - Allocate and initialize features context
 * @cxlds: CXL device context
 *
 * Return 0 on success or -errno on failure.
 */
int devm_cxl_setup_features(struct cxl_dev_state *cxlds)
{
	struct cxl_mailbox *cxl_mbox = &cxlds->cxl_mbox;

	if (cxl_mbox->feat_cap < CXL_FEATURES_RO)
		return -ENODEV;

	struct cxl_features_state *cxlfs __free(kfree) =
		kzalloc(sizeof(*cxlfs), GFP_KERNEL);
	if (!cxlfs)
		return -ENOMEM;

	cxlfs->cxlds = cxlds;

	cxlfs->entries = get_supported_features(cxlfs);
	if (!cxlfs->entries)
		return -ENOMEM;

	cxlds->cxlfs = cxlfs;

	return devm_add_action_or_reset(cxlds->dev, free_cxlfs, no_free_ptr(cxlfs));
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_setup_features, "CXL");

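/**
 * cxl_get_feature() - Read feature data via the Get Feature mailbox command
 * @cxl_mbox: CXL mailbox context
 * @feat_uuid: UUID of the feature to read
 * @selection: feature data selection (enum cxl_get_feat_selection)
 * @feat_out: output buffer for the feature data
 * @feat_out_size: size of @feat_out in bytes
 * @offset: starting offset into the feature data
 * @return_code: optional mailbox return code output
 *
 * The read is split into multiple mailbox transactions when the requested
 * size exceeds the mailbox payload size.
 *
 * Return: number of bytes retrieved, or 0 on failure with @return_code set
 * when provided.
 */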
size_t cxl_get_feature(struct cxl_mailbox *cxl_mbox, const uuid_t *feat_uuid,
		       enum cxl_get_feat_selection selection,
		       void *feat_out, size_t feat_out_size, u16 offset,
		       u16 *return_code)
{
	size_t data_to_rd_size, size_out;
	struct cxl_mbox_get_feat_in pi;
	struct cxl_mbox_cmd mbox_cmd;
	size_t data_rcvd_size = 0;
	int rc;

	if (return_code)
		*return_code = CXL_MBOX_CMD_RC_INPUT;

	if (!feat_out || !feat_out_size)
		return 0;

	size_out = min(feat_out_size, cxl_mbox->payload_size);
	uuid_copy(&pi.uuid, feat_uuid);
	pi.selection = selection;
	do {
		data_to_rd_size = min(feat_out_size - data_rcvd_size,
				      cxl_mbox->payload_size);
		pi.offset = cpu_to_le16(offset + data_rcvd_size);
		pi.count = cpu_to_le16(data_to_rd_size);

		mbox_cmd = (struct cxl_mbox_cmd) {
			.opcode = CXL_MBOX_OP_GET_FEATURE,
			.size_in = sizeof(pi),
			.payload_in = &pi,
			.size_out = size_out,
			.payload_out = feat_out + data_rcvd_size,
			.min_out = data_to_rd_size,
		};
		rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
		if (rc < 0 || !mbox_cmd.size_out) {
			if (return_code)
				*return_code = mbox_cmd.return_code;
			return 0;
		}
		data_rcvd_size += mbox_cmd.size_out;
	} while (data_rcvd_size < feat_out_size);

	if (return_code)
		*return_code = CXL_MBOX_CMD_RC_SUCCESS;

	return data_rcvd_size;
}

/*
 * FEAT_DATA_MIN_PAYLOAD_SIZE - minimum number of extra bytes that must be
 * available in the mailbox payload for the actual feature data so that the
 * feature data transfer can work as expected.
 */
#define FEAT_DATA_MIN_PAYLOAD_SIZE 10
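/**
 * cxl_set_feature() - Write feature data via the Set Feature mailbox command
 * @cxl_mbox: CXL mailbox context
 * @feat_uuid: UUID of the feature to modify
 * @feat_version: version of the feature data being written
 * @feat_data: buffer containing the feature data
 * @feat_data_size: size of @feat_data in bytes
 * @feat_flag: Set Feature flags; the data transfer bits are managed here
 * @offset: starting offset into the feature data
 * @return_code: optional mailbox return code output
 *
 * Data that does not fit in a single mailbox payload is sent as an
 * initiate/continue/finish transfer sequence.
 *
 * Return: 0 on success or -errno on failure, with @return_code set when
 * provided.
 */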
int cxl_set_feature(struct cxl_mailbox *cxl_mbox,
		    const uuid_t *feat_uuid, u8 feat_version,
		    const void *feat_data, size_t feat_data_size,
		    u32 feat_flag, u16 offset, u16 *return_code)
{
	size_t data_in_size, data_sent_size = 0;
	struct cxl_mbox_cmd mbox_cmd;
	size_t hdr_size;

	if (return_code)
		*return_code = CXL_MBOX_CMD_RC_INPUT;

	struct cxl_mbox_set_feat_in *pi __free(kfree) =
		kzalloc(cxl_mbox->payload_size, GFP_KERNEL);
	if (!pi)
		return -ENOMEM;

	uuid_copy(&pi->uuid, feat_uuid);
	pi->version = feat_version;
	feat_flag &= ~CXL_SET_FEAT_FLAG_DATA_TRANSFER_MASK;
	feat_flag |= CXL_SET_FEAT_FLAG_DATA_SAVED_ACROSS_RESET;
	hdr_size = sizeof(pi->hdr);
	/*
	 * Check that the minimum mbox payload size is available for
	 * the feature data transfer.
	 */
	if (hdr_size + FEAT_DATA_MIN_PAYLOAD_SIZE > cxl_mbox->payload_size)
		return -ENOMEM;

	if (hdr_size + feat_data_size <= cxl_mbox->payload_size) {
		pi->flags = cpu_to_le32(feat_flag |
					CXL_SET_FEAT_FLAG_FULL_DATA_TRANSFER);
		data_in_size = feat_data_size;
	} else {
		pi->flags = cpu_to_le32(feat_flag |
					CXL_SET_FEAT_FLAG_INITIATE_DATA_TRANSFER);
		data_in_size = cxl_mbox->payload_size - hdr_size;
	}

	do {
		int rc;

		pi->offset = cpu_to_le16(offset + data_sent_size);
		memcpy(pi->feat_data, feat_data + data_sent_size, data_in_size);
		mbox_cmd = (struct cxl_mbox_cmd) {
			.opcode = CXL_MBOX_OP_SET_FEATURE,
			.size_in = hdr_size + data_in_size,
			.payload_in = pi,
		};
		rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
		if (rc < 0) {
			if (return_code)
				*return_code = mbox_cmd.return_code;
			return rc;
		}

		data_sent_size += data_in_size;
		if (data_sent_size >= feat_data_size) {
			if (return_code)
				*return_code = CXL_MBOX_CMD_RC_SUCCESS;
			return 0;
		}

		if ((feat_data_size - data_sent_size) <= (cxl_mbox->payload_size - hdr_size)) {
			data_in_size = feat_data_size - data_sent_size;
			pi->flags = cpu_to_le32(feat_flag |
						CXL_SET_FEAT_FLAG_FINISH_DATA_TRANSFER);
		} else {
			pi->flags = cpu_to_le32(feat_flag |
						CXL_SET_FEAT_FLAG_CONTINUE_DATA_TRANSFER);
		}
	} while (true);
}

/* FWCTL support */

static inline struct cxl_memdev *fwctl_to_memdev(struct fwctl_device *fwctl_dev)
{
	return to_cxl_memdev(fwctl_dev->dev.parent);
}

static int cxlctl_open_uctx(struct fwctl_uctx *uctx)
{
	return 0;
}

static void cxlctl_close_uctx(struct fwctl_uctx *uctx)
{
}

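/**
 * cxl_feature_info() - Look up a cached feature entry by UUID
 * @cxlfs: CXL features state context
 * @uuid: UUID of the feature to look up
 *
 * Return: the matching entry, ERR_PTR(-EOPNOTSUPP) if no feature entries
 * were enumerated, or ERR_PTR(-EINVAL) if the UUID is not supported.
 */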
struct cxl_feat_entry *
cxl_feature_info(struct cxl_features_state *cxlfs,
		 const uuid_t *uuid)
{
	struct cxl_feat_entry *feat;

	if (!cxlfs || !cxlfs->entries)
		return ERR_PTR(-EOPNOTSUPP);

	for (int i = 0; i < cxlfs->entries->num_features; i++) {
		feat = &cxlfs->entries->ent[i];
		if (uuid_equal(uuid, &feat->uuid))
			return feat;
	}

	return ERR_PTR(-EINVAL);
}

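/*
 * fwctl handler for Get Supported Features: answered from the cached
 * entries rather than the hardware. Kernel-exclusive features are reported
 * with the changeable flag cleared and a zero set_feat_size.
 */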
static void *cxlctl_get_supported_features(struct cxl_features_state *cxlfs,
					   const struct fwctl_rpc_cxl *rpc_in,
					   size_t *out_len)
{
	const struct cxl_mbox_get_sup_feats_in *feat_in;
	struct cxl_mbox_get_sup_feats_out *feat_out;
	struct cxl_feat_entry *pos;
	size_t out_size;
	int requested;
	u32 count;
	u16 start;
	int i;

	if (rpc_in->op_size != sizeof(*feat_in))
		return ERR_PTR(-EINVAL);

	feat_in = &rpc_in->get_sup_feats_in;
	count = le32_to_cpu(feat_in->count);
	start = le16_to_cpu(feat_in->start_idx);
	requested = count / sizeof(*pos);

	/*
	 * Make sure that the total requested number of entries is not greater
	 * than the total number of supported features allowed for userspace.
	 */
	if (start >= cxlfs->entries->num_features)
		return ERR_PTR(-EINVAL);

	requested = min_t(int, requested, cxlfs->entries->num_features - start);

	out_size = sizeof(struct fwctl_rpc_cxl_out) +
		   struct_size(feat_out, ents, requested);

	struct fwctl_rpc_cxl_out *rpc_out __free(kvfree) =
		kvzalloc(out_size, GFP_KERNEL);
	if (!rpc_out)
		return ERR_PTR(-ENOMEM);

	rpc_out->size = struct_size(feat_out, ents, requested);
	feat_out = &rpc_out->get_sup_feats_out;

	for (i = start, pos = &feat_out->ents[0];
	     i < cxlfs->entries->num_features; i++, pos++) {
		if (i - start == requested)
			break;

		memcpy(pos, &cxlfs->entries->ent[i], sizeof(*pos));
		/*
		 * If the feature is exclusive, set the set_feat_size to 0 to
		 * indicate that the feature is not changeable.
		 */
		if (is_cxl_feature_exclusive(pos)) {
			u32 flags;

			pos->set_feat_size = 0;
			flags = le32_to_cpu(pos->flags);
			flags &= ~CXL_FEATURE_F_CHANGEABLE;
			pos->flags = cpu_to_le32(flags);
		}
	}

	feat_out->num_entries = cpu_to_le16(requested);
	feat_out->supported_feats = cpu_to_le16(cxlfs->entries->num_features);
	rpc_out->retval = CXL_MBOX_CMD_RC_SUCCESS;
	*out_len = out_size;

	return no_free_ptr(rpc_out);
}

static void *cxlctl_get_feature(struct cxl_features_state *cxlfs,
				const struct fwctl_rpc_cxl *rpc_in,
				size_t *out_len)
{
	struct cxl_mailbox *cxl_mbox = &cxlfs->cxlds->cxl_mbox;
	const struct cxl_mbox_get_feat_in *feat_in;
	u16 offset, count, return_code;
	size_t out_size = *out_len;

	if (rpc_in->op_size != sizeof(*feat_in))
		return ERR_PTR(-EINVAL);

	feat_in = &rpc_in->get_feat_in;
	offset = le16_to_cpu(feat_in->offset);
	count = le16_to_cpu(feat_in->count);

	if (!count)
		return ERR_PTR(-EINVAL);

	struct fwctl_rpc_cxl_out *rpc_out __free(kvfree) =
		kvzalloc(out_size, GFP_KERNEL);
	if (!rpc_out)
		return ERR_PTR(-ENOMEM);

	out_size = cxl_get_feature(cxl_mbox, &feat_in->uuid,
				   feat_in->selection, rpc_out->payload,
				   count, offset, &return_code);
	*out_len = sizeof(struct fwctl_rpc_cxl_out);
	if (!out_size) {
		rpc_out->size = 0;
		rpc_out->retval = return_code;
		return no_free_ptr(rpc_out);
	}

	rpc_out->size = out_size;
	rpc_out->retval = CXL_MBOX_CMD_RC_SUCCESS;
	*out_len += out_size;

	return no_free_ptr(rpc_out);
}

static void *cxlctl_set_feature(struct cxl_features_state *cxlfs,
				const struct fwctl_rpc_cxl *rpc_in,
				size_t *out_len)
{
	struct cxl_mailbox *cxl_mbox = &cxlfs->cxlds->cxl_mbox;
	const struct cxl_mbox_set_feat_in *feat_in;
	size_t out_size, data_size;
	u16 offset, return_code;
	u32 flags;
	int rc;

	if (rpc_in->op_size <= sizeof(feat_in->hdr))
		return ERR_PTR(-EINVAL);

	feat_in = &rpc_in->set_feat_in;

	if (is_cxl_feature_exclusive_by_uuid(&feat_in->uuid))
		return ERR_PTR(-EPERM);

	offset = le16_to_cpu(feat_in->offset);
	flags = le32_to_cpu(feat_in->flags);
	out_size = *out_len;

	struct fwctl_rpc_cxl_out *rpc_out __free(kvfree) =
		kvzalloc(out_size, GFP_KERNEL);
	if (!rpc_out)
		return ERR_PTR(-ENOMEM);

	rpc_out->size = 0;

	data_size = rpc_in->op_size - sizeof(feat_in->hdr);
	rc = cxl_set_feature(cxl_mbox, &feat_in->uuid,
			     feat_in->version, feat_in->feat_data,
			     data_size, flags, offset, &return_code);
	*out_len = sizeof(*rpc_out);
	if (rc) {
		rpc_out->retval = return_code;
		return no_free_ptr(rpc_out);
	}

	rpc_out->retval = CXL_MBOX_CMD_RC_SUCCESS;

	return no_free_ptr(rpc_out);
}

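/*
 * Validate a userspace Set Feature request against the feature's effects
 * and the fwctl scope: immediate changes require FWCTL_RPC_DEBUG_WRITE_FULL,
 * while reset-only changes are allowed with FWCTL_RPC_DEBUG_WRITE.
 */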
static bool cxlctl_validate_set_features(struct cxl_features_state *cxlfs,
					 const struct fwctl_rpc_cxl *rpc_in,
					 enum fwctl_rpc_scope scope)
{
	u16 effects, imm_mask, reset_mask;
	struct cxl_feat_entry *feat;
	u32 flags;

	if (rpc_in->op_size < sizeof(uuid_t))
		return false;

	feat = cxl_feature_info(cxlfs, &rpc_in->set_feat_in.uuid);
	if (IS_ERR(feat))
		return false;

	/* Ensure that the attribute is changeable */
	flags = le32_to_cpu(feat->flags);
	if (!(flags & CXL_FEATURE_F_CHANGEABLE))
		return false;

	effects = le16_to_cpu(feat->effects);

	/*
	 * Reject if reserved bits are set, since the effects are not
	 * comprehended by the driver.
	 */
	if (effects & CXL_CMD_EFFECTS_RESERVED) {
		dev_warn_once(cxlfs->cxlds->dev,
			      "Reserved bits set in the Feature effects field!\n");
		return false;
	}

	/* Currently no user background command support */
	if (effects & CXL_CMD_BACKGROUND)
		return false;

	/* Effects that cause an immediate change need the highest security scope */
	imm_mask = CXL_CMD_CONFIG_CHANGE_IMMEDIATE |
		   CXL_CMD_DATA_CHANGE_IMMEDIATE |
		   CXL_CMD_POLICY_CHANGE_IMMEDIATE |
		   CXL_CMD_LOG_CHANGE_IMMEDIATE;

	reset_mask = CXL_CMD_CONFIG_CHANGE_COLD_RESET |
		     CXL_CMD_CONFIG_CHANGE_CONV_RESET |
		     CXL_CMD_CONFIG_CHANGE_CXL_RESET;

	/* If neither an immediate nor a reset effect is set, the hardware has a bug */
	if (!(effects & imm_mask) && !(effects & reset_mask))
		return false;

	/*
	 * If the Feature setting causes an immediate configuration change,
	 * then the full write permission policy is required.
	 */
	if (effects & imm_mask && scope >= FWCTL_RPC_DEBUG_WRITE_FULL)
		return true;

	/*
	 * If the Feature setting only causes a configuration change after a
	 * reset, then the lesser level of write permission policy is
	 * sufficient.
	 */
	if (!(effects & imm_mask) && scope >= FWCTL_RPC_DEBUG_WRITE)
		return true;

	return false;
}

static bool cxlctl_validate_hw_command(struct cxl_features_state *cxlfs,
				       const struct fwctl_rpc_cxl *rpc_in,
				       enum fwctl_rpc_scope scope,
				       u16 opcode)
{
	struct cxl_mailbox *cxl_mbox = &cxlfs->cxlds->cxl_mbox;

	switch (opcode) {
	case CXL_MBOX_OP_GET_SUPPORTED_FEATURES:
	case CXL_MBOX_OP_GET_FEATURE:
		return cxl_mbox->feat_cap >= CXL_FEATURES_RO;
	case CXL_MBOX_OP_SET_FEATURE:
		if (cxl_mbox->feat_cap < CXL_FEATURES_RW)
			return false;
		return cxlctl_validate_set_features(cxlfs, rpc_in, scope);
	default:
		return false;
	}
}

static void *cxlctl_handle_commands(struct cxl_features_state *cxlfs,
				    const struct fwctl_rpc_cxl *rpc_in,
				    size_t *out_len, u16 opcode)
{
	switch (opcode) {
	case CXL_MBOX_OP_GET_SUPPORTED_FEATURES:
		return cxlctl_get_supported_features(cxlfs, rpc_in, out_len);
	case CXL_MBOX_OP_GET_FEATURE:
		return cxlctl_get_feature(cxlfs, rpc_in, out_len);
	case CXL_MBOX_OP_SET_FEATURE:
		return cxlctl_set_feature(cxlfs, rpc_in, out_len);
	default:
		return ERR_PTR(-EOPNOTSUPP);
	}
}

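/* Top-level fwctl RPC entry point: validate the request, then dispatch. */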
static void *cxlctl_fw_rpc(struct fwctl_uctx *uctx, enum fwctl_rpc_scope scope,
			   void *in, size_t in_len, size_t *out_len)
{
	struct fwctl_device *fwctl_dev = uctx->fwctl;
	struct cxl_memdev *cxlmd = fwctl_to_memdev(fwctl_dev);
	struct cxl_features_state *cxlfs = to_cxlfs(cxlmd->cxlds);
	const struct fwctl_rpc_cxl *rpc_in = in;
	u16 opcode = rpc_in->opcode;

	if (!cxlctl_validate_hw_command(cxlfs, rpc_in, scope, opcode))
		return ERR_PTR(-EINVAL);

	return cxlctl_handle_commands(cxlfs, rpc_in, out_len, opcode);
}

static const struct fwctl_ops cxlctl_ops = {
	.device_type = FWCTL_DEVICE_TYPE_CXL,
	.uctx_size = sizeof(struct fwctl_uctx),
	.open_uctx = cxlctl_open_uctx,
	.close_uctx = cxlctl_close_uctx,
	.fw_rpc = cxlctl_fw_rpc,
};

DEFINE_FREE(free_fwctl_dev, struct fwctl_device *, if (_T) fwctl_put(_T))

static void free_memdev_fwctl(void *_fwctl_dev)
{
	struct fwctl_device *fwctl_dev = _fwctl_dev;

	fwctl_unregister(fwctl_dev);
	fwctl_put(fwctl_dev);
}

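/**
 * devm_cxl_setup_fwctl() - Register a fwctl device for a CXL memdev
 * @host: devres host device that controls the fwctl lifetime
 * @cxlmd: CXL memory device context
 *
 * Return: 0 on success or -errno on failure. -ENODEV is returned when no
 * userspace-visible features were enumerated.
 */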
int devm_cxl_setup_fwctl(struct device *host, struct cxl_memdev *cxlmd)
{
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct cxl_features_state *cxlfs;
	int rc;

	cxlfs = to_cxlfs(cxlds);
	if (!cxlfs)
		return -ENODEV;

	/* No need to set up fwctl if no user-allowed features were found */
	if (!cxlfs->entries->num_user_features)
		return -ENODEV;

	struct fwctl_device *fwctl_dev __free(free_fwctl_dev) =
		_fwctl_alloc_device(&cxlmd->dev, &cxlctl_ops, sizeof(*fwctl_dev));
	if (!fwctl_dev)
		return -ENOMEM;

	rc = fwctl_register(fwctl_dev);
	if (rc)
		return rc;

	return devm_add_action_or_reset(host, free_memdev_fwctl,
					no_free_ptr(fwctl_dev));
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_setup_fwctl, "CXL");

MODULE_IMPORT_NS("FWCTL");