Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
torvalds
GitHub Repository: torvalds/linux
Path: blob/master/tools/testing/nvdimm/test/nfit.c
50373 views
1
// SPDX-License-Identifier: GPL-2.0-only
2
/*
3
* Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
4
*/
5
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
6
#include <linux/platform_device.h>
7
#include <linux/dma-mapping.h>
8
#include <linux/workqueue.h>
9
#include <linux/libnvdimm.h>
10
#include <linux/genalloc.h>
11
#include <linux/vmalloc.h>
12
#include <linux/device.h>
13
#include <linux/module.h>
14
#include <linux/mutex.h>
15
#include <linux/ndctl.h>
16
#include <linux/sizes.h>
17
#include <linux/list.h>
18
#include <linux/slab.h>
19
#include <nd-core.h>
20
#include <intel.h>
21
#include <nfit.h>
22
#include <nd.h>
23
#include "nfit_test.h"
24
#include "../watermark.h"
25
26
/*
27
* Generate an NFIT table to describe the following topology:
28
*
29
* BUS0: Interleaved PMEM regions, and aliasing with BLK regions
30
*
31
* (a) (b) DIMM BLK-REGION
32
* +----------+--------------+----------+---------+
33
* +------+ | blk2.0 | pm0.0 | blk2.1 | pm1.0 | 0 region2
34
* | imc0 +--+- - - - - region0 - - - -+----------+ +
35
* +--+---+ | blk3.0 | pm0.0 | blk3.1 | pm1.0 | 1 region3
36
* | +----------+--------------v----------v v
37
* +--+---+ | |
38
* | cpu0 | region1
39
* +--+---+ | |
40
* | +-------------------------^----------^ ^
41
* +--+---+ | blk4.0 | pm1.0 | 2 region4
42
* | imc1 +--+-------------------------+----------+ +
43
* +------+ | blk5.0 | pm1.0 | 3 region5
44
* +-------------------------+----------+-+-------+
45
*
46
* +--+---+
47
* | cpu1 |
48
* +--+---+ (Hotplug DIMM)
49
* | +----------------------------------------------+
50
* +--+---+ | blk6.0/pm7.0 | 4 region6/7
51
* | imc0 +--+----------------------------------------------+
52
* +------+
53
*
54
*
55
* *) In this layout we have four dimms and two memory controllers in one
56
* socket. Each unique interface (BLK or PMEM) to DPA space
57
* is identified by a region device with a dynamically assigned id.
58
*
59
* *) The first portion of dimm0 and dimm1 are interleaved as REGION0.
60
* A single PMEM namespace "pm0.0" is created using half of the
61
* REGION0 SPA-range. REGION0 spans dimm0 and dimm1. PMEM namespace
62
 * allocate from the bottom of a region. The unallocated
63
* portion of REGION0 aliases with REGION2 and REGION3. That
64
 * unallocated capacity is reclaimed as BLK namespaces ("blk2.0" and
65
* "blk3.0") starting at the base of each DIMM to offset (a) in those
66
* DIMMs. "pm0.0", "blk2.0" and "blk3.0" are free-form readable
67
* names that can be assigned to a namespace.
68
*
69
* *) In the last portion of dimm0 and dimm1 we have an interleaved
70
* SPA range, REGION1, that spans those two dimms as well as dimm2
71
* and dimm3. Some of REGION1 allocated to a PMEM namespace named
72
* "pm1.0" the rest is reclaimed in 4 BLK namespaces (for each
73
* dimm in the interleave set), "blk2.1", "blk3.1", "blk4.0", and
74
* "blk5.0".
75
*
76
* *) The portion of dimm2 and dimm3 that do not participate in the
77
* REGION1 interleaved SPA range (i.e. the DPA address below offset
78
* (b) are also included in the "blk4.0" and "blk5.0" namespaces.
79
* Note, that BLK namespaces need not be contiguous in DPA-space, and
80
* can consume aliased capacity from multiple interleave sets.
81
*
82
* BUS1: Legacy NVDIMM (single contiguous range)
83
*
84
* region2
85
* +---------------------+
86
* |---------------------|
87
* || pm2.0 ||
88
* |---------------------|
89
* +---------------------+
90
*
91
* *) A NFIT-table may describe a simple system-physical-address range
92
* with no BLK aliasing. This type of region may optionally
93
* reference an NVDIMM.
94
*/
95
/* Geometry and sizing constants for the emulated NFIT topology above. */
enum {
	NUM_PM = 3,		/* PMEM-only SPA ranges */
	NUM_DCR = 5,		/* DIMM control regions */
	NUM_HINTS = 8,		/* flush hint addresses per DIMM */
	NUM_BDW = NUM_DCR,	/* one block data window per DCR */
	NUM_SPA = NUM_PM + NUM_DCR + NUM_BDW,
	/* memdev (region mapping) entries; see the diagram above */
	NUM_MEM = NUM_DCR + NUM_BDW + 2 /* spa0 iset */
		+ 4 /* spa1 iset */ + 1 /* spa11 iset */,
	DIMM_SIZE = SZ_32M,	/* capacity of each emulated DIMM */
	LABEL_SIZE = SZ_128K,	/* namespace-label storage per DIMM */
	SPA_VCD_SIZE = SZ_4M,
	SPA0_SIZE = DIMM_SIZE,
	SPA1_SIZE = DIMM_SIZE*2,
	SPA2_SIZE = DIMM_SIZE,
	BDW_SIZE = 64 << 8,	/* block data window aperture, in bytes */
	DCR_SIZE = 12,
	NUM_NFITS = 2, /* permit testing multiple NFITs per system */
};
113
114
/*
 * Emulated DIMM control region: the BLK-mode address/status registers
 * followed by the data window backing store.  NOTE(review): "aperature"
 * is a historical misspelling; the field name is referenced elsewhere,
 * so it must not be renamed here.
 */
struct nfit_test_dcr {
	__le64 bdw_addr;	/* block data window target address */
	__le32 bdw_status;	/* status of the last BLK transaction */
	__u8 aperature[BDW_SIZE]; /* data window contents */
};
119
120
/*
 * Build an NFIT device handle from its topology coordinates, using the
 * ACPI NFIT "NFIT Device Handle" bit layout:
 *   node[27:16] | socket[15:12] | imc[11:8] | chan[7:4] | dimm[3:0]
 *
 * Each argument is fully parenthesized so that expression arguments
 * (e.g. a ternary) are masked as a whole instead of being re-grouped
 * by operator precedence against '&'.
 */
#define NFIT_DIMM_HANDLE(node, socket, imc, chan, dimm) \
	((((node) & 0xfff) << 16) | (((socket) & 0xf) << 12) \
	 | (((imc) & 0xf) << 8) | (((chan) & 0xf) << 4) | ((dimm) & 0xf))
123
124
/*
 * Pre-computed NFIT device handles for the emulated DIMMs.  Entries
 * [0]-[3] are the four DIMMs on bus 0, [4] is the hotplug DIMM, and
 * [5]-[6] live on node 1 (second bus).
 */
static u32 handle[] = {
	[0] = NFIT_DIMM_HANDLE(0, 0, 0, 0, 0),
	[1] = NFIT_DIMM_HANDLE(0, 0, 0, 0, 1),
	[2] = NFIT_DIMM_HANDLE(0, 0, 1, 0, 0),
	[3] = NFIT_DIMM_HANDLE(0, 0, 1, 0, 1),
	[4] = NFIT_DIMM_HANDLE(0, 1, 0, 0, 0),
	[5] = NFIT_DIMM_HANDLE(1, 0, 0, 0, 0),
	[6] = NFIT_DIMM_HANDLE(1, 0, 0, 0, 1),
};
133
134
/* Per-DIMM fault injection: bitmask of command functions to fail ... */
static unsigned long dimm_fail_cmd_flags[ARRAY_SIZE(handle)];
/* ... and the error code to return for them (0 selects -EIO). */
static int dimm_fail_cmd_code[ARRAY_SIZE(handle)];

/* Emulated Intel DSM security state for one DIMM. */
struct nfit_test_sec {
	u8 state;		/* ND_INTEL_SEC_STATE_* flags */
	u8 ext_state;		/* ND_INTEL_SEC_ESTATE_* (master passphrase) */
	u8 old_state;		/* state saved across an overwrite operation */
	u8 passphrase[32];
	u8 master_passphrase[32];
	u64 overwrite_end_time;	/* jiffies64 deadline for overwrite to finish */
} dimm_sec_info[NUM_DCR];
144
145
/*
 * Baseline SMART health payload returned for every DIMM; injection
 * commands mutate a per-DIMM copy and reset fields back to these
 * values.  NOTE(review): temperatures appear to be fixed-point in
 * 1/16 degree units (hence the "* 16") -- confirm against the Intel
 * DSM spec.
 */
static const struct nd_intel_smart smart_def = {
	.flags = ND_INTEL_SMART_HEALTH_VALID
		| ND_INTEL_SMART_SPARES_VALID
		| ND_INTEL_SMART_ALARM_VALID
		| ND_INTEL_SMART_USED_VALID
		| ND_INTEL_SMART_SHUTDOWN_VALID
		| ND_INTEL_SMART_SHUTDOWN_COUNT_VALID
		| ND_INTEL_SMART_MTEMP_VALID
		| ND_INTEL_SMART_CTEMP_VALID,
	.health = ND_INTEL_SMART_NON_CRITICAL_HEALTH,
	.media_temperature = 23 * 16,
	.ctrl_temperature = 25 * 16,
	.pmic_temperature = 40 * 16,
	.spares = 75,
	.alarm_flags = ND_INTEL_SMART_SPARE_TRIP
		| ND_INTEL_SMART_TEMP_TRIP,
	.ait_status = 1,
	.life_used = 5,
	.shutdown_state = 0,
	.shutdown_count = 42,
	.vendor_size = 0,
};
167
168
/* Emulated firmware-update state machine for one DIMM. */
struct nfit_test_fw {
	enum intel_fw_update_state state;	/* NEW/IN_PROGRESS/VERIFY/UPDATED */
	u32 context;		/* token handed out by start-update */
	u64 version;		/* currently "running" firmware version */
	u32 size_received;	/* bytes accepted so far via send-data */
	u64 end_time;		/* deadline for the simulated verify phase */
	bool armed;		/* armed for activation */
	bool missed_activate;	/* an activation window was missed */
	unsigned long last_activate;
};
178
179
/*
 * One emulated NFIT bus instance: the ACPI descriptor, the fake
 * platform device, the generated NFIT table buffer, and the backing
 * allocations for every emulated DIMM resource.
 */
struct nfit_test {
	struct acpi_nfit_desc acpi_desc;
	struct platform_device pdev;
	struct list_head resources;	/* nfit_test_resource entries */
	void *nfit_buf;			/* generated NFIT table */
	dma_addr_t nfit_dma;
	size_t nfit_size;		/* capacity of nfit_buf */
	size_t nfit_filled;		/* bytes actually populated */
	int dcr_idx;			/* first DCR index owned by this bus */
	int num_dcr;
	int num_pm;
	/* parallel arrays of per-DIMM allocations and their DMA addresses */
	void **dimm;
	dma_addr_t *dimm_dma;
	void **flush;			/* flush hint pages */
	dma_addr_t *flush_dma;
	void **label;			/* namespace label storage */
	dma_addr_t *label_dma;
	void **spa_set;			/* PMEM SPA range backing */
	dma_addr_t *spa_set_dma;
	struct nfit_test_dcr **dcr;	/* control region emulation */
	dma_addr_t *dcr_dma;
	int (*alloc)(struct nfit_test *t);	/* per-instance allocator */
	void (*setup)(struct nfit_test *t);	/* per-instance NFIT builder */
	int setup_hotplug;		/* include the hotplug DIMM? */
	union acpi_object **_fit;	/* override payload for _FIT eval */
	dma_addr_t _fit_dma;
	/* address range scrub emulation, guarded by its own lock */
	struct ars_state {
		struct nd_cmd_ars_status *ars_status;
		unsigned long deadline;	/* jiffies when the fake scrub ends */
		spinlock_t lock;
	} ars_state;
	struct device *dimm_dev[ARRAY_SIZE(handle)];
	struct nd_intel_smart *smart;
	struct nd_intel_smart_threshold *smart_threshold;
	struct badrange badrange;	/* injected poison ranges */
	struct work_struct work;	/* deferred UC-error notification */
	struct nfit_test_fw *fw;	/* per-DIMM firmware-update state */
};
217
218
/* Workqueue for deferred uncorrectable-error notifications. */
static struct workqueue_struct *nfit_wq;

/* Pool that hands out fake SPA ranges to the test instances. */
static struct gen_pool *nfit_pool;

/* All-zeros passphrase, accepted by secure-erase when security is off. */
static const char zero_key[NVDIMM_PASSPHRASE_LEN];
223
224
static struct nfit_test *to_nfit_test(struct device *dev)
225
{
226
struct platform_device *pdev = to_platform_device(dev);
227
228
return container_of(pdev, struct nfit_test, pdev);
229
}
230
231
/*
 * ND_INTEL_FW_GET_INFO: report the emulated firmware-update capability
 * constants and the last "updated" version for DIMM @idx.
 * Returns 0 on success, -EINVAL when the caller's buffer is too small.
 * NOTE(review): the dev_dbg format string is missing its closing ')'.
 */
static int nd_intel_test_get_fw_info(struct nfit_test *t,
		struct nd_intel_fw_info *nd_cmd, unsigned int buf_len,
		int idx)
{
	struct device *dev = &t->pdev.dev;
	struct nfit_test_fw *fw = &t->fw[idx];

	dev_dbg(dev, "%s(nfit_test: %p nd_cmd: %p, buf_len: %u, idx: %d\n",
			__func__, t, nd_cmd, buf_len, idx);

	if (buf_len < sizeof(*nd_cmd))
		return -EINVAL;

	/* fixed capability values; only updated_version is dynamic */
	nd_cmd->status = 0;
	nd_cmd->storage_size = INTEL_FW_STORAGE_SIZE;
	nd_cmd->max_send_len = INTEL_FW_MAX_SEND_LEN;
	nd_cmd->query_interval = INTEL_FW_QUERY_INTERVAL;
	nd_cmd->max_query_time = INTEL_FW_QUERY_MAX_TIME;
	nd_cmd->update_cap = 0;
	nd_cmd->fis_version = INTEL_FW_FIS_VERSION;
	nd_cmd->run_version = 0;
	nd_cmd->updated_version = fw->version;

	return 0;
}
256
257
/*
 * ND_INTEL_FW_START_UPDATE: begin a firmware transfer for DIMM @idx.
 * Only valid from FW_STATE_NEW; otherwise the extended status 0x10007
 * ("update in progress") is reported in-band.  On success a new
 * context token is issued that all subsequent transfer commands must
 * present.
 */
static int nd_intel_test_start_update(struct nfit_test *t,
		struct nd_intel_fw_start *nd_cmd, unsigned int buf_len,
		int idx)
{
	struct device *dev = &t->pdev.dev;
	struct nfit_test_fw *fw = &t->fw[idx];

	dev_dbg(dev, "%s(nfit_test: %p nd_cmd: %p buf_len: %u idx: %d)\n",
			__func__, t, nd_cmd, buf_len, idx);

	if (buf_len < sizeof(*nd_cmd))
		return -EINVAL;

	if (fw->state != FW_STATE_NEW) {
		/* extended status, FW update in progress */
		nd_cmd->status = 0x10007;
		return 0;
	}

	fw->state = FW_STATE_IN_PROGRESS;
	fw->context++;		/* invalidate any previous context token */
	fw->size_received = 0;
	nd_cmd->status = 0;
	nd_cmd->context = fw->context;

	dev_dbg(dev, "%s: context issued: %#x\n", __func__, nd_cmd->context);

	return 0;
}
286
287
/*
 * ND_INTEL_FW_SEND_DATA: accept one chunk of firmware payload.
 * The u32 status trailer lives immediately after the variable-length
 * data[] payload.  Errors are reported in-band via that trailer
 * (0x5 = wrong state, 0x10007 = bad context, 0x3 = bounds violation);
 * the function itself only fails for an undersized buffer.
 *
 * NOTE(review): @status is derived from nd_cmd->length before buf_len
 * is validated, and the "offset + length" bound check is done in u32
 * arithmetic -- presumably fine for a test harness, but worth
 * confirming against hostile input.
 */
static int nd_intel_test_send_data(struct nfit_test *t,
		struct nd_intel_fw_send_data *nd_cmd, unsigned int buf_len,
		int idx)
{
	struct device *dev = &t->pdev.dev;
	struct nfit_test_fw *fw = &t->fw[idx];
	u32 *status = (u32 *)&nd_cmd->data[nd_cmd->length];

	dev_dbg(dev, "%s(nfit_test: %p nd_cmd: %p buf_len: %u idx: %d)\n",
			__func__, t, nd_cmd, buf_len, idx);

	if (buf_len < sizeof(*nd_cmd))
		return -EINVAL;


	dev_dbg(dev, "%s: cmd->status: %#x\n", __func__, *status);
	dev_dbg(dev, "%s: cmd->data[0]: %#x\n", __func__, nd_cmd->data[0]);
	dev_dbg(dev, "%s: cmd->data[%u]: %#x\n", __func__, nd_cmd->length-1,
			nd_cmd->data[nd_cmd->length-1]);

	if (fw->state != FW_STATE_IN_PROGRESS) {
		dev_dbg(dev, "%s: not in IN_PROGRESS state\n", __func__);
		*status = 0x5;
		return 0;
	}

	if (nd_cmd->context != fw->context) {
		dev_dbg(dev, "%s: incorrect context: in: %#x correct: %#x\n",
				__func__, nd_cmd->context, fw->context);
		*status = 0x10007;
		return 0;
	}

	/*
	 * check offset + len > size of fw storage
	 * check length is > max send length
	 */
	if (nd_cmd->offset + nd_cmd->length > INTEL_FW_STORAGE_SIZE ||
			nd_cmd->length > INTEL_FW_MAX_SEND_LEN) {
		*status = 0x3;
		dev_dbg(dev, "%s: buffer boundary violation\n", __func__);
		return 0;
	}

	/* the payload itself is discarded; only the byte count is tracked */
	fw->size_received += nd_cmd->length;
	dev_dbg(dev, "%s: copying %u bytes, %u bytes so far\n",
			__func__, nd_cmd->length, fw->size_received);
	*status = 0;
	return 0;
}
337
338
/*
 * ND_INTEL_FW_FINISH_UPDATE: finish (ctrl_flags == 0) or abort
 * (ctrl_flags == 1) a firmware transfer.  Finishing moves the state
 * machine to FW_STATE_VERIFY with a one-second simulated verification
 * window; aborting resets to FW_STATE_NEW.  In-band statuses: 0x20007
 * already updated, 0x10007 bad context, 0x40007 abort acknowledged.
 */
static int nd_intel_test_finish_fw(struct nfit_test *t,
		struct nd_intel_fw_finish_update *nd_cmd,
		unsigned int buf_len, int idx)
{
	struct device *dev = &t->pdev.dev;
	struct nfit_test_fw *fw = &t->fw[idx];

	dev_dbg(dev, "%s(nfit_test: %p nd_cmd: %p buf_len: %u idx: %d)\n",
			__func__, t, nd_cmd, buf_len, idx);

	if (fw->state == FW_STATE_UPDATED) {
		/* update already done, need activation */
		nd_cmd->status = 0x20007;
		return 0;
	}

	dev_dbg(dev, "%s: context: %#x ctrl_flags: %#x\n",
			__func__, nd_cmd->context, nd_cmd->ctrl_flags);

	switch (nd_cmd->ctrl_flags) {
	case 0: /* finish */
		if (nd_cmd->context != fw->context) {
			dev_dbg(dev, "%s: incorrect context: in: %#x correct: %#x\n",
					__func__, nd_cmd->context,
					fw->context);
			nd_cmd->status = 0x10007;
			return 0;
		}
		nd_cmd->status = 0;
		fw->state = FW_STATE_VERIFY;
		/* set 1 second of time for firmware "update" */
		fw->end_time = jiffies + HZ;
		break;

	case 1: /* abort */
		fw->size_received = 0;
		/* successfully aborted status */
		nd_cmd->status = 0x40007;
		fw->state = FW_STATE_NEW;
		dev_dbg(dev, "%s: abort successful\n", __func__);
		break;

	default: /* bad control flag */
		dev_warn(dev, "%s: unknown control flag: %#x\n",
				__func__, nd_cmd->ctrl_flags);
		return -EINVAL;
	}

	return 0;
}
388
389
/*
 * ND_INTEL_FW_FINISH_QUERY: poll the result of a finished transfer.
 * Drives the VERIFY -> UPDATED transition once the simulated
 * verification window has elapsed, at which point a fake firmware
 * version is reported.
 *
 * NOTE(review): fw->end_time is set from 32/64-bit "jiffies" in
 * nd_intel_test_finish_fw but compared here with
 * time_is_after_jiffies64() -- confirm the mixed jiffies/jiffies64
 * usage is intentional.
 */
static int nd_intel_test_finish_query(struct nfit_test *t,
		struct nd_intel_fw_finish_query *nd_cmd,
		unsigned int buf_len, int idx)
{
	struct device *dev = &t->pdev.dev;
	struct nfit_test_fw *fw = &t->fw[idx];

	dev_dbg(dev, "%s(nfit_test: %p nd_cmd: %p buf_len: %u idx: %d)\n",
			__func__, t, nd_cmd, buf_len, idx);

	if (buf_len < sizeof(*nd_cmd))
		return -EINVAL;

	if (nd_cmd->context != fw->context) {
		dev_dbg(dev, "%s: incorrect context: in: %#x correct: %#x\n",
				__func__, nd_cmd->context, fw->context);
		nd_cmd->status = 0x10007;
		return 0;
	}

	dev_dbg(dev, "%s context: %#x\n", __func__, nd_cmd->context);

	switch (fw->state) {
	case FW_STATE_NEW:
		nd_cmd->updated_fw_rev = 0;
		nd_cmd->status = 0;
		dev_dbg(dev, "%s: new state\n", __func__);
		break;

	case FW_STATE_IN_PROGRESS:
		/* sequencing error */
		nd_cmd->status = 0x40007;
		nd_cmd->updated_fw_rev = 0;
		dev_dbg(dev, "%s: sequence error\n", __func__);
		break;

	case FW_STATE_VERIFY:
		if (time_is_after_jiffies64(fw->end_time)) {
			/* still inside the simulated verify window */
			nd_cmd->updated_fw_rev = 0;
			nd_cmd->status = 0x20007;
			dev_dbg(dev, "%s: still verifying\n", __func__);
			break;
		}
		dev_dbg(dev, "%s: transition out verify\n", __func__);
		fw->state = FW_STATE_UPDATED;
		fw->missed_activate = false;
		fallthrough;
	case FW_STATE_UPDATED:
		nd_cmd->status = 0;
		/* bogus test version */
		fw->version = nd_cmd->updated_fw_rev =
			INTEL_FW_FAKE_VERSION;
		dev_dbg(dev, "%s: updated\n", __func__);
		break;

	default: /* we should never get here */
		return -EINVAL;
	}

	return 0;
}
450
451
static int nfit_test_cmd_get_config_size(struct nd_cmd_get_config_size *nd_cmd,
452
unsigned int buf_len)
453
{
454
if (buf_len < sizeof(*nd_cmd))
455
return -EINVAL;
456
457
nd_cmd->status = 0;
458
nd_cmd->config_size = LABEL_SIZE;
459
nd_cmd->max_xfer = SZ_4K;
460
461
return 0;
462
}
463
464
/*
 * ND_CMD_GET_CONFIG_DATA: copy a window of the DIMM's label area
 * (@label) into the command's out_buf.  The read is clamped to the
 * end of the LABEL_SIZE area.  Returns the number of trailing bytes
 * of the caller's buffer left unused, or -EINVAL for an undersized
 * buffer or an out-of-range offset.
 */
static int nfit_test_cmd_get_config_data(struct nd_cmd_get_config_data_hdr
		*nd_cmd, unsigned int buf_len, void *label)
{
	unsigned int len, offset = nd_cmd->in_offset;
	int rc;

	if (buf_len < sizeof(*nd_cmd))
		return -EINVAL;
	if (offset >= LABEL_SIZE)
		return -EINVAL;
	if (nd_cmd->in_length + sizeof(*nd_cmd) > buf_len)
		return -EINVAL;

	nd_cmd->status = 0;
	len = min(nd_cmd->in_length, LABEL_SIZE - offset);
	memcpy(nd_cmd->out_buf, label + offset, len);
	/* leftover bytes in the caller's buffer after header + payload */
	rc = buf_len - sizeof(*nd_cmd) - len;

	return rc;
}
484
485
/*
 * ND_CMD_SET_CONFIG_DATA: copy the command's in_buf payload into the
 * DIMM's label area (@label), clamped to LABEL_SIZE.  The u32 status
 * word sits after the variable-length payload (hence the "+ 4" in the
 * size check).  Returns leftover buffer bytes, or -EINVAL on an
 * undersized buffer or out-of-range offset.
 */
static int nfit_test_cmd_set_config_data(struct nd_cmd_set_config_hdr *nd_cmd,
		unsigned int buf_len, void *label)
{
	unsigned int len, offset = nd_cmd->in_offset;
	u32 *status;
	int rc;

	if (buf_len < sizeof(*nd_cmd))
		return -EINVAL;
	if (offset >= LABEL_SIZE)
		return -EINVAL;
	if (nd_cmd->in_length + sizeof(*nd_cmd) + 4 > buf_len)
		return -EINVAL;

	/* in-band status trailer lives just past the payload */
	status = (void *)nd_cmd + nd_cmd->in_length + sizeof(*nd_cmd);
	*status = 0;
	len = min(nd_cmd->in_length, LABEL_SIZE - offset);
	memcpy(label + offset, nd_cmd->in_buf, len);
	rc = buf_len - sizeof(*nd_cmd) - (len + 4);

	return rc;
}
507
508
/* Alignment granule for clear-error requests (address and length). */
#define NFIT_TEST_CLEAR_ERR_UNIT	256

/*
 * ND_CMD_ARS_CAP: report address-range-scrub capabilities -- the
 * maximum status payload (bounded by the 4K record store used for
 * testing), support for both persistent and volatile scrubs, and the
 * clear-error granularity.
 */
static int nfit_test_cmd_ars_cap(struct nd_cmd_ars_cap *nd_cmd,
		unsigned int buf_len)
{
	int ars_recs;

	if (buf_len < sizeof(*nd_cmd))
		return -EINVAL;

	/* for testing, only store up to n records that fit within 4k */
	ars_recs = SZ_4K / sizeof(struct nd_ars_record);

	nd_cmd->max_ars_out = sizeof(struct nd_cmd_ars_status)
		+ ars_recs * sizeof(struct nd_ars_record);
	/* supported scrub types are reported in the upper 16 status bits */
	nd_cmd->status = (ND_ARS_PERSISTENT | ND_ARS_VOLATILE) << 16;
	nd_cmd->clear_err_unit = NFIT_TEST_CLEAR_ERR_UNIT;

	return 0;
}
528
529
/*
 * Populate the cached ars_status payload with every badrange entry
 * that intersects [addr, addr + len), clipping each record to the
 * queried window, and start a one-second "scrub in progress" window.
 *
 * NOTE(review): the record loop does not bound @i against the
 * capacity advertised by ars_cap (SZ_4K of records) -- presumably
 * the tests never inject that many ranges, but verify before reuse.
 */
static void post_ars_status(struct ars_state *ars_state,
		struct badrange *badrange, u64 addr, u64 len)
{
	struct nd_cmd_ars_status *ars_status;
	struct nd_ars_record *ars_record;
	struct badrange_entry *be;
	u64 end = addr + len - 1;
	int i = 0;

	ars_state->deadline = jiffies + 1*HZ;
	ars_status = ars_state->ars_status;
	ars_status->status = 0;
	ars_status->address = addr;
	ars_status->length = len;
	ars_status->type = ND_ARS_PERSISTENT;

	spin_lock(&badrange->lock);
	list_for_each_entry(be, &badrange->list, list) {
		u64 be_end = be->start + be->length - 1;
		u64 rstart, rend;

		/* skip entries outside the range */
		if (be_end < addr || be->start > end)
			continue;

		/* clip the entry to the queried window */
		rstart = (be->start < addr) ? addr : be->start;
		rend = (be_end < end) ? be_end : end;
		ars_record = &ars_status->records[i];
		ars_record->handle = 0;
		ars_record->err_address = rstart;
		ars_record->length = rend - rstart + 1;
		i++;
	}
	spin_unlock(&badrange->lock);
	ars_status->num_records = i;
	ars_status->out_length = sizeof(struct nd_cmd_ars_status)
		+ i * sizeof(struct nd_ars_record);
}
567
568
/*
 * ND_CMD_ARS_START: kick off a fake scrub.  If a previous scrub's
 * deadline has not elapsed yet, report busy via *cmd_rc; otherwise
 * synthesize results immediately from the injected badrange list.
 */
static int nfit_test_cmd_ars_start(struct nfit_test *t,
		struct ars_state *ars_state,
		struct nd_cmd_ars_start *ars_start, unsigned int buf_len,
		int *cmd_rc)
{
	if (buf_len < sizeof(*ars_start))
		return -EINVAL;

	spin_lock(&ars_state->lock);
	if (time_before(jiffies, ars_state->deadline)) {
		/* previous scrub still "running" */
		ars_start->status = NFIT_ARS_START_BUSY;
		*cmd_rc = -EBUSY;
	} else {
		ars_start->status = 0;
		ars_start->scrub_time = 1;
		post_ars_status(ars_state, &t->badrange, ars_start->address,
				ars_start->length);
		*cmd_rc = 0;
	}
	spin_unlock(&ars_state->lock);

	return 0;
}
591
592
/*
 * ND_CMD_ARS_STATUS: return the cached scrub results, or a busy
 * indication while the simulated one-second scrub window is open.
 */
static int nfit_test_cmd_ars_status(struct ars_state *ars_state,
		struct nd_cmd_ars_status *ars_status, unsigned int buf_len,
		int *cmd_rc)
{
	if (buf_len < ars_state->ars_status->out_length)
		return -EINVAL;

	spin_lock(&ars_state->lock);
	if (time_before(jiffies, ars_state->deadline)) {
		/* scrub still "running": report busy with an empty payload */
		memset(ars_status, 0, buf_len);
		ars_status->status = NFIT_ARS_STATUS_BUSY;
		ars_status->out_length = sizeof(*ars_status);
		*cmd_rc = -EBUSY;
	} else {
		memcpy(ars_status, ars_state->ars_status,
				ars_state->ars_status->out_length);
		*cmd_rc = 0;
	}
	spin_unlock(&ars_state->lock);
	return 0;
}
613
614
static int nfit_test_cmd_clear_error(struct nfit_test *t,
615
struct nd_cmd_clear_error *clear_err,
616
unsigned int buf_len, int *cmd_rc)
617
{
618
const u64 mask = NFIT_TEST_CLEAR_ERR_UNIT - 1;
619
if (buf_len < sizeof(*clear_err))
620
return -EINVAL;
621
622
if ((clear_err->address & mask) || (clear_err->length & mask))
623
return -EINVAL;
624
625
badrange_forget(&t->badrange, clear_err->address, clear_err->length);
626
clear_err->status = 0;
627
clear_err->cleared = clear_err->length;
628
*cmd_rc = 0;
629
return 0;
630
}
631
632
/* Search context: the SPA to translate and the region that owns it. */
struct region_search_spa {
	u64 addr;		/* system physical address being looked up */
	struct nd_region *region;	/* result, set by the search callback */
};

/* Heuristic: region devices are named "regionN" on the nvdimm bus. */
static int is_region_device(struct device *dev)
{
	return !strncmp(dev->kobj.name, "region", 6);
}
641
642
/*
 * device_for_each_child() callback: record the nd_region whose
 * [ndr_start, ndr_start + ndr_size) span contains ctx->addr.
 * Returns 1 to stop iteration on a match, 0 to keep searching.
 */
static int nfit_test_search_region_spa(struct device *dev, void *data)
{
	struct region_search_spa *ctx = data;
	struct nd_region *nd_region;
	resource_size_t ndr_end;

	if (!is_region_device(dev))
		return 0;

	nd_region = to_nd_region(dev);
	ndr_end = nd_region->ndr_start + nd_region->ndr_size;

	if (ctx->addr >= nd_region->ndr_start && ctx->addr < ndr_end) {
		ctx->region = nd_region;
		return 1;
	}

	return 0;
}
661
662
/*
 * Resolve spa->spa to a (DIMM handle, DPA) pair: find the owning
 * region on @bus, compute the offset into it, and report the last
 * DIMM of the region's interleave set as the translation target.
 * Returns -ENODEV when no region covers the address, -EINVAL when the
 * selected DIMM has no nfit_mem provider data.
 */
static int nfit_test_search_spa(struct nvdimm_bus *bus,
		struct nd_cmd_translate_spa *spa)
{
	int ret;
	struct nd_region *nd_region = NULL;
	struct nvdimm *nvdimm = NULL;
	struct nd_mapping *nd_mapping = NULL;
	struct region_search_spa ctx = {
		.addr = spa->spa,
		.region = NULL,
	};
	struct nfit_mem *nfit_mem;
	u64 dpa;

	ret = device_for_each_child(&bus->dev, &ctx,
				nfit_test_search_region_spa);

	if (!ret)
		return -ENODEV;

	nd_region = ctx.region;

	/* DPA is simply the offset into the owning region */
	dpa = ctx.addr - nd_region->ndr_start;

	/*
	 * last dimm is selected for test
	 */
	nd_mapping = &nd_region->mapping[nd_region->ndr_mappings - 1];
	nvdimm = nd_mapping->nvdimm;
	nfit_mem = nvdimm_provider_data(nvdimm);
	if (!nfit_mem)
		return -EINVAL;

	spa->devices[0].nfit_device_handle =
		__to_nfit_memdev(nfit_mem)->device_handle;
	spa->num_nvdimms = 1;
	spa->devices[0].dpa = dpa;

	return 0;
}
702
703
/*
 * ND_CMD_TRANSLATE_SPA entry point: translation failures are reported
 * in-band (spa->status = 2) rather than as a command error; the only
 * hard failure is an undersized buffer.
 */
static int nfit_test_cmd_translate_spa(struct nvdimm_bus *bus,
		struct nd_cmd_translate_spa *spa, unsigned int buf_len)
{
	if (buf_len < spa->translate_length)
		return -EINVAL;

	if (nfit_test_search_spa(bus, spa) < 0 || !spa->num_nvdimms)
		spa->status = 2;

	return 0;
}
714
715
static int nfit_test_cmd_smart(struct nd_intel_smart *smart, unsigned int buf_len,
716
struct nd_intel_smart *smart_data)
717
{
718
if (buf_len < sizeof(*smart))
719
return -EINVAL;
720
memcpy(smart, smart_data, sizeof(*smart));
721
return 0;
722
}
723
724
static int nfit_test_cmd_smart_threshold(
725
struct nd_intel_smart_threshold *out,
726
unsigned int buf_len,
727
struct nd_intel_smart_threshold *smart_t)
728
{
729
if (buf_len < sizeof(*smart_t))
730
return -EINVAL;
731
memcpy(out, smart_t, sizeof(*smart_t));
732
return 0;
733
}
734
735
/*
 * Fire the ACPI health notification (0x81) on @dimm_dev whenever the
 * current SMART state crosses an armed threshold, reports non-optimal
 * health, or records an unsafe shutdown.  Called after any SMART
 * mutation (threshold change or injection).
 */
static void smart_notify(struct device *bus_dev,
		struct device *dimm_dev, struct nd_intel_smart *smart,
		struct nd_intel_smart_threshold *thresh)
{
	dev_dbg(dimm_dev, "%s: alarm: %#x spares: %d (%d) mtemp: %d (%d) ctemp: %d (%d)\n",
			__func__, thresh->alarm_control, thresh->spares,
			smart->spares, thresh->media_temperature,
			smart->media_temperature, thresh->ctrl_temperature,
			smart->ctrl_temperature);
	/* spares trip low; temperatures trip high */
	if (((thresh->alarm_control & ND_INTEL_SMART_SPARE_TRIP)
				&& smart->spares
				<= thresh->spares)
			|| ((thresh->alarm_control & ND_INTEL_SMART_TEMP_TRIP)
				&& smart->media_temperature
				>= thresh->media_temperature)
			|| ((thresh->alarm_control & ND_INTEL_SMART_CTEMP_TRIP)
				&& smart->ctrl_temperature
				>= thresh->ctrl_temperature)
			|| (smart->health != ND_INTEL_SMART_NON_CRITICAL_HEALTH)
			|| (smart->shutdown_state != 0)) {
		device_lock(bus_dev);
		__acpi_nvdimm_notify(dimm_dev, 0x81);
		device_unlock(bus_dev);
	}
}
760
761
/*
 * ND_INTEL_SMART_SET_THRESHOLD: install new alarm thresholds and
 * re-evaluate notification conditions.  The input payload minus its
 * trailing 4-byte status word is copied verbatim into thresh->data.
 */
static int nfit_test_cmd_smart_set_threshold(
		struct nd_intel_smart_set_threshold *in,
		unsigned int buf_len,
		struct nd_intel_smart_threshold *thresh,
		struct nd_intel_smart *smart,
		struct device *bus_dev, struct device *dimm_dev)
{
	unsigned int size;

	/* payload size excludes the trailing u32 status field */
	size = sizeof(*in) - 4;
	if (buf_len < size)
		return -EINVAL;
	memcpy(thresh->data, in, size);
	in->status = 0;
	smart_notify(bus_dev, dimm_dev, smart, thresh);

	return 0;
}
779
780
/*
 * ND_INTEL_SMART_INJECT: inject (or clear) synthetic SMART conditions
 * -- media temperature, spare capacity, fatal health, and unsafe
 * shutdown.  Clearing a condition restores the smart_def baseline.
 * Always re-runs smart_notify() so threshold crossings fire.
 */
static int nfit_test_cmd_smart_inject(
		struct nd_intel_smart_inject *inj,
		unsigned int buf_len,
		struct nd_intel_smart_threshold *thresh,
		struct nd_intel_smart *smart,
		struct device *bus_dev, struct device *dimm_dev)
{
	if (buf_len != sizeof(*inj))
		return -EINVAL;

	if (inj->flags & ND_INTEL_SMART_INJECT_MTEMP) {
		if (inj->mtemp_enable)
			smart->media_temperature = inj->media_temperature;
		else
			smart->media_temperature = smart_def.media_temperature;
	}
	if (inj->flags & ND_INTEL_SMART_INJECT_SPARE) {
		if (inj->spare_enable)
			smart->spares = inj->spares;
		else
			smart->spares = smart_def.spares;
	}
	if (inj->flags & ND_INTEL_SMART_INJECT_FATAL) {
		if (inj->fatal_enable)
			smart->health = ND_INTEL_SMART_FATAL_HEALTH;
		else
			smart->health = ND_INTEL_SMART_NON_CRITICAL_HEALTH;
	}
	if (inj->flags & ND_INTEL_SMART_INJECT_SHUTDOWN) {
		if (inj->unsafe_shutdown_enable) {
			smart->shutdown_state = 1;
			smart->shutdown_count++;
		} else
			smart->shutdown_state = 0;
	}
	inj->status = 0;
	smart_notify(bus_dev, dimm_dev, smart, thresh);

	return 0;
}
820
821
static void uc_error_notify(struct work_struct *work)
822
{
823
struct nfit_test *t = container_of(work, typeof(*t), work);
824
825
__acpi_nfit_notify(&t->pdev.dev, t, NFIT_NOTIFY_UC_MEMORY_ERROR);
826
}
827
828
/*
 * Inject a poison range into the badrange list and, when requested via
 * ND_ARS_ERR_INJ_OPT_NOTIFY, queue the deferred UC-error notification.
 * On any failure the in-band status is set to NFIT_ARS_INJECT_INVALID
 * in addition to the negative return code.
 */
static int nfit_test_cmd_ars_error_inject(struct nfit_test *t,
		struct nd_cmd_ars_err_inj *err_inj, unsigned int buf_len)
{
	int rc;

	if (buf_len != sizeof(*err_inj)) {
		rc = -EINVAL;
		goto err;
	}

	if (err_inj->err_inj_spa_range_length <= 0) {
		rc = -EINVAL;
		goto err;
	}

	rc = badrange_add(&t->badrange, err_inj->err_inj_spa_range_base,
			err_inj->err_inj_spa_range_length);
	if (rc < 0)
		goto err;

	if (err_inj->err_inj_options & (1 << ND_ARS_ERR_INJ_OPT_NOTIFY))
		queue_work(nfit_wq, &t->work);

	err_inj->status = 0;
	return 0;

err:
	err_inj->status = NFIT_ARS_INJECT_INVALID;
	return rc;
}
858
859
/*
 * Remove a previously injected poison range from the badrange list.
 * Mirrors nfit_test_cmd_ars_error_inject(): failures report
 * NFIT_ARS_INJECT_INVALID in-band plus a negative return code.
 */
static int nfit_test_cmd_ars_inject_clear(struct nfit_test *t,
		struct nd_cmd_ars_err_inj_clr *err_clr, unsigned int buf_len)
{
	int rc;

	if (buf_len != sizeof(*err_clr)) {
		rc = -EINVAL;
		goto err;
	}

	if (err_clr->err_inj_clr_spa_range_length <= 0) {
		rc = -EINVAL;
		goto err;
	}

	badrange_forget(&t->badrange, err_clr->err_inj_clr_spa_range_base,
			err_clr->err_inj_clr_spa_range_length);

	err_clr->status = 0;
	return 0;

err:
	err_clr->status = NFIT_ARS_INJECT_INVALID;
	return rc;
}
884
885
static int nfit_test_cmd_ars_inject_status(struct nfit_test *t,
886
struct nd_cmd_ars_err_inj_stat *err_stat,
887
unsigned int buf_len)
888
{
889
struct badrange_entry *be;
890
int max = SZ_4K / sizeof(struct nd_error_stat_query_record);
891
int i = 0;
892
893
err_stat->status = 0;
894
spin_lock(&t->badrange.lock);
895
list_for_each_entry(be, &t->badrange.list, list) {
896
err_stat->record[i].err_inj_stat_spa_range_base = be->start;
897
err_stat->record[i].err_inj_stat_spa_range_length = be->length;
898
i++;
899
if (i > max)
900
break;
901
}
902
spin_unlock(&t->badrange.lock);
903
err_stat->inj_err_rec_count = i;
904
905
return 0;
906
}
907
908
/*
 * ND_INTEL_ENABLE_LSS_STATUS: toggle Latch System Shutdown Status.
 * The emulation only acknowledges the request; enable values other
 * than 0/1 get in-band status 0x3 (invalid parameter).
 */
static int nd_intel_test_cmd_set_lss_status(struct nfit_test *t,
		struct nd_intel_lss *nd_cmd, unsigned int buf_len)
{
	struct device *dev = &t->pdev.dev;

	if (buf_len < sizeof(*nd_cmd))
		return -EINVAL;

	switch (nd_cmd->enable) {
	case 0:
		nd_cmd->status = 0;
		dev_dbg(dev, "%s: Latch System Shutdown Status disabled\n",
				__func__);
		break;
	case 1:
		nd_cmd->status = 0;
		dev_dbg(dev, "%s: Latch System Shutdown Status enabled\n",
				__func__);
		break;
	default:
		dev_warn(dev, "Unknown enable value: %#x\n", nd_cmd->enable);
		nd_cmd->status = 0x3;
		break;
	}


	return 0;
}
936
937
static int override_return_code(int dimm, unsigned int func, int rc)
938
{
939
if ((1 << func) & dimm_fail_cmd_flags[dimm]) {
940
if (dimm_fail_cmd_code[dimm])
941
return dimm_fail_cmd_code[dimm];
942
return -EIO;
943
}
944
return rc;
945
}
946
947
/*
 * ND_INTEL_QUERY_SECURITY: report the emulated security and extended
 * (master passphrase) state for DIMM @dimm.  Always succeeds.
 */
static int nd_intel_test_cmd_security_status(struct nfit_test *t,
		struct nd_intel_get_security_state *nd_cmd,
		unsigned int buf_len, int dimm)
{
	struct device *dev = &t->pdev.dev;
	struct nfit_test_sec *sec = &dimm_sec_info[dimm];

	nd_cmd->status = 0;
	nd_cmd->state = sec->state;
	nd_cmd->extended_state = sec->ext_state;
	dev_dbg(dev, "security state (%#x) returned\n", nd_cmd->state);

	return 0;
}
961
962
/*
 * ND_INTEL_UNLOCK_UNIT: unlock a locked, non-frozen DIMM when the
 * supplied passphrase matches.  All failures are in-band statuses
 * (INVALID_STATE / INVALID_PASS); the function itself returns 0.
 */
static int nd_intel_test_cmd_unlock_unit(struct nfit_test *t,
		struct nd_intel_unlock_unit *nd_cmd,
		unsigned int buf_len, int dimm)
{
	struct device *dev = &t->pdev.dev;
	struct nfit_test_sec *sec = &dimm_sec_info[dimm];

	if (!(sec->state & ND_INTEL_SEC_STATE_LOCKED) ||
			(sec->state & ND_INTEL_SEC_STATE_FROZEN)) {
		nd_cmd->status = ND_INTEL_STATUS_INVALID_STATE;
		dev_dbg(dev, "unlock unit: invalid state: %#x\n",
				sec->state);
	} else if (memcmp(nd_cmd->passphrase, sec->passphrase,
				ND_INTEL_PASSPHRASE_SIZE) != 0) {
		nd_cmd->status = ND_INTEL_STATUS_INVALID_PASS;
		dev_dbg(dev, "unlock unit: invalid passphrase\n");
	} else {
		nd_cmd->status = 0;
		/* LOCKED flag drops; security remains enabled */
		sec->state = ND_INTEL_SEC_STATE_ENABLED;
		dev_dbg(dev, "Unit unlocked\n");
	}

	dev_dbg(dev, "unlocking status returned: %#x\n", nd_cmd->status);
	return 0;
}
987
988
/*
 * ND_INTEL_SET_PASSPHRASE: install a new user passphrase after
 * validating the old one, and enable security.  Rejected in-band when
 * the DIMM is frozen or the old passphrase does not match.
 */
static int nd_intel_test_cmd_set_pass(struct nfit_test *t,
		struct nd_intel_set_passphrase *nd_cmd,
		unsigned int buf_len, int dimm)
{
	struct device *dev = &t->pdev.dev;
	struct nfit_test_sec *sec = &dimm_sec_info[dimm];

	if (sec->state & ND_INTEL_SEC_STATE_FROZEN) {
		nd_cmd->status = ND_INTEL_STATUS_INVALID_STATE;
		dev_dbg(dev, "set passphrase: wrong security state\n");
	} else if (memcmp(nd_cmd->old_pass, sec->passphrase,
				ND_INTEL_PASSPHRASE_SIZE) != 0) {
		nd_cmd->status = ND_INTEL_STATUS_INVALID_PASS;
		dev_dbg(dev, "set passphrase: wrong passphrase\n");
	} else {
		memcpy(sec->passphrase, nd_cmd->new_pass,
				ND_INTEL_PASSPHRASE_SIZE);
		sec->state |= ND_INTEL_SEC_STATE_ENABLED;
		nd_cmd->status = 0;
		dev_dbg(dev, "passphrase updated\n");
	}

	return 0;
}
1012
1013
/*
 * ND_INTEL_FREEZE_LOCK: freeze the security state of an enabled DIMM,
 * blocking further security mutations until the next power cycle.
 * In-band INVALID_STATE when security is not enabled.
 */
static int nd_intel_test_cmd_freeze_lock(struct nfit_test *t,
		struct nd_intel_freeze_lock *nd_cmd,
		unsigned int buf_len, int dimm)
{
	struct device *dev = &t->pdev.dev;
	struct nfit_test_sec *sec = &dimm_sec_info[dimm];

	if (!(sec->state & ND_INTEL_SEC_STATE_ENABLED)) {
		nd_cmd->status = ND_INTEL_STATUS_INVALID_STATE;
		dev_dbg(dev, "freeze lock: wrong security state\n");
	} else {
		sec->state |= ND_INTEL_SEC_STATE_FROZEN;
		nd_cmd->status = 0;
		dev_dbg(dev, "security frozen\n");
	}

	return 0;
}
1031
1032
/*
 * ND_INTEL_DISABLE_PASSPHRASE: clear the user passphrase and disable
 * security on an enabled, non-frozen DIMM.  Rejected in-band when the
 * state is wrong or the passphrase does not match.
 */
static int nd_intel_test_cmd_disable_pass(struct nfit_test *t,
		struct nd_intel_disable_passphrase *nd_cmd,
		unsigned int buf_len, int dimm)
{
	struct device *dev = &t->pdev.dev;
	struct nfit_test_sec *sec = &dimm_sec_info[dimm];

	if (!(sec->state & ND_INTEL_SEC_STATE_ENABLED) ||
			(sec->state & ND_INTEL_SEC_STATE_FROZEN)) {
		nd_cmd->status = ND_INTEL_STATUS_INVALID_STATE;
		dev_dbg(dev, "disable passphrase: wrong security state\n");
	} else if (memcmp(nd_cmd->passphrase, sec->passphrase,
				ND_INTEL_PASSPHRASE_SIZE) != 0) {
		nd_cmd->status = ND_INTEL_STATUS_INVALID_PASS;
		dev_dbg(dev, "disable passphrase: wrong passphrase\n");
	} else {
		memset(sec->passphrase, 0, ND_INTEL_PASSPHRASE_SIZE);
		sec->state = 0;
		dev_dbg(dev, "disable passphrase: done\n");
	}

	return 0;
}
1055
1056
/*
 * ND_INTEL_SECURE_ERASE: wipe both passphrases and reset security
 * state.  When security is disabled the all-zeros key must be
 * presented.  NOTE(review): the "invalid zero key" path returns
 * without setting nd_cmd->status, leaving whatever the caller passed
 * in -- confirm this is intentional.
 */
static int nd_intel_test_cmd_secure_erase(struct nfit_test *t,
		struct nd_intel_secure_erase *nd_cmd,
		unsigned int buf_len, int dimm)
{
	struct device *dev = &t->pdev.dev;
	struct nfit_test_sec *sec = &dimm_sec_info[dimm];

	if (sec->state & ND_INTEL_SEC_STATE_FROZEN) {
		nd_cmd->status = ND_INTEL_STATUS_INVALID_STATE;
		dev_dbg(dev, "secure erase: wrong security state\n");
	} else if (memcmp(nd_cmd->passphrase, sec->passphrase,
				ND_INTEL_PASSPHRASE_SIZE) != 0) {
		nd_cmd->status = ND_INTEL_STATUS_INVALID_PASS;
		dev_dbg(dev, "secure erase: wrong passphrase\n");
	} else {
		/* security-disabled DIMMs only accept the zero key */
		if (!(sec->state & ND_INTEL_SEC_STATE_ENABLED)
				&& (memcmp(nd_cmd->passphrase, zero_key,
					ND_INTEL_PASSPHRASE_SIZE) != 0)) {
			dev_dbg(dev, "invalid zero key\n");
			return 0;
		}
		memset(sec->passphrase, 0, ND_INTEL_PASSPHRASE_SIZE);
		memset(sec->master_passphrase, 0, ND_INTEL_PASSPHRASE_SIZE);
		sec->state = 0;
		sec->ext_state = ND_INTEL_SEC_ESTATE_ENABLED;
		dev_dbg(dev, "secure erase: done\n");
	}

	return 0;
}
1086
1087
/*
 * Emulate starting an overwrite operation. On success the DIMM enters
 * the OVERWRITE state; completion is polled via query_overwrite and is
 * emulated to take 5 seconds (see overwrite_end_time below).
 */
static int nd_intel_test_cmd_overwrite(struct nfit_test *t,
		struct nd_intel_overwrite *nd_cmd,
		unsigned int buf_len, int dimm)
{
	struct device *dev = &t->pdev.dev;
	struct nfit_test_sec *sec = &dimm_sec_info[dimm];

	/* If security is enabled the correct passphrase must be given */
	if ((sec->state & ND_INTEL_SEC_STATE_ENABLED) &&
			memcmp(nd_cmd->passphrase, sec->passphrase,
				ND_INTEL_PASSPHRASE_SIZE) != 0) {
		nd_cmd->status = ND_INTEL_STATUS_INVALID_PASS;
		dev_dbg(dev, "overwrite: wrong passphrase\n");
		return 0;
	}

	/* Save state so query_overwrite can restore it on completion */
	sec->old_state = sec->state;
	sec->state = ND_INTEL_SEC_STATE_OVERWRITE;
	dev_dbg(dev, "overwrite progressing.\n");
	/* Emulated overwrite completes 5 seconds from now */
	sec->overwrite_end_time = get_jiffies_64() + 5 * HZ;

	return 0;
}
1109
1110
/*
 * Emulate polling an in-progress overwrite. Reports a sequence error if
 * no overwrite is running, "in progress" until the emulated deadline
 * passes, then restores the pre-overwrite security state.
 */
static int nd_intel_test_cmd_query_overwrite(struct nfit_test *t,
		struct nd_intel_query_overwrite *nd_cmd,
		unsigned int buf_len, int dimm)
{
	struct device *dev = &t->pdev.dev;
	struct nfit_test_sec *sec = &dimm_sec_info[dimm];

	if (!(sec->state & ND_INTEL_SEC_STATE_OVERWRITE)) {
		nd_cmd->status = ND_INTEL_STATUS_OQUERY_SEQUENCE_ERR;
		return 0;
	}

	if (time_is_before_jiffies64(sec->overwrite_end_time)) {
		/* Deadline passed: overwrite done, restore saved state */
		sec->overwrite_end_time = 0;
		sec->state = sec->old_state;
		sec->old_state = 0;
		sec->ext_state = ND_INTEL_SEC_ESTATE_ENABLED;
		dev_dbg(dev, "overwrite is complete\n");
	} else
		nd_cmd->status = ND_INTEL_STATUS_OQUERY_INPROGRESS;
	return 0;
}
1132
1133
/*
 * Emulate setting the master passphrase: validate the extended security
 * state and the old master passphrase, then install the new one.
 */
static int nd_intel_test_cmd_master_set_pass(struct nfit_test *t,
		struct nd_intel_set_master_passphrase *nd_cmd,
		unsigned int buf_len, int dimm)
{
	struct device *dev = &t->pdev.dev;
	struct nfit_test_sec *sec = &dimm_sec_info[dimm];

	if (!(sec->ext_state & ND_INTEL_SEC_ESTATE_ENABLED)) {
		nd_cmd->status = ND_INTEL_STATUS_NOT_SUPPORTED;
		dev_dbg(dev, "master set passphrase: in wrong state\n");
	} else if (sec->ext_state & ND_INTEL_SEC_ESTATE_PLIMIT) {
		nd_cmd->status = ND_INTEL_STATUS_INVALID_STATE;
		dev_dbg(dev, "master set passphrase: in wrong security state\n");
	} else if (memcmp(nd_cmd->old_pass, sec->master_passphrase,
				ND_INTEL_PASSPHRASE_SIZE) != 0) {
		nd_cmd->status = ND_INTEL_STATUS_INVALID_PASS;
		dev_dbg(dev, "master set passphrase: wrong passphrase\n");
	} else {
		memcpy(sec->master_passphrase, nd_cmd->new_pass,
				ND_INTEL_PASSPHRASE_SIZE);
		sec->ext_state = ND_INTEL_SEC_ESTATE_ENABLED;
		dev_dbg(dev, "master passphrase: updated\n");
	}

	return 0;
}
1159
1160
/*
 * Emulate a master-passphrase secure erase: wipes the user passphrase
 * and security state while deliberately preserving the master
 * passphrase itself.
 */
static int nd_intel_test_cmd_master_secure_erase(struct nfit_test *t,
		struct nd_intel_master_secure_erase *nd_cmd,
		unsigned int buf_len, int dimm)
{
	struct device *dev = &t->pdev.dev;
	struct nfit_test_sec *sec = &dimm_sec_info[dimm];

	if (!(sec->ext_state & ND_INTEL_SEC_ESTATE_ENABLED)) {
		nd_cmd->status = ND_INTEL_STATUS_NOT_SUPPORTED;
		dev_dbg(dev, "master secure erase: in wrong state\n");
	} else if (sec->ext_state & ND_INTEL_SEC_ESTATE_PLIMIT) {
		nd_cmd->status = ND_INTEL_STATUS_INVALID_STATE;
		dev_dbg(dev, "master secure erase: in wrong security state\n");
	} else if (memcmp(nd_cmd->passphrase, sec->master_passphrase,
				ND_INTEL_PASSPHRASE_SIZE) != 0) {
		nd_cmd->status = ND_INTEL_STATUS_INVALID_PASS;
		dev_dbg(dev, "master secure erase: wrong passphrase\n");
	} else {
		/* we do not erase master state passphrase ever */
		sec->ext_state = ND_INTEL_SEC_ESTATE_ENABLED;
		memset(sec->passphrase, 0, ND_INTEL_PASSPHRASE_SIZE);
		sec->state = 0;
		dev_dbg(dev, "master secure erase: done\n");
	}

	return 0;
}
1187
1188
/* jiffies timestamp of the most recent bus firmware activate (0 = never) */
static unsigned long last_activate;
1189
1190
/*
 * Emulate the bus-scope firmware-activate info query. Counts armed
 * DIMMs, derives the bus activation state from that count and the time
 * since the last activate, and fills in emulated quiesce timeouts
 * (1 second per armed DIMM, 3 seconds max).
 */
static int nvdimm_bus_intel_fw_activate_businfo(struct nfit_test *t,
		struct nd_intel_bus_fw_activate_businfo *nd_cmd,
		unsigned int buf_len)
{
	int i, armed = 0;
	int state;
	u64 tmo;

	/* Count how many DIMMs are currently armed for activation */
	for (i = 0; i < NUM_DCR; i++) {
		struct nfit_test_fw *fw = &t->fw[i];

		if (fw->armed)
			armed++;
	}

	/*
	 * Emulate 3 second activation max, and 1 second incremental
	 * quiesce time per dimm requiring multiple activates to get all
	 * DIMMs updated.
	 */
	if (armed)
		state = ND_INTEL_FWA_ARMED;
	else if (!last_activate || time_after(jiffies, last_activate + 3 * HZ))
		state = ND_INTEL_FWA_IDLE;
	else
		state = ND_INTEL_FWA_BUSY;

	tmo = armed * USEC_PER_SEC;
	*nd_cmd = (struct nd_intel_bus_fw_activate_businfo) {
		.capability = ND_INTEL_BUS_FWA_CAP_FWQUIESCE
			| ND_INTEL_BUS_FWA_CAP_OSQUIESCE
			| ND_INTEL_BUS_FWA_CAP_RESET,
		.state = state,
		.activate_tmo = tmo,
		.cpu_quiesce_tmo = tmo,
		.io_quiesce_tmo = tmo,
		.max_quiesce_tmo = 3 * USEC_PER_SEC,
	};

	return 0;
}
1231
1232
/*
 * Emulate a bus-scope firmware activate. Validates the bus state via
 * the businfo query, then "activates" every armed DIMM: a DIMM whose
 * firmware was never fully updated records a missed activation,
 * otherwise its firmware state resets to NEW. The TMO status is
 * deliberately allowed to proceed (timeout still activates here).
 */
static int nvdimm_bus_intel_fw_activate(struct nfit_test *t,
		struct nd_intel_bus_fw_activate *nd_cmd,
		unsigned int buf_len)
{
	struct nd_intel_bus_fw_activate_businfo info;
	u32 status = 0;
	int i;

	nvdimm_bus_intel_fw_activate_businfo(t, &info, sizeof(info));
	if (info.state == ND_INTEL_FWA_BUSY)
		status = ND_INTEL_BUS_FWA_STATUS_BUSY;
	else if (info.activate_tmo > info.max_quiesce_tmo)
		status = ND_INTEL_BUS_FWA_STATUS_TMO;
	else if (info.state == ND_INTEL_FWA_IDLE)
		status = ND_INTEL_BUS_FWA_STATUS_NOARM;

	dev_dbg(&t->pdev.dev, "status: %d\n", status);
	nd_cmd->status = status;
	/* Only BUSY/NOARM abort; a TMO status still performs the activate */
	if (status && status != ND_INTEL_BUS_FWA_STATUS_TMO)
		return 0;

	last_activate = jiffies;
	for (i = 0; i < NUM_DCR; i++) {
		struct nfit_test_fw *fw = &t->fw[i];

		if (!fw->armed)
			continue;
		if (fw->state != FW_STATE_UPDATED)
			fw->missed_activate = true;
		else
			fw->state = FW_STATE_NEW;
		fw->armed = false;
		fw->last_activate = last_activate;
	}

	return 0;
}
1269
1270
/*
 * Emulate the per-DIMM firmware-activate info query. The DIMM state
 * mirrors the bus state when the bus is busy/idle, otherwise reflects
 * whether this DIMM is armed. The result reports the outcome of the
 * last bus activation this DIMM participated in.
 */
static int nd_intel_test_cmd_fw_activate_dimminfo(struct nfit_test *t,
		struct nd_intel_fw_activate_dimminfo *nd_cmd,
		unsigned int buf_len, int dimm)
{
	struct nd_intel_bus_fw_activate_businfo info;
	struct nfit_test_fw *fw = &t->fw[dimm];
	u32 result, state;

	nvdimm_bus_intel_fw_activate_businfo(t, &info, sizeof(info));

	if (info.state == ND_INTEL_FWA_BUSY)
		state = ND_INTEL_FWA_BUSY;
	else if (info.state == ND_INTEL_FWA_IDLE)
		state = ND_INTEL_FWA_IDLE;
	else if (fw->armed)
		state = ND_INTEL_FWA_ARMED;
	else
		state = ND_INTEL_FWA_IDLE;

	/* Report the outcome only after the bus has gone idle again */
	result = ND_INTEL_DIMM_FWA_NONE;
	if (last_activate && fw->last_activate == last_activate &&
			state == ND_INTEL_FWA_IDLE) {
		if (fw->missed_activate)
			result = ND_INTEL_DIMM_FWA_NOTSTAGED;
		else
			result = ND_INTEL_DIMM_FWA_SUCCESS;
	}

	*nd_cmd = (struct nd_intel_fw_activate_dimminfo) {
		.result = result,
		.state = state,
	};

	return 0;
}
1305
1306
static int nd_intel_test_cmd_fw_activate_arm(struct nfit_test *t,
1307
struct nd_intel_fw_activate_arm *nd_cmd,
1308
unsigned int buf_len, int dimm)
1309
{
1310
struct nfit_test_fw *fw = &t->fw[dimm];
1311
1312
fw->armed = nd_cmd->activate_arm == ND_INTEL_DIMM_FWA_ARM;
1313
nd_cmd->status = 0;
1314
return 0;
1315
}
1316
1317
static int get_dimm(struct nfit_mem *nfit_mem, unsigned int func)
1318
{
1319
int i;
1320
1321
/* lookup per-dimm data */
1322
for (i = 0; i < ARRAY_SIZE(handle); i++)
1323
if (__to_nfit_memdev(nfit_mem)->device_handle == handle[i])
1324
break;
1325
if (i >= ARRAY_SIZE(handle))
1326
return -ENXIO;
1327
return i;
1328
}
1329
1330
/*
 * Debug-log an incoming control command. For ND_CMD_CALL the envelope
 * is unwrapped so the log shows the inner family/function and payload;
 * the payload hex dump is capped at 256 bytes.
 */
static void nfit_ctl_dbg(struct acpi_nfit_desc *acpi_desc,
		struct nvdimm *nvdimm, unsigned int cmd, void *buf,
		unsigned int len)
{
	struct nfit_test *t = container_of(acpi_desc, typeof(*t), acpi_desc);
	unsigned int func = cmd;
	unsigned int family = 0;

	if (cmd == ND_CMD_CALL) {
		struct nd_cmd_pkg *pkg = buf;

		len = pkg->nd_size_in;
		family = pkg->nd_family;
		buf = pkg->nd_payload;
		func = pkg->nd_command;
	}
	dev_dbg(&t->pdev.dev, "%s family: %d cmd: %d: func: %d input length: %d\n",
			nvdimm ? nvdimm_name(nvdimm) : "bus", family, cmd, func,
			len);
	print_hex_dump_debug("nvdimm in  ", DUMP_PREFIX_OFFSET, 16, 4,
			buf, min(len, 256u), true);
}
1352
1353
/*
 * Top-level emulated control entry point for the test bus (the
 * nvdimm_bus_descriptor ->ndctl hook). Dispatches:
 *  - per-DIMM ND_CMD_CALL packages (Intel DSM security / firmware /
 *    smart emulation, nfit_test0 DIMMs only),
 *  - per-DIMM label commands (get/set config data),
 *  - bus-scope ND_CMD_CALL packages (NFIT and Intel bus families),
 *  - bus-scope ARS commands.
 * Unknown commands/functions return -ENOTTY. Per-DIMM results are
 * post-processed by override_return_code() to emulate injected
 * failures.
 */
static int nfit_test_ctl(struct nvdimm_bus_descriptor *nd_desc,
		struct nvdimm *nvdimm, unsigned int cmd, void *buf,
		unsigned int buf_len, int *cmd_rc)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
	struct nfit_test *t = container_of(acpi_desc, typeof(*t), acpi_desc);
	unsigned int func = cmd;
	int i, rc = 0, __cmd_rc;

	/* Allow callers that do not care about the firmware status code */
	if (!cmd_rc)
		cmd_rc = &__cmd_rc;
	*cmd_rc = 0;

	nfit_ctl_dbg(acpi_desc, nvdimm, cmd, buf, buf_len);

	if (nvdimm) {
		struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
		unsigned long cmd_mask = nvdimm_cmd_mask(nvdimm);

		if (!nfit_mem)
			return -ENOTTY;

		if (cmd == ND_CMD_CALL) {
			struct nd_cmd_pkg *call_pkg = buf;

			/* Unwrap the passthrough envelope */
			buf_len = call_pkg->nd_size_in + call_pkg->nd_size_out;
			buf = (void *) call_pkg->nd_payload;
			func = call_pkg->nd_command;
			if (call_pkg->nd_family != nfit_mem->family)
				return -ENOTTY;

			i = get_dimm(nfit_mem, func);
			if (i < 0)
				return i;
			/* DSM emulation state only exists for nfit_test0 */
			if (i >= NUM_DCR) {
				dev_WARN_ONCE(&t->pdev.dev, 1,
					"ND_CMD_CALL only valid for nfit_test0\n");
				return -EINVAL;
			}

			switch (func) {
			case NVDIMM_INTEL_GET_SECURITY_STATE:
				rc = nd_intel_test_cmd_security_status(t,
						buf, buf_len, i);
				break;
			case NVDIMM_INTEL_UNLOCK_UNIT:
				rc = nd_intel_test_cmd_unlock_unit(t,
						buf, buf_len, i);
				break;
			case NVDIMM_INTEL_SET_PASSPHRASE:
				rc = nd_intel_test_cmd_set_pass(t,
						buf, buf_len, i);
				break;
			case NVDIMM_INTEL_DISABLE_PASSPHRASE:
				rc = nd_intel_test_cmd_disable_pass(t,
						buf, buf_len, i);
				break;
			case NVDIMM_INTEL_FREEZE_LOCK:
				rc = nd_intel_test_cmd_freeze_lock(t,
						buf, buf_len, i);
				break;
			case NVDIMM_INTEL_SECURE_ERASE:
				rc = nd_intel_test_cmd_secure_erase(t,
						buf, buf_len, i);
				break;
			case NVDIMM_INTEL_OVERWRITE:
				rc = nd_intel_test_cmd_overwrite(t,
						buf, buf_len, i);
				break;
			case NVDIMM_INTEL_QUERY_OVERWRITE:
				rc = nd_intel_test_cmd_query_overwrite(t,
						buf, buf_len, i);
				break;
			case NVDIMM_INTEL_SET_MASTER_PASSPHRASE:
				rc = nd_intel_test_cmd_master_set_pass(t,
						buf, buf_len, i);
				break;
			case NVDIMM_INTEL_MASTER_SECURE_ERASE:
				rc = nd_intel_test_cmd_master_secure_erase(t,
						buf, buf_len, i);
				break;
			case NVDIMM_INTEL_FW_ACTIVATE_DIMMINFO:
				rc = nd_intel_test_cmd_fw_activate_dimminfo(
					t, buf, buf_len, i);
				break;
			case NVDIMM_INTEL_FW_ACTIVATE_ARM:
				rc = nd_intel_test_cmd_fw_activate_arm(
					t, buf, buf_len, i);
				break;
			case ND_INTEL_ENABLE_LSS_STATUS:
				rc = nd_intel_test_cmd_set_lss_status(t,
						buf, buf_len);
				break;
			case ND_INTEL_FW_GET_INFO:
				rc = nd_intel_test_get_fw_info(t, buf,
						buf_len, i);
				break;
			case ND_INTEL_FW_START_UPDATE:
				rc = nd_intel_test_start_update(t, buf,
						buf_len, i);
				break;
			case ND_INTEL_FW_SEND_DATA:
				rc = nd_intel_test_send_data(t, buf,
						buf_len, i);
				break;
			case ND_INTEL_FW_FINISH_UPDATE:
				rc = nd_intel_test_finish_fw(t, buf,
						buf_len, i);
				break;
			case ND_INTEL_FW_FINISH_QUERY:
				rc = nd_intel_test_finish_query(t, buf,
						buf_len, i);
				break;
			case ND_INTEL_SMART:
				rc = nfit_test_cmd_smart(buf, buf_len,
						&t->smart[i]);
				break;
			case ND_INTEL_SMART_THRESHOLD:
				rc = nfit_test_cmd_smart_threshold(buf,
						buf_len,
						&t->smart_threshold[i]);
				break;
			case ND_INTEL_SMART_SET_THRESHOLD:
				rc = nfit_test_cmd_smart_set_threshold(buf,
						buf_len,
						&t->smart_threshold[i],
						&t->smart[i],
						&t->pdev.dev, t->dimm_dev[i]);
				break;
			case ND_INTEL_SMART_INJECT:
				rc = nfit_test_cmd_smart_inject(buf,
						buf_len,
						&t->smart_threshold[i],
						&t->smart[i],
						&t->pdev.dev, t->dimm_dev[i]);
				break;
			default:
				return -ENOTTY;
			}
			/* Apply injected failure codes, if any */
			return override_return_code(i, func, rc);
		}

		/* Non-CALL per-DIMM commands must be advertised in both masks */
		if (!test_bit(cmd, &cmd_mask)
				|| !test_bit(func, &nfit_mem->dsm_mask))
			return -ENOTTY;

		i = get_dimm(nfit_mem, func);
		if (i < 0)
			return i;

		switch (func) {
		case ND_CMD_GET_CONFIG_SIZE:
			rc = nfit_test_cmd_get_config_size(buf, buf_len);
			break;
		case ND_CMD_GET_CONFIG_DATA:
			rc = nfit_test_cmd_get_config_data(buf, buf_len,
				t->label[i - t->dcr_idx]);
			break;
		case ND_CMD_SET_CONFIG_DATA:
			rc = nfit_test_cmd_set_config_data(buf, buf_len,
				t->label[i - t->dcr_idx]);
			break;
		default:
			return -ENOTTY;
		}
		return override_return_code(i, func, rc);
	} else {
		struct ars_state *ars_state = &t->ars_state;
		struct nd_cmd_pkg *call_pkg = buf;

		if (!nd_desc)
			return -ENOTTY;

		if (cmd == ND_CMD_CALL && call_pkg->nd_family
				== NVDIMM_BUS_FAMILY_NFIT) {
			func = call_pkg->nd_command;
			buf_len = call_pkg->nd_size_in + call_pkg->nd_size_out;
			buf = (void *) call_pkg->nd_payload;

			switch (func) {
			case NFIT_CMD_TRANSLATE_SPA:
				rc = nfit_test_cmd_translate_spa(
					acpi_desc->nvdimm_bus, buf, buf_len);
				return rc;
			case NFIT_CMD_ARS_INJECT_SET:
				rc = nfit_test_cmd_ars_error_inject(t, buf,
					buf_len);
				return rc;
			case NFIT_CMD_ARS_INJECT_CLEAR:
				rc = nfit_test_cmd_ars_inject_clear(t, buf,
					buf_len);
				return rc;
			case NFIT_CMD_ARS_INJECT_GET:
				rc = nfit_test_cmd_ars_inject_status(t, buf,
					buf_len);
				return rc;
			default:
				return -ENOTTY;
			}
		} else if (cmd == ND_CMD_CALL && call_pkg->nd_family
				== NVDIMM_BUS_FAMILY_INTEL) {
			func = call_pkg->nd_command;
			buf_len = call_pkg->nd_size_in + call_pkg->nd_size_out;
			buf = (void *) call_pkg->nd_payload;

			switch (func) {
			case NVDIMM_BUS_INTEL_FW_ACTIVATE_BUSINFO:
				rc = nvdimm_bus_intel_fw_activate_businfo(t,
						buf, buf_len);
				return rc;
			case NVDIMM_BUS_INTEL_FW_ACTIVATE:
				rc = nvdimm_bus_intel_fw_activate(t, buf,
						buf_len);
				return rc;
			default:
				return -ENOTTY;
			}
		} else if (cmd == ND_CMD_CALL)
			return -ENOTTY;

		if (!nd_desc || !test_bit(cmd, &nd_desc->cmd_mask))
			return -ENOTTY;

		switch (func) {
		case ND_CMD_ARS_CAP:
			rc = nfit_test_cmd_ars_cap(buf, buf_len);
			break;
		case ND_CMD_ARS_START:
			rc = nfit_test_cmd_ars_start(t, ars_state, buf,
					buf_len, cmd_rc);
			break;
		case ND_CMD_ARS_STATUS:
			rc = nfit_test_cmd_ars_status(ars_state, buf, buf_len,
					cmd_rc);
			break;
		case ND_CMD_CLEAR_ERROR:
			rc = nfit_test_cmd_clear_error(t, buf, buf_len, cmd_rc);
			break;
		default:
			return -ENOTTY;
		}
	}

	return rc;
}
1598
1599
/* Serializes additions/removals/lookups on each test's resource list */
static DEFINE_SPINLOCK(nfit_test_lock);
/* One slot per emulated platform-device instance */
static struct nfit_test *instances[NUM_NFITS];
1601
1602
/*
 * devm action: unlink a test resource from its instance list and free
 * its backing storage. Ranges of DIMM_SIZE or larger came from the
 * shared gen_pool and must be returned to it.
 */
static void release_nfit_res(void *data)
{
	struct nfit_test_resource *nfit_res = data;

	spin_lock(&nfit_test_lock);
	list_del(&nfit_res->list);
	spin_unlock(&nfit_test_lock);

	if (resource_size(&nfit_res->res) >= DIMM_SIZE)
		gen_pool_free(nfit_pool, nfit_res->res.start,
				resource_size(&nfit_res->res));
	vfree(nfit_res->buf);
	kfree(nfit_res);
}
1616
1617
/*
 * Register a vmalloc'd buffer as an emulated physical range: record the
 * buffer and its fake DMA address in an nfit_test_resource, hook its
 * teardown to devm, and link it on the test's resource list. Returns
 * the (zeroed) buffer, or NULL on failure after undoing the gen_pool
 * reservation made by the caller.
 */
static void *__test_alloc(struct nfit_test *t, size_t size, dma_addr_t *dma,
		void *buf)
{
	struct device *dev = &t->pdev.dev;
	struct nfit_test_resource *nfit_res = kzalloc(sizeof(*nfit_res),
			GFP_KERNEL);
	int rc;

	if (!buf || !nfit_res || !*dma)
		goto err;
	/* Tie resource teardown to the platform device lifetime */
	rc = devm_add_action(dev, release_nfit_res, nfit_res);
	if (rc)
		goto err;
	INIT_LIST_HEAD(&nfit_res->list);
	memset(buf, 0, size);
	nfit_res->dev = dev;
	nfit_res->buf = buf;
	nfit_res->res.start = *dma;
	nfit_res->res.end = *dma + size - 1;
	nfit_res->res.name = "NFIT";
	spin_lock_init(&nfit_res->lock);
	INIT_LIST_HEAD(&nfit_res->requests);
	spin_lock(&nfit_test_lock);
	list_add(&nfit_res->list, &t->resources);
	spin_unlock(&nfit_test_lock);

	return nfit_res->buf;
 err:
	/* Caller reserved gen_pool space for DIMM_SIZE+ ranges; undo it */
	if (*dma && size >= DIMM_SIZE)
		gen_pool_free(nfit_pool, *dma, size);
	if (buf)
		vfree(buf);
	kfree(nfit_res);
	return NULL;
}
1652
1653
/*
 * Allocate an emulated range: large (>= DIMM_SIZE) ranges get a fake
 * DMA address carved out of the shared gen_pool with 128M alignment;
 * small ranges just reuse the buffer's kernel address as the "DMA"
 * address. Registration/cleanup is handled by __test_alloc().
 */
static void *test_alloc(struct nfit_test *t, size_t size, dma_addr_t *dma)
{
	struct genpool_data_align data = {
		.align = SZ_128M,
	};
	void *buf = vmalloc(size);

	if (size >= DIMM_SIZE)
		*dma = gen_pool_alloc_algo(nfit_pool, size,
				gen_pool_first_fit_align, &data);
	else
		*dma = (unsigned long) buf;
	return __test_alloc(t, size, dma, buf);
}
1667
1668
/*
 * Find the test resource covering @addr across all instances. A hit can
 * be either by the fake DMA range (res.start..end) or by the backing
 * vmalloc buffer's address range. Returns NULL when no range matches.
 */
static struct nfit_test_resource *nfit_test_lookup(resource_size_t addr)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(instances); i++) {
		struct nfit_test_resource *n, *nfit_res = NULL;
		struct nfit_test *t = instances[i];

		if (!t)
			continue;
		spin_lock(&nfit_test_lock);
		list_for_each_entry(n, &t->resources, list) {
			if (addr >= n->res.start && (addr < n->res.start
						+ resource_size(&n->res))) {
				nfit_res = n;
				break;
			} else if (addr >= (unsigned long) n->buf
					&& (addr < (unsigned long) n->buf
						+ resource_size(&n->res))) {
				nfit_res = n;
				break;
			}
		}
		spin_unlock(&nfit_test_lock);
		if (nfit_res)
			return nfit_res;
	}

	return NULL;
}
1698
1699
/*
 * Allocate the emulated ARS status buffer (header plus up to 4K of
 * records) and initialize its lock. Returns 0 or -ENOMEM.
 */
static int ars_state_init(struct device *dev, struct ars_state *ars_state)
{
	/* for testing, only store up to n records that fit within 4k */
	ars_state->ars_status = devm_kzalloc(dev,
			sizeof(struct nd_cmd_ars_status) + SZ_4K, GFP_KERNEL);
	if (!ars_state->ars_status)
		return -ENOMEM;
	spin_lock_init(&ars_state->lock);
	return 0;
}
1709
1710
static void put_dimms(void *data)
1711
{
1712
struct nfit_test *t = data;
1713
int i;
1714
1715
for (i = 0; i < t->num_dcr; i++)
1716
if (t->dimm_dev[i])
1717
device_unregister(t->dimm_dev[i]);
1718
}
1719
1720
/* Device class for the per-DIMM test control devices ("test_dimm%d") */
static const struct class nfit_test_dimm = {
	.name = "nfit_test_dimm",
};
1723
1724
static int dimm_name_to_id(struct device *dev)
1725
{
1726
int dimm;
1727
1728
if (sscanf(dev_name(dev), "test_dimm%d", &dimm) != 1)
1729
return -ENXIO;
1730
return dimm;
1731
}
1732
1733
static ssize_t handle_show(struct device *dev, struct device_attribute *attr,
1734
char *buf)
1735
{
1736
int dimm = dimm_name_to_id(dev);
1737
1738
if (dimm < 0)
1739
return dimm;
1740
1741
return sprintf(buf, "%#x\n", handle[dimm]);
1742
}
1743
DEVICE_ATTR_RO(handle);
1744
1745
/* sysfs read: report the DIMM's injected-failure command flag mask */
static ssize_t fail_cmd_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	int dimm = dimm_name_to_id(dev);

	if (dimm < 0)
		return dimm;

	return sprintf(buf, "%#lx\n", dimm_fail_cmd_flags[dimm]);
}
1755
1756
/*
 * sysfs write: set the DIMM's injected-failure command flag mask.
 * Fix: val is unsigned long, so parse with kstrtoul() — kstrtol()
 * takes a long * and the mismatched pointer type is undefined.
 */
static ssize_t fail_cmd_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t size)
{
	int dimm = dimm_name_to_id(dev);
	unsigned long val;
	ssize_t rc;

	if (dimm < 0)
		return dimm;

	rc = kstrtoul(buf, 0, &val);
	if (rc)
		return rc;

	dimm_fail_cmd_flags[dimm] = val;
	return size;
}
static DEVICE_ATTR_RW(fail_cmd);
1774
1775
/* sysfs read: report the error code injected for this DIMM's commands */
static ssize_t fail_cmd_code_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	int dimm = dimm_name_to_id(dev);

	if (dimm < 0)
		return dimm;

	return sprintf(buf, "%d\n", dimm_fail_cmd_code[dimm]);
}
1785
1786
/*
 * sysfs write: set the error code to inject for this DIMM's commands.
 * Fix: val is unsigned long, so parse with kstrtoul() — kstrtol()
 * takes a long * and the mismatched pointer type is undefined.
 */
static ssize_t fail_cmd_code_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t size)
{
	int dimm = dimm_name_to_id(dev);
	unsigned long val;
	ssize_t rc;

	if (dimm < 0)
		return dimm;

	rc = kstrtoul(buf, 0, &val);
	if (rc)
		return rc;

	dimm_fail_cmd_code[dimm] = val;
	return size;
}
static DEVICE_ATTR_RW(fail_cmd_code);
1804
1805
/*
 * sysfs write: force the DIMM into the enabled+locked security state so
 * unlock paths can be exercised.
 * Fix: dimm_name_to_id() can return -ENXIO; indexing dimm_sec_info[]
 * with a negative value is out-of-bounds, so bail out first (matching
 * the other attribute handlers).
 */
static ssize_t lock_dimm_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t size)
{
	int dimm = dimm_name_to_id(dev);
	struct nfit_test_sec *sec;

	if (dimm < 0)
		return dimm;

	sec = &dimm_sec_info[dimm];
	sec->state = ND_INTEL_SEC_STATE_ENABLED | ND_INTEL_SEC_STATE_LOCKED;
	return size;
}
static DEVICE_ATTR_WO(lock_dimm);
1815
1816
/* sysfs attributes exposed on each test_dimm%d control device */
static struct attribute *nfit_test_dimm_attributes[] = {
	&dev_attr_fail_cmd.attr,
	&dev_attr_fail_cmd_code.attr,
	&dev_attr_handle.attr,
	&dev_attr_lock_dimm.attr,
	NULL,
};

static struct attribute_group nfit_test_dimm_attribute_group = {
	.attrs = nfit_test_dimm_attributes,
};

static const struct attribute_group *nfit_test_dimm_attribute_groups[] = {
	&nfit_test_dimm_attribute_group,
	NULL,
};
1832
1833
/*
 * Create one test_dimm%d control device per emulated DIMM, with
 * teardown (put_dimms) registered up front so partial creation is
 * cleaned up on any later probe failure. Returns 0 or -ENOMEM.
 */
static int nfit_test_dimm_init(struct nfit_test *t)
{
	int i;

	if (devm_add_action_or_reset(&t->pdev.dev, put_dimms, t))
		return -ENOMEM;
	for (i = 0; i < t->num_dcr; i++) {
		t->dimm_dev[i] = device_create_with_groups(&nfit_test_dimm,
				&t->pdev.dev, 0, NULL,
				nfit_test_dimm_attribute_groups,
				"test_dimm%d", i + t->dcr_idx);
		if (!t->dimm_dev[i])
			return -ENOMEM;
	}
	return 0;
}
1849
1850
static void nfit_security_init(struct nfit_test *t)
1851
{
1852
int i;
1853
1854
for (i = 0; i < t->num_dcr; i++) {
1855
struct nfit_test_sec *sec = &dimm_sec_info[i];
1856
1857
sec->ext_state = ND_INTEL_SEC_ESTATE_ENABLED;
1858
}
1859
}
1860
1861
/*
 * Seed each DIMM's emulated SMART data and alarm thresholds from fixed
 * defaults (temperatures are in 1/16th degree units).
 */
static void smart_init(struct nfit_test *t)
{
	int i;
	const struct nd_intel_smart_threshold smart_t_data = {
		.alarm_control = ND_INTEL_SMART_SPARE_TRIP
			| ND_INTEL_SMART_TEMP_TRIP,
		.media_temperature = 40 * 16,
		.ctrl_temperature = 30 * 16,
		.spares = 5,
	};

	for (i = 0; i < t->num_dcr; i++) {
		memcpy(&t->smart[i], &smart_def, sizeof(smart_def));
		memcpy(&t->smart_threshold[i], &smart_t_data,
				sizeof(smart_t_data));
	}
}
1878
1879
/*
 * Size of the SPA structure this test emits: the full ACPI definition
 * minus the trailing 8-byte location cookie, which is not emulated.
 */
static size_t sizeof_spa(struct acpi_nfit_system_address *spa)
{
	/* until spa location cookie support is added... */
	return sizeof(*spa) - 8;
}
1884
1885
/*
 * Allocate every buffer backing the nfit_test0 topology: the NFIT table
 * itself, three SPA ranges, and per-DIMM storage, labels, flush hint
 * pages and DCRs, plus the dynamic-FIT pointer. Also creates the dimm
 * control devices and seeds the smart/security/ARS emulation state.
 * Returns 0 or -ENOMEM (devm-backed allocations unwind on failure).
 */
static int nfit_test0_alloc(struct nfit_test *t)
{
	struct acpi_nfit_system_address *spa = NULL;
	struct acpi_nfit_flush_address *flush;
	size_t nfit_size = sizeof_spa(spa) * NUM_SPA
			+ sizeof(struct acpi_nfit_memory_map) * NUM_MEM
			+ sizeof(struct acpi_nfit_control_region) * NUM_DCR
			+ offsetof(struct acpi_nfit_control_region,
					window_size) * NUM_DCR
			+ sizeof(struct acpi_nfit_data_region) * NUM_BDW
			+ struct_size(flush, hint_address, NUM_HINTS) * NUM_DCR
			+ sizeof(struct acpi_nfit_capabilities);
	int i;

	t->nfit_buf = test_alloc(t, nfit_size, &t->nfit_dma);
	if (!t->nfit_buf)
		return -ENOMEM;
	t->nfit_size = nfit_size;

	t->spa_set[0] = test_alloc(t, SPA0_SIZE, &t->spa_set_dma[0]);
	if (!t->spa_set[0])
		return -ENOMEM;

	t->spa_set[1] = test_alloc(t, SPA1_SIZE, &t->spa_set_dma[1]);
	if (!t->spa_set[1])
		return -ENOMEM;

	t->spa_set[2] = test_alloc(t, SPA0_SIZE, &t->spa_set_dma[2]);
	if (!t->spa_set[2])
		return -ENOMEM;

	for (i = 0; i < t->num_dcr; i++) {
		t->dimm[i] = test_alloc(t, DIMM_SIZE, &t->dimm_dma[i]);
		if (!t->dimm[i])
			return -ENOMEM;

		t->label[i] = test_alloc(t, LABEL_SIZE, &t->label_dma[i]);
		if (!t->label[i])
			return -ENOMEM;
		sprintf(t->label[i], "label%d", i);

		/* Flush hint table: at least a page, room for NUM_HINTS */
		t->flush[i] = test_alloc(t, max(PAGE_SIZE,
					sizeof(u64) * NUM_HINTS),
				&t->flush_dma[i]);
		if (!t->flush[i])
			return -ENOMEM;
	}

	for (i = 0; i < t->num_dcr; i++) {
		t->dcr[i] = test_alloc(t, LABEL_SIZE, &t->dcr_dma[i]);
		if (!t->dcr[i])
			return -ENOMEM;
	}

	t->_fit = test_alloc(t, sizeof(union acpi_object **), &t->_fit_dma);
	if (!t->_fit)
		return -ENOMEM;

	if (nfit_test_dimm_init(t))
		return -ENOMEM;
	smart_init(t);
	nfit_security_init(t);
	return ars_state_init(&t->pdev.dev, &t->ars_state);
}
1949
1950
/*
 * Allocate the buffers for the smaller nfit_test1 topology: the NFIT
 * table, two SPA ranges (SPA2 plus a virtual-CD range) and per-DIMM
 * labels, then seed dimm devices, smart and ARS state. Returns 0 or
 * -ENOMEM (devm-backed allocations unwind on failure).
 */
static int nfit_test1_alloc(struct nfit_test *t)
{
	struct acpi_nfit_system_address *spa = NULL;
	size_t nfit_size = sizeof_spa(spa) * 2
		+ sizeof(struct acpi_nfit_memory_map) * 2
		+ offsetof(struct acpi_nfit_control_region, window_size) * 2;
	int i;

	t->nfit_buf = test_alloc(t, nfit_size, &t->nfit_dma);
	if (!t->nfit_buf)
		return -ENOMEM;
	t->nfit_size = nfit_size;

	t->spa_set[0] = test_alloc(t, SPA2_SIZE, &t->spa_set_dma[0]);
	if (!t->spa_set[0])
		return -ENOMEM;

	for (i = 0; i < t->num_dcr; i++) {
		t->label[i] = test_alloc(t, LABEL_SIZE, &t->label_dma[i]);
		if (!t->label[i])
			return -ENOMEM;
		sprintf(t->label[i], "label%d", i);
	}

	t->spa_set[1] = test_alloc(t, SPA_VCD_SIZE, &t->spa_set_dma[1]);
	if (!t->spa_set[1])
		return -ENOMEM;

	if (nfit_test_dimm_init(t))
		return -ENOMEM;
	smart_init(t);
	return ars_state_init(&t->pdev.dev, &t->ars_state);
}
1983
1984
/* Fill the identification fields shared by every emulated DCR entry */
static void dcr_common_init(struct acpi_nfit_control_region *dcr)
{
	dcr->vendor_id = 0xabcd;
	dcr->device_id = 0;
	dcr->revision_id = 1;
	dcr->valid_fields = 1;
	dcr->manufacturing_location = 0xa;
	/* NFIT stores the manufacturing date big-endian */
	dcr->manufacturing_date = cpu_to_be16(2016);
}
1993
1994
static void nfit_test0_setup(struct nfit_test *t)
1995
{
1996
const int flush_hint_size = sizeof(struct acpi_nfit_flush_address)
1997
+ (sizeof(u64) * NUM_HINTS);
1998
struct acpi_nfit_desc *acpi_desc;
1999
struct acpi_nfit_memory_map *memdev;
2000
void *nfit_buf = t->nfit_buf;
2001
struct acpi_nfit_system_address *spa;
2002
struct acpi_nfit_control_region *dcr;
2003
struct acpi_nfit_data_region *bdw;
2004
struct acpi_nfit_flush_address *flush;
2005
struct acpi_nfit_capabilities *pcap;
2006
unsigned int offset = 0, i;
2007
unsigned long *acpi_mask;
2008
2009
/*
2010
* spa0 (interleave first half of dimm0 and dimm1, note storage
2011
* does not actually alias the related block-data-window
2012
* regions)
2013
*/
2014
spa = nfit_buf;
2015
spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
2016
spa->header.length = sizeof_spa(spa);
2017
memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
2018
spa->range_index = 0+1;
2019
spa->address = t->spa_set_dma[0];
2020
spa->length = SPA0_SIZE;
2021
offset += spa->header.length;
2022
2023
/*
2024
* spa1 (interleave last half of the 4 DIMMS, note storage
2025
* does not actually alias the related block-data-window
2026
* regions)
2027
*/
2028
spa = nfit_buf + offset;
2029
spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
2030
spa->header.length = sizeof_spa(spa);
2031
memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
2032
spa->range_index = 1+1;
2033
spa->address = t->spa_set_dma[1];
2034
spa->length = SPA1_SIZE;
2035
offset += spa->header.length;
2036
2037
/* spa2 (dcr0) dimm0 */
2038
spa = nfit_buf + offset;
2039
spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
2040
spa->header.length = sizeof_spa(spa);
2041
memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
2042
spa->range_index = 2+1;
2043
spa->address = t->dcr_dma[0];
2044
spa->length = DCR_SIZE;
2045
offset += spa->header.length;
2046
2047
/* spa3 (dcr1) dimm1 */
2048
spa = nfit_buf + offset;
2049
spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
2050
spa->header.length = sizeof_spa(spa);
2051
memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
2052
spa->range_index = 3+1;
2053
spa->address = t->dcr_dma[1];
2054
spa->length = DCR_SIZE;
2055
offset += spa->header.length;
2056
2057
/* spa4 (dcr2) dimm2 */
2058
spa = nfit_buf + offset;
2059
spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
2060
spa->header.length = sizeof_spa(spa);
2061
memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
2062
spa->range_index = 4+1;
2063
spa->address = t->dcr_dma[2];
2064
spa->length = DCR_SIZE;
2065
offset += spa->header.length;
2066
2067
/* spa5 (dcr3) dimm3 */
2068
spa = nfit_buf + offset;
2069
spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
2070
spa->header.length = sizeof_spa(spa);
2071
memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
2072
spa->range_index = 5+1;
2073
spa->address = t->dcr_dma[3];
2074
spa->length = DCR_SIZE;
2075
offset += spa->header.length;
2076
2077
/* spa6 (bdw for dcr0) dimm0 */
2078
spa = nfit_buf + offset;
2079
spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
2080
spa->header.length = sizeof_spa(spa);
2081
memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
2082
spa->range_index = 6+1;
2083
spa->address = t->dimm_dma[0];
2084
spa->length = DIMM_SIZE;
2085
offset += spa->header.length;
2086
2087
/* spa7 (bdw for dcr1) dimm1 */
2088
spa = nfit_buf + offset;
2089
spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
2090
spa->header.length = sizeof_spa(spa);
2091
memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
2092
spa->range_index = 7+1;
2093
spa->address = t->dimm_dma[1];
2094
spa->length = DIMM_SIZE;
2095
offset += spa->header.length;
2096
2097
/* spa8 (bdw for dcr2) dimm2 */
2098
spa = nfit_buf + offset;
2099
spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
2100
spa->header.length = sizeof_spa(spa);
2101
memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
2102
spa->range_index = 8+1;
2103
spa->address = t->dimm_dma[2];
2104
spa->length = DIMM_SIZE;
2105
offset += spa->header.length;
2106
2107
/* spa9 (bdw for dcr3) dimm3 */
2108
spa = nfit_buf + offset;
2109
spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
2110
spa->header.length = sizeof_spa(spa);
2111
memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
2112
spa->range_index = 9+1;
2113
spa->address = t->dimm_dma[3];
2114
spa->length = DIMM_SIZE;
2115
offset += spa->header.length;
2116
2117
/* mem-region0 (spa0, dimm0) */
2118
memdev = nfit_buf + offset;
2119
memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
2120
memdev->header.length = sizeof(*memdev);
2121
memdev->device_handle = handle[0];
2122
memdev->physical_id = 0;
2123
memdev->region_id = 0;
2124
memdev->range_index = 0+1;
2125
memdev->region_index = 4+1;
2126
memdev->region_size = SPA0_SIZE/2;
2127
memdev->region_offset = 1;
2128
memdev->address = 0;
2129
memdev->interleave_index = 0;
2130
memdev->interleave_ways = 2;
2131
offset += memdev->header.length;
2132
2133
/* mem-region1 (spa0, dimm1) */
2134
memdev = nfit_buf + offset;
2135
memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
2136
memdev->header.length = sizeof(*memdev);
2137
memdev->device_handle = handle[1];
2138
memdev->physical_id = 1;
2139
memdev->region_id = 0;
2140
memdev->range_index = 0+1;
2141
memdev->region_index = 5+1;
2142
memdev->region_size = SPA0_SIZE/2;
2143
memdev->region_offset = (1 << 8);
2144
memdev->address = 0;
2145
memdev->interleave_index = 0;
2146
memdev->interleave_ways = 2;
2147
memdev->flags = ACPI_NFIT_MEM_HEALTH_ENABLED;
2148
offset += memdev->header.length;
2149
2150
/* mem-region2 (spa1, dimm0) */
2151
memdev = nfit_buf + offset;
2152
memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
2153
memdev->header.length = sizeof(*memdev);
2154
memdev->device_handle = handle[0];
2155
memdev->physical_id = 0;
2156
memdev->region_id = 1;
2157
memdev->range_index = 1+1;
2158
memdev->region_index = 4+1;
2159
memdev->region_size = SPA1_SIZE/4;
2160
memdev->region_offset = (1 << 16);
2161
memdev->address = SPA0_SIZE/2;
2162
memdev->interleave_index = 0;
2163
memdev->interleave_ways = 4;
2164
memdev->flags = ACPI_NFIT_MEM_HEALTH_ENABLED;
2165
offset += memdev->header.length;
2166
2167
/* mem-region3 (spa1, dimm1) */
2168
memdev = nfit_buf + offset;
2169
memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
2170
memdev->header.length = sizeof(*memdev);
2171
memdev->device_handle = handle[1];
2172
memdev->physical_id = 1;
2173
memdev->region_id = 1;
2174
memdev->range_index = 1+1;
2175
memdev->region_index = 5+1;
2176
memdev->region_size = SPA1_SIZE/4;
2177
memdev->region_offset = (1 << 24);
2178
memdev->address = SPA0_SIZE/2;
2179
memdev->interleave_index = 0;
2180
memdev->interleave_ways = 4;
2181
offset += memdev->header.length;
2182
2183
/* mem-region4 (spa1, dimm2) */
2184
memdev = nfit_buf + offset;
2185
memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
2186
memdev->header.length = sizeof(*memdev);
2187
memdev->device_handle = handle[2];
2188
memdev->physical_id = 2;
2189
memdev->region_id = 0;
2190
memdev->range_index = 1+1;
2191
memdev->region_index = 6+1;
2192
memdev->region_size = SPA1_SIZE/4;
2193
memdev->region_offset = (1ULL << 32);
2194
memdev->address = SPA0_SIZE/2;
2195
memdev->interleave_index = 0;
2196
memdev->interleave_ways = 4;
2197
memdev->flags = ACPI_NFIT_MEM_HEALTH_ENABLED;
2198
offset += memdev->header.length;
2199
2200
/* mem-region5 (spa1, dimm3) */
2201
memdev = nfit_buf + offset;
2202
memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
2203
memdev->header.length = sizeof(*memdev);
2204
memdev->device_handle = handle[3];
2205
memdev->physical_id = 3;
2206
memdev->region_id = 0;
2207
memdev->range_index = 1+1;
2208
memdev->region_index = 7+1;
2209
memdev->region_size = SPA1_SIZE/4;
2210
memdev->region_offset = (1ULL << 40);
2211
memdev->address = SPA0_SIZE/2;
2212
memdev->interleave_index = 0;
2213
memdev->interleave_ways = 4;
2214
offset += memdev->header.length;
2215
2216
/* mem-region6 (spa/dcr0, dimm0) */
2217
memdev = nfit_buf + offset;
2218
memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
2219
memdev->header.length = sizeof(*memdev);
2220
memdev->device_handle = handle[0];
2221
memdev->physical_id = 0;
2222
memdev->region_id = 0;
2223
memdev->range_index = 2+1;
2224
memdev->region_index = 0+1;
2225
memdev->region_size = 0;
2226
memdev->region_offset = 0;
2227
memdev->address = 0;
2228
memdev->interleave_index = 0;
2229
memdev->interleave_ways = 1;
2230
offset += memdev->header.length;
2231
2232
/* mem-region7 (spa/dcr1, dimm1) */
2233
memdev = nfit_buf + offset;
2234
memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
2235
memdev->header.length = sizeof(*memdev);
2236
memdev->device_handle = handle[1];
2237
memdev->physical_id = 1;
2238
memdev->region_id = 0;
2239
memdev->range_index = 3+1;
2240
memdev->region_index = 1+1;
2241
memdev->region_size = 0;
2242
memdev->region_offset = 0;
2243
memdev->address = 0;
2244
memdev->interleave_index = 0;
2245
memdev->interleave_ways = 1;
2246
offset += memdev->header.length;
2247
2248
/* mem-region8 (spa/dcr2, dimm2) */
2249
memdev = nfit_buf + offset;
2250
memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
2251
memdev->header.length = sizeof(*memdev);
2252
memdev->device_handle = handle[2];
2253
memdev->physical_id = 2;
2254
memdev->region_id = 0;
2255
memdev->range_index = 4+1;
2256
memdev->region_index = 2+1;
2257
memdev->region_size = 0;
2258
memdev->region_offset = 0;
2259
memdev->address = 0;
2260
memdev->interleave_index = 0;
2261
memdev->interleave_ways = 1;
2262
offset += memdev->header.length;
2263
2264
/* mem-region9 (spa/dcr3, dimm3) */
2265
memdev = nfit_buf + offset;
2266
memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
2267
memdev->header.length = sizeof(*memdev);
2268
memdev->device_handle = handle[3];
2269
memdev->physical_id = 3;
2270
memdev->region_id = 0;
2271
memdev->range_index = 5+1;
2272
memdev->region_index = 3+1;
2273
memdev->region_size = 0;
2274
memdev->region_offset = 0;
2275
memdev->address = 0;
2276
memdev->interleave_index = 0;
2277
memdev->interleave_ways = 1;
2278
offset += memdev->header.length;
2279
2280
/* mem-region10 (spa/bdw0, dimm0) */
2281
memdev = nfit_buf + offset;
2282
memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
2283
memdev->header.length = sizeof(*memdev);
2284
memdev->device_handle = handle[0];
2285
memdev->physical_id = 0;
2286
memdev->region_id = 0;
2287
memdev->range_index = 6+1;
2288
memdev->region_index = 0+1;
2289
memdev->region_size = 0;
2290
memdev->region_offset = 0;
2291
memdev->address = 0;
2292
memdev->interleave_index = 0;
2293
memdev->interleave_ways = 1;
2294
offset += memdev->header.length;
2295
2296
/* mem-region11 (spa/bdw1, dimm1) */
2297
memdev = nfit_buf + offset;
2298
memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
2299
memdev->header.length = sizeof(*memdev);
2300
memdev->device_handle = handle[1];
2301
memdev->physical_id = 1;
2302
memdev->region_id = 0;
2303
memdev->range_index = 7+1;
2304
memdev->region_index = 1+1;
2305
memdev->region_size = 0;
2306
memdev->region_offset = 0;
2307
memdev->address = 0;
2308
memdev->interleave_index = 0;
2309
memdev->interleave_ways = 1;
2310
offset += memdev->header.length;
2311
2312
/* mem-region12 (spa/bdw2, dimm2) */
2313
memdev = nfit_buf + offset;
2314
memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
2315
memdev->header.length = sizeof(*memdev);
2316
memdev->device_handle = handle[2];
2317
memdev->physical_id = 2;
2318
memdev->region_id = 0;
2319
memdev->range_index = 8+1;
2320
memdev->region_index = 2+1;
2321
memdev->region_size = 0;
2322
memdev->region_offset = 0;
2323
memdev->address = 0;
2324
memdev->interleave_index = 0;
2325
memdev->interleave_ways = 1;
2326
offset += memdev->header.length;
2327
2328
/* mem-region13 (spa/dcr3, dimm3) */
2329
memdev = nfit_buf + offset;
2330
memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
2331
memdev->header.length = sizeof(*memdev);
2332
memdev->device_handle = handle[3];
2333
memdev->physical_id = 3;
2334
memdev->region_id = 0;
2335
memdev->range_index = 9+1;
2336
memdev->region_index = 3+1;
2337
memdev->region_size = 0;
2338
memdev->region_offset = 0;
2339
memdev->address = 0;
2340
memdev->interleave_index = 0;
2341
memdev->interleave_ways = 1;
2342
memdev->flags = ACPI_NFIT_MEM_HEALTH_ENABLED;
2343
offset += memdev->header.length;
2344
2345
/* dcr-descriptor0: blk */
2346
dcr = nfit_buf + offset;
2347
dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
2348
dcr->header.length = sizeof(*dcr);
2349
dcr->region_index = 0+1;
2350
dcr_common_init(dcr);
2351
dcr->serial_number = ~handle[0];
2352
dcr->code = NFIT_FIC_BLK;
2353
dcr->windows = 1;
2354
dcr->window_size = DCR_SIZE;
2355
dcr->command_offset = 0;
2356
dcr->command_size = 8;
2357
dcr->status_offset = 8;
2358
dcr->status_size = 4;
2359
offset += dcr->header.length;
2360
2361
/* dcr-descriptor1: blk */
2362
dcr = nfit_buf + offset;
2363
dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
2364
dcr->header.length = sizeof(*dcr);
2365
dcr->region_index = 1+1;
2366
dcr_common_init(dcr);
2367
dcr->serial_number = ~handle[1];
2368
dcr->code = NFIT_FIC_BLK;
2369
dcr->windows = 1;
2370
dcr->window_size = DCR_SIZE;
2371
dcr->command_offset = 0;
2372
dcr->command_size = 8;
2373
dcr->status_offset = 8;
2374
dcr->status_size = 4;
2375
offset += dcr->header.length;
2376
2377
/* dcr-descriptor2: blk */
2378
dcr = nfit_buf + offset;
2379
dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
2380
dcr->header.length = sizeof(*dcr);
2381
dcr->region_index = 2+1;
2382
dcr_common_init(dcr);
2383
dcr->serial_number = ~handle[2];
2384
dcr->code = NFIT_FIC_BLK;
2385
dcr->windows = 1;
2386
dcr->window_size = DCR_SIZE;
2387
dcr->command_offset = 0;
2388
dcr->command_size = 8;
2389
dcr->status_offset = 8;
2390
dcr->status_size = 4;
2391
offset += dcr->header.length;
2392
2393
/* dcr-descriptor3: blk */
2394
dcr = nfit_buf + offset;
2395
dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
2396
dcr->header.length = sizeof(*dcr);
2397
dcr->region_index = 3+1;
2398
dcr_common_init(dcr);
2399
dcr->serial_number = ~handle[3];
2400
dcr->code = NFIT_FIC_BLK;
2401
dcr->windows = 1;
2402
dcr->window_size = DCR_SIZE;
2403
dcr->command_offset = 0;
2404
dcr->command_size = 8;
2405
dcr->status_offset = 8;
2406
dcr->status_size = 4;
2407
offset += dcr->header.length;
2408
2409
/* dcr-descriptor0: pmem */
2410
dcr = nfit_buf + offset;
2411
dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
2412
dcr->header.length = offsetof(struct acpi_nfit_control_region,
2413
window_size);
2414
dcr->region_index = 4+1;
2415
dcr_common_init(dcr);
2416
dcr->serial_number = ~handle[0];
2417
dcr->code = NFIT_FIC_BYTEN;
2418
dcr->windows = 0;
2419
offset += dcr->header.length;
2420
2421
/* dcr-descriptor1: pmem */
2422
dcr = nfit_buf + offset;
2423
dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
2424
dcr->header.length = offsetof(struct acpi_nfit_control_region,
2425
window_size);
2426
dcr->region_index = 5+1;
2427
dcr_common_init(dcr);
2428
dcr->serial_number = ~handle[1];
2429
dcr->code = NFIT_FIC_BYTEN;
2430
dcr->windows = 0;
2431
offset += dcr->header.length;
2432
2433
/* dcr-descriptor2: pmem */
2434
dcr = nfit_buf + offset;
2435
dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
2436
dcr->header.length = offsetof(struct acpi_nfit_control_region,
2437
window_size);
2438
dcr->region_index = 6+1;
2439
dcr_common_init(dcr);
2440
dcr->serial_number = ~handle[2];
2441
dcr->code = NFIT_FIC_BYTEN;
2442
dcr->windows = 0;
2443
offset += dcr->header.length;
2444
2445
/* dcr-descriptor3: pmem */
2446
dcr = nfit_buf + offset;
2447
dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
2448
dcr->header.length = offsetof(struct acpi_nfit_control_region,
2449
window_size);
2450
dcr->region_index = 7+1;
2451
dcr_common_init(dcr);
2452
dcr->serial_number = ~handle[3];
2453
dcr->code = NFIT_FIC_BYTEN;
2454
dcr->windows = 0;
2455
offset += dcr->header.length;
2456
2457
/* bdw0 (spa/dcr0, dimm0) */
2458
bdw = nfit_buf + offset;
2459
bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
2460
bdw->header.length = sizeof(*bdw);
2461
bdw->region_index = 0+1;
2462
bdw->windows = 1;
2463
bdw->offset = 0;
2464
bdw->size = BDW_SIZE;
2465
bdw->capacity = DIMM_SIZE;
2466
bdw->start_address = 0;
2467
offset += bdw->header.length;
2468
2469
/* bdw1 (spa/dcr1, dimm1) */
2470
bdw = nfit_buf + offset;
2471
bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
2472
bdw->header.length = sizeof(*bdw);
2473
bdw->region_index = 1+1;
2474
bdw->windows = 1;
2475
bdw->offset = 0;
2476
bdw->size = BDW_SIZE;
2477
bdw->capacity = DIMM_SIZE;
2478
bdw->start_address = 0;
2479
offset += bdw->header.length;
2480
2481
/* bdw2 (spa/dcr2, dimm2) */
2482
bdw = nfit_buf + offset;
2483
bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
2484
bdw->header.length = sizeof(*bdw);
2485
bdw->region_index = 2+1;
2486
bdw->windows = 1;
2487
bdw->offset = 0;
2488
bdw->size = BDW_SIZE;
2489
bdw->capacity = DIMM_SIZE;
2490
bdw->start_address = 0;
2491
offset += bdw->header.length;
2492
2493
/* bdw3 (spa/dcr3, dimm3) */
2494
bdw = nfit_buf + offset;
2495
bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
2496
bdw->header.length = sizeof(*bdw);
2497
bdw->region_index = 3+1;
2498
bdw->windows = 1;
2499
bdw->offset = 0;
2500
bdw->size = BDW_SIZE;
2501
bdw->capacity = DIMM_SIZE;
2502
bdw->start_address = 0;
2503
offset += bdw->header.length;
2504
2505
/* flush0 (dimm0) */
2506
flush = nfit_buf + offset;
2507
flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
2508
flush->header.length = flush_hint_size;
2509
flush->device_handle = handle[0];
2510
flush->hint_count = NUM_HINTS;
2511
for (i = 0; i < NUM_HINTS; i++)
2512
flush->hint_address[i] = t->flush_dma[0] + i * sizeof(u64);
2513
offset += flush->header.length;
2514
2515
/* flush1 (dimm1) */
2516
flush = nfit_buf + offset;
2517
flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
2518
flush->header.length = flush_hint_size;
2519
flush->device_handle = handle[1];
2520
flush->hint_count = NUM_HINTS;
2521
for (i = 0; i < NUM_HINTS; i++)
2522
flush->hint_address[i] = t->flush_dma[1] + i * sizeof(u64);
2523
offset += flush->header.length;
2524
2525
/* flush2 (dimm2) */
2526
flush = nfit_buf + offset;
2527
flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
2528
flush->header.length = flush_hint_size;
2529
flush->device_handle = handle[2];
2530
flush->hint_count = NUM_HINTS;
2531
for (i = 0; i < NUM_HINTS; i++)
2532
flush->hint_address[i] = t->flush_dma[2] + i * sizeof(u64);
2533
offset += flush->header.length;
2534
2535
/* flush3 (dimm3) */
2536
flush = nfit_buf + offset;
2537
flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
2538
flush->header.length = flush_hint_size;
2539
flush->device_handle = handle[3];
2540
flush->hint_count = NUM_HINTS;
2541
for (i = 0; i < NUM_HINTS; i++)
2542
flush->hint_address[i] = t->flush_dma[3] + i * sizeof(u64);
2543
offset += flush->header.length;
2544
2545
/* platform capabilities */
2546
pcap = nfit_buf + offset;
2547
pcap->header.type = ACPI_NFIT_TYPE_CAPABILITIES;
2548
pcap->header.length = sizeof(*pcap);
2549
pcap->highest_capability = 1;
2550
pcap->capabilities = ACPI_NFIT_CAPABILITY_MEM_FLUSH;
2551
offset += pcap->header.length;
2552
2553
if (t->setup_hotplug) {
2554
/* dcr-descriptor4: blk */
2555
dcr = nfit_buf + offset;
2556
dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
2557
dcr->header.length = sizeof(*dcr);
2558
dcr->region_index = 8+1;
2559
dcr_common_init(dcr);
2560
dcr->serial_number = ~handle[4];
2561
dcr->code = NFIT_FIC_BLK;
2562
dcr->windows = 1;
2563
dcr->window_size = DCR_SIZE;
2564
dcr->command_offset = 0;
2565
dcr->command_size = 8;
2566
dcr->status_offset = 8;
2567
dcr->status_size = 4;
2568
offset += dcr->header.length;
2569
2570
/* dcr-descriptor4: pmem */
2571
dcr = nfit_buf + offset;
2572
dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
2573
dcr->header.length = offsetof(struct acpi_nfit_control_region,
2574
window_size);
2575
dcr->region_index = 9+1;
2576
dcr_common_init(dcr);
2577
dcr->serial_number = ~handle[4];
2578
dcr->code = NFIT_FIC_BYTEN;
2579
dcr->windows = 0;
2580
offset += dcr->header.length;
2581
2582
/* bdw4 (spa/dcr4, dimm4) */
2583
bdw = nfit_buf + offset;
2584
bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
2585
bdw->header.length = sizeof(*bdw);
2586
bdw->region_index = 8+1;
2587
bdw->windows = 1;
2588
bdw->offset = 0;
2589
bdw->size = BDW_SIZE;
2590
bdw->capacity = DIMM_SIZE;
2591
bdw->start_address = 0;
2592
offset += bdw->header.length;
2593
2594
/* spa10 (dcr4) dimm4 */
2595
spa = nfit_buf + offset;
2596
spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
2597
spa->header.length = sizeof_spa(spa);
2598
memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
2599
spa->range_index = 10+1;
2600
spa->address = t->dcr_dma[4];
2601
spa->length = DCR_SIZE;
2602
offset += spa->header.length;
2603
2604
/*
2605
* spa11 (single-dimm interleave for hotplug, note storage
2606
* does not actually alias the related block-data-window
2607
* regions)
2608
*/
2609
spa = nfit_buf + offset;
2610
spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
2611
spa->header.length = sizeof_spa(spa);
2612
memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
2613
spa->range_index = 11+1;
2614
spa->address = t->spa_set_dma[2];
2615
spa->length = SPA0_SIZE;
2616
offset += spa->header.length;
2617
2618
/* spa12 (bdw for dcr4) dimm4 */
2619
spa = nfit_buf + offset;
2620
spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
2621
spa->header.length = sizeof_spa(spa);
2622
memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
2623
spa->range_index = 12+1;
2624
spa->address = t->dimm_dma[4];
2625
spa->length = DIMM_SIZE;
2626
offset += spa->header.length;
2627
2628
/* mem-region14 (spa/dcr4, dimm4) */
2629
memdev = nfit_buf + offset;
2630
memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
2631
memdev->header.length = sizeof(*memdev);
2632
memdev->device_handle = handle[4];
2633
memdev->physical_id = 4;
2634
memdev->region_id = 0;
2635
memdev->range_index = 10+1;
2636
memdev->region_index = 8+1;
2637
memdev->region_size = 0;
2638
memdev->region_offset = 0;
2639
memdev->address = 0;
2640
memdev->interleave_index = 0;
2641
memdev->interleave_ways = 1;
2642
offset += memdev->header.length;
2643
2644
/* mem-region15 (spa11, dimm4) */
2645
memdev = nfit_buf + offset;
2646
memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
2647
memdev->header.length = sizeof(*memdev);
2648
memdev->device_handle = handle[4];
2649
memdev->physical_id = 4;
2650
memdev->region_id = 0;
2651
memdev->range_index = 11+1;
2652
memdev->region_index = 9+1;
2653
memdev->region_size = SPA0_SIZE;
2654
memdev->region_offset = (1ULL << 48);
2655
memdev->address = 0;
2656
memdev->interleave_index = 0;
2657
memdev->interleave_ways = 1;
2658
memdev->flags = ACPI_NFIT_MEM_HEALTH_ENABLED;
2659
offset += memdev->header.length;
2660
2661
/* mem-region16 (spa/bdw4, dimm4) */
2662
memdev = nfit_buf + offset;
2663
memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
2664
memdev->header.length = sizeof(*memdev);
2665
memdev->device_handle = handle[4];
2666
memdev->physical_id = 4;
2667
memdev->region_id = 0;
2668
memdev->range_index = 12+1;
2669
memdev->region_index = 8+1;
2670
memdev->region_size = 0;
2671
memdev->region_offset = 0;
2672
memdev->address = 0;
2673
memdev->interleave_index = 0;
2674
memdev->interleave_ways = 1;
2675
offset += memdev->header.length;
2676
2677
/* flush3 (dimm4) */
2678
flush = nfit_buf + offset;
2679
flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
2680
flush->header.length = flush_hint_size;
2681
flush->device_handle = handle[4];
2682
flush->hint_count = NUM_HINTS;
2683
for (i = 0; i < NUM_HINTS; i++)
2684
flush->hint_address[i] = t->flush_dma[4]
2685
+ i * sizeof(u64);
2686
offset += flush->header.length;
2687
2688
/* sanity check to make sure we've filled the buffer */
2689
WARN_ON(offset != t->nfit_size);
2690
}
2691
2692
t->nfit_filled = offset;
2693
2694
post_ars_status(&t->ars_state, &t->badrange, t->spa_set_dma[0],
2695
SPA0_SIZE);
2696
2697
acpi_desc = &t->acpi_desc;
2698
set_bit(ND_CMD_GET_CONFIG_SIZE, &acpi_desc->dimm_cmd_force_en);
2699
set_bit(ND_CMD_GET_CONFIG_DATA, &acpi_desc->dimm_cmd_force_en);
2700
set_bit(ND_CMD_SET_CONFIG_DATA, &acpi_desc->dimm_cmd_force_en);
2701
set_bit(ND_INTEL_SMART, &acpi_desc->dimm_cmd_force_en);
2702
set_bit(ND_INTEL_SMART_THRESHOLD, &acpi_desc->dimm_cmd_force_en);
2703
set_bit(ND_INTEL_SMART_SET_THRESHOLD, &acpi_desc->dimm_cmd_force_en);
2704
set_bit(ND_INTEL_SMART_INJECT, &acpi_desc->dimm_cmd_force_en);
2705
set_bit(ND_CMD_ARS_CAP, &acpi_desc->bus_cmd_force_en);
2706
set_bit(ND_CMD_ARS_START, &acpi_desc->bus_cmd_force_en);
2707
set_bit(ND_CMD_ARS_STATUS, &acpi_desc->bus_cmd_force_en);
2708
set_bit(ND_CMD_CLEAR_ERROR, &acpi_desc->bus_cmd_force_en);
2709
set_bit(ND_CMD_CALL, &acpi_desc->bus_cmd_force_en);
2710
set_bit(NFIT_CMD_TRANSLATE_SPA, &acpi_desc->bus_dsm_mask);
2711
set_bit(NFIT_CMD_ARS_INJECT_SET, &acpi_desc->bus_dsm_mask);
2712
set_bit(NFIT_CMD_ARS_INJECT_CLEAR, &acpi_desc->bus_dsm_mask);
2713
set_bit(NFIT_CMD_ARS_INJECT_GET, &acpi_desc->bus_dsm_mask);
2714
set_bit(ND_INTEL_FW_GET_INFO, &acpi_desc->dimm_cmd_force_en);
2715
set_bit(ND_INTEL_FW_START_UPDATE, &acpi_desc->dimm_cmd_force_en);
2716
set_bit(ND_INTEL_FW_SEND_DATA, &acpi_desc->dimm_cmd_force_en);
2717
set_bit(ND_INTEL_FW_FINISH_UPDATE, &acpi_desc->dimm_cmd_force_en);
2718
set_bit(ND_INTEL_FW_FINISH_QUERY, &acpi_desc->dimm_cmd_force_en);
2719
set_bit(ND_INTEL_ENABLE_LSS_STATUS, &acpi_desc->dimm_cmd_force_en);
2720
set_bit(NVDIMM_INTEL_GET_SECURITY_STATE,
2721
&acpi_desc->dimm_cmd_force_en);
2722
set_bit(NVDIMM_INTEL_SET_PASSPHRASE, &acpi_desc->dimm_cmd_force_en);
2723
set_bit(NVDIMM_INTEL_DISABLE_PASSPHRASE,
2724
&acpi_desc->dimm_cmd_force_en);
2725
set_bit(NVDIMM_INTEL_UNLOCK_UNIT, &acpi_desc->dimm_cmd_force_en);
2726
set_bit(NVDIMM_INTEL_FREEZE_LOCK, &acpi_desc->dimm_cmd_force_en);
2727
set_bit(NVDIMM_INTEL_SECURE_ERASE, &acpi_desc->dimm_cmd_force_en);
2728
set_bit(NVDIMM_INTEL_OVERWRITE, &acpi_desc->dimm_cmd_force_en);
2729
set_bit(NVDIMM_INTEL_QUERY_OVERWRITE, &acpi_desc->dimm_cmd_force_en);
2730
set_bit(NVDIMM_INTEL_SET_MASTER_PASSPHRASE,
2731
&acpi_desc->dimm_cmd_force_en);
2732
set_bit(NVDIMM_INTEL_MASTER_SECURE_ERASE,
2733
&acpi_desc->dimm_cmd_force_en);
2734
set_bit(NVDIMM_INTEL_FW_ACTIVATE_DIMMINFO, &acpi_desc->dimm_cmd_force_en);
2735
set_bit(NVDIMM_INTEL_FW_ACTIVATE_ARM, &acpi_desc->dimm_cmd_force_en);
2736
2737
acpi_mask = &acpi_desc->family_dsm_mask[NVDIMM_BUS_FAMILY_INTEL];
2738
set_bit(NVDIMM_BUS_INTEL_FW_ACTIVATE_BUSINFO, acpi_mask);
2739
set_bit(NVDIMM_BUS_INTEL_FW_ACTIVATE, acpi_mask);
2740
}
2741
2742
/*
 * Fill bus1's NFIT buffer: one flat PMEM range with no BLK aliasing,
 * a virtual-CD range, and two dimms (handle[5], handle[6]) that model
 * various failure flags.  The fill order and entry sizes must match
 * the size arithmetic in nfit_test1_alloc() exactly.
 */
static void nfit_test1_setup(struct nfit_test *t)
{
	size_t offset;
	void *nfit_buf = t->nfit_buf;
	struct acpi_nfit_memory_map *memdev;
	struct acpi_nfit_control_region *dcr;
	struct acpi_nfit_system_address *spa;
	struct acpi_nfit_desc *acpi_desc;

	offset = 0;
	/* spa0 (flat range with no bdw aliasing) */
	spa = nfit_buf + offset;
	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
	spa->header.length = sizeof_spa(spa);
	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
	spa->range_index = 0+1;
	spa->address = t->spa_set_dma[0];
	spa->length = SPA2_SIZE;
	offset += spa->header.length;

	/* virtual cd region */
	spa = nfit_buf + offset;
	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
	spa->header.length = sizeof_spa(spa);
	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_VCD), 16);
	/* range_index 0: the VCD range is not part of an interleave set */
	spa->range_index = 0;
	spa->address = t->spa_set_dma[1];
	spa->length = SPA_VCD_SIZE;
	offset += spa->header.length;

	/* mem-region0 (spa0, dimm0) */
	memdev = nfit_buf + offset;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[5];
	memdev->physical_id = 0;
	memdev->region_id = 0;
	memdev->range_index = 0+1;
	memdev->region_index = 0+1;
	memdev->region_size = SPA2_SIZE;
	memdev->region_offset = 0;
	memdev->address = 0;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 1;
	/* model a dimm with save/restore/flush failures and not armed */
	memdev->flags = ACPI_NFIT_MEM_SAVE_FAILED | ACPI_NFIT_MEM_RESTORE_FAILED
		| ACPI_NFIT_MEM_FLUSH_FAILED | ACPI_NFIT_MEM_HEALTH_OBSERVED
		| ACPI_NFIT_MEM_NOT_ARMED;
	offset += memdev->header.length;

	/* dcr-descriptor0 */
	dcr = nfit_buf + offset;
	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
	/* truncated descriptor: no BLK windows, so stop at window_size */
	dcr->header.length = offsetof(struct acpi_nfit_control_region,
			window_size);
	dcr->region_index = 0+1;
	dcr_common_init(dcr);
	dcr->serial_number = ~handle[5];
	dcr->code = NFIT_FIC_BYTE;
	dcr->windows = 0;
	offset += dcr->header.length;

	/* second dimm: mapping marked failed and not tied to any range */
	memdev = nfit_buf + offset;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[6];
	memdev->physical_id = 0;
	memdev->region_id = 0;
	memdev->range_index = 0;
	memdev->region_index = 0+2;
	memdev->region_size = SPA2_SIZE;
	memdev->region_offset = 0;
	memdev->address = 0;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 1;
	memdev->flags = ACPI_NFIT_MEM_MAP_FAILED;
	offset += memdev->header.length;

	/* dcr-descriptor1 */
	dcr = nfit_buf + offset;
	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
	dcr->header.length = offsetof(struct acpi_nfit_control_region,
			window_size);
	dcr->region_index = 0+2;
	dcr_common_init(dcr);
	dcr->serial_number = ~handle[6];
	dcr->code = NFIT_FIC_BYTE;
	dcr->windows = 0;
	offset += dcr->header.length;

	/* sanity check to make sure we've filled the buffer */
	WARN_ON(offset != t->nfit_size);

	t->nfit_filled = offset;

	/* pre-load a scrub result covering the whole PMEM range */
	post_ars_status(&t->ars_state, &t->badrange, t->spa_set_dma[0],
			SPA2_SIZE);

	/* force-enable the commands the bus1 tests exercise */
	acpi_desc = &t->acpi_desc;
	set_bit(ND_CMD_ARS_CAP, &acpi_desc->bus_cmd_force_en);
	set_bit(ND_CMD_ARS_START, &acpi_desc->bus_cmd_force_en);
	set_bit(ND_CMD_ARS_STATUS, &acpi_desc->bus_cmd_force_en);
	set_bit(ND_CMD_CLEAR_ERROR, &acpi_desc->bus_cmd_force_en);
	set_bit(ND_INTEL_ENABLE_LSS_STATUS, &acpi_desc->dimm_cmd_force_en);
	set_bit(ND_CMD_GET_CONFIG_SIZE, &acpi_desc->dimm_cmd_force_en);
	set_bit(ND_CMD_GET_CONFIG_DATA, &acpi_desc->dimm_cmd_force_en);
	set_bit(ND_CMD_SET_CONFIG_DATA, &acpi_desc->dimm_cmd_force_en);
}
2849
2850
/* stand-in ACPI handle; nfit_test_evaluate_dsm() only accepts its address */
static unsigned long nfit_ctl_handle;
2851
2852
/*
 * Canned _DSM reply returned by nfit_test_evaluate_dsm(); staged by
 * setup_result().  Made static: a file-local global with a name this
 * generic must not leak into the kernel global namespace.
 */
static union acpi_object *result;
2853
2854
static union acpi_object *nfit_test_evaluate_dsm(acpi_handle handle,
2855
const guid_t *guid, u64 rev, u64 func, union acpi_object *argv4)
2856
{
2857
if (handle != &nfit_ctl_handle)
2858
return ERR_PTR(-ENXIO);
2859
2860
return result;
2861
}
2862
2863
/*
 * Stage a canned ACPI buffer-object reply for the next
 * nfit_test_evaluate_dsm() call.  The payload is copied out of @buf
 * and @buf is then zeroed so callers can verify the data really came
 * back through @result.  Returns 0 on success, -ENOMEM on allocation
 * failure.
 *
 * NOTE(review): successive calls leak the previously staged @result
 * unless the caller frees it — tolerable in a test module, but verify
 * against the call sites.
 *
 * Fixes: stray comma operator after the type assignment (was
 * "= ACPI_TYPE_BUFFER,"), and sets the type via the same 'buffer'
 * union member the rest of the function uses.
 */
static int setup_result(void *buf, size_t size)
{
	/* payload lives immediately after the acpi_object header */
	result = kmalloc(sizeof(union acpi_object) + size, GFP_KERNEL);
	if (!result)
		return -ENOMEM;
	result->buffer.type = ACPI_TYPE_BUFFER;
	result->buffer.pointer = (void *) (result + 1);
	result->buffer.length = size;
	memcpy(result->buffer.pointer, buf, size);
	memset(buf, 0, size);
	return 0;
}
2875
2876
/*
 * Exercise acpi_nfit_ctl() against stubbed-out ACPI/nvdimm objects using
 * the canned-result plumbing above (setup_result() +
 * nfit_test_evaluate_dsm()). Each step seeds 'result' with an expected
 * payload, issues one command, and checks both the return codes and that
 * the payload round-tripped through the stubbed DSM.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, -EIO when a
 * command check fails (details via dev_dbg()).
 */
static int nfit_ctl_test(struct device *dev)
{
	int rc, cmd_rc;
	struct nvdimm *nvdimm;
	struct acpi_device *adev;
	struct nfit_mem *nfit_mem;
	struct nd_ars_record *record;
	struct acpi_nfit_desc *acpi_desc;
	const u64 test_val = 0x0123456789abcdefULL;
	unsigned long mask, cmd_size, offset;
	/* scratch area: one struct per command type, overlaid on 'buf' */
	struct nfit_ctl_test_cmd {
		struct nd_cmd_pkg pkg;
		union {
			struct nd_cmd_get_config_size cfg_size;
			struct nd_cmd_clear_error clear_err;
			struct nd_cmd_ars_status ars_stat;
			struct nd_cmd_ars_cap ars_cap;
			struct nd_intel_bus_fw_activate_businfo fwa_info;
			char buf[sizeof(struct nd_cmd_ars_status)
				+ sizeof(struct nd_ars_record)];
		};
	} cmd;

	/* minimal acpi_device: just enough for handle matching and dev_dbg() */
	adev = devm_kzalloc(dev, sizeof(*adev), GFP_KERNEL);
	if (!adev)
		return -ENOMEM;
	*adev = (struct acpi_device) {
		.handle = &nfit_ctl_handle,
		.dev = {
			.init_name = "test-adev",
		},
	};

	/* bus descriptor advertising the command/family masks under test */
	acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
	if (!acpi_desc)
		return -ENOMEM;
	*acpi_desc = (struct acpi_nfit_desc) {
		.nd_desc = {
			.cmd_mask = 1UL << ND_CMD_ARS_CAP
				| 1UL << ND_CMD_ARS_START
				| 1UL << ND_CMD_ARS_STATUS
				| 1UL << ND_CMD_CLEAR_ERROR
				| 1UL << ND_CMD_CALL,
			.module = THIS_MODULE,
			.provider_name = "ACPI.NFIT",
			.ndctl = acpi_nfit_ctl,
			.bus_family_mask = 1UL << NVDIMM_BUS_FAMILY_NFIT
				| 1UL << NVDIMM_BUS_FAMILY_INTEL,
		},
		.bus_dsm_mask = 1UL << NFIT_CMD_TRANSLATE_SPA
			| 1UL << NFIT_CMD_ARS_INJECT_SET
			| 1UL << NFIT_CMD_ARS_INJECT_CLEAR
			| 1UL << NFIT_CMD_ARS_INJECT_GET,
		.family_dsm_mask[NVDIMM_BUS_FAMILY_INTEL] =
			NVDIMM_BUS_INTEL_FW_ACTIVATE_CMDMASK,
		.dev = &adev->dev,
	};

	nfit_mem = devm_kzalloc(dev, sizeof(*nfit_mem), GFP_KERNEL);
	if (!nfit_mem)
		return -ENOMEM;

	/* per-dimm command mask shared by nfit_mem and nvdimm below */
	mask = 1UL << ND_CMD_SMART | 1UL << ND_CMD_SMART_THRESHOLD
		| 1UL << ND_CMD_DIMM_FLAGS | 1UL << ND_CMD_GET_CONFIG_SIZE
		| 1UL << ND_CMD_GET_CONFIG_DATA | 1UL << ND_CMD_SET_CONFIG_DATA
		| 1UL << ND_CMD_VENDOR;
	*nfit_mem = (struct nfit_mem) {
		.adev = adev,
		.family = NVDIMM_FAMILY_INTEL,
		.dsm_mask = mask,
	};

	nvdimm = devm_kzalloc(dev, sizeof(*nvdimm), GFP_KERNEL);
	if (!nvdimm)
		return -ENOMEM;
	*nvdimm = (struct nvdimm) {
		.provider_data = nfit_mem,
		.cmd_mask = mask,
		.dev = {
			.init_name = "test-dimm",
		},
	};


	/* basic checkout of a typical 'get config size' command */
	cmd_size = sizeof(cmd.cfg_size);
	cmd.cfg_size = (struct nd_cmd_get_config_size) {
		.status = 0,
		.config_size = SZ_128K,
		.max_xfer = SZ_4K,
	};
	rc = setup_result(cmd.buf, cmd_size);
	if (rc)
		return rc;
	rc = acpi_nfit_ctl(&acpi_desc->nd_desc, nvdimm, ND_CMD_GET_CONFIG_SIZE,
			cmd.buf, cmd_size, &cmd_rc);

	/* payload must round-trip unchanged through the stubbed DSM */
	if (rc < 0 || cmd_rc || cmd.cfg_size.status != 0
			|| cmd.cfg_size.config_size != SZ_128K
			|| cmd.cfg_size.max_xfer != SZ_4K) {
		dev_dbg(dev, "%s: failed at: %d rc: %d cmd_rc: %d\n",
				__func__, __LINE__, rc, cmd_rc);
		return -EIO;
	}


	/* test ars_status with zero output */
	cmd_size = offsetof(struct nd_cmd_ars_status, address);
	cmd.ars_stat = (struct nd_cmd_ars_status) {
		.out_length = 0,
	};
	rc = setup_result(cmd.buf, cmd_size);
	if (rc)
		return rc;
	rc = acpi_nfit_ctl(&acpi_desc->nd_desc, NULL, ND_CMD_ARS_STATUS,
			cmd.buf, cmd_size, &cmd_rc);

	if (rc < 0 || cmd_rc) {
		dev_dbg(dev, "%s: failed at: %d rc: %d cmd_rc: %d\n",
				__func__, __LINE__, rc, cmd_rc);
		return -EIO;
	}


	/* test ars_cap with benign extended status */
	cmd_size = sizeof(cmd.ars_cap);
	cmd.ars_cap = (struct nd_cmd_ars_cap) {
		.status = ND_ARS_PERSISTENT << 16,
	};
	/* seed the result starting at 'status' so earlier fields stay zero */
	offset = offsetof(struct nd_cmd_ars_cap, status);
	rc = setup_result(cmd.buf + offset, cmd_size - offset);
	if (rc)
		return rc;
	rc = acpi_nfit_ctl(&acpi_desc->nd_desc, NULL, ND_CMD_ARS_CAP,
			cmd.buf, cmd_size, &cmd_rc);

	/* benign extended status must not be reported as a failure */
	if (rc < 0 || cmd_rc) {
		dev_dbg(dev, "%s: failed at: %d rc: %d cmd_rc: %d\n",
				__func__, __LINE__, rc, cmd_rc);
		return -EIO;
	}


	/* test ars_status with 'status' trimmed from 'out_length' */
	cmd_size = sizeof(cmd.ars_stat) + sizeof(struct nd_ars_record);
	cmd.ars_stat = (struct nd_cmd_ars_status) {
		/* 4 == size of the 'status' field being trimmed */
		.out_length = cmd_size - 4,
	};
	record = &cmd.ars_stat.records[0];
	*record = (struct nd_ars_record) {
		.length = test_val,
	};
	rc = setup_result(cmd.buf, cmd_size);
	if (rc)
		return rc;
	rc = acpi_nfit_ctl(&acpi_desc->nd_desc, NULL, ND_CMD_ARS_STATUS,
			cmd.buf, cmd_size, &cmd_rc);

	if (rc < 0 || cmd_rc || record->length != test_val) {
		dev_dbg(dev, "%s: failed at: %d rc: %d cmd_rc: %d\n",
				__func__, __LINE__, rc, cmd_rc);
		return -EIO;
	}


	/* test ars_status with 'Output (Size)' including 'status' */
	cmd_size = sizeof(cmd.ars_stat) + sizeof(struct nd_ars_record);
	cmd.ars_stat = (struct nd_cmd_ars_status) {
		.out_length = cmd_size,
	};
	record = &cmd.ars_stat.records[0];
	*record = (struct nd_ars_record) {
		.length = test_val,
	};
	rc = setup_result(cmd.buf, cmd_size);
	if (rc)
		return rc;
	rc = acpi_nfit_ctl(&acpi_desc->nd_desc, NULL, ND_CMD_ARS_STATUS,
			cmd.buf, cmd_size, &cmd_rc);

	if (rc < 0 || cmd_rc || record->length != test_val) {
		dev_dbg(dev, "%s: failed at: %d rc: %d cmd_rc: %d\n",
				__func__, __LINE__, rc, cmd_rc);
		return -EIO;
	}


	/* test extended status for get_config_size results in failure */
	cmd_size = sizeof(cmd.cfg_size);
	cmd.cfg_size = (struct nd_cmd_get_config_size) {
		.status = 1 << 16,
	};
	rc = setup_result(cmd.buf, cmd_size);
	if (rc)
		return rc;
	rc = acpi_nfit_ctl(&acpi_desc->nd_desc, nvdimm, ND_CMD_GET_CONFIG_SIZE,
			cmd.buf, cmd_size, &cmd_rc);

	/* here a negative cmd_rc is the expected outcome */
	if (rc < 0 || cmd_rc >= 0) {
		dev_dbg(dev, "%s: failed at: %d rc: %d cmd_rc: %d\n",
				__func__, __LINE__, rc, cmd_rc);
		return -EIO;
	}

	/* test clear error */
	cmd_size = sizeof(cmd.clear_err);
	cmd.clear_err = (struct nd_cmd_clear_error) {
		.length = 512,
		.cleared = 512,
	};
	rc = setup_result(cmd.buf, cmd_size);
	if (rc)
		return rc;
	rc = acpi_nfit_ctl(&acpi_desc->nd_desc, NULL, ND_CMD_CLEAR_ERROR,
			cmd.buf, cmd_size, &cmd_rc);
	if (rc < 0 || cmd_rc) {
		dev_dbg(dev, "%s: failed at: %d rc: %d cmd_rc: %d\n",
				__func__, __LINE__, rc, cmd_rc);
		return -EIO;
	}

	/* test firmware activate bus info */
	cmd_size = sizeof(cmd.fwa_info);
	/* ND_CMD_CALL path: payload wrapped in an nd_cmd_pkg envelope */
	cmd = (struct nfit_ctl_test_cmd) {
		.pkg = {
			.nd_command = NVDIMM_BUS_INTEL_FW_ACTIVATE_BUSINFO,
			.nd_family = NVDIMM_BUS_FAMILY_INTEL,
			.nd_size_out = cmd_size,
			.nd_fw_size = cmd_size,
		},
		.fwa_info = {
			.state = ND_INTEL_FWA_IDLE,
			.capability = ND_INTEL_BUS_FWA_CAP_FWQUIESCE
				| ND_INTEL_BUS_FWA_CAP_OSQUIESCE,
			.activate_tmo = 1,
			.cpu_quiesce_tmo = 1,
			.io_quiesce_tmo = 1,
			.max_quiesce_tmo = 1,
		},
	};
	rc = setup_result(cmd.buf, cmd_size);
	if (rc)
		return rc;
	rc = acpi_nfit_ctl(&acpi_desc->nd_desc, NULL, ND_CMD_CALL,
			&cmd, sizeof(cmd.pkg) + cmd_size, &cmd_rc);
	if (rc < 0 || cmd_rc) {
		dev_dbg(dev, "%s: failed at: %d rc: %d cmd_rc: %d\n",
				__func__, __LINE__, rc, cmd_rc);
		return -EIO;
	}

	return 0;
}
3129
3130
/*
 * Probe one fake nfit_test platform device: run the acpi_nfit_ctl()
 * self-test (instance 0 only), allocate the per-instance backing
 * arrays, build the NFIT tables via the instance's setup() callback,
 * register the resulting nvdimm bus, and finally re-run setup in
 * "hotplug" mode for instance 0 to exercise _FIT notification.
 *
 * Returns 0 on success or a negative errno.
 */
static int nfit_test_probe(struct platform_device *pdev)
{
	struct nvdimm_bus_descriptor *nd_desc;
	struct acpi_nfit_desc *acpi_desc;
	struct device *dev = &pdev->dev;
	struct nfit_test *nfit_test;
	struct nfit_mem *nfit_mem;
	union acpi_object *obj;
	int rc;

	/* the ioctl self-test only runs against the first instance */
	if (strcmp(dev_name(&pdev->dev), "nfit_test.0") == 0) {
		rc = nfit_ctl_test(&pdev->dev);
		if (rc)
			return rc;
	}

	nfit_test = to_nfit_test(&pdev->dev);

	/* common alloc */
	if (nfit_test->num_dcr) {
		int num = nfit_test->num_dcr;

		nfit_test->dimm = devm_kcalloc(dev, num, sizeof(void *),
				GFP_KERNEL);
		nfit_test->dimm_dma = devm_kcalloc(dev, num, sizeof(dma_addr_t),
				GFP_KERNEL);
		nfit_test->flush = devm_kcalloc(dev, num, sizeof(void *),
				GFP_KERNEL);
		nfit_test->flush_dma = devm_kcalloc(dev, num, sizeof(dma_addr_t),
				GFP_KERNEL);
		nfit_test->label = devm_kcalloc(dev, num, sizeof(void *),
				GFP_KERNEL);
		nfit_test->label_dma = devm_kcalloc(dev, num,
				sizeof(dma_addr_t), GFP_KERNEL);
		nfit_test->dcr = devm_kcalloc(dev, num,
				sizeof(struct nfit_test_dcr *), GFP_KERNEL);
		nfit_test->dcr_dma = devm_kcalloc(dev, num,
				sizeof(dma_addr_t), GFP_KERNEL);
		nfit_test->smart = devm_kcalloc(dev, num,
				sizeof(struct nd_intel_smart), GFP_KERNEL);
		nfit_test->smart_threshold = devm_kcalloc(dev, num,
				sizeof(struct nd_intel_smart_threshold),
				GFP_KERNEL);
		nfit_test->fw = devm_kcalloc(dev, num,
				sizeof(struct nfit_test_fw), GFP_KERNEL);
		/*
		 * NOTE(review): 'smart' and 'smart_threshold' are absent
		 * from this success check — confirm that is intentional
		 * and not a missed -ENOMEM path.
		 */
		if (nfit_test->dimm && nfit_test->dimm_dma && nfit_test->label
				&& nfit_test->label_dma && nfit_test->dcr
				&& nfit_test->dcr_dma && nfit_test->flush
				&& nfit_test->flush_dma
				&& nfit_test->fw)
			/* pass */;
		else
			return -ENOMEM;
	}

	if (nfit_test->num_pm) {
		int num = nfit_test->num_pm;

		nfit_test->spa_set = devm_kcalloc(dev, num, sizeof(void *),
				GFP_KERNEL);
		nfit_test->spa_set_dma = devm_kcalloc(dev, num,
				sizeof(dma_addr_t), GFP_KERNEL);
		if (nfit_test->spa_set && nfit_test->spa_set_dma)
			/* pass */;
		else
			return -ENOMEM;
	}

	/* per-nfit specific alloc */
	if (nfit_test->alloc(nfit_test))
		return -ENOMEM;

	/* build the NFIT tables, then register them as an nvdimm bus */
	nfit_test->setup(nfit_test);
	acpi_desc = &nfit_test->acpi_desc;
	acpi_nfit_desc_init(acpi_desc, &pdev->dev);
	nd_desc = &acpi_desc->nd_desc;
	nd_desc->provider_name = NULL;
	nd_desc->module = THIS_MODULE;
	/* route ioctls through the test override instead of real ACPI */
	nd_desc->ndctl = nfit_test_ctl;

	rc = acpi_nfit_init(acpi_desc, nfit_test->nfit_buf,
			nfit_test->nfit_filled);
	if (rc)
		return rc;

	rc = devm_add_action_or_reset(&pdev->dev, acpi_nfit_shutdown, acpi_desc);
	if (rc)
		return rc;

	/* only instance 0 exercises the hotplug/_FIT notification path */
	if (nfit_test->setup != nfit_test0_setup)
		return 0;

	nfit_test->setup_hotplug = 1;
	nfit_test->setup(nfit_test);

	/* hand the regenerated tables back via a fake _FIT object */
	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return -ENOMEM;
	obj->type = ACPI_TYPE_BUFFER;
	obj->buffer.length = nfit_test->nfit_size;
	obj->buffer.pointer = nfit_test->nfit_buf;
	*(nfit_test->_fit) = obj;
	__acpi_nfit_notify(&pdev->dev, nfit_test, 0x80);

	/* associate dimm devices with nfit_mem data for notification testing */
	mutex_lock(&acpi_desc->init_mutex);
	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
		u32 nfit_handle = __to_nfit_memdev(nfit_mem)->device_handle;
		int i;

		for (i = 0; i < ARRAY_SIZE(handle); i++)
			if (nfit_handle == handle[i])
				dev_set_drvdata(nfit_test->dimm_dev[i],
						nfit_mem);
	}
	mutex_unlock(&acpi_desc->init_mutex);

	return 0;
}
3249
3250
/* platform_device ->release callback: free the containing nfit_test. */
static void nfit_test_release(struct device *dev)
{
	kfree(to_nfit_test(dev));
}
3256
3257
/* Match table binding nfit_test_driver to the devices created in init. */
static const struct platform_device_id nfit_test_id[] = {
	{ KBUILD_MODNAME },
	{ },
};
3261
3262
/* Platform driver whose probe builds and registers the fake nvdimm buses. */
static struct platform_driver nfit_test_driver = {
	.probe = nfit_test_probe,
	.driver = {
		.name = KBUILD_MODNAME,
	},
	.id_table = nfit_test_id,
};
3269
3270
static __init int nfit_test_init(void)
3271
{
3272
int rc, i;
3273
3274
pmem_test();
3275
libnvdimm_test();
3276
acpi_nfit_test();
3277
device_dax_test();
3278
dax_pmem_test();
3279
3280
nfit_test_setup(nfit_test_lookup, nfit_test_evaluate_dsm);
3281
3282
nfit_wq = create_singlethread_workqueue("nfit");
3283
if (!nfit_wq)
3284
return -ENOMEM;
3285
3286
rc = class_register(&nfit_test_dimm);
3287
if (rc)
3288
goto err_register;
3289
3290
nfit_pool = gen_pool_create(ilog2(SZ_4M), NUMA_NO_NODE);
3291
if (!nfit_pool) {
3292
rc = -ENOMEM;
3293
goto err_register;
3294
}
3295
3296
if (gen_pool_add(nfit_pool, SZ_4G, SZ_4G, NUMA_NO_NODE)) {
3297
rc = -ENOMEM;
3298
goto err_register;
3299
}
3300
3301
for (i = 0; i < NUM_NFITS; i++) {
3302
struct nfit_test *nfit_test;
3303
struct platform_device *pdev;
3304
3305
nfit_test = kzalloc(sizeof(*nfit_test), GFP_KERNEL);
3306
if (!nfit_test) {
3307
rc = -ENOMEM;
3308
goto err_register;
3309
}
3310
INIT_LIST_HEAD(&nfit_test->resources);
3311
badrange_init(&nfit_test->badrange);
3312
switch (i) {
3313
case 0:
3314
nfit_test->num_pm = NUM_PM;
3315
nfit_test->dcr_idx = 0;
3316
nfit_test->num_dcr = NUM_DCR;
3317
nfit_test->alloc = nfit_test0_alloc;
3318
nfit_test->setup = nfit_test0_setup;
3319
break;
3320
case 1:
3321
nfit_test->num_pm = 2;
3322
nfit_test->dcr_idx = NUM_DCR;
3323
nfit_test->num_dcr = 2;
3324
nfit_test->alloc = nfit_test1_alloc;
3325
nfit_test->setup = nfit_test1_setup;
3326
break;
3327
default:
3328
rc = -EINVAL;
3329
goto err_register;
3330
}
3331
pdev = &nfit_test->pdev;
3332
pdev->name = KBUILD_MODNAME;
3333
pdev->id = i;
3334
pdev->dev.release = nfit_test_release;
3335
rc = platform_device_register(pdev);
3336
if (rc) {
3337
put_device(&pdev->dev);
3338
goto err_register;
3339
}
3340
get_device(&pdev->dev);
3341
3342
rc = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
3343
if (rc)
3344
goto err_register;
3345
3346
instances[i] = nfit_test;
3347
INIT_WORK(&nfit_test->work, uc_error_notify);
3348
}
3349
3350
rc = platform_driver_register(&nfit_test_driver);
3351
if (rc)
3352
goto err_register;
3353
return 0;
3354
3355
err_register:
3356
if (nfit_pool)
3357
gen_pool_destroy(nfit_pool);
3358
3359
destroy_workqueue(nfit_wq);
3360
for (i = 0; i < NUM_NFITS; i++)
3361
if (instances[i])
3362
platform_device_unregister(&instances[i]->pdev);
3363
nfit_test_teardown();
3364
for (i = 0; i < NUM_NFITS; i++)
3365
if (instances[i])
3366
put_device(&instances[i]->pdev.dev);
3367
3368
return rc;
3369
}
3370
3371
/*
 * Module exit: tear down everything nfit_test_init() set up, roughly in
 * reverse order of acquisition.
 */
static __exit void nfit_test_exit(void)
{
	int i;

	/* quiesce deferred uc_error_notify work before devices go away */
	destroy_workqueue(nfit_wq);
	for (i = 0; i < NUM_NFITS; i++)
		platform_device_unregister(&instances[i]->pdev);
	platform_driver_unregister(&nfit_test_driver);
	nfit_test_teardown();

	gen_pool_destroy(nfit_pool);

	/* drop the extra references taken in nfit_test_init() */
	for (i = 0; i < NUM_NFITS; i++)
		put_device(&instances[i]->pdev.dev);
	class_unregister(&nfit_test_dimm);
}
3387
3388
module_init(nfit_test_init);
3389
module_exit(nfit_test_exit);
3390
MODULE_DESCRIPTION("Test ACPI NFIT devices");
3391
MODULE_LICENSE("GPL v2");
3392
MODULE_AUTHOR("Intel Corporation");
3393
3394