GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/dev/cfi/cfi_core.c
/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2007, Juniper Networks, Inc.
 * Copyright (c) 2012-2013, SRI International
 * All rights reserved.
 *
 * Portions of this software were developed by SRI International and the
 * University of Cambridge Computer Laboratory under DARPA/AFRL contract
 * (FA8750-10-C-0237) ("CTSRD"), as part of the DARPA CRASH research
 * programme.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include "opt_cfi.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/endian.h>
#include <sys/kenv.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/sysctl.h>

#include <machine/bus.h>

#include <dev/cfi/cfi_reg.h>
#include <dev/cfi/cfi_var.h>

static void cfi_add_sysctls(struct cfi_softc *);

extern struct cdevsw cfi_cdevsw;

char cfi_driver_name[] = "cfi";

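/*
 * Read a single, naturally aligned word of sc_width bytes from the
 * flash, without any endian conversion.
 */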
uint32_t
cfi_read_raw(struct cfi_softc *sc, u_int ofs)
{
	uint32_t val;

	ofs &= ~(sc->sc_width - 1);
	switch (sc->sc_width) {
	case 1:
		val = bus_space_read_1(sc->sc_tag, sc->sc_handle, ofs);
		break;
	case 2:
		val = bus_space_read_2(sc->sc_tag, sc->sc_handle, ofs);
		break;
	case 4:
		val = bus_space_read_4(sc->sc_tag, sc->sc_handle, ofs);
		break;
	default:
		val = ~0;
		break;
	}
	return (val);
}

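/*
 * Read a word and convert it from the little-endian byte order used
 * by CFI command and query data, unless the bus hardware swaps bytes
 * itself (CFI_HARDWAREBYTESWAP).
 */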
uint32_t
cfi_read(struct cfi_softc *sc, u_int ofs)
{
	uint32_t val;
	uint16_t sval;

	ofs &= ~(sc->sc_width - 1);
	switch (sc->sc_width) {
	case 1:
		val = bus_space_read_1(sc->sc_tag, sc->sc_handle, ofs);
		break;
	case 2:
		sval = bus_space_read_2(sc->sc_tag, sc->sc_handle, ofs);
#ifdef CFI_HARDWAREBYTESWAP
		val = sval;
#else
		val = le16toh(sval);
#endif
		break;
	case 4:
		val = bus_space_read_4(sc->sc_tag, sc->sc_handle, ofs);
#ifndef CFI_HARDWAREBYTESWAP
		val = le32toh(val);
#endif
		break;
	default:
		val = ~0;
		break;
	}
	return (val);
}

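/*
 * Write a word, converting it to little-endian byte order unless the
 * bus hardware swaps bytes itself (CFI_HARDWAREBYTESWAP).
 */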
static void
cfi_write(struct cfi_softc *sc, u_int ofs, u_int val)
{

	ofs &= ~(sc->sc_width - 1);
	switch (sc->sc_width) {
	case 1:
		bus_space_write_1(sc->sc_tag, sc->sc_handle, ofs, val);
		break;
	case 2:
#ifdef CFI_HARDWAREBYTESWAP
		bus_space_write_2(sc->sc_tag, sc->sc_handle, ofs, val);
#else
		bus_space_write_2(sc->sc_tag, sc->sc_handle, ofs, htole16(val));
#endif
		break;
	case 4:
#ifdef CFI_HARDWAREBYTESWAP
		bus_space_write_4(sc->sc_tag, sc->sc_handle, ofs, val);
#else
		bus_space_write_4(sc->sc_tag, sc->sc_handle, ofs, htole32(val));
#endif
		break;
	}
}

/*
 * This is the same workaround as NetBSD sys/dev/nor/cfi.c cfi_reset_default().
 */
static void
cfi_reset_default(struct cfi_softc *sc)
{

	cfi_write(sc, 0, CFI_BCS_READ_ARRAY2);
	cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
}

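/*
 * Read one byte of CFI query data at the given query offset (scaled
 * by the device width), then return the chip to read-array mode.
 */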
uint8_t
cfi_read_qry(struct cfi_softc *sc, u_int ofs)
{
	uint8_t val;

	cfi_write(sc, CFI_QRY_CMD_ADDR * sc->sc_width, CFI_QRY_CMD_DATA);
	val = cfi_read(sc, ofs * sc->sc_width);
	cfi_reset_default(sc);
	return (val);
}

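/*
 * Issue an AMD-style command: the two-cycle unlock sequence followed
 * by the command write itself.
 */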
static void
cfi_amd_write(struct cfi_softc *sc, u_int ofs, u_int addr, u_int data)
{

	cfi_write(sc, ofs + AMD_ADDR_START, CFI_AMD_UNLOCK);
	cfi_write(sc, ofs + AMD_ADDR_ACK, CFI_AMD_UNLOCK_ACK);
	cfi_write(sc, ofs + addr, data);
}

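/*
 * Format a size in bytes as a human-readable string, e.g. 8388608 ->
 * "8MB".  Returns a pointer to a static buffer, so it is not reentrant.
 */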
static char *
cfi_fmtsize(uint32_t sz)
{
	static char buf[8];
	static const char *sfx[] = { "", "K", "M", "G" };
	int sfxidx;

	sfxidx = 0;
	while (sfxidx < 3 && sz > 1023) {
		sz /= 1024;
		sfxidx++;
	}

	sprintf(buf, "%u%sB", sz, sfx[sfxidx]);
	return (buf);
}

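/*
 * Probe: find a bus width at which the device answers the CFI query
 * with the "QRY" signature, then record the command set, device size,
 * and interface width advertised by the query table.
 */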
int
cfi_probe(device_t dev)
{
	struct cfi_softc *sc;
	char *vend_str;
	int error;
	uint16_t iface, vend;

	sc = device_get_softc(dev);
	sc->sc_dev = dev;

	sc->sc_rid = 0;
	sc->sc_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_rid,
	    RF_ACTIVE);
	if (sc->sc_res == NULL)
		return (ENXIO);

	sc->sc_tag = rman_get_bustag(sc->sc_res);
	sc->sc_handle = rman_get_bushandle(sc->sc_res);

	if (sc->sc_width == 0) {
		sc->sc_width = 1;
		while (sc->sc_width <= 4) {
			if (cfi_read_qry(sc, CFI_QRY_IDENT) == 'Q')
				break;
			sc->sc_width <<= 1;
		}
	} else if (cfi_read_qry(sc, CFI_QRY_IDENT) != 'Q') {
		error = ENXIO;
		goto out;
	}
	if (sc->sc_width > 4) {
		error = ENXIO;
		goto out;
	}

	/* We got a Q. Check if we also have the R and the Y. */
	if (cfi_read_qry(sc, CFI_QRY_IDENT + 1) != 'R' ||
	    cfi_read_qry(sc, CFI_QRY_IDENT + 2) != 'Y') {
		error = ENXIO;
		goto out;
	}

	/* Get the vendor and command set. */
	vend = cfi_read_qry(sc, CFI_QRY_VEND) |
	    (cfi_read_qry(sc, CFI_QRY_VEND + 1) << 8);

	sc->sc_cmdset = vend;

	switch (vend) {
	case CFI_VEND_AMD_ECS:
	case CFI_VEND_AMD_SCS:
		vend_str = "AMD/Fujitsu";
		break;
	case CFI_VEND_INTEL_ECS:
		vend_str = "Intel/Sharp";
		break;
	case CFI_VEND_INTEL_SCS:
		vend_str = "Intel";
		break;
	case CFI_VEND_MITSUBISHI_ECS:
	case CFI_VEND_MITSUBISHI_SCS:
		vend_str = "Mitsubishi";
		break;
	default:
		vend_str = "Unknown vendor";
		break;
	}

	/* Get the device size. */
	sc->sc_size = 1U << cfi_read_qry(sc, CFI_QRY_SIZE);

	/* Sanity-check the I/F */
	iface = cfi_read_qry(sc, CFI_QRY_IFACE) |
	    (cfi_read_qry(sc, CFI_QRY_IFACE + 1) << 8);

	/*
	 * Adding 1 to iface gives us a bit-wise "switch" that allows
	 * us to test for the interface width by testing a single bit:
	 * e.g., an x16-only device reports iface 1, so iface + 1 == 2
	 * matches only sc_width == 2, while an x8/x16 device reports
	 * iface 2, and iface + 1 == 3 matches widths 1 and 2.
	 */
	iface++;

	error = (iface & sc->sc_width) ? 0 : EINVAL;
	if (error)
		goto out;

	device_set_descf(dev, "%s - %s", vend_str, cfi_fmtsize(sc->sc_size));

out:
	bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rid, sc->sc_res);
	return (error);
}

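/*
 * Attach: read the erase/write timeouts, the maximum buffered-write
 * size, and the erase-region geometry from the query table, then
 * create the character device and the cfid disk child.
 */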
int
cfi_attach(device_t dev)
{
	struct cfi_softc *sc;
	u_int blksz, blocks;
	u_int r, u;
	uint64_t mtoexp, ttoexp;
#ifdef CFI_SUPPORT_STRATAFLASH
	uint64_t ppr;
	char name[KENV_MNAMELEN], value[32];
#endif

	sc = device_get_softc(dev);
	sc->sc_dev = dev;

	sc->sc_rid = 0;
	sc->sc_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_rid,
#ifndef ATSE_CFI_HACK
	    RF_ACTIVE);
#else
	    RF_ACTIVE | RF_SHAREABLE);
#endif
	if (sc->sc_res == NULL)
		return (ENXIO);

	sc->sc_tag = rman_get_bustag(sc->sc_res);
	sc->sc_handle = rman_get_bushandle(sc->sc_res);

	/* Get time-out values for erase, write, and buffer write. */
	ttoexp = cfi_read_qry(sc, CFI_QRY_TTO_ERASE);
	mtoexp = cfi_read_qry(sc, CFI_QRY_MTO_ERASE);
	if (ttoexp == 0) {
		device_printf(dev, "erase timeout == 0, using 2^16ms\n");
		ttoexp = 16;
	}
	if (ttoexp > 41) {
		device_printf(dev, "insane timeout: 2^%jdms\n", ttoexp);
		return (EINVAL);
	}
	if (mtoexp == 0) {
		device_printf(dev, "max erase timeout == 0, using 2^%jdms\n",
		    ttoexp + 4);
		mtoexp = 4;
	}
	if (ttoexp + mtoexp > 41) {
		device_printf(dev, "insane max erase timeout: 2^%jd\n",
		    ttoexp + mtoexp);
		return (EINVAL);
	}
	sc->sc_typical_timeouts[CFI_TIMEOUT_ERASE] = SBT_1MS * (1ULL << ttoexp);
	sc->sc_max_timeouts[CFI_TIMEOUT_ERASE] =
	    sc->sc_typical_timeouts[CFI_TIMEOUT_ERASE] * (1ULL << mtoexp);

	ttoexp = cfi_read_qry(sc, CFI_QRY_TTO_WRITE);
	mtoexp = cfi_read_qry(sc, CFI_QRY_MTO_WRITE);
	if (ttoexp == 0) {
		device_printf(dev, "write timeout == 0, using 2^18us\n");
		ttoexp = 18;
	}
	if (ttoexp > 51) {
		device_printf(dev, "insane write timeout: 2^%jdus\n", ttoexp);
		return (EINVAL);
	}
	if (mtoexp == 0) {
		device_printf(dev, "max write timeout == 0, using 2^%jdus\n",
		    ttoexp + 4);
		mtoexp = 4;
	}
	if (ttoexp + mtoexp > 51) {
		device_printf(dev, "insane max write timeout: 2^%jdus\n",
		    ttoexp + mtoexp);
		return (EINVAL);
	}
	sc->sc_typical_timeouts[CFI_TIMEOUT_WRITE] = SBT_1US * (1ULL << ttoexp);
	sc->sc_max_timeouts[CFI_TIMEOUT_WRITE] =
	    sc->sc_typical_timeouts[CFI_TIMEOUT_WRITE] * (1ULL << mtoexp);

	ttoexp = cfi_read_qry(sc, CFI_QRY_TTO_BUFWRITE);
	mtoexp = cfi_read_qry(sc, CFI_QRY_MTO_BUFWRITE);
	/* Don't check for 0, it means not-supported. */
	if (ttoexp > 51) {
		device_printf(dev, "insane write timeout: 2^%jdus\n", ttoexp);
		return (EINVAL);
	}
	if (ttoexp + mtoexp > 51) {
		device_printf(dev, "insane max write timeout: 2^%jdus\n",
		    ttoexp + mtoexp);
		return (EINVAL);
	}
	sc->sc_typical_timeouts[CFI_TIMEOUT_BUFWRITE] =
	    SBT_1US * (1ULL << cfi_read_qry(sc, CFI_QRY_TTO_BUFWRITE));
	sc->sc_max_timeouts[CFI_TIMEOUT_BUFWRITE] =
	    sc->sc_typical_timeouts[CFI_TIMEOUT_BUFWRITE] *
	    (1ULL << cfi_read_qry(sc, CFI_QRY_MTO_BUFWRITE));

	/*
	 * Get the maximum size of a multibyte program: 2^N bytes, where
	 * N is a two-byte little-endian field in the query table.
	 */
	if (sc->sc_typical_timeouts[CFI_TIMEOUT_BUFWRITE] != 0)
		sc->sc_maxbuf = 1 << (cfi_read_qry(sc, CFI_QRY_MAXBUF) |
		    cfi_read_qry(sc, CFI_QRY_MAXBUF + 1) << 8);
	else
		sc->sc_maxbuf = 0;

	/* Get erase regions. */
	sc->sc_regions = cfi_read_qry(sc, CFI_QRY_NREGIONS);
	sc->sc_region = malloc(sc->sc_regions * sizeof(struct cfi_region),
	    M_TEMP, M_WAITOK | M_ZERO);
	for (r = 0; r < sc->sc_regions; r++) {
		blocks = cfi_read_qry(sc, CFI_QRY_REGION(r)) |
		    (cfi_read_qry(sc, CFI_QRY_REGION(r) + 1) << 8);
		sc->sc_region[r].r_blocks = blocks + 1;

		blksz = cfi_read_qry(sc, CFI_QRY_REGION(r) + 2) |
		    (cfi_read_qry(sc, CFI_QRY_REGION(r) + 3) << 8);
		sc->sc_region[r].r_blksz = (blksz == 0) ? 128 :
		    blksz * 256;
	}

	/* Reset the device to a default state. */
	cfi_write(sc, 0, CFI_BCS_CLEAR_STATUS);

	if (bootverbose) {
		device_printf(dev, "[");
		for (r = 0; r < sc->sc_regions; r++) {
			printf("%ux%s%s", sc->sc_region[r].r_blocks,
			    cfi_fmtsize(sc->sc_region[r].r_blksz),
			    (r == sc->sc_regions - 1) ? "]\n" : ",");
		}
	}

	if (sc->sc_cmdset == CFI_VEND_AMD_ECS ||
	    sc->sc_cmdset == CFI_VEND_AMD_SCS) {
		cfi_amd_write(sc, 0, AMD_ADDR_START, CFI_AMD_AUTO_SELECT);
		sc->sc_manid = cfi_read(sc, 0);
		sc->sc_devid = cfi_read(sc, 2);
		device_printf(dev, "Manufacturer ID:%x Device ID:%x\n",
		    sc->sc_manid, sc->sc_devid);
		cfi_write(sc, 0, CFI_BCS_READ_ARRAY2);
	}

	u = device_get_unit(dev);
	sc->sc_nod = make_dev(&cfi_cdevsw, u, UID_ROOT, GID_WHEEL, 0600,
	    "%s%u", cfi_driver_name, u);
	sc->sc_nod->si_drv1 = sc;

	cfi_add_sysctls(sc);

#ifdef CFI_SUPPORT_STRATAFLASH
	/*
	 * Store the Intel factory PPR in the environment. In some
	 * cases it is the most unique ID on a board.
	 */
	if (cfi_intel_get_factory_pr(sc, &ppr) == 0) {
		if (snprintf(name, sizeof(name), "%s.factory_ppr",
		    device_get_nameunit(dev)) < (sizeof(name) - 1) &&
		    snprintf(value, sizeof(value), "0x%016jx", ppr) <
		    (sizeof(value) - 1))
			(void) kern_setenv(name, value);
	}
#endif

	device_add_child(dev, "cfid", DEVICE_UNIT_ANY);
	bus_attach_children(dev);

	return (0);
}

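/*
 * Export the timeout-exceeded counters under the device's sysctl tree.
 */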
static void
cfi_add_sysctls(struct cfi_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *children;

	ctx = device_get_sysctl_ctx(sc->sc_dev);
	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->sc_dev));

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "typical_erase_timout_count",
	    CTLFLAG_RD, &sc->sc_tto_counts[CFI_TIMEOUT_ERASE],
	    0, "Number of times the typical erase timeout was exceeded");
	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "max_erase_timout_count",
	    CTLFLAG_RD, &sc->sc_mto_counts[CFI_TIMEOUT_ERASE], 0,
	    "Number of times the maximum erase timeout was exceeded");
	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "typical_write_timout_count",
	    CTLFLAG_RD, &sc->sc_tto_counts[CFI_TIMEOUT_WRITE], 0,
	    "Number of times the typical write timeout was exceeded");
	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "max_write_timout_count",
	    CTLFLAG_RD, &sc->sc_mto_counts[CFI_TIMEOUT_WRITE], 0,
	    "Number of times the maximum write timeout was exceeded");
	if (sc->sc_maxbuf > 0) {
		SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
		    "typical_bufwrite_timout_count",
		    CTLFLAG_RD, &sc->sc_tto_counts[CFI_TIMEOUT_BUFWRITE], 0,
		    "Number of times the typical buffered write timeout was "
		    "exceeded");
		SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
		    "max_bufwrite_timout_count",
		    CTLFLAG_RD, &sc->sc_mto_counts[CFI_TIMEOUT_BUFWRITE], 0,
		    "Number of times the maximum buffered write timeout was "
		    "exceeded");
	}
}

int
cfi_detach(device_t dev)
{
	struct cfi_softc *sc;

	sc = device_get_softc(dev);

	destroy_dev(sc->sc_nod);
	free(sc->sc_region, M_TEMP);
	bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rid, sc->sc_res);
	return (0);
}

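/*
 * Check whether a range reads back as fully erased, i.e. all bits set.
 */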
static bool
cfi_check_erase(struct cfi_softc *sc, u_int ofs, u_int sz)
{
	bool result;
	int i;
	uint32_t val;

	result = FALSE;
	for (i = 0; i < sz; i += sc->sc_width) {
		val = cfi_read(sc, ofs + i);
		switch (sc->sc_width) {
		case 1:
			if (val != 0xff)
				goto out;
			continue;
		case 2:
			if (val != 0xffff)
				goto out;
			continue;
		case 4:
			if (val != 0xffffffff)
				goto out;
			continue;
		}
	}
	result = TRUE;

out:
	return (result);
}

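/*
 * Poll the device status until the pending operation completes or the
 * maximum timeout for the given command is exceeded, bumping the
 * typical/maximum timeout counters along the way.
 */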
static int
cfi_wait_ready(struct cfi_softc *sc, u_int ofs, sbintime_t start,
    enum cfi_wait_cmd cmd)
{
	int done, error, tto_exceeded;
	uint32_t st0 = 0, st = 0;
	sbintime_t now;

	done = 0;
	error = 0;
	tto_exceeded = 0;
	while (!done && !error) {
		/*
		 * Save time before we start so we always do one check
		 * after the timeout has expired.
		 */
		now = sbinuptime();

		switch (sc->sc_cmdset) {
		case CFI_VEND_INTEL_ECS:
		case CFI_VEND_INTEL_SCS:
			st = cfi_read(sc, ofs);
			done = (st & CFI_INTEL_STATUS_WSMS);
			if (done) {
				/* NB: bit 0 is reserved */
				st &= ~(CFI_INTEL_XSTATUS_RSVD |
				    CFI_INTEL_STATUS_WSMS |
				    CFI_INTEL_STATUS_RSVD);
				if (st & CFI_INTEL_STATUS_DPS)
					error = EPERM;
				else if (st & CFI_INTEL_STATUS_PSLBS)
					error = EIO;
				else if (st & CFI_INTEL_STATUS_ECLBS)
					error = ENXIO;
				else if (st)
					error = EACCES;
			}
			break;
		case CFI_VEND_AMD_SCS:
		case CFI_VEND_AMD_ECS:
			/* The toggle bit differs on consecutive reads
			 * while an operation is still in progress. */
			st0 = cfi_read(sc, ofs);
			st = cfi_read(sc, ofs);
			done = ((st & 0x40) == (st0 & 0x40)) ? 1 : 0;
			break;
		}

		if (tto_exceeded ||
		    now > start + sc->sc_typical_timeouts[cmd]) {
			if (!tto_exceeded) {
				tto_exceeded = 1;
				sc->sc_tto_counts[cmd]++;
#ifdef CFI_DEBUG_TIMEOUT
				device_printf(sc->sc_dev,
				    "typical timeout exceeded (cmd %d)", cmd);
#endif
			}
			if (now > start + sc->sc_max_timeouts[cmd]) {
				sc->sc_mto_counts[cmd]++;
#ifdef CFI_DEBUG_TIMEOUT
				device_printf(sc->sc_dev,
				    "max timeout exceeded (cmd %d)", cmd);
#endif
				/* Give up so the loop cannot spin forever. */
				break;
			}
		}
	}
	if (!done && !error)
		error = ETIMEDOUT;
	if (error)
		printf("\nerror=%d (st 0x%x st0 0x%x)\n", error, st, st0);
	return (error);
}

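/*
 * Program the staged buffer (sc_wrbuf) into the flash block at
 * sc_wrofs.  The block is erased first if any bit would need to change
 * from 0 to 1; the data is then written with a multibyte (buffered)
 * program when the chip supports it, or word-at-a-time otherwise.
 */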
int
cfi_write_block(struct cfi_softc *sc)
{
	union {
		uint8_t *x8;
		uint16_t *x16;
		uint32_t *x32;
	} ptr, cpyprt;
	register_t intr;
	int error, i, j, neederase = 0;
	uint32_t st;
	u_int wlen;
	sbintime_t start;
	u_int minsz;
	uint32_t val;

	/* Intel flash must be unlocked before modification */
	switch (sc->sc_cmdset) {
	case CFI_VEND_INTEL_ECS:
	case CFI_VEND_INTEL_SCS:
		cfi_write(sc, sc->sc_wrofs, CFI_INTEL_LBS);
		cfi_write(sc, sc->sc_wrofs, CFI_INTEL_UB);
		cfi_write(sc, sc->sc_wrofs, CFI_BCS_READ_ARRAY);
		break;
	}

	/* Check if an erase is required. */
	for (i = 0; i < sc->sc_wrbufsz; i++)
		if ((sc->sc_wrbuf[i] & sc->sc_wrbufcpy[i]) != sc->sc_wrbuf[i]) {
			neederase = 1;
			break;
		}

	if (neederase) {
		intr = intr_disable();
		start = sbinuptime();
		/* Erase the block. */
		switch (sc->sc_cmdset) {
		case CFI_VEND_INTEL_ECS:
		case CFI_VEND_INTEL_SCS:
			cfi_write(sc, sc->sc_wrofs, CFI_BCS_BLOCK_ERASE);
			cfi_write(sc, sc->sc_wrofs, CFI_BCS_CONFIRM);
			break;
		case CFI_VEND_AMD_SCS:
		case CFI_VEND_AMD_ECS:
			/* find minimum sector size */
			minsz = sc->sc_region[0].r_blksz;
			for (i = 1; i < sc->sc_regions; i++) {
				if (sc->sc_region[i].r_blksz < minsz)
					minsz = sc->sc_region[i].r_blksz;
			}
			cfi_amd_write(sc, sc->sc_wrofs, AMD_ADDR_START,
			    CFI_AMD_ERASE_SECTOR);
			cfi_amd_write(sc, sc->sc_wrofs,
			    sc->sc_wrofs >> (ffs(minsz) - 1),
			    CFI_AMD_BLOCK_ERASE);
			for (i = 0; i < CFI_AMD_MAXCHK; ++i) {
				if (cfi_check_erase(sc, sc->sc_wrofs,
				    sc->sc_wrbufsz))
					break;
				DELAY(10);
			}
			if (i == CFI_AMD_MAXCHK) {
				printf("\nCFI Sector Erase time out error\n");
				/* Re-enable interrupts before bailing out. */
				intr_restore(intr);
				return (ENODEV);
			}
			break;
		default:
			/* Better safe than sorry... */
			intr_restore(intr);
			return (ENODEV);
		}
		intr_restore(intr);
		error = cfi_wait_ready(sc, sc->sc_wrofs, start,
		    CFI_TIMEOUT_ERASE);
		if (error)
			goto out;
	} else
		error = 0;

	/* Write the block using a multibyte write if supported. */
	ptr.x8 = sc->sc_wrbuf;
	cpyprt.x8 = sc->sc_wrbufcpy;
	if (sc->sc_maxbuf > sc->sc_width) {
		switch (sc->sc_cmdset) {
		case CFI_VEND_INTEL_ECS:
		case CFI_VEND_INTEL_SCS:
			for (i = 0; i < sc->sc_wrbufsz; i += wlen) {
				wlen = MIN(sc->sc_maxbuf, sc->sc_wrbufsz - i);

				intr = intr_disable();

				start = sbinuptime();
				do {
					cfi_write(sc, sc->sc_wrofs + i,
					    CFI_BCS_BUF_PROG_SETUP);
					if (sbinuptime() > start +
					    sc->sc_max_timeouts[
					    CFI_TIMEOUT_BUFWRITE]) {
						error = ETIMEDOUT;
						goto out;
					}
					st = cfi_read(sc, sc->sc_wrofs + i);
				} while (!(st & CFI_INTEL_STATUS_WSMS));

				cfi_write(sc, sc->sc_wrofs + i,
				    (wlen / sc->sc_width) - 1);
				switch (sc->sc_width) {
				case 1:
					bus_space_write_region_1(sc->sc_tag,
					    sc->sc_handle, sc->sc_wrofs + i,
					    ptr.x8 + i, wlen);
					break;
				case 2:
					bus_space_write_region_2(sc->sc_tag,
					    sc->sc_handle, sc->sc_wrofs + i,
					    ptr.x16 + i / 2, wlen / 2);
					break;
				case 4:
					bus_space_write_region_4(sc->sc_tag,
					    sc->sc_handle, sc->sc_wrofs + i,
					    ptr.x32 + i / 4, wlen / 4);
					break;
				}

				cfi_write(sc, sc->sc_wrofs + i,
				    CFI_BCS_CONFIRM);

				intr_restore(intr);

				error = cfi_wait_ready(sc, sc->sc_wrofs + i,
				    start, CFI_TIMEOUT_BUFWRITE);
				if (error != 0)
					goto out;
			}
			goto out;
		default:
			/* Fall through to single word case */
			break;
		}
	}

	/* Write the block one byte/word at a time. */
	for (i = 0; i < sc->sc_wrbufsz; i += sc->sc_width) {
		/* Avoid writing unless we are actually changing bits */
		if (!neederase) {
			switch (sc->sc_width) {
			case 1:
				if (*(ptr.x8 + i) == *(cpyprt.x8 + i))
					continue;
				break;
			case 2:
				if (*(ptr.x16 + i / 2) == *(cpyprt.x16 + i / 2))
					continue;
				break;
			case 4:
				if (*(ptr.x32 + i / 4) == *(cpyprt.x32 + i / 4))
					continue;
				break;
			}
		}

		/*
		 * Make sure the command to start a write and the
		 * actual write happens back-to-back without any
		 * excessive delays.
		 */
		intr = intr_disable();

		start = sbinuptime();
		switch (sc->sc_cmdset) {
		case CFI_VEND_INTEL_ECS:
		case CFI_VEND_INTEL_SCS:
			cfi_write(sc, sc->sc_wrofs + i, CFI_BCS_PROGRAM);
			break;
		case CFI_VEND_AMD_SCS:
		case CFI_VEND_AMD_ECS:
			cfi_amd_write(sc, 0, AMD_ADDR_START, CFI_AMD_PROGRAM);
			break;
		}
		switch (sc->sc_width) {
		case 1:
			bus_space_write_1(sc->sc_tag, sc->sc_handle,
			    sc->sc_wrofs + i, *(ptr.x8 + i));
			break;
		case 2:
			bus_space_write_2(sc->sc_tag, sc->sc_handle,
			    sc->sc_wrofs + i, *(ptr.x16 + i / 2));
			break;
		case 4:
			bus_space_write_4(sc->sc_tag, sc->sc_handle,
			    sc->sc_wrofs + i, *(ptr.x32 + i / 4));
			break;
		}

		intr_restore(intr);

		if (sc->sc_cmdset == CFI_VEND_AMD_ECS ||
		    sc->sc_cmdset == CFI_VEND_AMD_SCS) {
			for (j = 0; j < CFI_AMD_MAXCHK; ++j) {
				switch (sc->sc_width) {
				case 1:
					val = *(ptr.x8 + i);
					break;
				case 2:
					val = *(ptr.x16 + i / 2);
					break;
				case 4:
					val = *(ptr.x32 + i / 4);
					break;
				}

				if (cfi_read(sc, sc->sc_wrofs + i) == val)
					break;

				DELAY(10);
			}
			if (j == CFI_AMD_MAXCHK) {
				printf("\nCFI Program Verify time out error\n");
				error = ENXIO;
				goto out;
			}
		} else {
			error = cfi_wait_ready(sc, sc->sc_wrofs, start,
			    CFI_TIMEOUT_WRITE);
			if (error)
				goto out;
		}
	}

	/* error is 0. */

out:
	cfi_reset_default(sc);

	/* Relock Intel flash */
	switch (sc->sc_cmdset) {
	case CFI_VEND_INTEL_ECS:
	case CFI_VEND_INTEL_SCS:
		cfi_write(sc, sc->sc_wrofs, CFI_INTEL_LBS);
		cfi_write(sc, sc->sc_wrofs, CFI_INTEL_LB);
		cfi_write(sc, sc->sc_wrofs, CFI_BCS_READ_ARRAY);
		break;
	}
	return (error);
}

#ifdef CFI_SUPPORT_STRATAFLASH
/*
 * Intel StrataFlash Protection Register Support.
 *
 * The memory includes a 128-bit Protection Register that can be
 * used for security.  There are two 64-bit segments; one is programmed
 * at the factory with a unique 64-bit number which is immutable.
 * The other segment is left blank for User (OEM) programming.
 * The User/OEM segment is One Time Programmable (OTP).  It can also
 * be locked to prevent any further writes by setting bit 0 of the
 * Protection Lock Register (PLR).  The PLR can be written only once.
 */

static uint16_t
cfi_get16(struct cfi_softc *sc, int off)
{
	uint16_t v = bus_space_read_2(sc->sc_tag, sc->sc_handle, off << 1);
	return (v);
}

#ifdef CFI_ARMEDANDDANGEROUS
static void
cfi_put16(struct cfi_softc *sc, int off, uint16_t v)
{
	bus_space_write_2(sc->sc_tag, sc->sc_handle, off << 1, v);
}
#endif

/*
 * Read the factory-defined 64-bit segment of the PR.
 */
int
cfi_intel_get_factory_pr(struct cfi_softc *sc, uint64_t *id)
{
	if (sc->sc_cmdset != CFI_VEND_INTEL_ECS)
		return (EOPNOTSUPP);
	KASSERT(sc->sc_width == 2, ("sc_width %d", sc->sc_width));

	cfi_write(sc, 0, CFI_INTEL_READ_ID);
	*id = ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(0))) << 48 |
	      ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(1))) << 32 |
	      ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(2))) << 16 |
	      ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(3)));
	cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
	return (0);
}

/*
 * Read the User/OEM 64-bit segment of the PR.
 */
int
cfi_intel_get_oem_pr(struct cfi_softc *sc, uint64_t *id)
{
	if (sc->sc_cmdset != CFI_VEND_INTEL_ECS)
		return (EOPNOTSUPP);
	KASSERT(sc->sc_width == 2, ("sc_width %d", sc->sc_width));

	cfi_write(sc, 0, CFI_INTEL_READ_ID);
	*id = ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(4))) << 48 |
	      ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(5))) << 32 |
	      ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(6))) << 16 |
	      ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(7)));
	cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
	return (0);
}

/*
 * Write the User/OEM 64-bit segment of the PR.
 * XXX should allow writing individual words/bytes
 */
int
cfi_intel_set_oem_pr(struct cfi_softc *sc, uint64_t id)
{
#ifdef CFI_ARMEDANDDANGEROUS
	register_t intr;
	int i, error;
	sbintime_t start;
#endif

	if (sc->sc_cmdset != CFI_VEND_INTEL_ECS)
		return (EOPNOTSUPP);
	KASSERT(sc->sc_width == 2, ("sc_width %d", sc->sc_width));

#ifdef CFI_ARMEDANDDANGEROUS
	for (i = 7; i >= 4; i--, id >>= 16) {
		intr = intr_disable();
		start = sbinuptime();
		cfi_write(sc, 0, CFI_INTEL_PP_SETUP);
		cfi_put16(sc, CFI_INTEL_PR(i), id & 0xffff);
		intr_restore(intr);
		error = cfi_wait_ready(sc, CFI_BCS_READ_STATUS, start,
		    CFI_TIMEOUT_WRITE);
		if (error)
			break;
	}
	cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
	return (error);
#else
	device_printf(sc->sc_dev, "%s: OEM PR not set, "
	    "CFI_ARMEDANDDANGEROUS not configured\n", __func__);
	return (ENXIO);
#endif
}

/*
 * Read the contents of the Protection Lock Register.
 */
int
cfi_intel_get_plr(struct cfi_softc *sc, uint32_t *plr)
{
	if (sc->sc_cmdset != CFI_VEND_INTEL_ECS)
		return (EOPNOTSUPP);
	KASSERT(sc->sc_width == 2, ("sc_width %d", sc->sc_width));

	cfi_write(sc, 0, CFI_INTEL_READ_ID);
	*plr = cfi_get16(sc, CFI_INTEL_PLR);
	cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
	return (0);
}

/*
 * Write the Protection Lock Register to lock down the
 * user-settable segment of the Protection Register.
 * NOTE: this operation is not reversible.
 */
int
cfi_intel_set_plr(struct cfi_softc *sc)
{
#ifdef CFI_ARMEDANDDANGEROUS
	register_t intr;
	int error;
	sbintime_t start;
#endif
	if (sc->sc_cmdset != CFI_VEND_INTEL_ECS)
		return (EOPNOTSUPP);
	KASSERT(sc->sc_width == 2, ("sc_width %d", sc->sc_width));

#ifdef CFI_ARMEDANDDANGEROUS
	/* worthy of console msg */
	device_printf(sc->sc_dev, "set PLR\n");
	intr = intr_disable();
	start = sbinuptime();
	cfi_write(sc, 0, CFI_INTEL_PP_SETUP);
	cfi_put16(sc, CFI_INTEL_PLR, 0xFFFD);
	intr_restore(intr);
	error = cfi_wait_ready(sc, CFI_BCS_READ_STATUS, start,
	    CFI_TIMEOUT_WRITE);
	cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
	return (error);
#else
	device_printf(sc->sc_dev, "%s: PLR not set, "
	    "CFI_ARMEDANDDANGEROUS not configured\n", __func__);
	return (ENXIO);
#endif
}
#endif /* CFI_SUPPORT_STRATAFLASH */