/*
 * hw_random/core.c: HWRNG core API
 *
 * Copyright 2006 Michael Buesch <[email protected]>
 * Copyright 2005 (c) MontaVista Software, Inc.
 *
 * Please read Documentation/admin-guide/hw_random.rst for details on use.
 *
 * This software may be used and distributed according to the terms
 * of the GNU General Public License, incorporated herein by reference.
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hw_random.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/workqueue.h>

#define RNG_MODULE_NAME		"hw_random"

#define RNG_BUFFER_SIZE		(SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES)
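
/*
 * Note (editorial): RNG_BUFFER_SIZE keeps the two bounce buffers
 * (rng_buffer, rng_fillbuf) at least 32 bytes long and at least one
 * cache line (SMP_CACHE_BYTES), which is friendly to drivers that
 * DMA directly into them.
 */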

static struct hwrng __rcu *current_rng;
/* the current rng has been explicitly chosen by user via sysfs */
static int cur_rng_set_by_user;
static struct task_struct *hwrng_fill;
/* list of registered rngs */
static LIST_HEAD(rng_list);
/* Protects rng_list, hwrng_fill and updating on current_rng */
static DEFINE_MUTEX(rng_mutex);
/* Protects rng read functions, data_avail, rng_buffer and rng_fillbuf */
static DEFINE_MUTEX(reading_mutex);
static int data_avail;
static u8 *rng_buffer, *rng_fillbuf;
static unsigned short current_quality;
static unsigned short default_quality = 1024; /* default to maximum */

module_param(current_quality, ushort, 0644);
MODULE_PARM_DESC(current_quality,
		 "current hwrng entropy estimation per 1024 bits of input -- obsolete, use rng_quality instead");
module_param(default_quality, ushort, 0644);
MODULE_PARM_DESC(default_quality,
		 "default maximum entropy content of hwrng per 1024 bits of input");

static void drop_current_rng(void);
static int hwrng_init(struct hwrng *rng);
static int hwrng_fillfn(void *unused);

static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
			       int wait);

static size_t rng_buffer_size(void)
{
	return RNG_BUFFER_SIZE;
}

static void cleanup_rng_work(struct work_struct *work)
{
	struct hwrng *rng = container_of(work, struct hwrng, cleanup_work);

	/*
	 * Hold rng_mutex here so we serialize in case they set_current_rng
	 * on rng again immediately.
	 */
	mutex_lock(&rng_mutex);

	/* Skip if rng has been reinitialized. */
	if (kref_read(&rng->ref)) {
		mutex_unlock(&rng_mutex);
		return;
	}

	if (rng->cleanup)
		rng->cleanup(rng);

	complete(&rng->cleanup_done);
	mutex_unlock(&rng_mutex);
}

static inline void cleanup_rng(struct kref *kref)
{
	struct hwrng *rng = container_of(kref, struct hwrng, ref);

	schedule_work(&rng->cleanup_work);
}

static int set_current_rng(struct hwrng *rng)
{
	struct hwrng *old_rng;
	int err;

	BUG_ON(!mutex_is_locked(&rng_mutex));

	err = hwrng_init(rng);
	if (err)
		return err;

	old_rng = rcu_dereference_protected(current_rng,
					    lockdep_is_held(&rng_mutex));
	rcu_assign_pointer(current_rng, rng);

	if (old_rng) {
		synchronize_rcu();
		kref_put(&old_rng->ref, cleanup_rng);
	}

	/* if necessary, start hwrng thread */
	if (!hwrng_fill) {
		hwrng_fill = kthread_run(hwrng_fillfn, NULL, "hwrng");
		if (IS_ERR(hwrng_fill)) {
			pr_err("hwrng_fill thread creation failed\n");
			hwrng_fill = NULL;
		}
	}

	return 0;
}

static void drop_current_rng(void)
{
	struct hwrng *rng;

	rng = rcu_dereference_protected(current_rng,
					lockdep_is_held(&rng_mutex));
	if (!rng)
		return;

	RCU_INIT_POINTER(current_rng, NULL);
	synchronize_rcu();

	if (hwrng_fill) {
		kthread_stop(hwrng_fill);
		hwrng_fill = NULL;
	}

	/* decrease last reference for triggering the cleanup */
	kref_put(&rng->ref, cleanup_rng);
}

/* Returns NULL or refcounted hwrng */
static struct hwrng *get_current_rng_nolock(void)
{
	struct hwrng *rng;

	rng = rcu_dereference_protected(current_rng,
					lockdep_is_held(&rng_mutex));
	if (rng)
		kref_get(&rng->ref);

	return rng;
}

static struct hwrng *get_current_rng(void)
{
	struct hwrng *rng;

	rcu_read_lock();
	rng = rcu_dereference(current_rng);
	if (rng)
		kref_get(&rng->ref);

	rcu_read_unlock();

	return rng;
}

static void put_rng(struct hwrng *rng)
{
	if (rng)
		kref_put(&rng->ref, cleanup_rng);
}

static int hwrng_init(struct hwrng *rng)
{
	if (kref_get_unless_zero(&rng->ref))
		goto skip_init;

	if (rng->init) {
		int ret;

		ret = rng->init(rng);
		if (ret)
			return ret;
	}

	kref_init(&rng->ref);
	reinit_completion(&rng->cleanup_done);

skip_init:
	current_quality = rng->quality; /* obsolete */

	return 0;
}

static int rng_dev_open(struct inode *inode, struct file *filp)
{
	/* enforce read-only access to this chrdev */
	if ((filp->f_mode & FMODE_READ) == 0)
		return -EINVAL;
	if (filp->f_mode & FMODE_WRITE)
		return -EINVAL;
	return 0;
}

static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
			       int wait)
{
	int present;

	BUG_ON(!mutex_is_locked(&reading_mutex));
	if (rng->read) {
		int err;

		err = rng->read(rng, buffer, size, wait);
		if (WARN_ON_ONCE(err > 0 && err > size))
			err = size;

		return err;
	}

	if (rng->data_present)
		present = rng->data_present(rng, wait);
	else
		present = 1;

	if (present)
		return rng->data_read(rng, (u32 *)buffer);

	return 0;
}

static ssize_t rng_dev_read(struct file *filp, char __user *buf,
			    size_t size, loff_t *offp)
{
	u8 buffer[RNG_BUFFER_SIZE];
	ssize_t ret = 0;
	int err = 0;
	int bytes_read, len;
	struct hwrng *rng;

	while (size) {
		rng = get_current_rng();
		if (!rng) {
			err = -ENODEV;
			goto out;
		}

		if (mutex_lock_interruptible(&reading_mutex)) {
			err = -ERESTARTSYS;
			goto out_put;
		}
		if (!data_avail) {
			bytes_read = rng_get_data(rng, rng_buffer,
						  rng_buffer_size(),
						  !(filp->f_flags & O_NONBLOCK));
			if (bytes_read < 0) {
				err = bytes_read;
				goto out_unlock_reading;
			} else if (bytes_read == 0 &&
				   (filp->f_flags & O_NONBLOCK)) {
				err = -EAGAIN;
				goto out_unlock_reading;
			}

			data_avail = bytes_read;
		}

		len = data_avail;
		if (len) {
			if (len > size)
				len = size;

			data_avail -= len;

			memcpy(buffer, rng_buffer + data_avail, len);
		}
		mutex_unlock(&reading_mutex);
		put_rng(rng);

		if (len) {
			if (copy_to_user(buf + ret, buffer, len)) {
				err = -EFAULT;
				goto out;
			}

			size -= len;
			ret += len;
		}

		if (need_resched())
			schedule_timeout_interruptible(1);

		if (signal_pending(current)) {
			err = -ERESTARTSYS;
			goto out;
		}
	}
out:
	memzero_explicit(buffer, sizeof(buffer));
	return ret ? : err;

out_unlock_reading:
	mutex_unlock(&reading_mutex);
out_put:
	put_rng(rng);
	goto out;
}

static const struct file_operations rng_chrdev_ops = {
	.owner		= THIS_MODULE,
	.open		= rng_dev_open,
	.read		= rng_dev_read,
	.llseek		= noop_llseek,
};

static const struct attribute_group *rng_dev_groups[];

static struct miscdevice rng_miscdev = {
	.minor		= HWRNG_MINOR,
	.name		= RNG_MODULE_NAME,
	.nodename	= "hwrng",
	.fops		= &rng_chrdev_ops,
	.groups		= rng_dev_groups,
};
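
/*
 * Userspace example (illustrative, not part of this file): the misc
 * device above appears as /dev/hwrng, so raw hardware randomness can be
 * read with plain file I/O, e.g.:
 *
 *	dd if=/dev/hwrng bs=64 count=1 | xxd
 */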

static int enable_best_rng(void)
{
	struct hwrng *rng, *cur_rng, *new_rng = NULL;
	int ret = -ENODEV;

	BUG_ON(!mutex_is_locked(&rng_mutex));

	/* no rng to use? */
	if (list_empty(&rng_list)) {
		drop_current_rng();
		cur_rng_set_by_user = 0;
		return 0;
	}

	/* use the rng which offers the best quality */
	list_for_each_entry(rng, &rng_list, list) {
		if (!new_rng || rng->quality > new_rng->quality)
			new_rng = rng;
	}

	cur_rng = rcu_dereference_protected(current_rng,
					    lockdep_is_held(&rng_mutex));
	ret = ((new_rng == cur_rng) ? 0 : set_current_rng(new_rng));
	if (!ret)
		cur_rng_set_by_user = 0;

	return ret;
}

static ssize_t rng_current_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t len)
{
	int err;
	struct hwrng *rng, *new_rng;

	err = mutex_lock_interruptible(&rng_mutex);
	if (err)
		return -ERESTARTSYS;

	if (sysfs_streq(buf, "")) {
		err = enable_best_rng();
	} else if (sysfs_streq(buf, "none")) {
		cur_rng_set_by_user = 1;
		drop_current_rng();
	} else {
		list_for_each_entry(rng, &rng_list, list) {
			if (sysfs_streq(rng->name, buf)) {
				err = set_current_rng(rng);
				if (!err)
					cur_rng_set_by_user = 1;
				break;
			}
		}
	}
	new_rng = get_current_rng_nolock();
	mutex_unlock(&rng_mutex);

	if (new_rng)
		put_rng(new_rng);

	return err ? : len;
}

static ssize_t rng_current_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	ssize_t ret;
	struct hwrng *rng;

	rng = get_current_rng();

	ret = sysfs_emit(buf, "%s\n", rng ? rng->name : "none");
	put_rng(rng);

	return ret;
}

static ssize_t rng_available_show(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	int err;
	struct hwrng *rng;

	err = mutex_lock_interruptible(&rng_mutex);
	if (err)
		return -ERESTARTSYS;
	buf[0] = '\0';
	list_for_each_entry(rng, &rng_list, list) {
		strlcat(buf, rng->name, PAGE_SIZE);
		strlcat(buf, " ", PAGE_SIZE);
	}
	strlcat(buf, "none\n", PAGE_SIZE);
	mutex_unlock(&rng_mutex);

	return strlen(buf);
}

static ssize_t rng_selected_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	return sysfs_emit(buf, "%d\n", cur_rng_set_by_user);
}

static ssize_t rng_quality_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	ssize_t ret;
	struct hwrng *rng;

	rng = get_current_rng();

	if (!rng) /* no need to put_rng */
		return -ENODEV;

	ret = sysfs_emit(buf, "%hu\n", rng->quality);
	put_rng(rng);

	return ret;
}

static ssize_t rng_quality_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t len)
{
	struct hwrng *rng;
	u16 quality;
	int ret = -EINVAL;

	if (len < 2)
		return -EINVAL;

	ret = mutex_lock_interruptible(&rng_mutex);
	if (ret)
		return -ERESTARTSYS;

	ret = kstrtou16(buf, 0, &quality);
	if (ret || quality > 1024) {
		ret = -EINVAL;
		goto out;
	}

	rng = rcu_dereference_protected(current_rng,
					lockdep_is_held(&rng_mutex));
	if (!rng) {
		ret = -ENODEV;
		goto out;
	}

	rng->quality = quality;
	current_quality = quality; /* obsolete */

	/* the best available RNG may have changed */
	ret = enable_best_rng();

out:
	mutex_unlock(&rng_mutex);
	return ret ? ret : len;
}

static DEVICE_ATTR_RW(rng_current);
static DEVICE_ATTR_RO(rng_available);
static DEVICE_ATTR_RO(rng_selected);
static DEVICE_ATTR_RW(rng_quality);

static struct attribute *rng_dev_attrs[] = {
	&dev_attr_rng_current.attr,
	&dev_attr_rng_available.attr,
	&dev_attr_rng_selected.attr,
	&dev_attr_rng_quality.attr,
	NULL
};

ATTRIBUTE_GROUPS(rng_dev);

static int hwrng_fillfn(void *unused)
{
	size_t entropy, entropy_credit = 0; /* in 1/1024 of a bit */
	long rc;

	while (!kthread_should_stop()) {
		unsigned short quality;
		struct hwrng *rng;

		rng = get_current_rng();
		if (!rng) {
			/*
			 * Keep the task_struct alive until kthread_stop()
			 * is called to avoid UAF in drop_current_rng().
			 */
			while (!kthread_should_stop()) {
				set_current_state(TASK_INTERRUPTIBLE);
				if (!kthread_should_stop())
					schedule();
			}
			set_current_state(TASK_RUNNING);
			break;
		}

		mutex_lock(&reading_mutex);
		rc = rng_get_data(rng, rng_fillbuf,
				  rng_buffer_size(), 1);
		if (current_quality != rng->quality)
			rng->quality = current_quality; /* obsolete */
		quality = rng->quality;
		mutex_unlock(&reading_mutex);

		if (rc <= 0)
			hwrng_msleep(rng, 10000);

		put_rng(rng);

		if (rc <= 0)
			continue;

		/*
		 * If we cannot credit at least one bit of entropy,
		 * keep track of the remainder for the next iteration.
		 */
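		/*
		 * Worked example (editorial, illustrative numbers): quality
		 * is an entropy estimate per 1024 bits of input, so a
		 * 32-byte read at quality 700 credits
		 * (32 * 700 * 8) >> 10 = 175 bits, while at quality 3 the
		 * product is only 768 < 1024, so no bit is credited and
		 * 768 is kept in entropy_credit for the next iteration.
		 */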
		entropy = rc * quality * 8 + entropy_credit;
		if ((entropy >> 10) == 0)
			entropy_credit = entropy;

		/* Outside lock, sure, but y'know: randomness. */
		add_hwgenerator_randomness((void *)rng_fillbuf, rc,
					   entropy >> 10, true);
	}
	return 0;
}

int hwrng_register(struct hwrng *rng)
{
	int err = -EINVAL;
	struct hwrng *cur_rng, *tmp;

	if (!rng->name || (!rng->data_read && !rng->read))
		goto out;

	mutex_lock(&rng_mutex);

	/* Must not register two RNGs with the same name. */
	err = -EEXIST;
	list_for_each_entry(tmp, &rng_list, list) {
		if (strcmp(tmp->name, rng->name) == 0)
			goto out_unlock;
	}
	list_add_tail(&rng->list, &rng_list);

	INIT_WORK(&rng->cleanup_work, cleanup_rng_work);
	init_completion(&rng->cleanup_done);
	complete(&rng->cleanup_done);
	init_completion(&rng->dying);

	/* Adjust quality field to always have a proper value */
	rng->quality = min_t(u16, min_t(u16, default_quality, 1024),
			     rng->quality ?: 1024);

	if (!cur_rng_set_by_user) {
		cur_rng = rcu_dereference_protected(current_rng,
						    lockdep_is_held(&rng_mutex));
		if (!cur_rng || rng->quality > cur_rng->quality) {
			/*
			 * Set new rng as current as the new rng source
			 * provides better entropy quality and was not
			 * chosen by userspace.
			 */
			err = set_current_rng(rng);
			if (err)
				goto out_unlock;
		}
	}
	mutex_unlock(&rng_mutex);
	return 0;
out_unlock:
	mutex_unlock(&rng_mutex);
out:
	return err;
}
EXPORT_SYMBOL_GPL(hwrng_register);
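
/*
 * Usage sketch (editorial, illustrative, not part of this file): a minimal
 * driver registers with the core by filling in a struct hwrng and calling
 * hwrng_register(). The device name, quality value and the constant
 * "sample" below are made-up placeholders; a real driver would read bytes
 * from its hardware instead.
 */
#include <linux/hw_random.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>

static int demo_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
	u32 sample = 0x12345678;	/* placeholder, NOT actually random */
	size_t len = min_t(size_t, max, sizeof(sample));

	memcpy(data, &sample, len);
	return len;			/* bytes produced, or -errno on failure */
}

static struct hwrng demo_rng = {
	.name	 = "demo-rng",		/* must be unique, see the -EEXIST check above */
	.read	 = demo_rng_read,
	.quality = 512,			/* claimed entropy per 1024 bits of input */
};

static int __init demo_rng_mod_init(void)
{
	return hwrng_register(&demo_rng);
}

static void __exit demo_rng_mod_exit(void)
{
	hwrng_unregister(&demo_rng);
}

module_init(demo_rng_mod_init);
module_exit(demo_rng_mod_exit);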

void hwrng_unregister(struct hwrng *rng)
{
	struct hwrng *cur_rng;
	int err;

	mutex_lock(&rng_mutex);

	list_del(&rng->list);
	complete_all(&rng->dying);

	cur_rng = rcu_dereference_protected(current_rng,
					    lockdep_is_held(&rng_mutex));
	if (cur_rng == rng) {
		err = enable_best_rng();
		if (err) {
			drop_current_rng();
			cur_rng_set_by_user = 0;
		}
	}

	mutex_unlock(&rng_mutex);
	wait_for_completion(&rng->cleanup_done);
}
EXPORT_SYMBOL_GPL(hwrng_unregister);

static void devm_hwrng_release(struct device *dev, void *res)
{
	hwrng_unregister(*(struct hwrng **)res);
}

static int devm_hwrng_match(struct device *dev, void *res, void *data)
{
	struct hwrng **r = res;

	if (WARN_ON(!r || !*r))
		return 0;

	return *r == data;
}

int devm_hwrng_register(struct device *dev, struct hwrng *rng)
{
	struct hwrng **ptr;
	int error;

	ptr = devres_alloc(devm_hwrng_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	error = hwrng_register(rng);
	if (error) {
		devres_free(ptr);
		return error;
	}

	*ptr = rng;
	devres_add(dev, ptr);
	return 0;
}
EXPORT_SYMBOL_GPL(devm_hwrng_register);
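
/*
 * Usage sketch (editorial, illustrative, not part of this file): the devm_
 * variant ties the RNG's lifetime to a device, so the driver needs no
 * explicit hwrng_unregister() on remove. The probe function below is
 * hypothetical and reuses demo_rng_read from the sketch above.
 */
#include <linux/platform_device.h>

static int demo_rng_probe(struct platform_device *pdev)
{
	struct hwrng *rng;

	rng = devm_kzalloc(&pdev->dev, sizeof(*rng), GFP_KERNEL);
	if (!rng)
		return -ENOMEM;

	rng->name = "demo-rng";
	rng->read = demo_rng_read;

	/* Unregistered automatically when the device is removed. */
	return devm_hwrng_register(&pdev->dev, rng);
}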

void devm_hwrng_unregister(struct device *dev, struct hwrng *rng)
{
	devres_release(dev, devm_hwrng_release, devm_hwrng_match, rng);
}
EXPORT_SYMBOL_GPL(devm_hwrng_unregister);

long hwrng_msleep(struct hwrng *rng, unsigned int msecs)
{
	unsigned long timeout = msecs_to_jiffies(msecs) + 1;

	return wait_for_completion_interruptible_timeout(&rng->dying, timeout);
}
EXPORT_SYMBOL_GPL(hwrng_msleep);
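
/*
 * Usage sketch (editorial, illustrative, not part of this file): a driver's
 * read callback can use hwrng_msleep() instead of msleep() while polling
 * slow hardware; the wait returns early once hwrng_unregister() completes
 * rng->dying. demo_hw_data_ready() and demo_hw_read_bytes() are
 * hypothetical hardware helpers.
 */
static int demo_slow_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
	while (!demo_hw_data_ready()) {
		if (!wait)
			return 0;	/* non-blocking caller and no data yet */
		if (hwrng_msleep(rng, 100))
			return -ENODEV;	/* interrupted or device going away */
	}

	return demo_hw_read_bytes(data, max);
}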

long hwrng_yield(struct hwrng *rng)
{
	return wait_for_completion_interruptible_timeout(&rng->dying, 1);
}
EXPORT_SYMBOL_GPL(hwrng_yield);

static int __init hwrng_modinit(void)
{
	int ret;

	/* kmalloc makes this safe for virt_to_page() in virtio_rng.c */
	rng_buffer = kmalloc(rng_buffer_size(), GFP_KERNEL);
	if (!rng_buffer)
		return -ENOMEM;

	rng_fillbuf = kmalloc(rng_buffer_size(), GFP_KERNEL);
	if (!rng_fillbuf) {
		kfree(rng_buffer);
		return -ENOMEM;
	}

	ret = misc_register(&rng_miscdev);
	if (ret) {
		kfree(rng_fillbuf);
		kfree(rng_buffer);
	}

	return ret;
}

static void __exit hwrng_modexit(void)
{
	mutex_lock(&rng_mutex);
	WARN_ON(rcu_access_pointer(current_rng));
	kfree(rng_buffer);
	kfree(rng_fillbuf);
	mutex_unlock(&rng_mutex);

	misc_deregister(&rng_miscdev);
}

fs_initcall(hwrng_modinit); /* depends on misc_register() */
module_exit(hwrng_modexit);

MODULE_DESCRIPTION("H/W Random Number Generator (RNG) driver");
MODULE_LICENSE("GPL");