Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
torvalds
GitHub Repository: torvalds/linux
Path: blob/master/drivers/hid/bpf/hid_bpf_dispatch.c
26282 views
1
// SPDX-License-Identifier: GPL-2.0-only
2
3
/*
4
* HID-BPF support for Linux
5
*
6
* Copyright (c) 2022-2024 Benjamin Tissoires
7
*/
8
9
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
10
#include <linux/bitops.h>
11
#include <linux/btf.h>
12
#include <linux/btf_ids.h>
13
#include <linux/filter.h>
14
#include <linux/hid.h>
15
#include <linux/hid_bpf.h>
16
#include <linux/init.h>
17
#include <linux/kfifo.h>
18
#include <linux/minmax.h>
19
#include <linux/module.h>
20
#include "hid_bpf_dispatch.h"
21
22
/*
 * Entry points into the HID core (bus type, report lookup, raw request, ...)
 * used by the dispatch helpers below.  NOTE(review): presumably assigned by
 * hid-core when it registers — confirm against the hid_ops definition site.
 */
const struct hid_ops *hid_ops;
EXPORT_SYMBOL(hid_ops);
24
25
/*
 * dispatch_hid_bpf_device_event() - run every attached hid_device_event()
 * BPF hook on an incoming report.
 *
 * @hdev:      device the report belongs to
 * @type:      HID report type, must be < HID_REPORT_TYPES
 * @data:      raw report bytes
 * @size:      in: length of @data; out: length chosen by the last hook that
 *             returned a non-zero size
 * @interrupt: not referenced in this function's body
 * @source:    opaque origin identifier, forwarded verbatim to each hook
 * @from_bpf:  recorded in the context so nested kfunc calls can detect
 *             re-entry from BPF
 *
 * Returns the per-device scratch buffer holding the (possibly rewritten)
 * report, the original @data when no program is attached, or an ERR_PTR()
 * on error.
 */
u8 *
dispatch_hid_bpf_device_event(struct hid_device *hdev, enum hid_report_type type, u8 *data,
			      u32 *size, int interrupt, u64 source, bool from_bpf)
{
	struct hid_bpf_ctx_kern ctx_kern = {
		.ctx = {
			.hid = hdev,
			.allocated_size = hdev->bpf.allocated_data,
			.size = *size,
		},
		.data = hdev->bpf.device_data,
		.from_bpf = from_bpf,
	};
	struct hid_bpf_ops *e;
	int ret;

	if (unlikely(hdev->bpf.destroyed))
		return ERR_PTR(-ENODEV);

	if (type >= HID_REPORT_TYPES)
		return ERR_PTR(-EINVAL);

	/* no program has been attached yet */
	if (!hdev->bpf.device_data)
		return data;

	/* stage the report in the pre-allocated scratch buffer; clear the
	 * tail so hooks never see stale bytes from a previous report */
	memset(ctx_kern.data, 0, hdev->bpf.allocated_data);
	memcpy(ctx_kern.data, data, *size);

	rcu_read_lock();
	list_for_each_entry_rcu(e, &hdev->bpf.prog_list, list) {
		if (e->hid_device_event) {
			ret = e->hid_device_event(&ctx_kern.ctx, type, source);
			if (ret < 0) {
				rcu_read_unlock();
				return ERR_PTR(ret);
			}

			/* a positive return is the new report size,
			 * visible to the following hooks through ctx */
			if (ret)
				ctx_kern.ctx.size = ret;
		}
	}
	rcu_read_unlock();

	ret = ctx_kern.ctx.size;
	if (ret) {
		/* hooks must not claim more than the scratch buffer holds */
		if (ret > ctx_kern.ctx.allocated_size)
			return ERR_PTR(-EINVAL);

		*size = ret;
	}

	return ctx_kern.data;
}
EXPORT_SYMBOL_GPL(dispatch_hid_bpf_device_event);
80
81
/*
 * dispatch_hid_bpf_raw_requests() - give attached BPF programs a chance to
 * intercept a hid_hw_raw_request() before it reaches the transport driver.
 *
 * Walks the prog_list under SRCU and calls each hid_hw_request() hook with
 * a context wrapping @buf.  The first hook returning non-zero short-circuits
 * the walk and its value is returned; 0 means no hook handled the request.
 *
 * Returns -ENODEV once the device is marked destroyed, -EINVAL for an
 * out-of-range report type.
 */
int dispatch_hid_bpf_raw_requests(struct hid_device *hdev,
				  unsigned char reportnum, u8 *buf,
				  u32 size, enum hid_report_type rtype,
				  enum hid_class_request reqtype,
				  u64 source, bool from_bpf)
{
	struct hid_bpf_ctx_kern ctx_kern = {
		.ctx = {
			.hid = hdev,
			.allocated_size = size,
			.size = size,
		},
		.data = buf,
		.from_bpf = from_bpf,
	};
	struct hid_bpf_ops *e;
	int ret, idx;

	if (unlikely(hdev->bpf.destroyed))
		return -ENODEV;

	if (rtype >= HID_REPORT_TYPES)
		return -EINVAL;

	/* SRCU (not plain RCU): the hooks are allowed to sleep */
	idx = srcu_read_lock(&hdev->bpf.srcu);
	list_for_each_entry_srcu(e, &hdev->bpf.prog_list, list,
				 srcu_read_lock_held(&hdev->bpf.srcu)) {
		if (!e->hid_hw_request)
			continue;

		ret = e->hid_hw_request(&ctx_kern.ctx, reportnum, rtype, reqtype, source);
		if (ret)
			goto out;
	}
	ret = 0;

out:
	srcu_read_unlock(&hdev->bpf.srcu, idx);
	return ret;
}
EXPORT_SYMBOL_GPL(dispatch_hid_bpf_raw_requests);
122
123
/*
 * dispatch_hid_bpf_output_report() - give attached BPF programs a chance to
 * intercept a hid_hw_output_report() before it reaches the transport driver.
 *
 * Same structure as dispatch_hid_bpf_raw_requests(): walk the prog_list
 * under SRCU, stop at the first hid_hw_output_report() hook that returns
 * non-zero and propagate that value; 0 means no hook handled the report.
 *
 * Returns -ENODEV once the device is marked destroyed.
 */
int dispatch_hid_bpf_output_report(struct hid_device *hdev,
				   __u8 *buf, u32 size, u64 source,
				   bool from_bpf)
{
	struct hid_bpf_ctx_kern ctx_kern = {
		.ctx = {
			.hid = hdev,
			.allocated_size = size,
			.size = size,
		},
		.data = buf,
		.from_bpf = from_bpf,
	};
	struct hid_bpf_ops *e;
	int ret, idx;

	if (unlikely(hdev->bpf.destroyed))
		return -ENODEV;

	/* SRCU (not plain RCU): the hooks are allowed to sleep */
	idx = srcu_read_lock(&hdev->bpf.srcu);
	list_for_each_entry_srcu(e, &hdev->bpf.prog_list, list,
				 srcu_read_lock_held(&hdev->bpf.srcu)) {
		if (!e->hid_hw_output_report)
			continue;

		ret = e->hid_hw_output_report(&ctx_kern.ctx, source);
		if (ret)
			goto out;
	}
	ret = 0;

out:
	srcu_read_unlock(&hdev->bpf.srcu, idx);
	return ret;
}
EXPORT_SYMBOL_GPL(dispatch_hid_bpf_output_report);
159
160
/*
 * call_hid_bpf_rdesc_fixup() - let the attached rdesc_ops BPF program
 * rewrite the report descriptor before it is parsed.
 *
 * @hdev:  device being probed
 * @rdesc: original descriptor bytes
 * @size:  in: length of @rdesc; out: length reported by the fixup program
 *
 * On success returns a freshly allocated buffer (trimmed to *size) that the
 * caller owns; on any failure the original @rdesc is returned untouched.
 *
 * NOTE(review): if the final krealloc() shrink fails it returns NULL and
 * frees nothing, so the caller would see a NULL descriptor while ctx_kern.data
 * stays allocated — confirm callers tolerate a NULL return here.
 */
const u8 *call_hid_bpf_rdesc_fixup(struct hid_device *hdev, const u8 *rdesc, unsigned int *size)
{
	int ret;
	struct hid_bpf_ctx_kern ctx_kern = {
		.ctx = {
			.hid = hdev,
			.size = *size,
			.allocated_size = HID_MAX_DESCRIPTOR_SIZE,
		},
	};

	/* no fixup program attached: hand back the descriptor unchanged */
	if (!hdev->bpf.rdesc_ops)
		goto ignore_bpf;

	ctx_kern.data = kzalloc(ctx_kern.ctx.allocated_size, GFP_KERNEL);
	if (!ctx_kern.data)
		goto ignore_bpf;

	/* the program works on a bounded copy, never on @rdesc itself */
	memcpy(ctx_kern.data, rdesc, min_t(unsigned int, *size, HID_MAX_DESCRIPTOR_SIZE));

	ret = hdev->bpf.rdesc_ops->hid_rdesc_fixup(&ctx_kern.ctx);
	if (ret < 0)
		goto ignore_bpf;

	if (ret) {
		/* positive return is the new size; reject out-of-bounds claims */
		if (ret > ctx_kern.ctx.allocated_size)
			goto ignore_bpf;

		*size = ret;
	}

	/* shrink the scratch buffer to the final size and transfer ownership */
	return krealloc(ctx_kern.data, *size, GFP_KERNEL);

ignore_bpf:
	kfree(ctx_kern.data);
	return rdesc;
}
EXPORT_SYMBOL_GPL(call_hid_bpf_rdesc_fixup);
198
199
static int device_match_id(struct device *dev, const void *id)
200
{
201
struct hid_device *hdev = to_hid_device(dev);
202
203
return hdev->id == *(int *)id;
204
}
205
206
struct hid_device *hid_get_device(unsigned int hid_id)
207
{
208
struct device *dev;
209
210
if (!hid_ops)
211
return ERR_PTR(-EINVAL);
212
213
dev = bus_find_device(hid_ops->bus_type, NULL, &hid_id, device_match_id);
214
if (!dev)
215
return ERR_PTR(-EINVAL);
216
217
return to_hid_device(dev);
218
}
219
220
/* Drop the device reference taken by hid_get_device(). */
void hid_put_device(struct hid_device *hid)
{
	put_device(&hid->dev);
}
224
225
/*
 * Allocate the per-device scratch buffer used to hand reports to BPF
 * programs.  On success *data/*size describe a zeroed buffer sized to the
 * device's largest report, rounded up to a multiple of 64 bytes.
 */
static int __hid_bpf_allocate_data(struct hid_device *hdev, u8 **data, u32 *size)
{
	unsigned int type, id, longest = 0;
	size_t rounded;
	u8 *buf;

	/* find the longest report this device can produce, across all
	 * report types and all report ids */
	for (type = 0; type < HID_REPORT_TYPES; type++) {
		struct hid_report_enum *renum = hdev->report_enum + type;

		for (id = 0; id < HID_MAX_IDS; id++) {
			struct hid_report *report = renum->report_id_hash[id];

			if (report)
				longest = max(longest, hid_report_len(report));
		}
	}

	/*
	 * Give us a little bit of extra space and some predictability in the
	 * buffer length we create. This way, we can tell users that they can
	 * work on chunks of 64 bytes of memory without having the bpf verifier
	 * scream at them.
	 */
	rounded = DIV_ROUND_UP(longest, 64) * 64;

	buf = kzalloc(rounded, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	*data = buf;
	*size = rounded;

	return 0;
}
260
261
int hid_bpf_allocate_event_data(struct hid_device *hdev)
262
{
263
/* hdev->bpf.device_data is already allocated, abort */
264
if (hdev->bpf.device_data)
265
return 0;
266
267
return __hid_bpf_allocate_data(hdev, &hdev->bpf.device_data, &hdev->bpf.allocated_data);
268
}
269
270
int hid_bpf_reconnect(struct hid_device *hdev)
271
{
272
if (!test_and_set_bit(ffs(HID_STAT_REPROBED), &hdev->status)) {
273
/* trigger call to call_hid_bpf_rdesc_fixup() during the next probe */
274
hdev->bpf_rsize = 0;
275
return device_reprobe(&hdev->dev);
276
}
277
278
return 0;
279
}
280
281
/* Disables missing prototype warnings */
282
__bpf_kfunc_start_defs();
283
284
/**
 * hid_bpf_get_data - Get the kernel memory pointer associated with the context @ctx
 *
 * @ctx: The HID-BPF context
 * @offset: The offset within the memory
 * @rdwr_buf_size: the const size of the buffer
 *
 * @returns %NULL on error, an %__u8 memory pointer on success
 */
__bpf_kfunc __u8 *
hid_bpf_get_data(struct hid_bpf_ctx *ctx, unsigned int offset, const size_t rdwr_buf_size)
{
	struct hid_bpf_ctx_kern *ctx_kern;

	if (!ctx)
		return NULL;

	ctx_kern = container_of(ctx, struct hid_bpf_ctx_kern, ctx);

	/* requested window must fit entirely inside the allocated buffer.
	 * NOTE(review): relies on rdwr_buf_size + offset not wrapping; both
	 * are verifier-constrained constants — confirm that assumption holds */
	if (rdwr_buf_size + offset > ctx->allocated_size)
		return NULL;

	return ctx_kern->data + offset;
}
308
309
/**
310
* hid_bpf_allocate_context - Allocate a context to the given HID device
311
*
312
* @hid_id: the system unique identifier of the HID device
313
*
314
* @returns A pointer to &struct hid_bpf_ctx on success, %NULL on error.
315
*/
316
__bpf_kfunc struct hid_bpf_ctx *
317
hid_bpf_allocate_context(unsigned int hid_id)
318
{
319
struct hid_device *hdev;
320
struct hid_bpf_ctx_kern *ctx_kern = NULL;
321
322
hdev = hid_get_device(hid_id);
323
if (IS_ERR(hdev))
324
return NULL;
325
326
ctx_kern = kzalloc(sizeof(*ctx_kern), GFP_KERNEL);
327
if (!ctx_kern) {
328
hid_put_device(hdev);
329
return NULL;
330
}
331
332
ctx_kern->ctx.hid = hdev;
333
334
return &ctx_kern->ctx;
335
}
336
337
/**
338
* hid_bpf_release_context - Release the previously allocated context @ctx
339
*
340
* @ctx: the HID-BPF context to release
341
*
342
*/
343
__bpf_kfunc void
344
hid_bpf_release_context(struct hid_bpf_ctx *ctx)
345
{
346
struct hid_bpf_ctx_kern *ctx_kern;
347
struct hid_device *hid;
348
349
ctx_kern = container_of(ctx, struct hid_bpf_ctx_kern, ctx);
350
hid = (struct hid_device *)ctx_kern->ctx.hid; /* ignore const */
351
352
kfree(ctx_kern);
353
354
/* get_device() is called by bus_find_device() */
355
hid_put_device(hid);
356
}
357
358
static int
359
__hid_bpf_hw_check_params(struct hid_bpf_ctx *ctx, __u8 *buf, size_t *buf__sz,
360
enum hid_report_type rtype)
361
{
362
struct hid_report_enum *report_enum;
363
struct hid_report *report;
364
u32 report_len;
365
366
/* check arguments */
367
if (!ctx || !hid_ops || !buf)
368
return -EINVAL;
369
370
switch (rtype) {
371
case HID_INPUT_REPORT:
372
case HID_OUTPUT_REPORT:
373
case HID_FEATURE_REPORT:
374
break;
375
default:
376
return -EINVAL;
377
}
378
379
if (*buf__sz < 1)
380
return -EINVAL;
381
382
report_enum = ctx->hid->report_enum + rtype;
383
report = hid_ops->hid_get_report(report_enum, buf);
384
if (!report)
385
return -EINVAL;
386
387
report_len = hid_report_len(report);
388
389
if (*buf__sz > report_len)
390
*buf__sz = report_len;
391
392
return 0;
393
}
394
395
/**
 * hid_bpf_hw_request - Communicate with a HID device
 *
 * @ctx: the HID-BPF context previously allocated in hid_bpf_allocate_context()
 * @buf: a %PTR_TO_MEM buffer
 * @buf__sz: the size of the data to transfer
 * @rtype: the type of the report (%HID_INPUT_REPORT, %HID_FEATURE_REPORT, %HID_OUTPUT_REPORT)
 * @reqtype: the type of the request (%HID_REQ_GET_REPORT, %HID_REQ_SET_REPORT, ...)
 *
 * @returns %0 on success, a negative error code otherwise.
 */
__bpf_kfunc int
hid_bpf_hw_request(struct hid_bpf_ctx *ctx, __u8 *buf, size_t buf__sz,
		   enum hid_report_type rtype, enum hid_class_request reqtype)
{
	struct hid_bpf_ctx_kern *ctx_kern;
	size_t size = buf__sz;
	u8 *dma_data;
	int ret;

	ctx_kern = container_of(ctx, struct hid_bpf_ctx_kern, ctx);

	/* refuse to re-enter the HID stack from a BPF-originated call */
	if (ctx_kern->from_bpf)
		return -EDEADLOCK;

	/* check arguments */
	ret = __hid_bpf_hw_check_params(ctx, buf, &size, rtype);
	if (ret)
		return ret;

	/* whitelist of request types a BPF program may issue */
	switch (reqtype) {
	case HID_REQ_GET_REPORT:
	case HID_REQ_GET_IDLE:
	case HID_REQ_GET_PROTOCOL:
	case HID_REQ_SET_REPORT:
	case HID_REQ_SET_IDLE:
	case HID_REQ_SET_PROTOCOL:
		break;
	default:
		return -EINVAL;
	}

	/* bounce through a kernel copy of the BPF buffer */
	dma_data = kmemdup(buf, size, GFP_KERNEL);
	if (!dma_data)
		return -ENOMEM;

	/* dma_data[0] is the report number expected by the transport */
	ret = hid_ops->hid_hw_raw_request(ctx->hid,
					  dma_data[0],
					  dma_data,
					  size,
					  rtype,
					  reqtype,
					  (u64)(long)ctx,
					  true); /* prevent infinite recursions */

	/* positive return is the number of bytes received; copy them back */
	if (ret > 0)
		memcpy(buf, dma_data, ret);

	kfree(dma_data);
	return ret;
}
456
457
/**
 * hid_bpf_hw_output_report - Send an output report to a HID device
 *
 * @ctx: the HID-BPF context previously allocated in hid_bpf_allocate_context()
 * @buf: a %PTR_TO_MEM buffer
 * @buf__sz: the size of the data to transfer
 *
 * Returns the number of bytes transferred on success, a negative error code otherwise.
 */
__bpf_kfunc int
hid_bpf_hw_output_report(struct hid_bpf_ctx *ctx, __u8 *buf, size_t buf__sz)
{
	struct hid_bpf_ctx_kern *ctx_kern;
	size_t size = buf__sz;
	u8 *dma_data;
	int ret;

	ctx_kern = container_of(ctx, struct hid_bpf_ctx_kern, ctx);
	/* refuse to re-enter the HID stack from a BPF-originated call */
	if (ctx_kern->from_bpf)
		return -EDEADLOCK;

	/* check arguments */
	ret = __hid_bpf_hw_check_params(ctx, buf, &size, HID_OUTPUT_REPORT);
	if (ret)
		return ret;

	/* bounce through a kernel copy of the BPF buffer */
	dma_data = kmemdup(buf, size, GFP_KERNEL);
	if (!dma_data)
		return -ENOMEM;

	/* final "true" marks the call as BPF-originated to stop recursion */
	ret = hid_ops->hid_hw_output_report(ctx->hid, dma_data, size, (u64)(long)ctx, true);

	kfree(dma_data);
	return ret;
}
492
493
/*
 * Common worker for hid_bpf_input_report()/hid_bpf_try_input_report():
 * validate the arguments and feed @buf into the input layer as a report of
 * @type.  @lock_already_taken tells hid_input_report() whether the caller
 * already holds the device's input lock.
 */
static int
__hid_bpf_input_report(struct hid_bpf_ctx *ctx, enum hid_report_type type, u8 *buf,
		       size_t size, bool lock_already_taken)
{
	struct hid_bpf_ctx_kern *ctx_kern;
	int ret;

	ctx_kern = container_of(ctx, struct hid_bpf_ctx_kern, ctx);
	/* refuse to re-enter the HID stack from a BPF-originated call */
	if (ctx_kern->from_bpf)
		return -EDEADLOCK;

	/* check arguments (may shrink the local size to the report length) */
	ret = __hid_bpf_hw_check_params(ctx, buf, &size, type);
	if (ret)
		return ret;

	/* interrupt=0; "true" marks the injection as BPF-originated */
	return hid_ops->hid_input_report(ctx->hid, type, buf, size, 0, (u64)(long)ctx, true,
					 lock_already_taken);
}
512
513
/**
 * hid_bpf_try_input_report - Inject a HID report in the kernel from a HID device
 *
 * @ctx: the HID-BPF context previously allocated in hid_bpf_allocate_context()
 * @type: the type of the report (%HID_INPUT_REPORT, %HID_FEATURE_REPORT, %HID_OUTPUT_REPORT)
 * @buf: a %PTR_TO_MEM buffer
 * @buf__sz: the size of the data to transfer
 *
 * Returns %0 on success, a negative error code otherwise. This function will immediately
 * fail if the device is not available, thus can be safely used in IRQ context.
 */
__bpf_kfunc int
hid_bpf_try_input_report(struct hid_bpf_ctx *ctx, enum hid_report_type type, u8 *buf,
			 const size_t buf__sz)
{
	struct hid_bpf_ctx_kern *ctx_kern;
	bool from_hid_event_hook;

	/* if ctx wraps the per-device scratch buffer, we were invoked from a
	 * device-event hook, where the input lock is already held */
	ctx_kern = container_of(ctx, struct hid_bpf_ctx_kern, ctx);
	from_hid_event_hook = ctx_kern->data && ctx_kern->data == ctx->hid->bpf.device_data;

	return __hid_bpf_input_report(ctx, type, buf, buf__sz, from_hid_event_hook);
}
536
537
/**
 * hid_bpf_input_report - Inject a HID report in the kernel from a HID device
 *
 * @ctx: the HID-BPF context previously allocated in hid_bpf_allocate_context()
 * @type: the type of the report (%HID_INPUT_REPORT, %HID_FEATURE_REPORT, %HID_OUTPUT_REPORT)
 * @buf: a %PTR_TO_MEM buffer
 * @buf__sz: the size of the data to transfer
 *
 * Returns %0 on success, a negative error code otherwise. This function will wait for the
 * device to be available before injecting the event, thus needs to be called in sleepable
 * context.
 */
__bpf_kfunc int
hid_bpf_input_report(struct hid_bpf_ctx *ctx, enum hid_report_type type, u8 *buf,
		     const size_t buf__sz)
{
	int ret;

	/* sleep until the driver input lock is ours (or a signal arrives) */
	ret = down_interruptible(&ctx->hid->driver_input_lock);
	if (ret)
		return ret;

	/* check arguments */
	ret = __hid_bpf_input_report(ctx, type, buf, buf__sz, true /* lock_already_taken */);

	up(&ctx->hid->driver_input_lock);

	return ret;
}
566
__bpf_kfunc_end_defs();
567
568
/*
 * The following set contains all functions we agree BPF programs
 * can use.
 */
BTF_KFUNCS_START(hid_bpf_kfunc_ids)
BTF_ID_FLAGS(func, hid_bpf_get_data, KF_RET_NULL)
BTF_ID_FLAGS(func, hid_bpf_allocate_context, KF_ACQUIRE | KF_RET_NULL | KF_SLEEPABLE)
BTF_ID_FLAGS(func, hid_bpf_release_context, KF_RELEASE | KF_SLEEPABLE)
BTF_ID_FLAGS(func, hid_bpf_hw_request, KF_SLEEPABLE)
BTF_ID_FLAGS(func, hid_bpf_hw_output_report, KF_SLEEPABLE)
BTF_ID_FLAGS(func, hid_bpf_input_report, KF_SLEEPABLE)
/* no KF_SLEEPABLE: hid_bpf_try_input_report() fails fast instead of waiting */
BTF_ID_FLAGS(func, hid_bpf_try_input_report)
BTF_KFUNCS_END(hid_bpf_kfunc_ids)

/* kfunc set registered for struct_ops (tracing-style) HID-BPF programs */
static const struct btf_kfunc_id_set hid_bpf_kfunc_set = {
	.owner = THIS_MODULE,
	.set = &hid_bpf_kfunc_ids,
};
586
587
/* for syscall HID-BPF */
BTF_KFUNCS_START(hid_bpf_syscall_kfunc_ids)
BTF_ID_FLAGS(func, hid_bpf_allocate_context, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, hid_bpf_release_context, KF_RELEASE)
BTF_ID_FLAGS(func, hid_bpf_hw_request)
BTF_ID_FLAGS(func, hid_bpf_hw_output_report)
BTF_ID_FLAGS(func, hid_bpf_input_report)
BTF_KFUNCS_END(hid_bpf_syscall_kfunc_ids)

/* kfunc set registered for BPF_PROG_TYPE_SYSCALL programs; note the
 * KF_SLEEPABLE flags are absent here — presumably because syscall programs
 * always run in sleepable context (NOTE(review): confirm) */
static const struct btf_kfunc_id_set hid_bpf_syscall_kfunc_set = {
	.owner = THIS_MODULE,
	.set = &hid_bpf_syscall_kfunc_ids,
};
600
601
int hid_bpf_connect_device(struct hid_device *hdev)
602
{
603
bool need_to_allocate = false;
604
struct hid_bpf_ops *e;
605
606
rcu_read_lock();
607
list_for_each_entry_rcu(e, &hdev->bpf.prog_list, list) {
608
if (e->hid_device_event) {
609
need_to_allocate = true;
610
break;
611
}
612
}
613
rcu_read_unlock();
614
615
/* only allocate BPF data if there are programs attached */
616
if (!need_to_allocate)
617
return 0;
618
619
return hid_bpf_allocate_event_data(hdev);
620
}
621
EXPORT_SYMBOL_GPL(hid_bpf_connect_device);
622
623
/* Called on disconnect: free the per-device BPF scratch buffer and reset
 * the bookkeeping so a later reconnect can reallocate it. */
void hid_bpf_disconnect_device(struct hid_device *hdev)
{
	kfree(hdev->bpf.device_data);
	hdev->bpf.device_data = NULL;
	hdev->bpf.allocated_data = 0;
}
EXPORT_SYMBOL_GPL(hid_bpf_disconnect_device);
630
631
/*
 * Final BPF teardown for a device being destroyed.  Order matters: mark the
 * device destroyed (dispatchers bail out with -ENODEV), detach the ops, then
 * wait for all in-flight SRCU readers before the srcu_struct is cleaned up.
 */
void hid_bpf_destroy_device(struct hid_device *hdev)
{
	if (!hdev)
		return;

	/* mark the device as destroyed in bpf so we don't reattach it */
	hdev->bpf.destroyed = true;

	__hid_bpf_ops_destroy_device(hdev);

	synchronize_srcu(&hdev->bpf.srcu);
	cleanup_srcu_struct(&hdev->bpf.srcu);
}
EXPORT_SYMBOL_GPL(hid_bpf_destroy_device);
645
646
/* Initialize the per-device BPF state: empty program list, its guarding
 * mutex, and the SRCU domain used by the dispatchers.  Returns the result
 * of init_srcu_struct(). */
int hid_bpf_device_init(struct hid_device *hdev)
{
	INIT_LIST_HEAD(&hdev->bpf.prog_list);
	mutex_init(&hdev->bpf.prog_list_lock);
	return init_srcu_struct(&hdev->bpf.srcu);
}
EXPORT_SYMBOL_GPL(hid_bpf_device_init);
653
654
/*
 * Register both kfunc sets with the BPF subsystem.  Deliberately never
 * returns an error: failing here would break HID as a whole, so failures
 * are only logged and HID-BPF is simply unavailable.
 */
static int __init hid_bpf_init(void)
{
	int err;

	/* Note: if we exit with an error any time here, we would entirely break HID, which
	 * is probably not something we want. So we log an error and return success.
	 *
	 * This is not a big deal: nobody will be able to use the functionality.
	 */

	err = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &hid_bpf_kfunc_set);
	if (err) {
		pr_warn("error while setting HID BPF tracing kfuncs: %d", err);
		return 0;
	}

	err = register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &hid_bpf_syscall_kfunc_set);
	if (err) {
		pr_warn("error while setting HID BPF syscall kfuncs: %d", err);
		return 0;
	}

	return 0;
}

/* late_initcall: BPF and HID core must both be up before registration */
late_initcall(hid_bpf_init);
MODULE_AUTHOR("Benjamin Tissoires");
MODULE_LICENSE("GPL");
682
683