Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
torvalds
GitHub Repository: torvalds/linux
Path: blob/master/drivers/gpio/gpio-aggregator.c
26278 views
1
// SPDX-License-Identifier: GPL-2.0-only
2
//
3
// GPIO Aggregator
4
//
5
// Copyright (C) 2019-2020 Glider bv
6
7
#define DRV_NAME "gpio-aggregator"
8
#define pr_fmt(fmt) DRV_NAME ": " fmt
9
10
#include <linux/bitmap.h>
11
#include <linux/bitops.h>
12
#include <linux/configfs.h>
13
#include <linux/ctype.h>
14
#include <linux/delay.h>
15
#include <linux/idr.h>
16
#include <linux/kernel.h>
17
#include <linux/list.h>
18
#include <linux/lockdep.h>
19
#include <linux/mod_devicetable.h>
20
#include <linux/module.h>
21
#include <linux/mutex.h>
22
#include <linux/overflow.h>
23
#include <linux/platform_device.h>
24
#include <linux/property.h>
25
#include <linux/slab.h>
26
#include <linux/spinlock.h>
27
#include <linux/string.h>
28
29
#include <linux/gpio/consumer.h>
30
#include <linux/gpio/driver.h>
31
#include <linux/gpio/machine.h>
32
33
#include "dev-sync-probe.h"
34
35
#define AGGREGATOR_MAX_GPIOS 512
36
#define AGGREGATOR_LEGACY_PREFIX "_sysfs"
37
38
/*
39
* GPIO Aggregator sysfs interface
40
*/
41
42
/* Per-aggregator state, shared by the configfs and legacy sysfs paths. */
struct gpio_aggregator {
	struct dev_sync_probe_data probe_data;	/* platform device + synchronous-probe state */
	struct config_group group;		/* configfs directory for this aggregator */
	struct gpiod_lookup_table *lookups;	/* lookup table registered while the device is live */
	struct mutex lock;			/* protects list_head and activation state */
	int id;					/* IDR id; part of the platform device name */

	/* List of gpio_aggregator_line. Always added in order */
	struct list_head list_head;

	/* used by legacy sysfs interface only */
	bool init_via_sysfs;
	char args[];	/* copy of the new_device argument string (legacy sysfs only) */
};
56
57
/* One virtual line of an aggregator, exposed as a configfs subdirectory. */
struct gpio_aggregator_line {
	struct config_group group;	/* configfs directory ("lineN") for this line */
	struct gpio_aggregator *parent;	/* owning aggregator */
	struct list_head entry;		/* node in parent->list_head, sorted by idx */

	/* Line index within the aggregator device */
	unsigned int idx;

	/* Custom name for the virtual line */
	const char *name;
	/* GPIO chip label or line name */
	const char *key;
	/* Can be negative to indicate lookup by line name */
	int offset;

	enum gpio_lookup_flags flags;
};
74
75
struct gpio_aggregator_pdev_meta {
76
bool init_via_sysfs;
77
};
78
79
static DEFINE_MUTEX(gpio_aggregator_lock); /* protects idr */
80
static DEFINE_IDR(gpio_aggregator_idr);
81
82
/*
 * Allocate a new aggregator with @arg_size bytes of trailing argument space
 * and assign it an id from gpio_aggregator_idr.  On success *@aggr is set to
 * the new (caller-owned) object and 0 is returned; otherwise a negative errno.
 */
static int gpio_aggregator_alloc(struct gpio_aggregator **aggr, size_t arg_size)
{
	int ret;

	struct gpio_aggregator *new __free(kfree) = kzalloc(
			sizeof(*new) + arg_size, GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	scoped_guard(mutex, &gpio_aggregator_lock)
		ret = idr_alloc(&gpio_aggregator_idr, new, 0, 0, GFP_KERNEL);

	if (ret < 0)
		return ret;

	new->id = ret;
	INIT_LIST_HEAD(&new->list_head);
	mutex_init(&new->lock);
	/* Transfer ownership to the caller; disarm the kfree cleanup. */
	*aggr = no_free_ptr(new);
	return 0;
}
103
104
/* Release an aggregator's id and free it.  Counterpart of _alloc(). */
static void gpio_aggregator_free(struct gpio_aggregator *aggr)
{
	scoped_guard(mutex, &gpio_aggregator_lock)
		idr_remove(&gpio_aggregator_idr, aggr->id);

	mutex_destroy(&aggr->lock);
	kfree(aggr);
}
112
113
/*
 * Append one GPIO lookup entry (@key/@hwnum at index *@n) to aggr->lookups,
 * growing the table by one slot.  *@n is incremented on success and the new
 * last slot is zeroed to keep the table sentinel-terminated.
 */
static int gpio_aggregator_add_gpio(struct gpio_aggregator *aggr,
				    const char *key, int hwnum, unsigned int *n)
{
	struct gpiod_lookup_table *lookups;

	/* One extra slot for the entry, one for the zeroed sentinel. */
	lookups = krealloc(aggr->lookups, struct_size(lookups, table, *n + 2),
			   GFP_KERNEL);
	if (!lookups)
		return -ENOMEM;

	lookups->table[*n] = GPIO_LOOKUP_IDX(key, hwnum, NULL, *n, 0);

	(*n)++;
	memset(&lookups->table[*n], 0, sizeof(lookups->table[*n]));

	aggr->lookups = lookups;
	return 0;
}
131
132
/* True when the platform device exists and has successfully probed. */
static bool gpio_aggregator_is_active(struct gpio_aggregator *aggr)
{
	lockdep_assert_held(&aggr->lock);

	return aggr->probe_data.pdev && platform_get_drvdata(aggr->probe_data.pdev);
}
138
139
/* Only aggregators created via legacy sysfs can be "activating". */
140
static bool gpio_aggregator_is_activating(struct gpio_aggregator *aggr)
{
	lockdep_assert_held(&aggr->lock);

	/* Device registered but drvdata not yet set: probe still pending. */
	return aggr->probe_data.pdev && !platform_get_drvdata(aggr->probe_data.pdev);
}
146
147
/* Number of configured lines.  Caller must hold aggr->lock. */
static size_t gpio_aggregator_count_lines(struct gpio_aggregator *aggr)
{
	lockdep_assert_held(&aggr->lock);

	return list_count_nodes(&aggr->list_head);
}
153
154
/*
 * Allocate a line object for @parent at index @idx.  @key (chip label or
 * line name) is duplicated if non-NULL; @offset < 0 selects lookup by line
 * name.  Returns the new line or an ERR_PTR().  The line is NOT yet added
 * to the parent's list.
 */
static struct gpio_aggregator_line *
gpio_aggregator_line_alloc(struct gpio_aggregator *parent, unsigned int idx,
			   char *key, int offset)
{
	struct gpio_aggregator_line *line;

	line = kzalloc(sizeof(*line), GFP_KERNEL);
	if (!line)
		return ERR_PTR(-ENOMEM);

	if (key) {
		line->key = kstrdup(key, GFP_KERNEL);
		if (!line->key) {
			kfree(line);
			return ERR_PTR(-ENOMEM);
		}
	}

	line->flags = GPIO_LOOKUP_FLAGS_DEFAULT;
	line->parent = parent;
	line->idx = idx;
	line->offset = offset;
	INIT_LIST_HEAD(&line->entry);

	return line;
}
180
181
/* Insert @line into aggr->list_head, keeping the list sorted by idx. */
static void gpio_aggregator_line_add(struct gpio_aggregator *aggr,
				     struct gpio_aggregator_line *line)
{
	struct gpio_aggregator_line *tmp;

	lockdep_assert_held(&aggr->lock);

	/* Insert before the first entry with a larger idx. */
	list_for_each_entry(tmp, &aggr->list_head, entry) {
		if (tmp->idx > line->idx) {
			list_add_tail(&line->entry, &tmp->entry);
			return;
		}
	}
	/* Largest idx so far: append at the tail. */
	list_add_tail(&line->entry, &aggr->list_head);
}
196
197
/* Unlink @line from the aggregator's list.  Does not free it. */
static void gpio_aggregator_line_del(struct gpio_aggregator *aggr,
				     struct gpio_aggregator_line *line)
{
	lockdep_assert_held(&aggr->lock);

	list_del(&line->entry);
}
204
205
/* Unregister, unlink and free every line of @aggr (legacy sysfs teardown). */
static void gpio_aggregator_free_lines(struct gpio_aggregator *aggr)
{
	struct gpio_aggregator_line *line, *tmp;

	list_for_each_entry_safe(line, tmp, &aggr->list_head, entry) {
		configfs_unregister_group(&line->group);
		/*
		 * Normally, we acquire aggr->lock within the configfs
		 * callback. However, in the legacy sysfs interface case,
		 * calling configfs_(un)register_group while holding
		 * aggr->lock could cause a deadlock. Fortunately, this is
		 * unnecessary because the new_device/delete_device path
		 * and the module unload path are mutually exclusive,
		 * thanks to an explicit try_module_get. That's why this
		 * minimal scoped_guard suffices.
		 */
		scoped_guard(mutex, &aggr->lock)
			gpio_aggregator_line_del(aggr, line);
		kfree(line->key);
		kfree(line->name);
		kfree(line);
	}
}
228
229
230
/*
231
* GPIO Forwarder
232
*/
233
234
struct gpiochip_fwd_timing {
235
u32 ramp_up_us;
236
u32 ramp_down_us;
237
};
238
239
/* Forwarding gpiochip: every operation is relayed to the descs[] lines. */
struct gpiochip_fwd {
	struct gpio_chip chip;		/* the exposed, aggregated chip */
	struct gpio_desc **descs;	/* one target descriptor per forwarded line */
	union {
		struct mutex mlock;	/* protects tmp[] if can_sleep */
		spinlock_t slock;	/* protects tmp[] if !can_sleep */
	};
	struct gpiochip_fwd_timing *delay_timings;	/* optional per-line ramp delays */
	unsigned long tmp[];		/* values and descs for multiple ops */
};
249
250
#define fwd_tmp_values(fwd) &(fwd)->tmp[0]
251
#define fwd_tmp_descs(fwd) (void *)&(fwd)->tmp[BITS_TO_LONGS((fwd)->chip.ngpio)]
252
253
#define fwd_tmp_size(ngpios) (BITS_TO_LONGS((ngpios)) + (ngpios))
254
255
static int gpio_fwd_get_direction(struct gpio_chip *chip, unsigned int offset)
256
{
257
struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
258
259
return gpiod_get_direction(fwd->descs[offset]);
260
}
261
262
static int gpio_fwd_direction_input(struct gpio_chip *chip, unsigned int offset)
263
{
264
struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
265
266
return gpiod_direction_input(fwd->descs[offset]);
267
}
268
269
static int gpio_fwd_direction_output(struct gpio_chip *chip,
270
unsigned int offset, int value)
271
{
272
struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
273
274
return gpiod_direction_output(fwd->descs[offset], value);
275
}
276
277
static int gpio_fwd_get(struct gpio_chip *chip, unsigned int offset)
278
{
279
struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
280
281
return chip->can_sleep ? gpiod_get_value_cansleep(fwd->descs[offset])
282
: gpiod_get_value(fwd->descs[offset]);
283
}
284
285
/*
 * Read all lines selected in @mask into @bits using one array operation.
 * Uses the shared tmp[] scratch space, so the caller must hold the
 * forwarder's lock (see gpio_fwd_get_multiple_locked()).
 */
static int gpio_fwd_get_multiple(struct gpiochip_fwd *fwd, unsigned long *mask,
				 unsigned long *bits)
{
	struct gpio_desc **descs = fwd_tmp_descs(fwd);
	unsigned long *values = fwd_tmp_values(fwd);
	unsigned int i, j = 0;
	int error;

	/* Pack the selected descriptors densely for the array call. */
	bitmap_clear(values, 0, fwd->chip.ngpio);
	for_each_set_bit(i, mask, fwd->chip.ngpio)
		descs[j++] = fwd->descs[i];

	if (fwd->chip.can_sleep)
		error = gpiod_get_array_value_cansleep(j, descs, NULL, values);
	else
		error = gpiod_get_array_value(j, descs, NULL, values);
	if (error)
		return error;

	/* Scatter the densely packed results back to the mask positions. */
	j = 0;
	for_each_set_bit(i, mask, fwd->chip.ngpio)
		__assign_bit(i, bits, test_bit(j++, values));

	return 0;
}
310
311
/*
 * Locked wrapper around gpio_fwd_get_multiple(): serializes access to the
 * shared tmp[] buffers with a mutex (sleepable chip) or spinlock (atomic).
 */
static int gpio_fwd_get_multiple_locked(struct gpio_chip *chip,
					unsigned long *mask, unsigned long *bits)
{
	struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
	unsigned long flags;
	int error;

	if (chip->can_sleep) {
		mutex_lock(&fwd->mlock);
		error = gpio_fwd_get_multiple(fwd, mask, bits);
		mutex_unlock(&fwd->mlock);
	} else {
		spin_lock_irqsave(&fwd->slock, flags);
		error = gpio_fwd_get_multiple(fwd, mask, bits);
		spin_unlock_irqrestore(&fwd->slock, flags);
	}

	return error;
}
330
331
static void gpio_fwd_delay(struct gpio_chip *chip, unsigned int offset, int value)
332
{
333
struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
334
const struct gpiochip_fwd_timing *delay_timings;
335
bool is_active_low = gpiod_is_active_low(fwd->descs[offset]);
336
u32 delay_us;
337
338
delay_timings = &fwd->delay_timings[offset];
339
if ((!is_active_low && value) || (is_active_low && !value))
340
delay_us = delay_timings->ramp_up_us;
341
else
342
delay_us = delay_timings->ramp_down_us;
343
if (!delay_us)
344
return;
345
346
if (chip->can_sleep)
347
fsleep(delay_us);
348
else
349
udelay(delay_us);
350
}
351
352
/*
 * Write one line and, if delay timings are configured, wait for the signal
 * to ramp before returning.
 */
static int gpio_fwd_set(struct gpio_chip *chip, unsigned int offset, int value)
{
	struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
	int ret;

	if (chip->can_sleep)
		ret = gpiod_set_value_cansleep(fwd->descs[offset], value);
	else
		ret = gpiod_set_value(fwd->descs[offset], value);
	if (ret)
		return ret;

	if (fwd->delay_timings)
		gpio_fwd_delay(chip, offset, value);

	return ret;
}
369
370
static int gpio_fwd_set_multiple(struct gpiochip_fwd *fwd, unsigned long *mask,
371
unsigned long *bits)
372
{
373
struct gpio_desc **descs = fwd_tmp_descs(fwd);
374
unsigned long *values = fwd_tmp_values(fwd);
375
unsigned int i, j = 0, ret;
376
377
for_each_set_bit(i, mask, fwd->chip.ngpio) {
378
__assign_bit(j, values, test_bit(i, bits));
379
descs[j++] = fwd->descs[i];
380
}
381
382
if (fwd->chip.can_sleep)
383
ret = gpiod_set_array_value_cansleep(j, descs, NULL, values);
384
else
385
ret = gpiod_set_array_value(j, descs, NULL, values);
386
387
return ret;
388
}
389
390
/*
 * Locked wrapper around gpio_fwd_set_multiple(): serializes access to the
 * shared tmp[] buffers with a mutex (sleepable chip) or spinlock (atomic).
 */
static int gpio_fwd_set_multiple_locked(struct gpio_chip *chip,
					unsigned long *mask, unsigned long *bits)
{
	struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
	unsigned long flags;
	int ret;

	if (chip->can_sleep) {
		mutex_lock(&fwd->mlock);
		ret = gpio_fwd_set_multiple(fwd, mask, bits);
		mutex_unlock(&fwd->mlock);
	} else {
		spin_lock_irqsave(&fwd->slock, flags);
		ret = gpio_fwd_set_multiple(fwd, mask, bits);
		spin_unlock_irqrestore(&fwd->slock, flags);
	}

	return ret;
}
409
410
static int gpio_fwd_set_config(struct gpio_chip *chip, unsigned int offset,
411
unsigned long config)
412
{
413
struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
414
415
return gpiod_set_config(fwd->descs[offset], config);
416
}
417
418
static int gpio_fwd_to_irq(struct gpio_chip *chip, unsigned int offset)
419
{
420
struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
421
422
return gpiod_to_irq(fwd->descs[offset]);
423
}
424
425
/*
426
* The GPIO delay provides a way to configure platform specific delays
427
* for the GPIO ramp-up or ramp-down delays. This can serve the following
428
* purposes:
429
* - Open-drain output using an RC filter
430
*/
431
#define FWD_FEATURE_DELAY BIT(0)
432
433
#ifdef CONFIG_OF_GPIO
434
/*
 * Three-cell OF translate hook: cell 0 is the line number, cells 1 and 2
 * are the ramp-up and ramp-down delays in microseconds.  Stores the delays
 * and returns the line number, or a negative errno on malformed specs.
 */
static int gpiochip_fwd_delay_of_xlate(struct gpio_chip *chip,
				       const struct of_phandle_args *gpiospec,
				       u32 *flags)
{
	struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
	struct gpiochip_fwd_timing *timings;
	u32 line;

	if (gpiospec->args_count != chip->of_gpio_n_cells)
		return -EINVAL;

	line = gpiospec->args[0];
	if (line >= chip->ngpio)
		return -EINVAL;

	timings = &fwd->delay_timings[line];
	timings->ramp_up_us = gpiospec->args[1];
	timings->ramp_down_us = gpiospec->args[2];

	return line;
}
455
456
/*
 * Allocate per-line delay timings and install the 3-cell OF translate hook
 * that fills them in.  Only built when CONFIG_OF_GPIO is enabled.
 */
static int gpiochip_fwd_setup_delay_line(struct device *dev, struct gpio_chip *chip,
					 struct gpiochip_fwd *fwd)
{
	fwd->delay_timings = devm_kcalloc(dev, chip->ngpio,
					  sizeof(*fwd->delay_timings),
					  GFP_KERNEL);
	if (!fwd->delay_timings)
		return -ENOMEM;

	chip->of_xlate = gpiochip_fwd_delay_of_xlate;
	chip->of_gpio_n_cells = 3;

	return 0;
}
470
#else
471
/* No-op stub when CONFIG_OF_GPIO is disabled: delays are OF-only. */
static int gpiochip_fwd_setup_delay_line(struct device *dev, struct gpio_chip *chip,
					 struct gpiochip_fwd *fwd)
{
	return 0;
}
476
#endif /* !CONFIG_OF_GPIO */
477
478
/**
479
* gpiochip_fwd_create() - Create a new GPIO forwarder
480
* @dev: Parent device pointer
481
* @ngpios: Number of GPIOs in the forwarder.
482
* @descs: Array containing the GPIO descriptors to forward to.
483
* This array must contain @ngpios entries, and must not be deallocated
484
* before the forwarder has been destroyed again.
485
* @features: Bitwise ORed features as defined with FWD_FEATURE_*.
486
*
487
* This function creates a new gpiochip, which forwards all GPIO operations to
488
* the passed GPIO descriptors.
489
*
490
* Return: An opaque object pointer, or an ERR_PTR()-encoded negative error
491
* code on failure.
492
*/
493
static struct gpiochip_fwd *gpiochip_fwd_create(struct device *dev,
						unsigned int ngpios,
						struct gpio_desc *descs[],
						unsigned long features)
{
	const char *label = dev_name(dev);
	struct gpiochip_fwd *fwd;
	struct gpio_chip *chip;
	unsigned int i;
	int error;

	/* tmp[] holds a value bitmap plus a desc pointer array (fwd_tmp_size). */
	fwd = devm_kzalloc(dev, struct_size(fwd, tmp, fwd_tmp_size(ngpios)),
			   GFP_KERNEL);
	if (!fwd)
		return ERR_PTR(-ENOMEM);

	chip = &fwd->chip;

	/*
	 * If any of the GPIO lines are sleeping, then the entire forwarder
	 * will be sleeping.
	 * If any of the chips support .set_config(), then the forwarder will
	 * support setting configs.
	 */
	for (i = 0; i < ngpios; i++) {
		struct gpio_chip *parent = gpiod_to_chip(descs[i]);

		dev_dbg(dev, "%u => gpio %d irq %d\n", i,
			desc_to_gpio(descs[i]), gpiod_to_irq(descs[i]));

		if (gpiod_cansleep(descs[i]))
			chip->can_sleep = true;
		if (parent && parent->set_config)
			chip->set_config = gpio_fwd_set_config;
	}

	chip->label = label;
	chip->parent = dev;
	chip->owner = THIS_MODULE;
	chip->get_direction = gpio_fwd_get_direction;
	chip->direction_input = gpio_fwd_direction_input;
	chip->direction_output = gpio_fwd_direction_output;
	chip->get = gpio_fwd_get;
	chip->get_multiple = gpio_fwd_get_multiple_locked;
	chip->set = gpio_fwd_set;
	chip->set_multiple = gpio_fwd_set_multiple_locked;
	chip->to_irq = gpio_fwd_to_irq;
	chip->base = -1;	/* dynamic GPIO base */
	chip->ngpio = ngpios;
	fwd->descs = descs;

	/* The lock protecting tmp[] must match the chip's sleep capability. */
	if (chip->can_sleep)
		mutex_init(&fwd->mlock);
	else
		spin_lock_init(&fwd->slock);

	if (features & FWD_FEATURE_DELAY) {
		error = gpiochip_fwd_setup_delay_line(dev, chip, fwd);
		if (error)
			return ERR_PTR(error);
	}

	error = devm_gpiochip_add_data(dev, chip, fwd);
	if (error)
		return ERR_PTR(error);

	return fwd;
}
561
562
/*
563
* Configfs interface
564
*/
565
566
static struct gpio_aggregator *
567
to_gpio_aggregator(struct config_item *item)
568
{
569
struct config_group *group = to_config_group(item);
570
571
return container_of(group, struct gpio_aggregator, group);
572
}
573
574
static struct gpio_aggregator_line *
575
to_gpio_aggregator_line(struct config_item *item)
576
{
577
struct config_group *group = to_config_group(item);
578
579
return container_of(group, struct gpio_aggregator_line, group);
580
}
581
582
/*
 * Build a software node carrying the "gpio-line-names" property from the
 * configured lines.  Returns the node, NULL when no lines are configured,
 * or an ERR_PTR() on allocation failure.  Caller must hold aggr->lock
 * (asserted via gpio_aggregator_count_lines()).
 */
static struct fwnode_handle *
gpio_aggregator_make_device_sw_node(struct gpio_aggregator *aggr)
{
	struct property_entry properties[2];	/* one property + terminator */
	struct gpio_aggregator_line *line;
	size_t num_lines;
	int n = 0;

	memset(properties, 0, sizeof(properties));

	num_lines = gpio_aggregator_count_lines(aggr);
	if (num_lines == 0)
		return NULL;

	const char **line_names __free(kfree) = kcalloc(
				num_lines, sizeof(*line_names), GFP_KERNEL);
	if (!line_names)
		return ERR_PTR(-ENOMEM);

	/* The list is always sorted as new elements are inserted in order. */
	list_for_each_entry(line, &aggr->list_head, entry)
		line_names[n++] = line->name ?: "";

	properties[0] = PROPERTY_ENTRY_STRING_ARRAY_LEN(
					"gpio-line-names",
					line_names, num_lines);

	/* The software node copies the strings; line_names is freed on return. */
	return fwnode_create_software_node(properties, NULL);
}
611
612
/*
 * Instantiate the platform device for @aggr: build the lookup table from
 * the configured lines, attach the line-names software node, and register
 * the device synchronously.  Caller must hold aggr->lock (asserted via
 * gpio_aggregator_count_lines()).  Returns 0 or a negative errno; on
 * failure all intermediate state is unwound.
 */
static int gpio_aggregator_activate(struct gpio_aggregator *aggr)
{
	struct platform_device_info pdevinfo;
	struct gpio_aggregator_line *line;
	struct fwnode_handle *swnode;
	unsigned int n = 0;
	int ret = 0;

	if (gpio_aggregator_count_lines(aggr) == 0)
		return -EINVAL;

	/* Start with an empty (sentinel-only) table; entries are added below. */
	aggr->lookups = kzalloc(struct_size(aggr->lookups, table, 1),
				GFP_KERNEL);
	if (!aggr->lookups)
		return -ENOMEM;

	swnode = gpio_aggregator_make_device_sw_node(aggr);
	if (IS_ERR(swnode)) {
		ret = PTR_ERR(swnode);
		goto err_remove_lookups;
	}

	memset(&pdevinfo, 0, sizeof(pdevinfo));
	pdevinfo.name = DRV_NAME;
	pdevinfo.id = aggr->id;
	pdevinfo.fwnode = swnode;

	/* The list is always sorted as new elements are inserted in order. */
	list_for_each_entry(line, &aggr->list_head, entry) {
		/*
		 * - Either GPIO chip label or line name must be configured
		 *   (i.e. line->key must be non-NULL)
		 * - Line directories must be named with sequential numeric
		 *   suffixes starting from 0. (i.e. ./line0, ./line1, ...)
		 */
		if (!line->key || line->idx != n) {
			ret = -EINVAL;
			goto err_remove_swnode;
		}

		/* Negative offset means "look up by line name" (U16_MAX marker). */
		if (line->offset < 0)
			ret = gpio_aggregator_add_gpio(aggr, line->key,
						       U16_MAX, &n);
		else
			ret = gpio_aggregator_add_gpio(aggr, line->key,
						       line->offset, &n);
		if (ret)
			goto err_remove_swnode;
	}

	aggr->lookups->dev_id = kasprintf(GFP_KERNEL, "%s.%d", DRV_NAME, aggr->id);
	if (!aggr->lookups->dev_id) {
		ret = -ENOMEM;
		goto err_remove_swnode;
	}

	gpiod_add_lookup_table(aggr->lookups);

	/* Registers the device and waits until its probe has completed. */
	ret = dev_sync_probe_register(&aggr->probe_data, &pdevinfo);
	if (ret)
		goto err_remove_lookup_table;

	return 0;

err_remove_lookup_table:
	kfree(aggr->lookups->dev_id);
	gpiod_remove_lookup_table(aggr->lookups);
err_remove_swnode:
	fwnode_remove_software_node(swnode);
err_remove_lookups:
	kfree(aggr->lookups);

	return ret;
}
686
687
/* Tear down the platform device and lookup table created by _activate(). */
static void gpio_aggregator_deactivate(struct gpio_aggregator *aggr)
{
	dev_sync_probe_unregister(&aggr->probe_data);
	/* Remove the table before freeing its dev_id string and storage. */
	gpiod_remove_lookup_table(aggr->lookups);
	kfree(aggr->lookups->dev_id);
	kfree(aggr->lookups);
}
694
695
/*
 * Pin (@lock == true) or unpin (@lock == false) the configfs line entries
 * so they cannot be rmdir'ed while the aggregator device is live.
 */
static void gpio_aggregator_lockup_configfs(struct gpio_aggregator *aggr,
					    bool lock)
{
	struct configfs_subsystem *subsys = aggr->group.cg_subsys;
	struct gpio_aggregator_line *line;

	/*
	 * The device only needs to depend on leaf lines. This is
	 * sufficient to lock up all the configfs entries that the
	 * instantiated, alive device depends on.
	 */
	list_for_each_entry(line, &aggr->list_head, entry) {
		if (lock)
			configfs_depend_item_unlocked(
					subsys, &line->group.cg_item);
		else
			configfs_undepend_item_unlocked(
					&line->group.cg_item);
	}
}
715
716
/* configfs "key" read: the chip label or line name ("" if unset). */
static ssize_t
gpio_aggregator_line_key_show(struct config_item *item, char *page)
{
	struct gpio_aggregator_line *line = to_gpio_aggregator_line(item);
	struct gpio_aggregator *aggr = line->parent;

	guard(mutex)(&aggr->lock);

	return sysfs_emit(page, "%s\n", line->key ?: "");
}
726
727
/*
 * configfs "key" write: set the chip label or line name.  Input is
 * whitespace-trimmed.  Rejected with -EBUSY while the device is live or
 * its probe is in flight.
 */
static ssize_t
gpio_aggregator_line_key_store(struct config_item *item, const char *page,
			       size_t count)
{
	struct gpio_aggregator_line *line = to_gpio_aggregator_line(item);
	struct gpio_aggregator *aggr = line->parent;

	char *key __free(kfree) = kstrndup(skip_spaces(page), count,
					   GFP_KERNEL);
	if (!key)
		return -ENOMEM;

	strim(key);

	guard(mutex)(&aggr->lock);

	if (gpio_aggregator_is_activating(aggr) ||
	    gpio_aggregator_is_active(aggr))
		return -EBUSY;

	/* Replace the old key; ownership moves to the line. */
	kfree(line->key);
	line->key = no_free_ptr(key);

	return count;
}
752
CONFIGFS_ATTR(gpio_aggregator_line_, key);
753
754
/* configfs "name" read: the custom virtual line name ("" if unset). */
static ssize_t
gpio_aggregator_line_name_show(struct config_item *item, char *page)
{
	struct gpio_aggregator_line *line = to_gpio_aggregator_line(item);
	struct gpio_aggregator *aggr = line->parent;

	guard(mutex)(&aggr->lock);

	return sysfs_emit(page, "%s\n", line->name ?: "");
}
764
765
/*
 * configfs "name" write: set the custom virtual line name.  Input is
 * whitespace-trimmed.  Rejected with -EBUSY while the device is live or
 * its probe is in flight.
 */
static ssize_t
gpio_aggregator_line_name_store(struct config_item *item, const char *page,
				size_t count)
{
	struct gpio_aggregator_line *line = to_gpio_aggregator_line(item);
	struct gpio_aggregator *aggr = line->parent;

	char *name __free(kfree) = kstrndup(skip_spaces(page), count,
					    GFP_KERNEL);
	if (!name)
		return -ENOMEM;

	strim(name);

	guard(mutex)(&aggr->lock);

	if (gpio_aggregator_is_activating(aggr) ||
	    gpio_aggregator_is_active(aggr))
		return -EBUSY;

	/* Replace the old name; ownership moves to the line. */
	kfree(line->name);
	line->name = no_free_ptr(name);

	return count;
}
790
CONFIGFS_ATTR(gpio_aggregator_line_, name);
791
792
/* configfs "offset" read: hardware offset, or -1 for lookup by line name. */
static ssize_t
gpio_aggregator_line_offset_show(struct config_item *item, char *page)
{
	struct gpio_aggregator_line *line = to_gpio_aggregator_line(item);
	struct gpio_aggregator *aggr = line->parent;

	guard(mutex)(&aggr->lock);

	return sysfs_emit(page, "%d\n", line->offset);
}
802
803
/*
 * configfs "offset" write: accepts -1 (lookup by line name) through
 * U16_MAX - 1 (hardware offset within the chip named by "key").
 * Rejected with -EBUSY while the device is live or probing.
 */
static ssize_t
gpio_aggregator_line_offset_store(struct config_item *item, const char *page,
				  size_t count)
{
	struct gpio_aggregator_line *line = to_gpio_aggregator_line(item);
	struct gpio_aggregator *aggr = line->parent;
	int offset, ret;

	ret = kstrtoint(page, 0, &offset);
	if (ret)
		return ret;

	/*
	 * When offset == -1, 'key' represents a line name to lookup.
	 * When 0 <= offset < 65535, 'key' represents the label of the chip with
	 * the 'offset' value representing the line within that chip.
	 *
	 * GPIOLIB uses the U16_MAX value to indicate lookup by line name so
	 * the greatest offset we can accept is (U16_MAX - 1).
	 */
	if (offset > (U16_MAX - 1) || offset < -1)
		return -EINVAL;

	guard(mutex)(&aggr->lock);

	if (gpio_aggregator_is_activating(aggr) ||
	    gpio_aggregator_is_active(aggr))
		return -EBUSY;

	line->offset = offset;

	return count;
}
836
CONFIGFS_ATTR(gpio_aggregator_line_, offset);
837
838
static struct configfs_attribute *gpio_aggregator_line_attrs[] = {
839
&gpio_aggregator_line_attr_key,
840
&gpio_aggregator_line_attr_name,
841
&gpio_aggregator_line_attr_offset,
842
NULL
843
};
844
845
/*
 * configfs "dev_name" read: the actual device name when instantiated,
 * otherwise the name the device would get ("gpio-aggregator.<id>").
 */
static ssize_t
gpio_aggregator_device_dev_name_show(struct config_item *item, char *page)
{
	struct gpio_aggregator *aggr = to_gpio_aggregator(item);
	struct platform_device *pdev;

	guard(mutex)(&aggr->lock);

	pdev = aggr->probe_data.pdev;
	if (pdev)
		return sysfs_emit(page, "%s\n", dev_name(&pdev->dev));

	return sysfs_emit(page, "%s.%d\n", DRV_NAME, aggr->id);
}
859
CONFIGFS_ATTR_RO(gpio_aggregator_device_, dev_name);
860
861
/* configfs "live" read: '1' when the device is instantiated and probed. */
static ssize_t
gpio_aggregator_device_live_show(struct config_item *item, char *page)
{
	struct gpio_aggregator *aggr = to_gpio_aggregator(item);

	guard(mutex)(&aggr->lock);

	return sysfs_emit(page, "%c\n",
			  gpio_aggregator_is_active(aggr) ? '1' : '0');
}
871
872
/*
 * configfs "live" write: activate (1) or deactivate (0) the aggregator.
 * The module reference prevents racing with module unload; configfs
 * depend/undepend pins the line entries for the lifetime of the device
 * (skipped for legacy-sysfs aggregators, whose lines are default groups).
 */
static ssize_t
gpio_aggregator_device_live_store(struct config_item *item, const char *page,
				  size_t count)
{
	struct gpio_aggregator *aggr = to_gpio_aggregator(item);
	int ret = 0;
	bool live;

	ret = kstrtobool(page, &live);
	if (ret)
		return ret;

	if (!try_module_get(THIS_MODULE))
		return -ENOENT;

	/* Must be taken before aggr->lock; depend_item may sleep in configfs. */
	if (live && !aggr->init_via_sysfs)
		gpio_aggregator_lockup_configfs(aggr, true);

	scoped_guard(mutex, &aggr->lock) {
		if (gpio_aggregator_is_activating(aggr) ||
		    (live == gpio_aggregator_is_active(aggr)))
			ret = -EPERM;
		else if (live)
			ret = gpio_aggregator_activate(aggr);
		else
			gpio_aggregator_deactivate(aggr);
	}

	/*
	 * Undepend is required only if device disablement (live == 0)
	 * succeeds or if device enablement (live == 1) fails.
	 */
	if (live == !!ret && !aggr->init_via_sysfs)
		gpio_aggregator_lockup_configfs(aggr, false);

	module_put(THIS_MODULE);

	return ret ?: count;
}
911
CONFIGFS_ATTR(gpio_aggregator_device_, live);
912
913
static struct configfs_attribute *gpio_aggregator_device_attrs[] = {
914
&gpio_aggregator_device_attr_dev_name,
915
&gpio_aggregator_device_attr_live,
916
NULL
917
};
918
919
/* configfs release for a line item: unlink from the parent and free. */
static void
gpio_aggregator_line_release(struct config_item *item)
{
	struct gpio_aggregator_line *line = to_gpio_aggregator_line(item);
	struct gpio_aggregator *aggr = line->parent;

	guard(mutex)(&aggr->lock);

	gpio_aggregator_line_del(aggr, line);
	kfree(line->key);
	kfree(line->name);
	kfree(line);
}
932
933
static struct configfs_item_operations gpio_aggregator_line_item_ops = {
934
.release = gpio_aggregator_line_release,
935
};
936
937
static const struct config_item_type gpio_aggregator_line_type = {
938
.ct_item_ops = &gpio_aggregator_line_item_ops,
939
.ct_attrs = gpio_aggregator_line_attrs,
940
.ct_owner = THIS_MODULE,
941
};
942
943
/* configfs release for an aggregator item: free the aggregator itself. */
static void gpio_aggregator_device_release(struct config_item *item)
{
	struct gpio_aggregator *aggr = to_gpio_aggregator(item);

	/*
	 * At this point, aggr is neither active nor activating,
	 * so calling gpio_aggregator_deactivate() is always unnecessary.
	 */
	gpio_aggregator_free(aggr);
}
953
954
static struct configfs_item_operations gpio_aggregator_device_item_ops = {
955
.release = gpio_aggregator_device_release,
956
};
957
958
/*
 * mkdir(2) inside an aggregator directory: create a "lineN" subgroup.
 * The name must be exactly "line<idx>"; duplicate indices are rejected,
 * as is mkdir on a live device or on a legacy-sysfs aggregator.
 */
static struct config_group *
gpio_aggregator_device_make_group(struct config_group *group, const char *name)
{
	struct gpio_aggregator *aggr = to_gpio_aggregator(&group->cg_item);
	struct gpio_aggregator_line *line;
	unsigned int idx;
	int ret, nchar;

	/* %n ensures there is no trailing junk after the index. */
	ret = sscanf(name, "line%u%n", &idx, &nchar);
	if (ret != 1 || nchar != strlen(name))
		return ERR_PTR(-EINVAL);

	if (aggr->init_via_sysfs)
		/*
		 * Aggregators created via legacy sysfs interface are exposed as
		 * default groups, which means rmdir(2) is prohibited for them.
		 * For simplicity, and to avoid confusion, we also prohibit
		 * mkdir(2).
		 */
		return ERR_PTR(-EPERM);

	guard(mutex)(&aggr->lock);

	if (gpio_aggregator_is_active(aggr))
		return ERR_PTR(-EBUSY);

	list_for_each_entry(line, &aggr->list_head, entry)
		if (line->idx == idx)
			return ERR_PTR(-EINVAL);

	line = gpio_aggregator_line_alloc(aggr, idx, NULL, -1);
	if (IS_ERR(line))
		return ERR_CAST(line);

	config_group_init_type_name(&line->group, name, &gpio_aggregator_line_type);

	gpio_aggregator_line_add(aggr, line);

	return &line->group;
}
998
999
static struct configfs_group_operations gpio_aggregator_device_group_ops = {
1000
.make_group = gpio_aggregator_device_make_group,
1001
};
1002
1003
static const struct config_item_type gpio_aggregator_device_type = {
1004
.ct_group_ops = &gpio_aggregator_device_group_ops,
1005
.ct_item_ops = &gpio_aggregator_device_item_ops,
1006
.ct_attrs = gpio_aggregator_device_attrs,
1007
.ct_owner = THIS_MODULE,
1008
};
1009
1010
/*
 * mkdir(2) at the subsystem root: create a new, empty aggregator.
 * Names starting with the "_sysfs" prefix are reserved for aggregators
 * auto-created by the legacy sysfs interface.
 */
static struct config_group *
gpio_aggregator_make_group(struct config_group *group, const char *name)
{
	struct gpio_aggregator *aggr;
	int ret;

	/*
	 * "_sysfs" prefix is reserved for auto-generated config group
	 * for devices create via legacy sysfs interface.
	 */
	if (strncmp(name, AGGREGATOR_LEGACY_PREFIX,
		    sizeof(AGGREGATOR_LEGACY_PREFIX) - 1) == 0)
		return ERR_PTR(-EINVAL);

	/* arg space is unneeded */
	ret = gpio_aggregator_alloc(&aggr, 0);
	if (ret)
		return ERR_PTR(ret);

	config_group_init_type_name(&aggr->group, name, &gpio_aggregator_device_type);
	dev_sync_probe_init(&aggr->probe_data);

	return &aggr->group;
}
1034
1035
static struct configfs_group_operations gpio_aggregator_group_ops = {
1036
.make_group = gpio_aggregator_make_group,
1037
};
1038
1039
static const struct config_item_type gpio_aggregator_type = {
1040
.ct_group_ops = &gpio_aggregator_group_ops,
1041
.ct_owner = THIS_MODULE,
1042
};
1043
1044
static struct configfs_subsystem gpio_aggregator_subsys = {
1045
.su_group = {
1046
.cg_item = {
1047
.ci_namebuf = DRV_NAME,
1048
.ci_type = &gpio_aggregator_type,
1049
},
1050
},
1051
};
1052
1053
/*
1054
* Sysfs interface
1055
*/
1056
/*
 * Parse the legacy sysfs new_device argument string stored in aggr->args.
 * The format alternates keys with offset lists:
 *   "<key> <offsets|name> [<key> <offsets|name> ...]"
 * where a key followed by a non-numeric token is treated as a line-name
 * lookup, and a key followed by a bitmap list ("0-3,5") adds one line per
 * offset on the chip labelled <key>.  Each parsed line is also registered
 * as a configfs group mirroring the configfs-created layout.
 *
 * Returns 0 on success; on any error all created lines are torn down.
 */
static int gpio_aggregator_parse(struct gpio_aggregator *aggr)
{
	char *args = skip_spaces(aggr->args);
	struct gpio_aggregator_line *line;
	char name[CONFIGFS_ITEM_NAME_LEN];
	char *key, *offsets, *p;
	unsigned int i, n = 0;
	int error = 0;

	unsigned long *bitmap __free(bitmap) =
			bitmap_alloc(AGGREGATOR_MAX_GPIOS, GFP_KERNEL);
	if (!bitmap)
		return -ENOMEM;

	args = next_arg(args, &key, &p);
	while (*args) {
		args = next_arg(args, &offsets, &p);

		p = get_options(offsets, 0, &error);
		if (error == 0 || *p) {
			/* Named GPIO line */
			scnprintf(name, sizeof(name), "line%u", n);
			line = gpio_aggregator_line_alloc(aggr, n, key, -1);
			if (IS_ERR(line)) {
				error = PTR_ERR(line);
				goto err;
			}
			config_group_init_type_name(&line->group, name,
						    &gpio_aggregator_line_type);
			error = configfs_register_group(&aggr->group,
							&line->group);
			if (error)
				goto err;
			scoped_guard(mutex, &aggr->lock)
				gpio_aggregator_line_add(aggr, line);

			/* U16_MAX marks lookup by line name for gpiolib. */
			error = gpio_aggregator_add_gpio(aggr, key, U16_MAX, &n);
			if (error)
				goto err;

			/* The "offsets" token was actually the next key. */
			key = offsets;
			continue;
		}

		/* GPIO chip + offset(s) */
		error = bitmap_parselist(offsets, bitmap, AGGREGATOR_MAX_GPIOS);
		if (error) {
			pr_err("Cannot parse %s: %d\n", offsets, error);
			goto err;
		}

		for_each_set_bit(i, bitmap, AGGREGATOR_MAX_GPIOS) {
			scnprintf(name, sizeof(name), "line%u", n);
			line = gpio_aggregator_line_alloc(aggr, n, key, i);
			if (IS_ERR(line)) {
				error = PTR_ERR(line);
				goto err;
			}
			config_group_init_type_name(&line->group, name,
						    &gpio_aggregator_line_type);
			error = configfs_register_group(&aggr->group,
							&line->group);
			if (error)
				goto err;
			scoped_guard(mutex, &aggr->lock)
				gpio_aggregator_line_add(aggr, line);

			error = gpio_aggregator_add_gpio(aggr, key, i, &n);
			if (error)
				goto err;
		}

		args = next_arg(args, &key, &p);
	}

	if (!n) {
		pr_err("No GPIOs specified\n");
		error = -EINVAL;
		goto err;
	}

	return 0;

err:
	gpio_aggregator_free_lines(aggr);
	return error;
}
1143
1144
/*
 * Handler for the legacy 'new_device' driver attribute.
 *
 * Parses the aggregator specification written by the user, mirrors the new
 * device into configfs under the AGGREGATOR_LEGACY_PREFIX namespace,
 * registers a GPIO lookup table for it and finally instantiates the
 * platform device. Returns @count on success, a negative errno on failure.
 */
static ssize_t gpio_aggregator_new_device_store(struct device_driver *driver,
						const char *buf, size_t count)
{
	struct gpio_aggregator_pdev_meta meta = { .init_via_sysfs = true };
	char name[CONFIGFS_ITEM_NAME_LEN];
	struct gpio_aggregator *aggr;
	struct platform_device *pdev;
	int res;

	/* Block a concurrent module unload while the device is being set up */
	if (!try_module_get(THIS_MODULE))
		return -ENOENT;

	/* kernfs guarantees string termination, so count + 1 is safe */
	res = gpio_aggregator_alloc(&aggr, count + 1);
	if (res)
		goto put_module;

	memcpy(aggr->args, buf, count + 1);

	aggr->init_via_sysfs = true;
	aggr->lookups = kzalloc(struct_size(aggr->lookups, table, 1),
				GFP_KERNEL);
	if (!aggr->lookups) {
		res = -ENOMEM;
		goto free_ga;
	}

	aggr->lookups->dev_id = kasprintf(GFP_KERNEL, "%s.%d", DRV_NAME, aggr->id);
	if (!aggr->lookups->dev_id) {
		res = -ENOMEM;
		goto free_table;
	}

	scnprintf(name, sizeof(name), "%s.%d", AGGREGATOR_LEGACY_PREFIX, aggr->id);
	config_group_init_type_name(&aggr->group, name, &gpio_aggregator_device_type);

	/*
	 * Since the device created by sysfs might be toggled via configfs
	 * 'live' attribute later, this initialization is needed.
	 */
	dev_sync_probe_init(&aggr->probe_data);

	/* Expose to configfs */
	res = configfs_register_group(&gpio_aggregator_subsys.su_group,
				      &aggr->group);
	if (res)
		goto free_dev_id;

	res = gpio_aggregator_parse(aggr);
	if (res)
		goto unregister_group;

	gpiod_add_lookup_table(aggr->lookups);

	pdev = platform_device_register_data(NULL, DRV_NAME, aggr->id, &meta, sizeof(meta));
	if (IS_ERR(pdev)) {
		res = PTR_ERR(pdev);
		goto remove_table;
	}

	aggr->probe_data.pdev = pdev;
	module_put(THIS_MODULE);
	return count;

	/* Error unwind, in reverse order of the setup steps above */
remove_table:
	gpiod_remove_lookup_table(aggr->lookups);
unregister_group:
	configfs_unregister_group(&aggr->group);
free_dev_id:
	kfree(aggr->lookups->dev_id);
free_table:
	kfree(aggr->lookups);
free_ga:
	gpio_aggregator_free(aggr);
put_module:
	module_put(THIS_MODULE);
	return res;
}
1222
1223
/* Write-only (0200) driver attribute: echo "<spec>" > .../new_device */
static struct driver_attribute driver_attr_gpio_aggregator_new_device =
	__ATTR(new_device, 0200, NULL, gpio_aggregator_new_device_store);
1225
1226
static void gpio_aggregator_destroy(struct gpio_aggregator *aggr)
1227
{
1228
scoped_guard(mutex, &aggr->lock) {
1229
if (gpio_aggregator_is_activating(aggr) ||
1230
gpio_aggregator_is_active(aggr))
1231
gpio_aggregator_deactivate(aggr);
1232
}
1233
gpio_aggregator_free_lines(aggr);
1234
configfs_unregister_group(&aggr->group);
1235
kfree(aggr);
1236
}
1237
1238
/*
 * Handler for the legacy 'delete_device' driver attribute.
 *
 * Expects a device name of the form "gpio-aggregator.<id>"; looks up the
 * matching aggregator in the IDR and destroys it. Returns @count on
 * success, a negative errno on failure.
 */
static ssize_t gpio_aggregator_delete_device_store(struct device_driver *driver,
						   const char *buf, size_t count)
{
	struct gpio_aggregator *aggr;
	unsigned int id;
	int ret;

	if (!str_has_prefix(buf, DRV_NAME "."))
		return -EINVAL;

	ret = kstrtouint(buf + strlen(DRV_NAME "."), 10, &id);
	if (ret)
		return ret;

	/* Pin the module so teardown cannot race with module unload */
	if (!try_module_get(THIS_MODULE))
		return -ENOENT;

	mutex_lock(&gpio_aggregator_lock);
	aggr = idr_find(&gpio_aggregator_idr, id);
	/*
	 * For simplicity, devices created via configfs cannot be deleted
	 * via sysfs.
	 */
	if (!aggr || !aggr->init_via_sysfs) {
		mutex_unlock(&gpio_aggregator_lock);
		module_put(THIS_MODULE);
		return -ENOENT;
	}
	idr_remove(&gpio_aggregator_idr, id);
	mutex_unlock(&gpio_aggregator_lock);

	gpio_aggregator_destroy(aggr);
	module_put(THIS_MODULE);
	return count;
}
1274
1275
/* Write-only (0200) driver attribute: echo "gpio-aggregator.<id>" > .../delete_device */
static struct driver_attribute driver_attr_gpio_aggregator_delete_device =
	__ATTR(delete_device, 0200, NULL, gpio_aggregator_delete_device_store);
1277
1278
/* Legacy sysfs interface: driver attributes exposed under the driver node */
static struct attribute *gpio_aggregator_attrs[] = {
	&driver_attr_gpio_aggregator_new_device.attr,
	&driver_attr_gpio_aggregator_delete_device.attr,
	NULL
};
ATTRIBUTE_GROUPS(gpio_aggregator);
1284
1285
/*
1286
* GPIO Aggregator platform device
1287
*/
1288
1289
static int gpio_aggregator_probe(struct platform_device *pdev)
1290
{
1291
struct gpio_aggregator_pdev_meta *meta;
1292
struct device *dev = &pdev->dev;
1293
bool init_via_sysfs = false;
1294
struct gpio_desc **descs;
1295
struct gpiochip_fwd *fwd;
1296
unsigned long features;
1297
int i, n;
1298
1299
n = gpiod_count(dev, NULL);
1300
if (n < 0)
1301
return n;
1302
1303
descs = devm_kmalloc_array(dev, n, sizeof(*descs), GFP_KERNEL);
1304
if (!descs)
1305
return -ENOMEM;
1306
1307
meta = dev_get_platdata(&pdev->dev);
1308
if (meta && meta->init_via_sysfs)
1309
init_via_sysfs = true;
1310
1311
for (i = 0; i < n; i++) {
1312
descs[i] = devm_gpiod_get_index(dev, NULL, i, GPIOD_ASIS);
1313
if (IS_ERR(descs[i])) {
1314
/*
1315
* Deferred probing is not suitable when the aggregator
1316
* is created via configfs. They should just retry later
1317
* whenever they like. For device creation via sysfs,
1318
* error is propagated without overriding for backward
1319
* compatibility. .prevent_deferred_probe is kept unset
1320
* for other cases.
1321
*/
1322
if (!init_via_sysfs && !dev_of_node(dev) &&
1323
descs[i] == ERR_PTR(-EPROBE_DEFER)) {
1324
pr_warn("Deferred probe canceled for creation via configfs.\n");
1325
return -ENODEV;
1326
}
1327
return PTR_ERR(descs[i]);
1328
}
1329
}
1330
1331
features = (uintptr_t)device_get_match_data(dev);
1332
fwd = gpiochip_fwd_create(dev, n, descs, features);
1333
if (IS_ERR(fwd))
1334
return PTR_ERR(fwd);
1335
1336
platform_set_drvdata(pdev, fwd);
1337
return 0;
1338
}
1339
1340
/* DT match table; match data carries gpiochip_fwd feature flags */
static const struct of_device_id gpio_aggregator_dt_ids[] = {
	{
		.compatible = "gpio-delay",
		.data = (void *)FWD_FEATURE_DELAY,
	},
	/*
	 * Add GPIO-operated devices controlled from userspace below,
	 * or use "driver_override" in sysfs.
	 */
	{}
};
MODULE_DEVICE_TABLE(of, gpio_aggregator_dt_ids);
1352
1353
/* Platform driver; .groups exposes the legacy new_device/delete_device attrs */
static struct platform_driver gpio_aggregator_driver = {
	.probe = gpio_aggregator_probe,
	.driver = {
		.name = DRV_NAME,
		.groups = gpio_aggregator_groups,
		.of_match_table = gpio_aggregator_dt_ids,
	},
};
1361
1362
/* idr_for_each() callback: destroy one remaining (sysfs-created) aggregator */
static int __exit gpio_aggregator_idr_remove(int id, void *p, void *data)
{
	/*
	 * There should be no aggregator created via configfs, as their
	 * presence would prevent module unloading.
	 */
	gpio_aggregator_destroy(p);
	return 0;
}
1371
1372
/* Destroy every aggregator still registered in the IDR at module unload */
static void __exit gpio_aggregator_remove_all(void)
{
	/*
	 * Configfs callbacks acquire gpio_aggregator_lock when accessing
	 * gpio_aggregator_idr, so to prevent lock inversion deadlock, we
	 * cannot protect idr_for_each invocation here with
	 * gpio_aggregator_lock, as gpio_aggregator_idr_remove() accesses
	 * configfs groups. Fortunately, the new_device/delete_device path
	 * and the module unload path are mutually exclusive, thanks to an
	 * explicit try_module_get inside of those driver attr handlers.
	 * Also, when we reach here, no configfs entries present or being
	 * created. Therefore, no need to protect with gpio_aggregator_lock
	 * below.
	 */
	idr_for_each(&gpio_aggregator_idr, gpio_aggregator_idr_remove, NULL);
	idr_destroy(&gpio_aggregator_idr);
}
1389
1390
/*
 * Module init: bring up the configfs subsystem first, then register the
 * platform driver (which exposes the legacy sysfs attributes).
 */
static int __init gpio_aggregator_init(void)
{
	int err;

	config_group_init(&gpio_aggregator_subsys.su_group);
	mutex_init(&gpio_aggregator_subsys.su_mutex);

	err = configfs_register_subsystem(&gpio_aggregator_subsys);
	if (err) {
		pr_err("Failed to register the '%s' configfs subsystem: %d\n",
		       gpio_aggregator_subsys.su_group.cg_item.ci_namebuf, err);
		mutex_destroy(&gpio_aggregator_subsys.su_mutex);
		return err;
	}

	/*
	 * CAVEAT: This must occur after configfs registration. Otherwise,
	 * a race condition could arise: driver attribute groups might be
	 * exposed and accessed by users before configfs registration
	 * completes. new_device_store() does not expect a partially
	 * initialized configfs state.
	 */
	err = platform_driver_register(&gpio_aggregator_driver);
	if (!err)
		return 0;

	pr_err("Failed to register the platform driver: %d\n", err);
	mutex_destroy(&gpio_aggregator_subsys.su_mutex);
	configfs_unregister_subsystem(&gpio_aggregator_subsys);
	return err;
}
module_init(gpio_aggregator_init);
1421
1422
/* Module exit: tear down leftover sysfs aggregators, then unregister everything */
static void __exit gpio_aggregator_exit(void)
{
	/* Must run before the platform driver goes away */
	gpio_aggregator_remove_all();
	platform_driver_unregister(&gpio_aggregator_driver);
	configfs_unregister_subsystem(&gpio_aggregator_subsys);
}
module_exit(gpio_aggregator_exit);
1429
1430
/* Module metadata */
MODULE_AUTHOR("Geert Uytterhoeven <[email protected]>");
MODULE_DESCRIPTION("GPIO Aggregator");
MODULE_LICENSE("GPL v2");
1433
1434