/*
 * Copyright (c) 2014 Samsung Electronics Co., Ltd
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/media-bus-format.h>
#include <linux/module.h>
#include <linux/mutex.h>

#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_edid.h>
#include <drm/drm_encoder.h>
#include <drm/drm_file.h>
#include <drm/drm_of.h>
#include <drm/drm_print.h>

#include "drm_crtc_internal.h"

/**
 * DOC: overview
 *
 * &struct drm_bridge represents a device that hangs on to an encoder. These are
 * handy when a regular &drm_encoder entity isn't enough to represent the entire
 * encoder chain.
 *
 * A bridge is always attached to a single &drm_encoder at a time, but can be
 * either connected to it directly, or through a chain of bridges::
 *
 *     [ CRTC ---> ] Encoder ---> Bridge A ---> Bridge B
 *
 * Here, the output of the encoder feeds to bridge A, and that further feeds to
 * bridge B. Bridge chains can be arbitrarily long, and shall be fully linear:
 * Chaining multiple bridges to the output of a bridge, or the same bridge to
 * the output of different bridges, is not supported.
 *
 * &drm_bridge, like &drm_panel, aren't &drm_mode_object entities like planes,
 * CRTCs, encoders or connectors and hence are not visible to userspace. They
 * just provide additional hooks to get the desired output at the end of the
 * encoder chain.
 */

/**
 * DOC: display driver integration
 *
 * Display drivers are responsible for linking encoders with the first bridge
 * in the chains. This is done by acquiring the appropriate bridge with
 * devm_drm_of_get_bridge(). Once acquired, the bridge shall be attached to the
 * encoder with a call to drm_bridge_attach().
 *
 * Bridges are responsible for linking themselves with the next bridge in the
 * chain, if any. This is done the same way as for encoders, with the call to
 * drm_bridge_attach() occurring in the &drm_bridge_funcs.attach operation.
 *
 * Once these links are created, the bridges can participate along with encoder
 * functions to perform mode validation and fixup (through
 * drm_bridge_chain_mode_valid() and drm_atomic_bridge_chain_check()), mode
 * setting (through drm_bridge_chain_mode_set()), enable (through
 * drm_atomic_bridge_chain_pre_enable() and drm_atomic_bridge_chain_enable())
 * and disable (through drm_atomic_bridge_chain_disable() and
 * drm_atomic_bridge_chain_post_disable()). Those functions call the
 * corresponding operations provided in &drm_bridge_funcs in sequence for all
 * bridges in the chain.
 *
 * For display drivers that use the atomic helpers
 * drm_atomic_helper_check_modeset(),
 * drm_atomic_helper_commit_modeset_enables() and
 * drm_atomic_helper_commit_modeset_disables() (either directly in hand-rolled
 * commit check and commit tail handlers, or through the higher-level
 * drm_atomic_helper_check() and drm_atomic_helper_commit_tail() or
 * drm_atomic_helper_commit_tail_rpm() helpers), this is done transparently and
 * requires no intervention from the driver. For other drivers, the relevant
 * DRM bridge chain functions shall be called manually.
 *
 * Bridges also participate in implementing the &drm_connector at the end of
 * the bridge chain. Display drivers may use the drm_bridge_connector_init()
 * helper to create the &drm_connector, or implement it manually on top of the
 * connector-related operations exposed by the bridge (see the overview
 * documentation of bridge operations for more details).
 */

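/*
 * Editor's note: a minimal sketch of the display-driver side described above,
 * not part of the original file. Names prefixed with foo_ are hypothetical;
 * the helpers used (devm_drm_of_get_bridge(), drm_bridge_attach(),
 * drm_bridge_connector_init()) are the ones referenced in this documentation.
 *
 *	static int foo_encoder_init(struct foo_device *priv)
 *	{
 *		struct drm_bridge *bridge;
 *		struct drm_connector *connector;
 *		int ret;
 *
 *		// Acquire the first bridge in the chain from the OF graph.
 *		bridge = devm_drm_of_get_bridge(priv->dev, priv->dev->of_node, 0, 0);
 *		if (IS_ERR(bridge))
 *			return PTR_ERR(bridge);
 *
 *		// Link it at the encoder output; further bridges attach themselves.
 *		ret = drm_bridge_attach(&priv->encoder, bridge, NULL,
 *					DRM_BRIDGE_ATTACH_NO_CONNECTOR);
 *		if (ret)
 *			return ret;
 *
 *		// Let the helper build a connector from the bridge chain.
 *		connector = drm_bridge_connector_init(priv->drm, &priv->encoder);
 *		if (IS_ERR(connector))
 *			return PTR_ERR(connector);
 *
 *		return drm_connector_attach_encoder(connector, &priv->encoder);
 *	}
 */
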
/**
 * DOC: special care dsi
 *
 * The interaction between the bridges and other frameworks involved in
 * the probing of the upstream driver and the bridge driver can be
 * challenging. Indeed, there are multiple cases that need to be
 * considered:
 *
 * - The upstream driver doesn't use the component framework and isn't a
 *   MIPI-DSI host. In this case, the bridge driver will probe at some
 *   point and the upstream driver should try to probe again by returning
 *   EPROBE_DEFER as long as the bridge driver hasn't probed.
 *
 * - The upstream driver doesn't use the component framework, but is a
 *   MIPI-DSI host. The bridge device uses the MIPI-DCS commands to be
 *   controlled. In this case, the bridge device is a child of the
 *   display device and when it probes it is assured that the display
 *   device (and MIPI-DSI host) is present. The upstream driver will be
 *   assured that the bridge driver is connected between the
 *   &mipi_dsi_host_ops.attach and &mipi_dsi_host_ops.detach operations.
 *   Therefore, it must run mipi_dsi_host_register() in its probe
 *   function, and then run drm_bridge_attach() in its
 *   &mipi_dsi_host_ops.attach hook.
 *
 * - The upstream driver uses the component framework and is a MIPI-DSI
 *   host. The bridge device uses the MIPI-DCS commands to be
 *   controlled. This is the same situation as above, and the upstream
 *   driver can run mipi_dsi_host_register() in either its probe or bind
 *   hook.
 *
 * - The upstream driver uses the component framework and is a MIPI-DSI
 *   host. The bridge device uses a separate bus (such as I2C) to be
 *   controlled. In this case, there's no correlation between the probe
 *   of the bridge and upstream drivers, so care must be taken to avoid
 *   an endless EPROBE_DEFER loop, with each driver waiting for the
 *   other to probe.
 *
 * The ideal pattern to cover the last item (and all the others in the
 * MIPI-DSI host driver case) is to split the operations like this:
 *
 * - The MIPI-DSI host driver must run mipi_dsi_host_register() in its
 *   probe hook. It will make sure that the MIPI-DSI host sticks around,
 *   and that the driver's bind can be called.
 *
 * - In its probe hook, the bridge driver must try to find its MIPI-DSI
 *   host, register as a MIPI-DSI device and attach the MIPI-DSI device
 *   to its host. The bridge driver is now functional.
 *
 * - In its &struct mipi_dsi_host_ops.attach hook, the MIPI-DSI host can
 *   now add its component. Its bind hook will now be called and since
 *   the bridge driver is attached and registered, we can now look for
 *   and attach it.
 *
 * At this point, we're now certain that both the upstream driver and
 * the bridge driver are functional and we can't have a deadlock-like
 * situation when probing.
 */

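/*
 * Editor's note: a compact sketch (not in the original file) of the bridge
 * driver side of the probe split recommended above, for a bridge controlled
 * over I2C whose DSI host uses the component framework. foo_* names and the
 * device-tree handle dsi_host_node are hypothetical; the MIPI-DSI helpers
 * are the standard ones from drm_mipi_dsi.h.
 *
 *	static int foo_bridge_probe(struct i2c_client *client)
 *	{
 *		const struct mipi_dsi_device_info info = {
 *			.type = "foo-bridge", .channel = 0, .node = NULL,
 *		};
 *		struct mipi_dsi_host *host;
 *		struct mipi_dsi_device *dsi;
 *
 *		// Find the DSI host; defer until its driver has registered it.
 *		host = of_find_mipi_dsi_host_by_node(dsi_host_node);
 *		if (!host)
 *			return -EPROBE_DEFER;
 *
 *		// Register as a DSI device and attach to the host: the host's
 *		// &mipi_dsi_host_ops.attach can now add its component and look
 *		// up this bridge from its bind hook.
 *		dsi = devm_mipi_dsi_device_register_full(&client->dev, host, &info);
 *		if (IS_ERR(dsi))
 *			return PTR_ERR(dsi);
 *
 *		return devm_mipi_dsi_attach(&client->dev, dsi);
 *	}
 */
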
/**
 * DOC: dsi bridge operations
 *
 * DSI host interfaces are expected to be implemented as bridges rather than
 * encoders, however there are a few aspects of their operation that need to
 * be defined in order to provide a consistent interface.
 *
 * A DSI host should keep the PHY powered down until the pre_enable operation is
 * called. All lanes are in an undefined idle state up to this point, and it
 * must not be assumed that it is LP-11.
 * pre_enable should initialise the PHY, set the data lanes to LP-11, and the
 * clock lane to either LP-11 or HS depending on the mode_flag
 * %MIPI_DSI_CLOCK_NON_CONTINUOUS.
 *
 * Ordinarily the downstream bridge DSI peripheral pre_enable will have been
 * called before the DSI host. If the DSI peripheral requires LP-11 and/or
 * the clock lane to be in HS mode prior to pre_enable, then it can set the
 * &pre_enable_prev_first flag to request the pre_enable (and
 * post_disable) order to be altered to enable the DSI host first.
 *
 * Either the CRTC being enabled, or the DSI host enable operation should switch
 * the host to actively transmitting video on the data lanes.
 *
 * The reverse also applies. The DSI host disable operation or stopping the CRTC
 * should stop transmitting video, and the data lanes should return to the LP-11
 * state. The DSI host &post_disable operation should disable the PHY.
 * If the &pre_enable_prev_first flag is set, then the DSI peripheral's
 * bridge &post_disable will be called before the DSI host's post_disable.
 *
 * Whilst it is valid to call &host_transfer prior to pre_enable or after
 * post_disable, the exact state of the lanes is undefined at this point. The
 * DSI host should initialise the interface, transmit the data, and then disable
 * the interface again.
 *
 * Ultra Low Power State (ULPS) is not explicitly supported by DRM. If
 * implemented, it therefore needs to be handled entirely within the DSI Host
 * driver.
 */

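/*
 * Editor's note: a DSI peripheral that needs LP-11 (or a HS clock lane)
 * before its own pre_enable simply sets the flag described above when it
 * creates its &struct drm_bridge. Sketch only; foo_* is hypothetical.
 *
 *	foo->bridge.funcs = &foo_bridge_funcs;
 *	foo->bridge.of_node = dev->of_node;
 *	foo->bridge.pre_enable_prev_first = true;	// DSI host pre_enables first
 *	devm_drm_bridge_add(dev, &foo->bridge);
 */
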
/* Protect bridge_list and bridge_lingering_list */
static DEFINE_MUTEX(bridge_lock);
static LIST_HEAD(bridge_list);
static LIST_HEAD(bridge_lingering_list);

static void __drm_bridge_free(struct kref *kref)
{
        struct drm_bridge *bridge = container_of(kref, struct drm_bridge, refcount);

        mutex_lock(&bridge_lock);
        list_del(&bridge->list);
        mutex_unlock(&bridge_lock);

        if (bridge->funcs->destroy)
                bridge->funcs->destroy(bridge);

        kfree(bridge->container);
}

/**
 * drm_bridge_get - Acquire a bridge reference
 * @bridge: DRM bridge
 *
 * This function increments the bridge's refcount.
 *
 * Returns:
 * Pointer to @bridge.
 */
struct drm_bridge *drm_bridge_get(struct drm_bridge *bridge)
{
        if (bridge)
                kref_get(&bridge->refcount);

        return bridge;
}
EXPORT_SYMBOL(drm_bridge_get);

/**
 * drm_bridge_put - Release a bridge reference
 * @bridge: DRM bridge
 *
 * This function decrements the bridge's reference count and frees the
 * object if the reference count drops to zero.
 */
void drm_bridge_put(struct drm_bridge *bridge)
{
        if (bridge)
                kref_put(&bridge->refcount, __drm_bridge_free);
}
EXPORT_SYMBOL(drm_bridge_put);

/**
 * drm_bridge_put_void - wrapper to drm_bridge_put() taking a void pointer
 *
 * @data: pointer to @struct drm_bridge, cast to a void pointer
 *
 * Wrapper of drm_bridge_put() to be used when a function taking a void
 * pointer is needed, for example as a devm action.
 */
static void drm_bridge_put_void(void *data)
{
        struct drm_bridge *bridge = (struct drm_bridge *)data;

        drm_bridge_put(bridge);
}

void *__devm_drm_bridge_alloc(struct device *dev, size_t size, size_t offset,
                              const struct drm_bridge_funcs *funcs)
{
        void *container;
        struct drm_bridge *bridge;
        int err;

        if (!funcs) {
                dev_warn(dev, "Missing funcs pointer\n");
                return ERR_PTR(-EINVAL);
        }

        container = kzalloc(size, GFP_KERNEL);
        if (!container)
                return ERR_PTR(-ENOMEM);

        bridge = container + offset;
        INIT_LIST_HEAD(&bridge->list);
        bridge->container = container;
        bridge->funcs = funcs;
        kref_init(&bridge->refcount);

        err = devm_add_action_or_reset(dev, drm_bridge_put_void, bridge);
        if (err)
                return ERR_PTR(err);

        return container;
}
EXPORT_SYMBOL(__devm_drm_bridge_alloc);

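/*
 * Editor's note: __devm_drm_bridge_alloc() is normally reached through the
 * devm_drm_bridge_alloc() macro, which embeds the &struct drm_bridge in a
 * driver-private structure. A minimal usage sketch, not part of the original
 * file (foo_* names are hypothetical):
 *
 *	struct foo_bridge {
 *		struct drm_bridge bridge;
 *		void __iomem *regs;
 *	};
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct foo_bridge *foo;
 *
 *		foo = devm_drm_bridge_alloc(&pdev->dev, struct foo_bridge,
 *					    bridge, &foo_bridge_funcs);
 *		if (IS_ERR(foo))
 *			return PTR_ERR(foo);
 *
 *		foo->bridge.of_node = pdev->dev.of_node;
 *		foo->bridge.type = DRM_MODE_CONNECTOR_DPI;
 *
 *		return devm_drm_bridge_add(&pdev->dev, &foo->bridge);
 *	}
 */
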
/**
 * drm_bridge_add - register a bridge
 *
 * @bridge: bridge control structure
 *
 * Add the given bridge to the global list of bridges, where it can be
 * found by users via of_drm_find_bridge().
 *
 * The bridge to be added must have been allocated by
 * devm_drm_bridge_alloc().
 */
void drm_bridge_add(struct drm_bridge *bridge)
{
        if (!bridge->container)
                DRM_WARN("DRM bridge corrupted or not allocated by devm_drm_bridge_alloc()\n");

        drm_bridge_get(bridge);

        /*
         * If the bridge was previously added and then removed, it is now
         * in bridge_lingering_list. Remove it or bridge_lingering_list will be
         * corrupted when adding this bridge to bridge_list below.
         */
        if (!list_empty(&bridge->list))
                list_del_init(&bridge->list);

        mutex_init(&bridge->hpd_mutex);

        if (bridge->ops & DRM_BRIDGE_OP_HDMI)
                bridge->ycbcr_420_allowed = !!(bridge->supported_formats &
                                               BIT(HDMI_COLORSPACE_YUV420));

        mutex_lock(&bridge_lock);
        list_add_tail(&bridge->list, &bridge_list);
        mutex_unlock(&bridge_lock);
}
EXPORT_SYMBOL(drm_bridge_add);

static void drm_bridge_remove_void(void *bridge)
{
        drm_bridge_remove(bridge);
}

/**
 * devm_drm_bridge_add - devm managed version of drm_bridge_add()
 *
 * @dev: device to tie the bridge lifetime to
 * @bridge: bridge control structure
 *
 * This is the managed version of drm_bridge_add() which automatically
 * calls drm_bridge_remove() when @dev is unbound.
 *
 * Return: 0 if no error or negative error code.
 */
int devm_drm_bridge_add(struct device *dev, struct drm_bridge *bridge)
{
        drm_bridge_add(bridge);
        return devm_add_action_or_reset(dev, drm_bridge_remove_void, bridge);
}
EXPORT_SYMBOL(devm_drm_bridge_add);

/**
 * drm_bridge_remove - unregister a bridge
 *
 * @bridge: bridge control structure
 *
 * Remove the given bridge from the global list of registered bridges, so
 * it won't be found by users via of_drm_find_bridge(), and add it to the
 * lingering bridge list, to keep track of it until its allocated memory is
 * eventually freed.
 */
void drm_bridge_remove(struct drm_bridge *bridge)
{
        mutex_lock(&bridge_lock);
        list_move_tail(&bridge->list, &bridge_lingering_list);
        mutex_unlock(&bridge_lock);

        mutex_destroy(&bridge->hpd_mutex);

        drm_bridge_put(bridge);
}
EXPORT_SYMBOL(drm_bridge_remove);

static struct drm_private_state *
drm_bridge_atomic_duplicate_priv_state(struct drm_private_obj *obj)
{
        struct drm_bridge *bridge = drm_priv_to_bridge(obj);
        struct drm_bridge_state *state;

        state = bridge->funcs->atomic_duplicate_state(bridge);
        return state ? &state->base : NULL;
}

static void
drm_bridge_atomic_destroy_priv_state(struct drm_private_obj *obj,
                                     struct drm_private_state *s)
{
        struct drm_bridge_state *state = drm_priv_to_bridge_state(s);
        struct drm_bridge *bridge = drm_priv_to_bridge(obj);

        bridge->funcs->atomic_destroy_state(bridge, state);
}

static const struct drm_private_state_funcs drm_bridge_priv_state_funcs = {
        .atomic_duplicate_state = drm_bridge_atomic_duplicate_priv_state,
        .atomic_destroy_state = drm_bridge_atomic_destroy_priv_state,
};

static bool drm_bridge_is_atomic(struct drm_bridge *bridge)
{
        return bridge->funcs->atomic_reset != NULL;
}

/**
 * drm_bridge_attach - attach the bridge to an encoder's chain
 *
 * @encoder: DRM encoder
 * @bridge: bridge to attach
 * @previous: previous bridge in the chain (optional)
 * @flags: DRM_BRIDGE_ATTACH_* flags
 *
 * Called by a kms driver to link the bridge to an encoder's chain. The previous
 * argument specifies the previous bridge in the chain. If NULL, the bridge is
 * linked directly at the encoder's output. Otherwise it is linked at the
 * previous bridge's output.
 *
 * If non-NULL the previous bridge must be already attached by a call to this
 * function.
 *
 * The bridge to be attached must have been previously added by
 * drm_bridge_add().
 *
 * Note that bridges attached to encoders are auto-detached during encoder
 * cleanup in drm_encoder_cleanup(), so drm_bridge_attach() should generally
 * *not* be balanced with a drm_bridge_detach() in driver code.
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
int drm_bridge_attach(struct drm_encoder *encoder, struct drm_bridge *bridge,
                      struct drm_bridge *previous,
                      enum drm_bridge_attach_flags flags)
{
        int ret;

        if (!encoder || !bridge)
                return -EINVAL;

        if (!bridge->container)
                DRM_WARN("DRM bridge corrupted or not allocated by devm_drm_bridge_alloc()\n");

        if (list_empty(&bridge->list))
                DRM_WARN("Missing drm_bridge_add() before attach\n");

        drm_bridge_get(bridge);

        if (previous && (!previous->dev || previous->encoder != encoder)) {
                ret = -EINVAL;
                goto err_put_bridge;
        }

        if (bridge->dev) {
                ret = -EBUSY;
                goto err_put_bridge;
        }

        bridge->dev = encoder->dev;
        bridge->encoder = encoder;

        if (previous)
                list_add(&bridge->chain_node, &previous->chain_node);
        else
                list_add(&bridge->chain_node, &encoder->bridge_chain);

        if (bridge->funcs->attach) {
                ret = bridge->funcs->attach(bridge, encoder, flags);
                if (ret < 0)
                        goto err_reset_bridge;
        }

        if (drm_bridge_is_atomic(bridge)) {
                struct drm_bridge_state *state;

                state = bridge->funcs->atomic_reset(bridge);
                if (IS_ERR(state)) {
                        ret = PTR_ERR(state);
                        goto err_detach_bridge;
                }

                drm_atomic_private_obj_init(bridge->dev, &bridge->base,
                                            &state->base,
                                            &drm_bridge_priv_state_funcs);
        }

        return 0;

err_detach_bridge:
        if (bridge->funcs->detach)
                bridge->funcs->detach(bridge);

err_reset_bridge:
        bridge->dev = NULL;
        bridge->encoder = NULL;
        list_del(&bridge->chain_node);

        if (ret != -EPROBE_DEFER)
                DRM_ERROR("failed to attach bridge %pOF to encoder %s: %d\n",
                          bridge->of_node, encoder->name, ret);
        else
                dev_err_probe(encoder->dev->dev, -EPROBE_DEFER,
                              "failed to attach bridge %pOF to encoder %s\n",
                              bridge->of_node, encoder->name);

err_put_bridge:
        drm_bridge_put(bridge);
        return ret;
}
EXPORT_SYMBOL(drm_bridge_attach);

void drm_bridge_detach(struct drm_bridge *bridge)
{
        if (WARN_ON(!bridge))
                return;

        if (WARN_ON(!bridge->dev))
                return;

        if (drm_bridge_is_atomic(bridge))
                drm_atomic_private_obj_fini(&bridge->base);

        if (bridge->funcs->detach)
                bridge->funcs->detach(bridge);

        list_del(&bridge->chain_node);
        bridge->dev = NULL;
        drm_bridge_put(bridge);
}

/**
 * DOC: bridge operations
 *
 * Bridge drivers expose operations through the &drm_bridge_funcs structure.
 * The DRM internals (atomic and CRTC helpers) use the helpers defined in
 * drm_bridge.c to call bridge operations. Those operations are divided into
 * three big categories to support different parts of the bridge usage.
 *
 * - The encoder-related operations support control of the bridges in the
 *   chain, and are roughly counterparts to the &drm_encoder_helper_funcs
 *   operations. They are used by the legacy CRTC and the atomic modeset
 *   helpers to perform mode validation, fixup and setting, and enable and
 *   disable the bridge automatically.
 *
 *   The enable and disable operations are split in
 *   &drm_bridge_funcs.pre_enable, &drm_bridge_funcs.enable,
 *   &drm_bridge_funcs.disable and &drm_bridge_funcs.post_disable to provide
 *   finer-grained control.
 *
 *   Bridge drivers may implement the legacy version of those operations, or
 *   the atomic version (prefixed with atomic\_), in which case they shall also
 *   implement the atomic state bookkeeping operations
 *   (&drm_bridge_funcs.atomic_duplicate_state,
 *   &drm_bridge_funcs.atomic_destroy_state and &drm_bridge_funcs.atomic_reset).
 *   Mixing atomic and non-atomic versions of the operations is not supported.
 *
 * - The bus format negotiation operations
 *   &drm_bridge_funcs.atomic_get_output_bus_fmts and
 *   &drm_bridge_funcs.atomic_get_input_bus_fmts allow bridge drivers to
 *   negotiate the formats transmitted between bridges in the chain when
 *   multiple formats are supported. Negotiation for formats is performed
 *   transparently for display drivers by the atomic modeset helpers. Only
 *   atomic versions of those operations exist, bridge drivers that need to
 *   implement them shall thus also implement the atomic version of the
 *   encoder-related operations. This feature is not supported by the legacy
 *   CRTC helpers.
 *
 * - The connector-related operations support implementing a &drm_connector
 *   based on a chain of bridges. DRM bridges traditionally create a
 *   &drm_connector for bridges meant to be used at the end of the chain. This
 *   puts additional burden on bridge drivers, especially for bridges that may
 *   be used in the middle of a chain or at the end of it. Furthermore, it
 *   requires all operations of the &drm_connector to be handled by a single
 *   bridge, which doesn't always match the hardware architecture.
 *
 *   To simplify bridge drivers and make the connector implementation more
 *   flexible, a new model allows bridges to unconditionally skip creation of
 *   &drm_connector and instead expose &drm_bridge_funcs operations to support
 *   an externally-implemented &drm_connector. Those operations are
 *   &drm_bridge_funcs.detect, &drm_bridge_funcs.get_modes,
 *   &drm_bridge_funcs.edid_read, &drm_bridge_funcs.hpd_notify,
 *   &drm_bridge_funcs.hpd_enable and &drm_bridge_funcs.hpd_disable. When
 *   implemented, display drivers shall create a &drm_connector instance for
 *   each chain of bridges, and implement those connector instances based on
 *   the bridge connector operations.
 *
 *   Bridge drivers shall implement the connector-related operations for all
 *   the features that the bridge hardware supports. For instance, if a bridge
 *   supports reading EDID, the &drm_bridge_funcs.edid_read shall be
 *   implemented. This however doesn't mean that the DDC lines are wired to the
 *   bridge on a particular platform, as they could also be connected to an I2C
 *   controller of the SoC. Support for the connector-related operations on the
 *   running platform is reported through the &drm_bridge.ops flags. Bridge
 *   drivers shall detect which operations they can support on the platform
 *   (usually this information is provided by ACPI or DT), and set the
 *   &drm_bridge.ops flags for all supported operations. A flag shall only be
 *   set if the corresponding &drm_bridge_funcs operation is implemented, but
 *   an implemented operation doesn't necessarily imply that the corresponding
 *   flag will be set. Display drivers shall use the &drm_bridge.ops flags to
 *   decide which bridge to delegate a connector operation to. This mechanism
 *   allows providing a single static const &drm_bridge_funcs instance in
 *   bridge drivers, improving security by storing function pointers in
 *   read-only memory.
 *
 *   In order to ease transition, bridge drivers may support both the old and
 *   new models by making connector creation optional and implementing the
 *   connector-related bridge operations. Connector creation is then controlled
 *   by the flags argument to the drm_bridge_attach() function. Display drivers
 *   that support the new model and create connectors themselves shall set the
 *   %DRM_BRIDGE_ATTACH_NO_CONNECTOR flag, and bridge drivers shall then skip
 *   connector creation. For intermediate bridges in the chain, the flag shall
 *   be passed to the drm_bridge_attach() call for the downstream bridge.
 *   Bridge drivers that implement the new model only shall return an error
 *   from their &drm_bridge_funcs.attach handler when the
 *   %DRM_BRIDGE_ATTACH_NO_CONNECTOR flag is not set. New display drivers
 *   should use the new model, and convert the bridge drivers they use if
 *   needed, in order to gradually transition to the new model.
 */

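/*
 * Editor's note: a sketch (not from the original file) of a new-model-only
 * &drm_bridge_funcs.attach implementation, following the rules above: reject
 * attachment when the display driver still expects the bridge to create a
 * connector, and chain the downstream bridge with the same flags. foo_* names
 * and the next_bridge member are assumptions.
 *
 *	static int foo_attach(struct drm_bridge *bridge, struct drm_encoder *encoder,
 *			      enum drm_bridge_attach_flags flags)
 *	{
 *		struct foo_bridge *foo = container_of(bridge, struct foo_bridge, bridge);
 *
 *		if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
 *			return -EINVAL;
 *
 *		// Link the downstream bridge at this bridge's output,
 *		// propagating the no-connector request.
 *		return drm_bridge_attach(encoder, foo->next_bridge, bridge, flags);
 *	}
 */
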
/**
 * drm_bridge_chain_mode_valid - validate the mode against all bridges in the
 *				 encoder chain.
 * @bridge: bridge control structure
 * @info: display info against which the mode shall be validated
 * @mode: desired mode to be validated
 *
 * Calls &drm_bridge_funcs.mode_valid for all the bridges in the encoder
 * chain, starting from the first bridge to the last. If at least one bridge
 * does not accept the mode, the function returns the error code.
 *
 * Note: the bridge passed should be the one closest to the encoder.
 *
 * RETURNS:
 * MODE_OK on success, or an enum drm_mode_status error code on failure
 */
enum drm_mode_status
drm_bridge_chain_mode_valid(struct drm_bridge *bridge,
                            const struct drm_display_info *info,
                            const struct drm_display_mode *mode)
{
        struct drm_encoder *encoder;

        if (!bridge)
                return MODE_OK;

        encoder = bridge->encoder;
        list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) {
                enum drm_mode_status ret;

                if (!bridge->funcs->mode_valid)
                        continue;

                ret = bridge->funcs->mode_valid(bridge, info, mode);
                if (ret != MODE_OK)
                        return ret;
        }

        return MODE_OK;
}
EXPORT_SYMBOL(drm_bridge_chain_mode_valid);

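/*
 * Editor's note: an illustrative &drm_bridge_funcs.mode_valid implementation
 * matching the signature used above (sketch, not from the original file; the
 * 150 MHz pixel clock limit is an arbitrary example).
 *
 *	static enum drm_mode_status
 *	foo_mode_valid(struct drm_bridge *bridge,
 *		       const struct drm_display_info *info,
 *		       const struct drm_display_mode *mode)
 *	{
 *		if (mode->clock > 150000)	// mode->clock is in kHz
 *			return MODE_CLOCK_HIGH;
 *
 *		return MODE_OK;
 *	}
 */
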
/**
 * drm_bridge_chain_mode_set - set proposed mode for all bridges in the
 *			       encoder chain
 * @bridge: bridge control structure
 * @mode: desired mode to be set for the encoder chain
 * @adjusted_mode: updated mode that works for this encoder chain
 *
 * Calls &drm_bridge_funcs.mode_set op for all the bridges in the
 * encoder chain, starting from the first bridge to the last.
 *
 * Note: the bridge passed should be the one closest to the encoder
 */
void drm_bridge_chain_mode_set(struct drm_bridge *bridge,
                               const struct drm_display_mode *mode,
                               const struct drm_display_mode *adjusted_mode)
{
        struct drm_encoder *encoder;

        if (!bridge)
                return;

        encoder = bridge->encoder;
        list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) {
                if (bridge->funcs->mode_set)
                        bridge->funcs->mode_set(bridge, mode, adjusted_mode);
        }
}
EXPORT_SYMBOL(drm_bridge_chain_mode_set);

/**
 * drm_atomic_bridge_chain_disable - disables all bridges in the encoder chain
 * @bridge: bridge control structure
 * @state: atomic state being committed
 *
 * Calls &drm_bridge_funcs.atomic_disable (falls back on
 * &drm_bridge_funcs.disable) op for all the bridges in the encoder chain,
 * starting from the last bridge to the first. These are called before calling
 * &drm_encoder_helper_funcs.atomic_disable
 *
 * Note: the bridge passed should be the one closest to the encoder
 */
void drm_atomic_bridge_chain_disable(struct drm_bridge *bridge,
                                     struct drm_atomic_state *state)
{
        struct drm_encoder *encoder;
        struct drm_bridge *iter;

        if (!bridge)
                return;

        encoder = bridge->encoder;
        list_for_each_entry_reverse(iter, &encoder->bridge_chain, chain_node) {
                if (iter->funcs->atomic_disable) {
                        iter->funcs->atomic_disable(iter, state);
                } else if (iter->funcs->disable) {
                        iter->funcs->disable(iter);
                }

                if (iter == bridge)
                        break;
        }
}
EXPORT_SYMBOL(drm_atomic_bridge_chain_disable);

static void drm_atomic_bridge_call_post_disable(struct drm_bridge *bridge,
                                                struct drm_atomic_state *state)
{
        if (state && bridge->funcs->atomic_post_disable)
                bridge->funcs->atomic_post_disable(bridge, state);
        else if (bridge->funcs->post_disable)
                bridge->funcs->post_disable(bridge);
}

/**
 * drm_atomic_bridge_chain_post_disable - cleans up after disabling all bridges
 *					  in the encoder chain
 * @bridge: bridge control structure
 * @state: atomic state being committed
 *
 * Calls &drm_bridge_funcs.atomic_post_disable (falls back on
 * &drm_bridge_funcs.post_disable) op for all the bridges in the encoder chain,
 * starting from the first bridge to the last. These are called after completing
 * &drm_encoder_helper_funcs.atomic_disable
 *
 * If a bridge sets @pre_enable_prev_first, then the @post_disable for that
 * bridge will be called before the previous one to reverse the @pre_enable
 * calling direction.
 *
 * Example:
 * Bridge A ---> Bridge B ---> Bridge C ---> Bridge D ---> Bridge E
 *
 * With the pre_enable_prev_first flag enabled in bridges B, D and E, the
 * resulting @post_disable order would be:
 * Bridge B, Bridge A, Bridge E, Bridge D, Bridge C.
 *
 * Note: the bridge passed should be the one closest to the encoder
 */
void drm_atomic_bridge_chain_post_disable(struct drm_bridge *bridge,
                                          struct drm_atomic_state *state)
{
        struct drm_encoder *encoder;
        struct drm_bridge *next, *limit;

        if (!bridge)
                return;

        encoder = bridge->encoder;

        list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) {
                limit = NULL;

                if (!list_is_last(&bridge->chain_node, &encoder->bridge_chain)) {
                        next = list_next_entry(bridge, chain_node);

                        if (next->pre_enable_prev_first) {
                                /* next bridge had requested that prev
                                 * was enabled first, so disabled last
                                 */
                                limit = next;

                                /* Find the next bridge that has NOT requested
                                 * prev to be enabled first / disabled last
                                 */
                                list_for_each_entry_from(next, &encoder->bridge_chain,
                                                         chain_node) {
                                        if (!next->pre_enable_prev_first) {
                                                next = list_prev_entry(next, chain_node);
                                                limit = next;
                                                break;
                                        }

                                        if (list_is_last(&next->chain_node,
                                                         &encoder->bridge_chain)) {
                                                limit = next;
                                                break;
                                        }
                                }

                                /* Call these bridges in reverse order */
                                list_for_each_entry_from_reverse(next, &encoder->bridge_chain,
                                                                 chain_node) {
                                        if (next == bridge)
                                                break;

                                        drm_atomic_bridge_call_post_disable(next,
                                                                            state);
                                }
                        }
                }

                drm_atomic_bridge_call_post_disable(bridge, state);

                if (limit)
                        /* Jump all bridges that we have already post_disabled */
                        bridge = limit;
        }
}
EXPORT_SYMBOL(drm_atomic_bridge_chain_post_disable);

static void drm_atomic_bridge_call_pre_enable(struct drm_bridge *bridge,
                                              struct drm_atomic_state *state)
{
        if (state && bridge->funcs->atomic_pre_enable)
                bridge->funcs->atomic_pre_enable(bridge, state);
        else if (bridge->funcs->pre_enable)
                bridge->funcs->pre_enable(bridge);
}

/**
 * drm_atomic_bridge_chain_pre_enable - prepares for enabling all bridges in
 *					the encoder chain
 * @bridge: bridge control structure
 * @state: atomic state being committed
 *
 * Calls &drm_bridge_funcs.atomic_pre_enable (falls back on
 * &drm_bridge_funcs.pre_enable) op for all the bridges in the encoder chain,
 * starting from the last bridge to the first. These are called before calling
 * &drm_encoder_helper_funcs.atomic_enable
 *
 * If a bridge sets @pre_enable_prev_first, then the pre_enable for the
 * prev bridge will be called before pre_enable of this bridge.
 *
 * Example:
 * Bridge A ---> Bridge B ---> Bridge C ---> Bridge D ---> Bridge E
 *
 * With the pre_enable_prev_first flag enabled in bridges B, D and E, the
 * resulting @pre_enable order would be:
 * Bridge C, Bridge D, Bridge E, Bridge A, Bridge B.
 *
 * Note: the bridge passed should be the one closest to the encoder
 */
void drm_atomic_bridge_chain_pre_enable(struct drm_bridge *bridge,
                                        struct drm_atomic_state *state)
{
        struct drm_encoder *encoder;
        struct drm_bridge *iter, *next, *limit;

        if (!bridge)
                return;

        encoder = bridge->encoder;

        list_for_each_entry_reverse(iter, &encoder->bridge_chain, chain_node) {
                if (iter->pre_enable_prev_first) {
                        next = iter;
                        limit = bridge;
                        list_for_each_entry_from_reverse(next,
                                                         &encoder->bridge_chain,
                                                         chain_node) {
                                if (next == bridge)
                                        break;

                                if (!next->pre_enable_prev_first) {
                                        /* Found first bridge that does NOT
                                         * request prev to be enabled first
                                         */
                                        limit = next;
                                        break;
                                }
                        }

                        list_for_each_entry_from(next, &encoder->bridge_chain, chain_node) {
                                /* Call requested prev bridge pre_enable
                                 * in order.
                                 */
                                if (next == iter)
                                        /* At the first bridge to request prev
                                         * bridges called first.
                                         */
                                        break;

                                drm_atomic_bridge_call_pre_enable(next, state);
                        }
                }

                drm_atomic_bridge_call_pre_enable(iter, state);

                if (iter->pre_enable_prev_first)
                        /* Jump all bridges that we have already pre_enabled */
                        iter = limit;

                if (iter == bridge)
                        break;
        }
}
EXPORT_SYMBOL(drm_atomic_bridge_chain_pre_enable);

/**
 * drm_atomic_bridge_chain_enable - enables all bridges in the encoder chain
 * @bridge: bridge control structure
 * @state: atomic state being committed
 *
 * Calls &drm_bridge_funcs.atomic_enable (falls back on
 * &drm_bridge_funcs.enable) op for all the bridges in the encoder chain,
 * starting from the first bridge to the last. These are called after completing
 * &drm_encoder_helper_funcs.atomic_enable
 *
 * Note: the bridge passed should be the one closest to the encoder
 */
void drm_atomic_bridge_chain_enable(struct drm_bridge *bridge,
                                    struct drm_atomic_state *state)
{
        struct drm_encoder *encoder;

        if (!bridge)
                return;

        encoder = bridge->encoder;
        list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) {
                if (bridge->funcs->atomic_enable) {
                        bridge->funcs->atomic_enable(bridge, state);
                } else if (bridge->funcs->enable) {
                        bridge->funcs->enable(bridge);
                }
        }
}
EXPORT_SYMBOL(drm_atomic_bridge_chain_enable);

static int drm_atomic_bridge_check(struct drm_bridge *bridge,
                                   struct drm_crtc_state *crtc_state,
                                   struct drm_connector_state *conn_state)
{
        if (bridge->funcs->atomic_check) {
                struct drm_bridge_state *bridge_state;
                int ret;

                bridge_state = drm_atomic_get_new_bridge_state(crtc_state->state,
                                                               bridge);
                if (WARN_ON(!bridge_state))
                        return -EINVAL;

                ret = bridge->funcs->atomic_check(bridge, bridge_state,
                                                  crtc_state, conn_state);
                if (ret)
                        return ret;
        } else if (bridge->funcs->mode_fixup) {
                if (!bridge->funcs->mode_fixup(bridge, &crtc_state->mode,
                                               &crtc_state->adjusted_mode))
                        return -EINVAL;
        }

        return 0;
}

static int select_bus_fmt_recursive(struct drm_bridge *first_bridge,
                                    struct drm_bridge *cur_bridge,
                                    struct drm_crtc_state *crtc_state,
                                    struct drm_connector_state *conn_state,
                                    u32 out_bus_fmt)
{
        unsigned int i, num_in_bus_fmts = 0;
        struct drm_bridge_state *cur_state;
        struct drm_bridge *prev_bridge __free(drm_bridge_put) =
                drm_bridge_get_prev_bridge(cur_bridge);
        u32 *in_bus_fmts;
        int ret;

        cur_state = drm_atomic_get_new_bridge_state(crtc_state->state,
                                                    cur_bridge);

        /*
         * If bus format negotiation is not supported by this bridge, let's
         * pass MEDIA_BUS_FMT_FIXED to the previous bridge in the chain and
         * hope that it can handle this situation gracefully (by providing
         * appropriate default values).
         */
        if (!cur_bridge->funcs->atomic_get_input_bus_fmts) {
                if (cur_bridge != first_bridge) {
                        ret = select_bus_fmt_recursive(first_bridge,
                                                       prev_bridge, crtc_state,
                                                       conn_state,
                                                       MEDIA_BUS_FMT_FIXED);
                        if (ret)
                                return ret;
                }

                /*
                 * Driver does not implement the atomic state hooks, but that's
                 * fine, as long as it does not access the bridge state.
                 */
                if (cur_state) {
                        cur_state->input_bus_cfg.format = MEDIA_BUS_FMT_FIXED;
                        cur_state->output_bus_cfg.format = out_bus_fmt;
                }

                return 0;
        }

        /*
         * If the driver implements ->atomic_get_input_bus_fmts() it
         * should also implement the atomic state hooks.
         */
        if (WARN_ON(!cur_state))
                return -EINVAL;

        in_bus_fmts = cur_bridge->funcs->atomic_get_input_bus_fmts(cur_bridge,
                                                                   cur_state,
                                                                   crtc_state,
                                                                   conn_state,
                                                                   out_bus_fmt,
                                                                   &num_in_bus_fmts);
        if (!num_in_bus_fmts)
                return -ENOTSUPP;
        else if (!in_bus_fmts)
                return -ENOMEM;

        if (first_bridge == cur_bridge) {
                cur_state->input_bus_cfg.format = in_bus_fmts[0];
                cur_state->output_bus_cfg.format = out_bus_fmt;
                kfree(in_bus_fmts);
                return 0;
        }

        for (i = 0; i < num_in_bus_fmts; i++) {
                ret = select_bus_fmt_recursive(first_bridge, prev_bridge,
                                               crtc_state, conn_state,
                                               in_bus_fmts[i]);
                if (ret != -ENOTSUPP)
                        break;
        }

        if (!ret) {
                cur_state->input_bus_cfg.format = in_bus_fmts[i];
                cur_state->output_bus_cfg.format = out_bus_fmt;
        }

        kfree(in_bus_fmts);
        return ret;
}

/*
 * This function is called by &drm_atomic_bridge_chain_check() just before
 * calling &drm_bridge_funcs.atomic_check() on all elements of the chain.
 * It performs bus format negotiation between bridge elements. The negotiation
 * happens in reverse order, starting from the last element in the chain up to
 * @bridge.
 *
 * Negotiation starts by retrieving supported output bus formats on the last
 * bridge element and testing them one by one. The test is recursive, meaning
 * that for each tested output format, the whole chain will be walked backward,
 * and each element will have to choose an input bus format that can be
 * transcoded to the requested output format. When a bridge element does not
 * support transcoding into a specific output format -ENOTSUPP is returned and
 * the next bridge element will have to try a different format. If none of the
 * combinations worked, -ENOTSUPP is returned and the atomic modeset will fail.
 *
 * This implementation is relying on
 * &drm_bridge_funcs.atomic_get_output_bus_fmts() and
 * &drm_bridge_funcs.atomic_get_input_bus_fmts() to gather supported
 * input/output formats.
 *
 * When &drm_bridge_funcs.atomic_get_output_bus_fmts() is not implemented by
 * the last element of the chain, &drm_atomic_bridge_chain_select_bus_fmts()
 * tries a single format: &drm_connector.display_info.bus_formats[0] if
 * available, MEDIA_BUS_FMT_FIXED otherwise.
 *
 * When &drm_bridge_funcs.atomic_get_input_bus_fmts() is not implemented,
 * &drm_atomic_bridge_chain_select_bus_fmts() skips the negotiation on the
 * bridge element that lacks this hook and asks the previous element in the
 * chain to try MEDIA_BUS_FMT_FIXED. It's up to bridge drivers to decide what
 * to do in that case (fail if they want to enforce bus format negotiation, or
 * provide a reasonable default if they need to support pipelines where not
 * all elements support bus format negotiation).
 */
static int
drm_atomic_bridge_chain_select_bus_fmts(struct drm_bridge *bridge,
                                        struct drm_crtc_state *crtc_state,
                                        struct drm_connector_state *conn_state)
{
        struct drm_connector *conn = conn_state->connector;
        struct drm_encoder *encoder = bridge->encoder;
        struct drm_bridge_state *last_bridge_state;
        unsigned int i, num_out_bus_fmts = 0;
        u32 *out_bus_fmts;
        int ret = 0;

        struct drm_bridge *last_bridge __free(drm_bridge_put) =
                drm_bridge_get(list_last_entry(&encoder->bridge_chain,
                                               struct drm_bridge, chain_node));
        last_bridge_state = drm_atomic_get_new_bridge_state(crtc_state->state,
                                                            last_bridge);

        if (last_bridge->funcs->atomic_get_output_bus_fmts) {
                const struct drm_bridge_funcs *funcs = last_bridge->funcs;

                /*
                 * If the driver implements ->atomic_get_output_bus_fmts() it
                 * should also implement the atomic state hooks.
                 */
                if (WARN_ON(!last_bridge_state))
                        return -EINVAL;

                out_bus_fmts = funcs->atomic_get_output_bus_fmts(last_bridge,
                                                                 last_bridge_state,
                                                                 crtc_state,
                                                                 conn_state,
                                                                 &num_out_bus_fmts);
                if (!num_out_bus_fmts)
                        return -ENOTSUPP;
                else if (!out_bus_fmts)
                        return -ENOMEM;
        } else {
                num_out_bus_fmts = 1;
                out_bus_fmts = kmalloc(sizeof(*out_bus_fmts), GFP_KERNEL);
                if (!out_bus_fmts)
                        return -ENOMEM;

                if (conn->display_info.num_bus_formats &&
                    conn->display_info.bus_formats)
                        out_bus_fmts[0] = conn->display_info.bus_formats[0];
                else
                        out_bus_fmts[0] = MEDIA_BUS_FMT_FIXED;
        }

        for (i = 0; i < num_out_bus_fmts; i++) {
                ret = select_bus_fmt_recursive(bridge, last_bridge, crtc_state,
                                               conn_state, out_bus_fmts[i]);
                if (ret != -ENOTSUPP)
                        break;
        }

        kfree(out_bus_fmts);

        return ret;
}

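/*
 * Editor's note: a sketch of the &drm_bridge_funcs.atomic_get_input_bus_fmts
 * hook that the negotiation above calls into (not part of the original file).
 * This hypothetical bridge accepts a single RGB888 input bus format whatever
 * the negotiated output format is; real drivers typically derive the list
 * from @output_fmt.
 *
 *	static u32 *foo_get_input_bus_fmts(struct drm_bridge *bridge,
 *					   struct drm_bridge_state *bridge_state,
 *					   struct drm_crtc_state *crtc_state,
 *					   struct drm_connector_state *conn_state,
 *					   u32 output_fmt,
 *					   unsigned int *num_input_fmts)
 *	{
 *		u32 *input_fmts;
 *
 *		input_fmts = kcalloc(1, sizeof(*input_fmts), GFP_KERNEL);
 *		if (!input_fmts) {
 *			*num_input_fmts = 0;	// no formats on allocation failure
 *			return NULL;
 *		}
 *
 *		input_fmts[0] = MEDIA_BUS_FMT_RGB888_1X24;
 *		*num_input_fmts = 1;
 *
 *		return input_fmts;	// freed by the caller with kfree()
 *	}
 */
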
static void
drm_atomic_bridge_propagate_bus_flags(struct drm_bridge *bridge,
                                      struct drm_connector *conn,
                                      struct drm_atomic_state *state)
{
        struct drm_bridge_state *bridge_state, *next_bridge_state;
        u32 output_flags = 0;

        bridge_state = drm_atomic_get_new_bridge_state(state, bridge);

        /* No bridge state attached to this bridge => nothing to propagate. */
        if (!bridge_state)
                return;

        struct drm_bridge *next_bridge __free(drm_bridge_put) = drm_bridge_get_next_bridge(bridge);

        /*
         * Let's try to apply the most common case here, that is, propagate
         * display_info flags for the last bridge, and propagate the input
         * flags of the next bridge element to the output end of the current
         * bridge when the bridge is not the last one.
         * There are exceptions to this rule, like when signal inversion is
         * happening at the board level, but that's something drivers can deal
         * with from their &drm_bridge_funcs.atomic_check() implementation by
         * simply overriding the flags value we've set here.
         */
        if (!next_bridge) {
                output_flags = conn->display_info.bus_flags;
        } else {
                next_bridge_state = drm_atomic_get_new_bridge_state(state,
                                                                    next_bridge);
                /*
                 * No bridge state attached to the next bridge, just leave the
                 * flags to 0.
                 */
                if (next_bridge_state)
                        output_flags = next_bridge_state->input_bus_cfg.flags;
        }

        bridge_state->output_bus_cfg.flags = output_flags;

        /*
         * Propagate the output flags to the input end of the bridge. Again, it's
         * not necessarily what all bridges want, but that's what most of them
         * do, and by doing that by default we avoid forcing drivers to
         * duplicate the "dummy propagation" logic.
         */
        bridge_state->input_bus_cfg.flags = output_flags;
}

/**
 * drm_atomic_bridge_chain_check() - Do an atomic check on the bridge chain
 * @bridge: bridge control structure
 * @crtc_state: new CRTC state
 * @conn_state: new connector state
 *
 * First trigger a bus format negotiation before calling
 * &drm_bridge_funcs.atomic_check() (falls back on
 * &drm_bridge_funcs.mode_fixup()) op for all the bridges in the encoder chain,
 * starting from the last bridge to the first. These are called before calling
 * &drm_encoder_helper_funcs.atomic_check()
 *
 * RETURNS:
 * 0 on success, a negative error code on failure
 */
int drm_atomic_bridge_chain_check(struct drm_bridge *bridge,
                                  struct drm_crtc_state *crtc_state,
                                  struct drm_connector_state *conn_state)
{
        struct drm_connector *conn = conn_state->connector;
        struct drm_encoder *encoder;
        struct drm_bridge *iter;
        int ret;

        if (!bridge)
                return 0;

        ret = drm_atomic_bridge_chain_select_bus_fmts(bridge, crtc_state,
                                                      conn_state);
        if (ret)
                return ret;

        encoder = bridge->encoder;
        list_for_each_entry_reverse(iter, &encoder->bridge_chain, chain_node) {
                int ret;

                /*
                 * Bus flags are propagated by default. If a bridge needs to
                 * tweak the input bus flags for any reason, it should happen
                 * in its &drm_bridge_funcs.atomic_check() implementation such
                 * that preceding bridges in the chain can propagate the new
                 * bus flags.
                 */
                drm_atomic_bridge_propagate_bus_flags(iter, conn,
                                                      crtc_state->state);

                ret = drm_atomic_bridge_check(iter, crtc_state, conn_state);
                if (ret)
                        return ret;

                if (iter == bridge)
                        break;
        }

        return 0;
}
EXPORT_SYMBOL(drm_atomic_bridge_chain_check);

/**
 * drm_bridge_detect - check if anything is attached to the bridge output
 * @bridge: bridge control structure
 * @connector: attached connector
 *
 * If the bridge supports output detection, as reported by the
 * DRM_BRIDGE_OP_DETECT bridge ops flag, call &drm_bridge_funcs.detect for the
 * bridge and return the connection status. Otherwise return
 * connector_status_unknown.
 *
 * RETURNS:
 * The detection status on success, or connector_status_unknown if the bridge
 * doesn't support output detection.
 */
enum drm_connector_status
drm_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
{
        if (!(bridge->ops & DRM_BRIDGE_OP_DETECT))
                return connector_status_unknown;

        return bridge->funcs->detect(bridge, connector);
}
EXPORT_SYMBOL_GPL(drm_bridge_detect);

/**
 * drm_bridge_get_modes - fill all modes currently valid for the sink into the
 * @connector
 * @bridge: bridge control structure
 * @connector: the connector to fill with modes
 *
 * If the bridge supports output modes retrieval, as reported by the
 * DRM_BRIDGE_OP_MODES bridge ops flag, call &drm_bridge_funcs.get_modes to
 * fill the connector with all valid modes and return the number of modes
 * added. Otherwise return 0.
 *
 * RETURNS:
 * The number of modes added to the connector.
 */
int drm_bridge_get_modes(struct drm_bridge *bridge,
                         struct drm_connector *connector)
{
        if (!(bridge->ops & DRM_BRIDGE_OP_MODES))
                return 0;

        return bridge->funcs->get_modes(bridge, connector);
}
EXPORT_SYMBOL_GPL(drm_bridge_get_modes);

/**
 * drm_bridge_edid_read - read the EDID data of the connected display
 * @bridge: bridge control structure
 * @connector: the connector to read EDID for
 *
 * If the bridge supports output EDID retrieval, as reported by the
 * DRM_BRIDGE_OP_EDID bridge ops flag, call &drm_bridge_funcs.edid_read to get
 * the EDID and return it. Otherwise return NULL.
 *
 * RETURNS:
 * The retrieved EDID on success, or NULL otherwise.
 */
const struct drm_edid *drm_bridge_edid_read(struct drm_bridge *bridge,
                                            struct drm_connector *connector)
{
        if (!(bridge->ops & DRM_BRIDGE_OP_EDID))
                return NULL;

        return bridge->funcs->edid_read(bridge, connector);
}
EXPORT_SYMBOL_GPL(drm_bridge_edid_read);

/**
 * drm_bridge_hpd_enable - enable hot plug detection for the bridge
 * @bridge: bridge control structure
 * @cb: hot-plug detection callback
 * @data: data to be passed to the hot-plug detection callback
 *
 * Call &drm_bridge_funcs.hpd_enable if implemented and register the given @cb
 * and @data as hot plug notification callback. From now on the @cb will be
 * called with @data when an output status change is detected by the bridge,
 * until hot plug notification gets disabled with drm_bridge_hpd_disable().
 *
 * Hot plug detection is supported only if the DRM_BRIDGE_OP_HPD flag is set in
 * bridge->ops. This function shall not be called when the flag is not set.
 *
 * Only one hot plug detection callback can be registered at a time; it is an
 * error to call this function when hot plug detection is already enabled for
 * the bridge.
 */
void drm_bridge_hpd_enable(struct drm_bridge *bridge,
                           void (*cb)(void *data,
                                      enum drm_connector_status status),
                           void *data)
{
        if (!(bridge->ops & DRM_BRIDGE_OP_HPD))
                return;

        mutex_lock(&bridge->hpd_mutex);

        if (WARN(bridge->hpd_cb, "Hot plug detection already enabled\n"))
                goto unlock;

        bridge->hpd_cb = cb;
        bridge->hpd_data = data;

        if (bridge->funcs->hpd_enable)
                bridge->funcs->hpd_enable(bridge);

unlock:
        mutex_unlock(&bridge->hpd_mutex);
}
EXPORT_SYMBOL_GPL(drm_bridge_hpd_enable);

/**
 * drm_bridge_hpd_disable - disable hot plug detection for the bridge
 * @bridge: bridge control structure
 *
 * Call &drm_bridge_funcs.hpd_disable if implemented and unregister the hot
 * plug detection callback previously registered with drm_bridge_hpd_enable().
 * Once this function returns the callback will not be called by the bridge
 * when an output status change occurs.
 *
 * Hot plug detection is supported only if the DRM_BRIDGE_OP_HPD flag is set in
 * bridge->ops. This function shall not be called when the flag is not set.
 */
void drm_bridge_hpd_disable(struct drm_bridge *bridge)
{
        if (!(bridge->ops & DRM_BRIDGE_OP_HPD))
                return;

        mutex_lock(&bridge->hpd_mutex);
        if (bridge->funcs->hpd_disable)
                bridge->funcs->hpd_disable(bridge);

        bridge->hpd_cb = NULL;
        bridge->hpd_data = NULL;
        mutex_unlock(&bridge->hpd_mutex);
}
EXPORT_SYMBOL_GPL(drm_bridge_hpd_disable);

/**
 * drm_bridge_hpd_notify - notify hot plug detection events
 * @bridge: bridge control structure
 * @status: output connection status
 *
 * Bridge drivers shall call this function to report hot plug events when they
 * detect a change in the output status, when hot plug detection has been
 * enabled by drm_bridge_hpd_enable().
 *
 * This function shall be called in a context that can sleep.
 */
void drm_bridge_hpd_notify(struct drm_bridge *bridge,
                           enum drm_connector_status status)
{
        mutex_lock(&bridge->hpd_mutex);
        if (bridge->hpd_cb)
                bridge->hpd_cb(bridge->hpd_data, status);
        mutex_unlock(&bridge->hpd_mutex);
}
EXPORT_SYMBOL_GPL(drm_bridge_hpd_notify);

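/*
 * Editor's note: a typical reporting path for the API above, sketched here
 * (not part of the original file). A bridge driver requests a threaded
 * interrupt for its HPD pin and forwards the new status, since
 * drm_bridge_hpd_notify() may sleep. foo_* names are hypothetical.
 *
 *	static irqreturn_t foo_hpd_irq_thread(int irq, void *arg)
 *	{
 *		struct foo_bridge *foo = arg;
 *		bool connected = foo_read_hpd_status(foo);	// hypothetical register read
 *
 *		drm_bridge_hpd_notify(&foo->bridge,
 *				      connected ? connector_status_connected
 *						: connector_status_disconnected);
 *
 *		return IRQ_HANDLED;
 *	}
 */
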
#ifdef CONFIG_OF
/**
 * of_drm_find_bridge - find the bridge corresponding to the device node in
 *			the global bridge list
 *
 * @np: device node
 *
 * RETURNS:
 * drm_bridge control struct on success, NULL on failure
 */
struct drm_bridge *of_drm_find_bridge(struct device_node *np)
{
        struct drm_bridge *bridge;

        mutex_lock(&bridge_lock);

        list_for_each_entry(bridge, &bridge_list, list) {
                if (bridge->of_node == np) {
                        mutex_unlock(&bridge_lock);
                        return bridge;
                }
        }

        mutex_unlock(&bridge_lock);
        return NULL;
}
EXPORT_SYMBOL(of_drm_find_bridge);
#endif

/**
 * devm_drm_put_bridge - Release a bridge reference obtained via devm
 * @dev: device that got the bridge via devm
 * @bridge: pointer to a struct drm_bridge obtained via devm
 *
 * Same as drm_bridge_put() for bridge pointers obtained via devm functions
 * such as devm_drm_bridge_alloc().
 *
 * This function is a temporary workaround and MUST NOT be used. Manual
 * handling of bridge lifetime is inherently unsafe.
 */
void devm_drm_put_bridge(struct device *dev, struct drm_bridge *bridge)
{
        devm_release_action(dev, drm_bridge_put_void, bridge);
}
EXPORT_SYMBOL(devm_drm_put_bridge);

static void drm_bridge_debugfs_show_bridge(struct drm_printer *p,
                                           struct drm_bridge *bridge,
                                           unsigned int idx,
                                           bool lingering)
{
        drm_printf(p, "bridge[%u]: %ps\n", idx, bridge->funcs);

        drm_printf(p, "\trefcount: %u%s\n", kref_read(&bridge->refcount),
                   lingering ? " [lingering]" : "");

        drm_printf(p, "\ttype: [%d] %s\n",
                   bridge->type,
                   drm_get_connector_type_name(bridge->type));

        /* The OF node could be freed after drm_bridge_remove() */
        if (bridge->of_node && !lingering)
                drm_printf(p, "\tOF: %pOFfc\n", bridge->of_node);

        drm_printf(p, "\tops: [0x%x]", bridge->ops);
        if (bridge->ops & DRM_BRIDGE_OP_DETECT)
                drm_puts(p, " detect");
        if (bridge->ops & DRM_BRIDGE_OP_EDID)
                drm_puts(p, " edid");
        if (bridge->ops & DRM_BRIDGE_OP_HPD)
                drm_puts(p, " hpd");
        if (bridge->ops & DRM_BRIDGE_OP_MODES)
                drm_puts(p, " modes");
        if (bridge->ops & DRM_BRIDGE_OP_HDMI)
                drm_puts(p, " hdmi");
        drm_puts(p, "\n");
}

static int allbridges_show(struct seq_file *m, void *data)
{
        struct drm_printer p = drm_seq_file_printer(m);
        struct drm_bridge *bridge;
        unsigned int idx = 0;

        mutex_lock(&bridge_lock);

        list_for_each_entry(bridge, &bridge_list, list)
                drm_bridge_debugfs_show_bridge(&p, bridge, idx++, false);

        list_for_each_entry(bridge, &bridge_lingering_list, list)
                drm_bridge_debugfs_show_bridge(&p, bridge, idx++, true);

        mutex_unlock(&bridge_lock);

        return 0;
}
DEFINE_SHOW_ATTRIBUTE(allbridges);

static int encoder_bridges_show(struct seq_file *m, void *data)
{
        struct drm_encoder *encoder = m->private;
        struct drm_printer p = drm_seq_file_printer(m);
        unsigned int idx = 0;

        drm_for_each_bridge_in_chain_scoped(encoder, bridge)
                drm_bridge_debugfs_show_bridge(&p, bridge, idx++, false);

        return 0;
}
DEFINE_SHOW_ATTRIBUTE(encoder_bridges);

void drm_bridge_debugfs_params(struct dentry *root)
{
        debugfs_create_file("bridges", 0444, root, NULL, &allbridges_fops);
}

void drm_bridge_debugfs_encoder_params(struct dentry *root,
                                       struct drm_encoder *encoder)
{
        /* bridges list */
        debugfs_create_file("bridges", 0444, root, encoder, &encoder_bridges_fops);
}

MODULE_AUTHOR("Ajay Kumar <[email protected]>");
MODULE_DESCRIPTION("DRM bridge infrastructure");
MODULE_LICENSE("GPL and additional rights");