GitHub Repository: torvalds/linux
Path: blob/master/drivers/gpu/drm/drm_atomic_helper.c
/*
 * Copyright (C) 2014 Red Hat
 * Copyright (C) 2014 Intel Corp.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 * Rob Clark <[email protected]>
 * Daniel Vetter <[email protected]>
 */

#include <linux/export.h>
#include <linux/dma-fence.h>
#include <linux/ktime.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_blend.h>
#include <drm/drm_bridge.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_panic.h>
#include <drm/drm_print.h>
#include <drm/drm_self_refresh_helper.h>
#include <drm/drm_vblank.h>
#include <drm/drm_writeback.h>

#include "drm_crtc_helper_internal.h"
#include "drm_crtc_internal.h"

/**
 * DOC: overview
 *
 * This helper library provides implementations of check and commit functions on
 * top of the CRTC modeset helper callbacks and the plane helper callbacks. It
 * also provides convenience implementations for the atomic state handling
 * callbacks for drivers which don't need to subclass the drm core structures to
 * add their own additional internal state.
 *
 * This library also provides default implementations for the check callback in
 * drm_atomic_helper_check() and for the commit callback with
 * drm_atomic_helper_commit(). But the individual stages and callbacks are
 * exposed to allow drivers to mix and match and e.g. use the plane helpers only
 * together with a driver private modeset implementation.
 *
 * This library also provides implementations for all the legacy driver
 * interfaces on top of the atomic interface. See drm_atomic_helper_set_config(),
 * drm_atomic_helper_disable_plane(), and the various functions to implement
 * set_property callbacks. New drivers must not implement these functions
 * themselves but must use the provided helpers.
 *
 * The atomic helper uses the same function table structures as all other
 * modesetting helpers. See the documentation for &struct drm_crtc_helper_funcs,
 * &struct drm_encoder_helper_funcs and &struct drm_connector_helper_funcs. It
 * also shares the &struct drm_plane_helper_funcs function table with the plane
 * helpers.
 */
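/*
 * Example (illustrative sketch, not part of the helper library): wiring the
 * default check and commit helpers into &struct drm_mode_config_funcs. The
 * foo_* name is hypothetical, and drm_gem_fb_create() is just one possible
 * .fb_create implementation (it requires <drm/drm_gem_framebuffer_helper.h>):
 *
 *	static const struct drm_mode_config_funcs foo_mode_config_funcs = {
 *		.fb_create = drm_gem_fb_create,
 *		.atomic_check = drm_atomic_helper_check,
 *		.atomic_commit = drm_atomic_helper_commit,
 *	};
 */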
static void
drm_atomic_helper_plane_changed(struct drm_atomic_state *state,
				struct drm_plane_state *old_plane_state,
				struct drm_plane_state *plane_state,
				struct drm_plane *plane)
{
	struct drm_crtc_state *crtc_state;

	if (old_plane_state->crtc) {
		crtc_state = drm_atomic_get_new_crtc_state(state,
							   old_plane_state->crtc);

		if (WARN_ON(!crtc_state))
			return;

		crtc_state->planes_changed = true;
	}

	if (plane_state->crtc) {
		crtc_state = drm_atomic_get_new_crtc_state(state, plane_state->crtc);

		if (WARN_ON(!crtc_state))
			return;

		crtc_state->planes_changed = true;
	}
}

static int handle_conflicting_encoders(struct drm_atomic_state *state,
				       bool disable_conflicting_encoders)
{
	struct drm_connector_state *new_conn_state;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct drm_encoder *encoder;
	unsigned int encoder_mask = 0;
	int i, ret = 0;

	/*
	 * First loop, find all newly assigned encoders from the connectors
	 * part of the state. If the same encoder is assigned to multiple
	 * connectors bail out.
	 */
	for_each_new_connector_in_state(state, connector, new_conn_state, i) {
		const struct drm_connector_helper_funcs *funcs = connector->helper_private;
		struct drm_encoder *new_encoder;

		if (!new_conn_state->crtc)
			continue;

		if (funcs->atomic_best_encoder)
			new_encoder = funcs->atomic_best_encoder(connector,
								 state);
		else if (funcs->best_encoder)
			new_encoder = funcs->best_encoder(connector);
		else
			new_encoder = drm_connector_get_single_encoder(connector);

		if (new_encoder) {
			if (encoder_mask & drm_encoder_mask(new_encoder)) {
				drm_dbg_atomic(connector->dev,
					       "[ENCODER:%d:%s] on [CONNECTOR:%d:%s] already assigned\n",
					       new_encoder->base.id, new_encoder->name,
					       connector->base.id, connector->name);

				return -EINVAL;
			}

			encoder_mask |= drm_encoder_mask(new_encoder);
		}
	}

	if (!encoder_mask)
		return 0;

	/*
	 * Second loop, iterate over all connectors not part of the state.
	 *
	 * If a conflicting encoder is found and disable_conflicting_encoders
	 * is not set, an error is returned. Userspace can provide a solution
	 * through the atomic ioctl.
	 *
	 * If the flag is set conflicting connectors are removed from the CRTC
	 * and the CRTC is disabled if no encoder is left. This preserves
	 * compatibility with the legacy set_config behavior.
	 */
	drm_connector_list_iter_begin(state->dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct drm_crtc_state *crtc_state;

		if (drm_atomic_get_new_connector_state(state, connector))
			continue;

		encoder = connector->state->best_encoder;
		if (!encoder || !(encoder_mask & drm_encoder_mask(encoder)))
			continue;

		if (!disable_conflicting_encoders) {
			drm_dbg_atomic(connector->dev,
				       "[ENCODER:%d:%s] in use on [CRTC:%d:%s] by [CONNECTOR:%d:%s]\n",
				       encoder->base.id, encoder->name,
				       connector->state->crtc->base.id,
				       connector->state->crtc->name,
				       connector->base.id, connector->name);
			ret = -EINVAL;
			goto out;
		}

		new_conn_state = drm_atomic_get_connector_state(state, connector);
		if (IS_ERR(new_conn_state)) {
			ret = PTR_ERR(new_conn_state);
			goto out;
		}

		drm_dbg_atomic(connector->dev,
			       "[ENCODER:%d:%s] in use on [CRTC:%d:%s], disabling [CONNECTOR:%d:%s]\n",
			       encoder->base.id, encoder->name,
			       new_conn_state->crtc->base.id, new_conn_state->crtc->name,
			       connector->base.id, connector->name);

		crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);

		ret = drm_atomic_set_crtc_for_connector(new_conn_state, NULL);
		if (ret)
			goto out;

		if (!crtc_state->connector_mask) {
			ret = drm_atomic_set_mode_prop_for_crtc(crtc_state,
								NULL);
			if (ret < 0)
				goto out;

			crtc_state->active = false;
		}
	}
out:
	drm_connector_list_iter_end(&conn_iter);

	return ret;
}

static void
set_best_encoder(struct drm_atomic_state *state,
		 struct drm_connector_state *conn_state,
		 struct drm_encoder *encoder)
{
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;

	if (conn_state->best_encoder) {
		/* Unset the encoder_mask in the old crtc state. */
		crtc = conn_state->connector->state->crtc;

		/* A NULL crtc is an error here because we should have
		 * duplicated a NULL best_encoder when crtc was NULL.
		 * As an exception restoring duplicated atomic state
		 * during resume is allowed, so don't warn when
		 * best_encoder is equal to encoder we intend to set.
		 */
		WARN_ON(!crtc && encoder != conn_state->best_encoder);
		if (crtc) {
			crtc_state = drm_atomic_get_new_crtc_state(state, crtc);

			crtc_state->encoder_mask &=
				~drm_encoder_mask(conn_state->best_encoder);
		}
	}

	if (encoder) {
		crtc = conn_state->crtc;
		WARN_ON(!crtc);
		if (crtc) {
			crtc_state = drm_atomic_get_new_crtc_state(state, crtc);

			crtc_state->encoder_mask |=
				drm_encoder_mask(encoder);
		}
	}

	conn_state->best_encoder = encoder;
}

static void
steal_encoder(struct drm_atomic_state *state,
	      struct drm_encoder *encoder)
{
	struct drm_crtc_state *crtc_state;
	struct drm_connector *connector;
	struct drm_connector_state *old_connector_state, *new_connector_state;
	int i;

	for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i) {
		struct drm_crtc *encoder_crtc;

		if (new_connector_state->best_encoder != encoder)
			continue;

		encoder_crtc = old_connector_state->crtc;

		drm_dbg_atomic(encoder->dev,
			       "[ENCODER:%d:%s] in use on [CRTC:%d:%s], stealing it\n",
			       encoder->base.id, encoder->name,
			       encoder_crtc->base.id, encoder_crtc->name);

		set_best_encoder(state, new_connector_state, NULL);

		crtc_state = drm_atomic_get_new_crtc_state(state, encoder_crtc);
		crtc_state->connectors_changed = true;

		return;
	}
}

static int
update_connector_routing(struct drm_atomic_state *state,
			 struct drm_connector *connector,
			 struct drm_connector_state *old_connector_state,
			 struct drm_connector_state *new_connector_state,
			 bool added_by_user)
{
	const struct drm_connector_helper_funcs *funcs;
	struct drm_encoder *new_encoder;
	struct drm_crtc_state *crtc_state;

	drm_dbg_atomic(connector->dev, "Updating routing for [CONNECTOR:%d:%s]\n",
		       connector->base.id, connector->name);

	if (old_connector_state->crtc != new_connector_state->crtc) {
		if (old_connector_state->crtc) {
			crtc_state = drm_atomic_get_new_crtc_state(state, old_connector_state->crtc);
			crtc_state->connectors_changed = true;
		}

		if (new_connector_state->crtc) {
			crtc_state = drm_atomic_get_new_crtc_state(state, new_connector_state->crtc);
			crtc_state->connectors_changed = true;
		}
	}

	if (!new_connector_state->crtc) {
		drm_dbg_atomic(connector->dev, "Disabling [CONNECTOR:%d:%s]\n",
			       connector->base.id, connector->name);

		set_best_encoder(state, new_connector_state, NULL);

		return 0;
	}

	crtc_state = drm_atomic_get_new_crtc_state(state,
						   new_connector_state->crtc);
	/*
	 * For compatibility with legacy users, we want to make sure that
	 * we allow DPMS On->Off modesets on unregistered connectors. Modesets
	 * which would result in anything else must be considered invalid, to
	 * avoid turning on new displays on dead connectors.
	 *
	 * Since the connector can be unregistered at any point during an
	 * atomic check or commit, this is racy. But that's OK: all we care
	 * about is ensuring that userspace can't do anything but shut off the
	 * display on a connector that was destroyed after it's been notified,
	 * not before.
	 *
	 * Additionally, we also want to ignore connector registration when
	 * we're trying to restore an atomic state during system resume since
	 * there's a chance the connector may have been destroyed during the
	 * process, but it's better to ignore that than cause
	 * drm_atomic_helper_resume() to fail.
	 *
	 * Last, we want to ignore connector registration when the connector
	 * was not pulled in the atomic state by user-space (ie, was pulled
	 * in by the driver, e.g. when updating a DP-MST stream).
	 */
	if (!state->duplicated && drm_connector_is_unregistered(connector) &&
	    added_by_user && crtc_state->active) {
		drm_dbg_atomic(connector->dev,
			       "[CONNECTOR:%d:%s] is not registered\n",
			       connector->base.id, connector->name);
		return -EINVAL;
	}

	funcs = connector->helper_private;

	if (funcs->atomic_best_encoder)
		new_encoder = funcs->atomic_best_encoder(connector, state);
	else if (funcs->best_encoder)
		new_encoder = funcs->best_encoder(connector);
	else
		new_encoder = drm_connector_get_single_encoder(connector);

	if (!new_encoder) {
		drm_dbg_atomic(connector->dev,
			       "No suitable encoder found for [CONNECTOR:%d:%s]\n",
			       connector->base.id, connector->name);
		return -EINVAL;
	}

	if (!drm_encoder_crtc_ok(new_encoder, new_connector_state->crtc)) {
		drm_dbg_atomic(connector->dev,
			       "[ENCODER:%d:%s] incompatible with [CRTC:%d:%s]\n",
			       new_encoder->base.id,
			       new_encoder->name,
			       new_connector_state->crtc->base.id,
			       new_connector_state->crtc->name);
		return -EINVAL;
	}

	if (new_encoder == new_connector_state->best_encoder) {
		set_best_encoder(state, new_connector_state, new_encoder);

		drm_dbg_atomic(connector->dev,
			       "[CONNECTOR:%d:%s] keeps [ENCODER:%d:%s], now on [CRTC:%d:%s]\n",
			       connector->base.id,
			       connector->name,
			       new_encoder->base.id,
			       new_encoder->name,
			       new_connector_state->crtc->base.id,
			       new_connector_state->crtc->name);

		return 0;
	}

	steal_encoder(state, new_encoder);

	set_best_encoder(state, new_connector_state, new_encoder);

	crtc_state->connectors_changed = true;

	drm_dbg_atomic(connector->dev,
		       "[CONNECTOR:%d:%s] using [ENCODER:%d:%s] on [CRTC:%d:%s]\n",
		       connector->base.id,
		       connector->name,
		       new_encoder->base.id,
		       new_encoder->name,
		       new_connector_state->crtc->base.id,
		       new_connector_state->crtc->name);

	return 0;
}

static int
mode_fixup(struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state;
	struct drm_connector *connector;
	struct drm_connector_state *new_conn_state;
	int i;
	int ret;

	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (!new_crtc_state->mode_changed &&
		    !new_crtc_state->connectors_changed)
			continue;

		drm_mode_copy(&new_crtc_state->adjusted_mode, &new_crtc_state->mode);
	}

	for_each_new_connector_in_state(state, connector, new_conn_state, i) {
		const struct drm_encoder_helper_funcs *funcs;
		struct drm_encoder *encoder;
		struct drm_bridge *bridge;

		WARN_ON(!!new_conn_state->best_encoder != !!new_conn_state->crtc);

		if (!new_conn_state->crtc || !new_conn_state->best_encoder)
			continue;

		new_crtc_state =
			drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);

		/*
		 * Each encoder has at most one connector (since we always steal
		 * it away), so we won't call ->mode_fixup twice.
		 */
		encoder = new_conn_state->best_encoder;
		funcs = encoder->helper_private;

		bridge = drm_bridge_chain_get_first_bridge(encoder);
		ret = drm_atomic_bridge_chain_check(bridge,
						    new_crtc_state,
						    new_conn_state);
		drm_bridge_put(bridge);
		if (ret) {
			drm_dbg_atomic(encoder->dev, "Bridge atomic check failed\n");
			return ret;
		}

		if (funcs && funcs->atomic_check) {
			ret = funcs->atomic_check(encoder, new_crtc_state,
						  new_conn_state);
			if (ret) {
				drm_dbg_atomic(encoder->dev,
					       "[ENCODER:%d:%s] check failed\n",
					       encoder->base.id, encoder->name);
				return ret;
			}
		} else if (funcs && funcs->mode_fixup) {
			ret = funcs->mode_fixup(encoder, &new_crtc_state->mode,
						&new_crtc_state->adjusted_mode);
			if (!ret) {
				drm_dbg_atomic(encoder->dev,
					       "[ENCODER:%d:%s] fixup failed\n",
					       encoder->base.id, encoder->name);
				return -EINVAL;
			}
		}
	}

	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		const struct drm_crtc_helper_funcs *funcs;

		if (!new_crtc_state->enable)
			continue;

		if (!new_crtc_state->mode_changed &&
		    !new_crtc_state->connectors_changed)
			continue;

		funcs = crtc->helper_private;
		if (!funcs || !funcs->mode_fixup)
			continue;

		ret = funcs->mode_fixup(crtc, &new_crtc_state->mode,
					&new_crtc_state->adjusted_mode);
		if (!ret) {
			drm_dbg_atomic(crtc->dev, "[CRTC:%d:%s] fixup failed\n",
				       crtc->base.id, crtc->name);
			return -EINVAL;
		}
	}

	return 0;
}

static enum drm_mode_status mode_valid_path(struct drm_connector *connector,
					    struct drm_encoder *encoder,
					    struct drm_crtc *crtc,
					    const struct drm_display_mode *mode)
{
	struct drm_bridge *bridge;
	enum drm_mode_status ret;

	ret = drm_encoder_mode_valid(encoder, mode);
	if (ret != MODE_OK) {
		drm_dbg_atomic(encoder->dev,
			       "[ENCODER:%d:%s] mode_valid() failed\n",
			       encoder->base.id, encoder->name);
		return ret;
	}

	bridge = drm_bridge_chain_get_first_bridge(encoder);
	ret = drm_bridge_chain_mode_valid(bridge, &connector->display_info,
					  mode);
	drm_bridge_put(bridge);
	if (ret != MODE_OK) {
		drm_dbg_atomic(encoder->dev, "[BRIDGE] mode_valid() failed\n");
		return ret;
	}

	ret = drm_crtc_mode_valid(crtc, mode);
	if (ret != MODE_OK) {
		drm_dbg_atomic(encoder->dev, "[CRTC:%d:%s] mode_valid() failed\n",
			       crtc->base.id, crtc->name);
		return ret;
	}

	return ret;
}

static int
mode_valid(struct drm_atomic_state *state)
{
	struct drm_connector_state *conn_state;
	struct drm_connector *connector;
	int i;

	for_each_new_connector_in_state(state, connector, conn_state, i) {
		struct drm_encoder *encoder = conn_state->best_encoder;
		struct drm_crtc *crtc = conn_state->crtc;
		struct drm_crtc_state *crtc_state;
		enum drm_mode_status mode_status;
		const struct drm_display_mode *mode;

		if (!crtc || !encoder)
			continue;

		crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
		if (!crtc_state)
			continue;
		if (!crtc_state->mode_changed && !crtc_state->connectors_changed)
			continue;

		mode = &crtc_state->mode;

		mode_status = mode_valid_path(connector, encoder, crtc, mode);
		if (mode_status != MODE_OK)
			return -EINVAL;
	}

	return 0;
}

static int drm_atomic_check_valid_clones(struct drm_atomic_state *state,
					 struct drm_crtc *crtc)
{
	struct drm_encoder *drm_enc;
	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
									   crtc);

	drm_for_each_encoder_mask(drm_enc, crtc->dev, crtc_state->encoder_mask) {
		if (!drm_enc->possible_clones) {
			DRM_DEBUG("enc%d possible_clones is 0\n", drm_enc->base.id);
			continue;
		}

		if ((crtc_state->encoder_mask & drm_enc->possible_clones) !=
		    crtc_state->encoder_mask) {
			DRM_DEBUG("crtc%d failed valid clone check for mask 0x%x\n",
				  crtc->base.id, crtc_state->encoder_mask);
			return -EINVAL;
		}
	}

	return 0;
}

/**
 * drm_atomic_helper_check_modeset - validate state object for modeset changes
 * @dev: DRM device
 * @state: the driver state object
 *
 * Check the state object to see if the requested state is physically possible.
 * This does all the CRTC and connector related computations for an atomic
 * update and adds any additional connectors needed for full modesets. It calls
 * the various per-object callbacks in the following order:
 *
 * 1. &drm_connector_helper_funcs.atomic_best_encoder for determining the new encoder.
 * 2. &drm_connector_helper_funcs.atomic_check to validate the connector state.
 * 3. If it's determined a modeset is needed then all connectors on the affected
 *    CRTC are added and &drm_connector_helper_funcs.atomic_check is run on them.
 * 4. &drm_encoder_helper_funcs.mode_valid, &drm_bridge_funcs.mode_valid and
 *    &drm_crtc_helper_funcs.mode_valid are called on the affected components.
 * 5. &drm_bridge_funcs.mode_fixup is called on all encoder bridges.
 * 6. &drm_encoder_helper_funcs.atomic_check is called to validate any encoder state.
 *    This function is only called when the encoder will be part of a configured CRTC;
 *    it must not be used for implementing connector property validation.
 *    If this function is NULL, &drm_encoder_helper_funcs.mode_fixup is called
 *    instead.
 * 7. &drm_crtc_helper_funcs.mode_fixup is called last, to fix up the mode with CRTC constraints.
 *
 * &drm_crtc_state.mode_changed is set when the input mode is changed.
 * &drm_crtc_state.connectors_changed is set when a connector is added or
 * removed from the CRTC. &drm_crtc_state.active_changed is set when
 * &drm_crtc_state.active changes, which is used for DPMS.
 * &drm_crtc_state.no_vblank is set from the result of drm_dev_has_vblank().
 * See also: drm_atomic_crtc_needs_modeset()
 *
 * IMPORTANT:
 *
 * Drivers which set &drm_crtc_state.mode_changed (e.g. in their
 * &drm_plane_helper_funcs.atomic_check hooks if a plane update can't be done
 * without a full modeset) _must_ call this function after that change. It is
 * permitted to call this function multiple times for the same update, e.g.
 * when the &drm_crtc_helper_funcs.atomic_check functions depend upon the
 * adjusted dotclock for fifo space allocation and watermark computation.
 *
 * RETURNS:
 * Zero for success or -errno
 */
int
drm_atomic_helper_check_modeset(struct drm_device *dev,
				struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_connector *connector;
	struct drm_connector_state *old_connector_state, *new_connector_state;
	int i, ret;
	unsigned int connectors_mask = 0, user_connectors_mask = 0;

	for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i)
		user_connectors_mask |= BIT(i);

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		bool has_connectors =
			!!new_crtc_state->connector_mask;

		WARN_ON(!drm_modeset_is_locked(&crtc->mutex));

		if (!drm_mode_equal(&old_crtc_state->mode, &new_crtc_state->mode)) {
			drm_dbg_atomic(dev, "[CRTC:%d:%s] mode changed\n",
				       crtc->base.id, crtc->name);
			new_crtc_state->mode_changed = true;
		}

		if (old_crtc_state->enable != new_crtc_state->enable) {
			drm_dbg_atomic(dev, "[CRTC:%d:%s] enable changed\n",
				       crtc->base.id, crtc->name);

			/*
			 * For clarity this assignment is done here, but
			 * enable == 0 is only true when there are no
			 * connectors and a NULL mode.
			 *
			 * The other way around is true as well. enable != 0
			 * implies that connectors are attached and a mode is set.
			 */
			new_crtc_state->mode_changed = true;
			new_crtc_state->connectors_changed = true;
		}

		if (old_crtc_state->active != new_crtc_state->active) {
			drm_dbg_atomic(dev, "[CRTC:%d:%s] active changed\n",
				       crtc->base.id, crtc->name);
			new_crtc_state->active_changed = true;
		}

		if (new_crtc_state->enable != has_connectors) {
			drm_dbg_atomic(dev, "[CRTC:%d:%s] enabled/connectors mismatch (%d/%d)\n",
				       crtc->base.id, crtc->name,
				       new_crtc_state->enable, has_connectors);

			return -EINVAL;
		}

		if (drm_dev_has_vblank(dev))
			new_crtc_state->no_vblank = false;
		else
			new_crtc_state->no_vblank = true;
	}

	ret = handle_conflicting_encoders(state, false);
	if (ret)
		return ret;

	for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i) {
		const struct drm_connector_helper_funcs *funcs = connector->helper_private;

		WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));

		/*
		 * This only sets crtc->connectors_changed for routing changes,
		 * drivers must set crtc->connectors_changed themselves when
		 * connector properties need to be updated.
		 */
		ret = update_connector_routing(state, connector,
					       old_connector_state,
					       new_connector_state,
					       BIT(i) & user_connectors_mask);
		if (ret)
			return ret;
		if (old_connector_state->crtc) {
			new_crtc_state = drm_atomic_get_new_crtc_state(state,
								       old_connector_state->crtc);
			if (old_connector_state->link_status !=
			    new_connector_state->link_status)
				new_crtc_state->connectors_changed = true;

			if (old_connector_state->max_requested_bpc !=
			    new_connector_state->max_requested_bpc)
				new_crtc_state->connectors_changed = true;
		}

		if (funcs->atomic_check)
			ret = funcs->atomic_check(connector, state);
		if (ret) {
			drm_dbg_atomic(dev,
				       "[CONNECTOR:%d:%s] driver check failed\n",
				       connector->base.id, connector->name);
			return ret;
		}

		connectors_mask |= BIT(i);
	}

	/*
	 * After all the routing has been prepared we need to add in any
	 * connector which is itself unchanged, but whose CRTC changes its
	 * configuration. This must be done before calling mode_fixup in case a
	 * crtc only changed its mode but has the same set of connectors.
	 */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
			continue;

		drm_dbg_atomic(dev,
			       "[CRTC:%d:%s] needs all connectors, enable: %c, active: %c\n",
			       crtc->base.id, crtc->name,
			       new_crtc_state->enable ? 'y' : 'n',
			       new_crtc_state->active ? 'y' : 'n');

		ret = drm_atomic_add_affected_connectors(state, crtc);
		if (ret != 0)
			return ret;

		ret = drm_atomic_add_affected_planes(state, crtc);
		if (ret != 0)
			return ret;

		ret = drm_atomic_check_valid_clones(state, crtc);
		if (ret != 0)
			return ret;
	}

	/*
	 * Iterate over all connectors again, to make sure atomic_check()
	 * has been called on them when a modeset is forced.
	 */
	for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i) {
		const struct drm_connector_helper_funcs *funcs = connector->helper_private;

		if (connectors_mask & BIT(i))
			continue;

		if (funcs->atomic_check)
			ret = funcs->atomic_check(connector, state);
		if (ret) {
			drm_dbg_atomic(dev,
				       "[CONNECTOR:%d:%s] driver check failed\n",
				       connector->base.id, connector->name);
			return ret;
		}
	}

	/*
	 * Iterate over all connectors again, and add all affected bridges to
	 * the state.
	 */
	for_each_oldnew_connector_in_state(state, connector,
					   old_connector_state,
					   new_connector_state, i) {
		struct drm_encoder *encoder;

		encoder = old_connector_state->best_encoder;
		ret = drm_atomic_add_encoder_bridges(state, encoder);
		if (ret)
			return ret;

		encoder = new_connector_state->best_encoder;
		ret = drm_atomic_add_encoder_bridges(state, encoder);
		if (ret)
			return ret;
	}

	ret = mode_valid(state);
	if (ret)
		return ret;

	return mode_fixup(state);
}
EXPORT_SYMBOL(drm_atomic_helper_check_modeset);
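/*
 * Example (illustrative sketch): the pattern required by the IMPORTANT note
 * above. If a driver's plane or CRTC checks may set
 * &drm_crtc_state.mode_changed, the driver's top-level atomic check has to run
 * drm_atomic_helper_check_modeset() again afterwards. The foo_* name is
 * hypothetical:
 *
 *	static int foo_atomic_check(struct drm_device *dev,
 *				    struct drm_atomic_state *state)
 *	{
 *		int ret;
 *
 *		ret = drm_atomic_helper_check_modeset(dev, state);
 *		if (ret)
 *			return ret;
 *
 *		ret = drm_atomic_helper_check_planes(dev, state);
 *		if (ret)
 *			return ret;
 *
 *		return drm_atomic_helper_check_modeset(dev, state);
 *	}
 */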

/**
 * drm_atomic_helper_check_wb_connector_state() - Check writeback connector state
 * @connector: corresponding connector
 * @state: the driver state object
 *
 * Checks if the writeback connector state is valid, and returns an error if it
 * isn't.
 *
 * RETURNS:
 * Zero for success or -errno
 */
int
drm_atomic_helper_check_wb_connector_state(struct drm_connector *connector,
					   struct drm_atomic_state *state)
{
	struct drm_connector_state *conn_state =
		drm_atomic_get_new_connector_state(state, connector);
	struct drm_writeback_job *wb_job = conn_state->writeback_job;
	struct drm_property_blob *pixel_format_blob;
	struct drm_framebuffer *fb;
	size_t i, nformats;
	u32 *formats;

	if (!wb_job || !wb_job->fb)
		return 0;

	pixel_format_blob = wb_job->connector->pixel_formats_blob_ptr;
	nformats = pixel_format_blob->length / sizeof(u32);
	formats = pixel_format_blob->data;
	fb = wb_job->fb;

	for (i = 0; i < nformats; i++)
		if (fb->format->format == formats[i])
			return 0;

	drm_dbg_kms(connector->dev, "Invalid pixel format %p4cc\n", &fb->format->format);

	return -EINVAL;
}
EXPORT_SYMBOL(drm_atomic_helper_check_wb_connector_state);
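/*
 * Example (illustrative sketch): a writeback connector's
 * &drm_connector_helper_funcs.atomic_check can simply forward to the helper
 * above. The foo_* name is hypothetical:
 *
 *	static int foo_wb_connector_atomic_check(struct drm_connector *connector,
 *						 struct drm_atomic_state *state)
 *	{
 *		return drm_atomic_helper_check_wb_connector_state(connector, state);
 *	}
 */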

/**
 * drm_atomic_helper_check_plane_state() - Check plane state for validity
 * @plane_state: plane state to check
 * @crtc_state: CRTC state to check
 * @min_scale: minimum @src:@dest scaling factor in 16.16 fixed point
 * @max_scale: maximum @src:@dest scaling factor in 16.16 fixed point
 * @can_position: is it legal to position the plane such that it
 *                doesn't cover the entire CRTC? This will generally
 *                only be false for primary planes.
 * @can_update_disabled: can the plane be updated while the CRTC
 *                       is disabled?
 *
 * Checks that a desired plane update is valid, and updates various
 * bits of derived state (clipped coordinates etc.). Drivers that provide
 * their own plane handling rather than helper-provided implementations may
 * still wish to call this function to avoid duplication of error checking
 * code.
 *
 * RETURNS:
 * Zero if update appears valid, error code on failure
 */
int drm_atomic_helper_check_plane_state(struct drm_plane_state *plane_state,
					const struct drm_crtc_state *crtc_state,
					int min_scale,
					int max_scale,
					bool can_position,
					bool can_update_disabled)
{
	struct drm_framebuffer *fb = plane_state->fb;
	struct drm_rect *src = &plane_state->src;
	struct drm_rect *dst = &plane_state->dst;
	unsigned int rotation = plane_state->rotation;
	struct drm_rect clip = {};
	int hscale, vscale;

	WARN_ON(plane_state->crtc && plane_state->crtc != crtc_state->crtc);

	*src = drm_plane_state_src(plane_state);
	*dst = drm_plane_state_dest(plane_state);

	if (!fb) {
		plane_state->visible = false;
		return 0;
	}

	/* crtc should only be NULL when disabling (i.e., !fb) */
	if (WARN_ON(!plane_state->crtc)) {
		plane_state->visible = false;
		return 0;
	}

	if (!crtc_state->enable && !can_update_disabled) {
		drm_dbg_kms(plane_state->plane->dev,
			    "Cannot update plane of a disabled CRTC.\n");
		return -EINVAL;
	}

	drm_rect_rotate(src, fb->width << 16, fb->height << 16, rotation);

	/* Check scaling */
	hscale = drm_rect_calc_hscale(src, dst, min_scale, max_scale);
	vscale = drm_rect_calc_vscale(src, dst, min_scale, max_scale);
	if (hscale < 0 || vscale < 0) {
		drm_dbg_kms(plane_state->plane->dev,
			    "Invalid scaling of plane\n");
		drm_rect_debug_print("src: ", &plane_state->src, true);
		drm_rect_debug_print("dst: ", &plane_state->dst, false);
		return -ERANGE;
	}

	if (crtc_state->enable)
		drm_mode_get_hv_timing(&crtc_state->mode, &clip.x2, &clip.y2);

	plane_state->visible = drm_rect_clip_scaled(src, dst, &clip);

	drm_rect_rotate_inv(src, fb->width << 16, fb->height << 16, rotation);

	if (!plane_state->visible)
		/*
		 * Plane isn't visible; some drivers can handle this
		 * so we just return success here. Drivers that can't
		 * (including those that use the primary plane helper's
		 * update function) will return an error from their
		 * update_plane handler.
		 */
		return 0;

	if (!can_position && !drm_rect_equals(dst, &clip)) {
		drm_dbg_kms(plane_state->plane->dev,
			    "Plane must cover entire CRTC\n");
		drm_rect_debug_print("dst: ", dst, false);
		drm_rect_debug_print("clip: ", &clip, false);
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL(drm_atomic_helper_check_plane_state);
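/*
 * Example (illustrative sketch): a typical &drm_plane_helper_funcs.atomic_check
 * implementation built on top of drm_atomic_helper_check_plane_state(). The
 * foo_* name is hypothetical; the scaling limits shown here forbid scaling
 * (DRM_PLANE_NO_SCALING), other drivers pass their hardware limits instead:
 *
 *	static int foo_plane_atomic_check(struct drm_plane *plane,
 *					  struct drm_atomic_state *state)
 *	{
 *		struct drm_plane_state *new_state =
 *			drm_atomic_get_new_plane_state(state, plane);
 *		struct drm_crtc_state *crtc_state;
 *
 *		if (!new_state->crtc)
 *			return 0;
 *
 *		crtc_state = drm_atomic_get_new_crtc_state(state, new_state->crtc);
 *		if (WARN_ON(!crtc_state))
 *			return -EINVAL;
 *
 *		return drm_atomic_helper_check_plane_state(new_state, crtc_state,
 *							   DRM_PLANE_NO_SCALING,
 *							   DRM_PLANE_NO_SCALING,
 *							   false, true);
 *	}
 */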

/**
 * drm_atomic_helper_check_crtc_primary_plane() - Check CRTC state for primary plane
 * @crtc_state: CRTC state to check
 *
 * Checks that a CRTC has at least one primary plane attached to it, which is
 * a requirement on some hardware. Note that this only involves the CRTC side
 * of the test. To test if the primary plane is visible or if it can be updated
 * without the CRTC being enabled, use drm_atomic_helper_check_plane_state() in
 * the plane's atomic check.
 *
 * RETURNS:
 * 0 if a primary plane is attached to the CRTC, or an error code otherwise
 */
int drm_atomic_helper_check_crtc_primary_plane(struct drm_crtc_state *crtc_state)
{
	struct drm_crtc *crtc = crtc_state->crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_plane *plane;

	/* needs at least one primary plane to be enabled */
	drm_for_each_plane_mask(plane, dev, crtc_state->plane_mask) {
		if (plane->type == DRM_PLANE_TYPE_PRIMARY)
			return 0;
	}

	drm_dbg_atomic(dev, "[CRTC:%d:%s] primary plane missing\n", crtc->base.id, crtc->name);

	return -EINVAL;
}
EXPORT_SYMBOL(drm_atomic_helper_check_crtc_primary_plane);
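/*
 * Example (illustrative sketch): hardware that requires an enabled primary
 * plane can call the helper above from its
 * &drm_crtc_helper_funcs.atomic_check. The foo_* name is hypothetical:
 *
 *	static int foo_crtc_atomic_check(struct drm_crtc *crtc,
 *					 struct drm_atomic_state *state)
 *	{
 *		struct drm_crtc_state *crtc_state =
 *			drm_atomic_get_new_crtc_state(state, crtc);
 *
 *		if (!crtc_state->enable)
 *			return 0;
 *
 *		return drm_atomic_helper_check_crtc_primary_plane(crtc_state);
 *	}
 */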

/**
 * drm_atomic_helper_check_planes - validate state object for planes changes
 * @dev: DRM device
 * @state: the driver state object
 *
 * Check the state object to see if the requested state is physically possible.
 * This does all the plane update related checks by calling into the
 * &drm_crtc_helper_funcs.atomic_check and &drm_plane_helper_funcs.atomic_check
 * hooks provided by the driver.
 *
 * It also sets &drm_crtc_state.planes_changed to indicate that a CRTC has
 * updated planes.
 *
 * RETURNS:
 * Zero for success or -errno
 */
int
drm_atomic_helper_check_planes(struct drm_device *dev,
			       struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state;
	struct drm_plane *plane;
	struct drm_plane_state *new_plane_state, *old_plane_state;
	int i, ret = 0;

	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
		const struct drm_plane_helper_funcs *funcs;

		WARN_ON(!drm_modeset_is_locked(&plane->mutex));

		funcs = plane->helper_private;

		drm_atomic_helper_plane_changed(state, old_plane_state, new_plane_state, plane);

		drm_atomic_helper_check_plane_damage(state, new_plane_state);

		if (!funcs || !funcs->atomic_check)
			continue;

		ret = funcs->atomic_check(plane, state);
		if (ret) {
			drm_dbg_atomic(plane->dev,
				       "[PLANE:%d:%s] atomic driver check failed\n",
				       plane->base.id, plane->name);
			return ret;
		}
	}

	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		const struct drm_crtc_helper_funcs *funcs;

		funcs = crtc->helper_private;

		if (!funcs || !funcs->atomic_check)
			continue;

		ret = funcs->atomic_check(crtc, state);
		if (ret) {
			drm_dbg_atomic(crtc->dev,
				       "[CRTC:%d:%s] atomic driver check failed\n",
				       crtc->base.id, crtc->name);
			return ret;
		}
	}

	return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_check_planes);

/**
 * drm_atomic_helper_check - validate state object
 * @dev: DRM device
 * @state: the driver state object
 *
 * Check the state object to see if the requested state is physically possible.
 * Only CRTCs and planes have check callbacks, so for any additional (global)
 * checking that a driver needs it can simply wrap that around this function.
 * Drivers without such needs can directly use this as their
 * &drm_mode_config_funcs.atomic_check callback.
 *
 * This just wraps the two parts of the state checking for planes and modeset
 * state in the default order: First it calls drm_atomic_helper_check_modeset()
 * and then drm_atomic_helper_check_planes(). The assumption is that the
 * &drm_plane_helper_funcs.atomic_check and &drm_crtc_helper_funcs.atomic_check
 * functions depend upon an updated adjusted_mode.clock to e.g. properly compute
 * watermarks.
 *
 * Note that zpos normalization will add all enabled planes to the state, which
 * might not be desired for some drivers.
 * For example, enabling or disabling a cursor plane with a fixed zpos value
 * would force all other enabled planes into the state change.
 *
 * IMPORTANT:
 *
 * As this function calls drm_atomic_helper_check_modeset() internally, its
 * restrictions also apply:
 * Drivers which set &drm_crtc_state.mode_changed (e.g. in their
 * &drm_plane_helper_funcs.atomic_check hooks if a plane update can't be done
 * without a full modeset) _must_ call drm_atomic_helper_check_modeset()
 * again after that change.
 *
 * RETURNS:
 * Zero for success or -errno
 */
int drm_atomic_helper_check(struct drm_device *dev,
			    struct drm_atomic_state *state)
{
	int ret;

	ret = drm_atomic_helper_check_modeset(dev, state);
	if (ret)
		return ret;

	if (dev->mode_config.normalize_zpos) {
		ret = drm_atomic_normalize_zpos(dev, state);
		if (ret)
			return ret;
	}

	ret = drm_atomic_helper_check_planes(dev, state);
	if (ret)
		return ret;

	if (state->legacy_cursor_update)
		state->async_update = !drm_atomic_helper_async_check(dev, state);

	drm_self_refresh_helper_alter_state(state);

	return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_check);

static bool
crtc_needs_disable(struct drm_crtc_state *old_state,
		   struct drm_crtc_state *new_state)
{
	/*
	 * No new_state means the CRTC is off, so the only criterion is whether
	 * it's currently active or in self refresh mode.
	 */
	if (!new_state)
		return drm_atomic_crtc_effectively_active(old_state);

	/*
	 * We need to disable bridge(s) and CRTC if we're transitioning out of
	 * self-refresh and changing CRTCs at the same time, because the
	 * bridge tracks self-refresh status via CRTC state.
	 */
	if (old_state->self_refresh_active &&
	    old_state->crtc != new_state->crtc)
		return true;

	/*
	 * We also need to run through the crtc_funcs->disable() function if
	 * the CRTC is currently on, if it's transitioning to self refresh
	 * mode, or if it's in self refresh mode and needs to be fully
	 * disabled.
	 */
	return old_state->active ||
	       (old_state->self_refresh_active && !new_state->active) ||
	       new_state->self_refresh_active;
}

/**
 * drm_atomic_helper_commit_encoder_bridge_disable - disable bridges and encoder
 * @dev: DRM device
 * @state: the driver state object
 *
 * Loops over all connectors in the current state and if the CRTC needs
 * it, disables the bridge chain all the way, then disables the encoder
 * afterwards.
 */
void
drm_atomic_helper_commit_encoder_bridge_disable(struct drm_device *dev,
						struct drm_atomic_state *state)
{
	struct drm_connector *connector;
	struct drm_connector_state *old_conn_state, *new_conn_state;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	int i;

	for_each_oldnew_connector_in_state(state, connector, old_conn_state, new_conn_state, i) {
		const struct drm_encoder_helper_funcs *funcs;
		struct drm_encoder *encoder;
		struct drm_bridge *bridge;

		/*
		 * Shut down everything that's in the changeset and currently
		 * still on. So need to check the old, saved state.
		 */
		if (!old_conn_state->crtc)
			continue;

		old_crtc_state = drm_atomic_get_old_crtc_state(state, old_conn_state->crtc);

		if (new_conn_state->crtc)
			new_crtc_state = drm_atomic_get_new_crtc_state(
						state,
						new_conn_state->crtc);
		else
			new_crtc_state = NULL;

		if (!crtc_needs_disable(old_crtc_state, new_crtc_state) ||
		    !drm_atomic_crtc_needs_modeset(old_conn_state->crtc->state))
			continue;

		encoder = old_conn_state->best_encoder;

		/* We shouldn't get this far if we didn't previously have
		 * an encoder.. but WARN_ON() rather than explode.
		 */
		if (WARN_ON(!encoder))
			continue;

		funcs = encoder->helper_private;

		drm_dbg_atomic(dev, "disabling [ENCODER:%d:%s]\n",
			       encoder->base.id, encoder->name);

		/*
		 * Each encoder has at most one connector (since we always steal
		 * it away), so we won't call disable hooks twice.
		 */
		bridge = drm_bridge_chain_get_first_bridge(encoder);
		drm_atomic_bridge_chain_disable(bridge, state);
		drm_bridge_put(bridge);

		/* Right function depends upon target state. */
		if (funcs) {
			if (funcs->atomic_disable)
				funcs->atomic_disable(encoder, state);
			else if (new_conn_state->crtc && funcs->prepare)
				funcs->prepare(encoder);
			else if (funcs->disable)
				funcs->disable(encoder);
			else if (funcs->dpms)
				funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
		}
	}
}
EXPORT_SYMBOL(drm_atomic_helper_commit_encoder_bridge_disable);

/**
 * drm_atomic_helper_commit_crtc_disable - disable CRTCs
 * @dev: DRM device
 * @state: the driver state object
 *
 * Loops over all CRTCs in the current state and if the CRTC needs
 * it, disables it.
 */
void
drm_atomic_helper_commit_crtc_disable(struct drm_device *dev, struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	int i;

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		const struct drm_crtc_helper_funcs *funcs;
		int ret;

		/* Shut down everything that needs a full modeset. */
		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
			continue;

		if (!crtc_needs_disable(old_crtc_state, new_crtc_state))
			continue;

		funcs = crtc->helper_private;

		drm_dbg_atomic(dev, "disabling [CRTC:%d:%s]\n",
			       crtc->base.id, crtc->name);


		/* Right function depends upon target state. */
		if (new_crtc_state->enable && funcs->prepare)
			funcs->prepare(crtc);
		else if (funcs->atomic_disable)
			funcs->atomic_disable(crtc, state);
		else if (funcs->disable)
			funcs->disable(crtc);
		else if (funcs->dpms)
			funcs->dpms(crtc, DRM_MODE_DPMS_OFF);

		if (!drm_dev_has_vblank(dev))
			continue;

		ret = drm_crtc_vblank_get(crtc);
		/*
		 * Self-refresh is not a true "disable"; ensure vblank remains
		 * enabled.
		 */
		if (new_crtc_state->self_refresh_active)
			WARN_ONCE(ret != 0,
				  "driver disabled vblank in self-refresh\n");
		else
			WARN_ONCE(ret != -EINVAL,
				  "driver forgot to call drm_crtc_vblank_off()\n");
		if (ret == 0)
			drm_crtc_vblank_put(crtc);
	}
}
EXPORT_SYMBOL(drm_atomic_helper_commit_crtc_disable);

/**
 * drm_atomic_helper_commit_encoder_bridge_post_disable - post-disable encoder bridges
 * @dev: DRM device
 * @state: the driver state object
 *
 * Loops over all connectors in the current state and if the CRTC needs
 * it, post-disables all encoder bridges.
 */
void
drm_atomic_helper_commit_encoder_bridge_post_disable(struct drm_device *dev, struct drm_atomic_state *state)
{
	struct drm_connector *connector;
	struct drm_connector_state *old_conn_state, *new_conn_state;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	int i;

	for_each_oldnew_connector_in_state(state, connector, old_conn_state, new_conn_state, i) {
		struct drm_encoder *encoder;
		struct drm_bridge *bridge;

		/*
		 * Shut down everything that's in the changeset and currently
		 * still on. So need to check the old, saved state.
		 */
		if (!old_conn_state->crtc)
			continue;

		old_crtc_state = drm_atomic_get_old_crtc_state(state, old_conn_state->crtc);

		if (new_conn_state->crtc)
			new_crtc_state = drm_atomic_get_new_crtc_state(state,
								       new_conn_state->crtc);
		else
			new_crtc_state = NULL;

		if (!crtc_needs_disable(old_crtc_state, new_crtc_state) ||
		    !drm_atomic_crtc_needs_modeset(old_conn_state->crtc->state))
			continue;

		encoder = old_conn_state->best_encoder;

		/*
		 * We shouldn't get this far if we didn't previously have
		 * an encoder.. but WARN_ON() rather than explode.
		 */
		if (WARN_ON(!encoder))
			continue;

		drm_dbg_atomic(dev, "post-disabling bridges [ENCODER:%d:%s]\n",
			       encoder->base.id, encoder->name);

		/*
		 * Each encoder has at most one connector (since we always steal
		 * it away), so we won't call disable hooks twice.
		 */
		bridge = drm_bridge_chain_get_first_bridge(encoder);
		drm_atomic_bridge_chain_post_disable(bridge, state);
		drm_bridge_put(bridge);
	}
}
EXPORT_SYMBOL(drm_atomic_helper_commit_encoder_bridge_post_disable);

static void
disable_outputs(struct drm_device *dev, struct drm_atomic_state *state)
{
	drm_atomic_helper_commit_encoder_bridge_disable(dev, state);

	drm_atomic_helper_commit_encoder_bridge_post_disable(dev, state);

	drm_atomic_helper_commit_crtc_disable(dev, state);
}

/**
 * drm_atomic_helper_update_legacy_modeset_state - update legacy modeset state
 * @dev: DRM device
 * @state: atomic state object being committed
 *
 * This function updates all the various legacy modeset state pointers in
 * connectors, encoders and CRTCs.
 *
 * Drivers can use this for building their own atomic commit if they don't have
 * a pure helper-based modeset implementation.
 *
 * Since these updates are not synchronized with locking, only code paths
 * called from &drm_mode_config_helper_funcs.atomic_commit_tail can look at the
 * legacy state filled out by this helper. De facto this means this helper and
 * the legacy state pointers are only really useful for transitioning an
 * existing driver to the atomic world.
 */
void
drm_atomic_helper_update_legacy_modeset_state(struct drm_device *dev,
					      struct drm_atomic_state *state)
{
	struct drm_connector *connector;
	struct drm_connector_state *old_conn_state, *new_conn_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state;
	int i;

	/* clear out existing links and update dpms */
	for_each_oldnew_connector_in_state(state, connector, old_conn_state, new_conn_state, i) {
		if (connector->encoder) {
			WARN_ON(!connector->encoder->crtc);

			connector->encoder->crtc = NULL;
			connector->encoder = NULL;
		}

		crtc = new_conn_state->crtc;
		if ((!crtc && old_conn_state->crtc) ||
		    (crtc && drm_atomic_crtc_needs_modeset(crtc->state))) {
			int mode = DRM_MODE_DPMS_OFF;

			if (crtc && crtc->state->active)
				mode = DRM_MODE_DPMS_ON;

			connector->dpms = mode;
		}
	}

	/* set new links */
	for_each_new_connector_in_state(state, connector, new_conn_state, i) {
		if (!new_conn_state->crtc)
			continue;

		if (WARN_ON(!new_conn_state->best_encoder))
			continue;

		connector->encoder = new_conn_state->best_encoder;
		connector->encoder->crtc = new_conn_state->crtc;
	}

	/* set legacy state in the crtc structure */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		struct drm_plane *primary = crtc->primary;
		struct drm_plane_state *new_plane_state;

		crtc->mode = new_crtc_state->mode;
		crtc->enabled = new_crtc_state->enable;

		new_plane_state =
			drm_atomic_get_new_plane_state(state, primary);

		if (new_plane_state && new_plane_state->crtc == crtc) {
			crtc->x = new_plane_state->src_x >> 16;
			crtc->y = new_plane_state->src_y >> 16;
		}
	}
}
EXPORT_SYMBOL(drm_atomic_helper_update_legacy_modeset_state);

/**
 * drm_atomic_helper_calc_timestamping_constants - update vblank timestamping constants
 * @state: atomic state object
 *
 * Updates the timestamping constants used for precise vblank timestamps
 * by calling drm_calc_timestamping_constants() for all enabled crtcs in @state.
 */
void drm_atomic_helper_calc_timestamping_constants(struct drm_atomic_state *state)
{
	struct drm_crtc_state *new_crtc_state;
	struct drm_crtc *crtc;
	int i;

	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (new_crtc_state->enable)
			drm_calc_timestamping_constants(crtc,
							&new_crtc_state->adjusted_mode);
	}
}
EXPORT_SYMBOL(drm_atomic_helper_calc_timestamping_constants);

/**
 * drm_atomic_helper_commit_crtc_set_mode - set the new mode
 * @dev: DRM device
 * @state: the driver state object
 *
 * Loops over all connectors in the current state and, if the mode has
 * changed, changes the mode of the CRTC, then calls down the bridge
 * chain and changes the mode in all bridges as well.
 */
void
drm_atomic_helper_commit_crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state;
	struct drm_connector *connector;
	struct drm_connector_state *new_conn_state;
	int i;

	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		const struct drm_crtc_helper_funcs *funcs;

		if (!new_crtc_state->mode_changed)
			continue;

		funcs = crtc->helper_private;

		if (new_crtc_state->enable && funcs->mode_set_nofb) {
			drm_dbg_atomic(dev, "modeset on [CRTC:%d:%s]\n",
				       crtc->base.id, crtc->name);

			funcs->mode_set_nofb(crtc);
		}
	}

	for_each_new_connector_in_state(state, connector, new_conn_state, i) {
		const struct drm_encoder_helper_funcs *funcs;
		struct drm_encoder *encoder;
		struct drm_display_mode *mode, *adjusted_mode;
		struct drm_bridge *bridge;

		if (!new_conn_state->best_encoder)
			continue;

		encoder = new_conn_state->best_encoder;
		funcs = encoder->helper_private;
		new_crtc_state = new_conn_state->crtc->state;
		mode = &new_crtc_state->mode;
		adjusted_mode = &new_crtc_state->adjusted_mode;

		if (!new_crtc_state->mode_changed && !new_crtc_state->connectors_changed)
			continue;

		drm_dbg_atomic(dev, "modeset on [ENCODER:%d:%s]\n",
			       encoder->base.id, encoder->name);

		/*
		 * Each encoder has at most one connector (since we always steal
		 * it away), so we won't call mode_set hooks twice.
		 */
		if (funcs && funcs->atomic_mode_set) {
			funcs->atomic_mode_set(encoder, new_crtc_state,
					       new_conn_state);
		} else if (funcs && funcs->mode_set) {
			funcs->mode_set(encoder, mode, adjusted_mode);
		}

		bridge = drm_bridge_chain_get_first_bridge(encoder);
		drm_bridge_chain_mode_set(bridge, mode, adjusted_mode);
		drm_bridge_put(bridge);
	}
}
EXPORT_SYMBOL(drm_atomic_helper_commit_crtc_set_mode);

/**
 * drm_atomic_helper_commit_modeset_disables - modeset commit to disable outputs
 * @dev: DRM device
 * @state: atomic state object being committed
 *
 * This function shuts down all the outputs that need to be shut down and
 * prepares them (if required) with the new mode.
 *
 * For compatibility with legacy CRTC helpers this should be called before
 * drm_atomic_helper_commit_planes(), which is what the default commit function
 * does. But drivers with different needs can group the modeset commits together
 * and do the plane commits at the end. This is useful for drivers doing runtime
 * PM since plane updates then only happen when the CRTC is actually enabled.
 */
void drm_atomic_helper_commit_modeset_disables(struct drm_device *dev,
					       struct drm_atomic_state *state)
{
	disable_outputs(dev, state);

	drm_atomic_helper_update_legacy_modeset_state(dev, state);
	drm_atomic_helper_calc_timestamping_constants(state);

	drm_atomic_helper_commit_crtc_set_mode(dev, state);
}
EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_disables);
1576
1577
/**
1578
* drm_atomic_helper_commit_writebacks - issue writebacks
1579
* @dev: DRM device
1580
* @state: atomic state object being committed
1581
*
1582
* This loops over the connectors, checks if the new state requires
1583
* a writeback job to be issued and in that case issues an atomic
1584
* commit on each connector.
1585
*/
1586
void drm_atomic_helper_commit_writebacks(struct drm_device *dev,
1587
struct drm_atomic_state *state)
1588
{
1589
struct drm_connector *connector;
1590
struct drm_connector_state *new_conn_state;
1591
int i;
1592
1593
for_each_new_connector_in_state(state, connector, new_conn_state, i) {
1594
const struct drm_connector_helper_funcs *funcs;
1595
1596
funcs = connector->helper_private;
1597
if (!funcs->atomic_commit)
1598
continue;
1599
1600
if (new_conn_state->writeback_job && new_conn_state->writeback_job->fb) {
1601
WARN_ON(connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK);
1602
funcs->atomic_commit(connector, state);
1603
}
1604
}
1605
}
1606
EXPORT_SYMBOL(drm_atomic_helper_commit_writebacks);
1607
1608
/**
1609
* drm_atomic_helper_commit_encoder_bridge_pre_enable - pre-enable bridges
1610
* @dev: DRM device
1611
* @state: atomic state object being committed
1612
*
1613
* This loops over the connectors and if the CRTC needs it, pre-enables
1614
* the entire bridge chain.
1615
*/
1616
void
1617
drm_atomic_helper_commit_encoder_bridge_pre_enable(struct drm_device *dev, struct drm_atomic_state *state)
1618
{
1619
struct drm_connector *connector;
1620
struct drm_connector_state *new_conn_state;
1621
int i;
1622
1623
for_each_new_connector_in_state(state, connector, new_conn_state, i) {
1624
struct drm_encoder *encoder;
1625
struct drm_bridge *bridge;
1626
1627
if (!new_conn_state->best_encoder)
1628
continue;
1629
1630
if (!new_conn_state->crtc->state->active ||
1631
!drm_atomic_crtc_needs_modeset(new_conn_state->crtc->state))
1632
continue;
1633
1634
encoder = new_conn_state->best_encoder;
1635
1636
drm_dbg_atomic(dev, "pre-enabling bridges [ENCODER:%d:%s]\n",
1637
encoder->base.id, encoder->name);
1638
1639
/*
1640
* Each encoder has at most one connector (since we always steal
1641
* it away), so we won't call enable hooks twice.
1642
*/
1643
bridge = drm_bridge_chain_get_first_bridge(encoder);
1644
drm_atomic_bridge_chain_pre_enable(bridge, state);
1645
drm_bridge_put(bridge);
1646
}
1647
}
1648
EXPORT_SYMBOL(drm_atomic_helper_commit_encoder_bridge_pre_enable);
1649
1650
/**
1651
* drm_atomic_helper_commit_crtc_enable - enables the CRTCs
1652
* @dev: DRM device
1653
* @state: atomic state object being committed
1654
*
1655
* This loops over CRTCs in the new state, and of the CRTC needs
1656
* it, enables it.
1657
*/
1658
void
1659
drm_atomic_helper_commit_crtc_enable(struct drm_device *dev, struct drm_atomic_state *state)
1660
{
1661
struct drm_crtc *crtc;
1662
struct drm_crtc_state *old_crtc_state;
1663
struct drm_crtc_state *new_crtc_state;
1664
int i;
1665
1666
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
1667
const struct drm_crtc_helper_funcs *funcs;
1668
1669
/* Need to filter out CRTCs where only planes change. */
1670
if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
1671
continue;
1672
1673
if (!new_crtc_state->active)
1674
continue;
1675
1676
funcs = crtc->helper_private;
1677
1678
if (new_crtc_state->enable) {
1679
drm_dbg_atomic(dev, "enabling [CRTC:%d:%s]\n",
1680
crtc->base.id, crtc->name);
1681
if (funcs->atomic_enable)
1682
funcs->atomic_enable(crtc, state);
1683
else if (funcs->commit)
1684
funcs->commit(crtc);
1685
}
1686
}
1687
}
1688
EXPORT_SYMBOL(drm_atomic_helper_commit_crtc_enable);
1689
1690
/**
1691
* drm_atomic_helper_commit_encoder_bridge_enable - enables the bridges
1692
* @dev: DRM device
1693
* @state: atomic state object being committed
1694
*
1695
* This loops over all connectors in the new state, and of the CRTC needs
1696
* it, enables the entire bridge chain.
1697
*/
1698
void
1699
drm_atomic_helper_commit_encoder_bridge_enable(struct drm_device *dev, struct drm_atomic_state *state)
1700
{
1701
struct drm_connector *connector;
1702
struct drm_connector_state *new_conn_state;
1703
int i;
1704
1705
for_each_new_connector_in_state(state, connector, new_conn_state, i) {
1706
const struct drm_encoder_helper_funcs *funcs;
1707
struct drm_encoder *encoder;
1708
struct drm_bridge *bridge;
1709
1710
if (!new_conn_state->best_encoder)
1711
continue;
1712
1713
if (!new_conn_state->crtc->state->active ||
1714
!drm_atomic_crtc_needs_modeset(new_conn_state->crtc->state))
1715
continue;
1716
1717
encoder = new_conn_state->best_encoder;
1718
funcs = encoder->helper_private;
1719
1720
drm_dbg_atomic(dev, "enabling [ENCODER:%d:%s]\n",
1721
encoder->base.id, encoder->name);
1722
1723
/*
1724
* Each encoder has at most one connector (since we always steal
1725
* it away), so we won't call enable hooks twice.
1726
*/
1727
bridge = drm_bridge_chain_get_first_bridge(encoder);
1728
1729
if (funcs) {
1730
if (funcs->atomic_enable)
1731
funcs->atomic_enable(encoder, state);
1732
else if (funcs->enable)
1733
funcs->enable(encoder);
1734
else if (funcs->commit)
1735
funcs->commit(encoder);
1736
}
1737
1738
drm_atomic_bridge_chain_enable(bridge, state);
1739
drm_bridge_put(bridge);
1740
}
1741
}
1742
EXPORT_SYMBOL(drm_atomic_helper_commit_encoder_bridge_enable);
1743
1744
/**
1745
* drm_atomic_helper_commit_modeset_enables - modeset commit to enable outputs
1746
* @dev: DRM device
1747
* @state: atomic state object being committed
1748
*
1749
* This function enables all the outputs with the new configuration which had to
1750
* be turned off for the update.
1751
*
1752
* For compatibility with legacy CRTC helpers this should be called after
1753
* drm_atomic_helper_commit_planes(), which is what the default commit function
1754
* does. But drivers with different needs can group the modeset commits together
1755
* and do the plane commits at the end. This is useful for drivers doing runtime
1756
* PM since plane updates then only happen when the CRTC is actually enabled.
1757
*/
1758
void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
1759
struct drm_atomic_state *state)
1760
{
1761
drm_atomic_helper_commit_crtc_enable(dev, state);
1762
1763
drm_atomic_helper_commit_encoder_bridge_pre_enable(dev, state);
1764
1765
drm_atomic_helper_commit_encoder_bridge_enable(dev, state);
1766
1767
drm_atomic_helper_commit_writebacks(dev, state);
1768
}
1769
EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_enables);
1770
1771
/*
1772
* For atomic updates which touch just a single CRTC, calculate the time of the
1773
* next vblank, and inform all the fences of the deadline.
1774
*/
1775
static void set_fence_deadline(struct drm_device *dev,
1776
struct drm_atomic_state *state)
1777
{
1778
struct drm_crtc *crtc;
1779
struct drm_crtc_state *new_crtc_state;
1780
struct drm_plane *plane;
1781
struct drm_plane_state *new_plane_state;
1782
ktime_t vbltime = 0;
1783
int i;
1784
1785
for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
1786
ktime_t v;
1787
1788
if (drm_atomic_crtc_needs_modeset(new_crtc_state))
1789
continue;
1790
1791
if (!new_crtc_state->active)
1792
continue;
1793
1794
if (drm_crtc_next_vblank_start(crtc, &v))
1795
continue;
1796
1797
if (!vbltime || ktime_before(v, vbltime))
1798
vbltime = v;
1799
}
1800
1801
/* If no CRTCs updated, then nothing to do: */
1802
if (!vbltime)
1803
return;
1804
1805
for_each_new_plane_in_state (state, plane, new_plane_state, i) {
1806
if (!new_plane_state->fence)
1807
continue;
1808
dma_fence_set_deadline(new_plane_state->fence, vbltime);
1809
}
1810
}
1811
1812
/**
1813
* drm_atomic_helper_wait_for_fences - wait for fences stashed in plane state
1814
* @dev: DRM device
1815
* @state: atomic state object with old state structures
1816
* @pre_swap: If true, do an interruptible wait, and @state is the new state.
1817
* Otherwise @state is the old state.
1818
*
1819
* For implicit sync, the driver should fish the exclusive fence out of the
* incoming fbs and stash it in the drm_plane_state. This is called after
* drm_atomic_helper_swap_state(), so it uses the current plane state (and
* just uses the atomic state to find the changed planes).
1823
*
1824
* Note that @pre_swap is needed since the point where we block for fences moves
1825
* around depending upon whether an atomic commit is blocking or
1826
* non-blocking. For non-blocking commit all waiting needs to happen after
1827
* drm_atomic_helper_swap_state() is called, but for blocking commits we want
1828
* to wait **before** we do anything that can't be easily rolled back. That is
1829
* before we call drm_atomic_helper_swap_state().
1830
*
1831
* Returns zero if success or < 0 if dma_fence_wait() fails.
1832
*/
1833
int drm_atomic_helper_wait_for_fences(struct drm_device *dev,
1834
struct drm_atomic_state *state,
1835
bool pre_swap)
1836
{
1837
struct drm_plane *plane;
1838
struct drm_plane_state *new_plane_state;
1839
int i, ret;
1840
1841
set_fence_deadline(dev, state);
1842
1843
for_each_new_plane_in_state(state, plane, new_plane_state, i) {
1844
if (!new_plane_state->fence)
1845
continue;
1846
1847
WARN_ON(!new_plane_state->fb);
1848
1849
/*
1850
* If waiting for fences pre-swap (ie: nonblock), userspace can
1851
* still interrupt the operation. Instead of blocking until the
1852
* timer expires, make the wait interruptible.
1853
*/
1854
ret = dma_fence_wait(new_plane_state->fence, pre_swap);
1855
if (ret)
1856
return ret;
1857
1858
dma_fence_put(new_plane_state->fence);
1859
new_plane_state->fence = NULL;
1860
}
1861
1862
return 0;
1863
}
1864
EXPORT_SYMBOL(drm_atomic_helper_wait_for_fences);
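/*
* Example (editorial sketch): stashing the implicit fence as described above.
* For GEM-based drivers the simplest option is to let
* drm_gem_plane_helper_prepare_fb() extract the fence from the framebuffer's
* GEM objects and store it in &drm_plane_state.fence. The foo_* names and the
* pinning step are assumptions.
*
*	static int foo_plane_prepare_fb(struct drm_plane *plane,
*					struct drm_plane_state *new_state)
*	{
*		// Pin or map buffers as required by the foo hardware first ...
*		foo_hw_pin_fb(plane, new_state->fb);
*
*		// ... then fill new_state->fence so that
*		// drm_atomic_helper_wait_for_fences() has something to wait on.
*		return drm_gem_plane_helper_prepare_fb(plane, new_state);
*	}
*/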
1865
1866
/**
1867
* drm_atomic_helper_wait_for_vblanks - wait for vblank on CRTCs
1868
* @dev: DRM device
1869
* @state: atomic state object being committed
1870
*
1871
* Helper to, after atomic commit, wait for vblanks on all affected
1872
* CRTCs (ie. before cleaning up old framebuffers using
1873
* drm_atomic_helper_cleanup_planes()). It will only wait on CRTCs where the
1874
* framebuffers have actually changed to optimize for the legacy cursor and
1875
* plane update use-case.
1876
*
1877
* Drivers using the nonblocking commit tracking support initialized by calling
1878
* drm_atomic_helper_setup_commit() should look at
1879
* drm_atomic_helper_wait_for_flip_done() as an alternative.
1880
*/
1881
void
1882
drm_atomic_helper_wait_for_vblanks(struct drm_device *dev,
1883
struct drm_atomic_state *state)
1884
{
1885
struct drm_crtc *crtc;
1886
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
1887
int i, ret;
1888
unsigned int crtc_mask = 0;
1889
1890
/*
1891
* Legacy cursor ioctls are completely unsynced, and userspace
1892
* relies on that (by doing tons of cursor updates).
1893
*/
1894
if (state->legacy_cursor_update)
1895
return;
1896
1897
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
1898
if (!new_crtc_state->active)
1899
continue;
1900
1901
ret = drm_crtc_vblank_get(crtc);
1902
if (ret != 0)
1903
continue;
1904
1905
crtc_mask |= drm_crtc_mask(crtc);
1906
state->crtcs[i].last_vblank_count = drm_crtc_vblank_count(crtc);
1907
}
1908
1909
for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) {
1910
wait_queue_head_t *queue = drm_crtc_vblank_waitqueue(crtc);
1911
1912
if (!(crtc_mask & drm_crtc_mask(crtc)))
1913
continue;
1914
1915
ret = wait_event_timeout(*queue,
1916
state->crtcs[i].last_vblank_count !=
1917
drm_crtc_vblank_count(crtc),
1918
msecs_to_jiffies(100));
1919
1920
WARN(!ret, "[CRTC:%d:%s] vblank wait timed out\n",
1921
crtc->base.id, crtc->name);
1922
1923
drm_crtc_vblank_put(crtc);
1924
}
1925
}
1926
EXPORT_SYMBOL(drm_atomic_helper_wait_for_vblanks);
1927
1928
/**
1929
* drm_atomic_helper_wait_for_flip_done - wait for all page flips to be done
1930
* @dev: DRM device
1931
* @state: atomic state object being committed
1932
*
1933
* Helper to, after atomic commit, wait for page flips on all affected
1934
* crtcs (ie. before cleaning up old framebuffers using
1935
* drm_atomic_helper_cleanup_planes()). Compared to
1936
* drm_atomic_helper_wait_for_vblanks() this waits for the completion on all
1937
* CRTCs, assuming that cursor-only updates are signalling their completion
1938
* immediately (or using a different path).
1939
*
1940
* This requires that drivers use the nonblocking commit tracking support
1941
* initialized using drm_atomic_helper_setup_commit().
1942
*/
1943
void drm_atomic_helper_wait_for_flip_done(struct drm_device *dev,
1944
struct drm_atomic_state *state)
1945
{
1946
struct drm_crtc *crtc;
1947
int i;
1948
1949
for (i = 0; i < dev->mode_config.num_crtc; i++) {
1950
struct drm_crtc_commit *commit = state->crtcs[i].commit;
1951
int ret;
1952
1953
crtc = state->crtcs[i].ptr;
1954
1955
if (!crtc || !commit)
1956
continue;
1957
1958
ret = wait_for_completion_timeout(&commit->flip_done, 10 * HZ);
1959
if (ret == 0)
1960
drm_err(dev, "[CRTC:%d:%s] flip_done timed out\n",
1961
crtc->base.id, crtc->name);
1962
}
1963
1964
if (state->fake_commit)
1965
complete_all(&state->fake_commit->flip_done);
1966
}
1967
EXPORT_SYMBOL(drm_atomic_helper_wait_for_flip_done);
1968
1969
/**
1970
* drm_atomic_helper_commit_tail - commit atomic update to hardware
1971
* @state: atomic state object being committed
1972
*
1973
* This is the default implementation for the
1974
* &drm_mode_config_helper_funcs.atomic_commit_tail hook, for drivers
1975
* that do not support runtime_pm or do not need the CRTC to be
1976
* enabled to perform a commit. Otherwise, see
1977
* drm_atomic_helper_commit_tail_rpm().
1978
*
1979
* Note that the default ordering of the various stages is chosen to match the
* legacy modeset helper library as closely as possible.
1981
*/
1982
void drm_atomic_helper_commit_tail(struct drm_atomic_state *state)
1983
{
1984
struct drm_device *dev = state->dev;
1985
1986
drm_atomic_helper_commit_modeset_disables(dev, state);
1987
1988
drm_atomic_helper_commit_planes(dev, state, 0);
1989
1990
drm_atomic_helper_commit_modeset_enables(dev, state);
1991
1992
drm_atomic_helper_fake_vblank(state);
1993
1994
drm_atomic_helper_commit_hw_done(state);
1995
1996
drm_atomic_helper_wait_for_vblanks(dev, state);
1997
1998
drm_atomic_helper_cleanup_planes(dev, state);
1999
}
2000
EXPORT_SYMBOL(drm_atomic_helper_commit_tail);
2001
2002
/**
2003
* drm_atomic_helper_commit_tail_rpm - commit atomic update to hardware
2004
* @state: new modeset state to be committed
2005
*
2006
* This is an alternative implementation for the
2007
* &drm_mode_config_helper_funcs.atomic_commit_tail hook, for drivers
2008
* that support runtime_pm or need the CRTC to be enabled to perform a
2009
* commit. Otherwise, one should use the default implementation
2010
* drm_atomic_helper_commit_tail().
2011
*/
2012
void drm_atomic_helper_commit_tail_rpm(struct drm_atomic_state *state)
2013
{
2014
struct drm_device *dev = state->dev;
2015
2016
drm_atomic_helper_commit_modeset_disables(dev, state);
2017
2018
drm_atomic_helper_commit_modeset_enables(dev, state);
2019
2020
drm_atomic_helper_commit_planes(dev, state,
2021
DRM_PLANE_COMMIT_ACTIVE_ONLY);
2022
2023
drm_atomic_helper_fake_vblank(state);
2024
2025
drm_atomic_helper_commit_hw_done(state);
2026
2027
drm_atomic_helper_wait_for_vblanks(dev, state);
2028
2029
drm_atomic_helper_cleanup_planes(dev, state);
2030
}
2031
EXPORT_SYMBOL(drm_atomic_helper_commit_tail_rpm);
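/*
* Example (editorial sketch): selecting a commit-tail implementation. A driver
* that needs the runtime-PM friendly ordering wires up
* drm_atomic_helper_commit_tail_rpm() (or its own variant) through
* &drm_mode_config_helper_funcs. The foo_* name is an assumption.
*
*	static const struct drm_mode_config_helper_funcs foo_mode_config_helpers = {
*		.atomic_commit_tail = drm_atomic_helper_commit_tail_rpm,
*	};
*
*	// During mode config initialization:
*	// dev->mode_config.helper_private = &foo_mode_config_helpers;
*/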
2032
2033
static void commit_tail(struct drm_atomic_state *state)
2034
{
2035
struct drm_device *dev = state->dev;
2036
const struct drm_mode_config_helper_funcs *funcs;
2037
struct drm_crtc_state *new_crtc_state;
2038
struct drm_crtc *crtc;
2039
ktime_t start;
2040
s64 commit_time_ms;
2041
unsigned int i, new_self_refresh_mask = 0;
2042
2043
funcs = dev->mode_config.helper_private;
2044
2045
/*
2046
* We're measuring the _entire_ commit, so the time will vary depending
2047
* on how many fences and objects are involved. For the purposes of self
2048
* refresh, this is desirable since it'll give us an idea of how
2049
* congested things are. This will inform our decision on how often we
2050
* should enter self refresh after idle.
2051
*
2052
* These times will be averaged out in the self refresh helpers to avoid
2053
* overreacting to one outlier frame.
2054
*/
2055
start = ktime_get();
2056
2057
drm_atomic_helper_wait_for_fences(dev, state, false);
2058
2059
drm_atomic_helper_wait_for_dependencies(state);
2060
2061
/*
2062
* We cannot safely access new_crtc_state after
2063
* drm_atomic_helper_commit_hw_done() so figure out which crtc's have
2064
* self-refresh active beforehand:
2065
*/
2066
for_each_new_crtc_in_state(state, crtc, new_crtc_state, i)
2067
if (new_crtc_state->self_refresh_active)
2068
new_self_refresh_mask |= BIT(i);
2069
2070
if (funcs && funcs->atomic_commit_tail)
2071
funcs->atomic_commit_tail(state);
2072
else
2073
drm_atomic_helper_commit_tail(state);
2074
2075
commit_time_ms = ktime_ms_delta(ktime_get(), start);
2076
if (commit_time_ms > 0)
2077
drm_self_refresh_helper_update_avg_times(state,
2078
(unsigned long)commit_time_ms,
2079
new_self_refresh_mask);
2080
2081
drm_atomic_helper_commit_cleanup_done(state);
2082
2083
drm_atomic_state_put(state);
2084
}
2085
2086
static void commit_work(struct work_struct *work)
2087
{
2088
struct drm_atomic_state *state = container_of(work,
2089
struct drm_atomic_state,
2090
commit_work);
2091
commit_tail(state);
2092
}
2093
2094
/**
2095
* drm_atomic_helper_async_check - check if state can be committed asynchronously
2096
* @dev: DRM device
2097
* @state: the driver state object
2098
*
2099
* This helper will check if it is possible to commit the state asynchronously.
2100
* Async commits are not supposed to swap the states like normal sync commits
2101
* but just do in-place changes on the current state.
2102
*
2103
* It will return 0 if the commit can happen in an asynchronous fashion or an
* error code if not. Note that an error just means it can't be committed
* asynchronously; if it fails, the commit should be treated like a normal
* synchronous commit.
2106
*/
2107
int drm_atomic_helper_async_check(struct drm_device *dev,
2108
struct drm_atomic_state *state)
2109
{
2110
struct drm_crtc *crtc;
2111
struct drm_crtc_state *crtc_state;
2112
struct drm_plane *plane = NULL;
2113
struct drm_plane_state *old_plane_state = NULL;
2114
struct drm_plane_state *new_plane_state = NULL;
2115
const struct drm_plane_helper_funcs *funcs;
2116
int i, ret, n_planes = 0;
2117
2118
for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
2119
if (drm_atomic_crtc_needs_modeset(crtc_state))
2120
return -EINVAL;
2121
}
2122
2123
for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i)
2124
n_planes++;
2125
2126
/* FIXME: we support only single plane updates for now */
2127
if (n_planes != 1) {
2128
drm_dbg_atomic(dev,
2129
"only single plane async updates are supported\n");
2130
return -EINVAL;
2131
}
2132
2133
if (!new_plane_state->crtc ||
2134
old_plane_state->crtc != new_plane_state->crtc) {
2135
drm_dbg_atomic(dev,
2136
"[PLANE:%d:%s] async update cannot change CRTC\n",
2137
plane->base.id, plane->name);
2138
return -EINVAL;
2139
}
2140
2141
funcs = plane->helper_private;
2142
if (!funcs->atomic_async_update) {
2143
drm_dbg_atomic(dev,
2144
"[PLANE:%d:%s] driver does not support async updates\n",
2145
plane->base.id, plane->name);
2146
return -EINVAL;
2147
}
2148
2149
if (new_plane_state->fence) {
2150
drm_dbg_atomic(dev,
2151
"[PLANE:%d:%s] missing fence for async update\n",
2152
plane->base.id, plane->name);
2153
return -EINVAL;
2154
}
2155
2156
/*
2157
* Don't do an async update if there is an outstanding commit modifying
2158
* the plane. This prevents our async update's changes from getting
2159
* overridden by a previous synchronous update's state.
2160
*/
2161
if (old_plane_state->commit &&
2162
!try_wait_for_completion(&old_plane_state->commit->hw_done)) {
2163
drm_dbg_atomic(dev,
2164
"[PLANE:%d:%s] inflight previous commit preventing async commit\n",
2165
plane->base.id, plane->name);
2166
return -EBUSY;
2167
}
2168
2169
ret = funcs->atomic_async_check(plane, state, false);
2170
if (ret != 0)
2171
drm_dbg_atomic(dev,
2172
"[PLANE:%d:%s] driver async check failed\n",
2173
plane->base.id, plane->name);
2174
return ret;
2175
}
2176
EXPORT_SYMBOL(drm_atomic_helper_async_check);
2177
2178
/**
2179
* drm_atomic_helper_async_commit - commit state asynchronously
2180
* @dev: DRM device
2181
* @state: the driver state object
2182
*
2183
* This function commits a state asynchronously, i.e., not vblank
2184
* synchronized. It should be used on a state only when
2185
* drm_atomic_helper_async_check() succeeds. Async commits are not supposed to swap
2186
* the states like normal sync commits, but just do in-place changes on the
2187
* current state.
2188
*
2189
* TODO: Implement full swap instead of doing in-place changes.
2190
*/
2191
void drm_atomic_helper_async_commit(struct drm_device *dev,
2192
struct drm_atomic_state *state)
2193
{
2194
struct drm_plane *plane;
2195
struct drm_plane_state *plane_state;
2196
const struct drm_plane_helper_funcs *funcs;
2197
int i;
2198
2199
for_each_new_plane_in_state(state, plane, plane_state, i) {
2200
struct drm_framebuffer *new_fb = plane_state->fb;
2201
struct drm_framebuffer *old_fb = plane->state->fb;
2202
2203
funcs = plane->helper_private;
2204
funcs->atomic_async_update(plane, state);
2205
2206
/*
2207
* ->atomic_async_update() is supposed to update the
2208
* plane->state in-place, make sure at least common
2209
* properties have been properly updated.
2210
*/
2211
WARN_ON_ONCE(plane->state->fb != new_fb);
2212
WARN_ON_ONCE(plane->state->crtc_x != plane_state->crtc_x);
2213
WARN_ON_ONCE(plane->state->crtc_y != plane_state->crtc_y);
2214
WARN_ON_ONCE(plane->state->src_x != plane_state->src_x);
2215
WARN_ON_ONCE(plane->state->src_y != plane_state->src_y);
2216
2217
/*
2218
* Make sure the FBs have been swapped so that cleanups in the
2219
* new_state performs a cleanup in the old FB.
2220
*/
2221
WARN_ON_ONCE(plane_state->fb != old_fb);
2222
}
2223
}
2224
EXPORT_SYMBOL(drm_atomic_helper_async_commit);
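/*
* Example (editorial sketch): plane hooks for the async path, matching the
* in-place update and framebuffer swap that the WARN_ON_ONCE checks above
* expect. The foo_* names and the hardware write are assumptions.
*
*	static int foo_plane_atomic_async_check(struct drm_plane *plane,
*						struct drm_atomic_state *state,
*						bool flip)
*	{
*		// e.g. only allow async updates for the cursor-sized plane
*		return 0;
*	}
*
*	static void foo_plane_atomic_async_update(struct drm_plane *plane,
*						  struct drm_atomic_state *state)
*	{
*		struct drm_plane_state *new_state =
*			drm_atomic_get_new_plane_state(state, plane);
*
*		// Update plane->state in place and leave the old fb in
*		// new_state so it gets cleaned up afterwards.
*		swap(plane->state->fb, new_state->fb);
*		plane->state->crtc_x = new_state->crtc_x;
*		plane->state->crtc_y = new_state->crtc_y;
*		plane->state->src_x = new_state->src_x;
*		plane->state->src_y = new_state->src_y;
*
*		foo_hw_update_plane(plane);
*	}
*/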
2225
2226
/**
2227
* drm_atomic_helper_commit - commit validated state object
2228
* @dev: DRM device
2229
* @state: the driver state object
2230
* @nonblock: whether nonblocking behavior is requested.
2231
*
2232
* This function commits a state object that has been pre-validated with
* drm_atomic_helper_check(). This can still fail when e.g. the framebuffer
* reservation fails. This
2234
* function implements nonblocking commits, using
2235
* drm_atomic_helper_setup_commit() and related functions.
2236
*
2237
* Committing the actual hardware state is done through the
2238
* &drm_mode_config_helper_funcs.atomic_commit_tail callback, or its default
2239
* implementation drm_atomic_helper_commit_tail().
2240
*
2241
* RETURNS:
2242
* Zero for success or -errno.
2243
*/
2244
int drm_atomic_helper_commit(struct drm_device *dev,
2245
struct drm_atomic_state *state,
2246
bool nonblock)
2247
{
2248
int ret;
2249
2250
if (state->async_update) {
2251
ret = drm_atomic_helper_prepare_planes(dev, state);
2252
if (ret)
2253
return ret;
2254
2255
drm_atomic_helper_async_commit(dev, state);
2256
drm_atomic_helper_unprepare_planes(dev, state);
2257
2258
return 0;
2259
}
2260
2261
ret = drm_atomic_helper_setup_commit(state, nonblock);
2262
if (ret)
2263
return ret;
2264
2265
INIT_WORK(&state->commit_work, commit_work);
2266
2267
ret = drm_atomic_helper_prepare_planes(dev, state);
2268
if (ret)
2269
return ret;
2270
2271
if (!nonblock) {
2272
ret = drm_atomic_helper_wait_for_fences(dev, state, true);
2273
if (ret)
2274
goto err;
2275
}
2276
2277
/*
2278
* This is the point of no return - everything below never fails except
2279
* when the hw goes bonghits. Which means we can commit the new state on
2280
* the software side now.
2281
*/
2282
2283
ret = drm_atomic_helper_swap_state(state, true);
2284
if (ret)
2285
goto err;
2286
2287
/*
2288
* Everything below can be run asynchronously without the need to grab
2289
* any modeset locks at all under one condition: It must be guaranteed
2290
* that the asynchronous work has either been cancelled (if the driver
2291
* supports it, which at least requires that the framebuffers get
2292
* cleaned up with drm_atomic_helper_cleanup_planes()) or completed
2293
* before the new state gets committed on the software side with
2294
* drm_atomic_helper_swap_state().
2295
*
2296
* This scheme allows new atomic state updates to be prepared and
2297
* checked in parallel to the asynchronous completion of the previous
2298
* update. Which is important since compositors need to figure out the
2299
* composition of the next frame right after having submitted the
2300
* current layout.
2301
*
2302
* NOTE: Commit work has multiple phases, first hardware commit, then
2303
* cleanup. We want them to overlap, hence need system_unbound_wq to
2304
* make sure work items don't artificially stall on each other.
2305
*/
2306
2307
drm_atomic_state_get(state);
2308
if (nonblock)
2309
queue_work(system_unbound_wq, &state->commit_work);
2310
else
2311
commit_tail(state);
2312
2313
return 0;
2314
2315
err:
2316
drm_atomic_helper_unprepare_planes(dev, state);
2317
return ret;
2318
}
2319
EXPORT_SYMBOL(drm_atomic_helper_commit);
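/*
* Example (editorial sketch): the usual way drivers plug this helper into the
* core. The structure and the helper entry points are real API; the foo_* name
* and the choice of drm_gem_fb_create() as the fb_create hook are assumptions.
*
*	static const struct drm_mode_config_funcs foo_mode_config_funcs = {
*		.fb_create = drm_gem_fb_create,
*		.atomic_check = drm_atomic_helper_check,
*		.atomic_commit = drm_atomic_helper_commit,
*	};
*
*	// During mode config initialization:
*	// dev->mode_config.funcs = &foo_mode_config_funcs;
*/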
2320
2321
/**
2322
* DOC: implementing nonblocking commit
2323
*
2324
* Nonblocking atomic commits should use struct &drm_crtc_commit to sequence
2325
* different operations against each other. Locks, especially struct
2326
* &drm_modeset_lock, should not be held in worker threads or any other
2327
* asynchronous context used to commit the hardware state.
2328
*
2329
* drm_atomic_helper_commit() implements the recommended sequence for
2330
* nonblocking commits, using drm_atomic_helper_setup_commit() internally:
2331
*
2332
* 1. Run drm_atomic_helper_prepare_planes(). Since this can fail and we
2333
* need to propagate out of memory/VRAM errors to userspace, it must be called
2334
* synchronously.
2335
*
2336
* 2. Synchronize with any outstanding nonblocking commit worker threads which
2337
* might be affected by the new state update. This is handled by
2338
* drm_atomic_helper_setup_commit().
2339
*
2340
* Asynchronous workers need to have sufficient parallelism to be able to run
2341
* different atomic commits on different CRTCs in parallel. The simplest way to
2342
* achieve this is by running them on the &system_unbound_wq work queue. Note
2343
* that drivers are not required to split up atomic commits and run an
2344
* individual commit in parallel - userspace is supposed to do that if it cares.
2345
* But it might be beneficial to do that for modesets, since those necessarily
2346
* must be done as one global operation, and enabling or disabling a CRTC can
2347
* take a long time. But even that is not required.
2348
*
2349
* IMPORTANT: A &drm_atomic_state update for multiple CRTCs is sequenced
2350
* against all CRTCs therein. Therefore for atomic state updates which only flip
2351
* planes the driver must not get the struct &drm_crtc_state of unrelated CRTCs
2352
* in its atomic check code: This would prevent committing of atomic updates to
2353
* multiple CRTCs in parallel. In general, adding additional state structures
2354
* should be avoided as much as possible, because this reduces parallelism in
2355
* (nonblocking) commits, both due to locking and due to commit sequencing
2356
* requirements.
2357
*
2358
* 3. The software state is updated synchronously with
2359
* drm_atomic_helper_swap_state(). Doing this under the protection of all modeset
2360
* locks means concurrent callers never see inconsistent state. Note that commit
2361
* workers do not hold any locks; their access is only coordinated through
2362
* ordering. If workers would access state only through the pointers in the
2363
* free-standing state objects (currently not the case for any driver) then even
2364
* multiple pending commits could be in-flight at the same time.
2365
*
2366
* 4. Schedule a work item to do all subsequent steps, using the split-out
2367
* commit helpers: a) pre-plane commit b) plane commit c) post-plane commit and
2368
* then cleaning up the framebuffers after the old framebuffer is no longer
2369
* being displayed. The scheduled work should synchronize against other workers
2370
* using the &drm_crtc_commit infrastructure as needed. See
2371
* drm_atomic_helper_setup_commit() for more details.
2372
*/
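/*
* Example (editorial sketch): the body of the work item scheduled in step 4
* above, for a hypothetical driver foo that open-codes the sequence using the
* split-out helpers instead of relying on drm_atomic_helper_commit(). The
* struct foo_commit wrapper and its work_struct embedding are assumptions.
*
*	static void foo_commit_work(struct work_struct *work)
*	{
*		struct foo_commit *commit =
*			container_of(work, struct foo_commit, work);
*		struct drm_atomic_state *state = commit->state;
*		struct drm_device *dev = state->dev;
*
*		drm_atomic_helper_wait_for_fences(dev, state, false);
*		drm_atomic_helper_wait_for_dependencies(state);
*
*		// a) pre-plane, b) plane and c) post-plane commit
*		drm_atomic_helper_commit_modeset_disables(dev, state);
*		drm_atomic_helper_commit_planes(dev, state, 0);
*		drm_atomic_helper_commit_modeset_enables(dev, state);
*
*		drm_atomic_helper_commit_hw_done(state);
*		drm_atomic_helper_wait_for_vblanks(dev, state);
*
*		// clean up the old framebuffers, then signal completion
*		drm_atomic_helper_cleanup_planes(dev, state);
*		drm_atomic_helper_commit_cleanup_done(state);
*
*		drm_atomic_state_put(state);
*	}
*/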
2373
2374
static int stall_checks(struct drm_crtc *crtc, bool nonblock)
2375
{
2376
struct drm_crtc_commit *commit, *stall_commit = NULL;
2377
bool completed = true;
2378
int i;
2379
long ret = 0;
2380
2381
spin_lock(&crtc->commit_lock);
2382
i = 0;
2383
list_for_each_entry(commit, &crtc->commit_list, commit_entry) {
2384
if (i == 0) {
2385
completed = try_wait_for_completion(&commit->flip_done);
2386
/*
2387
* Userspace is not allowed to get ahead of the previous
2388
* commit with nonblocking ones.
2389
*/
2390
if (!completed && nonblock) {
2391
spin_unlock(&crtc->commit_lock);
2392
drm_dbg_atomic(crtc->dev,
2393
"[CRTC:%d:%s] busy with a previous commit\n",
2394
crtc->base.id, crtc->name);
2395
2396
return -EBUSY;
2397
}
2398
} else if (i == 1) {
2399
stall_commit = drm_crtc_commit_get(commit);
2400
break;
2401
}
2402
2403
i++;
2404
}
2405
spin_unlock(&crtc->commit_lock);
2406
2407
if (!stall_commit)
2408
return 0;
2409
2410
/* We don't want to let commits get ahead of cleanup work too much,
2411
* stalling on 2nd previous commit means triple-buffer won't ever stall.
2412
*/
2413
ret = wait_for_completion_interruptible_timeout(&stall_commit->cleanup_done,
2414
10*HZ);
2415
if (ret == 0)
2416
drm_err(crtc->dev, "[CRTC:%d:%s] cleanup_done timed out\n",
2417
crtc->base.id, crtc->name);
2418
2419
drm_crtc_commit_put(stall_commit);
2420
2421
return ret < 0 ? ret : 0;
2422
}
2423
2424
static void release_crtc_commit(struct completion *completion)
2425
{
2426
struct drm_crtc_commit *commit = container_of(completion,
2427
typeof(*commit),
2428
flip_done);
2429
2430
drm_crtc_commit_put(commit);
2431
}
2432
2433
static void init_commit(struct drm_crtc_commit *commit, struct drm_crtc *crtc)
2434
{
2435
init_completion(&commit->flip_done);
2436
init_completion(&commit->hw_done);
2437
init_completion(&commit->cleanup_done);
2438
INIT_LIST_HEAD(&commit->commit_entry);
2439
kref_init(&commit->ref);
2440
commit->crtc = crtc;
2441
}
2442
2443
static struct drm_crtc_commit *
2444
crtc_or_fake_commit(struct drm_atomic_state *state, struct drm_crtc *crtc)
2445
{
2446
if (crtc) {
2447
struct drm_crtc_state *new_crtc_state;
2448
2449
new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
2450
2451
return new_crtc_state->commit;
2452
}
2453
2454
if (!state->fake_commit) {
2455
state->fake_commit = kzalloc(sizeof(*state->fake_commit), GFP_KERNEL);
2456
if (!state->fake_commit)
2457
return NULL;
2458
2459
init_commit(state->fake_commit, NULL);
2460
}
2461
2462
return state->fake_commit;
2463
}
2464
2465
/**
2466
* drm_atomic_helper_setup_commit - setup possibly nonblocking commit
2467
* @state: new modeset state to be committed
2468
* @nonblock: whether nonblocking behavior is requested.
2469
*
2470
* This function prepares @state to be used by the atomic helper's support for
2471
* nonblocking commits. Drivers using the nonblocking commit infrastructure
2472
* should always call this function from their
2473
* &drm_mode_config_funcs.atomic_commit hook.
2474
*
2475
* Drivers that need to extend the commit setup to private objects can use the
2476
* &drm_mode_config_helper_funcs.atomic_commit_setup hook.
2477
*
2478
* To be able to use this support drivers need to use a few more helper
2479
* functions. drm_atomic_helper_wait_for_dependencies() must be called before
2480
* actually committing the hardware state, and for nonblocking commits this call
2481
* must be placed in the async worker. See also drm_atomic_helper_swap_state()
2482
* and its stall parameter, for when a driver's commit hooks look at the
2483
* &drm_crtc.state, &drm_plane.state or &drm_connector.state pointer directly.
2484
*
2485
* Completion of the hardware commit step must be signalled using
2486
* drm_atomic_helper_commit_hw_done(). After this step the driver is not allowed
2487
* to read or change any permanent software or hardware modeset state. The only
2488
* exception is state protected by other means than &drm_modeset_lock locks.
2489
* Only the free-standing @state with pointers to the old state structures can
2490
* be inspected, e.g. to clean up old buffers using
2491
* drm_atomic_helper_cleanup_planes().
2492
*
2493
* At the very end, before cleaning up @state drivers must call
2494
* drm_atomic_helper_commit_cleanup_done().
2495
*
2496
* This is all implemented in drm_atomic_helper_commit(), giving drivers a
2497
* complete and easy-to-use default implementation of the atomic_commit() hook.
2498
*
2499
* The tracking of asynchronously executed and still pending commits is done
2500
* using the core structure &drm_crtc_commit.
2501
*
2502
* By default there's no need to clean up resources allocated by this function
2503
* explicitly: drm_atomic_state_default_clear() will take care of that
2504
* automatically.
2505
*
2506
* Returns:
2507
* 0 on success. -EBUSY when userspace schedules nonblocking commits too fast,
2508
* -ENOMEM on allocation failures and -EINTR when a signal is pending.
2509
*/
2510
int drm_atomic_helper_setup_commit(struct drm_atomic_state *state,
2511
bool nonblock)
2512
{
2513
struct drm_crtc *crtc;
2514
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
2515
struct drm_connector *conn;
2516
struct drm_connector_state *old_conn_state, *new_conn_state;
2517
struct drm_plane *plane;
2518
struct drm_plane_state *old_plane_state, *new_plane_state;
2519
struct drm_crtc_commit *commit;
2520
const struct drm_mode_config_helper_funcs *funcs;
2521
int i, ret;
2522
2523
funcs = state->dev->mode_config.helper_private;
2524
2525
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
2526
commit = kzalloc(sizeof(*commit), GFP_KERNEL);
2527
if (!commit)
2528
return -ENOMEM;
2529
2530
init_commit(commit, crtc);
2531
2532
new_crtc_state->commit = commit;
2533
2534
ret = stall_checks(crtc, nonblock);
2535
if (ret)
2536
return ret;
2537
2538
/*
2539
* Drivers only send out events when at least either current or
2540
* new CRTC state is active. Complete right away if everything
2541
* stays off.
2542
*/
2543
if (!old_crtc_state->active && !new_crtc_state->active) {
2544
complete_all(&commit->flip_done);
2545
continue;
2546
}
2547
2548
/* Legacy cursor updates are fully unsynced. */
2549
if (state->legacy_cursor_update) {
2550
complete_all(&commit->flip_done);
2551
continue;
2552
}
2553
2554
if (!new_crtc_state->event) {
2555
commit->event = kzalloc(sizeof(*commit->event),
2556
GFP_KERNEL);
2557
if (!commit->event)
2558
return -ENOMEM;
2559
2560
new_crtc_state->event = commit->event;
2561
}
2562
2563
new_crtc_state->event->base.completion = &commit->flip_done;
2564
new_crtc_state->event->base.completion_release = release_crtc_commit;
2565
drm_crtc_commit_get(commit);
2566
2567
commit->abort_completion = true;
2568
2569
state->crtcs[i].commit = commit;
2570
drm_crtc_commit_get(commit);
2571
}
2572
2573
for_each_oldnew_connector_in_state(state, conn, old_conn_state, new_conn_state, i) {
2574
/*
2575
* Userspace is not allowed to get ahead of the previous
2576
* commit with nonblocking ones.
2577
*/
2578
if (nonblock && old_conn_state->commit &&
2579
!try_wait_for_completion(&old_conn_state->commit->flip_done)) {
2580
drm_dbg_atomic(conn->dev,
2581
"[CONNECTOR:%d:%s] busy with a previous commit\n",
2582
conn->base.id, conn->name);
2583
2584
return -EBUSY;
2585
}
2586
2587
/* Always track connectors explicitly for e.g. link retraining. */
2588
commit = crtc_or_fake_commit(state, new_conn_state->crtc ?: old_conn_state->crtc);
2589
if (!commit)
2590
return -ENOMEM;
2591
2592
new_conn_state->commit = drm_crtc_commit_get(commit);
2593
}
2594
2595
for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
2596
/*
2597
* Userspace is not allowed to get ahead of the previous
2598
* commit with nonblocking ones.
2599
*/
2600
if (nonblock && old_plane_state->commit &&
2601
!try_wait_for_completion(&old_plane_state->commit->flip_done)) {
2602
drm_dbg_atomic(plane->dev,
2603
"[PLANE:%d:%s] busy with a previous commit\n",
2604
plane->base.id, plane->name);
2605
2606
return -EBUSY;
2607
}
2608
2609
/* Always track planes explicitly for async pageflip support. */
2610
commit = crtc_or_fake_commit(state, new_plane_state->crtc ?: old_plane_state->crtc);
2611
if (!commit)
2612
return -ENOMEM;
2613
2614
new_plane_state->commit = drm_crtc_commit_get(commit);
2615
}
2616
2617
if (funcs && funcs->atomic_commit_setup)
2618
return funcs->atomic_commit_setup(state);
2619
2620
return 0;
2621
}
2622
EXPORT_SYMBOL(drm_atomic_helper_setup_commit);
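/*
* Example (editorial sketch): extending the commit setup for driver-private
* state via the &drm_mode_config_helper_funcs.atomic_commit_setup hook
* mentioned above, which this function calls last. The foo_* name and the
* bookkeeping it would perform are assumptions.
*
*	static int foo_atomic_commit_setup(struct drm_atomic_state *state)
*	{
*		// Allocate or look up per-commit bookkeeping for driver-private
*		// state here; returning an error aborts the commit cleanly.
*		return 0;
*	}
*
*	// Wired up through the same &drm_mode_config_helper_funcs table as
*	// .atomic_commit_tail, e.g. .atomic_commit_setup = foo_atomic_commit_setup.
*/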
2623
2624
/**
2625
* drm_atomic_helper_wait_for_dependencies - wait for required preceding commits
2626
* @state: atomic state object being committed
2627
*
2628
* This function waits for all preceding commits that touch the same CRTC as
2629
* @state to both be committed to the hardware (as signalled by
2630
* drm_atomic_helper_commit_hw_done()) and executed by the hardware (as signalled
2631
* by calling drm_crtc_send_vblank_event() on the &drm_crtc_state.event).
2632
*
2633
* This is part of the atomic helper support for nonblocking commits, see
2634
* drm_atomic_helper_setup_commit() for an overview.
2635
*/
2636
void drm_atomic_helper_wait_for_dependencies(struct drm_atomic_state *state)
2637
{
2638
struct drm_crtc *crtc;
2639
struct drm_crtc_state *old_crtc_state;
2640
struct drm_plane *plane;
2641
struct drm_plane_state *old_plane_state;
2642
struct drm_connector *conn;
2643
struct drm_connector_state *old_conn_state;
2644
int i;
2645
long ret;
2646
2647
for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) {
2648
ret = drm_crtc_commit_wait(old_crtc_state->commit);
2649
if (ret)
2650
drm_err(crtc->dev,
2651
"[CRTC:%d:%s] commit wait timed out\n",
2652
crtc->base.id, crtc->name);
2653
}
2654
2655
for_each_old_connector_in_state(state, conn, old_conn_state, i) {
2656
ret = drm_crtc_commit_wait(old_conn_state->commit);
2657
if (ret)
2658
drm_err(conn->dev,
2659
"[CONNECTOR:%d:%s] commit wait timed out\n",
2660
conn->base.id, conn->name);
2661
}
2662
2663
for_each_old_plane_in_state(state, plane, old_plane_state, i) {
2664
ret = drm_crtc_commit_wait(old_plane_state->commit);
2665
if (ret)
2666
drm_err(plane->dev,
2667
"[PLANE:%d:%s] commit wait timed out\n",
2668
plane->base.id, plane->name);
2669
}
2670
}
2671
EXPORT_SYMBOL(drm_atomic_helper_wait_for_dependencies);
2672
2673
/**
2674
* drm_atomic_helper_fake_vblank - fake VBLANK events if needed
2675
* @state: atomic state object being committed
2676
*
2677
* This function walks all CRTCs and fakes VBLANK events on those with
2678
* &drm_crtc_state.no_vblank set to true and &drm_crtc_state.event != NULL.
2679
* The primary use of this function is writeback connectors working in oneshot
2680
* mode and faking VBLANK events. In this case they only fake the VBLANK event
2681
* when a job is queued, and any change to the pipeline that does not touch the
2682
* connector leads to timeouts when calling
2683
* drm_atomic_helper_wait_for_vblanks() or
2684
* drm_atomic_helper_wait_for_flip_done(). In addition to writeback
2685
* connectors, this function can also fake VBLANK events for CRTCs without
2686
* VBLANK interrupt.
2687
*
2688
* This is part of the atomic helper support for nonblocking commits, see
2689
* drm_atomic_helper_setup_commit() for an overview.
2690
*/
2691
void drm_atomic_helper_fake_vblank(struct drm_atomic_state *state)
2692
{
2693
struct drm_crtc_state *new_crtc_state;
2694
struct drm_crtc *crtc;
2695
int i;
2696
2697
for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
2698
unsigned long flags;
2699
2700
if (!new_crtc_state->no_vblank)
2701
continue;
2702
2703
spin_lock_irqsave(&state->dev->event_lock, flags);
2704
if (new_crtc_state->event) {
2705
drm_crtc_send_vblank_event(crtc,
2706
new_crtc_state->event);
2707
new_crtc_state->event = NULL;
2708
}
2709
spin_unlock_irqrestore(&state->dev->event_lock, flags);
2710
}
2711
}
2712
EXPORT_SYMBOL(drm_atomic_helper_fake_vblank);
2713
2714
/**
2715
* drm_atomic_helper_commit_hw_done - signal completion of the hardware commit step
2716
* @state: atomic state object being committed
2717
*
2718
* This function is used to signal completion of the hardware commit step. After
2719
* this step the driver is not allowed to read or change any permanent software
2720
* or hardware modeset state. The only exception is state protected by other
2721
* means than &drm_modeset_lock locks.
2722
*
2723
* Drivers should try to postpone any expensive or delayed cleanup work until after
2724
* this function is called.
2725
*
2726
* This is part of the atomic helper support for nonblocking commits, see
2727
* drm_atomic_helper_setup_commit() for an overview.
2728
*/
2729
void drm_atomic_helper_commit_hw_done(struct drm_atomic_state *state)
2730
{
2731
struct drm_crtc *crtc;
2732
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
2733
struct drm_crtc_commit *commit;
2734
int i;
2735
2736
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
2737
commit = new_crtc_state->commit;
2738
if (!commit)
2739
continue;
2740
2741
/*
2742
* copy new_crtc_state->commit to old_crtc_state->commit,
2743
* it's unsafe to touch new_crtc_state after hw_done,
2744
* but we still need to do so in cleanup_done().
2745
*/
2746
if (old_crtc_state->commit)
2747
drm_crtc_commit_put(old_crtc_state->commit);
2748
2749
old_crtc_state->commit = drm_crtc_commit_get(commit);
2750
2751
/* backend must have consumed any event by now */
2752
WARN_ON(new_crtc_state->event);
2753
complete_all(&commit->hw_done);
2754
}
2755
2756
if (state->fake_commit) {
2757
complete_all(&state->fake_commit->hw_done);
2758
complete_all(&state->fake_commit->flip_done);
2759
}
2760
}
2761
EXPORT_SYMBOL(drm_atomic_helper_commit_hw_done);
2762
2763
/**
2764
* drm_atomic_helper_commit_cleanup_done - signal completion of commit
2765
* @state: atomic state object being committed
2766
*
2767
* This signals completion of the atomic update @state, including any
2768
* cleanup work. If used, it must be called right before calling
2769
* drm_atomic_state_put().
2770
*
2771
* This is part of the atomic helper support for nonblocking commits, see
2772
* drm_atomic_helper_setup_commit() for an overview.
2773
*/
2774
void drm_atomic_helper_commit_cleanup_done(struct drm_atomic_state *state)
2775
{
2776
struct drm_crtc *crtc;
2777
struct drm_crtc_state *old_crtc_state;
2778
struct drm_crtc_commit *commit;
2779
int i;
2780
2781
for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) {
2782
commit = old_crtc_state->commit;
2783
if (WARN_ON(!commit))
2784
continue;
2785
2786
complete_all(&commit->cleanup_done);
2787
WARN_ON(!try_wait_for_completion(&commit->hw_done));
2788
2789
spin_lock(&crtc->commit_lock);
2790
list_del(&commit->commit_entry);
2791
spin_unlock(&crtc->commit_lock);
2792
}
2793
2794
if (state->fake_commit) {
2795
complete_all(&state->fake_commit->cleanup_done);
2796
WARN_ON(!try_wait_for_completion(&state->fake_commit->hw_done));
2797
}
2798
}
2799
EXPORT_SYMBOL(drm_atomic_helper_commit_cleanup_done);
2800
2801
/**
2802
* drm_atomic_helper_prepare_planes - prepare plane resources before commit
2803
* @dev: DRM device
2804
* @state: atomic state object with new state structures
2805
*
2806
* This function prepares plane state, specifically framebuffers, for the new
2807
* configuration, by calling &drm_plane_helper_funcs.prepare_fb. If any failure
2808
* is encountered this function will call &drm_plane_helper_funcs.cleanup_fb on
2809
* any already successfully prepared framebuffer.
2810
*
2811
* Returns:
2812
* 0 on success, negative error code on failure.
2813
*/
2814
int drm_atomic_helper_prepare_planes(struct drm_device *dev,
2815
struct drm_atomic_state *state)
2816
{
2817
struct drm_connector *connector;
2818
struct drm_connector_state *new_conn_state;
2819
struct drm_plane *plane;
2820
struct drm_plane_state *new_plane_state;
2821
int ret, i, j;
2822
2823
for_each_new_connector_in_state(state, connector, new_conn_state, i) {
2824
if (!new_conn_state->writeback_job)
2825
continue;
2826
2827
ret = drm_writeback_prepare_job(new_conn_state->writeback_job);
2828
if (ret < 0)
2829
return ret;
2830
}
2831
2832
for_each_new_plane_in_state(state, plane, new_plane_state, i) {
2833
const struct drm_plane_helper_funcs *funcs;
2834
2835
funcs = plane->helper_private;
2836
2837
if (funcs->prepare_fb) {
2838
ret = funcs->prepare_fb(plane, new_plane_state);
2839
if (ret)
2840
goto fail_prepare_fb;
2841
} else {
2842
WARN_ON_ONCE(funcs->cleanup_fb);
2843
2844
if (!drm_core_check_feature(dev, DRIVER_GEM))
2845
continue;
2846
2847
ret = drm_gem_plane_helper_prepare_fb(plane, new_plane_state);
2848
if (ret)
2849
goto fail_prepare_fb;
2850
}
2851
}
2852
2853
for_each_new_plane_in_state(state, plane, new_plane_state, i) {
2854
const struct drm_plane_helper_funcs *funcs = plane->helper_private;
2855
2856
if (funcs->begin_fb_access) {
2857
ret = funcs->begin_fb_access(plane, new_plane_state);
2858
if (ret)
2859
goto fail_begin_fb_access;
2860
}
2861
}
2862
2863
return 0;
2864
2865
fail_begin_fb_access:
2866
for_each_new_plane_in_state(state, plane, new_plane_state, j) {
2867
const struct drm_plane_helper_funcs *funcs = plane->helper_private;
2868
2869
if (j >= i)
2870
continue;
2871
2872
if (funcs->end_fb_access)
2873
funcs->end_fb_access(plane, new_plane_state);
2874
}
2875
i = j; /* set i to upper limit to cleanup all planes */
2876
fail_prepare_fb:
2877
for_each_new_plane_in_state(state, plane, new_plane_state, j) {
2878
const struct drm_plane_helper_funcs *funcs;
2879
2880
if (j >= i)
2881
continue;
2882
2883
funcs = plane->helper_private;
2884
2885
if (funcs->cleanup_fb)
2886
funcs->cleanup_fb(plane, new_plane_state);
2887
}
2888
2889
return ret;
2890
}
2891
EXPORT_SYMBOL(drm_atomic_helper_prepare_planes);
2892
2893
/**
2894
* drm_atomic_helper_unprepare_planes - release plane resources on aborts
2895
* @dev: DRM device
2896
* @state: atomic state object with old state structures
2897
*
2898
* This function cleans up plane state, specifically framebuffers, from the
2899
* atomic state. It undoes the effects of drm_atomic_helper_prepare_planes()
2900
* when aborting an atomic commit. For cleaning up after a successful commit
2901
* use drm_atomic_helper_cleanup_planes().
2902
*/
2903
void drm_atomic_helper_unprepare_planes(struct drm_device *dev,
2904
struct drm_atomic_state *state)
2905
{
2906
struct drm_plane *plane;
2907
struct drm_plane_state *new_plane_state;
2908
int i;
2909
2910
for_each_new_plane_in_state(state, plane, new_plane_state, i) {
2911
const struct drm_plane_helper_funcs *funcs = plane->helper_private;
2912
2913
if (funcs->end_fb_access)
2914
funcs->end_fb_access(plane, new_plane_state);
2915
}
2916
2917
for_each_new_plane_in_state(state, plane, new_plane_state, i) {
2918
const struct drm_plane_helper_funcs *funcs = plane->helper_private;
2919
2920
if (funcs->cleanup_fb)
2921
funcs->cleanup_fb(plane, new_plane_state);
2922
}
2923
}
2924
EXPORT_SYMBOL(drm_atomic_helper_unprepare_planes);
2925
2926
static bool plane_crtc_active(const struct drm_plane_state *state)
2927
{
2928
return state->crtc && state->crtc->state->active;
2929
}
2930
2931
/**
2932
* drm_atomic_helper_commit_planes - commit plane state
2933
* @dev: DRM device
2934
* @state: atomic state object being committed
2935
* @flags: flags for committing plane state
2936
*
2937
* This function commits the new plane state using the plane and atomic helper
2938
* functions for planes and CRTCs. It assumes that the atomic state has already
2939
* been pushed into the relevant object state pointers, since this step can no
2940
* longer fail.
2941
*
2942
* It still requires the global state object @state to know which planes and
2943
* crtcs need to be updated though.
2944
*
2945
* Note that this function does all plane updates across all CRTCs in one step.
2946
* If the hardware can't support this approach look at
2947
* drm_atomic_helper_commit_planes_on_crtc() instead.
2948
*
2949
* Plane parameters can be updated by applications while the associated CRTC is
2950
* disabled. The DRM/KMS core will store the parameters in the plane state,
2951
* which will be available to the driver when the CRTC is turned on. As a result
2952
* most drivers don't need to be immediately notified of plane updates for a
2953
* disabled CRTC.
2954
*
2955
* Unless otherwise needed, drivers are advised to set the ACTIVE_ONLY flag in
2956
* @flags in order not to receive plane update notifications related to a
2957
* disabled CRTC. This avoids the need to manually ignore plane updates in
2958
* driver code when the driver and/or hardware can't or just don't need to deal
2959
* with updates on disabled CRTCs, for example when supporting runtime PM.
2960
*
2961
* Drivers may set the NO_DISABLE_AFTER_MODESET flag in @flags if the relevant
2962
* display controllers require a CRTC's planes to be disabled when the CRTC is
2963
* disabled. This function would skip the &drm_plane_helper_funcs.atomic_disable
2964
* call for a plane if the CRTC of the old plane state needs a modesetting
2965
* operation. Of course, the drivers need to disable the planes in their CRTC
2966
* disable callbacks since no one else would do that.
2967
*
2968
* The drm_atomic_helper_commit() default implementation doesn't set the
2969
* ACTIVE_ONLY flag to most closely match the behaviour of the legacy helpers.
2970
* This should not be copied blindly by drivers.
2971
*/
2972
void drm_atomic_helper_commit_planes(struct drm_device *dev,
2973
struct drm_atomic_state *state,
2974
uint32_t flags)
2975
{
2976
struct drm_crtc *crtc;
2977
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
2978
struct drm_plane *plane;
2979
struct drm_plane_state *old_plane_state, *new_plane_state;
2980
int i;
2981
bool active_only = flags & DRM_PLANE_COMMIT_ACTIVE_ONLY;
2982
bool no_disable = flags & DRM_PLANE_COMMIT_NO_DISABLE_AFTER_MODESET;
2983
2984
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
2985
const struct drm_crtc_helper_funcs *funcs;
2986
2987
funcs = crtc->helper_private;
2988
2989
if (!funcs || !funcs->atomic_begin)
2990
continue;
2991
2992
if (active_only && !new_crtc_state->active)
2993
continue;
2994
2995
funcs->atomic_begin(crtc, state);
2996
}
2997
2998
for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
2999
const struct drm_plane_helper_funcs *funcs;
3000
bool disabling;
3001
3002
funcs = plane->helper_private;
3003
3004
if (!funcs)
3005
continue;
3006
3007
disabling = drm_atomic_plane_disabling(old_plane_state,
3008
new_plane_state);
3009
3010
if (active_only) {
3011
/*
3012
* Skip planes related to inactive CRTCs. If the plane
3013
* is enabled use the state of the current CRTC. If the
3014
* plane is being disabled use the state of the old
3015
* CRTC to avoid skipping planes being disabled on an
3016
* active CRTC.
3017
*/
3018
if (!disabling && !plane_crtc_active(new_plane_state))
3019
continue;
3020
if (disabling && !plane_crtc_active(old_plane_state))
3021
continue;
3022
}
3023
3024
/*
3025
* Special-case disabling the plane if drivers support it.
3026
*/
3027
if (disabling && funcs->atomic_disable) {
3028
struct drm_crtc_state *crtc_state;
3029
3030
crtc_state = old_plane_state->crtc->state;
3031
3032
if (drm_atomic_crtc_needs_modeset(crtc_state) &&
3033
no_disable)
3034
continue;
3035
3036
funcs->atomic_disable(plane, state);
3037
} else if (new_plane_state->crtc || disabling) {
3038
funcs->atomic_update(plane, state);
3039
3040
if (!disabling && funcs->atomic_enable) {
3041
if (drm_atomic_plane_enabling(old_plane_state, new_plane_state))
3042
funcs->atomic_enable(plane, state);
3043
}
3044
}
3045
}
3046
3047
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
3048
const struct drm_crtc_helper_funcs *funcs;
3049
3050
funcs = crtc->helper_private;
3051
3052
if (!funcs || !funcs->atomic_flush)
3053
continue;
3054
3055
if (active_only && !new_crtc_state->active)
3056
continue;
3057
3058
funcs->atomic_flush(crtc, state);
3059
}
3060
3061
/*
3062
* Signal end of framebuffer access here before hw_done. After hw_done,
3063
* a later commit might have already released the plane state.
3064
*/
3065
for_each_old_plane_in_state(state, plane, old_plane_state, i) {
3066
const struct drm_plane_helper_funcs *funcs = plane->helper_private;
3067
3068
if (funcs->end_fb_access)
3069
funcs->end_fb_access(plane, old_plane_state);
3070
}
3071
}
3072
EXPORT_SYMBOL(drm_atomic_helper_commit_planes);
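/*
* Example (editorial sketch): the &drm_crtc_helper_funcs.atomic_begin and
* .atomic_flush hooks that bracket the plane updates above. The event handling
* in atomic_flush follows the common arm-or-send pattern; foo_hw_latch() and
* the begin-side comment are assumptions.
*
*	static void foo_crtc_atomic_begin(struct drm_crtc *crtc,
*					  struct drm_atomic_state *state)
*	{
*		// e.g. hold off hardware latching while planes are updated
*	}
*
*	static void foo_crtc_atomic_flush(struct drm_crtc *crtc,
*					  struct drm_atomic_state *state)
*	{
*		unsigned long flags;
*
*		foo_hw_latch(crtc);	// kick the queued plane updates
*
*		spin_lock_irqsave(&crtc->dev->event_lock, flags);
*		if (crtc->state->event) {
*			if (drm_crtc_vblank_get(crtc) == 0)
*				drm_crtc_arm_vblank_event(crtc, crtc->state->event);
*			else
*				drm_crtc_send_vblank_event(crtc, crtc->state->event);
*			crtc->state->event = NULL;
*		}
*		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
*	}
*/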
3073
3074
/**
3075
* drm_atomic_helper_commit_planes_on_crtc - commit plane state for a CRTC
3076
* @old_crtc_state: atomic state object with the old CRTC state
3077
*
3078
* This function commits the new plane state using the plane and atomic helper
3079
* functions for planes on the specific CRTC. It assumes that the atomic state
3080
* has already been pushed into the relevant object state pointers, since this
3081
* step can no longer fail.
3082
*
3083
* This function is useful when plane updates should be done CRTC-by-CRTC
3084
* instead of one global step like drm_atomic_helper_commit_planes() does.
3085
*
3086
* This function can only be safely used when planes are not allowed to move
* between different CRTCs, because this function doesn't handle inter-CRTC
* dependencies. Callers need to ensure that either no such dependencies exist,
* that they are resolved through ordering of commit calls, or that they are
* handled by some other means.
3090
*/
3091
void
3092
drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state *old_crtc_state)
3093
{
3094
const struct drm_crtc_helper_funcs *crtc_funcs;
3095
struct drm_crtc *crtc = old_crtc_state->crtc;
3096
struct drm_atomic_state *old_state = old_crtc_state->state;
3097
struct drm_crtc_state *new_crtc_state =
3098
drm_atomic_get_new_crtc_state(old_state, crtc);
3099
struct drm_plane *plane;
3100
unsigned int plane_mask;
3101
3102
plane_mask = old_crtc_state->plane_mask;
3103
plane_mask |= new_crtc_state->plane_mask;
3104
3105
crtc_funcs = crtc->helper_private;
3106
if (crtc_funcs && crtc_funcs->atomic_begin)
3107
crtc_funcs->atomic_begin(crtc, old_state);
3108
3109
drm_for_each_plane_mask(plane, crtc->dev, plane_mask) {
3110
struct drm_plane_state *old_plane_state =
3111
drm_atomic_get_old_plane_state(old_state, plane);
3112
struct drm_plane_state *new_plane_state =
3113
drm_atomic_get_new_plane_state(old_state, plane);
3114
const struct drm_plane_helper_funcs *plane_funcs;
3115
bool disabling;
3116
3117
plane_funcs = plane->helper_private;
3118
3119
if (!old_plane_state || !plane_funcs)
3120
continue;
3121
3122
WARN_ON(new_plane_state->crtc &&
3123
new_plane_state->crtc != crtc);
3124
3125
disabling = drm_atomic_plane_disabling(old_plane_state, new_plane_state);
3126
3127
if (disabling && plane_funcs->atomic_disable) {
3128
plane_funcs->atomic_disable(plane, old_state);
3129
} else if (new_plane_state->crtc || disabling) {
3130
plane_funcs->atomic_update(plane, old_state);
3131
3132
if (!disabling && plane_funcs->atomic_enable) {
3133
if (drm_atomic_plane_enabling(old_plane_state, new_plane_state))
3134
plane_funcs->atomic_enable(plane, old_state);
3135
}
3136
}
3137
}
3138
3139
if (crtc_funcs && crtc_funcs->atomic_flush)
3140
crtc_funcs->atomic_flush(crtc, old_state);
3141
}
3142
EXPORT_SYMBOL(drm_atomic_helper_commit_planes_on_crtc);
3143
3144
/**
3145
* drm_atomic_helper_disable_planes_on_crtc - helper to disable CRTC's planes
3146
* @old_crtc_state: atomic state object with the old CRTC state
3147
* @atomic: if set, synchronize with CRTC's atomic_begin/flush hooks
3148
*
3149
* Disables all planes associated with the given CRTC. This can be
3150
* used for instance in the CRTC helper atomic_disable callback to disable
3151
* all planes.
3152
*
3153
* If the atomic-parameter is set the function calls the CRTC's
3154
* atomic_begin hook before and atomic_flush hook after disabling the
3155
* planes.
3156
*
3157
* It is a bug to call this function without having implemented the
3158
* &drm_plane_helper_funcs.atomic_disable plane hook.
3159
*/
3160
void
3161
drm_atomic_helper_disable_planes_on_crtc(struct drm_crtc_state *old_crtc_state,
3162
bool atomic)
3163
{
3164
struct drm_crtc *crtc = old_crtc_state->crtc;
3165
const struct drm_crtc_helper_funcs *crtc_funcs =
3166
crtc->helper_private;
3167
struct drm_plane *plane;
3168
3169
if (atomic && crtc_funcs && crtc_funcs->atomic_begin)
3170
crtc_funcs->atomic_begin(crtc, NULL);
3171
3172
drm_atomic_crtc_state_for_each_plane(plane, old_crtc_state) {
3173
const struct drm_plane_helper_funcs *plane_funcs =
3174
plane->helper_private;
3175
3176
if (!plane_funcs)
3177
continue;
3178
3179
WARN_ON(!plane_funcs->atomic_disable);
3180
if (plane_funcs->atomic_disable)
3181
plane_funcs->atomic_disable(plane, NULL);
3182
}
3183
3184
if (atomic && crtc_funcs && crtc_funcs->atomic_flush)
3185
crtc_funcs->atomic_flush(crtc, NULL);
3186
}
3187
EXPORT_SYMBOL(drm_atomic_helper_disable_planes_on_crtc);
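/*
* Example (editorial sketch): calling this helper from a CRTC's atomic_disable
* hook, as suggested in the kernel-doc above. foo_hw_stop_scanout() is an
* assumption; the rest is real API.
*
*	static void foo_crtc_atomic_disable(struct drm_crtc *crtc,
*					    struct drm_atomic_state *state)
*	{
*		struct drm_crtc_state *old_crtc_state =
*			drm_atomic_get_old_crtc_state(state, crtc);
*
*		drm_atomic_helper_disable_planes_on_crtc(old_crtc_state, false);
*
*		drm_crtc_vblank_off(crtc);
*		foo_hw_stop_scanout(crtc);
*	}
*/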
3188
3189
/**
3190
* drm_atomic_helper_cleanup_planes - cleanup plane resources after commit
3191
* @dev: DRM device
3192
* @state: atomic state object being committed
3193
*
3194
* This function cleans up plane state, specifically framebuffers, from the old
3195
* configuration. Hence the old configuration must be preserved in @state to
3196
* be able to call this function.
3197
*
3198
* This function may not be called on the new state when the atomic update
3199
* fails at any point after calling drm_atomic_helper_prepare_planes(). Use
3200
* drm_atomic_helper_unprepare_planes() in this case.
3201
*/
3202
void drm_atomic_helper_cleanup_planes(struct drm_device *dev,
3203
struct drm_atomic_state *state)
3204
{
3205
struct drm_plane *plane;
3206
struct drm_plane_state *old_plane_state;
3207
int i;
3208
3209
for_each_old_plane_in_state(state, plane, old_plane_state, i) {
3210
const struct drm_plane_helper_funcs *funcs = plane->helper_private;
3211
3212
if (funcs->cleanup_fb)
3213
funcs->cleanup_fb(plane, old_plane_state);
3214
}
3215
}
3216
EXPORT_SYMBOL(drm_atomic_helper_cleanup_planes);
3217
3218
/**
3219
* drm_atomic_helper_swap_state - store atomic state into current sw state
3220
* @state: atomic state
3221
* @stall: stall for preceding commits
3222
*
3223
* This function stores the atomic state into the current state pointers in all
3224
* driver objects. It should be called after all steps that could fail have been
* done and have succeeded, but before the actual hardware state is committed.
3226
*
3227
* For cleanup and error recovery the current state for all changed objects will
3228
* be swapped into @state.
3229
*
3230
* With that sequence it fits perfectly into the plane prepare/cleanup sequence:
3231
*
3232
* 1. Call drm_atomic_helper_prepare_planes() with the staged atomic state.
3233
*
3234
* 2. Do any other steps that might fail.
3235
*
3236
* 3. Put the staged state into the current state pointers with this function.
3237
*
3238
* 4. Actually commit the hardware state.
3239
*
3240
* 5. Call drm_atomic_helper_cleanup_planes() with @state, which since step 3
3241
* contains the old state. Also do any other cleanup required with that state.
3242
*
3243
* @stall must be set when nonblocking commits for this driver directly access
3244
* the &drm_plane.state, &drm_crtc.state or &drm_connector.state pointer. With
3245
* the current atomic helpers this is almost always the case, since the helpers
3246
* don't pass the right state structures to the callbacks.
3247
*
3248
* Returns:
3249
* Returns 0 on success. Can return -ERESTARTSYS when @stall is true and
3250
* waiting for the previous commits has been interrupted.
3251
*/
3252
int drm_atomic_helper_swap_state(struct drm_atomic_state *state,
3253
bool stall)
3254
{
3255
int i, ret;
3256
unsigned long flags = 0;
3257
struct drm_connector *connector;
3258
struct drm_connector_state *old_conn_state, *new_conn_state;
3259
struct drm_crtc *crtc;
3260
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
3261
struct drm_plane *plane;
3262
struct drm_plane_state *old_plane_state, *new_plane_state;
3263
struct drm_colorop *colorop;
3264
struct drm_colorop_state *old_colorop_state, *new_colorop_state;
3265
struct drm_crtc_commit *commit;
3266
struct drm_private_obj *obj;
3267
struct drm_private_state *old_obj_state, *new_obj_state;
3268
3269
if (stall) {
3270
/*
3271
* We have to stall for hw_done here before
3272
* drm_atomic_helper_wait_for_dependencies() because flip
3273
* depth > 1 is not yet supported by all drivers. As long as
3274
* obj->state is directly dereferenced anywhere in the drivers
3275
* atomic_commit_tail function, then it's unsafe to swap state
3276
* before drm_atomic_helper_commit_hw_done() is called.
3277
*/
3278
3279
for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) {
3280
commit = old_crtc_state->commit;
3281
3282
if (!commit)
3283
continue;
3284
3285
ret = wait_for_completion_interruptible(&commit->hw_done);
3286
if (ret)
3287
return ret;
3288
}
3289
3290
for_each_old_connector_in_state(state, connector, old_conn_state, i) {
3291
commit = old_conn_state->commit;
3292
3293
if (!commit)
3294
continue;
3295
3296
ret = wait_for_completion_interruptible(&commit->hw_done);
3297
if (ret)
3298
return ret;
3299
}
3300
3301
for_each_old_plane_in_state(state, plane, old_plane_state, i) {
3302
commit = old_plane_state->commit;
3303
3304
if (!commit)
3305
continue;
3306
3307
ret = wait_for_completion_interruptible(&commit->hw_done);
3308
if (ret)
3309
return ret;
3310
}
3311
}
3312
3313
for_each_oldnew_connector_in_state(state, connector, old_conn_state, new_conn_state, i) {
3314
WARN_ON(connector->state != old_conn_state);
3315
3316
old_conn_state->state = state;
3317
new_conn_state->state = NULL;
3318
3319
state->connectors[i].state_to_destroy = old_conn_state;
3320
connector->state = new_conn_state;
3321
}
3322
3323
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
3324
WARN_ON(crtc->state != old_crtc_state);
3325
3326
old_crtc_state->state = state;
3327
new_crtc_state->state = NULL;
3328
3329
state->crtcs[i].state_to_destroy = old_crtc_state;
3330
crtc->state = new_crtc_state;
3331
3332
if (new_crtc_state->commit) {
3333
spin_lock(&crtc->commit_lock);
3334
list_add(&new_crtc_state->commit->commit_entry,
3335
&crtc->commit_list);
3336
spin_unlock(&crtc->commit_lock);
3337
3338
new_crtc_state->commit->event = NULL;
3339
}
3340
}
3341
3342
for_each_oldnew_colorop_in_state(state, colorop, old_colorop_state, new_colorop_state, i) {
3343
WARN_ON(colorop->state != old_colorop_state);
3344
3345
old_colorop_state->state = state;
3346
new_colorop_state->state = NULL;
3347
3348
state->colorops[i].state = old_colorop_state;
3349
colorop->state = new_colorop_state;
3350
}
3351
3352
drm_panic_lock(state->dev, flags);
3353
for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
3354
WARN_ON(plane->state != old_plane_state);
3355
3356
old_plane_state->state = state;
3357
new_plane_state->state = NULL;
3358
3359
state->planes[i].state_to_destroy = old_plane_state;
3360
plane->state = new_plane_state;
3361
}
3362
drm_panic_unlock(state->dev, flags);
3363
3364
for_each_oldnew_private_obj_in_state(state, obj, old_obj_state, new_obj_state, i) {
3365
WARN_ON(obj->state != old_obj_state);
3366
3367
old_obj_state->state = state;
3368
new_obj_state->state = NULL;
3369
3370
state->private_objs[i].state_to_destroy = old_obj_state;
3371
obj->state = new_obj_state;
3372
}
3373
3374
return 0;
3375
}
3376
EXPORT_SYMBOL(drm_atomic_helper_swap_state);
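
/*
 * Example (illustrative sketch only, not used by this file): a hypothetical
 * driver implementing the five-step sequence documented above in its
 * &drm_mode_config_funcs.atomic_commit hook. foo_program_hw() stands in for
 * the driver-specific code that writes the new state to the hardware; every
 * other call is existing helper API.
 *
 *	static int foo_atomic_commit(struct drm_device *dev,
 *				     struct drm_atomic_state *state,
 *				     bool nonblock)
 *	{
 *		int ret;
 *
 *		// 1. Prepare framebuffers with the staged state.
 *		ret = drm_atomic_helper_prepare_planes(dev, state);
 *		if (ret)
 *			return ret;
 *
 *		// 2. Do any other step that might fail here.
 *
 *		// 3. Swap the staged state into the obj->state pointers.
 *		ret = drm_atomic_helper_swap_state(state, true);
 *		if (ret) {
 *			drm_atomic_helper_unprepare_planes(dev, state);
 *			return ret;
 *		}
 *
 *		// 4. Commit the new state to the hardware (driver specific).
 *		foo_program_hw(dev, state);
 *
 *		// 5. @state now holds the old state; clean it up.
 *		drm_atomic_helper_cleanup_planes(dev, state);
 *
 *		return 0;
 *	}
 */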

/**
 * drm_atomic_helper_update_plane - Helper for primary plane update using atomic
 * @plane: plane object to update
 * @crtc: owning CRTC for the plane
 * @fb: framebuffer to flip onto plane
 * @crtc_x: x offset of primary plane on @crtc
 * @crtc_y: y offset of primary plane on @crtc
 * @crtc_w: width of primary plane rectangle on @crtc
 * @crtc_h: height of primary plane rectangle on @crtc
 * @src_x: x offset of @fb for panning
 * @src_y: y offset of @fb for panning
 * @src_w: width of source rectangle in @fb
 * @src_h: height of source rectangle in @fb
 * @ctx: lock acquire context
 *
 * Provides a default plane update handler using the atomic driver interface.
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
int drm_atomic_helper_update_plane(struct drm_plane *plane,
				   struct drm_crtc *crtc,
				   struct drm_framebuffer *fb,
				   int crtc_x, int crtc_y,
				   unsigned int crtc_w, unsigned int crtc_h,
				   uint32_t src_x, uint32_t src_y,
				   uint32_t src_w, uint32_t src_h,
				   struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_atomic_state *state;
	struct drm_plane_state *plane_state;
	int ret = 0;

	state = drm_atomic_state_alloc(plane->dev);
	if (!state)
		return -ENOMEM;

	state->acquire_ctx = ctx;
	plane_state = drm_atomic_get_plane_state(state, plane);
	if (IS_ERR(plane_state)) {
		ret = PTR_ERR(plane_state);
		goto fail;
	}

	ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
	if (ret != 0)
		goto fail;
	drm_atomic_set_fb_for_plane(plane_state, fb);
	plane_state->crtc_x = crtc_x;
	plane_state->crtc_y = crtc_y;
	plane_state->crtc_w = crtc_w;
	plane_state->crtc_h = crtc_h;
	plane_state->src_x = src_x;
	plane_state->src_y = src_y;
	plane_state->src_w = src_w;
	plane_state->src_h = src_h;

	if (plane == crtc->cursor)
		state->legacy_cursor_update = true;

	ret = drm_atomic_commit(state);
fail:
	drm_atomic_state_put(state);
	return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_update_plane);

/**
 * drm_atomic_helper_disable_plane - Helper for primary plane disable using atomic
 * @plane: plane to disable
 * @ctx: lock acquire context
 *
 * Provides a default plane disable handler using the atomic driver interface.
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
int drm_atomic_helper_disable_plane(struct drm_plane *plane,
				    struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_atomic_state *state;
	struct drm_plane_state *plane_state;
	int ret = 0;

	state = drm_atomic_state_alloc(plane->dev);
	if (!state)
		return -ENOMEM;

	state->acquire_ctx = ctx;
	plane_state = drm_atomic_get_plane_state(state, plane);
	if (IS_ERR(plane_state)) {
		ret = PTR_ERR(plane_state);
		goto fail;
	}

	if (plane_state->crtc && plane_state->crtc->cursor == plane)
		plane_state->state->legacy_cursor_update = true;

	ret = __drm_atomic_helper_disable_plane(plane, plane_state);
	if (ret != 0)
		goto fail;

	ret = drm_atomic_commit(state);
fail:
	drm_atomic_state_put(state);
	return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_disable_plane);
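
/*
 * Example (illustrative sketch only, not used by this file): a hypothetical
 * driver wiring the two legacy plane hooks above into its &drm_plane_funcs,
 * together with the usual atomic plane state helpers:
 *
 *	static const struct drm_plane_funcs foo_plane_funcs = {
 *		.update_plane		= drm_atomic_helper_update_plane,
 *		.disable_plane		= drm_atomic_helper_disable_plane,
 *		.destroy		= drm_plane_cleanup,
 *		.reset			= drm_atomic_helper_plane_reset,
 *		.atomic_duplicate_state	= drm_atomic_helper_plane_duplicate_state,
 *		.atomic_destroy_state	= drm_atomic_helper_plane_destroy_state,
 *	};
 */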

/**
 * drm_atomic_helper_set_config - set a new config from userspace
 * @set: mode set configuration
 * @ctx: lock acquisition context
 *
 * Provides a default CRTC set_config handler using the atomic driver interface.
 *
 * NOTE: For backwards compatibility with old userspace this automatically
 * resets the "link-status" property to GOOD, to force any link
 * re-training. The SETCRTC ioctl does not define whether an update needs
 * a full modeset or just a plane update, hence we're allowed to do
 * that. See also drm_connector_set_link_status_property().
 *
 * Returns:
 * Returns 0 on success, negative errno numbers on failure.
 */
int drm_atomic_helper_set_config(struct drm_mode_set *set,
				 struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_atomic_state *state;
	struct drm_crtc *crtc = set->crtc;
	int ret = 0;

	state = drm_atomic_state_alloc(crtc->dev);
	if (!state)
		return -ENOMEM;

	state->acquire_ctx = ctx;
	ret = __drm_atomic_helper_set_config(set, state);
	if (ret != 0)
		goto fail;

	ret = handle_conflicting_encoders(state, true);
	if (ret)
		goto fail;

	ret = drm_atomic_commit(state);

fail:
	drm_atomic_state_put(state);
	return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_set_config);

/**
 * drm_atomic_helper_disable_all - disable all currently active outputs
 * @dev: DRM device
 * @ctx: lock acquisition context
 *
 * Loops through all connectors, finding those that aren't turned off and then
 * turns them off by setting their DPMS mode to OFF and deactivating the CRTC
 * that they are connected to.
 *
 * This is used for example in suspend/resume to disable all currently active
 * functions when suspending. If you just want to shut down everything at e.g.
 * driver unload, look at drm_atomic_helper_shutdown().
 *
 * Note that if callers haven't already acquired all modeset locks this might
 * return -EDEADLK, which must be handled by calling drm_modeset_backoff().
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 *
 * See also:
 * drm_atomic_helper_suspend(), drm_atomic_helper_resume() and
 * drm_atomic_helper_shutdown().
 */
int drm_atomic_helper_disable_all(struct drm_device *dev,
				  struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_atomic_state *state;
	struct drm_connector_state *conn_state;
	struct drm_connector *conn;
	struct drm_plane_state *plane_state;
	struct drm_plane *plane;
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	int ret, i;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return -ENOMEM;

	state->acquire_ctx = ctx;

	drm_for_each_crtc(crtc, dev) {
		crtc_state = drm_atomic_get_crtc_state(state, crtc);
		if (IS_ERR(crtc_state)) {
			ret = PTR_ERR(crtc_state);
			goto free;
		}

		crtc_state->active = false;

		ret = drm_atomic_set_mode_prop_for_crtc(crtc_state, NULL);
		if (ret < 0)
			goto free;

		ret = drm_atomic_add_affected_planes(state, crtc);
		if (ret < 0)
			goto free;

		ret = drm_atomic_add_affected_connectors(state, crtc);
		if (ret < 0)
			goto free;
	}

	for_each_new_connector_in_state(state, conn, conn_state, i) {
		ret = drm_atomic_set_crtc_for_connector(conn_state, NULL);
		if (ret < 0)
			goto free;
	}

	for_each_new_plane_in_state(state, plane, plane_state, i) {
		ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
		if (ret < 0)
			goto free;

		drm_atomic_set_fb_for_plane(plane_state, NULL);
	}

	ret = drm_atomic_commit(state);
free:
	drm_atomic_state_put(state);
	return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_disable_all);

/**
 * drm_atomic_helper_reset_crtc - reset the active outputs of a CRTC
 * @crtc: DRM CRTC
 * @ctx: lock acquisition context
 *
 * Reset the active outputs by indicating that connectors have changed.
 * This implies a reset of all active components available between the CRTC and
 * connectors.
 *
 * A variant of this function exists with
 * drm_bridge_helper_reset_crtc(), dedicated to bridges.
 *
 * NOTE: This relies on resetting &drm_crtc_state.connectors_changed.
 * For drivers which optimize out unnecessary modesets this will result in
 * a no-op commit, achieving nothing.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_atomic_helper_reset_crtc(struct drm_crtc *crtc,
				 struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_atomic_state *state;
	struct drm_crtc_state *crtc_state;
	int ret;

	state = drm_atomic_state_alloc(crtc->dev);
	if (!state)
		return -ENOMEM;

	state->acquire_ctx = ctx;

	crtc_state = drm_atomic_get_crtc_state(state, crtc);
	if (IS_ERR(crtc_state)) {
		ret = PTR_ERR(crtc_state);
		goto out;
	}

	crtc_state->connectors_changed = true;

	ret = drm_atomic_commit(state);
out:
	drm_atomic_state_put(state);

	return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_reset_crtc);

/**
 * drm_atomic_helper_shutdown - shut down all CRTCs
 * @dev: DRM device
 *
 * This shuts down all CRTCs, which is useful for driver unloading. Shutdown on
 * suspend should instead be handled with drm_atomic_helper_suspend(), since
 * that also takes a snapshot of the modeset state to be restored on resume.
 *
 * This is just a convenience wrapper around drm_atomic_helper_disable_all(),
 * and it is the atomic version of drm_helper_force_disable_all().
 */
void drm_atomic_helper_shutdown(struct drm_device *dev)
{
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	if (dev == NULL)
		return;

	DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret);

	ret = drm_atomic_helper_disable_all(dev, &ctx);
	if (ret)
		drm_err(dev,
			"Disabling all crtc's during unload failed with %i\n",
			ret);

	DRM_MODESET_LOCK_ALL_END(dev, ctx, ret);
}
EXPORT_SYMBOL(drm_atomic_helper_shutdown);
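
/*
 * Example (illustrative sketch only, not used by this file): a hypothetical
 * platform driver calling drm_atomic_helper_shutdown() from its remove and
 * shutdown callbacks, the typical usage pattern. foo_platform_* and the
 * void-returning remove callback are assumptions of this sketch.
 *
 *	static void foo_platform_remove(struct platform_device *pdev)
 *	{
 *		struct drm_device *drm = platform_get_drvdata(pdev);
 *
 *		drm_dev_unregister(drm);
 *		drm_atomic_helper_shutdown(drm);
 *	}
 *
 *	static void foo_platform_shutdown(struct platform_device *pdev)
 *	{
 *		drm_atomic_helper_shutdown(platform_get_drvdata(pdev));
 *	}
 */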

/**
 * drm_atomic_helper_duplicate_state - duplicate an atomic state object
 * @dev: DRM device
 * @ctx: lock acquisition context
 *
 * Makes a copy of the current atomic state by looping over all objects and
 * duplicating their respective states. This is used for example by suspend/
 * resume support code to save the state prior to suspend such that it can
 * be restored upon resume.
 *
 * Note that this treats atomic state as persistent between save and restore.
 * Drivers must make sure that this is possible and won't result in confusion
 * or erroneous behaviour.
 *
 * Note that if callers haven't already acquired all modeset locks this might
 * return -EDEADLK, which must be handled by calling drm_modeset_backoff().
 *
 * Returns:
 * A pointer to the copy of the atomic state object on success or an
 * ERR_PTR()-encoded error code on failure.
 *
 * See also:
 * drm_atomic_helper_suspend(), drm_atomic_helper_resume()
 */
struct drm_atomic_state *
drm_atomic_helper_duplicate_state(struct drm_device *dev,
				  struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_atomic_state *state;
	struct drm_connector *conn;
	struct drm_connector_list_iter conn_iter;
	struct drm_plane *plane;
	struct drm_crtc *crtc;
	int err = 0;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return ERR_PTR(-ENOMEM);

	state->acquire_ctx = ctx;
	state->duplicated = true;

	drm_for_each_crtc(crtc, dev) {
		struct drm_crtc_state *crtc_state;

		crtc_state = drm_atomic_get_crtc_state(state, crtc);
		if (IS_ERR(crtc_state)) {
			err = PTR_ERR(crtc_state);
			goto free;
		}
	}

	drm_for_each_plane(plane, dev) {
		struct drm_plane_state *plane_state;

		plane_state = drm_atomic_get_plane_state(state, plane);
		if (IS_ERR(plane_state)) {
			err = PTR_ERR(plane_state);
			goto free;
		}
	}

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(conn, &conn_iter) {
		struct drm_connector_state *conn_state;

		conn_state = drm_atomic_get_connector_state(state, conn);
		if (IS_ERR(conn_state)) {
			err = PTR_ERR(conn_state);
			drm_connector_list_iter_end(&conn_iter);
			goto free;
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	/* clear the acquire context so that it isn't accidentally reused */
	state->acquire_ctx = NULL;

free:
	if (err < 0) {
		drm_atomic_state_put(state);
		state = ERR_PTR(err);
	}

	return state;
}
EXPORT_SYMBOL(drm_atomic_helper_duplicate_state);

/**
 * drm_atomic_helper_suspend - subsystem-level suspend helper
 * @dev: DRM device
 *
 * Duplicates the current atomic state, disables all active outputs and then
 * returns a pointer to the original atomic state to the caller. Drivers can
 * pass this pointer to the drm_atomic_helper_resume() helper upon resume to
 * restore the output configuration that was active at the time the system
 * entered suspend.
 *
 * Note that it is potentially unsafe to use this. The atomic state object
 * returned by this function is assumed to be persistent. Drivers must ensure
 * that this holds true. Before calling this function, drivers must make sure
 * to suspend fbdev emulation so that nothing can be using the device.
 *
 * Returns:
 * A pointer to a copy of the state before suspend on success or an ERR_PTR()-
 * encoded error code on failure. Drivers should store the returned atomic
 * state object and pass it to the drm_atomic_helper_resume() helper upon
 * resume.
 *
 * See also:
 * drm_atomic_helper_duplicate_state(), drm_atomic_helper_disable_all(),
 * drm_atomic_helper_resume(), drm_atomic_helper_commit_duplicated_state()
 */
struct drm_atomic_state *drm_atomic_helper_suspend(struct drm_device *dev)
{
	struct drm_modeset_acquire_ctx ctx;
	struct drm_atomic_state *state;
	int err;

	/* This can never be returned, but it makes the compiler happy */
	state = ERR_PTR(-EINVAL);

	DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, err);

	state = drm_atomic_helper_duplicate_state(dev, &ctx);
	if (IS_ERR(state))
		goto unlock;

	err = drm_atomic_helper_disable_all(dev, &ctx);
	if (err < 0) {
		drm_atomic_state_put(state);
		state = ERR_PTR(err);
		goto unlock;
	}

unlock:
	DRM_MODESET_LOCK_ALL_END(dev, ctx, err);
	if (err)
		return ERR_PTR(err);

	return state;
}
EXPORT_SYMBOL(drm_atomic_helper_suspend);

/**
 * drm_atomic_helper_commit_duplicated_state - commit duplicated state
 * @state: duplicated atomic state to commit
 * @ctx: pointer to acquire_ctx to use for commit.
 *
 * The state returned by drm_atomic_helper_duplicate_state() and
 * drm_atomic_helper_suspend() is partially invalid, and needs to
 * be fixed up before commit.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 *
 * See also:
 * drm_atomic_helper_suspend()
 */
int drm_atomic_helper_commit_duplicated_state(struct drm_atomic_state *state,
					      struct drm_modeset_acquire_ctx *ctx)
{
	int i, ret;
	struct drm_plane *plane;
	struct drm_plane_state *new_plane_state;
	struct drm_connector *connector;
	struct drm_connector_state *new_conn_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state;

	state->acquire_ctx = ctx;

	for_each_new_plane_in_state(state, plane, new_plane_state, i)
		state->planes[i].old_state = plane->state;

	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i)
		state->crtcs[i].old_state = crtc->state;

	for_each_new_connector_in_state(state, connector, new_conn_state, i)
		state->connectors[i].old_state = connector->state;

	ret = drm_atomic_commit(state);

	state->acquire_ctx = NULL;

	return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_commit_duplicated_state);

/**
 * drm_atomic_helper_resume - subsystem-level resume helper
 * @dev: DRM device
 * @state: atomic state to resume to
 *
 * Calls drm_mode_config_reset() to synchronize hardware and software states,
 * grabs all modeset locks and commits the atomic state object. This can be
 * used in conjunction with the drm_atomic_helper_suspend() helper to
 * implement suspend/resume for drivers that support atomic mode-setting.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 *
 * See also:
 * drm_atomic_helper_suspend()
 */
int drm_atomic_helper_resume(struct drm_device *dev,
			     struct drm_atomic_state *state)
{
	struct drm_modeset_acquire_ctx ctx;
	int err;

	drm_mode_config_reset(dev);

	DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, err);

	err = drm_atomic_helper_commit_duplicated_state(state, &ctx);

	DRM_MODESET_LOCK_ALL_END(dev, ctx, err);
	drm_atomic_state_put(state);

	return err;
}
EXPORT_SYMBOL(drm_atomic_helper_resume);
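
/*
 * Example (illustrative sketch only, not used by this file): hypothetical
 * system sleep callbacks pairing drm_atomic_helper_suspend() and
 * drm_atomic_helper_resume(). struct foo_device, its embedded &drm_device
 * and the suspend_state pointer are assumptions of this sketch; many drivers
 * instead use drm_mode_config_helper_suspend()/_resume(), which wrap this
 * pattern.
 *
 *	static int foo_pm_suspend(struct device *dev)
 *	{
 *		struct foo_device *foo = dev_get_drvdata(dev);
 *
 *		foo->suspend_state = drm_atomic_helper_suspend(&foo->drm);
 *		if (IS_ERR(foo->suspend_state))
 *			return PTR_ERR(foo->suspend_state);
 *
 *		return 0;
 *	}
 *
 *	static int foo_pm_resume(struct device *dev)
 *	{
 *		struct foo_device *foo = dev_get_drvdata(dev);
 *
 *		return drm_atomic_helper_resume(&foo->drm, foo->suspend_state);
 *	}
 */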

static int page_flip_common(struct drm_atomic_state *state,
			    struct drm_crtc *crtc,
			    struct drm_framebuffer *fb,
			    struct drm_pending_vblank_event *event,
			    uint32_t flags)
{
	struct drm_plane *plane = crtc->primary;
	struct drm_plane_state *plane_state;
	struct drm_crtc_state *crtc_state;
	int ret = 0;

	crtc_state = drm_atomic_get_crtc_state(state, crtc);
	if (IS_ERR(crtc_state))
		return PTR_ERR(crtc_state);

	crtc_state->event = event;
	crtc_state->async_flip = flags & DRM_MODE_PAGE_FLIP_ASYNC;

	plane_state = drm_atomic_get_plane_state(state, plane);
	if (IS_ERR(plane_state))
		return PTR_ERR(plane_state);

	ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
	if (ret != 0)
		return ret;
	drm_atomic_set_fb_for_plane(plane_state, fb);

	/* Make sure we don't accidentally do a full modeset. */
	state->allow_modeset = false;
	if (!crtc_state->active) {
		drm_dbg_atomic(crtc->dev,
			       "[CRTC:%d:%s] disabled, rejecting legacy flip\n",
			       crtc->base.id, crtc->name);
		return -EINVAL;
	}

	return ret;
}

/**
 * drm_atomic_helper_page_flip - execute a legacy page flip
 * @crtc: DRM CRTC
 * @fb: DRM framebuffer
 * @event: optional DRM event to signal upon completion
 * @flags: flip flags for non-vblank sync'ed updates
 * @ctx: lock acquisition context
 *
 * Provides a default &drm_crtc_funcs.page_flip implementation
 * using the atomic driver interface.
 *
 * Returns:
 * Returns 0 on success, negative errno numbers on failure.
 *
 * See also:
 * drm_atomic_helper_page_flip_target()
 */
int drm_atomic_helper_page_flip(struct drm_crtc *crtc,
				struct drm_framebuffer *fb,
				struct drm_pending_vblank_event *event,
				uint32_t flags,
				struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_plane *plane = crtc->primary;
	struct drm_atomic_state *state;
	int ret = 0;

	state = drm_atomic_state_alloc(plane->dev);
	if (!state)
		return -ENOMEM;

	state->acquire_ctx = ctx;

	ret = page_flip_common(state, crtc, fb, event, flags);
	if (ret != 0)
		goto fail;

	ret = drm_atomic_nonblocking_commit(state);
fail:
	drm_atomic_state_put(state);
	return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_page_flip);
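
/*
 * Example (illustrative sketch only, not used by this file): a hypothetical
 * driver using the atomic helpers above for the legacy CRTC entry points in
 * its &drm_crtc_funcs:
 *
 *	static const struct drm_crtc_funcs foo_crtc_funcs = {
 *		.set_config		= drm_atomic_helper_set_config,
 *		.page_flip		= drm_atomic_helper_page_flip,
 *		.destroy		= drm_crtc_cleanup,
 *		.reset			= drm_atomic_helper_crtc_reset,
 *		.atomic_duplicate_state	= drm_atomic_helper_crtc_duplicate_state,
 *		.atomic_destroy_state	= drm_atomic_helper_crtc_destroy_state,
 *	};
 */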

/**
 * drm_atomic_helper_page_flip_target - do page flip on target vblank period.
 * @crtc: DRM CRTC
 * @fb: DRM framebuffer
 * @event: optional DRM event to signal upon completion
 * @flags: flip flags for non-vblank sync'ed updates
 * @target: target vblank period when the flip should take effect
 * @ctx: lock acquisition context
 *
 * Provides a default &drm_crtc_funcs.page_flip_target implementation.
 * Similar to drm_atomic_helper_page_flip(), but with an extra parameter
 * specifying the target vblank period in which the flip should take effect.
 *
 * Returns:
 * Returns 0 on success, negative errno numbers on failure.
 */
int drm_atomic_helper_page_flip_target(struct drm_crtc *crtc,
				       struct drm_framebuffer *fb,
				       struct drm_pending_vblank_event *event,
				       uint32_t flags,
				       uint32_t target,
				       struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_plane *plane = crtc->primary;
	struct drm_atomic_state *state;
	struct drm_crtc_state *crtc_state;
	int ret = 0;

	state = drm_atomic_state_alloc(plane->dev);
	if (!state)
		return -ENOMEM;

	state->acquire_ctx = ctx;

	ret = page_flip_common(state, crtc, fb, event, flags);
	if (ret != 0)
		goto fail;

	crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
	if (WARN_ON(!crtc_state)) {
		ret = -EINVAL;
		goto fail;
	}
	crtc_state->target_vblank = target;

	ret = drm_atomic_nonblocking_commit(state);
fail:
	drm_atomic_state_put(state);
	return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_page_flip_target);

/**
 * drm_atomic_helper_bridge_propagate_bus_fmt() - Propagate output format to
 *						  the input end of a bridge
 * @bridge: bridge control structure
 * @bridge_state: new bridge state
 * @crtc_state: new CRTC state
 * @conn_state: new connector state
 * @output_fmt: tested output bus format
 * @num_input_fmts: will contain the size of the returned array
 *
 * This helper is a pluggable implementation of the
 * &drm_bridge_funcs.atomic_get_input_bus_fmts operation for bridges that don't
 * modify the bus configuration between their input and their output. It
 * returns an array of input formats with a single element set to @output_fmt.
 *
 * RETURNS:
 * a valid format array of size @num_input_fmts, or NULL if the allocation
 * failed
 */
u32 *
drm_atomic_helper_bridge_propagate_bus_fmt(struct drm_bridge *bridge,
					   struct drm_bridge_state *bridge_state,
					   struct drm_crtc_state *crtc_state,
					   struct drm_connector_state *conn_state,
					   u32 output_fmt,
					   unsigned int *num_input_fmts)
{
	u32 *input_fmts;

	input_fmts = kzalloc(sizeof(*input_fmts), GFP_KERNEL);
	if (!input_fmts) {
		*num_input_fmts = 0;
		return NULL;
	}

	*num_input_fmts = 1;
	input_fmts[0] = output_fmt;
	return input_fmts;
}
EXPORT_SYMBOL(drm_atomic_helper_bridge_propagate_bus_fmt);
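
/*
 * Example (illustrative sketch only, not used by this file): a hypothetical
 * transparent bridge plugging this helper into its &drm_bridge_funcs so that
 * the negotiated output bus format is simply propagated to its input:
 *
 *	static const struct drm_bridge_funcs foo_bridge_funcs = {
 *		.atomic_reset		= drm_atomic_helper_bridge_reset,
 *		.atomic_duplicate_state	= drm_atomic_helper_bridge_duplicate_state,
 *		.atomic_destroy_state	= drm_atomic_helper_bridge_destroy_state,
 *		.atomic_get_input_bus_fmts = drm_atomic_helper_bridge_propagate_bus_fmt,
 *	};
 */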