Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
torvalds
GitHub Repository: torvalds/linux
Path: blob/master/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
26519 views
1
// SPDX-License-Identifier: GPL-2.0
2
/*
3
* (C) COPYRIGHT 2018 ARM Limited. All rights reserved.
4
* Author: James.Qian.Wang <[email protected]>
5
*
6
*/
7
#include <linux/interrupt.h>
8
9
#include <drm/drm_atomic.h>
10
#include <drm/drm_atomic_helper.h>
11
#include <drm/drm_drv.h>
12
#include <drm/drm_fbdev_dma.h>
13
#include <drm/drm_gem_dma_helper.h>
14
#include <drm/drm_gem_framebuffer_helper.h>
15
#include <drm/drm_managed.h>
16
#include <drm/drm_probe_helper.h>
17
#include <drm/drm_vblank.h>
18
19
#include "komeda_dev.h"
20
#include "komeda_framebuffer.h"
21
#include "komeda_kms.h"
22
23
/* Standard DRM file_operations backed by the GEM DMA helpers */
DEFINE_DRM_GEM_DMA_FOPS(komeda_cma_fops);
24
25
static int komeda_gem_dma_dumb_create(struct drm_file *file,
26
struct drm_device *dev,
27
struct drm_mode_create_dumb *args)
28
{
29
struct komeda_dev *mdev = dev->dev_private;
30
u32 pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
31
32
args->pitch = ALIGN(pitch, mdev->chip.bus_width);
33
34
return drm_gem_dma_dumb_create_internal(file, dev, args);
35
}
36
37
static irqreturn_t komeda_kms_irq_handler(int irq, void *data)
38
{
39
struct drm_device *drm = data;
40
struct komeda_dev *mdev = drm->dev_private;
41
struct komeda_kms_dev *kms = to_kdev(drm);
42
struct komeda_events evts;
43
irqreturn_t status;
44
u32 i;
45
46
/* Call into the CHIP to recognize events */
47
memset(&evts, 0, sizeof(evts));
48
status = mdev->funcs->irq_handler(mdev, &evts);
49
50
komeda_print_events(&evts, drm);
51
52
/* Notify the crtc to handle the events */
53
for (i = 0; i < kms->n_crtcs; i++)
54
komeda_crtc_handle_event(&kms->crtcs[i], &evts);
55
56
return status;
57
}
58
59
/* DRM driver description: atomic modesetting with GEM DMA-backed buffers.
 * Dumb-buffer creation is overridden so the pitch can be aligned to the
 * chip's bus width (see komeda_gem_dma_dumb_create()).
 */
static const struct drm_driver komeda_kms_driver = {
	.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
	DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(komeda_gem_dma_dumb_create),
	DRM_FBDEV_DMA_DRIVER_OPS,
	.fops = &komeda_cma_fops,
	.name = "komeda",
	.desc = "Arm Komeda Display Processor driver",
	.major = 0,
	.minor = 1,
};
69
70
static void komeda_kms_atomic_commit_hw_done(struct drm_atomic_state *state)
71
{
72
struct drm_device *dev = state->dev;
73
struct komeda_kms_dev *kms = to_kdev(dev);
74
int i;
75
76
for (i = 0; i < kms->n_crtcs; i++) {
77
struct komeda_crtc *kcrtc = &kms->crtcs[i];
78
79
if (kcrtc->base.state->active) {
80
struct completion *flip_done = NULL;
81
if (kcrtc->base.state->event)
82
flip_done = kcrtc->base.state->event->base.completion;
83
komeda_crtc_flush_and_wait_for_flip_done(kcrtc, flip_done);
84
}
85
}
86
drm_atomic_helper_commit_hw_done(state);
87
}
88
89
/* Custom atomic_commit_tail: follows the default helper sequence
 * (disables -> planes -> enables), but signals hw_done through
 * komeda_kms_atomic_commit_hw_done() so each active CRTC is flushed and
 * its flip completed first. The statement order here is significant.
 */
static void komeda_kms_commit_tail(struct drm_atomic_state *old_state)
{
	struct drm_device *dev = old_state->dev;
	bool fence_cookie = dma_fence_begin_signalling();

	drm_atomic_helper_commit_modeset_disables(dev, old_state);

	drm_atomic_helper_commit_planes(dev, old_state,
					DRM_PLANE_COMMIT_ACTIVE_ONLY);

	drm_atomic_helper_commit_modeset_enables(dev, old_state);

	komeda_kms_atomic_commit_hw_done(old_state);

	drm_atomic_helper_wait_for_flip_done(dev, old_state);

	/* Fences must be signalled before plane cleanup */
	dma_fence_end_signalling(fence_cookie);

	drm_atomic_helper_cleanup_planes(dev, old_state);
}
109
110
/* Hook our commit_tail into the atomic helper framework */
static const struct drm_mode_config_helper_funcs komeda_mode_config_helpers = {
	.atomic_commit_tail = komeda_kms_commit_tail,
};
113
114
static int komeda_plane_state_list_add(struct drm_plane_state *plane_st,
115
struct list_head *zorder_list)
116
{
117
struct komeda_plane_state *new = to_kplane_st(plane_st);
118
struct komeda_plane_state *node, *last;
119
120
last = list_empty(zorder_list) ?
121
NULL : list_last_entry(zorder_list, typeof(*last), zlist_node);
122
123
/* Considering the list sequence is zpos increasing, so if list is empty
124
* or the zpos of new node bigger than the last node in list, no need
125
* loop and just insert the new one to the tail of the list.
126
*/
127
if (!last || (new->base.zpos > last->base.zpos)) {
128
list_add_tail(&new->zlist_node, zorder_list);
129
return 0;
130
}
131
132
/* Build the list by zpos increasing */
133
list_for_each_entry(node, zorder_list, zlist_node) {
134
if (new->base.zpos < node->base.zpos) {
135
list_add_tail(&new->zlist_node, &node->zlist_node);
136
break;
137
} else if (node->base.zpos == new->base.zpos) {
138
struct drm_plane *a = node->base.plane;
139
struct drm_plane *b = new->base.plane;
140
141
/* Komeda doesn't support setting a same zpos for
142
* different planes.
143
*/
144
DRM_DEBUG_ATOMIC("PLANE: %s and PLANE: %s are configured same zpos: %d.\n",
145
a->name, b->name, node->base.zpos);
146
return -EINVAL;
147
}
148
}
149
150
return 0;
151
}
152
153
/* Recompute normalized_zpos for every plane on @crtc in this commit.
 *
 * All planes in crtc_st->plane_mask are pulled into @state, sorted by
 * user-requested zpos, then assigned consecutive normalized zorders.
 * A layer-split plane consumes two zorders (left + right layer). Also
 * records the highest zorder used by a slave plane in
 * kcrtc_st->max_slave_zorder.
 *
 * Returns 0 on success, -EINVAL on duplicate zpos, or an error from
 * drm_atomic_get_plane_state().
 */
static int komeda_crtc_normalize_zpos(struct drm_crtc *crtc,
				      struct drm_crtc_state *crtc_st)
{
	struct drm_atomic_state *state = crtc_st->state;
	struct komeda_crtc *kcrtc = to_kcrtc(crtc);
	struct komeda_crtc_state *kcrtc_st = to_kcrtc_st(crtc_st);
	struct komeda_plane_state *kplane_st;
	struct drm_plane_state *plane_st;
	struct drm_plane *plane;
	struct list_head zorder_list;
	int order = 0, err;
	u32 slave_zpos = 0;

	DRM_DEBUG_ATOMIC("[CRTC:%d:%s] calculating normalized zpos values\n",
			 crtc->base.id, crtc->name);

	INIT_LIST_HEAD(&zorder_list);

	/* This loop also adds all affected planes into the new state */
	drm_for_each_plane_mask(plane, crtc->dev, crtc_st->plane_mask) {
		plane_st = drm_atomic_get_plane_state(state, plane);
		if (IS_ERR(plane_st))
			return PTR_ERR(plane_st);

		/* Build a list sorted by increasing zpos */
		err = komeda_plane_state_list_add(plane_st, &zorder_list);
		if (err)
			return err;
	}

	kcrtc_st->max_slave_zorder = 0;

	/* Walk the sorted list and hand out consecutive normalized zorders */
	list_for_each_entry(kplane_st, &zorder_list, zlist_node) {
		plane_st = &kplane_st->base;
		plane = plane_st->plane;

		plane_st->normalized_zpos = order++;
		/* When layer_split has been enabled, one plane will be handled
		 * by two separate komeda layers (left/right), which may need
		 * two zorders.
		 * - zorder: for left_layer for left display part.
		 * - zorder + 1: will be reserved for right layer.
		 */
		if (to_kplane_st(plane_st)->layer_split)
			order++;

		DRM_DEBUG_ATOMIC("[PLANE:%d:%s] zpos:%d, normalized zpos: %d\n",
				 plane->base.id, plane->name,
				 plane_st->zpos, plane_st->normalized_zpos);

		/* calculate max slave zorder */
		if (has_bit(drm_plane_index(plane), kcrtc->slave_planes)) {
			slave_zpos = plane_st->normalized_zpos;
			if (to_kplane_st(plane_st)->layer_split)
				slave_zpos++;
			kcrtc_st->max_slave_zorder =
				max(slave_zpos, kcrtc_st->max_slave_zorder);
		}
	}

	/* Force the zpos update through on this commit */
	crtc_st->zpos_changed = true;

	return 0;
}
217
218
static int komeda_kms_check(struct drm_device *dev,
219
struct drm_atomic_state *state)
220
{
221
struct drm_crtc *crtc;
222
struct drm_crtc_state *new_crtc_st;
223
int i, err;
224
225
err = drm_atomic_helper_check_modeset(dev, state);
226
if (err)
227
return err;
228
229
/* Komeda need to re-calculate resource assumption in every commit
230
* so need to add all affected_planes (even unchanged) to
231
* drm_atomic_state.
232
*/
233
for_each_new_crtc_in_state(state, crtc, new_crtc_st, i) {
234
err = drm_atomic_add_affected_planes(state, crtc);
235
if (err)
236
return err;
237
238
err = komeda_crtc_normalize_zpos(crtc, new_crtc_st);
239
if (err)
240
return err;
241
}
242
243
err = drm_atomic_helper_check_planes(dev, state);
244
if (err)
245
return err;
246
247
return 0;
248
}
249
250
/* Mode-config entry points: komeda framebuffer creation and custom atomic
 * check, with the stock helper for commit.
 */
static const struct drm_mode_config_funcs komeda_mode_config_funcs = {
	.fb_create = komeda_fb_create,
	.atomic_check = komeda_kms_check,
	.atomic_commit = drm_atomic_helper_commit,
};
255
256
static void komeda_kms_mode_config_init(struct komeda_kms_dev *kms,
257
struct komeda_dev *mdev)
258
{
259
struct drm_mode_config *config = &kms->base.mode_config;
260
261
drm_mode_config_init(&kms->base);
262
263
komeda_kms_setup_crtcs(kms, mdev);
264
265
/* Get value from dev */
266
config->min_width = 0;
267
config->min_height = 0;
268
config->max_width = 4096;
269
config->max_height = 4096;
270
271
config->funcs = &komeda_mode_config_funcs;
272
config->helper_private = &komeda_mode_config_helpers;
273
}
274
275
/* Allocate and register the KMS device wrapping @mdev.
 *
 * Builds the full KMS side in order: mode config, private objects, planes,
 * vblank, CRTCs, writeback connectors, then IRQ, polling and final device
 * registration. On failure the goto labels unwind what was set up; the
 * drm_device itself is devm-managed and freed with mdev->dev.
 *
 * Returns the new komeda_kms_dev or an ERR_PTR() on failure.
 */
struct komeda_kms_dev *komeda_kms_attach(struct komeda_dev *mdev)
{
	struct komeda_kms_dev *kms;
	struct drm_device *drm;
	int err;

	kms = devm_drm_dev_alloc(mdev->dev, &komeda_kms_driver,
				 struct komeda_kms_dev, base);
	if (IS_ERR(kms))
		return kms;

	drm = &kms->base;

	drm->dev_private = mdev;

	komeda_kms_mode_config_init(kms, mdev);

	err = komeda_kms_add_private_objs(kms, mdev);
	if (err)
		goto cleanup_mode_config;

	err = komeda_kms_add_planes(kms, mdev);
	if (err)
		goto cleanup_mode_config;

	/* CRTC count must be known before vblank init */
	err = drm_vblank_init(drm, kms->n_crtcs);
	if (err)
		goto cleanup_mode_config;

	err = komeda_kms_add_crtcs(kms, mdev);
	if (err)
		goto cleanup_mode_config;

	err = komeda_kms_add_wb_connectors(kms, mdev);
	if (err)
		goto cleanup_mode_config;

	drm_mode_config_reset(drm);

	/* IRQ is devm-managed, so only poll needs explicit unwinding below */
	err = devm_request_irq(drm->dev, mdev->irq,
			       komeda_kms_irq_handler, IRQF_SHARED,
			       drm->driver->name, drm);
	if (err)
		goto cleanup_mode_config;

	drm_kms_helper_poll_init(drm);

	err = drm_dev_register(drm, 0);
	if (err)
		goto free_interrupts;

	return kms;

free_interrupts:
	drm_kms_helper_poll_fini(drm);
cleanup_mode_config:
	drm_mode_config_cleanup(drm);
	komeda_kms_cleanup_private_objs(kms);
	drm->dev_private = NULL;
	return ERR_PTR(err);
}
336
337
/* Tear down the KMS device: unregister first so no new userspace access,
 * stop polling, shut the pipes down, then release mode-config resources.
 * The teardown order mirrors (reverse of) komeda_kms_attach().
 */
void komeda_kms_detach(struct komeda_kms_dev *kms)
{
	struct drm_device *drm = &kms->base;

	drm_dev_unregister(drm);
	drm_kms_helper_poll_fini(drm);
	drm_atomic_helper_shutdown(drm);
	drm_mode_config_cleanup(drm);
	komeda_kms_cleanup_private_objs(kms);
	drm->dev_private = NULL;
}
348
349
void komeda_kms_shutdown(struct komeda_kms_dev *kms)
350
{
351
struct drm_device *drm = &kms->base;
352
353
drm_atomic_helper_shutdown(drm);
354
}
355
356