GitHub Repository: awilliam/linux-vfio
Path: blob/master/drivers/gpu/drm/i915/intel_ringbuffer.c
/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <[email protected]>
 *    Zou Nan hai <[email protected]>
 *    Xiang Hai hao<[email protected]>
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drv.h"
#include "i915_drm.h"
#include "i915_trace.h"
#include "intel_drv.h"

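/*
 * Bytes free between the hardware head and our software tail, keeping
 * 8 bytes of headroom so the tail never advances right up to the head.
 */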
static inline int ring_space(struct intel_ring_buffer *ring)
{
	int space = (ring->head & HEAD_ADDR) - (ring->tail + 8);
	if (space < 0)
		space += ring->size;
	return space;
}

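/* Hand out the next seqno for the device; 0 is reserved to mean "no seqno". */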
static u32 i915_gem_get_seqno(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 seqno;

	seqno = dev_priv->next_seqno;

	/* reserve 0 for non-seqno */
	if (++dev_priv->next_seqno == 0)
		dev_priv->next_seqno = 1;

	return seqno;
}

static int
render_ring_flush(struct intel_ring_buffer *ring,
		  u32 invalidate_domains,
		  u32 flush_domains)
{
	struct drm_device *dev = ring->dev;
	u32 cmd;
	int ret;

	/*
	 * read/write caches:
	 *
	 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
	 * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
	 * also flushed at 2d versus 3d pipeline switches.
	 *
	 * read-only caches:
	 *
	 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
	 * MI_READ_FLUSH is set, and is always flushed on 965.
	 *
	 * I915_GEM_DOMAIN_COMMAND may not exist?
	 *
	 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
	 * invalidated when MI_EXE_FLUSH is set.
	 *
	 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
	 * invalidated with every MI_FLUSH.
	 *
	 * TLBs:
	 *
	 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
	 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
	 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
	 * are flushed at any MI_FLUSH.
	 */

	cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
	if ((invalidate_domains|flush_domains) &
	    I915_GEM_DOMAIN_RENDER)
		cmd &= ~MI_NO_WRITE_FLUSH;
	if (INTEL_INFO(dev)->gen < 4) {
		/*
		 * On the 965, the sampler cache always gets flushed
		 * and this bit is reserved.
		 */
		if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
			cmd |= MI_READ_FLUSH;
	}
	if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
		cmd |= MI_EXE_FLUSH;

	if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
	    (IS_G4X(dev) || IS_GEN5(dev)))
		cmd |= MI_INVALIDATE_ISP;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

static void ring_write_tail(struct intel_ring_buffer *ring,
			    u32 value)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	I915_WRITE_TAIL(ring, value);
}

u32 intel_ring_get_active_head(struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	u32 acthd_reg = INTEL_INFO(ring->dev)->gen >= 4 ?
			RING_ACTHD(ring->mmio_base) : ACTHD;

	return I915_READ(acthd_reg);
}

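/*
 * Bring a ring up from scratch: stop it, program the start address,
 * force the head back to zero (working around G45 resets that leave a
 * stale head), enable it and verify that the hardware agreed.
 */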
static int init_ring_common(struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	struct drm_i915_gem_object *obj = ring->obj;
	u32 head;

	/* Stop the ring if it's running. */
	I915_WRITE_CTL(ring, 0);
	I915_WRITE_HEAD(ring, 0);
	ring->write_tail(ring, 0);

	/* Initialize the ring. */
	I915_WRITE_START(ring, obj->gtt_offset);
	head = I915_READ_HEAD(ring) & HEAD_ADDR;

	/* G45 ring initialization fails to reset head to zero */
	if (head != 0) {
		DRM_DEBUG_KMS("%s head not reset to zero "
			      "ctl %08x head %08x tail %08x start %08x\n",
			      ring->name,
			      I915_READ_CTL(ring),
			      I915_READ_HEAD(ring),
			      I915_READ_TAIL(ring),
			      I915_READ_START(ring));

		I915_WRITE_HEAD(ring, 0);

		if (I915_READ_HEAD(ring) & HEAD_ADDR) {
			DRM_ERROR("failed to set %s head to zero "
				  "ctl %08x head %08x tail %08x start %08x\n",
				  ring->name,
				  I915_READ_CTL(ring),
				  I915_READ_HEAD(ring),
				  I915_READ_TAIL(ring),
				  I915_READ_START(ring));
		}
	}

	I915_WRITE_CTL(ring,
			((ring->size - PAGE_SIZE) & RING_NR_PAGES)
			| RING_REPORT_64K | RING_VALID);

	/* If the head is still not zero, the ring is dead */
	if ((I915_READ_CTL(ring) & RING_VALID) == 0 ||
	    I915_READ_START(ring) != obj->gtt_offset ||
	    (I915_READ_HEAD(ring) & HEAD_ADDR) != 0) {
		DRM_ERROR("%s initialization failed "
			  "ctl %08x head %08x tail %08x start %08x\n",
			  ring->name,
			  I915_READ_CTL(ring),
			  I915_READ_HEAD(ring),
			  I915_READ_TAIL(ring),
			  I915_READ_START(ring));
		return -EIO;
	}

	if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
		i915_kernel_lost_context(ring->dev);
	else {
		ring->head = I915_READ_HEAD(ring);
		ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
		ring->space = ring_space(ring);
	}

	return 0;
}

/*
 * 965+ support PIPE_CONTROL commands, which provide finer grained control
 * over cache flushing.
 */
struct pipe_control {
	struct drm_i915_gem_object *obj;
	volatile u32 *cpu_page;
	u32 gtt_offset;
};

static int
init_pipe_control(struct intel_ring_buffer *ring)
{
	struct pipe_control *pc;
	struct drm_i915_gem_object *obj;
	int ret;

	if (ring->private)
		return 0;

	pc = kmalloc(sizeof(*pc), GFP_KERNEL);
	if (!pc)
		return -ENOMEM;

	obj = i915_gem_alloc_object(ring->dev, 4096);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate seqno page\n");
		ret = -ENOMEM;
		goto err;
	}
	obj->cache_level = I915_CACHE_LLC;

	ret = i915_gem_object_pin(obj, 4096, true);
	if (ret)
		goto err_unref;

	pc->gtt_offset = obj->gtt_offset;
	pc->cpu_page = kmap(obj->pages[0]);
	if (pc->cpu_page == NULL)
		goto err_unpin;

	pc->obj = obj;
	ring->private = pc;
	return 0;

err_unpin:
	i915_gem_object_unpin(obj);
err_unref:
	drm_gem_object_unreference(&obj->base);
err:
	kfree(pc);
	return ret;
}

static void
cleanup_pipe_control(struct intel_ring_buffer *ring)
{
	struct pipe_control *pc = ring->private;
	struct drm_i915_gem_object *obj;

	if (!ring->private)
		return;

	obj = pc->obj;
	kunmap(obj->pages[0]);
	i915_gem_object_unpin(obj);
	drm_gem_object_unreference(&obj->base);

	kfree(pc);
	ring->private = NULL;
}

static int init_render_ring(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = init_ring_common(ring);

	if (INTEL_INFO(dev)->gen > 3) {
		int mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
		if (IS_GEN6(dev) || IS_GEN7(dev))
			mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE;
		I915_WRITE(MI_MODE, mode);
	}

	if (INTEL_INFO(dev)->gen >= 6) {
	} else if (IS_GEN5(dev)) {
		ret = init_pipe_control(ring);
		if (ret)
			return ret;
	}

	return ret;
}

static void render_ring_cleanup(struct intel_ring_buffer *ring)
{
	if (!ring->private)
		return;

	cleanup_pipe_control(ring);
}

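/*
 * Emit a semaphore-mailbox update on @ring that writes @seqno into sync
 * register @i belonging to one of the other two rings; the arithmetic
 * below selects which ring that is.
 */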
static void
update_semaphore(struct intel_ring_buffer *ring, int i, u32 seqno)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int id;

	/*
	 * cs -> 1 = vcs, 0 = bcs
	 * vcs -> 1 = bcs, 0 = cs,
	 * bcs -> 1 = cs, 0 = vcs.
	 */
	id = ring - dev_priv->ring;
	id += 2 - i;
	id %= 3;

	intel_ring_emit(ring,
			MI_SEMAPHORE_MBOX |
			MI_SEMAPHORE_REGISTER |
			MI_SEMAPHORE_UPDATE);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring,
			RING_SYNC_0(dev_priv->ring[id].mmio_base) + 4*i);
}

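/*
 * Gen6 request emission: update the semaphore mailboxes of both other
 * rings, write the new seqno to the hardware status page and raise a
 * user interrupt.
 */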
static int
gen6_add_request(struct intel_ring_buffer *ring,
		 u32 *result)
{
	u32 seqno;
	int ret;

	ret = intel_ring_begin(ring, 10);
	if (ret)
		return ret;

	seqno = i915_gem_get_seqno(ring->dev);
	update_semaphore(ring, 0, seqno);
	update_semaphore(ring, 1, seqno);

	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, MI_USER_INTERRUPT);
	intel_ring_advance(ring);

	*result = seqno;
	return 0;
}

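/*
 * Emit a semaphore wait on @ring so that it stalls until the mailbox
 * written by @to has reached @seqno.
 */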
int
intel_ring_sync(struct intel_ring_buffer *ring,
		struct intel_ring_buffer *to,
		u32 seqno)
{
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring,
			MI_SEMAPHORE_MBOX |
			MI_SEMAPHORE_REGISTER |
			intel_ring_sync_index(ring, to) << 17 |
			MI_SEMAPHORE_COMPARE);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

#define PIPE_CONTROL_FLUSH(ring__, addr__)				\
do {									\
	intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |	\
		 PIPE_CONTROL_DEPTH_STALL | 2);				\
	intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT);	\
	intel_ring_emit(ring__, 0);					\
	intel_ring_emit(ring__, 0);					\
} while (0)

static int
pc_render_add_request(struct intel_ring_buffer *ring,
		      u32 *result)
{
	struct drm_device *dev = ring->dev;
	u32 seqno = i915_gem_get_seqno(dev);
	struct pipe_control *pc = ring->private;
	u32 scratch_addr = pc->gtt_offset + 128;
	int ret;

	/* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
	 * incoherent with writes to memory, i.e. completely fubar,
	 * so we need to use PIPE_NOTIFY instead.
	 *
	 * However, we also need to workaround the qword write
	 * incoherence by flushing the 6 PIPE_NOTIFY buffers out to
	 * memory before requesting an interrupt.
	 */
	ret = intel_ring_begin(ring, 32);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
			PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
	intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, 0);
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128; /* write to separate cachelines */
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
			PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
			PIPE_CONTROL_NOTIFY);
	intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, 0);
	intel_ring_advance(ring);

	*result = seqno;
	return 0;
}

static int
render_ring_add_request(struct intel_ring_buffer *ring,
			u32 *result)
{
	struct drm_device *dev = ring->dev;
	u32 seqno = i915_gem_get_seqno(dev);
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, MI_USER_INTERRUPT);
	intel_ring_advance(ring);

	*result = seqno;
	return 0;
}

static u32
ring_get_seqno(struct intel_ring_buffer *ring)
{
	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}

static u32
pc_render_get_seqno(struct intel_ring_buffer *ring)
{
	struct pipe_control *pc = ring->private;
	return pc->cpu_page[0];
}

static void
ironlake_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	dev_priv->gt_irq_mask &= ~mask;
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}

static void
ironlake_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	dev_priv->gt_irq_mask |= mask;
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}

static void
i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	dev_priv->irq_mask &= ~mask;
	I915_WRITE(IMR, dev_priv->irq_mask);
	POSTING_READ(IMR);
}

static void
i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	dev_priv->irq_mask |= mask;
	I915_WRITE(IMR, dev_priv->irq_mask);
	POSTING_READ(IMR);
}

static bool
render_ring_get_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!dev->irq_enabled)
		return false;

	spin_lock(&ring->irq_lock);
	if (ring->irq_refcount++ == 0) {
		if (HAS_PCH_SPLIT(dev))
			ironlake_enable_irq(dev_priv,
					    GT_PIPE_NOTIFY | GT_USER_INTERRUPT);
		else
			i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
	}
	spin_unlock(&ring->irq_lock);

	return true;
}

static void
render_ring_put_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	spin_lock(&ring->irq_lock);
	if (--ring->irq_refcount == 0) {
		if (HAS_PCH_SPLIT(dev))
			ironlake_disable_irq(dev_priv,
					     GT_USER_INTERRUPT |
					     GT_PIPE_NOTIFY);
		else
			i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
	}
	spin_unlock(&ring->irq_lock);
}

void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	u32 mmio = 0;

	/* The ring status page addresses are no longer next to the rest of
	 * the ring registers as of gen7.
	 */
	if (IS_GEN7(dev)) {
		switch (ring->id) {
		case RING_RENDER:
			mmio = RENDER_HWS_PGA_GEN7;
			break;
		case RING_BLT:
			mmio = BLT_HWS_PGA_GEN7;
			break;
		case RING_BSD:
			mmio = BSD_HWS_PGA_GEN7;
			break;
		}
	} else if (IS_GEN6(ring->dev)) {
		mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
	} else {
		mmio = RING_HWS_PGA(ring->mmio_base);
	}

	I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
	POSTING_READ(mmio);
}

static int
bsd_ring_flush(struct intel_ring_buffer *ring,
	       u32 invalidate_domains,
	       u32 flush_domains)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_FLUSH);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);
	return 0;
}

static int
ring_add_request(struct intel_ring_buffer *ring,
		 u32 *result)
{
	u32 seqno;
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	seqno = i915_gem_get_seqno(ring->dev);

	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, MI_USER_INTERRUPT);
	intel_ring_advance(ring);

	*result = seqno;
	return 0;
}

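/*
 * Reference-counted interrupt enable for gen6 rings: the first user
 * unmasks @rflag in the ring's IMR and @gflag in the GT IMR; later users
 * just bump the refcount.
 */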
static bool
gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!dev->irq_enabled)
		return false;

	spin_lock(&ring->irq_lock);
	if (ring->irq_refcount++ == 0) {
		ring->irq_mask &= ~rflag;
		I915_WRITE_IMR(ring, ring->irq_mask);
		ironlake_enable_irq(dev_priv, gflag);
	}
	spin_unlock(&ring->irq_lock);

	return true;
}

static void
gen6_ring_put_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	spin_lock(&ring->irq_lock);
	if (--ring->irq_refcount == 0) {
		ring->irq_mask |= rflag;
		I915_WRITE_IMR(ring, ring->irq_mask);
		ironlake_disable_irq(dev_priv, gflag);
	}
	spin_unlock(&ring->irq_lock);
}

static bool
bsd_ring_get_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!dev->irq_enabled)
		return false;

	spin_lock(&ring->irq_lock);
	if (ring->irq_refcount++ == 0) {
		if (IS_G4X(dev))
			i915_enable_irq(dev_priv, I915_BSD_USER_INTERRUPT);
		else
			ironlake_enable_irq(dev_priv, GT_BSD_USER_INTERRUPT);
	}
	spin_unlock(&ring->irq_lock);

	return true;
}
static void
bsd_ring_put_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	spin_lock(&ring->irq_lock);
	if (--ring->irq_refcount == 0) {
		if (IS_G4X(dev))
			i915_disable_irq(dev_priv, I915_BSD_USER_INTERRUPT);
		else
			ironlake_disable_irq(dev_priv, GT_BSD_USER_INTERRUPT);
	}
	spin_unlock(&ring->irq_lock);
}

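/*
 * Kick off a batch buffer at @offset in the GTT using the 965-style
 * MI_BATCH_BUFFER_START encoding.
 */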
static int
ring_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring,
			MI_BATCH_BUFFER_START | (2 << 6) |
			MI_BATCH_NON_SECURE_I965);
	intel_ring_emit(ring, offset);
	intel_ring_advance(ring);

	return 0;
}

static int
render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
				u32 offset, u32 len)
{
	struct drm_device *dev = ring->dev;
	int ret;

	if (IS_I830(dev) || IS_845G(dev)) {
		ret = intel_ring_begin(ring, 4);
		if (ret)
			return ret;

		intel_ring_emit(ring, MI_BATCH_BUFFER);
		intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
		intel_ring_emit(ring, offset + len - 8);
		intel_ring_emit(ring, 0);
	} else {
		ret = intel_ring_begin(ring, 2);
		if (ret)
			return ret;

		if (INTEL_INFO(dev)->gen >= 4) {
			intel_ring_emit(ring,
					MI_BATCH_BUFFER_START | (2 << 6) |
					MI_BATCH_NON_SECURE_I965);
			intel_ring_emit(ring, offset);
		} else {
			intel_ring_emit(ring,
					MI_BATCH_BUFFER_START | (2 << 6));
			intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
		}
	}
	intel_ring_advance(ring);

	return 0;
}

static void cleanup_status_page(struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	struct drm_i915_gem_object *obj;

	obj = ring->status_page.obj;
	if (obj == NULL)
		return;

	kunmap(obj->pages[0]);
	i915_gem_object_unpin(obj);
	drm_gem_object_unreference(&obj->base);
	ring->status_page.obj = NULL;

	memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
}

static int init_status_page(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	int ret;

	obj = i915_gem_alloc_object(dev, 4096);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate status page\n");
		ret = -ENOMEM;
		goto err;
	}
	obj->cache_level = I915_CACHE_LLC;

	ret = i915_gem_object_pin(obj, 4096, true);
	if (ret != 0) {
		goto err_unref;
	}

	ring->status_page.gfx_addr = obj->gtt_offset;
	ring->status_page.page_addr = kmap(obj->pages[0]);
	if (ring->status_page.page_addr == NULL) {
		memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
		goto err_unpin;
	}
	ring->status_page.obj = obj;
	memset(ring->status_page.page_addr, 0, PAGE_SIZE);

	intel_ring_setup_status_page(ring);
	DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
			 ring->name, ring->status_page.gfx_addr);

	return 0;

err_unpin:
	i915_gem_object_unpin(obj);
err_unref:
	drm_gem_object_unreference(&obj->base);
err:
	return ret;
}

int intel_init_ring_buffer(struct drm_device *dev,
			   struct intel_ring_buffer *ring)
{
	struct drm_i915_gem_object *obj;
	int ret;

	ring->dev = dev;
	INIT_LIST_HEAD(&ring->active_list);
	INIT_LIST_HEAD(&ring->request_list);
	INIT_LIST_HEAD(&ring->gpu_write_list);

	init_waitqueue_head(&ring->irq_queue);
	spin_lock_init(&ring->irq_lock);
	ring->irq_mask = ~0;

	if (I915_NEED_GFX_HWS(dev)) {
		ret = init_status_page(ring);
		if (ret)
			return ret;
	}

	obj = i915_gem_alloc_object(dev, ring->size);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate ringbuffer\n");
		ret = -ENOMEM;
		goto err_hws;
	}

	ring->obj = obj;

	ret = i915_gem_object_pin(obj, PAGE_SIZE, true);
	if (ret)
		goto err_unref;

	ring->map.size = ring->size;
	ring->map.offset = dev->agp->base + obj->gtt_offset;
	ring->map.type = 0;
	ring->map.flags = 0;
	ring->map.mtrr = 0;

	drm_core_ioremap_wc(&ring->map, dev);
	if (ring->map.handle == NULL) {
		DRM_ERROR("Failed to map ringbuffer.\n");
		ret = -EINVAL;
		goto err_unpin;
	}

	ring->virtual_start = ring->map.handle;
	ret = ring->init(ring);
	if (ret)
		goto err_unmap;

	/* Workaround an erratum on the i830 which causes a hang if
	 * the TAIL pointer points to within the last 2 cachelines
	 * of the buffer.
	 */
	ring->effective_size = ring->size;
	if (IS_I830(ring->dev))
		ring->effective_size -= 128;

	return 0;

err_unmap:
	drm_core_ioremapfree(&ring->map, dev);
err_unpin:
	i915_gem_object_unpin(obj);
err_unref:
	drm_gem_object_unreference(&obj->base);
	ring->obj = NULL;
err_hws:
	cleanup_status_page(ring);
	return ret;
}

void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv;
	int ret;

	if (ring->obj == NULL)
		return;

	/* Disable the ring buffer. The ring must be idle at this point */
	dev_priv = ring->dev->dev_private;
	ret = intel_wait_ring_idle(ring);
	if (ret)
		DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
			  ring->name, ret);

	I915_WRITE_CTL(ring, 0);

	drm_core_ioremapfree(&ring->map, ring->dev);

	i915_gem_object_unpin(ring->obj);
	drm_gem_object_unreference(&ring->obj->base);
	ring->obj = NULL;

	if (ring->cleanup)
		ring->cleanup(ring);

	cleanup_status_page(ring);
}

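/*
 * Pad the remainder of the ring with MI_NOOPs (two dwords per iteration)
 * and wrap the software tail back to the start of the buffer.
 */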
static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
{
	unsigned int *virt;
	int rem = ring->size - ring->tail;

	if (ring->space < rem) {
		int ret = intel_wait_ring_buffer(ring, rem);
		if (ret)
			return ret;
	}

	virt = (unsigned int *)(ring->virtual_start + ring->tail);
	rem /= 8;
	while (rem--) {
		*virt++ = MI_NOOP;
		*virt++ = MI_NOOP;
	}

	ring->tail = 0;
	ring->space = ring_space(ring);

	return 0;
}

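/*
 * Wait up to three seconds for @n bytes of ring space, first trying the
 * cheap head value cached in the status page and then polling the HEAD
 * register directly.
 */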
int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long end;
	u32 head;

	/* If the reported head position has wrapped or hasn't advanced,
	 * fallback to the slow and accurate path.
	 */
	head = intel_read_status_page(ring, 4);
	if (head > ring->head) {
		ring->head = head;
		ring->space = ring_space(ring);
		if (ring->space >= n)
			return 0;
	}

	trace_i915_ring_wait_begin(ring);
	end = jiffies + 3 * HZ;
	do {
		ring->head = I915_READ_HEAD(ring);
		ring->space = ring_space(ring);
		if (ring->space >= n) {
			trace_i915_ring_wait_end(ring);
			return 0;
		}

		if (dev->primary->master) {
			struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
			if (master_priv->sarea_priv)
				master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
		}

		msleep(1);
		if (atomic_read(&dev_priv->mm.wedged))
			return -EAGAIN;
	} while (!time_after(jiffies, end));
	trace_i915_ring_wait_end(ring);
	return -EBUSY;
}

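/*
 * Reserve space for @num_dwords dwords, wrapping to the start of the
 * buffer and/or waiting for free space as needed; the caller then emits
 * its dwords and calls intel_ring_advance().
 */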
int intel_ring_begin(struct intel_ring_buffer *ring,
		     int num_dwords)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	int n = 4*num_dwords;
	int ret;

	if (unlikely(atomic_read(&dev_priv->mm.wedged)))
		return -EIO;

	if (unlikely(ring->tail + n > ring->effective_size)) {
		ret = intel_wrap_ring_buffer(ring);
		if (unlikely(ret))
			return ret;
	}

	if (unlikely(ring->space < n)) {
		ret = intel_wait_ring_buffer(ring, n);
		if (unlikely(ret))
			return ret;
	}

	ring->space -= n;
	return 0;
}

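/* Wrap the software tail to the ring size and hand it to the hardware. */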
void intel_ring_advance(struct intel_ring_buffer *ring)
{
	ring->tail &= ring->size - 1;
	ring->write_tail(ring, ring->tail);
}

static const struct intel_ring_buffer render_ring = {
	.name			= "render ring",
	.id			= RING_RENDER,
	.mmio_base		= RENDER_RING_BASE,
	.size			= 32 * PAGE_SIZE,
	.init			= init_render_ring,
	.write_tail		= ring_write_tail,
	.flush			= render_ring_flush,
	.add_request		= render_ring_add_request,
	.get_seqno		= ring_get_seqno,
	.irq_get		= render_ring_get_irq,
	.irq_put		= render_ring_put_irq,
	.dispatch_execbuffer	= render_ring_dispatch_execbuffer,
	.cleanup		= render_ring_cleanup,
};

/* ring buffer for bit-stream decoder */

static const struct intel_ring_buffer bsd_ring = {
	.name			= "bsd ring",
	.id			= RING_BSD,
	.mmio_base		= BSD_RING_BASE,
	.size			= 32 * PAGE_SIZE,
	.init			= init_ring_common,
	.write_tail		= ring_write_tail,
	.flush			= bsd_ring_flush,
	.add_request		= ring_add_request,
	.get_seqno		= ring_get_seqno,
	.irq_get		= bsd_ring_get_irq,
	.irq_put		= bsd_ring_put_irq,
	.dispatch_execbuffer	= ring_dispatch_execbuffer,
};

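/*
 * Tail writes on the gen6 BSD ring follow a fixed sequence: disable the
 * PSMI sleep messaging, wait for the idle indicator, write the tail,
 * then re-enable the messaging.
 */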
static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
				     u32 value)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;

	/* Every tail move must follow the sequence below */
	I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
		   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
		   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE);
	I915_WRITE(GEN6_BSD_RNCID, 0x0);

	if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
		      GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR) == 0,
		     50))
		DRM_ERROR("timed out waiting for IDLE Indicator\n");

	I915_WRITE_TAIL(ring, value);
	I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
		   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
		   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE);
}

static int gen6_ring_flush(struct intel_ring_buffer *ring,
			   u32 invalidate, u32 flush)
{
	uint32_t cmd;
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	cmd = MI_FLUSH_DW;
	if (invalidate & I915_GEM_GPU_DOMAINS)
		cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);
	return 0;
}

static int
gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
			      u32 offset, u32 len)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
	/* bit0-7 is the length on GEN6+ */
	intel_ring_emit(ring, offset);
	intel_ring_advance(ring);

	return 0;
}

static bool
gen6_render_ring_get_irq(struct intel_ring_buffer *ring)
{
	return gen6_ring_get_irq(ring,
				 GT_USER_INTERRUPT,
				 GEN6_RENDER_USER_INTERRUPT);
}

static void
gen6_render_ring_put_irq(struct intel_ring_buffer *ring)
{
	return gen6_ring_put_irq(ring,
				 GT_USER_INTERRUPT,
				 GEN6_RENDER_USER_INTERRUPT);
}

static bool
gen6_bsd_ring_get_irq(struct intel_ring_buffer *ring)
{
	return gen6_ring_get_irq(ring,
				 GT_GEN6_BSD_USER_INTERRUPT,
				 GEN6_BSD_USER_INTERRUPT);
}

static void
gen6_bsd_ring_put_irq(struct intel_ring_buffer *ring)
{
	return gen6_ring_put_irq(ring,
				 GT_GEN6_BSD_USER_INTERRUPT,
				 GEN6_BSD_USER_INTERRUPT);
}

/* ring buffer for Video Codec for Gen6+ */
static const struct intel_ring_buffer gen6_bsd_ring = {
	.name			= "gen6 bsd ring",
	.id			= RING_BSD,
	.mmio_base		= GEN6_BSD_RING_BASE,
	.size			= 32 * PAGE_SIZE,
	.init			= init_ring_common,
	.write_tail		= gen6_bsd_ring_write_tail,
	.flush			= gen6_ring_flush,
	.add_request		= gen6_add_request,
	.get_seqno		= ring_get_seqno,
	.irq_get		= gen6_bsd_ring_get_irq,
	.irq_put		= gen6_bsd_ring_put_irq,
	.dispatch_execbuffer	= gen6_ring_dispatch_execbuffer,
};

/* Blitter support (SandyBridge+) */

static bool
blt_ring_get_irq(struct intel_ring_buffer *ring)
{
	return gen6_ring_get_irq(ring,
				 GT_BLT_USER_INTERRUPT,
				 GEN6_BLITTER_USER_INTERRUPT);
}

static void
blt_ring_put_irq(struct intel_ring_buffer *ring)
{
	gen6_ring_put_irq(ring,
			  GT_BLT_USER_INTERRUPT,
			  GEN6_BLITTER_USER_INTERRUPT);
}

/* Workaround for some SNB steppings: each time the BLT engine ring tail
 * moves, the first command parsed from the ring must be
 * MI_BATCH_BUFFER_START.
 */
#define NEED_BLT_WORKAROUND(dev) \
	(IS_GEN6(dev) && (dev->pdev->revision < 8))

static inline struct drm_i915_gem_object *
to_blt_workaround(struct intel_ring_buffer *ring)
{
	return ring->private;
}

static int blt_ring_init(struct intel_ring_buffer *ring)
{
	if (NEED_BLT_WORKAROUND(ring->dev)) {
		struct drm_i915_gem_object *obj;
		u32 *ptr;
		int ret;

		obj = i915_gem_alloc_object(ring->dev, 4096);
		if (obj == NULL)
			return -ENOMEM;

		ret = i915_gem_object_pin(obj, 4096, true);
		if (ret) {
			drm_gem_object_unreference(&obj->base);
			return ret;
		}

		ptr = kmap(obj->pages[0]);
		*ptr++ = MI_BATCH_BUFFER_END;
		*ptr++ = MI_NOOP;
		kunmap(obj->pages[0]);

		ret = i915_gem_object_set_to_gtt_domain(obj, false);
		if (ret) {
			i915_gem_object_unpin(obj);
			drm_gem_object_unreference(&obj->base);
			return ret;
		}

		ring->private = obj;
	}

	return init_ring_common(ring);
}

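/*
 * When the workaround object is present, prepend an MI_BATCH_BUFFER_START
 * pointing at the dummy batch allocated above, so the first command the
 * BLT ring parses after the tail moves is always a batch start.
 */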
static int blt_ring_begin(struct intel_ring_buffer *ring,
			  int num_dwords)
{
	if (ring->private) {
		int ret = intel_ring_begin(ring, num_dwords+2);
		if (ret)
			return ret;

		intel_ring_emit(ring, MI_BATCH_BUFFER_START);
		intel_ring_emit(ring, to_blt_workaround(ring)->gtt_offset);

		return 0;
	} else
		return intel_ring_begin(ring, 4);
}

static int blt_ring_flush(struct intel_ring_buffer *ring,
			  u32 invalidate, u32 flush)
{
	uint32_t cmd;
	int ret;

	ret = blt_ring_begin(ring, 4);
	if (ret)
		return ret;

	cmd = MI_FLUSH_DW;
	if (invalidate & I915_GEM_DOMAIN_RENDER)
		cmd |= MI_INVALIDATE_TLB;
	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);
	return 0;
}

static void blt_ring_cleanup(struct intel_ring_buffer *ring)
{
	if (!ring->private)
		return;

	i915_gem_object_unpin(ring->private);
	drm_gem_object_unreference(ring->private);
	ring->private = NULL;
}

static const struct intel_ring_buffer gen6_blt_ring = {
	.name			= "blt ring",
	.id			= RING_BLT,
	.mmio_base		= BLT_RING_BASE,
	.size			= 32 * PAGE_SIZE,
	.init			= blt_ring_init,
	.write_tail		= ring_write_tail,
	.flush			= blt_ring_flush,
	.add_request		= gen6_add_request,
	.get_seqno		= ring_get_seqno,
	.irq_get		= blt_ring_get_irq,
	.irq_put		= blt_ring_put_irq,
	.dispatch_execbuffer	= gen6_ring_dispatch_execbuffer,
	.cleanup		= blt_ring_cleanup,
};

int intel_init_render_ring_buffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];

	*ring = render_ring;
	if (INTEL_INFO(dev)->gen >= 6) {
		ring->add_request = gen6_add_request;
		ring->irq_get = gen6_render_ring_get_irq;
		ring->irq_put = gen6_render_ring_put_irq;
	} else if (IS_GEN5(dev)) {
		ring->add_request = pc_render_add_request;
		ring->get_seqno = pc_render_get_seqno;
	}

	if (!I915_NEED_GFX_HWS(dev)) {
		ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
		memset(ring->status_page.page_addr, 0, PAGE_SIZE);
	}

	return intel_init_ring_buffer(dev, ring);
}

int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];

	*ring = render_ring;
	if (INTEL_INFO(dev)->gen >= 6) {
		ring->add_request = gen6_add_request;
		ring->irq_get = gen6_render_ring_get_irq;
		ring->irq_put = gen6_render_ring_put_irq;
	} else if (IS_GEN5(dev)) {
		ring->add_request = pc_render_add_request;
		ring->get_seqno = pc_render_get_seqno;
	}

	ring->dev = dev;
	INIT_LIST_HEAD(&ring->active_list);
	INIT_LIST_HEAD(&ring->request_list);
	INIT_LIST_HEAD(&ring->gpu_write_list);

	ring->size = size;
	ring->effective_size = ring->size;
	if (IS_I830(ring->dev))
		ring->effective_size -= 128;

	ring->map.offset = start;
	ring->map.size = size;
	ring->map.type = 0;
	ring->map.flags = 0;
	ring->map.mtrr = 0;

	drm_core_ioremap_wc(&ring->map, dev);
	if (ring->map.handle == NULL) {
		DRM_ERROR("can not ioremap virtual address for"
			  " ring buffer\n");
		return -ENOMEM;
	}

	ring->virtual_start = (void __force __iomem *)ring->map.handle;
	return 0;
}

int intel_init_bsd_ring_buffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[VCS];

	if (IS_GEN6(dev) || IS_GEN7(dev))
		*ring = gen6_bsd_ring;
	else
		*ring = bsd_ring;

	return intel_init_ring_buffer(dev, ring);
}

int intel_init_blt_ring_buffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[BCS];

	*ring = gen6_blt_ring;

	return intel_init_ring_buffer(dev, ring);
}