Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
torvalds
GitHub Repository: torvalds/linux
Path: blob/master/arch/alpha/kernel/core_titan.c
26439 views
1
// SPDX-License-Identifier: GPL-2.0
2
/*
3
* linux/arch/alpha/kernel/core_titan.c
4
*
5
* Code common to all TITAN core logic chips.
6
*/
7
8
#define __EXTERN_INLINE inline
9
#include <asm/io.h>
10
#include <asm/core_titan.h>
11
#undef __EXTERN_INLINE
12
13
#include <linux/module.h>
14
#include <linux/types.h>
15
#include <linux/pci.h>
16
#include <linux/sched.h>
17
#include <linux/init.h>
18
#include <linux/vmalloc.h>
19
#include <linux/memblock.h>
20
21
#include <asm/ptrace.h>
22
#include <asm/smp.h>
23
#include <asm/tlbflush.h>
24
#include <asm/vga.h>
25
26
#include "proto.h"
27
#include "pci_impl.h"
28
29
/* Save Titan configuration data as the console had it set up. */

/*
 * One slot per hose (up to 4 ports): the PCI window base (WSBA),
 * window mask (WSM) and translated base (TBA) CSRs, captured at init
 * time so titan_kill_one_pachip_port() can hand the console's own
 * translations back to SRM at reboot.
 */
struct
{
	unsigned long wsba[4];	/* window space base address CSRs */
	unsigned long wsm[4];	/* window space mask CSRs */
	unsigned long tba[4];	/* translated base address CSRs */
} saved_config[4] __attribute__((common));
/* NOTE(review): the __attribute__((common)) placement looks deliberate
   (likely related to section placement with -fno-common toolchains) —
   confirm before touching. */

/*
 * Is PChip 1 present? No need to query it more than once.
 * Latched from the CChip CSC register in titan_init_pachips().
 */
static int titan_pchip1_present;
42
43
/*
44
* BIOS32-style PCI interface:
45
*/
46
47
/* Set to 1 to trace PCI config-space accesses via printk. */
#define DEBUG_CONFIG 0

#if DEBUG_CONFIG
# define DBG_CFG(args) printk args
#else
# define DBG_CFG(args)
#endif
54
55
56
/*
57
* Routines to access TIG registers.
58
*/
59
static inline volatile unsigned long *
60
mk_tig_addr(int offset)
61
{
62
return (volatile unsigned long *)(TITAN_TIG_SPACE + (offset << 6));
63
}
64
65
static inline u8
66
titan_read_tig(int offset, u8 value)
67
{
68
volatile unsigned long *tig_addr = mk_tig_addr(offset);
69
return (u8)(*tig_addr & 0xff);
70
}
71
72
static inline void
73
titan_write_tig(int offset, u8 value)
74
{
75
volatile unsigned long *tig_addr = mk_tig_addr(offset);
76
*tig_addr = (unsigned long)value;
77
}
78
79
80
/*
81
* Given a bus, device, and function number, compute resulting
82
* configuration space address.  It is not safe to have concurrent
84
* invocations to configuration space access routines, but there
85
* really shouldn't be any need for this.
86
*
87
* Note that all config space accesses use Type 1 address format.
88
*
89
* Note also that type 1 is determined by non-zero bus number.
90
*
91
* Type 1:
92
*
93
* 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1
94
* 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0
95
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
96
* | | | | | | | | | | |B|B|B|B|B|B|B|B|D|D|D|D|D|F|F|F|R|R|R|R|R|R|0|1|
97
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
98
*
99
* 31:24 reserved
100
* 23:16 bus number (8 bits = 256 possible buses)
101
* 15:11 Device number (5 bits)
102
* 10:8 function number
103
* 7:2 register number
104
*
105
* Notes:
106
* The function number selects which function of a multi-function device
107
* (e.g., SCSI and Ethernet).
108
*
109
* The register selects a DWORD (32 bit) register offset. Hence it
110
* doesn't get shifted by 2 bits as we want to "drop" the bottom two
111
* bits.
112
*/
113
114
static int
115
mk_conf_addr(struct pci_bus *pbus, unsigned int device_fn, int where,
116
unsigned long *pci_addr, unsigned char *type1)
117
{
118
struct pci_controller *hose = pbus->sysdata;
119
unsigned long addr;
120
u8 bus = pbus->number;
121
122
DBG_CFG(("mk_conf_addr(bus=%d ,device_fn=0x%x, where=0x%x, "
123
"pci_addr=0x%p, type1=0x%p)\n",
124
bus, device_fn, where, pci_addr, type1));
125
126
if (!pbus->parent) /* No parent means peer PCI bus. */
127
bus = 0;
128
*type1 = (bus != 0);
129
130
addr = (bus << 16) | (device_fn << 8) | where;
131
addr |= hose->config_space_base;
132
133
*pci_addr = addr;
134
DBG_CFG(("mk_conf_addr: returning pci_addr 0x%lx\n", addr));
135
return 0;
136
}
137
138
/*
 * pci_ops.read: read a 1/2/4-byte config value at (devfn, where) on
 * @bus into *value.  Sub-word reads go through the byte/word load
 * intrinsics so the chip sees a correctly sized bus cycle.
 */
static int
titan_read_config(struct pci_bus *bus, unsigned int devfn, int where,
		  int size, u32 *value)
{
	unsigned long addr;
	unsigned char type1;

	if (mk_conf_addr(bus, devfn, where, &addr, &type1))
		return PCIBIOS_DEVICE_NOT_FOUND;

	switch (size) {
	case 1:
		*value = __kernel_ldbu(*(vucp)addr);
		break;
	case 2:
		*value = __kernel_ldwu(*(vusp)addr);
		break;
	case 4:
		*value = *(vuip)addr;
		break;
	}

	return PCIBIOS_SUCCESSFUL;
}
162
163
/*
 * pci_ops.write: write a 1/2/4-byte config value.  Each store is
 * followed by mb() and a read-back of the same location, forcing the
 * write out to the PCI bus before we return.
 */
static int
titan_write_config(struct pci_bus *bus, unsigned int devfn, int where,
		   int size, u32 value)
{
	unsigned long addr;
	unsigned char type1;

	if (mk_conf_addr(bus, devfn, where, &addr, &type1))
		return PCIBIOS_DEVICE_NOT_FOUND;

	switch (size) {
	case 1:
		__kernel_stb(value, *(vucp)addr);
		mb();
		__kernel_ldbu(*(vucp)addr);	/* read back to flush */
		break;
	case 2:
		__kernel_stw(value, *(vusp)addr);
		mb();
		__kernel_ldwu(*(vusp)addr);	/* read back to flush */
		break;
	case 4:
		*(vuip)addr = value;
		mb();
		*(vuip)addr;			/* read back to flush */
		break;
	}

	return PCIBIOS_SUCCESSFUL;
}
193
194
/* Config-space accessors handed to the generic Alpha PCI layer. */
struct pci_ops titan_pci_ops =
{
	.read = titan_read_config,
	.write = titan_write_config,
};
199
200
201
/*
 * Invalidate the scatter-gather TLB on the PA chip port behind @hose
 * for the PCI address range [start, end].  hose->index bit 0 selects
 * the pchip, bit 1 selects g_port vs a_port.
 */
void
titan_pci_tbi(struct pci_controller *hose, dma_addr_t start, dma_addr_t end)
{
	titan_pachip *pachip =
	  (hose->index & 1) ? TITAN_pachip1 : TITAN_pachip0;
	titan_pachip_port *port;
	volatile unsigned long *csr;
	unsigned long value;

	/* Get the right hose. */
	port = &pachip->g_port;
	if (hose->index & 2)
		port = &pachip->a_port;

	/* We can invalidate up to 8 tlb entries in a go.  The flush
	   matches against <31:16> in the pci address.
	   Note that gtlbi* and atlbi* are in the same place in the g_port
	   and a_port, respectively, so the g_port offset can be used
	   even if hose is an a_port */
	csr = &port->port_specific.g.gtlbia.csr;	/* invalidate-all */
	if (((start ^ end) & 0xffff0000) == 0)
		csr = &port->port_specific.g.gtlbiv.csr; /* single tag */

	/* For TBIA, it doesn't matter what value we write.  For TBI,
	   it's the shifted tag bits. */
	value = (start & 0xffff0000) >> 12;

	wmb();		/* order prior PTE updates before the flush */
	*csr = value;
	mb();
	*csr;		/* read back to make sure the write completed */
}
233
234
/*
 * Return non-zero when the port's APCTL CSR reports AGP present.
 */
static int
titan_query_agp(titan_pachip_port *port)
{
	union TPAchipPCTL pctl;

	/* set up APCTL */
	pctl.pctl_q_whole = port->pctl.csr;

	return pctl.pctl_r_bits.apctl_v_agp_present;

}
245
246
/*
 * Bring up one PA chip port as a PCI hose: allocate the controller and
 * its PIO resources, record the console's window translations (for
 * restore at reboot), program the four PCI-to-memory windows, enable
 * the monster window, and flush the port's SG TLB.
 */
static void __init
titan_init_one_pachip_port(titan_pachip_port *port, int index)
{
	struct pci_controller *hose;

	hose = alloc_pci_controller();
	if (index == 0)
		pci_isa_hose = hose;	/* hose 0 carries legacy ISA */
	hose->io_space = alloc_resource();
	hose->mem_space = alloc_resource();

	/*
	 * This is for userland consumption.  The 40-bit PIO bias that we
	 * use in the kernel through KSEG doesn't work in the page table
	 * based user mappings. (43-bit KSEG sign extends the physical
	 * address from bit 40 to hit the I/O bit - mapped addresses don't).
	 * So make sure we get the 43-bit PIO bias.
	 */
	hose->sparse_mem_base = 0;
	hose->sparse_io_base = 0;
	hose->dense_mem_base
	  = (TITAN_MEM(index) & 0xffffffffffUL) | 0x80000000000UL;
	hose->dense_io_base
	  = (TITAN_IO(index) & 0xffffffffffUL) | 0x80000000000UL;

	hose->config_space_base = TITAN_CONF(index);
	hose->index = index;

	hose->io_space->start = TITAN_IO(index) - TITAN_IO_BIAS;
	hose->io_space->end = hose->io_space->start + TITAN_IO_SPACE - 1;
	hose->io_space->name = pci_io_names[index];
	hose->io_space->flags = IORESOURCE_IO;

	hose->mem_space->start = TITAN_MEM(index) - TITAN_MEM_BIAS;
	hose->mem_space->end = hose->mem_space->start + 0xffffffff;
	hose->mem_space->name = pci_mem_names[index];
	hose->mem_space->flags = IORESOURCE_MEM;

	if (request_resource(&ioport_resource, hose->io_space) < 0)
		printk(KERN_ERR "Failed to request IO on hose %d\n", index);
	if (request_resource(&iomem_resource, hose->mem_space) < 0)
		printk(KERN_ERR "Failed to request MEM on hose %d\n", index);

	/*
	 * Save the existing PCI window translations.  SRM will
	 * need them when we go to reboot.
	 */
	saved_config[index].wsba[0] = port->wsba[0].csr;
	saved_config[index].wsm[0] = port->wsm[0].csr;
	saved_config[index].tba[0] = port->tba[0].csr;

	saved_config[index].wsba[1] = port->wsba[1].csr;
	saved_config[index].wsm[1] = port->wsm[1].csr;
	saved_config[index].tba[1] = port->tba[1].csr;

	saved_config[index].wsba[2] = port->wsba[2].csr;
	saved_config[index].wsm[2] = port->wsm[2].csr;
	saved_config[index].tba[2] = port->tba[2].csr;

	saved_config[index].wsba[3] = port->wsba[3].csr;
	saved_config[index].wsm[3] = port->wsm[3].csr;
	saved_config[index].tba[3] = port->tba[3].csr;

	/*
	 * Set up the PCI to main memory translation windows.
	 *
	 * Note: Window 3 on Titan is Scatter-Gather ONLY.
	 *
	 * Window 0 is scatter-gather 8MB at 8MB (for isa)
	 * Window 1 is direct access 1GB at 2GB
	 * Window 2 is scatter-gather 1GB at 3GB
	 */
	hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000,
				       SMP_CACHE_BYTES);
	hose->sg_isa->align_entry = 8; /* 64KB for ISA */

	hose->sg_pci = iommu_arena_new(hose, 0xc0000000, 0x40000000,
				       SMP_CACHE_BYTES);
	hose->sg_pci->align_entry = 4; /* Titan caches 4 PTEs at a time */

	/* WSBA low bits per the plan above: |3 = enabled + scatter-gather,
	   |1 = enabled, direct-mapped — confirm against the Titan spec. */
	port->wsba[0].csr = hose->sg_isa->dma_base | 3;
	port->wsm[0].csr = (hose->sg_isa->size - 1) & 0xfff00000;
	port->tba[0].csr = virt_to_phys(hose->sg_isa->ptes);

	port->wsba[1].csr = __direct_map_base | 1;
	port->wsm[1].csr = (__direct_map_size - 1) & 0xfff00000;
	port->tba[1].csr = 0;

	port->wsba[2].csr = hose->sg_pci->dma_base | 3;
	port->wsm[2].csr = (hose->sg_pci->size - 1) & 0xfff00000;
	port->tba[2].csr = virt_to_phys(hose->sg_pci->ptes);

	port->wsba[3].csr = 0;

	/* Enable the Monster Window to make DAC pci64 possible. */
	port->pctl.csr |= pctl_m_mwin;

	/*
	 * If it's an AGP port, initialize agplastwr.
	 */
	if (titan_query_agp(port))
		port->port_specific.a.agplastwr.csr = __direct_map_base;

	titan_pci_tbi(hose, 0, -1);
}
351
352
/*
 * Initialize every present PA chip port in hose-index order.
 * PChip 1 presence is latched once from CChip CSC bit 14.
 */
static void __init
titan_init_pachips(titan_pachip *pachip0, titan_pachip *pachip1)
{
	titan_pchip1_present = TITAN_cchip->csc.csr & 1L<<14;

	/* Init the ports in hose order... */
	titan_init_one_pachip_port(&pachip0->g_port, 0);	/* hose 0 */
	if (titan_pchip1_present)
		titan_init_one_pachip_port(&pachip1->g_port, 1);/* hose 1 */
	titan_init_one_pachip_port(&pachip0->a_port, 2);	/* hose 2 */
	if (titan_pchip1_present)
		titan_init_one_pachip_port(&pachip1->a_port, 3);/* hose 3 */
}
365
366
/*
 * Titan core-logic early init: record the boot CPU, widen the PIO
 * resource limits (I/O is handled as physical addresses with multiple
 * buses), establish the 1GB-at-2GB DMA direct map, bring up the PA
 * chips, and locate the VGA console hose if any.
 */
void __init
titan_init_arch(void)
{
#if 0
	/* Debug dump of the core-logic CSRs; normally compiled out. */
	printk("%s: titan_init_arch()\n", __func__);
	printk("%s: CChip registers:\n", __func__);
	printk("%s: CSR_CSC 0x%lx\n", __func__, TITAN_cchip->csc.csr);
	printk("%s: CSR_MTR 0x%lx\n", __func__, TITAN_cchip->mtr.csr);
	printk("%s: CSR_MISC 0x%lx\n", __func__, TITAN_cchip->misc.csr);
	printk("%s: CSR_DIM0 0x%lx\n", __func__, TITAN_cchip->dim0.csr);
	printk("%s: CSR_DIM1 0x%lx\n", __func__, TITAN_cchip->dim1.csr);
	printk("%s: CSR_DIR0 0x%lx\n", __func__, TITAN_cchip->dir0.csr);
	printk("%s: CSR_DIR1 0x%lx\n", __func__, TITAN_cchip->dir1.csr);
	printk("%s: CSR_DRIR 0x%lx\n", __func__, TITAN_cchip->drir.csr);

	printk("%s: DChip registers:\n", __func__);
	printk("%s: CSR_DSC 0x%lx\n", __func__, TITAN_dchip->dsc.csr);
	printk("%s: CSR_STR 0x%lx\n", __func__, TITAN_dchip->str.csr);
	printk("%s: CSR_DREV 0x%lx\n", __func__, TITAN_dchip->drev.csr);
#endif

	boot_cpuid = __hard_smp_processor_id();

	/* With multiple PCI busses, we play with I/O as physical addrs. */
	ioport_resource.end = ~0UL;
	iomem_resource.end = ~0UL;

	/* PCI DMA Direct Mapping is 1GB at 2GB. */
	__direct_map_base = 0x80000000;
	__direct_map_size = 0x40000000;

	/* Init the PA chip(s). */
	titan_init_pachips(TITAN_pachip0, TITAN_pachip1);

	/* Check for graphic console location (if any). */
	find_console_vga_hose();
}
403
404
static void
405
titan_kill_one_pachip_port(titan_pachip_port *port, int index)
406
{
407
port->wsba[0].csr = saved_config[index].wsba[0];
408
port->wsm[0].csr = saved_config[index].wsm[0];
409
port->tba[0].csr = saved_config[index].tba[0];
410
411
port->wsba[1].csr = saved_config[index].wsba[1];
412
port->wsm[1].csr = saved_config[index].wsm[1];
413
port->tba[1].csr = saved_config[index].tba[1];
414
415
port->wsba[2].csr = saved_config[index].wsba[2];
416
port->wsm[2].csr = saved_config[index].wsm[2];
417
port->tba[2].csr = saved_config[index].tba[2];
418
419
port->wsba[3].csr = saved_config[index].wsba[3];
420
port->wsm[3].csr = saved_config[index].wsm[3];
421
port->tba[3].csr = saved_config[index].tba[3];
422
}
423
424
/*
 * Restore the console's window setup on all ports: PChip 1's ports
 * first (when present), then PChip 0's.
 */
static void
titan_kill_pachips(titan_pachip *pachip0, titan_pachip *pachip1)
{
	if (titan_pchip1_present) {
		titan_kill_one_pachip_port(&pachip1->g_port, 1);
		titan_kill_one_pachip_port(&pachip1->a_port, 3);
	}
	titan_kill_one_pachip_port(&pachip0->g_port, 0);
	titan_kill_one_pachip_port(&pachip0->a_port, 2);
}
434
435
/*
 * Shutdown/restart hook: hand the console's PCI window translations
 * back before control returns to SRM.  @mode is unused here.
 */
void
titan_kill_arch(int mode)
{
	titan_kill_pachips(TITAN_pachip0, TITAN_pachip1);
}
440
441
442
/*
443
* IO map support.
444
*/
445
446
/*
 * Map a PIO port number to a kernel I/O cookie by adding the Titan
 * I/O bias.  FIXUP_IOADDR_VGA may first redirect legacy VGA ports —
 * see its definition for the exact condition.
 */
void __iomem *
titan_ioportmap(unsigned long addr)
{
	FIXUP_IOADDR_VGA(addr);
	return (void __iomem *)(addr + TITAN_IO_BIAS);
}
452
453
454
/*
 * Map a PCI bus memory address (hose index encoded in the high bits
 * via TITAN_HOSE_MASK) to a kernel I/O cookie.  Three cases, tried in
 * order:
 *   1. inside the DMA direct-map window: plain offset into Titan
 *      dense memory space, no page tables needed;
 *   2. inside the hose's PCI scatter-gather arena: build a real
 *      vmalloc-space mapping page by page from the arena's PTEs;
 *   3. otherwise assume a legacy (VGA) address and just add the bias.
 * Returns NULL if the hose is unknown or the SG mapping fails.
 */
void __iomem *
titan_ioremap(unsigned long addr, unsigned long size)
{
	int h = (addr & TITAN_HOSE_MASK) >> TITAN_HOSE_SHIFT;
	unsigned long baddr = addr & ~TITAN_HOSE_MASK;
	unsigned long last = baddr + size - 1;
	struct pci_controller *hose;
	struct vm_struct *area;
	unsigned long vaddr;
	unsigned long *ptes;
	unsigned long pfn;

#ifdef CONFIG_VGA_HOSE
	/*
	 * Adjust the address and hose, if necessary.
	 */
	if (pci_vga_hose && __is_mem_vga(addr)) {
		h = pci_vga_hose->index;
		addr += pci_vga_hose->mem_space->start;
	}
#endif

	/*
	 * Find the hose.
	 */
	for (hose = hose_head; hose; hose = hose->next)
		if (hose->index == h)
			break;
	if (!hose)
		return NULL;

	/*
	 * Is it direct-mapped?
	 */
	if ((baddr >= __direct_map_base) &&
	    ((baddr + size - 1) < __direct_map_base + __direct_map_size)) {
		vaddr = addr - __direct_map_base + TITAN_MEM_BIAS;
		return (void __iomem *) vaddr;
	}

	/*
	 * Check the scatter-gather arena.
	 */
	if (hose->sg_pci &&
	    baddr >= (unsigned long)hose->sg_pci->dma_base &&
	    last < (unsigned long)hose->sg_pci->dma_base + hose->sg_pci->size){

		/*
		 * Adjust the limits (mappings must be page aligned)
		 */
		baddr -= hose->sg_pci->dma_base;
		last -= hose->sg_pci->dma_base;
		baddr &= PAGE_MASK;
		size = PAGE_ALIGN(last) - baddr;

		/*
		 * Map it
		 */
		area = get_vm_area(size, VM_IOREMAP);
		if (!area) {
			printk("ioremap failed... no vm_area...\n");
			return NULL;
		}

		ptes = hose->sg_pci->ptes;
		for (vaddr = (unsigned long)area->addr;
		    baddr <= last;
		    baddr += PAGE_SIZE, vaddr += PAGE_SIZE) {
			pfn = ptes[baddr >> PAGE_SHIFT];
			/* Bit 0 of an arena PTE is its valid bit. */
			if (!(pfn & 1)) {
				printk("ioremap failed... pte not valid...\n");
				vfree(area->addr);
				return NULL;
			}
			pfn >>= 1;	/* make it a true pfn */

			if (__alpha_remap_area_pages(vaddr,
						     pfn << PAGE_SHIFT,
						     PAGE_SIZE, 0)) {
				printk("FAILED to remap_area_pages...\n");
				vfree(area->addr);
				return NULL;
			}
		}

		flush_tlb_all();

		/* Return the caller's sub-page offset within the mapping. */
		vaddr = (unsigned long)area->addr + (addr & ~PAGE_MASK);
		return (void __iomem *) vaddr;
	}

	/* Assume a legacy (read: VGA) address, and return appropriately. */
	return (void __iomem *)(addr + TITAN_MEM_BIAS);
}
548
549
void
550
titan_iounmap(volatile void __iomem *xaddr)
551
{
552
unsigned long addr = (unsigned long) xaddr;
553
if (addr >= VMALLOC_START)
554
vfree((void *)(PAGE_MASK & addr));
555
}
556
557
int
558
titan_is_mmio(const volatile void __iomem *xaddr)
559
{
560
unsigned long addr = (unsigned long) xaddr;
561
562
if (addr >= VMALLOC_START)
563
return 1;
564
else
565
return (addr & 0x100000000UL) == 0;
566
}
567
568
/* Export the MMIO helpers only for Titan-specific (non-generic) builds. */
#ifndef CONFIG_ALPHA_GENERIC
EXPORT_SYMBOL(titan_ioportmap);
EXPORT_SYMBOL(titan_ioremap);
EXPORT_SYMBOL(titan_iounmap);
EXPORT_SYMBOL(titan_is_mmio);
#endif
574
575
/*
576
* AGP GART Support.
577
*/
578
#include <linux/agp_backend.h>
579
#include <asm/agp_backend.h>
580
#include <linux/slab.h>
581
#include <linux/delay.h>
582
583
/*
 * Bookkeeping for the AGP aperture carved out of a hose's PCI
 * scatter-gather arena by titan_agp_setup().
 */
struct titan_agp_aperture {
	struct pci_iommu_arena *arena;	/* backing SG arena */
	long pg_start;			/* first reserved page in the arena */
	long pg_count;			/* number of reserved pages */
};
588
589
static int
590
titan_agp_setup(alpha_agp_info *agp)
591
{
592
struct titan_agp_aperture *aper;
593
594
if (!alpha_agpgart_size)
595
return -ENOMEM;
596
597
aper = kmalloc(sizeof(struct titan_agp_aperture), GFP_KERNEL);
598
if (aper == NULL)
599
return -ENOMEM;
600
601
aper->arena = agp->hose->sg_pci;
602
aper->pg_count = alpha_agpgart_size / PAGE_SIZE;
603
aper->pg_start = iommu_reserve(aper->arena, aper->pg_count,
604
aper->pg_count - 1);
605
if (aper->pg_start < 0) {
606
printk(KERN_ERR "Failed to reserve AGP memory\n");
607
kfree(aper);
608
return -ENOMEM;
609
}
610
611
agp->aperture.bus_base =
612
aper->arena->dma_base + aper->pg_start * PAGE_SIZE;
613
agp->aperture.size = aper->pg_count * PAGE_SIZE;
614
agp->aperture.sysdata = aper;
615
616
return 0;
617
}
618
619
/*
 * alpha_agp_ops.cleanup: undo titan_agp_setup — release the reserved
 * arena pages (forcibly unbinding first if memory was left bound),
 * then free both the aperture bookkeeping and the agp info itself.
 */
static void
titan_agp_cleanup(alpha_agp_info *agp)
{
	struct titan_agp_aperture *aper = agp->aperture.sysdata;
	int status;

	status = iommu_release(aper->arena, aper->pg_start, aper->pg_count);
	if (status == -EBUSY) {
		printk(KERN_WARNING
		       "Attempted to release bound AGP memory - unbinding\n");
		iommu_unbind(aper->arena, aper->pg_start, aper->pg_count);
		status = iommu_release(aper->arena, aper->pg_start,
				       aper->pg_count);
	}
	if (status < 0)
		printk(KERN_ERR "Failed to release AGP memory\n");

	kfree(aper);
	kfree(agp);
}
639
640
/*
 * alpha_agp_ops.configure: program the port's APCTL from the
 * negotiated AGP mode — side-band addressing, data rate (1x/2x; the
 * 4x path is compiled out), request-queue depths, and the AGP enable
 * bit — then give the chip time to settle.  Always returns 0.
 */
static int
titan_agp_configure(alpha_agp_info *agp)
{
	union TPAchipPCTL pctl;
	titan_pachip_port *port = agp->private;
	pctl.pctl_q_whole = port->pctl.csr;

	/* Side-Band Addressing? */
	pctl.pctl_r_bits.apctl_v_agp_sba_en = agp->mode.bits.sba;

	/* AGP Rate? */
	pctl.pctl_r_bits.apctl_v_agp_rate = 0;		/* 1x */
	if (agp->mode.bits.rate & 2)
		pctl.pctl_r_bits.apctl_v_agp_rate = 1;	/* 2x */
#if 0
	if (agp->mode.bits.rate & 4)
		pctl.pctl_r_bits.apctl_v_agp_rate = 2;	/* 4x */
#endif

	/* RQ Depth? */
	pctl.pctl_r_bits.apctl_v_agp_hp_rd = 2;
	pctl.pctl_r_bits.apctl_v_agp_lp_rd = 7;

	/*
	 * AGP Enable.
	 */
	pctl.pctl_r_bits.apctl_v_agp_en = agp->mode.bits.enable;

	/* Tell the user. */
	printk("Enabling AGP: %dX%s\n",
	       1 << pctl.pctl_r_bits.apctl_v_agp_rate,
	       pctl.pctl_r_bits.apctl_v_agp_sba_en ? " - SBA" : "");

	/* Write it. */
	port->pctl.csr = pctl.pctl_q_whole;

	/* And wait at least 5000 66MHz cycles (per Titan spec). */
	udelay(100);

	return 0;
}
681
682
static int
683
titan_agp_bind_memory(alpha_agp_info *agp, off_t pg_start, struct agp_memory *mem)
684
{
685
struct titan_agp_aperture *aper = agp->aperture.sysdata;
686
return iommu_bind(aper->arena, aper->pg_start + pg_start,
687
mem->page_count, mem->pages);
688
}
689
690
static int
691
titan_agp_unbind_memory(alpha_agp_info *agp, off_t pg_start, struct agp_memory *mem)
692
{
693
struct titan_agp_aperture *aper = agp->aperture.sysdata;
694
return iommu_unbind(aper->arena, aper->pg_start + pg_start,
695
mem->page_count);
696
}
697
698
/*
 * alpha_agp_ops.translate: convert an AGP bus address inside the
 * aperture back to the physical address held in the arena PTE.
 * Returns -EINVAL (cast to unsigned long) for out-of-range addresses
 * or invalid PTEs.
 */
static unsigned long
titan_agp_translate(alpha_agp_info *agp, dma_addr_t addr)
{
	struct titan_agp_aperture *aper = agp->aperture.sysdata;
	unsigned long baddr = addr - aper->arena->dma_base;
	unsigned long pte;

	if (addr < agp->aperture.bus_base ||
	    addr >= agp->aperture.bus_base + agp->aperture.size) {
		printk("%s: addr out of range\n", __func__);
		return -EINVAL;
	}

	pte = aper->arena->ptes[baddr >> PAGE_SHIFT];
	if (!(pte & 1)) {	/* bit 0 is the PTE valid bit */
		printk("%s: pte not valid\n", __func__);
		return -EINVAL;
	}

	/* The pfn is stored above the valid bit. */
	return (pte >> 1) << PAGE_SHIFT;
}
719
720
/* Titan's backend ops for the generic Alpha AGP layer. */
struct alpha_agp_ops titan_agp_ops =
{
	.setup = titan_agp_setup,
	.cleanup = titan_agp_cleanup,
	.configure = titan_agp_configure,
	.bind = titan_agp_bind_memory,
	.unbind = titan_agp_unbind_memory,
	.translate = titan_agp_translate
};
729
730
/*
 * Probe for an AGP-capable a_port (hose 2, else hose 3 when PChip 1
 * is present) and build the alpha_agp_info consumed by the generic
 * Alpha AGP backend.  Returns NULL when no AGP port is found, the
 * port's hose or SG arena is missing, or allocation fails.
 */
alpha_agp_info *
titan_agp_info(void)
{
	alpha_agp_info *agp;
	struct pci_controller *hose;
	titan_pachip_port *port;
	int hosenum = -1;
	union TPAchipPCTL pctl;

	/*
	 * Find the AGP port.
	 */
	port = &TITAN_pachip0->a_port;
	if (titan_query_agp(port))
		hosenum = 2;
	if (hosenum < 0 &&
	    titan_pchip1_present &&
	    titan_query_agp(port = &TITAN_pachip1->a_port))
		hosenum = 3;

	/*
	 * Find the hose the port is on.
	 */
	for (hose = hose_head; hose; hose = hose->next)
		if (hose->index == hosenum)
			break;

	if (!hose || !hose->sg_pci)
		return NULL;

	/*
	 * Allocate the info structure.
	 */
	agp = kmalloc(sizeof(*agp), GFP_KERNEL);
	if (!agp)
		return NULL;

	/*
	 * Fill it in.
	 */
	agp->hose = hose;
	agp->private = port;
	agp->ops = &titan_agp_ops;

	/*
	 * Aperture - not configured until ops.setup().
	 *
	 * FIXME - should we go ahead and allocate it here?
	 */
	agp->aperture.bus_base = 0;
	agp->aperture.size = 0;
	agp->aperture.sysdata = NULL;

	/*
	 * Capabilities.
	 */
	agp->capability.lw = 0;
	agp->capability.bits.rate = 3; /* 2x, 1x */
	agp->capability.bits.sba = 1;
	agp->capability.bits.rq = 7; /* 8 - 1 */

	/*
	 * Mode: reflect whatever the port is currently programmed to.
	 */
	pctl.pctl_q_whole = port->pctl.csr;
	agp->mode.lw = 0;
	agp->mode.bits.rate = 1 << pctl.pctl_r_bits.apctl_v_agp_rate;
	agp->mode.bits.sba = pctl.pctl_r_bits.apctl_v_agp_sba_en;
	agp->mode.bits.rq = 7; /* RQ Depth? */
	agp->mode.bits.enable = pctl.pctl_r_bits.apctl_v_agp_en;

	return agp;
}
803
804