#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/systm.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <dev/bhnd/bhnd_eromvar.h>
#include "bcma_eromreg.h"
#include "bcma_eromvar.h"
/*
 * BCMA EROM parser: forward declarations for the table-traversal and
 * entry-parsing primitives defined below.
 */
static const char	*bcma_erom_entry_type_name (uint8_t entry);

static int		 bcma_erom_read32(struct bcma_erom *erom,
			     uint32_t *entry);
static int		 bcma_erom_skip32(struct bcma_erom *erom);

static int		 bcma_erom_skip_core(struct bcma_erom *erom);
static int		 bcma_erom_skip_mport(struct bcma_erom *erom);
static int		 bcma_erom_skip_sport_region(struct bcma_erom *erom);

static int		 bcma_erom_seek_next(struct bcma_erom *erom,
			     uint8_t etype);
static int		 bcma_erom_region_to_port_type(struct bcma_erom *erom,
			     uint8_t region_type, bhnd_port_type *port_type);

static int		 bcma_erom_peek32(struct bcma_erom *erom,
			     uint32_t *entry);

static bus_size_t	 bcma_erom_tell(struct bcma_erom *erom);
static void		 bcma_erom_seek(struct bcma_erom *erom,
			     bus_size_t offset);
static void		 bcma_erom_reset(struct bcma_erom *erom);

static int		 bcma_erom_seek_matching_core(struct bcma_erom *sc,
			     const struct bhnd_core_match *desc,
			     struct bhnd_core_info *core);

static int		 bcma_erom_parse_core(struct bcma_erom *erom,
			     struct bcma_erom_core *core);
static int		 bcma_erom_parse_mport(struct bcma_erom *erom,
			     struct bcma_erom_mport *mport);
static int		 bcma_erom_parse_sport_region(struct bcma_erom *erom,
			     struct bcma_erom_sport_region *region);

static void		 bcma_erom_to_core_info(const struct bcma_erom_core *core,
			     u_int core_idx, int core_unit,
			     struct bhnd_core_info *info);

/* BCMA EROM parser instance state. */
struct bcma_erom {
	struct bhnd_erom	 obj;		/* common bhnd_erom state; must be
						   first (instances are cast from
						   bhnd_erom_t) */
	device_t		 dev;		/* device handle; not referenced by
						   the code in this file */
	struct bhnd_erom_io	*eio;		/* EROM bus I/O instance */
	bhnd_size_t		 offset;	/* current read offset, relative to
						   the mapped EROM table */
};

/*
 * EROM log message macro; prefixes the message with the calling function's
 * name and the parser's current table offset.
 */
#define	EROM_LOG(erom, fmt, ...)	do {				\
	printf("%s erom[0x%llx]: " fmt, __FUNCTION__,			\
	    (unsigned long long)(erom->offset), ##__VA_ARGS__);		\
} while(0)
/**
 * Return a human-readable name for the type of the given EROM entry,
 * or "unknown" if the type is not recognized.
 */
static const char *
bcma_erom_entry_type_name (uint8_t entry)
{
	uint8_t type;

	type = BCMA_EROM_GET_ATTR(entry, ENTRY_TYPE);

	if (type == BCMA_EROM_ENTRY_TYPE_CORE)
		return "core";

	if (type == BCMA_EROM_ENTRY_TYPE_MPORT)
		return "mport";

	if (type == BCMA_EROM_ENTRY_TYPE_REGION)
		return "region";

	return "unknown";
}
/**
 * Initialize the EROM parser instance, mapping the chip's EROM table
 * via the provided bus I/O instance.
 *
 * Returns ENXIO if the computed table address would overflow, or any
 * error returned by bhnd_erom_io_map().
 */
static int
bcma_erom_init(bhnd_erom_t *erom, const struct bhnd_chipid *cid,
    struct bhnd_erom_io *eio)
{
	struct bcma_erom	*sc;
	bhnd_addr_t		 table_addr;
	int			 error;

	sc = (struct bcma_erom *)erom;
	sc->eio = eio;
	sc->offset = 0;

	/* Refuse an enumeration address that would overflow when the
	 * table's start offset is added */
	if (cid->enum_addr > BHND_ADDR_MAX - BCMA_EROM_TABLE_START)
		return (ENXIO);

	table_addr = cid->enum_addr + BCMA_EROM_TABLE_START;

	/* Map the EROM table for the lifetime of this parser */
	if ((error = bhnd_erom_io_map(sc->eio, table_addr,
	    BCMA_EROM_TABLE_SIZE)))
		return (error);

	return (0);
}
/**
 * Probe for a BCMA-compatible EROM, populating @p cid with the chip
 * identification on success.
 *
 * Identification hints are not supported; a non-NULL @p hint returns
 * EINVAL.
 */
static int
bcma_erom_probe(bhnd_erom_class_t *cls, struct bhnd_erom_io *eio,
    const struct bhnd_chipid *hint, struct bhnd_chipid *cid)
{
	int error;

	/* Hints aren't supported by this parser */
	if (hint != NULL)
		return (EINVAL);

	/* Read the chip identification */
	error = bhnd_erom_read_chipid(eio, cid);
	if (error != 0)
		return (error);

	/* Native BCMA gets a strong match; known variants a weaker one */
	if (cid->chip_type == BHND_CHIPTYPE_BCMA)
		return (BUS_PROBE_DEFAULT);

	if (cid->chip_type == BHND_CHIPTYPE_BCMA_ALT ||
	    cid->chip_type == BHND_CHIPTYPE_UBUS)
		return (BUS_PROBE_GENERIC);

	return (ENXIO);
}
/**
 * Release the parser's bus I/O instance.
 */
static void
bcma_erom_fini(bhnd_erom_t *erom)
{
	struct bcma_erom *sc;

	sc = (struct bcma_erom *)erom;
	bhnd_erom_io_fini(sc->eio);
}
/**
 * Locate the first core matching @p desc, populating @p core with its
 * info on success.
 */
static int
bcma_erom_lookup_core(bhnd_erom_t *erom, const struct bhnd_core_match *desc,
    struct bhnd_core_info *core)
{
	struct bcma_erom *sc;

	sc = (struct bcma_erom *)erom;

	return (bcma_erom_seek_matching_core(sc, desc, core));
}
/**
 * Locate the first core matching @p desc, and return the base address
 * and size of the given port/region in @p addr and @p size.
 *
 * @p core is populated with the matching core's info.  Returns ENOENT
 * if no matching core, port, or region is found.
 *
 * Fix: the parse call at the bottom of this function contained a
 * mojibake-corrupted '&region' argument ("®ion"), which did not
 * compile; the stray ';' after the function's closing brace has also
 * been removed.
 */
static int
bcma_erom_lookup_core_addr(bhnd_erom_t *erom, const struct bhnd_core_match *desc,
    bhnd_port_type port_type, u_int port_num, u_int region_num,
    struct bhnd_core_info *core, bhnd_addr_t *addr, bhnd_size_t *size)
{
	struct bcma_erom	*sc;
	struct bcma_erom_core	 ec;
	uint32_t		 entry;
	uint8_t			 region_port, region_type;
	bool			 found;
	int			 error;

	sc = (struct bcma_erom *)erom;

	/* Seek to the first matching core and provide the core info
	 * to the caller */
	if ((error = bcma_erom_seek_matching_core(sc, desc, core)))
		return (error);

	if ((error = bcma_erom_parse_core(sc, &ec)))
		return (error);

	/* Skip the core's master port descriptors */
	for (u_long i = 0; i < ec.num_mport; i++) {
		if ((error = bcma_erom_skip_mport(sc)))
			return (error);
	}

	/* Seek to the first region block of the requested port type */
	found = false;
	while (1) {
		bhnd_port_type	p_type;
		uint8_t		r_type;

		if ((error = bcma_erom_peek32(sc, &entry)))
			return (error);

		if (!BCMA_EROM_ENTRY_IS(entry, REGION))
			return (ENOENT);

		/* Map the region type to its port type */
		r_type = BCMA_EROM_GET_ATTR(entry, REGION_TYPE);
		error = bcma_erom_region_to_port_type(sc, r_type, &p_type);
		if (error)
			return (error);

		if (p_type == port_type) {
			found = true;
			break;
		}

		/* Skip non-matching regions */
		if ((error = bcma_erom_skip_sport_region(sc)))
			return (error);
	}

	if (!found)
		return (ENOENT);

	/* Now skip ahead to the region block for the requested port number */
	found = false;
	for (u_int i = 0; i <= port_num; i++) {
		bhnd_port_type	p_type;

		if ((error = bcma_erom_peek32(sc, &entry)))
			return (error);

		if (!BCMA_EROM_ENTRY_IS(entry, REGION))
			return (ENOENT);

		/* Fetch the type/port of this block's first region entry */
		region_type = BCMA_EROM_GET_ATTR(entry, REGION_TYPE);
		region_port = BCMA_EROM_GET_ATTR(entry, REGION_PORT);

		/* Have we reached the requested port's region block? */
		if (i == port_num) {
			error = bcma_erom_region_to_port_type(sc, region_type,
			    &p_type);
			if (error)
				return (error);

			if (p_type == port_type)
				found = true;

			break;
		}

		/* Otherwise, skip the remainder of this region block */
		while (1) {
			uint8_t	next_type, next_port;

			if ((error = bcma_erom_skip_sport_region(sc)))
				return (error);

			if ((error = bcma_erom_peek32(sc, &entry)))
				return (error);

			if (!BCMA_EROM_ENTRY_IS(entry, REGION))
				return (ENOENT);

			next_type = BCMA_EROM_GET_ATTR(entry, REGION_TYPE);
			next_port = BCMA_EROM_GET_ATTR(entry, REGION_PORT);

			/* A type or port change marks the end of this
			 * region block */
			if (next_type != region_type ||
			    next_port != region_port)
				break;
		}
	}

	if (!found)
		return (ENOENT);

	/* Finally, walk the region block to the requested region number */
	for (u_int i = 0; i <= region_num; i++) {
		struct bcma_erom_sport_region	region;
		uint8_t				next_port, next_type;

		if ((error = bcma_erom_peek32(sc, &entry)))
			return (error);

		if (!BCMA_EROM_ENTRY_IS(entry, REGION))
			return (ENOENT);

		next_type = BCMA_EROM_GET_ATTR(entry, REGION_TYPE);
		next_port = BCMA_EROM_GET_ATTR(entry, REGION_PORT);

		/* Ran off the end of this port's region block */
		if (next_type != region_type ||
		    next_port != region_port)
			break;

		/* Consume the region entry (was corrupted to "®ion") */
		if ((error = bcma_erom_parse_sport_region(sc, &region)))
			return (error);

		if (i == region_num) {
			*addr = region.base_addr;
			*size = region.size;
			return (0);
		}
	}

	return (ENOENT);
}
/**
 * Enumerate all cores in the EROM table, returning a newly allocated
 * table of bhnd_core_info records in @p cores and its length in
 * @p num_cores.  The caller must release the table with
 * bcma_erom_free_core_table().
 *
 * The parser's read position is saved on entry and restored before
 * returning.
 *
 * Fix: the unit-number computation previously compared against
 * buffer[i], which is only populated *after* the comparison loop by
 * bcma_erom_to_core_info() -- a read of uninitialized memory.  The
 * comparison now uses the freshly parsed `core` record.
 */
static int
bcma_erom_get_core_table(bhnd_erom_t *erom, struct bhnd_core_info **cores,
    u_int *num_cores)
{
	struct bcma_erom	*sc;
	struct bhnd_core_info	*buffer;
	bus_size_t		 initial_offset;
	u_int			 count;
	int			 error;

	sc = (struct bcma_erom *)erom;

	buffer = NULL;
	initial_offset = bcma_erom_tell(sc);

	/* Pass 1: count the core descriptors */
	bcma_erom_reset(sc);
	for (count = 0, error = 0; !error; count++) {
		struct bcma_erom_core core;

		/* Seek to the next core entry; ENOENT marks end-of-table */
		error = bcma_erom_seek_next(sc, BCMA_EROM_ENTRY_TYPE_CORE);
		if (error == ENOENT)
			break;
		else if (error)
			goto cleanup;

		/* Consume the core descriptor */
		if ((error = bcma_erom_parse_core(sc, &core)))
			goto cleanup;
	}

	/* Allocate the output table */
	buffer = mallocarray(count, sizeof(struct bhnd_core_info), M_BHND,
	    M_NOWAIT);
	if (buffer == NULL) {
		error = ENOMEM;
		goto cleanup;
	}

	/* Pass 2: parse all core descriptors into the table */
	bcma_erom_reset(sc);
	for (u_int i = 0; i < count; i++) {
		struct bcma_erom_core	core;
		int			unit;

		/* Parse the next core */
		error = bcma_erom_seek_next(sc, BCMA_EROM_ENTRY_TYPE_CORE);
		if (error)
			goto cleanup;

		error = bcma_erom_parse_core(sc, &core);
		if (error)
			goto cleanup;

		/* Determine the unit number by counting previously recorded
		 * cores with the same vendor/device (compare against the
		 * parsed `core`; buffer[i] is not yet populated) */
		unit = 0;
		for (u_int j = 0; j < i; j++) {
			if (core.vendor == buffer[j].vendor &&
			    core.device == buffer[j].device)
				unit++;
		}

		/* Convert to a bhnd info record */
		bcma_erom_to_core_info(&core, i, unit, &buffer[i]);
	}

cleanup:
	if (!error) {
		*cores = buffer;
		*num_cores = count;
	} else {
		if (buffer != NULL)
			free(buffer, M_BHND);
	}

	/* Restore the saved read position */
	bcma_erom_seek(sc, initial_offset);
	return (error);
}
/**
 * Release a core table allocated by bcma_erom_get_core_table().
 */
static void
bcma_erom_free_core_table(bhnd_erom_t *erom, struct bhnd_core_info *cores)
{
	free(cores, M_BHND);
}
/**
 * Return the current read position, relative to the mapped EROM table.
 */
static bus_size_t
bcma_erom_tell(struct bcma_erom *erom)
{
	return (erom->offset);
}

/**
 * Set the current read position to @p offset, relative to the mapped
 * EROM table.
 */
static void
bcma_erom_seek(struct bcma_erom *erom, bus_size_t offset)
{
	erom->offset = offset;
}
/**
 * Read the 32-bit entry at the current position into @p entry, without
 * advancing the position.
 *
 * Returns EINVAL if the position has reached the final word of the
 * table without an EOF entry having been seen.
 */
static int
bcma_erom_peek32(struct bcma_erom *erom, uint32_t *entry)
{
	/* A well-formed table terminates with an EOF entry before this
	 * bound is reached */
	if (erom->offset >= (BCMA_EROM_TABLE_SIZE - sizeof(uint32_t))) {
		EROM_LOG(erom, "BCMA EROM table missing terminating EOF\n");
		return (EINVAL);
	}

	*entry = bhnd_erom_io_read(erom->eio, erom->offset, 4);
	return (0);
}

/**
 * Read the 32-bit entry at the current position into @p entry,
 * advancing the position past it on success.
 */
static int
bcma_erom_read32(struct bcma_erom *erom, uint32_t *entry)
{
	int error;

	if ((error = bcma_erom_peek32(erom, entry)) == 0)
		erom->offset += 4;

	return (error);
}
/**
 * Consume and discard the 32-bit entry at the current position.
 */
static int
bcma_erom_skip32(struct bcma_erom *erom)
{
	uint32_t	entry;

	return bcma_erom_read32(erom, &entry);
}

/**
 * Consume and discard a core descriptor at the current position.
 */
static int
bcma_erom_skip_core(struct bcma_erom *erom)
{
	struct bcma_erom_core core;

	return (bcma_erom_parse_core(erom, &core));
}

/**
 * Consume and discard a master port descriptor at the current position.
 */
static int
bcma_erom_skip_mport(struct bcma_erom *erom)
{
	struct bcma_erom_mport mp;

	return (bcma_erom_parse_mport(erom, &mp));
}

/**
 * Consume and discard a slave port region descriptor at the current
 * position.
 */
static int
bcma_erom_skip_sport_region(struct bcma_erom *erom)
{
	struct bcma_erom_sport_region r;

	return (bcma_erom_parse_sport_region(erom, &r));
}
/**
 * Seek forward to the next entry of type @p etype, leaving the read
 * position at the start of that entry.
 *
 * Returns ENOENT if EOF is reached first, or EINVAL on an invalid or
 * unrecognized entry.
 */
static int
bcma_erom_seek_next(struct bcma_erom *erom, uint8_t etype)
{
	uint32_t	entry;
	int		error;

	/* Scan entries until a match, EOF, or error */
	while (!(error = bcma_erom_peek32(erom, &entry))) {
		/* Handle EOF */
		if (entry == BCMA_EROM_TABLE_EOF)
			return (ENOENT);

		/* Invalid entry */
		if (!BCMA_EROM_GET_ATTR(entry, ENTRY_ISVALID))
			return (EINVAL);

		/* Found a matching entry type? */
		if (BCMA_EROM_GET_ATTR(entry, ENTRY_TYPE) == etype)
			return (0);

		/* Skip past this (non-matching) entry, consuming all of
		 * its descriptor words */
		switch (BCMA_EROM_GET_ATTR(entry, ENTRY_TYPE)) {
		case BCMA_EROM_ENTRY_TYPE_CORE:
			if ((error = bcma_erom_skip_core(erom)))
				return (error);
			break;

		case BCMA_EROM_ENTRY_TYPE_MPORT:
			if ((error = bcma_erom_skip_mport(erom)))
				return (error);
			break;

		case BCMA_EROM_ENTRY_TYPE_REGION:
			if ((error = bcma_erom_skip_sport_region(erom)))
				return (error);
			break;

		default:
			/* Unknown entry type */
			return (EINVAL);
		}
	}

	return (error);
}
/**
 * Reset the read position to the start of the mapped EROM table.
 */
static void
bcma_erom_reset(struct bcma_erom *erom)
{
	erom->offset = 0;
}
/**
 * Seek to the first core entry matching @p desc, scanning from the
 * start of the table.  On success the read position is left at the
 * matching core's descriptor and, if @p core is non-NULL, it is
 * populated with the core's info.
 *
 * Returns ENOENT (via bcma_erom_seek_next) if no matching core exists.
 */
static int
bcma_erom_seek_matching_core(struct bcma_erom *sc,
    const struct bhnd_core_match *desc, struct bhnd_core_info *core)
{
	struct bhnd_core_match	 imatch;
	bus_size_t		 core_offset, next_offset;
	int			 error;

	/* Start the scan from the top of the table */
	bcma_erom_reset(sc);

	/* A core's unit number is unknown until all preceding cores have
	 * been counted; perform the first pass with unit matching
	 * disabled (m.match.core_unit cleared) */
	imatch = *desc;
	imatch.m.match.core_unit = 0;

	for (u_int i = 0; i < UINT_MAX; i++) {
		struct bcma_erom_core	ec;
		struct bhnd_core_info	ci;

		/* Seek to the next core */
		error = bcma_erom_seek_next(sc, BCMA_EROM_ENTRY_TYPE_CORE);
		if (error)
			return (error);

		/* Save the core's offset so it can be restored on match */
		core_offset = bcma_erom_tell(sc);

		/* Parse the core */
		if ((error = bcma_erom_parse_core(sc, &ec)))
			return (error);

		bcma_erom_to_core_info(&ec, i, 0, &ci);

		/* Initial match, ignoring the unit number */
		if (!bhnd_core_matches(&ci, &imatch))
			continue;

		/* Re-walk all preceding cores to compute this core's unit
		 * number (O(n) per candidate; EROM tables are small) */
		next_offset = bcma_erom_tell(sc);
		bcma_erom_reset(sc);
		for (u_int j = 0; j < i; j++) {
			error = bcma_erom_seek_next(sc,
			    BCMA_EROM_ENTRY_TYPE_CORE);
			if (error)
				return (error);

			if ((error = bcma_erom_parse_core(sc, &ec)))
				return (error);

			/* Same vendor/device bumps the unit number */
			if (ec.vendor == ci.vendor && ec.device == ci.device)
				ci.unit++;
		}

		/* Full match, including the unit number */
		if (!bhnd_core_matches(&ci, desc)) {
			/* No match; resume scanning after this core */
			bcma_erom_seek(sc, next_offset);
			continue;
		}

		/* Matched; rewind to the core descriptor and provide the
		 * core info to the caller */
		bcma_erom_seek(sc, core_offset);

		if (core != NULL)
			*core = ci;

		return (0);
	}

	/* Scan bound reached without a match */
	return (error);
}
/**
 * Parse the two-word core descriptor (CoreDescA/CoreDescB) at the
 * current read position into @p core, advancing past it on success.
 *
 * Returns ENOENT if an EOF entry is found instead, or EINVAL on a
 * non-core entry.
 */
static int
bcma_erom_parse_core(struct bcma_erom *erom, struct bcma_erom_core *core)
{
	uint32_t	entry;
	int		error;

	/* Read CoreDescA */
	if ((error = bcma_erom_read32(erom, &entry)))
		return (error);

	/* Handle EOF */
	if (entry == BCMA_EROM_TABLE_EOF)
		return (ENOENT);

	if (!BCMA_EROM_ENTRY_IS(entry, CORE)) {
		EROM_LOG(erom, "Unexpected EROM entry 0x%x (type=%s)\n",
		    entry, bcma_erom_entry_type_name(entry));

		return (EINVAL);
	}

	core->vendor = BCMA_EROM_GET_ATTR(entry, COREA_DESIGNER);
	core->device = BCMA_EROM_GET_ATTR(entry, COREA_ID);

	/* Read CoreDescB; it must immediately follow CoreDescA */
	if ((error = bcma_erom_read32(erom, &entry)))
		return (error);

	if (!BCMA_EROM_ENTRY_IS(entry, CORE)) {
		return (EINVAL);
	}

	core->rev = BCMA_EROM_GET_ATTR(entry, COREB_REV);
	core->num_mport = BCMA_EROM_GET_ATTR(entry, COREB_NUM_MP);
	core->num_dport = BCMA_EROM_GET_ATTR(entry, COREB_NUM_DP);
	core->num_mwrap = BCMA_EROM_GET_ATTR(entry, COREB_NUM_WMP);
	core->num_swrap = BCMA_EROM_GET_ATTR(entry, COREB_NUM_WSP);

	return (0);
}
static int
bcma_erom_parse_mport(struct bcma_erom *erom, struct bcma_erom_mport *mport)
{
uint32_t entry;
int error;
if ((error = bcma_erom_read32(erom, &entry)))
return (error);
if (!BCMA_EROM_ENTRY_IS(entry, MPORT))
return (EINVAL);
mport->port_vid = BCMA_EROM_GET_ATTR(entry, MPORT_ID);
mport->port_num = BCMA_EROM_GET_ATTR(entry, MPORT_NUM);
return (0);
}
/**
 * Parse the slave port region descriptor at the current read position
 * into @p region, consuming any trailing 64-bit address/size words,
 * and advancing past the full descriptor on success.
 *
 * Returns ENOENT if the current entry is not a region descriptor (the
 * read position is left unchanged), or EINVAL on a read failure or an
 * invalid address map.
 */
static int
bcma_erom_parse_sport_region(struct bcma_erom *erom,
    struct bcma_erom_sport_region *region)
{
	uint32_t	entry;
	uint8_t		size_type;
	int		error;

	/* Peek first; a non-region entry must not be consumed.
	 * NOTE(review): a peek failure is reported as EINVAL rather than
	 * the peek's own error code -- equivalent today, since peek32
	 * only fails with EINVAL. */
	if (bcma_erom_peek32(erom, &entry))
		return (EINVAL);

	if (!BCMA_EROM_ENTRY_IS(entry, REGION)) {
		return (ENOENT);
	} else {
		/* Region entry confirmed; consume it */
		bcma_erom_skip32(erom);
	}

	region->base_addr = BCMA_EROM_GET_ATTR(entry, REGION_BASE);
	region->region_type = BCMA_EROM_GET_ATTR(entry, REGION_TYPE);
	region->region_port = BCMA_EROM_GET_ATTR(entry, REGION_PORT);
	size_type = BCMA_EROM_GET_ATTR(entry, REGION_SIZE);

	/* A 64-bit base address carries its high 32 bits in the next word */
	if (BCMA_EROM_GET_ATTR(entry, REGION_64BIT)) {
		if ((error = bcma_erom_read32(erom, &entry)))
			return (error);

		region->base_addr |= ((bhnd_addr_t) entry << 32);
	}

	/* The size is either a trailing size descriptor (possibly itself
	 * 64-bit), or encoded directly in the size type field */
	if (size_type == BCMA_EROM_REGION_SIZE_OTHER) {
		if ((error = bcma_erom_read32(erom, &entry)))
			return (error);

		region->size = BCMA_EROM_GET_ATTR(entry, RSIZE_VAL);

		if (BCMA_EROM_GET_ATTR(entry, RSIZE_64BIT)) {
			if ((error = bcma_erom_read32(erom, &entry)))
				return (error);
			region->size |= ((bhnd_size_t) entry << 32);
		}
	} else {
		region->size = BCMA_EROM_REGION_SIZE_BASE << size_type;
	}

	/* Reject a region that would wrap past the end of the address
	 * space */
	if (region->size != 0 &&
	    BHND_ADDR_MAX - (region->size - 1) < region->base_addr)
	{
		EROM_LOG(erom, "%s%u: invalid address map %llx:%llx\n",
		    bcma_erom_entry_type_name(region->region_type),
		    region->region_port,
		    (unsigned long long) region->base_addr,
		    (unsigned long long) region->size);

		return (EINVAL);
	}

	return (0);
}
/**
 * Populate the identification fields of @p info from a parsed EROM
 * core descriptor, the caller-assigned core index, and the
 * caller-computed unit number.
 *
 * Only the fields below are written; any other members of @p info are
 * left untouched.
 */
static void
bcma_erom_to_core_info(const struct bcma_erom_core *core, u_int core_idx,
    int core_unit, struct bhnd_core_info *info)
{
	/* Caller-supplied placement */
	info->core_idx = core_idx;
	info->unit = core_unit;

	/* Identification parsed from the core descriptor */
	info->vendor = core->vendor;
	info->device = core->device;
	info->hwrev = core->rev;
}
/**
 * Map an EROM region type to its bhnd port type, writing the result to
 * @p port_type.
 *
 * Returns EINVAL (after logging) for an unsupported region type.
 */
static int
bcma_erom_region_to_port_type(struct bcma_erom *erom, uint8_t region_type,
    bhnd_port_type *port_type)
{
	if (region_type == BCMA_EROM_REGION_TYPE_DEVICE) {
		*port_type = BHND_PORT_DEVICE;
	} else if (region_type == BCMA_EROM_REGION_TYPE_BRIDGE) {
		*port_type = BHND_PORT_BRIDGE;
	} else if (region_type == BCMA_EROM_REGION_TYPE_MWRAP ||
	    region_type == BCMA_EROM_REGION_TYPE_SWRAP) {
		/* Both wrapper flavors map to the agent port type */
		*port_type = BHND_PORT_AGENT;
	} else {
		EROM_LOG(erom, "unsupported region type %hhx\n",
		    region_type);
		return (EINVAL);
	}

	return (0);
}
/**
 * Parse all consecutive region descriptors for @p port_num /
 * @p region_type at the current read position, appending a newly
 * allocated port (with its address maps) to the matching corecfg port
 * list.
 *
 * Parsing stops at the first non-region entry, or at a region entry
 * belonging to a different port or type (which is left unconsumed).
 *
 * Fix: removed a dead `error = 0;` store that was immediately
 * overwritten, and parenthesized the return value for consistency with
 * the rest of the file.
 */
static int
bcma_erom_corecfg_fill_port_regions(struct bcma_erom *erom,
    struct bcma_corecfg *corecfg, bcma_pid_t port_num,
    uint8_t region_type)
{
	struct bcma_sport	*sport;
	struct bcma_sport_list	*sports;
	bus_size_t		 entry_offset;
	int			 error;
	bhnd_port_type		 port_type;

	/* Determine the port type for this region type */
	error = bcma_erom_region_to_port_type(erom, region_type, &port_type);
	if (error)
		return (error);

	/* Fetch the port list to be populated */
	sports = bcma_corecfg_get_port_list(corecfg, port_type);

	/* Allocate a new port descriptor */
	sport = bcma_alloc_sport(port_num, port_type);
	if (sport == NULL)
		return (ENOMEM);

	/* Read all address regions defined for this port */
	for (bcma_rmid_t region_num = 0;; region_num++) {
		struct bcma_map			*map;
		struct bcma_erom_sport_region	 spr;

		/* Sanity-bound the region count */
		if (region_num == BCMA_RMID_MAX) {
			EROM_LOG(erom, "core%u %s%u: region count reached "
			    "upper limit of %u\n",
			    corecfg->core_info.core_idx,
			    bhnd_port_type_name(port_type),
			    port_num, BCMA_RMID_MAX);

			error = EINVAL;
			goto cleanup;
		}

		/* Parse the next region entry, saving its offset so a
		 * non-matching entry can be left unconsumed */
		entry_offset = bcma_erom_tell(erom);
		error = bcma_erom_parse_sport_region(erom, &spr);
		if (error && error != ENOENT) {
			EROM_LOG(erom, "core%u %s%u.%u: invalid slave port "
			    "address region\n",
			    corecfg->core_info.core_idx,
			    bhnd_port_type_name(port_type),
			    port_num, region_num);
			goto cleanup;
		}

		/* ENOENT signals no further region entries */
		if (error == ENOENT) {
			error = 0;
			break;
		}

		/* A port or type mismatch also ends this port's regions;
		 * rewind so the entry remains for the next caller */
		if (spr.region_port != port_num ||
		    spr.region_type != region_type)
		{
			bcma_erom_seek(erom, entry_offset);

			error = 0;
			goto cleanup;
		}

		/* Create and append the map entry */
		map = malloc(sizeof(struct bcma_map), M_BHND, M_NOWAIT);
		if (map == NULL) {
			error = ENOMEM;
			goto cleanup;
		}

		map->m_region_num = region_num;
		map->m_base = spr.base_addr;
		map->m_size = spr.size;
		map->m_rid = -1;

		STAILQ_INSERT_TAIL(&sport->sp_maps, map, m_link);
		sport->sp_num_maps++;
	}

cleanup:
	/* On success the new port (and its maps) is handed to the corecfg
	 * list; otherwise it is released along with its maps */
	if (error == 0) {
		STAILQ_INSERT_TAIL(sports, sport, sp_link);
	} else if (sport != NULL) {
		bcma_free_sport(sport);
	}

	return (error);
}
/**
 * Parse the next core descriptor, its master ports, and its slave port
 * regions, returning a newly allocated core configuration in
 * @p result.
 *
 * On success, the read position is left after the parsed core's
 * records.  The caller owns the returned corecfg and must release it
 * with bcma_free_corecfg().
 */
int
bcma_erom_next_corecfg(struct bcma_erom *erom, struct bcma_corecfg **result)
{
	struct bcma_corecfg	*cfg;
	struct bcma_erom_core	 core;
	uint8_t			 first_region_type;
	bus_size_t		 initial_offset;
	u_int			 core_index;
	int			 core_unit;
	int			 error;

	cfg = NULL;
	initial_offset = bcma_erom_tell(erom);

	/* Parse the core at the current position */
	if ((error = bcma_erom_parse_core(erom, &core)))
		return (error);

	/* Determine the core's index and unit number by re-walking the
	 * table from the top until we return to our starting offset */
	bcma_erom_reset(erom);

	core_unit = 0;
	core_index = 0;
	for (; bcma_erom_tell(erom) != initial_offset; core_index++) {
		struct bcma_erom_core prev_core;

		/* Parse the next preceding core */
		error = bcma_erom_seek_next(erom, BCMA_EROM_ENTRY_TYPE_CORE);
		if (error)
			return (error);

		if ((error = bcma_erom_parse_core(erom, &prev_core)))
			return (error);

		/* Same vendor/device as our core bumps the unit number */
		if (core.vendor == prev_core.vendor &&
		    core.device == prev_core.device)
		{
			core_unit++;
		}

		/* Advance to the next core entry (or back to our own) */
		error = bcma_erom_seek_next(erom, BCMA_EROM_ENTRY_TYPE_CORE);
		if (error)
			return (error);
	}

	/* We're back at our core; skip its (already parsed) descriptor */
	if ((error = bcma_erom_skip_core(erom)))
		return (error);

	/* Allocate the corecfg for this core */
	cfg = bcma_alloc_corecfg(core_index, core_unit, core.vendor,
	    core.device, core.rev);
	if (cfg == NULL)
		return (ENOMEM);

	/* NOTE(review): these bounds are assumed to be guaranteed by the
	 * EROM descriptor field widths -- verify against bcma_eromreg.h */
	KASSERT(core.num_mport <= BCMA_PID_MAX, ("unsupported mport count"));
	KASSERT(core.num_dport <= BCMA_PID_MAX, ("unsupported dport count"));
	KASSERT(core.num_mwrap + core.num_swrap <= BCMA_PID_MAX,
	    ("unsupported wport count"));

	if (bootverbose) {
		EROM_LOG(erom,
		    "core%u: %s %s (cid=%hx, rev=%hu, unit=%d)\n",
		    core_index,
		    bhnd_vendor_name(core.vendor),
		    bhnd_find_core_name(core.vendor, core.device),
		    core.device, core.rev, core_unit);
	}

	cfg->num_master_ports = core.num_mport;
	cfg->num_dev_ports = 0;		/* determined below */
	cfg->num_bridge_ports = 0;	/* determined below */
	cfg->num_wrapper_ports = core.num_mwrap + core.num_swrap;

	/* Parse the master port descriptors */
	for (uint8_t i = 0; i < core.num_mport; i++) {
		struct bcma_mport	*mport;
		struct bcma_erom_mport	 mpd;

		error = bcma_erom_parse_mport(erom, &mpd);
		if (error)
			goto failed;

		/* Initialize and append a bus mport structure */
		mport = malloc(sizeof(struct bcma_mport), M_BHND, M_NOWAIT);
		if (mport == NULL) {
			error = ENOMEM;
			goto failed;
		}

		mport->mp_vid = mpd.port_vid;
		mport->mp_num = mpd.port_num;

		STAILQ_INSERT_TAIL(&cfg->master_ports, mport, mp_link);
	}

	/* If this is a bridge device, its first region block is of the
	 * BRIDGE type rather than the DEVICE type; peek ahead to decide */
	if (core.num_dport > 0) {
		uint32_t entry;

		if ((error = bcma_erom_peek32(erom, &entry)))
			goto failed;

		if (BCMA_EROM_ENTRY_IS(entry, REGION) &&
		    BCMA_EROM_GET_ATTR(entry, REGION_TYPE) == BCMA_EROM_REGION_TYPE_BRIDGE)
		{
			first_region_type = BCMA_EROM_REGION_TYPE_BRIDGE;
			cfg->num_dev_ports = 0;
			cfg->num_bridge_ports = core.num_dport;
		} else {
			first_region_type = BCMA_EROM_REGION_TYPE_DEVICE;
			cfg->num_dev_ports = core.num_dport;
			cfg->num_bridge_ports = 0;
		}
	}

	/* Device (or bridge) port regions */
	for (uint8_t sp_num = 0; sp_num < core.num_dport; sp_num++) {
		error = bcma_erom_corecfg_fill_port_regions(erom, cfg, sp_num,
		    first_region_type);
		if (error)
			goto failed;
	}

	/* Master wrapper port regions */
	for (uint8_t sp_num = 0; sp_num < core.num_mwrap; sp_num++) {
		error = bcma_erom_corecfg_fill_port_regions(erom, cfg, sp_num,
		    BCMA_EROM_REGION_TYPE_MWRAP);
		if (error)
			goto failed;
	}

	/* Slave wrapper port regions */
	for (uint8_t i = 0; i < core.num_swrap; i++) {
		uint8_t sp_num;

		/* NOTE(review): slave wrapper numbering follows the master
		 * wrappers; the +1 offset applied for Broadcom-vendor cores
		 * (with no master wrappers) reflects an observed device
		 * numbering quirk -- verify against the upstream driver */
		sp_num = (core.num_mwrap > 0) ?
		    core.num_mwrap :
		    ((core.vendor == BHND_MFGID_BCM) ? 1 : 0) + i;

		error = bcma_erom_corecfg_fill_port_regions(erom, cfg, sp_num,
		    BCMA_EROM_REGION_TYPE_SWRAP);
		if (error)
			goto failed;
	}

	/* Position the parser at the next core (ENOENT -- no further
	 * cores -- is not an error) */
	if ((error = bcma_erom_seek_next(erom, BCMA_EROM_ENTRY_TYPE_CORE))) {
		if (error != ENOENT)
			goto failed;
	}

	*result = cfg;
	return (0);

failed:
	if (cfg != NULL)
		bcma_free_corecfg(cfg);

	return error;
}
/**
 * Dump the entire EROM table to the console, decoding each entry.
 *
 * Resets the read position to the start of the table; returns 0 after
 * printing a terminating EOF entry, or an error on an invalid or
 * truncated table.
 *
 * Fix: the "numwsp" line previously printed the COREB_NUM_WMP field
 * (copy-paste from the preceding "numwmp" line); it now prints
 * COREB_NUM_WSP, matching the label and the field consumed by
 * bcma_erom_parse_core().
 */
static int
bcma_erom_dump(bhnd_erom_t *erom)
{
	struct bcma_erom	*sc;
	uint32_t		 entry;
	int			 error;

	sc = (struct bcma_erom *)erom;

	bcma_erom_reset(sc);
	while (!(error = bcma_erom_read32(sc, &entry))) {
		/* EOF terminates the walk successfully */
		if (entry == BCMA_EROM_TABLE_EOF) {
			EROM_LOG(sc, "EOF\n");
			return (0);
		}

		if (!BCMA_EROM_GET_ATTR(entry, ENTRY_ISVALID)) {
			EROM_LOG(sc, "invalid EROM entry %#x\n", entry);
			return (EINVAL);
		}

		switch (BCMA_EROM_GET_ATTR(entry, ENTRY_TYPE)) {
		case BCMA_EROM_ENTRY_TYPE_CORE: {
			/* CoreDescA */
			EROM_LOG(sc, "coreA (0x%x)\n", entry);
			EROM_LOG(sc, "\tdesigner:\t0x%x\n",
			    BCMA_EROM_GET_ATTR(entry, COREA_DESIGNER));
			EROM_LOG(sc, "\tid:\t\t0x%x\n",
			    BCMA_EROM_GET_ATTR(entry, COREA_ID));
			EROM_LOG(sc, "\tclass:\t\t0x%x\n",
			    BCMA_EROM_GET_ATTR(entry, COREA_CLASS));

			/* CoreDescB must immediately follow */
			if ((error = bcma_erom_read32(sc, &entry))) {
				EROM_LOG(sc, "error reading CoreDescB: %d\n",
				    error);
				return (error);
			}

			if (!BCMA_EROM_ENTRY_IS(entry, CORE)) {
				EROM_LOG(sc, "invalid core descriptor; found "
				    "unexpected entry %#x (type=%s)\n",
				    entry, bcma_erom_entry_type_name(entry));
				return (EINVAL);
			}

			EROM_LOG(sc, "coreB (0x%x)\n", entry);
			EROM_LOG(sc, "\trev:\t0x%x\n",
			    BCMA_EROM_GET_ATTR(entry, COREB_REV));
			EROM_LOG(sc, "\tnummp:\t0x%x\n",
			    BCMA_EROM_GET_ATTR(entry, COREB_NUM_MP));
			EROM_LOG(sc, "\tnumdp:\t0x%x\n",
			    BCMA_EROM_GET_ATTR(entry, COREB_NUM_DP));
			EROM_LOG(sc, "\tnumwmp:\t0x%x\n",
			    BCMA_EROM_GET_ATTR(entry, COREB_NUM_WMP));
			/* was COREB_NUM_WMP (copy-paste bug) */
			EROM_LOG(sc, "\tnumwsp:\t0x%x\n",
			    BCMA_EROM_GET_ATTR(entry, COREB_NUM_WSP));

			break;
		}
		case BCMA_EROM_ENTRY_TYPE_MPORT:
			EROM_LOG(sc, "\tmport 0x%x\n", entry);
			EROM_LOG(sc, "\t\tport:\t0x%x\n",
			    BCMA_EROM_GET_ATTR(entry, MPORT_NUM));
			EROM_LOG(sc, "\t\tid:\t\t0x%x\n",
			    BCMA_EROM_GET_ATTR(entry, MPORT_ID));
			break;

		case BCMA_EROM_ENTRY_TYPE_REGION: {
			bool	addr64;
			uint8_t	size_type;

			addr64 = (BCMA_EROM_GET_ATTR(entry, REGION_64BIT) != 0);
			size_type = BCMA_EROM_GET_ATTR(entry, REGION_SIZE);

			EROM_LOG(sc, "\tregion 0x%x:\n", entry);
			EROM_LOG(sc, "\t\t%s:\t0x%x\n",
			    addr64 ? "baselo" : "base",
			    BCMA_EROM_GET_ATTR(entry, REGION_BASE));
			EROM_LOG(sc, "\t\tport:\t0x%x\n",
			    BCMA_EROM_GET_ATTR(entry, REGION_PORT));
			EROM_LOG(sc, "\t\ttype:\t0x%x\n",
			    BCMA_EROM_GET_ATTR(entry, REGION_TYPE));
			EROM_LOG(sc, "\t\tsztype:\t0x%hhx\n", size_type);

			/* High 32 bits of a 64-bit base address */
			if (addr64) {
				if ((error = bcma_erom_read32(sc, &entry))) {
					EROM_LOG(sc, "error reading region "
					    "base address high bits %d\n",
					    error);
					return (error);
				}

				EROM_LOG(sc, "\t\tbasehi:\t0x%x\n", entry);
			}

			/* Trailing size descriptor (possibly 64-bit) */
			if (size_type == BCMA_EROM_REGION_SIZE_OTHER) {
				bool size64;

				if ((error = bcma_erom_read32(sc, &entry))) {
					EROM_LOG(sc, "error reading region "
					    "size descriptor %d\n",
					    error);
					return (error);
				}

				if (BCMA_EROM_GET_ATTR(entry, RSIZE_64BIT))
					size64 = true;
				else
					size64 = false;

				EROM_LOG(sc, "\t\t%s:\t0x%x\n",
				    size64 ? "sizelo" : "size",
				    BCMA_EROM_GET_ATTR(entry, RSIZE_VAL));

				if (size64) {
					error = bcma_erom_read32(sc, &entry);
					if (error) {
						EROM_LOG(sc, "error reading "
						    "region size high bits: "
						    "%d\n", error);
						return (error);
					}

					EROM_LOG(sc, "\t\tsizehi:\t0x%x\n",
					    entry);
				}
			}
			break;
		}

		default:
			EROM_LOG(sc, "unknown EROM entry 0x%x (type=%s)\n",
			    entry, bcma_erom_entry_type_name(entry));
			return (EINVAL);
		}
	}

	if (error == ENOENT)
		EROM_LOG(sc, "BCMA EROM table missing terminating EOF\n");
	else if (error)
		EROM_LOG(sc, "EROM read failed: %d\n", error);

	return (error);
}
/* bhnd_erom interface method table, binding this parser's implementations
 * to the generic bhnd_erom KOBJ interface */
static kobj_method_t bcma_erom_methods[] = {
	KOBJMETHOD(bhnd_erom_probe,		bcma_erom_probe),
	KOBJMETHOD(bhnd_erom_init,		bcma_erom_init),
	KOBJMETHOD(bhnd_erom_fini,		bcma_erom_fini),
	KOBJMETHOD(bhnd_erom_get_core_table,	bcma_erom_get_core_table),
	KOBJMETHOD(bhnd_erom_free_core_table,	bcma_erom_free_core_table),
	KOBJMETHOD(bhnd_erom_lookup_core,	bcma_erom_lookup_core),
	KOBJMETHOD(bhnd_erom_lookup_core_addr,	bcma_erom_lookup_core_addr),
	KOBJMETHOD(bhnd_erom_dump,		bcma_erom_dump),

	KOBJMETHOD_END
};

/* Register the parser class with the bhnd erom framework */
BHND_EROM_DEFINE_CLASS(bcma_erom, bcma_erom_parser, bcma_erom_methods, sizeof(struct bcma_erom));