Path: blob/main/sys/contrib/dev/athk/ath10k/sdio.c
// SPDX-License-Identifier: ISC
/*
 * Copyright (c) 2004-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2012,2017 Qualcomm Atheros, Inc.
 * Copyright (c) 2016-2017 Erik Stromdahl <[email protected]>
 */

#include <linux/module.h>
#include <linux/mmc/card.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/host.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/sdio_ids.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/sd.h>
#include <linux/bitfield.h>
#include "core.h"
#include "bmi.h"
#include "debug.h"
#include "hif.h"
#include "htc.h"
#include "mac.h"
#include "targaddrs.h"
#include "trace.h"
#include "sdio.h"
#include "coredump.h"

void ath10k_sdio_fw_crashed_dump(struct ath10k *ar);

#define ATH10K_SDIO_VSG_BUF_SIZE	(64 * 1024)

/* inlined helper functions */

static inline int ath10k_sdio_calc_txrx_padded_len(struct ath10k_sdio *ar_sdio,
						   size_t len)
{
	return __ALIGN_MASK((len), ar_sdio->mbox_info.block_mask);
}

static inline enum ath10k_htc_ep_id pipe_id_to_eid(u8 pipe_id)
{
	return (enum ath10k_htc_ep_id)pipe_id;
}

static inline void ath10k_sdio_mbox_free_rx_pkt(struct ath10k_sdio_rx_data *pkt)
{
	dev_kfree_skb(pkt->skb);
	pkt->skb = NULL;
	pkt->alloc_len = 0;
	pkt->act_len = 0;
	pkt->trailer_only = false;
}

static inline int ath10k_sdio_mbox_alloc_rx_pkt(struct ath10k_sdio_rx_data *pkt,
						size_t act_len, size_t full_len,
						bool part_of_bundle,
						bool last_in_bundle)
{
	pkt->skb = dev_alloc_skb(full_len);
	if (!pkt->skb)
		return -ENOMEM;

	pkt->act_len = act_len;
	pkt->alloc_len = full_len;
	pkt->part_of_bundle = part_of_bundle;
	pkt->last_in_bundle = last_in_bundle;
	pkt->trailer_only = false;

	return 0;
}

static inline bool is_trailer_only_msg(struct ath10k_sdio_rx_data *pkt)
{
	bool trailer_only = false;
	struct ath10k_htc_hdr *htc_hdr =
		(struct ath10k_htc_hdr *)pkt->skb->data;
	u16 len = __le16_to_cpu(htc_hdr->len);

	if (len == htc_hdr->trailer_len)
		trailer_only = true;

	return trailer_only;
}

/* sdio/mmc functions */

static inline void ath10k_sdio_set_cmd52_arg(u32 *arg, u8 write, u8 raw,
					     unsigned int address,
					     unsigned char val)
{
	*arg = FIELD_PREP(BIT(31), write) |
	       FIELD_PREP(BIT(27), raw) |
	       FIELD_PREP(BIT(26), 1) |
	       FIELD_PREP(GENMASK(25, 9), address) |
	       FIELD_PREP(BIT(8), 1) |
	       FIELD_PREP(GENMASK(7, 0), val);
}

static int ath10k_sdio_func0_cmd52_wr_byte(struct mmc_card *card,
					   unsigned int address,
					   unsigned char byte)
{
	struct mmc_command io_cmd;

	memset(&io_cmd, 0, sizeof(io_cmd));
	ath10k_sdio_set_cmd52_arg(&io_cmd.arg, 1, 0, address, byte);
	io_cmd.opcode = SD_IO_RW_DIRECT;
	io_cmd.flags = MMC_RSP_R5 | MMC_CMD_AC;

	return mmc_wait_for_cmd(card->host, &io_cmd, 0);
}

static int ath10k_sdio_func0_cmd52_rd_byte(struct mmc_card *card,
					   unsigned int address,
					   unsigned char *byte)
{
	struct mmc_command io_cmd;
	int ret;

	memset(&io_cmd, 0, sizeof(io_cmd));
	ath10k_sdio_set_cmd52_arg(&io_cmd.arg, 0, 0, address, 0);
	io_cmd.opcode = SD_IO_RW_DIRECT;
	io_cmd.flags = MMC_RSP_R5 | MMC_CMD_AC;

	ret = mmc_wait_for_cmd(card->host, &io_cmd, 0);
	if (!ret)
		*byte = io_cmd.resp[0];

	return ret;
}
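/* For reference (layout per the SDIO spec, summarized here rather than
 * documented in this driver): the CMD52 (SD_IO_RW_DIRECT) argument word
 * assembled by ath10k_sdio_set_cmd52_arg() packs the R/W flag into bit 31,
 * the RAW (read-after-write) flag into bit 27, the register address into
 * bits 25..9 and the data byte into bits 7..0.  Bits 30..28 (the function
 * number) are left at zero, which is why these helpers always address
 * function 0 (the CCCR).
 */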
static int ath10k_sdio_config(struct ath10k *ar)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct sdio_func *func = ar_sdio->func;
	unsigned char byte, asyncintdelay = 2;
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "sdio configuration\n");

	sdio_claim_host(func);

	byte = 0;
	ret = ath10k_sdio_func0_cmd52_rd_byte(func->card,
					      SDIO_CCCR_DRIVE_STRENGTH,
					      &byte);

	byte &= ~ATH10K_SDIO_DRIVE_DTSX_MASK;
	byte |= FIELD_PREP(ATH10K_SDIO_DRIVE_DTSX_MASK,
			   ATH10K_SDIO_DRIVE_DTSX_TYPE_D);

	ret = ath10k_sdio_func0_cmd52_wr_byte(func->card,
					      SDIO_CCCR_DRIVE_STRENGTH,
					      byte);

	byte = 0;
	ret = ath10k_sdio_func0_cmd52_rd_byte(
		func->card,
		CCCR_SDIO_DRIVER_STRENGTH_ENABLE_ADDR,
		&byte);

	byte |= (CCCR_SDIO_DRIVER_STRENGTH_ENABLE_A |
		 CCCR_SDIO_DRIVER_STRENGTH_ENABLE_C |
		 CCCR_SDIO_DRIVER_STRENGTH_ENABLE_D);

	ret = ath10k_sdio_func0_cmd52_wr_byte(func->card,
					      CCCR_SDIO_DRIVER_STRENGTH_ENABLE_ADDR,
					      byte);
	if (ret) {
		ath10k_warn(ar, "failed to enable driver strength: %d\n", ret);
		goto out;
	}

	byte = 0;
	ret = ath10k_sdio_func0_cmd52_rd_byte(func->card,
					      CCCR_SDIO_IRQ_MODE_REG_SDIO3,
					      &byte);

	byte |= SDIO_IRQ_MODE_ASYNC_4BIT_IRQ_SDIO3;

	ret = ath10k_sdio_func0_cmd52_wr_byte(func->card,
					      CCCR_SDIO_IRQ_MODE_REG_SDIO3,
					      byte);
	if (ret) {
		ath10k_warn(ar, "failed to enable 4-bit async irq mode: %d\n",
			    ret);
		goto out;
	}

	byte = 0;
	ret = ath10k_sdio_func0_cmd52_rd_byte(func->card,
					      CCCR_SDIO_ASYNC_INT_DELAY_ADDRESS,
					      &byte);

	byte &= ~CCCR_SDIO_ASYNC_INT_DELAY_MASK;
	byte |= FIELD_PREP(CCCR_SDIO_ASYNC_INT_DELAY_MASK, asyncintdelay);

	ret = ath10k_sdio_func0_cmd52_wr_byte(func->card,
					      CCCR_SDIO_ASYNC_INT_DELAY_ADDRESS,
					      byte);

	/* give us some time to enable, in ms */
	func->enable_timeout = 100;

	ret = sdio_set_block_size(func, ar_sdio->mbox_info.block_size);
	if (ret) {
		ath10k_warn(ar, "failed to set sdio block size to %d: %d\n",
			    ar_sdio->mbox_info.block_size, ret);
		goto out;
	}

out:
	sdio_release_host(func);
	return ret;
}

static int ath10k_sdio_write32(struct ath10k *ar, u32 addr, u32 val)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct sdio_func *func = ar_sdio->func;
	int ret;

	sdio_claim_host(func);

	sdio_writel(func, val, addr, &ret);
	if (ret) {
		ath10k_warn(ar, "failed to write 0x%x to address 0x%x: %d\n",
			    val, addr, ret);
		goto out;
	}

	ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio write32 addr 0x%x val 0x%x\n",
		   addr, val);

out:
	sdio_release_host(func);

	return ret;
}

static int ath10k_sdio_writesb32(struct ath10k *ar, u32 addr, u32 val)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct sdio_func *func = ar_sdio->func;
	__le32 *buf;
	int ret;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	*buf = cpu_to_le32(val);

	sdio_claim_host(func);

	ret = sdio_writesb(func, addr, buf, sizeof(*buf));
	if (ret) {
		ath10k_warn(ar, "failed to write value 0x%x to fixed sb address 0x%x: %d\n",
			    val, addr, ret);
		goto out;
	}

	ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio writesb32 addr 0x%x val 0x%x\n",
		   addr, val);

out:
	sdio_release_host(func);

	kfree(buf);

	return ret;
}

static int ath10k_sdio_read32(struct ath10k *ar, u32 addr, u32 *val)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct sdio_func *func = ar_sdio->func;
	int ret;

	sdio_claim_host(func);
	*val = sdio_readl(func, addr, &ret);
	if (ret) {
		ath10k_warn(ar, "failed to read from address 0x%x: %d\n",
			    addr, ret);
		goto out;
	}

	ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio read32 addr 0x%x val 0x%x\n",
		   addr, *val);

out:
	sdio_release_host(func);

	return ret;
}
static int ath10k_sdio_read(struct ath10k *ar, u32 addr, void *buf, size_t len)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct sdio_func *func = ar_sdio->func;
	int ret;

	sdio_claim_host(func);

	ret = sdio_memcpy_fromio(func, buf, addr, len);
	if (ret) {
		ath10k_warn(ar, "failed to read from address 0x%x: %d\n",
			    addr, ret);
		goto out;
	}

	ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio read addr 0x%x buf 0x%p len %zu\n",
		   addr, buf, len);
	ath10k_dbg_dump(ar, ATH10K_DBG_SDIO_DUMP, NULL, "sdio read ", buf, len);

out:
	sdio_release_host(func);

	return ret;
}

static int ath10k_sdio_write(struct ath10k *ar, u32 addr, const void *buf, size_t len)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct sdio_func *func = ar_sdio->func;
	int ret;

	sdio_claim_host(func);

	/* For some reason toio() doesn't have const for the buffer, need
	 * an ugly hack to workaround that.
	 */
	ret = sdio_memcpy_toio(func, addr, (void *)buf, len);
	if (ret) {
		ath10k_warn(ar, "failed to write to address 0x%x: %d\n",
			    addr, ret);
		goto out;
	}

	ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio write addr 0x%x buf 0x%p len %zu\n",
		   addr, buf, len);
	ath10k_dbg_dump(ar, ATH10K_DBG_SDIO_DUMP, NULL, "sdio write ", buf, len);

out:
	sdio_release_host(func);

	return ret;
}

static int ath10k_sdio_readsb(struct ath10k *ar, u32 addr, void *buf, size_t len)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct sdio_func *func = ar_sdio->func;
	int ret;

	sdio_claim_host(func);

	len = round_down(len, ar_sdio->mbox_info.block_size);

	ret = sdio_readsb(func, buf, addr, len);
	if (ret) {
		ath10k_warn(ar, "failed to read from fixed (sb) address 0x%x: %d\n",
			    addr, ret);
		goto out;
	}

	ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio readsb addr 0x%x buf 0x%p len %zu\n",
		   addr, buf, len);
	ath10k_dbg_dump(ar, ATH10K_DBG_SDIO_DUMP, NULL, "sdio readsb ", buf, len);

out:
	sdio_release_host(func);

	return ret;
}

/* HIF mbox functions */

static int ath10k_sdio_mbox_rx_process_packet(struct ath10k *ar,
					      struct ath10k_sdio_rx_data *pkt,
					      u32 *lookaheads,
					      int *n_lookaheads)
{
	struct ath10k_htc *htc = &ar->htc;
	struct sk_buff *skb = pkt->skb;
	struct ath10k_htc_hdr *htc_hdr = (struct ath10k_htc_hdr *)skb->data;
	bool trailer_present = htc_hdr->flags & ATH10K_HTC_FLAG_TRAILER_PRESENT;
	enum ath10k_htc_ep_id eid;
	u8 *trailer;
	int ret;

	if (trailer_present) {
		trailer = skb->data + skb->len - htc_hdr->trailer_len;

		eid = pipe_id_to_eid(htc_hdr->eid);

		ret = ath10k_htc_process_trailer(htc,
						 trailer,
						 htc_hdr->trailer_len,
						 eid,
						 lookaheads,
						 n_lookaheads);
		if (ret)
			return ret;

		if (is_trailer_only_msg(pkt))
			pkt->trailer_only = true;

		skb_trim(skb, skb->len - htc_hdr->trailer_len);
	}

	skb_pull(skb, sizeof(*htc_hdr));

	return 0;
}
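/* A note on lookaheads (summarizing the flow above, not new behavior): the
 * firmware can append a trailer to an RX message carrying "lookahead"
 * reports, i.e. copies of the leading four bytes of the next pending HTC
 * message.  ath10k_htc_process_trailer() extracts those words into the
 * lookaheads[] array, which lets the RX allocation code below size the next
 * fetch without an extra register read.
 */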
static int ath10k_sdio_mbox_rx_process_packets(struct ath10k *ar,
					       u32 lookaheads[],
					       int *n_lookahead)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct ath10k_htc *htc = &ar->htc;
	struct ath10k_sdio_rx_data *pkt;
	struct ath10k_htc_ep *ep;
	struct ath10k_skb_rxcb *cb;
	enum ath10k_htc_ep_id id;
	int ret, i, *n_lookahead_local;
	u32 *lookaheads_local;
	int lookahead_idx = 0;

	for (i = 0; i < ar_sdio->n_rx_pkts; i++) {
		lookaheads_local = lookaheads;
		n_lookahead_local = n_lookahead;

		id = ((struct ath10k_htc_hdr *)
		      &lookaheads[lookahead_idx++])->eid;

		if (id >= ATH10K_HTC_EP_COUNT) {
			ath10k_warn(ar, "invalid endpoint in look-ahead: %d\n",
				    id);
			ret = -ENOMEM;
			goto out;
		}

		ep = &htc->endpoint[id];

		if (ep->service_id == 0) {
			ath10k_warn(ar, "ep %d is not connected\n", id);
			ret = -ENOMEM;
			goto out;
		}

		pkt = &ar_sdio->rx_pkts[i];

		if (pkt->part_of_bundle && !pkt->last_in_bundle) {
			/* Only read lookaheads from RX trailers
			 * for the last packet in a bundle.
			 */
			lookahead_idx--;
			lookaheads_local = NULL;
			n_lookahead_local = NULL;
		}

		ret = ath10k_sdio_mbox_rx_process_packet(ar,
							 pkt,
							 lookaheads_local,
							 n_lookahead_local);
		if (ret)
			goto out;

		if (!pkt->trailer_only) {
			cb = ATH10K_SKB_RXCB(pkt->skb);
			cb->eid = id;

			skb_queue_tail(&ar_sdio->rx_head, pkt->skb);
			queue_work(ar->workqueue_aux,
				   &ar_sdio->async_work_rx);
		} else {
			kfree_skb(pkt->skb);
		}

		/* The RX complete handler now owns the skb... */
		pkt->skb = NULL;
		pkt->alloc_len = 0;
	}

	ret = 0;

out:
	/* Free all packets that were not passed on to the RX completion
	 * handler...
	 */
	for (; i < ar_sdio->n_rx_pkts; i++)
		ath10k_sdio_mbox_free_rx_pkt(&ar_sdio->rx_pkts[i]);

	return ret;
}

static int ath10k_sdio_mbox_alloc_bundle(struct ath10k *ar,
					 struct ath10k_sdio_rx_data *rx_pkts,
					 struct ath10k_htc_hdr *htc_hdr,
					 size_t full_len, size_t act_len,
					 size_t *bndl_cnt)
{
	int ret, i;
	u8 max_msgs = ar->htc.max_msgs_per_htc_bundle;

	*bndl_cnt = ath10k_htc_get_bundle_count(max_msgs, htc_hdr->flags);

	if (*bndl_cnt > max_msgs) {
		ath10k_warn(ar,
			    "HTC bundle length %u exceeds maximum %u\n",
			    le16_to_cpu(htc_hdr->len),
			    max_msgs);
		return -ENOMEM;
	}

	/* Allocate bndl_cnt extra skbs for the bundle.
	 * The packet containing the
	 * ATH10K_HTC_FLAG_BUNDLE_MASK flag is not included
	 * in bndl_cnt. The skb for that packet will be
	 * allocated separately.
	 */
	for (i = 0; i < *bndl_cnt; i++) {
		ret = ath10k_sdio_mbox_alloc_rx_pkt(&rx_pkts[i],
						    act_len,
						    full_len,
						    true,
						    false);
		if (ret)
			return ret;
	}

	return 0;
}
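/* RX bundling in a nutshell (restating what the functions above and below
 * implement): when an HTC header's flags field carries a bundle count, the
 * messages that follow are padded to the same block-aligned length, so the
 * whole group can be fetched with one large sdio_readsb() transfer into
 * vsg_buffer and then split into the per-packet skbs pre-allocated here.
 */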
static int ath10k_sdio_mbox_rx_alloc(struct ath10k *ar,
				     u32 lookaheads[], int n_lookaheads)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct ath10k_htc_hdr *htc_hdr;
	size_t full_len, act_len;
	bool last_in_bundle;
	int ret, i;
	int pkt_cnt = 0;

	if (n_lookaheads > ATH10K_SDIO_MAX_RX_MSGS) {
		ath10k_warn(ar, "the total number of pkts to be fetched (%u) exceeds maximum %u\n",
			    n_lookaheads, ATH10K_SDIO_MAX_RX_MSGS);
		ret = -ENOMEM;
		goto err;
	}

	for (i = 0; i < n_lookaheads; i++) {
		htc_hdr = (struct ath10k_htc_hdr *)&lookaheads[i];
		last_in_bundle = false;

		if (le16_to_cpu(htc_hdr->len) > ATH10K_HTC_MBOX_MAX_PAYLOAD_LENGTH) {
			ath10k_warn(ar, "payload length %d exceeds max htc length: %zu\n",
				    le16_to_cpu(htc_hdr->len),
				    ATH10K_HTC_MBOX_MAX_PAYLOAD_LENGTH);
			ret = -ENOMEM;

			ath10k_core_start_recovery(ar);
			ath10k_warn(ar, "exceeds length, start recovery\n");

			goto err;
		}

		act_len = le16_to_cpu(htc_hdr->len) + sizeof(*htc_hdr);
		full_len = ath10k_sdio_calc_txrx_padded_len(ar_sdio, act_len);

		if (full_len > ATH10K_SDIO_MAX_BUFFER_SIZE) {
			ath10k_warn(ar, "rx buffer requested with invalid htc_hdr length (%d, 0x%x): %d\n",
				    htc_hdr->eid, htc_hdr->flags,
				    le16_to_cpu(htc_hdr->len));
			ret = -EINVAL;
			goto err;
		}

		if (ath10k_htc_get_bundle_count(
			ar->htc.max_msgs_per_htc_bundle, htc_hdr->flags)) {
			/* HTC header indicates that every packet to follow
			 * has the same padded length so that it can be
			 * optimally fetched as a full bundle.
			 */
			size_t bndl_cnt;

			ret = ath10k_sdio_mbox_alloc_bundle(ar,
							    &ar_sdio->rx_pkts[pkt_cnt],
							    htc_hdr,
							    full_len,
							    act_len,
							    &bndl_cnt);

			if (ret) {
				ath10k_warn(ar, "failed to allocate a bundle: %d\n",
					    ret);
				goto err;
			}

			pkt_cnt += bndl_cnt;

			/* next buffer will be the last in the bundle */
			last_in_bundle = true;
		}

		/* Allocate skb for packet. If the packet had the
		 * ATH10K_HTC_FLAG_BUNDLE_MASK flag set, all bundled
		 * packet skbs have been allocated in the previous step.
		 */
		if (htc_hdr->flags & ATH10K_HTC_FLAGS_RECV_1MORE_BLOCK)
			full_len += ATH10K_HIF_MBOX_BLOCK_SIZE;

		ret = ath10k_sdio_mbox_alloc_rx_pkt(&ar_sdio->rx_pkts[pkt_cnt],
						    act_len,
						    full_len,
						    last_in_bundle,
						    last_in_bundle);
		if (ret) {
			ath10k_warn(ar, "alloc_rx_pkt error %d\n", ret);
			goto err;
		}

		pkt_cnt++;
	}

	ar_sdio->n_rx_pkts = pkt_cnt;

	return 0;

err:
	for (i = 0; i < ATH10K_SDIO_MAX_RX_MSGS; i++) {
		if (!ar_sdio->rx_pkts[i].alloc_len)
			break;
		ath10k_sdio_mbox_free_rx_pkt(&ar_sdio->rx_pkts[i]);
	}

	return ret;
}

static int ath10k_sdio_mbox_rx_fetch(struct ath10k *ar)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct ath10k_sdio_rx_data *pkt = &ar_sdio->rx_pkts[0];
	struct sk_buff *skb = pkt->skb;
	struct ath10k_htc_hdr *htc_hdr;
	int ret;

	ret = ath10k_sdio_readsb(ar, ar_sdio->mbox_info.htc_addr,
				 skb->data, pkt->alloc_len);
	if (ret)
		goto err;

	htc_hdr = (struct ath10k_htc_hdr *)skb->data;
	pkt->act_len = le16_to_cpu(htc_hdr->len) + sizeof(*htc_hdr);

	if (pkt->act_len > pkt->alloc_len) {
		ret = -EINVAL;
		goto err;
	}

	skb_put(skb, pkt->act_len);
	return 0;

err:
	ar_sdio->n_rx_pkts = 0;
	ath10k_sdio_mbox_free_rx_pkt(pkt);

	return ret;
}

static int ath10k_sdio_mbox_rx_fetch_bundle(struct ath10k *ar)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct ath10k_sdio_rx_data *pkt;
	struct ath10k_htc_hdr *htc_hdr;
	int ret, i;
	u32 pkt_offset, virt_pkt_len;

	virt_pkt_len = 0;
	for (i = 0; i < ar_sdio->n_rx_pkts; i++)
		virt_pkt_len += ar_sdio->rx_pkts[i].alloc_len;

	if (virt_pkt_len > ATH10K_SDIO_VSG_BUF_SIZE) {
		ath10k_warn(ar, "sdio vsg buffer size limit: %d\n", virt_pkt_len);
		ret = -E2BIG;
		goto err;
	}

	ret = ath10k_sdio_readsb(ar, ar_sdio->mbox_info.htc_addr,
				 ar_sdio->vsg_buffer, virt_pkt_len);
	if (ret) {
		ath10k_warn(ar, "failed to read bundle packets: %d", ret);
		goto err;
	}

	pkt_offset = 0;
	for (i = 0; i < ar_sdio->n_rx_pkts; i++) {
		pkt = &ar_sdio->rx_pkts[i];
		htc_hdr = (struct ath10k_htc_hdr *)(ar_sdio->vsg_buffer + pkt_offset);
		pkt->act_len = le16_to_cpu(htc_hdr->len) + sizeof(*htc_hdr);

		if (pkt->act_len > pkt->alloc_len) {
			ret = -EINVAL;
			goto err;
		}

		skb_put_data(pkt->skb, htc_hdr, pkt->act_len);
		pkt_offset += pkt->alloc_len;
	}

	return 0;

err:
	/* Free all packets that were not successfully fetched. */
	for (i = 0; i < ar_sdio->n_rx_pkts; i++)
		ath10k_sdio_mbox_free_rx_pkt(&ar_sdio->rx_pkts[i]);

	ar_sdio->n_rx_pkts = 0;

	return ret;
}

/* This is the timeout for mailbox processing done in the sdio irq
 * handler. The timeout is deliberately set quite high since SDIO dump logs
 * over serial port can/will add a substantial overhead to the processing
 * (if enabled).
 */
#define SDIO_MBOX_PROCESSING_TIMEOUT_HZ (20 * HZ)

static int ath10k_sdio_mbox_rxmsg_pending_handler(struct ath10k *ar,
						  u32 msg_lookahead, bool *done)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	u32 lookaheads[ATH10K_SDIO_MAX_RX_MSGS];
	int n_lookaheads = 1;
	unsigned long timeout;
	int ret;

	*done = true;

	/* Copy the lookahead obtained from the HTC register table into our
	 * temp array as a start value.
	 */
	lookaheads[0] = msg_lookahead;

	timeout = jiffies + SDIO_MBOX_PROCESSING_TIMEOUT_HZ;
	do {
		/* Try to allocate as many HTC RX packets indicated by
		 * n_lookaheads.
		 */
		ret = ath10k_sdio_mbox_rx_alloc(ar, lookaheads,
						n_lookaheads);
		if (ret)
			break;

		if (ar_sdio->n_rx_pkts >= 2)
			/* A recv bundle was detected, force IRQ status
			 * re-check again.
			 */
			*done = false;

		if (ar_sdio->n_rx_pkts > 1)
			ret = ath10k_sdio_mbox_rx_fetch_bundle(ar);
		else
			ret = ath10k_sdio_mbox_rx_fetch(ar);

		/* Process fetched packets. This will potentially update
		 * n_lookaheads depending on if the packets contain lookahead
		 * reports.
		 */
		n_lookaheads = 0;
		ret = ath10k_sdio_mbox_rx_process_packets(ar,
							  lookaheads,
							  &n_lookaheads);

		if (!n_lookaheads || ret)
			break;

		/* For SYNCH processing, if we get here, we are running
		 * through the loop again due to updated lookaheads. Set
		 * flag that we should re-check IRQ status registers again
		 * before leaving IRQ processing, this can net better
		 * performance in high throughput situations.
		 */
		*done = false;
	} while (time_before(jiffies, timeout));

	if (ret && (ret != -ECANCELED))
		ath10k_warn(ar, "failed to get pending recv messages: %d\n",
			    ret);

	return ret;
}

static int ath10k_sdio_mbox_proc_dbg_intr(struct ath10k *ar)
{
	u32 val;
	int ret;

	/* TODO: Add firmware crash handling */
	ath10k_warn(ar, "firmware crashed\n");

	/* read counter to clear the interrupt, the debug error interrupt is
	 * counter 0.
	 */
	ret = ath10k_sdio_read32(ar, MBOX_COUNT_DEC_ADDRESS, &val);
	if (ret)
		ath10k_warn(ar, "failed to clear debug interrupt: %d\n", ret);

	return ret;
}

static int ath10k_sdio_mbox_proc_counter_intr(struct ath10k *ar)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
	u8 counter_int_status;
	int ret;

	mutex_lock(&irq_data->mtx);
	counter_int_status = irq_data->irq_proc_reg->counter_int_status &
			     irq_data->irq_en_reg->cntr_int_status_en;

	/* NOTE: other modules like GMBOX may use the counter interrupt for
	 * credit flow control on other counters, we only need to check for
	 * the debug assertion counter interrupt.
	 */
	if (counter_int_status & ATH10K_SDIO_TARGET_DEBUG_INTR_MASK)
		ret = ath10k_sdio_mbox_proc_dbg_intr(ar);
	else
		ret = 0;

	mutex_unlock(&irq_data->mtx);

	return ret;
}
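/* The error and CPU interrupt handlers below operate on irq_proc_reg, a
 * cached copy of the target's interrupt status registers refreshed in one
 * bulk read by ath10k_sdio_mbox_read_int_status().  Acknowledging a source
 * means clearing the bit in the cached copy and writing that same bit back
 * to the device register (write-1-to-clear semantics).
 */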
static int ath10k_sdio_mbox_proc_err_intr(struct ath10k *ar)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
	u8 error_int_status;
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio error interrupt\n");

	error_int_status = irq_data->irq_proc_reg->error_int_status & 0x0F;
	if (!error_int_status) {
		ath10k_warn(ar, "invalid error interrupt status: 0x%x\n",
			    error_int_status);
		return -EIO;
	}

	ath10k_dbg(ar, ATH10K_DBG_SDIO,
		   "sdio error_int_status 0x%x\n", error_int_status);

	if (FIELD_GET(MBOX_ERROR_INT_STATUS_WAKEUP_MASK,
		      error_int_status))
		ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio interrupt error wakeup\n");

	if (FIELD_GET(MBOX_ERROR_INT_STATUS_RX_UNDERFLOW_MASK,
		      error_int_status))
		ath10k_warn(ar, "rx underflow interrupt error\n");

	if (FIELD_GET(MBOX_ERROR_INT_STATUS_TX_OVERFLOW_MASK,
		      error_int_status))
		ath10k_warn(ar, "tx overflow interrupt error\n");

	/* Clear the interrupt */
	irq_data->irq_proc_reg->error_int_status &= ~error_int_status;

	/* set W1C value to clear the interrupt, this hits the register first */
	ret = ath10k_sdio_writesb32(ar, MBOX_ERROR_INT_STATUS_ADDRESS,
				    error_int_status);
	if (ret) {
		ath10k_warn(ar, "unable to write to error int status address: %d\n",
			    ret);
		return ret;
	}

	return 0;
}

static int ath10k_sdio_mbox_proc_cpu_intr(struct ath10k *ar)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
	u8 cpu_int_status;
	int ret;

	mutex_lock(&irq_data->mtx);
	cpu_int_status = irq_data->irq_proc_reg->cpu_int_status &
			 irq_data->irq_en_reg->cpu_int_status_en;
	if (!cpu_int_status) {
		ath10k_warn(ar, "CPU interrupt status is zero\n");
		ret = -EIO;
		goto out;
	}

	/* Clear the interrupt */
	irq_data->irq_proc_reg->cpu_int_status &= ~cpu_int_status;

	/* Set up the register transfer buffer to hit the register 4 times,
	 * this is done to make the access 4-byte aligned to mitigate issues
	 * with host bus interconnects that restrict bus transfer lengths to
	 * be a multiple of 4-bytes.
	 *
	 * Set W1C value to clear the interrupt, this hits the register first.
	 */
	ret = ath10k_sdio_writesb32(ar, MBOX_CPU_INT_STATUS_ADDRESS,
				    cpu_int_status);
	if (ret) {
		ath10k_warn(ar, "unable to write to cpu interrupt status address: %d\n",
			    ret);
		goto out;
	}

out:
	mutex_unlock(&irq_data->mtx);
	if (cpu_int_status & MBOX_CPU_STATUS_ENABLE_ASSERT_MASK)
		ath10k_sdio_fw_crashed_dump(ar);

	return ret;
}
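/* Each register access is a full bus transaction on SDIO, so the helper
 * below fetches the whole ath10k_sdio_irq_proc_regs block (host/CPU/error/
 * counter int status plus the RX lookahead words) with a single read
 * starting at MBOX_HOST_INT_STATUS_ADDRESS, and the per-source handlers
 * then work from that snapshot.
 */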
static int ath10k_sdio_mbox_read_int_status(struct ath10k *ar,
					    u8 *host_int_status,
					    u32 *lookahead)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
	struct ath10k_sdio_irq_proc_regs *irq_proc_reg = irq_data->irq_proc_reg;
	struct ath10k_sdio_irq_enable_regs *irq_en_reg = irq_data->irq_en_reg;
	u8 htc_mbox = FIELD_PREP(ATH10K_HTC_MAILBOX_MASK, 1);
	int ret;

	mutex_lock(&irq_data->mtx);

	*lookahead = 0;
	*host_int_status = 0;

	/* int_status_en is supposed to be non zero, otherwise interrupts
	 * shouldn't be enabled. There is however a short time frame during
	 * initialization between the irq register and int_status_en init
	 * where this can happen.
	 * We silently ignore this condition.
	 */
	if (!irq_en_reg->int_status_en) {
		ret = 0;
		goto out;
	}

	/* Read the first sizeof(struct ath10k_irq_proc_registers)
	 * bytes of the HTC register table. This will yield us the value of
	 * different int status registers and the lookahead registers.
	 */
	ret = ath10k_sdio_read(ar, MBOX_HOST_INT_STATUS_ADDRESS,
			       irq_proc_reg, sizeof(*irq_proc_reg));
	if (ret) {
		ath10k_core_start_recovery(ar);
		ath10k_warn(ar, "read int status fail, start recovery\n");
		goto out;
	}

	/* Update only those registers that are enabled */
	*host_int_status = irq_proc_reg->host_int_status &
			   irq_en_reg->int_status_en;

	/* Look at mbox status */
	if (!(*host_int_status & htc_mbox)) {
		*lookahead = 0;
		ret = 0;
		goto out;
	}

	/* Mask out pending mbox value, we use look ahead as
	 * the real flag for mbox processing.
	 */
	*host_int_status &= ~htc_mbox;
	if (irq_proc_reg->rx_lookahead_valid & htc_mbox) {
		*lookahead = le32_to_cpu(
			irq_proc_reg->rx_lookahead[ATH10K_HTC_MAILBOX]);
		if (!*lookahead)
			ath10k_warn(ar, "sdio mbox lookahead is zero\n");
	}

out:
	mutex_unlock(&irq_data->mtx);
	return ret;
}

static int ath10k_sdio_mbox_proc_pending_irqs(struct ath10k *ar,
					      bool *done)
{
	u8 host_int_status;
	u32 lookahead;
	int ret;

	/* NOTE: HIF implementation guarantees that the context of this
	 * call allows us to perform SYNCHRONOUS I/O, that is we can block,
	 * sleep or call any API that can block or switch thread/task
	 * contexts. This is a fully schedulable context.
	 */

	ret = ath10k_sdio_mbox_read_int_status(ar,
					       &host_int_status,
					       &lookahead);
	if (ret) {
		*done = true;
		goto out;
	}

	if (!host_int_status && !lookahead) {
		ret = 0;
		*done = true;
		goto out;
	}

	if (lookahead) {
		ath10k_dbg(ar, ATH10K_DBG_SDIO,
			   "sdio pending mailbox msg lookahead 0x%08x\n",
			   lookahead);

		ret = ath10k_sdio_mbox_rxmsg_pending_handler(ar,
							     lookahead,
							     done);
		if (ret)
			goto out;
	}

	/* now, handle the rest of the interrupts */
	ath10k_dbg(ar, ATH10K_DBG_SDIO,
		   "sdio host_int_status 0x%x\n", host_int_status);

	if (FIELD_GET(MBOX_HOST_INT_STATUS_CPU_MASK, host_int_status)) {
		/* CPU Interrupt */
		ret = ath10k_sdio_mbox_proc_cpu_intr(ar);
		if (ret)
			goto out;
	}

	if (FIELD_GET(MBOX_HOST_INT_STATUS_ERROR_MASK, host_int_status)) {
		/* Error Interrupt */
		ret = ath10k_sdio_mbox_proc_err_intr(ar);
		if (ret)
			goto out;
	}

	if (FIELD_GET(MBOX_HOST_INT_STATUS_COUNTER_MASK, host_int_status))
		/* Counter Interrupt */
		ret = ath10k_sdio_mbox_proc_counter_intr(ar);

	ret = 0;

out:
	/* An optimization to bypass reading the IRQ status registers
	 * unnecessarily: if upper layers determine that we are in a
	 * low-throughput mode, we can rely on taking another interrupt
	 * rather than re-checking the status registers, which can re-wake
	 * the target.
	 *
	 * NOTE: host interfaces that detect pending mbox messages at the
	 * HIF level cannot use this optimization due to possible side
	 * effects; SPI requires the host to drain all messages from the
	 * mailbox before exiting the ISR routine.
	 */

	ath10k_dbg(ar, ATH10K_DBG_SDIO,
		   "sdio pending irqs done %d status %d",
		   *done, ret);

	return ret;
}
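/* ath10k_sdio_set_mbox_info() below derives the mailbox layout from the
 * SDIO device ID: extended mailbox 0 starts at
 * ATH10K_HIF_MBOX0_EXT_BASE_ADDR with a chip dependent width (56K from
 * QCA6174 2.0 onwards, per the comment in the switch below), and mailbox 1
 * is placed directly above it plus a dummy gap.
 */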
static void ath10k_sdio_set_mbox_info(struct ath10k *ar)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct ath10k_mbox_info *mbox_info = &ar_sdio->mbox_info;
	u16 device = ar_sdio->func->device, dev_id_base, dev_id_chiprev;

	mbox_info->htc_addr = ATH10K_HIF_MBOX_BASE_ADDR;
	mbox_info->block_size = ATH10K_HIF_MBOX_BLOCK_SIZE;
	mbox_info->block_mask = ATH10K_HIF_MBOX_BLOCK_SIZE - 1;
	mbox_info->gmbox_addr = ATH10K_HIF_GMBOX_BASE_ADDR;
	mbox_info->gmbox_sz = ATH10K_HIF_GMBOX_WIDTH;

	mbox_info->ext_info[0].htc_ext_addr = ATH10K_HIF_MBOX0_EXT_BASE_ADDR;

	dev_id_base = (device & 0x0F00);
	dev_id_chiprev = (device & 0x00FF);
	switch (dev_id_base) {
	case (SDIO_DEVICE_ID_ATHEROS_AR6005 & 0x0F00):
		if (dev_id_chiprev < 4)
			mbox_info->ext_info[0].htc_ext_sz =
				ATH10K_HIF_MBOX0_EXT_WIDTH;
		else
			/* from QCA6174 2.0(0x504), the width has been extended
			 * to 56K
			 */
			mbox_info->ext_info[0].htc_ext_sz =
				ATH10K_HIF_MBOX0_EXT_WIDTH_ROME_2_0;
		break;
	case (SDIO_DEVICE_ID_ATHEROS_QCA9377 & 0x0F00):
		mbox_info->ext_info[0].htc_ext_sz =
			ATH10K_HIF_MBOX0_EXT_WIDTH_ROME_2_0;
		break;
	default:
		mbox_info->ext_info[0].htc_ext_sz =
			ATH10K_HIF_MBOX0_EXT_WIDTH;
	}

	mbox_info->ext_info[1].htc_ext_addr =
		mbox_info->ext_info[0].htc_ext_addr +
		mbox_info->ext_info[0].htc_ext_sz +
		ATH10K_HIF_MBOX_DUMMY_SPACE_SIZE;
	mbox_info->ext_info[1].htc_ext_sz = ATH10K_HIF_MBOX1_EXT_WIDTH;
}

/* BMI functions */

static int ath10k_sdio_bmi_credits(struct ath10k *ar)
{
	u32 addr, cmd_credits;
	unsigned long timeout;
	int ret;

	/* Read the counter register to get the command credits */
	addr = MBOX_COUNT_DEC_ADDRESS + ATH10K_HIF_MBOX_NUM_MAX * 4;
	timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;
	cmd_credits = 0;

	while (time_before(jiffies, timeout) && !cmd_credits) {
		/* Hit the credit counter with a 4-byte access, the first byte
		 * read will hit the counter and cause a decrement, while the
		 * remaining 3 bytes has no effect. The rationale behind this
		 * is to make all HIF accesses 4-byte aligned.
		 */
		ret = ath10k_sdio_read32(ar, addr, &cmd_credits);
		if (ret) {
			ath10k_warn(ar,
				    "unable to decrement the command credit count register: %d\n",
				    ret);
			return ret;
		}

		/* The counter is only 8 bits.
		 * Ignore anything in the upper 3 bytes
		 */
		cmd_credits &= 0xFF;
	}

	if (!cmd_credits) {
		ath10k_warn(ar, "bmi communication timeout\n");
		return -ETIMEDOUT;
	}

	return 0;
}

static int ath10k_sdio_bmi_get_rx_lookahead(struct ath10k *ar)
{
	unsigned long timeout;
	u32 rx_word;
	int ret;

	timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;
	rx_word = 0;

	while ((time_before(jiffies, timeout)) && !rx_word) {
		ret = ath10k_sdio_read32(ar,
					 MBOX_HOST_INT_STATUS_ADDRESS,
					 &rx_word);
		if (ret) {
			ath10k_warn(ar, "unable to read RX_LOOKAHEAD_VALID: %d\n", ret);
			return ret;
		}

		/* all we really want is one bit */
		rx_word &= 1;
	}

	if (!rx_word) {
		ath10k_warn(ar, "bmi_recv_buf FIFO empty\n");
		return -EINVAL;
	}

	return ret;
}

static int ath10k_sdio_bmi_exchange_msg(struct ath10k *ar,
					void *req, u32 req_len,
					void *resp, u32 *resp_len)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	u32 addr;
	int ret;

	if (req) {
		ret = ath10k_sdio_bmi_credits(ar);
		if (ret)
			return ret;

		addr = ar_sdio->mbox_info.htc_addr;

		memcpy(ar_sdio->bmi_buf, req, req_len);
		ret = ath10k_sdio_write(ar, addr, ar_sdio->bmi_buf, req_len);
		if (ret) {
			ath10k_warn(ar,
				    "unable to send the bmi data to the device: %d\n",
				    ret);
			return ret;
		}
	}

	if (!resp || !resp_len)
		/* No response expected */
		return 0;

	/* During normal bootup, small reads may be required.
	 * Rather than issue an HIF Read and then wait as the Target
	 * adds successive bytes to the FIFO, we wait here until
	 * we know that response data is available.
	 *
	 * This allows us to cleanly timeout on an unexpected
	 * Target failure rather than risk problems at the HIF level.
	 * In particular, this avoids SDIO timeouts and possibly garbage
	 * data on some host controllers. And on an interconnect
	 * such as Compact Flash (as well as some SDIO masters) which
	 * does not provide any indication on data timeout, it avoids
	 * a potential hang or garbage response.
	 *
	 * Synchronization is more difficult for reads larger than the
	 * size of the MBOX FIFO (128B), because the Target is unable
	 * to push the 129th byte of data until AFTER the Host posts an
	 * HIF Read and removes some FIFO data. So for large reads the
	 * Host proceeds to post an HIF Read BEFORE all the data is
	 * actually available to read. Fortunately, large BMI reads do
	 * not occur in practice -- they're supported for debug/development.
	 *
	 * So Host/Target BMI synchronization is divided into these cases:
	 * CASE 1: length < 4
	 *	Should not happen
	 *
	 * CASE 2: 4 <= length <= 128
	 *	Wait for first 4 bytes to be in FIFO
	 *	If CONSERVATIVE_BMI_READ is enabled, also wait for
	 *	a BMI command credit, which indicates that the ENTIRE
	 *	response is available in the FIFO
	 *
	 * CASE 3: length > 128
	 *	Wait for the first 4 bytes to be in FIFO
	 *
	 * For most uses, a small timeout should be sufficient and we will
	 * usually see a response quickly; but there may be some unusual
	 * (debug) cases of BMI_EXECUTE where we want a larger timeout.
	 * For now, we use an unbounded busy loop while waiting for
	 * BMI_EXECUTE.
	 *
	 * If BMI_EXECUTE ever needs to support longer-latency execution,
	 * especially in production, this code needs to be enhanced to sleep
	 * and yield. Also note that BMI_COMMUNICATION_TIMEOUT is currently
	 * a function of Host processor speed.
	 */
	ret = ath10k_sdio_bmi_get_rx_lookahead(ar);
	if (ret)
		return ret;

	/* We always read from the start of the mbox address */
	addr = ar_sdio->mbox_info.htc_addr;
	ret = ath10k_sdio_read(ar, addr, ar_sdio->bmi_buf, *resp_len);
	if (ret) {
		ath10k_warn(ar,
			    "unable to read the bmi data from the device: %d\n",
			    ret);
		return ret;
	}

	memcpy(resp, ar_sdio->bmi_buf, *resp_len);

	return 0;
}

/* sdio async handling functions */

static struct ath10k_sdio_bus_request
*ath10k_sdio_alloc_busreq(struct ath10k *ar)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct ath10k_sdio_bus_request *bus_req;

	spin_lock_bh(&ar_sdio->lock);

	if (list_empty(&ar_sdio->bus_req_freeq)) {
		bus_req = NULL;
		goto out;
	}

	bus_req = list_first_entry(&ar_sdio->bus_req_freeq,
				   struct ath10k_sdio_bus_request, list);
	list_del(&bus_req->list);

out:
	spin_unlock_bh(&ar_sdio->lock);
	return bus_req;
}

static void ath10k_sdio_free_bus_req(struct ath10k *ar,
				     struct ath10k_sdio_bus_request *bus_req)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);

	memset(bus_req, 0, sizeof(*bus_req));

	spin_lock_bh(&ar_sdio->lock);
	list_add_tail(&bus_req->list, &ar_sdio->bus_req_freeq);
	spin_unlock_bh(&ar_sdio->lock);
}

static void __ath10k_sdio_write_async(struct ath10k *ar,
				      struct ath10k_sdio_bus_request *req)
{
	struct ath10k_htc_ep *ep;
	struct sk_buff *skb;
	int ret;

	skb = req->skb;
	ret = ath10k_sdio_write(ar, req->address, skb->data, skb->len);
	if (ret)
		ath10k_warn(ar, "failed to write skb to 0x%x asynchronously: %d",
			    req->address, ret);

	if (req->htc_msg) {
		ep = &ar->htc.endpoint[req->eid];
		ath10k_htc_notify_tx_completion(ep, skb);
	} else if (req->comp) {
		complete(req->comp);
	}

	ath10k_sdio_free_bus_req(ar, req);
}

/* To improve throughput use workqueue to deliver packets to HTC layer,
 * this way SDIO bus is utilised much better.
 */
static void ath10k_rx_indication_async_work(struct work_struct *work)
{
	struct ath10k_sdio *ar_sdio = container_of(work, struct ath10k_sdio,
						   async_work_rx);
	struct ath10k *ar = ar_sdio->ar;
	struct ath10k_htc_ep *ep;
	struct ath10k_skb_rxcb *cb;
	struct sk_buff *skb;

	while (true) {
		skb = skb_dequeue(&ar_sdio->rx_head);
		if (!skb)
			break;
		cb = ATH10K_SKB_RXCB(skb);
		ep = &ar->htc.endpoint[cb->eid];
		ep->ep_ops.ep_rx_complete(ar, skb);
	}

	if (test_bit(ATH10K_FLAG_CORE_REGISTERED, &ar->dev_flags)) {
		local_bh_disable();
		napi_schedule(&ar->napi);
		local_bh_enable();
	}
}

static int ath10k_sdio_read_rtc_state(struct ath10k_sdio *ar_sdio, unsigned char *state)
{
	struct ath10k *ar = ar_sdio->ar;
	unsigned char rtc_state = 0;
	int ret = 0;

	rtc_state = sdio_f0_readb(ar_sdio->func, ATH10K_CIS_RTC_STATE_ADDR, &ret);
	if (ret) {
		ath10k_warn(ar, "failed to read rtc state: %d\n", ret);
		return ret;
	}

	*state = rtc_state & 0x3;

	return ret;
}
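/* Mailbox sleep handling (a summary of the helpers that follow): the target
 * may put the mailbox into a low power state after a period of TX
 * inactivity.  Waking it means setting the disable-sleep bit in the
 * FIFO_TIMEOUT_AND_CHIP_CONTROL register and then polling the RTC state
 * through function 0 until it reports ON, with short delays in between to
 * let the crystal settle.
 */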
static int ath10k_sdio_set_mbox_sleep(struct ath10k *ar, bool enable_sleep)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	u32 val;
	int retry = ATH10K_CIS_READ_RETRY, ret = 0;
	unsigned char rtc_state = 0;

	sdio_claim_host(ar_sdio->func);

	ret = ath10k_sdio_read32(ar, ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL, &val);
	if (ret) {
		ath10k_warn(ar, "failed to read fifo/chip control register: %d\n",
			    ret);
		goto release;
	}

	if (enable_sleep) {
		val &= ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_OFF;
		ar_sdio->mbox_state = SDIO_MBOX_SLEEP_STATE;
	} else {
		val |= ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_ON;
		ar_sdio->mbox_state = SDIO_MBOX_AWAKE_STATE;
	}

	ret = ath10k_sdio_write32(ar, ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL, val);
	if (ret) {
		ath10k_warn(ar, "failed to write to FIFO_TIMEOUT_AND_CHIP_CONTROL: %d",
			    ret);
	}

	if (!enable_sleep) {
		do {
			udelay(ATH10K_CIS_READ_WAIT_4_RTC_CYCLE_IN_US);
			ret = ath10k_sdio_read_rtc_state(ar_sdio, &rtc_state);

			if (ret) {
				ath10k_warn(ar, "failed to disable mbox sleep: %d", ret);
				break;
			}

			ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio read rtc state: %d\n",
				   rtc_state);

			if (rtc_state == ATH10K_CIS_RTC_STATE_ON)
				break;

			udelay(ATH10K_CIS_XTAL_SETTLE_DURATION_IN_US);
			retry--;
		} while (retry > 0);
	}

release:
	sdio_release_host(ar_sdio->func);

	return ret;
}

static void ath10k_sdio_sleep_timer_handler(struct timer_list *t)
{
	struct ath10k_sdio *ar_sdio = from_timer(ar_sdio, t, sleep_timer);

	ar_sdio->mbox_state = SDIO_MBOX_REQUEST_TO_SLEEP_STATE;
	queue_work(ar_sdio->workqueue, &ar_sdio->wr_async_work);
}

static void ath10k_sdio_write_async_work(struct work_struct *work)
{
	struct ath10k_sdio *ar_sdio = container_of(work, struct ath10k_sdio,
						   wr_async_work);
	struct ath10k *ar = ar_sdio->ar;
	struct ath10k_sdio_bus_request *req, *tmp_req;
	struct ath10k_mbox_info *mbox_info = &ar_sdio->mbox_info;

	spin_lock_bh(&ar_sdio->wr_async_lock);

	list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) {
		list_del(&req->list);
		spin_unlock_bh(&ar_sdio->wr_async_lock);

		if (req->address >= mbox_info->htc_addr &&
		    ar_sdio->mbox_state == SDIO_MBOX_SLEEP_STATE) {
			ath10k_sdio_set_mbox_sleep(ar, false);
			mod_timer(&ar_sdio->sleep_timer, jiffies +
				  msecs_to_jiffies(ATH10K_MIN_SLEEP_INACTIVITY_TIME_MS));
		}

		__ath10k_sdio_write_async(ar, req);
		spin_lock_bh(&ar_sdio->wr_async_lock);
	}

	spin_unlock_bh(&ar_sdio->wr_async_lock);

	if (ar_sdio->mbox_state == SDIO_MBOX_REQUEST_TO_SLEEP_STATE)
		ath10k_sdio_set_mbox_sleep(ar, true);
}

static int ath10k_sdio_prep_async_req(struct ath10k *ar, u32 addr,
				      struct sk_buff *skb,
				      struct completion *comp,
				      bool htc_msg, enum ath10k_htc_ep_id eid)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct ath10k_sdio_bus_request *bus_req;

	/* Allocate a bus request for the message and queue it on the
	 * SDIO workqueue.
	 */
	bus_req = ath10k_sdio_alloc_busreq(ar);
	if (!bus_req) {
		ath10k_warn(ar,
			    "unable to allocate bus request for async request\n");
		return -ENOMEM;
	}

	bus_req->skb = skb;
	bus_req->eid = eid;
	bus_req->address = addr;
	bus_req->htc_msg = htc_msg;
	bus_req->comp = comp;

	spin_lock_bh(&ar_sdio->wr_async_lock);
	list_add_tail(&bus_req->list, &ar_sdio->wr_asyncq);
	spin_unlock_bh(&ar_sdio->wr_async_lock);

	return 0;
}
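/* Typical use of the async write path (a sketch matching the call sites in
 * this file, e.g. ath10k_sdio_hif_tx_sg() below; not an additional API):
 *
 *	ret = ath10k_sdio_prep_async_req(ar, address, skb, NULL, true, eid);
 *	if (ret)
 *		return ret;
 *
 *	queue_work(ar_sdio->workqueue, &ar_sdio->wr_async_work);
 *
 * The worker then dequeues the request, wakes the mailbox if needed and
 * performs the actual sdio write in a sleepable context.
 */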
/* IRQ handler */

static void ath10k_sdio_irq_handler(struct sdio_func *func)
{
	struct ath10k_sdio *ar_sdio = sdio_get_drvdata(func);
	struct ath10k *ar = ar_sdio->ar;
	unsigned long timeout;
	bool done = false;
	int ret;

	/* Release the host during interrupts so we can pick it back up when
	 * we process commands.
	 */
	sdio_release_host(ar_sdio->func);

	timeout = jiffies + ATH10K_SDIO_HIF_COMMUNICATION_TIMEOUT_HZ;
	do {
		ret = ath10k_sdio_mbox_proc_pending_irqs(ar, &done);
		if (ret)
			break;
	} while (time_before(jiffies, timeout) && !done);

	ath10k_mac_tx_push_pending(ar);

	sdio_claim_host(ar_sdio->func);

	if (ret && ret != -ECANCELED)
		ath10k_warn(ar, "failed to process pending SDIO interrupts: %d\n",
			    ret);
}

/* sdio HIF functions */

static int ath10k_sdio_disable_intrs(struct ath10k *ar)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
	struct ath10k_sdio_irq_enable_regs *regs = irq_data->irq_en_reg;
	int ret;

	mutex_lock(&irq_data->mtx);

	memset(regs, 0, sizeof(*regs));
	ret = ath10k_sdio_write(ar, MBOX_INT_STATUS_ENABLE_ADDRESS,
				&regs->int_status_en, sizeof(*regs));
	if (ret)
		ath10k_warn(ar, "unable to disable sdio interrupts: %d\n", ret);

	mutex_unlock(&irq_data->mtx);

	return ret;
}

static int ath10k_sdio_hif_power_up(struct ath10k *ar,
				    enum ath10k_firmware_mode fw_mode)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct sdio_func *func = ar_sdio->func;
	int ret;

	if (!ar_sdio->is_disabled)
		return 0;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "sdio power on\n");

	ret = ath10k_sdio_config(ar);
	if (ret) {
		ath10k_err(ar, "failed to config sdio: %d\n", ret);
		return ret;
	}

	sdio_claim_host(func);

	ret = sdio_enable_func(func);
	if (ret) {
		ath10k_warn(ar, "unable to enable sdio function: %d\n", ret);
		sdio_release_host(func);
		return ret;
	}

	sdio_release_host(func);

	/* Wait for hardware to initialise. It should take a lot less than
	 * 20 ms but let's be conservative here.
	 */
	msleep(20);

	ar_sdio->is_disabled = false;

	ret = ath10k_sdio_disable_intrs(ar);
	if (ret)
		return ret;

	return 0;
}

static void ath10k_sdio_hif_power_down(struct ath10k *ar)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	int ret;

	if (ar_sdio->is_disabled)
		return;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "sdio power off\n");

	del_timer_sync(&ar_sdio->sleep_timer);
	ath10k_sdio_set_mbox_sleep(ar, true);

	/* Disable the card */
	sdio_claim_host(ar_sdio->func);

	ret = sdio_disable_func(ar_sdio->func);
	if (ret) {
		ath10k_warn(ar, "unable to disable sdio function: %d\n", ret);
		sdio_release_host(ar_sdio->func);
		return;
	}

	ret = mmc_hw_reset(ar_sdio->func->card);
	if (ret)
		ath10k_warn(ar, "unable to reset sdio: %d\n", ret);

	sdio_release_host(ar_sdio->func);

	ar_sdio->is_disabled = true;
}

static int ath10k_sdio_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
				 struct ath10k_hif_sg_item *items, int n_items)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	enum ath10k_htc_ep_id eid;
	struct sk_buff *skb;
	int ret, i;

	eid = pipe_id_to_eid(pipe_id);

	for (i = 0; i < n_items; i++) {
		size_t padded_len;
		u32 address;

		skb = items[i].transfer_context;
		padded_len = ath10k_sdio_calc_txrx_padded_len(ar_sdio,
							      skb->len);
		skb_trim(skb, padded_len);

		/* Write TX data to the end of the mbox address space */
		address = ar_sdio->mbox_addr[eid] + ar_sdio->mbox_size[eid] -
			  skb->len;
		ret = ath10k_sdio_prep_async_req(ar, address, skb,
						 NULL, true, eid);
		if (ret)
			return ret;
	}

	queue_work(ar_sdio->workqueue, &ar_sdio->wr_async_work);

	return 0;
}

static int ath10k_sdio_enable_intrs(struct ath10k *ar)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
	struct ath10k_sdio_irq_enable_regs *regs = irq_data->irq_en_reg;
	int ret;

	mutex_lock(&irq_data->mtx);

	/* Enable all but CPU interrupts */
	regs->int_status_en = FIELD_PREP(MBOX_INT_STATUS_ENABLE_ERROR_MASK, 1) |
			      FIELD_PREP(MBOX_INT_STATUS_ENABLE_CPU_MASK, 1) |
			      FIELD_PREP(MBOX_INT_STATUS_ENABLE_COUNTER_MASK, 1);

	/* NOTE: There are some cases where HIF can do detection of
	 * pending mbox messages which is disabled now.
	 */
	regs->int_status_en |=
		FIELD_PREP(MBOX_INT_STATUS_ENABLE_MBOX_DATA_MASK, 1);

	/* Set up the CPU Interrupt Status Register, enable CPU sourced interrupt #0.
	 * #0 is used for report assertion from target
	 */
	regs->cpu_int_status_en = FIELD_PREP(MBOX_CPU_STATUS_ENABLE_ASSERT_MASK, 1);

	/* Set up the Error Interrupt status Register */
	regs->err_int_status_en =
		FIELD_PREP(MBOX_ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK, 1) |
		FIELD_PREP(MBOX_ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK, 1);

	/* Enable Counter interrupt status register to get fatal errors for
	 * debugging.
	 */
	regs->cntr_int_status_en =
		FIELD_PREP(MBOX_COUNTER_INT_STATUS_ENABLE_BIT_MASK,
			   ATH10K_SDIO_TARGET_DEBUG_INTR_MASK);

	ret = ath10k_sdio_write(ar, MBOX_INT_STATUS_ENABLE_ADDRESS,
				&regs->int_status_en, sizeof(*regs));
	if (ret)
		ath10k_warn(ar,
			    "failed to update mbox interrupt status register : %d\n",
			    ret);

	mutex_unlock(&irq_data->mtx);
	return ret;
}
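/* The diagnostic window used below provides indirect access to target
 * memory: for a read, the target address is written to
 * MBOX_WINDOW_READ_ADDR_ADDRESS and the data is then fetched from
 * MBOX_WINDOW_DATA_ADDRESS; writes go in the opposite order (data first,
 * then the address register, which triggers the write cycle).
 */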
/* HIF diagnostics */

static int ath10k_sdio_hif_diag_read(struct ath10k *ar, u32 address, void *buf,
				     size_t buf_len)
{
	int ret;
	void *mem;

	mem = kzalloc(buf_len, GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	/* set window register to start read cycle */
	ret = ath10k_sdio_write32(ar, MBOX_WINDOW_READ_ADDR_ADDRESS, address);
	if (ret) {
		ath10k_warn(ar, "failed to set mbox window read address: %d", ret);
		goto out;
	}

	/* read the data */
	ret = ath10k_sdio_read(ar, MBOX_WINDOW_DATA_ADDRESS, mem, buf_len);
	if (ret) {
		ath10k_warn(ar, "failed to read from mbox window data address: %d\n",
			    ret);
		goto out;
	}

	memcpy(buf, mem, buf_len);

out:
	kfree(mem);

	return ret;
}

static int ath10k_sdio_diag_read32(struct ath10k *ar, u32 address,
				   u32 *value)
{
	__le32 *val;
	int ret;

	val = kzalloc(sizeof(*val), GFP_KERNEL);
	if (!val)
		return -ENOMEM;

	ret = ath10k_sdio_hif_diag_read(ar, address, val, sizeof(*val));
	if (ret)
		goto out;

	*value = __le32_to_cpu(*val);

out:
	kfree(val);

	return ret;
}

static int ath10k_sdio_hif_diag_write_mem(struct ath10k *ar, u32 address,
					  const void *data, int nbytes)
{
	int ret;

	/* set write data */
	ret = ath10k_sdio_write(ar, MBOX_WINDOW_DATA_ADDRESS, data, nbytes);
	if (ret) {
		ath10k_warn(ar,
			    "failed to write 0x%p to mbox window data address: %d\n",
			    data, ret);
		return ret;
	}

	/* set window register, which starts the write cycle */
	ret = ath10k_sdio_write32(ar, MBOX_WINDOW_WRITE_ADDR_ADDRESS, address);
	if (ret) {
		ath10k_warn(ar, "failed to set mbox window write address: %d", ret);
		return ret;
	}

	return 0;
}

static int ath10k_sdio_hif_start_post(struct ath10k *ar)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	u32 addr, val;
	int ret = 0;

	addr = host_interest_item_address(HI_ITEM(hi_acs_flags));

	ret = ath10k_sdio_diag_read32(ar, addr, &val);
	if (ret) {
		ath10k_warn(ar, "unable to read hi_acs_flags : %d\n", ret);
		return ret;
	}

	if (val & HI_ACS_FLAGS_SDIO_SWAP_MAILBOX_FW_ACK) {
		ath10k_dbg(ar, ATH10K_DBG_SDIO,
			   "sdio mailbox swap service enabled\n");
		ar_sdio->swap_mbox = true;
	} else {
		ath10k_dbg(ar, ATH10K_DBG_SDIO,
			   "sdio mailbox swap service disabled\n");
		ar_sdio->swap_mbox = false;
	}

	ath10k_sdio_set_mbox_sleep(ar, true);

	return 0;
}

static int ath10k_sdio_get_htt_tx_complete(struct ath10k *ar)
{
	u32 addr, val;
	int ret;

	addr = host_interest_item_address(HI_ITEM(hi_acs_flags));

	ret = ath10k_sdio_diag_read32(ar, addr, &val);
	if (ret) {
		ath10k_warn(ar,
			    "unable to read hi_acs_flags for htt tx comple : %d\n", ret);
		return ret;
	}

	ret = (val & HI_ACS_FLAGS_SDIO_REDUCE_TX_COMPL_FW_ACK);

	ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio reduce tx complete fw%sack\n",
		   ret ? " " : " not ");

	return ret;
}

/* HIF start/stop */

static int ath10k_sdio_hif_start(struct ath10k *ar)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	int ret;

	ath10k_core_napi_enable(ar);

	/* Sleep 20 ms before HIF interrupts are disabled.
	 * This will give target plenty of time to process the BMI done
	 * request before interrupts are disabled.
	 */
	msleep(20);
	ret = ath10k_sdio_disable_intrs(ar);
	if (ret)
		return ret;

	/* eid 0 always uses the lower part of the extended mailbox address
	 * space (ext_info[0].htc_ext_addr).
	 */
	ar_sdio->mbox_addr[0] = ar_sdio->mbox_info.ext_info[0].htc_ext_addr;
	ar_sdio->mbox_size[0] = ar_sdio->mbox_info.ext_info[0].htc_ext_sz;

	sdio_claim_host(ar_sdio->func);

	/* Register the isr */
	ret = sdio_claim_irq(ar_sdio->func, ath10k_sdio_irq_handler);
	if (ret) {
		ath10k_warn(ar, "failed to claim sdio interrupt: %d\n", ret);
		sdio_release_host(ar_sdio->func);
		return ret;
	}

	sdio_release_host(ar_sdio->func);

	ret = ath10k_sdio_enable_intrs(ar);
	if (ret)
		ath10k_warn(ar, "failed to enable sdio interrupts: %d\n", ret);

	/* Enable sleep and then disable it again */
	ret = ath10k_sdio_set_mbox_sleep(ar, true);
	if (ret)
		return ret;

	/* Wait for 20ms for the written value to take effect */
	msleep(20);

	ret = ath10k_sdio_set_mbox_sleep(ar, false);
	if (ret)
		return ret;

	return 0;
}

#define SDIO_IRQ_DISABLE_TIMEOUT_HZ (3 * HZ)

static void ath10k_sdio_irq_disable(struct ath10k *ar)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
	struct ath10k_sdio_irq_enable_regs *regs = irq_data->irq_en_reg;
	struct sk_buff *skb;
	struct completion irqs_disabled_comp;
	int ret;

	skb = dev_alloc_skb(sizeof(*regs));
	if (!skb)
		return;

	mutex_lock(&irq_data->mtx);

	memset(regs, 0, sizeof(*regs)); /* disable all interrupts */
	memcpy(skb->data, regs, sizeof(*regs));
	skb_put(skb, sizeof(*regs));

	mutex_unlock(&irq_data->mtx);

	init_completion(&irqs_disabled_comp);
	ret = ath10k_sdio_prep_async_req(ar, MBOX_INT_STATUS_ENABLE_ADDRESS,
					 skb, &irqs_disabled_comp, false, 0);
	if (ret)
		goto out;

	queue_work(ar_sdio->workqueue, &ar_sdio->wr_async_work);

	/* Wait for the completion of the IRQ disable request.
	 * If there is a timeout we will try to disable irq's anyway.
	 */
	ret = wait_for_completion_timeout(&irqs_disabled_comp,
					  SDIO_IRQ_DISABLE_TIMEOUT_HZ);
	if (!ret)
		ath10k_warn(ar, "sdio irq disable request timed out\n");

	sdio_claim_host(ar_sdio->func);

	ret = sdio_release_irq(ar_sdio->func);
	if (ret)
		ath10k_warn(ar, "failed to release sdio interrupt: %d\n", ret);

	sdio_release_host(ar_sdio->func);

out:
	kfree_skb(skb);
}

static void ath10k_sdio_hif_stop(struct ath10k *ar)
{
	struct ath10k_sdio_bus_request *req, *tmp_req;
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct sk_buff *skb;

	ath10k_sdio_irq_disable(ar);

	cancel_work_sync(&ar_sdio->async_work_rx);

	while ((skb = skb_dequeue(&ar_sdio->rx_head)))
		dev_kfree_skb_any(skb);

	cancel_work_sync(&ar_sdio->wr_async_work);

	spin_lock_bh(&ar_sdio->wr_async_lock);

	/* Free all bus requests that have not been handled */
	list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) {
		struct ath10k_htc_ep *ep;

		list_del(&req->list);

		if (req->htc_msg) {
			ep = &ar->htc.endpoint[req->eid];
			ath10k_htc_notify_tx_completion(ep, req->skb);
		} else if (req->skb) {
			kfree_skb(req->skb);
		}
		ath10k_sdio_free_bus_req(ar, req);
	}

	spin_unlock_bh(&ar_sdio->wr_async_lock);

	ath10k_core_napi_sync_disable(ar);
}

#ifdef CONFIG_PM

static int ath10k_sdio_hif_suspend(struct ath10k *ar)
{
	return 0;
}

static int ath10k_sdio_hif_resume(struct ath10k *ar)
{
	switch (ar->state) {
	case ATH10K_STATE_OFF:
		ath10k_dbg(ar, ATH10K_DBG_SDIO,
			   "sdio resume configuring sdio\n");

		/* need to set sdio settings after power is cut from sdio */
		ath10k_sdio_config(ar);
		break;

	case ATH10K_STATE_ON:
	default:
		break;
	}

	return 0;
}
#endif

static int ath10k_sdio_hif_map_service_to_pipe(struct ath10k *ar,
					       u16 service_id,
					       u8 *ul_pipe, u8 *dl_pipe)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct ath10k_htc *htc = &ar->htc;
	u32 htt_addr, wmi_addr, htt_mbox_size, wmi_mbox_size;
	enum ath10k_htc_ep_id eid;
	bool ep_found = false;
	int i;

	/* For sdio, we are interested in the mapping between eid
	 * and pipeid rather than service_id to pipe_id.
	 * First we find out which eid has been allocated to the
	 * service...
	 */
	for (i = 0; i < ATH10K_HTC_EP_COUNT; i++) {
		if (htc->endpoint[i].service_id == service_id) {
			eid = htc->endpoint[i].eid;
			ep_found = true;
			break;
		}
	}

	if (!ep_found)
		return -EINVAL;

	/* Then we create the simplest mapping possible between pipeid
	 * and eid
	 */
	*ul_pipe = *dl_pipe = (u8)eid;

	/* Normally, HTT will use the upper part of the extended
	 * mailbox address space (ext_info[1].htc_ext_addr) and WMI ctrl
	 * the lower part (ext_info[0].htc_ext_addr).
	 * If fw wants swapping of mailbox addresses, the opposite is true.
	 */
	if (ar_sdio->swap_mbox) {
		htt_addr = ar_sdio->mbox_info.ext_info[0].htc_ext_addr;
		wmi_addr = ar_sdio->mbox_info.ext_info[1].htc_ext_addr;
		htt_mbox_size = ar_sdio->mbox_info.ext_info[0].htc_ext_sz;
		wmi_mbox_size = ar_sdio->mbox_info.ext_info[1].htc_ext_sz;
	} else {
		htt_addr = ar_sdio->mbox_info.ext_info[1].htc_ext_addr;
		wmi_addr = ar_sdio->mbox_info.ext_info[0].htc_ext_addr;
		htt_mbox_size = ar_sdio->mbox_info.ext_info[1].htc_ext_sz;
		wmi_mbox_size = ar_sdio->mbox_info.ext_info[0].htc_ext_sz;
	}

	switch (service_id) {
	case ATH10K_HTC_SVC_ID_RSVD_CTRL:
		/* HTC ctrl ep mbox address has already been setup in
		 * ath10k_sdio_hif_start
		 */
		break;
	case ATH10K_HTC_SVC_ID_WMI_CONTROL:
		ar_sdio->mbox_addr[eid] = wmi_addr;
		ar_sdio->mbox_size[eid] = wmi_mbox_size;
		ath10k_dbg(ar, ATH10K_DBG_SDIO,
			   "sdio wmi ctrl mbox_addr 0x%x mbox_size %d\n",
			   ar_sdio->mbox_addr[eid], ar_sdio->mbox_size[eid]);
		break;
	case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
		ar_sdio->mbox_addr[eid] = htt_addr;
		ar_sdio->mbox_size[eid] = htt_mbox_size;
		ath10k_dbg(ar, ATH10K_DBG_SDIO,
			   "sdio htt data mbox_addr 0x%x mbox_size %d\n",
			   ar_sdio->mbox_addr[eid], ar_sdio->mbox_size[eid]);
		break;
	default:
		ath10k_warn(ar, "unsupported HTC service id: %d\n",
			    service_id);
		return -EINVAL;
	}

	return 0;
}

static void ath10k_sdio_hif_get_default_pipe(struct ath10k *ar,
					     u8 *ul_pipe, u8 *dl_pipe)
{
	ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio hif get default pipe\n");

	/* HTC ctrl ep (SVC id 1) always has eid (and pipe_id in our
	 * case) == 0
	 */
	*ul_pipe = 0;
	*dl_pipe = 0;
}

static const struct ath10k_hif_ops ath10k_sdio_hif_ops = {
	.tx_sg			= ath10k_sdio_hif_tx_sg,
	.diag_read		= ath10k_sdio_hif_diag_read,
	.diag_write		= ath10k_sdio_hif_diag_write_mem,
	.exchange_bmi_msg	= ath10k_sdio_bmi_exchange_msg,
	.start			= ath10k_sdio_hif_start,
	.stop			= ath10k_sdio_hif_stop,
	.start_post		= ath10k_sdio_hif_start_post,
	.get_htt_tx_complete	= ath10k_sdio_get_htt_tx_complete,
	.map_service_to_pipe	= ath10k_sdio_hif_map_service_to_pipe,
	.get_default_pipe	= ath10k_sdio_hif_get_default_pipe,
	.power_up		= ath10k_sdio_hif_power_up,
	.power_down		= ath10k_sdio_hif_power_down,
#ifdef CONFIG_PM
	.suspend		= ath10k_sdio_hif_suspend,
	.resume			= ath10k_sdio_hif_resume,
#endif
};

#ifdef CONFIG_PM_SLEEP

/* Empty handlers so that mmc subsystem doesn't remove us entirely during
 * suspend. We instead follow cfg80211 suspend/resume handlers.
 */
static int ath10k_sdio_pm_suspend(struct device *device)
{
	struct sdio_func *func = dev_to_sdio_func(device);
	struct ath10k_sdio *ar_sdio = sdio_get_drvdata(func);
	struct ath10k *ar = ar_sdio->ar;
	mmc_pm_flag_t pm_flag, pm_caps;
	int ret;

	if (!device_may_wakeup(ar->dev))
		return 0;

	ath10k_sdio_set_mbox_sleep(ar, true);

	pm_flag = MMC_PM_KEEP_POWER;

	ret = sdio_set_host_pm_flags(func, pm_flag);
	if (ret) {
		pm_caps = sdio_get_host_pm_caps(func);
		ath10k_warn(ar, "failed to set sdio host pm flags (0x%x, 0x%x): %d\n",
			    pm_flag, pm_caps, ret);
		return ret;
	}

	return ret;
}

static int ath10k_sdio_pm_resume(struct device *device)
{
	return 0;
}

static SIMPLE_DEV_PM_OPS(ath10k_sdio_pm_ops, ath10k_sdio_pm_suspend,
			 ath10k_sdio_pm_resume);

#define ATH10K_SDIO_PM_OPS (&ath10k_sdio_pm_ops)

#else

#define ATH10K_SDIO_PM_OPS NULL

#endif /* CONFIG_PM_SLEEP */

static int ath10k_sdio_napi_poll(struct napi_struct *ctx, int budget)
{
	struct ath10k *ar = container_of(ctx, struct ath10k, napi);
	int done;

	done = ath10k_htt_rx_hl_indication(ar, budget);
	ath10k_dbg(ar, ATH10K_DBG_SDIO, "napi poll: done: %d, budget:%d\n", done, budget);

	if (done < budget)
		napi_complete_done(ctx, done);

	return done;
}

static int ath10k_sdio_read_host_interest_value(struct ath10k *ar,
						u32 item_offset,
						u32 *val)
{
	u32 addr;
	int ret;

	addr = host_interest_item_address(item_offset);

	ret = ath10k_sdio_diag_read32(ar, addr, val);

	if (ret)
		ath10k_warn(ar, "unable to read host interest offset %d value\n",
			    item_offset);

	return ret;
}

static int ath10k_sdio_read_mem(struct ath10k *ar, u32 address, void *buf,
				u32 buf_len)
{
	u32 val;
	int i, ret;

	for (i = 0; i < buf_len; i += 4) {
		ret = ath10k_sdio_diag_read32(ar, address + i, &val);
		if (ret) {
			ath10k_warn(ar, "unable to read mem %d value\n", address + i);
			break;
		}
		memcpy(buf + i, &val, 4);
	}

	return ret;
}

static bool ath10k_sdio_is_fast_dump_supported(struct ath10k *ar)
{
	u32 param;

	ath10k_sdio_read_host_interest_value(ar, HI_ITEM(hi_option_flag2), &param);

	ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio hi_option_flag2 %x\n", param);

	return !!(param & HI_OPTION_SDIO_CRASH_DUMP_ENHANCEMENT_FW);
}
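/* Crash dumping below comes in two flavors, selected by the check above:
 * firmware advertising HI_OPTION_SDIO_CRASH_DUMP_ENHANCEMENT_FW lets the
 * host re-enter BMI and pull memory via ath10k_bmi_read_memory() ("fast
 * dump"), while older firmware is dumped word by word through the diag
 * window via ath10k_sdio_read_mem().
 */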
static int ath10k_sdio_dump_memory_section(struct ath10k *ar,
					   const struct ath10k_mem_region *mem_region,
					   u8 *buf, size_t buf_len)
{
	const struct ath10k_mem_section *cur_section, *next_section;
	unsigned int count, section_size, skip_size;
	int ret, i, j;

	if (!mem_region || !buf)
		return 0;

	cur_section = &mem_region->section_table.sections[0];

	if (mem_region->start > cur_section->start) {
		ath10k_warn(ar, "incorrect memdump region 0x%x with section start address 0x%x.\n",
			    mem_region->start, cur_section->start);
		return 0;
	}

	skip_size = cur_section->start - mem_region->start;

	/* fill the gap between the first register section and register
	 * start address
	 */
	for (i = 0; i < skip_size; i++) {
		*buf = ATH10K_MAGIC_NOT_COPIED;
		buf++;
	}

	count = 0;
	i = 0;
	for (; cur_section; cur_section = next_section) {
		section_size = cur_section->end - cur_section->start;

		if (section_size <= 0) {
			ath10k_warn(ar, "incorrect ramdump format with start address 0x%x and stop address 0x%x\n",
				    cur_section->start,
				    cur_section->end);
			break;
		}

		if (++i == mem_region->section_table.size) {
			/* last section */
			next_section = NULL;
			skip_size = 0;
		} else {
			next_section = cur_section + 1;

			if (cur_section->end > next_section->start) {
				ath10k_warn(ar, "next ramdump section 0x%x is smaller than current end address 0x%x\n",
					    next_section->start,
					    cur_section->end);
				break;
			}

			skip_size = next_section->start - cur_section->end;
		}

		if (buf_len < (skip_size + section_size)) {
			ath10k_warn(ar, "ramdump buffer is too small: %zu\n", buf_len);
			break;
		}

		buf_len -= skip_size + section_size;

		/* read section to dest memory */
		ret = ath10k_sdio_read_mem(ar, cur_section->start,
					   buf, section_size);
		if (ret) {
			ath10k_warn(ar, "failed to read ramdump from section 0x%x: %d\n",
				    cur_section->start, ret);
			break;
		}

		buf += section_size;
		count += section_size;

		/* fill in the gap between this section and the next */
		for (j = 0; j < skip_size; j++) {
			*buf = ATH10K_MAGIC_NOT_COPIED;
			buf++;
		}

		count += skip_size;
	}

	return count;
}

/* if an error happened returns < 0, otherwise the length */
static int ath10k_sdio_dump_memory_generic(struct ath10k *ar,
					   const struct ath10k_mem_region *current_region,
					   u8 *buf,
					   bool fast_dump)
{
	int ret;

	if (current_region->section_table.size > 0)
		/* Copy each section individually. */
		return ath10k_sdio_dump_memory_section(ar,
						       current_region,
						       buf,
						       current_region->len);

	/* No individual memory sections defined so we can
	 * copy the entire memory region.
	 */
	if (fast_dump)
		ret = ath10k_bmi_read_memory(ar,
					     current_region->start,
					     buf,
					     current_region->len);
	else
		ret = ath10k_sdio_read_mem(ar,
					   current_region->start,
					   buf,
					   current_region->len);

	if (ret) {
		ath10k_warn(ar, "failed to copy ramdump region %s: %d\n",
			    current_region->name, ret);
		return ret;
	}

	return current_region->len;
}

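/* Sketch of the ramdump buffer laid out by ath10k_sdio_dump_memory()
 * below: each region is preceded by a struct ath10k_dump_ram_data_hdr,
 * and gaps between defined sections are filled with
 * ATH10K_MAGIC_NOT_COPIED by ath10k_sdio_dump_memory_section():
 *
 *   +-------+---------------+-------+---------------+-- ...
 *   | hdr 0 | region 0 data | hdr 1 | region 1 data |
 *   +-------+---------------+-------+---------------+-- ...
 */
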
static void ath10k_sdio_dump_memory(struct ath10k *ar,
				    struct ath10k_fw_crash_data *crash_data,
				    bool fast_dump)
{
	const struct ath10k_hw_mem_layout *mem_layout;
	const struct ath10k_mem_region *current_region;
	struct ath10k_dump_ram_data_hdr *hdr;
	u32 count;
	size_t buf_len;
	int ret, i;
	u8 *buf;

	if (!crash_data)
		return;

	mem_layout = ath10k_coredump_get_mem_layout(ar);
	if (!mem_layout)
		return;

	current_region = &mem_layout->region_table.regions[0];

	buf = crash_data->ramdump_buf;
	buf_len = crash_data->ramdump_buf_len;

	memset(buf, 0, buf_len);

	for (i = 0; i < mem_layout->region_table.size; i++) {
		count = 0;

		if (current_region->len > buf_len) {
			ath10k_warn(ar, "memory region %s size %d is larger than remaining ramdump buffer size %zu\n",
				    current_region->name,
				    current_region->len,
				    buf_len);
			break;
		}

		/* Reserve space for the header. */
		hdr = (void *)buf;
		buf += sizeof(*hdr);
		buf_len -= sizeof(*hdr);

		ret = ath10k_sdio_dump_memory_generic(ar, current_region, buf,
						      fast_dump);
		if (ret >= 0)
			count = ret;

		hdr->region_type = cpu_to_le32(current_region->type);
		hdr->start = cpu_to_le32(current_region->start);
		hdr->length = cpu_to_le32(count);

		if (count == 0)
			/* Note: the header remains, just with zero length. */
			break;

		buf += count;
		buf_len -= count;

		current_region++;
	}
}

void ath10k_sdio_fw_crashed_dump(struct ath10k *ar)
{
	struct ath10k_fw_crash_data *crash_data;
	char guid[UUID_STRING_LEN + 1];
	bool fast_dump;

	fast_dump = ath10k_sdio_is_fast_dump_supported(ar);

	if (fast_dump)
		ath10k_bmi_start(ar);

	ar->stats.fw_crash_counter++;

	ath10k_sdio_disable_intrs(ar);

	crash_data = ath10k_coredump_new(ar);

	if (crash_data)
		scnprintf(guid, sizeof(guid), "%pUl", &crash_data->guid);
	else
		scnprintf(guid, sizeof(guid), "n/a");

	ath10k_err(ar, "firmware crashed! (guid %s)\n", guid);
	ath10k_print_driver_info(ar);
	ath10k_sdio_dump_registers(ar, crash_data, fast_dump);
	ath10k_sdio_dump_memory(ar, crash_data, fast_dump);

	ath10k_sdio_enable_intrs(ar);

	ath10k_core_start_recovery(ar);
}

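/* Note on the crash path above: SDIO interrupts stay masked while the
 * register file and RAM regions are read out (via BMI when the firmware
 * advertises HI_OPTION_SDIO_CRASH_DUMP_ENHANCEMENT_FW, otherwise via
 * diag window reads), presumably so mailbox traffic cannot interfere
 * with the dump; they are re-enabled before core recovery restarts the
 * device.
 */
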
static int ath10k_sdio_probe(struct sdio_func *func,
			     const struct sdio_device_id *id)
{
	struct ath10k_sdio *ar_sdio;
	struct ath10k *ar;
	enum ath10k_hw_rev hw_rev;
	u32 dev_id_base;
	struct ath10k_bus_params bus_params = {};
	int ret, i;

	/* Assumption: all SDIO based chipsets (so far) are QCA6174 based.
	 * If a newer chipset does not use the hw reg setup defined in
	 * qca6174_regs and qca6174_values, this assumption is no longer
	 * valid and hw_rev must be set up differently depending on the
	 * chipset.
	 */
	hw_rev = ATH10K_HW_QCA6174;

	ar = ath10k_core_create(sizeof(*ar_sdio), &func->dev, ATH10K_BUS_SDIO,
				hw_rev, &ath10k_sdio_hif_ops);
	if (!ar) {
		dev_err(&func->dev, "failed to allocate core\n");
		return -ENOMEM;
	}

	netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_sdio_napi_poll);

	ath10k_dbg(ar, ATH10K_DBG_BOOT,
		   "sdio new func %d vendor 0x%x device 0x%x block 0x%x/0x%x\n",
		   func->num, func->vendor, func->device,
		   func->max_blksize, func->cur_blksize);

	ar_sdio = ath10k_sdio_priv(ar);

	ar_sdio->irq_data.irq_proc_reg =
		devm_kzalloc(ar->dev, sizeof(struct ath10k_sdio_irq_proc_regs),
			     GFP_KERNEL);
	if (!ar_sdio->irq_data.irq_proc_reg) {
		ret = -ENOMEM;
		goto err_core_destroy;
	}

	ar_sdio->vsg_buffer = devm_kmalloc(ar->dev, ATH10K_SDIO_VSG_BUF_SIZE, GFP_KERNEL);
	if (!ar_sdio->vsg_buffer) {
		ret = -ENOMEM;
		goto err_core_destroy;
	}

	ar_sdio->irq_data.irq_en_reg =
		devm_kzalloc(ar->dev, sizeof(struct ath10k_sdio_irq_enable_regs),
			     GFP_KERNEL);
	if (!ar_sdio->irq_data.irq_en_reg) {
		ret = -ENOMEM;
		goto err_core_destroy;
	}

	ar_sdio->bmi_buf = devm_kzalloc(ar->dev, BMI_MAX_LARGE_CMDBUF_SIZE, GFP_KERNEL);
	if (!ar_sdio->bmi_buf) {
		ret = -ENOMEM;
		goto err_core_destroy;
	}

	ar_sdio->func = func;
	sdio_set_drvdata(func, ar_sdio);

	ar_sdio->is_disabled = true;
	ar_sdio->ar = ar;

	spin_lock_init(&ar_sdio->lock);
	spin_lock_init(&ar_sdio->wr_async_lock);
	mutex_init(&ar_sdio->irq_data.mtx);

	INIT_LIST_HEAD(&ar_sdio->bus_req_freeq);
	INIT_LIST_HEAD(&ar_sdio->wr_asyncq);

	INIT_WORK(&ar_sdio->wr_async_work, ath10k_sdio_write_async_work);
	ar_sdio->workqueue = create_singlethread_workqueue("ath10k_sdio_wq");
	if (!ar_sdio->workqueue) {
		ret = -ENOMEM;
		goto err_core_destroy;
	}

	for (i = 0; i < ATH10K_SDIO_BUS_REQUEST_MAX_NUM; i++)
		ath10k_sdio_free_bus_req(ar, &ar_sdio->bus_req[i]);

	skb_queue_head_init(&ar_sdio->rx_head);
	INIT_WORK(&ar_sdio->async_work_rx, ath10k_rx_indication_async_work);

	dev_id_base = (id->device & 0x0F00);
	if (dev_id_base != (SDIO_DEVICE_ID_ATHEROS_AR6005 & 0x0F00) &&
	    dev_id_base != (SDIO_DEVICE_ID_ATHEROS_QCA9377 & 0x0F00)) {
		ret = -ENODEV;
		ath10k_err(ar, "unsupported device id %u (0x%x)\n",
			   dev_id_base, id->device);
		goto err_free_wq;
	}

	ar->dev_id = QCA9377_1_0_DEVICE_ID;
	ar->id.vendor = id->vendor;
	ar->id.device = id->device;

	ath10k_sdio_set_mbox_info(ar);

	bus_params.dev_type = ATH10K_DEV_TYPE_HL;
	/* TODO: don't know yet how to get chip_id with SDIO */
	bus_params.chip_id = 0;
	bus_params.hl_msdu_ids = true;

	ar->hw->max_mtu = ETH_DATA_LEN;

	ret = ath10k_core_register(ar, &bus_params);
	if (ret) {
		ath10k_err(ar, "failed to register driver core: %d\n", ret);
		goto err_free_wq;
	}

	timer_setup(&ar_sdio->sleep_timer, ath10k_sdio_sleep_timer_handler, 0);

	return 0;

err_free_wq:
	destroy_workqueue(ar_sdio->workqueue);
err_core_destroy:
	ath10k_core_destroy(ar);

	return ret;
}

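/* Teardown (below) mirrors probe in reverse: unregister the core first
 * so no new work is queued, drop the NAPI context, destroy the core and
 * finally the private workqueue.
 */
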
static void ath10k_sdio_remove(struct sdio_func *func)
{
	struct ath10k_sdio *ar_sdio = sdio_get_drvdata(func);
	struct ath10k *ar = ar_sdio->ar;

	ath10k_dbg(ar, ATH10K_DBG_BOOT,
		   "sdio removed func %d vendor 0x%x device 0x%x\n",
		   func->num, func->vendor, func->device);

	ath10k_core_unregister(ar);

	netif_napi_del(&ar->napi);

	ath10k_core_destroy(ar);

	destroy_workqueue(ar_sdio->workqueue);
}

static const struct sdio_device_id ath10k_sdio_devices[] = {
	{SDIO_DEVICE(SDIO_VENDOR_ID_ATHEROS, SDIO_DEVICE_ID_ATHEROS_AR6005)},
	{SDIO_DEVICE(SDIO_VENDOR_ID_ATHEROS, SDIO_DEVICE_ID_ATHEROS_QCA9377)},
	{},
};

MODULE_DEVICE_TABLE(sdio, ath10k_sdio_devices);

static struct sdio_driver ath10k_sdio_driver = {
	.name = "ath10k_sdio",
	.id_table = ath10k_sdio_devices,
	.probe = ath10k_sdio_probe,
	.remove = ath10k_sdio_remove,
	.drv = {
		.owner = THIS_MODULE,
		.pm = ATH10K_SDIO_PM_OPS,
	},
};

static int __init ath10k_sdio_init(void)
{
	int ret;

	ret = sdio_register_driver(&ath10k_sdio_driver);
	if (ret)
		pr_err("sdio driver registration failed: %d\n", ret);

	return ret;
}

static void __exit ath10k_sdio_exit(void)
{
	sdio_unregister_driver(&ath10k_sdio_driver);
}

module_init(ath10k_sdio_init);
module_exit(ath10k_sdio_exit);

MODULE_AUTHOR("Qualcomm Atheros");
MODULE_DESCRIPTION("Driver support for Qualcomm Atheros 802.11ac WLAN SDIO devices");
MODULE_LICENSE("Dual BSD/GPL");