Path: blob/main/sys/contrib/dev/athk/ath10k/sdio.c
105688 views
// SPDX-License-Identifier: ISC1/*2* Copyright (c) 2004-2011 Atheros Communications Inc.3* Copyright (c) 2011-2012,2017 Qualcomm Atheros, Inc.4* Copyright (c) 2016-2017 Erik Stromdahl <[email protected]>5* Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.6*/78#include <linux/module.h>9#include <linux/mmc/card.h>10#include <linux/mmc/mmc.h>11#include <linux/mmc/host.h>12#include <linux/mmc/sdio_func.h>13#include <linux/mmc/sdio_ids.h>14#include <linux/mmc/sdio.h>15#include <linux/mmc/sd.h>16#include <linux/bitfield.h>17#include "core.h"18#include "bmi.h"19#include "debug.h"20#include "hif.h"21#include "htc.h"22#include "mac.h"23#include "targaddrs.h"24#include "trace.h"25#include "sdio.h"26#include "coredump.h"2728void ath10k_sdio_fw_crashed_dump(struct ath10k *ar);2930#define ATH10K_SDIO_VSG_BUF_SIZE (64 * 1024)3132/* inlined helper functions */3334static inline int ath10k_sdio_calc_txrx_padded_len(struct ath10k_sdio *ar_sdio,35size_t len)36{37return __ALIGN_MASK((len), ar_sdio->mbox_info.block_mask);38}3940static inline enum ath10k_htc_ep_id pipe_id_to_eid(u8 pipe_id)41{42return (enum ath10k_htc_ep_id)pipe_id;43}4445static inline void ath10k_sdio_mbox_free_rx_pkt(struct ath10k_sdio_rx_data *pkt)46{47dev_kfree_skb(pkt->skb);48pkt->skb = NULL;49pkt->alloc_len = 0;50pkt->act_len = 0;51pkt->trailer_only = false;52}5354static inline int ath10k_sdio_mbox_alloc_rx_pkt(struct ath10k_sdio_rx_data *pkt,55size_t act_len, size_t full_len,56bool part_of_bundle,57bool last_in_bundle)58{59pkt->skb = dev_alloc_skb(full_len);60if (!pkt->skb)61return -ENOMEM;6263pkt->act_len = act_len;64pkt->alloc_len = full_len;65pkt->part_of_bundle = part_of_bundle;66pkt->last_in_bundle = last_in_bundle;67pkt->trailer_only = false;6869return 0;70}7172static inline bool is_trailer_only_msg(struct ath10k_sdio_rx_data *pkt)73{74bool trailer_only = false;75struct ath10k_htc_hdr *htc_hdr =76(struct ath10k_htc_hdr *)pkt->skb->data;77u16 len = 
__le16_to_cpu(htc_hdr->len);7879if (len == htc_hdr->trailer_len)80trailer_only = true;8182return trailer_only;83}8485/* sdio/mmc functions */8687static inline void ath10k_sdio_set_cmd52_arg(u32 *arg, u8 write, u8 raw,88unsigned int address,89unsigned char val)90{91*arg = FIELD_PREP(BIT(31), write) |92FIELD_PREP(BIT(27), raw) |93FIELD_PREP(BIT(26), 1) |94FIELD_PREP(GENMASK(25, 9), address) |95FIELD_PREP(BIT(8), 1) |96FIELD_PREP(GENMASK(7, 0), val);97}9899static int ath10k_sdio_func0_cmd52_wr_byte(struct mmc_card *card,100unsigned int address,101unsigned char byte)102{103struct mmc_command io_cmd;104105memset(&io_cmd, 0, sizeof(io_cmd));106ath10k_sdio_set_cmd52_arg(&io_cmd.arg, 1, 0, address, byte);107io_cmd.opcode = SD_IO_RW_DIRECT;108io_cmd.flags = MMC_RSP_R5 | MMC_CMD_AC;109110return mmc_wait_for_cmd(card->host, &io_cmd, 0);111}112113static int ath10k_sdio_func0_cmd52_rd_byte(struct mmc_card *card,114unsigned int address,115unsigned char *byte)116{117struct mmc_command io_cmd;118int ret;119120memset(&io_cmd, 0, sizeof(io_cmd));121ath10k_sdio_set_cmd52_arg(&io_cmd.arg, 0, 0, address, 0);122io_cmd.opcode = SD_IO_RW_DIRECT;123io_cmd.flags = MMC_RSP_R5 | MMC_CMD_AC;124125ret = mmc_wait_for_cmd(card->host, &io_cmd, 0);126if (!ret)127*byte = io_cmd.resp[0];128129return ret;130}131132static int ath10k_sdio_config(struct ath10k *ar)133{134struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);135struct sdio_func *func = ar_sdio->func;136unsigned char byte, asyncintdelay = 2;137int ret;138139ath10k_dbg(ar, ATH10K_DBG_BOOT, "sdio configuration\n");140141sdio_claim_host(func);142143byte = 0;144ret = ath10k_sdio_func0_cmd52_rd_byte(func->card,145SDIO_CCCR_DRIVE_STRENGTH,146&byte);147148byte &= ~ATH10K_SDIO_DRIVE_DTSX_MASK;149byte |= FIELD_PREP(ATH10K_SDIO_DRIVE_DTSX_MASK,150ATH10K_SDIO_DRIVE_DTSX_TYPE_D);151152ret = ath10k_sdio_func0_cmd52_wr_byte(func->card,153SDIO_CCCR_DRIVE_STRENGTH,154byte);155156byte = 0;157ret = 
ath10k_sdio_func0_cmd52_rd_byte(158func->card,159CCCR_SDIO_DRIVER_STRENGTH_ENABLE_ADDR,160&byte);161162byte |= (CCCR_SDIO_DRIVER_STRENGTH_ENABLE_A |163CCCR_SDIO_DRIVER_STRENGTH_ENABLE_C |164CCCR_SDIO_DRIVER_STRENGTH_ENABLE_D);165166ret = ath10k_sdio_func0_cmd52_wr_byte(func->card,167CCCR_SDIO_DRIVER_STRENGTH_ENABLE_ADDR,168byte);169if (ret) {170ath10k_warn(ar, "failed to enable driver strength: %d\n", ret);171goto out;172}173174byte = 0;175ret = ath10k_sdio_func0_cmd52_rd_byte(func->card,176CCCR_SDIO_IRQ_MODE_REG_SDIO3,177&byte);178179byte |= SDIO_IRQ_MODE_ASYNC_4BIT_IRQ_SDIO3;180181ret = ath10k_sdio_func0_cmd52_wr_byte(func->card,182CCCR_SDIO_IRQ_MODE_REG_SDIO3,183byte);184if (ret) {185ath10k_warn(ar, "failed to enable 4-bit async irq mode: %d\n",186ret);187goto out;188}189190byte = 0;191ret = ath10k_sdio_func0_cmd52_rd_byte(func->card,192CCCR_SDIO_ASYNC_INT_DELAY_ADDRESS,193&byte);194195byte &= ~CCCR_SDIO_ASYNC_INT_DELAY_MASK;196byte |= FIELD_PREP(CCCR_SDIO_ASYNC_INT_DELAY_MASK, asyncintdelay);197198ret = ath10k_sdio_func0_cmd52_wr_byte(func->card,199CCCR_SDIO_ASYNC_INT_DELAY_ADDRESS,200byte);201202/* give us some time to enable, in ms */203func->enable_timeout = 100;204205ret = sdio_set_block_size(func, ar_sdio->mbox_info.block_size);206if (ret) {207ath10k_warn(ar, "failed to set sdio block size to %d: %d\n",208ar_sdio->mbox_info.block_size, ret);209goto out;210}211212out:213sdio_release_host(func);214return ret;215}216217static int ath10k_sdio_write32(struct ath10k *ar, u32 addr, u32 val)218{219struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);220struct sdio_func *func = ar_sdio->func;221int ret;222223sdio_claim_host(func);224225sdio_writel(func, val, addr, &ret);226if (ret) {227ath10k_warn(ar, "failed to write 0x%x to address 0x%x: %d\n",228val, addr, ret);229goto out;230}231232ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio write32 addr 0x%x val 0x%x\n",233addr, val);234235out:236sdio_release_host(func);237238return ret;239}240241static int 
ath10k_sdio_writesb32(struct ath10k *ar, u32 addr, u32 val)242{243struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);244struct sdio_func *func = ar_sdio->func;245__le32 *buf;246int ret;247248buf = kzalloc(sizeof(*buf), GFP_KERNEL);249if (!buf)250return -ENOMEM;251252*buf = cpu_to_le32(val);253254sdio_claim_host(func);255256ret = sdio_writesb(func, addr, buf, sizeof(*buf));257if (ret) {258ath10k_warn(ar, "failed to write value 0x%x to fixed sb address 0x%x: %d\n",259val, addr, ret);260goto out;261}262263ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio writesb32 addr 0x%x val 0x%x\n",264addr, val);265266out:267sdio_release_host(func);268269kfree(buf);270271return ret;272}273274static int ath10k_sdio_read32(struct ath10k *ar, u32 addr, u32 *val)275{276struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);277struct sdio_func *func = ar_sdio->func;278int ret;279280sdio_claim_host(func);281*val = sdio_readl(func, addr, &ret);282if (ret) {283ath10k_warn(ar, "failed to read from address 0x%x: %d\n",284addr, ret);285goto out;286}287288ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio read32 addr 0x%x val 0x%x\n",289addr, *val);290291out:292sdio_release_host(func);293294return ret;295}296297static int ath10k_sdio_read(struct ath10k *ar, u32 addr, void *buf, size_t len)298{299struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);300struct sdio_func *func = ar_sdio->func;301int ret;302303sdio_claim_host(func);304305ret = sdio_memcpy_fromio(func, buf, addr, len);306if (ret) {307ath10k_warn(ar, "failed to read from address 0x%x: %d\n",308addr, ret);309goto out;310}311312ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio read addr 0x%x buf 0x%p len %zu\n",313addr, buf, len);314ath10k_dbg_dump(ar, ATH10K_DBG_SDIO_DUMP, NULL, "sdio read ", buf, len);315316out:317sdio_release_host(func);318319return ret;320}321322static int ath10k_sdio_write(struct ath10k *ar, u32 addr, const void *buf, size_t len)323{324struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);325struct sdio_func *func = ar_sdio->func;326int 
/* Write @len bytes from @buf to device address @addr (incrementing
 * address mode). Holds the SDIO host claim for the duration of the
 * transfer.
 */
static int ath10k_sdio_write(struct ath10k *ar, u32 addr, const void *buf, size_t len)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct sdio_func *func = ar_sdio->func;
	int ret;

	sdio_claim_host(func);

	/* For some reason toio() doesn't have const for the buffer, need
	 * an ugly hack to workaround that.
	 */
	ret = sdio_memcpy_toio(func, addr, (void *)buf, len);
	if (ret) {
		ath10k_warn(ar, "failed to write to address 0x%x: %d\n",
			    addr, ret);
		goto out;
	}

	ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio write addr 0x%x buf 0x%p len %zu\n",
		   addr, buf, len);
	ath10k_dbg_dump(ar, ATH10K_DBG_SDIO_DUMP, NULL, "sdio write ", buf, len);

out:
	sdio_release_host(func);

	return ret;
}

/* Read from a fixed (non-incrementing) device address. The length is
 * rounded down to a whole number of mailbox blocks before the transfer.
 */
static int ath10k_sdio_readsb(struct ath10k *ar, u32 addr, void *buf, size_t len)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct sdio_func *func = ar_sdio->func;
	int ret;

	sdio_claim_host(func);

	len = round_down(len, ar_sdio->mbox_info.block_size);

	ret = sdio_readsb(func, buf, addr, len);
	if (ret) {
		ath10k_warn(ar, "failed to read from fixed (sb) address 0x%x: %d\n",
			    addr, ret);
		goto out;
	}

	ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio readsb addr 0x%x buf 0x%p len %zu\n",
		   addr, buf, len);
	ath10k_dbg_dump(ar, ATH10K_DBG_SDIO_DUMP, NULL, "sdio readsb ", buf, len);

out:
	sdio_release_host(func);

	return ret;
}

/* HIF mbox functions */

/* Parse one received HTC message: process its trailer (which may carry
 * lookahead reports that update @lookaheads/@n_lookaheads), mark
 * trailer-only messages, and strip the trailer and HTC header from the
 * skb so only the payload remains.
 */
static int ath10k_sdio_mbox_rx_process_packet(struct ath10k *ar,
					      struct ath10k_sdio_rx_data *pkt,
					      u32 *lookaheads,
					      int *n_lookaheads)
{
	struct ath10k_htc *htc = &ar->htc;
	struct sk_buff *skb = pkt->skb;
	struct ath10k_htc_hdr *htc_hdr = (struct ath10k_htc_hdr *)skb->data;
	bool trailer_present = htc_hdr->flags & ATH10K_HTC_FLAG_TRAILER_PRESENT;
	enum ath10k_htc_ep_id eid;
	u8 *trailer;
	int ret;

	if (trailer_present) {
		/* Trailer occupies the last trailer_len bytes of the skb. */
		trailer = skb->data + skb->len - htc_hdr->trailer_len;

		eid = pipe_id_to_eid(htc_hdr->eid);

		ret = ath10k_htc_process_trailer(htc,
						 trailer,
						 htc_hdr->trailer_len,
						 eid,
						 lookaheads,
						 n_lookaheads);
		if (ret)
			return ret;

		if (is_trailer_only_msg(pkt))
			pkt->trailer_only = true;

		skb_trim(skb, skb->len - htc_hdr->trailer_len);
	}

	skb_pull(skb, sizeof(*htc_hdr));

	return 0;
}

/* Process all fetched RX packets. Validates the endpoint from each
 * lookahead, parses each packet, and hands non-trailer-only skbs to
 * the deferred RX work; ownership of those skbs transfers to the RX
 * completion path. Packets not handed off are freed on error.
 */
static int ath10k_sdio_mbox_rx_process_packets(struct ath10k *ar,
					       u32 lookaheads[],
					       int *n_lookahead)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct ath10k_htc *htc = &ar->htc;
	struct ath10k_sdio_rx_data *pkt;
	struct ath10k_htc_ep *ep;
	struct ath10k_skb_rxcb *cb;
	enum ath10k_htc_ep_id id;
	int ret, i, *n_lookahead_local;
	u32 *lookaheads_local;
	int lookahead_idx = 0;

	for (i = 0; i < ar_sdio->n_rx_pkts; i++) {
		lookaheads_local = lookaheads;
		n_lookahead_local = n_lookahead;

		/* Endpoint id comes from the lookahead's HTC header. */
		id = ((struct ath10k_htc_hdr *)
		      &lookaheads[lookahead_idx++])->eid;

		if (id >= ATH10K_HTC_EP_COUNT) {
			ath10k_warn(ar, "invalid endpoint in look-ahead: %d\n",
				    id);
			ret = -ENOMEM;
			goto out;
		}

		ep = &htc->endpoint[id];

		if (ep->service_id == 0) {
			ath10k_warn(ar, "ep %d is not connected\n", id);
			ret = -ENOMEM;
			goto out;
		}

		pkt = &ar_sdio->rx_pkts[i];

		if (pkt->part_of_bundle && !pkt->last_in_bundle) {
			/* Only read lookahead's from RX trailers
			 * for the last packet in a bundle.
			 */
			lookahead_idx--;
			lookaheads_local = NULL;
			n_lookahead_local = NULL;
		}

		ret = ath10k_sdio_mbox_rx_process_packet(ar,
							 pkt,
							 lookaheads_local,
							 n_lookahead_local);
		if (ret)
			goto out;

		if (!pkt->trailer_only) {
			cb = ATH10K_SKB_RXCB(pkt->skb);
			cb->eid = id;

			skb_queue_tail(&ar_sdio->rx_head, pkt->skb);
			queue_work(ar->workqueue_aux,
				   &ar_sdio->async_work_rx);
		} else {
			kfree_skb(pkt->skb);
		}

		/* The RX complete handler now owns the skb...*/
		pkt->skb = NULL;
		pkt->alloc_len = 0;
	}

	ret = 0;

out:
	/* Free all packets that was not passed on to the RX completion
	 * handler...
	 */
	for (; i < ar_sdio->n_rx_pkts; i++)
		ath10k_sdio_mbox_free_rx_pkt(&ar_sdio->rx_pkts[i]);

	return ret;
}
/* Allocate the extra skbs needed for an RX bundle. The bundle count is
 * decoded from @htc_hdr's flags; the header packet itself is NOT
 * counted here and is allocated separately by the caller.
 */
static int ath10k_sdio_mbox_alloc_bundle(struct ath10k *ar,
					 struct ath10k_sdio_rx_data *rx_pkts,
					 struct ath10k_htc_hdr *htc_hdr,
					 size_t full_len, size_t act_len,
					 size_t *bndl_cnt)
{
	int ret, i;
	u8 max_msgs = ar->htc.max_msgs_per_htc_bundle;

	*bndl_cnt = ath10k_htc_get_bundle_count(max_msgs, htc_hdr->flags);

	if (*bndl_cnt > max_msgs) {
		ath10k_warn(ar,
			    "HTC bundle length %u exceeds maximum %u\n",
			    le16_to_cpu(htc_hdr->len),
			    max_msgs);
		return -ENOMEM;
	}

	/* Allocate bndl_cnt extra skb's for the bundle.
	 * The package containing the
	 * ATH10K_HTC_FLAG_BUNDLE_MASK flag is not included
	 * in bndl_cnt. The skb for that packet will be
	 * allocated separately.
	 */
	for (i = 0; i < *bndl_cnt; i++) {
		ret = ath10k_sdio_mbox_alloc_rx_pkt(&rx_pkts[i],
						    act_len,
						    full_len,
						    true,
						    false);
		if (ret)
			return ret;
	}

	return 0;
}

/* Allocate RX packet buffers for each lookahead report. Each lookahead
 * holds the first word of an HTC header, from which the padded transfer
 * length is derived. Lengths are validated against the HTC payload and
 * buffer-size limits; firmware recovery is triggered on an oversized
 * payload. On success ar_sdio->n_rx_pkts is set; on error all
 * allocations made so far are freed.
 */
static int ath10k_sdio_mbox_rx_alloc(struct ath10k *ar,
				     u32 lookaheads[], int n_lookaheads)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct ath10k_htc_hdr *htc_hdr;
	size_t full_len, act_len;
	bool last_in_bundle;
	int ret, i;
	int pkt_cnt = 0;

	if (n_lookaheads > ATH10K_SDIO_MAX_RX_MSGS) {
		ath10k_warn(ar, "the total number of pkts to be fetched (%u) exceeds maximum %u\n",
			    n_lookaheads, ATH10K_SDIO_MAX_RX_MSGS);
		ret = -ENOMEM;
		goto err;
	}

	for (i = 0; i < n_lookaheads; i++) {
		htc_hdr = (struct ath10k_htc_hdr *)&lookaheads[i];
		last_in_bundle = false;

		if (le16_to_cpu(htc_hdr->len) > ATH10K_HTC_MBOX_MAX_PAYLOAD_LENGTH) {
			ath10k_warn(ar, "payload length %d exceeds max htc length: %zu\n",
				    le16_to_cpu(htc_hdr->len),
				    ATH10K_HTC_MBOX_MAX_PAYLOAD_LENGTH);
			ret = -ENOMEM;

			ath10k_core_start_recovery(ar);
			ath10k_warn(ar, "exceeds length, start recovery\n");

			goto err;
		}

		act_len = le16_to_cpu(htc_hdr->len) + sizeof(*htc_hdr);
		full_len = ath10k_sdio_calc_txrx_padded_len(ar_sdio, act_len);

		if (full_len > ATH10K_SDIO_MAX_BUFFER_SIZE) {
			ath10k_warn(ar, "rx buffer requested with invalid htc_hdr length (%d, 0x%x): %d\n",
				    htc_hdr->eid, htc_hdr->flags,
				    le16_to_cpu(htc_hdr->len));
			ret = -EINVAL;
			goto err;
		}

		if (ath10k_htc_get_bundle_count(
			ar->htc.max_msgs_per_htc_bundle, htc_hdr->flags)) {
			/* HTC header indicates that every packet to follow
			 * has the same padded length so that it can be
			 * optimally fetched as a full bundle.
			 */
			size_t bndl_cnt;

			ret = ath10k_sdio_mbox_alloc_bundle(ar,
							    &ar_sdio->rx_pkts[pkt_cnt],
							    htc_hdr,
							    full_len,
							    act_len,
							    &bndl_cnt);

			if (ret) {
				ath10k_warn(ar, "failed to allocate a bundle: %d\n",
					    ret);
				goto err;
			}

			pkt_cnt += bndl_cnt;

			/* next buffer will be the last in the bundle */
			last_in_bundle = true;
		}

		/* Allocate skb for packet. If the packet had the
		 * ATH10K_HTC_FLAG_BUNDLE_MASK flag set, all bundled
		 * packet skb's have been allocated in the previous step.
		 */
		if (htc_hdr->flags & ATH10K_HTC_FLAGS_RECV_1MORE_BLOCK)
			full_len += ATH10K_HIF_MBOX_BLOCK_SIZE;

		ret = ath10k_sdio_mbox_alloc_rx_pkt(&ar_sdio->rx_pkts[pkt_cnt],
						    act_len,
						    full_len,
						    last_in_bundle,
						    last_in_bundle);
		if (ret) {
			ath10k_warn(ar, "alloc_rx_pkt error %d\n", ret);
			goto err;
		}

		pkt_cnt++;
	}

	ar_sdio->n_rx_pkts = pkt_cnt;

	return 0;

err:
	/* Free every packet allocated so far; allocation is contiguous,
	 * so stop at the first slot with no buffer.
	 */
	for (i = 0; i < ATH10K_SDIO_MAX_RX_MSGS; i++) {
		if (!ar_sdio->rx_pkts[i].alloc_len)
			break;
		ath10k_sdio_mbox_free_rx_pkt(&ar_sdio->rx_pkts[i]);
	}

	return ret;
}

/* Fetch a single (non-bundled) RX packet from the mailbox directly
 * into its skb, then fix up the actual length from the received HTC
 * header. On error the packet is freed and n_rx_pkts reset to 0 so
 * later processing is a no-op.
 */
static int ath10k_sdio_mbox_rx_fetch(struct ath10k *ar)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct ath10k_sdio_rx_data *pkt = &ar_sdio->rx_pkts[0];
	struct sk_buff *skb = pkt->skb;
	struct ath10k_htc_hdr *htc_hdr;
	int ret;

	ret = ath10k_sdio_readsb(ar, ar_sdio->mbox_info.htc_addr,
				 skb->data, pkt->alloc_len);
	if (ret)
		goto err;

	htc_hdr = (struct ath10k_htc_hdr *)skb->data;
	pkt->act_len = le16_to_cpu(htc_hdr->len) + sizeof(*htc_hdr);

	if (pkt->act_len > pkt->alloc_len) {
		ret = -EINVAL;
		goto err;
	}

	skb_put(skb, pkt->act_len);
	return 0;

err:
	ar_sdio->n_rx_pkts = 0;
	ath10k_sdio_mbox_free_rx_pkt(pkt);

	return ret;
}

/* Fetch a whole RX bundle with one mailbox read into the shared vsg
 * staging buffer, then copy each packet into its own skb using the
 * per-packet HTC header length. On any error all packets are freed
 * and n_rx_pkts reset to 0.
 */
static int ath10k_sdio_mbox_rx_fetch_bundle(struct ath10k *ar)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct ath10k_sdio_rx_data *pkt;
	struct ath10k_htc_hdr *htc_hdr;
	int ret, i;
	u32 pkt_offset, virt_pkt_len;

	virt_pkt_len = 0;
	for (i = 0; i < ar_sdio->n_rx_pkts; i++)
		virt_pkt_len += ar_sdio->rx_pkts[i].alloc_len;

	if (virt_pkt_len > ATH10K_SDIO_VSG_BUF_SIZE) {
		ath10k_warn(ar, "sdio vsg buffer size limit: %d\n", virt_pkt_len);
		ret = -E2BIG;
		goto err;
	}

	ret = ath10k_sdio_readsb(ar, ar_sdio->mbox_info.htc_addr,
				 ar_sdio->vsg_buffer, virt_pkt_len);
	if (ret) {
		ath10k_warn(ar, "failed to read bundle packets: %d", ret);
		goto err;
	}

	pkt_offset = 0;
	for (i = 0; i < ar_sdio->n_rx_pkts; i++) {
		pkt = &ar_sdio->rx_pkts[i];
		htc_hdr = (struct ath10k_htc_hdr *)(ar_sdio->vsg_buffer + pkt_offset);
		pkt->act_len = le16_to_cpu(htc_hdr->len) + sizeof(*htc_hdr);

		if (pkt->act_len > pkt->alloc_len) {
			ret = -EINVAL;
			goto err;
		}

		skb_put_data(pkt->skb, htc_hdr, pkt->act_len);
		pkt_offset += pkt->alloc_len;
	}

	return 0;

err:
	/* Free all packets that was not successfully fetched. */
	for (i = 0; i < ar_sdio->n_rx_pkts; i++)
		ath10k_sdio_mbox_free_rx_pkt(&ar_sdio->rx_pkts[i]);

	ar_sdio->n_rx_pkts = 0;

	return ret;
}
/* This is the timeout for mailbox processing done in the sdio irq
 * handler. The timeout is deliberately set quite high since SDIO dump logs
 * over serial port can/will add a substantial overhead to the processing
 * (if enabled).
 */
#define SDIO_MBOX_PROCESSING_TIMEOUT_HZ (20 * HZ)

/* Drain pending RX messages starting from @msg_lookahead. Loops
 * alloc -> fetch -> process as long as trailers report further
 * lookaheads, bounded by SDIO_MBOX_PROCESSING_TIMEOUT_HZ. Sets *done
 * to false whenever the caller should re-check the IRQ status
 * registers before leaving IRQ processing.
 */
static int ath10k_sdio_mbox_rxmsg_pending_handler(struct ath10k *ar,
						  u32 msg_lookahead, bool *done)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	u32 lookaheads[ATH10K_SDIO_MAX_RX_MSGS];
	int n_lookaheads = 1;
	unsigned long timeout;
	int ret;

	*done = true;

	/* Copy the lookahead obtained from the HTC register table into our
	 * temp array as a start value.
	 */
	lookaheads[0] = msg_lookahead;

	timeout = jiffies + SDIO_MBOX_PROCESSING_TIMEOUT_HZ;
	do {
		/* Try to allocate as many HTC RX packets indicated by
		 * n_lookaheads.
		 */
		ret = ath10k_sdio_mbox_rx_alloc(ar, lookaheads,
						n_lookaheads);
		if (ret)
			break;

		if (ar_sdio->n_rx_pkts >= 2)
			/* A recv bundle was detected, force IRQ status
			 * re-check again.
			 */
			*done = false;

		/* NOTE(review): a fetch error is not checked here
		 * directly; both fetch paths reset n_rx_pkts to 0 and
		 * free the packets on failure, so the subsequent
		 * processing loop becomes a no-op and ret is reported
		 * after the loop exits.
		 */
		if (ar_sdio->n_rx_pkts > 1)
			ret = ath10k_sdio_mbox_rx_fetch_bundle(ar);
		else
			ret = ath10k_sdio_mbox_rx_fetch(ar);

		/* Process fetched packets. This will potentially update
		 * n_lookaheads depending on if the packets contain lookahead
		 * reports.
		 */
		n_lookaheads = 0;
		ret = ath10k_sdio_mbox_rx_process_packets(ar,
							  lookaheads,
							  &n_lookaheads);

		if (!n_lookaheads || ret)
			break;

		/* For SYNCH processing, if we get here, we are running
		 * through the loop again due to updated lookaheads. Set
		 * flag that we should re-check IRQ status registers again
		 * before leaving IRQ processing, this can net better
		 * performance in high throughput situations.
		 */
		*done = false;
	} while (time_before(jiffies, timeout));

	if (ret && (ret != -ECANCELED))
		ath10k_warn(ar, "failed to get pending recv messages: %d\n",
			    ret);

	return ret;
}

/* Handle the debug-assertion interrupt: log the firmware crash and
 * clear the interrupt by reading decrement counter 0.
 */
static int ath10k_sdio_mbox_proc_dbg_intr(struct ath10k *ar)
{
	u32 val;
	int ret;

	/* TODO: Add firmware crash handling */
	ath10k_warn(ar, "firmware crashed\n");

	/* read counter to clear the interrupt, the debug error interrupt is
	 * counter 0.
	 */
	ret = ath10k_sdio_read32(ar, MBOX_COUNT_DEC_ADDRESS, &val);
	if (ret)
		ath10k_warn(ar, "failed to clear debug interrupt: %d\n", ret);

	return ret;
}

/* Handle a counter interrupt. Only the debug-assertion counter is of
 * interest here; the shadow registers are read under irq_data->mtx.
 */
static int ath10k_sdio_mbox_proc_counter_intr(struct ath10k *ar)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
	u8 counter_int_status;
	int ret;

	mutex_lock(&irq_data->mtx);
	counter_int_status = irq_data->irq_proc_reg->counter_int_status &
			     irq_data->irq_en_reg->cntr_int_status_en;

	/* NOTE: other modules like GMBOX may use the counter interrupt for
	 * credit flow control on other counters, we only need to check for
	 * the debug assertion counter interrupt.
	 */
	if (counter_int_status & ATH10K_SDIO_TARGET_DEBUG_INTR_MASK)
		ret = ath10k_sdio_mbox_proc_dbg_intr(ar);
	else
		ret = 0;

	mutex_unlock(&irq_data->mtx);

	return ret;
}

/* Handle an error interrupt: decode wakeup/RX-underflow/TX-overflow
 * bits from the shadowed error status and clear them on the target
 * with a write-1-to-clear access.
 */
static int ath10k_sdio_mbox_proc_err_intr(struct ath10k *ar)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
	u8 error_int_status;
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio error interrupt\n");

	error_int_status = irq_data->irq_proc_reg->error_int_status & 0x0F;
	if (!error_int_status) {
		ath10k_warn(ar, "invalid error interrupt status: 0x%x\n",
			    error_int_status);
		return -EIO;
	}

	ath10k_dbg(ar, ATH10K_DBG_SDIO,
		   "sdio error_int_status 0x%x\n", error_int_status);

	if (FIELD_GET(MBOX_ERROR_INT_STATUS_WAKEUP_MASK,
		      error_int_status))
		ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio interrupt error wakeup\n");

	if (FIELD_GET(MBOX_ERROR_INT_STATUS_RX_UNDERFLOW_MASK,
		      error_int_status))
		ath10k_warn(ar, "rx underflow interrupt error\n");

	if (FIELD_GET(MBOX_ERROR_INT_STATUS_TX_OVERFLOW_MASK,
		      error_int_status))
		ath10k_warn(ar, "tx overflow interrupt error\n");

	/* Clear the interrupt */
	irq_data->irq_proc_reg->error_int_status &= ~error_int_status;

	/* set W1C value to clear the interrupt, this hits the register first */
	ret = ath10k_sdio_writesb32(ar, MBOX_ERROR_INT_STATUS_ADDRESS,
				    error_int_status);
	if (ret) {
		ath10k_warn(ar, "unable to write to error int status address: %d\n",
			    ret);
		return ret;
	}

	return 0;
}

/* Handle a CPU interrupt: clear the asserted bits on the target (W1C)
 * and, if the firmware-assert bit was set, trigger a crash dump. The
 * dump runs after irq_data->mtx has been dropped.
 */
static int ath10k_sdio_mbox_proc_cpu_intr(struct ath10k *ar)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
	u8 cpu_int_status;
	int ret;

	mutex_lock(&irq_data->mtx);
	cpu_int_status = irq_data->irq_proc_reg->cpu_int_status &
			 irq_data->irq_en_reg->cpu_int_status_en;
	if (!cpu_int_status) {
		ath10k_warn(ar, "CPU interrupt status is zero\n");
		ret = -EIO;
		goto out;
	}

	/* Clear the interrupt */
	irq_data->irq_proc_reg->cpu_int_status &= ~cpu_int_status;

	/* Set up the register transfer buffer to hit the register 4 times,
	 * this is done to make the access 4-byte aligned to mitigate issues
	 * with host bus interconnects that restrict bus transfer lengths to
	 * be a multiple of 4-bytes.
	 *
	 * Set W1C value to clear the interrupt, this hits the register first.
	 */
	ret = ath10k_sdio_writesb32(ar, MBOX_CPU_INT_STATUS_ADDRESS,
				    cpu_int_status);
	if (ret) {
		ath10k_warn(ar, "unable to write to cpu interrupt status address: %d\n",
			    ret);
		goto out;
	}

out:
	mutex_unlock(&irq_data->mtx);
	if (cpu_int_status & MBOX_CPU_STATUS_ENABLE_ASSERT_MASK)
		ath10k_sdio_fw_crashed_dump(ar);

	return ret;
}
/* Read the HTC interrupt status registers in one burst and return the
 * enabled host interrupt bits plus the RX lookahead (if valid) for the
 * HTC mailbox. The shadow register copies are updated under
 * irq_data->mtx. Triggers firmware recovery if the register read
 * itself fails.
 */
static int ath10k_sdio_mbox_read_int_status(struct ath10k *ar,
					    u8 *host_int_status,
					    u32 *lookahead)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
	struct ath10k_sdio_irq_proc_regs *irq_proc_reg = irq_data->irq_proc_reg;
	struct ath10k_sdio_irq_enable_regs *irq_en_reg = irq_data->irq_en_reg;
	u8 htc_mbox = FIELD_PREP(ATH10K_HTC_MAILBOX_MASK, 1);
	int ret;

	mutex_lock(&irq_data->mtx);

	*lookahead = 0;
	*host_int_status = 0;

	/* int_status_en is supposed to be non zero, otherwise interrupts
	 * shouldn't be enabled. There is however a short time frame during
	 * initialization between the irq register and int_status_en init
	 * where this can happen.
	 * We silently ignore this condition.
	 */
	if (!irq_en_reg->int_status_en) {
		ret = 0;
		goto out;
	}

	/* Read the first sizeof(struct ath10k_irq_proc_registers)
	 * bytes of the HTC register table. This
	 * will yield us the value of different int status
	 * registers and the lookahead registers.
	 */
	ret = ath10k_sdio_read(ar, MBOX_HOST_INT_STATUS_ADDRESS,
			       irq_proc_reg, sizeof(*irq_proc_reg));
	if (ret) {
		ath10k_core_start_recovery(ar);
		ath10k_warn(ar, "read int status fail, start recovery\n");
		goto out;
	}

	/* Update only those registers that are enabled */
	*host_int_status = irq_proc_reg->host_int_status &
			   irq_en_reg->int_status_en;

	/* Look at mbox status */
	if (!(*host_int_status & htc_mbox)) {
		*lookahead = 0;
		ret = 0;
		goto out;
	}

	/* Mask out pending mbox value, we use look ahead as
	 * the real flag for mbox processing.
	 */
	*host_int_status &= ~htc_mbox;
	if (irq_proc_reg->rx_lookahead_valid & htc_mbox) {
		*lookahead = le32_to_cpu(
			irq_proc_reg->rx_lookahead[ATH10K_HTC_MAILBOX]);
		if (!*lookahead)
			ath10k_warn(ar, "sdio mbox lookahead is zero\n");
	}

out:
	mutex_unlock(&irq_data->mtx);
	return ret;
}

/* Top-level pending-IRQ dispatcher: reads the interrupt status once,
 * drains pending mailbox RX messages when a lookahead is present, then
 * services CPU, error and counter interrupts in that order. *done
 * tells the caller whether the IRQ status should be re-checked before
 * leaving interrupt processing.
 */
static int ath10k_sdio_mbox_proc_pending_irqs(struct ath10k *ar,
					      bool *done)
{
	u8 host_int_status;
	u32 lookahead;
	int ret;

	/* NOTE: HIF implementation guarantees that the context of this
	 * call allows us to perform SYNCHRONOUS I/O, that is we can block,
	 * sleep or call any API that can block or switch thread/task
	 * contexts. This is a fully schedulable context.
	 */

	ret = ath10k_sdio_mbox_read_int_status(ar,
					       &host_int_status,
					       &lookahead);
	if (ret) {
		*done = true;
		goto out;
	}

	if (!host_int_status && !lookahead) {
		ret = 0;
		*done = true;
		goto out;
	}

	if (lookahead) {
		ath10k_dbg(ar, ATH10K_DBG_SDIO,
			   "sdio pending mailbox msg lookahead 0x%08x\n",
			   lookahead);

		ret = ath10k_sdio_mbox_rxmsg_pending_handler(ar,
							     lookahead,
							     done);
		if (ret)
			goto out;
	}

	/* now, handle the rest of the interrupts */
	ath10k_dbg(ar, ATH10K_DBG_SDIO,
		   "sdio host_int_status 0x%x\n", host_int_status);

	if (FIELD_GET(MBOX_HOST_INT_STATUS_CPU_MASK, host_int_status)) {
		/* CPU Interrupt */
		ret = ath10k_sdio_mbox_proc_cpu_intr(ar);
		if (ret)
			goto out;
	}

	if (FIELD_GET(MBOX_HOST_INT_STATUS_ERROR_MASK, host_int_status)) {
		/* Error Interrupt */
		ret = ath10k_sdio_mbox_proc_err_intr(ar);
		if (ret)
			goto out;
	}

	if (FIELD_GET(MBOX_HOST_INT_STATUS_COUNTER_MASK, host_int_status))
		/* Counter Interrupt */
		ret = ath10k_sdio_mbox_proc_counter_intr(ar);

	ret = 0;

out:
	/* An optimization to bypass reading the IRQ status registers
	 * unnecessarily which can re-wake the target, if upper layers
	 * determine that we are in a low-throughput mode, we can rely on
	 * taking another interrupt rather than re-checking the status
	 * registers which can re-wake the target.
	 *
	 * NOTE : for host interfaces that makes use of detecting pending
	 * mbox messages at hif can not use this optimization due to
	 * possible side effects, SPI requires the host to drain all
	 * messages from the mailbox before exiting the ISR routine.
	 */

	ath10k_dbg(ar, ATH10K_DBG_SDIO,
		   "sdio pending irqs done %d status %d",
		   *done, ret);

	return ret;
}
/* Initialize the mailbox layout (addresses, block size, extended mbox
 * widths) based on the SDIO device id. QCA6174 (AR6005) chip revisions
 * >= 2.0 and QCA9377 use the wider 56K extended mailbox.
 */
static void ath10k_sdio_set_mbox_info(struct ath10k *ar)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct ath10k_mbox_info *mbox_info = &ar_sdio->mbox_info;
	u16 device = ar_sdio->func->device, dev_id_base, dev_id_chiprev;

	mbox_info->htc_addr = ATH10K_HIF_MBOX_BASE_ADDR;
	mbox_info->block_size = ATH10K_HIF_MBOX_BLOCK_SIZE;
	mbox_info->block_mask = ATH10K_HIF_MBOX_BLOCK_SIZE - 1;
	mbox_info->gmbox_addr = ATH10K_HIF_GMBOX_BASE_ADDR;
	mbox_info->gmbox_sz = ATH10K_HIF_GMBOX_WIDTH;

	mbox_info->ext_info[0].htc_ext_addr = ATH10K_HIF_MBOX0_EXT_BASE_ADDR;

	/* Device id encodes the family in bits 11:8 and the chip
	 * revision in the low byte.
	 */
	dev_id_base = (device & 0x0F00);
	dev_id_chiprev = (device & 0x00FF);
	switch (dev_id_base) {
	case (SDIO_DEVICE_ID_ATHEROS_AR6005 & 0x0F00):
		if (dev_id_chiprev < 4)
			mbox_info->ext_info[0].htc_ext_sz =
				ATH10K_HIF_MBOX0_EXT_WIDTH;
		else
			/* from QCA6174 2.0(0x504), the width has been extended
			 * to 56K
			 */
			mbox_info->ext_info[0].htc_ext_sz =
				ATH10K_HIF_MBOX0_EXT_WIDTH_ROME_2_0;
		break;
	case (SDIO_DEVICE_ID_ATHEROS_QCA9377 & 0x0F00):
		mbox_info->ext_info[0].htc_ext_sz =
			ATH10K_HIF_MBOX0_EXT_WIDTH_ROME_2_0;
		break;
	default:
		mbox_info->ext_info[0].htc_ext_sz =
			ATH10K_HIF_MBOX0_EXT_WIDTH;
	}

	mbox_info->ext_info[1].htc_ext_addr =
		mbox_info->ext_info[0].htc_ext_addr +
		mbox_info->ext_info[0].htc_ext_sz +
		ATH10K_HIF_MBOX_DUMMY_SPACE_SIZE;
	mbox_info->ext_info[1].htc_ext_sz = ATH10K_HIF_MBOX1_EXT_WIDTH;
}

/* BMI functions */

/* Poll the BMI command-credit counter until a credit is available or
 * BMI_COMMUNICATION_TIMEOUT_HZ elapses. Reading the counter register
 * also decrements it.
 */
static int ath10k_sdio_bmi_credits(struct ath10k *ar)
{
	u32 addr, cmd_credits;
	unsigned long timeout;
	int ret;

	/* Read the counter register to get the command credits */
	addr = MBOX_COUNT_DEC_ADDRESS + ATH10K_HIF_MBOX_NUM_MAX * 4;
	timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;
	cmd_credits = 0;

	while (time_before(jiffies, timeout) && !cmd_credits) {
		/* Hit the credit counter with a 4-byte access, the first byte
		 * read will hit the counter and cause a decrement, while the
		 * remaining 3 bytes has no effect. The rationale behind this
		 * is to make all HIF accesses 4-byte aligned.
		 */
		ret = ath10k_sdio_read32(ar, addr, &cmd_credits);
		if (ret) {
			ath10k_warn(ar,
				    "unable to decrement the command credit count register: %d\n",
				    ret);
			return ret;
		}

		/* The counter is only 8 bits.
		 * Ignore anything in the upper 3 bytes
		 */
		cmd_credits &= 0xFF;
	}

	if (!cmd_credits) {
		ath10k_warn(ar, "bmi communication timeout\n");
		return -ETIMEDOUT;
	}

	return 0;
}

/* Poll the host interrupt status register until the RX lookahead-valid
 * bit (bit 0) indicates BMI response data is present in the mailbox
 * FIFO, or the BMI timeout elapses.
 */
static int ath10k_sdio_bmi_get_rx_lookahead(struct ath10k *ar)
{
	unsigned long timeout;
	u32 rx_word;
	int ret;

	timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;
	rx_word = 0;

	while ((time_before(jiffies, timeout)) && !rx_word) {
		ret = ath10k_sdio_read32(ar,
					 MBOX_HOST_INT_STATUS_ADDRESS,
					 &rx_word);
		if (ret) {
			ath10k_warn(ar, "unable to read RX_LOOKAHEAD_VALID: %d\n", ret);
			return ret;
		}

		 /* all we really want is one bit */
		rx_word &= 1;
	}

	if (!rx_word) {
		ath10k_warn(ar, "bmi_recv_buf FIFO empty\n");
		return -EINVAL;
	}

	return ret;
}

/* Send a BMI request (if @req is set) after waiting for a command
 * credit, and optionally read back a response of *resp_len bytes into
 * @resp via the bounce buffer ar_sdio->bmi_buf. Returns 0 on success
 * or a negative errno.
 */
static int ath10k_sdio_bmi_exchange_msg(struct ath10k *ar,
					void *req, u32 req_len,
					void *resp, u32 *resp_len)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	u32 addr;
	int ret;

	if (req) {
		ret = ath10k_sdio_bmi_credits(ar);
		if (ret)
			return ret;

		addr = ar_sdio->mbox_info.htc_addr;

		memcpy(ar_sdio->bmi_buf, req, req_len);
		ret = ath10k_sdio_write(ar, addr, ar_sdio->bmi_buf, req_len);
		if (ret) {
			ath10k_warn(ar,
				    "unable to send the bmi data to the device: %d\n",
				    ret);
			return ret;
		}
	}

	if (!resp || !resp_len)
		/* No response expected */
		return 0;

	/* During normal bootup, small reads may be required.
	 * Rather than issue an HIF Read and then wait as the Target
	 * adds successive bytes to the FIFO, we wait here until
	 * we know that response data is available.
	 *
	 * This allows us to cleanly timeout on an unexpected
	 * Target failure rather than risk problems at the HIF level.
	 * In particular, this avoids SDIO timeouts and possibly garbage
	 * data on some host controllers. And on an interconnect
	 * such as Compact Flash (as well as some SDIO masters) which
	 * does not provide any indication on data timeout, it avoids
	 * a potential hang or garbage response.
	 *
	 * Synchronization is more difficult for reads larger than the
	 * size of the MBOX FIFO (128B), because the Target is unable
	 * to push the 129th byte of data until AFTER the Host posts an
	 * HIF Read and removes some FIFO data. So for large reads the
	 * Host proceeds to post an HIF Read BEFORE all the data is
	 * actually available to read. Fortunately, large BMI reads do
	 * not occur in practice -- they're supported for debug/development.
	 *
	 * So Host/Target BMI synchronization is divided into these cases:
	 * CASE 1: length < 4
	 *        Should not happen
	 *
	 * CASE 2: 4 <= length <= 128
	 *        Wait for first 4 bytes to be in FIFO
	 *        If CONSERVATIVE_BMI_READ is enabled, also wait for
	 *        a BMI command credit, which indicates that the ENTIRE
	 *        response is available in the FIFO
	 *
	 * CASE 3: length > 128
	 *        Wait for the first 4 bytes to be in FIFO
	 *
	 * For most uses, a small timeout should be sufficient and we will
	 * usually see a response quickly; but there may be some unusual
	 * (debug) cases of BMI_EXECUTE where we want an larger timeout.
	 * For now, we use an unbounded busy loop while waiting for
	 * BMI_EXECUTE.
	 *
	 * If BMI_EXECUTE ever needs to support longer-latency execution,
	 * especially in production, this code needs to be enhanced to sleep
	 * and yield. Also note that BMI_COMMUNICATION_TIMEOUT is currently
	 * a function of Host processor speed.
	 */
	ret = ath10k_sdio_bmi_get_rx_lookahead(ar);
	if (ret)
		return ret;

	/* We always read from the start of the mbox address */
	addr = ar_sdio->mbox_info.htc_addr;
	ret = ath10k_sdio_read(ar, addr, ar_sdio->bmi_buf, *resp_len);
	if (ret) {
		ath10k_warn(ar,
			    "unable to read the bmi data from the device: %d\n",
			    ret);
		return ret;
	}

	memcpy(resp, ar_sdio->bmi_buf, *resp_len);

	return 0;
}

/* sdio async handling functions */

/* Take a bus request from the free list, or return NULL if the list is
 * empty. Protected by ar_sdio->lock.
 */
static struct ath10k_sdio_bus_request
*ath10k_sdio_alloc_busreq(struct ath10k *ar)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct ath10k_sdio_bus_request *bus_req;

	spin_lock_bh(&ar_sdio->lock);

	if (list_empty(&ar_sdio->bus_req_freeq)) {
		bus_req = NULL;
		goto out;
	}

	bus_req = list_first_entry(&ar_sdio->bus_req_freeq,
				   struct ath10k_sdio_bus_request, list);
	list_del(&bus_req->list);

out:
	spin_unlock_bh(&ar_sdio->lock);
	return bus_req;
}

/* Zero a bus request and return it to the free list. Protected by
 * ar_sdio->lock.
 */
static void ath10k_sdio_free_bus_req(struct ath10k *ar,
				     struct ath10k_sdio_bus_request *bus_req)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);

	memset(bus_req, 0, sizeof(*bus_req));

	spin_lock_bh(&ar_sdio->lock);
	list_add_tail(&bus_req->list, &ar_sdio->bus_req_freeq);
	spin_unlock_bh(&ar_sdio->lock);
}
{1339complete(req->comp);1340}13411342ath10k_sdio_free_bus_req(ar, req);1343}13441345/* To improve throughput use workqueue to deliver packets to HTC layer,1346* this way SDIO bus is utilised much better.1347*/1348static void ath10k_rx_indication_async_work(struct work_struct *work)1349{1350struct ath10k_sdio *ar_sdio = container_of(work, struct ath10k_sdio,1351async_work_rx);1352struct ath10k *ar = ar_sdio->ar;1353struct ath10k_htc_ep *ep;1354struct ath10k_skb_rxcb *cb;1355struct sk_buff *skb;13561357while (true) {1358skb = skb_dequeue(&ar_sdio->rx_head);1359if (!skb)1360break;1361cb = ATH10K_SKB_RXCB(skb);1362ep = &ar->htc.endpoint[cb->eid];1363ep->ep_ops.ep_rx_complete(ar, skb);1364}13651366if (test_bit(ATH10K_FLAG_CORE_REGISTERED, &ar->dev_flags)) {1367local_bh_disable();1368napi_schedule(&ar->napi);1369local_bh_enable();1370}1371}13721373static int ath10k_sdio_read_rtc_state(struct ath10k_sdio *ar_sdio, unsigned char *state)1374{1375struct ath10k *ar = ar_sdio->ar;1376unsigned char rtc_state = 0;1377int ret = 0;13781379rtc_state = sdio_f0_readb(ar_sdio->func, ATH10K_CIS_RTC_STATE_ADDR, &ret);1380if (ret) {1381ath10k_warn(ar, "failed to read rtc state: %d\n", ret);1382return ret;1383}13841385*state = rtc_state & 0x3;13861387return ret;1388}13891390static int ath10k_sdio_set_mbox_sleep(struct ath10k *ar, bool enable_sleep)1391{1392struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);1393u32 val;1394int retry = ATH10K_CIS_READ_RETRY, ret = 0;1395unsigned char rtc_state = 0;13961397sdio_claim_host(ar_sdio->func);13981399ret = ath10k_sdio_read32(ar, ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL, &val);1400if (ret) {1401ath10k_warn(ar, "failed to read fifo/chip control register: %d\n",1402ret);1403goto release;1404}14051406if (enable_sleep) {1407val &= ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_OFF;1408ar_sdio->mbox_state = SDIO_MBOX_SLEEP_STATE;1409} else {1410val |= ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_ON;1411ar_sdio->mbox_state = 
SDIO_MBOX_AWAKE_STATE;1412}14131414ret = ath10k_sdio_write32(ar, ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL, val);1415if (ret) {1416ath10k_warn(ar, "failed to write to FIFO_TIMEOUT_AND_CHIP_CONTROL: %d",1417ret);1418}14191420if (!enable_sleep) {1421do {1422udelay(ATH10K_CIS_READ_WAIT_4_RTC_CYCLE_IN_US);1423ret = ath10k_sdio_read_rtc_state(ar_sdio, &rtc_state);14241425if (ret) {1426ath10k_warn(ar, "failed to disable mbox sleep: %d", ret);1427break;1428}14291430ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio read rtc state: %d\n",1431rtc_state);14321433if (rtc_state == ATH10K_CIS_RTC_STATE_ON)1434break;14351436udelay(ATH10K_CIS_XTAL_SETTLE_DURATION_IN_US);1437retry--;1438} while (retry > 0);1439}14401441release:1442sdio_release_host(ar_sdio->func);14431444return ret;1445}14461447static void ath10k_sdio_sleep_timer_handler(struct timer_list *t)1448{1449struct ath10k_sdio *ar_sdio = timer_container_of(ar_sdio, t,1450sleep_timer);14511452ar_sdio->mbox_state = SDIO_MBOX_REQUEST_TO_SLEEP_STATE;1453queue_work(ar_sdio->workqueue, &ar_sdio->wr_async_work);1454}14551456static void ath10k_sdio_write_async_work(struct work_struct *work)1457{1458struct ath10k_sdio *ar_sdio = container_of(work, struct ath10k_sdio,1459wr_async_work);1460struct ath10k *ar = ar_sdio->ar;1461struct ath10k_sdio_bus_request *req, *tmp_req;1462struct ath10k_mbox_info *mbox_info = &ar_sdio->mbox_info;14631464spin_lock_bh(&ar_sdio->wr_async_lock);14651466list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) {1467list_del(&req->list);1468spin_unlock_bh(&ar_sdio->wr_async_lock);14691470if (req->address >= mbox_info->htc_addr &&1471ar_sdio->mbox_state == SDIO_MBOX_SLEEP_STATE) {1472ath10k_sdio_set_mbox_sleep(ar, false);1473mod_timer(&ar_sdio->sleep_timer, jiffies +1474msecs_to_jiffies(ATH10K_MIN_SLEEP_INACTIVITY_TIME_MS));1475}14761477__ath10k_sdio_write_async(ar, req);1478spin_lock_bh(&ar_sdio->wr_async_lock);1479}14801481spin_unlock_bh(&ar_sdio->wr_async_lock);14821483if (ar_sdio->mbox_state == 
SDIO_MBOX_REQUEST_TO_SLEEP_STATE)1484ath10k_sdio_set_mbox_sleep(ar, true);1485}14861487static int ath10k_sdio_prep_async_req(struct ath10k *ar, u32 addr,1488struct sk_buff *skb,1489struct completion *comp,1490bool htc_msg, enum ath10k_htc_ep_id eid)1491{1492struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);1493struct ath10k_sdio_bus_request *bus_req;14941495/* Allocate a bus request for the message and queue it on the1496* SDIO workqueue.1497*/1498bus_req = ath10k_sdio_alloc_busreq(ar);1499if (!bus_req) {1500ath10k_warn(ar,1501"unable to allocate bus request for async request\n");1502return -ENOMEM;1503}15041505bus_req->skb = skb;1506bus_req->eid = eid;1507bus_req->address = addr;1508bus_req->htc_msg = htc_msg;1509bus_req->comp = comp;15101511spin_lock_bh(&ar_sdio->wr_async_lock);1512list_add_tail(&bus_req->list, &ar_sdio->wr_asyncq);1513spin_unlock_bh(&ar_sdio->wr_async_lock);15141515return 0;1516}15171518/* IRQ handler */15191520static void ath10k_sdio_irq_handler(struct sdio_func *func)1521{1522struct ath10k_sdio *ar_sdio = sdio_get_drvdata(func);1523struct ath10k *ar = ar_sdio->ar;1524unsigned long timeout;1525bool done = false;1526int ret;15271528/* Release the host during interrupts so we can pick it back up when1529* we process commands.1530*/1531sdio_release_host(ar_sdio->func);15321533timeout = jiffies + ATH10K_SDIO_HIF_COMMUNICATION_TIMEOUT_HZ;1534do {1535ret = ath10k_sdio_mbox_proc_pending_irqs(ar, &done);1536if (ret)1537break;1538} while (time_before(jiffies, timeout) && !done);15391540ath10k_mac_tx_push_pending(ar);15411542sdio_claim_host(ar_sdio->func);15431544if (ret && ret != -ECANCELED)1545ath10k_warn(ar, "failed to process pending SDIO interrupts: %d\n",1546ret);1547}15481549/* sdio HIF functions */15501551static int ath10k_sdio_disable_intrs(struct ath10k *ar)1552{1553struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);1554struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;1555struct ath10k_sdio_irq_enable_regs *regs = 
irq_data->irq_en_reg;1556int ret;15571558mutex_lock(&irq_data->mtx);15591560memset(regs, 0, sizeof(*regs));1561ret = ath10k_sdio_write(ar, MBOX_INT_STATUS_ENABLE_ADDRESS,1562®s->int_status_en, sizeof(*regs));1563if (ret)1564ath10k_warn(ar, "unable to disable sdio interrupts: %d\n", ret);15651566mutex_unlock(&irq_data->mtx);15671568return ret;1569}15701571static int ath10k_sdio_hif_power_up(struct ath10k *ar,1572enum ath10k_firmware_mode fw_mode)1573{1574struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);1575struct sdio_func *func = ar_sdio->func;1576int ret;15771578if (!ar_sdio->is_disabled)1579return 0;15801581ath10k_dbg(ar, ATH10K_DBG_BOOT, "sdio power on\n");15821583ret = ath10k_sdio_config(ar);1584if (ret) {1585ath10k_err(ar, "failed to config sdio: %d\n", ret);1586return ret;1587}15881589sdio_claim_host(func);15901591ret = sdio_enable_func(func);1592if (ret) {1593ath10k_warn(ar, "unable to enable sdio function: %d)\n", ret);1594sdio_release_host(func);1595return ret;1596}15971598sdio_release_host(func);15991600/* Wait for hardware to initialise. 
It should take a lot less than1601* 20 ms but let's be conservative here.1602*/1603msleep(20);16041605ar_sdio->is_disabled = false;16061607ret = ath10k_sdio_disable_intrs(ar);1608if (ret)1609return ret;16101611return 0;1612}16131614static void ath10k_sdio_hif_power_down(struct ath10k *ar)1615{1616struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);1617int ret;16181619if (ar_sdio->is_disabled)1620return;16211622ath10k_dbg(ar, ATH10K_DBG_BOOT, "sdio power off\n");16231624timer_delete_sync(&ar_sdio->sleep_timer);1625ath10k_sdio_set_mbox_sleep(ar, true);16261627/* Disable the card */1628sdio_claim_host(ar_sdio->func);16291630ret = sdio_disable_func(ar_sdio->func);1631if (ret) {1632ath10k_warn(ar, "unable to disable sdio function: %d\n", ret);1633sdio_release_host(ar_sdio->func);1634return;1635}16361637ret = mmc_hw_reset(ar_sdio->func->card);1638if (ret)1639ath10k_warn(ar, "unable to reset sdio: %d\n", ret);16401641sdio_release_host(ar_sdio->func);16421643ar_sdio->is_disabled = true;1644}16451646static int ath10k_sdio_hif_tx_sg(struct ath10k *ar, u8 pipe_id,1647struct ath10k_hif_sg_item *items, int n_items)1648{1649struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);1650enum ath10k_htc_ep_id eid;1651struct sk_buff *skb;1652int ret, i;16531654eid = pipe_id_to_eid(pipe_id);16551656for (i = 0; i < n_items; i++) {1657size_t padded_len;1658u32 address;16591660skb = items[i].transfer_context;1661padded_len = ath10k_sdio_calc_txrx_padded_len(ar_sdio,1662skb->len);1663skb_trim(skb, padded_len);16641665/* Write TX data to the end of the mbox address space */1666address = ar_sdio->mbox_addr[eid] + ar_sdio->mbox_size[eid] -1667skb->len;1668ret = ath10k_sdio_prep_async_req(ar, address, skb,1669NULL, true, eid);1670if (ret)1671return ret;1672}16731674queue_work(ar_sdio->workqueue, &ar_sdio->wr_async_work);16751676return 0;1677}16781679static int ath10k_sdio_enable_intrs(struct ath10k *ar)1680{1681struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);1682struct ath10k_sdio_irq_data 
*irq_data = &ar_sdio->irq_data;1683struct ath10k_sdio_irq_enable_regs *regs = irq_data->irq_en_reg;1684int ret;16851686mutex_lock(&irq_data->mtx);16871688/* Enable all but CPU interrupts */1689regs->int_status_en = FIELD_PREP(MBOX_INT_STATUS_ENABLE_ERROR_MASK, 1) |1690FIELD_PREP(MBOX_INT_STATUS_ENABLE_CPU_MASK, 1) |1691FIELD_PREP(MBOX_INT_STATUS_ENABLE_COUNTER_MASK, 1);16921693/* NOTE: There are some cases where HIF can do detection of1694* pending mbox messages which is disabled now.1695*/1696regs->int_status_en |=1697FIELD_PREP(MBOX_INT_STATUS_ENABLE_MBOX_DATA_MASK, 1);16981699/* Set up the CPU Interrupt Status Register, enable CPU sourced interrupt #01700* #0 is used for report assertion from target1701*/1702regs->cpu_int_status_en = FIELD_PREP(MBOX_CPU_STATUS_ENABLE_ASSERT_MASK, 1);17031704/* Set up the Error Interrupt status Register */1705regs->err_int_status_en =1706FIELD_PREP(MBOX_ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK, 1) |1707FIELD_PREP(MBOX_ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK, 1);17081709/* Enable Counter interrupt status register to get fatal errors for1710* debugging.1711*/1712regs->cntr_int_status_en =1713FIELD_PREP(MBOX_COUNTER_INT_STATUS_ENABLE_BIT_MASK,1714ATH10K_SDIO_TARGET_DEBUG_INTR_MASK);17151716ret = ath10k_sdio_write(ar, MBOX_INT_STATUS_ENABLE_ADDRESS,1717®s->int_status_en, sizeof(*regs));1718if (ret)1719ath10k_warn(ar,1720"failed to update mbox interrupt status register : %d\n",1721ret);17221723mutex_unlock(&irq_data->mtx);1724return ret;1725}17261727/* HIF diagnostics */17281729static int ath10k_sdio_hif_diag_read(struct ath10k *ar, u32 address, void *buf,1730size_t buf_len)1731{1732int ret;1733void *mem;17341735mem = kzalloc(buf_len, GFP_KERNEL);1736if (!mem)1737return -ENOMEM;17381739/* set window register to start read cycle */1740ret = ath10k_sdio_write32(ar, MBOX_WINDOW_READ_ADDR_ADDRESS, address);1741if (ret) {1742ath10k_warn(ar, "failed to set mbox window read address: %d", ret);1743goto out;1744}17451746/* read the data */1747ret = 
ath10k_sdio_read(ar, MBOX_WINDOW_DATA_ADDRESS, mem, buf_len);1748if (ret) {1749ath10k_warn(ar, "failed to read from mbox window data address: %d\n",1750ret);1751goto out;1752}17531754memcpy(buf, mem, buf_len);17551756out:1757kfree(mem);17581759return ret;1760}17611762static int ath10k_sdio_diag_read32(struct ath10k *ar, u32 address,1763u32 *value)1764{1765__le32 *val;1766int ret;17671768val = kzalloc(sizeof(*val), GFP_KERNEL);1769if (!val)1770return -ENOMEM;17711772ret = ath10k_sdio_hif_diag_read(ar, address, val, sizeof(*val));1773if (ret)1774goto out;17751776*value = __le32_to_cpu(*val);17771778out:1779kfree(val);17801781return ret;1782}17831784static int ath10k_sdio_hif_diag_write_mem(struct ath10k *ar, u32 address,1785const void *data, int nbytes)1786{1787int ret;17881789/* set write data */1790ret = ath10k_sdio_write(ar, MBOX_WINDOW_DATA_ADDRESS, data, nbytes);1791if (ret) {1792ath10k_warn(ar,1793"failed to write 0x%p to mbox window data address: %d\n",1794data, ret);1795return ret;1796}17971798/* set window register, which starts the write cycle */1799ret = ath10k_sdio_write32(ar, MBOX_WINDOW_WRITE_ADDR_ADDRESS, address);1800if (ret) {1801ath10k_warn(ar, "failed to set mbox window write address: %d", ret);1802return ret;1803}18041805return 0;1806}18071808static int ath10k_sdio_hif_start_post(struct ath10k *ar)1809{1810struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);1811u32 addr, val;1812int ret = 0;18131814addr = host_interest_item_address(HI_ITEM(hi_acs_flags));18151816ret = ath10k_sdio_diag_read32(ar, addr, &val);1817if (ret) {1818ath10k_warn(ar, "unable to read hi_acs_flags : %d\n", ret);1819return ret;1820}18211822if (val & HI_ACS_FLAGS_SDIO_SWAP_MAILBOX_FW_ACK) {1823ath10k_dbg(ar, ATH10K_DBG_SDIO,1824"sdio mailbox swap service enabled\n");1825ar_sdio->swap_mbox = true;1826} else {1827ath10k_dbg(ar, ATH10K_DBG_SDIO,1828"sdio mailbox swap service disabled\n");1829ar_sdio->swap_mbox = false;1830}18311832ath10k_sdio_set_mbox_sleep(ar, true);18331834return 
0;1835}18361837static int ath10k_sdio_get_htt_tx_complete(struct ath10k *ar)1838{1839u32 addr, val;1840int ret;18411842addr = host_interest_item_address(HI_ITEM(hi_acs_flags));18431844ret = ath10k_sdio_diag_read32(ar, addr, &val);1845if (ret) {1846ath10k_warn(ar,1847"unable to read hi_acs_flags for htt tx complete: %d\n", ret);1848return ret;1849}18501851ret = (val & HI_ACS_FLAGS_SDIO_REDUCE_TX_COMPL_FW_ACK);18521853ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio reduce tx complete fw%sack\n",1854ret ? " " : " not ");18551856return ret;1857}18581859/* HIF start/stop */18601861static int ath10k_sdio_hif_start(struct ath10k *ar)1862{1863struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);1864int ret;18651866ath10k_core_napi_enable(ar);18671868/* Sleep 20 ms before HIF interrupts are disabled.1869* This will give target plenty of time to process the BMI done1870* request before interrupts are disabled.1871*/1872msleep(20);1873ret = ath10k_sdio_disable_intrs(ar);1874if (ret)1875return ret;18761877/* eid 0 always uses the lower part of the extended mailbox address1878* space (ext_info[0].htc_ext_addr).1879*/1880ar_sdio->mbox_addr[0] = ar_sdio->mbox_info.ext_info[0].htc_ext_addr;1881ar_sdio->mbox_size[0] = ar_sdio->mbox_info.ext_info[0].htc_ext_sz;18821883sdio_claim_host(ar_sdio->func);18841885/* Register the isr */1886ret = sdio_claim_irq(ar_sdio->func, ath10k_sdio_irq_handler);1887if (ret) {1888ath10k_warn(ar, "failed to claim sdio interrupt: %d\n", ret);1889sdio_release_host(ar_sdio->func);1890return ret;1891}18921893sdio_release_host(ar_sdio->func);18941895ret = ath10k_sdio_enable_intrs(ar);1896if (ret)1897ath10k_warn(ar, "failed to enable sdio interrupts: %d\n", ret);18981899/* Enable sleep and then disable it again */1900ret = ath10k_sdio_set_mbox_sleep(ar, true);1901if (ret)1902return ret;19031904/* Wait for 20ms for the written value to take effect */1905msleep(20);19061907ret = ath10k_sdio_set_mbox_sleep(ar, false);1908if (ret)1909return ret;19101911return 
0;1912}19131914#define SDIO_IRQ_DISABLE_TIMEOUT_HZ (3 * HZ)19151916static void ath10k_sdio_irq_disable(struct ath10k *ar)1917{1918struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);1919struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;1920struct ath10k_sdio_irq_enable_regs *regs = irq_data->irq_en_reg;1921struct sk_buff *skb;1922struct completion irqs_disabled_comp;1923int ret;19241925skb = dev_alloc_skb(sizeof(*regs));1926if (!skb)1927return;19281929mutex_lock(&irq_data->mtx);19301931memset(regs, 0, sizeof(*regs)); /* disable all interrupts */1932memcpy(skb->data, regs, sizeof(*regs));1933skb_put(skb, sizeof(*regs));19341935mutex_unlock(&irq_data->mtx);19361937init_completion(&irqs_disabled_comp);1938ret = ath10k_sdio_prep_async_req(ar, MBOX_INT_STATUS_ENABLE_ADDRESS,1939skb, &irqs_disabled_comp, false, 0);1940if (ret)1941goto out;19421943queue_work(ar_sdio->workqueue, &ar_sdio->wr_async_work);19441945/* Wait for the completion of the IRQ disable request.1946* If there is a timeout we will try to disable irq's anyway.1947*/1948ret = wait_for_completion_timeout(&irqs_disabled_comp,1949SDIO_IRQ_DISABLE_TIMEOUT_HZ);1950if (!ret)1951ath10k_warn(ar, "sdio irq disable request timed out\n");19521953sdio_claim_host(ar_sdio->func);19541955ret = sdio_release_irq(ar_sdio->func);1956if (ret)1957ath10k_warn(ar, "failed to release sdio interrupt: %d\n", ret);19581959sdio_release_host(ar_sdio->func);19601961out:1962kfree_skb(skb);1963}19641965static void ath10k_sdio_hif_stop(struct ath10k *ar)1966{1967struct ath10k_sdio_bus_request *req, *tmp_req;1968struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);1969struct sk_buff *skb;19701971ath10k_sdio_irq_disable(ar);19721973cancel_work_sync(&ar_sdio->async_work_rx);19741975while ((skb = skb_dequeue(&ar_sdio->rx_head)))1976dev_kfree_skb_any(skb);19771978cancel_work_sync(&ar_sdio->wr_async_work);19791980spin_lock_bh(&ar_sdio->wr_async_lock);19811982/* Free all bus requests that have not been handled 
*/1983list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) {1984struct ath10k_htc_ep *ep;19851986list_del(&req->list);19871988if (req->htc_msg) {1989ep = &ar->htc.endpoint[req->eid];1990ath10k_htc_notify_tx_completion(ep, req->skb);1991} else if (req->skb) {1992kfree_skb(req->skb);1993}1994ath10k_sdio_free_bus_req(ar, req);1995}19961997spin_unlock_bh(&ar_sdio->wr_async_lock);19981999ath10k_core_napi_sync_disable(ar);2000}20012002#ifdef CONFIG_PM20032004static int ath10k_sdio_hif_suspend(struct ath10k *ar)2005{2006return 0;2007}20082009static int ath10k_sdio_hif_resume(struct ath10k *ar)2010{2011switch (ar->state) {2012case ATH10K_STATE_OFF:2013ath10k_dbg(ar, ATH10K_DBG_SDIO,2014"sdio resume configuring sdio\n");20152016/* need to set sdio settings after power is cut from sdio */2017ath10k_sdio_config(ar);2018break;20192020case ATH10K_STATE_ON:2021default:2022break;2023}20242025return 0;2026}2027#endif20282029static int ath10k_sdio_hif_map_service_to_pipe(struct ath10k *ar,2030u16 service_id,2031u8 *ul_pipe, u8 *dl_pipe)2032{2033struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);2034struct ath10k_htc *htc = &ar->htc;2035u32 htt_addr, wmi_addr, htt_mbox_size, wmi_mbox_size;2036enum ath10k_htc_ep_id eid;2037bool ep_found = false;2038int i;20392040/* For sdio, we are interested in the mapping between eid2041* and pipeid rather than service_id to pipe_id.2042* First we find out which eid has been allocated to the2043* service...2044*/2045for (i = 0; i < ATH10K_HTC_EP_COUNT; i++) {2046if (htc->endpoint[i].service_id == service_id) {2047eid = htc->endpoint[i].eid;2048ep_found = true;2049break;2050}2051}20522053if (!ep_found)2054return -EINVAL;20552056/* Then we create the simplest mapping possible between pipeid2057* and eid2058*/2059*ul_pipe = *dl_pipe = (u8)eid;20602061/* Normally, HTT will use the upper part of the extended2062* mailbox address space (ext_info[1].htc_ext_addr) and WMI ctrl2063* the lower part (ext_info[0].htc_ext_addr).2064* If fw wants 
swapping of mailbox addresses, the opposite is true.2065*/2066if (ar_sdio->swap_mbox) {2067htt_addr = ar_sdio->mbox_info.ext_info[0].htc_ext_addr;2068wmi_addr = ar_sdio->mbox_info.ext_info[1].htc_ext_addr;2069htt_mbox_size = ar_sdio->mbox_info.ext_info[0].htc_ext_sz;2070wmi_mbox_size = ar_sdio->mbox_info.ext_info[1].htc_ext_sz;2071} else {2072htt_addr = ar_sdio->mbox_info.ext_info[1].htc_ext_addr;2073wmi_addr = ar_sdio->mbox_info.ext_info[0].htc_ext_addr;2074htt_mbox_size = ar_sdio->mbox_info.ext_info[1].htc_ext_sz;2075wmi_mbox_size = ar_sdio->mbox_info.ext_info[0].htc_ext_sz;2076}20772078switch (service_id) {2079case ATH10K_HTC_SVC_ID_RSVD_CTRL:2080/* HTC ctrl ep mbox address has already been setup in2081* ath10k_sdio_hif_start2082*/2083break;2084case ATH10K_HTC_SVC_ID_WMI_CONTROL:2085ar_sdio->mbox_addr[eid] = wmi_addr;2086ar_sdio->mbox_size[eid] = wmi_mbox_size;2087ath10k_dbg(ar, ATH10K_DBG_SDIO,2088"sdio wmi ctrl mbox_addr 0x%x mbox_size %d\n",2089ar_sdio->mbox_addr[eid], ar_sdio->mbox_size[eid]);2090break;2091case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:2092ar_sdio->mbox_addr[eid] = htt_addr;2093ar_sdio->mbox_size[eid] = htt_mbox_size;2094ath10k_dbg(ar, ATH10K_DBG_SDIO,2095"sdio htt data mbox_addr 0x%x mbox_size %d\n",2096ar_sdio->mbox_addr[eid], ar_sdio->mbox_size[eid]);2097break;2098default:2099ath10k_warn(ar, "unsupported HTC service id: %d\n",2100service_id);2101return -EINVAL;2102}21032104return 0;2105}21062107static void ath10k_sdio_hif_get_default_pipe(struct ath10k *ar,2108u8 *ul_pipe, u8 *dl_pipe)2109{2110ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio hif get default pipe\n");21112112/* HTC ctrl ep (SVC id 1) always has eid (and pipe_id in our2113* case) == 02114*/2115*ul_pipe = 0;2116*dl_pipe = 0;2117}21182119static const struct ath10k_hif_ops ath10k_sdio_hif_ops = {2120.tx_sg = ath10k_sdio_hif_tx_sg,2121.diag_read = ath10k_sdio_hif_diag_read,2122.diag_write = ath10k_sdio_hif_diag_write_mem,2123.exchange_bmi_msg = ath10k_sdio_bmi_exchange_msg,2124.start = 
ath10k_sdio_hif_start,2125.stop = ath10k_sdio_hif_stop,2126.start_post = ath10k_sdio_hif_start_post,2127.get_htt_tx_complete = ath10k_sdio_get_htt_tx_complete,2128.map_service_to_pipe = ath10k_sdio_hif_map_service_to_pipe,2129.get_default_pipe = ath10k_sdio_hif_get_default_pipe,2130.power_up = ath10k_sdio_hif_power_up,2131.power_down = ath10k_sdio_hif_power_down,2132#ifdef CONFIG_PM2133.suspend = ath10k_sdio_hif_suspend,2134.resume = ath10k_sdio_hif_resume,2135#endif2136};21372138#ifdef CONFIG_PM_SLEEP21392140/* Empty handlers so that mmc subsystem doesn't remove us entirely during2141* suspend. We instead follow cfg80211 suspend/resume handlers.2142*/2143static int ath10k_sdio_pm_suspend(struct device *device)2144{2145struct sdio_func *func = dev_to_sdio_func(device);2146struct ath10k_sdio *ar_sdio = sdio_get_drvdata(func);2147struct ath10k *ar = ar_sdio->ar;2148mmc_pm_flag_t pm_flag, pm_caps;2149int ret;21502151if (!device_may_wakeup(ar->dev))2152return 0;21532154ath10k_sdio_set_mbox_sleep(ar, true);21552156pm_flag = MMC_PM_KEEP_POWER;21572158ret = sdio_set_host_pm_flags(func, pm_flag);2159if (ret) {2160pm_caps = sdio_get_host_pm_caps(func);2161ath10k_warn(ar, "failed to set sdio host pm flags (0x%x, 0x%x): %d\n",2162pm_flag, pm_caps, ret);2163return ret;2164}21652166return ret;2167}21682169static int ath10k_sdio_pm_resume(struct device *device)2170{2171return 0;2172}21732174static SIMPLE_DEV_PM_OPS(ath10k_sdio_pm_ops, ath10k_sdio_pm_suspend,2175ath10k_sdio_pm_resume);21762177#define ATH10K_SDIO_PM_OPS (&ath10k_sdio_pm_ops)21782179#else21802181#define ATH10K_SDIO_PM_OPS NULL21822183#endif /* CONFIG_PM_SLEEP */21842185static int ath10k_sdio_napi_poll(struct napi_struct *ctx, int budget)2186{2187struct ath10k *ar = container_of(ctx, struct ath10k, napi);2188int done;21892190done = ath10k_htt_rx_hl_indication(ar, budget);2191ath10k_dbg(ar, ATH10K_DBG_SDIO, "napi poll: done: %d, budget:%d\n", done, budget);21922193if (done < budget)2194napi_complete_done(ctx, 
done);21952196return done;2197}21982199static int ath10k_sdio_read_host_interest_value(struct ath10k *ar,2200u32 item_offset,2201u32 *val)2202{2203u32 addr;2204int ret;22052206addr = host_interest_item_address(item_offset);22072208ret = ath10k_sdio_diag_read32(ar, addr, val);22092210if (ret)2211ath10k_warn(ar, "unable to read host interest offset %d value\n",2212item_offset);22132214return ret;2215}22162217static int ath10k_sdio_read_mem(struct ath10k *ar, u32 address, void *buf,2218u32 buf_len)2219{2220u32 val;2221int i, ret;22222223for (i = 0; i < buf_len; i += 4) {2224ret = ath10k_sdio_diag_read32(ar, address + i, &val);2225if (ret) {2226ath10k_warn(ar, "unable to read mem %d value\n", address + i);2227break;2228}2229memcpy(buf + i, &val, 4);2230}22312232return ret;2233}22342235static bool ath10k_sdio_is_fast_dump_supported(struct ath10k *ar)2236{2237u32 param;22382239ath10k_sdio_read_host_interest_value(ar, HI_ITEM(hi_option_flag2), ¶m);22402241ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio hi_option_flag2 %x\n", param);22422243return !!(param & HI_OPTION_SDIO_CRASH_DUMP_ENHANCEMENT_FW);2244}22452246static void ath10k_sdio_dump_registers(struct ath10k *ar,2247struct ath10k_fw_crash_data *crash_data,2248bool fast_dump)2249{2250u32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};2251int i, ret;2252u32 reg_dump_area;22532254ret = ath10k_sdio_read_host_interest_value(ar, HI_ITEM(hi_failure_state),2255®_dump_area);2256if (ret) {2257ath10k_warn(ar, "failed to read firmware dump area: %d\n", ret);2258return;2259}22602261if (fast_dump)2262ret = ath10k_bmi_read_memory(ar, reg_dump_area, reg_dump_values,2263sizeof(reg_dump_values));2264else2265ret = ath10k_sdio_read_mem(ar, reg_dump_area, reg_dump_values,2266sizeof(reg_dump_values));22672268if (ret) {2269ath10k_warn(ar, "failed to read firmware dump value: %d\n", ret);2270return;2271}22722273ath10k_err(ar, "firmware register dump:\n");2274for (i = 0; i < ARRAY_SIZE(reg_dump_values); i += 4)2275ath10k_err(ar, "[%02d]: 0x%08X 0x%08X 
0x%08X 0x%08X\n",2276i,2277reg_dump_values[i],2278reg_dump_values[i + 1],2279reg_dump_values[i + 2],2280reg_dump_values[i + 3]);22812282if (!crash_data)2283return;22842285for (i = 0; i < ARRAY_SIZE(reg_dump_values); i++)2286crash_data->registers[i] = __cpu_to_le32(reg_dump_values[i]);2287}22882289static int ath10k_sdio_dump_memory_section(struct ath10k *ar,2290const struct ath10k_mem_region *mem_region,2291u8 *buf, size_t buf_len)2292{2293const struct ath10k_mem_section *cur_section, *next_section;2294unsigned int count, section_size, skip_size;2295int ret, i, j;22962297if (!mem_region || !buf)2298return 0;22992300cur_section = &mem_region->section_table.sections[0];23012302if (mem_region->start > cur_section->start) {2303ath10k_warn(ar, "incorrect memdump region 0x%x with section start address 0x%x.\n",2304mem_region->start, cur_section->start);2305return 0;2306}23072308skip_size = cur_section->start - mem_region->start;23092310/* fill the gap between the first register section and register2311* start address2312*/2313for (i = 0; i < skip_size; i++) {2314*buf = ATH10K_MAGIC_NOT_COPIED;2315buf++;2316}23172318count = 0;2319i = 0;2320for (; cur_section; cur_section = next_section) {2321section_size = cur_section->end - cur_section->start;23222323if (section_size <= 0) {2324ath10k_warn(ar, "incorrect ramdump format with start address 0x%x and stop address 0x%x\n",2325cur_section->start,2326cur_section->end);2327break;2328}23292330if (++i == mem_region->section_table.size) {2331/* last section */2332next_section = NULL;2333skip_size = 0;2334} else {2335next_section = cur_section + 1;23362337if (cur_section->end > next_section->start) {2338ath10k_warn(ar, "next ramdump section 0x%x is smaller than current end address 0x%x\n",2339next_section->start,2340cur_section->end);2341break;2342}23432344skip_size = next_section->start - cur_section->end;2345}23462347if (buf_len < (skip_size + section_size)) {2348ath10k_warn(ar, "ramdump buffer is too small: %zu\n", 
buf_len);2349break;2350}23512352buf_len -= skip_size + section_size;23532354/* read section to dest memory */2355ret = ath10k_sdio_read_mem(ar, cur_section->start,2356buf, section_size);2357if (ret) {2358ath10k_warn(ar, "failed to read ramdump from section 0x%x: %d\n",2359cur_section->start, ret);2360break;2361}23622363buf += section_size;2364count += section_size;23652366/* fill in the gap between this section and the next */2367for (j = 0; j < skip_size; j++) {2368*buf = ATH10K_MAGIC_NOT_COPIED;2369buf++;2370}23712372count += skip_size;2373}23742375return count;2376}23772378/* if an error happened returns < 0, otherwise the length */2379static int ath10k_sdio_dump_memory_generic(struct ath10k *ar,2380const struct ath10k_mem_region *current_region,2381u8 *buf,2382bool fast_dump)2383{2384int ret;23852386if (current_region->section_table.size > 0)2387/* Copy each section individually. */2388return ath10k_sdio_dump_memory_section(ar,2389current_region,2390buf,2391current_region->len);23922393/* No individual memory sections defined so we can2394* copy the entire memory region.2395*/2396if (fast_dump)2397ret = ath10k_bmi_read_memory(ar,2398current_region->start,2399buf,2400current_region->len);2401else2402ret = ath10k_sdio_read_mem(ar,2403current_region->start,2404buf,2405current_region->len);24062407if (ret) {2408ath10k_warn(ar, "failed to copy ramdump region %s: %d\n",2409current_region->name, ret);2410return ret;2411}24122413return current_region->len;2414}24152416static void ath10k_sdio_dump_memory(struct ath10k *ar,2417struct ath10k_fw_crash_data *crash_data,2418bool fast_dump)2419{2420const struct ath10k_hw_mem_layout *mem_layout;2421const struct ath10k_mem_region *current_region;2422struct ath10k_dump_ram_data_hdr *hdr;2423u32 count;2424size_t buf_len;2425int ret, i;2426u8 *buf;24272428if (!crash_data)2429return;24302431mem_layout = ath10k_coredump_get_mem_layout(ar);2432if (!mem_layout)2433return;24342435current_region = 
&mem_layout->region_table.regions[0];24362437buf = crash_data->ramdump_buf;2438buf_len = crash_data->ramdump_buf_len;24392440memset(buf, 0, buf_len);24412442for (i = 0; i < mem_layout->region_table.size; i++) {2443count = 0;24442445if (current_region->len > buf_len) {2446ath10k_warn(ar, "memory region %s size %d is larger that remaining ramdump buffer size %zu\n",2447current_region->name,2448current_region->len,2449buf_len);2450break;2451}24522453/* Reserve space for the header. */2454hdr = (void *)buf;2455buf += sizeof(*hdr);2456buf_len -= sizeof(*hdr);24572458ret = ath10k_sdio_dump_memory_generic(ar, current_region, buf,2459fast_dump);2460if (ret >= 0)2461count = ret;24622463hdr->region_type = cpu_to_le32(current_region->type);2464hdr->start = cpu_to_le32(current_region->start);2465hdr->length = cpu_to_le32(count);24662467if (count == 0)2468/* Note: the header remains, just with zero length. */2469break;24702471buf += count;2472buf_len -= count;24732474current_region++;2475}2476}24772478void ath10k_sdio_fw_crashed_dump(struct ath10k *ar)2479{2480struct ath10k_fw_crash_data *crash_data;2481char guid[UUID_STRING_LEN + 1];2482bool fast_dump;24832484fast_dump = ath10k_sdio_is_fast_dump_supported(ar);24852486if (fast_dump)2487ath10k_bmi_start(ar);24882489ar->stats.fw_crash_counter++;24902491ath10k_sdio_disable_intrs(ar);24922493crash_data = ath10k_coredump_new(ar);24942495if (crash_data)2496scnprintf(guid, sizeof(guid), "%pUl", &crash_data->guid);2497else2498scnprintf(guid, sizeof(guid), "n/a");24992500ath10k_err(ar, "firmware crashed! 
(guid %s)\n", guid);2501ath10k_print_driver_info(ar);2502ath10k_sdio_dump_registers(ar, crash_data, fast_dump);2503ath10k_sdio_dump_memory(ar, crash_data, fast_dump);25042505ath10k_sdio_enable_intrs(ar);25062507ath10k_core_start_recovery(ar);2508}25092510static int ath10k_sdio_probe(struct sdio_func *func,2511const struct sdio_device_id *id)2512{2513struct ath10k_sdio *ar_sdio;2514struct ath10k *ar;2515enum ath10k_hw_rev hw_rev;2516u32 dev_id_base;2517struct ath10k_bus_params bus_params = {};2518int ret, i;25192520/* Assumption: All SDIO based chipsets (so far) are QCA6174 based.2521* If there will be newer chipsets that does not use the hw reg2522* setup as defined in qca6174_regs and qca6174_values, this2523* assumption is no longer valid and hw_rev must be setup differently2524* depending on chipset.2525*/2526hw_rev = ATH10K_HW_QCA6174;25272528ar = ath10k_core_create(sizeof(*ar_sdio), &func->dev, ATH10K_BUS_SDIO,2529hw_rev, &ath10k_sdio_hif_ops);2530if (!ar) {2531dev_err(&func->dev, "failed to allocate core\n");2532return -ENOMEM;2533}25342535netif_napi_add(ar->napi_dev, &ar->napi, ath10k_sdio_napi_poll);25362537ath10k_dbg(ar, ATH10K_DBG_BOOT,2538"sdio new func %d vendor 0x%x device 0x%x block 0x%x/0x%x\n",2539func->num, func->vendor, func->device,2540func->max_blksize, func->cur_blksize);25412542ar_sdio = ath10k_sdio_priv(ar);25432544ar_sdio->irq_data.irq_proc_reg =2545devm_kzalloc(ar->dev, sizeof(struct ath10k_sdio_irq_proc_regs),2546GFP_KERNEL);2547if (!ar_sdio->irq_data.irq_proc_reg) {2548ret = -ENOMEM;2549goto err_core_destroy;2550}25512552ar_sdio->vsg_buffer = devm_kmalloc(ar->dev, ATH10K_SDIO_VSG_BUF_SIZE, GFP_KERNEL);2553if (!ar_sdio->vsg_buffer) {2554ret = -ENOMEM;2555goto err_core_destroy;2556}25572558ar_sdio->irq_data.irq_en_reg =2559devm_kzalloc(ar->dev, sizeof(struct ath10k_sdio_irq_enable_regs),2560GFP_KERNEL);2561if (!ar_sdio->irq_data.irq_en_reg) {2562ret = -ENOMEM;2563goto err_core_destroy;2564}25652566ar_sdio->bmi_buf = devm_kzalloc(ar->dev, 
BMI_MAX_LARGE_CMDBUF_SIZE, GFP_KERNEL);2567if (!ar_sdio->bmi_buf) {2568ret = -ENOMEM;2569goto err_core_destroy;2570}25712572ar_sdio->func = func;2573sdio_set_drvdata(func, ar_sdio);25742575ar_sdio->is_disabled = true;2576ar_sdio->ar = ar;25772578spin_lock_init(&ar_sdio->lock);2579spin_lock_init(&ar_sdio->wr_async_lock);2580mutex_init(&ar_sdio->irq_data.mtx);25812582INIT_LIST_HEAD(&ar_sdio->bus_req_freeq);2583INIT_LIST_HEAD(&ar_sdio->wr_asyncq);25842585INIT_WORK(&ar_sdio->wr_async_work, ath10k_sdio_write_async_work);2586ar_sdio->workqueue = create_singlethread_workqueue("ath10k_sdio_wq");2587if (!ar_sdio->workqueue) {2588ret = -ENOMEM;2589goto err_core_destroy;2590}25912592for (i = 0; i < ATH10K_SDIO_BUS_REQUEST_MAX_NUM; i++)2593ath10k_sdio_free_bus_req(ar, &ar_sdio->bus_req[i]);25942595skb_queue_head_init(&ar_sdio->rx_head);2596INIT_WORK(&ar_sdio->async_work_rx, ath10k_rx_indication_async_work);25972598dev_id_base = (id->device & 0x0F00);2599if (dev_id_base != (SDIO_DEVICE_ID_ATHEROS_AR6005 & 0x0F00) &&2600dev_id_base != (SDIO_DEVICE_ID_ATHEROS_QCA9377 & 0x0F00)) {2601ret = -ENODEV;2602ath10k_err(ar, "unsupported device id %u (0x%x)\n",2603dev_id_base, id->device);2604goto err_free_wq;2605}26062607ar->dev_id = QCA9377_1_0_DEVICE_ID;2608ar->id.vendor = id->vendor;2609ar->id.device = id->device;26102611ath10k_sdio_set_mbox_info(ar);26122613bus_params.dev_type = ATH10K_DEV_TYPE_HL;2614/* TODO: don't know yet how to get chip_id with SDIO */2615bus_params.chip_id = 0;2616bus_params.hl_msdu_ids = true;26172618ar->hw->max_mtu = ETH_DATA_LEN;26192620ret = ath10k_core_register(ar, &bus_params);2621if (ret) {2622ath10k_err(ar, "failed to register driver core: %d\n", ret);2623goto err_free_wq;2624}26252626timer_setup(&ar_sdio->sleep_timer, ath10k_sdio_sleep_timer_handler, 0);26272628return 0;26292630err_free_wq:2631destroy_workqueue(ar_sdio->workqueue);2632err_core_destroy:2633ath10k_core_destroy(ar);26342635return ret;2636}26372638static void ath10k_sdio_remove(struct 
sdio_func *func)2639{2640struct ath10k_sdio *ar_sdio = sdio_get_drvdata(func);2641struct ath10k *ar = ar_sdio->ar;26422643ath10k_dbg(ar, ATH10K_DBG_BOOT,2644"sdio removed func %d vendor 0x%x device 0x%x\n",2645func->num, func->vendor, func->device);26462647ath10k_core_unregister(ar);26482649netif_napi_del(&ar->napi);26502651destroy_workqueue(ar_sdio->workqueue);26522653ath10k_core_destroy(ar);2654}26552656static const struct sdio_device_id ath10k_sdio_devices[] = {2657{SDIO_DEVICE(SDIO_VENDOR_ID_ATHEROS, SDIO_DEVICE_ID_ATHEROS_AR6005)},2658{SDIO_DEVICE(SDIO_VENDOR_ID_ATHEROS, SDIO_DEVICE_ID_ATHEROS_QCA9377)},2659{},2660};26612662MODULE_DEVICE_TABLE(sdio, ath10k_sdio_devices);26632664static struct sdio_driver ath10k_sdio_driver = {2665.name = "ath10k_sdio",2666.id_table = ath10k_sdio_devices,2667.probe = ath10k_sdio_probe,2668.remove = ath10k_sdio_remove,2669.drv = {2670.pm = ATH10K_SDIO_PM_OPS,2671},2672};2673module_sdio_driver(ath10k_sdio_driver);26742675MODULE_AUTHOR("Qualcomm Atheros");2676MODULE_DESCRIPTION("Driver support for Qualcomm Atheros 802.11ac WLAN SDIO devices");2677MODULE_LICENSE("Dual BSD/GPL");267826792680