Diffstat (limited to 'drivers')
34 files changed, 540 insertions, 278 deletions
diff --git a/drivers/memstick/core/ms_block.c b/drivers/memstick/core/ms_block.c index 4a4573fa7b0f..acf36676e388 100644 --- a/drivers/memstick/core/ms_block.c +++ b/drivers/memstick/core/ms_block.c @@ -1105,7 +1105,7 @@ static u16 msb_get_free_block(struct msb_data *msb, int zone) dbg_verbose("result of the free blocks scan: pba %d", pba); if (pba == msb->block_count || (msb_get_zone_from_pba(pba)) != zone) { - pr_err("BUG: cant get a free block"); + pr_err("BUG: can't get a free block"); msb->read_only = true; return MS_BLOCK_INVALID; } diff --git a/drivers/memstick/host/r592.c b/drivers/memstick/host/r592.c index 615a83782e55..e79a0218c492 100644 --- a/drivers/memstick/host/r592.c +++ b/drivers/memstick/host/r592.c @@ -293,7 +293,7 @@ static int r592_transfer_fifo_dma(struct r592_device *dev) /* TODO: hidden assumption about nenth beeing always 1 */ sg_count = dma_map_sg(&dev->pci_dev->dev, &dev->req->sg, 1, is_write ? - PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE); + DMA_TO_DEVICE : DMA_FROM_DEVICE); if (sg_count != 1 || sg_dma_len(&dev->req->sg) < R592_LFIFO_SIZE) { message("problem in dma_map_sg"); @@ -310,8 +310,7 @@ static int r592_transfer_fifo_dma(struct r592_device *dev) } dma_unmap_sg(&dev->pci_dev->dev, &dev->req->sg, 1, is_write ? - PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE); - + DMA_TO_DEVICE : DMA_FROM_DEVICE); return dev->dma_error; } @@ -877,7 +876,7 @@ static SIMPLE_DEV_PM_OPS(r592_pm_ops, r592_suspend, r592_resume); MODULE_DEVICE_TABLE(pci, r592_pci_id_tbl); -static struct pci_driver r852_pci_driver = { +static struct pci_driver r592_pci_driver = { .name = DRV_NAME, .id_table = r592_pci_id_tbl, .probe = r592_probe, @@ -885,7 +884,7 @@ static struct pci_driver r852_pci_driver = { .driver.pm = &r592_pm_ops, }; -module_pci_driver(r852_pci_driver); +module_pci_driver(r592_pci_driver); module_param_named(enable_dma, r592_enable_dma, bool, S_IRUGO); MODULE_PARM_DESC(enable_dma, "Enable usage of the DMA (default)"); diff --git a/drivers/memstick/host/tifm_ms.c b/drivers/memstick/host/tifm_ms.c index 57145374f6ac..c272453670be 100644 --- a/drivers/memstick/host/tifm_ms.c +++ b/drivers/memstick/host/tifm_ms.c @@ -279,8 +279,8 @@ static int tifm_ms_issue_cmd(struct tifm_ms *host) if (host->use_dma) { if (1 != tifm_map_sg(sock, &host->req->sg, 1, host->req->data_dir == READ - ? PCI_DMA_FROMDEVICE - : PCI_DMA_TODEVICE)) { + ? DMA_FROM_DEVICE + : DMA_TO_DEVICE)) { host->req->error = -ENOMEM; return host->req->error; } @@ -350,8 +350,8 @@ static void tifm_ms_complete_cmd(struct tifm_ms *host) if (host->use_dma) { tifm_unmap_sg(sock, &host->req->sg, 1, host->req->data_dir == READ - ? PCI_DMA_FROMDEVICE - : PCI_DMA_TODEVICE); + ? DMA_FROM_DEVICE + : DMA_TO_DEVICE); } writel((~TIFM_CTRL_LED) & readl(sock->addr + SOCK_CONTROL), @@ -607,8 +607,8 @@ static void tifm_ms_remove(struct tifm_dev *sock) if (host->use_dma) tifm_unmap_sg(sock, &host->req->sg, 1, host->req->data_dir == READ - ? PCI_DMA_TODEVICE - : PCI_DMA_FROMDEVICE); + ? DMA_TO_DEVICE + : DMA_FROM_DEVICE); host->req->error = -ETIME; do { diff --git a/drivers/mmc/core/Kconfig b/drivers/mmc/core/Kconfig index ae8b69aee619..6f25c34e4fec 100644 --- a/drivers/mmc/core/Kconfig +++ b/drivers/mmc/core/Kconfig @@ -15,7 +15,7 @@ config PWRSEQ_EMMC config PWRSEQ_SD8787 tristate "HW reset support for SD8787 BT + Wifi module" - depends on OF && (MWIFIEX || BT_MRVL_SDIO || LIBERTAS_SDIO) + depends on OF && (MWIFIEX || BT_MRVL_SDIO || LIBERTAS_SDIO || WILC1000_SDIO) help This selects hardware reset support for the SD8787 BT + Wifi module. 
By default this option is set to n. diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index 6a15fdf6e5f2..431af5e8be2f 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -98,6 +98,11 @@ static int max_devices; static DEFINE_IDA(mmc_blk_ida); static DEFINE_IDA(mmc_rpmb_ida); +struct mmc_blk_busy_data { + struct mmc_card *card; + u32 status; +}; + /* * There is one mmc_blk_data per slot. */ @@ -456,42 +461,6 @@ static int mmc_blk_ioctl_copy_to_user(struct mmc_ioc_cmd __user *ic_ptr, return 0; } -static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms, - u32 *resp_errs) -{ - unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms); - int err = 0; - u32 status; - - do { - bool done = time_after(jiffies, timeout); - - err = __mmc_send_status(card, &status, 5); - if (err) { - dev_err(mmc_dev(card->host), - "error %d requesting status\n", err); - return err; - } - - /* Accumulate any response error bits seen */ - if (resp_errs) - *resp_errs |= status; - - /* - * Timeout if the device never becomes ready for data and never - * leaves the program state. - */ - if (done) { - dev_err(mmc_dev(card->host), - "Card stuck in wrong state! %s status: %#x\n", - __func__, status); - return -ETIMEDOUT; - } - } while (!mmc_ready_for_data(status)); - - return err; -} - static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md, struct mmc_blk_ioc_data *idata) { @@ -588,6 +557,7 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md, return mmc_sanitize(card, idata->ic.cmd_timeout_ms); mmc_wait_for_req(card->host, &mrq); + memcpy(&idata->ic.response, cmd.resp, sizeof(cmd.resp)); if (cmd.error) { dev_err(mmc_dev(card->host), "%s: cmd error %d\n", @@ -637,14 +607,13 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md, if (idata->ic.postsleep_min_us) usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us); - memcpy(&(idata->ic.response), cmd.resp, sizeof(cmd.resp)); - if (idata->rpmb || (cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) { /* * Ensure RPMB/R1B command has completed by polling CMD13 * "Send Status". */ - err = card_busy_detect(card, MMC_BLK_TIMEOUT_MS, NULL); + err = mmc_poll_for_busy(card, MMC_BLK_TIMEOUT_MS, false, + MMC_BUSY_IO); } return err; @@ -1696,7 +1665,7 @@ static int mmc_blk_fix_state(struct mmc_card *card, struct request *req) mmc_blk_send_stop(card, timeout); - err = card_busy_detect(card, timeout, NULL); + err = mmc_poll_for_busy(card, timeout, false, MMC_BUSY_IO); mmc_retune_release(card->host); @@ -1911,28 +1880,48 @@ static inline bool mmc_blk_rq_error(struct mmc_blk_request *brq) brq->data.error || brq->cmd.resp[0] & CMD_ERRORS; } +static int mmc_blk_busy_cb(void *cb_data, bool *busy) +{ + struct mmc_blk_busy_data *data = cb_data; + u32 status = 0; + int err; + + err = mmc_send_status(data->card, &status); + if (err) + return err; + + /* Accumulate response error bits. 
*/ + data->status |= status; + + *busy = !mmc_ready_for_data(status); + return 0; +} + static int mmc_blk_card_busy(struct mmc_card *card, struct request *req) { struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); - u32 status = 0; + struct mmc_blk_busy_data cb_data; int err; if (mmc_host_is_spi(card->host) || rq_data_dir(req) == READ) return 0; - err = card_busy_detect(card, MMC_BLK_TIMEOUT_MS, &status); + cb_data.card = card; + cb_data.status = 0; + err = __mmc_poll_for_busy(card, MMC_BLK_TIMEOUT_MS, &mmc_blk_busy_cb, + &cb_data); /* * Do not assume data transferred correctly if there are any error bits * set. */ - if (status & mmc_blk_stop_err_bits(&mqrq->brq)) { + if (cb_data.status & mmc_blk_stop_err_bits(&mqrq->brq)) { mqrq->brq.data.bytes_xfered = 0; err = err ? err : -EIO; } /* Copy the exception bit so it will be seen later on */ - if (mmc_card_mmc(card) && status & R1_EXCEPTION_EVENT) + if (mmc_card_mmc(card) && cb_data.status & R1_EXCEPTION_EVENT) mqrq->brq.cmd.resp[0] |= R1_EXCEPTION_EVENT; return err; diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index 605f5e8648c1..240c5af793dc 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c @@ -936,15 +936,16 @@ int mmc_execute_tuning(struct mmc_card *card) opcode = MMC_SEND_TUNING_BLOCK; err = host->ops->execute_tuning(host, opcode); + if (!err) { + mmc_retune_clear(host); + mmc_retune_enable(host); + return 0; + } - if (err) { + /* Only print error when we don't check for card removal */ + if (!host->detect_change) pr_err("%s: tuning execution failed: %d\n", mmc_hostname(host), err); - } else { - host->retune_now = 0; - host->need_retune = 0; - mmc_retune_enable(host); - } return err; } diff --git a/drivers/mmc/core/crypto.c b/drivers/mmc/core/crypto.c index 419a368f8402..67557808cada 100644 --- a/drivers/mmc/core/crypto.c +++ b/drivers/mmc/core/crypto.c @@ -31,18 +31,11 @@ void mmc_crypto_prepare_req(struct mmc_queue_req *mqrq) struct request *req = mmc_queue_req_to_req(mqrq); struct mmc_request *mrq = &mqrq->brq.mrq; - if (!req->crypt_keyslot) + if (!req->crypt_ctx) return; - mrq->crypto_enabled = true; - mrq->crypto_key_slot = blk_ksm_get_slot_idx(req->crypt_keyslot); - - /* - * For now we assume that all MMC drivers set max_dun_bytes_supported=4, - * which is the limit for CQHCI crypto. So all DUNs should be 32-bit. - */ - WARN_ON_ONCE(req->crypt_ctx->bc_dun[0] > U32_MAX); - - mrq->data_unit_num = req->crypt_ctx->bc_dun[0]; + mrq->crypto_ctx = req->crypt_ctx; + if (req->crypt_keyslot) + mrq->crypto_key_slot = blk_ksm_get_slot_idx(req->crypt_keyslot); } EXPORT_SYMBOL_GPL(mmc_crypto_prepare_req); diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c index 0475d96047c4..d4683b1d263f 100644 --- a/drivers/mmc/core/host.c +++ b/drivers/mmc/core/host.c @@ -96,6 +96,10 @@ void mmc_unregister_host_class(void) class_unregister(&mmc_host_class); } +/** + * mmc_retune_enable() - enter a transfer mode that requires retuning + * @host: host which should retune now + */ void mmc_retune_enable(struct mmc_host *host) { host->can_retune = 1; @@ -127,13 +131,18 @@ void mmc_retune_unpause(struct mmc_host *host) } EXPORT_SYMBOL(mmc_retune_unpause); +/** + * mmc_retune_disable() - exit a transfer mode that requires retuning + * @host: host which should not retune anymore + * + * It is not meant for temporarily preventing retuning! 
+ */ void mmc_retune_disable(struct mmc_host *host) { mmc_retune_unpause(host); host->can_retune = 0; del_timer_sync(&host->retune_timer); - host->retune_now = 0; - host->need_retune = 0; + mmc_retune_clear(host); } void mmc_retune_timer_stop(struct mmc_host *host) diff --git a/drivers/mmc/core/host.h b/drivers/mmc/core/host.h index ba407617ed23..48c4952512a5 100644 --- a/drivers/mmc/core/host.h +++ b/drivers/mmc/core/host.h @@ -21,6 +21,12 @@ int mmc_retune(struct mmc_host *host); void mmc_retune_pause(struct mmc_host *host); void mmc_retune_unpause(struct mmc_host *host); +static inline void mmc_retune_clear(struct mmc_host *host) +{ + host->retune_now = 0; + host->need_retune = 0; +} + static inline void mmc_retune_hold_now(struct mmc_host *host) { host->retune_now = 0; diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c index 973756ed4016..0c54858e89c0 100644 --- a/drivers/mmc/core/mmc_ops.c +++ b/drivers/mmc/core/mmc_ops.c @@ -435,7 +435,7 @@ static int mmc_busy_cb(void *cb_data, bool *busy) u32 status = 0; int err; - if (host->ops->card_busy) { + if (data->busy_cmd != MMC_BUSY_IO && host->ops->card_busy) { *busy = host->ops->card_busy(host); return 0; } @@ -457,6 +457,7 @@ static int mmc_busy_cb(void *cb_data, bool *busy) break; case MMC_BUSY_HPI: case MMC_BUSY_EXTR_SINGLE: + case MMC_BUSY_IO: break; default: err = -EINVAL; @@ -509,6 +510,7 @@ int __mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms, return 0; } +EXPORT_SYMBOL_GPL(__mmc_poll_for_busy); int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms, bool retry_crc_err, enum mmc_busy_cmd busy_cmd) @@ -521,6 +523,7 @@ int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms, return __mmc_poll_for_busy(card, timeout_ms, &mmc_busy_cb, &cb_data); } +EXPORT_SYMBOL_GPL(mmc_poll_for_busy); bool mmc_prepare_busy_cmd(struct mmc_host *host, struct mmc_command *cmd, unsigned int timeout_ms) @@ -956,8 +959,15 @@ void mmc_run_bkops(struct mmc_card *card) */ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_START, 1, MMC_BKOPS_TIMEOUT_MS); - if (err) - pr_warn("%s: Error %d starting bkops\n", + /* + * If the BKOPS timed out, the card is probably still busy in the + * R1_STATE_PRG. Rather than continue to wait, let's try to abort + * it with a HPI command to get back into R1_STATE_TRAN. 
+ */ + if (err == -ETIMEDOUT && !mmc_interrupt_hpi(card)) + pr_warn("%s: BKOPS aborted\n", mmc_hostname(card->host)); + else if (err) + pr_warn("%s: Error %d running bkops\n", mmc_hostname(card->host), err); mmc_retune_release(card->host); diff --git a/drivers/mmc/core/mmc_ops.h b/drivers/mmc/core/mmc_ops.h index 41ab4f573a31..ae25ffc2e870 100644 --- a/drivers/mmc/core/mmc_ops.h +++ b/drivers/mmc/core/mmc_ops.h @@ -15,6 +15,7 @@ enum mmc_busy_cmd { MMC_BUSY_ERASE, MMC_BUSY_HPI, MMC_BUSY_EXTR_SINGLE, + MMC_BUSY_IO, }; struct mmc_host; diff --git a/drivers/mmc/core/pwrseq_sd8787.c b/drivers/mmc/core/pwrseq_sd8787.c index 68a826f1c0a1..2e120ad83020 100644 --- a/drivers/mmc/core/pwrseq_sd8787.c +++ b/drivers/mmc/core/pwrseq_sd8787.c @@ -14,6 +14,7 @@ #include <linux/kernel.h> #include <linux/platform_device.h> #include <linux/module.h> +#include <linux/of.h> #include <linux/slab.h> #include <linux/device.h> #include <linux/err.h> @@ -27,6 +28,7 @@ struct mmc_pwrseq_sd8787 { struct mmc_pwrseq pwrseq; struct gpio_desc *reset_gpio; struct gpio_desc *pwrdn_gpio; + u32 reset_pwrdwn_delay_ms; }; #define to_pwrseq_sd8787(p) container_of(p, struct mmc_pwrseq_sd8787, pwrseq) @@ -37,7 +39,7 @@ static void mmc_pwrseq_sd8787_pre_power_on(struct mmc_host *host) gpiod_set_value_cansleep(pwrseq->reset_gpio, 1); - msleep(300); + msleep(pwrseq->reset_pwrdwn_delay_ms); gpiod_set_value_cansleep(pwrseq->pwrdn_gpio, 1); } @@ -54,8 +56,12 @@ static const struct mmc_pwrseq_ops mmc_pwrseq_sd8787_ops = { .power_off = mmc_pwrseq_sd8787_power_off, }; +static const u32 sd8787_delay_ms = 300; +static const u32 wilc1000_delay_ms = 5; + static const struct of_device_id mmc_pwrseq_sd8787_of_match[] = { - { .compatible = "mmc-pwrseq-sd8787",}, + { .compatible = "mmc-pwrseq-sd8787", .data = &sd8787_delay_ms }, + { .compatible = "mmc-pwrseq-wilc1000", .data = &wilc1000_delay_ms }, {/* sentinel */}, }; MODULE_DEVICE_TABLE(of, mmc_pwrseq_sd8787_of_match); @@ -64,11 +70,15 @@ static int mmc_pwrseq_sd8787_probe(struct platform_device *pdev) { struct mmc_pwrseq_sd8787 *pwrseq; struct device *dev = &pdev->dev; + const struct of_device_id *match; pwrseq = devm_kzalloc(dev, sizeof(*pwrseq), GFP_KERNEL); if (!pwrseq) return -ENOMEM; + match = of_match_node(mmc_pwrseq_sd8787_of_match, pdev->dev.of_node); + pwrseq->reset_pwrdwn_delay_ms = *(u32 *)match->data; + pwrseq->pwrdn_gpio = devm_gpiod_get(dev, "powerdown", GPIOD_OUT_LOW); if (IS_ERR(pwrseq->pwrdn_gpio)) return PTR_ERR(pwrseq->pwrdn_gpio); diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c index cc3261777637..b15c034b42fb 100644 --- a/drivers/mmc/core/queue.c +++ b/drivers/mmc/core/queue.c @@ -163,7 +163,7 @@ static void mmc_mq_recovery_handler(struct work_struct *work) blk_mq_run_hw_queues(q, true); } -static struct scatterlist *mmc_alloc_sg(int sg_len, gfp_t gfp) +static struct scatterlist *mmc_alloc_sg(unsigned short sg_len, gfp_t gfp) { struct scatterlist *sg; @@ -193,33 +193,29 @@ static void mmc_queue_setup_discard(struct request_queue *q, blk_queue_flag_set(QUEUE_FLAG_SECERASE, q); } -static unsigned int mmc_get_max_segments(struct mmc_host *host) +static unsigned short mmc_get_max_segments(struct mmc_host *host) { return host->can_dma_map_merge ? 
MMC_DMA_MAP_MERGE_SEGMENTS : host->max_segs; } -/** - * mmc_init_request() - initialize the MMC-specific per-request data - * @mq: the request queue - * @req: the request - * @gfp: memory allocation policy - */ -static int __mmc_init_request(struct mmc_queue *mq, struct request *req, - gfp_t gfp) +static int mmc_mq_init_request(struct blk_mq_tag_set *set, struct request *req, + unsigned int hctx_idx, unsigned int numa_node) { struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req); + struct mmc_queue *mq = set->driver_data; struct mmc_card *card = mq->card; struct mmc_host *host = card->host; - mq_rq->sg = mmc_alloc_sg(mmc_get_max_segments(host), gfp); + mq_rq->sg = mmc_alloc_sg(mmc_get_max_segments(host), GFP_KERNEL); if (!mq_rq->sg) return -ENOMEM; return 0; } -static void mmc_exit_request(struct request_queue *q, struct request *req) +static void mmc_mq_exit_request(struct blk_mq_tag_set *set, struct request *req, + unsigned int hctx_idx) { struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req); @@ -227,20 +223,6 @@ static void mmc_exit_request(struct request_queue *q, struct request *req) mq_rq->sg = NULL; } -static int mmc_mq_init_request(struct blk_mq_tag_set *set, struct request *req, - unsigned int hctx_idx, unsigned int numa_node) -{ - return __mmc_init_request(set->driver_data, req, GFP_KERNEL); -} - -static void mmc_mq_exit_request(struct blk_mq_tag_set *set, struct request *req, - unsigned int hctx_idx) -{ - struct mmc_queue *mq = set->driver_data; - - mmc_exit_request(mq->queue, req); -} - static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx, const struct blk_mq_queue_data *bd) { diff --git a/drivers/mmc/core/sdio_cis.c b/drivers/mmc/core/sdio_cis.c index b23773583179..a705ba6eff5b 100644 --- a/drivers/mmc/core/sdio_cis.c +++ b/drivers/mmc/core/sdio_cis.c @@ -330,13 +330,25 @@ static int sdio_read_cis(struct mmc_card *card, struct sdio_func *func) prev = &this->next; if (ret == -ENOENT) { + if (time_after(jiffies, timeout)) break; - /* warn about unknown tuples */ - pr_warn_ratelimited("%s: queuing unknown" - " CIS tuple 0x%02x (%u bytes)\n", - mmc_hostname(card->host), - tpl_code, tpl_link); + +#define FMT(type) "%s: queuing " type " CIS tuple 0x%02x [%*ph] (%u bytes)\n" + /* + * Tuples in this range are reserved for + * vendors, so don't warn about them + */ + if (tpl_code >= 0x80 && tpl_code <= 0x8f) + pr_debug_ratelimited(FMT("vendor"), + mmc_hostname(card->host), + tpl_code, tpl_link, this->data, + tpl_link); + else + pr_warn_ratelimited(FMT("unknown"), + mmc_hostname(card->host), + tpl_code, tpl_link, this->data, + tpl_link); } /* keep on analyzing tuples */ diff --git a/drivers/mmc/host/cqhci-crypto.h b/drivers/mmc/host/cqhci-crypto.h index 60b58ee0e625..d7fb084f563b 100644 --- a/drivers/mmc/host/cqhci-crypto.h +++ b/drivers/mmc/host/cqhci-crypto.h @@ -22,12 +22,15 @@ int cqhci_crypto_init(struct cqhci_host *host); */ static inline u64 cqhci_crypto_prep_task_desc(struct mmc_request *mrq) { - if (!mrq->crypto_enabled) + if (!mrq->crypto_ctx) return 0; + /* We set max_dun_bytes_supported=4, so all DUNs should be 32-bit. 
*/ + WARN_ON_ONCE(mrq->crypto_ctx->bc_dun[0] > U32_MAX); + return CQHCI_CRYPTO_ENABLE_BIT | CQHCI_CRYPTO_KEYSLOT(mrq->crypto_key_slot) | - mrq->data_unit_num; + mrq->crypto_ctx->bc_dun[0]; } #else /* CONFIG_MMC_CRYPTO */ diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c index c3229d8c7041..6578cc64ae9e 100644 --- a/drivers/mmc/host/dw_mmc.c +++ b/drivers/mmc/host/dw_mmc.c @@ -17,9 +17,11 @@ #include <linux/interrupt.h> #include <linux/iopoll.h> #include <linux/ioport.h> +#include <linux/ktime.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> +#include <linux/prandom.h> #include <linux/seq_file.h> #include <linux/slab.h> #include <linux/stat.h> @@ -181,6 +183,9 @@ static void dw_mci_init_debugfs(struct dw_mci_slot *slot) &host->pending_events); debugfs_create_xul("completed_events", S_IRUSR, root, &host->completed_events); +#ifdef CONFIG_FAULT_INJECTION + fault_create_debugfs_attr("fail_data_crc", root, &host->fail_data_crc); +#endif } #endif /* defined(CONFIG_DEBUG_FS) */ @@ -782,6 +787,7 @@ static int dw_mci_edmac_start_dma(struct dw_mci *host, int ret = 0; /* Set external dma config: burst size, burst width */ + memset(&cfg, 0, sizeof(cfg)); cfg.dst_addr = host->phy_regs + fifo_offset; cfg.src_addr = cfg.dst_addr; cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; @@ -1788,6 +1794,68 @@ static const struct mmc_host_ops dw_mci_ops = { .prepare_hs400_tuning = dw_mci_prepare_hs400_tuning, }; +#ifdef CONFIG_FAULT_INJECTION +static enum hrtimer_restart dw_mci_fault_timer(struct hrtimer *t) +{ + struct dw_mci *host = container_of(t, struct dw_mci, fault_timer); + unsigned long flags; + + spin_lock_irqsave(&host->irq_lock, flags); + + if (!host->data_status) + host->data_status = SDMMC_INT_DCRC; + set_bit(EVENT_DATA_ERROR, &host->pending_events); + tasklet_schedule(&host->tasklet); + + spin_unlock_irqrestore(&host->irq_lock, flags); + + return HRTIMER_NORESTART; +} + +static void dw_mci_start_fault_timer(struct dw_mci *host) +{ + struct mmc_data *data = host->data; + + if (!data || data->blocks <= 1) + return; + + if (!should_fail(&host->fail_data_crc, 1)) + return; + + /* + * Try to inject the error at random points during the data transfer. 
+ */ + hrtimer_start(&host->fault_timer, + ms_to_ktime(prandom_u32() % 25), + HRTIMER_MODE_REL); +} + +static void dw_mci_stop_fault_timer(struct dw_mci *host) +{ + hrtimer_cancel(&host->fault_timer); +} + +static void dw_mci_init_fault(struct dw_mci *host) +{ + host->fail_data_crc = (struct fault_attr) FAULT_ATTR_INITIALIZER; + + hrtimer_init(&host->fault_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + host->fault_timer.function = dw_mci_fault_timer; +} +#else +static void dw_mci_init_fault(struct dw_mci *host) +{ +} + +static void dw_mci_start_fault_timer(struct dw_mci *host) +{ +} + +static void dw_mci_stop_fault_timer(struct dw_mci *host) +{ +} +#endif + static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq) __releases(&host->lock) __acquires(&host->lock) @@ -2102,6 +2170,7 @@ static void dw_mci_tasklet_func(struct tasklet_struct *t) break; } + dw_mci_stop_fault_timer(host); host->data = NULL; set_bit(EVENT_DATA_COMPLETE, &host->completed_events); err = dw_mci_data_complete(host, data); @@ -2151,6 +2220,7 @@ static void dw_mci_tasklet_func(struct tasklet_struct *t) if (mrq->cmd->error && mrq->data) dw_mci_reset(host); + dw_mci_stop_fault_timer(host); host->cmd = NULL; host->data = NULL; @@ -2600,6 +2670,8 @@ static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status) set_bit(EVENT_CMD_COMPLETE, &host->pending_events); tasklet_schedule(&host->tasklet); + + dw_mci_start_fault_timer(host); } static void dw_mci_handle_cd(struct dw_mci *host) @@ -3223,6 +3295,8 @@ int dw_mci_probe(struct dw_mci *host) spin_lock_init(&host->irq_lock); INIT_LIST_HEAD(&host->queue); + dw_mci_init_fault(host); + /* * Get the host data width - this assumes that HCON has been set with * the correct values. diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h index da5923a92e60..ce05d81477d9 100644 --- a/drivers/mmc/host/dw_mmc.h +++ b/drivers/mmc/host/dw_mmc.h @@ -14,6 +14,8 @@ #include <linux/mmc/core.h> #include <linux/dmaengine.h> #include <linux/reset.h> +#include <linux/fault-inject.h> +#include <linux/hrtimer.h> #include <linux/interrupt.h> enum dw_mci_state { @@ -230,6 +232,11 @@ struct dw_mci { struct timer_list cmd11_timer; struct timer_list cto_timer; struct timer_list dto_timer; + +#ifdef CONFIG_FAULT_INJECTION + struct fault_attr fail_data_crc; + struct hrtimer fault_timer; +#endif }; /* DMA ops for Internal/External DMAC interface */ diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c index 65c65bb5737f..a1bcde3395a6 100644 --- a/drivers/mmc/host/mmc_spi.c +++ b/drivers/mmc/host/mmc_spi.c @@ -180,7 +180,7 @@ static int mmc_spi_skip(struct mmc_spi_host *host, unsigned long timeout, u8 *cp = host->data->status; unsigned long start = jiffies; - while (1) { + do { int status; unsigned i; @@ -193,16 +193,9 @@ static int mmc_spi_skip(struct mmc_spi_host *host, unsigned long timeout, return cp[i]; } - if (time_is_before_jiffies(start + timeout)) - break; - - /* If we need long timeouts, we may release the CPU. - * We use jiffies here because we want to have a relation - * between elapsed time and the blocking of the scheduler. 
- */ - if (time_is_before_jiffies(start + 1)) - schedule(); - } + /* If we need long timeouts, we may release the CPU */ + cond_resched(); + } while (time_is_after_jiffies(start + timeout)); return -ETIMEDOUT; } diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c index 984d35055156..3765e2f4ad98 100644 --- a/drivers/mmc/host/mmci.c +++ b/drivers/mmc/host/mmci.c @@ -2126,6 +2126,9 @@ static int mmci_probe(struct amba_device *dev, ret = PTR_ERR(host->rst); goto clk_disable; } + ret = reset_control_deassert(host->rst); + if (ret) + dev_err(mmc_dev(mmc), "failed to de-assert reset\n"); /* Get regulators and the supported OCR mask */ ret = mmc_regulator_get_supply(mmc); diff --git a/drivers/mmc/host/moxart-mmc.c b/drivers/mmc/host/moxart-mmc.c index bde298887579..6c9d38132f74 100644 --- a/drivers/mmc/host/moxart-mmc.c +++ b/drivers/mmc/host/moxart-mmc.c @@ -628,6 +628,7 @@ static int moxart_probe(struct platform_device *pdev) host->dma_chan_tx, host->dma_chan_rx); host->have_dma = true; + memset(&cfg, 0, sizeof(cfg)); cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; diff --git a/drivers/mmc/host/renesas_sdhi.h b/drivers/mmc/host/renesas_sdhi.h index 53eded81a53e..0c45e82ff0de 100644 --- a/drivers/mmc/host/renesas_sdhi.h +++ b/drivers/mmc/host/renesas_sdhi.h @@ -42,6 +42,11 @@ struct renesas_sdhi_quirks { const u8 (*hs400_calib_table)[SDHI_CALIB_TABLE_MAX]; }; +struct renesas_sdhi_of_data_with_quirks { + const struct renesas_sdhi_of_data *of_data; + const struct renesas_sdhi_quirks *quirks; +}; + struct tmio_mmc_dma { enum dma_slave_buswidth dma_buswidth; bool (*filter)(struct dma_chan *chan, void *arg); @@ -78,6 +83,8 @@ struct renesas_sdhi { container_of((host)->pdata, struct renesas_sdhi, mmc_data) int renesas_sdhi_probe(struct platform_device *pdev, - const struct tmio_mmc_dma_ops *dma_ops); + const struct tmio_mmc_dma_ops *dma_ops, + const struct renesas_sdhi_of_data *of_data, + const struct renesas_sdhi_quirks *quirks); int renesas_sdhi_remove(struct platform_device *pdev); #endif diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c index e49ca0f7fe9a..6fc4cf3c9dce 100644 --- a/drivers/mmc/host/renesas_sdhi_core.c +++ b/drivers/mmc/host/renesas_sdhi_core.c @@ -305,27 +305,6 @@ static int renesas_sdhi_start_signal_voltage_switch(struct mmc_host *mmc, #define SH_MOBILE_SDHI_SCC_TMPPORT_CALIB_CODE_MASK 0x1f #define SH_MOBILE_SDHI_SCC_TMPPORT_MANUAL_MODE BIT(7) -static const u8 r8a7796_es13_calib_table[2][SDHI_CALIB_TABLE_MAX] = { - { 3, 3, 3, 3, 3, 3, 3, 4, 4, 5, 6, 7, 8, 9, 10, 15, - 16, 16, 16, 16, 16, 16, 17, 18, 18, 19, 20, 21, 22, 23, 24, 25 }, - { 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 7, 8, 11, - 12, 17, 18, 18, 18, 18, 18, 18, 18, 19, 20, 21, 22, 23, 25, 25 } -}; - -static const u8 r8a77965_calib_table[2][SDHI_CALIB_TABLE_MAX] = { - { 1, 2, 6, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 15, 15, 16, - 17, 18, 19, 20, 21, 22, 23, 24, 25, 25, 26, 27, 28, 29, 30, 31 }, - { 2, 3, 4, 4, 5, 6, 7, 9, 10, 11, 12, 13, 14, 15, 16, 17, - 17, 17, 20, 21, 22, 23, 24, 25, 27, 28, 29, 30, 31, 31, 31, 31 } -}; - -static const u8 r8a77990_calib_table[2][SDHI_CALIB_TABLE_MAX] = { - { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, - { 0, 0, 0, 1, 2, 3, 3, 4, 4, 4, 5, 5, 6, 8, 9, 10, - 11, 12, 13, 15, 16, 17, 17, 18, 18, 19, 20, 22, 24, 25, 26, 26 } -}; - static inline u32 sd_scc_read32(struct tmio_mmc_host *host, struct renesas_sdhi *priv, int addr) { @@ -895,69 
+874,12 @@ static void renesas_sdhi_enable_dma(struct tmio_mmc_host *host, bool enable) renesas_sdhi_sdbuf_width(host, enable ? width : 16); } -static const struct renesas_sdhi_quirks sdhi_quirks_4tap_nohs400 = { - .hs400_disabled = true, - .hs400_4taps = true, -}; - -static const struct renesas_sdhi_quirks sdhi_quirks_4tap = { - .hs400_4taps = true, - .hs400_bad_taps = BIT(2) | BIT(3) | BIT(6) | BIT(7), -}; - -static const struct renesas_sdhi_quirks sdhi_quirks_nohs400 = { - .hs400_disabled = true, -}; - -static const struct renesas_sdhi_quirks sdhi_quirks_bad_taps1357 = { - .hs400_bad_taps = BIT(1) | BIT(3) | BIT(5) | BIT(7), -}; - -static const struct renesas_sdhi_quirks sdhi_quirks_bad_taps2367 = { - .hs400_bad_taps = BIT(2) | BIT(3) | BIT(6) | BIT(7), -}; - -static const struct renesas_sdhi_quirks sdhi_quirks_r8a7796_es13 = { - .hs400_4taps = true, - .hs400_bad_taps = BIT(2) | BIT(3) | BIT(6) | BIT(7), - .hs400_calib_table = r8a7796_es13_calib_table, -}; - -static const struct renesas_sdhi_quirks sdhi_quirks_r8a77965 = { - .hs400_bad_taps = BIT(2) | BIT(3) | BIT(6) | BIT(7), - .hs400_calib_table = r8a77965_calib_table, -}; - -static const struct renesas_sdhi_quirks sdhi_quirks_r8a77990 = { - .hs400_calib_table = r8a77990_calib_table, -}; - -/* - * Note for r8a7796 / r8a774a1: we can't distinguish ES1.1 and 1.2 as of now. - * So, we want to treat them equally and only have a match for ES1.2 to enforce - * this if there ever will be a way to distinguish ES1.2. - */ -static const struct soc_device_attribute sdhi_quirks_match[] = { - { .soc_id = "r8a774a1", .revision = "ES1.[012]", .data = &sdhi_quirks_4tap_nohs400 }, - { .soc_id = "r8a7795", .revision = "ES1.*", .data = &sdhi_quirks_4tap_nohs400 }, - { .soc_id = "r8a7795", .revision = "ES2.0", .data = &sdhi_quirks_4tap }, - { .soc_id = "r8a7795", .revision = "ES3.*", .data = &sdhi_quirks_bad_taps2367 }, - { .soc_id = "r8a7796", .revision = "ES1.[012]", .data = &sdhi_quirks_4tap_nohs400 }, - { .soc_id = "r8a7796", .revision = "ES1.*", .data = &sdhi_quirks_r8a7796_es13 }, - { .soc_id = "r8a77961", .data = &sdhi_quirks_bad_taps1357 }, - { .soc_id = "r8a77965", .data = &sdhi_quirks_r8a77965 }, - { .soc_id = "r8a77980", .data = &sdhi_quirks_nohs400 }, - { .soc_id = "r8a77990", .data = &sdhi_quirks_r8a77990 }, - { /* Sentinel. 
*/ }, -}; - int renesas_sdhi_probe(struct platform_device *pdev, - const struct tmio_mmc_dma_ops *dma_ops) + const struct tmio_mmc_dma_ops *dma_ops, + const struct renesas_sdhi_of_data *of_data, + const struct renesas_sdhi_quirks *quirks) { struct tmio_mmc_data *mmd = pdev->dev.platform_data; - const struct renesas_sdhi_quirks *quirks = NULL; - const struct renesas_sdhi_of_data *of_data; - const struct soc_device_attribute *attr; struct tmio_mmc_data *mmc_data; struct tmio_mmc_dma *dma_priv; struct tmio_mmc_host *host; @@ -966,12 +888,6 @@ int renesas_sdhi_probe(struct platform_device *pdev, struct resource *res; u16 ver; - of_data = of_device_get_match_data(&pdev->dev); - - attr = soc_device_match(sdhi_quirks_match); - if (attr) - quirks = attr->data; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) return -EINVAL; diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c index e8f4863d8f1a..7660f7ea74dd 100644 --- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c +++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c @@ -15,6 +15,7 @@ #include <linux/mmc/host.h> #include <linux/mod_devicetable.h> #include <linux/module.h> +#include <linux/of_device.h> #include <linux/pagemap.h> #include <linux/scatterlist.h> #include <linux/sys_soc.h> @@ -92,7 +93,7 @@ static struct renesas_sdhi_scc rcar_gen3_scc_taps[] = { }, }; -static const struct renesas_sdhi_of_data of_rza2_compatible = { +static const struct renesas_sdhi_of_data of_data_rza2 = { .tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_CLK_ACTUAL | TMIO_MMC_HAVE_CBSY, .tmio_ocr_mask = MMC_VDD_32_33, @@ -107,7 +108,11 @@ static const struct renesas_sdhi_of_data of_rza2_compatible = { .max_segs = 1, }; -static const struct renesas_sdhi_of_data of_rcar_gen3_compatible = { +static const struct renesas_sdhi_of_data_with_quirks of_rza2_compatible = { + .of_data = &of_data_rza2, +}; + +static const struct renesas_sdhi_of_data of_data_rcar_gen3 = { .tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_CLK_ACTUAL | TMIO_MMC_HAVE_CBSY | TMIO_MMC_MIN_RCAR2, .capabilities = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ | @@ -122,11 +127,116 @@ static const struct renesas_sdhi_of_data of_rcar_gen3_compatible = { .max_segs = 1, }; +static const u8 r8a7796_es13_calib_table[2][SDHI_CALIB_TABLE_MAX] = { + { 3, 3, 3, 3, 3, 3, 3, 4, 4, 5, 6, 7, 8, 9, 10, 15, + 16, 16, 16, 16, 16, 16, 17, 18, 18, 19, 20, 21, 22, 23, 24, 25 }, + { 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 7, 8, 11, + 12, 17, 18, 18, 18, 18, 18, 18, 18, 19, 20, 21, 22, 23, 25, 25 } +}; + +static const u8 r8a77965_calib_table[2][SDHI_CALIB_TABLE_MAX] = { + { 1, 2, 6, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 15, 15, 16, + 17, 18, 19, 20, 21, 22, 23, 24, 25, 25, 26, 27, 28, 29, 30, 31 }, + { 2, 3, 4, 4, 5, 6, 7, 9, 10, 11, 12, 13, 14, 15, 16, 17, + 17, 17, 20, 21, 22, 23, 24, 25, 27, 28, 29, 30, 31, 31, 31, 31 } +}; + +static const u8 r8a77990_calib_table[2][SDHI_CALIB_TABLE_MAX] = { + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, + { 0, 0, 0, 1, 2, 3, 3, 4, 4, 4, 5, 5, 6, 8, 9, 10, + 11, 12, 13, 15, 16, 17, 17, 18, 18, 19, 20, 22, 24, 25, 26, 26 } +}; + +static const struct renesas_sdhi_quirks sdhi_quirks_4tap_nohs400 = { + .hs400_disabled = true, + .hs400_4taps = true, +}; + +static const struct renesas_sdhi_quirks sdhi_quirks_4tap = { + .hs400_4taps = true, + .hs400_bad_taps = BIT(2) | BIT(3) | BIT(6) | BIT(7), +}; + +static const struct renesas_sdhi_quirks sdhi_quirks_nohs400 = { + .hs400_disabled = 
true, +}; + +static const struct renesas_sdhi_quirks sdhi_quirks_bad_taps1357 = { + .hs400_bad_taps = BIT(1) | BIT(3) | BIT(5) | BIT(7), +}; + +static const struct renesas_sdhi_quirks sdhi_quirks_bad_taps2367 = { + .hs400_bad_taps = BIT(2) | BIT(3) | BIT(6) | BIT(7), +}; + +static const struct renesas_sdhi_quirks sdhi_quirks_r8a7796_es13 = { + .hs400_4taps = true, + .hs400_bad_taps = BIT(2) | BIT(3) | BIT(6) | BIT(7), + .hs400_calib_table = r8a7796_es13_calib_table, +}; + +static const struct renesas_sdhi_quirks sdhi_quirks_r8a77965 = { + .hs400_bad_taps = BIT(2) | BIT(3) | BIT(6) | BIT(7), + .hs400_calib_table = r8a77965_calib_table, +}; + +static const struct renesas_sdhi_quirks sdhi_quirks_r8a77990 = { + .hs400_calib_table = r8a77990_calib_table, +}; + +/* + * Note for r8a7796 / r8a774a1: we can't distinguish ES1.1 and 1.2 as of now. + * So, we want to treat them equally and only have a match for ES1.2 to enforce + * this if there ever will be a way to distinguish ES1.2. + */ +static const struct soc_device_attribute sdhi_quirks_match[] = { + { .soc_id = "r8a774a1", .revision = "ES1.[012]", .data = &sdhi_quirks_4tap_nohs400 }, + { .soc_id = "r8a7795", .revision = "ES1.*", .data = &sdhi_quirks_4tap_nohs400 }, + { .soc_id = "r8a7795", .revision = "ES2.0", .data = &sdhi_quirks_4tap }, + { .soc_id = "r8a7796", .revision = "ES1.[012]", .data = &sdhi_quirks_4tap_nohs400 }, + { .soc_id = "r8a7796", .revision = "ES1.*", .data = &sdhi_quirks_r8a7796_es13 }, + { /* Sentinel. */ }, +}; + +static const struct renesas_sdhi_of_data_with_quirks of_r8a7795_compatible = { + .of_data = &of_data_rcar_gen3, + .quirks = &sdhi_quirks_bad_taps2367, +}; + +static const struct renesas_sdhi_of_data_with_quirks of_r8a77961_compatible = { + .of_data = &of_data_rcar_gen3, + .quirks = &sdhi_quirks_bad_taps1357, +}; + +static const struct renesas_sdhi_of_data_with_quirks of_r8a77965_compatible = { + .of_data = &of_data_rcar_gen3, + .quirks = &sdhi_quirks_r8a77965, +}; + +static const struct renesas_sdhi_of_data_with_quirks of_r8a77980_compatible = { + .of_data = &of_data_rcar_gen3, + .quirks = &sdhi_quirks_nohs400, +}; + +static const struct renesas_sdhi_of_data_with_quirks of_r8a77990_compatible = { + .of_data = &of_data_rcar_gen3, + .quirks = &sdhi_quirks_r8a77990, +}; + +static const struct renesas_sdhi_of_data_with_quirks of_rcar_gen3_compatible = { + .of_data = &of_data_rcar_gen3, +}; + static const struct of_device_id renesas_sdhi_internal_dmac_of_match[] = { { .compatible = "renesas,sdhi-r7s9210", .data = &of_rza2_compatible, }, { .compatible = "renesas,sdhi-mmc-r8a77470", .data = &of_rcar_gen3_compatible, }, - { .compatible = "renesas,sdhi-r8a7795", .data = &of_rcar_gen3_compatible, }, + { .compatible = "renesas,sdhi-r8a7795", .data = &of_r8a7795_compatible, }, { .compatible = "renesas,sdhi-r8a7796", .data = &of_rcar_gen3_compatible, }, + { .compatible = "renesas,sdhi-r8a77961", .data = &of_r8a77961_compatible, }, + { .compatible = "renesas,sdhi-r8a77965", .data = &of_r8a77965_compatible, }, + { .compatible = "renesas,sdhi-r8a77980", .data = &of_r8a77980_compatible, }, + { .compatible = "renesas,sdhi-r8a77990", .data = &of_r8a77990_compatible, }, { .compatible = "renesas,rcar-gen3-sdhi", .data = &of_rcar_gen3_compatible, }, {}, }; @@ -405,16 +515,27 @@ static const struct soc_device_attribute soc_dma_quirks[] = { static int renesas_sdhi_internal_dmac_probe(struct platform_device *pdev) { - const struct soc_device_attribute *soc = soc_device_match(soc_dma_quirks); + const struct soc_device_attribute *attr; + 
const struct renesas_sdhi_of_data_with_quirks *of_data_quirks; + const struct renesas_sdhi_quirks *quirks; struct device *dev = &pdev->dev; - if (soc) - global_flags |= (unsigned long)soc->data; + of_data_quirks = of_device_get_match_data(&pdev->dev); + quirks = of_data_quirks->quirks; + + attr = soc_device_match(soc_dma_quirks); + if (attr) + global_flags |= (unsigned long)attr->data; + + attr = soc_device_match(sdhi_quirks_match); + if (attr) + quirks = attr->data; /* value is max of SD_SECCNT. Confirmed by HW engineers */ dma_set_max_seg_size(dev, 0xffffffff); - return renesas_sdhi_probe(pdev, &renesas_sdhi_internal_dmac_dma_ops); + return renesas_sdhi_probe(pdev, &renesas_sdhi_internal_dmac_dma_ops, + of_data_quirks->of_data, quirks); } static const struct dev_pm_ops renesas_sdhi_internal_dmac_dev_pm_ops = { diff --git a/drivers/mmc/host/renesas_sdhi_sys_dmac.c b/drivers/mmc/host/renesas_sdhi_sys_dmac.c index ffa64211f4de..99e3426df702 100644 --- a/drivers/mmc/host/renesas_sdhi_sys_dmac.c +++ b/drivers/mmc/host/renesas_sdhi_sys_dmac.c @@ -108,9 +108,9 @@ static void renesas_sdhi_sys_dmac_abort_dma(struct tmio_mmc_host *host) renesas_sdhi_sys_dmac_enable_dma(host, false); if (host->chan_rx) - dmaengine_terminate_all(host->chan_rx); + dmaengine_terminate_sync(host->chan_rx); if (host->chan_tx) - dmaengine_terminate_all(host->chan_tx); + dmaengine_terminate_sync(host->chan_tx); renesas_sdhi_sys_dmac_enable_dma(host, true); } @@ -451,7 +451,8 @@ static const struct tmio_mmc_dma_ops renesas_sdhi_sys_dmac_dma_ops = { static int renesas_sdhi_sys_dmac_probe(struct platform_device *pdev) { - return renesas_sdhi_probe(pdev, &renesas_sdhi_sys_dmac_dma_ops); + return renesas_sdhi_probe(pdev, &renesas_sdhi_sys_dmac_dma_ops, + of_device_get_match_data(&pdev->dev), NULL); } static const struct dev_pm_ops renesas_sdhi_sys_dmac_dev_pm_ops = { diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c index 4ca937415734..58cfaffa3c2d 100644 --- a/drivers/mmc/host/rtsx_pci_sdmmc.c +++ b/drivers/mmc/host/rtsx_pci_sdmmc.c @@ -542,9 +542,22 @@ static int sd_write_long_data(struct realtek_pci_sdmmc *host, return 0; } +static inline void sd_enable_initial_mode(struct realtek_pci_sdmmc *host) +{ + rtsx_pci_write_register(host->pcr, SD_CFG1, + SD_CLK_DIVIDE_MASK, SD_CLK_DIVIDE_128); +} + +static inline void sd_disable_initial_mode(struct realtek_pci_sdmmc *host) +{ + rtsx_pci_write_register(host->pcr, SD_CFG1, + SD_CLK_DIVIDE_MASK, SD_CLK_DIVIDE_0); +} + static int sd_rw_multi(struct realtek_pci_sdmmc *host, struct mmc_request *mrq) { struct mmc_data *data = mrq->data; + int err; if (host->sg_count < 0) { data->error = host->sg_count; @@ -553,22 +566,19 @@ static int sd_rw_multi(struct realtek_pci_sdmmc *host, struct mmc_request *mrq) return data->error; } - if (data->flags & MMC_DATA_READ) - return sd_read_long_data(host, mrq); + if (data->flags & MMC_DATA_READ) { + if (host->initial_mode) + sd_disable_initial_mode(host); - return sd_write_long_data(host, mrq); -} + err = sd_read_long_data(host, mrq); -static inline void sd_enable_initial_mode(struct realtek_pci_sdmmc *host) -{ - rtsx_pci_write_register(host->pcr, SD_CFG1, - SD_CLK_DIVIDE_MASK, SD_CLK_DIVIDE_128); -} + if (host->initial_mode) + sd_enable_initial_mode(host); -static inline void sd_disable_initial_mode(struct realtek_pci_sdmmc *host) -{ - rtsx_pci_write_register(host->pcr, SD_CFG1, - SD_CLK_DIVIDE_MASK, SD_CLK_DIVIDE_0); + return err; + } + + return sd_write_long_data(host, mrq); } static void sd_normal_rw(struct 
realtek_pci_sdmmc *host, diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c index 72c0bf0c1887..f18d169bc8ff 100644 --- a/drivers/mmc/host/sdhci-esdhc-imx.c +++ b/drivers/mmc/host/sdhci-esdhc-imx.c @@ -24,7 +24,6 @@ #include <linux/of.h> #include <linux/of_device.h> #include <linux/pinctrl/consumer.h> -#include <linux/platform_data/mmc-esdhc-imx.h> #include <linux/pm_runtime.h> #include "sdhci-pltfm.h" #include "sdhci-esdhc.h" @@ -95,6 +94,11 @@ #define ESDHC_VEND_SPEC2 0xc8 #define ESDHC_VEND_SPEC2_EN_BUSY_IRQ (1 << 8) +#define ESDHC_VEND_SPEC2_AUTO_TUNE_8BIT_EN (1 << 4) +#define ESDHC_VEND_SPEC2_AUTO_TUNE_4BIT_EN (0 << 4) +#define ESDHC_VEND_SPEC2_AUTO_TUNE_1BIT_EN (2 << 4) +#define ESDHC_VEND_SPEC2_AUTO_TUNE_CMD_EN (1 << 6) +#define ESDHC_VEND_SPEC2_AUTO_TUNE_MODE_MASK (7 << 4) #define ESDHC_TUNING_CTRL 0xcc #define ESDHC_STD_TUNING_EN (1 << 24) @@ -115,6 +119,7 @@ #define ESDHC_CTRL_4BITBUS (0x1 << 1) #define ESDHC_CTRL_8BITBUS (0x2 << 1) #define ESDHC_CTRL_BUSWIDTH_MASK (0x3 << 1) +#define USDHC_GET_BUSWIDTH(c) (c & ESDHC_CTRL_BUSWIDTH_MASK) /* * There is an INT DMA ERR mismatch between eSDHC and STD SDHC SPEC: @@ -191,6 +196,38 @@ */ #define ESDHC_FLAG_BROKEN_AUTO_CMD23 BIT(16) +enum wp_types { + ESDHC_WP_NONE, /* no WP, neither controller nor gpio */ + ESDHC_WP_CONTROLLER, /* mmc controller internal WP */ + ESDHC_WP_GPIO, /* external gpio pin for WP */ +}; + +enum cd_types { + ESDHC_CD_NONE, /* no CD, neither controller nor gpio */ + ESDHC_CD_CONTROLLER, /* mmc controller internal CD */ + ESDHC_CD_GPIO, /* external gpio pin for CD */ + ESDHC_CD_PERMANENT, /* no CD, card permanently wired to host */ +}; + +/* + * struct esdhc_platform_data - platform data for esdhc on i.MX + * + * ESDHC_WP(CD)_CONTROLLER type is not available on i.MX25/35. 
+ * + * @wp_type: type of write_protect method (see wp_types enum above) + * @cd_type: type of card_detect method (see cd_types enum above) + */ + +struct esdhc_platform_data { + enum wp_types wp_type; + enum cd_types cd_type; + int max_bus_width; + unsigned int delay_line; + unsigned int tuning_step; /* The delay cell steps in tuning procedure */ + unsigned int tuning_start_tap; /* The start delay cell point in tuning procedure */ + unsigned int strobe_dll_delay_target; /* The delay cell for strobe pad (read clock) */ +}; + struct esdhc_soc_data { u32 flags; }; @@ -376,6 +413,30 @@ static inline void esdhc_wait_for_card_clock_gate_off(struct sdhci_host *host) dev_warn(mmc_dev(host->mmc), "%s: card clock still not gate off in 100us!.\n", __func__); } +/* Enable the auto tuning circuit to check the CMD line and BUS line */ +static inline void usdhc_auto_tuning_mode_sel(struct sdhci_host *host) +{ + u32 buswidth, auto_tune_buswidth; + + buswidth = USDHC_GET_BUSWIDTH(readl(host->ioaddr + SDHCI_HOST_CONTROL)); + + switch (buswidth) { + case ESDHC_CTRL_8BITBUS: + auto_tune_buswidth = ESDHC_VEND_SPEC2_AUTO_TUNE_8BIT_EN; + break; + case ESDHC_CTRL_4BITBUS: + auto_tune_buswidth = ESDHC_VEND_SPEC2_AUTO_TUNE_4BIT_EN; + break; + default: /* 1BITBUS */ + auto_tune_buswidth = ESDHC_VEND_SPEC2_AUTO_TUNE_1BIT_EN; + break; + } + + esdhc_clrset_le(host, ESDHC_VEND_SPEC2_AUTO_TUNE_MODE_MASK, + auto_tune_buswidth | ESDHC_VEND_SPEC2_AUTO_TUNE_CMD_EN, + ESDHC_VEND_SPEC2); +} + static u32 esdhc_readl_le(struct sdhci_host *host, int reg) { struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); @@ -597,17 +658,7 @@ static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg) else new_val &= ~ESDHC_VENDOR_SPEC_VSELECT; writel(new_val, host->ioaddr + ESDHC_VENDOR_SPEC); - if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING) { - new_val = readl(host->ioaddr + ESDHC_MIX_CTRL); - if (val & SDHCI_CTRL_TUNED_CLK) { - new_val |= ESDHC_MIX_CTRL_SMPCLK_SEL; - new_val |= ESDHC_MIX_CTRL_AUTO_TUNE_EN; - } else { - new_val &= ~ESDHC_MIX_CTRL_SMPCLK_SEL; - new_val &= ~ESDHC_MIX_CTRL_AUTO_TUNE_EN; - } - writel(new_val , host->ioaddr + ESDHC_MIX_CTRL); - } else if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING) { + if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING) { u32 v = readl(host->ioaddr + SDHCI_AUTO_CMD_STATUS); u32 m = readl(host->ioaddr + ESDHC_MIX_CTRL); if (val & SDHCI_CTRL_TUNED_CLK) { @@ -622,6 +673,7 @@ static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg) v |= ESDHC_MIX_CTRL_EXE_TUNE; m |= ESDHC_MIX_CTRL_FBCLK_SEL; m |= ESDHC_MIX_CTRL_AUTO_TUNE_EN; + usdhc_auto_tuning_mode_sel(host); } else { v &= ~ESDHC_MIX_CTRL_EXE_TUNE; } @@ -991,6 +1043,8 @@ static void esdhc_post_tuning(struct sdhci_host *host) { u32 reg; + usdhc_auto_tuning_mode_sel(host); + reg = readl(host->ioaddr + ESDHC_MIX_CTRL); reg &= ~ESDHC_MIX_CTRL_EXE_TUNE; reg |= ESDHC_MIX_CTRL_AUTO_TUNE_EN; diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c index 290a14cdc1cf..50c71e0ba5e4 100644 --- a/drivers/mmc/host/sdhci-msm.c +++ b/drivers/mmc/host/sdhci-msm.c @@ -2714,6 +2714,9 @@ static int sdhci_msm_probe(struct platform_device *pdev) msm_host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_NEED_RSP_BUSY; + /* Set the timeout value to max possible */ + host->max_timeout_count = 0xF; + pm_runtime_get_noresume(&pdev->dev); pm_runtime_set_active(&pdev->dev); pm_runtime_enable(&pdev->dev); diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c index 
0e7c07ed9690..737e2bfdedc2 100644 --- a/drivers/mmc/host/sdhci-of-arasan.c +++ b/drivers/mmc/host/sdhci-of-arasan.c @@ -159,6 +159,12 @@ struct sdhci_arasan_data { /* Controller immediately reports SDHCI_CLOCK_INT_STABLE after enabling the * internal clock even when the clock isn't stable */ #define SDHCI_ARASAN_QUIRK_CLOCK_UNSTABLE BIT(1) +/* + * Some of the Arasan variations might not have timing requirements + * met at 25MHz for Default Speed mode, those controllers work at + * 19MHz instead + */ +#define SDHCI_ARASAN_QUIRK_CLOCK_25_BROKEN BIT(2) }; struct sdhci_arasan_of_data { @@ -267,7 +273,12 @@ static void sdhci_arasan_set_clock(struct sdhci_host *host, unsigned int clock) * through low speeds without power cycling. */ sdhci_set_clock(host, host->max_clk); - phy_power_on(sdhci_arasan->phy); + if (phy_power_on(sdhci_arasan->phy)) { + pr_err("%s: Cannot power on phy.\n", + mmc_hostname(host->mmc)); + return; + } + sdhci_arasan->is_phy_on = true; /* @@ -290,6 +301,16 @@ static void sdhci_arasan_set_clock(struct sdhci_host *host, unsigned int clock) sdhci_arasan->is_phy_on = false; } + if (sdhci_arasan->quirks & SDHCI_ARASAN_QUIRK_CLOCK_25_BROKEN) { + /* + * Some of the Arasan variations might not have timing + * requirements met at 25MHz for Default Speed mode, + * those controllers work at 19MHz instead. + */ + if (clock == DEFAULT_SPEED_MAX_DTR) + clock = (DEFAULT_SPEED_MAX_DTR * 19) / 25; + } + /* Set the Input and Output Clock Phase Delays */ if (clk_data->set_clk_delays) clk_data->set_clk_delays(host); @@ -307,7 +328,12 @@ static void sdhci_arasan_set_clock(struct sdhci_host *host, unsigned int clock) msleep(20); if (ctrl_phy) { - phy_power_on(sdhci_arasan->phy); + if (phy_power_on(sdhci_arasan->phy)) { + pr_err("%s: Cannot power on phy.\n", + mmc_hostname(host->mmc)); + return; + } + sdhci_arasan->is_phy_on = true; } } @@ -463,7 +489,9 @@ static int sdhci_arasan_suspend(struct device *dev) ret = phy_power_off(sdhci_arasan->phy); if (ret) { dev_err(dev, "Cannot power off phy.\n"); - sdhci_resume_host(host); + if (sdhci_resume_host(host)) + dev_err(dev, "Cannot resume host.\n"); + return ret; } sdhci_arasan->is_phy_on = false; @@ -878,6 +906,10 @@ static int arasan_zynqmp_execute_tuning(struct mmc_host *mmc, u32 opcode) NODE_SD_1; int err; + /* ZynqMP SD controller does not perform auto tuning in DDR50 mode */ + if (mmc->ios.timing == MMC_TIMING_UHS_DDR50) + return 0; + arasan_zynqmp_dll_reset(host, device_id); err = sdhci_execute_tuning(mmc, opcode); @@ -952,7 +984,7 @@ static void sdhci_arasan_update_baseclkfreq(struct sdhci_host *host) struct sdhci_arasan_data *sdhci_arasan = sdhci_pltfm_priv(pltfm_host); const struct sdhci_arasan_soc_ctl_map *soc_ctl_map = sdhci_arasan->soc_ctl_map; - u32 mhz = DIV_ROUND_CLOSEST(clk_get_rate(pltfm_host->clk), 1000000); + u32 mhz = DIV_ROUND_CLOSEST_ULL(clk_get_rate(pltfm_host->clk), 1000000); /* Having a map is optional */ if (!soc_ctl_map) @@ -986,14 +1018,16 @@ static void arasan_dt_read_clk_phase(struct device *dev, { struct device_node *np = dev->of_node; - int clk_phase[2] = {0}; + u32 clk_phase[2] = {0}; + int ret; /* * Read Tap Delay values from DT, if the DT does not contain the * Tap Values then use the pre-defined values. 
*/ - if (of_property_read_variable_u32_array(np, prop, &clk_phase[0], - 2, 0)) { + ret = of_property_read_variable_u32_array(np, prop, &clk_phase[0], + 2, 0); + if (ret < 0) { dev_dbg(dev, "Using predefined clock phase for %s = %d %d\n", prop, clk_data->clk_phase_in[timing], clk_data->clk_phase_out[timing]); @@ -1608,6 +1642,9 @@ static int sdhci_arasan_probe(struct platform_device *pdev) if (of_device_is_compatible(np, "xlnx,zynqmp-8.9a")) { host->mmc_host_ops.execute_tuning = arasan_zynqmp_execute_tuning; + + sdhci_arasan->quirks |= SDHCI_ARASAN_QUIRK_CLOCK_25_BROKEN; + host->quirks |= SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12; } arasan_dt_parse_clk_phases(dev, &sdhci_arasan->clk_data); diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index aba6e10b8605..8eefa7d5fe85 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -934,21 +934,21 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd, /* * If the host controller provides us with an incorrect timeout - * value, just skip the check and use 0xE. The hardware may take + * value, just skip the check and use the maximum. The hardware may take * longer to time out, but that's much better than having a too-short * timeout value. */ if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL) - return 0xE; + return host->max_timeout_count; /* Unspecified command, asume max */ if (cmd == NULL) - return 0xE; + return host->max_timeout_count; data = cmd->data; /* Unspecified timeout, assume max */ if (!data && !cmd->busy_timeout) - return 0xE; + return host->max_timeout_count; /* timeout in us */ target_timeout = sdhci_target_timeout(host, cmd, data); @@ -968,15 +968,15 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd, while (current_timeout < target_timeout) { count++; current_timeout <<= 1; - if (count >= 0xF) + if (count > host->max_timeout_count) break; } - if (count >= 0xF) { + if (count > host->max_timeout_count) { if (!(host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT)) DBG("Too large timeout 0x%x requested for CMD%d!\n", count, cmd->opcode); - count = 0xE; + count = host->max_timeout_count; } else { *too_big = false; } @@ -1222,6 +1222,7 @@ static int sdhci_external_dma_setup(struct sdhci_host *host, if (!host->mapbase) return -EINVAL; + memset(&cfg, 0, sizeof(cfg)); cfg.src_addr = host->mapbase + SDHCI_BUFFER; cfg.dst_addr = host->mapbase + SDHCI_BUFFER; cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; @@ -3278,8 +3279,14 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask) { u32 command; - /* CMD19 generates _only_ Buffer Read Ready interrupt */ - if (intmask & SDHCI_INT_DATA_AVAIL) { + /* + * CMD19 generates _only_ Buffer Read Ready interrupt if + * use sdhci_send_tuning. + * Need to exclude this case: PIO mode and use mmc_send_tuning, + * If not, sdhci_transfer_pio will never be called, make the + * SDHCI_INT_DATA_AVAIL always there, stuck in irq storm. 
+ */ + if (intmask & SDHCI_INT_DATA_AVAIL && !host->data) { command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND)); if (command == MMC_SEND_TUNING_BLOCK || command == MMC_SEND_TUNING_BLOCK_HS200) { @@ -3940,6 +3947,8 @@ struct sdhci_host *sdhci_alloc_host(struct device *dev, */ host->adma_table_cnt = SDHCI_MAX_SEGS * 2 + 1; + host->max_timeout_count = 0xE; + return host; } diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h index 074dc182b184..e8d04e42a5af 100644 --- a/drivers/mmc/host/sdhci.h +++ b/drivers/mmc/host/sdhci.h @@ -517,6 +517,7 @@ struct sdhci_host { unsigned int max_clk; /* Max possible freq (MHz) */ unsigned int timeout_clk; /* Timeout freq (KHz) */ + u8 max_timeout_count; /* Vendor specific max timeout count */ unsigned int clk_mul; /* Clock Muliplier value */ unsigned int clock; /* Current clock (MHz) */ diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c index e5e457037235..bcc595c70a9f 100644 --- a/drivers/mmc/host/sh_mmcif.c +++ b/drivers/mmc/host/sh_mmcif.c @@ -1164,9 +1164,9 @@ static bool sh_mmcif_end_cmd(struct sh_mmcif_host *host) data->bytes_xfered = 0; /* Abort DMA */ if (data->flags & MMC_DATA_READ) - dmaengine_terminate_all(host->chan_rx); + dmaengine_terminate_sync(host->chan_rx); else - dmaengine_terminate_all(host->chan_tx); + dmaengine_terminate_sync(host->chan_tx); } return false; diff --git a/drivers/mmc/host/tifm_sd.c b/drivers/mmc/host/tifm_sd.c index 9fdf7ea06e3f..63917070b1a7 100644 --- a/drivers/mmc/host/tifm_sd.c +++ b/drivers/mmc/host/tifm_sd.c @@ -669,8 +669,8 @@ static void tifm_sd_request(struct mmc_host *mmc, struct mmc_request *mrq) if(1 != tifm_map_sg(sock, &host->bounce_buf, 1, r_data->flags & MMC_DATA_WRITE - ? PCI_DMA_TODEVICE - : PCI_DMA_FROMDEVICE)) { + ? DMA_TO_DEVICE + : DMA_FROM_DEVICE)) { pr_err("%s : scatterlist map failed\n", dev_name(&sock->dev)); mrq->cmd->error = -ENOMEM; @@ -680,15 +680,15 @@ static void tifm_sd_request(struct mmc_host *mmc, struct mmc_request *mrq) r_data->sg_len, r_data->flags & MMC_DATA_WRITE - ? PCI_DMA_TODEVICE - : PCI_DMA_FROMDEVICE); + ? DMA_TO_DEVICE + : DMA_FROM_DEVICE); if (host->sg_len < 1) { pr_err("%s : scatterlist map failed\n", dev_name(&sock->dev)); tifm_unmap_sg(sock, &host->bounce_buf, 1, r_data->flags & MMC_DATA_WRITE - ? PCI_DMA_TODEVICE - : PCI_DMA_FROMDEVICE); + ? DMA_TO_DEVICE + : DMA_FROM_DEVICE); mrq->cmd->error = -ENOMEM; goto err_out; } @@ -762,10 +762,10 @@ static void tifm_sd_end_cmd(struct tasklet_struct *t) } else { tifm_unmap_sg(sock, &host->bounce_buf, 1, (r_data->flags & MMC_DATA_WRITE) - ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE); + ? DMA_TO_DEVICE : DMA_FROM_DEVICE); tifm_unmap_sg(sock, r_data->sg, r_data->sg_len, (r_data->flags & MMC_DATA_WRITE) - ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE); + ? 
DMA_TO_DEVICE : DMA_FROM_DEVICE); } r_data->bytes_xfered = r_data->blocks diff --git a/drivers/mmc/host/usdhi6rol0.c b/drivers/mmc/host/usdhi6rol0.c index b9b79b1089a0..99515be6e5e5 100644 --- a/drivers/mmc/host/usdhi6rol0.c +++ b/drivers/mmc/host/usdhi6rol0.c @@ -631,9 +631,9 @@ static void usdhi6_dma_kill(struct usdhi6_host *host) __func__, data->sg_len, data->blocks, data->blksz); /* Abort DMA */ if (data->flags & MMC_DATA_READ) - dmaengine_terminate_all(host->chan_rx); + dmaengine_terminate_sync(host->chan_rx); else - dmaengine_terminate_all(host->chan_tx); + dmaengine_terminate_sync(host->chan_tx); } static void usdhi6_dma_check_error(struct usdhi6_host *host) @@ -1186,6 +1186,15 @@ static int usdhi6_sig_volt_switch(struct mmc_host *mmc, struct mmc_ios *ios) return ret; } +static int usdhi6_card_busy(struct mmc_host *mmc) +{ + struct usdhi6_host *host = mmc_priv(mmc); + u32 tmp = usdhi6_read(host, USDHI6_SD_INFO2); + + /* Card is busy if it is pulling dat[0] low */ + return !(tmp & USDHI6_SD_INFO2_SDDAT0); +} + static const struct mmc_host_ops usdhi6_ops = { .request = usdhi6_request, .set_ios = usdhi6_set_ios, @@ -1193,6 +1202,7 @@ static const struct mmc_host_ops usdhi6_ops = { .get_ro = usdhi6_get_ro, .enable_sdio_irq = usdhi6_enable_sdio_irq, .start_signal_voltage_switch = usdhi6_sig_volt_switch, + .card_busy = usdhi6_card_busy, }; /* State machine handlers */ diff --git a/drivers/mmc/host/via-sdmmc.c b/drivers/mmc/host/via-sdmmc.c index c32df5530b94..88662a90ed96 100644 --- a/drivers/mmc/host/via-sdmmc.c +++ b/drivers/mmc/host/via-sdmmc.c @@ -491,7 +491,7 @@ static void via_sdc_preparedata(struct via_crdr_mmc_host *host, count = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len, ((data->flags & MMC_DATA_READ) ? - PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE)); + DMA_FROM_DEVICE : DMA_TO_DEVICE)); BUG_ON(count != 1); via_set_ddma(host, sg_dma_address(data->sg), sg_dma_len(data->sg), @@ -638,7 +638,7 @@ static void via_sdc_finish_data(struct via_crdr_mmc_host *host) dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, ((data->flags & MMC_DATA_READ) ? - PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE)); + DMA_FROM_DEVICE : DMA_TO_DEVICE)); if (data->stop) via_sdc_send_command(host, data->stop); |
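
The mmc/core/block.c hunks above drop the driver-local card_busy_detect() loop in favour of the core's __mmc_poll_for_busy() with a small status callback. Below is a minimal standalone sketch of that callback pattern, reusing the struct and callback the diff itself introduces; the example_wait_not_busy() wrapper is a hypothetical name, and the fragment assumes the MMC core headers and MMC_BLK_TIMEOUT_MS definition already present in block.c:

struct mmc_blk_busy_data {
	struct mmc_card *card;
	u32 status;
};

static int mmc_blk_busy_cb(void *cb_data, bool *busy)
{
	struct mmc_blk_busy_data *data = cb_data;
	u32 status = 0;
	int err;

	err = mmc_send_status(data->card, &status);
	if (err)
		return err;

	/* Accumulate response error bits for the caller to inspect. */
	data->status |= status;

	/* Keep polling until the card reports it is ready for data. */
	*busy = !mmc_ready_for_data(status);
	return 0;
}

/* Hypothetical call site, mirroring mmc_blk_card_busy() in the diff. */
static int example_wait_not_busy(struct mmc_card *card)
{
	struct mmc_blk_busy_data cb_data = { .card = card, .status = 0 };

	return __mmc_poll_for_busy(card, MMC_BLK_TIMEOUT_MS,
				   &mmc_blk_busy_cb, &cb_data);
}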
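
The memstick, tifm and via-sdmmc hunks replace the legacy PCI_DMA_TODEVICE/PCI_DMA_FROMDEVICE flags with the generic enum dma_data_direction values of the DMA-mapping API. A short sketch of the resulting pattern around dma_map_sg()/dma_unmap_sg(); the helper name and the single-entry scatterlist are illustrative only:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static int example_map_one_sg(struct device *dev, struct scatterlist *sg,
			      bool is_write)
{
	enum dma_data_direction dir = is_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE;

	/* Map a single scatterlist entry in the direction of the transfer. */
	if (dma_map_sg(dev, sg, 1, dir) != 1)
		return -ENOMEM;

	/* ... program the DMA engine and wait for completion here ... */

	dma_unmap_sg(dev, sg, 1, dir);
	return 0;
}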
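
The dw_mmc, moxart and sdhci external-DMA hunks add a memset() of the on-stack struct dma_slave_config before filling it in, so members the driver leaves untouched are zero instead of stack garbage. A minimal sketch of that pattern against the generic dmaengine API; the helper name and FIFO address parameter are assumptions for illustration:

#include <linux/dmaengine.h>
#include <linux/string.h>

static int example_config_fifo_channel(struct dma_chan *chan,
				       dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg;

	/* Zero everything so unused or newly added fields are well defined. */
	memset(&cfg, 0, sizeof(cfg));

	cfg.src_addr = fifo_addr;
	cfg.dst_addr = fifo_addr;
	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

	return dmaengine_slave_config(chan, &cfg);
}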