*
* Copyright 2002 Hewlett-Packard Company
* Copyright 2005-2008 Pierre Ossman
+ * Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
*
* Use consistent with the GNU GPL is permitted,
* provided that this copyright notice is
{
struct mmc_blk_data *md = mq->data;
struct mmc_card *card = md->queue.card;
+ struct mmc_host *host = card->host;
int ret = 0;
+ if (host->en_periodic_cflush && host->flush_timeout &&
+ !host->cache_flush_needed) {
+ blk_end_request(req, 0, 0);
+ return 0;
+ }
+
ret = mmc_flush_cache(card);
if (ret)
ret = -EIO;
#endif
blk_end_request_all(req, ret);
+ if (host->en_periodic_cflush && host->flush_timeout && !ret) {
+ host->cache_flush_needed = false;
+ mod_timer(&host->flush_timer, jiffies +
+ msecs_to_jiffies(host->flush_timeout));
+ }
return ret ? 0 : 1;
}
BUG_ON(!card);
host = card->host;
BUG_ON(!host);
+
+ if (host->en_periodic_cflush && host->flush_timeout &&
+ !host->cache_flush_needed) {
+ blk_end_request(req, 0, 0);
+ return 0;
+ }
+
BUG_ON((req->tag < 0) || (req->tag > card->ext_csd.cmdq_depth));
BUG_ON(test_and_set_bit(req->tag, &host->cmdq_ctx.active_reqs));
return err;
err = mmc_blk_cmdq_start_req(card->host, cmdq_req);
+ if (host->en_periodic_cflush && host->flush_timeout && !err) {
+ host->cache_flush_needed = false;
+ mod_timer(&host->flush_timer, jiffies +
+ msecs_to_jiffies(host->flush_timeout));
+ }
return err;
}
EXPORT_SYMBOL(mmc_blk_cmdq_issue_flush_rq);
card->ext_csd.rel_sectors)) {
md->flags |= MMC_BLK_REL_WR;
blk_queue_flush(md->queue.queue, REQ_FLUSH | REQ_FUA);
+ card->host->cache_flush_needed = true;
}
if (card->cmdq_init) {
return mmc_execute_tuning(card);
}
+/*
+ * cache_flush_handler - periodic cache-flush timer callback
+ * @data: the struct mmc_host this timer was armed for (cast back from the
+ *        unsigned long passed to setup_timer())
+ *
+ * Marks the host's cache as needing a real flush.  While this flag is
+ * clear, the flush issue paths complete REQ_FLUSH requests immediately
+ * without sending a flush to the card; once a flush is actually sent,
+ * those paths clear the flag again and re-arm this timer with
+ * host->flush_timeout.
+ *
+ * NOTE(review): runs in timer (softirq) context; the flag is a plain
+ * bool read/written from the request path as well — presumably a benign
+ * race (worst case one extra or one early flush), but worth confirming.
+ */
+static void cache_flush_handler(unsigned long data)
+{
+	struct mmc_host *host = (struct mmc_host *)data;
+
+	host->cache_flush_needed = true;
+}
+
/*
* Handle the detection and initialisation of a card.
*
}
}
+ if (!oldcard && card->ext_csd.cache_ctrl &&
+ (host->caps2 & MMC_CAP2_PERIODIC_CACHE_FLUSH)) {
+ host->cache_flush_needed = true;
+ host->en_periodic_cflush = true;
+ setup_timer(&host->flush_timer, cache_flush_handler,
+ (unsigned long)host);
+ pr_info("%s: periodic cache flush enabled\n",
+ mmc_hostname(host));
+ }
return 0;
free_card:
if (err)
goto out;
+ if (host->card->ext_csd.cache_ctrl && host->en_periodic_cflush)
+ del_timer(&host->flush_timer);
+
if (mmc_can_poweroff_notify(host->card) &&
((host->caps2 & MMC_CAP2_FULL_PWR_CYCLE) || !is_suspend))
err = mmc_poweroff_notify(host->card, notify_type);
mmc_card_clr_suspended(host->card);
}
+ if (host->card->ext_csd.cache_ctrl && host->en_periodic_cflush)
+ mod_timer(&host->flush_timer, host->flush_timeout);
+
out:
mmc_release_host(host);
return err;
#define MMC_CAP2_NO_SLEEP_CMD (1 << 20) /* cannot support sleep mode */
#define MMC_CAP2_HW_CQ (1 << 23) /* support eMMC command queue */
#define MMC_CAP2_CMDQ_QBR (1 << 24) /* CMDQ Queue barrier supported */
+#define MMC_CAP2_PERIODIC_CACHE_FLUSH (1 << 26)
mmc_pm_flag_t pm_caps; /* supported pm features */
int num_funcs;
} embedded_sdio_data;
#endif
-
+ bool cache_flush_needed;
+ bool en_periodic_cflush;
+ unsigned int flush_timeout;
+ struct timer_list flush_timer;
unsigned long private[0] ____cacheline_aligned;
};