Gist pdp7/8d85d736dea24957c017eefdeb882668 — created July 25, 2023 15:36.
Save this gist to your computer or open it in GitHub Desktop.
Contents: drivers/mmc changes in Linux_SDK_V0.9.5 (git patch follows below).
Note: this file may contain bidirectional Unicode text that can be interpreted or compiled differently than it appears; review it in an editor that reveals hidden Unicode characters. Learn more about bidirectional Unicode characters.
commit de23366effeb0e701cb856485d7b2c86da92ce87 | |
Author: Robert Nelson <robertcnelson@gmail.com> | |
Date: Mon Oct 3 20:49:36 2022 -0500 | |
thead: Linux_SDK_V0.9.5 | |
Signed-off-by: Robert Nelson <robertcnelson@gmail.com> | |
--- | |
drivers/mmc/core/Kconfig | 8 + | |
drivers/mmc/core/Makefile | 1 + | |
drivers/mmc/core/block.c | 11 + | |
drivers/mmc/core/core.c | 6 + | |
drivers/mmc/core/core.h | 4 +- | |
drivers/mmc/core/crypto.c | 41 +++ | |
drivers/mmc/core/crypto.h | 40 +++ | |
drivers/mmc/core/host.c | 1 + | |
drivers/mmc/core/mmc.c | 20 +- | |
drivers/mmc/core/queue.c | 4 + | |
drivers/mmc/core/sd.c | 6 + | |
drivers/mmc/core/slot-gpio.c | 7 + | |
drivers/mmc/host/Kconfig | 12 + | |
drivers/mmc/host/Makefile | 3 + | |
drivers/mmc/host/{cqhci.c => cqhci-core.c} | 69 +++- | |
drivers/mmc/host/cqhci-crypto.c | 244 +++++++++++++ | |
drivers/mmc/host/cqhci-crypto.h | 50 +++ | |
drivers/mmc/host/cqhci.h | 84 ++++- | |
drivers/mmc/host/sdhci-msm.c | 276 +++++++++++++- | |
drivers/mmc/host/sdhci-of-dwcmshc.c | 332 ++++++++++++++++- | |
drivers/mmc/host/sdhci-of-dwcmshc.h | 75 ++++ | |
drivers/mmc/host/sdhci-of-light-mpw.c | 558 +++++++++++++++++++++++++++++ | |
drivers/mmc/host/sdhci.c | 33 +- | |
drivers/mmc/host/sdhci.h | 9 +- | |
24 files changed, 1845 insertions(+), 49 deletions(-) | |
diff --git a/drivers/mmc/core/Kconfig b/drivers/mmc/core/Kconfig | |
index c12fe13e4b14..ae8b69aee619 100644 | |
--- a/drivers/mmc/core/Kconfig | |
+++ b/drivers/mmc/core/Kconfig | |
@@ -81,3 +81,11 @@ config MMC_TEST | |
This driver is only of interest to those developing or | |
testing a host driver. Most people should say N here. | |
+config MMC_CRYPTO | |
+ bool "MMC Crypto Engine Support" | |
+ depends on BLK_INLINE_ENCRYPTION | |
+ help | |
+ Enable Crypto Engine Support in MMC. | |
+ Enabling this makes it possible for the kernel to use the crypto | |
+ capabilities of the MMC device (if present) to perform crypto | |
+ operations on data being transferred to/from the device. | |
diff --git a/drivers/mmc/core/Makefile b/drivers/mmc/core/Makefile | |
index 95ffe008ebdf..6a907736cd7a 100644 | |
--- a/drivers/mmc/core/Makefile | |
+++ b/drivers/mmc/core/Makefile | |
@@ -18,3 +18,4 @@ obj-$(CONFIG_MMC_BLOCK) += mmc_block.o | |
mmc_block-objs := block.o queue.o | |
obj-$(CONFIG_MMC_TEST) += mmc_test.o | |
obj-$(CONFIG_SDIO_UART) += sdio_uart.o | |
+mmc_core-$(CONFIG_MMC_CRYPTO) += crypto.o | |
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c | |
index 99b981a05b6c..14dddf05f0ac 100644 | |
--- a/drivers/mmc/core/block.c | |
+++ b/drivers/mmc/core/block.c | |
@@ -47,10 +47,13 @@ | |
#include <linux/uaccess.h> | |
+#include <trace/hooks/mmc_core.h> | |
+ | |
#include "queue.h" | |
#include "block.h" | |
#include "core.h" | |
#include "card.h" | |
+#include "crypto.h" | |
#include "host.h" | |
#include "bus.h" | |
#include "mmc_ops.h" | |
@@ -961,6 +964,11 @@ static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host, | |
struct mmc_blk_data *main_md = | |
dev_get_drvdata(&host->card->dev); | |
int part_err; | |
+ bool allow = true; | |
+ | |
+ trace_android_vh_mmc_blk_reset(host, err, &allow); | |
+ if (!allow) | |
+ return -ENODEV; | |
main_md->part_curr = main_md->part_type; | |
part_err = mmc_blk_part_switch(host->card, md->part_type); | |
@@ -1266,6 +1274,8 @@ static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq, | |
memset(brq, 0, sizeof(struct mmc_blk_request)); | |
+ mmc_crypto_prepare_req(mqrq); | |
+ | |
brq->mrq.data = &brq->data; | |
brq->mrq.tag = req->tag; | |
@@ -1799,6 +1809,7 @@ static void mmc_blk_mq_rw_recovery(struct mmc_queue *mq, struct request *req) | |
err && mmc_blk_reset(md, card->host, type)) { | |
pr_err("%s: recovery failed!\n", req->rq_disk->disk_name); | |
mqrq->retries = MMC_NO_RETRIES; | |
+ trace_android_vh_mmc_blk_mq_rw_recovery(card); | |
return; | |
} | |
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c | |
index eb82f6aac951..bfa73b045769 100644 | |
--- a/drivers/mmc/core/core.c | |
+++ b/drivers/mmc/core/core.c | |
@@ -37,6 +37,7 @@ | |
#include "core.h" | |
#include "card.h" | |
+#include "crypto.h" | |
#include "bus.h" | |
#include "host.h" | |
#include "sdio_bus.h" | |
@@ -916,6 +917,7 @@ void mmc_set_clock(struct mmc_host *host, unsigned int hz) | |
host->ios.clock = hz; | |
mmc_set_ios(host); | |
} | |
+EXPORT_SYMBOL_GPL(mmc_set_clock); | |
int mmc_execute_tuning(struct mmc_card *card) | |
{ | |
@@ -995,7 +997,10 @@ void mmc_set_initial_state(struct mmc_host *host) | |
host->ops->hs400_enhanced_strobe(host, &host->ios); | |
mmc_set_ios(host); | |
+ | |
+ mmc_crypto_set_initial_state(host); | |
} | |
+EXPORT_SYMBOL_GPL(mmc_set_initial_state); | |
/** | |
* mmc_vdd_to_ocrbitnum - Convert a voltage to the OCR bit number | |
@@ -1259,6 +1264,7 @@ void mmc_set_timing(struct mmc_host *host, unsigned int timing) | |
host->ios.timing = timing; | |
mmc_set_ios(host); | |
} | |
+EXPORT_SYMBOL_GPL(mmc_set_timing); | |
/* | |
* Select appropriate driver type for host. | |
diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h | |
index a6c814fdbf0a..dffd9e8d8770 100644 | |
--- a/drivers/mmc/core/core.h | |
+++ b/drivers/mmc/core/core.h | |
@@ -30,6 +30,8 @@ struct mmc_bus_ops { | |
int (*hw_reset)(struct mmc_host *); | |
int (*sw_reset)(struct mmc_host *); | |
bool (*cache_enabled)(struct mmc_host *); | |
+ | |
+ ANDROID_VENDOR_DATA_ARRAY(1, 2); | |
}; | |
void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops); | |
@@ -41,7 +43,7 @@ struct device_node *mmc_of_find_child_device(struct mmc_host *host, | |
void mmc_init_erase(struct mmc_card *card); | |
void mmc_set_chip_select(struct mmc_host *host, int mode); | |
-void mmc_set_clock(struct mmc_host *host, unsigned int hz); | |
+extern void mmc_set_clock(struct mmc_host *host, unsigned int hz); | |
void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode); | |
void mmc_set_bus_width(struct mmc_host *host, unsigned int width); | |
u32 mmc_select_voltage(struct mmc_host *host, u32 ocr); | |
diff --git a/drivers/mmc/core/crypto.c b/drivers/mmc/core/crypto.c | |
new file mode 100644 | |
index 000000000000..67557808cada | |
--- /dev/null | |
+++ b/drivers/mmc/core/crypto.c | |
@@ -0,0 +1,41 @@ | |
+// SPDX-License-Identifier: GPL-2.0-only | |
+/* | |
+ * MMC crypto engine (inline encryption) support | |
+ * | |
+ * Copyright 2020 Google LLC | |
+ */ | |
+ | |
+#include <linux/blk-crypto.h> | |
+#include <linux/mmc/host.h> | |
+ | |
+#include "core.h" | |
+#include "crypto.h" | |
+#include "queue.h" | |
+ | |
+void mmc_crypto_set_initial_state(struct mmc_host *host) | |
+{ | |
+ /* Reset might clear all keys, so reprogram all the keys. */ | |
+ if (host->caps2 & MMC_CAP2_CRYPTO) | |
+ blk_ksm_reprogram_all_keys(&host->ksm); | |
+} | |
+ | |
+void mmc_crypto_setup_queue(struct request_queue *q, struct mmc_host *host) | |
+{ | |
+ if (host->caps2 & MMC_CAP2_CRYPTO) | |
+ blk_ksm_register(&host->ksm, q); | |
+} | |
+EXPORT_SYMBOL_GPL(mmc_crypto_setup_queue); | |
+ | |
+void mmc_crypto_prepare_req(struct mmc_queue_req *mqrq) | |
+{ | |
+ struct request *req = mmc_queue_req_to_req(mqrq); | |
+ struct mmc_request *mrq = &mqrq->brq.mrq; | |
+ | |
+ if (!req->crypt_ctx) | |
+ return; | |
+ | |
+ mrq->crypto_ctx = req->crypt_ctx; | |
+ if (req->crypt_keyslot) | |
+ mrq->crypto_key_slot = blk_ksm_get_slot_idx(req->crypt_keyslot); | |
+} | |
+EXPORT_SYMBOL_GPL(mmc_crypto_prepare_req); | |
diff --git a/drivers/mmc/core/crypto.h b/drivers/mmc/core/crypto.h | |
new file mode 100644 | |
index 000000000000..fbe9a520bf90 | |
--- /dev/null | |
+++ b/drivers/mmc/core/crypto.h | |
@@ -0,0 +1,40 @@ | |
+/* SPDX-License-Identifier: GPL-2.0-only */ | |
+/* | |
+ * MMC crypto engine (inline encryption) support | |
+ * | |
+ * Copyright 2020 Google LLC | |
+ */ | |
+ | |
+#ifndef _MMC_CORE_CRYPTO_H | |
+#define _MMC_CORE_CRYPTO_H | |
+ | |
+struct mmc_host; | |
+struct mmc_queue_req; | |
+struct request_queue; | |
+ | |
+#ifdef CONFIG_MMC_CRYPTO | |
+ | |
+void mmc_crypto_set_initial_state(struct mmc_host *host); | |
+ | |
+void mmc_crypto_setup_queue(struct request_queue *q, struct mmc_host *host); | |
+ | |
+void mmc_crypto_prepare_req(struct mmc_queue_req *mqrq); | |
+ | |
+#else /* CONFIG_MMC_CRYPTO */ | |
+ | |
+static inline void mmc_crypto_set_initial_state(struct mmc_host *host) | |
+{ | |
+} | |
+ | |
+static inline void mmc_crypto_setup_queue(struct request_queue *q, | |
+ struct mmc_host *host) | |
+{ | |
+} | |
+ | |
+static inline void mmc_crypto_prepare_req(struct mmc_queue_req *mqrq) | |
+{ | |
+} | |
+ | |
+#endif /* !CONFIG_MMC_CRYPTO */ | |
+ | |
+#endif /* _MMC_CORE_CRYPTO_H */ | |
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c | |
index 03e2f965a96a..b7717fd8cb2a 100644 | |
--- a/drivers/mmc/core/host.c | |
+++ b/drivers/mmc/core/host.c | |
@@ -25,6 +25,7 @@ | |
#include <linux/mmc/slot-gpio.h> | |
#include "core.h" | |
+#include "crypto.h" | |
#include "host.h" | |
#include "slot-gpio.h" | |
#include "pwrseq.h" | |
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c | |
index 7494d595035e..aca62d703b21 100644 | |
--- a/drivers/mmc/core/mmc.c | |
+++ b/drivers/mmc/core/mmc.c | |
@@ -972,7 +972,7 @@ static int mmc_select_powerclass(struct mmc_card *card) | |
/* | |
* Set the bus speed for the selected speed mode. | |
*/ | |
-static void mmc_set_bus_speed(struct mmc_card *card) | |
+void mmc_set_bus_speed(struct mmc_card *card) | |
{ | |
unsigned int max_dtr = (unsigned int)-1; | |
@@ -992,7 +992,7 @@ static void mmc_set_bus_speed(struct mmc_card *card) | |
* If the bus width is changed successfully, return the selected width value. | |
* Zero is returned instead of error value if the wide width is not supported. | |
*/ | |
-static int mmc_select_bus_width(struct mmc_card *card) | |
+int mmc_select_bus_width(struct mmc_card *card) | |
{ | |
static unsigned ext_csd_bits[] = { | |
EXT_CSD_BUS_WIDTH_8, | |
@@ -1057,11 +1057,12 @@ static int mmc_select_bus_width(struct mmc_card *card) | |
return err; | |
} | |
+EXPORT_SYMBOL_GPL(mmc_select_bus_width); | |
/* | |
* Switch to the high-speed mode | |
*/ | |
-static int mmc_select_hs(struct mmc_card *card) | |
+int mmc_select_hs(struct mmc_card *card) | |
{ | |
int err; | |
@@ -1075,11 +1076,12 @@ static int mmc_select_hs(struct mmc_card *card) | |
return err; | |
} | |
+EXPORT_SYMBOL_GPL(mmc_select_hs); | |
/* | |
* Activate wide bus and DDR if supported. | |
*/ | |
-static int mmc_select_hs_ddr(struct mmc_card *card) | |
+int mmc_select_hs_ddr(struct mmc_card *card) | |
{ | |
struct mmc_host *host = card->host; | |
u32 bus_width, ext_csd_bits; | |
@@ -1148,8 +1150,9 @@ static int mmc_select_hs_ddr(struct mmc_card *card) | |
return err; | |
} | |
+EXPORT_SYMBOL_GPL(mmc_select_hs_ddr); | |
-static int mmc_select_hs400(struct mmc_card *card) | |
+int mmc_select_hs400(struct mmc_card *card) | |
{ | |
struct mmc_host *host = card->host; | |
unsigned int max_dtr; | |
@@ -1235,6 +1238,7 @@ static int mmc_select_hs400(struct mmc_card *card) | |
__func__, err); | |
return err; | |
} | |
+EXPORT_SYMBOL_GPL(mmc_select_hs400); | |
int mmc_hs200_to_hs400(struct mmc_card *card) | |
{ | |
@@ -1505,7 +1509,7 @@ static int mmc_select_hs200(struct mmc_card *card) | |
/* | |
* Activate High Speed, HS200 or HS400ES mode if supported. | |
*/ | |
-static int mmc_select_timing(struct mmc_card *card) | |
+int mmc_select_timing(struct mmc_card *card) | |
{ | |
int err = 0; | |
@@ -1530,12 +1534,13 @@ static int mmc_select_timing(struct mmc_card *card) | |
mmc_set_bus_speed(card); | |
return 0; | |
} | |
+EXPORT_SYMBOL_GPL(mmc_select_timing); | |
/* | |
* Execute tuning sequence to seek the proper bus operating | |
* conditions for HS200 and HS400, which sends CMD21 to the device. | |
*/ | |
-static int mmc_hs200_tuning(struct mmc_card *card) | |
+int mmc_hs200_tuning(struct mmc_card *card) | |
{ | |
struct mmc_host *host = card->host; | |
@@ -1550,6 +1555,7 @@ static int mmc_hs200_tuning(struct mmc_card *card) | |
return mmc_execute_tuning(card); | |
} | |
+EXPORT_SYMBOL_GPL(mmc_hs200_tuning); | |
/* | |
* Handle the detection and initialisation of a card. | |
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c | |
index 002426e3cf76..c817f6decd6a 100644 | |
--- a/drivers/mmc/core/queue.c | |
+++ b/drivers/mmc/core/queue.c | |
@@ -19,6 +19,7 @@ | |
#include "block.h" | |
#include "core.h" | |
#include "card.h" | |
+#include "crypto.h" | |
#include "host.h" | |
#define MMC_DMA_MAP_MERGE_SEGMENTS 512 | |
@@ -70,6 +71,7 @@ enum mmc_issue_type mmc_issue_type(struct mmc_queue *mq, struct request *req) | |
return MMC_ISSUE_SYNC; | |
} | |
+EXPORT_SYMBOL_GPL(mmc_issue_type); | |
static void __mmc_cqe_recovery_notifier(struct mmc_queue *mq) | |
{ | |
@@ -407,6 +409,8 @@ static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card) | |
mutex_init(&mq->complete_lock); | |
init_waitqueue_head(&mq->wait); | |
+ | |
+ mmc_crypto_setup_queue(mq->queue, host); | |
} | |
static inline bool mmc_merge_capable(struct mmc_host *host) | |
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c | |
index bac343a8d569..2819e6b42a67 100644 | |
--- a/drivers/mmc/core/sd.c | |
+++ b/drivers/mmc/core/sd.c | |
@@ -18,6 +18,8 @@ | |
#include <linux/mmc/mmc.h> | |
#include <linux/mmc/sd.h> | |
+#include <trace/hooks/mmc_core.h> | |
+ | |
#include "core.h" | |
#include "card.h" | |
#include "host.h" | |
@@ -462,6 +464,8 @@ static void sd_update_bus_speed_mode(struct mmc_card *card) | |
SD_MODE_UHS_SDR12)) { | |
card->sd_bus_speed = UHS_SDR12_BUS_SPEED; | |
} | |
+ | |
+ trace_android_vh_sd_update_bus_speed_mode(card); | |
} | |
static int sd_set_bus_speed_mode(struct mmc_card *card, u8 *status) | |
@@ -1405,5 +1409,7 @@ int mmc_attach_sd(struct mmc_host *host) | |
pr_err("%s: error %d whilst initialising SD card\n", | |
mmc_hostname(host), err); | |
+ trace_android_vh_mmc_attach_sd(host, ocr, err); | |
+ | |
return err; | |
} | |
diff --git a/drivers/mmc/core/slot-gpio.c b/drivers/mmc/core/slot-gpio.c | |
index 05e907451df9..298877a50af5 100644 | |
--- a/drivers/mmc/core/slot-gpio.c | |
+++ b/drivers/mmc/core/slot-gpio.c | |
@@ -14,6 +14,8 @@ | |
#include <linux/module.h> | |
#include <linux/slab.h> | |
+#include <trace/hooks/mmc_core.h> | |
+ | |
#include "slot-gpio.h" | |
struct mmc_gpio { | |
@@ -30,6 +32,11 @@ static irqreturn_t mmc_gpio_cd_irqt(int irq, void *dev_id) | |
/* Schedule a card detection after a debounce timeout */ | |
struct mmc_host *host = dev_id; | |
struct mmc_gpio *ctx = host->slot.handler_priv; | |
+ bool allow = true; | |
+ | |
+ trace_android_vh_mmc_gpio_cd_irqt(host, &allow); | |
+ if (!allow) | |
+ return IRQ_HANDLED; | |
host->trigger_card_event = true; | |
mmc_detect_change(host, msecs_to_jiffies(ctx->cd_debounce_delay_ms)); | |
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig | |
index 30ff42fd173e..45331d698ad0 100644 | |
--- a/drivers/mmc/host/Kconfig | |
+++ b/drivers/mmc/host/Kconfig | |
@@ -213,6 +213,17 @@ config MMC_SDHCI_OF_DWCMSHC | |
If you have a controller with this interface, say Y or M here. | |
If unsure, say N. | |
+config MMC_SDHCI_OF_LIGHT_MPW | |
+ tristate "SDHCI OF support for the Synopsys DWC MSHC of Light MPW" | |
+ depends on MMC_SDHCI_PLTFM | |
+ depends on OF | |
+ depends on COMMON_CLK | |
+ help | |
+ This selects Synopsys DesignWare Cores Mobile Storage Controller | |
+ support. | |
+ If you have a controller with this interface, say Y or M here. | |
+ If unsure, say N. | |
+ | |
config MMC_SDHCI_OF_SPARX5 | |
tristate "SDHCI OF support for the MCHP Sparx5 SoC" | |
depends on MMC_SDHCI_PLTFM | |
@@ -544,6 +555,7 @@ config MMC_SDHCI_MSM | |
depends on MMC_SDHCI_PLTFM | |
select MMC_SDHCI_IO_ACCESSORS | |
select MMC_CQHCI | |
+ select QCOM_SCM if MMC_CRYPTO && ARCH_QCOM | |
help | |
This selects the Secure Digital Host Controller Interface (SDHCI) | |
support present in Qualcomm SOCs. The controller supports | |
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile | |
index 451c25fc2c69..45908ae6a9d4 100644 | |
--- a/drivers/mmc/host/Makefile | |
+++ b/drivers/mmc/host/Makefile | |
@@ -94,6 +94,7 @@ obj-$(CONFIG_MMC_SDHCI_OF_AT91) += sdhci-of-at91.o | |
obj-$(CONFIG_MMC_SDHCI_OF_ESDHC) += sdhci-of-esdhc.o | |
obj-$(CONFIG_MMC_SDHCI_OF_HLWD) += sdhci-of-hlwd.o | |
obj-$(CONFIG_MMC_SDHCI_OF_DWCMSHC) += sdhci-of-dwcmshc.o | |
+obj-$(CONFIG_MMC_SDHCI_OF_LIGHT_MPW) += sdhci-of-light-mpw.o | |
obj-$(CONFIG_MMC_SDHCI_OF_SPARX5) += sdhci-of-sparx5.o | |
obj-$(CONFIG_MMC_SDHCI_BCM_KONA) += sdhci-bcm-kona.o | |
obj-$(CONFIG_MMC_SDHCI_IPROC) += sdhci-iproc.o | |
@@ -104,6 +105,8 @@ obj-$(CONFIG_MMC_SDHCI_BRCMSTB) += sdhci-brcmstb.o | |
obj-$(CONFIG_MMC_SDHCI_OMAP) += sdhci-omap.o | |
obj-$(CONFIG_MMC_SDHCI_SPRD) += sdhci-sprd.o | |
obj-$(CONFIG_MMC_CQHCI) += cqhci.o | |
+cqhci-y += cqhci-core.o | |
+cqhci-$(CONFIG_MMC_CRYPTO) += cqhci-crypto.o | |
obj-$(CONFIG_MMC_HSQ) += mmc_hsq.o | |
ifeq ($(CONFIG_CB710_DEBUG),y) | |
diff --git a/drivers/mmc/host/cqhci.c b/drivers/mmc/host/cqhci-core.c | |
similarity index 94% | |
rename from drivers/mmc/host/cqhci.c | |
rename to drivers/mmc/host/cqhci-core.c | |
index 7ba4f714106f..e3d6de1d86ef 100644 | |
--- a/drivers/mmc/host/cqhci.c | |
+++ b/drivers/mmc/host/cqhci-core.c | |
@@ -18,6 +18,7 @@ | |
#include <linux/mmc/card.h> | |
#include "cqhci.h" | |
+#include "cqhci-crypto.h" | |
#define DCMD_SLOT 31 | |
#define NUM_SLOTS 32 | |
@@ -258,6 +259,9 @@ static void __cqhci_enable(struct cqhci_host *cq_host) | |
if (cq_host->caps & CQHCI_TASK_DESC_SZ_128) | |
cqcfg |= CQHCI_TASK_DESC_SZ; | |
+ if (mmc->caps2 & MMC_CAP2_CRYPTO) | |
+ cqcfg |= CQHCI_CRYPTO_GENERAL_ENABLE; | |
+ | |
cqhci_writel(cq_host, cqcfg, CQHCI_CFG); | |
cqhci_writel(cq_host, lower_32_bits(cq_host->desc_dma_base), | |
@@ -411,13 +415,15 @@ static void cqhci_disable(struct mmc_host *mmc) | |
} | |
static void cqhci_prep_task_desc(struct mmc_request *mrq, | |
- u64 *data, bool intr) | |
+ struct cqhci_host *cq_host, int tag) | |
{ | |
+ __le64 *task_desc = (__le64 __force *)get_desc(cq_host, tag); | |
u32 req_flags = mrq->data->flags; | |
+ u64 desc0; | |
- *data = CQHCI_VALID(1) | | |
+ desc0 = CQHCI_VALID(1) | | |
CQHCI_END(1) | | |
- CQHCI_INT(intr) | | |
+ CQHCI_INT(1) | | |
CQHCI_ACT(0x5) | | |
CQHCI_FORCED_PROG(!!(req_flags & MMC_DATA_FORCED_PRG)) | | |
CQHCI_DATA_TAG(!!(req_flags & MMC_DATA_DAT_TAG)) | | |
@@ -428,8 +434,19 @@ static void cqhci_prep_task_desc(struct mmc_request *mrq, | |
CQHCI_BLK_COUNT(mrq->data->blocks) | | |
CQHCI_BLK_ADDR((u64)mrq->data->blk_addr); | |
- pr_debug("%s: cqhci: tag %d task descriptor 0x%016llx\n", | |
- mmc_hostname(mrq->host), mrq->tag, (unsigned long long)*data); | |
+ task_desc[0] = cpu_to_le64(desc0); | |
+ | |
+ if (cq_host->caps & CQHCI_TASK_DESC_SZ_128) { | |
+ u64 desc1 = cqhci_crypto_prep_task_desc(mrq); | |
+ | |
+ task_desc[1] = cpu_to_le64(desc1); | |
+ | |
+ pr_debug("%s: cqhci: tag %d task descriptor 0x%016llx%016llx\n", | |
+ mmc_hostname(mrq->host), mrq->tag, desc1, desc0); | |
+ } else { | |
+ pr_debug("%s: cqhci: tag %d task descriptor 0x%016llx\n", | |
+ mmc_hostname(mrq->host), mrq->tag, desc0); | |
+ } | |
} | |
static int cqhci_dma_map(struct mmc_host *host, struct mmc_request *mrq) | |
@@ -570,8 +587,6 @@ static inline int cqhci_tag(struct mmc_request *mrq) | |
static int cqhci_request(struct mmc_host *mmc, struct mmc_request *mrq) | |
{ | |
int err = 0; | |
- u64 data = 0; | |
- u64 *task_desc = NULL; | |
int tag = cqhci_tag(mrq); | |
struct cqhci_host *cq_host = mmc->cqe_private; | |
unsigned long flags; | |
@@ -601,9 +616,8 @@ static int cqhci_request(struct mmc_host *mmc, struct mmc_request *mrq) | |
} | |
if (mrq->data) { | |
- task_desc = (__le64 __force *)get_desc(cq_host, tag); | |
- cqhci_prep_task_desc(mrq, &data, 1); | |
- *task_desc = cpu_to_le64(data); | |
+ cqhci_prep_task_desc(mrq, cq_host, tag); | |
+ | |
err = cqhci_prep_tran_desc(mrq, cq_host, tag); | |
if (err) { | |
pr_err("%s: cqhci: failed to setup tx desc: %d\n", | |
@@ -674,6 +688,7 @@ static void cqhci_error_irq(struct mmc_host *mmc, u32 status, int cmd_error, | |
struct cqhci_host *cq_host = mmc->cqe_private; | |
struct cqhci_slot *slot; | |
u32 terri; | |
+ u32 tdpe; | |
int tag; | |
spin_lock(&cq_host->lock); | |
@@ -712,6 +727,30 @@ static void cqhci_error_irq(struct mmc_host *mmc, u32 status, int cmd_error, | |
} | |
} | |
+ /* | |
+ * Handle ICCE ("Invalid Crypto Configuration Error"). This should | |
+ * never happen, since the block layer ensures that all crypto-enabled | |
+ * I/O requests have a valid keyslot before they reach the driver. | |
+ * | |
+ * Note that GCE ("General Crypto Error") is different; it already got | |
+ * handled above by checking TERRI. | |
+ */ | |
+ if (status & CQHCI_IS_ICCE) { | |
+ tdpe = cqhci_readl(cq_host, CQHCI_TDPE); | |
+ WARN_ONCE(1, | |
+ "%s: cqhci: invalid crypto configuration error. IRQ status: 0x%08x TDPE: 0x%08x\n", | |
+ mmc_hostname(mmc), status, tdpe); | |
+ while (tdpe != 0) { | |
+ tag = __ffs(tdpe); | |
+ tdpe &= ~(1 << tag); | |
+ slot = &cq_host->slot[tag]; | |
+ if (!slot->mrq) | |
+ continue; | |
+ slot->flags = cqhci_error_flags(data_error, cmd_error); | |
+ cqhci_recovery_needed(mmc, slot->mrq, true); | |
+ } | |
+ } | |
+ | |
if (!cq_host->recovery_halt) { | |
/* | |
* The only way to guarantee forward progress is to mark at | |
@@ -777,7 +816,8 @@ irqreturn_t cqhci_irq(struct mmc_host *mmc, u32 intmask, int cmd_error, | |
pr_debug("%s: cqhci: IRQ status: 0x%08x\n", mmc_hostname(mmc), status); | |
- if ((status & CQHCI_IS_RED) || cmd_error || data_error) | |
+ if ((status & (CQHCI_IS_RED | CQHCI_IS_GCE | CQHCI_IS_ICCE)) || | |
+ cmd_error || data_error) | |
cqhci_error_irq(mmc, status, cmd_error, data_error); | |
if (status & CQHCI_IS_TCC) { | |
@@ -1144,6 +1184,13 @@ int cqhci_init(struct cqhci_host *cq_host, struct mmc_host *mmc, | |
goto out_err; | |
} | |
+ err = cqhci_crypto_init(cq_host); | |
+ if (err) { | |
+ pr_err("%s: CQHCI crypto initialization failed\n", | |
+ mmc_hostname(mmc)); | |
+ goto out_err; | |
+ } | |
+ | |
spin_lock_init(&cq_host->lock); | |
init_completion(&cq_host->halt_comp); | |
diff --git a/drivers/mmc/host/cqhci-crypto.c b/drivers/mmc/host/cqhci-crypto.c | |
new file mode 100644 | |
index 000000000000..5698bc48ac5b | |
--- /dev/null | |
+++ b/drivers/mmc/host/cqhci-crypto.c | |
@@ -0,0 +1,244 @@ | |
+// SPDX-License-Identifier: GPL-2.0-only | |
+/* | |
+ * CQHCI crypto engine (inline encryption) support | |
+ * | |
+ * Copyright 2020 Google LLC | |
+ */ | |
+ | |
+#include <linux/blk-crypto.h> | |
+#include <linux/keyslot-manager.h> | |
+#include <linux/mmc/host.h> | |
+ | |
+#include "cqhci-crypto.h" | |
+ | |
+/* Map from blk-crypto modes to CQHCI crypto algorithm IDs and key sizes */ | |
+static const struct cqhci_crypto_alg_entry { | |
+ enum cqhci_crypto_alg alg; | |
+ enum cqhci_crypto_key_size key_size; | |
+} cqhci_crypto_algs[BLK_ENCRYPTION_MODE_MAX] = { | |
+ [BLK_ENCRYPTION_MODE_AES_256_XTS] = { | |
+ .alg = CQHCI_CRYPTO_ALG_AES_XTS, | |
+ .key_size = CQHCI_CRYPTO_KEY_SIZE_256, | |
+ }, | |
+}; | |
+ | |
+static inline struct cqhci_host * | |
+cqhci_host_from_ksm(struct blk_keyslot_manager *ksm) | |
+{ | |
+ struct mmc_host *mmc = container_of(ksm, struct mmc_host, ksm); | |
+ | |
+ return mmc->cqe_private; | |
+} | |
+ | |
+static int cqhci_crypto_program_key(struct cqhci_host *cq_host, | |
+ const union cqhci_crypto_cfg_entry *cfg, | |
+ int slot) | |
+{ | |
+ u32 slot_offset = cq_host->crypto_cfg_register + slot * sizeof(*cfg); | |
+ int i; | |
+ | |
+ if (cq_host->ops->program_key) | |
+ return cq_host->ops->program_key(cq_host, cfg, slot); | |
+ | |
+ /* Clear CFGE */ | |
+ cqhci_writel(cq_host, 0, slot_offset + 16 * sizeof(cfg->reg_val[0])); | |
+ | |
+ /* Write the key */ | |
+ for (i = 0; i < 16; i++) { | |
+ cqhci_writel(cq_host, le32_to_cpu(cfg->reg_val[i]), | |
+ slot_offset + i * sizeof(cfg->reg_val[0])); | |
+ } | |
+ /* Write dword 17 */ | |
+ cqhci_writel(cq_host, le32_to_cpu(cfg->reg_val[17]), | |
+ slot_offset + 17 * sizeof(cfg->reg_val[0])); | |
+ /* Write dword 16, which includes the new value of CFGE */ | |
+ cqhci_writel(cq_host, le32_to_cpu(cfg->reg_val[16]), | |
+ slot_offset + 16 * sizeof(cfg->reg_val[0])); | |
+ return 0; | |
+} | |
+ | |
+static int cqhci_crypto_keyslot_program(struct blk_keyslot_manager *ksm, | |
+ const struct blk_crypto_key *key, | |
+ unsigned int slot) | |
+ | |
+{ | |
+ struct cqhci_host *cq_host = cqhci_host_from_ksm(ksm); | |
+ const union cqhci_crypto_cap_entry *ccap_array = | |
+ cq_host->crypto_cap_array; | |
+ const struct cqhci_crypto_alg_entry *alg = | |
+ &cqhci_crypto_algs[key->crypto_cfg.crypto_mode]; | |
+ u8 data_unit_mask = key->crypto_cfg.data_unit_size / 512; | |
+ int i; | |
+ int cap_idx = -1; | |
+ union cqhci_crypto_cfg_entry cfg = {}; | |
+ int err; | |
+ | |
+ BUILD_BUG_ON(CQHCI_CRYPTO_KEY_SIZE_INVALID != 0); | |
+ for (i = 0; i < cq_host->crypto_capabilities.num_crypto_cap; i++) { | |
+ if (ccap_array[i].algorithm_id == alg->alg && | |
+ ccap_array[i].key_size == alg->key_size && | |
+ (ccap_array[i].sdus_mask & data_unit_mask)) { | |
+ cap_idx = i; | |
+ break; | |
+ } | |
+ } | |
+ if (WARN_ON(cap_idx < 0)) | |
+ return -EOPNOTSUPP; | |
+ | |
+ cfg.data_unit_size = data_unit_mask; | |
+ cfg.crypto_cap_idx = cap_idx; | |
+ cfg.config_enable = CQHCI_CRYPTO_CONFIGURATION_ENABLE; | |
+ | |
+ if (ccap_array[cap_idx].algorithm_id == CQHCI_CRYPTO_ALG_AES_XTS) { | |
+ /* In XTS mode, the blk_crypto_key's size is already doubled */ | |
+ memcpy(cfg.crypto_key, key->raw, key->size/2); | |
+ memcpy(cfg.crypto_key + CQHCI_CRYPTO_KEY_MAX_SIZE/2, | |
+ key->raw + key->size/2, key->size/2); | |
+ } else { | |
+ memcpy(cfg.crypto_key, key->raw, key->size); | |
+ } | |
+ | |
+ err = cqhci_crypto_program_key(cq_host, &cfg, slot); | |
+ | |
+ memzero_explicit(&cfg, sizeof(cfg)); | |
+ return err; | |
+} | |
+ | |
+static int cqhci_crypto_clear_keyslot(struct cqhci_host *cq_host, int slot) | |
+{ | |
+ /* | |
+ * Clear the crypto cfg on the device. Clearing CFGE | |
+ * might not be sufficient, so just clear the entire cfg. | |
+ */ | |
+ union cqhci_crypto_cfg_entry cfg = {}; | |
+ | |
+ return cqhci_crypto_program_key(cq_host, &cfg, slot); | |
+} | |
+ | |
+static int cqhci_crypto_keyslot_evict(struct blk_keyslot_manager *ksm, | |
+ const struct blk_crypto_key *key, | |
+ unsigned int slot) | |
+{ | |
+ struct cqhci_host *cq_host = cqhci_host_from_ksm(ksm); | |
+ | |
+ return cqhci_crypto_clear_keyslot(cq_host, slot); | |
+} | |
+ | |
+/* | |
+ * The keyslot management operations for CQHCI crypto. | |
+ * | |
+ * Note that the block layer ensures that these are never called while the host | |
+ * controller is runtime-suspended. However, the CQE won't necessarily be | |
+ * "enabled" when these are called, i.e. CQHCI_ENABLE might not be set in the | |
+ * CQHCI_CFG register. But the hardware allows that. | |
+ */ | |
+static const struct blk_ksm_ll_ops cqhci_ksm_ops = { | |
+ .keyslot_program = cqhci_crypto_keyslot_program, | |
+ .keyslot_evict = cqhci_crypto_keyslot_evict, | |
+}; | |
+ | |
+static enum blk_crypto_mode_num | |
+cqhci_find_blk_crypto_mode(union cqhci_crypto_cap_entry cap) | |
+{ | |
+ int i; | |
+ | |
+ for (i = 0; i < ARRAY_SIZE(cqhci_crypto_algs); i++) { | |
+ BUILD_BUG_ON(CQHCI_CRYPTO_KEY_SIZE_INVALID != 0); | |
+ if (cqhci_crypto_algs[i].alg == cap.algorithm_id && | |
+ cqhci_crypto_algs[i].key_size == cap.key_size) | |
+ return i; | |
+ } | |
+ return BLK_ENCRYPTION_MODE_INVALID; | |
+} | |
+ | |
+/** | |
+ * cqhci_crypto_init - initialize CQHCI crypto support | |
+ * @cq_host: a cqhci host | |
+ * | |
+ * If the driver previously set MMC_CAP2_CRYPTO and the CQE declares | |
+ * CQHCI_CAP_CS, initialize the crypto support. This involves reading the | |
+ * crypto capability registers, initializing the keyslot manager, clearing all | |
+ * keyslots, and enabling 128-bit task descriptors. | |
+ * | |
+ * Return: 0 if crypto was initialized or isn't supported; whether | |
+ * MMC_CAP2_CRYPTO remains set indicates which one of those cases it is. | |
+ * Also can return a negative errno value on unexpected error. | |
+ */ | |
+int cqhci_crypto_init(struct cqhci_host *cq_host) | |
+{ | |
+ struct mmc_host *mmc = cq_host->mmc; | |
+ struct device *dev = mmc_dev(mmc); | |
+ struct blk_keyslot_manager *ksm = &mmc->ksm; | |
+ unsigned int num_keyslots; | |
+ unsigned int cap_idx; | |
+ enum blk_crypto_mode_num blk_mode_num; | |
+ unsigned int slot; | |
+ int err = 0; | |
+ | |
+ if (!(mmc->caps2 & MMC_CAP2_CRYPTO) || | |
+ !(cqhci_readl(cq_host, CQHCI_CAP) & CQHCI_CAP_CS)) | |
+ goto out; | |
+ | |
+ cq_host->crypto_capabilities.reg_val = | |
+ cpu_to_le32(cqhci_readl(cq_host, CQHCI_CCAP)); | |
+ | |
+ cq_host->crypto_cfg_register = | |
+ (u32)cq_host->crypto_capabilities.config_array_ptr * 0x100; | |
+ | |
+ cq_host->crypto_cap_array = | |
+ devm_kcalloc(dev, cq_host->crypto_capabilities.num_crypto_cap, | |
+ sizeof(cq_host->crypto_cap_array[0]), GFP_KERNEL); | |
+ if (!cq_host->crypto_cap_array) { | |
+ err = -ENOMEM; | |
+ goto out; | |
+ } | |
+ | |
+ /* | |
+ * CCAP.CFGC is off by one, so the actual number of crypto | |
+ * configurations (a.k.a. keyslots) is CCAP.CFGC + 1. | |
+ */ | |
+ num_keyslots = cq_host->crypto_capabilities.config_count + 1; | |
+ | |
+ err = devm_blk_ksm_init(dev, ksm, num_keyslots); | |
+ if (err) | |
+ goto out; | |
+ | |
+ ksm->ksm_ll_ops = cqhci_ksm_ops; | |
+ ksm->dev = dev; | |
+ | |
+ /* Unfortunately, CQHCI crypto only supports 32 DUN bits. */ | |
+ ksm->max_dun_bytes_supported = 4; | |
+ | |
+ ksm->features = BLK_CRYPTO_FEATURE_STANDARD_KEYS; | |
+ | |
+ /* | |
+ * Cache all the crypto capabilities and advertise the supported crypto | |
+ * modes and data unit sizes to the block layer. | |
+ */ | |
+ for (cap_idx = 0; cap_idx < cq_host->crypto_capabilities.num_crypto_cap; | |
+ cap_idx++) { | |
+ cq_host->crypto_cap_array[cap_idx].reg_val = | |
+ cpu_to_le32(cqhci_readl(cq_host, | |
+ CQHCI_CRYPTOCAP + | |
+ cap_idx * sizeof(__le32))); | |
+ blk_mode_num = cqhci_find_blk_crypto_mode( | |
+ cq_host->crypto_cap_array[cap_idx]); | |
+ if (blk_mode_num == BLK_ENCRYPTION_MODE_INVALID) | |
+ continue; | |
+ ksm->crypto_modes_supported[blk_mode_num] |= | |
+ cq_host->crypto_cap_array[cap_idx].sdus_mask * 512; | |
+ } | |
+ | |
+ /* Clear all the keyslots so that we start in a known state. */ | |
+ for (slot = 0; slot < num_keyslots; slot++) | |
+ cqhci_crypto_clear_keyslot(cq_host, slot); | |
+ | |
+ /* CQHCI crypto requires the use of 128-bit task descriptors. */ | |
+ cq_host->caps |= CQHCI_TASK_DESC_SZ_128; | |
+ | |
+ return 0; | |
+ | |
+out: | |
+ mmc->caps2 &= ~MMC_CAP2_CRYPTO; | |
+ return err; | |
+} | |
diff --git a/drivers/mmc/host/cqhci-crypto.h b/drivers/mmc/host/cqhci-crypto.h | |
new file mode 100644 | |
index 000000000000..d7fb084f563b | |
--- /dev/null | |
+++ b/drivers/mmc/host/cqhci-crypto.h | |
@@ -0,0 +1,50 @@ | |
+/* SPDX-License-Identifier: GPL-2.0-only */ | |
+/* | |
+ * CQHCI crypto engine (inline encryption) support | |
+ * | |
+ * Copyright 2020 Google LLC | |
+ */ | |
+ | |
+#ifndef LINUX_MMC_CQHCI_CRYPTO_H | |
+#define LINUX_MMC_CQHCI_CRYPTO_H | |
+ | |
+#include <linux/mmc/host.h> | |
+ | |
+#include "cqhci.h" | |
+ | |
+#ifdef CONFIG_MMC_CRYPTO | |
+ | |
+int cqhci_crypto_init(struct cqhci_host *host); | |
+ | |
+/* | |
+ * Returns the crypto bits that should be set in bits 64-127 of the | |
+ * task descriptor. | |
+ */ | |
+static inline u64 cqhci_crypto_prep_task_desc(struct mmc_request *mrq) | |
+{ | |
+ if (!mrq->crypto_ctx) | |
+ return 0; | |
+ | |
+ /* We set max_dun_bytes_supported=4, so all DUNs should be 32-bit. */ | |
+ WARN_ON_ONCE(mrq->crypto_ctx->bc_dun[0] > U32_MAX); | |
+ | |
+ return CQHCI_CRYPTO_ENABLE_BIT | | |
+ CQHCI_CRYPTO_KEYSLOT(mrq->crypto_key_slot) | | |
+ mrq->crypto_ctx->bc_dun[0]; | |
+} | |
+ | |
+#else /* CONFIG_MMC_CRYPTO */ | |
+ | |
+static inline int cqhci_crypto_init(struct cqhci_host *host) | |
+{ | |
+ return 0; | |
+} | |
+ | |
+static inline u64 cqhci_crypto_prep_task_desc(struct mmc_request *mrq) | |
+{ | |
+ return 0; | |
+} | |
+ | |
+#endif /* !CONFIG_MMC_CRYPTO */ | |
+ | |
+#endif /* LINUX_MMC_CQHCI_CRYPTO_H */ | |
diff --git a/drivers/mmc/host/cqhci.h b/drivers/mmc/host/cqhci.h | |
index 89bf6adbce8c..ba9387ed90eb 100644 | |
--- a/drivers/mmc/host/cqhci.h | |
+++ b/drivers/mmc/host/cqhci.h | |
@@ -22,10 +22,13 @@ | |
/* capabilities */ | |
#define CQHCI_CAP 0x04 | |
+#define CQHCI_CAP_CS 0x10000000 /* Crypto Support */ | |
+ | |
/* configuration */ | |
#define CQHCI_CFG 0x08 | |
#define CQHCI_DCMD 0x00001000 | |
#define CQHCI_TASK_DESC_SZ 0x00000100 | |
+#define CQHCI_CRYPTO_GENERAL_ENABLE 0x00000002 | |
#define CQHCI_ENABLE 0x00000001 | |
/* control */ | |
@@ -39,8 +42,11 @@ | |
#define CQHCI_IS_TCC BIT(1) | |
#define CQHCI_IS_RED BIT(2) | |
#define CQHCI_IS_TCL BIT(3) | |
+#define CQHCI_IS_GCE BIT(4) /* General Crypto Error */ | |
+#define CQHCI_IS_ICCE BIT(5) /* Invalid Crypto Config Error */ | |
-#define CQHCI_IS_MASK (CQHCI_IS_TCC | CQHCI_IS_RED) | |
+#define CQHCI_IS_MASK (CQHCI_IS_TCC | CQHCI_IS_RED | \ | |
+ CQHCI_IS_GCE | CQHCI_IS_ICCE) | |
/* interrupt status enable */ | |
#define CQHCI_ISTE 0x14 | |
@@ -78,6 +84,9 @@ | |
/* task clear */ | |
#define CQHCI_TCLR 0x38 | |
+/* task descriptor processing error */ | |
+#define CQHCI_TDPE 0x3c | |
+ | |
/* send status config 1 */ | |
#define CQHCI_SSC1 0x40 | |
#define CQHCI_SSC1_CBC_MASK GENMASK(19, 16) | |
@@ -107,6 +116,10 @@ | |
/* command response argument */ | |
#define CQHCI_CRA 0x5C | |
+/* crypto capabilities */ | |
+#define CQHCI_CCAP 0x100 | |
+#define CQHCI_CRYPTOCAP 0x104 | |
+ | |
#define CQHCI_INT_ALL 0xF | |
#define CQHCI_IC_DEFAULT_ICCTH 31 | |
#define CQHCI_IC_DEFAULT_ICTOVAL 1 | |
@@ -133,11 +146,70 @@ | |
#define CQHCI_CMD_TIMING(x) (((x) & 1) << 22) | |
#define CQHCI_RESP_TYPE(x) (((x) & 0x3) << 23) | |
+/* crypto task descriptor fields (for bits 64-127 of task descriptor) */ | |
+#define CQHCI_CRYPTO_ENABLE_BIT (1ULL << 47) | |
+#define CQHCI_CRYPTO_KEYSLOT(x) ((u64)(x) << 32) | |
+ | |
/* transfer descriptor fields */ | |
#define CQHCI_DAT_LENGTH(x) (((x) & 0xFFFF) << 16) | |
#define CQHCI_DAT_ADDR_LO(x) (((x) & 0xFFFFFFFF) << 32) | |
#define CQHCI_DAT_ADDR_HI(x) (((x) & 0xFFFFFFFF) << 0) | |
+/* CCAP - Crypto Capability 100h */ | |
+union cqhci_crypto_capabilities { | |
+ __le32 reg_val; | |
+ struct { | |
+ u8 num_crypto_cap; | |
+ u8 config_count; | |
+ u8 reserved; | |
+ u8 config_array_ptr; | |
+ }; | |
+}; | |
+ | |
+enum cqhci_crypto_key_size { | |
+ CQHCI_CRYPTO_KEY_SIZE_INVALID = 0, | |
+ CQHCI_CRYPTO_KEY_SIZE_128 = 1, | |
+ CQHCI_CRYPTO_KEY_SIZE_192 = 2, | |
+ CQHCI_CRYPTO_KEY_SIZE_256 = 3, | |
+ CQHCI_CRYPTO_KEY_SIZE_512 = 4, | |
+}; | |
+ | |
+enum cqhci_crypto_alg { | |
+ CQHCI_CRYPTO_ALG_AES_XTS = 0, | |
+ CQHCI_CRYPTO_ALG_BITLOCKER_AES_CBC = 1, | |
+ CQHCI_CRYPTO_ALG_AES_ECB = 2, | |
+ CQHCI_CRYPTO_ALG_ESSIV_AES_CBC = 3, | |
+}; | |
+ | |
+/* x-CRYPTOCAP - Crypto Capability X */ | |
+union cqhci_crypto_cap_entry { | |
+ __le32 reg_val; | |
+ struct { | |
+ u8 algorithm_id; | |
+ u8 sdus_mask; /* Supported data unit size mask */ | |
+ u8 key_size; | |
+ u8 reserved; | |
+ }; | |
+}; | |
+ | |
+#define CQHCI_CRYPTO_CONFIGURATION_ENABLE (1 << 7) | |
+#define CQHCI_CRYPTO_KEY_MAX_SIZE 64 | |
+/* x-CRYPTOCFG - Crypto Configuration X */ | |
+union cqhci_crypto_cfg_entry { | |
+ __le32 reg_val[32]; | |
+ struct { | |
+ u8 crypto_key[CQHCI_CRYPTO_KEY_MAX_SIZE]; | |
+ u8 data_unit_size; | |
+ u8 crypto_cap_idx; | |
+ u8 reserved_1; | |
+ u8 config_enable; | |
+ u8 reserved_multi_host; | |
+ u8 reserved_2; | |
+ u8 vsb[2]; | |
+ u8 reserved_3[56]; | |
+ }; | |
+}; | |
+ | |
struct cqhci_host_ops; | |
struct mmc_host; | |
struct mmc_request; | |
@@ -196,6 +268,12 @@ struct cqhci_host { | |
struct completion halt_comp; | |
wait_queue_head_t wait_queue; | |
struct cqhci_slot *slot; | |
+ | |
+#ifdef CONFIG_MMC_CRYPTO | |
+ union cqhci_crypto_capabilities crypto_capabilities; | |
+ union cqhci_crypto_cap_entry *crypto_cap_array; | |
+ u32 crypto_cfg_register; | |
+#endif | |
}; | |
struct cqhci_host_ops { | |
@@ -208,6 +286,10 @@ struct cqhci_host_ops { | |
u64 *data); | |
void (*pre_enable)(struct mmc_host *mmc); | |
void (*post_disable)(struct mmc_host *mmc); | |
+#ifdef CONFIG_MMC_CRYPTO | |
+ int (*program_key)(struct cqhci_host *cq_host, | |
+ const union cqhci_crypto_cfg_entry *cfg, int slot); | |
+#endif | |
}; | |
static inline void cqhci_writel(struct cqhci_host *host, u32 val, int reg) | |
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c | |
index 588b9a564117..bef081d35305 100644 | |
--- a/drivers/mmc/host/sdhci-msm.c | |
+++ b/drivers/mmc/host/sdhci-msm.c | |
@@ -13,6 +13,7 @@ | |
#include <linux/pm_opp.h> | |
#include <linux/slab.h> | |
#include <linux/iopoll.h> | |
+#include <linux/qcom_scm.h> | |
#include <linux/regulator/consumer.h> | |
#include <linux/interconnect.h> | |
#include <linux/pinctrl/consumer.h> | |
@@ -256,10 +257,12 @@ struct sdhci_msm_variant_info { | |
struct sdhci_msm_host { | |
struct platform_device *pdev; | |
void __iomem *core_mem; /* MSM SDCC mapped address */ | |
+ void __iomem *ice_mem; /* MSM ICE mapped address (if available) */ | |
int pwr_irq; /* power irq */ | |
struct clk *bus_clk; /* SDHC bus voter clock */ | |
struct clk *xo_clk; /* TCXO clk needed for FLL feature of cm_dll*/ | |
- struct clk_bulk_data bulk_clks[4]; /* core, iface, cal, sleep clocks */ | |
+ /* core, iface, cal, sleep, and ice clocks */ | |
+ struct clk_bulk_data bulk_clks[5]; | |
unsigned long clk_rate; | |
struct mmc_host *mmc; | |
struct opp_table *opp_table; | |
@@ -1785,6 +1788,246 @@ static void sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock) | |
__sdhci_msm_set_clock(host, clock); | |
} | |
+/*****************************************************************************\ | |
+ * * | |
+ * Inline Crypto Engine (ICE) support * | |
+ * * | |
+\*****************************************************************************/ | |
+ | |
+#ifdef CONFIG_MMC_CRYPTO | |
+ | |
+#define AES_256_XTS_KEY_SIZE 64 | |
+ | |
+/* QCOM ICE registers */ | |
+ | |
+#define QCOM_ICE_REG_VERSION 0x0008 | |
+ | |
+#define QCOM_ICE_REG_FUSE_SETTING 0x0010 | |
+#define QCOM_ICE_FUSE_SETTING_MASK 0x1 | |
+#define QCOM_ICE_FORCE_HW_KEY0_SETTING_MASK 0x2 | |
+#define QCOM_ICE_FORCE_HW_KEY1_SETTING_MASK 0x4 | |
+ | |
+#define QCOM_ICE_REG_BIST_STATUS 0x0070 | |
+#define QCOM_ICE_BIST_STATUS_MASK 0xF0000000 | |
+ | |
+#define QCOM_ICE_REG_ADVANCED_CONTROL 0x1000 | |
+ | |
+#define sdhci_msm_ice_writel(host, val, reg) \ | |
+ writel((val), (host)->ice_mem + (reg)) | |
+#define sdhci_msm_ice_readl(host, reg) \ | |
+ readl((host)->ice_mem + (reg)) | |
+ | |
+static bool sdhci_msm_ice_supported(struct sdhci_msm_host *msm_host) | |
+{ | |
+ struct device *dev = mmc_dev(msm_host->mmc); | |
+ u32 regval = sdhci_msm_ice_readl(msm_host, QCOM_ICE_REG_VERSION); | |
+ int major = regval >> 24; | |
+ int minor = (regval >> 16) & 0xFF; | |
+ int step = regval & 0xFFFF; | |
+ | |
+ /* For now this driver only supports ICE version 3. */ | |
+ if (major != 3) { | |
+ dev_warn(dev, "Unsupported ICE version: v%d.%d.%d\n", | |
+ major, minor, step); | |
+ return false; | |
+ } | |
+ | |
+ dev_info(dev, "Found QC Inline Crypto Engine (ICE) v%d.%d.%d\n", | |
+ major, minor, step); | |
+ | |
+ /* If fuses are blown, ICE might not work in the standard way. */ | |
+ regval = sdhci_msm_ice_readl(msm_host, QCOM_ICE_REG_FUSE_SETTING); | |
+ if (regval & (QCOM_ICE_FUSE_SETTING_MASK | | |
+ QCOM_ICE_FORCE_HW_KEY0_SETTING_MASK | | |
+ QCOM_ICE_FORCE_HW_KEY1_SETTING_MASK)) { | |
+ dev_warn(dev, "Fuses are blown; ICE is unusable!\n"); | |
+ return false; | |
+ } | |
+ return true; | |
+} | |
+ | |
+static inline struct clk *sdhci_msm_ice_get_clk(struct device *dev) | |
+{ | |
+ return devm_clk_get(dev, "ice"); | |
+} | |
+ | |
+static int sdhci_msm_ice_init(struct sdhci_msm_host *msm_host, | |
+ struct cqhci_host *cq_host) | |
+{ | |
+ struct mmc_host *mmc = msm_host->mmc; | |
+ struct device *dev = mmc_dev(mmc); | |
+ struct resource *res; | |
+ int err; | |
+ | |
+ if (!(cqhci_readl(cq_host, CQHCI_CAP) & CQHCI_CAP_CS)) | |
+ return 0; | |
+ | |
+ res = platform_get_resource_byname(msm_host->pdev, IORESOURCE_MEM, | |
+ "ice"); | |
+ if (!res) { | |
+ dev_warn(dev, "ICE registers not found\n"); | |
+ goto disable; | |
+ } | |
+ | |
+ if (!qcom_scm_ice_available()) { | |
+ dev_warn(dev, "ICE SCM interface not found\n"); | |
+ goto disable; | |
+ } | |
+ | |
+ msm_host->ice_mem = devm_ioremap_resource(dev, res); | |
+ if (IS_ERR(msm_host->ice_mem)) { | |
+ err = PTR_ERR(msm_host->ice_mem); | |
+ dev_err(dev, "Failed to map ICE registers; err=%d\n", err); | |
+ return err; | |
+ } | |
+ | |
+ if (!sdhci_msm_ice_supported(msm_host)) | |
+ goto disable; | |
+ | |
+ mmc->caps2 |= MMC_CAP2_CRYPTO; | |
+ return 0; | |
+ | |
+disable: | |
+ dev_warn(dev, "Disabling inline encryption support\n"); | |
+ return 0; | |
+} | |
+ | |
+static void sdhci_msm_ice_low_power_mode_enable(struct sdhci_msm_host *msm_host) | |
+{ | |
+ u32 regval; | |
+ | |
+ regval = sdhci_msm_ice_readl(msm_host, QCOM_ICE_REG_ADVANCED_CONTROL); | |
+ /* | |
+ * Enable low power mode sequence | |
+ * [0]-0, [1]-0, [2]-0, [3]-E, [4]-0, [5]-0, [6]-0, [7]-0 | |
+ */ | |
+ regval |= 0x7000; | |
+ sdhci_msm_ice_writel(msm_host, regval, QCOM_ICE_REG_ADVANCED_CONTROL); | |
+} | |
+ | |
+static void sdhci_msm_ice_optimization_enable(struct sdhci_msm_host *msm_host) | |
+{ | |
+ u32 regval; | |
+ | |
+ /* ICE Optimizations Enable Sequence */ | |
+ regval = sdhci_msm_ice_readl(msm_host, QCOM_ICE_REG_ADVANCED_CONTROL); | |
+ regval |= 0xD807100; | |
+ /* ICE HPG requires delay before writing */ | |
+ udelay(5); | |
+ sdhci_msm_ice_writel(msm_host, regval, QCOM_ICE_REG_ADVANCED_CONTROL); | |
+ udelay(5); | |
+} | |
+ | |
+/* | |
+ * Wait until the ICE BIST (built-in self-test) has completed. | |
+ * | |
+ * This may be necessary before ICE can be used. | |
+ * | |
+ * Note that we don't really care whether the BIST passed or failed; we really | |
+ * just want to make sure that it isn't still running. This is because (a) the | |
+ * BIST is a FIPS compliance thing that never fails in practice, (b) ICE is | |
+ * documented to reject crypto requests if the BIST fails, so we needn't do it | |
+ * in software too, and (c) properly testing storage encryption requires testing | |
+ * the full storage stack anyway, and not relying on hardware-level self-tests. | |
+ */ | |
+static int sdhci_msm_ice_wait_bist_status(struct sdhci_msm_host *msm_host) | |
+{ | |
+ u32 regval; | |
+ int err; | |
+ | |
+ err = readl_poll_timeout(msm_host->ice_mem + QCOM_ICE_REG_BIST_STATUS, | |
+ regval, !(regval & QCOM_ICE_BIST_STATUS_MASK), | |
+ 50, 5000); | |
+ if (err) | |
+ dev_err(mmc_dev(msm_host->mmc), | |
+ "Timed out waiting for ICE self-test to complete\n"); | |
+ return err; | |
+} | |
+ | |
+static void sdhci_msm_ice_enable(struct sdhci_msm_host *msm_host) | |
+{ | |
+ if (!(msm_host->mmc->caps2 & MMC_CAP2_CRYPTO)) | |
+ return; | |
+ sdhci_msm_ice_low_power_mode_enable(msm_host); | |
+ sdhci_msm_ice_optimization_enable(msm_host); | |
+ sdhci_msm_ice_wait_bist_status(msm_host); | |
+} | |
+ | |
+static int __maybe_unused sdhci_msm_ice_resume(struct sdhci_msm_host *msm_host) | |
+{ | |
+ if (!(msm_host->mmc->caps2 & MMC_CAP2_CRYPTO)) | |
+ return 0; | |
+ return sdhci_msm_ice_wait_bist_status(msm_host); | |
+} | |
+ | |
+/* | |
+ * Program a key into a QC ICE keyslot, or evict a keyslot. QC ICE requires | |
+ * vendor-specific SCM calls for this; it doesn't support the standard way. | |
+ */ | |
+static int sdhci_msm_program_key(struct cqhci_host *cq_host, | |
+ const union cqhci_crypto_cfg_entry *cfg, | |
+ int slot) | |
+{ | |
+ struct device *dev = mmc_dev(cq_host->mmc); | |
+ union cqhci_crypto_cap_entry cap; | |
+ union { | |
+ u8 bytes[AES_256_XTS_KEY_SIZE]; | |
+ u32 words[AES_256_XTS_KEY_SIZE / sizeof(u32)]; | |
+ } key; | |
+ int i; | |
+ int err; | |
+ | |
+ if (!(cfg->config_enable & CQHCI_CRYPTO_CONFIGURATION_ENABLE)) | |
+ return qcom_scm_ice_invalidate_key(slot); | |
+ | |
+ /* Only AES-256-XTS has been tested so far. */ | |
+ cap = cq_host->crypto_cap_array[cfg->crypto_cap_idx]; | |
+ if (cap.algorithm_id != CQHCI_CRYPTO_ALG_AES_XTS || | |
+ cap.key_size != CQHCI_CRYPTO_KEY_SIZE_256) { | |
+ dev_err_ratelimited(dev, | |
+ "Unhandled crypto capability; algorithm_id=%d, key_size=%d\n", | |
+ cap.algorithm_id, cap.key_size); | |
+ return -EINVAL; | |
+ } | |
+ | |
+ memcpy(key.bytes, cfg->crypto_key, AES_256_XTS_KEY_SIZE); | |
+ | |
+ /* | |
+ * The SCM call byte-swaps the 32-bit words of the key. So we have to | |
+	 * do the same, in order for the final key to be correct. | 
+ */ | |
+ for (i = 0; i < ARRAY_SIZE(key.words); i++) | |
+ __cpu_to_be32s(&key.words[i]); | |
+ | |
+ err = qcom_scm_ice_set_key(slot, key.bytes, AES_256_XTS_KEY_SIZE, | |
+ QCOM_SCM_ICE_CIPHER_AES_256_XTS, | |
+ cfg->data_unit_size); | |
+ memzero_explicit(&key, sizeof(key)); | |
+ return err; | |
+} | |
+#else /* CONFIG_MMC_CRYPTO */ | |
+static inline struct clk *sdhci_msm_ice_get_clk(struct device *dev) | |
+{ | |
+ return NULL; | |
+} | |
+ | |
+static inline int sdhci_msm_ice_init(struct sdhci_msm_host *msm_host, | |
+ struct cqhci_host *cq_host) | |
+{ | |
+ return 0; | |
+} | |
+ | |
+static inline void sdhci_msm_ice_enable(struct sdhci_msm_host *msm_host) | |
+{ | |
+} | |
+ | |
+static inline int __maybe_unused | |
+sdhci_msm_ice_resume(struct sdhci_msm_host *msm_host) | |
+{ | |
+ return 0; | |
+} | |
+#endif /* !CONFIG_MMC_CRYPTO */ | |
+ | |
/*****************************************************************************\ | |
* * | |
* MSM Command Queue Engine (CQE) * | |
@@ -1803,6 +2046,16 @@ static u32 sdhci_msm_cqe_irq(struct sdhci_host *host, u32 intmask) | |
return 0; | |
} | |
+static void sdhci_msm_cqe_enable(struct mmc_host *mmc) | |
+{ | |
+ struct sdhci_host *host = mmc_priv(mmc); | |
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); | |
+ struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); | |
+ | |
+ sdhci_cqe_enable(mmc); | |
+ sdhci_msm_ice_enable(msm_host); | |
+} | |
+ | |
static void sdhci_msm_cqe_disable(struct mmc_host *mmc, bool recovery) | |
{ | |
struct sdhci_host *host = mmc_priv(mmc); | |
@@ -1852,8 +2105,11 @@ static void sdhci_msm_set_timeout(struct sdhci_host *host, struct mmc_command *c | |
} | |
static const struct cqhci_host_ops sdhci_msm_cqhci_ops = { | |
- .enable = sdhci_cqe_enable, | |
+ .enable = sdhci_msm_cqe_enable, | |
.disable = sdhci_msm_cqe_disable, | |
+#ifdef CONFIG_MMC_CRYPTO | |
+ .program_key = sdhci_msm_program_key, | |
+#endif | |
}; | |
static int sdhci_msm_cqe_add_host(struct sdhci_host *host, | |
@@ -1889,6 +2145,10 @@ static int sdhci_msm_cqe_add_host(struct sdhci_host *host, | |
dma64 = host->flags & SDHCI_USE_64_BIT_DMA; | |
+ ret = sdhci_msm_ice_init(msm_host, cq_host); | |
+ if (ret) | |
+ goto cleanup; | |
+ | |
ret = cqhci_init(cq_host, host->mmc, dma64); | |
if (ret) { | |
dev_err(&pdev->dev, "%s: CQE init: failed (%d)\n", | |
@@ -2339,6 +2599,11 @@ static int sdhci_msm_probe(struct platform_device *pdev) | |
clk = NULL; | |
msm_host->bulk_clks[3].clk = clk; | |
+ clk = sdhci_msm_ice_get_clk(&pdev->dev); | |
+ if (IS_ERR(clk)) | |
+ clk = NULL; | |
+ msm_host->bulk_clks[4].clk = clk; | |
+ | |
ret = clk_bulk_prepare_enable(ARRAY_SIZE(msm_host->bulk_clks), | |
msm_host->bulk_clks); | |
if (ret) | |
@@ -2549,12 +2814,15 @@ static __maybe_unused int sdhci_msm_runtime_resume(struct device *dev) | |
* Whenever core-clock is gated dynamically, it's needed to | |
* restore the SDR DLL settings when the clock is ungated. | |
*/ | |
- if (msm_host->restore_dll_config && msm_host->clk_rate) | |
+ if (msm_host->restore_dll_config && msm_host->clk_rate) { | |
ret = sdhci_msm_restore_sdr_dll_config(host); | |
+ if (ret) | |
+ return ret; | |
+ } | |
dev_pm_opp_set_rate(dev, msm_host->clk_rate); | |
- return ret; | |
+ return sdhci_msm_ice_resume(msm_host); | |
} | |
static const struct dev_pm_ops sdhci_msm_pm_ops = { | |
diff --git a/drivers/mmc/host/sdhci-of-dwcmshc.c b/drivers/mmc/host/sdhci-of-dwcmshc.c | |
index 59d8d96ce206..bc58d8ee3c62 100644 | |
--- a/drivers/mmc/host/sdhci-of-dwcmshc.c | |
+++ b/drivers/mmc/host/sdhci-of-dwcmshc.c | |
@@ -13,8 +13,10 @@ | |
#include <linux/module.h> | |
#include <linux/of.h> | |
#include <linux/sizes.h> | |
+#include <linux/delay.h> | |
#include "sdhci-pltfm.h" | |
+#include "sdhci-of-dwcmshc.h" | |
#define SDHCI_DWCMSHC_ARG2_STUFF GENMASK(31, 16) | |
@@ -26,8 +28,266 @@ | |
struct dwcmshc_priv { | |
struct clk *bus_clk; | |
+ void __iomem *soc_base; | |
+ bool is_emmc_card; | |
+ bool pull_up_en; | |
+ bool io_fixed_1v8; | |
+ bool wprtn_ignore; | |
}; | |
+#define HS400_DELAY_LINE 24 | |
+ | |
+static uint32_t delay_line = 50; | |
+ | |
+static void sdhci_phy_1_8v_init_no_pull(struct sdhci_host *host) | |
+{ | |
+ uint32_t val; | |
+ sdhci_writel(host, 1, DWC_MSHC_PTR_PHY_R); | |
+ sdhci_writeb(host, 1 << 4, PHY_SDCLKDL_CNFG_R); | |
+ sdhci_writeb(host, 0x40, PHY_SDCLKDL_DC_R); | |
+ | |
+ val = sdhci_readb(host, PHY_SDCLKDL_CNFG_R); | |
+ val &= ~(1 << 4); | |
+ sdhci_writeb(host, val, PHY_SDCLKDL_CNFG_R); | |
+ | |
+ | |
+ val = sdhci_readw(host, PHY_CMDPAD_CNFG_R); | |
+ sdhci_writew(host, val | 1, PHY_CMDPAD_CNFG_R); | |
+ | |
+ val = sdhci_readw(host, PHY_DATAPAD_CNFG_R); | |
+ sdhci_writew(host, val | 1, PHY_DATAPAD_CNFG_R); | |
+ | |
+ val = sdhci_readw(host, PHY_RSTNPAD_CNFG_R); | |
+ sdhci_writew(host, val | 1, PHY_RSTNPAD_CNFG_R); | |
+ | |
+ val = sdhci_readw(host, PHY_STBPAD_CNFG_R); | |
+ sdhci_writew(host, val | 1, PHY_STBPAD_CNFG_R); | |
+ | |
+ val = sdhci_readb(host, PHY_DLL_CTRL_R); | |
+ sdhci_writeb(host, val | 1, PHY_DLL_CTRL_R); | |
+} | |
+ | |
+static void sdhci_phy_3_3v_init_no_pull(struct sdhci_host *host) | |
+{ | |
+ uint32_t val; | |
+ sdhci_writel(host, 1, DWC_MSHC_PTR_PHY_R); | |
+ sdhci_writeb(host, 1 << 4, PHY_SDCLKDL_CNFG_R); | |
+ sdhci_writeb(host, 0x40, PHY_SDCLKDL_DC_R); | |
+ | |
+ val = sdhci_readb(host, PHY_SDCLKDL_CNFG_R); | |
+ val &= ~(1 << 4); | |
+ sdhci_writeb(host, val, PHY_SDCLKDL_CNFG_R); | |
+ | |
+ val = sdhci_readw(host, PHY_CMDPAD_CNFG_R); | |
+ sdhci_writew(host, val | 2, PHY_CMDPAD_CNFG_R); | |
+ | |
+ val = sdhci_readw(host, PHY_DATAPAD_CNFG_R); | |
+ sdhci_writew(host, val | 2, PHY_DATAPAD_CNFG_R); | |
+ | |
+ val = sdhci_readw(host, PHY_RSTNPAD_CNFG_R); | |
+ sdhci_writew(host, val | 2, PHY_RSTNPAD_CNFG_R); | |
+ | |
+ val = sdhci_readw(host, PHY_STBPAD_CNFG_R); | |
+ sdhci_writew(host, val | 2, PHY_STBPAD_CNFG_R); | |
+ | |
+ val = sdhci_readb(host, PHY_DLL_CTRL_R); | |
+ sdhci_writeb(host, val | 1, PHY_DLL_CTRL_R); | |
+} | |
+ | |
+static void snps_phy_1_8v_init(struct sdhci_host *host) | |
+{ | |
+ struct sdhci_pltfm_host *pltfm_host; | |
+ struct dwcmshc_priv *priv; | |
+ u32 val; | |
+ | |
+ pltfm_host = sdhci_priv(host); | |
+ priv = sdhci_pltfm_priv(pltfm_host); | |
+ if (priv->pull_up_en == 0) { | |
+ sdhci_phy_1_8v_init_no_pull(host); | |
+ return; | |
+ } | |
+ | |
+ //set driving force | |
+ sdhci_writel(host, (1 << PHY_RSTN) | (0xc << PAD_SP) | (0xc << PAD_SN), PHY_CNFG_R); | |
+ | |
+ //disable delay lane | |
+ sdhci_writeb(host, 1 << UPDATE_DC, PHY_SDCLKDL_CNFG_R); | |
+ //set delay lane | |
+ sdhci_writeb(host, delay_line, PHY_SDCLKDL_DC_R); | |
+ sdhci_writeb(host, 0xa, PHY_DLL_CNFG2_R); | |
+ //enable delay lane | |
+ val = sdhci_readb(host, PHY_SDCLKDL_CNFG_R); | |
+ val &= ~(1 << UPDATE_DC); | |
+ sdhci_writeb(host, val, PHY_SDCLKDL_CNFG_R); | |
+ | |
+ val = (1 << RXSEL) | (1 << WEAKPULL_EN) | (3 << TXSLEW_CTRL_P) | (3 << TXSLEW_CTRL_N); | |
+ sdhci_writew(host, val, PHY_CMDPAD_CNFG_R); | |
+ sdhci_writew(host, val, PHY_DATAPAD_CNFG_R); | |
+ sdhci_writew(host, val, PHY_RSTNPAD_CNFG_R); | |
+ | |
+ val = (3 << TXSLEW_CTRL_P) | (3 << TXSLEW_CTRL_N); | |
+ sdhci_writew(host, val, PHY_CLKPAD_CNFG_R); | |
+ | |
+ val = (1 << RXSEL) | (2 << WEAKPULL_EN) | (3 << TXSLEW_CTRL_P) | (3 << TXSLEW_CTRL_N); | |
+ sdhci_writew(host, val, PHY_STBPAD_CNFG_R); | |
+ | |
+ /* enable data strobe mode */ | |
+ sdhci_writeb(host, 3 << SLV_INPSEL, PHY_DLLDL_CNFG_R); | |
+ sdhci_writeb(host, (1 << DLL_EN), PHY_DLL_CTRL_R); | |
+} | |
+ | |
+static void snps_phy_3_3v_init(struct sdhci_host *host) | |
+{ | |
+ struct sdhci_pltfm_host *pltfm_host; | |
+ struct dwcmshc_priv *priv; | |
+ u32 val; | |
+ | |
+ pltfm_host = sdhci_priv(host); | |
+ priv = sdhci_pltfm_priv(pltfm_host); | |
+ if (priv->pull_up_en == 0) { | |
+ sdhci_phy_3_3v_init_no_pull(host); | |
+ return; | |
+ } | |
+ | |
+ //set driving force | |
+ sdhci_writel(host, (1 << PHY_RSTN) | (0xc << PAD_SP) | (0xc << PAD_SN), PHY_CNFG_R); | |
+ | |
+ //disable delay lane | |
+ sdhci_writeb(host, 1 << UPDATE_DC, PHY_SDCLKDL_CNFG_R); | |
+ //set delay lane | |
+ sdhci_writeb(host, delay_line, PHY_SDCLKDL_DC_R); | |
+ sdhci_writeb(host, 0xa, PHY_DLL_CNFG2_R); | |
+ //enable delay lane | |
+ val = sdhci_readb(host, PHY_SDCLKDL_CNFG_R); | |
+ val &= ~(1 << UPDATE_DC); | |
+ sdhci_writeb(host, val, PHY_SDCLKDL_CNFG_R); | |
+ | |
+ val = (2 << RXSEL) | (1 << WEAKPULL_EN) | (3 << TXSLEW_CTRL_P) | (3 << TXSLEW_CTRL_N); | |
+ sdhci_writew(host, val, PHY_CMDPAD_CNFG_R); | |
+ sdhci_writew(host, val, PHY_DATAPAD_CNFG_R); | |
+ sdhci_writew(host, val, PHY_RSTNPAD_CNFG_R); | |
+ | |
+ val = (3 << TXSLEW_CTRL_P) | (3 << TXSLEW_CTRL_N); | |
+ sdhci_writew(host, val, PHY_CLKPAD_CNFG_R); | |
+ | |
+ val = (2 << RXSEL) | (2 << WEAKPULL_EN) | (3 << TXSLEW_CTRL_P) | (3 << TXSLEW_CTRL_N); | |
+ sdhci_writew(host, val, PHY_STBPAD_CNFG_R); | |
+} | |
+ | |
+static int __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode) | |
+{ | |
+ #define DW_SDHCI_TUNING_LOOP_COUNT 128 | |
+ int i; | |
+ /* | |
+ * Issue opcode repeatedly till Execute Tuning is set to 0 or the number | |
+ * of loops reaches tuning loop count. | |
+ */ | |
+ for (i = 0; i < DW_SDHCI_TUNING_LOOP_COUNT; i++) { | |
+ u16 ctrl; | |
+ | |
+ sdhci_send_tuning(host, opcode); | |
+ | |
+ if (!host->tuning_done) { | |
+ pr_debug("%s: Tuning timeout, falling back to fixed sampling clock\n", | |
+ mmc_hostname(host->mmc)); | |
+ sdhci_abort_tuning(host, opcode); | |
+ return -ETIMEDOUT; | |
+ } | |
+ | |
+ /* Spec does not require a delay between tuning cycles */ | |
+ if (host->tuning_delay > 0) | |
+ mdelay(host->tuning_delay); | |
+ | |
+ ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); | |
+ if (!(ctrl & SDHCI_CTRL_EXEC_TUNING)) { | |
+ if (ctrl & SDHCI_CTRL_TUNED_CLK) | |
+ return 0; /* Success! */ | |
+ break; | |
+ } | |
+ } | |
+ | |
+ pr_info("%s: Tuning failed, falling back to fixed sampling clock\n", | |
+ mmc_hostname(host->mmc)); | |
+ printk("%s: Tuning failed, falling back to fixed sampling clock\n", | |
+ mmc_hostname(host->mmc)); | |
+ sdhci_reset_tuning(host); | |
+ return -EAGAIN; | |
+} | |
+ | |
+static int snps_execute_tuning(struct sdhci_host *host, u32 opcode) | |
+{ | |
+ u32 val = 0; | |
+ | |
+ if (host->flags & SDHCI_HS400_TUNING) { | |
+ return 0; | |
+ } | |
+ | |
+ sdhci_writeb(host, 3 << INPSEL_CNFG, PHY_ATDL_CNFG_R); | |
+ | |
+ val = sdhci_readl(host, AT_CTRL_R); | |
+ | |
+ val &= ~((1 << CI_SEL) | (1 << RPT_TUNE_ERR)\ | |
+ | (1 << SW_TUNE_EN) |(0xf << WIN_EDGE_SEL)); | |
+ val |= (1 << AT_EN) | (1 << SWIN_TH_EN) | (1 << TUNE_CLK_STOP_EN)\ | |
+ | (1 << PRE_CHANGE_DLY) | (3 << POST_CHANGE_DLY) | (9 << SWIN_TH_VAL); | |
+ | |
+ sdhci_writel(host, val, AT_CTRL_R); | |
+ val = sdhci_readl(host, AT_CTRL_R); | |
+ if(!(val & (1 << AT_EN))) { | |
+ pr_err("*****Auto Tuning is NOT Enable!!!\n"); | |
+ return -1; | |
+ } | |
+ | |
+ val &= ~(1 << AT_EN); | |
+ sdhci_writel(host, val, AT_CTRL_R); | |
+ | |
+ sdhci_start_tuning(host); | |
+ | |
+ host->tuning_err = __sdhci_execute_tuning(host, opcode); | |
+ if (host->tuning_err) { | |
+ val &= ~(1 << AT_EN); | |
+ sdhci_writel(host, val, AT_CTRL_R); | |
+ return -1; | |
+ } | |
+ | |
+ sdhci_end_tuning(host); | |
+ | |
+ return 0; | |
+} | |
+ | |
+static void snps_sdhci_reset(struct sdhci_host *host, u8 mask) | |
+{ | |
+ struct sdhci_pltfm_host *pltfm_host; | |
+ struct dwcmshc_priv *priv; | |
+ u8 emmc_ctl; | |
+ //u32 soc_reg; | |
+ | |
+ pltfm_host = sdhci_priv(host); | |
+ priv = sdhci_pltfm_priv(pltfm_host); | |
+ | |
+	/* SoC reset, to fix host reset error */ | 
+ //soc_reg = readl( priv->soc_base); | |
+ //soc_reg &= ~1; | |
+ //writel(soc_reg, priv->soc_base); | |
+ //soc_reg |= 1; | |
+ //writel(soc_reg, priv->soc_base); | |
+ | |
+ /*host reset*/ | |
+ sdhci_reset(host, mask); | |
+ /*fix host reset error*/ | |
+ mdelay(100); | |
+ | |
+ emmc_ctl = sdhci_readw(host, EMMC_CTRL_R); | |
+ if (priv->is_emmc_card) { | |
+ snps_phy_1_8v_init(host); | |
+ emmc_ctl |= (1 << CARD_IS_EMMC); | |
+ } else { | |
+ snps_phy_3_3v_init(host); | |
+ emmc_ctl &=~(1 << CARD_IS_EMMC); | |
+ } | |
+ sdhci_writeb(host, emmc_ctl, EMMC_CTRL_R); | |
+ sdhci_writeb(host, 0x25, PHY_DLL_CNFG1_R); | |
+} | |
/* | |
* If DMA addr spans 128MB boundary, we split the DMA transfer into two | |
* so that each DMA transfer doesn't exceed the boundary. | |
@@ -77,6 +337,8 @@ static void dwcmshc_request(struct mmc_host *mmc, struct mmc_request *mrq) | |
static void dwcmshc_set_uhs_signaling(struct sdhci_host *host, | |
unsigned int timing) | |
{ | |
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); | |
+ struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host); | |
u16 ctrl_2; | |
ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); | |
@@ -97,7 +359,45 @@ static void dwcmshc_set_uhs_signaling(struct sdhci_host *host, | |
ctrl_2 |= SDHCI_CTRL_UHS_DDR50; | |
else if (timing == MMC_TIMING_MMC_HS400) | |
ctrl_2 |= DWCMSHC_CTRL_HS400; | |
+ | |
+ if (priv->io_fixed_1v8) | |
+ ctrl_2 |= SDHCI_CTRL_VDD_180; | |
+ | |
sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2); | |
+ | |
+ if (timing == MMC_TIMING_MMC_HS400) { | |
+ // //disable delay lane | |
+ // sdhci_writeb(host, 1 << UPDATE_DC, PHY_SDCLKDL_CNFG_R); | |
+ // //set delay lane | |
+ // sdhci_writeb(host, delay_line, PHY_SDCLKDL_DC_R); | |
+ // //enable delay lane | |
+ // reg = sdhci_readb(host, PHY_SDCLKDL_CNFG_R); | |
+ // reg &= ~(1 << UPDATE_DC); | |
+ // sdhci_writeb(host, reg, PHY_SDCLKDL_CNFG_R); | |
+ | |
+ //disable auto tuning | |
+ u32 reg = sdhci_readl(host, AT_CTRL_R); | |
+ reg &= ~1; | |
+ sdhci_writel(host, reg, AT_CTRL_R); | |
+ | |
+ delay_line = HS400_DELAY_LINE; | |
+ } else { | |
+ sdhci_writeb(host, 0, PHY_DLLDL_CNFG_R); | |
+ } | |
+} | |
+ | |
+static unsigned int dwcmshc_pltfm_get_ro(struct sdhci_host *host) | |
+{ | |
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); | |
+ struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host); | |
+ int is_readonly; | |
+ | |
+ if (priv->wprtn_ignore) | |
+ return 0; | |
+ is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE) | |
+ & SDHCI_WRITE_PROTECT); | |
+ | |
+ return is_readonly; | |
} | |
static const struct sdhci_ops sdhci_dwcmshc_ops = { | |
@@ -105,8 +405,12 @@ static const struct sdhci_ops sdhci_dwcmshc_ops = { | |
.set_bus_width = sdhci_set_bus_width, | |
.set_uhs_signaling = dwcmshc_set_uhs_signaling, | |
.get_max_clock = sdhci_pltfm_clk_get_max_clock, | |
- .reset = sdhci_reset, | |
+ .get_ro = dwcmshc_pltfm_get_ro, | |
+ .reset = snps_sdhci_reset, | |
.adma_write_desc = dwcmshc_adma_write_desc, | |
+ .voltage_switch = snps_phy_1_8v_init, | |
+ .platform_execute_tuning = &snps_execute_tuning, | |
+ | |
}; | |
static const struct sdhci_pltfm_data sdhci_dwcmshc_pdata = { | |
@@ -135,10 +439,36 @@ static int dwcmshc_probe(struct platform_device *pdev) | |
if (extra > SDHCI_MAX_SEGS) | |
extra = SDHCI_MAX_SEGS; | |
host->adma_table_cnt += extra; | |
+ host->v4_mode = true; | |
pltfm_host = sdhci_priv(host); | |
priv = sdhci_pltfm_priv(pltfm_host); | |
+	/* used to fix SDHCI reset error */ | 
+ priv->soc_base = devm_platform_ioremap_resource(pdev, 1); | |
+ | |
+ if (device_property_present(&pdev->dev, "is_emmc")) { | |
+ priv->is_emmc_card = 1; | |
+ } else { | |
+ priv->is_emmc_card = 0; | |
+ } | |
+ | |
+ if (device_property_present(&pdev->dev, "pull_up")) { | |
+ priv->pull_up_en = 1; | |
+ } else { | |
+ priv->pull_up_en = 0; | |
+ } | |
+ | |
+ if (device_property_present(&pdev->dev, "io_fixed_1v8")) | |
+ priv->io_fixed_1v8 = true; | |
+ else | |
+ priv->io_fixed_1v8 = false; | |
+ | |
+ if (device_property_present(&pdev->dev, "wprtn_ignore")) | |
+ priv->wprtn_ignore = true; | |
+ else | |
+ priv->wprtn_ignore = false; | |
+ | |
pltfm_host->clk = devm_clk_get(&pdev->dev, "core"); | |
if (IS_ERR(pltfm_host->clk)) { | |
err = PTR_ERR(pltfm_host->clk); | |
diff --git a/drivers/mmc/host/sdhci-of-dwcmshc.h b/drivers/mmc/host/sdhci-of-dwcmshc.h | |
new file mode 100644 | |
index 000000000000..6a68ae8e7e9d | |
--- /dev/null | |
+++ b/drivers/mmc/host/sdhci-of-dwcmshc.h | |
@@ -0,0 +1,75 @@ | |
+/* SPDX-License-Identifier: GPL-2.0 */ | |
+/* | |
+ * snps sdhci driver. | |
+ * | |
+ * Copyright (C) 2011 Renesas Solutions Corp. | |
+ */ | |
+ | |
+#ifndef _SDHCI_OF_DWCMSHC_H_ | |
+#define _SDHCI_OF_DWCMSHC_H_ | |
+ | |
+#define DWC_MSHC_PTR_PHY_R 0x300 | |
+#define PHY_CNFG_R (DWC_MSHC_PTR_PHY_R + 0x00) //32bit | |
+#define PHY_RSTN 0x0 //1bit | |
+#define PAD_SP 0x10 //4bit | |
+#define PAD_SN 0x14 //4bit | |
+ | |
+#define PHY_CMDPAD_CNFG_R (DWC_MSHC_PTR_PHY_R + 0x04) //16bit | |
+#define PHY_DATAPAD_CNFG_R (DWC_MSHC_PTR_PHY_R + 0x06) //16bit | |
+#define PHY_CLKPAD_CNFG_R (DWC_MSHC_PTR_PHY_R + 0x08) //16bit | |
+#define PHY_STBPAD_CNFG_R (DWC_MSHC_PTR_PHY_R + 0x0a) //16bit | |
+#define PHY_RSTNPAD_CNFG_R (DWC_MSHC_PTR_PHY_R + 0x0c) //16bit | |
+#define RXSEL 0x0 //3bit | |
+#define WEAKPULL_EN 0x3 //2bit | |
+#define TXSLEW_CTRL_P 0x5 //4bit | |
+#define TXSLEW_CTRL_N 0x9 //4bit | |
+ | |
+#define PHY_PADTEST_CNFG_R (DWC_MSHC_PTR_PHY_R + 0x0e) | |
+#define PHY_PADTEST_OUT_R (DWC_MSHC_PTR_PHY_R + 0x10) | |
+#define PHY_PADTEST_IN_R (DWC_MSHC_PTR_PHY_R + 0x12) | |
+#define PHY_PRBS_CNFG_R (DWC_MSHC_PTR_PHY_R + 0x18) | |
+#define PHY_PHYLBK_CNFG_R (DWC_MSHC_PTR_PHY_R + 0x1a) | |
+#define PHY_COMMDL_CNFG_R (DWC_MSHC_PTR_PHY_R + 0x1c) | |
+ | |
+#define PHY_SDCLKDL_CNFG_R (DWC_MSHC_PTR_PHY_R + 0x1d) //8bit | |
+#define UPDATE_DC 0x4 //1bit | |
+ | |
+#define PHY_SDCLKDL_DC_R (DWC_MSHC_PTR_PHY_R + 0x1e) | |
+#define PHY_SMPLDL_CNFG_R (DWC_MSHC_PTR_PHY_R + 0x20) | |
+#define PHY_ATDL_CNFG_R (DWC_MSHC_PTR_PHY_R + 0x21) | |
+#define INPSEL_CNFG 2 //2bit | |
+ | |
+#define PHY_DLL_CTRL_R (DWC_MSHC_PTR_PHY_R + 0x24) | |
+#define DLL_EN 0x0 //1bit | |
+ | |
+#define PHY_DLL_CNFG1_R (DWC_MSHC_PTR_PHY_R + 0x25) | |
+#define PHY_DLL_CNFG2_R (DWC_MSHC_PTR_PHY_R + 0x26) | |
+#define PHY_DLLDL_CNFG_R (DWC_MSHC_PTR_PHY_R + 0x28) | |
+#define SLV_INPSEL 0x5 //2bit | |
+ | |
+#define PHY_DLL_OFFST_R (DWC_MSHC_PTR_PHY_R + 0x29) | |
+#define PHY_DLLMST_TSTDC_R (DWC_MSHC_PTR_PHY_R + 0x2a) | |
+#define PHY_DLLBT_CNFG_R (DWC_MSHC_PTR_PHY_R + 0x2c) | |
+#define PHY_DLL_STATUS_R (DWC_MSHC_PTR_PHY_R + 0x2e) | |
+#define PHY_DLLDBG_MLKDC_R (DWC_MSHC_PTR_PHY_R + 0x30) | |
+#define PHY_DLLDBG_SLKDC_R (DWC_MSHC_PTR_PHY_R + 0x32) | |
+ | |
+#define SNPS_SDHCI_CTRL_HS400 0x7 | |
+ | |
+#define P_VENDOR_SPECIFIC_AREA 0x500 | |
+#define EMMC_CTRL_R (P_VENDOR_SPECIFIC_AREA + 0x2c) //16bit | |
+#define CARD_IS_EMMC 0x0 //1bit | |
+ | |
+#define AT_CTRL_R (P_VENDOR_SPECIFIC_AREA + 0x40) // 32bit | |
+#define AT_EN 0x0 //1bit | |
+#define CI_SEL 0x1 //1bit | |
+#define SWIN_TH_EN 0x2 //1bit | |
+#define RPT_TUNE_ERR 0x3 //1bit | |
+#define SW_TUNE_EN 0x4 //1bit | |
+#define WIN_EDGE_SEL 0x8 //4bit | |
+#define TUNE_CLK_STOP_EN 0x10 //1bit | |
+#define PRE_CHANGE_DLY 0x11 //2bit | |
+#define POST_CHANGE_DLY 0x13 //2bit | |
+#define SWIN_TH_VAL 0x18 //9bit | |
+ | |
+#endif /* _SDHCI_OF_DWCMSHC_H_*/ | |
diff --git a/drivers/mmc/host/sdhci-of-light-mpw.c b/drivers/mmc/host/sdhci-of-light-mpw.c | |
new file mode 100644 | |
index 000000000000..92fa6d7aaada | |
--- /dev/null | |
+++ b/drivers/mmc/host/sdhci-of-light-mpw.c | |
@@ -0,0 +1,558 @@ | |
+// SPDX-License-Identifier: GPL-2.0 | |
+ | |
+#include <linux/clk.h> | |
+#include <linux/dma-mapping.h> | |
+#include <linux/kernel.h> | |
+#include <linux/module.h> | |
+#include <linux/of.h> | |
+#include <linux/sizes.h> | |
+ | |
+#include <linux/delay.h> | |
+ | |
+#include "sdhci-pltfm.h" | |
+ | |
+#define DWC_MSHC_PTR_PHY_R 0x300 | |
+#define PHY_CNFG_R (DWC_MSHC_PTR_PHY_R + 0x00) //32bit | |
+#define PHY_RSTN 0x0 //1bit | |
+#define PAD_SP 0x10 //4bit | |
+#define PAD_SN 0x14 //4bit | |
+ | |
+#define PHY_CMDPAD_CNFG_R (DWC_MSHC_PTR_PHY_R + 0x04) //16bit | |
+#define PHY_DATAPAD_CNFG_R (DWC_MSHC_PTR_PHY_R + 0x06) //16bit | |
+#define PHY_CLKPAD_CNFG_R (DWC_MSHC_PTR_PHY_R + 0x08) //16bit | |
+#define PHY_STBPAD_CNFG_R (DWC_MSHC_PTR_PHY_R + 0x0a) //16bit | |
+#define PHY_RSTNPAD_CNFG_R (DWC_MSHC_PTR_PHY_R + 0x0c) //16bit | |
+#define RXSEL 0x0 //3bit | |
+#define WEAKPULL_EN 0x3 //2bit | |
+#define TXSLEW_CTRL_P 0x5 //4bit | |
+#define TXSLEW_CTRL_N 0x9 //4bit | |
+ | |
+#define PHY_PADTEST_CNFG_R (DWC_MSHC_PTR_PHY_R + 0x0e) | |
+#define PHY_PADTEST_OUT_R (DWC_MSHC_PTR_PHY_R + 0x10) | |
+#define PHY_PADTEST_IN_R (DWC_MSHC_PTR_PHY_R + 0x12) | |
+#define PHY_PRBS_CNFG_R (DWC_MSHC_PTR_PHY_R + 0x18) | |
+#define PHY_PHYLBK_CNFG_R (DWC_MSHC_PTR_PHY_R + 0x1a) | |
+#define PHY_COMMDL_CNFG_R (DWC_MSHC_PTR_PHY_R + 0x1c) | |
+ | |
+#define PHY_SDCLKDL_CNFG_R (DWC_MSHC_PTR_PHY_R + 0x1d) //8bit | |
+#define UPDATE_DC 0x4 //1bit | |
+ | |
+#define PHY_SDCLKDL_DC_R (DWC_MSHC_PTR_PHY_R + 0x1e) | |
+#define PHY_SMPLDL_CNFG_R (DWC_MSHC_PTR_PHY_R + 0x20) | |
+#define PHY_ATDL_CNFG_R (DWC_MSHC_PTR_PHY_R + 0x21) | |
+#define INPSEL_CNFG 2 //2bit | |
+ | |
+#define PHY_DLL_CTRL_R (DWC_MSHC_PTR_PHY_R + 0x24) | |
+#define DLL_EN 0x0 //1bit | |
+ | |
+#define PHY_DLL_CNFG1_R (DWC_MSHC_PTR_PHY_R + 0x25) | |
+#define PHY_DLLDL_CNFG_R (DWC_MSHC_PTR_PHY_R + 0x28) | |
+#define SLV_INPSEL 0x5 //2bit | |
+ | |
+#define PHY_DLL_OFFST_R (DWC_MSHC_PTR_PHY_R + 0x29) | |
+#define PHY_DLLMST_TSTDC_R (DWC_MSHC_PTR_PHY_R + 0x2a) | |
+#define PHY_DLLBT_CNFG_R (DWC_MSHC_PTR_PHY_R + 0x2c) | |
+#define PHY_DLL_STATUS_R (DWC_MSHC_PTR_PHY_R + 0x2e) | |
+#define PHY_DLLDBG_MLKDC_R (DWC_MSHC_PTR_PHY_R + 0x30) | |
+#define PHY_DLLDBG_SLKDC_R (DWC_MSHC_PTR_PHY_R + 0x32) | |
+ | |
+#define SNPS_SDHCI_CTRL_HS400 0x7 | |
+ | |
+#define P_VENDOR_SPECIFIC_AREA 0x500 | |
+#define EMMC_CTRL_R (P_VENDOR_SPECIFIC_AREA + 0x2c) //16bit | |
+#define CARD_IS_EMMC 0x0 //1bit | |
+ | |
+#define AT_CTRL_R (P_VENDOR_SPECIFIC_AREA + 0x40) // 32bit | |
+#define AT_EN 0x0 //1bit | |
+#define CI_SEL 0x1 //1bit | |
+#define SWIN_TH_EN 0x2 //1bit | |
+#define RPT_TUNE_ERR 0x3 //1bit | |
+#define SW_TUNE_EN 0x4 //1bit | |
+#define WIN_EDGE_SEL 0x8 //4bit | |
+#define TUNE_CLK_STOP_EN 0x10 //1bit | |
+#define PRE_CHANGE_DLY 0x11 //2bit | |
+#define POST_CHANGE_DLY 0x13 //2bit | |
+#define SWIN_TH_VAL 0x18 //9bit | |
+/* DWCMSHC specific Mode Select value */ | |
+#define DWCMSHC_CTRL_HS400 0x7 | |
+ | |
+#define BOUNDARY_OK(addr, len) \ | |
+ ((addr | (SZ_128M - 1)) == ((addr + len - 1) | (SZ_128M - 1))) | |
+ | |
+/* Driver-private state, stored in the sdhci_pltfm_host private area. */ | 
+struct dwcmshc_priv { | 
+ struct clk *bus_clk; /* optional "bus" clock; may hold an ERR_PTR if absent */ | 
+ void __iomem *soc_base; /* second reg region; mapped in probe as a reset workaround */ | 
+ bool is_emmc_card; /* set from the "is_emmc" device property */ | 
+ bool pull_up_en; /* set from the "pull_up" device property */ | 
+}; | 
+ | |
+#define DELAY_LANE 30 | |
+ | |
+/* | 
+ * PHY init for 1.8V signalling when the "pull_up" property is absent: | 
+ * programs the SDCLK delay line, then sets bit 0 (RXSEL) on the pad | 
+ * config registers and enables the DLL. Register write order matters; | 
+ * do not reorder. | 
+ */ | 
+static void sdhci_phy_1_8v_init_no_pull(struct sdhci_host *host) | 
+{ | 
+ uint32_t val; | 
+ | 
+ /* release PHY reset (PHY_RSTN, bit 0 of PHY_CNFG_R) */ | 
+ sdhci_writel(host, 1, DWC_MSHC_PTR_PHY_R); | 
+ /* gate delay-line updates (UPDATE_DC) while changing the delay code */ | 
+ sdhci_writeb(host, 1 << 4, PHY_SDCLKDL_CNFG_R); | 
+ sdhci_writeb(host, 0x40, PHY_SDCLKDL_DC_R); | 
+ | 
+ /* re-enable the delay line with the new code */ | 
+ val = sdhci_readb(host, PHY_SDCLKDL_CNFG_R); | 
+ val &= ~(1 << 4); | 
+ sdhci_writeb(host, val, PHY_SDCLKDL_CNFG_R); | 
+ | 
+ /* RXSEL = 1 on cmd/data/rstn/strobe pads (1.8V receiver mode) */ | 
+ val = sdhci_readw(host, PHY_CMDPAD_CNFG_R); | 
+ sdhci_writew(host, val | 1, PHY_CMDPAD_CNFG_R); | 
+ | 
+ val = sdhci_readw(host, PHY_DATAPAD_CNFG_R); | 
+ sdhci_writew(host, val | 1, PHY_DATAPAD_CNFG_R); | 
+ | 
+ val = sdhci_readw(host, PHY_RSTNPAD_CNFG_R); | 
+ sdhci_writew(host, val | 1, PHY_RSTNPAD_CNFG_R); | 
+ | 
+ val = sdhci_readw(host, PHY_STBPAD_CNFG_R); | 
+ sdhci_writew(host, val | 1, PHY_STBPAD_CNFG_R); | 
+ | 
+ /* finally enable the DLL */ | 
+ val = sdhci_readb(host, PHY_DLL_CTRL_R); | 
+ sdhci_writeb(host, val | 1, PHY_DLL_CTRL_R); | 
+} | 
+ | |
+/* | 
+ * PHY init for 3.3V signalling when the "pull_up" property is absent. | 
+ * Identical sequence to the 1.8V variant except the pad registers get | 
+ * RXSEL = 2 (3.3V receiver mode) instead of 1. | 
+ */ | 
+static void sdhci_phy_3_3v_init_no_pull(struct sdhci_host *host) | 
+{ | 
+ uint32_t val; | 
+ | 
+ /* release PHY reset */ | 
+ sdhci_writel(host, 1, DWC_MSHC_PTR_PHY_R); | 
+ /* gate delay-line updates while programming the delay code */ | 
+ sdhci_writeb(host, 1 << 4, PHY_SDCLKDL_CNFG_R); | 
+ sdhci_writeb(host, 0x40, PHY_SDCLKDL_DC_R); | 
+ | 
+ val = sdhci_readb(host, PHY_SDCLKDL_CNFG_R); | 
+ val &= ~(1 << 4); | 
+ sdhci_writeb(host, val, PHY_SDCLKDL_CNFG_R); | 
+ | 
+ /* RXSEL = 2 on cmd/data/rstn/strobe pads (3.3V receiver mode) */ | 
+ val = sdhci_readw(host, PHY_CMDPAD_CNFG_R); | 
+ sdhci_writew(host, val | 2, PHY_CMDPAD_CNFG_R); | 
+ | 
+ val = sdhci_readw(host, PHY_DATAPAD_CNFG_R); | 
+ sdhci_writew(host, val | 2, PHY_DATAPAD_CNFG_R); | 
+ | 
+ val = sdhci_readw(host, PHY_RSTNPAD_CNFG_R); | 
+ sdhci_writew(host, val | 2, PHY_RSTNPAD_CNFG_R); | 
+ | 
+ val = sdhci_readw(host, PHY_STBPAD_CNFG_R); | 
+ sdhci_writew(host, val | 2, PHY_STBPAD_CNFG_R); | 
+ | 
+ /* enable the DLL */ | 
+ val = sdhci_readb(host, PHY_DLL_CTRL_R); | 
+ sdhci_writeb(host, val | 1, PHY_DLL_CTRL_R); | 
+} | 
+ | |
+/* | 
+ * Full PHY init for 1.8V signalling (also used as the .voltage_switch | 
+ * hook). When pull-ups are disabled via DT, defers to the no-pull | 
+ * variant; otherwise programs drive strength, the clock delay line, | 
+ * pad config (RXSEL/pull/slew) and enables the DLL. | 
+ */ | 
+static void snps_phy_1_8v_init(struct sdhci_host *host) | 
+{ | 
+ u32 val; | 
+ struct sdhci_pltfm_host *pltfm_host; | 
+ struct dwcmshc_priv *priv; | 
+ | 
+ pltfm_host = sdhci_priv(host); | 
+ priv = sdhci_pltfm_priv(pltfm_host); | 
+ if (priv->pull_up_en == 0) { | 
+  sdhci_phy_1_8v_init_no_pull(host); | 
+  return; | 
+ } | 
+ | 
+ //set driving force | 
+ sdhci_writel(host, (1 << PHY_RSTN) | (0xc << PAD_SP) | (0xc << PAD_SN), PHY_CNFG_R); | 
+ | 
+ //disable delay lane | 
+ sdhci_writeb(host, 1 << UPDATE_DC, PHY_SDCLKDL_CNFG_R); | 
+ //set delay lane | 
+ sdhci_writeb(host, DELAY_LANE, PHY_SDCLKDL_DC_R); | 
+ //enable delay lane | 
+ val = sdhci_readb(host, PHY_SDCLKDL_CNFG_R); | 
+ val &= ~(1 << UPDATE_DC); | 
+ sdhci_writeb(host, val, PHY_SDCLKDL_CNFG_R); | 
+ | 
+ /* cmd/data/rstn pads: 1.8V rx, weak pull-up, slew = 3 */ | 
+ val = (1 << RXSEL) | (1 << WEAKPULL_EN) | (3 << TXSLEW_CTRL_P) | (3 << TXSLEW_CTRL_N); | 
+ sdhci_writew(host, val, PHY_CMDPAD_CNFG_R); | 
+ sdhci_writew(host, val, PHY_DATAPAD_CNFG_R); | 
+ sdhci_writew(host, val, PHY_RSTNPAD_CNFG_R); | 
+ | 
+ /* clock pad: no receiver/pull, slew only */ | 
+ val = (3 << TXSLEW_CTRL_P) | (3 << TXSLEW_CTRL_N); | 
+ sdhci_writew(host, val, PHY_CLKPAD_CNFG_R); | 
+ | 
+ /* strobe pad: 1.8V rx, weak pull-down (WEAKPULL_EN = 2) */ | 
+ val = (1 << RXSEL) | (2 << WEAKPULL_EN) | (3 << TXSLEW_CTRL_P) | (3 << TXSLEW_CTRL_N); | 
+ sdhci_writew(host, val, PHY_STBPAD_CNFG_R); | 
+ | 
+ sdhci_writeb(host, (1 << DLL_EN), PHY_DLL_CTRL_R); | 
+} | 
+ | |
+/* | 
+ * Full PHY init for 3.3V signalling. Mirrors snps_phy_1_8v_init() but | 
+ * selects the 3.3V pad receiver (RXSEL = 2). | 
+ */ | 
+static void snps_phy_3_3v_init(struct sdhci_host *host) | 
+{ | 
+ u32 val; | 
+ struct sdhci_pltfm_host *pltfm_host; | 
+ struct dwcmshc_priv *priv; | 
+ | 
+ pltfm_host = sdhci_priv(host); | 
+ priv = sdhci_pltfm_priv(pltfm_host); | 
+ if (priv->pull_up_en == 0) { | 
+  sdhci_phy_3_3v_init_no_pull(host); | 
+  return; | 
+ } | 
+ //set driving force | 
+ sdhci_writel(host, (1 << PHY_RSTN) | (0xc << PAD_SP) | (0xc << PAD_SN), PHY_CNFG_R); | 
+ | 
+ //disable delay lane | 
+ sdhci_writeb(host, 1 << UPDATE_DC, PHY_SDCLKDL_CNFG_R); | 
+ //set delay lane | 
+ sdhci_writeb(host, DELAY_LANE, PHY_SDCLKDL_DC_R); | 
+ //enable delay lane | 
+ val = sdhci_readb(host, PHY_SDCLKDL_CNFG_R); | 
+ val &= ~(1 << UPDATE_DC); | 
+ sdhci_writeb(host, val, PHY_SDCLKDL_CNFG_R); | 
+ | 
+ /* cmd/data/rstn pads: 3.3V rx, weak pull-up, slew = 3 */ | 
+ val = (2 << RXSEL) | (1 << WEAKPULL_EN) | (3 << TXSLEW_CTRL_P) | (3 << TXSLEW_CTRL_N); | 
+ sdhci_writew(host, val, PHY_CMDPAD_CNFG_R); | 
+ sdhci_writew(host, val, PHY_DATAPAD_CNFG_R); | 
+ sdhci_writew(host, val, PHY_RSTNPAD_CNFG_R); | 
+ | 
+ /* clock pad: slew only */ | 
+ val = (3 << TXSLEW_CTRL_P) | (3 << TXSLEW_CTRL_N); | 
+ sdhci_writew(host, val, PHY_CLKPAD_CNFG_R); | 
+ | 
+ /* strobe pad: 3.3V rx, weak pull-down */ | 
+ val = (2 << RXSEL) | (2 << WEAKPULL_EN) | (3 << TXSLEW_CTRL_P) | (3 << TXSLEW_CTRL_N); | 
+ sdhci_writew(host, val, PHY_STBPAD_CNFG_R); | 
+ | 
+ sdhci_writeb(host, (1 << DLL_EN), PHY_DLL_CTRL_R); | 
+} | 
+ | |
+/* | 
+ * Local copy of the sdhci core tuning loop (the core's helper is | 
+ * static, hence duplicated here): repeatedly issues the tuning | 
+ * command until the controller clears EXEC_TUNING or the loop count | 
+ * is exhausted. Returns 0 on success, -ETIMEDOUT on response | 
+ * timeout, -EAGAIN when tuning failed. | 
+ */ | 
+static int __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode) | 
+{ | 
+ #define DW_SDHCI_TUNING_LOOP_COUNT 128 | 
+ int i; | 
+ /* | 
+  * Issue opcode repeatedly till Execute Tuning is set to 0 or the number | 
+  * of loops reaches tuning loop count. | 
+  */ | 
+ for (i = 0; i < DW_SDHCI_TUNING_LOOP_COUNT; i++) { | 
+  u16 ctrl; | 
+ | 
+  sdhci_send_tuning(host, opcode); | 
+ | 
+  if (!host->tuning_done) { | 
+   pr_debug("%s: Tuning timeout, falling back to fixed sampling clock\n", | 
+     mmc_hostname(host->mmc)); | 
+   sdhci_abort_tuning(host, opcode); | 
+   return -ETIMEDOUT; | 
+  } | 
+ | 
+  /* Spec does not require a delay between tuning cycles */ | 
+  if (host->tuning_delay > 0) | 
+   mdelay(host->tuning_delay); | 
+ | 
+  ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); | 
+  if (!(ctrl & SDHCI_CTRL_EXEC_TUNING)) { | 
+   if (ctrl & SDHCI_CTRL_TUNED_CLK) | 
+    return 0; /* Success! */ | 
+   break; | 
+  } | 
+ } | 
+ | 
+ pr_info("%s: Tuning failed, falling back to fixed sampling clock\n", | 
+  mmc_hostname(host->mmc)); | 
+ sdhci_reset_tuning(host); | 
+ return -EAGAIN; | 
+} | 
+ | |
+/* | 
+ * .platform_execute_tuning hook: configures the vendor auto-tuning | 
+ * block (AT_CTRL_R), then runs the standard tuning loop. | 
+ * HS400 tuning is a no-op (tuning is done in HS200 beforehand). | 
+ * NOTE(review): returns -1 rather than a -E* code on failure; the | 
+ * mmc core treats any non-zero value as failure, so this works but | 
+ * is unidiomatic. | 
+ */ | 
+static int snps_execute_tuning(struct sdhci_host *host, u32 opcode) | 
+{ | 
+ u32 val = 0; | 
+ | 
+ if (host->flags & SDHCI_HS400_TUNING) | 
+  return 0; | 
+ | 
+ sdhci_writeb(host, 3 << INPSEL_CNFG, PHY_ATDL_CNFG_R); | 
+ | 
+ val = sdhci_readl(host, AT_CTRL_R); | 
+ | 
+ val &= ~((1 << CI_SEL) | (1 << RPT_TUNE_ERR)\ | 
+  | (1 << SW_TUNE_EN) |(0xf << WIN_EDGE_SEL)); | 
+ val |= (1 << AT_EN) | (1 << SWIN_TH_EN) | (1 << TUNE_CLK_STOP_EN)\ | 
+  | (1 << PRE_CHANGE_DLY) | (3 << POST_CHANGE_DLY) | (9 << SWIN_TH_VAL); | 
+ | 
+ sdhci_writel(host, val, AT_CTRL_R); | 
+ /* read back to confirm the controller accepted AT_EN */ | 
+ val = sdhci_readl(host, AT_CTRL_R); | 
+ if(!(val & (1 << AT_EN))) { | 
+  pr_err("*****Auto Tuning is NOT Enable!!!\n"); | 
+  return -1; | 
+ } | 
+ | 
+ /* NOTE(review): AT_EN is asserted above only to latch the window | 
+  * configuration, then cleared again before the manual tuning loop | 
+  * runs — presumably intentional; confirm against the DWC MSHC | 
+  * databook. | 
+  */ | 
+ val &= ~(1 << AT_EN); | 
+ sdhci_writel(host, val, AT_CTRL_R); | 
+ | 
+ sdhci_start_tuning(host); | 
+ | 
+ host->tuning_err = __sdhci_execute_tuning(host, opcode); | 
+ if (host->tuning_err) { | 
+  /* leave auto-tuning disabled on failure */ | 
+  val &= ~(1 << AT_EN); | 
+  sdhci_writel(host, val, AT_CTRL_R); | 
+  return -1; | 
+ } | 
+ | 
+ sdhci_end_tuning(host); | 
+ | 
+ return 0; | 
+} | 
+ | |
+/* | 
+ * .set_uhs_signaling hook: maps MMC timing modes onto the UHS mode | 
+ * select field of HOST_CONTROL2. HS400 uses the non-standard vendor | 
+ * value 0x7 and additionally switches the DLL slave input to the data | 
+ * strobe (DS) clock; leaving HS400 restores the default input. | 
+ */ | 
+static void snps_sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing) | 
+{ | 
+ u16 ctrl_2; | 
+ | 
+ ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); | 
+ /* Select Bus Speed Mode for host */ | 
+ ctrl_2 &= ~SDHCI_CTRL_UHS_MASK; | 
+ if ((timing == MMC_TIMING_MMC_HS200) || | 
+     (timing == MMC_TIMING_UHS_SDR104)) { | 
+  ctrl_2 |= SDHCI_CTRL_UHS_SDR104; | 
+ } | 
+ else if (timing == MMC_TIMING_UHS_SDR12) | 
+  ctrl_2 |= SDHCI_CTRL_UHS_SDR12; | 
+ else if (timing == MMC_TIMING_UHS_SDR25) | 
+  ctrl_2 |= SDHCI_CTRL_UHS_SDR25; | 
+ else if (timing == MMC_TIMING_UHS_SDR50) | 
+  ctrl_2 |= SDHCI_CTRL_UHS_SDR50; | 
+ else if ((timing == MMC_TIMING_UHS_DDR50) || | 
+   (timing == MMC_TIMING_MMC_DDR52)) | 
+  ctrl_2 |= SDHCI_CTRL_UHS_DDR50; | 
+ else if (timing == MMC_TIMING_MMC_HS400) { | 
+  ctrl_2 |= SNPS_SDHCI_CTRL_HS400; /* Non-standard */ | 
+ } | 
+ | 
+ sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2); | 
+ if (timing == MMC_TIMING_MMC_HS400) { | 
+  //disable auto tuning | 
+  u32 reg = sdhci_readl(host, AT_CTRL_R); | 
+  reg &= ~1; | 
+  sdhci_writel(host, reg, AT_CTRL_R); | 
+  //used ds clock | 
+  sdhci_writeb(host, 3 << SLV_INPSEL, PHY_DLLDL_CNFG_R); | 
+ } else { | 
+  sdhci_writeb(host, 0, PHY_DLLDL_CNFG_R); | 
+ } | 
+} | 
+ | |
+/* | 
+ * .reset hook: performs the standard SDHCI reset, then re-initialises | 
+ * the PHY (reset clears its configuration) and restores the vendor | 
+ * EMMC_CTRL_R card-type bit according to the DT "is_emmc" property. | 
+ */ | 
+static void snps_sdhci_reset(struct sdhci_host *host, u8 mask) | 
+{ | 
+ struct sdhci_pltfm_host *pltfm_host; | 
+ struct dwcmshc_priv *priv; | 
+ u16 emmc_ctl; | 
+ | 
+ pltfm_host = sdhci_priv(host); | 
+ priv = sdhci_pltfm_priv(pltfm_host); | 
+ | 
+ /*host reset*/ | 
+ sdhci_reset(host, mask); | 
+ /*fix host reset error*/ | 
+ mdelay(100); | 
+ | 
+ emmc_ctl = sdhci_readw(host, EMMC_CTRL_R); | 
+ if (priv->is_emmc_card) { | 
+  snps_phy_1_8v_init(host); | 
+  emmc_ctl |= (1 << CARD_IS_EMMC); | 
+ } else { | 
+  snps_phy_3_3v_init(host); | 
+  emmc_ctl &= ~(1 << CARD_IS_EMMC); | 
+ } | 
+ /* | 
+  * EMMC_CTRL_R is a 16-bit register (read above with sdhci_readw); | 
+  * write it back with sdhci_writew so the upper byte is preserved | 
+  * instead of being truncated by an 8-bit write. | 
+  */ | 
+ sdhci_writew(host, emmc_ctl, EMMC_CTRL_R); | 
+ /*set i wait*/ | 
+ sdhci_writeb(host, 0x5, PHY_DLL_CNFG1_R); | 
+} | 
+ | |
+/* | 
+ * If DMA addr spans 128MB boundary, we split the DMA transfer into two | 
+ * so that each DMA transfer doesn't exceed the boundary. | 
+ * (Probe reserves extra ADMA descriptors to accommodate the split.) | 
+ */ | 
+static void dwcmshc_adma_write_desc(struct sdhci_host *host, void **desc, | 
+        dma_addr_t addr, int len, unsigned int cmd) | 
+{ | 
+ int tmplen, offset; | 
+ | 
+ /* common case: zero length or no boundary crossing — one descriptor */ | 
+ if (likely(!len || BOUNDARY_OK(addr, len))) { | 
+  sdhci_adma_write_desc(host, desc, addr, len, cmd); | 
+  return; | 
+ } | 
+ | 
+ /* first descriptor covers up to the 128M boundary ... */ | 
+ offset = addr & (SZ_128M - 1); | 
+ tmplen = SZ_128M - offset; | 
+ sdhci_adma_write_desc(host, desc, addr, tmplen, cmd); | 
+ | 
+ /* ... second descriptor covers the remainder */ | 
+ addr += tmplen; | 
+ len -= tmplen; | 
+ sdhci_adma_write_desc(host, desc, addr, len, cmd); | 
+} | 
+ | |
+/* SDHCI host operations for the Light MPW DWC MSHC controller. */ | 
+static const struct sdhci_ops sdhci_dwcmshc_lw_ops = { | 
+ .set_clock = sdhci_set_clock, | 
+ .set_bus_width = sdhci_set_bus_width, | 
+ .set_uhs_signaling = snps_sdhci_set_uhs_signaling, | 
+ .get_max_clock = sdhci_pltfm_clk_get_max_clock, | 
+ .reset = snps_sdhci_reset, | 
+ .adma_write_desc = dwcmshc_adma_write_desc, | 
+ /* re-run the 1.8V PHY setup after a signal-voltage switch */ | 
+ .voltage_switch = snps_phy_1_8v_init, | 
+ .platform_execute_tuning = &snps_execute_tuning, | 
+}; | 
+ | |
+/* Platform data: max clock comes from the "core" clk, not the CAPS register. */ | 
+static const struct sdhci_pltfm_data sdhci_dwcmshc_lw_pdata = { | 
+ .ops = &sdhci_dwcmshc_lw_ops, | 
+ .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN, | 
+}; | 
+ | |
+/* | 
+ * Probe: registers the SDHCI platform host, sizes the ADMA table for | 
+ * 128M-boundary splitting, reads DT properties and enables clocks. | 
+ * Fixes vs. original: the second reg region mapping is error-checked, | 
+ * the optional bus clock enable is error-checked, and the stray | 
+ * unconditional "priv->pull_up_en = 0;" dead-store (which made the | 
+ * "pull_up" DT property non-functional) has been removed. | 
+ */ | 
+static int dwcmshc_probe(struct platform_device *pdev) | 
+{ | 
+ struct sdhci_pltfm_host *pltfm_host; | 
+ struct sdhci_host *host; | 
+ struct dwcmshc_priv *priv; | 
+ int err; | 
+ u32 extra; | 
+ | 
+ host = sdhci_pltfm_init(pdev, &sdhci_dwcmshc_lw_pdata, | 
+    sizeof(struct dwcmshc_priv)); | 
+ if (IS_ERR(host)) | 
+  return PTR_ERR(host); | 
+ | 
+ /* | 
+  * extra adma table cnt for cross 128M boundary handling. | 
+  */ | 
+ extra = DIV_ROUND_UP_ULL(dma_get_required_mask(&pdev->dev), SZ_128M); | 
+ if (extra > SDHCI_MAX_SEGS) | 
+  extra = SDHCI_MAX_SEGS; | 
+ host->adma_table_cnt += extra; | 
+ host->v4_mode = true; | 
+ | 
+ pltfm_host = sdhci_priv(host); | 
+ priv = sdhci_pltfm_priv(pltfm_host); | 
+ | 
+ /* second reg region is used to work around host reset errors */ | 
+ priv->soc_base = devm_platform_ioremap_resource(pdev, 1); | 
+ if (IS_ERR(priv->soc_base)) { | 
+  err = PTR_ERR(priv->soc_base); | 
+  goto free_pltfm; | 
+ } | 
+ | 
+ priv->is_emmc_card = device_property_present(&pdev->dev, "is_emmc"); | 
+ priv->pull_up_en = device_property_present(&pdev->dev, "pull_up"); | 
+ | 
+ pltfm_host->clk = devm_clk_get(&pdev->dev, "core"); | 
+ if (IS_ERR(pltfm_host->clk)) { | 
+  err = PTR_ERR(pltfm_host->clk); | 
+  dev_err(&pdev->dev, "failed to get core clk: %d\n", err); | 
+  goto free_pltfm; | 
+ } | 
+ err = clk_prepare_enable(pltfm_host->clk); | 
+ if (err) | 
+  goto free_pltfm; | 
+ | 
+ /* the bus clock is optional; only enable it when present */ | 
+ priv->bus_clk = devm_clk_get(&pdev->dev, "bus"); | 
+ if (!IS_ERR(priv->bus_clk)) { | 
+  err = clk_prepare_enable(priv->bus_clk); | 
+  if (err) | 
+   goto err_core_clk; | 
+ } | 
+ | 
+ err = mmc_of_parse(host->mmc); | 
+ if (err) | 
+  goto err_clk; | 
+ | 
+ sdhci_get_of_property(pdev); | 
+ | 
+ err = sdhci_add_host(host); | 
+ if (err) | 
+  goto err_clk; | 
+ | 
+ return 0; | 
+ | 
+err_clk: | 
+ /* clk_disable_unprepare() tolerates an ERR_PTR/NULL bus clock */ | 
+ clk_disable_unprepare(priv->bus_clk); | 
+err_core_clk: | 
+ clk_disable_unprepare(pltfm_host->clk); | 
+free_pltfm: | 
+ sdhci_pltfm_free(pdev); | 
+ return err; | 
+} | 
+ | |
+/* | 
+ * Remove: tears down the host and releases clocks. | 
+ * clk_disable_unprepare() is a no-op for an ERR_PTR/NULL bus clock, | 
+ * so the unconditional call is safe even when "bus" was absent. | 
+ */ | 
+static int dwcmshc_remove(struct platform_device *pdev) | 
+{ | 
+ struct sdhci_host *host = platform_get_drvdata(pdev); | 
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); | 
+ struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host); | 
+ | 
+ sdhci_remove_host(host, 0); | 
+ | 
+ clk_disable_unprepare(pltfm_host->clk); | 
+ clk_disable_unprepare(priv->bus_clk); | 
+ | 
+ sdhci_pltfm_free(pdev); | 
+ | 
+ return 0; | 
+} | 
+ | |
+#ifdef CONFIG_PM_SLEEP | 
+/* System suspend: quiesce the SDHCI host, then gate both clocks. */ | 
+static int dwcmshc_suspend(struct device *dev) | 
+{ | 
+ struct sdhci_host *host = dev_get_drvdata(dev); | 
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); | 
+ struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host); | 
+ int ret; | 
+ | 
+ ret = sdhci_suspend_host(host); | 
+ if (ret) | 
+  return ret; | 
+ | 
+ clk_disable_unprepare(pltfm_host->clk); | 
+ /* the bus clock is optional — only gate it when it was acquired */ | 
+ if (!IS_ERR(priv->bus_clk)) | 
+  clk_disable_unprepare(priv->bus_clk); | 
+ | 
+ return ret; | 
+} | 
+ | |
+/* | 
+ * System resume: re-enable clocks, then restore the SDHCI host. | 
+ * Fix vs. original: if the optional bus clock fails to enable, the | 
+ * core clock is now unwound instead of being left prepared/enabled. | 
+ */ | 
+static int dwcmshc_resume(struct device *dev) | 
+{ | 
+ struct sdhci_host *host = dev_get_drvdata(dev); | 
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); | 
+ struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host); | 
+ int ret; | 
+ | 
+ ret = clk_prepare_enable(pltfm_host->clk); | 
+ if (ret) | 
+  return ret; | 
+ | 
+ if (!IS_ERR(priv->bus_clk)) { | 
+  ret = clk_prepare_enable(priv->bus_clk); | 
+  if (ret) { | 
+   /* don't leave the core clock running on failure */ | 
+   clk_disable_unprepare(pltfm_host->clk); | 
+   return ret; | 
+  } | 
+ } | 
+ | 
+ return sdhci_resume_host(host); | 
+} | 
+#endif | 
+ | |
+/* PM ops resolve to no-ops when CONFIG_PM_SLEEP is disabled. */ | 
+static SIMPLE_DEV_PM_OPS(dwcmshc_pmops, dwcmshc_suspend, dwcmshc_resume); | 
+ | 
+/* DT match table for the Light MPW instantiation of the DWC MSHC. */ | 
+static const struct of_device_id sdhci_dwcmshc_lw_dt_ids[] = { | 
+ { .compatible = "snps,dwcmshc-sdhci-light-mpw" }, | 
+ {} | 
+}; | 
+MODULE_DEVICE_TABLE(of, sdhci_dwcmshc_lw_dt_ids); | 
+ | 
+static struct platform_driver sdhci_dwcmshc_lw_driver = { | 
+ .driver = { | 
+  .name = "sdhci-dwcmshc-light-mpw", | 
+  .probe_type = PROBE_PREFER_ASYNCHRONOUS, | 
+  .of_match_table = sdhci_dwcmshc_lw_dt_ids, | 
+  .pm = &dwcmshc_pmops, | 
+ }, | 
+ .probe = dwcmshc_probe, | 
+ .remove = dwcmshc_remove, | 
+}; | 
+module_platform_driver(sdhci_dwcmshc_lw_driver); | 
+ | 
+MODULE_DESCRIPTION("SDHCI platform driver for Synopsys DWC MSHC light mpw"); | 
+MODULE_LICENSE("GPL v2"); | 
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c | |
index d42e86cdff12..3d0f8f1e7eb9 100644 | |
--- a/drivers/mmc/host/sdhci.c | |
+++ b/drivers/mmc/host/sdhci.c | |
@@ -33,6 +33,8 @@ | |
#include <linux/mmc/sdio.h> | |
#include <linux/mmc/slot-gpio.h> | |
+#include <trace/hooks/mmc_core.h> | |
+ | |
#include "sdhci.h" | |
#define DRIVER_NAME "sdhci" | |
@@ -772,19 +774,7 @@ static void sdhci_adma_table_pre(struct sdhci_host *host, | |
len -= offset; | |
} | |
- /* | |
- * The block layer forces a minimum segment size of PAGE_SIZE, | |
- * so 'len' can be too big here if PAGE_SIZE >= 64KiB. Write | |
- * multiple descriptors, noting that the ADMA table is sized | |
- * for 4KiB chunks anyway, so it will be big enough. | |
- */ | |
- while (len > host->max_adma) { | |
- int n = 32 * 1024; /* 32KiB*/ | |
- | |
- __sdhci_adma_write_desc(host, &desc, addr, n, ADMA2_TRAN_VALID); | |
- addr += n; | |
- len -= n; | |
- } | |
+ BUG_ON(len > 65536); | |
/* tran, valid */ | |
if (len) | |
@@ -2418,6 +2408,7 @@ static int sdhci_get_cd(struct mmc_host *mmc) | |
{ | |
struct sdhci_host *host = mmc_priv(mmc); | |
int gpio_cd = mmc_gpio_get_cd(mmc); | |
+ bool allow = true; | |
if (host->flags & SDHCI_DEVICE_DEAD) | |
return 0; | |
@@ -2426,6 +2417,10 @@ static int sdhci_get_cd(struct mmc_host *mmc) | |
if (!mmc_card_is_removable(host->mmc)) | |
return 1; | |
+ trace_android_vh_sdhci_get_cd(host, &allow); | |
+ if (!allow) | |
+ return 0; | |
+ | |
/* | |
* Try slot gpio detect, if defined it take precedence | |
* over build in controller functionality | |
@@ -3220,7 +3215,7 @@ static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *intmask_p) | |
return; | |
pr_err("%s: Got command interrupt 0x%08x even though no command operation was in progress.\n", | |
mmc_hostname(host->mmc), (unsigned)intmask); | |
- sdhci_dumpregs(host); | |
+ //sdhci_dumpregs(host); | |
return; | |
} | |
@@ -3350,7 +3345,7 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask) | |
pr_err("%s: Got data interrupt 0x%08x even though no data operation was in progress.\n", | |
mmc_hostname(host->mmc), (unsigned)intmask); | |
- sdhci_dumpregs(host); | |
+ //sdhci_dumpregs(host); | |
return; | |
} | |
@@ -3960,7 +3955,6 @@ struct sdhci_host *sdhci_alloc_host(struct device *dev, | |
* descriptor for each segment, plus 1 for a nop end descriptor. | |
*/ | |
host->adma_table_cnt = SDHCI_MAX_SEGS * 2 + 1; | |
- host->max_adma = 65536; | |
return host; | |
} | |
@@ -4624,12 +4618,10 @@ int sdhci_setup_host(struct sdhci_host *host) | |
* be larger than 64 KiB though. | |
*/ | |
if (host->flags & SDHCI_USE_ADMA) { | |
- if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC) { | |
- host->max_adma = 65532; /* 32-bit alignment */ | |
+ if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC) | |
mmc->max_seg_size = 65535; | |
- } else { | |
+ else | |
mmc->max_seg_size = 65536; | |
- } | |
} else { | |
mmc->max_seg_size = mmc->max_req_size; | |
} | |
@@ -4762,7 +4754,6 @@ int __sdhci_add_host(struct sdhci_host *host) | |
free_irq(host->irq, host); | |
unwq: | |
destroy_workqueue(host->complete_wq); | |
- | |
return ret; | |
} | |
EXPORT_SYMBOL_GPL(__sdhci_add_host); | |
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h | |
index 8b1650f37fbb..15ecf856a7c7 100644 | |
--- a/drivers/mmc/host/sdhci.h | |
+++ b/drivers/mmc/host/sdhci.h | |
@@ -16,6 +16,7 @@ | |
#include <linux/io.h> | |
#include <linux/leds.h> | |
#include <linux/interrupt.h> | |
+#include <linux/android_kabi.h> | |
#include <linux/mmc/host.h> | |
@@ -338,8 +339,7 @@ struct sdhci_adma2_64_desc { | |
/* | |
* Maximum segments assuming a 512KiB maximum requisition size and a minimum | |
- * 4KiB page size. Note this also allows enough for multiple descriptors in | |
- * case of PAGE_SIZE >= 64KiB. | |
+ * 4KiB page size. | |
*/ | |
#define SDHCI_MAX_SEGS 128 | |
@@ -541,7 +541,6 @@ struct sdhci_host { | |
unsigned int blocks; /* remaining PIO blocks */ | |
int sg_count; /* Mapped sg entries */ | |
- int max_adma; /* Max. length in ADMA descriptor */ | |
void *adma_table; /* ADMA descriptor table */ | |
void *align_buffer; /* Bounce buffer */ | |
@@ -608,6 +607,8 @@ struct sdhci_host { | |
u64 data_timeout; | |
+ ANDROID_KABI_RESERVE(1); | |
+ | |
unsigned long private[] ____cacheline_aligned; | |
}; | |
@@ -655,6 +656,8 @@ struct sdhci_ops { | |
void (*request_done)(struct sdhci_host *host, | |
struct mmc_request *mrq); | |
void (*dump_vendor_regs)(struct sdhci_host *host); | |
+ | |
+ ANDROID_KABI_RESERVE(1); | |
}; | |
#ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS |
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment