From 2f3bfda1caf3e90a254737cd63e0ce26d32a1eaf Mon Sep 17 00:00:00 2001 From: Ravi Kumar Siddojigari Date: Thu, 9 Jan 2020 16:49:20 +0530 Subject: [PATCH 01/90] selinux: move ibpkeys code under CONFIG_SECURITY_INFINIBAND Move cache based pkey sid retrieval code which was added with Commit "409dcf31" under CONFIG_SECURITY_INFINIBAND. As its going to alloc a new cache which impacts low ram devices which was enabled by default. Change-Id: I179a461386a09ebd58591fe835e3e0e0002db392 Suggested-by: Paul Moore Signed-off-by: Ravi Kumar Siddojigari Signed-off-by: vijay rayabarapu --- security/selinux/Makefile | 4 +++- security/selinux/include/ibpkey.h | 12 ++++++++++++ 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/security/selinux/Makefile b/security/selinux/Makefile index c7161f8792b2..ef0f184119b3 100644 --- a/security/selinux/Makefile +++ b/security/selinux/Makefile @@ -6,7 +6,7 @@ obj-$(CONFIG_SECURITY_SELINUX) := selinux.o selinux-y := avc.o hooks.o selinuxfs.o netlink.o nlmsgtab.o netif.o \ - netnode.o netport.o ibpkey.o exports.o \ + netnode.o netport.o exports.o\ ss/ebitmap.o ss/hashtab.o ss/symtab.o ss/sidtab.o ss/avtab.o \ ss/policydb.o ss/services.o ss/conditional.o ss/mls.o ss/status.o @@ -14,6 +14,8 @@ selinux-$(CONFIG_SECURITY_NETWORK_XFRM) += xfrm.o selinux-$(CONFIG_NETLABEL) += netlabel.o +selinux-$(CONFIG_SECURITY_INFINIBAND) += ibpkey.o + ccflags-y := -I$(srctree)/security/selinux -I$(srctree)/security/selinux/include $(addprefix $(obj)/,$(selinux-y)): $(obj)/flask.h diff --git a/security/selinux/include/ibpkey.h b/security/selinux/include/ibpkey.h index b17a19e348e6..e3c2b12ac22a 100644 --- a/security/selinux/include/ibpkey.h +++ b/security/selinux/include/ibpkey.h @@ -24,8 +24,20 @@ #ifndef _SELINUX_IB_PKEY_H #define _SELINUX_IB_PKEY_H +#ifdef CONFIG_SECURITY_INFINIBAND void sel_ib_pkey_flush(void); int sel_ib_pkey_sid(u64 subnet_prefix, u16 pkey, u32 *sid); +#else + +static inline void sel_ib_pkey_flush(void) { } + +static inline int 
sel_ib_pkey_sid(u64 subnet_prefix, u16 pkey, u32 *sid) +{ + *sid = SECINITSID_UNLABELED; + return 0; +} +#endif + #endif From 12f394471006f689f61aab12eb3df5d9216bfae2 Mon Sep 17 00:00:00 2001 From: Sriharsha Allenki Date: Thu, 19 Mar 2020 12:18:17 +0530 Subject: [PATCH 02/90] usb: dwc3: gsi: Disable GSI wrapper on clearing run_stop Disable GSI wrapper on clearing run_stop to prevent unnecessary access by GSI wrapper because of the pending GEVNTCOUNT on the GSI interrupters. Change-Id: I8ee7fe25d33713487b74044a3a02deff0b7057aa Signed-off-by: Sriharsha Allenki --- drivers/usb/dwc3/dwc3-msm.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c index d5bbf4bc946f..07fb3c500763 100644 --- a/drivers/usb/dwc3/dwc3-msm.c +++ b/drivers/usb/dwc3/dwc3-msm.c @@ -2158,6 +2158,9 @@ static void dwc3_msm_notify_event(struct dwc3 *dwc, unsigned int event, dwc3_msm_write_reg_field(mdwc->base, GSI_GENERAL_CFG_REG(mdwc->gsi_reg[GENERAL_CFG_REG]), BLOCK_GSI_WR_GO_MASK, true); + dwc3_msm_write_reg_field(mdwc->base, + GSI_GENERAL_CFG_REG(mdwc->gsi_reg), + GSI_EN_MASK, 0); break; default: dev_dbg(mdwc->dev, "unknown dwc3 event\n"); From 8b285aec080c019e60e1a4553f0e43c77b5ad996 Mon Sep 17 00:00:00 2001 From: Ram Prakash Gupta Date: Fri, 17 Jan 2020 12:01:29 +0530 Subject: [PATCH 03/90] mmc: sdhci-msm: Add SDCC debug feature With latest release of HPG for SDCC, some debug features were added to get useful information at hardware level at time of any issue. This change adds a provision to enter/exit SDCC debug mode. And it dumps below debug info when SDHC encounters any issue: - fsm history of sdhc - newly added ADMA descriptor info. - sdcc iib history. And exits from auto-recovery disable upon dumping sdcc registers so that FSM would move back to idle state. And enable testbus bit to disable internal clk gating as per sdcc HPG to support debug feature. 
Change-Id: Ie490fa5cd255ee6ba8dbaa6d08075dfa17f25093 Signed-off-by: Ram Prakash Gupta --- drivers/mmc/host/sdhci-msm.c | 151 +++++++++++++++++++++++++++++++++++ drivers/mmc/host/sdhci-msm.h | 64 ++++++++++++++- drivers/mmc/host/sdhci.c | 18 +++++ drivers/mmc/host/sdhci.h | 2 + include/linux/mmc/host.h | 2 + 5 files changed, 236 insertions(+), 1 deletion(-) diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c index 491a67daf475..12a8b1e690c8 100644 --- a/drivers/mmc/host/sdhci-msm.c +++ b/drivers/mmc/host/sdhci-msm.c @@ -1206,6 +1206,78 @@ static void sdhci_msm_set_mmc_drv_type(struct sdhci_host *host, u32 opcode, drv_type); } +#define IPCAT_MINOR_MASK(val) ((val & 0x0fff0000) >> 0x10) + +/* Enter sdcc debug mode */ +void sdhci_msm_enter_dbg_mode(struct sdhci_host *host) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_msm_host *msm_host = pltfm_host->priv; + struct platform_device *pdev = msm_host->pdev; + u32 enable_dbg_feature = 0; + u32 minor; + + minor = IPCAT_MINOR_MASK(readl_relaxed(host->ioaddr + + SDCC_IP_CATALOG)); + if (minor < 2 || msm_host->debug_mode_enabled) + return; + + /* Enable debug mode */ + writel_relaxed(ENABLE_DBG, + host->ioaddr + SDCC_TESTBUS_CONFIG); + writel_relaxed(DUMMY, + host->ioaddr + SDCC_DEBUG_EN_DIS_REG); + writel_relaxed((readl_relaxed(host->ioaddr + + SDCC_TESTBUS_CONFIG) | TESTBUS_EN), + host->ioaddr + SDCC_TESTBUS_CONFIG); + + if (minor >= 2) + enable_dbg_feature |= FSM_HISTORY | + AUTO_RECOVERY_DISABLE | + MM_TRIGGER_DISABLE | + IIB_EN; + + /* Enable particular feature */ + writel_relaxed((readl_relaxed(host->ioaddr + + SDCC_DEBUG_FEATURE_CFG_REG) | enable_dbg_feature), + host->ioaddr + SDCC_DEBUG_FEATURE_CFG_REG); + + /* Read back to ensure write went through */ + readl_relaxed(host->ioaddr + + SDCC_DEBUG_FEATURE_CFG_REG); + msm_host->debug_mode_enabled = true; + + dev_info(&pdev->dev, "Debug feature enabled 0x%08x\n", + readl_relaxed(host->ioaddr + + 
SDCC_DEBUG_FEATURE_CFG_REG)); +} + +/* Exit sdcc debug mode */ +void sdhci_msm_exit_dbg_mode(struct sdhci_host *host) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_msm_host *msm_host = pltfm_host->priv; + struct platform_device *pdev = msm_host->pdev; + u32 minor; + + minor = IPCAT_MINOR_MASK(readl_relaxed(host->ioaddr + + SDCC_IP_CATALOG)); + if (minor < 2 || !msm_host->debug_mode_enabled) + return; + + /* Exit debug mode */ + writel_relaxed(DISABLE_DBG, + host->ioaddr + SDCC_TESTBUS_CONFIG); + writel_relaxed(DUMMY, + host->ioaddr + SDCC_DEBUG_EN_DIS_REG); + + msm_host->debug_mode_enabled = false; + + dev_dbg(&pdev->dev, "Debug feature disabled 0x%08x\n", + readl_relaxed(host->ioaddr + + SDCC_DEBUG_FEATURE_CFG_REG)); +} + int sdhci_msm_execute_tuning(struct sdhci_host *host, u32 opcode) { unsigned long flags; @@ -1240,6 +1312,7 @@ int sdhci_msm_execute_tuning(struct sdhci_host *host, u32 opcode) */ if (msm_host->tuning_in_progress) return 0; + sdhci_msm_exit_dbg_mode(host); msm_host->tuning_in_progress = true; pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__); @@ -1433,6 +1506,7 @@ int sdhci_msm_execute_tuning(struct sdhci_host *host, u32 opcode) kfree: kfree(data_buf); out: + sdhci_msm_enter_dbg_mode(host); spin_lock_irqsave(&host->lock, flags); if (!rc) msm_host->tuning_done = true; @@ -3957,6 +4031,73 @@ static void sdhci_msm_cqe_dump_debug_ram(struct sdhci_host *host) pr_err("-------------------------\n"); } +#define DUMP_FSM readl_relaxed(host->ioaddr + SDCC_DEBUG_FSM_TRACE_RD_REG) +#define MAX_FSM 16 + +void sdhci_msm_dump_fsm_history(struct sdhci_host *host) +{ + u32 sel_fsm; + + pr_err("----------- FSM REGISTER DUMP -----------\n"); + /* select fsm to dump */ + for (sel_fsm = 0; sel_fsm <= MAX_FSM; sel_fsm++) { + writel_relaxed(sel_fsm, host->ioaddr + + SDCC_DEBUG_FSM_TRACE_CFG_REG); + pr_err(": selected fsm is 0x%08x\n", + readl_relaxed(host->ioaddr + + SDCC_DEBUG_FSM_TRACE_CFG_REG)); + /* dump selected fsm history */ + 
pr_err("0x%08x 0x%08x 0x%08x 0x%08x\n", + readl_relaxed(host->ioaddr + + SDCC_DEBUG_FSM_TRACE_RD_REG), + readl_relaxed(host->ioaddr + + SDCC_DEBUG_FSM_TRACE_RD_REG), + readl_relaxed(host->ioaddr + + SDCC_DEBUG_FSM_TRACE_RD_REG), + readl_relaxed(host->ioaddr + + SDCC_DEBUG_FSM_TRACE_RD_REG)); + pr_err("0x%08x 0x%08x 0x%08x\n", + readl_relaxed(host->ioaddr + + SDCC_DEBUG_FSM_TRACE_RD_REG), + readl_relaxed(host->ioaddr + + SDCC_DEBUG_FSM_TRACE_RD_REG), + readl_relaxed(host->ioaddr + + SDCC_DEBUG_FSM_TRACE_RD_REG)); + } + /* Flush all fsm history */ + writel_relaxed(DUMMY, host->ioaddr + + SDCC_DEBUG_FSM_TRACE_FIFO_FLUSH_REG); + + /* Exit from Auto recovery disable to move FSMs to idle state */ + writel_relaxed(DUMMY, host->ioaddr + + SDCC_DEBUG_ERROR_STATE_EXIT_REG); + +} + +void sdhci_msm_dump_desc_history(struct sdhci_host *host) +{ + pr_err("----------- DESC HISTORY DUMP -----------\n"); + pr_err("Current Desc Addr: 0x%08x | Info: 0x%08x\n", + readl_relaxed(host->ioaddr + SDCC_CURR_DESC_ADDR), + readl_relaxed(host->ioaddr + SDCC_CURR_DESC_INFO)); + pr_err("Processed Desc1 Addr: 0x%08x | Info: 0x%08x\n", + readl_relaxed(host->ioaddr + SDCC_PROC_DESC0_ADDR), + readl_relaxed(host->ioaddr + SDCC_PROC_DESC0_INFO)); + pr_err("Processed Desc2 Addr: 0x%08x | Info: 0x%08x\n", + readl_relaxed(host->ioaddr + SDCC_PROC_DESC1_ADDR), + readl_relaxed(host->ioaddr + SDCC_PROC_DESC1_INFO)); +} + +void sdhci_msm_dump_iib(struct sdhci_host *host) +{ + u32 iter; + + pr_err("----------- IIB HISTORY DUMP -----------\n"); + for (iter = 0; iter < 8; iter++) + pr_err("0x%08x\n", readl_relaxed(host->ioaddr + + SDCC_DEBUG_IIB_REG + (iter * 4))); +} + void sdhci_msm_dump_vendor_regs(struct sdhci_host *host) { struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); @@ -4014,6 +4155,14 @@ void sdhci_msm_dump_vendor_regs(struct sdhci_host *host) msm_host_offset->CORE_VENDOR_SPEC_FUNC2), readl_relaxed(host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC3)); + + if (msm_host->debug_mode_enabled) { + 
sdhci_msm_dump_fsm_history(host); + sdhci_msm_dump_desc_history(host); + } + /* Debug feature enable not must for iib */ + sdhci_msm_dump_iib(host); + /* * tbsel indicates [2:0] bits and tbsel2 indicates [7:4] bits * of CORE_TESTBUS_CONFIG register. @@ -4732,6 +4881,8 @@ static struct sdhci_ops sdhci_msm_ops = { .get_current_limit = sdhci_msm_get_current_limit, .notify_load = sdhci_msm_notify_load, .irq = sdhci_msm_cqe_irq, + .enter_dbg_mode = sdhci_msm_enter_dbg_mode, + .exit_dbg_mode = sdhci_msm_exit_dbg_mode, }; static void sdhci_set_default_hw_caps(struct sdhci_msm_host *msm_host, diff --git a/drivers/mmc/host/sdhci-msm.h b/drivers/mmc/host/sdhci-msm.h index 87f651ce79de..b0abbadb835d 100644 --- a/drivers/mmc/host/sdhci-msm.h +++ b/drivers/mmc/host/sdhci-msm.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2016-2020, The Linux Foundation. All rights reserved. * */ @@ -11,6 +11,67 @@ #include #include "sdhci-pltfm.h" +/* check IP CATALOG version */ +#define SDCC_IP_CATALOG 0x328 + +/* DBG register offsets */ +#define SDCC_TESTBUS_CONFIG 0x32C +#define SDCC_DEBUG_EN_DIS_REG 0x390 +#define SDCC_DEBUG_FEATURE_CFG_REG 0x394 +#define SDCC_DEBUG_FSM_TRACE_CFG_REG 0x398 +#define SDCC_DEBUG_FSM_TRACE_RD_REG 0x39C +#define SDCC_DEBUG_FSM_TRACE_FIFO_FLUSH_REG 0x3A0 +#define SDCC_DEBUG_PANIC_ERROR_EN_REG 0x3A4 +#define SDCC_DEBUG_ERROR_STATE_EXIT_REG 0x3B8 +#define SDCC_CURR_DESC_ADDR 0x3EC +#define SDCC_CURR_DESC_INFO 0x3F0 +#define SDCC_PROC_DESC0_ADDR 0x3E4 +#define SDCC_PROC_DESC0_INFO 0x3E8 +#define SDCC_PROC_DESC1_ADDR 0x3DC +#define SDCC_PROC_DESC1_INFO 0x3E0 +#define SDCC_DEBUG_IIB_REG 0x980 +#define SDCC_DEBUG_MASK_PATTERN_REG 0x3C0 +#define SDCC_DEBUG_MATCH_PATTERN_REG 0x3C4 +#define SDCC_DEBUG_MM_TB_CFG_REG 0x3BC + +#define ENABLE_DBG 0x35350000 +#define DISABLE_DBG 0x26260000 + +#define DUMMY 0x1 /* value doesn't matter */ + +/* Panic on Err */ +#define 
BOOT_ACK_REC_EN BIT(0) +#define BOOT_ACK_ERR_EN BIT(1) +#define BOOT_TIMEOUT_EN BIT(2) +#define AUTO_CMD19_TOUT_EN BIT(3) +#define STBITE_EN BIT(4) +#define CTOUT_EN BIT(5) +#define CCRCF_EN BIT(6) +#define CMD_END_BIT_ERR_EN BIT(7) +#define CMD_INDEX_ERR_EN BIT(8) +#define DTOUT_EN BIT(9) +#define DCRCF_EN BIT(10) +#define DATA_END_BIT_ERR_EN BIT(11) +#define CMDQ_HALT_ACK_INT_EN BIT(16) +#define CMDQ_TASK_COMPLETED_INT_EN BIT(17) +#define CMDQ_RESP_ERR_INT_EN BIT(18) +#define CMDQ_TASK_CLEARED_INT_EN BIT(19) +#define CMDQ_GENERAL_CRYPTO_ERROR_EN BIT(20) +#define CMDQ_INVALID_CRYPTO_CFG_ERROR_EN BIT(21) +#define CMDQ_DEVICE_EXCEPTION_INT_EN BIT(22) +#define ADMA_ERROR_EXT_EN BIT(23) +#define HC_NONCQ_ICE_INT_STATUS_MASKED_EN BIT(24) + +/* Select debug Feature */ +#define FSM_HISTORY BIT(0) +#define PANIC_ALERT BIT(1) +#define AUTO_RECOVERY_DISABLE BIT(2) +#define MM_TRIGGER_DISABLE BIT(3) +#define DESC_HISTORY BIT(4) +#define IIB_EN BIT(6) + +#define TESTBUS_EN BIT(31) + /* This structure keeps information per regulator */ struct sdhci_msm_reg_data { /* voltage regulator handle */ @@ -265,6 +326,7 @@ struct sdhci_msm_host { struct sdhci_msm_dll_hsr *dll_hsr; struct sdhci_msm_ice_data ice; u32 ice_clk_rate; + bool debug_mode_enabled; }; extern char *saved_command_line; diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index 47d31257dcbc..e7fd3ef5316f 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -2420,6 +2420,22 @@ static void sdhci_hw_reset(struct mmc_host *mmc) host->ops->hw_reset(host); } +static void sdhci_enter_dbg_mode(struct mmc_host *mmc) +{ + struct sdhci_host *host = mmc_priv(mmc); + + if (host->ops && host->ops->enter_dbg_mode) + host->ops->enter_dbg_mode(host); +} + +static void sdhci_exit_dbg_mode(struct mmc_host *mmc) +{ + struct sdhci_host *host = mmc_priv(mmc); + + if (host->ops && host->ops->exit_dbg_mode) + host->ops->exit_dbg_mode(host); +} + static void sdhci_enable_sdio_irq_nolock(struct sdhci_host 
*host, int enable) { u16 ctrl = 0; @@ -2970,6 +2986,8 @@ static const struct mmc_host_ops sdhci_ops = { .get_cd = sdhci_get_cd, .get_ro = sdhci_get_ro, .hw_reset = sdhci_hw_reset, + .enter_dbg_mode = sdhci_enter_dbg_mode, + .exit_dbg_mode = sdhci_exit_dbg_mode, .enable_sdio_irq = sdhci_enable_sdio_irq, .start_signal_voltage_switch = sdhci_start_signal_voltage_switch, .prepare_hs400_tuning = sdhci_prepare_hs400_tuning, diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h index 278ed0f16836..63201bc0c151 100644 --- a/drivers/mmc/host/sdhci.h +++ b/drivers/mmc/host/sdhci.h @@ -745,6 +745,8 @@ struct sdhci_ops { void (*pre_req)(struct sdhci_host *host, struct mmc_request *req); void (*post_req)(struct sdhci_host *host, struct mmc_request *req); unsigned int (*get_current_limit)(struct sdhci_host *host); + void (*enter_dbg_mode)(struct sdhci_host *host); + void (*exit_dbg_mode)(struct sdhci_host *host); }; #ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index 3387294fea8d..bc4a33dc46e7 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -199,6 +199,8 @@ struct mmc_host_ops { unsigned int max_dtr, int host_drv, int card_drv, int *drv_type); void (*hw_reset)(struct mmc_host *host); + void (*enter_dbg_mode)(struct mmc_host *host); + void (*exit_dbg_mode)(struct mmc_host *host); void (*card_event)(struct mmc_host *host); /* From df55581ea3ed04b857ebdd4fd000c37ab1183b68 Mon Sep 17 00:00:00 2001 From: Ram Prakash Gupta Date: Wed, 29 Jan 2020 09:45:06 +0530 Subject: [PATCH 04/90] mmc: core: Add core support for enabling debug feature Add core support for enabling sdcc debug feature. 
Change-Id: I9461000ba225736534d9d0e331bf37913485facd Signed-off-by: Ram Prakash Gupta --- drivers/mmc/core/bus.c | 2 ++ drivers/mmc/core/mmc.c | 2 ++ 2 files changed, 4 insertions(+) diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c index d405d974b9b2..de7bf65459dd 100644 --- a/drivers/mmc/core/bus.c +++ b/drivers/mmc/core/bus.c @@ -420,6 +420,8 @@ void mmc_remove_card(struct mmc_card *card) device_del(&card->dev); of_node_put(card->dev.of_node); } + if (host->ops->exit_dbg_mode) + host->ops->exit_dbg_mode(host); put_device(&card->dev); } diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index ec7eaafec14c..259d0bac47d7 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c @@ -2304,6 +2304,8 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr, if (!oldcard) host->card = card; + if (host->ops->enter_dbg_mode) + host->ops->enter_dbg_mode(host); return 0; From 17d86246cee2eb61b3aeb0d1e0a504148280acbf Mon Sep 17 00:00:00 2001 From: Ram Prakash Gupta Date: Fri, 17 Jan 2020 22:02:51 +0530 Subject: [PATCH 05/90] mmc: sdhci-msm: Add support for gcc hw reset for sdcc Add support for gcc hw reset for sdcc. Change-Id: I32c7fbfb1a97bb69924e1a0c6ea90a400dd35c43 Signed-off-by: Ram Prakash Gupta --- drivers/mmc/core/mmc.c | 5 +++ drivers/mmc/host/sdhci-msm.c | 80 ++++++++++++++++++++++++++++++++++-- drivers/mmc/host/sdhci-msm.h | 3 ++ 3 files changed, 85 insertions(+), 3 deletions(-) diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index 259d0bac47d7..d50f72530d57 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c @@ -2909,6 +2909,11 @@ static int _mmc_hw_reset(struct mmc_host *host) /* If the card accept RST_n signal, send it. 
*/ mmc_set_clock(host, host->f_init); host->ops->hw_reset(host); + /* + * Do a brute force power cycle as some controller do not + * have gpio support to power cycle card + */ + mmc_power_cycle(host, card->ocr); /* Set initial state and call mmc_set_ios */ mmc_set_initial_state(host); } else { diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c index 12a8b1e690c8..40af5ec58405 100644 --- a/drivers/mmc/host/sdhci-msm.c +++ b/drivers/mmc/host/sdhci-msm.c @@ -2054,6 +2054,23 @@ static int sdhci_msm_dt_parse_hsr_info(struct device *dev, return ret; } +int sdhci_msm_parse_reset_data(struct device *dev, + struct sdhci_msm_host *msm_host) +{ + int ret = 0; + + msm_host->core_reset = devm_reset_control_get(dev, + "core_reset"); + if (IS_ERR(msm_host->core_reset)) { + ret = PTR_ERR(msm_host->core_reset); + dev_err(dev, "core_reset unavailable,err = %d\n", + ret); + msm_host->core_reset = NULL; + } + + return ret; +} + /* Parse platform data */ static struct sdhci_msm_pltfm_data *sdhci_msm_populate_pdata(struct device *dev, @@ -2071,6 +2088,7 @@ struct sdhci_msm_pltfm_data *sdhci_msm_populate_pdata(struct device *dev, enum of_gpio_flags flags = OF_GPIO_ACTIVE_LOW; int bus_clk_table_len; u32 *bus_clk_table = NULL; + int ret = 0; pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); if (!pdata) @@ -2224,6 +2242,9 @@ struct sdhci_msm_pltfm_data *sdhci_msm_populate_pdata(struct device *dev, if (sdhci_msm_dt_parse_hsr_info(dev, msm_host)) goto out; + ret = sdhci_msm_parse_reset_data(dev, msm_host); + if (ret) + dev_err(dev, "Reset data parsing error\n"); return pdata; out: @@ -3309,7 +3330,8 @@ static void sdhci_msm_registers_save(struct sdhci_host *host) const struct sdhci_msm_offset *msm_host_offset = msm_host->offset; - if (!msm_host->regs_restore.is_supported) + if (!msm_host->regs_restore.is_supported && + !msm_host->reg_store) return; msm_host->regs_restore.vendor_func = readl_relaxed(host->ioaddr + @@ -3373,8 +3395,9 @@ static void 
sdhci_msm_registers_restore(struct sdhci_host *host) msm_host->offset; struct mmc_ios ios = host->mmc->ios; - if (!msm_host->regs_restore.is_supported || - !msm_host->regs_restore.is_valid) + if ((!msm_host->regs_restore.is_supported || + !msm_host->regs_restore.is_valid) && + !msm_host->reg_store) return; writel_relaxed(0, host->ioaddr + msm_host_offset->CORE_PWRCTL_MASK); @@ -4854,6 +4877,54 @@ static int sdhci_msm_notify_load(struct sdhci_host *host, enum mmc_load state) return 0; } +static void sdhci_msm_hw_reset(struct sdhci_host *host) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_msm_host *msm_host = pltfm_host->priv; + struct platform_device *pdev = msm_host->pdev; + int ret = -ENOTSUPP; + + if (!msm_host->core_reset) { + dev_err(&pdev->dev, "%s: failed, err = %d\n", __func__, + ret); + return; + } + + if (!msm_host->debug_mode_enabled) + return; + msm_host->reg_store = true; + sdhci_msm_exit_dbg_mode(host); + sdhci_msm_registers_save(host); + if (host->mmc->caps2 & MMC_CAP2_CQE) { + host->mmc->cqe_ops->cqe_disable(host->mmc); + host->mmc->cqe_enabled = false; + } + + ret = reset_control_assert(msm_host->core_reset); + if (ret) { + dev_err(&pdev->dev, "%s: core_reset assert failed, err = %d\n", + __func__, ret); + goto out; + } + + /* + * The hardware requirement for delay between assert/deassert + * is at least 3-4 sleep clock (32.7KHz) cycles, which comes to + * ~125us (4/32768). To be on the safe side add 200us delay. 
+ */ + usleep_range(200, 210); + + ret = reset_control_deassert(msm_host->core_reset); + if (ret) + dev_err(&pdev->dev, "%s: core_reset deassert failed, err = %d\n", + __func__, ret); + + sdhci_msm_registers_restore(host); + msm_host->reg_store = false; +out: + return; +} + static struct sdhci_ops sdhci_msm_ops = { .crypto_engine_cfg = sdhci_msm_ice_cfg, .crypto_engine_cfg_end = sdhci_msm_ice_cfg_end, @@ -4883,6 +4954,7 @@ static struct sdhci_ops sdhci_msm_ops = { .irq = sdhci_msm_cqe_irq, .enter_dbg_mode = sdhci_msm_enter_dbg_mode, .exit_dbg_mode = sdhci_msm_exit_dbg_mode, + .hw_reset = sdhci_msm_hw_reset, }; static void sdhci_set_default_hw_caps(struct sdhci_msm_host *msm_host, @@ -5462,6 +5534,8 @@ static int sdhci_msm_probe(struct platform_device *pdev) msm_host->mmc->caps2 |= MMC_CAP2_MAX_DISCARD_SIZE; msm_host->mmc->caps2 |= MMC_CAP2_SLEEP_AWAKE; msm_host->mmc->pm_caps |= MMC_PM_KEEP_POWER | MMC_PM_WAKE_SDIO_IRQ; + if (msm_host->core_reset) + msm_host->mmc->caps |= MMC_CAP_HW_RESET; if (msm_host->pdata->nonremovable) msm_host->mmc->caps |= MMC_CAP_NONREMOVABLE; diff --git a/drivers/mmc/host/sdhci-msm.h b/drivers/mmc/host/sdhci-msm.h index b0abbadb835d..82b02bff0573 100644 --- a/drivers/mmc/host/sdhci-msm.h +++ b/drivers/mmc/host/sdhci-msm.h @@ -9,6 +9,7 @@ #include #include +#include #include "sdhci-pltfm.h" /* check IP CATALOG version */ @@ -327,6 +328,8 @@ struct sdhci_msm_host { struct sdhci_msm_ice_data ice; u32 ice_clk_rate; bool debug_mode_enabled; + bool reg_store; + struct reset_control *core_reset; }; extern char *saved_command_line; From 2cdebc19bc33cd4234010be0d36b842060d9cc4c Mon Sep 17 00:00:00 2001 From: Ram Prakash Gupta Date: Fri, 24 Jan 2020 11:39:01 +0530 Subject: [PATCH 06/90] mmc: sdhci: Skip interrupt clear in case of err Skip interrupt clear in case of err so that driver do not loose interrupt status before register dump. 
Change-Id: I7597536f0b5ac231b9a8955b2f6c301b910075af Signed-off-by: Ram Prakash Gupta --- drivers/mmc/host/sdhci-msm.c | 10 ++++++++++ drivers/mmc/host/sdhci.c | 3 +++ 2 files changed, 13 insertions(+) diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c index 40af5ec58405..b6d9d8300d6d 100644 --- a/drivers/mmc/host/sdhci-msm.c +++ b/drivers/mmc/host/sdhci-msm.c @@ -2335,11 +2335,21 @@ static u32 sdhci_msm_cqe_irq(struct sdhci_host *host, u32 intmask) { int cmd_error = 0; int data_error = 0; + u32 mask; if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error)) return intmask; cqhci_irq(host->mmc, intmask, cmd_error, data_error); + + /* + * Clear selected interrupts in err case. + * as earlier driver skipped + */ + if (data_error || cmd_error) { + mask = intmask & host->cqe_ier; + sdhci_writel(host, mask, SDHCI_INT_STATUS); + } return 0; } diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index e7fd3ef5316f..b620ecd351c5 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -3989,9 +3989,12 @@ bool sdhci_cqe_irq(struct sdhci_host *host, u32 intmask, int *cmd_error, *data_error = 0; /* Clear selected interrupts. */ + if (*data_error || *cmd_error) + goto skip_intr_clear; mask = intmask & host->cqe_ier; sdhci_writel(host, mask, SDHCI_INT_STATUS); +skip_intr_clear: if (intmask & SDHCI_INT_BUS_POWER) pr_err("%s: Card is consuming too much power!\n", mmc_hostname(host->mmc)); From c89523bdc764c0de4ed3687c17a57fe07829b370 Mon Sep 17 00:00:00 2001 From: Ram Prakash Gupta Date: Fri, 24 Jan 2020 11:43:34 +0530 Subject: [PATCH 07/90] mmc: cqhci: Clear interrupt after err handling Clear interrupt after error handling in cqe. And also dump cqhci register in case of cqe error. 
Change-Id: I8bf1956f18de5e7d5b0a2cfe97983dc6f58c12b5 Signed-off-by: Ram Prakash Gupta --- drivers/mmc/host/cqhci.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/drivers/mmc/host/cqhci.c b/drivers/mmc/host/cqhci.c index ae2482711422..779a98b17929 100644 --- a/drivers/mmc/host/cqhci.c +++ b/drivers/mmc/host/cqhci.c @@ -901,14 +901,21 @@ irqreturn_t cqhci_irq(struct mmc_host *mmc, u32 intmask, int cmd_error, struct cqhci_host *cq_host = mmc->cqe_private; status = cqhci_readl(cq_host, CQHCI_IS); - cqhci_writel(cq_host, status, CQHCI_IS); pr_debug("%s: cqhci: IRQ status: 0x%08x\n", mmc_hostname(mmc), status); mmc_log_string(mmc, "CQIS: 0x%x cmd_error : %d data_err: %d\n", status, cmd_error, data_error); - if ((status & CQHCI_IS_RED) || cmd_error || data_error) + if ((status & CQHCI_IS_RED) || cmd_error || data_error) { + pr_err("%s: cqhci: error IRQ status: 0x%08x cmd error %d data error %d\n", + mmc_hostname(mmc), status, cmd_error, data_error); + cqhci_dumpregs(cq_host); + cqhci_writel(cq_host, status, CQHCI_IS); cqhci_error_irq(mmc, status, cmd_error, data_error); + } else { + /* Clear interrupt */ + cqhci_writel(cq_host, status, CQHCI_IS); + } if (status & CQHCI_IS_TCC) { /* read TCN and complete the request */ From 50c959aaacf8e2127e495cf68599966423d76185 Mon Sep 17 00:00:00 2001 From: Ram Prakash Gupta Date: Fri, 24 Jan 2020 11:46:24 +0530 Subject: [PATCH 08/90] mmc: core: Resume clk scaling after hw reset Fix clock scaling in hw reset path to resume clock scaling after hw reset. Also hw reset during mmc card initialization is not required. This may unnecessarily add latency in mmc card initialization upon addition of hw reset recovery support. 
Change-Id: I0c3580b6b113d4d1afad5a9e24fe9fec4deea5bc Signed-off-by: Ram Prakash Gupta --- drivers/mmc/core/mmc.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index d50f72530d57..c90247cab06c 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c @@ -2940,9 +2940,9 @@ static int _mmc_hw_reset(struct mmc_host *host) return ret; } - ret = mmc_suspend_clk_scaling(host); + ret = mmc_resume_clk_scaling(host); if (ret) { - pr_err("%s: %s: fail to suspend clock scaling (%d)\n", + pr_err("%s: %s: fail to resume clock scaling (%d)\n", mmc_hostname(host), __func__, ret); } return ret; From 29fc96a57e36a41f6446014b6753152e0bce9fa1 Mon Sep 17 00:00:00 2001 From: Ram Prakash Gupta Date: Wed, 22 Jan 2020 19:00:40 +0530 Subject: [PATCH 09/90] mmc: core: Set reset done after hw reset Set hw reset flag as done after sdcc hw reset else hw reset as part of recovery happens only once. Change-Id: I14915ae26240769b45cec8d2793e5f8604062eef Signed-off-by: Ram Prakash Gupta --- drivers/mmc/core/block.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index f1953c92c500..e37e2118d9b9 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -1543,8 +1543,7 @@ void mmc_blk_cqe_recovery(struct mmc_queue *mq) err = mmc_cqe_recovery(host); if (err) mmc_blk_reset(mq->blkdata, host, MMC_BLK_CQE_RECOVERY); - else - mmc_blk_reset_success(mq->blkdata, MMC_BLK_CQE_RECOVERY); + mmc_blk_reset_success(mq->blkdata, MMC_BLK_CQE_RECOVERY); pr_debug("%s: CQE recovery done\n", mmc_hostname(host)); } From 7bdf603aa3f952bfa39d1ec147703bafb0a2d981 Mon Sep 17 00:00:00 2001 From: Harshdeep Dhatt Date: Tue, 4 Dec 2018 13:59:44 -0700 Subject: [PATCH 10/90] msm: kgsl: Correctly handle oob and fenced write failures These both happen when gx is collapsed so its safe to assume that nothing is inflight. 
Take an inline snapshot and then request for a dispatcher based recovery. Change-Id: I4a3438660953d38406dd8e70868fb38172f9e967 Signed-off-by: Harshdeep Dhatt --- drivers/gpu/msm/adreno.h | 3 ++- drivers/gpu/msm/adreno_a6xx_preempt.c | 9 +++++---- drivers/gpu/msm/adreno_ringbuffer.c | 16 +++++++++++----- 3 files changed, 18 insertions(+), 10 deletions(-) diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h index 670011e2ce78..aebd661788e7 100644 --- a/drivers/gpu/msm/adreno.h +++ b/drivers/gpu/msm/adreno.h @@ -1736,8 +1736,9 @@ static inline int adreno_perfcntr_active_oob_get(struct kgsl_device *device) if (!ret) { ret = gmu_core_dev_oob_set(device, oob_perfcntr); if (ret) { + gmu_core_snapshot(device); adreno_set_gpu_fault(ADRENO_DEVICE(device), - ADRENO_GMU_FAULT); + ADRENO_GMU_FAULT_SKIP_SNAPSHOT); adreno_dispatcher_schedule(device); kgsl_active_count_put(device); } diff --git a/drivers/gpu/msm/adreno_a6xx_preempt.c b/drivers/gpu/msm/adreno_a6xx_preempt.c index 8c44c4a44adc..065699886f81 100644 --- a/drivers/gpu/msm/adreno_a6xx_preempt.c +++ b/drivers/gpu/msm/adreno_a6xx_preempt.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. 
*/ #include "adreno.h" @@ -388,10 +388,11 @@ void a6xx_preemption_trigger(struct adreno_device *adreno_dev) return; err: - - /* If fenced write fails, set the fault and trigger recovery */ + /* If fenced write fails, take inline snapshot and trigger recovery */ + if (!in_interrupt()) + gmu_core_snapshot(device); adreno_set_preempt_state(adreno_dev, ADRENO_PREEMPT_NONE); - adreno_set_gpu_fault(adreno_dev, ADRENO_GMU_FAULT); + adreno_set_gpu_fault(adreno_dev, ADRENO_GMU_FAULT_SKIP_SNAPSHOT); adreno_dispatcher_schedule(device); /* Clear the keep alive */ if (gmu_core_isenabled(device)) diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c index 9cb9ec457c38..1a65634bc5bd 100644 --- a/drivers/gpu/msm/adreno_ringbuffer.c +++ b/drivers/gpu/msm/adreno_ringbuffer.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2002,2007-2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2002,2007-2020, The Linux Foundation. All rights reserved. */ #include @@ -74,6 +74,7 @@ static void adreno_get_submit_time(struct adreno_device *adreno_dev, static void adreno_ringbuffer_wptr(struct adreno_device *adreno_dev, struct adreno_ringbuffer *rb) { + struct kgsl_device *device = KGSL_DEVICE(adreno_dev); unsigned long flags; int ret = 0; @@ -85,7 +86,7 @@ static void adreno_ringbuffer_wptr(struct adreno_device *adreno_dev, * Let the pwrscale policy know that new commands have * been submitted. */ - kgsl_pwrscale_busy(KGSL_DEVICE(adreno_dev)); + kgsl_pwrscale_busy(device); /* * Ensure the write posted after a possible @@ -110,9 +111,14 @@ static void adreno_ringbuffer_wptr(struct adreno_device *adreno_dev, spin_unlock_irqrestore(&rb->preempt_lock, flags); if (ret) { - /* If WPTR update fails, set the fault and trigger recovery */ - adreno_set_gpu_fault(adreno_dev, ADRENO_GMU_FAULT); - adreno_dispatcher_schedule(KGSL_DEVICE(adreno_dev)); + /* + * If WPTR update fails, take inline snapshot and trigger + * recovery. 
+ */ + gmu_core_snapshot(device); + adreno_set_gpu_fault(adreno_dev, + ADRENO_GMU_FAULT_SKIP_SNAPSHOT); + adreno_dispatcher_schedule(device); } } From 959a48457cd9e4f057d94522ed0987d0e7575825 Mon Sep 17 00:00:00 2001 From: Ram Prakash Gupta Date: Sun, 15 Mar 2020 18:44:55 +0530 Subject: [PATCH 11/90] mmc: cqhci: Enable ICE error interrupts Enable general and invalid crypto error encounterd with command queue engine during data transfer. Change-Id: Idfaefcc44ece3144896619d6426bfc70eb4cc2cf Signed-off-by: Ram Prakash Gupta --- drivers/mmc/host/cqhci.c | 10 +++++++--- drivers/mmc/host/cqhci.h | 7 +++++-- include/linux/mmc/host.h | 1 + 3 files changed, 13 insertions(+), 5 deletions(-) diff --git a/drivers/mmc/host/cqhci.c b/drivers/mmc/host/cqhci.c index 779a98b17929..e9a52f13ebb7 100644 --- a/drivers/mmc/host/cqhci.c +++ b/drivers/mmc/host/cqhci.c @@ -790,8 +790,10 @@ static void cqhci_error_irq(struct mmc_host *mmc, u32 status, int cmd_error, terri = cqhci_readl(cq_host, CQHCI_TERRI); - pr_debug("%s: cqhci: error IRQ status: 0x%08x cmd error %d data error %d TERRI: 0x%08x\n", + pr_err("%s: cqhci: error IRQ status: 0x%08x cmd error %d data error %d TERRI: 0x%08x\n", mmc_hostname(mmc), status, cmd_error, data_error, terri); + mmc_log_string(mmc, "%s: cqhci: status:0x%08x TERRI:0x%08x\n", + mmc_hostname(mmc), status, terri); /* Forget about errors when recovery has already been triggered */ if (cq_host->recovery_halt) @@ -896,20 +898,22 @@ static void cqhci_finish_mrq(struct mmc_host *mmc, unsigned int tag) irqreturn_t cqhci_irq(struct mmc_host *mmc, u32 intmask, int cmd_error, int data_error) { - u32 status; + u32 status, ice_err; unsigned long tag = 0, comp_status; struct cqhci_host *cq_host = mmc->cqe_private; status = cqhci_readl(cq_host, CQHCI_IS); + ice_err = status & (CQHCI_IS_GCE | CQHCI_IS_ICCE); pr_debug("%s: cqhci: IRQ status: 0x%08x\n", mmc_hostname(mmc), status); mmc_log_string(mmc, "CQIS: 0x%x cmd_error : %d data_err: %d\n", status, cmd_error, data_error); 
- if ((status & CQHCI_IS_RED) || cmd_error || data_error) { + if ((status & CQHCI_IS_RED) || cmd_error || data_error || ice_err) { pr_err("%s: cqhci: error IRQ status: 0x%08x cmd error %d data error %d\n", mmc_hostname(mmc), status, cmd_error, data_error); cqhci_dumpregs(cq_host); + mmc->need_hw_reset = true; cqhci_writel(cq_host, status, CQHCI_IS); cqhci_error_irq(mmc, status, cmd_error, data_error); } else { diff --git a/drivers/mmc/host/cqhci.h b/drivers/mmc/host/cqhci.h index 6b4351788e70..024c81b6d8dc 100644 --- a/drivers/mmc/host/cqhci.h +++ b/drivers/mmc/host/cqhci.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0-only */ -/* Copyright (c) 2015-2019, The Linux Foundation. All rights reserved. +/* Copyright (c) 2015-2020, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -50,8 +50,11 @@ #define CQHCI_IS_TCC BIT(1) #define CQHCI_IS_RED BIT(2) #define CQHCI_IS_TCL BIT(3) +#define CQHCI_IS_GCE BIT(4) +#define CQHCI_IS_ICCE BIT(5) -#define CQHCI_IS_MASK (CQHCI_IS_TCC | CQHCI_IS_RED) +#define CQHCI_IS_MASK (CQHCI_IS_TCC | CQHCI_IS_RED | \ + CQHCI_IS_GCE | CQHCI_IS_ICCE) /* interrupt status enable */ #define CQHCI_ISTE 0x14 diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index bc4a33dc46e7..cd5410d2c922 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -632,6 +632,7 @@ struct mmc_host { bool inlinecrypt_reset_needed; /* Inline crypto reset */ bool crash_on_err; /* crash the system on error */ + bool need_hw_reset; atomic_t active_reqs; unsigned long private[0] ____cacheline_aligned; }; From 4fd00d1e48f770261ab3a2ecea02fcbd1deaa9f0 Mon Sep 17 00:00:00 2001 From: Vipin Deep Kaur Date: Fri, 27 Mar 2020 17:27:26 +0530 Subject: [PATCH 12/90] i2c: i2c-qcom-geni: Correct the IEOB/IEOT sequence for I2C GSI tres Correct the IEOB/IEOT sequence of I2C GSI tres handling the functionality and memory 
leak cases. Change-Id: I001381bc1a35667b16b603854257e210d38723ff Signed-off-by: Vipin Deep Kaur --- drivers/dma/qcom/gpi.c | 103 +++++++++++++++++++++-------- drivers/i2c/busses/i2c-qcom-geni.c | 37 ++++++++--- 2 files changed, 104 insertions(+), 36 deletions(-) diff --git a/drivers/dma/qcom/gpi.c b/drivers/dma/qcom/gpi.c index 811e3090165d..22cbca7133fc 100644 --- a/drivers/dma/qcom/gpi.c +++ b/drivers/dma/qcom/gpi.c @@ -582,7 +582,7 @@ struct gpii { struct gpi_reg_table dbg_reg_table; bool reg_table_dump; u32 dbg_gpi_irq_cnt; - bool ieob_set; + bool unlock_tre_set; }; struct gpi_desc { @@ -1449,6 +1449,22 @@ static void gpi_process_qup_notif_event(struct gpii_chan *gpii_chan, client_info->cb_param); } +/* free gpi_desc for the specified channel */ +static void gpi_free_chan_desc(struct gpii_chan *gpii_chan) +{ + struct virt_dma_desc *vd; + struct gpi_desc *gpi_desc; + unsigned long flags; + + spin_lock_irqsave(&gpii_chan->vc.lock, flags); + vd = vchan_next_desc(&gpii_chan->vc); + gpi_desc = to_gpi_desc(vd); + list_del(&vd->node); + spin_unlock_irqrestore(&gpii_chan->vc.lock, flags); + kfree(gpi_desc); + gpi_desc = NULL; +} + /* process DMA Immediate completion data events */ static void gpi_process_imed_data_event(struct gpii_chan *gpii_chan, struct immediate_data_event *imed_event) @@ -1462,6 +1478,7 @@ static void gpi_process_imed_data_event(struct gpii_chan *gpii_chan, struct msm_gpi_dma_async_tx_cb_param *tx_cb_param; unsigned long flags; u32 chid; + struct gpii_chan *gpii_tx_chan = &gpii->gpii_chan[GPI_TX_CHAN]; /* * If channel not active don't process event but let @@ -1514,12 +1531,33 @@ static void gpi_process_imed_data_event(struct gpii_chan *gpii_chan, /* make sure rp updates are immediately visible to all cores */ smp_wmb(); + /* + * If unlock tre is present, don't send transfer callback on + * on IEOT, wait for unlock IEOB. Free the respective channel + * descriptors. 
+ * If unlock is not present, IEOB indicates freeing the descriptor + * and IEOT indicates channel transfer completion. + */ chid = imed_event->chid; - if (imed_event->code == MSM_GPI_TCE_EOT && gpii->ieob_set) { - if (chid == GPI_RX_CHAN) - goto gpi_free_desc; - else + if (gpii->unlock_tre_set) { + if (chid == GPI_RX_CHAN) { + if (imed_event->code == MSM_GPI_TCE_EOT) + goto gpi_free_desc; + else if (imed_event->code == MSM_GPI_TCE_UNEXP_ERR) + /* + * In case of an error in a read transfer on a + * shared se, unlock tre will not be processed + * as channels go to bad state so tx desc should + * be freed manually. + */ + gpi_free_chan_desc(gpii_tx_chan); + else + return; + } else if (imed_event->code == MSM_GPI_TCE_EOT) { return; + } + } else if (imed_event->code == MSM_GPI_TCE_EOB) { + goto gpi_free_desc; } tx_cb_param = vd->tx.callback_param; @@ -1539,11 +1577,7 @@ static void gpi_process_imed_data_event(struct gpii_chan *gpii_chan, } gpi_free_desc: - spin_lock_irqsave(&gpii_chan->vc.lock, flags); - list_del(&vd->node); - spin_unlock_irqrestore(&gpii_chan->vc.lock, flags); - kfree(gpi_desc); - gpi_desc = NULL; + gpi_free_chan_desc(gpii_chan); } /* processing transfer completion events */ @@ -1558,6 +1592,7 @@ static void gpi_process_xfer_compl_event(struct gpii_chan *gpii_chan, struct gpi_desc *gpi_desc; unsigned long flags; u32 chid; + struct gpii_chan *gpii_tx_chan = &gpii->gpii_chan[GPI_TX_CHAN]; /* only process events on active channel */ if (unlikely(gpii_chan->pm_state != ACTIVE_STATE)) { @@ -1602,12 +1637,33 @@ static void gpi_process_xfer_compl_event(struct gpii_chan *gpii_chan, /* update must be visible to other cores */ smp_wmb(); + /* + * If unlock tre is present, don't send transfer callback on + * on IEOT, wait for unlock IEOB. Free the respective channel + * descriptors. + * If unlock is not present, IEOB indicates freeing the descriptor + * and IEOT indicates channel transfer completion. 
+ */ chid = compl_event->chid; - if (compl_event->code == MSM_GPI_TCE_EOT && gpii->ieob_set) { - if (chid == GPI_RX_CHAN) - goto gpi_free_desc; - else + if (gpii->unlock_tre_set) { + if (chid == GPI_RX_CHAN) { + if (compl_event->code == MSM_GPI_TCE_EOT) + goto gpi_free_desc; + else if (compl_event->code == MSM_GPI_TCE_UNEXP_ERR) + /* + * In case of an error in a read transfer on a + * shared se, unlock tre will not be processed + * as channels go to bad state so tx desc should + * be freed manually. + */ + gpi_free_chan_desc(gpii_tx_chan); + else + return; + } else if (compl_event->code == MSM_GPI_TCE_EOT) { return; + } + } else if (compl_event->code == MSM_GPI_TCE_EOB) { + goto gpi_free_desc; } tx_cb_param = vd->tx.callback_param; @@ -1623,11 +1679,7 @@ static void gpi_process_xfer_compl_event(struct gpii_chan *gpii_chan, } gpi_free_desc: - spin_lock_irqsave(&gpii_chan->vc.lock, flags); - list_del(&vd->node); - spin_unlock_irqrestore(&gpii_chan->vc.lock, flags); - kfree(gpi_desc); - gpi_desc = NULL; + gpi_free_chan_desc(gpii_chan); } @@ -2325,7 +2377,7 @@ struct dma_async_tx_descriptor *gpi_prep_slave_sg(struct dma_chan *chan, void *tre, *wp = NULL; const gfp_t gfp = GFP_ATOMIC; struct gpi_desc *gpi_desc; - gpii->ieob_set = false; + u32 tre_type; GPII_VERB(gpii, gpii_chan->chid, "enter\n"); @@ -2362,13 +2414,12 @@ struct dma_async_tx_descriptor *gpi_prep_slave_sg(struct dma_chan *chan, for_each_sg(sgl, sg, sg_len, i) { tre = sg_virt(sg); - /* Check if last tre has ieob set */ + /* Check if last tre is an unlock tre */ if (i == sg_len - 1) { - if ((((struct msm_gpi_tre *)tre)->dword[3] & - GPI_IEOB_BMSK) >> GPI_IEOB_BMSK_SHIFT) - gpii->ieob_set = true; - else - gpii->ieob_set = false; + tre_type = + MSM_GPI_TRE_TYPE(((struct msm_gpi_tre *)tre)); + gpii->unlock_tre_set = + tre_type == MSM_GPI_TRE_UNLOCK ? 
true : false; } for (j = 0; j < sg->length; diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c index 862128de4c5c..f90553bf2069 100644 --- a/drivers/i2c/busses/i2c-qcom-geni.c +++ b/drivers/i2c/busses/i2c-qcom-geni.c @@ -405,12 +405,8 @@ static void gi2c_gsi_tx_cb(void *ptr) struct msm_gpi_dma_async_tx_cb_param *tx_cb = ptr; struct geni_i2c_dev *gi2c = tx_cb->userdata; - if (tx_cb->completion_code == MSM_GPI_TCE_EOB) { - complete(&gi2c->xfer); - } else if (!(gi2c->cur->flags & I2C_M_RD)) { - gi2c_gsi_cb_err(tx_cb, "TX"); - complete(&gi2c->xfer); - } + gi2c_gsi_cb_err(tx_cb, "TX"); + complete(&gi2c->xfer); } static void gi2c_gsi_rx_cb(void *ptr) @@ -480,7 +476,7 @@ static int geni_i2c_gsi_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], lock_t->dword[2] = MSM_GPI_LOCK_TRE_DWORD2; lock_t->dword[3] = MSM_GPI_LOCK_TRE_DWORD3(0, 0, 0, 0, 1); - /* unlock */ + /* unlock tre: ieob set */ unlock_t->dword[0] = MSM_GPI_UNLOCK_TRE_DWORD0; unlock_t->dword[1] = MSM_GPI_UNLOCK_TRE_DWORD1; unlock_t->dword[2] = MSM_GPI_UNLOCK_TRE_DWORD2; @@ -535,12 +531,14 @@ static int geni_i2c_gsi_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], segs++; sg_init_table(gi2c->tx_sg, segs); if (i == 0) + /* Send lock tre for first transfer in a msg */ sg_set_buf(&gi2c->tx_sg[index++], &gi2c->lock_t, sizeof(gi2c->lock_t)); } else { sg_init_table(gi2c->tx_sg, segs); } + /* Send cfg tre when cfg not sent already */ if (!gi2c->cfg_sent) { sg_set_buf(&gi2c->tx_sg[index++], &gi2c->cfg0_t, sizeof(gi2c->cfg0_t)); @@ -553,12 +551,21 @@ static int geni_i2c_gsi_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], if (msgs[i].flags & I2C_M_RD) { go_t->dword[2] = MSM_GPI_I2C_GO_TRE_DWORD2(msgs[i].len); - go_t->dword[3] = MSM_GPI_I2C_GO_TRE_DWORD3(1, 0, 0, 0, - 0); + /* + * For Rx Go tre: Set ieob for non-shared se and for all + * but last transfer in shared se + */ + if (!gi2c->is_shared || (gi2c->is_shared && i != num-1)) + go_t->dword[3] = 
MSM_GPI_I2C_GO_TRE_DWORD3(1, 0, + 0, 1, 0); + else + go_t->dword[3] = MSM_GPI_I2C_GO_TRE_DWORD3(1, 0, + 0, 0, 0); } else { + /* For Tx Go tre: ieob is not set, chain bit is set */ go_t->dword[2] = MSM_GPI_I2C_GO_TRE_DWORD2(0); go_t->dword[3] = MSM_GPI_I2C_GO_TRE_DWORD3(0, 0, 0, 0, - 1); + 1); } sg_set_buf(&gi2c->tx_sg[index++], &gi2c->go_t, @@ -591,6 +598,7 @@ static int geni_i2c_gsi_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], MSM_GPI_DMA_W_BUFFER_TRE_DWORD1(gi2c->rx_ph); gi2c->rx_t.dword[2] = MSM_GPI_DMA_W_BUFFER_TRE_DWORD2(msgs[i].len); + /* Set ieot for all Rx/Tx DMA tres */ gi2c->rx_t.dword[3] = MSM_GPI_DMA_W_BUFFER_TRE_DWORD3(0, 0, 1, 0, 0); @@ -641,6 +649,10 @@ static int geni_i2c_gsi_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], gi2c->tx_t.dword[2] = MSM_GPI_DMA_W_BUFFER_TRE_DWORD2(msgs[i].len); if (gi2c->is_shared && i == num-1) + /* + * For Tx: unlock tre is send for last transfer + * so set chain bit for last transfer DMA tre. + */ gi2c->tx_t.dword[3] = MSM_GPI_DMA_W_BUFFER_TRE_DWORD3(0, 0, 1, 0, 1); else @@ -652,6 +664,7 @@ static int geni_i2c_gsi_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], } if (gi2c->is_shared && i == num-1) { + /* Send unlock tre at the end of last transfer */ sg_set_buf(&gi2c->tx_sg[index++], &gi2c->unlock_t, sizeof(gi2c->unlock_t)); } @@ -689,6 +702,10 @@ static int geni_i2c_gsi_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], dmaengine_terminate_all(gi2c->tx_c); gi2c->cfg_sent = 0; } + if (gi2c->is_shared) + /* Resend cfg tre for every new message on shared se */ + gi2c->cfg_sent = 0; + if (msgs[i].flags & I2C_M_RD) geni_se_iommu_unmap_buf(rx_dev, &gi2c->rx_ph, msgs[i].len, DMA_FROM_DEVICE); From 331cbeb2054f3e253edb76e1e138d5a6e0efdef5 Mon Sep 17 00:00:00 2001 From: Vatsal Bucha Date: Thu, 12 Mar 2020 18:21:35 +0530 Subject: [PATCH 13/90] drivers: fsa4480: Fix USB re-enumeration failure after PDR USB re-enumeration failure is coming post PDR due to switch settings being reset in fsa unreg notifier. 
Reset switch settings only for audio adapter to resolve issue. Change-Id: I2462a2f1d3679adbd71dd1a7abd30726f7c68e6a Signed-off-by: Vatsal Bucha --- drivers/soc/qcom/fsa4480-i2c.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/soc/qcom/fsa4480-i2c.c b/drivers/soc/qcom/fsa4480-i2c.c index dbd11fb3c22e..cfc61d9d42de 100644 --- a/drivers/soc/qcom/fsa4480-i2c.c +++ b/drivers/soc/qcom/fsa4480-i2c.c @@ -254,7 +254,7 @@ int fsa4480_unreg_notifier(struct notifier_block *nb, goto done; } /* Do not reset switch settings for usb digital hs */ - if (mode.intval != POWER_SUPPLY_TYPEC_SINK) + if (mode.intval == POWER_SUPPLY_TYPEC_SINK_AUDIO_ADAPTER) fsa4480_usbc_update_settings(fsa_priv, 0x18, 0x98); rc = blocking_notifier_chain_unregister (&fsa_priv->fsa4480_notifier, nb); From a2095f8bcfd9c64f9d9b09942055156fb43b1a88 Mon Sep 17 00:00:00 2001 From: Chandana Kishori Chiluveru Date: Wed, 1 Apr 2020 11:55:09 +0530 Subject: [PATCH 14/90] Revert "serial: msm_geni_serial: Change sequence for stop_rx" This reverts commit 9f39d8eba8501b4a9cc6aec29870cfcf34cf1c09. 
Change-Id: I879b3f68bad9f4c3e4d2207be251ee4189d6923b Signed-off-by: Chandana Kishori Chiluveru --- drivers/tty/serial/msm_geni_serial.c | 106 +++++++++------------------ 1 file changed, 33 insertions(+), 73 deletions(-) diff --git a/drivers/tty/serial/msm_geni_serial.c b/drivers/tty/serial/msm_geni_serial.c index 1a2cd960db02..2358ba971160 100644 --- a/drivers/tty/serial/msm_geni_serial.c +++ b/drivers/tty/serial/msm_geni_serial.c @@ -195,7 +195,7 @@ static unsigned int msm_geni_serial_tx_empty(struct uart_port *port); static int msm_geni_serial_power_on(struct uart_port *uport); static void msm_geni_serial_power_off(struct uart_port *uport); static int msm_geni_serial_poll_bit(struct uart_port *uport, - int offset, u32 bit_field, bool set); + int offset, int bit_field, bool set); static void msm_geni_serial_stop_rx(struct uart_port *uport); static int msm_geni_serial_runtime_resume(struct device *dev); static int msm_geni_serial_runtime_suspend(struct device *dev); @@ -562,7 +562,7 @@ static void msm_geni_serial_power_off(struct uart_port *uport) } static int msm_geni_serial_poll_bit(struct uart_port *uport, - int offset, u32 bit_field, bool set) + int offset, int bit_field, bool set) { int iter = 0; unsigned int reg; @@ -572,7 +572,6 @@ static int msm_geni_serial_poll_bit(struct uart_port *uport, unsigned int baud = 115200; unsigned int fifo_bits = DEF_FIFO_DEPTH_WORDS * DEF_FIFO_WIDTH_BITS; unsigned long total_iter = 1000; - u32 set_bits = 0; if (uport->private_data && !uart_console(uport)) { @@ -589,8 +588,7 @@ static int msm_geni_serial_poll_bit(struct uart_port *uport, while (iter < total_iter) { reg = geni_read_reg_nolog(uport->membase, offset); - set_bits = reg & bit_field; - cond = (set_bits == bit_field); + cond = reg & bit_field; if (cond == set) { met = true; break; @@ -650,6 +648,26 @@ static void msm_geni_serial_abort_rx(struct uart_port *uport) geni_write_reg(FORCE_DEFAULT, uport->membase, GENI_FORCE_DEFAULT_REG); } +static void 
msm_geni_serial_complete_rx_eot(struct uart_port *uport) +{ + int poll_done = 0, tries = 0; + struct msm_geni_serial_port *port = GET_DEV_PORT(uport); + + do { + poll_done = msm_geni_serial_poll_bit(uport, SE_DMA_RX_IRQ_STAT, + RX_EOT, true); + tries++; + } while (!poll_done && tries < 5); + + if (!poll_done) + IPC_LOG_MSG(port->ipc_log_misc, + "%s: RX_EOT, GENI:0x%x, DMA_DEBUG:0x%x\n", __func__, + geni_read_reg_nolog(uport->membase, SE_GENI_STATUS), + geni_read_reg_nolog(uport->membase, SE_DMA_DEBUG_REG0)); + else + geni_write_reg_nolog(RX_EOT, uport->membase, SE_DMA_RX_IRQ_CLR); +} + #ifdef CONFIG_CONSOLE_POLL static int msm_geni_serial_get_char(struct uart_port *uport) { @@ -1239,10 +1257,8 @@ static void stop_rx_sequencer(struct uart_port *uport) unsigned int geni_m_irq_en; unsigned int geni_status; struct msm_geni_serial_port *port = GET_DEV_PORT(uport); + u32 irq_clear = S_CMD_CANCEL_EN; bool done; - u32 geni_s_irq_status; - u32 dma_rx_irq_stat; - u32 DMA_RX_CANCEL_BIT; if (port->xfer_mode == FIFO_MODE) { geni_s_irq_en = geni_read_reg_nolog(uport->membase, @@ -1263,22 +1279,6 @@ static void stop_rx_sequencer(struct uart_port *uport) if (!(geni_status & S_GENI_CMD_ACTIVE)) goto exit_rx_seq; - /* - * Clear previous unhandled interrupts, because in VI experiment it's - * observed that previous interrupts bits can cause problem to - * successive commands given to sequencers. - * To do: Handle RX EOT bit properly by restarting the DMA engine. 
- */ - geni_s_irq_status = geni_read_reg_nolog(uport->membase, - SE_GENI_S_IRQ_STATUS); - geni_write_reg_nolog(geni_s_irq_status, uport->membase, - SE_GENI_S_IRQ_CLEAR); - if (port->xfer_mode == SE_DMA) { - dma_rx_irq_stat = geni_read_reg_nolog(uport->membase, - SE_DMA_RX_IRQ_STAT); - geni_write_reg_nolog(dma_rx_irq_stat, - uport->membase, SE_DMA_RX_IRQ_CLR); - } IPC_LOG_MSG(port->ipc_log_misc, "%s: Start 0x%x\n", __func__, geni_status); @@ -1288,58 +1288,18 @@ static void stop_rx_sequencer(struct uart_port *uport) * cancel control bit. */ mb(); - if (port->xfer_mode == SE_DMA) { - /* - * QUPS which has HW version <= 1.2 11th bit of - * SE_DMA_RX_IRQ_STAT register denotes DMA_RX_CANCEL_BIT. - */ - DMA_RX_CANCEL_BIT = ((port->ver_info.hw_major_ver <= 1) - && (port->ver_info.hw_minor_ver <= 2)) ? - BIT(11) : BIT(14); + if (!uart_console(uport)) + msm_geni_serial_complete_rx_eot(uport); - done = msm_geni_serial_poll_bit(uport, SE_DMA_RX_IRQ_STAT, - DMA_RX_CANCEL_BIT | RX_EOT | RX_DMA_DONE, true); - if (done) { - dma_rx_irq_stat = geni_read_reg_nolog(uport->membase, - SE_DMA_RX_IRQ_STAT); - geni_write_reg_nolog(dma_rx_irq_stat, - uport->membase, SE_DMA_RX_IRQ_CLR); - geni_s_irq_status = geni_read_reg_nolog(uport->membase, - SE_GENI_S_IRQ_STATUS); - geni_write_reg_nolog(geni_s_irq_status, uport->membase, + done = msm_geni_serial_poll_bit(uport, SE_GENI_S_CMD_CTRL_REG, + S_GENI_CMD_CANCEL, false); + if (done) { + geni_write_reg_nolog(irq_clear, uport->membase, SE_GENI_S_IRQ_CLEAR); - } else { - geni_status = geni_read_reg_nolog(uport->membase, - SE_GENI_STATUS); - dma_rx_irq_stat = geni_read_reg_nolog(uport->membase, - SE_DMA_RX_IRQ_STAT); - geni_s_irq_status = geni_read_reg_nolog(uport->membase, - SE_GENI_S_IRQ_STATUS); - IPC_LOG_MSG(port->ipc_log_misc, - "%s: Cancel fail: dma_irq_sts: 0x%x s_irq_sts: 0x%x\n", - __func__, dma_rx_irq_stat, geni_s_irq_status); - geni_se_dump_dbg_regs(&port->serial_rsc, uport->membase, - port->ipc_log_misc); - } + goto exit_rx_seq; } else 
{ - done = msm_geni_serial_poll_bit(uport, SE_GENI_S_IRQ_STATUS, - S_CMD_CANCEL_EN | S_CMD_DONE_EN, true); - if (done) { - geni_s_irq_status = geni_read_reg_nolog(uport->membase, - SE_GENI_S_IRQ_STATUS); - geni_write_reg_nolog(geni_s_irq_status, uport->membase, - SE_GENI_S_IRQ_CLEAR); - } else { - geni_status = geni_read_reg_nolog(uport->membase, - SE_GENI_STATUS); - geni_s_irq_status = geni_read_reg_nolog(uport->membase, - SE_GENI_S_IRQ_STATUS); - IPC_LOG_MSG(port->console_log, - "%s Cancel fail s_irq_status: 0x%x\n", __func__, - geni_s_irq_status); - geni_se_dump_dbg_regs(&port->serial_rsc, uport->membase, - port->console_log); - } + IPC_LOG_MSG(port->ipc_log_misc, "%s Cancel fail 0x%x\n", + __func__, geni_status); } geni_status = geni_read_reg_nolog(uport->membase, SE_GENI_STATUS); From 33587af80595cd99fe3310c3a01b0ecec5904ecc Mon Sep 17 00:00:00 2001 From: Ashok Vuyyuru Date: Fri, 24 May 2019 16:52:13 +0530 Subject: [PATCH 15/90] msm: ipa3: Update holb config on WLAN & USB CONS ep After configuring WLAN CONS ep or USB ep there is a chance of IPA hardware stall, if WLAN ep or USB ep is not pulling data fast enough. So, set holb on WLAN CONS ep and USB CONS ep to avoid stall. Change-Id: Ib2e1ffe31d1028d1b2326fd385ed5450160e8844 Signed-off-by: Ashok Vuyyuru Signed-off-by: Praveen Kurapati --- drivers/platform/msm/ipa/ipa_v3/ipa_client.c | 22 +++++++++++++++----- drivers/platform/msm/ipa/ipa_v3/ipa_i.h | 2 ++ 2 files changed, 19 insertions(+), 5 deletions(-) diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_client.c b/drivers/platform/msm/ipa/ipa_v3/ipa_client.c index 36c18e5d0920..871b3d957de2 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_client.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_client.c @@ -61,11 +61,22 @@ int ipa3_enable_data_path(u32 clnt_hdl) * on other end from IPA hw. 
*/ if ((ep->client == IPA_CLIENT_USB_DPL_CONS) || - (ep->client == IPA_CLIENT_MHI_DPL_CONS)) + (ep->client == IPA_CLIENT_MHI_DPL_CONS)) { + holb_cfg.tmr_val = 0; holb_cfg.en = IPA_HOLB_TMR_EN; - else + } else if ((ipa3_ctx->ipa_hw_type == IPA_HW_v4_2 || + ipa3_ctx->ipa_hw_type == IPA_HW_v4_7) && + (ep->client == IPA_CLIENT_WLAN1_CONS || + ep->client == IPA_CLIENT_USB_CONS)) { + holb_cfg.en = IPA_HOLB_TMR_EN; + if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_5) + holb_cfg.tmr_val = IPA_HOLB_TMR_VAL; + else + holb_cfg.tmr_val = IPA_HOLB_TMR_VAL_4_5; + } else { holb_cfg.en = IPA_HOLB_TMR_DIS; - holb_cfg.tmr_val = 0; + holb_cfg.tmr_val = 0; + } res = ipa3_cfg_ep_holb(clnt_hdl, &holb_cfg); } @@ -1400,8 +1411,6 @@ int ipa3_xdci_disconnect(u32 clnt_hdl, bool should_force_clear, u32 qmi_req_id) if (!ep->keep_ipa_awake) IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl)); - ipa3_disable_data_path(clnt_hdl); - if (!IPA_CLIENT_IS_CONS(ep->client)) { IPADBG("Stopping PROD channel - hdl=%d clnt=%d\n", clnt_hdl, ep->client); @@ -1425,6 +1434,9 @@ int ipa3_xdci_disconnect(u32 clnt_hdl, bool should_force_clear, u32 qmi_req_id) goto stop_chan_fail; } } + + ipa3_disable_data_path(clnt_hdl); + IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl)); IPADBG("exit\n"); diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h index 72a78dba465b..9cef059e4160 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h @@ -64,6 +64,8 @@ #define IPA_UC_WAII_MAX_SLEEP 1200 #define IPA_HOLB_TMR_DIS 0x0 #define IPA_HOLB_TMR_EN 0x1 +#define IPA_HOLB_TMR_VAL 65535 +#define IPA_HOLB_TMR_VAL_4_5 31 #define IPA_MPM_MAX_RING_LEN 64 #define IPA_MAX_TETH_AGGR_BYTE_LIMIT 24 #define IPA_MPM_MAX_UC_THRESH 4 From e84b70c11a6c9a3185fdb1a04a449842e22a190d Mon Sep 17 00:00:00 2001 From: Mohammed Siddiq Date: Tue, 21 Apr 2020 00:31:47 +0530 Subject: [PATCH 16/90] cnss2: Add platform driver code to handle hang event data On SSR, 
FW writes hang data in the carved out region from existing DDR memory, the platform driver copies the hang event data from the fixed offset to local buffer and sends the buffer and length to host driver via CNSS_HANG_EVENT event. Change-Id: I5928821770086547b4fe17c30a967ea059b39394 Signed-off-by: Mohammed Siddiq --- drivers/net/wireless/cnss2/pci.c | 59 ++++++++++++++++++++++++++++++++ drivers/net/wireless/cnss2/pci.h | 2 ++ include/net/cnss2.h | 29 +++++++++++----- 3 files changed, 82 insertions(+), 8 deletions(-) diff --git a/drivers/net/wireless/cnss2/pci.c b/drivers/net/wireless/cnss2/pci.c index 8dd5e90bcb81..9eabe3c33efe 100644 --- a/drivers/net/wireless/cnss2/pci.c +++ b/drivers/net/wireless/cnss2/pci.c @@ -74,6 +74,9 @@ static DEFINE_SPINLOCK(time_sync_lock); #define LINK_TRAINING_RETRY_MAX_TIMES 3 +#define HANG_DATA_LENGTH 384 +#define HANG_DATA_OFFSET ((3 * 1024 * 1024) - HANG_DATA_LENGTH) + static struct cnss_pci_reg ce_src[] = { { "SRC_RING_BASE_LSB", QCA6390_CE_SRC_RING_BASE_LSB_OFFSET }, { "SRC_RING_BASE_MSB", QCA6390_CE_SRC_RING_BASE_MSB_OFFSET }, @@ -3780,6 +3783,59 @@ static void cnss_pci_remove_dump_seg(struct cnss_pci_data *pci_priv, cnss_minidump_remove_region(plat_priv, type, seg_no, va, pa, size); } +int cnss_call_driver_uevent(struct cnss_pci_data *pci_priv, + enum cnss_driver_status status, void *data) +{ + struct cnss_uevent_data uevent_data; + struct cnss_wlan_driver *driver_ops; + + driver_ops = pci_priv->driver_ops; + if (!driver_ops || !driver_ops->update_event) { + cnss_pr_dbg("Hang event driver ops is NULL\n"); + return -EINVAL; + } + + cnss_pr_dbg("Calling driver uevent: %d\n", status); + + uevent_data.status = status; + uevent_data.data = data; + + return driver_ops->update_event(pci_priv->pci_dev, &uevent_data); +} + +static void cnss_pci_send_hang_event(struct cnss_pci_data *pci_priv) +{ + struct cnss_plat_data *plat_priv = pci_priv->plat_priv; + struct cnss_fw_mem *fw_mem = plat_priv->fw_mem; + struct cnss_hang_event hang_event = 
{0}; + void *hang_data_va = NULL; + int i = 0; + + if (!fw_mem || !plat_priv->fw_mem_seg_len) + return; + + for (i = 0; i < plat_priv->fw_mem_seg_len; i++) { + if (fw_mem[i].type == QMI_WLFW_MEM_TYPE_DDR_V01 && + fw_mem[i].va) { + hang_data_va = fw_mem[i].va + HANG_DATA_OFFSET; + hang_event.hang_event_data = kmemdup(hang_data_va, + HANG_DATA_LENGTH, + GFP_ATOMIC); + if (!hang_event.hang_event_data) { + cnss_pr_dbg("Hang data memory alloc failed\n"); + return; + } + hang_event.hang_event_data_len = HANG_DATA_LENGTH; + break; + } + } + + cnss_call_driver_uevent(pci_priv, CNSS_HANG_EVENT, &hang_event); + + kfree(hang_event.hang_event_data); + hang_event.hang_event_data = NULL; +} + void cnss_pci_collect_dump_info(struct cnss_pci_data *pci_priv, bool in_panic) { struct cnss_plat_data *plat_priv = pci_priv->plat_priv; @@ -3791,6 +3847,9 @@ void cnss_pci_collect_dump_info(struct cnss_pci_data *pci_priv, bool in_panic) struct cnss_fw_mem *fw_mem = plat_priv->fw_mem; int ret, i, j; + if (test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state)) + cnss_pci_send_hang_event(pci_priv); + if (test_bit(CNSS_MHI_RDDM_DONE, &pci_priv->mhi_state)) { cnss_pr_dbg("RAM dump is already collected, skip\n"); return; diff --git a/drivers/net/wireless/cnss2/pci.h b/drivers/net/wireless/cnss2/pci.h index 5ffb1ffdbb75..2984273d9a42 100644 --- a/drivers/net/wireless/cnss2/pci.h +++ b/drivers/net/wireless/cnss2/pci.h @@ -200,6 +200,8 @@ void cnss_pci_pm_runtime_put_noidle(struct cnss_pci_data *pci_priv); void cnss_pci_pm_runtime_mark_last_busy(struct cnss_pci_data *pci_priv); int cnss_pci_update_status(struct cnss_pci_data *pci_priv, enum cnss_driver_status status); +int cnss_call_driver_uevent(struct cnss_pci_data *pci_priv, + enum cnss_driver_status status, void *data); int cnss_pcie_is_device_down(struct cnss_pci_data *pci_priv); int cnss_pci_suspend_bus(struct cnss_pci_data *pci_priv); int cnss_pci_resume_bus(struct cnss_pci_data *pci_priv); diff --git a/include/net/cnss2.h 
b/include/net/cnss2.h index 78143ea5a701..eb37edc647f5 100644 --- a/include/net/cnss2.h +++ b/include/net/cnss2.h @@ -67,6 +67,25 @@ struct cnss_wlan_runtime_ops { int (*runtime_resume)(struct pci_dev *pdev); }; +enum cnss_driver_status { + CNSS_UNINITIALIZED, + CNSS_INITIALIZED, + CNSS_LOAD_UNLOAD, + CNSS_RECOVERY, + CNSS_FW_DOWN, + CNSS_HANG_EVENT, +}; + +struct cnss_hang_event { + void *hang_event_data; + u16 hang_event_data_len; +}; + +struct cnss_uevent_data { + enum cnss_driver_status status; + void *data; +}; + struct cnss_wlan_driver { char *name; int (*probe)(struct pci_dev *pdev, const struct pci_device_id *id); @@ -83,18 +102,12 @@ struct cnss_wlan_driver { int (*resume_noirq)(struct pci_dev *pdev); void (*modem_status)(struct pci_dev *pdev, int state); void (*update_status)(struct pci_dev *pdev, uint32_t status); + int (*update_event)(struct pci_dev *pdev, + struct cnss_uevent_data *uevent); struct cnss_wlan_runtime_ops *runtime_ops; const struct pci_device_id *id_table; }; -enum cnss_driver_status { - CNSS_UNINITIALIZED, - CNSS_INITIALIZED, - CNSS_LOAD_UNLOAD, - CNSS_RECOVERY, - CNSS_FW_DOWN, -}; - struct cnss_ce_tgt_pipe_cfg { u32 pipe_num; u32 pipe_dir; From 734785553bbd49b8c7ff40f309b2726256279ed7 Mon Sep 17 00:00:00 2001 From: Da Hoon Pyun Date: Wed, 15 Apr 2020 15:33:44 -0400 Subject: [PATCH 17/90] msm: npu: Support cx ipeak limit management This change is to support cx ipeak limit management in order to allow sysmon to limit cx current by adjusting npu power level. 
Change-Id: I98cfdd1a27e13fc69151f077ffc38f50405b95c2 Signed-off-by: Da Hoon Pyun --- drivers/media/platform/msm/npu/npu_common.h | 4 +- drivers/media/platform/msm/npu/npu_dev.c | 195 +++++++++++++++++++- drivers/media/platform/msm/npu/npu_mgr.c | 6 + drivers/media/platform/msm/npu/npu_mgr.h | 2 +- 4 files changed, 203 insertions(+), 4 deletions(-) diff --git a/drivers/media/platform/msm/npu/npu_common.h b/drivers/media/platform/msm/npu/npu_common.h index 30afa4ab6439..f58e74b9140b 100644 --- a/drivers/media/platform/msm/npu/npu_common.h +++ b/drivers/media/platform/msm/npu/npu_common.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. */ #ifndef _NPU_COMMON_H @@ -341,5 +341,5 @@ int load_fw(struct npu_device *npu_dev); int unload_fw(struct npu_device *npu_dev); int npu_set_bw(struct npu_device *npu_dev, int new_ib, int new_ab); int npu_process_kevent(struct npu_client *client, struct npu_kevent *kevt); - +int npu_notify_cdsprm_cxlimit_activity(struct npu_device *npu_dev, bool enable); #endif /* _NPU_COMMON_H */ diff --git a/drivers/media/platform/msm/npu/npu_dev.c b/drivers/media/platform/msm/npu/npu_dev.c index f31cead6a437..cf744f10fdfc 100644 --- a/drivers/media/platform/msm/npu/npu_dev.c +++ b/drivers/media/platform/msm/npu/npu_dev.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include "npu_common.h" @@ -111,7 +112,8 @@ static int npu_pm_suspend(struct device *dev); static int npu_pm_resume(struct device *dev); static int __init npu_init(void); static void __exit npu_exit(void); - +static uint32_t npu_notify_cdsprm_cxlimit_corner(struct npu_device *npu_dev, + uint32_t pwr_lvl); /* ------------------------------------------------------------------------- * File Scope Variables * ------------------------------------------------------------------------- @@ -387,6 +389,168 @@ static ssize_t 
boot_store(struct device *dev, * Power Related * ------------------------------------------------------------------------- */ +static enum npu_power_level cdsprm_corner_to_npu_power_level( + enum cdsprm_npu_corner corner) +{ + enum npu_power_level pwr_lvl = NPU_PWRLEVEL_TURBO_L1; + + switch (corner) { + case CDSPRM_NPU_CLK_OFF: + pwr_lvl = NPU_PWRLEVEL_OFF; + break; + case CDSPRM_NPU_MIN_SVS: + pwr_lvl = NPU_PWRLEVEL_MINSVS; + break; + case CDSPRM_NPU_LOW_SVS: + pwr_lvl = NPU_PWRLEVEL_LOWSVS; + break; + case CDSPRM_NPU_SVS: + pwr_lvl = NPU_PWRLEVEL_SVS; + break; + case CDSPRM_NPU_SVS_L1: + pwr_lvl = NPU_PWRLEVEL_SVS_L1; + break; + case CDSPRM_NPU_NOM: + pwr_lvl = NPU_PWRLEVEL_NOM; + break; + case CDSPRM_NPU_NOM_L1: + pwr_lvl = NPU_PWRLEVEL_NOM_L1; + break; + case CDSPRM_NPU_TURBO: + pwr_lvl = NPU_PWRLEVEL_TURBO; + break; + case CDSPRM_NPU_TURBO_L1: + default: + pwr_lvl = NPU_PWRLEVEL_TURBO_L1; + break; + } + + return pwr_lvl; +} + +static enum cdsprm_npu_corner npu_power_level_to_cdsprm_corner( + enum npu_power_level pwr_lvl) +{ + enum cdsprm_npu_corner corner = CDSPRM_NPU_MIN_SVS; + + switch (pwr_lvl) { + case NPU_PWRLEVEL_OFF: + corner = CDSPRM_NPU_CLK_OFF; + break; + case NPU_PWRLEVEL_MINSVS: + corner = CDSPRM_NPU_MIN_SVS; + break; + case NPU_PWRLEVEL_LOWSVS: + corner = CDSPRM_NPU_LOW_SVS; + break; + case NPU_PWRLEVEL_SVS: + corner = CDSPRM_NPU_SVS; + break; + case NPU_PWRLEVEL_SVS_L1: + corner = CDSPRM_NPU_SVS_L1; + break; + case NPU_PWRLEVEL_NOM: + corner = CDSPRM_NPU_NOM; + break; + case NPU_PWRLEVEL_NOM_L1: + corner = CDSPRM_NPU_NOM_L1; + break; + case NPU_PWRLEVEL_TURBO: + corner = CDSPRM_NPU_TURBO; + break; + case NPU_PWRLEVEL_TURBO_L1: + default: + corner = CDSPRM_NPU_TURBO_L1; + break; + } + + return corner; +} + +static int npu_set_cdsprm_corner_limit(enum cdsprm_npu_corner corner) +{ + struct npu_pwrctrl *pwr; + enum npu_power_level pwr_lvl; + + if (!g_npu_dev) + return 0; + + pwr = &g_npu_dev->pwrctrl; + pwr_lvl = 
cdsprm_corner_to_npu_power_level(corner); + pwr->cdsprm_pwrlevel = pwr_lvl; + NPU_DBG("power level from cdsp %d\n", pwr_lvl); + + return npu_set_power_level(g_npu_dev, false); +} + +const struct cdsprm_npu_limit_cbs cdsprm_npu_limit_cbs = { + .set_corner_limit = npu_set_cdsprm_corner_limit, +}; + +int npu_notify_cdsprm_cxlimit_activity(struct npu_device *npu_dev, bool enable) +{ + if (!npu_dev->cxlimit_registered) + return 0; + + NPU_DBG("notify cxlimit %s activity\n", enable ? "enable" : "disable"); + + return cdsprm_cxlimit_npu_activity_notify(enable ? 1 : 0); +} + +static uint32_t npu_notify_cdsprm_cxlimit_corner( + struct npu_device *npu_dev, uint32_t pwr_lvl) +{ + uint32_t corner, pwr_lvl_to_set; + + if (!npu_dev->cxlimit_registered) + return pwr_lvl; + + corner = npu_power_level_to_cdsprm_corner(pwr_lvl); + corner = cdsprm_cxlimit_npu_corner_notify(corner); + pwr_lvl_to_set = cdsprm_corner_to_npu_power_level(corner); + NPU_DBG("Notify cdsprm %d:%d\n", pwr_lvl, + pwr_lvl_to_set); + + return pwr_lvl_to_set; +} + +int npu_cdsprm_cxlimit_init(struct npu_device *npu_dev) +{ + bool enabled; + int ret = 0; + + enabled = of_property_read_bool(npu_dev->pdev->dev.of_node, + "qcom,npu-cxlimit-enable"); + NPU_DBG("qcom,npu-xclimit-enable is %s\n", enabled ? 
"true" : "false"); + + npu_dev->cxlimit_registered = false; + if (enabled) { + ret = cdsprm_cxlimit_npu_limit_register(&cdsprm_npu_limit_cbs); + if (ret) { + NPU_ERR("register cxlimit npu limit failed\n"); + } else { + NPU_DBG("register cxlimit npu limit succeeds\n"); + npu_dev->cxlimit_registered = true; + } + } + + return ret; +} + +int npu_cdsprm_cxlimit_deinit(struct npu_device *npu_dev) +{ + int ret = 0; + + if (npu_dev->cxlimit_registered) { + ret = cdsprm_cxlimit_npu_limit_deregister(); + if (ret) + NPU_ERR("deregister cxlimit npu limit failed\n"); + npu_dev->cxlimit_registered = false; + } + + return ret; +} + int npu_enable_core_power(struct npu_device *npu_dev) { struct npu_pwrctrl *pwr = &npu_dev->pwrctrl; @@ -530,6 +694,11 @@ int npu_set_power_level(struct npu_device *npu_dev, bool notify_cxlimit) return 0; } + /* notify cxlimit to get allowed power level */ + if ((pwr_level_to_set > pwr->active_pwrlevel) && notify_cxlimit) + pwr_level_to_set = npu_notify_cdsprm_cxlimit_corner( + npu_dev, pwr_level_to_cdsprm); + pwr_level_to_set = min(pwr_level_to_set, npu_dev->pwrctrl.cdsprm_pwrlevel); @@ -596,6 +765,12 @@ int npu_set_power_level(struct npu_device *npu_dev, bool notify_cxlimit) ret = 0; } + if ((pwr_level_to_cdsprm < pwr->active_pwrlevel) && notify_cxlimit) { + npu_notify_cdsprm_cxlimit_corner(npu_dev, + pwr_level_to_cdsprm); + NPU_DBG("Notify cdsprm(post) %d\n", pwr_level_to_cdsprm); + } + pwr->active_pwrlevel = pwr_level_to_set; return ret; } @@ -708,6 +883,13 @@ static int npu_enable_clocks(struct npu_device *npu_dev, bool post_pil) uint32_t pwrlevel_to_set, pwrlevel_idx; pwrlevel_to_set = pwr->active_pwrlevel; + if (!post_pil) { + pwrlevel_to_set = npu_notify_cdsprm_cxlimit_corner( + npu_dev, pwrlevel_to_set); + NPU_DBG("Notify cdsprm %d\n", pwrlevel_to_set); + pwr->active_pwrlevel = pwrlevel_to_set; + } + pwrlevel_idx = npu_power_level_to_index(npu_dev, pwrlevel_to_set); pwrlevel = &pwr->pwrlevels[pwrlevel_idx]; for (i = 0; i < 
npu_dev->core_clk_num; i++) { @@ -775,6 +957,11 @@ static void npu_disable_clocks(struct npu_device *npu_dev, bool post_pil) int i, rc = 0; struct npu_clk *core_clks = npu_dev->core_clks; + if (!post_pil) { + npu_notify_cdsprm_cxlimit_corner(npu_dev, NPU_PWRLEVEL_OFF); + NPU_DBG("Notify cdsprm clock off\n"); + } + for (i = npu_dev->core_clk_num - 1; i >= 0 ; i--) { if (post_pil) { if (!npu_is_post_clock(core_clks[i].clk_name)) @@ -2459,10 +2646,15 @@ static int npu_probe(struct platform_device *pdev) thermal_cdev_update(tcdev); } + rc = npu_cdsprm_cxlimit_init(npu_dev); + if (rc) + goto error_driver_init; + g_npu_dev = npu_dev; return rc; error_driver_init: + npu_cdsprm_cxlimit_deinit(npu_dev); if (npu_dev->tcdev) thermal_cooling_device_unregister(npu_dev->tcdev); sysfs_remove_group(&npu_dev->device->kobj, &npu_fs_attr_group); @@ -2487,6 +2679,7 @@ static int npu_remove(struct platform_device *pdev) npu_dev = platform_get_drvdata(pdev); npu_host_deinit(npu_dev); npu_debugfs_deinit(npu_dev); + npu_cdsprm_cxlimit_deinit(npu_dev); if (npu_dev->tcdev) thermal_cooling_device_unregister(npu_dev->tcdev); sysfs_remove_group(&npu_dev->device->kobj, &npu_fs_attr_group); diff --git a/drivers/media/platform/msm/npu/npu_mgr.c b/drivers/media/platform/msm/npu/npu_mgr.c index e0b681beb11e..eb95adc395a6 100644 --- a/drivers/media/platform/msm/npu/npu_mgr.c +++ b/drivers/media/platform/msm/npu/npu_mgr.c @@ -2697,6 +2697,9 @@ int32_t npu_host_exec_network_v2(struct npu_client *client, return -EINVAL; } + if (atomic_inc_return(&host_ctx->network_execute_cnt) == 1) + npu_notify_cdsprm_cxlimit_activity(npu_dev, true); + if (!network->is_active) { NPU_ERR("network is not active\n"); ret = -EINVAL; @@ -2847,6 +2850,9 @@ int32_t npu_host_exec_network_v2(struct npu_client *client, host_error_hdlr(npu_dev, true); } + if (atomic_dec_return(&host_ctx->network_execute_cnt) == 0) + npu_notify_cdsprm_cxlimit_activity(npu_dev, false); + return ret; } diff --git 
a/drivers/media/platform/msm/npu/npu_mgr.h b/drivers/media/platform/msm/npu/npu_mgr.h index 5cc0e58b30ee..a3b636b32fc6 100644 --- a/drivers/media/platform/msm/npu/npu_mgr.h +++ b/drivers/media/platform/msm/npu/npu_mgr.h @@ -126,7 +126,7 @@ struct npu_host_ctx { uint32_t fw_dbg_mode; uint32_t exec_flags_override; atomic_t ipc_trans_id; - atomic_t network_exeute_cnt; + atomic_t network_execute_cnt; uint32_t err_irq_sts; uint32_t wdg_irq_sts; From 0efac21455942724b304e01d0684f22b36621bd1 Mon Sep 17 00:00:00 2001 From: Da Hoon Pyun Date: Wed, 15 Apr 2020 15:47:04 -0400 Subject: [PATCH 18/90] msm: npu: Send IPCC irq directly while apps crashes Before subsystem resets NPU to collect the dump, it will send force-stop notification to NPU via smp2p. But triggering the ipcc irq is done by the worker thread in npu driver which is not possible to run in this scenario. This change is to trigger this irq when system crash notification is received as a solution. Change-Id: Ifa3a420500f06d109d6c0ad4d652199fdf4a97b1 Signed-off-by: Da Hoon Pyun --- drivers/media/platform/msm/npu/npu_common.h | 2 ++ drivers/media/platform/msm/npu/npu_dev.c | 4 +++ drivers/media/platform/msm/npu/npu_mgr.c | 29 ++++++++++++++++++++- drivers/media/platform/msm/npu/npu_mgr.h | 2 ++ 4 files changed, 36 insertions(+), 1 deletion(-) diff --git a/drivers/media/platform/msm/npu/npu_common.h b/drivers/media/platform/msm/npu/npu_common.h index f58e74b9140b..7618c6621bcd 100644 --- a/drivers/media/platform/msm/npu/npu_common.h +++ b/drivers/media/platform/msm/npu/npu_common.h @@ -342,4 +342,6 @@ int unload_fw(struct npu_device *npu_dev); int npu_set_bw(struct npu_device *npu_dev, int new_ib, int new_ab); int npu_process_kevent(struct npu_client *client, struct npu_kevent *kevt); int npu_notify_cdsprm_cxlimit_activity(struct npu_device *npu_dev, bool enable); +int npu_bridge_mbox_send_data(struct npu_host_ctx *host_ctx, + struct npu_mbox *mbox, void *data); #endif /* _NPU_COMMON_H */ diff --git 
a/drivers/media/platform/msm/npu/npu_dev.c b/drivers/media/platform/msm/npu/npu_dev.c index cf744f10fdfc..f4af1c407895 100644 --- a/drivers/media/platform/msm/npu/npu_dev.c +++ b/drivers/media/platform/msm/npu/npu_dev.c @@ -2199,6 +2199,10 @@ static int npu_ipcc_bridge_mbox_send_data(struct mbox_chan *chan, void *data) queue_work(host_ctx->wq, &host_ctx->bridge_mbox_work); spin_unlock_irqrestore(&host_ctx->bridge_mbox_lock, flags); + if (host_ctx->app_crashed) + npu_bridge_mbox_send_data(host_ctx, + ipcc_mbox_chan->npu_mbox, NULL); + return 0; } diff --git a/drivers/media/platform/msm/npu/npu_mgr.c b/drivers/media/platform/msm/npu/npu_mgr.c index eb95adc395a6..876dc231f8eb 100644 --- a/drivers/media/platform/msm/npu/npu_mgr.c +++ b/drivers/media/platform/msm/npu/npu_mgr.c @@ -672,6 +672,25 @@ static int npu_notifier_cb(struct notifier_block *this, unsigned long code, return ret; } +static int npu_panic_handler(struct notifier_block *this, + unsigned long event, void *ptr) +{ + int i; + struct npu_host_ctx *host_ctx = + container_of(this, struct npu_host_ctx, panic_nb); + struct npu_device *npu_dev = host_ctx->npu_dev; + + NPU_INFO("Apps crashed\n"); + + for (i = 0; i < NPU_MAX_MBOX_NUM; i++) + if (npu_dev->mbox[i].send_data_pending) + npu_bridge_mbox_send_data(host_ctx, + &npu_dev->mbox[i], NULL); + + host_ctx->app_crashed = true; + return NOTIFY_DONE; +} + static void npu_update_pwr_work(struct work_struct *work) { int ret; @@ -724,6 +743,14 @@ int npu_host_init(struct npu_device *npu_dev) goto fail; } + host_ctx->panic_nb.notifier_call = npu_panic_handler; + ret = atomic_notifier_chain_register(&panic_notifier_list, + &host_ctx->panic_nb); + if (ret) { + NPU_ERR("register panic notifier failed\n"); + goto fail; + } + host_ctx->wq = create_workqueue("npu_general_wq"); host_ctx->wq_pri = alloc_workqueue("npu_ipc_wq", WQ_HIGHPRI | WQ_UNBOUND, 0); @@ -1104,7 +1131,7 @@ static void npu_disable_fw_work(struct work_struct *work) NPU_DBG("Exit disable fw work\n"); } 
-static int npu_bridge_mbox_send_data(struct npu_host_ctx *host_ctx, +int npu_bridge_mbox_send_data(struct npu_host_ctx *host_ctx, struct npu_mbox *mbox, void *data) { NPU_DBG("Generating IRQ for client_id: %u; signal_id: %u\n", diff --git a/drivers/media/platform/msm/npu/npu_mgr.h b/drivers/media/platform/msm/npu/npu_mgr.h index a3b636b32fc6..e44fb38d827b 100644 --- a/drivers/media/platform/msm/npu/npu_mgr.h +++ b/drivers/media/platform/msm/npu/npu_mgr.h @@ -132,7 +132,9 @@ struct npu_host_ctx { uint32_t wdg_irq_sts; bool fw_error; bool cancel_work; + bool app_crashed; struct notifier_block nb; + struct notifier_block panic_nb; void *notif_hdle; spinlock_t bridge_mbox_lock; bool bridge_mbox_pwr_on; From bda8855af594be95aee96f80fb9ddc0494b81b14 Mon Sep 17 00:00:00 2001 From: AnilKumar Chimata Date: Mon, 13 Apr 2020 12:13:28 +0530 Subject: [PATCH 19/90] smcinvoke: Remove workqueue has sleeper If invoke calls are made before any accept threads then it's possible that wq_has_sleeper() will return false. So CB failures are seen during TA loading. Change-Id: Id3b3a1907456c59bd8d51355dbdcba30a5c1a79a Signed-off-by: AnilKumar Chimata --- drivers/soc/qcom/smcinvoke.c | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/drivers/soc/qcom/smcinvoke.c b/drivers/soc/qcom/smcinvoke.c index dd416360fba1..b42a6e176f42 100644 --- a/drivers/soc/qcom/smcinvoke.c +++ b/drivers/soc/qcom/smcinvoke.c @@ -986,12 +986,11 @@ static void process_tzcb_req(void *buf, size_t buf_len, struct file **arr_filp) * we need not worry that server_info will be deleted because as long * as this CBObj is served by this server, srvr_info will be valid. 
*/ - if (wq_has_sleeper(&srvr_info->req_wait_q)) { - wake_up_interruptible_all(&srvr_info->req_wait_q); - ret = wait_event_interruptible(srvr_info->rsp_wait_q, - (cb_txn->state == SMCINVOKE_REQ_PROCESSED) || - (srvr_info->state == SMCINVOKE_SERVER_STATE_DEFUNCT)); - } + wake_up_interruptible_all(&srvr_info->req_wait_q); + ret = wait_event_interruptible(srvr_info->rsp_wait_q, + (cb_txn->state == SMCINVOKE_REQ_PROCESSED) || + (srvr_info->state == SMCINVOKE_SERVER_STATE_DEFUNCT)); + out: /* * we could be here because of either: a. Req is PROCESSED From 724ad7dbc10b66830f5ed573a2d464d771830b57 Mon Sep 17 00:00:00 2001 From: Naveen Yadav Date: Fri, 10 Apr 2020 14:59:01 +0530 Subject: [PATCH 20/90] clk: qcom: gdsc-regulator: Add support to skip GDSC disable Add check to skip gdsc disable when disable call comes before the gdsc enable. In case if the gdsc is enabled by entity external to HLOS then skip disable call. Change-Id: I14ba8a24fb4403a1805cba23967bb22a82ac6e7f Signed-off-by: Naveen Yadav --- drivers/clk/qcom/gdsc-regulator.c | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/drivers/clk/qcom/gdsc-regulator.c b/drivers/clk/qcom/gdsc-regulator.c index 1b95e826738e..e16a9e5baad6 100644 --- a/drivers/clk/qcom/gdsc-regulator.c +++ b/drivers/clk/qcom/gdsc-regulator.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. 
*/ #include @@ -74,6 +74,7 @@ struct gdsc { int reset_count; int root_clk_idx; u32 gds_timeout; + bool skip_disable_before_enable; }; enum gdscr_status { @@ -366,6 +367,7 @@ static int gdsc_enable(struct regulator_dev *rdev) clk_disable_unprepare(sc->clocks[sc->root_clk_idx]); sc->is_gdsc_enabled = true; + sc->skip_disable_before_enable = false; end: if (ret && sc->bus_handle) { msm_bus_scale_client_update_request(sc->bus_handle, 0); @@ -384,6 +386,16 @@ static int gdsc_disable(struct regulator_dev *rdev) uint32_t regval; int i, ret = 0; + /* + * Protect GDSC against late_init disabling when the GDSC is enabled + * by an entity outside external to HLOS. + */ + if (sc->skip_disable_before_enable) { + dev_dbg(&rdev->dev, "Skip Disabling: %s\n", sc->rdesc.name); + sc->skip_disable_before_enable = false; + return 0; + } + if (sc->force_root_en) clk_prepare_enable(sc->clocks[sc->root_clk_idx]); @@ -670,6 +682,8 @@ static int gdsc_parse_dt_data(struct gdsc *sc, struct device *dev, "qcom,no-status-check-on-disable"); sc->retain_ff_enable = of_property_read_bool(dev->of_node, "qcom,retain-regs"); + sc->skip_disable_before_enable = of_property_read_bool(dev->of_node, + "qcom,skip-disable-before-sw-enable"); sc->toggle_logic = !of_property_read_bool(dev->of_node, "qcom,skip-logic-collapse"); From 0fe730ab99665715de222db0eeedf192fa7e8853 Mon Sep 17 00:00:00 2001 From: Chandana Kishori Chiluveru Date: Mon, 13 Apr 2020 18:04:44 +0530 Subject: [PATCH 21/90] serial: msm_geni_serial: Initialize 'ret' with zero Initialize the local variable 'ret' with zero to prevent un-initialized access. Also fix uninitialized uport pointers access from msm_geni_serial_probe(). 
Change-Id: I25198737992f24f7b06e77e68f29ba3263fb0b30 Signed-off-by: Chandana Kishori Chiluveru --- drivers/tty/serial/msm_geni_serial.c | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/drivers/tty/serial/msm_geni_serial.c b/drivers/tty/serial/msm_geni_serial.c index 1a2cd960db02..4c3aa93ad835 100644 --- a/drivers/tty/serial/msm_geni_serial.c +++ b/drivers/tty/serial/msm_geni_serial.c @@ -1534,7 +1534,7 @@ static int msm_geni_serial_handle_dma_rx(struct uart_port *uport, bool drop_rx) struct msm_geni_serial_port *msm_port = GET_DEV_PORT(uport); unsigned int rx_bytes = 0; struct tty_port *tport; - int ret; + int ret = 0; unsigned int geni_status; geni_status = geni_read_reg_nolog(uport->membase, SE_GENI_STATUS); @@ -2935,13 +2935,10 @@ static int msm_geni_serial_probe(struct platform_device *pdev) if (ret) goto exit_geni_serial_probe; - IPC_LOG_MSG(dev_port->ipc_log_misc, "%s: port:%s irq:%d\n", __func__, - uport->name, uport->irq); - return uart_add_one_port(drv, uport); + ret = uart_add_one_port(drv, uport); exit_geni_serial_probe: - IPC_LOG_MSG(dev_port->ipc_log_misc, "%s: fail port:%s ret:%d\n", - __func__, uport->name, ret); + IPC_LOG_MSG(dev_port->ipc_log_misc, "%s: ret:%d\n", __func__, ret); return ret; } From 12b64d1ea6767df3773b082ac5ffe9f60bddcf4c Mon Sep 17 00:00:00 2001 From: Deepak Kumar Singh Date: Tue, 7 Apr 2020 16:58:06 +0530 Subject: [PATCH 22/90] soc: qcom: smem: map only partitions used by local HOST SMEM driver is IO mapping complete region and CPU is doing a speculative read into a partition where local HOST does not have permission resulting in a NOC error. Map only those partitions which are accessible to local HOST. 
CRs-Fixed: 2647368 Change-Id: If03b21f99642db861ea5d549cac9342b77ad3d43 Signed-off-by: Deepak Kumar Singh --- drivers/soc/qcom/smem.c | 234 +++++++++++++++++++++++++++++----------- 1 file changed, 173 insertions(+), 61 deletions(-) diff --git a/drivers/soc/qcom/smem.c b/drivers/soc/qcom/smem.c index 7888648f0b33..b5638df21344 100644 --- a/drivers/soc/qcom/smem.c +++ b/drivers/soc/qcom/smem.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2015, Sony Mobile Communications AB. - * Copyright (c) 2012-2013, 2018-2019 The Linux Foundation. All rights reserved. + * Copyright (c) 2012-2013, 2018-2020 The Linux Foundation. All rights reserved. */ #include @@ -192,6 +192,19 @@ struct smem_partition_header { __le32 offset_free_cached; __le32 reserved[3]; }; +/** + * struct smem_partition_desc - descriptor for partition + * @virt_base: starting virtual address of partition + * @phys_base: starting physical address of partition + * @cacheline: alignment for "cached" entries + * @size: size of partition + */ +struct smem_partition_desc { + void __iomem *virt_base; + u32 phys_base; + u32 cacheline; + u32 size; +}; static const u8 SMEM_PART_MAGIC[] = { 0x24, 0x50, 0x52, 0x54 }; @@ -248,9 +261,9 @@ struct smem_region { * struct qcom_smem - device data for the smem device * @dev: device pointer * @hwlock: reference to a hwspinlock - * @global_partition_entry: pointer to global partition entry when in use - * @ptable_entries: list of pointers to partitions table entry of current - * processor/host + * @ptable_base: virtual base of partition table + * @global_partition_desc: descriptor for global partition when in use + * @partition_desc: list of partition descriptor of current processor/host * @item_count: max accepted item number * @num_regions: number of @regions * @regions: list of the memory regions defining the shared memory @@ -260,9 +273,10 @@ struct qcom_smem { struct hwspinlock *hwlock; - struct smem_ptable_entry *global_partition_entry; - struct 
smem_ptable_entry *ptable_entries[SMEM_HOST_COUNT]; u32 item_count; + struct smem_ptable *ptable_base; + struct smem_partition_desc global_partition_desc; + struct smem_partition_desc partition_desc[SMEM_HOST_COUNT]; unsigned num_regions; struct smem_region regions[0]; @@ -274,12 +288,6 @@ static struct qcom_smem *__smem; /* Timeout (ms) for the trylock of remote spinlocks */ #define HWSPINLOCK_TIMEOUT 1000 -static struct smem_partition_header * -ptable_entry_to_phdr(struct smem_ptable_entry *entry) -{ - return __smem->regions[0].virt_base + le32_to_cpu(entry->offset); -} - static struct smem_private_entry * phdr_to_last_uncached_entry(struct smem_partition_header *phdr) { @@ -346,7 +354,7 @@ static void *cached_entry_to_item(struct smem_private_entry *e) } static int qcom_smem_alloc_private(struct qcom_smem *smem, - struct smem_ptable_entry *entry, + struct smem_partition_desc *p_desc, unsigned item, size_t size) { @@ -356,8 +364,8 @@ static int qcom_smem_alloc_private(struct qcom_smem *smem, void *cached; void *p_end; - phdr = ptable_entry_to_phdr(entry); - p_end = (void *)phdr + le32_to_cpu(entry->size); + phdr = p_desc->virt_base; + p_end = (void *)phdr + p_desc->size; hdr = phdr_to_first_uncached_entry(phdr); end = phdr_to_last_uncached_entry(phdr); @@ -450,7 +458,7 @@ static int qcom_smem_alloc_global(struct qcom_smem *smem, */ int qcom_smem_alloc(unsigned host, unsigned item, size_t size) { - struct smem_ptable_entry *entry; + struct smem_partition_desc *p_desc; unsigned long flags; int ret; @@ -472,12 +480,12 @@ int qcom_smem_alloc(unsigned host, unsigned item, size_t size) if (ret) return ret; - if (host < SMEM_HOST_COUNT && __smem->ptable_entries[host]) { - entry = __smem->ptable_entries[host]; - ret = qcom_smem_alloc_private(__smem, entry, item, size); - } else if (__smem->global_partition_entry) { - entry = __smem->global_partition_entry; - ret = qcom_smem_alloc_private(__smem, entry, item, size); + if (host < SMEM_HOST_COUNT && 
__smem->partition_desc[host].virt_base) { + p_desc = &__smem->partition_desc[host]; + ret = qcom_smem_alloc_private(__smem, p_desc, item, size); + } else if (__smem->global_partition_desc.virt_base) { + p_desc = &__smem->global_partition_desc; + ret = qcom_smem_alloc_private(__smem, p_desc, item, size); } else { ret = qcom_smem_alloc_global(__smem, item, size); } @@ -528,22 +536,20 @@ static void *qcom_smem_get_global(struct qcom_smem *smem, } static void *qcom_smem_get_private(struct qcom_smem *smem, - struct smem_ptable_entry *entry, + struct smem_partition_desc *p_desc, unsigned item, size_t *size) { struct smem_private_entry *e, *end; struct smem_partition_header *phdr; void *item_ptr, *p_end; - u32 partition_size; size_t cacheline; u32 padding_data; u32 e_size; - phdr = ptable_entry_to_phdr(entry); - partition_size = le32_to_cpu(entry->size); - p_end = (void *)phdr + partition_size; - cacheline = le32_to_cpu(entry->cacheline); + phdr = p_desc->virt_base; + p_end = (void *)phdr + p_desc->size; + cacheline = p_desc->cacheline; e = phdr_to_first_uncached_entry(phdr); end = phdr_to_last_uncached_entry(phdr); @@ -560,7 +566,7 @@ static void *qcom_smem_get_private(struct qcom_smem *smem, e_size = le32_to_cpu(e->size); padding_data = le16_to_cpu(e->padding_data); - if (e_size < partition_size + if (e_size < p_desc->size && padding_data < e_size) *size = e_size - padding_data; else @@ -596,7 +602,7 @@ static void *qcom_smem_get_private(struct qcom_smem *smem, e_size = le32_to_cpu(e->size); padding_data = le16_to_cpu(e->padding_data); - if (e_size < partition_size + if (e_size < p_desc->size && padding_data < e_size) *size = e_size - padding_data; else @@ -635,7 +641,7 @@ static void *qcom_smem_get_private(struct qcom_smem *smem, */ void *qcom_smem_get(unsigned host, unsigned item, size_t *size) { - struct smem_ptable_entry *entry; + struct smem_partition_desc *p_desc; unsigned long flags; int ret; void *ptr = ERR_PTR(-EPROBE_DEFER); @@ -652,12 +658,12 @@ void 
*qcom_smem_get(unsigned host, unsigned item, size_t *size) if (ret) return ERR_PTR(ret); - if (host < SMEM_HOST_COUNT && __smem->ptable_entries[host]) { - entry = __smem->ptable_entries[host]; - ptr = qcom_smem_get_private(__smem, entry, item, size); - } else if (__smem->global_partition_entry) { - entry = __smem->global_partition_entry; - ptr = qcom_smem_get_private(__smem, entry, item, size); + if (host < SMEM_HOST_COUNT && __smem->partition_desc[host].virt_base) { + p_desc = &__smem->partition_desc[host]; + ptr = qcom_smem_get_private(__smem, p_desc, item, size); + } else if (__smem->global_partition_desc.virt_base) { + p_desc = &__smem->global_partition_desc; + ptr = qcom_smem_get_private(__smem, p_desc, item, size); } else { ptr = qcom_smem_get_global(__smem, item, size); } @@ -679,30 +685,30 @@ EXPORT_SYMBOL(qcom_smem_get); int qcom_smem_get_free_space(unsigned host) { struct smem_partition_header *phdr; - struct smem_ptable_entry *entry; + struct smem_partition_desc *p_desc; struct smem_header *header; unsigned ret; if (!__smem) return -EPROBE_DEFER; - if (host < SMEM_HOST_COUNT && __smem->ptable_entries[host]) { - entry = __smem->ptable_entries[host]; - phdr = ptable_entry_to_phdr(entry); + if (host < SMEM_HOST_COUNT && __smem->partition_desc[host].virt_base) { + p_desc = &__smem->partition_desc[host]; + phdr = p_desc->virt_base; ret = le32_to_cpu(phdr->offset_free_cached) - le32_to_cpu(phdr->offset_free_uncached); - if (ret > le32_to_cpu(entry->size)) + if (ret > p_desc->size) return -EINVAL; - } else if (__smem->global_partition_entry) { - entry = __smem->global_partition_entry; - phdr = ptable_entry_to_phdr(entry); + } else if (__smem->global_partition_desc.virt_base) { + p_desc = &__smem->global_partition_desc; + phdr = p_desc->virt_base; ret = le32_to_cpu(phdr->offset_free_cached) - le32_to_cpu(phdr->offset_free_uncached); - if (ret > le32_to_cpu(entry->size)) + if (ret > p_desc->size) return -EINVAL; } else { header = __smem->regions[0].virt_base; @@ 
-716,6 +722,15 @@ int qcom_smem_get_free_space(unsigned host) } EXPORT_SYMBOL(qcom_smem_get_free_space); +static int addr_in_range(void *virt_base, unsigned int size, void *addr) +{ + if (virt_base && addr >= virt_base && + addr < virt_base + size) + return 1; + + return 0; +} + /** * qcom_smem_virt_to_phys() - return the physical address associated * with an smem item pointer (previously returned by qcom_smem_get() @@ -725,17 +740,36 @@ EXPORT_SYMBOL(qcom_smem_get_free_space); */ phys_addr_t qcom_smem_virt_to_phys(void *p) { - unsigned i; + struct smem_partition_desc *p_desc; + struct smem_region *area; + u64 offset; + u32 i; + + for (i = 0; i < SMEM_HOST_COUNT; i++) { + p_desc = &__smem->partition_desc[i]; + + if (addr_in_range(p_desc->virt_base, p_desc->size, p)) { + offset = p - p_desc->virt_base; + + return (phys_addr_t)p_desc->phys_base + offset; + } + } + + p_desc = &__smem->global_partition_desc; + + if (addr_in_range(p_desc->virt_base, p_desc->size, p)) { + offset = p - p_desc->virt_base; + + return (phys_addr_t)p_desc->phys_base + offset; + } for (i = 0; i < __smem->num_regions; i++) { - struct smem_region *region = &__smem->regions[i]; + area = &__smem->regions[i]; - if (p < region->virt_base) - continue; - if (p < region->virt_base + region->size) { - u64 offset = p - region->virt_base; + if (addr_in_range(area->virt_base, area->size, p)) { + offset = p - area->virt_base; - return (phys_addr_t)region->aux_base + offset; + return (phys_addr_t)area->aux_base + offset; } } @@ -759,7 +793,7 @@ static struct smem_ptable *qcom_smem_get_ptable(struct qcom_smem *smem) struct smem_ptable *ptable; u32 version; - ptable = smem->regions[0].virt_base + smem->regions[0].size - SZ_4K; + ptable = smem->ptable_base; if (memcmp(ptable->magic, SMEM_PTABLE_MAGIC, sizeof(ptable->magic))) return ERR_PTR(-ENOENT); @@ -793,11 +827,12 @@ static int qcom_smem_set_global_partition(struct qcom_smem *smem) struct smem_partition_header *header; struct smem_ptable_entry *entry; 
struct smem_ptable *ptable; + u32 phys_addr; u32 host0, host1, size; bool found = false; int i; - if (smem->global_partition_entry) { + if (smem->global_partition_desc.virt_base) { dev_err(smem->dev, "Already found the global partition\n"); return -EINVAL; } @@ -827,7 +862,12 @@ static int qcom_smem_set_global_partition(struct qcom_smem *smem) return -EINVAL; } - header = smem->regions[0].virt_base + le32_to_cpu(entry->offset); + phys_addr = smem->regions[0].aux_base + le32_to_cpu(entry->offset); + header = devm_ioremap_wc(smem->dev, + phys_addr, le32_to_cpu(entry->size)); + if (!header) + return -ENOMEM; + host0 = le16_to_cpu(header->host0); host1 = le16_to_cpu(header->host1); @@ -853,7 +893,10 @@ static int qcom_smem_set_global_partition(struct qcom_smem *smem) return -EINVAL; } - smem->global_partition_entry = entry; + smem->global_partition_desc.virt_base = (void __iomem *)header; + smem->global_partition_desc.phys_base = phys_addr; + smem->global_partition_desc.size = le32_to_cpu(entry->size); + smem->global_partition_desc.cacheline = le32_to_cpu(entry->cacheline); return 0; } @@ -864,6 +907,7 @@ static int qcom_smem_enumerate_partitions(struct qcom_smem *smem, struct smem_partition_header *header; struct smem_ptable_entry *entry; struct smem_ptable *ptable; + u32 phys_addr; unsigned int remote_host; u32 host0, host1; int i; @@ -898,14 +942,20 @@ static int qcom_smem_enumerate_partitions(struct qcom_smem *smem, return -EINVAL; } - if (smem->ptable_entries[remote_host]) { + if (smem->partition_desc[remote_host].virt_base) { dev_err(smem->dev, "Already found a partition for host %d\n", remote_host); return -EINVAL; } - header = smem->regions[0].virt_base + le32_to_cpu(entry->offset); + phys_addr = smem->regions[0].aux_base + + le32_to_cpu(entry->offset); + header = devm_ioremap_wc(smem->dev, + phys_addr, le32_to_cpu(entry->size)); + if (!header) + return -ENOMEM; + host0 = le16_to_cpu(header->host0); host1 = le16_to_cpu(header->host1); @@ -940,7 +990,13 @@ 
static int qcom_smem_enumerate_partitions(struct qcom_smem *smem, return -EINVAL; } - smem->ptable_entries[remote_host] = entry; + smem->partition_desc[remote_host].virt_base = + (void __iomem *)header; + smem->partition_desc[remote_host].phys_base = phys_addr; + smem->partition_desc[remote_host].size = + le32_to_cpu(entry->size); + smem->partition_desc[remote_host].cacheline = + le32_to_cpu(entry->cacheline); } return 0; @@ -973,6 +1029,61 @@ static int qcom_smem_map_memory(struct qcom_smem *smem, struct device *dev, return 0; } +static int qcom_smem_map_toc(struct qcom_smem *smem, struct device *dev, + const char *name, int i) +{ + struct device_node *np; + struct resource r; + int ret; + + np = of_parse_phandle(dev->of_node, name, 0); + if (!np) { + dev_err(dev, "No %s specified\n", name); + return -EINVAL; + } + + ret = of_address_to_resource(np, 0, &r); + of_node_put(np); + if (ret) + return ret; + + smem->regions[i].aux_base = (u32)r.start; + smem->regions[i].size = resource_size(&r); + /* map starting 4K for smem header */ + smem->regions[i].virt_base = devm_ioremap_wc(dev, r.start, SZ_4K); + /* map last 4k for toc */ + smem->ptable_base = devm_ioremap_wc(dev, + r.start + resource_size(&r) - SZ_4K, SZ_4K); + + if (!smem->regions[i].virt_base || !smem->ptable_base) + return -ENOMEM; + + return 0; +} + +static int qcom_smem_mamp_legacy(struct qcom_smem *smem) +{ + struct smem_header *header; + u32 phys_addr; + u32 p_size; + + phys_addr = smem->regions[0].aux_base; + header = smem->regions[0].virt_base; + p_size = header->available; + + /* unmap previously mapped starting 4k for smem header */ + devm_iounmap(smem->dev, smem->regions[0].virt_base); + + smem->regions[0].size = p_size; + smem->regions[0].virt_base = devm_ioremap_wc(smem->dev, + phys_addr, p_size); + + if (!smem->regions[0].virt_base) + return -ENOMEM; + + return 0; +} + static int qcom_smem_probe(struct platform_device *pdev) { struct smem_header *header; @@ -995,7 +1106,7 @@ static int 
qcom_smem_probe(struct platform_device *pdev) smem->dev = &pdev->dev; smem->num_regions = num_regions; - ret = qcom_smem_map_memory(smem, &pdev->dev, "memory-region", 0); + ret = qcom_smem_map_toc(smem, &pdev->dev, "memory-region", 0); if (ret) return ret; @@ -1019,6 +1130,7 @@ static int qcom_smem_probe(struct platform_device *pdev) smem->item_count = qcom_smem_get_item_count(smem); break; case SMEM_GLOBAL_HEAP_VERSION: + qcom_smem_mamp_legacy(smem); smem->item_count = SMEM_ITEM_COUNT; break; default: From c544f0cc2e485a14e2ed9e19ab475779457f4646 Mon Sep 17 00:00:00 2001 From: Sandeep Singh Date: Thu, 26 Mar 2020 18:52:48 +0530 Subject: [PATCH 23/90] icnss: Avoid setting of ICNSS_WLFW_EXISTS bit twice Change to avoid setting of ICNSS_WLFW_EXISTS bit twice. Change-Id: Ibc518739e0a7b4a207941e8ca4a483d20cd86510 Signed-off-by: Sandeep Singh --- drivers/soc/qcom/icnss_qmi.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/soc/qcom/icnss_qmi.c b/drivers/soc/qcom/icnss_qmi.c index 26aaefe5ad70..c955911276e7 100644 --- a/drivers/soc/qcom/icnss_qmi.c +++ b/drivers/soc/qcom/icnss_qmi.c @@ -1232,7 +1232,6 @@ int icnss_connect_to_fw_server(struct icnss_priv *priv, void *data) ret = -ENODEV; goto out; } - set_bit(ICNSS_WLFW_EXISTS, &priv->state); sq.sq_family = AF_QIPCRTR; sq.sq_node = event_data->node; From 3594179bdfd85151dfb01f9d4d6d7db8b2404b6f Mon Sep 17 00:00:00 2001 From: Aditya Raut Date: Wed, 18 Mar 2020 23:26:49 +0530 Subject: [PATCH 24/90] usb: gadget: increase write buffer size Increasing the write buffer size from 2k to 50k. 
Change-Id: Ifac500581404467b08b06de0d9b8cd90c4afd0e5 Signed-off-by: Aditya Raut --- drivers/usb/gadget/function/f_cdev.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/usb/gadget/function/f_cdev.c b/drivers/usb/gadget/function/f_cdev.c index 3bbf33bf6793..62a8b73c955a 100644 --- a/drivers/usb/gadget/function/f_cdev.c +++ b/drivers/usb/gadget/function/f_cdev.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2011, 2013-2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2011, 2013-2020, The Linux Foundation. All rights reserved. * Linux Foundation chooses to take subject only to the GPLv2 license terms, * and distributes only under these terms. * @@ -49,7 +49,7 @@ #define BRIDGE_RX_QUEUE_SIZE 8 #define BRIDGE_RX_BUF_SIZE 2048 #define BRIDGE_TX_QUEUE_SIZE 8 -#define BRIDGE_TX_BUF_SIZE 2048 +#define BRIDGE_TX_BUF_SIZE (50 * 1024) #define GS_LOG2_NOTIFY_INTERVAL 5 /* 1 << 5 == 32 msec */ #define GS_NOTIFY_MAXPACKET 10 /* notification + 2 bytes */ From fdcd6e1f3cc4e8c949c7c14705be9e2ee1d3e189 Mon Sep 17 00:00:00 2001 From: Hemant Kumar Date: Fri, 17 Apr 2020 15:48:58 -0700 Subject: [PATCH 25/90] mhi: core: Make sure reg_write_q stores visible to other cores mhi_reg_write_enqueue API stores reg, val and valid which can go out of order. In case valid is set to true before val is set, offload worker running on another core ends up writing stale value to register. Another possibility is valid being set to true but not visible to other cores. When offload worker gets a chance to run, this results into skipping register write. Fix these issues by adding smp_wmb() between stores of val and valid and another after valid is set. 
Change-Id: I3b930e7fad4252d34386de525491f94997b34f36 Signed-off-by: Hemant Kumar --- drivers/bus/mhi/core/mhi_main.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/drivers/bus/mhi/core/mhi_main.c b/drivers/bus/mhi/core/mhi_main.c index 6a858fa59d67..594284a45aac 100644 --- a/drivers/bus/mhi/core/mhi_main.c +++ b/drivers/bus/mhi/core/mhi_main.c @@ -118,7 +118,21 @@ static void mhi_reg_write_enqueue(struct mhi_controller *mhi_cntrl, mhi_cntrl->reg_write_q[q_index].reg_addr = reg_addr; mhi_cntrl->reg_write_q[q_index].val = val; + + /* + * prevent reordering to make sure val is set before valid is set to + * true. This prevents offload worker running on another core to write + * stale value to register with valid set to true. + */ + smp_wmb(); + mhi_cntrl->reg_write_q[q_index].valid = true; + + /* + * make sure valid value is visible to other cores to prevent offload + * worker from skipping the reg write. + */ + smp_wmb(); } void mhi_write_reg_offload(struct mhi_controller *mhi_cntrl, From 5c12e86789cae8e80a8f9c56727df2ce1ce47bde Mon Sep 17 00:00:00 2001 From: Zhenhua Huang Date: Sat, 18 Apr 2020 22:43:59 +0800 Subject: [PATCH 26/90] ion: Improve ION allocation paths Clean up some of the ION heap interfaces so that they are more inline/uniform with respect to each other. Change-Id: I4edabc2c8ccb533898540ceda1fd6aacc2e2e56a Signed-off-by: Isaac J. Manjarres Signed-off-by: Zhenhua Huang --- drivers/staging/android/ion/ion_cma_heap.c | 18 +++++++++++++++--- drivers/staging/android/ion/ion_secure_util.c | 7 ++++++- drivers/staging/android/ion/ion_secure_util.h | 4 +++- 3 files changed, 24 insertions(+), 5 deletions(-) diff --git a/drivers/staging/android/ion/ion_cma_heap.c b/drivers/staging/android/ion/ion_cma_heap.c index e236c713d033..2591a451e20a 100644 --- a/drivers/staging/android/ion/ion_cma_heap.c +++ b/drivers/staging/android/ion/ion_cma_heap.c @@ -3,7 +3,7 @@ * Copyright (C) Linaro 2012 * Author: for ST-Ericsson. 
* - * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2016-2020, The Linux Foundation. All rights reserved. */ #include @@ -25,6 +25,11 @@ struct ion_cma_heap { #define to_cma_heap(x) container_of(x, struct ion_cma_heap, heap) +static bool ion_heap_is_cma_heap_type(enum ion_heap_type type) +{ + return type == ION_HEAP_TYPE_DMA; +} + /* ION CMA heap operations functions */ static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer, unsigned long len, @@ -39,6 +44,13 @@ static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer, int ret; struct device *dev = heap->priv; + if (ion_heap_is_cma_heap_type(buffer->heap->type) && + is_secure_allocation(buffer->flags)) { + pr_err("%s: CMA heap doesn't support secure allocations\n", + __func__); + return -EINVAL; + } + if (align > CONFIG_CMA_ALIGNMENT) align = CONFIG_CMA_ALIGNMENT; @@ -46,7 +58,7 @@ static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer, if (!pages) return -ENOMEM; - if (!(flags & ION_FLAG_SECURE)) { + if (hlos_accessible_buffer(buffer)) { if (PageHighMem(pages)) { unsigned long nr_clear_pages = nr_pages; struct page *page = pages; @@ -65,7 +77,7 @@ static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer, } if (MAKE_ION_ALLOC_DMA_READY || - (flags & ION_FLAG_SECURE) || + (!hlos_accessible_buffer(buffer)) || (!ion_buffer_cached(buffer))) ion_pages_sync_for_device(dev, pages, size, DMA_BIDIRECTIONAL); diff --git a/drivers/staging/android/ion/ion_secure_util.c b/drivers/staging/android/ion/ion_secure_util.c index c0b4c4d45c55..4cbf2ca3f1b5 100644 --- a/drivers/staging/android/ion/ion_secure_util.c +++ b/drivers/staging/android/ion/ion_secure_util.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. 
*/ #include @@ -24,6 +24,11 @@ bool is_secure_vmid_valid(int vmid) vmid == VMID_CP_CDSP); } +bool is_secure_allocation(unsigned long flags) +{ + return !!(flags & (ION_FLAGS_CP_MASK | ION_FLAG_SECURE)); +} + int get_secure_vmid(unsigned long flags) { if (flags & ION_FLAG_CP_TOUCH) diff --git a/drivers/staging/android/ion/ion_secure_util.h b/drivers/staging/android/ion/ion_secure_util.h index bd525e50e86f..97d75550a151 100644 --- a/drivers/staging/android/ion/ion_secure_util.h +++ b/drivers/staging/android/ion/ion_secure_util.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. + * Copyright (c) 2017-2018,2020, The Linux Foundation. All rights reserved. */ #include "ion.h" @@ -23,4 +23,6 @@ int ion_hyp_assign_from_flags(u64 base, u64 size, unsigned long flags); bool hlos_accessible_buffer(struct ion_buffer *buffer); +bool is_secure_allocation(unsigned long flags); + #endif /* _ION_SECURE_UTIL_H */ From a50eb798fd3b0ebf769a4bdf7fc9656cb09165ad Mon Sep 17 00:00:00 2001 From: "Tao,Zhang" Date: Fri, 10 Apr 2020 16:21:59 +0800 Subject: [PATCH 27/90] coresight: tmc: Set flush cti for both etr and etb Set flush cti for both etr and etb. Set FlushandStop bit to default value of FFCR in enablement function. Config the correct trigger number of flush and reset cti for both etr and etb. 
Change-Id: Idee3802e5a69aca521a6259f9936ddb8361a87d4 Signed-off-by: Tao,Zhang --- drivers/hwtracing/coresight/coresight-tmc-etf.c | 2 +- drivers/hwtracing/coresight/coresight-tmc-etr.c | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/hwtracing/coresight/coresight-tmc-etf.c b/drivers/hwtracing/coresight/coresight-tmc-etf.c index a20de48ec3f5..d55944623a97 100644 --- a/drivers/hwtracing/coresight/coresight-tmc-etf.c +++ b/drivers/hwtracing/coresight/coresight-tmc-etf.c @@ -21,7 +21,7 @@ static void tmc_etb_enable_hw(struct tmc_drvdata *drvdata) writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE); writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI | TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT | - TMC_FFCR_TRIGON_TRIGIN, + TMC_FFCR_TRIGON_TRIGIN | TMC_FFCR_STOP_ON_FLUSH, drvdata->base + TMC_FFCR); writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG); diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c index 437e5d4fc2bf..afcdcc1307e7 100644 --- a/drivers/hwtracing/coresight/coresight-tmc-etr.c +++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c @@ -976,7 +976,7 @@ void tmc_etr_enable_hw(struct tmc_drvdata *drvdata) writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI | TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT | - TMC_FFCR_TRIGON_TRIGIN, + TMC_FFCR_TRIGON_TRIGIN | TMC_FFCR_STOP_ON_FLUSH, drvdata->base + TMC_FFCR); writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG); tmc_enable_hw(drvdata); @@ -1355,7 +1355,7 @@ static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev) return -ENOMEM; } coresight_cti_map_trigout(drvdata->cti_flush, 3, 0); - coresight_cti_map_trigin(drvdata->cti_reset, 2, 0); + coresight_cti_map_trigin(drvdata->cti_reset, 0, 0); } else if (drvdata->byte_cntr->sw_usb) { if (!drvdata->etr_buf) { free_buf = new_buf = @@ -1365,7 +1365,7 @@ static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev) } } 
coresight_cti_map_trigout(drvdata->cti_flush, 3, 0); - coresight_cti_map_trigin(drvdata->cti_reset, 2, 0); + coresight_cti_map_trigin(drvdata->cti_reset, 0, 0); drvdata->usbch = usb_qdss_open("qdss_mdm", drvdata->byte_cntr, From f62c00b9cd7d70103fcd2836668f576bff167732 Mon Sep 17 00:00:00 2001 From: Mohammed Siddiq Date: Wed, 8 Apr 2020 14:52:16 +0530 Subject: [PATCH 28/90] icnss: Release qmi handle on qmi server exit This changes release qmi handle and registers again to qmi service for wlan server notifications. Change-Id: I0a20622ec308778c9be2d66ff829f4b6ee841e92 Signed-off-by: Mohammed Siddiq --- drivers/soc/qcom/icnss_qmi.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/drivers/soc/qcom/icnss_qmi.c b/drivers/soc/qcom/icnss_qmi.c index c955911276e7..3141600156ea 100644 --- a/drivers/soc/qcom/icnss_qmi.c +++ b/drivers/soc/qcom/icnss_qmi.c @@ -1255,12 +1255,22 @@ int icnss_connect_to_fw_server(struct icnss_priv *priv, void *data) int icnss_clear_server(struct icnss_priv *priv) { + int ret; + if (!priv) return -ENODEV; icnss_pr_info("QMI Service Disconnected: 0x%lx\n", priv->state); clear_bit(ICNSS_WLFW_CONNECTED, &priv->state); + icnss_unregister_fw_service(priv); + + ret = icnss_register_fw_service(priv); + if (ret < 0) { + icnss_pr_err("WLFW server registration failed\n"); + ICNSS_ASSERT(0); + } + return 0; } From 9df4c5c374014ba923c8be8fcc0711a939df0fb6 Mon Sep 17 00:00:00 2001 From: Pratham Pratap Date: Thu, 2 Apr 2020 12:26:19 +0530 Subject: [PATCH 29/90] usb: gadget: f_qdss: Add ipc logging for qdss driver This change adds ipc logging support for qdss function driver. 
Change-Id: I53be137a161bd7a4b0c4be250938e8fa46b2254e Signed-off-by: Pratham Pratap --- drivers/usb/gadget/function/f_qdss.c | 59 ++++++++++++++++------------ drivers/usb/gadget/function/f_qdss.h | 17 +++++++- 2 files changed, 49 insertions(+), 27 deletions(-) diff --git a/drivers/usb/gadget/function/f_qdss.c b/drivers/usb/gadget/function/f_qdss.c index 278d093e203b..96a35288c409 100644 --- a/drivers/usb/gadget/function/f_qdss.c +++ b/drivers/usb/gadget/function/f_qdss.c @@ -225,7 +225,7 @@ static void qdss_write_complete(struct usb_ep *ep, enum qdss_state state; unsigned long flags; - pr_debug("%s\n", __func__); + qdss_log("%s\n", __func__); if (qdss->debug_inface_enabled) { in = qdss->port.ctrl_in; @@ -259,7 +259,7 @@ static void qdss_ctrl_read_complete(struct usb_ep *ep, struct qdss_request *d_req = req->context; unsigned long flags; - pr_debug("%s\n", __func__); + qdss_log("%s\n", __func__); d_req->actual = req->actual; d_req->status = req->status; @@ -279,14 +279,14 @@ void usb_qdss_free_req(struct usb_qdss_ch *ch) struct usb_request *req; struct list_head *act, *tmp; - pr_debug("%s\n", __func__); - qdss = ch->priv_usb; if (!qdss) { pr_err("%s: qdss ctx is NULL\n", __func__); return; } + qdss_log("%s: channel name = %s\n", __func__, qdss->ch.name); + list_for_each_safe(act, tmp, &qdss->data_write_pool) { req = list_entry(act, struct usb_request, list); list_del(&req->list); @@ -316,7 +316,7 @@ int usb_qdss_alloc_req(struct usb_qdss_ch *ch, int no_write_buf, struct list_head *list_pool; int i; - pr_debug("%s\n", __func__); + qdss_log("%s\n", __func__); if (!qdss) { pr_err("%s: %s closed\n", __func__, ch->name); @@ -371,7 +371,7 @@ static void clear_eps(struct usb_function *f) { struct f_qdss *qdss = func_to_qdss(f); - pr_debug("%s\n", __func__); + qdss_log("%s: channel name = %s\n", __func__, qdss->ch.name); if (qdss->port.ctrl_in) qdss->port.ctrl_in->driver_data = NULL; @@ -383,7 +383,9 @@ static void clear_eps(struct usb_function *f) static void 
clear_desc(struct usb_gadget *gadget, struct usb_function *f) { - pr_debug("%s\n", __func__); + struct f_qdss *qdss = func_to_qdss(f); + + qdss_log("%s: channel name = %s\n", __func__, qdss->ch.name); usb_free_all_descriptors(f); } @@ -395,7 +397,7 @@ static int qdss_bind(struct usb_configuration *c, struct usb_function *f) struct usb_ep *ep; int iface, id, ret; - pr_debug("%s\n", __func__); + qdss_log("%s: channel name = %s\n", __func__, qdss->ch.name); /* Allocate data I/F */ iface = usb_interface_id(c, f); @@ -510,7 +512,7 @@ static void qdss_unbind(struct usb_configuration *c, struct usb_function *f) struct f_qdss *qdss = func_to_qdss(f); struct usb_gadget *gadget = c->cdev->gadget; - pr_debug("%s\n", __func__); + qdss_log("%s: channel name = %s\n", __func__, qdss->ch.name); flush_workqueue(qdss->wq); @@ -528,7 +530,7 @@ static void qdss_eps_disable(struct usb_function *f) { struct f_qdss *qdss = func_to_qdss(f); - pr_debug("%s\n", __func__); + qdss_log("%s: channel name = %s\n", __func__, qdss->ch.name); if (qdss->ctrl_in_enabled) { usb_ep_disable(qdss->port.ctrl_in); @@ -554,7 +556,7 @@ static void usb_qdss_disconnect_work(struct work_struct *work) unsigned long flags; qdss = container_of(work, struct f_qdss, disconnect_w); - pr_debug("%s\n", __func__); + qdss_log("%s: channel name = %s\n", __func__, qdss->ch.name); /* Notify qdss to cancel all active transfers */ @@ -595,7 +597,7 @@ static void qdss_disable(struct usb_function *f) struct f_qdss *qdss = func_to_qdss(f); unsigned long flags; - pr_debug("%s\n", __func__); + qdss_log("%s: channel name = %s\n", __func__, qdss->ch.name); spin_lock_irqsave(&qdss->lock, flags); if (!qdss->usb_connected) { spin_unlock_irqrestore(&qdss->lock, flags); @@ -620,12 +622,12 @@ static void usb_qdss_connect_work(struct work_struct *work) /* If cable is already removed, discard connect_work */ if (qdss->usb_connected == 0) { - pr_debug("%s: discard connect_work\n", __func__); + qdss_log("%s: discard connect_work\n", 
__func__); cancel_work_sync(&qdss->disconnect_w); return; } - pr_debug("%s\n", __func__); + qdss_log("%s: channel name = %s\n", __func__, qdss->ch.name); if (!strcmp(qdss->ch.name, USB_QDSS_CH_MDM)) goto notify; @@ -662,7 +664,7 @@ static int qdss_set_alt(struct usb_function *f, unsigned int intf, struct usb_qdss_ch *ch = &qdss->ch; int ret = 0; - pr_debug("%s qdss pointer = %pK\n", __func__, qdss); + qdss_log("%s qdss pointer = %pK\n", __func__, qdss); qdss->gadget = gadget; if (alt != 0) @@ -728,12 +730,12 @@ static int qdss_set_alt(struct usb_function *f, unsigned int intf, if (qdss->ctrl_out_enabled && qdss->ctrl_in_enabled && qdss->data_enabled) { qdss->usb_connected = 1; - pr_debug("%s usb_connected INTF enabled\n", __func__); + qdss_log("%s usb_connected INTF enabled\n", __func__); } } else { if (qdss->data_enabled) { qdss->usb_connected = 1; - pr_debug("%s usb_connected INTF disabled\n", __func__); + qdss_log("%s usb_connected INTF disabled\n", __func__); } } @@ -808,7 +810,7 @@ int usb_qdss_ctrl_read(struct usb_qdss_ch *ch, struct qdss_request *d_req) unsigned long flags; struct usb_request *req = NULL; - pr_debug("%s\n", __func__); + qdss_log("%s\n", __func__); if (!qdss) return -ENODEV; @@ -853,7 +855,7 @@ int usb_qdss_ctrl_write(struct usb_qdss_ch *ch, struct qdss_request *d_req) unsigned long flags; struct usb_request *req = NULL; - pr_debug("%s\n", __func__); + qdss_log("%s\n", __func__); if (!qdss) return -ENODEV; @@ -897,7 +899,7 @@ int usb_qdss_write(struct usb_qdss_ch *ch, struct qdss_request *d_req) unsigned long flags; struct usb_request *req = NULL; - pr_debug("usb_qdss_data_write\n"); + qdss_log("usb_qdss_data_write\n"); if (!qdss) return -ENODEV; @@ -950,7 +952,7 @@ struct usb_qdss_ch *usb_qdss_open(const char *name, void *priv, unsigned long flags; int found = 0; - pr_debug("%s\n", __func__); + qdss_log("%s\n", __func__); if (!notify) { pr_err("%s: notification func is missing\n", __func__); @@ -968,11 +970,11 @@ struct usb_qdss_ch 
*usb_qdss_open(const char *name, void *priv, if (!found) { spin_unlock_irqrestore(&qdss_lock, flags); - pr_debug("%s failed as %s not found\n", __func__, name); + qdss_log("%s failed as %s not found\n", __func__, name); return NULL; } - pr_debug("%s: qdss ctx found\n", __func__); + qdss_log("%s: qdss ctx found\n", __func__); qdss = container_of(ch, struct f_qdss, ch); ch->priv_usb = qdss; ch->priv = priv; @@ -998,7 +1000,7 @@ void usb_qdss_close(struct usb_qdss_ch *ch) struct usb_request *req; struct qdss_request *d_req; - pr_debug("%s\n", __func__); + qdss_log("%s\n", __func__); spin_lock_irqsave(&qdss_lock, flags); if (!qdss) @@ -1054,7 +1056,7 @@ static void qdss_cleanup(void) struct usb_qdss_ch *_ch; unsigned long flags; - pr_debug("%s\n", __func__); + qdss_log("%s\n", __func__); list_for_each_safe(act, tmp, &usb_qdss_ch_list) { _ch = list_entry(act, struct usb_qdss_ch, list); @@ -1166,7 +1168,7 @@ static int usb_qdss_set_inst_name(struct usb_function_instance *f, } opts->channel_name = ptr; - pr_debug("qdss: channel_name:%s\n", opts->channel_name); + qdss_log("qdss: channel_name:%s\n", opts->channel_name); usb_qdss = alloc_usb_qdss(opts->channel_name); if (IS_ERR(usb_qdss)) { @@ -1219,6 +1221,10 @@ static int __init usb_qdss_init(void) { int ret; + _qdss_ipc_log = ipc_log_context_create(NUM_PAGES, "usb_qdss", 0); + if (IS_ERR_OR_NULL(_qdss_ipc_log)) + _qdss_ipc_log = NULL; + INIT_LIST_HEAD(&usb_qdss_ch_list); ret = usb_function_register(&qdssusb_func); if (ret) { @@ -1230,6 +1236,7 @@ static int __init usb_qdss_init(void) static void __exit usb_qdss_exit(void) { + ipc_log_context_destroy(_qdss_ipc_log); usb_function_unregister(&qdssusb_func); qdss_cleanup(); } diff --git a/drivers/usb/gadget/function/f_qdss.h b/drivers/usb/gadget/function/f_qdss.h index 5a68f235f0b1..fe847d2c947d 100644 --- a/drivers/usb/gadget/function/f_qdss.h +++ b/drivers/usb/gadget/function/f_qdss.h @@ -1,12 +1,13 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (c) 
2012-2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved. */ #ifndef _F_QDSS_H #define _F_QDSS_H #include +#include #include #include #include @@ -66,6 +67,20 @@ struct f_qdss { bool qdss_close; }; +static void *_qdss_ipc_log; + +#define NUM_PAGES 10 /* # of pages for ipc logging */ + +#ifdef CONFIG_DYNAMIC_DEBUG +#define qdss_log(fmt, ...) do { \ + ipc_log_string(_qdss_ipc_log, "%s: " fmt, __func__, ##__VA_ARGS__); \ + dynamic_pr_debug("%s: " fmt, __func__, ##__VA_ARGS__); \ +} while (0) +#else +#define qdss_log(fmt, ...) \ + ipc_log_string(_qdss_ipc_log, "%s: " fmt, __func__, ##__VA_ARGS__) +#endif + struct usb_qdss_opts { struct usb_function_instance func_inst; struct f_qdss *usb_qdss; From 913b0eef36217ada78ad4a11dfad21475bff2b17 Mon Sep 17 00:00:00 2001 From: Udipto Goswami Date: Thu, 9 Apr 2020 15:24:05 +0530 Subject: [PATCH 30/90] usb: phy: qusb: Drive a pulse on DP on CDP detection Some USB host PCs switch the downstream port mode from CDP to SDP if the device does not pull DP up within certain duration (~2s). This limits the current drawn by the device to 500mA(or 900mA) as opposed to the possible 1.5A resulting in slow charging of the battery. Fix this by driving a DP pulse when there is a connect notification from PMIC and detected the charger type as CDP. 
Change-Id: Idef17e1d3504ee0fb4707d546ad553cf00aa3179 Signed-off-by: Udipto Goswami --- drivers/usb/phy/phy-msm-qusb-v2.c | 82 +++++++++++++++++++++++++++++++ 1 file changed, 82 insertions(+) diff --git a/drivers/usb/phy/phy-msm-qusb-v2.c b/drivers/usb/phy/phy-msm-qusb-v2.c index c5d4d823e65b..b113be2785be 100644 --- a/drivers/usb/phy/phy-msm-qusb-v2.c +++ b/drivers/usb/phy/phy-msm-qusb-v2.c @@ -22,6 +22,7 @@ /* QUSB2PHY_PWR_CTRL1 register related bits */ #define PWR_CTRL1_POWR_DOWN BIT(0) +#define CLAMP_N_EN BIT(1) /* QUSB2PHY_PLL_COMMON_STATUS_ONE register related bits */ #define CORE_READY_STATUS BIT(0) @@ -69,6 +70,10 @@ /* STAT5 register bits */ #define VSTATUS_PLL_LOCK_STATUS_MASK BIT(0) +/* DEBUG_CTRL4 register bits */ +#define FORCED_UTMI_DPPULLDOWN BIT(2) +#define FORCED_UTMI_DMPULLDOWN BIT(3) + enum qusb_phy_reg { PORT_TUNE1, PLL_COMMON_STATUS_ONE, @@ -79,6 +84,8 @@ enum qusb_phy_reg { BIAS_CTRL_2, DEBUG_CTRL1, DEBUG_CTRL2, + DEBUG_CTRL3, + DEBUG_CTRL4, STAT5, USB2_PHY_REG_MAX, }; @@ -396,6 +403,25 @@ static void qusb_phy_write_seq(void __iomem *base, u32 *seq, int cnt, } } +static void msm_usb_write_readback(void __iomem *base, u32 offset, + const u32 mask, u32 val) +{ + u32 write_val, tmp = readl_relaxed(base + offset); + + tmp &= ~mask; /* retain other bits */ + write_val = tmp | val; + + writel_relaxed(write_val, base + offset); + + /* Read back to see if val was written */ + tmp = readl_relaxed(base + offset); + tmp &= mask; /* clear other bits */ + + if (tmp != val) + pr_err("%s: write: %x to QSCRATCH: %x FAILED\n", + __func__, val, offset); +} + static void qusb_phy_reset(struct qusb_phy *qphy) { int ret; @@ -736,6 +762,61 @@ static int qusb_phy_notify_disconnect(struct usb_phy *phy, return 0; } +static int msm_qusb_phy_drive_dp_pulse(struct usb_phy *phy, + unsigned int interval_ms) +{ + struct qusb_phy *qphy = container_of(phy, struct qusb_phy, phy); + int ret; + + ret = qusb_phy_enable_power(qphy); + if (ret < 0) { + dev_dbg(qphy->phy.dev, + "dpdm 
regulator enable failed:%d\n", ret); + return ret; + } + qusb_phy_enable_clocks(qphy, true); + msm_usb_write_readback(qphy->base, qphy->phy_reg[PWR_CTRL1], + PWR_CTRL1_POWR_DOWN, 0x00); + msm_usb_write_readback(qphy->base, qphy->phy_reg[DEBUG_CTRL4], + FORCED_UTMI_DPPULLDOWN, 0x00); + msm_usb_write_readback(qphy->base, qphy->phy_reg[DEBUG_CTRL4], + FORCED_UTMI_DMPULLDOWN, + FORCED_UTMI_DMPULLDOWN); + msm_usb_write_readback(qphy->base, qphy->phy_reg[DEBUG_CTRL3], + 0xd1, 0xd1); + msm_usb_write_readback(qphy->base, qphy->phy_reg[PWR_CTRL1], + CLAMP_N_EN, CLAMP_N_EN); + msm_usb_write_readback(qphy->base, qphy->phy_reg[INTR_CTRL], + DPSE_INTR_HIGH_SEL, 0x00); + msm_usb_write_readback(qphy->base, qphy->phy_reg[INTR_CTRL], + DPSE_INTR_EN, DPSE_INTR_EN); + + msleep(interval_ms); + + msm_usb_write_readback(qphy->base, qphy->phy_reg[INTR_CTRL], + DPSE_INTR_HIGH_SEL | + DPSE_INTR_EN, 0x00); + msm_usb_write_readback(qphy->base, qphy->phy_reg[DEBUG_CTRL3], + 0xd1, 0x00); + msm_usb_write_readback(qphy->base, qphy->phy_reg[DEBUG_CTRL4], + FORCED_UTMI_DPPULLDOWN | + FORCED_UTMI_DMPULLDOWN, 0x00); + msm_usb_write_readback(qphy->base, qphy->phy_reg[PWR_CTRL1], + PWR_CTRL1_POWR_DOWN | + CLAMP_N_EN, 0x00); + + msleep(20); + + qusb_phy_enable_clocks(qphy, false); + ret = qusb_phy_disable_power(qphy); + if (ret < 0) { + dev_dbg(qphy->phy.dev, + "dpdm regulator disable failed:%d\n", ret); + } + + return 0; +} + static int qusb_phy_dpdm_regulator_enable(struct regulator_dev *rdev) { int ret = 0; @@ -1133,6 +1214,7 @@ static int qusb_phy_probe(struct platform_device *pdev) qphy->phy.type = USB_PHY_TYPE_USB2; qphy->phy.notify_connect = qusb_phy_notify_connect; qphy->phy.notify_disconnect = qusb_phy_notify_disconnect; + qphy->phy.drive_dp_pulse = msm_qusb_phy_drive_dp_pulse; ret = usb_add_phy_dev(&qphy->phy); if (ret) From bd2bf2025e5ed0c9cb775179931db15f7973ad25 Mon Sep 17 00:00:00 2001 From: Akhil P Oommen Date: Thu, 26 Mar 2020 19:51:07 +0530 Subject: [PATCH 31/90] msm: kgsl: Fix a 
memory leak during Global PT init Make sure we free iommu_pt when SMMU aperture programing fails. Change-Id: I73fef2c61c62ba5040549abab81fa6ab7d293f64 Signed-off-by: Akhil P Oommen --- drivers/gpu/msm/kgsl_iommu.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c index 61c6eb0e9be4..23adfb474e60 100644 --- a/drivers/gpu/msm/kgsl_iommu.c +++ b/drivers/gpu/msm/kgsl_iommu.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2011-2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2011-2020, The Linux Foundation. All rights reserved. */ #include @@ -1222,7 +1222,7 @@ static int _init_global_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt) dev_err(device->dev, "SMMU aperture programming call failed with error %d\n", ret); - return ret; + goto done; } } From 1abc9c066b4359da95e0ca3371623cd0257145fb Mon Sep 17 00:00:00 2001 From: Akhil P Oommen Date: Thu, 26 Mar 2020 19:36:41 +0530 Subject: [PATCH 32/90] msm: kgsl: Fix skipping aperture programming During the first adreno_start(), we initialize the global PT and make an scm call to program the aperture related registers. This programing is skipped when scm_is_call_available() call fails. Skipping aperture programing can make CP get stuck during a context switch. To fix this, avoid scm_is_call_available() call and force aperture programing from secure world for all a6xx GPUs. 
Change-Id: I13923afbece0be12b1a9336b7d20c3039164a6b6 Signed-off-by: Akhil P Oommen --- drivers/gpu/msm/adreno.c | 3 +++ drivers/gpu/msm/kgsl_iommu.c | 4 ++-- drivers/gpu/msm/kgsl_mmu.h | 4 +++- 3 files changed, 8 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c index 1a28f59dd8ea..ddd27c8cf1e6 100644 --- a/drivers/gpu/msm/adreno.c +++ b/drivers/gpu/msm/adreno.c @@ -1429,6 +1429,9 @@ static int adreno_probe(struct platform_device *pdev) if (adreno_support_64bit(adreno_dev)) device->mmu.features |= KGSL_MMU_64BIT; + if (adreno_is_a6xx(adreno_dev)) + device->mmu.features |= KGSL_MMU_SMMU_APERTURE; + device->pwrctrl.bus_width = adreno_dev->gpucore->bus_width; status = kgsl_device_platform_probe(device); diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c index 23adfb474e60..c705e0448307 100644 --- a/drivers/gpu/msm/kgsl_iommu.c +++ b/drivers/gpu/msm/kgsl_iommu.c @@ -1203,8 +1203,8 @@ static int _init_global_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt) goto done; } - if (!MMU_FEATURE(mmu, KGSL_MMU_GLOBAL_PAGETABLE) && - scm_is_call_available(SCM_SVC_MP, CP_SMMU_APERTURE_ID)) { + if (kgsl_mmu_is_perprocess(mmu) && MMU_FEATURE(mmu, + KGSL_MMU_SMMU_APERTURE)) { struct scm_desc desc = {0}; desc.args[0] = 0xFFFF0000 | ((CP_APERTURE_REG & 0xff) << 8) | diff --git a/drivers/gpu/msm/kgsl_mmu.h b/drivers/gpu/msm/kgsl_mmu.h index 7cc33213084d..3eb84f9ae77d 100644 --- a/drivers/gpu/msm/kgsl_mmu.h +++ b/drivers/gpu/msm/kgsl_mmu.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (c) 2002,2007-2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2002,2007-2020, The Linux Foundation. All rights reserved. 
*/ #ifndef __KGSL_MMU_H #define __KGSL_MMU_H @@ -135,6 +135,8 @@ struct kgsl_mmu_pt_ops { #define KGSL_MMU_NEED_GUARD_PAGE BIT(9) /* The device supports IO coherency */ #define KGSL_MMU_IO_COHERENT BIT(10) +/* The device supports aperture programming from secure world */ +#define KGSL_MMU_SMMU_APERTURE BIT(11) /** * struct kgsl_mmu - Master definition for KGSL MMU devices From fc92480cf1ed44a1a94d8e0657a8d1ddbb796f60 Mon Sep 17 00:00:00 2001 From: Akhil P Oommen Date: Fri, 3 Apr 2020 21:45:33 +0530 Subject: [PATCH 33/90] msm: kgsl: Use milliseconds in Snapshot OS data Use milliseconds instead of jiffies in the snapshot OS section. Change-Id: I14519234b6ccac21e25401b41732abc5eaf13f38 Signed-off-by: Akhil P Oommen Signed-off-by: Sebanti Das --- drivers/gpu/msm/kgsl_snapshot.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/msm/kgsl_snapshot.c b/drivers/gpu/msm/kgsl_snapshot.c index 99bb67bc2511..0397177d8c6c 100644 --- a/drivers/gpu/msm/kgsl_snapshot.c +++ b/drivers/gpu/msm/kgsl_snapshot.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved. 
*/ #include @@ -117,7 +117,8 @@ static size_t snapshot_os(struct kgsl_device *device, /* Remember the power information */ header->power_flags = pwr->power_flags; header->power_level = pwr->active_pwrlevel; - header->power_interval_timeout = pwr->interval_timeout; + header->power_interval_timeout = + jiffies_to_msecs(pwr->interval_timeout); header->grpclk = kgsl_get_clkrate(pwr->grp_clks[0]); /* @@ -204,7 +205,8 @@ static size_t snapshot_os_no_ctxt(struct kgsl_device *device, /* Remember the power information */ header->power_flags = pwr->power_flags; header->power_level = pwr->active_pwrlevel; - header->power_interval_timeout = pwr->interval_timeout; + header->power_interval_timeout = + jiffies_to_msecs(pwr->interval_timeout); header->grpclk = kgsl_get_clkrate(pwr->grp_clks[0]); /* Return the size of the data segment */ From 6e46374458b8794163780ab1b980161a987f48eb Mon Sep 17 00:00:00 2001 From: Akhil P Oommen Date: Tue, 7 Apr 2020 19:41:41 +0530 Subject: [PATCH 34/90] msm: kgsl: Poll GDSCR to ensure CX collapse The regulator_is_enabled() API doesn't guarantee that CX gdsc has collapsed at hardware. There could be a vote on the GDSC from another subsystem like TZ. So poll the CX GDSCR register to ensure that CX has indeed collapsed. Change-Id: Id98c5318d5358b16f4277cb5d96027add63ad801 Signed-off-by: Akhil P Oommen Signed-off-by: Sebanti Das --- drivers/gpu/msm/a6xx_reg.h | 3 ++- drivers/gpu/msm/adreno_a6xx_gmu.c | 13 +++++++++++++ drivers/gpu/msm/kgsl_gmu.c | 11 ++++++----- drivers/gpu/msm/kgsl_gmu_core.c | 12 +++++++++++- drivers/gpu/msm/kgsl_gmu_core.h | 4 +++- 5 files changed, 35 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/msm/a6xx_reg.h b/drivers/gpu/msm/a6xx_reg.h index ea56fe56f93d..f491f9ba2753 100644 --- a/drivers/gpu/msm/a6xx_reg.h +++ b/drivers/gpu/msm/a6xx_reg.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* - * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2017-2020, The Linux Foundation. 
All rights reserved. */ #ifndef _A6XX_REG_H @@ -1055,6 +1055,7 @@ /* GPUCC registers */ #define A6XX_GPU_CC_GX_GDSCR 0x24403 #define A6XX_GPU_CC_GX_DOMAIN_MISC 0x24542 +#define A6XX_GPU_CC_CX_GDSCR 0x2441B /* GPU RSC sequencer registers */ #define A6XX_GPU_RSCC_RSC_STATUS0_DRV0 0x00004 diff --git a/drivers/gpu/msm/adreno_a6xx_gmu.c b/drivers/gpu/msm/adreno_a6xx_gmu.c index 0bb59bbafb80..3a20e0b7bd54 100644 --- a/drivers/gpu/msm/adreno_a6xx_gmu.c +++ b/drivers/gpu/msm/adreno_a6xx_gmu.c @@ -821,6 +821,18 @@ static bool a6xx_gmu_gx_is_on(struct kgsl_device *device) return is_on(val); } +/* + * a6xx_gmu_cx_is_on() - Check if CX is on using GPUCC register + * @device - Pointer to KGSL device struct + */ +static bool a6xx_gmu_cx_is_on(struct kgsl_device *device) +{ + unsigned int val; + + gmu_core_regread(device, A6XX_GPU_CC_CX_GDSCR, &val); + return (val & BIT(31)); +} + /* * a6xx_gmu_sptprac_is_on() - Check if SPTP is on using pwr status register * @adreno_dev - Pointer to adreno_device @@ -1741,6 +1753,7 @@ struct gmu_dev_ops adreno_a6xx_gmudev = { .enable_lm = a6xx_gmu_enable_lm, .rpmh_gpu_pwrctrl = a6xx_gmu_rpmh_gpu_pwrctrl, .gx_is_on = a6xx_gmu_gx_is_on, + .cx_is_on = a6xx_gmu_cx_is_on, .wait_for_lowest_idle = a6xx_gmu_wait_for_lowest_idle, .wait_for_gmu_idle = a6xx_gmu_wait_for_idle, .ifpc_store = a6xx_gmu_ifpc_store, diff --git a/drivers/gpu/msm/kgsl_gmu.c b/drivers/gpu/msm/kgsl_gmu.c index 4816d2b789f0..7efe0278691a 100644 --- a/drivers/gpu/msm/kgsl_gmu.c +++ b/drivers/gpu/msm/kgsl_gmu.c @@ -1468,8 +1468,9 @@ static int gmu_enable_gdsc(struct gmu_device *gmu) } #define CX_GDSC_TIMEOUT 5000 /* ms */ -static int gmu_disable_gdsc(struct gmu_device *gmu) +static int gmu_disable_gdsc(struct kgsl_device *device) { + struct gmu_device *gmu = KGSL_GMU_DEVICE(device); int ret; unsigned long t; @@ -1491,13 +1492,13 @@ static int gmu_disable_gdsc(struct gmu_device *gmu) */ t = jiffies + msecs_to_jiffies(CX_GDSC_TIMEOUT); do { - if (!regulator_is_enabled(gmu->cx_gdsc)) + if 
(!gmu_core_dev_cx_is_on(device)) return 0; usleep_range(10, 100); } while (!(time_after(jiffies, t))); - if (!regulator_is_enabled(gmu->cx_gdsc)) + if (!gmu_core_dev_cx_is_on(device)) return 0; dev_err(&gmu->pdev->dev, "GMU CX gdsc off timeout\n"); @@ -1525,7 +1526,7 @@ static int gmu_suspend(struct kgsl_device *device) if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_CX_GDSC)) regulator_set_mode(gmu->cx_gdsc, REGULATOR_MODE_IDLE); - gmu_disable_gdsc(gmu); + gmu_disable_gdsc(device); if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_CX_GDSC)) regulator_set_mode(gmu->cx_gdsc, REGULATOR_MODE_NORMAL); @@ -1700,7 +1701,7 @@ static void gmu_stop(struct kgsl_device *device) gmu_dev_ops->rpmh_gpu_pwrctrl(device, GMU_FW_STOP, 0, 0); gmu_disable_clks(device); - gmu_disable_gdsc(gmu); + gmu_disable_gdsc(device); msm_bus_scale_client_update_request(gmu->pcl, 0); return; diff --git a/drivers/gpu/msm/kgsl_gmu_core.c b/drivers/gpu/msm/kgsl_gmu_core.c index 26a283a3865f..5bd48cca0920 100644 --- a/drivers/gpu/msm/kgsl_gmu_core.c +++ b/drivers/gpu/msm/kgsl_gmu_core.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. */ #include @@ -343,6 +343,16 @@ bool gmu_core_dev_gx_is_on(struct kgsl_device *device) return true; } +bool gmu_core_dev_cx_is_on(struct kgsl_device *device) +{ + struct gmu_dev_ops *ops = GMU_DEVICE_OPS(device); + + if (ops && ops->cx_is_on) + return ops->cx_is_on(device); + + return true; +} + int gmu_core_dev_ifpc_show(struct kgsl_device *device) { struct gmu_dev_ops *ops = GMU_DEVICE_OPS(device); diff --git a/drivers/gpu/msm/kgsl_gmu_core.h b/drivers/gpu/msm/kgsl_gmu_core.h index 24cb1d84bb06..bab59077d9e7 100644 --- a/drivers/gpu/msm/kgsl_gmu_core.h +++ b/drivers/gpu/msm/kgsl_gmu_core.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. 
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. */ #ifndef __KGSL_GMU_CORE_H #define __KGSL_GMU_CORE_H @@ -138,6 +138,7 @@ struct gmu_dev_ops { int (*wait_for_lowest_idle)(struct kgsl_device *device); int (*wait_for_gmu_idle)(struct kgsl_device *device); bool (*gx_is_on)(struct kgsl_device *device); + bool (*cx_is_on)(struct kgsl_device *device); void (*prepare_stop)(struct kgsl_device *device); int (*ifpc_store)(struct kgsl_device *device, unsigned int val); unsigned int (*ifpc_show)(struct kgsl_device *device); @@ -224,6 +225,7 @@ void gmu_core_dev_enable_lm(struct kgsl_device *device); void gmu_core_dev_snapshot(struct kgsl_device *device, struct kgsl_snapshot *snapshot); bool gmu_core_dev_gx_is_on(struct kgsl_device *device); +bool gmu_core_dev_cx_is_on(struct kgsl_device *device); int gmu_core_dev_ifpc_show(struct kgsl_device *device); int gmu_core_dev_ifpc_store(struct kgsl_device *device, unsigned int val); void gmu_core_dev_prepare_stop(struct kgsl_device *device); From eb60a50ebf70b5baf0c2ac186d656a1d5d8211a1 Mon Sep 17 00:00:00 2001 From: Anirudh Ghayal Date: Wed, 22 Apr 2020 21:48:02 +0530 Subject: [PATCH 35/90] power: power-supply: Add POWER_SUPPLY_PROP_CHARGER_STATUS property CHARGER_STATUS power-supply property reports the charger status. 
Change-Id: I269b0be18b56d56c0ab0e68ee1d7194d09824dd6 Signed-off-by: Anirudh Ghayal --- drivers/power/supply/power_supply_sysfs.c | 1 + include/linux/power_supply.h | 1 + 2 files changed, 2 insertions(+) diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c index 1a188a9a0d59..b9e064ac401c 100644 --- a/drivers/power/supply/power_supply_sysfs.c +++ b/drivers/power/supply/power_supply_sysfs.c @@ -481,6 +481,7 @@ static struct device_attribute power_supply_attrs[] = { POWER_SUPPLY_ATTR(cp_ilim), POWER_SUPPLY_ATTR(irq_status), POWER_SUPPLY_ATTR(parallel_output_mode), + POWER_SUPPLY_ATTR(charger_status), /* Local extensions of type int64_t */ POWER_SUPPLY_ATTR(charge_counter_ext), /* Properties of type `const char *' */ diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h index e9a6d4d85309..df38e9a09765 100644 --- a/include/linux/power_supply.h +++ b/include/linux/power_supply.h @@ -361,6 +361,7 @@ enum power_supply_property { POWER_SUPPLY_PROP_CP_ILIM, POWER_SUPPLY_PROP_IRQ_STATUS, POWER_SUPPLY_PROP_PARALLEL_OUTPUT_MODE, + POWER_SUPPLY_PROP_CHARGER_STATUS, /* Local extensions of type int64_t */ POWER_SUPPLY_PROP_CHARGE_COUNTER_EXT, /* Properties of type `const char *' */ From 57421ccfeaa8d9e4af7d5dc54ed301ba19bbc20d Mon Sep 17 00:00:00 2001 From: Anirudh Ghayal Date: Sun, 29 Mar 2020 17:42:12 +0530 Subject: [PATCH 36/90] power: smb5-lib: Report charger detection status Use the CHARGER_STATUS to report the charger-detection status and INPUT_VOLTAGE_SETTLED to report input voltage, reading them from SDAM. While at it, clear the SMB_EN for CP in the shutdown path to prevent SMB from being enabled in the next boot. 
Change-Id: I269b0be18b56d56c0ab0e68ee1d7194d09824dd7 Signed-off-by: Anirudh Ghayal --- drivers/power/supply/qcom/qpnp-smb5.c | 82 ++++++++++++++++++++++++++- drivers/power/supply/qcom/smb5-lib.c | 13 +++++ drivers/power/supply/qcom/smb5-lib.h | 1 + drivers/power/supply/qcom/smb5-reg.h | 5 ++ 4 files changed, 100 insertions(+), 1 deletion(-) diff --git a/drivers/power/supply/qcom/qpnp-smb5.c b/drivers/power/supply/qcom/qpnp-smb5.c index 68ba7333e2da..f41bcb19eb19 100644 --- a/drivers/power/supply/qcom/qpnp-smb5.c +++ b/drivers/power/supply/qcom/qpnp-smb5.c @@ -724,6 +724,46 @@ static int smb5_parse_dt_voltages(struct smb5 *chip, struct device_node *node) return 0; } +static int smb5_parse_sdam(struct smb5 *chip, struct device_node *node) +{ + struct device_node *child; + struct smb_charger *chg = &chip->chg; + struct property *prop; + const char *name; + int rc; + u32 base; + u8 type; + + for_each_available_child_of_node(node, child) { + of_property_for_each_string(child, "reg", prop, name) { + rc = of_property_read_u32(child, "reg", &base); + if (rc < 0) { + pr_err("Failed to read base rc=%d\n", rc); + return rc; + } + + rc = smblib_read(chg, base + PERPH_TYPE_OFFSET, &type); + if (rc < 0) { + pr_err("Failed to read type rc=%d\n", rc); + return rc; + } + + switch (type) { + case SDAM_TYPE: + chg->sdam_base = base; + break; + default: + break; + } + } + } + + if (!chg->sdam_base) + pr_debug("SDAM node not defined\n"); + + return 0; +} + static int smb5_parse_dt(struct smb5 *chip) { struct smb_charger *chg = &chip->chg; @@ -751,6 +791,10 @@ static int smb5_parse_dt(struct smb5 *chip) if (rc < 0) return rc; + rc = smb5_parse_sdam(chip, node); + if (rc < 0) + return rc; + return 0; } @@ -822,6 +866,8 @@ static enum power_supply_property smb5_usb_props[] = { POWER_SUPPLY_PROP_SKIN_HEALTH, POWER_SUPPLY_PROP_APSD_RERUN, POWER_SUPPLY_PROP_APSD_TIMEOUT, + POWER_SUPPLY_PROP_CHARGER_STATUS, + POWER_SUPPLY_PROP_INPUT_VOLTAGE_SETTLED, }; static int smb5_usb_get_prop(struct 
power_supply *psy, @@ -831,6 +877,7 @@ static int smb5_usb_get_prop(struct power_supply *psy, struct smb5 *chip = power_supply_get_drvdata(psy); struct smb_charger *chg = &chip->chg; int rc = 0; + u8 reg = 0, buff[2] = {0}; val->intval = 0; switch (psp) { @@ -971,6 +1018,24 @@ static int smb5_usb_get_prop(struct power_supply *psy, case POWER_SUPPLY_PROP_APSD_TIMEOUT: val->intval = chg->apsd_ext_timeout; break; + case POWER_SUPPLY_PROP_CHARGER_STATUS: + val->intval = 0; + if (chg->sdam_base) { + rc = smblib_read(chg, + chg->sdam_base + SDAM_QC_DET_STATUS_REG, ®); + if (!rc) + val->intval = reg; + } + break; + case POWER_SUPPLY_PROP_INPUT_VOLTAGE_SETTLED: + val->intval = 0; + if (chg->sdam_base) { + rc = smblib_batch_read(chg, + chg->sdam_base + SDAM_QC_ADC_LSB_REG, buff, 2); + if (!rc) + val->intval = (buff[1] << 8 | buff[0]) * 1038; + } + break; default: pr_err("get prop %d is not supported in usb\n", psp); rc = -EINVAL; @@ -2568,11 +2633,23 @@ static int smb5_init_hw(struct smb5 *chip) { struct smb_charger *chg = &chip->chg; int rc; - u8 val = 0, mask = 0; + u8 val = 0, mask = 0, buf[2] = {0}; if (chip->dt.no_battery) chg->fake_capacity = 50; + if (chg->sdam_base) { + rc = smblib_write(chg, + chg->sdam_base + SDAM_QC_DET_STATUS_REG, 0); + if (rc < 0) + pr_err("Couldn't clear SDAM QC status rc=%d\n", rc); + + rc = smblib_batch_write(chg, + chg->sdam_base + SDAM_QC_ADC_LSB_REG, buf, 2); + if (rc < 0) + pr_err("Couldn't clear SDAM ADC status rc=%d\n", rc); + } + if (chip->dt.batt_profile_fcc_ua < 0) smblib_get_charge_param(chg, &chg->param.fcc, &chg->batt_profile_fcc_ua); @@ -3628,6 +3705,9 @@ static void smb5_shutdown(struct platform_device *pdev) /* disable all interrupts */ smb5_disable_interrupts(chg); + /* disable the SMB_EN configuration */ + smblib_masked_write(chg, MISC_SMB_EN_CMD_REG, EN_CP_CMD_BIT, 0); + /* configure power role for UFP */ if (chg->connector_type == POWER_SUPPLY_CONNECTOR_TYPEC) smblib_masked_write(chg, TYPE_C_MODE_CFG_REG, diff --git 
a/drivers/power/supply/qcom/smb5-lib.c b/drivers/power/supply/qcom/smb5-lib.c index 2a0e4fd75446..9cdd1e6522ee 100644 --- a/drivers/power/supply/qcom/smb5-lib.c +++ b/drivers/power/supply/qcom/smb5-lib.c @@ -5905,6 +5905,7 @@ static void typec_src_removal(struct smb_charger *chg) struct smb_irq_data *data; struct storm_watch *wdata; int sec_charger; + u8 val[2] = {0}; sec_charger = chg->sec_pl_present ? POWER_SUPPLY_CHARGER_SEC_PL : POWER_SUPPLY_CHARGER_SEC_NONE; @@ -5995,6 +5996,18 @@ static void typec_src_removal(struct smb_charger *chg) smblib_err(chg, "Couldn't write float charger options rc=%d\n", rc); + if (chg->sdam_base) { + rc = smblib_write(chg, + chg->sdam_base + SDAM_QC_DET_STATUS_REG, 0); + if (rc < 0) + pr_err("Couldn't clear SDAM QC status rc=%d\n", rc); + + rc = smblib_batch_write(chg, + chg->sdam_base + SDAM_QC_ADC_LSB_REG, val, 2); + if (rc < 0) + pr_err("Couldn't clear SDAM ADC status rc=%d\n", rc); + } + if (!chg->pr_swap_in_progress) { rc = smblib_usb_pd_adapter_allowance_override(chg, FORCE_NULL); if (rc < 0) diff --git a/drivers/power/supply/qcom/smb5-lib.h b/drivers/power/supply/qcom/smb5-lib.h index 244578dcfb85..b907377ecdc3 100644 --- a/drivers/power/supply/qcom/smb5-lib.h +++ b/drivers/power/supply/qcom/smb5-lib.h @@ -382,6 +382,7 @@ struct smb_charger { struct smb_chg_freq chg_freq; int otg_delay_ms; int weak_chg_icl_ua; + u32 sdam_base; bool pd_not_supported; /* locks */ diff --git a/drivers/power/supply/qcom/smb5-reg.h b/drivers/power/supply/qcom/smb5-reg.h index a5fe6918c392..d77c8cb00cbb 100644 --- a/drivers/power/supply/qcom/smb5-reg.h +++ b/drivers/power/supply/qcom/smb5-reg.h @@ -22,6 +22,7 @@ #define PERPH_SUBTYPE_OFFSET 0x05 #define SUBTYPE_MASK GENMASK(7, 0) #define INT_RT_STS_OFFSET 0x10 +#define SDAM_TYPE 0x2E /******************************** * CHGR Peripheral Registers * @@ -549,4 +550,8 @@ enum { /* SDAM regs */ #define MISC_PBS_RT_STS_REG (MISC_PBS_BASE + 0x10) #define PULSE_SKIP_IRQ_BIT BIT(4) + +#define 
SDAM_QC_DET_STATUS_REG 0x58 +#define SDAM_QC_ADC_LSB_REG 0x54 + #endif /* __SMB5_CHARGER_REG_H */ From ea80f0a2bf15134cd6caa748bb0c2556714b7034 Mon Sep 17 00:00:00 2001 From: Anirudh Ghayal Date: Mon, 20 Apr 2020 22:39:23 +0530 Subject: [PATCH 37/90] mfd: qcom-i2c-pmic: Toggle STAT pin at init Logic to toggle STAT pin during init. Change-Id: I269b0be18b56d56c0ab0e68ee1d7194d09824dd5 Signed-off-by: Anirudh Ghayal --- drivers/mfd/qcom-i2c-pmic.c | 75 ++++++++++++++++++++++++++++++++++++- 1 file changed, 74 insertions(+), 1 deletion(-) diff --git a/drivers/mfd/qcom-i2c-pmic.c b/drivers/mfd/qcom-i2c-pmic.c index 8ea90e8b07b0..d0ba0c5494f6 100644 --- a/drivers/mfd/qcom-i2c-pmic.c +++ b/drivers/mfd/qcom-i2c-pmic.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. + * Copyright (c) 2016-2017, 2020, The Linux Foundation. All rights reserved. */ #define pr_fmt(fmt) "I2C PMIC: %s: " fmt, __func__ @@ -61,6 +61,7 @@ struct i2c_pmic { int summary_irq; bool resume_completed; bool irq_waiting; + bool toggle_stat; }; static void i2c_pmic_irq_bus_lock(struct irq_data *d) @@ -473,6 +474,9 @@ static int i2c_pmic_parse_dt(struct i2c_pmic *chip) of_property_read_string(node, "pinctrl-names", &chip->pinctrl_name); + chip->toggle_stat = of_property_read_bool(node, + "qcom,enable-toggle-stat"); + return rc; } @@ -513,6 +517,69 @@ static int i2c_pmic_determine_initial_status(struct i2c_pmic *chip) return 0; } +#define INT_TEST_OFFSET 0xE0 +#define INT_TEST_MODE_EN_BIT BIT(7) +#define INT_TEST_VAL_OFFSET 0xE1 +#define INT_0_BIT BIT(0) +static int i2c_pmic_toggle_stat(struct i2c_pmic *chip) +{ + int rc = 0, i; + + if (!chip->toggle_stat || !chip->num_periphs) + return 0; + + rc = regmap_write(chip->regmap, + chip->periph[0].addr | INT_EN_SET_OFFSET, + INT_0_BIT); + if (rc < 0) { + pr_err("Couldn't write to int_en_set rc=%d\n", rc); + return rc; + } + + rc = regmap_write(chip->regmap, chip->periph[0].addr | 
INT_TEST_OFFSET, + INT_TEST_MODE_EN_BIT); + if (rc < 0) { + pr_err("Couldn't write to int_test rc=%d\n", rc); + return rc; + } + + for (i = 0; i < 5; i++) { + rc = regmap_write(chip->regmap, + chip->periph[0].addr | INT_TEST_VAL_OFFSET, + INT_0_BIT); + if (rc < 0) { + pr_err("Couldn't write to int_test_val rc=%d\n", rc); + goto exit; + } + + usleep_range(10000, 11000); + + rc = regmap_write(chip->regmap, + chip->periph[0].addr | INT_TEST_VAL_OFFSET, + 0); + if (rc < 0) { + pr_err("Couldn't write to int_test_val rc=%d\n", rc); + goto exit; + } + + rc = regmap_write(chip->regmap, + chip->periph[0].addr | INT_LATCHED_CLR_OFFSET, + INT_0_BIT); + if (rc < 0) { + pr_err("Couldn't write to int_latched_clr rc=%d\n", rc); + goto exit; + } + + usleep_range(10000, 11000); + } +exit: + regmap_write(chip->regmap, chip->periph[0].addr | INT_TEST_OFFSET, 0); + regmap_write(chip->regmap, chip->periph[0].addr | INT_EN_CLR_OFFSET, + INT_0_BIT); + + return rc; +} + static struct regmap_config i2c_pmic_regmap_config = { .reg_bits = 16, .val_bits = 8, @@ -571,6 +638,12 @@ static int i2c_pmic_probe(struct i2c_client *client, chip->resume_completed = true; mutex_init(&chip->irq_complete); + rc = i2c_pmic_toggle_stat(chip); + if (rc < 0) { + pr_err("Couldn't toggle stat rc=%d\n", rc); + goto cleanup; + } + rc = devm_request_threaded_irq(&client->dev, client->irq, NULL, i2c_pmic_irq_handler, IRQF_ONESHOT | IRQF_SHARED, From 51db1ea67d8ec5dfd4b387c34dc1722a90657977 Mon Sep 17 00:00:00 2001 From: Mohammed Nayeem Ur Rahman Date: Mon, 20 Apr 2020 16:34:16 +0530 Subject: [PATCH 38/90] msm: adsprpc: Put upper limit on IOMMU mapping size Place appropriate validation checks for IOMMU mapping size. Without these checks, a mapping in an invalid kernel space could be created and fail during access. 
Change-Id: I0783834c09a5f7c37abfb29e2f098fbc86677ff2 Acked-by: Maitreyi Gupta Signed-off-by: Mohammed Nayeem Ur Rahman --- drivers/char/adsprpc.c | 26 +++++++++++++++++++++++--- 1 file changed, 23 insertions(+), 3 deletions(-) diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c index c55fc361c08f..f6ca64cfbc74 100644 --- a/drivers/char/adsprpc.c +++ b/drivers/char/adsprpc.c @@ -383,6 +383,7 @@ struct fastrpc_apps { uint64_t jobid[NUM_CHANNELS]; struct wakeup_source *wake_source; struct qos_cores silvercores; + uint32_t max_size_limit; }; struct fastrpc_mmap { @@ -1070,6 +1071,14 @@ static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd, trace_fastrpc_dma_map(fl->cid, fd, map->phys, map->size, len, mflags, map->attach->dma_map_attrs); + VERIFY(err, map->size >= len && map->size < me->max_size_limit); + if (err) { + err = -EFAULT; + pr_err("adsprpc: %s: invalid map size 0x%zx len 0x%zx\n", + __func__, map->size, len); + goto bail; + } + vmid = fl->apps->channel[fl->cid].vmid; if (!sess->smmu.enabled && !vmid) { VERIFY(err, map->phys >= me->range.addr && @@ -1111,12 +1120,17 @@ static int fastrpc_buf_alloc(struct fastrpc_file *fl, size_t size, int remote, struct fastrpc_buf **obuf) { int err = 0, vmid; + struct fastrpc_apps *me = &gfa; struct fastrpc_buf *buf = NULL, *fr = NULL; struct hlist_node *n; - VERIFY(err, size > 0); - if (err) + VERIFY(err, size > 0 && size < me->max_size_limit); + if (err) { + err = -EFAULT; + pr_err("adsprpc: %s: invalid allocation size 0x%zx\n", + __func__, size); goto bail; + } if (!remote) { /* find the smallest buffer that fits in the cache */ @@ -4530,9 +4544,11 @@ static int fastrpc_cb_probe(struct device *dev) struct fastrpc_channel_ctx *chan; struct fastrpc_session_ctx *sess; struct of_phandle_args iommuspec; + struct fastrpc_apps *me = &gfa; const char *name; int err = 0, cid = -1, i = 0; u32 sharedcb_count = 0, j = 0; + uint32_t dma_addr_pool[2] = {0, 0}; VERIFY(err, NULL != (name = of_get_property(dev->of_node, 
"label", NULL))); @@ -4579,6 +4595,11 @@ static int fastrpc_cb_probe(struct device *dev) dma_set_max_seg_size(sess->smmu.dev, DMA_BIT_MASK(32)); dma_set_seg_boundary(sess->smmu.dev, (unsigned long)DMA_BIT_MASK(64)); + of_property_read_u32_array(dev->of_node, "qcom,iommu-dma-addr-pool", + dma_addr_pool, 2); + me->max_size_limit = (dma_addr_pool[1] == 0 ? 0x78000000 : + dma_addr_pool[1]); + if (of_get_property(dev->of_node, "shared-cb", NULL) != NULL) { err = of_property_read_u32(dev->of_node, "shared-cb", &sharedcb_count); @@ -4718,7 +4739,6 @@ static int fastrpc_probe(struct platform_device *pdev) init_qos_cores_list(dev, "qcom,qos-cores", &me->silvercores); - of_property_read_u32(dev->of_node, "qcom,rpc-latency-us", &me->latency); if (of_get_property(dev->of_node, From 5780bc67aa8b32d9cc31ffe2e3a52738936a8b71 Mon Sep 17 00:00:00 2001 From: Chandana Kishori Chiluveru Date: Mon, 20 Apr 2020 20:06:03 +0530 Subject: [PATCH 39/90] msm-geni-serial: Correct the interrupt polling logic in uart Correct the interrupt polling logic in uart driver and depend on the interrupt routine instead of polling where ever possible. 
Change-Id: Ia01889549d7edbe380e2e58b92efb2a0b3b34e7b Signed-off-by: Prudhvi Yarlagadda Signed-off-by: Chandana Kishori Chiluveru --- drivers/tty/serial/msm_geni_serial.c | 850 ++++++++++++++++++--------- include/linux/qcom-geni-se.h | 9 +- 2 files changed, 587 insertions(+), 272 deletions(-) diff --git a/drivers/tty/serial/msm_geni_serial.c b/drivers/tty/serial/msm_geni_serial.c index 102f4c87c2dc..3449f1e81227 100644 --- a/drivers/tty/serial/msm_geni_serial.c +++ b/drivers/tty/serial/msm_geni_serial.c @@ -118,6 +118,26 @@ #define IPC_LOG_TX_RX_PAGES (10) #define DATA_BYTES_PER_LINE (32) +#define M_IRQ_BITS (M_RX_FIFO_WATERMARK_EN | M_RX_FIFO_LAST_EN |\ + M_CMD_CANCEL_EN | M_CMD_ABORT_EN) +#define S_IRQ_BITS (S_RX_FIFO_WATERMARK_EN | S_RX_FIFO_LAST_EN |\ + S_CMD_CANCEL_EN | S_CMD_ABORT_EN) +#define DMA_TX_IRQ_BITS (TX_RESET_DONE | TX_DMA_DONE |\ + TX_GENI_CANCEL_IRQ) +#define DMA_RX_IRQ_BITS (RX_EOT | RX_GENI_CANCEL_IRQ |\ + RX_RESET_DONE | UART_DMA_RX_ERRS |\ + UART_DMA_RX_PARITY_ERR | UART_DMA_RX_BREAK |\ + RX_DMA_DONE) + +/* Required for polling for 100 msecs */ +#define POLL_WAIT_TIMEOUT_MSEC 100 + +/* + * Number of iterations required while polling + * where each iteration has a delay of 100 usecs + */ +#define POLL_ITERATIONS 1000 + +#define IPC_LOG_MSG(ctx, x...)
do { \ if (ctx) \ ipc_log_string(ctx, x); \ @@ -136,7 +156,7 @@ struct msm_geni_serial_ver_info { struct msm_geni_serial_port { struct uart_port uport; - char name[20]; + const char *name; unsigned int tx_fifo_depth; unsigned int tx_fifo_width; unsigned int rx_fifo_depth; @@ -176,6 +196,12 @@ struct msm_geni_serial_port { bool startup_in_progress; bool is_console; bool rumi_platform; + bool m_cmd_done; + bool s_cmd_done; + bool m_cmd; + bool s_cmd; + struct completion m_cmd_timeout; + struct completion s_cmd_timeout; }; static const struct uart_ops msm_geni_serial_pops; @@ -209,6 +235,156 @@ static int uart_line_id; static struct msm_geni_serial_port msm_geni_console_port; static struct msm_geni_serial_port msm_geni_serial_ports[GENI_UART_NR_PORTS]; +static void msm_geni_serial_handle_isr(struct uart_port *uport); + +/* + * The below API is required to check if uport->lock (spinlock) + * is taken by the serial layer or not. If the lock is not taken + * then we can rely on the isr to be fired and if the lock is taken + * by the serial layer then we need to poll for the interrupts. + * + * Returns true(1) if spinlock is already taken by framework (serial layer) + * Return false(0) if spinlock is not taken by framework. + */ +static int msm_geni_serial_spinlocked(struct uart_port *uport) +{ + unsigned long flags; + bool locked; + + locked = spin_trylock_irqsave(&uport->lock, flags); + if (locked) + spin_unlock_irqrestore(&uport->lock, flags); + + return !locked; +} + +/* + * We are enabling the interrupts once the polling operations + * is completed. 
+ */ +static void msm_geni_serial_enable_interrupts(struct uart_port *uport) +{ + unsigned int geni_m_irq_en, geni_s_irq_en; + unsigned int dma_m_irq_en, dma_s_irq_en; + struct msm_geni_serial_port *port = GET_DEV_PORT(uport); + + geni_m_irq_en = geni_read_reg_nolog(uport->membase, + SE_GENI_M_IRQ_EN); + geni_s_irq_en = geni_read_reg_nolog(uport->membase, + SE_GENI_S_IRQ_EN); + if (port->xfer_mode == SE_DMA) { + dma_m_irq_en = geni_read_reg_nolog(uport->membase, + SE_DMA_TX_IRQ_EN); + dma_s_irq_en = geni_read_reg_nolog(uport->membase, + SE_DMA_RX_IRQ_EN); + } + + geni_m_irq_en |= M_IRQ_BITS; + geni_s_irq_en |= S_IRQ_BITS; + if (port->xfer_mode == SE_DMA) { + dma_m_irq_en |= DMA_TX_IRQ_BITS; + dma_s_irq_en |= DMA_RX_IRQ_BITS; + } + + geni_write_reg_nolog(geni_m_irq_en, uport->membase, SE_GENI_M_IRQ_EN); + geni_write_reg_nolog(geni_s_irq_en, uport->membase, SE_GENI_S_IRQ_EN); + if (port->xfer_mode == SE_DMA) { + geni_write_reg_nolog(dma_m_irq_en, uport->membase, + SE_DMA_TX_IRQ_EN); + geni_write_reg_nolog(dma_s_irq_en, uport->membase, + SE_DMA_RX_IRQ_EN); + } +} + +/* Disable the interrupts in order to do polling in an atomic contexts. */ +static void msm_geni_serial_disable_interrupts(struct uart_port *uport) +{ + unsigned int geni_m_irq_en, geni_s_irq_en; + unsigned int dma_m_irq_en, dma_s_irq_en; + struct msm_geni_serial_port *port = GET_DEV_PORT(uport); + + /* + * We don't need to disable interrupts if spinlock is not taken + * by framework as we can rely on ISR. 
+ */ +	if (!msm_geni_serial_spinlocked(uport)) + return; + + geni_m_irq_en = geni_read_reg_nolog(uport->membase, SE_GENI_M_IRQ_EN); + geni_s_irq_en = geni_read_reg_nolog(uport->membase, SE_GENI_S_IRQ_EN); + if (port->xfer_mode == SE_DMA) { + dma_m_irq_en = geni_read_reg_nolog(uport->membase, + SE_DMA_TX_IRQ_EN); + dma_s_irq_en = geni_read_reg_nolog(uport->membase, + SE_DMA_RX_IRQ_EN); + } + + geni_m_irq_en &= ~M_IRQ_BITS; + geni_s_irq_en &= ~S_IRQ_BITS; + if (port->xfer_mode == SE_DMA) { + dma_m_irq_en &= ~DMA_TX_IRQ_BITS; + dma_s_irq_en &= ~DMA_RX_IRQ_BITS; + } + + geni_write_reg_nolog(geni_m_irq_en, uport->membase, SE_GENI_M_IRQ_EN); + geni_write_reg_nolog(geni_s_irq_en, uport->membase, SE_GENI_S_IRQ_EN); + if (port->xfer_mode == SE_DMA) { + geni_write_reg_nolog(dma_m_irq_en, uport->membase, + SE_DMA_TX_IRQ_EN); + geni_write_reg_nolog(dma_s_irq_en, uport->membase, + SE_DMA_RX_IRQ_EN); + } +} + +/* + * We need to poll for interrupt if we are in an atomic context + * as serial framework might be taking spinlocks and depend on the isr + * in a non-atomic context. This API decides whether to poll for + * interrupt or depend on the isr based on in_atomic() call. + */ +bool geni_wait_for_cmd_done(struct uart_port *uport) +{ + struct msm_geni_serial_port *msm_port = GET_DEV_PORT(uport); + unsigned long timeout = POLL_ITERATIONS; + unsigned long ret; + + /* + * We need to do polling if spinlock is taken + * by framework as we cannot rely on ISR.
+ */ + if (msm_geni_serial_spinlocked(uport)) { + /* + * Polling is done for 1000 iterations with + * 100 usecs interval which in total accumulates + * to 100 msecs + */ + if (msm_port->m_cmd) { + while (!msm_port->m_cmd_done && timeout > 0) { + msm_geni_serial_handle_isr(uport); + timeout--; + udelay(100); + } + } else if (msm_port->s_cmd) { + while (!msm_port->s_cmd_done && timeout > 0) { + msm_geni_serial_handle_isr(uport); + timeout--; + udelay(100); + } + } + } else { + /* Waiting for 100 milliseconds for interrupt to be fired */ + if (msm_port->m_cmd) + ret = wait_for_completion_timeout + (&msm_port->m_cmd_timeout, + msecs_to_jiffies(POLL_WAIT_TIMEOUT_MSEC)); + else if (msm_port->s_cmd) + ret = wait_for_completion_timeout + (&msm_port->s_cmd_timeout, + msecs_to_jiffies(POLL_WAIT_TIMEOUT_MSEC)); + } + + return ret ? 0 : 1; +} static void msm_geni_serial_config_port(struct uart_port *uport, int cfg_flags) { @@ -528,7 +704,6 @@ static int msm_geni_serial_power_on(struct uart_port *uport) } else { pm_runtime_get_noresume(uport->dev); pm_runtime_set_active(uport->dev); - enable_irq(uport->irq); } pm_runtime_enable(uport->dev); if (lock) @@ -573,7 +748,6 @@ static int msm_geni_serial_poll_bit(struct uart_port *uport, unsigned int fifo_bits = DEF_FIFO_DEPTH_WORDS * DEF_FIFO_WIDTH_BITS; unsigned long total_iter = 1000; - if (uport->private_data && !uart_console(uport)) { port = GET_DEV_PORT(uport); baud = (port->cur_baud ?
port->cur_baud : 115200); @@ -614,58 +788,32 @@ static void msm_geni_serial_setup_tx(struct uart_port *uport, mb(); } -static void msm_geni_serial_poll_cancel_tx(struct uart_port *uport) +static void msm_geni_serial_poll_tx_done(struct uart_port *uport) { int done = 0; - unsigned int irq_clear = M_CMD_DONE_EN; + unsigned int irq_clear = 0; done = msm_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS, M_CMD_DONE_EN, true); if (!done) { - geni_write_reg_nolog(M_GENI_CMD_ABORT, uport->membase, - SE_GENI_M_CMD_CTRL_REG); - irq_clear |= M_CMD_ABORT_EN; - msm_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS, - M_CMD_ABORT_EN, true); + /* + * Failure IPC logs are not added as this API is + * used by early console and it doesn't have log handle. + */ + geni_write_reg(S_GENI_CMD_CANCEL, uport->membase, + SE_GENI_S_CMD_CTRL_REG); + done = msm_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS, + M_CMD_CANCEL_EN, true); + if (!done) { + geni_write_reg_nolog(M_GENI_CMD_ABORT, uport->membase, + SE_GENI_M_CMD_CTRL_REG); + msm_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS, + M_CMD_ABORT_EN, true); + } } - geni_write_reg_nolog(irq_clear, uport->membase, SE_GENI_M_IRQ_CLEAR); -} - -static void msm_geni_serial_abort_rx(struct uart_port *uport) -{ - unsigned int irq_clear = S_CMD_DONE_EN; - struct msm_geni_serial_port *port = GET_DEV_PORT(uport); - - geni_abort_s_cmd(uport->membase); - /* Ensure this goes through before polling. 
*/ - mb(); - irq_clear |= S_CMD_ABORT_EN; - msm_geni_serial_poll_bit(uport, SE_GENI_S_CMD_CTRL_REG, - S_GENI_CMD_ABORT, false); - geni_write_reg_nolog(irq_clear, uport->membase, SE_GENI_S_IRQ_CLEAR); - /* FORCE_DEFAULT makes RFR default high, hence set manually Low */ - msm_geni_serial_set_manual_flow(true, port); - geni_write_reg(FORCE_DEFAULT, uport->membase, GENI_FORCE_DEFAULT_REG); -} - -static void msm_geni_serial_complete_rx_eot(struct uart_port *uport) -{ - int poll_done = 0, tries = 0; - struct msm_geni_serial_port *port = GET_DEV_PORT(uport); - - do { - poll_done = msm_geni_serial_poll_bit(uport, SE_DMA_RX_IRQ_STAT, - RX_EOT, true); - tries++; - } while (!poll_done && tries < 5); - if (!poll_done) - IPC_LOG_MSG(port->ipc_log_misc, - "%s: RX_EOT, GENI:0x%x, DMA_DEBUG:0x%x\n", __func__, - geni_read_reg_nolog(uport->membase, SE_GENI_STATUS), - geni_read_reg_nolog(uport->membase, SE_DMA_DEBUG_REG0)); - else - geni_write_reg_nolog(RX_EOT, uport->membase, SE_DMA_RX_IRQ_CLR); + irq_clear = geni_read_reg_nolog(uport->membase, SE_GENI_M_IRQ_STATUS); + geni_write_reg_nolog(irq_clear, uport->membase, SE_GENI_M_IRQ_CLEAR); } #ifdef CONFIG_CONSOLE_POLL @@ -721,7 +869,9 @@ static void msm_geni_serial_poll_put_char(struct uart_port *uport, * Ensure FIFO write goes through before polling for status but. 
*/ mb(); - msm_geni_serial_poll_cancel_tx(uport); + msm_geni_serial_disable_interrupts(uport); + msm_geni_serial_poll_tx_done(uport); + msm_geni_serial_enable_interrupts(uport); } #endif @@ -757,6 +907,7 @@ __msm_geni_serial_console_write(struct uart_port *uport, const char *s, SE_GENI_TX_WATERMARK_REG); msm_geni_serial_setup_tx(uport, bytes_to_send); i = 0; + while (i < count) { u32 chars_to_write = 0; u32 avail_fifo_bytes = (fifo_depth - tx_wm); @@ -781,7 +932,9 @@ __msm_geni_serial_console_write(struct uart_port *uport, const char *s, mb(); i += chars_to_write; } - msm_geni_serial_poll_cancel_tx(uport); + msm_geni_serial_disable_interrupts(uport); + msm_geni_serial_poll_tx_done(uport); + msm_geni_serial_enable_interrupts(uport); } static void msm_geni_serial_console_write(struct console *co, const char *s, @@ -792,6 +945,7 @@ static void msm_geni_serial_console_write(struct console *co, const char *s, bool locked = true; unsigned long flags; unsigned int geni_status; + bool timeout; int irq_en; /* Max 1 port supported as of now */ @@ -811,23 +965,39 @@ static void msm_geni_serial_console_write(struct console *co, const char *s, /* Cancel the current write to log the fault */ if (!locked) { + port->m_cmd_done = false; + port->m_cmd = true; + reinit_completion(&port->m_cmd_timeout); + msm_geni_serial_disable_interrupts(uport); geni_cancel_m_cmd(uport->membase); - if (!msm_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS, - M_CMD_CANCEL_EN, true)) { + + timeout = geni_wait_for_cmd_done(uport); + if (timeout) { + IPC_LOG_MSG(port->console_log, + "%s: tx_cancel failed 0x%x\n", + __func__, geni_read_reg_nolog(uport->membase, + SE_GENI_STATUS)); + + reinit_completion(&port->m_cmd_timeout); geni_abort_m_cmd(uport->membase); - msm_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS, - M_CMD_ABORT_EN, true); - geni_write_reg_nolog(M_CMD_ABORT_EN, uport->membase, - SE_GENI_M_IRQ_CLEAR); + timeout = geni_wait_for_cmd_done(uport); + if (timeout) + IPC_LOG_MSG(port->console_log, 
+ "%s: tx abort failed 0x%x\n", __func__, + geni_read_reg_nolog(uport->membase, + SE_GENI_STATUS)); } - writel_relaxed(M_CMD_CANCEL_EN, uport->membase + - SE_GENI_M_IRQ_CLEAR); + + msm_geni_serial_enable_interrupts(uport); + port->m_cmd = false; } else if ((geni_status & M_GENI_CMD_ACTIVE) && !port->cur_tx_remaining) { /* It seems we can interrupt existing transfers unless all data * has been sent, in which case we need to look for done first. */ - msm_geni_serial_poll_cancel_tx(uport); + msm_geni_serial_disable_interrupts(uport); + msm_geni_serial_poll_tx_done(uport); + msm_geni_serial_enable_interrupts(uport); /* Enable WM interrupt for every new console write op */ if (uart_circ_chars_pending(&uport->state->xmit)) { @@ -855,6 +1025,7 @@ static int handle_rx_console(struct uart_port *uport, { int i, c; unsigned char *rx_char; + unsigned long flags; struct tty_port *tport; struct msm_geni_serial_port *msm_port = GET_DEV_PORT(uport); @@ -890,9 +1061,9 @@ static int handle_rx_console(struct uart_port *uport, * release the port lock before calling tty_flip_buffer_push() * to avoid deadlock scenarios. 
*/ - spin_unlock(&uport->lock); + spin_unlock_irqrestore(&uport->lock, flags); tty_flip_buffer_push(tport); - spin_lock(&uport->lock); + spin_lock_irqsave(&uport->lock, flags); } return 0; } @@ -913,8 +1084,8 @@ static int msm_geni_serial_prep_dma_tx(struct uart_port *uport) struct msm_geni_serial_port *msm_port = GET_DEV_PORT(uport); struct circ_buf *xmit = &uport->state->xmit; unsigned int xmit_size; - u32 geni_status; - bool done = false; + unsigned int dma_dbg; + bool timeout; int ret = 0; xmit_size = uart_circ_chars_pending(xmit); @@ -932,41 +1103,80 @@ static int msm_geni_serial_prep_dma_tx(struct uart_port *uport) msm_geni_serial_setup_tx(uport, xmit_size); ret = geni_se_tx_dma_prep(msm_port->wrapper_dev, uport->membase, &xmit->buf[xmit->tail], xmit_size, &msm_port->tx_dma); + if (!ret) { msm_port->xmit_size = xmit_size; - return ret; - } - - IPC_LOG_MSG(msm_port->ipc_log_misc, - "%s: TX DMA map Fail %d\n", __func__, ret); - geni_write_reg_nolog(0, uport->membase, - SE_UART_TX_TRANS_LEN); - geni_cancel_m_cmd(uport->membase); - if (!msm_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS, - M_CMD_CANCEL_EN, true)) { - geni_status = geni_read_reg_nolog(uport->membase, - SE_GENI_STATUS); + } else { IPC_LOG_MSG(msm_port->ipc_log_misc, - "%s: TX Cancel Fail 0x%x\n", - __func__, geni_status); - geni_abort_m_cmd(uport->membase); - done = msm_geni_serial_poll_bit(uport, - SE_GENI_M_IRQ_STATUS, M_CMD_ABORT_EN, true); - if (!done) { - geni_status = - geni_read_reg_nolog(uport->membase, - SE_GENI_STATUS); + "%s: TX DMA map Fail %d\n", __func__, ret); + + geni_write_reg_nolog(0, uport->membase, SE_UART_TX_TRANS_LEN); + msm_port->m_cmd_done = false; + msm_port->m_cmd = true; + reinit_completion(&msm_port->m_cmd_timeout); + + /* + * Disabling the interrupts before giving the + * cancel command as this might be in an atomic context. 
+ */ + msm_geni_serial_disable_interrupts(uport); + geni_cancel_m_cmd(uport->membase); + + timeout = geni_wait_for_cmd_done(uport); + if (timeout) { + IPC_LOG_MSG(msm_port->console_log, + "%s: tx_cancel fail 0x%x\n", __func__, + geni_read_reg_nolog(uport->membase, SE_GENI_STATUS)); + IPC_LOG_MSG(msm_port->ipc_log_misc, - "%s: TX Abort fail 0x%x\n", - __func__, geni_status); + "%s: tx_cancel failed 0x%x\n", __func__, + geni_read_reg_nolog(uport->membase, SE_GENI_STATUS)); + + msm_port->m_cmd_done = false; + reinit_completion(&msm_port->m_cmd_timeout); + /* Give abort command as cancel command failed */ + geni_abort_m_cmd(uport->membase); + + timeout = geni_wait_for_cmd_done(uport); + if (timeout) { + IPC_LOG_MSG(msm_port->console_log, + "%s: tx abort failed 0x%x\n", __func__, + geni_read_reg_nolog(uport->membase, + SE_GENI_STATUS)); + IPC_LOG_MSG(msm_port->ipc_log_misc, + "%s: tx abort failed 0x%x\n", __func__, + geni_read_reg_nolog(uport->membase, + SE_GENI_STATUS)); + } } - geni_write_reg_nolog(M_CMD_ABORT_EN, uport->membase, - SE_GENI_M_IRQ_CLEAR); + + if (msm_port->xfer_mode == SE_DMA) { + dma_dbg = geni_read_reg(uport->membase, + SE_DMA_DEBUG_REG0); + if (dma_dbg & DMA_TX_ACTIVE) { + msm_port->m_cmd_done = false; + reinit_completion(&msm_port->m_cmd_timeout); + geni_write_reg_nolog(1, uport->membase, + SE_DMA_TX_FSM_RST); + + timeout = geni_wait_for_cmd_done(uport); + if (timeout) + IPC_LOG_MSG(msm_port->ipc_log_misc, + "%s: tx fsm reset failed\n", __func__); + } + + if (msm_port->tx_dma) { + geni_se_tx_dma_unprep(msm_port->wrapper_dev, + msm_port->tx_dma, msm_port->xmit_size); + msm_port->tx_dma = (dma_addr_t)NULL; + } + } + msm_port->xmit_size = 0; + /* Enable the interrupts once the cancel operation is done. 
*/ + msm_geni_serial_enable_interrupts(uport); + msm_port->m_cmd = false; } - geni_write_reg_nolog(M_CMD_CANCEL_EN, uport->membase, - SE_GENI_M_IRQ_CLEAR); - msm_port->tx_dma = (dma_addr_t)NULL; - msm_port->xmit_size = 0; + return ret; } @@ -1029,50 +1239,14 @@ static void msm_geni_serial_start_tx(struct uart_port *uport) msm_geni_serial_power_off(uport); } -static void msm_geni_serial_tx_fsm_rst(struct uart_port *uport) -{ - unsigned int tx_irq_en; - int done = 0; - int tries = 0; - - tx_irq_en = geni_read_reg_nolog(uport->membase, SE_DMA_TX_IRQ_EN); - geni_write_reg_nolog(0, uport->membase, SE_DMA_TX_IRQ_EN_SET); - geni_write_reg_nolog(1, uport->membase, SE_DMA_TX_FSM_RST); - do { - done = msm_geni_serial_poll_bit(uport, SE_DMA_TX_IRQ_STAT, - TX_RESET_DONE, true); - tries++; - } while (!done && tries < 5); - geni_write_reg_nolog(TX_DMA_DONE | TX_RESET_DONE, uport->membase, - SE_DMA_TX_IRQ_CLR); - geni_write_reg_nolog(tx_irq_en, uport->membase, SE_DMA_TX_IRQ_EN_SET); -} - static void stop_tx_sequencer(struct uart_port *uport) { - unsigned int geni_m_irq_en; unsigned int geni_status; + bool timeout; + unsigned int dma_dbg; struct msm_geni_serial_port *port = GET_DEV_PORT(uport); - bool done = false; - geni_m_irq_en = geni_read_reg_nolog(uport->membase, SE_GENI_M_IRQ_EN); - geni_m_irq_en &= ~M_CMD_DONE_EN; - if (port->xfer_mode == FIFO_MODE) { - geni_m_irq_en &= ~M_TX_FIFO_WATERMARK_EN; - geni_write_reg_nolog(0, uport->membase, - SE_GENI_TX_WATERMARK_REG); - } else if (port->xfer_mode == SE_DMA) { - if (port->tx_dma) { - msm_geni_serial_tx_fsm_rst(uport); - geni_se_tx_dma_unprep(port->wrapper_dev, port->tx_dma, - port->xmit_size); - port->tx_dma = (dma_addr_t)NULL; - } - } - port->xmit_size = 0; - geni_write_reg_nolog(geni_m_irq_en, uport->membase, SE_GENI_M_IRQ_EN); - geni_status = geni_read_reg_nolog(uport->membase, - SE_GENI_STATUS); + geni_status = geni_read_reg_nolog(uport->membase, SE_GENI_STATUS); /* Possible stop tx is called multiple times. 
*/ if (!(geni_status & M_GENI_CMD_ACTIVE)) return; @@ -1080,28 +1254,56 @@ static void stop_tx_sequencer(struct uart_port *uport) IPC_LOG_MSG(port->ipc_log_misc, "%s: Start GENI: 0x%x\n", __func__, geni_status); + port->m_cmd_done = false; + port->m_cmd = true; + reinit_completion(&port->m_cmd_timeout); geni_cancel_m_cmd(uport->membase); - if (!msm_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS, - M_CMD_CANCEL_EN, true)) { - geni_status = geni_read_reg_nolog(uport->membase, - SE_GENI_STATUS); - IPC_LOG_MSG(port->ipc_log_misc, - "%s: TX Cancel Fail 0x%x\n", __func__, geni_status); + + timeout = geni_wait_for_cmd_done(uport); + if (timeout) { + IPC_LOG_MSG(port->console_log, "%s: tx_cancel failed 0x%x\n", + __func__, geni_read_reg_nolog(uport->membase, SE_GENI_STATUS)); + IPC_LOG_MSG(port->ipc_log_misc, "%s: tx_cancel failed 0x%x\n", + __func__, geni_read_reg_nolog(uport->membase, SE_GENI_STATUS)); + + port->m_cmd_done = false; + reinit_completion(&port->m_cmd_timeout); geni_abort_m_cmd(uport->membase); - done = msm_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS, - M_CMD_ABORT_EN, true); - if (!done) { - geni_status = geni_read_reg_nolog(uport->membase, - SE_GENI_STATUS); + + timeout = geni_wait_for_cmd_done(uport); + if (timeout) { + IPC_LOG_MSG(port->console_log, + "%s: tx abort failed 0x%x\n", __func__, + geni_read_reg_nolog(uport->membase, SE_GENI_STATUS)); IPC_LOG_MSG(port->ipc_log_misc, - "%s TX Abort fail 0x%x\n", - __func__, geni_status); + "%s: tx abort failed 0x%x\n", __func__, + geni_read_reg_nolog(uport->membase, SE_GENI_STATUS)); } - geni_write_reg_nolog(M_CMD_ABORT_EN, uport->membase, - SE_GENI_M_IRQ_CLEAR); } - geni_write_reg_nolog(M_CMD_CANCEL_EN, uport->membase, - SE_GENI_M_IRQ_CLEAR); + + if (port->xfer_mode == SE_DMA) { + dma_dbg = geni_read_reg(uport->membase, SE_DMA_DEBUG_REG0); + if (dma_dbg & DMA_TX_ACTIVE) { + port->m_cmd_done = false; + reinit_completion(&port->m_cmd_timeout); + geni_write_reg_nolog(1, uport->membase, + 
SE_DMA_TX_FSM_RST); + + timeout = geni_wait_for_cmd_done(uport); + if (timeout) + IPC_LOG_MSG(port->ipc_log_misc, + "%s: tx fsm reset failed\n", __func__); + } + + if (port->tx_dma) { + geni_se_tx_dma_unprep(port->wrapper_dev, + port->tx_dma, port->xmit_size); + port->tx_dma = (dma_addr_t)NULL; + } + } + port->m_cmd = false; + port->xmit_size = 0; + /* * If we end up having to cancel an on-going Tx for non-console usecase * then it means there was some unsent data in the Tx FIFO, consequently @@ -1113,6 +1315,7 @@ static void stop_tx_sequencer(struct uart_port *uport) IPC_LOG_MSG(port->ipc_log_misc, "%s:Removing vote\n", __func__); msm_geni_serial_power_off(uport); } + geni_status = geni_read_reg_nolog(uport->membase, SE_GENI_STATUS); IPC_LOG_MSG(port->ipc_log_misc, "%s: End GENI:0x%x\n", __func__, geni_status); @@ -1133,8 +1336,6 @@ static void msm_geni_serial_stop_tx(struct uart_port *uport) static void start_rx_sequencer(struct uart_port *uport) { - unsigned int geni_s_irq_en; - unsigned int geni_m_irq_en; unsigned int geni_status; struct msm_geni_serial_port *port = GET_DEV_PORT(uport); u32 geni_se_param = UART_PARAM_RFR_OPEN; @@ -1157,29 +1358,14 @@ static void start_rx_sequencer(struct uart_port *uport) } /* Start RX with the RFR_OPEN to keep RFR in always ready state */ + msm_geni_serial_enable_interrupts(uport); geni_setup_s_cmd(uport->membase, UART_START_READ, geni_se_param); - if (port->xfer_mode == FIFO_MODE) { - geni_s_irq_en = geni_read_reg_nolog(uport->membase, - SE_GENI_S_IRQ_EN); - geni_m_irq_en = geni_read_reg_nolog(uport->membase, - SE_GENI_M_IRQ_EN); - - geni_s_irq_en |= S_RX_FIFO_WATERMARK_EN | S_RX_FIFO_LAST_EN; - geni_m_irq_en |= M_RX_FIFO_WATERMARK_EN | M_RX_FIFO_LAST_EN; - - geni_write_reg_nolog(geni_s_irq_en, uport->membase, - SE_GENI_S_IRQ_EN); - geni_write_reg_nolog(geni_m_irq_en, uport->membase, - SE_GENI_M_IRQ_EN); - } else if (port->xfer_mode == SE_DMA) { + if (port->xfer_mode == SE_DMA) geni_se_rx_dma_start(uport->membase, 
DMA_RX_BUF_SIZE, &port->rx_dma); - } - /* - * Ensure the writes to the secondary sequencer and interrupt enables - * go through. - */ + + /* Ensure that the above writes go through */ mb(); geni_status = geni_read_reg_nolog(uport->membase, SE_GENI_STATUS); IPC_LOG_MSG(port->ipc_log_misc, "%s: 0x%x, dma_dbg:0x%x\n", __func__, @@ -1253,61 +1439,68 @@ static void msm_geni_serial_set_manual_flow(bool enable, static void stop_rx_sequencer(struct uart_port *uport) { - unsigned int geni_s_irq_en; - unsigned int geni_m_irq_en; unsigned int geni_status; + bool timeout; struct msm_geni_serial_port *port = GET_DEV_PORT(uport); - u32 irq_clear = S_CMD_CANCEL_EN; - bool done; - if (port->xfer_mode == FIFO_MODE) { - geni_s_irq_en = geni_read_reg_nolog(uport->membase, - SE_GENI_S_IRQ_EN); - geni_m_irq_en = geni_read_reg_nolog(uport->membase, - SE_GENI_M_IRQ_EN); - geni_s_irq_en &= ~(S_RX_FIFO_WATERMARK_EN | S_RX_FIFO_LAST_EN); - geni_m_irq_en &= ~(M_RX_FIFO_WATERMARK_EN | M_RX_FIFO_LAST_EN); - - geni_write_reg_nolog(geni_s_irq_en, uport->membase, - SE_GENI_S_IRQ_EN); - geni_write_reg_nolog(geni_m_irq_en, uport->membase, - SE_GENI_M_IRQ_EN); - } + IPC_LOG_MSG(port->ipc_log_misc, "%s\n", __func__); geni_status = geni_read_reg_nolog(uport->membase, SE_GENI_STATUS); /* Possible stop rx is called multiple times. */ - if (!(geni_status & S_GENI_CMD_ACTIVE)) + if (!(geni_status & S_GENI_CMD_ACTIVE)) { + IPC_LOG_MSG(port->ipc_log_misc, + "%s: RX is Inactive, geni_sts: 0x%x\n", + __func__, geni_status); goto exit_rx_seq; + } + + port->s_cmd_done = false; + port->s_cmd = true; + reinit_completion(&port->s_cmd_timeout); IPC_LOG_MSG(port->ipc_log_misc, "%s: Start 0x%x\n", __func__, geni_status); + /* + * Disabling the interrupts before giving the + * cancel command as this might be in an atomic context. + */ + msm_geni_serial_disable_interrupts(uport); geni_cancel_s_cmd(uport->membase); + /* * Ensure that the cancel goes through before polling for the * cancel control bit. 
*/ mb(); - if (!uart_console(uport)) - msm_geni_serial_complete_rx_eot(uport); + timeout = geni_wait_for_cmd_done(uport); + if (timeout) { + geni_status = geni_read_reg_nolog(uport->membase, + SE_GENI_STATUS); + IPC_LOG_MSG(port->ipc_log_misc, + "%s cancel failed 0x%x\n", __func__, geni_status); + IPC_LOG_MSG(port->console_log, + "%s cancel failed 0x%x\n", __func__, geni_status); + port->s_cmd_done = false; + reinit_completion(&port->s_cmd_timeout); + geni_abort_s_cmd(uport->membase); + /* Ensure this goes through before polling. */ + mb(); - done = msm_geni_serial_poll_bit(uport, SE_GENI_S_CMD_CTRL_REG, - S_GENI_CMD_CANCEL, false); - if (done) { - geni_write_reg_nolog(irq_clear, uport->membase, - SE_GENI_S_IRQ_CLEAR); - goto exit_rx_seq; - } else { - IPC_LOG_MSG(port->ipc_log_misc, "%s Cancel fail 0x%x\n", - __func__, geni_status); + timeout = geni_wait_for_cmd_done(uport); + if (timeout) { + geni_status = geni_read_reg_nolog(uport->membase, + SE_GENI_STATUS); + IPC_LOG_MSG(port->ipc_log_misc, + "%s abort fail 0x%x\n", __func__, geni_status); + IPC_LOG_MSG(port->console_log, + "%s abort fail 0x%x\n", __func__, geni_status); + } } + /* Enable the interrupts once the cancel operation is done. 
*/ + msm_geni_serial_enable_interrupts(uport); + port->s_cmd = false; - geni_status = geni_read_reg_nolog(uport->membase, SE_GENI_STATUS); - if ((geni_status & S_GENI_CMD_ACTIVE)) { - IPC_LOG_MSG(port->ipc_log_misc, "%s:Abort Rx, GENI:0x%x\n", - __func__, geni_status); - msm_geni_serial_abort_rx(uport); - } exit_rx_seq: if (port->xfer_mode == SE_DMA && port->rx_dma) msm_geni_serial_rx_fsm_rst(uport); @@ -1533,8 +1726,6 @@ static int msm_geni_serial_handle_dma_rx(struct uart_port *uport, bool drop_rx) dump_ipc(msm_port->ipc_log_rx, "DMA Rx", (char *)msm_port->rx_buf, 0, rx_bytes); exit_handle_dma_rx: - geni_se_rx_dma_start(uport->membase, DMA_RX_BUF_SIZE, - &msm_port->rx_dma); return ret; } @@ -1568,32 +1759,27 @@ static int msm_geni_serial_handle_dma_tx(struct uart_port *uport) return 0; } -static irqreturn_t msm_geni_serial_isr(int isr, void *dev) +static void msm_geni_serial_handle_isr(struct uart_port *uport) { unsigned int m_irq_status; unsigned int s_irq_status; unsigned int dma; unsigned int dma_tx_status; unsigned int dma_rx_status; - struct uart_port *uport = dev; unsigned int m_irq_en; unsigned int geni_status; struct msm_geni_serial_port *msm_port = GET_DEV_PORT(uport); struct tty_port *tport = &uport->state->port; bool drop_rx = false; + bool s_cmd_done = false; + bool m_cmd_done = false; - spin_lock(&uport->lock); if (uart_console(uport) && uport->suspended) { IPC_LOG_MSG(msm_port->console_log, "%s. 
Console in suspend state\n", __func__); goto exit_geni_serial_isr; } - if (!uart_console(uport) && pm_runtime_status_suspended(uport->dev)) { - dev_err(uport->dev, "%s.Device is suspended.\n", __func__); - IPC_LOG_MSG(msm_port->ipc_log_misc, - "%s.Device is suspended.\n", __func__); - goto exit_geni_serial_isr; - } + m_irq_status = geni_read_reg_nolog(uport->membase, SE_GENI_M_IRQ_STATUS); s_irq_status = geni_read_reg_nolog(uport->membase, @@ -1601,16 +1787,13 @@ static irqreturn_t msm_geni_serial_isr(int isr, void *dev) if (uart_console(uport)) IPC_LOG_MSG(msm_port->console_log, "%s. sirq 0x%x mirq:0x%x\n", __func__, s_irq_status, - m_irq_status); - m_irq_en = geni_read_reg_nolog(uport->membase, SE_GENI_M_IRQ_EN); - dma = geni_read_reg_nolog(uport->membase, SE_GENI_DMA_MODE_EN); - dma_tx_status = geni_read_reg_nolog(uport->membase, SE_DMA_TX_IRQ_STAT); - dma_rx_status = geni_read_reg_nolog(uport->membase, SE_DMA_RX_IRQ_STAT); - geni_status = readl_relaxed(uport->membase + SE_GENI_STATUS); - - geni_write_reg_nolog(m_irq_status, uport->membase, SE_GENI_M_IRQ_CLEAR); - geni_write_reg_nolog(s_irq_status, uport->membase, SE_GENI_S_IRQ_CLEAR); + m_irq_status); + geni_write_reg_nolog(m_irq_status, uport->membase, + SE_GENI_M_IRQ_CLEAR); + geni_write_reg_nolog(s_irq_status, uport->membase, + SE_GENI_S_IRQ_CLEAR); + m_irq_en = geni_read_reg_nolog(uport->membase, SE_GENI_M_IRQ_EN); if ((m_irq_status & M_ILLEGAL_CMD_EN)) { WARN_ON(1); goto exit_geni_serial_isr; @@ -1624,49 +1807,72 @@ static irqreturn_t msm_geni_serial_isr(int isr, void *dev) __func__, s_irq_status, uport->icount.buf_overrun); } + dma = geni_read_reg_nolog(uport->membase, SE_GENI_DMA_MODE_EN); if (!dma) { + geni_status = readl_relaxed(uport->membase + SE_GENI_STATUS); + if ((m_irq_status & m_irq_en) & (M_TX_FIFO_WATERMARK_EN | M_CMD_DONE_EN)) msm_geni_serial_handle_tx(uport, m_irq_status & M_CMD_DONE_EN, geni_status & M_GENI_CMD_ACTIVE); - if ((s_irq_status & S_GP_IRQ_0_EN) || - (s_irq_status & 
S_GP_IRQ_1_EN)) { + if (m_irq_status & (M_CMD_CANCEL_EN | M_CMD_ABORT_EN)) + m_cmd_done = true; + + if (s_irq_status & (S_GP_IRQ_0_EN | S_GP_IRQ_1_EN)) { if (s_irq_status & S_GP_IRQ_0_EN) uport->icount.parity++; IPC_LOG_MSG(msm_port->ipc_log_misc, "%s.sirq 0x%x parity:%d\n", __func__, s_irq_status, uport->icount.parity); drop_rx = true; - } else if ((s_irq_status & S_GP_IRQ_2_EN) || - (s_irq_status & S_GP_IRQ_3_EN)) { + } else if (s_irq_status & (S_GP_IRQ_2_EN | S_GP_IRQ_3_EN)) { uport->icount.brk++; IPC_LOG_MSG(msm_port->ipc_log_misc, "%s.sirq 0x%x break:%d\n", __func__, s_irq_status, uport->icount.brk); } - if ((s_irq_status & S_RX_FIFO_WATERMARK_EN) || - (s_irq_status & S_RX_FIFO_LAST_EN)) + if (s_irq_status & (S_CMD_CANCEL_EN | S_CMD_ABORT_EN)) + s_cmd_done = true; + + if (s_irq_status & (S_RX_FIFO_WATERMARK_EN | + S_RX_FIFO_LAST_EN)) msm_geni_serial_handle_rx(uport, drop_rx); } else { + dma_tx_status = geni_read_reg_nolog(uport->membase, + SE_DMA_TX_IRQ_STAT); + dma_rx_status = geni_read_reg_nolog(uport->membase, + SE_DMA_RX_IRQ_STAT); + if (dma_tx_status) { + geni_write_reg_nolog(dma_tx_status, uport->membase, - SE_DMA_TX_IRQ_CLR); + SE_DMA_TX_IRQ_CLR); + if (dma_tx_status & TX_DMA_DONE) msm_geni_serial_handle_dma_tx(uport); + + if (dma_tx_status & (TX_RESET_DONE | + TX_GENI_CANCEL_IRQ)) + m_cmd_done = true; + + if (m_irq_status & (M_CMD_CANCEL_EN | M_CMD_ABORT_EN)) + m_cmd_done = true; } if (dma_rx_status) { geni_write_reg_nolog(dma_rx_status, uport->membase, - SE_DMA_RX_IRQ_CLR); + SE_DMA_RX_IRQ_CLR); + if (dma_rx_status & RX_RESET_DONE) { IPC_LOG_MSG(msm_port->ipc_log_misc, "%s.Reset done. 
0x%x.\n", __func__, dma_rx_status); goto exit_geni_serial_isr; } + if (dma_rx_status & UART_DMA_RX_ERRS) { if (dma_rx_status & UART_DMA_RX_PARITY_ERR) uport->icount.parity++; @@ -1682,13 +1888,53 @@ static irqreturn_t msm_geni_serial_isr(int isr, void *dev) __func__, dma_rx_status, uport->icount.brk); } - if (dma_rx_status & RX_DMA_DONE) - msm_geni_serial_handle_dma_rx(uport, drop_rx); + + if (dma_rx_status & RX_EOT || + dma_rx_status & RX_DMA_DONE) { + msm_geni_serial_handle_dma_rx(uport, + drop_rx); + if (!(dma_rx_status & RX_GENI_CANCEL_IRQ)) { + geni_se_rx_dma_start(uport->membase, + DMA_RX_BUF_SIZE, &msm_port->rx_dma); + } + } + + if (dma_rx_status & RX_SBE) { + IPC_LOG_MSG(msm_port->ipc_log_misc, + "%s.Rx Errors. 0x%x\n", + __func__, dma_rx_status); + WARN_ON(1); + } + + if (dma_rx_status & (RX_EOT | RX_GENI_CANCEL_IRQ | + RX_DMA_DONE)) + s_cmd_done = true; + + if (s_irq_status & (S_CMD_CANCEL_EN | S_CMD_ABORT_EN)) + s_cmd_done = true; } } exit_geni_serial_isr: - spin_unlock(&uport->lock); + if (m_cmd_done) { + msm_port->m_cmd_done = true; + complete(&msm_port->m_cmd_timeout); + } + + if (s_cmd_done) { + msm_port->s_cmd_done = true; + complete(&msm_port->s_cmd_timeout); + } +} + +static irqreturn_t msm_geni_serial_isr(int isr, void *dev) +{ + struct uart_port *uport = dev; + unsigned long flags; + + spin_lock_irqsave(&uport->lock, flags); + msm_geni_serial_handle_isr(uport); + spin_unlock_irqrestore(&uport->lock, flags); return IRQ_HANDLED; } @@ -1780,9 +2026,6 @@ static void msm_geni_serial_shutdown(struct uart_port *uport) wait_for_transfers_inflight(uport); } - disable_irq(uport->irq); - free_irq(uport->irq, uport); - if (!uart_console(uport)) { if (msm_port->ioctl_count) { int i; @@ -1844,6 +2087,7 @@ static int msm_geni_serial_port_setup(struct uart_port *uport) msm_port->rx_buf = dma_alloc_coherent(msm_port->wrapper_dev, DMA_RX_BUF_SIZE, &dma_address, GFP_KERNEL); + if (!msm_port->rx_buf) { devm_kfree(uport->dev, msm_port->rx_fifo); msm_port->rx_fifo = 
NULL; @@ -1857,7 +2101,9 @@ static int msm_geni_serial_port_setup(struct uart_port *uport) * it else we could end up in data loss scenarios. */ msm_port->xfer_mode = FIFO_MODE; - msm_geni_serial_poll_cancel_tx(uport); + msm_geni_serial_disable_interrupts(uport); + msm_geni_serial_poll_tx_done(uport); + msm_geni_serial_enable_interrupts(uport); se_get_packing_config(8, 1, false, &cfg0, &cfg1); geni_write_reg_nolog(cfg0, uport->membase, SE_GENI_TX_PACKING_CFG0); @@ -1869,6 +2115,7 @@ static int msm_geni_serial_port_setup(struct uart_port *uport) geni_write_reg_nolog(cfg1, uport->membase, SE_GENI_RX_PACKING_CFG1); } + ret = geni_se_init(uport->membase, msm_port->rx_wm, msm_port->rx_rfr); if (ret) { dev_err(uport->dev, "%s: Fail\n", __func__); @@ -1903,8 +2150,6 @@ static int msm_geni_serial_startup(struct uart_port *uport) struct msm_geni_serial_port *msm_port = GET_DEV_PORT(uport); IPC_LOG_MSG(msm_port->ipc_log_misc, "%s:\n", __func__); - scnprintf(msm_port->name, sizeof(msm_port->name), "msm_serial_geni%d", - uport->line); msm_port->startup_in_progress = true; @@ -1933,13 +2178,6 @@ static int msm_geni_serial_startup(struct uart_port *uport) * before returning to the framework. 
*/ mb(); - ret = request_irq(uport->irq, msm_geni_serial_isr, IRQF_TRIGGER_HIGH, - msm_port->name, uport); - if (unlikely(ret)) { - dev_err(uport->dev, "%s: Failed to get IRQ ret %d\n", - __func__, ret); - goto exit_startup; - } if (msm_port->wakeup_irq > 0) { ret = request_irq(msm_port->wakeup_irq, msm_geni_wakeup_isr, @@ -2053,7 +2291,6 @@ static void msm_geni_serial_set_termios(struct uart_port *uport, unsigned long ser_clk_cfg = 0; struct msm_geni_serial_port *port = GET_DEV_PORT(uport); unsigned long clk_rate; - unsigned long flags; unsigned long desired_rate; unsigned int clk_idx; int uart_sampling; @@ -2075,16 +2312,9 @@ static void msm_geni_serial_set_termios(struct uart_port *uport, __func__, ret); return; } - disable_irq(uport->irq); msm_geni_serial_set_manual_flow(false, port); } - /* Take a spinlock else stop_rx causes a race with an ISR due to Cancel - * and FSM_RESET. This also has a potential race with the dma_map/unmap - * operations of ISR. - */ - spin_lock_irqsave(&uport->lock, flags); msm_geni_serial_stop_rx(uport); - spin_unlock_irqrestore(&uport->lock, flags); /* baud rate */ baud = uart_get_baud_rate(uport, termios, old, 300, 4000000); port->cur_baud = baud; @@ -2204,10 +2434,9 @@ static void msm_geni_serial_set_termios(struct uart_port *uport, IPC_LOG_MSG(port->ipc_log_misc, "BitsChar%d stop bit%d\n", bits_per_char, stop_bit_len); exit_set_termios: - if (!uart_console(uport)) { + if (!uart_console(uport)) msm_geni_serial_set_manual_flow(true, port); - enable_irq(uport->irq); - } + msm_geni_serial_start_rx(uport); if (!uart_console(uport)) msm_geni_serial_power_off(uport); @@ -2275,9 +2504,9 @@ static ssize_t xfer_mode_store(struct device *dev, return size; msm_geni_serial_power_on(uport); - spin_lock_irqsave(&uport->lock, flags); msm_geni_serial_stop_tx(uport); msm_geni_serial_stop_rx(uport); + spin_lock_irqsave(&uport->lock, flags); port->xfer_mode = xfer_mode; geni_se_select_mode(uport->membase, port->xfer_mode); 
spin_unlock_irqrestore(&uport->lock, flags); @@ -2361,6 +2590,45 @@ msm_geni_serial_early_console_write(struct console *con, const char *s, __msm_geni_serial_console_write(&dev->port, s, n); } +static void msm_geni_serial_cancel_rx(struct uart_port *uport) +{ + int done = 0; + int i = 0; + unsigned int irq_status; + u32 rx_fifo_status; + u32 rx_fifo_wc; + + geni_cancel_s_cmd(uport->membase); + /* Ensure this goes through before polling. */ + mb(); + + done = msm_geni_serial_poll_bit(uport, SE_GENI_S_IRQ_STATUS, + S_CMD_CANCEL_EN, true); + if (!done) { + geni_abort_s_cmd(uport->membase); + /* Ensure this goes through before polling. */ + mb(); + msm_geni_serial_poll_bit(uport, SE_GENI_S_IRQ_STATUS, + S_CMD_ABORT_EN, false); + } else if (msm_geni_serial_poll_bit(uport, + SE_GENI_S_IRQ_STATUS, S_RX_FIFO_LAST_EN, true)) { + rx_fifo_status = geni_read_reg_nolog(uport->membase, + SE_GENI_RX_FIFO_STATUS); + rx_fifo_wc = rx_fifo_status & RX_FIFO_WC_MSK; + for (i = 0; i < rx_fifo_wc; i++) + geni_read_reg_nolog(uport->membase, + SE_GENI_RX_FIFOn); + } + + irq_status = geni_read_reg_nolog(uport->membase, + SE_GENI_S_IRQ_STATUS); + geni_write_reg_nolog(irq_status, uport->membase, SE_GENI_S_IRQ_CLEAR); + + if (!done) + geni_write_reg(FORCE_DEFAULT, uport->membase, + GENI_FORCE_DEFAULT_REG); +} + static int __init msm_geni_serial_earlycon_setup(struct earlycon_device *dev, const char *opt) @@ -2407,6 +2675,7 @@ msm_geni_serial_earlycon_setup(struct earlycon_device *dev, stop_bit = 0; rx_stale = 0x18; clk_div = get_clk_div_rate(baud, &clk_rate); + if (clk_div <= 0) { ret = -EINVAL; goto exit_geni_serial_earlyconsetup; @@ -2419,10 +2688,17 @@ msm_geni_serial_earlycon_setup(struct earlycon_device *dev, s_clk_cfg |= (clk_div << CLK_DIV_SHFT); /* - * Make an unconditional cancel on the main sequencer to reset - * it else we could end up in data loss scenarios. + * Here we need to poll for command done which indicates that + * the previous tx transfer is done. 
And if the command done interrupt + * is not getting set, then we need to cancel the command. */ - msm_geni_serial_poll_cancel_tx(uport); + msm_geni_serial_poll_tx_done(uport); + + /* + * Here cancel rx is done in polling mode as there is + * no isr support during early console time. + */ + msm_geni_serial_cancel_rx(uport); /* Only for earlyconsole */ if (IS_ENABLED(CONFIG_SERIAL_MSM_GENI_HALF_SAMPLING)) { @@ -2441,8 +2717,14 @@ msm_geni_serial_earlycon_setup(struct earlycon_device *dev, SE_UART_TX_TRANS_CFG); geni_write_reg_nolog(tx_parity_cfg, uport->membase, SE_UART_TX_PARITY_CFG); + geni_write_reg_nolog(rx_trans_cfg, uport->membase, + SE_UART_RX_TRANS_CFG); + geni_write_reg_nolog(rx_parity_cfg, uport->membase, + SE_UART_RX_PARITY_CFG); geni_write_reg_nolog(bits_per_char, uport->membase, SE_UART_TX_WORD_LEN); + geni_write_reg_nolog(bits_per_char, uport->membase, + SE_UART_RX_WORD_LEN); geni_write_reg_nolog(stop_bit, uport->membase, SE_UART_TX_STOP_BIT_LEN); geni_write_reg_nolog(s_clk_cfg, uport->membase, GENI_SER_M_CLK_CFG); geni_write_reg_nolog(s_clk_cfg, uport->membase, GENI_SER_S_CLK_CFG); @@ -2656,6 +2938,8 @@ static int msm_geni_serial_get_ver_info(struct uart_port *uport) __func__, msm_port->ver_info.hw_major_ver, msm_port->ver_info.hw_minor_ver, msm_port->ver_info.hw_step_ver); + + msm_geni_serial_enable_interrupts(uport); exit_ver_info: if (!msm_port->is_console) se_geni_clks_off(&msm_port->serial_rsc); @@ -2856,6 +3140,9 @@ static int msm_geni_serial_probe(struct platform_device *pdev) dev_port->tx_fifo_width = DEF_FIFO_WIDTH_BITS; uport->fifosize = ((dev_port->tx_fifo_depth * dev_port->tx_fifo_width) >> 3); + /* Complete signals to handle cancel cmd completion */ + init_completion(&dev_port->m_cmd_timeout); + init_completion(&dev_port->s_cmd_timeout); uport->irq = platform_get_irq(pdev, 0); if (uport->irq < 0) { @@ -2864,6 +3151,22 @@ static int msm_geni_serial_probe(struct platform_device *pdev) goto exit_geni_serial_probe; } + dev_port->name = 
devm_kasprintf(uport->dev, GFP_KERNEL, + "msm_serial_geni%d", uport->line); + ret = devm_request_irq(uport->dev, uport->irq, msm_geni_serial_isr, + IRQF_TRIGGER_HIGH, dev_port->name, uport); + if (ret) { + dev_err(uport->dev, "%s: Failed to get IRQ ret %d\n", + __func__, ret); + goto exit_geni_serial_probe; + } + /* + * Console usecase requires irq to be in enable state to handle RX data. + * disable irq only for HSUART case from here. + */ + if (!is_console) + disable_irq(dev_port->uport.irq); + uport->private_data = (void *)drv; platform_set_drvdata(pdev, dev_port); if (is_console) { @@ -2886,6 +3189,12 @@ static int msm_geni_serial_probe(struct platform_device *pdev) dev_info(&pdev->dev, "Serial port%d added.FifoSize %d is_console%d\n", line, uport->fifosize, is_console); + /* + * We are using this spinlock before the serial layer initialises it. + * Hence, we are initializing it. + */ + spin_lock_init(&uport->lock); + device_create_file(uport->dev, &dev_attr_loopback); device_create_file(uport->dev, &dev_attr_xfer_mode); device_create_file(uport->dev, &dev_attr_ver_info); @@ -2930,21 +3239,24 @@ static int msm_geni_serial_runtime_suspend(struct device *dev) wait_for_transfers_inflight(&port->uport); /* - * Disable Interrupt * Manual RFR On. * Stop Rx. 
+ * Disable Interrupt * Resources off */ - disable_irq(port->uport.irq); stop_rx_sequencer(&port->uport); geni_status = geni_read_reg_nolog(port->uport.membase, SE_GENI_STATUS); + if ((geni_status & M_GENI_CMD_ACTIVE)) stop_tx_sequencer(&port->uport); + + disable_irq(port->uport.irq); ret = se_geni_resources_off(&port->serial_rsc); if (ret) { dev_err(dev, "%s: Error ret %d\n", __func__, ret); goto exit_runtime_suspend; } + if (port->wakeup_irq > 0) { port->edge_count = 0; enable_irq(port->wakeup_irq); @@ -2984,12 +3296,9 @@ static int msm_geni_serial_runtime_resume(struct device *dev) start_rx_sequencer(&port->uport); /* Ensure that the Rx is running before enabling interrupts */ mb(); - /* - * Do not enable irq before interrupt registration which happens - * at port open time. - */ - if (pm_runtime_enabled(dev) && port->xfer_mode != INVALID) - enable_irq(port->uport.irq); + /* Enable interrupt */ + enable_irq(port->uport.irq); + IPC_LOG_MSG(port->ipc_log_pwr, "%s:\n", __func__); exit_runtime_resume: return ret; @@ -3034,7 +3343,6 @@ static int msm_geni_serial_sys_resume_noirq(struct device *dev) console_suspend_enabled && uport->suspended) { uart_resume_port((struct uart_driver *)uport->private_data, uport); - disable_irq(uport->irq); } return 0; } diff --git a/include/linux/qcom-geni-se.h b/include/linux/qcom-geni-se.h index 1fd68e38cf2f..3e2888af87c3 100644 --- a/include/linux/qcom-geni-se.h +++ b/include/linux/qcom-geni-se.h @@ -330,6 +330,7 @@ struct se_geni_rsc { #define TX_EOT (BIT(1)) #define TX_SBE (BIT(2)) #define TX_RESET_DONE (BIT(3)) +#define TX_GENI_CANCEL_IRQ (BIT(14)) /* SE_DMA_RX_IRQ_STAT Register fields */ #define RX_DMA_DONE (BIT(0)) @@ -338,9 +339,15 @@ struct se_geni_rsc { #define RX_RESET_DONE (BIT(3)) #define RX_FLUSH_DONE (BIT(4)) #define RX_GENI_GP_IRQ (GENMASK(10, 5)) -#define RX_GENI_CANCEL_IRQ (BIT(11)) +#define RX_GENI_CANCEL_IRQ (BIT(14)) #define RX_GENI_GP_IRQ_EXT (GENMASK(13, 12)) +/* DMA DEBUG Register fields */ +#define 
DMA_TX_ACTIVE (BIT(0)) +#define DMA_RX_ACTIVE (BIT(1)) +#define DMA_TX_STATE (GENMASK(7, 4)) +#define DMA_RX_STATE (GENMASK(11, 8)) + #define DEFAULT_BUS_WIDTH (4) #define DEFAULT_SE_CLK (19200000) From ac16f8aa77a366b914d937cb735c81948d348019 Mon Sep 17 00:00:00 2001 From: Anmolpreet Kaur Date: Wed, 22 Apr 2020 13:55:39 +0530 Subject: [PATCH 40/90] smcinvoke : Add locking to shared variables This change adds locking to shared variables to prevent any data race conditions. cb_reqs_inflight and server_info should both be protected under locks. Change-Id: I3475ab3208c2fc91fce2865d64626efdcd404e70 Signed-off-by: Anmolpreet Kaur --- drivers/soc/qcom/smcinvoke.c | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/drivers/soc/qcom/smcinvoke.c b/drivers/soc/qcom/smcinvoke.c index b42a6e176f42..c28870cacf36 100644 --- a/drivers/soc/qcom/smcinvoke.c +++ b/drivers/soc/qcom/smcinvoke.c @@ -1558,16 +1558,19 @@ static long process_accept_req(struct file *filp, unsigned int cmd, mutex_lock(&g_smcinvoke_lock); server_info = get_cb_server_locked(server_obj->server_id); - mutex_unlock(&g_smcinvoke_lock); + if (!server_info) { pr_err("No matching server with server id : %u found\n", - server_obj->server_id); + server_obj->server_id); + mutex_unlock(&g_smcinvoke_lock); return -EINVAL; } if (server_info->state == SMCINVOKE_SERVER_STATE_DEFUNCT) server_info->state = 0; + mutex_unlock(&g_smcinvoke_lock); + /* First check if it has response otherwise wait for req */ if (user_args.has_resp) { mutex_lock(&g_smcinvoke_lock); @@ -1946,7 +1949,6 @@ static int smcinvoke_probe(struct platform_device *pdev) goto exit_destroy_device; } smcinvoke_pdev = pdev; - cb_reqs_inflight = 0; return 0; @@ -1973,12 +1975,15 @@ static int smcinvoke_remove(struct platform_device *pdev) static int __maybe_unused smcinvoke_suspend(struct platform_device *pdev, pm_message_t state) { + int ret = 0; + + mutex_lock(&g_smcinvoke_lock); if (cb_reqs_inflight) { pr_err("Failed to suspend 
smcinvoke driver\n"); - return -EIO; + ret = -EIO; } - - return 0; + mutex_unlock(&g_smcinvoke_lock); + return ret; } static int __maybe_unused smcinvoke_resume(struct platform_device *pdev) From 464998739f1e134401bd92366ed3fc1928eb07fd Mon Sep 17 00:00:00 2001 From: Mohammed Siddiq Date: Sat, 11 Apr 2020 14:59:31 +0530 Subject: [PATCH 41/90] icnss: Add code to send hang event data in pd down Add code to send hang event data in pd down instead of smp2p interrupt handler. Add new uevent ICNSS_UEVENT_HANG_DATA to send the hang data to the host driver. Change-Id: I5c6ebce5b8c81d0f9c6b1cba75d7eb411f46253c Signed-off-by: Mohammed Siddiq --- drivers/soc/qcom/icnss.c | 74 +++++++++++++++++++++++----------------- include/soc/qcom/icnss.h | 6 ++++ 2 files changed, 49 insertions(+), 31 deletions(-) diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c index 3a4aa853ea1d..e7e6fdb047f4 100644 --- a/drivers/soc/qcom/icnss.c +++ b/drivers/soc/qcom/icnss.c @@ -571,26 +571,6 @@ int icnss_power_off(struct device *dev) } EXPORT_SYMBOL(icnss_power_off); -int icnss_update_fw_down_params(struct icnss_priv *priv, - struct icnss_uevent_fw_down_data *fw_down_data, - bool crashed) -{ - fw_down_data->crashed = crashed; - - if (!priv->hang_event_data_va) - return -EINVAL; - - priv->hang_event_data = kmemdup(priv->hang_event_data_va, - priv->hang_event_data_len, - GFP_ATOMIC); - if (!priv->hang_event_data) - return -ENOMEM; - - // Update the hang event params - fw_down_data->hang_event_data = priv->hang_event_data; - fw_down_data->hang_event_data_len = priv->hang_event_data_len; - return 0; -} static irqreturn_t fw_error_fatal_handler(int irq, void *ctx) { @@ -608,7 +588,6 @@ static irqreturn_t fw_crash_indication_handler(int irq, void *ctx) { struct icnss_priv *priv = ctx; struct icnss_uevent_fw_down_data fw_down_data = {0}; - int ret = 0; icnss_pr_err("Received early crash indication from FW\n"); @@ -617,18 +596,9 @@ static irqreturn_t fw_crash_indication_handler(int irq, void 
*ctx) icnss_ignore_fw_timeout(true); if (test_bit(ICNSS_FW_READY, &priv->state)) { - ret = icnss_update_fw_down_params(priv, &fw_down_data, - true); - if (ret) - icnss_pr_err("Unable to allocate memory for Hang event data\n"); - + fw_down_data.crashed = true; icnss_call_driver_uevent(priv, ICNSS_UEVENT_FW_DOWN, &fw_down_data); - - if (!ret) { - kfree(priv->hang_event_data); - priv->hang_event_data = NULL; - } } } @@ -1231,6 +1201,46 @@ static int icnss_fw_crashed(struct icnss_priv *priv, return 0; } +int icnss_update_hang_event_data(struct icnss_priv *priv, + struct icnss_uevent_hang_data *hang_data) +{ + if (!priv->hang_event_data_va) + return -EINVAL; + + priv->hang_event_data = kmemdup(priv->hang_event_data_va, + priv->hang_event_data_len, + GFP_ATOMIC); + if (!priv->hang_event_data) + return -ENOMEM; + + // Update the hang event params + hang_data->hang_event_data = priv->hang_event_data; + hang_data->hang_event_data_len = priv->hang_event_data_len; + + return 0; +} + +int icnss_send_hang_event_data(struct icnss_priv *priv) +{ + struct icnss_uevent_hang_data hang_data = {0}; + int ret = 0xFF; + + if (priv->early_crash_ind) { + ret = icnss_update_hang_event_data(priv, &hang_data); + if (ret) + icnss_pr_err("Unable to allocate memory for Hang event data\n"); + } + icnss_call_driver_uevent(priv, ICNSS_UEVENT_HANG_DATA, + &hang_data); + + if (!ret) { + kfree(priv->hang_event_data); + priv->hang_event_data = NULL; + } + + return 0; +} + static int icnss_driver_event_pd_service_down(struct icnss_priv *priv, void *data) { @@ -1244,6 +1254,8 @@ static int icnss_driver_event_pd_service_down(struct icnss_priv *priv, if (priv->force_err_fatal) ICNSS_ASSERT(0); + icnss_send_hang_event_data(priv); + if (priv->early_crash_ind) { icnss_pr_dbg("PD Down ignored as early indication is processed: %d, state: 0x%lx\n", event_data->crashed, priv->state); diff --git a/include/soc/qcom/icnss.h b/include/soc/qcom/icnss.h index 4eecdab051a6..c7129de08ca5 100644 --- 
a/include/soc/qcom/icnss.h +++ b/include/soc/qcom/icnss.h @@ -18,6 +18,12 @@ enum icnss_uevent { ICNSS_UEVENT_FW_CRASHED, ICNSS_UEVENT_FW_DOWN, + ICNSS_UEVENT_HANG_DATA, +}; + +struct icnss_uevent_hang_data { + void *hang_event_data; + uint16_t hang_event_data_len; }; struct icnss_uevent_fw_down_data { From c5094184044c6f9387a067a9ccf8da0f7a094fea Mon Sep 17 00:00:00 2001 From: Mahesh Sharma Date: Wed, 8 Apr 2020 22:19:31 -0700 Subject: [PATCH 42/90] Bluetooth: Disable ASD for HSP Keep ASD commonly enabled by default and disable it on Hastings prime. Change-Id: I42c2c37ea3c1910da2b3bdd417efb261cad53974 Signed-off-by: Mahesh Sharma --- drivers/bluetooth/bluetooth-power.c | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/drivers/bluetooth/bluetooth-power.c b/drivers/bluetooth/bluetooth-power.c index b47dedef7228..c7f41ab161ef 100644 --- a/drivers/bluetooth/bluetooth-power.c +++ b/drivers/bluetooth/bluetooth-power.c @@ -28,6 +28,7 @@ #if defined CONFIG_BT_SLIM_QCA6390 || defined CONFIG_BTFM_SLIM_WCN3990 #include "btfm_slim.h" +#include "btfm_slim_slave.h" #endif #include @@ -846,6 +847,18 @@ int get_chipset_version(void) return soc_id; } +int bt_disable_asd(void) +{ + int rc = 0; + if (bt_power_pdata->bt_vdd_asd) { + BT_PWR_INFO("Disabling ASD regulator"); + rc = bt_vreg_disable(bt_power_pdata->bt_vdd_asd); + } else { + BT_PWR_INFO("ASD regulator is not configured"); + } + return rc; +} + static long bt_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { int ret = 0, pwr_cntrl = 0; @@ -882,6 +895,11 @@ static long bt_ioctl(struct file *file, unsigned int cmd, unsigned long arg) BT_PWR_ERR("BT_CMD_CHIP_VERS soc_version:%x", chipset_version); if (chipset_version) { soc_id = chipset_version; + if (soc_id == QCA_HSP_SOC_ID_0100 || + soc_id == QCA_HSP_SOC_ID_0110 || + soc_id == QCA_HSP_SOC_ID_0200) { + ret = bt_disable_asd(); + } } else { BT_PWR_ERR("got invalid soc version"); soc_id = 0; From 6c39fe962e9b2efbff6c6822c3a64e3dae279090 Mon Sep 
17 00:00:00 2001 From: Pratham Pratap Date: Thu, 16 Apr 2020 13:20:46 +0530 Subject: [PATCH 43/90] usb: gsi: Add NULL pointer check Static code analysis tool is detecting a possible NULL pointer dereference for gsi->d_port.in_ep in the error handling path. This change adds a NULL pointer check to avoid this issue. Change-Id: I4adf03e933cece0698cd3761c8758f4bd98925f4 Signed-off-by: Pratham Pratap --- drivers/usb/gadget/function/f_gsi.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/drivers/usb/gadget/function/f_gsi.c b/drivers/usb/gadget/function/f_gsi.c index 19da5e295016..83a6ad26313e 100644 --- a/drivers/usb/gadget/function/f_gsi.c +++ b/drivers/usb/gadget/function/f_gsi.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2015-2020, The Linux Foundation. All rights reserved. */ #include @@ -859,8 +859,10 @@ static int gsi_ep_enable(struct f_gsi *gsi) ret = usb_gsi_ep_op(gsi->d_port.out_ep, &gsi->d_port.out_request, GSI_EP_OP_CONFIG); if (ret) { - usb_gsi_ep_op(gsi->d_port.in_ep, - &gsi->d_port.in_request, GSI_EP_OP_DISABLE); + if (gsi->d_port.in_ep) + usb_gsi_ep_op(gsi->d_port.in_ep, + &gsi->d_port.in_request, + GSI_EP_OP_DISABLE); return ret; } } From 368a1104a4da3638fa9361e3c47adac41bc4bb32 Mon Sep 17 00:00:00 2001 From: Ashok Vuyyuru Date: Mon, 20 Apr 2020 20:05:06 +0530 Subject: [PATCH 44/90] msm: ipa3: Fix to reset TX/RX prop with correct size Fix to reset the TX/RX prop with correct size. 
Change-Id: I5e75e47949f4eb5df5930298ce8621e64fb7ab94 Signed-off-by: Ashok Vuyyuru --- drivers/platform/msm/ipa/ipa_clients/ipa_wdi3.c | 4 ++-- drivers/platform/msm/ipa/ipa_v3/ipa_intf.c | 3 ++- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_wdi3.c b/drivers/platform/msm/ipa/ipa_clients/ipa_wdi3.c index 16de3d9f6bef..c9b7a82035a7 100644 --- a/drivers/platform/msm/ipa/ipa_clients/ipa_wdi3.c +++ b/drivers/platform/msm/ipa/ipa_clients/ipa_wdi3.c @@ -254,7 +254,7 @@ int ipa_wdi_reg_intf(struct ipa_wdi_reg_intf_in_params *in) goto fail_commit_hdr; } tx.num_props = 2; - memset(tx_prop, 0, sizeof(*tx_prop)); + memset(tx_prop, 0, sizeof(*tx_prop) * IPA_TX_MAX_INTF_PROP); tx.prop = tx_prop; tx_prop[0].ip = IPA_IP_v4; @@ -286,7 +286,7 @@ int ipa_wdi_reg_intf(struct ipa_wdi_reg_intf_in_params *in) goto fail_commit_hdr; } rx.num_props = 2; - memset(rx_prop, 0, sizeof(*rx_prop)); + memset(rx_prop, 0, sizeof(*rx_prop) * IPA_RX_MAX_INTF_PROP); rx.prop = rx_prop; rx_prop[0].ip = IPA_IP_v4; if (!ipa3_ctx->ipa_wdi3_over_gsi) diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c b/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c index df9acebab7ff..bb6942f99ea5 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2013-2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2013-2020, The Linux Foundation. All rights reserved. */ #include @@ -114,6 +114,7 @@ int ipa3_register_intf_ext(const char *name, const struct ipa_tx_intf *tx, kfree(intf); return -ENOMEM; } + memcpy(intf->tx, tx->prop, len); } if (rx) { From d1db79deb63dd2b794cb374b85697223457a6886 Mon Sep 17 00:00:00 2001 From: Pavankumar Kondeti Date: Tue, 21 Apr 2020 07:52:28 +0530 Subject: [PATCH 45/90] sched/walt: Improve the scheduler This change is for general scheduler improvements. 
Change-Id: I80606a09c3d09c65b7bcdf3940bee5e155d6318c Co-developed-by: Lingutla Chandrasekhar Signed-off-by: Lingutla Chandrasekhar Signed-off-by: Pavankumar Kondeti --- kernel/sched/boost.c | 4 +--- kernel/sched/fair.c | 1 - kernel/sched/sched.h | 3 +-- kernel/sched/walt.c | 2 +- 4 files changed, 3 insertions(+), 7 deletions(-) diff --git a/kernel/sched/boost.c b/kernel/sched/boost.c index 8a0064929e0f..bfdf8c237365 100644 --- a/kernel/sched/boost.c +++ b/kernel/sched/boost.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved. */ #include "sched.h" @@ -78,12 +78,10 @@ static void sched_full_throttle_boost_exit(void) static void sched_conservative_boost_enter(void) { update_cgroup_boost_settings(); - sched_task_filter_util = sysctl_sched_min_task_util_for_boost; } static void sched_conservative_boost_exit(void) { - sched_task_filter_util = sysctl_sched_min_task_util_for_colocation; restore_cgroup_boost_settings(); } diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 7b9cd3c1e49a..b95b25c6f888 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -175,7 +175,6 @@ unsigned int sched_capacity_margin_down[NR_CPUS] = { unsigned int sysctl_sched_min_task_util_for_boost = 51; /* 0.68ms default for 20ms window size scaled to 1024 */ unsigned int sysctl_sched_min_task_util_for_colocation = 35; -unsigned int sched_task_filter_util = 35; __read_mostly unsigned int sysctl_sched_prefer_spread; #endif unsigned int sched_small_task_threshold = 102; diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 102bda8b2aa3..067a5f474d17 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -2802,7 +2802,6 @@ static inline int same_freq_domain(int src_cpu, int dst_cpu) #define CPU_RESERVED 1 extern enum sched_boost_policy boost_policy; -extern unsigned int sched_task_filter_util; static inline enum 
sched_boost_policy sched_boost_policy(void) { return boost_policy; @@ -2928,7 +2927,7 @@ static inline enum sched_boost_policy task_boost_policy(struct task_struct *p) * under conservative boost. */ if (sched_boost() == CONSERVATIVE_BOOST && - task_util(p) <= sched_task_filter_util) + task_util(p) <= sysctl_sched_min_task_util_for_boost) policy = SCHED_BOOST_NONE; } diff --git a/kernel/sched/walt.c b/kernel/sched/walt.c index 07bbd77bbfe5..b44ea6bde557 100644 --- a/kernel/sched/walt.c +++ b/kernel/sched/walt.c @@ -1856,7 +1856,7 @@ static void update_history(struct rq *rq, struct task_struct *p, p->ravg.pred_demand = pred_demand; p->ravg.pred_demand_scaled = pred_demand_scaled; - if (demand_scaled > sched_task_filter_util) + if (demand_scaled > sysctl_sched_min_task_util_for_colocation) p->unfilter = sysctl_sched_task_unfilter_period; else if (p->unfilter) From 14956156a50396f7886a8bae0f56a788ae816973 Mon Sep 17 00:00:00 2001 From: Yuanfang Zhang Date: Wed, 22 Apr 2020 16:04:21 +0800 Subject: [PATCH 46/90] coresight: tmc-etr: fix etr smmu unmap issue The return value 0 of dma_map_resource() is valid, so should use the dma_mapping_error() to check for errors. 
Change-Id: I368f480ed66bad0498e38da2fc6baa78b5935d7f Signed-off-by: Yuanfang Zhang --- drivers/hwtracing/coresight/coresight-tmc-etr.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c index afcdcc1307e7..df0da2bb87be 100644 --- a/drivers/hwtracing/coresight/coresight-tmc-etr.c +++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c @@ -1065,7 +1065,7 @@ static int tmc_etr_fill_usb_bam_data(struct tmc_drvdata *drvdata) data_fifo_iova = dma_map_resource(drvdata->dev, bamdata->data_fifo.phys_base, bamdata->data_fifo.size, DMA_BIDIRECTIONAL, 0); - if (!data_fifo_iova) + if (dma_mapping_error(drvdata->dev, data_fifo_iova)) return -ENOMEM; dev_dbg(drvdata->dev, "%s:data p_addr:%pa,iova:%pad,size:%x\n", __func__, &(bamdata->data_fifo.phys_base), @@ -1074,7 +1074,7 @@ static int tmc_etr_fill_usb_bam_data(struct tmc_drvdata *drvdata) desc_fifo_iova = dma_map_resource(drvdata->dev, bamdata->desc_fifo.phys_base, bamdata->desc_fifo.size, DMA_BIDIRECTIONAL, 0); - if (!desc_fifo_iova) + if (dma_mapping_error(drvdata->dev, desc_fifo_iova)) return -ENOMEM; dev_dbg(drvdata->dev, "%s:desc p_addr:%pa,iova:%pad,size:%x\n", __func__, &(bamdata->desc_fifo.phys_base), @@ -1146,7 +1146,7 @@ static int get_usb_bam_iova(struct device *dev, unsigned long usb_bam_handle, return ret; } *iova = dma_map_resource(dev, p_addr, bam_size, DMA_BIDIRECTIONAL, 0); - if (!(*iova)) + if (dma_mapping_error(dev, *iova)) return -ENOMEM; return 0; } From 0102ba4f8d234a929c5aecf90b2069e78c16c6ad Mon Sep 17 00:00:00 2001 From: Yue Ma Date: Tue, 24 Mar 2020 07:26:50 +0800 Subject: [PATCH 47/90] cnss2: Update WLFW QMI messages to latest Update WLFW QMI messages to latest to avoid merge conflicts later. 
Change-Id: I39460e385fe49c64ee5eca3a910a3e0d1a08b0b2 Signed-off-by: Yue Ma --- .../cnss_utils/wlan_firmware_service_v01.c | 246 ++++++++++++++++++ .../cnss_utils/wlan_firmware_service_v01.h | 86 +++++- 2 files changed, 327 insertions(+), 5 deletions(-) diff --git a/drivers/net/wireless/cnss_utils/wlan_firmware_service_v01.c b/drivers/net/wireless/cnss_utils/wlan_firmware_service_v01.c index 0dae083cf110..1899d4a61905 100644 --- a/drivers/net/wireless/cnss_utils/wlan_firmware_service_v01.c +++ b/drivers/net/wireless/cnss_utils/wlan_firmware_service_v01.c @@ -747,6 +747,24 @@ struct qmi_elem_info wlfw_ind_register_req_msg_v01_ei[] = { .offset = offsetof(struct wlfw_ind_register_req_msg_v01, respond_get_info_enable), }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x20, + .offset = offsetof(struct wlfw_ind_register_req_msg_v01, + m3_dump_upload_req_enable_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x20, + .offset = offsetof(struct wlfw_ind_register_req_msg_v01, + m3_dump_upload_req_enable), + }, { .data_type = QMI_EOTI, .array_type = NO_ARRAY, @@ -802,6 +820,42 @@ struct qmi_elem_info wlfw_fw_ready_ind_msg_v01_ei[] = { }; struct qmi_elem_info wlfw_msa_ready_ind_msg_v01_ei[] = { + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct wlfw_msa_ready_ind_msg_v01, + hang_data_addr_offset_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct wlfw_msa_ready_ind_msg_v01, + hang_data_addr_offset), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct wlfw_msa_ready_ind_msg_v01, + hang_data_length_valid), 
+ }, + { + .data_type = QMI_UNSIGNED_2_BYTE, + .elem_len = 1, + .elem_size = sizeof(u16), + .array_type = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct wlfw_msa_ready_ind_msg_v01, + hang_data_length), + }, { .data_type = QMI_EOTI, .array_type = NO_ARRAY, @@ -1318,6 +1372,24 @@ struct qmi_elem_info wlfw_cap_resp_msg_v01_ei[] = { .offset = offsetof(struct wlfw_cap_resp_msg_v01, otp_version), }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x19, + .offset = offsetof(struct wlfw_cap_resp_msg_v01, + eeprom_caldata_read_timeout_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0x19, + .offset = offsetof(struct wlfw_cap_resp_msg_v01, + eeprom_caldata_read_timeout), + }, { .data_type = QMI_EOTI, .array_type = NO_ARRAY, @@ -1515,6 +1587,24 @@ struct qmi_elem_info wlfw_cal_report_req_msg_v01_ei[] = { .offset = offsetof(struct wlfw_cal_report_req_msg_v01, xo_cal_data), }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct wlfw_cal_report_req_msg_v01, + cal_remove_supported_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct wlfw_cal_report_req_msg_v01, + cal_remove_supported), + }, { .data_type = QMI_EOTI, .array_type = NO_ARRAY, @@ -3892,3 +3982,159 @@ struct qmi_elem_info wlfw_device_info_resp_msg_v01_ei[] = { }, }; +struct qmi_elem_info wlfw_m3_dump_upload_req_ind_msg_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct + wlfw_m3_dump_upload_req_ind_msg_v01, + pdev_id), + }, + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(u64), + .array_type 
= NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct + wlfw_m3_dump_upload_req_ind_msg_v01, + addr), + }, + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(u64), + .array_type = NO_ARRAY, + .tlv_type = 0x03, + .offset = offsetof(struct + wlfw_m3_dump_upload_req_ind_msg_v01, + size), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info wlfw_m3_dump_upload_done_req_msg_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct + wlfw_m3_dump_upload_done_req_msg_v01, + pdev_id), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct + wlfw_m3_dump_upload_done_req_msg_v01, + status), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info wlfw_m3_dump_upload_done_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct + wlfw_m3_dump_upload_done_resp_msg_v01, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info wlfw_soc_wake_req_msg_v01_ei[] = { + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct + wlfw_soc_wake_req_msg_v01, + wake_valid), + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(enum wlfw_soc_wake_enum_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct wlfw_soc_wake_req_msg_v01, + wake), + }, + { + .data_type = QMI_EOTI, + .array_type = 
NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info wlfw_soc_wake_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct wlfw_soc_wake_resp_msg_v01, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info wlfw_exit_power_save_req_msg_v01_ei[] = { + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info wlfw_exit_power_save_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct + wlfw_exit_power_save_resp_msg_v01, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; diff --git a/drivers/net/wireless/cnss_utils/wlan_firmware_service_v01.h b/drivers/net/wireless/cnss_utils/wlan_firmware_service_v01.h index b3b1ce7f819c..e105cd48a9c4 100644 --- a/drivers/net/wireless/cnss_utils/wlan_firmware_service_v01.h +++ b/drivers/net/wireless/cnss_utils/wlan_firmware_service_v01.h @@ -17,6 +17,7 @@ #define QMI_WLFW_GET_INFO_REQ_V01 0x004A #define QMI_WLFW_INITIATE_CAL_UPDATE_IND_V01 0x002A #define QMI_WLFW_CAL_DONE_IND_V01 0x003E +#define QMI_WLFW_M3_DUMP_UPLOAD_REQ_IND_V01 0x004D #define QMI_WLFW_WFC_CALL_STATUS_RESP_V01 0x0049 #define QMI_WLFW_HOST_CAP_REQ_V01 0x0034 #define QMI_WLFW_DYNAMIC_FEATURE_MASK_RESP_V01 0x003B @@ -28,6 +29,7 @@ #define QMI_WLFW_RESPOND_GET_INFO_IND_V01 0x004B #define QMI_WLFW_M3_INFO_RESP_V01 0x003C #define QMI_WLFW_CAL_UPDATE_RESP_V01 0x0029 +#define QMI_WLFW_M3_DUMP_UPLOAD_DONE_RESP_V01 0x004E #define QMI_WLFW_CAL_DOWNLOAD_RESP_V01 0x0027 #define 
QMI_WLFW_XO_CAL_IND_V01 0x003D #define QMI_WLFW_INI_RESP_V01 0x002F @@ -41,12 +43,14 @@ #define QMI_WLFW_HOST_CAP_RESP_V01 0x0034 #define QMI_WLFW_MSA_READY_IND_V01 0x002B #define QMI_WLFW_ATHDIAG_WRITE_RESP_V01 0x0031 +#define QMI_WLFW_EXIT_POWER_SAVE_REQ_V01 0x0050 #define QMI_WLFW_WLAN_MODE_REQ_V01 0x0022 #define QMI_WLFW_IND_REGISTER_REQ_V01 0x0020 #define QMI_WLFW_WLAN_CFG_RESP_V01 0x0023 #define QMI_WLFW_QDSS_TRACE_MODE_REQ_V01 0x0045 #define QMI_WLFW_REQUEST_MEM_IND_V01 0x0035 #define QMI_WLFW_QDSS_TRACE_CONFIG_DOWNLOAD_RESP_V01 0x0044 +#define QMI_WLFW_SOC_WAKE_RESP_V01 0x004F #define QMI_WLFW_REJUVENATE_IND_V01 0x0039 #define QMI_WLFW_DYNAMIC_FEATURE_MASK_REQ_V01 0x003B #define QMI_WLFW_ATHDIAG_WRITE_REQ_V01 0x0031 @@ -68,7 +72,9 @@ #define QMI_WLFW_MSA_INFO_RESP_V01 0x002D #define QMI_WLFW_MSA_READY_REQ_V01 0x002E #define QMI_WLFW_QDSS_TRACE_DATA_RESP_V01 0x0042 +#define QMI_WLFW_M3_DUMP_UPLOAD_DONE_REQ_V01 0x004E #define QMI_WLFW_CAP_RESP_V01 0x0024 +#define QMI_WLFW_SOC_WAKE_REQ_V01 0x004F #define QMI_WLFW_REJUVENATE_ACK_REQ_V01 0x003A #define QMI_WLFW_ATHDIAG_READ_RESP_V01 0x0030 #define QMI_WLFW_SHUTDOWN_REQ_V01 0x0043 @@ -76,6 +82,7 @@ #define QMI_WLFW_ANTENNA_SWITCH_RESP_V01 0x0047 #define QMI_WLFW_DEVICE_INFO_REQ_V01 0x004C #define QMI_WLFW_MAC_ADDR_REQ_V01 0x0033 +#define QMI_WLFW_EXIT_POWER_SAVE_RESP_V01 0x0050 #define QMI_WLFW_RESPOND_MEM_RESP_V01 0x0036 #define QMI_WLFW_VBATT_RESP_V01 0x0032 #define QMI_WLFW_MSA_INFO_REQ_V01 0x002D @@ -156,6 +163,13 @@ enum wlfw_qdss_trace_mode_enum_v01 { WLFW_QDSS_TRACE_MODE_ENUM_MAX_VAL_V01 = INT_MAX, }; +enum wlfw_soc_wake_enum_v01 { + WLFW_SOC_WAKE_ENUM_MIN_VAL_V01 = INT_MIN, + QMI_WLFW_WAKE_REQUEST_V01 = 0, + QMI_WLFW_WAKE_RELEASE_V01 = 1, + WLFW_SOC_WAKE_ENUM_MAX_VAL_V01 = INT_MAX, +}; + #define QMI_WLFW_CE_ATTR_FLAGS_V01 ((u32)0x00) #define QMI_WLFW_CE_ATTR_NO_SNOOP_V01 ((u32)0x01) #define QMI_WLFW_CE_ATTR_BYTE_SWAP_DATA_V01 ((u32)0x02) @@ -286,9 +300,11 @@ struct wlfw_ind_register_req_msg_v01 { u8 
qdss_trace_free_enable; u8 respond_get_info_enable_valid; u8 respond_get_info_enable; + u8 m3_dump_upload_req_enable_valid; + u8 m3_dump_upload_req_enable; }; -#define WLFW_IND_REGISTER_REQ_MSG_V01_MAX_MSG_LEN 70 +#define WLFW_IND_REGISTER_REQ_MSG_V01_MAX_MSG_LEN 74 extern struct qmi_elem_info wlfw_ind_register_req_msg_v01_ei[]; struct wlfw_ind_register_resp_msg_v01 { @@ -308,10 +324,13 @@ struct wlfw_fw_ready_ind_msg_v01 { extern struct qmi_elem_info wlfw_fw_ready_ind_msg_v01_ei[]; struct wlfw_msa_ready_ind_msg_v01 { - char placeholder; + u8 hang_data_addr_offset_valid; + u32 hang_data_addr_offset; + u8 hang_data_length_valid; + u16 hang_data_length; }; -#define WLFW_MSA_READY_IND_MSG_V01_MAX_MSG_LEN 0 +#define WLFW_MSA_READY_IND_MSG_V01_MAX_MSG_LEN 12 extern struct qmi_elem_info wlfw_msa_ready_ind_msg_v01_ei[]; struct wlfw_pin_connect_result_ind_msg_v01 { @@ -403,9 +422,11 @@ struct wlfw_cap_resp_msg_v01 { u32 time_freq_hz; u8 otp_version_valid; u32 otp_version; + u8 eeprom_caldata_read_timeout_valid; + u32 eeprom_caldata_read_timeout; }; -#define WLFW_CAP_RESP_MSG_V01_MAX_MSG_LEN 228 +#define WLFW_CAP_RESP_MSG_V01_MAX_MSG_LEN 235 extern struct qmi_elem_info wlfw_cap_resp_msg_v01_ei[]; struct wlfw_bdf_download_req_msg_v01 { @@ -440,9 +461,11 @@ struct wlfw_cal_report_req_msg_v01 { enum wlfw_cal_temp_id_enum_v01 meta_data[QMI_WLFW_MAX_NUM_CAL_V01]; u8 xo_cal_data_valid; u8 xo_cal_data; + u8 cal_remove_supported_valid; + u8 cal_remove_supported; }; -#define WLFW_CAL_REPORT_REQ_MSG_V01_MAX_MSG_LEN 28 +#define WLFW_CAL_REPORT_REQ_MSG_V01_MAX_MSG_LEN 32 extern struct qmi_elem_info wlfw_cal_report_req_msg_v01_ei[]; struct wlfw_cal_report_resp_msg_v01 { @@ -1016,4 +1039,57 @@ struct wlfw_device_info_resp_msg_v01 { #define WLFW_DEVICE_INFO_RESP_MSG_V01_MAX_MSG_LEN 25 extern struct qmi_elem_info wlfw_device_info_resp_msg_v01_ei[]; +struct wlfw_m3_dump_upload_req_ind_msg_v01 { + u32 pdev_id; + u64 addr; + u64 size; +}; + +#define 
WLFW_M3_DUMP_UPLOAD_REQ_IND_MSG_V01_MAX_MSG_LEN 29 +extern struct qmi_elem_info wlfw_m3_dump_upload_req_ind_msg_v01_ei[]; + +struct wlfw_m3_dump_upload_done_req_msg_v01 { + u32 pdev_id; + u32 status; +}; + +#define WLFW_M3_DUMP_UPLOAD_DONE_REQ_MSG_V01_MAX_MSG_LEN 14 +extern struct qmi_elem_info wlfw_m3_dump_upload_done_req_msg_v01_ei[]; + +struct wlfw_m3_dump_upload_done_resp_msg_v01 { + struct qmi_response_type_v01 resp; +}; + +#define WLFW_M3_DUMP_UPLOAD_DONE_RESP_MSG_V01_MAX_MSG_LEN 7 +extern struct qmi_elem_info wlfw_m3_dump_upload_done_resp_msg_v01_ei[]; + +struct wlfw_soc_wake_req_msg_v01 { + u8 wake_valid; + enum wlfw_soc_wake_enum_v01 wake; +}; + +#define WLFW_SOC_WAKE_REQ_MSG_V01_MAX_MSG_LEN 7 +extern struct qmi_elem_info wlfw_soc_wake_req_msg_v01_ei[]; + +struct wlfw_soc_wake_resp_msg_v01 { + struct qmi_response_type_v01 resp; +}; + +#define WLFW_SOC_WAKE_RESP_MSG_V01_MAX_MSG_LEN 7 +extern struct qmi_elem_info wlfw_soc_wake_resp_msg_v01_ei[]; + +struct wlfw_exit_power_save_req_msg_v01 { + char placeholder; +}; + +#define WLFW_EXIT_POWER_SAVE_REQ_MSG_V01_MAX_MSG_LEN 0 +extern struct qmi_elem_info wlfw_exit_power_save_req_msg_v01_ei[]; + +struct wlfw_exit_power_save_resp_msg_v01 { + struct qmi_response_type_v01 resp; +}; + +#define WLFW_EXIT_POWER_SAVE_RESP_MSG_V01_MAX_MSG_LEN 7 +extern struct qmi_elem_info wlfw_exit_power_save_resp_msg_v01_ei[]; + #endif From 75e8789c151da20af98f926baa3953fac44c9d31 Mon Sep 17 00:00:00 2001 From: Satish Kodishala Date: Wed, 8 Apr 2020 15:07:31 +0530 Subject: [PATCH 48/90] Bluetooth:Change EA for HST and HSP before getting LA -Change EA for HSP before getting LA based on the soc type queried from BT power driver. -By default, HSP's Slimbus EA is added in DTSI file. Update HST's Slimbus slave EA address based on HST chip ver. 
Change-Id: I49e06e2e43025797e6110bf55304c7b3f0616c8e Signed-off-by: Satish Kodishala --- drivers/bluetooth/btfm_slim.c | 58 +++++++++++++++++++++++++++++ drivers/bluetooth/btfm_slim_slave.h | 6 +++ 2 files changed, 64 insertions(+) diff --git a/drivers/bluetooth/btfm_slim.c b/drivers/bluetooth/btfm_slim.c index bf886e69050d..9ed65d837848 100644 --- a/drivers/bluetooth/btfm_slim.c +++ b/drivers/bluetooth/btfm_slim.c @@ -371,6 +371,9 @@ static int btfm_slim_alloc_port(struct btfmslim *btfmslim) int btfm_slim_hw_init(struct btfmslim *btfmslim) { int ret; + int chipset_ver; + struct slim_device *slim = btfmslim->slim_pgd; + struct slim_device *slim_ifd = &btfmslim->slim_ifd; BTFMSLIM_DBG(""); if (!btfmslim) @@ -381,6 +384,61 @@ int btfm_slim_hw_init(struct btfmslim *btfmslim) return 0; } mutex_lock(&btfmslim->io_lock); + BTFMSLIM_INFO( + "PGD Enum Addr: %.02x:%.02x:%.02x:%.02x:%.02x: %.02x", + slim->e_addr[0], slim->e_addr[1], slim->e_addr[2], + slim->e_addr[3], slim->e_addr[4], slim->e_addr[5]); + BTFMSLIM_INFO( + "IFD Enum Addr: %.02x:%.02x:%.02x:%.02x:%.02x: %.02x", + slim_ifd->e_addr[0], slim_ifd->e_addr[1], + slim_ifd->e_addr[2], slim_ifd->e_addr[3], + slim_ifd->e_addr[4], slim_ifd->e_addr[5]); + + chipset_ver = get_chipset_version(); + BTFMSLIM_INFO("chipset soc version:%x", chipset_ver); + + if (chipset_ver == QCA_HSP_SOC_ID_0100 || + chipset_ver == QCA_HSP_SOC_ID_0110 || + chipset_ver == QCA_HSP_SOC_ID_0200) { + BTFMSLIM_INFO("chipset is hastings prime, overwriting EA"); + slim->e_addr[0] = 0x00; + slim->e_addr[1] = 0x01; + slim->e_addr[2] = 0x21; + slim->e_addr[3] = 0x02; + slim->e_addr[4] = 0x17; + slim->e_addr[5] = 0x02; + + slim_ifd->e_addr[0] = 0x00; + slim_ifd->e_addr[1] = 0x00; + slim_ifd->e_addr[2] = 0x21; + slim_ifd->e_addr[3] = 0x02; + slim_ifd->e_addr[4] = 0x17; + slim_ifd->e_addr[5] = 0x02; + } else if (chipset_ver == QCA_HASTINGS_SOC_ID_0200) { + BTFMSLIM_INFO("chipset is hastings 2.0, overwriting EA"); + slim->e_addr[0] = 0x00; + slim->e_addr[1] = 
0x01; + slim->e_addr[2] = 0x20; + slim->e_addr[3] = 0x02; + slim->e_addr[4] = 0x17; + slim->e_addr[5] = 0x02; + + slim_ifd->e_addr[0] = 0x00; + slim_ifd->e_addr[1] = 0x00; + slim_ifd->e_addr[2] = 0x20; + slim_ifd->e_addr[3] = 0x02; + slim_ifd->e_addr[4] = 0x17; + slim_ifd->e_addr[5] = 0x02; + } + BTFMSLIM_INFO( + "PGD Enum Addr: %.02x:%.02x:%.02x:%.02x:%.02x: %.02x", + slim->e_addr[0], slim->e_addr[1], slim->e_addr[2], + slim->e_addr[3], slim->e_addr[4], slim->e_addr[5]); + BTFMSLIM_INFO( + "IFD Enum Addr: %.02x:%.02x:%.02x:%.02x:%.02x: %.02x", + slim_ifd->e_addr[0], slim_ifd->e_addr[1], + slim_ifd->e_addr[2], slim_ifd->e_addr[3], + slim_ifd->e_addr[4], slim_ifd->e_addr[5]); /* Assign Logical Address for PGD (Ported Generic Device) * enumeration address diff --git a/drivers/bluetooth/btfm_slim_slave.h b/drivers/bluetooth/btfm_slim_slave.h index 88d14849865f..67e08a6ce667 100644 --- a/drivers/bluetooth/btfm_slim_slave.h +++ b/drivers/bluetooth/btfm_slim_slave.h @@ -103,6 +103,12 @@ enum { QCA_HASTINGS_SOC_ID_0200 = 0x400A0200, }; +enum { + QCA_HSP_SOC_ID_0100 = 0x400C0100, + QCA_HSP_SOC_ID_0110 = 0x400C0110, + QCA_HSP_SOC_ID_0200 = 0x400C0200, +}; + /* Function Prototype */ /* From 6f18f9440ae341dbb53f0d79326cb6cfd37b2db1 Mon Sep 17 00:00:00 2001 From: Mohammed Siddiq Date: Mon, 13 Apr 2020 15:59:44 +0530 Subject: [PATCH 49/90] icnss: serialize the driver remove in modem graceful shutdown Add code to post unregister driver event during modem graceful shutdown instead of directly calling driver remove. 
Change-Id: Ie8b7699bf4e9e346279feede68022cda20f93a69 Signed-off-by: Mohammed Siddiq --- drivers/soc/qcom/icnss.c | 35 +++++++---------------------------- 1 file changed, 7 insertions(+), 28 deletions(-) diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c index e7e6fdb047f4..d806f562978b 100644 --- a/drivers/soc/qcom/icnss.c +++ b/drivers/soc/qcom/icnss.c @@ -1156,32 +1156,6 @@ static int icnss_driver_event_unregister_driver(void *data) return 0; } -static int icnss_call_driver_remove(struct icnss_priv *priv) -{ - icnss_pr_dbg("Calling driver remove state: 0x%lx\n", priv->state); - - clear_bit(ICNSS_FW_READY, &priv->state); - - if (test_bit(ICNSS_DRIVER_UNLOADING, &priv->state)) - return 0; - - if (!test_bit(ICNSS_DRIVER_PROBED, &priv->state)) - return 0; - - if (!priv->ops || !priv->ops->remove) - return 0; - - set_bit(ICNSS_DRIVER_UNLOADING, &priv->state); - priv->ops->remove(&priv->pdev->dev); - - clear_bit(ICNSS_DRIVER_UNLOADING, &priv->state); - clear_bit(ICNSS_DRIVER_PROBED, &priv->state); - - icnss_hw_power_off(priv); - - return 0; -} - static int icnss_fw_crashed(struct icnss_priv *priv, struct icnss_event_pd_service_down_data *event_data) { @@ -1441,8 +1415,13 @@ static void icnss_update_state_send_modem_shutdown(struct icnss_priv *priv, if (atomic_read(&priv->is_shutdown)) { atomic_set(&priv->is_shutdown, false); if (!test_bit(ICNSS_PD_RESTART, &priv->state) && - !test_bit(ICNSS_SHUTDOWN_DONE, &priv->state)) { - icnss_call_driver_remove(priv); + !test_bit(ICNSS_SHUTDOWN_DONE, &priv->state) && + !test_bit(ICNSS_BLOCK_SHUTDOWN, &priv->state)) { + clear_bit(ICNSS_FW_READY, &priv->state); + icnss_driver_event_post( + ICNSS_DRIVER_EVENT_UNREGISTER_DRIVER, + ICNSS_EVENT_SYNC_UNINTERRUPTIBLE, + NULL); } } From 3e6b69f8243516c0f78f2e52bb5647239ffa2ccf Mon Sep 17 00:00:00 2001 From: Mohammed Siddiq Date: Fri, 24 Apr 2020 22:37:37 +0530 Subject: [PATCH 50/90] cnss2: Add code to pick hang data offset based on deviceID Add code to pick hang data offset 
based on deviceID. Change-Id: I865fc90ea558d2626672b51c5cecadfdf93358b3 Signed-off-by: Mohammed Siddiq --- drivers/net/wireless/cnss2/pci.c | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/cnss2/pci.c b/drivers/net/wireless/cnss2/pci.c index 9eabe3c33efe..13893d441344 100644 --- a/drivers/net/wireless/cnss2/pci.c +++ b/drivers/net/wireless/cnss2/pci.c @@ -75,7 +75,8 @@ static DEFINE_SPINLOCK(time_sync_lock); #define LINK_TRAINING_RETRY_MAX_TIMES 3 #define HANG_DATA_LENGTH 384 -#define HANG_DATA_OFFSET ((3 * 1024 * 1024) - HANG_DATA_LENGTH) +#define HST_HANG_DATA_OFFSET ((3 * 1024 * 1024) - HANG_DATA_LENGTH) +#define HSP_HANG_DATA_OFFSET ((2 * 1024 * 1024) - HANG_DATA_LENGTH) static struct cnss_pci_reg ce_src[] = { { "SRC_RING_BASE_LSB", QCA6390_CE_SRC_RING_BASE_LSB_OFFSET }, @@ -3809,15 +3810,29 @@ static void cnss_pci_send_hang_event(struct cnss_pci_data *pci_priv) struct cnss_fw_mem *fw_mem = plat_priv->fw_mem; struct cnss_hang_event hang_event = {0}; void *hang_data_va = NULL; + u64 offset = 0; int i = 0; if (!fw_mem || !plat_priv->fw_mem_seg_len) return; + switch (pci_priv->device_id) { + case QCA6390_DEVICE_ID: + offset = HST_HANG_DATA_OFFSET; + break; + case QCA6490_DEVICE_ID: + offset = HSP_HANG_DATA_OFFSET; + break; + default: + cnss_pr_err("Skip Hang Event Data as unsupported Device ID received: %d\n", + pci_priv->device_id); + return; + } + for (i = 0; i < plat_priv->fw_mem_seg_len; i++) { if (fw_mem[i].type == QMI_WLFW_MEM_TYPE_DDR_V01 && fw_mem[i].va) { - hang_data_va = fw_mem[i].va + HANG_DATA_OFFSET; + hang_data_va = fw_mem[i].va + offset; hang_event.hang_event_data = kmemdup(hang_data_va, HANG_DATA_LENGTH, GFP_ATOMIC); From 0831714884242a5a22a2138563097542e9411ead Mon Sep 17 00:00:00 2001 From: Mohammed Siddiq Date: Tue, 7 Apr 2020 21:33:26 +0530 Subject: [PATCH 51/90] icnss2: Add platform driver code for QDSS on moselle Add platform driver code to enable QDSS trace on moselle. 
Change-Id: I5c2d069fa547d22ca68da98fc7d2ea2ed7190036 Signed-off-by: Mohammed Siddiq --- drivers/soc/qcom/icnss2/Makefile | 1 + drivers/soc/qcom/icnss2/genl.c | 213 ++++++++++++++++++++++++++++++ drivers/soc/qcom/icnss2/genl.h | 17 +++ drivers/soc/qcom/icnss2/main.c | 183 ++++++++++++++++++++++++++ drivers/soc/qcom/icnss2/main.h | 15 +++ drivers/soc/qcom/icnss2/qmi.c | 218 +++++++++++++++++++++++++++++++ drivers/soc/qcom/icnss2/qmi.h | 21 +++ 7 files changed, 668 insertions(+) create mode 100644 drivers/soc/qcom/icnss2/genl.c create mode 100644 drivers/soc/qcom/icnss2/genl.h diff --git a/drivers/soc/qcom/icnss2/Makefile b/drivers/soc/qcom/icnss2/Makefile index 433483ec7ada..aed4ac22bd95 100644 --- a/drivers/soc/qcom/icnss2/Makefile +++ b/drivers/soc/qcom/icnss2/Makefile @@ -6,4 +6,5 @@ obj-$(CONFIG_ICNSS2) += icnss2.o icnss2-y := main.o icnss2-y += debug.o icnss2-y += power.o +icnss2-y += genl.o icnss2-$(CONFIG_ICNSS2_QMI) += qmi.o diff --git a/drivers/soc/qcom/icnss2/genl.c b/drivers/soc/qcom/icnss2/genl.c new file mode 100644 index 000000000000..d4d8570473e2 --- /dev/null +++ b/drivers/soc/qcom/icnss2/genl.c @@ -0,0 +1,213 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2020, The Linux Foundation. All rights reserved. 
*/ + +#define pr_fmt(fmt) "cnss_genl: " fmt + +#include +#include +#include +#include +#include + +#include "main.h" +#include "debug.h" + +#define ICNSS_GENL_FAMILY_NAME "cnss-genl" +#define ICNSS_GENL_MCAST_GROUP_NAME "cnss-genl-grp" +#define ICNSS_GENL_VERSION 1 +#define ICNSS_GENL_DATA_LEN_MAX (15 * 1024) +#define ICNSS_GENL_STR_LEN_MAX 16 + +enum { + ICNSS_GENL_ATTR_MSG_UNSPEC, + ICNSS_GENL_ATTR_MSG_TYPE, + ICNSS_GENL_ATTR_MSG_FILE_NAME, + ICNSS_GENL_ATTR_MSG_TOTAL_SIZE, + ICNSS_GENL_ATTR_MSG_SEG_ID, + ICNSS_GENL_ATTR_MSG_END, + ICNSS_GENL_ATTR_MSG_DATA_LEN, + ICNSS_GENL_ATTR_MSG_DATA, + __ICNSS_GENL_ATTR_MAX, +}; + +#define ICNSS_GENL_ATTR_MAX (__ICNSS_GENL_ATTR_MAX - 1) + +enum { + ICNSS_GENL_CMD_UNSPEC, + ICNSS_GENL_CMD_MSG, + __ICNSS_GENL_CMD_MAX, +}; + +#define ICNSS_GENL_CMD_MAX (__ICNSS_GENL_CMD_MAX - 1) + +static struct nla_policy icnss_genl_msg_policy[ICNSS_GENL_ATTR_MAX + 1] = { + [ICNSS_GENL_ATTR_MSG_TYPE] = { .type = NLA_U8 }, + [ICNSS_GENL_ATTR_MSG_FILE_NAME] = { .type = NLA_NUL_STRING, + .len = ICNSS_GENL_STR_LEN_MAX }, + [ICNSS_GENL_ATTR_MSG_TOTAL_SIZE] = { .type = NLA_U32 }, + [ICNSS_GENL_ATTR_MSG_SEG_ID] = { .type = NLA_U32 }, + [ICNSS_GENL_ATTR_MSG_END] = { .type = NLA_U8 }, + [ICNSS_GENL_ATTR_MSG_DATA_LEN] = { .type = NLA_U32 }, + [ICNSS_GENL_ATTR_MSG_DATA] = { .type = NLA_BINARY, + .len = ICNSS_GENL_DATA_LEN_MAX }, +}; + +static int icnss_genl_process_msg(struct sk_buff *skb, struct genl_info *info) +{ + return 0; +} + +static struct genl_ops icnss_genl_ops[] = { + { + .cmd = ICNSS_GENL_CMD_MSG, + .policy = icnss_genl_msg_policy, + .doit = icnss_genl_process_msg, + }, +}; + +static struct genl_multicast_group icnss_genl_mcast_grp[] = { + { + .name = ICNSS_GENL_MCAST_GROUP_NAME, + }, +}; + +static struct genl_family icnss_genl_family = { + .id = 0, + .hdrsize = 0, + .name = ICNSS_GENL_FAMILY_NAME, + .version = ICNSS_GENL_VERSION, + .maxattr = ICNSS_GENL_ATTR_MAX, + .module = THIS_MODULE, + .ops = icnss_genl_ops, + .n_ops = 
ARRAY_SIZE(icnss_genl_ops), + .mcgrps = icnss_genl_mcast_grp, + .n_mcgrps = ARRAY_SIZE(icnss_genl_mcast_grp), +}; + +static int icnss_genl_send_data(u8 type, char *file_name, u32 total_size, + u32 seg_id, u8 end, u32 data_len, u8 *msg_buff) +{ + struct sk_buff *skb = NULL; + void *msg_header = NULL; + int ret = 0; + char filename[ICNSS_GENL_STR_LEN_MAX + 1]; + + icnss_pr_dbg("type: %u, file_name %s, total_size: %x, seg_id %u, end %u, data_len %u\n", + type, file_name, total_size, seg_id, end, data_len); + + if (!file_name) + strlcpy(filename, "default", sizeof(filename)); + else + strlcpy(filename, file_name, sizeof(filename)); + + skb = genlmsg_new(NLMSG_HDRLEN + + nla_total_size(sizeof(type)) + + nla_total_size(strlen(filename) + 1) + + nla_total_size(sizeof(total_size)) + + nla_total_size(sizeof(seg_id)) + + nla_total_size(sizeof(end)) + + nla_total_size(sizeof(data_len)) + + nla_total_size(data_len), GFP_KERNEL); + if (!skb) + return -ENOMEM; + + msg_header = genlmsg_put(skb, 0, 0, + &icnss_genl_family, 0, + ICNSS_GENL_CMD_MSG); + if (!msg_header) { + ret = -ENOMEM; + goto fail; + } + + ret = nla_put_u8(skb, ICNSS_GENL_ATTR_MSG_TYPE, type); + if (ret < 0) + goto fail; + ret = nla_put_string(skb, ICNSS_GENL_ATTR_MSG_FILE_NAME, filename); + if (ret < 0) + goto fail; + ret = nla_put_u32(skb, ICNSS_GENL_ATTR_MSG_TOTAL_SIZE, total_size); + if (ret < 0) + goto fail; + ret = nla_put_u32(skb, ICNSS_GENL_ATTR_MSG_SEG_ID, seg_id); + if (ret < 0) + goto fail; + ret = nla_put_u8(skb, ICNSS_GENL_ATTR_MSG_END, end); + if (ret < 0) + goto fail; + ret = nla_put_u32(skb, ICNSS_GENL_ATTR_MSG_DATA_LEN, data_len); + if (ret < 0) + goto fail; + ret = nla_put(skb, ICNSS_GENL_ATTR_MSG_DATA, data_len, msg_buff); + if (ret < 0) + goto fail; + + genlmsg_end(skb, msg_header); + ret = genlmsg_multicast(&icnss_genl_family, skb, 0, 0, GFP_KERNEL); + if (ret < 0) + icnss_pr_err("Fail to send genl msg: %d\n", ret); + + return ret; +fail: + icnss_pr_err("Fail to generate genl msg: %d\n", ret); 
+ if (skb) + nlmsg_free(skb); + return ret; +} + +int icnss_genl_send_msg(void *buff, u8 type, char *file_name, u32 total_size) +{ + int ret = 0; + u8 *msg_buff = buff; + u32 remaining = total_size; + u32 seg_id = 0; + u32 data_len = 0; + u8 end = 0; + u8 retry; + + icnss_pr_dbg("type: %u, total_size: %x\n", type, total_size); + + while (remaining) { + if (remaining > ICNSS_GENL_DATA_LEN_MAX) { + data_len = ICNSS_GENL_DATA_LEN_MAX; + } else { + data_len = remaining; + end = 1; + } + + for (retry = 0; retry < 2; retry++) { + ret = icnss_genl_send_data(type, file_name, total_size, + seg_id, end, data_len, + msg_buff); + if (ret >= 0) + break; + msleep(100); + } + + if (ret < 0) { + icnss_pr_err("fail to send genl data, ret %d\n", ret); + return ret; + } + + remaining -= data_len; + msg_buff += data_len; + seg_id++; + } + + return ret; +} + +int icnss_genl_init(void) +{ + int ret = 0; + + ret = genl_register_family(&icnss_genl_family); + if (ret != 0) + icnss_pr_err("genl_register_family fail: %d\n", ret); + + return ret; +} + +void icnss_genl_exit(void) +{ + genl_unregister_family(&icnss_genl_family); +} diff --git a/drivers/soc/qcom/icnss2/genl.h b/drivers/soc/qcom/icnss2/genl.h new file mode 100644 index 000000000000..6cc04c9fcb15 --- /dev/null +++ b/drivers/soc/qcom/icnss2/genl.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (c) 2019-2020, The Linux Foundation. All rights reserved. 
*/ + +#ifndef __ICNSS_GENL_H__ +#define __ICNSS_GENL_H__ + +enum icnss_genl_msg_type { + ICNSS_GENL_MSG_TYPE_UNSPEC, + ICNSS_GENL_MSG_TYPE_QDSS, +}; + +int icnss_genl_init(void); +void icnss_genl_exit(void); +int icnss_genl_send_msg(void *buff, u8 type, + char *file_name, u32 total_size); + +#endif diff --git a/drivers/soc/qcom/icnss2/main.c b/drivers/soc/qcom/icnss2/main.c index 1c33f1ab42ed..27c0484393f2 100644 --- a/drivers/soc/qcom/icnss2/main.c +++ b/drivers/soc/qcom/icnss2/main.c @@ -43,6 +43,7 @@ #include "qmi.h" #include "debug.h" #include "power.h" +#include "genl.h" #define MAX_PROP_SIZE 32 #define NUM_LOG_PAGES 10 @@ -163,6 +164,12 @@ char *icnss_driver_event_to_str(enum icnss_driver_event_type type) return "IDLE_RESTART"; case ICNSS_DRIVER_EVENT_FW_INIT_DONE_IND: return "FW_INIT_DONE"; + case ICNSS_DRIVER_EVENT_QDSS_TRACE_REQ_MEM: + return "QDSS_TRACE_REQ_MEM"; + case ICNSS_DRIVER_EVENT_QDSS_TRACE_SAVE: + return "QDSS_TRACE_SAVE"; + case ICNSS_DRIVER_EVENT_QDSS_TRACE_FREE: + return "QDSS_TRACE_FREE"; case ICNSS_DRIVER_EVENT_MAX: return "EVENT_MAX"; } @@ -728,6 +735,159 @@ static int icnss_driver_event_fw_init_done(struct icnss_priv *priv, void *data) return ret; } +int icnss_alloc_qdss_mem(struct icnss_priv *priv) +{ + struct platform_device *pdev = priv->pdev; + struct icnss_fw_mem *qdss_mem = priv->qdss_mem; + int i, j; + + for (i = 0; i < priv->qdss_mem_seg_len; i++) { + if (!qdss_mem[i].va && qdss_mem[i].size) { + qdss_mem[i].va = + dma_alloc_coherent(&pdev->dev, + qdss_mem[i].size, + &qdss_mem[i].pa, + GFP_KERNEL); + if (!qdss_mem[i].va) { + icnss_pr_err("Failed to allocate QDSS memory for FW, size: 0x%zx, type: %u, chuck-ID: %d\n", + qdss_mem[i].size, + qdss_mem[i].type, i); + break; + } + } + } + + /* Best-effort allocation for QDSS trace */ + if (i < priv->qdss_mem_seg_len) { + for (j = i; j < priv->qdss_mem_seg_len; j++) { + qdss_mem[j].type = 0; + qdss_mem[j].size = 0; + } + priv->qdss_mem_seg_len = i; + } + + return 0; +} + +void 
icnss_free_qdss_mem(struct icnss_priv *priv) +{ + struct platform_device *pdev = priv->pdev; + struct icnss_fw_mem *qdss_mem = priv->qdss_mem; + int i; + + for (i = 0; i < priv->qdss_mem_seg_len; i++) { + if (qdss_mem[i].va && qdss_mem[i].size) { + icnss_pr_dbg("Freeing memory for QDSS: pa: %pa, size: 0x%zx, type: %u\n", + &qdss_mem[i].pa, qdss_mem[i].size, + qdss_mem[i].type); + dma_free_coherent(&pdev->dev, + qdss_mem[i].size, qdss_mem[i].va, + qdss_mem[i].pa); + qdss_mem[i].va = NULL; + qdss_mem[i].pa = 0; + qdss_mem[i].size = 0; + qdss_mem[i].type = 0; + } + } + priv->qdss_mem_seg_len = 0; +} + +static int icnss_qdss_trace_req_mem_hdlr(struct icnss_priv *priv) +{ + int ret = 0; + + ret = icnss_alloc_qdss_mem(priv); + if (ret < 0) + return ret; + + return wlfw_qdss_trace_mem_info_send_sync(priv); +} + +static void *icnss_qdss_trace_pa_to_va(struct icnss_priv *priv, + u64 pa, u32 size, int *seg_id) +{ + int i = 0; + struct icnss_fw_mem *qdss_mem = priv->qdss_mem; + u64 offset = 0; + void *va = NULL; + u64 local_pa; + u32 local_size; + + for (i = 0; i < priv->qdss_mem_seg_len; i++) { + local_pa = (u64)qdss_mem[i].pa; + local_size = (u32)qdss_mem[i].size; + if (pa == local_pa && size <= local_size) { + va = qdss_mem[i].va; + break; + } + if (pa > local_pa && + pa < local_pa + local_size && + pa + size <= local_pa + local_size) { + offset = pa - local_pa; + va = qdss_mem[i].va + offset; + break; + } + } + + *seg_id = i; + return va; +} + +static int icnss_qdss_trace_save_hdlr(struct icnss_priv *priv, + void *data) +{ + struct icnss_qmi_event_qdss_trace_save_data *event_data = data; + struct icnss_fw_mem *qdss_mem = priv->qdss_mem; + int ret = 0; + int i; + void *va = NULL; + u64 pa; + u32 size; + int seg_id = 0; + + if (!priv->qdss_mem_seg_len) { + icnss_pr_err("Memory for QDSS trace is not available\n"); + return -ENOMEM; + } + + if (event_data->mem_seg_len == 0) { + for (i = 0; i < priv->qdss_mem_seg_len; i++) { + ret = icnss_genl_send_msg(qdss_mem[i].va, + 
ICNSS_GENL_MSG_TYPE_QDSS, + event_data->file_name, + qdss_mem[i].size); + if (ret < 0) { + icnss_pr_err("Fail to save QDSS data: %d\n", + ret); + break; + } + } + } else { + for (i = 0; i < event_data->mem_seg_len; i++) { + pa = event_data->mem_seg[i].addr; + size = event_data->mem_seg[i].size; + va = icnss_qdss_trace_pa_to_va(priv, pa, + size, &seg_id); + if (!va) { + icnss_pr_err("Fail to find matching va for pa %pa\n", + &pa); + ret = -EINVAL; + break; + } + ret = icnss_genl_send_msg(va, ICNSS_GENL_MSG_TYPE_QDSS, + event_data->file_name, size); + if (ret < 0) { + icnss_pr_err("Fail to save QDSS data: %d\n", + ret); + break; + } + } + } + + kfree(data); + return ret; +} + static int icnss_driver_event_register_driver(struct icnss_priv *priv, void *data) { @@ -955,6 +1115,13 @@ static int icnss_driver_event_idle_restart(struct icnss_priv *priv, return ret; } +static int icnss_qdss_trace_free_hdlr(struct icnss_priv *priv) +{ + icnss_free_qdss_mem(priv); + + return 0; +} + static void icnss_driver_event_work(struct work_struct *work) { struct icnss_priv *priv = @@ -1018,6 +1185,16 @@ static void icnss_driver_event_work(struct work_struct *work) ret = icnss_driver_event_fw_init_done(priv, event->data); break; + case ICNSS_DRIVER_EVENT_QDSS_TRACE_REQ_MEM: + ret = icnss_qdss_trace_req_mem_hdlr(priv); + break; + case ICNSS_DRIVER_EVENT_QDSS_TRACE_SAVE: + ret = icnss_qdss_trace_save_hdlr(priv, + event->data); + break; + case ICNSS_DRIVER_EVENT_QDSS_TRACE_FREE: + ret = icnss_qdss_trace_free_hdlr(priv); + break; default: icnss_pr_err("Invalid Event type: %d", event->type); kfree(event); @@ -2486,6 +2663,10 @@ static int icnss_probe(struct platform_device *pdev) init_completion(&priv->unblock_shutdown); + ret = icnss_genl_init(); + if (ret < 0) + icnss_pr_err("ICNSS genl init failed %d\n", ret); + icnss_pr_info("Platform driver probed successfully\n"); return 0; @@ -2506,6 +2687,8 @@ static int icnss_remove(struct platform_device *pdev) icnss_pr_info("Removing driver: 
state: 0x%lx\n", priv->state); + icnss_genl_exit(); + device_init_wakeup(&priv->pdev->dev, false); icnss_debugfs_destroy(priv); diff --git a/drivers/soc/qcom/icnss2/main.h b/drivers/soc/qcom/icnss2/main.h index 26741b0813fc..f90ef2cf1011 100644 --- a/drivers/soc/qcom/icnss2/main.h +++ b/drivers/soc/qcom/icnss2/main.h @@ -21,6 +21,7 @@ #define WCN6750_DEVICE_ID 0x6750 #define ADRASTEA_DEVICE_ID 0xabcd +#define QMI_WLFW_MAX_NUM_MEM_SEG 32 extern uint64_t dynamic_feature_mask; @@ -48,6 +49,9 @@ enum icnss_driver_event_type { ICNSS_DRIVER_EVENT_IDLE_SHUTDOWN, ICNSS_DRIVER_EVENT_IDLE_RESTART, ICNSS_DRIVER_EVENT_FW_INIT_DONE_IND, + ICNSS_DRIVER_EVENT_QDSS_TRACE_REQ_MEM, + ICNSS_DRIVER_EVENT_QDSS_TRACE_SAVE, + ICNSS_DRIVER_EVENT_QDSS_TRACE_FREE, ICNSS_DRIVER_EVENT_MAX, }; @@ -130,6 +134,15 @@ struct icnss_clk_info { u32 enabled; }; +struct icnss_fw_mem { + size_t size; + void *va; + phys_addr_t pa; + u8 valid; + u32 type; + unsigned long attrs; +}; + struct icnss_stats { struct { uint32_t posted; @@ -322,6 +335,8 @@ struct icnss_priv { bool is_ssr; struct kobject *icnss_kobject; atomic_t is_shutdown; + u32 qdss_mem_seg_len; + struct icnss_fw_mem qdss_mem[QMI_WLFW_MAX_NUM_MEM_SEG]; }; struct icnss_reg_info { diff --git a/drivers/soc/qcom/icnss2/qmi.c b/drivers/soc/qcom/icnss2/qmi.c index 9dd0eedd0f41..1ed8df0efc37 100644 --- a/drivers/soc/qcom/icnss2/qmi.c +++ b/drivers/soc/qcom/icnss2/qmi.c @@ -381,6 +381,12 @@ int wlfw_ind_register_send_sync_msg(struct icnss_priv *priv) req->fw_init_done_enable = 1; req->cal_done_enable_valid = 1; req->cal_done_enable = 1; + req->qdss_trace_req_mem_enable_valid = 1; + req->qdss_trace_req_mem_enable = 1; + req->qdss_trace_save_enable_valid = 1; + req->qdss_trace_save_enable = 1; + req->qdss_trace_free_enable_valid = 1; + req->qdss_trace_free_enable = 1; } priv->stats.ind_register_req++; @@ -1313,6 +1319,82 @@ void icnss_handle_rejuvenate(struct icnss_priv *priv) 0, event_data); } +int wlfw_qdss_trace_mem_info_send_sync(struct icnss_priv 
*priv) +{ + struct wlfw_qdss_trace_mem_info_req_msg_v01 *req; + struct wlfw_qdss_trace_mem_info_resp_msg_v01 *resp; + struct qmi_txn txn; + struct icnss_fw_mem *qdss_mem = priv->qdss_mem; + int ret = 0; + int i; + + icnss_pr_dbg("Sending QDSS trace mem info, state: 0x%lx\n", + priv->state); + + req = kzalloc(sizeof(*req), GFP_KERNEL); + if (!req) + return -ENOMEM; + + resp = kzalloc(sizeof(*resp), GFP_KERNEL); + if (!resp) { + kfree(req); + return -ENOMEM; + } + + req->mem_seg_len = priv->qdss_mem_seg_len; + for (i = 0; i < req->mem_seg_len; i++) { + icnss_pr_dbg("Memory for FW, va: 0x%pK, pa: %pa, size: 0x%zx, type: %u\n", + qdss_mem[i].va, &qdss_mem[i].pa, + qdss_mem[i].size, qdss_mem[i].type); + + req->mem_seg[i].addr = qdss_mem[i].pa; + req->mem_seg[i].size = qdss_mem[i].size; + req->mem_seg[i].type = qdss_mem[i].type; + } + + ret = qmi_txn_init(&priv->qmi, &txn, + wlfw_qdss_trace_mem_info_resp_msg_v01_ei, resp); + if (ret < 0) { + icnss_pr_err("Fail to initialize txn for QDSS trace mem request: err %d\n", + ret); + goto out; + } + + ret = qmi_send_request(&priv->qmi, NULL, &txn, + QMI_WLFW_QDSS_TRACE_MEM_INFO_REQ_V01, + WLFW_QDSS_TRACE_MEM_INFO_REQ_MSG_V01_MAX_MSG_LEN, + wlfw_qdss_trace_mem_info_req_msg_v01_ei, req); + if (ret < 0) { + qmi_txn_cancel(&txn); + icnss_pr_err("Fail to send QDSS trace mem info request: err %d\n", + ret); + goto out; + } + + ret = qmi_txn_wait(&txn, priv->ctrl_params.qmi_timeout); + if (ret < 0) { + icnss_pr_err("Fail to wait for response of QDSS trace mem info request, err %d\n", + ret); + goto out; + } + + if (resp->resp.result != QMI_RESULT_SUCCESS_V01) { + icnss_pr_err("QDSS trace mem info request failed, result: %d, err: %d\n", + resp->resp.result, resp->resp.error); + ret = -resp->resp.result; + goto out; + } + + kfree(req); + kfree(resp); + return 0; + +out: + kfree(req); + kfree(resp); + return ret; +} + static void fw_ready_ind_cb(struct qmi_handle *qmi, struct sockaddr_qrtr *sq, struct qmi_txn *txn, const void *data) { @@ 
-1445,6 +1527,118 @@ static void fw_init_done_ind_cb(struct qmi_handle *qmi, 0, NULL); } +static void wlfw_qdss_trace_req_mem_ind_cb(struct qmi_handle *qmi, + struct sockaddr_qrtr *sq, + struct qmi_txn *txn, + const void *data) +{ + struct icnss_priv *priv = + container_of(qmi, struct icnss_priv, qmi); + const struct wlfw_qdss_trace_req_mem_ind_msg_v01 *ind_msg = data; + int i; + + icnss_pr_dbg("Received QMI WLFW QDSS trace request mem indication\n"); + + if (!txn) { + icnss_pr_err("Spurious indication\n"); + return; + } + + if (priv->qdss_mem_seg_len) { + icnss_pr_err("Ignore double allocation for QDSS trace, current len %u\n", + priv->qdss_mem_seg_len); + return; + } + + priv->qdss_mem_seg_len = ind_msg->mem_seg_len; + for (i = 0; i < priv->qdss_mem_seg_len; i++) { + icnss_pr_dbg("QDSS requests for memory, size: 0x%x, type: %u\n", + ind_msg->mem_seg[i].size, + ind_msg->mem_seg[i].type); + priv->qdss_mem[i].type = ind_msg->mem_seg[i].type; + priv->qdss_mem[i].size = ind_msg->mem_seg[i].size; + } + + icnss_driver_event_post(priv, ICNSS_DRIVER_EVENT_QDSS_TRACE_REQ_MEM, + 0, NULL); +} + +static void wlfw_qdss_trace_save_ind_cb(struct qmi_handle *qmi, + struct sockaddr_qrtr *sq, + struct qmi_txn *txn, + const void *data) +{ + struct icnss_priv *priv = + container_of(qmi, struct icnss_priv, qmi); + const struct wlfw_qdss_trace_save_ind_msg_v01 *ind_msg = data; + struct icnss_qmi_event_qdss_trace_save_data *event_data; + int i = 0; + + icnss_pr_dbg("Received QMI WLFW QDSS trace save indication\n"); + + if (!txn) { + icnss_pr_err("Spurious indication\n"); + return; + } + + icnss_pr_dbg("QDSS_trace_save info: source %u, total_size %u, file_name_valid %u, file_name %s\n", + ind_msg->source, ind_msg->total_size, + ind_msg->file_name_valid, ind_msg->file_name); + + if (ind_msg->source == 1) + return; + + event_data = kzalloc(sizeof(*event_data), GFP_KERNEL); + if (!event_data) + return; + + if (ind_msg->mem_seg_valid) { + if (ind_msg->mem_seg_len > QDSS_TRACE_SEG_LEN_MAX) { 
+ icnss_pr_err("Invalid seg len %u\n", + ind_msg->mem_seg_len); + goto free_event_data; + } + icnss_pr_dbg("QDSS_trace_save seg len %u\n", + ind_msg->mem_seg_len); + event_data->mem_seg_len = ind_msg->mem_seg_len; + for (i = 0; i < ind_msg->mem_seg_len; i++) { + event_data->mem_seg[i].addr = ind_msg->mem_seg[i].addr; + event_data->mem_seg[i].size = ind_msg->mem_seg[i].size; + icnss_pr_dbg("seg-%d: addr 0x%llx size 0x%x\n", + i, ind_msg->mem_seg[i].addr, + ind_msg->mem_seg[i].size); + } + } + + event_data->total_size = ind_msg->total_size; + + if (ind_msg->file_name_valid) + strlcpy(event_data->file_name, ind_msg->file_name, + QDSS_TRACE_FILE_NAME_MAX + 1); + else + strlcpy(event_data->file_name, "qdss_trace", + QDSS_TRACE_FILE_NAME_MAX + 1); + + icnss_driver_event_post(priv, ICNSS_DRIVER_EVENT_QDSS_TRACE_SAVE, + 0, event_data); + + return; + +free_event_data: + kfree(event_data); +} + +static void wlfw_qdss_trace_free_ind_cb(struct qmi_handle *qmi, + struct sockaddr_qrtr *sq, + struct qmi_txn *txn, + const void *data) +{ + struct icnss_priv *priv = + container_of(qmi, struct icnss_priv, qmi); + + icnss_driver_event_post(priv, ICNSS_DRIVER_EVENT_QDSS_TRACE_FREE, + 0, NULL); +} static struct qmi_msg_handler wlfw_msg_handlers[] = { { .type = QMI_INDICATION, @@ -1489,6 +1683,30 @@ static struct qmi_msg_handler wlfw_msg_handlers[] = { .decoded_size = sizeof(struct wlfw_fw_init_done_ind_msg_v01), .fn = fw_init_done_ind_cb }, + { + .type = QMI_INDICATION, + .msg_id = QMI_WLFW_QDSS_TRACE_REQ_MEM_IND_V01, + .ei = wlfw_qdss_trace_req_mem_ind_msg_v01_ei, + .decoded_size = + sizeof(struct wlfw_qdss_trace_req_mem_ind_msg_v01), + .fn = wlfw_qdss_trace_req_mem_ind_cb + }, + { + .type = QMI_INDICATION, + .msg_id = QMI_WLFW_QDSS_TRACE_SAVE_IND_V01, + .ei = wlfw_qdss_trace_save_ind_msg_v01_ei, + .decoded_size = + sizeof(struct wlfw_qdss_trace_save_ind_msg_v01), + .fn = wlfw_qdss_trace_save_ind_cb + }, + { + .type = QMI_INDICATION, + .msg_id = QMI_WLFW_QDSS_TRACE_FREE_IND_V01, + .ei 
= wlfw_qdss_trace_free_ind_msg_v01_ei, + .decoded_size = + sizeof(struct wlfw_qdss_trace_free_ind_msg_v01), + .fn = wlfw_qdss_trace_free_ind_cb + }, {} }; diff --git a/drivers/soc/qcom/icnss2/qmi.h b/drivers/soc/qcom/icnss2/qmi.h index 964579b9aac0..9a053d9b7f9d 100644 --- a/drivers/soc/qcom/icnss2/qmi.h +++ b/drivers/soc/qcom/icnss2/qmi.h @@ -6,6 +6,21 @@ #ifndef __ICNSS_QMI_H__ #define __ICNSS_QMI_H__ +#define QDSS_TRACE_SEG_LEN_MAX 32 +#define QDSS_TRACE_FILE_NAME_MAX 16 + +struct icnss_mem_seg { + u64 addr; + u32 size; +}; + +struct icnss_qmi_event_qdss_trace_save_data { + u32 total_size; + u32 mem_seg_len; + struct icnss_mem_seg mem_seg[QDSS_TRACE_SEG_LEN_MAX]; + char file_name[QDSS_TRACE_FILE_NAME_MAX + 1]; +}; + #ifndef CONFIG_ICNSS2_QMI static inline int wlfw_ind_register_send_sync_msg(struct icnss_priv *priv) @@ -108,6 +123,11 @@ int icnss_wlfw_bdf_dnld_send_sync(struct icnss_priv *priv, u32 bdf_type) { return 0; } + +int wlfw_qdss_trace_mem_info_send_sync(struct icnss_priv *priv) +{ + return 0; +} #else int wlfw_ind_register_send_sync_msg(struct icnss_priv *priv); int icnss_connect_to_fw_server(struct icnss_priv *priv, void *data); @@ -142,6 +162,7 @@ int wlfw_device_info_send_msg(struct icnss_priv *priv); int wlfw_wlan_mode_send_sync_msg(struct icnss_priv *priv, enum wlfw_driver_mode_enum_v01 mode); int icnss_wlfw_bdf_dnld_send_sync(struct icnss_priv *priv, u32 bdf_type); +int wlfw_qdss_trace_mem_info_send_sync(struct icnss_priv *priv); #endif #endif /* __ICNSS_QMI_H__*/ From 84780328ad73679ac90b1cac25b22fd9ee238089 Mon Sep 17 00:00:00 2001 From: Manoj Prabhu B Date: Tue, 7 Jan 2020 11:32:54 +0530 Subject: [PATCH 52/90] diag: Prevent resource leakage while copying to userspace Possibility of resource leakage while checking for validity of pid and its task structures is prevented by proper update of pid reference count. 
Change-Id: Ifb38f91a5c3e45248bb08c5341c8a3095585c16f Signed-off-by: Manoj Prabhu B --- drivers/char/diag/diag_memorydevice.c | 50 +++++++++++++++++++-------- 1 file changed, 36 insertions(+), 14 deletions(-) diff --git a/drivers/char/diag/diag_memorydevice.c b/drivers/char/diag/diag_memorydevice.c index 64ecb3d1e115..6177f2b34b4b 100644 --- a/drivers/char/diag/diag_memorydevice.c +++ b/drivers/char/diag/diag_memorydevice.c @@ -8,6 +8,7 @@ #include #include #include +#include #include #include #include @@ -330,7 +331,10 @@ int diag_md_copy_to_user(char __user *buf, int *pret, size_t buf_size, int peripheral = 0; struct diag_md_session_t *session_info = NULL; struct pid *pid_struct = NULL; + struct task_struct *task_s = NULL; + if (!info) + return -EINVAL; for (i = 0; i < NUM_DIAG_MD_DEV && !err; i++) { ch = &diag_md[i]; if (!ch->md_info_inited) @@ -353,11 +357,11 @@ int diag_md_copy_to_user(char __user *buf, int *pret, size_t buf_size, if (!session_info) goto drop_data; - if (session_info && info && + if (session_info && (session_info->pid != info->pid)) continue; - if ((info && (info->peripheral_mask[i] & - MD_PERIPHERAL_MASK(peripheral)) == 0)) + if ((info->peripheral_mask[i] & + MD_PERIPHERAL_MASK(peripheral)) == 0) goto drop_data; pid_struct = find_get_pid(session_info->pid); if (!pid_struct) { @@ -386,35 +390,45 @@ int diag_md_copy_to_user(char __user *buf, int *pret, size_t buf_size, } if (i > 0) { remote_token = diag_get_remote(i); - if (get_pid_task(pid_struct, PIDTYPE_PID)) { + task_s = get_pid_task(pid_struct, PIDTYPE_PID); + if (task_s) { err = copy_to_user(buf + ret, &remote_token, sizeof(int)); - if (err) + if (err) { + put_task_struct(task_s); goto drop_data; + } ret += sizeof(int); + put_task_struct(task_s); } } - /* Copy the length of data being passed */ - if (get_pid_task(pid_struct, PIDTYPE_PID)) { + task_s = get_pid_task(pid_struct, PIDTYPE_PID); + if (task_s) { + + /* Copy the length of data being passed */ err = copy_to_user(buf + ret, (void 
*)&(entry->len), sizeof(int)); - if (err) + if (err) { + put_task_struct(task_s); goto drop_data; + } ret += sizeof(int); - } - /* Copy the actual data being passed */ - if (get_pid_task(pid_struct, PIDTYPE_PID)) { + /* Copy the actual data being passed */ err = copy_to_user(buf + ret, (void *)entry->buf, entry->len); - if (err) + if (err) { + put_task_struct(task_s); goto drop_data; + } ret += entry->len; + put_task_struct(task_s); } + /* * The data is now copied to the user space client, * Notify that the write is complete and delete its @@ -432,14 +446,22 @@ int diag_md_copy_to_user(char __user *buf, int *pret, size_t buf_size, entry->len = 0; entry->ctx = 0; spin_unlock_irqrestore(&ch->lock, flags); + + put_pid(pid_struct); } } *pret = ret; - if (pid_struct && get_pid_task(pid_struct, PIDTYPE_PID)) { - err = copy_to_user(buf + sizeof(int), + pid_struct = find_get_pid(info->pid); + if (pid_struct) { + task_s = get_pid_task(pid_struct, PIDTYPE_PID); + if (task_s) { + err = copy_to_user(buf + sizeof(int), (void *)&num_data, sizeof(int)); + put_task_struct(task_s); + } + put_pid(pid_struct); } diag_ws_on_copy_complete(DIAG_WS_MUX); if (drain_again) From 2895dad73bafb82232337177936e4237f9269f67 Mon Sep 17 00:00:00 2001 From: Kiran Gunda Date: Fri, 24 Apr 2020 10:27:02 +0530 Subject: [PATCH 53/90] power: smb1398: Update the max-ilim current settings Update the max current limit configuration as per the hardware capability. - max-lim for master only config - 5A - max-ilim for master and slave config - 10A While at it, update the min-ilim setting to 750mA as per the hardware recommendation. 
Change-Id: I08793aafd0b416a597c3b51afff4ef624129f731 Signed-off-by: Kiran Gunda --- drivers/power/supply/qcom/smb1398-charger.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/drivers/power/supply/qcom/smb1398-charger.c b/drivers/power/supply/qcom/smb1398-charger.c index 9999181c9506..3d6c3ae2a6a7 100644 --- a/drivers/power/supply/qcom/smb1398-charger.c +++ b/drivers/power/supply/qcom/smb1398-charger.c @@ -205,9 +205,8 @@ #define TAPER_MAIN_ICL_LIMIT_VOTER "TAPER_MAIN_ICL_LIMIT_VOTER" /* Constant definitions */ -/* Need to define max ILIM for smb1398 */ -#define DIV2_MAX_ILIM_UA 3200000 -#define DIV2_MAX_ILIM_DUAL_CP_UA 6400000 +#define DIV2_MAX_ILIM_UA 5000000 +#define DIV2_MAX_ILIM_DUAL_CP_UA 10000000 #define DIV2_ILIM_CFG_PCT 105 #define TAPER_STEPPER_UA_DEFAULT 100000 @@ -1947,7 +1946,7 @@ static int smb1398_div2_cp_parse_dt(struct smb1398_chip *chip) return rc; } - chip->div2_cp_min_ilim_ua = 1000000; + chip->div2_cp_min_ilim_ua = 750000; of_property_read_u32(chip->dev->of_node, "qcom,div2-cp-min-ilim-ua", &chip->div2_cp_min_ilim_ua); From 98d54697a29dfdd2bda8b6ebd75e01edcfe2455b Mon Sep 17 00:00:00 2001 From: Neeraj Soni Date: Sun, 8 Mar 2020 15:44:20 +0530 Subject: [PATCH 54/90] qseecom: Invalidate the buffer after listener operation Listener operation switches the context between secure and non-secure world while user requested qseecom operation is put to wait. Between lisetner operation start and finish CPU can do a speculative data fetch before secure world updates the physical memory of user requested response data buffer which was waiting in qseecom for listener operation to complete. This speculative fetch will pull old data from RAM to cache and user will end up reading stale data. Fix this by invalidating the response data dma buffer after listener call completes. Also remove the invalidate call for listener dma after scm call as data will not be accessed in kernel after scm call is completed. 
Change-Id: I68730a17b515f07a71a040a534f97ce97c65026c Signed-off-by: Neeraj Soni --- drivers/misc/qseecom.c | 25 +++++++++---------------- 1 file changed, 9 insertions(+), 16 deletions(-) diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c index 6c2080d2a3aa..1ca68205775d 100644 --- a/drivers/misc/qseecom.c +++ b/drivers/misc/qseecom.c @@ -2252,10 +2252,6 @@ static int __qseecom_process_incomplete_cmd(struct qseecom_dev_handle *data, goto exit; } - ret = qseecom_dmabuf_cache_operations(ptr_svc->dmabuf, - QSEECOM_CACHE_INVALIDATE); - if (ret) - goto exit; } else { ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len, resp, sizeof(*resp)); @@ -2587,10 +2583,6 @@ static int __qseecom_reentrancy_process_incomplete_cmd( ret, data->client.app_id); goto exit; } - ret = qseecom_dmabuf_cache_operations(ptr_svc->dmabuf, - QSEECOM_CACHE_INVALIDATE); - if (ret) - goto exit; } else { ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len, resp, sizeof(*resp)); @@ -3761,14 +3753,6 @@ static int __qseecom_send_cmd(struct qseecom_dev_handle *data, ret, data->client.app_id); goto exit; } - if (data->client.dmabuf) { - ret = qseecom_dmabuf_cache_operations(data->client.dmabuf, - QSEECOM_CACHE_INVALIDATE); - if (ret) { - pr_err("cache operation failed %d\n", ret); - goto exit; - } - } if (qseecom.qsee_reentrancy_support) { ret = __qseecom_process_reentrancy(&resp, ptr_app, data); @@ -3791,6 +3775,15 @@ static int __qseecom_send_cmd(struct qseecom_dev_handle *data, } } } + + if (data->client.dmabuf) { + ret = qseecom_dmabuf_cache_operations(data->client.dmabuf, + QSEECOM_CACHE_INVALIDATE); + if (ret) { + pr_err("cache operation failed %d\n", ret); + goto exit; + } + } exit: return ret; } From c883524e08c6184826398e4bd8a93a44aad45c55 Mon Sep 17 00:00:00 2001 From: Manoj Prabhu B Date: Fri, 24 Apr 2020 11:20:32 +0530 Subject: [PATCH 55/90] diag: Configure diag over STM for APPS by HW ACCEL command Extend support to configure diag over STM using HW ACCEL 
command for APPS. Change-Id: Ifd9bf9867e910e9df4e1b6bddd94012c82f2be4f Signed-off-by: Manoj Prabhu B --- drivers/char/diag/diagfwd.h | 4 +++- drivers/char/diag/diagfwd_cntl.c | 11 ++++++++++- drivers/char/diag/diagfwd_cntl.h | 6 +++++- 3 files changed, 18 insertions(+), 3 deletions(-) diff --git a/drivers/char/diag/diagfwd.h b/drivers/char/diag/diagfwd.h index fd794912ab22..8960b724684b 100644 --- a/drivers/char/diag/diagfwd.h +++ b/drivers/char/diag/diagfwd.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright (c) 2008-2019, The Linux Foundation. All rights reserved. +/* Copyright (c) 2008-2020, The Linux Foundation. All rights reserved. */ #ifndef DIAGFWD_H @@ -44,4 +44,6 @@ void diag_update_pkt_buffer(unsigned char *buf, uint32_t len, int type); int diag_process_stm_cmd(unsigned char *buf, unsigned char *dest_buf); void diag_md_hdlc_reset_timer_func(struct timer_list *tlist); void diag_update_md_clients(unsigned int type); +void diag_process_stm_mask(uint8_t cmd, uint8_t data_mask, + int data_type); #endif diff --git a/drivers/char/diag/diagfwd_cntl.c b/drivers/char/diag/diagfwd_cntl.c index 5acb25ff7a6a..c41839f98fd2 100644 --- a/drivers/char/diag/diagfwd_cntl.c +++ b/drivers/char/diag/diagfwd_cntl.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/* Copyright (c) 2011-2019, The Linux Foundation. All rights reserved. +/* Copyright (c) 2011-2020, The Linux Foundation. All rights reserved. 
*/ #include @@ -1894,12 +1894,18 @@ int diag_send_passthru_ctrl_pkt(struct diag_hw_accel_cmd_req_t *req_params) pr_err("diag: Unable to send PASSTHRU ctrl packet to peripheral %d, err: %d\n", i, err); } + if ((diagid_mask & DIAG_ID_APPS) && + (hw_accel_type == DIAG_HW_ACCEL_TYPE_STM)) { + diag_process_stm_mask(req_params->operation, + DIAG_STM_APPS, APPS_DATA); + } return 0; } int diagfwd_cntl_init(void) { uint8_t peripheral = 0; + uint32_t diagid_mask = 0; driver->polling_reg_flag = 0; driver->log_on_demand_support = 1; @@ -1920,6 +1926,9 @@ int diagfwd_cntl_init(void) if (!driver->cntl_wq) return -ENOMEM; + diagid_mask = (BITMASK_DIAGID_FMASK | BITMASK_HW_ACCEL_STM_V1); + process_diagid_v2_feature_mask(DIAG_ID_APPS, diagid_mask); + return 0; } diff --git a/drivers/char/diag/diagfwd_cntl.h b/drivers/char/diag/diagfwd_cntl.h index b714d5e61933..b78f7e33e366 100644 --- a/drivers/char/diag/diagfwd_cntl.h +++ b/drivers/char/diag/diagfwd_cntl.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright (c) 2011-2019, The Linux Foundation. All rights reserved. +/* Copyright (c) 2011-2020, The Linux Foundation. All rights reserved. */ #ifndef DIAGFWD_CNTL_H @@ -91,6 +91,10 @@ #define MAX_DIAGID_STR_LEN 30 #define MIN_DIAGID_STR_LEN 5 +#define BITMASK_DIAGID_FMASK 0x0001 +#define BITMASK_HW_ACCEL_STM_V1 0x0002 +#define BITMASK_HW_ACCEL_ATB_V1 0x0004 + struct diag_ctrl_pkt_header_t { uint32_t pkt_id; uint32_t len; From 26bd9fb0571c4ab0c894daefeb3b9d8837508ac9 Mon Sep 17 00:00:00 2001 From: rbandi Date: Wed, 15 Apr 2020 14:44:37 -0700 Subject: [PATCH 56/90] HID: qvr: axis orientation correction Refactoring code to avoid sensor data being copied twice. 
Change-Id: I83bbc2da7777291c1560fe00675c603b087fa0ad Signed-off-by: rbandi --- drivers/hid/hid-qvr.c | 9 --------- 1 file changed, 9 deletions(-) diff --git a/drivers/hid/hid-qvr.c b/drivers/hid/hid-qvr.c index b5e51b2a2a43..b7bc8cde2499 100644 --- a/drivers/hid/hid-qvr.c +++ b/drivers/hid/hid-qvr.c @@ -305,15 +305,6 @@ static int qvr_send_package_wrap(u8 *message, int msize, struct hid_device *hid) data->mts = data->ats; data->gts = data->ats; - data->ax = imuData.ax0; - data->ay = imuData.ay0; - data->az = imuData.az0; - data->gx = imuData.gx0; - data->gy = imuData.gy0; - data->gz = imuData.gz0; - data->mx = imuData.my0; - data->my = imuData.mx0; - data->mz = imuData.mz0; data->ax = imuData.ax0; data->ay = imuData.ay0; data->az = imuData.az0; From 46fc664eefbf2218afa72ba3896f896545d50a8e Mon Sep 17 00:00:00 2001 From: Rajesh Bharathwaj Date: Mon, 27 Jan 2020 15:54:03 -0800 Subject: [PATCH 57/90] max31760: Maxim Fan Controller Driver This adds support for Maxim(MAX31760) Fan Controller. Enables and runs the fan at 25Khz by default. Change-Id: I6553f4846e0420c80aaabd611510baa756871cdf Signed-off-by: Rajesh Bharathwaj --- drivers/misc/Kconfig | 9 + drivers/misc/Makefile | 1 + drivers/misc/max31760.c | 374 ++++++++++++++++++++++++++++++++++++++++ 3 files changed, 384 insertions(+) create mode 100644 drivers/misc/max31760.c diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig index 986061f8b265..7fe67786c2ad 100644 --- a/drivers/misc/Kconfig +++ b/drivers/misc/Kconfig @@ -604,6 +604,15 @@ config QTI_XR_SMRTVWR_MISC driver initializes gpios, enables/disables LDOs that are part of XR standalone reference device. +config QTI_MAXIM_FAN_CONTROLLER + tristate "QTI MAXIM fan controller driver support" + help + This driver supports the Maxim(MAX31760) fan controller. + This driver exposes i2c control to control registers for + setting different PWM, different temperature settings etc. 
+ Also, this driver initializes the power for the fan controller + and exposes sysfs node to control different speeds of fan. + source "drivers/misc/c2port/Kconfig" source "drivers/misc/eeprom/Kconfig" source "drivers/misc/cb710/Kconfig" diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile index 4968aeb15431..78f1822fb456 100644 --- a/drivers/misc/Makefile +++ b/drivers/misc/Makefile @@ -72,5 +72,6 @@ obj-$(CONFIG_OKL4_USER_VIPC) += okl4-vipc.o obj-$(CONFIG_OKL4_GUEST) += okl4-panic.o obj-$(CONFIG_OKL4_LINK_SHBUF) += okl4-link-shbuf.o obj-$(CONFIG_WIGIG_SENSING_SPI) += wigig_sensing.o +obj-$(CONFIG_QTI_MAXIM_FAN_CONTROLLER) += max31760.o obj-$(CONFIG_QTI_XR_SMRTVWR_MISC) += qxr-stdalonevwr.o diff --git a/drivers/misc/max31760.c b/drivers/misc/max31760.c new file mode 100644 index 000000000000..2479583fd2f5 --- /dev/null +++ b/drivers/misc/max31760.c @@ -0,0 +1,374 @@ +// SPDX-License-Identifier: GPL-2.0-only + +/* + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct max31760 { + struct device *dev; + u8 i2c_addr; + struct regmap *regmap; + u32 fan_pwr_en; + u32 fan_pwr_bp; + struct i2c_client *i2c_client; + int pwm; + bool fan_off; +}; + +static void turn_gpio(struct max31760 *pdata, bool on) +{ + if (on) { + gpio_direction_output(pdata->fan_pwr_en, 0); + gpio_set_value(pdata->fan_pwr_en, 1); + pr_debug("%s gpio:%d set to high\n", __func__, + pdata->fan_pwr_en); + msleep(20); + gpio_direction_output(pdata->fan_pwr_bp, 0); + gpio_set_value(pdata->fan_pwr_bp, 1); + pr_debug("%s gpio:%d set to high\n", __func__, + pdata->fan_pwr_bp); + msleep(20); + } else { + gpio_direction_output(pdata->fan_pwr_en, 1); + gpio_set_value(pdata->fan_pwr_en, 0); + pr_debug("%s gpio:%d set to low\n", __func__, + pdata->fan_pwr_en); + msleep(20); + 
gpio_direction_output(pdata->fan_pwr_bp, 1); + gpio_set_value(pdata->fan_pwr_bp, 0); + pr_debug("%s gpio:%d set to low\n", __func__, + pdata->fan_pwr_bp); + msleep(20); + } +} + +static int max31760_i2c_reg_get(struct max31760 *pdata, + u8 reg) +{ + int ret; + u32 val1; + + pr_debug("%s, reg:%x\n", __func__, reg); + ret = regmap_read(pdata->regmap, (unsigned int)reg, &val1); + if (ret < 0) { + pr_err("%s failed reading reg 0x%02x failure\n", __func__, reg); + return ret; + } + + pr_debug("%s success reading reg 0x%x=0x%x, val1=%x\n", + __func__, reg, val1, val1); + + return 0; +} + +static int max31760_i2c_reg_set(struct max31760 *pdata, + u8 reg, u8 val) +{ + int ret; + int i; + + for (i = 0; i < 10; i++) { + ret = regmap_write(pdata->regmap, reg, val); + if (ret >= 0) + return ret; + msleep(20); + } + if (ret < 0) + pr_err("%s loop:%d failed to write reg 0x%02x=0x%02x\n", + __func__, i, reg, val); + return ret; +} + +static ssize_t fan_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct max31760 *pdata; + int ret; + + pdata = dev_get_drvdata(dev); + if (!pdata) { + pr_err("invalid driver pointer\n"); + return -ENODEV; + } + + if (pdata->fan_off) + ret = scnprintf(buf, PAGE_SIZE, "off\n"); + else + ret = scnprintf(buf, PAGE_SIZE, "0x%x\n", pdata->pwm); + + return ret; +} + +static ssize_t fan_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + long val; + struct max31760 *pdata; + + pdata = dev_get_drvdata(dev); + if (!pdata) { + pr_err("invalid driver pointer\n"); + return -ENODEV; + } + + kstrtol(buf, 0, &val); + pr_debug("%s, count:%d val:%lx, buf:%s\n", + __func__, count, val, buf); + + if (val == 0xff) { + turn_gpio(pdata, false); + pdata->fan_off = true; + } else if (val == 0xfe) { + pdata->fan_off = false; + turn_gpio(pdata, true); + max31760_i2c_reg_set(pdata, 0x00, pdata->pwm); + } else { + max31760_i2c_reg_set(pdata, 0x00, (int)val); + pdata->pwm = (int)val; + } + + return count; +} 
+ +static DEVICE_ATTR_RW(fan); + +static struct attribute *max31760_fs_attrs[] = { + &dev_attr_fan.attr, + NULL +}; + +static struct attribute_group max31760_fs_attr_group = { + .attrs = max31760_fs_attrs, +}; + +static int max31760_parse_dt(struct device *dev, + struct max31760 *pdata) +{ + struct device_node *np = dev->of_node; + int ret; + + pdata->fan_pwr_en = + of_get_named_gpio(np, "qcom,fan-pwr-en", 0); + if (!gpio_is_valid(pdata->fan_pwr_en)) { + pr_err("%s fan_pwr_en gpio not specified\n", __func__); + ret = -EINVAL; + } else { + ret = gpio_request(pdata->fan_pwr_en, "fan_pwr_en"); + if (ret) { + pr_err("max31760 fan_pwr_en gpio request failed\n"); + goto error1; + } + } + + pdata->fan_pwr_bp = + of_get_named_gpio(np, "qcom,fan-pwr-bp", 0); + if (!gpio_is_valid(pdata->fan_pwr_bp)) { + pr_err("%s fan_pwr_bp gpio not specified\n", __func__); + ret = -EINVAL; + } else + ret = gpio_request(pdata->fan_pwr_bp, "fan_pwr_bp"); + if (ret) { + pr_err("max31760 fan_pwr_bp gpio request failed\n"); + goto error2; + } + turn_gpio(pdata, true); + + return ret; + +error2: + gpio_free(pdata->fan_pwr_bp); +error1: + gpio_free(pdata->fan_pwr_en); + return ret; +} + +static int max31760_fan_pwr_enable_vregs(struct device *dev, + struct max31760 *pdata) +{ + int ret; + struct regulator *reg; + + /* Fan Control LDO L10A */ + reg = devm_regulator_get(dev, "pm8150_l10"); + if (!IS_ERR(reg)) { + regulator_set_load(reg, 600000); + ret = regulator_enable(reg); + if (ret < 0) { + pr_err("%s pm8150_l10 failed\n", __func__); + return -EINVAL; + } + } + + /* Fan Control LDO S4 */ + reg = devm_regulator_get(dev, "pm8150_s4"); + if (!IS_ERR(reg)) { + regulator_set_load(reg, 600000); + ret = regulator_enable(reg); + if (ret < 0) { + pr_err("%s pm8150_s4 failed\n", __func__); + return -EINVAL; + } + } + + return ret; +} + +static const struct regmap_config max31760_regmap = { + .reg_bits = 8, + .val_bits = 8, + .max_register = 0xFF, +}; + +static int max31760_probe(struct i2c_client 
*client, + const struct i2c_device_id *id) +{ + int ret; + struct max31760 *pdata; + + if (!client || !client->dev.of_node) { + pr_err("%s invalid input\n", __func__); + return -EINVAL; + } + + if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { + pr_err("%s device doesn't support I2C\n", __func__); + return -ENODEV; + } + + pdata = devm_kzalloc(&client->dev, + sizeof(struct max31760), GFP_KERNEL); + if (!pdata) + return -ENOMEM; + + pdata->regmap = devm_regmap_init_i2c(client, &max31760_regmap); + if (IS_ERR(pdata->regmap)) { + ret = PTR_ERR(pdata->regmap); + pr_err("%s Failed to allocate regmap: %d\n", __func__, ret); + return -EINVAL; + } + + ret = max31760_parse_dt(&client->dev, pdata); + if (ret) { + pr_err("%s failed to parse device tree\n", __func__); + return -EINVAL; + } + + ret = max31760_fan_pwr_enable_vregs(&client->dev, pdata); + if (ret) { + pr_err("%s failed to pwr regulators\n", __func__); + return -EINVAL; + } + + pdata->dev = &client->dev; + i2c_set_clientdata(client, pdata); + + pdata->i2c_client = client; + + dev_set_drvdata(&client->dev, pdata); + + ret = sysfs_create_group(&pdata->dev->kobj, &max31760_fs_attr_group); + if (ret) + pr_err("%s unable to register max31760 sysfs nodes\n"); + + /* 00 - 0x01 -- 33Hz */ + /* 01 - 0x09 -- 150Hz */ + /* 10 - 0x11 -- 1500Hz */ + /* 11 - 0x19 -- 25Khz */ + pdata->pwm = 0x19; + max31760_i2c_reg_set(pdata, 0x00, pdata->pwm); + max31760_i2c_reg_set(pdata, 0x01, 0x11); + max31760_i2c_reg_set(pdata, 0x02, 0x31); + max31760_i2c_reg_set(pdata, 0x03, 0x45); + max31760_i2c_reg_set(pdata, 0x04, 0xff); + max31760_i2c_reg_set(pdata, 0x50, 0xcf); + max31760_i2c_reg_set(pdata, 0x01, 0x11); + max31760_i2c_reg_set(pdata, 0x00, pdata->pwm); + max31760_i2c_reg_get(pdata, 0x00); + + return ret; +} + +static int max31760_remove(struct i2c_client *client) +{ + struct max31760 *pdata = i2c_get_clientdata(client); + + if (!pdata) + goto end; + + sysfs_remove_group(&pdata->dev->kobj, &max31760_fs_attr_group); + 
turn_gpio(pdata, false); +end: + return 0; +} + + +static void max31760_shutdown(struct i2c_client *client) +{ +} + +static int max31760_suspend(struct device *dev, pm_message_t state) +{ + struct max31760 *pdata = dev_get_drvdata(dev); + + dev_dbg(dev, "suspend\n"); + if (pdata) + turn_gpio(pdata, false); + return 0; +} + +static int max31760_resume(struct device *dev) +{ + struct max31760 *pdata = dev_get_drvdata(dev); + + dev_dbg(dev, "resume\n"); + if (pdata) { + turn_gpio(pdata, true); + max31760_i2c_reg_set(pdata, 0x00, pdata->pwm); + } + return 0; +} + +static const struct of_device_id max31760_id_table[] = { + { .compatible = "maxim,xrfancontroller",}, + { }, +}; +static const struct i2c_device_id max31760_i2c_table[] = { + { "xrfancontroller", 0 }, + { }, +}; + +static struct i2c_driver max31760_i2c_driver = { + .probe = max31760_probe, + .remove = max31760_remove, + .shutdown = max31760_shutdown, + .driver = { + .name = "maxim xrfancontroller", + .of_match_table = max31760_id_table, + .suspend = max31760_suspend, + .resume = max31760_resume, + }, + .id_table = max31760_i2c_table, +}; +module_i2c_driver(max31760_i2c_driver); +MODULE_DEVICE_TABLE(i2c, max31760_i2c_table); +MODULE_DESCRIPTION("Maxim 31760 Fan Controller"); +MODULE_LICENSE("GPL v2"); From e96d676bd389b35bc08c944be27d72013819c6fc Mon Sep 17 00:00:00 2001 From: Yue Ma Date: Fri, 24 Apr 2020 15:01:30 -0700 Subject: [PATCH 58/90] cnss2: Clear host driver ops if register driver gets killed Avoid any host driver ops access after register driver gets killed by marking it as NULL. 
Change-Id: Idd268d5d8d31569c4272fde566866c4d79bc1cad Signed-off-by: Yue Ma --- drivers/net/wireless/cnss2/pci.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/wireless/cnss2/pci.c b/drivers/net/wireless/cnss2/pci.c index 13893d441344..5d7849ddf864 100644 --- a/drivers/net/wireless/cnss2/pci.c +++ b/drivers/net/wireless/cnss2/pci.c @@ -2078,6 +2078,7 @@ int cnss_wlan_register_driver(struct cnss_wlan_driver *driver_ops) if (ret == -EINTR) { cnss_pr_dbg("Register driver work is killed\n"); del_timer(&plat_priv->fw_boot_timer); + pci_priv->driver_ops = NULL; } return ret; From 833af105c3e9654b625b17c10255209e62579bec Mon Sep 17 00:00:00 2001 From: Ram Prakash Gupta Date: Wed, 18 Mar 2020 14:01:17 +0530 Subject: [PATCH 59/90] mmc: core: Enable force hw reset During error recovery set need hw reset to handle ICE error where cqe reset is must. Change-Id: I198cc06c18910c48b85a5298aebb9024b94d2470 Signed-off-by: Ram Prakash Gupta --- drivers/mmc/core/block.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index 9e7fd7f604d4..f2b852e22782 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -1541,9 +1541,10 @@ void mmc_blk_cqe_recovery(struct mmc_queue *mq) pr_debug("%s: CQE recovery start\n", mmc_hostname(host)); err = mmc_cqe_recovery(host); - if (err) + if (err || host->need_hw_reset) mmc_blk_reset(mq->blkdata, host, MMC_BLK_CQE_RECOVERY); mmc_blk_reset_success(mq->blkdata, MMC_BLK_CQE_RECOVERY); + host->need_hw_reset = false; pr_debug("%s: CQE recovery done\n", mmc_hostname(host)); } From 6ac831999ab03cb9a7033d3987f126b6257611a4 Mon Sep 17 00:00:00 2001 From: Pavankumar Kondeti Date: Sun, 19 Apr 2020 15:51:29 +0530 Subject: [PATCH 60/90] sched: Improve the scheduler This change is for general scheduler improvements. 
Change-Id: I3a220142f08a9664845b4d0e9918ec7c48bb11f7 Signed-off-by: Pavankumar Kondeti --- kernel/sched/core.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index d8ca365c476a..c2b618dfb67a 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -5042,7 +5042,7 @@ bool is_sched_lib_based_app(pid_t pid) char *libname, *lib_list; struct vm_area_struct *vma; char path_buf[LIB_PATH_LENGTH]; - char tmp_lib_name[LIB_PATH_LENGTH]; + char *tmp_lib_name; bool found = false; struct task_struct *p; struct mm_struct *mm; @@ -5050,11 +5050,16 @@ bool is_sched_lib_based_app(pid_t pid) if (strnlen(sched_lib_name, LIB_PATH_LENGTH) == 0) return false; + tmp_lib_name = kmalloc(LIB_PATH_LENGTH, GFP_KERNEL); + if (!tmp_lib_name) + return false; + rcu_read_lock(); p = find_process_by_pid(pid); if (!p) { rcu_read_unlock(); + kfree(tmp_lib_name); return false; } @@ -5092,6 +5097,7 @@ bool is_sched_lib_based_app(pid_t pid) mmput(mm); put_task_struct: put_task_struct(p); + kfree(tmp_lib_name); return found; } From 5a2092b1b728ef7fc8a4ee7054bb6bfff15751f1 Mon Sep 17 00:00:00 2001 From: Charan Teja Reddy Date: Thu, 5 Mar 2020 17:57:49 +0530 Subject: [PATCH 61/90] mm: allocate swapin pages from movable zone We allow pages to movable zone only when __GFP_CMA is set along with other required flags. When an anon page first allocated will go to the movable zone but when the same is swapped out and then swapin is not allowed to go. Fix it. 
Fixes: 0157fbd79dbe ("mm: Allow only __GFP_CMA allocations from Movable zone") Change-Id: I746299259a91819499e948ca34fe8880c8eaa175 Signed-off-by: Charan Teja Reddy --- mm/memory.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mm/memory.c b/mm/memory.c index 8fec1a42129b..f0e4d0bb0742 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -3145,8 +3145,8 @@ vm_fault_t do_swap_page(struct vm_fault *vmf) if (!page) { if (skip_swapcache) { - page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, - vmf->address); + page = alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_CMA, + vma, vmf->address); if (page) { __SetPageLocked(page); __SetPageSwapBacked(page); From b6860ef198650257f8808a8cab96d4d37c80dc78 Mon Sep 17 00:00:00 2001 From: Prudhvi Yarlagadda Date: Tue, 7 Apr 2020 18:05:40 +0530 Subject: [PATCH 62/90] msm_geni_serial: Use the common driver API for dma_alloc Use the common driver APIs instead of directly using dma_alloc_coherent,dma_free_coherent APIs. Issue is that we need to pass iommu dev as a handler while using dma APIs where as we were passing common driver dev node as a handler which is not correct. 
Change-Id: Ia8da31f8b9d761a723b3712c7b0738e71838fa56 Signed-off-by: Prudhvi Yarlagadda --- drivers/tty/serial/msm_geni_serial.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/drivers/tty/serial/msm_geni_serial.c b/drivers/tty/serial/msm_geni_serial.c index 3449f1e81227..095e5cba5fa2 100644 --- a/drivers/tty/serial/msm_geni_serial.c +++ b/drivers/tty/serial/msm_geni_serial.c @@ -2085,9 +2085,9 @@ static int msm_geni_serial_port_setup(struct uart_port *uport) goto exit_portsetup; } - msm_port->rx_buf = dma_alloc_coherent(msm_port->wrapper_dev, - DMA_RX_BUF_SIZE, &dma_address, GFP_KERNEL); - + msm_port->rx_buf = + geni_se_iommu_alloc_buf(msm_port->wrapper_dev, + &dma_address, DMA_RX_BUF_SIZE); if (!msm_port->rx_buf) { devm_kfree(uport->dev, msm_port->rx_fifo); msm_port->rx_fifo = NULL; @@ -2136,8 +2136,8 @@ static int msm_geni_serial_port_setup(struct uart_port *uport) return 0; free_dma: if (msm_port->rx_dma) { - dma_free_coherent(msm_port->wrapper_dev, DMA_RX_BUF_SIZE, - msm_port->rx_buf, msm_port->rx_dma); + geni_se_iommu_free_buf(msm_port->wrapper_dev, + &msm_port->rx_dma, msm_port->rx_buf, DMA_RX_BUF_SIZE); msm_port->rx_dma = (dma_addr_t)NULL; } exit_portsetup: @@ -3220,8 +3220,8 @@ static int msm_geni_serial_remove(struct platform_device *pdev) wakeup_source_trash(&port->geni_wake); uart_remove_one_port(drv, &port->uport); if (port->rx_dma) { - dma_free_coherent(port->wrapper_dev, DMA_RX_BUF_SIZE, - port->rx_buf, port->rx_dma); + geni_se_iommu_free_buf(port->wrapper_dev, &port->rx_dma, + port->rx_buf, DMA_RX_BUF_SIZE); port->rx_dma = (dma_addr_t)NULL; } return 0; From 51e6542e2d3b4fec9ed5f6f6861c56cd22ea0753 Mon Sep 17 00:00:00 2001 From: Harshdeep Dhatt Date: Thu, 8 Aug 2019 20:50:37 -0600 Subject: [PATCH 63/90] msm: kgsl: Remove the extra recovery hop Currently, when we try to boot the gmu and it fails, we force off the gmu gdsc followed by an inline attempt to boot the gmu again. 
To do this, we go an extra mile to do complex snapshot and power state magic. The same goal can be achieved by a simpler and easier to maintain code path: Set the state to KGSL_STATE_RESET upon gmu boot failure so that the error path performs a RESET -> SLUMBER transition. Here, we can reset the GMU so that next SLUMBER exit can start afresh. Change-Id: I4d3e75f40062e9fe5d71fe1e6010dfff75e09321 Signed-off-by: Harshdeep Dhatt --- drivers/gpu/msm/kgsl_gmu.c | 3 ++ drivers/gpu/msm/kgsl_pwrctrl.c | 70 +++++++++------------------------- 2 files changed, 20 insertions(+), 53 deletions(-) diff --git a/drivers/gpu/msm/kgsl_gmu.c b/drivers/gpu/msm/kgsl_gmu.c index 7efe0278691a..a669768cdeba 100644 --- a/drivers/gpu/msm/kgsl_gmu.c +++ b/drivers/gpu/msm/kgsl_gmu.c @@ -1532,6 +1532,9 @@ static int gmu_suspend(struct kgsl_device *device) regulator_set_mode(gmu->cx_gdsc, REGULATOR_MODE_NORMAL); dev_err(&gmu->pdev->dev, "Suspended GMU\n"); + + clear_bit(GMU_FAULT, &device->gmu_core.flags); + return 0; } diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c index e60d1a5a6b1c..7830cce74d5a 100644 --- a/drivers/gpu/msm/kgsl_pwrctrl.c +++ b/drivers/gpu/msm/kgsl_pwrctrl.c @@ -2740,7 +2740,6 @@ static int _aware(struct kgsl_device *device) { int status = 0; - unsigned int state = device->state; switch (device->state) { case KGSL_STATE_RESET: @@ -2767,65 +2766,23 @@ _aware(struct kgsl_device *device) kgsl_pwrscale_midframe_timer_cancel(device); break; case KGSL_STATE_SLUMBER: - /* if GMU already in FAULT */ - if (gmu_core_isenabled(device) && - test_bit(GMU_FAULT, &device->gmu_core.flags)) { - status = -EINVAL; - break; - } - status = kgsl_pwrctrl_enable(device); + if (status && gmu_core_isenabled(device)) + /* + * SLUMBER -> AWARE failed which means GMU boot failed. + * Make sure we reset the GMU while transitioning back + * to SLUMBER. 
+ */ + kgsl_pwrctrl_set_state(device, KGSL_STATE_RESET); + break; default: status = -EINVAL; } - if (status) { - if (gmu_core_isenabled(device)) { - /* GMU hang recovery */ - kgsl_pwrctrl_set_state(device, KGSL_STATE_RESET); - set_bit(GMU_FAULT, &device->gmu_core.flags); - status = kgsl_pwrctrl_enable(device); - /* Cannot recover GMU failure GPU will not power on */ - - if (WARN_ONCE(status, "Failed to recover GMU\n")) { - if (device->snapshot) - device->snapshot->recovered = false; - /* - * On recovery failure, we are clearing - * GMU_FAULT bit and also not keeping - * the state as RESET to make sure any - * attempt to wake GMU/GPU after this - * is treated as a fresh start. But on - * recovery failure, GMU HS, clocks and - * IRQs are still ON/enabled because of - * which next GMU/GPU wakeup results in - * multiple warnings from GMU start as HS, - * clocks and IRQ were ON while doing a - * fresh start i.e. wake from SLUMBER. - * - * Suspend the GMU on recovery failure - * to make sure next attempt to wake up - * GMU/GPU is indeed a fresh start. 
- */ - kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF); - gmu_core_suspend(device); - kgsl_pwrctrl_set_state(device, state); - } else { - if (device->snapshot) - device->snapshot->recovered = true; - kgsl_pwrctrl_set_state(device, - KGSL_STATE_AWARE); - } - - clear_bit(GMU_FAULT, &device->gmu_core.flags); - return status; - } - - kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE); - } else { + if (!status) kgsl_pwrctrl_set_state(device, KGSL_STATE_AWARE); - } + return status; } @@ -2912,6 +2869,13 @@ _slumber(struct kgsl_device *device) kgsl_pwrctrl_disable(device); kgsl_pwrctrl_set_state(device, KGSL_STATE_SLUMBER); break; + case KGSL_STATE_RESET: + if (gmu_core_isenabled(device)) { + /* Reset the GMU if we failed to boot the GMU */ + gmu_core_suspend(device); + kgsl_pwrctrl_set_state(device, KGSL_STATE_SLUMBER); + } + break; default: kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE); break; From bfc96c98ec0aacb82e6272d42925b7287a99c924 Mon Sep 17 00:00:00 2001 From: Harshdeep Dhatt Date: Thu, 8 Aug 2019 21:00:42 -0600 Subject: [PATCH 64/90] msm: kgsl: Set gmu fault inside gmu_snapshot The two always happen together. So set the gmu_fault inside the gmu_snapshot function. Also, if we have already recorded a gmu fault, then do not send nmi or try to snapshot a gmu which is already in nmi. 
Change-Id: I403a9c2c3cb7a1330a7931c41a23b4b4a2b66998 Signed-off-by: Harshdeep Dhatt --- drivers/gpu/msm/kgsl_gmu.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/msm/kgsl_gmu.c b/drivers/gpu/msm/kgsl_gmu.c index 7efe0278691a..c10fcb62fb4a 100644 --- a/drivers/gpu/msm/kgsl_gmu.c +++ b/drivers/gpu/msm/kgsl_gmu.c @@ -1541,6 +1541,10 @@ static void gmu_snapshot(struct kgsl_device *device) struct gmu_dev_ops *gmu_dev_ops = GMU_DEVICE_OPS(device); struct gmu_device *gmu = KGSL_GMU_DEVICE(device); + /* Abstain from sending another nmi or over-writing snapshot */ + if (test_and_set_bit(GMU_FAULT, &device->gmu_core.flags)) + return; + adreno_gmu_send_nmi(adreno_dev); /* Wait for the NMI to be handled */ udelay(100); @@ -1707,12 +1711,6 @@ static void gmu_stop(struct kgsl_device *device) return; error: - /* - * The power controller will change state to SLUMBER anyway - * Set GMU_FAULT flag to indicate to power contrller - * that hang recovery is needed to power on GPU - */ - set_bit(GMU_FAULT, &device->gmu_core.flags); dev_err(&gmu->pdev->dev, "Failed to stop GMU\n"); gmu_core_snapshot(device); } From 6ef85558ced728713c1dfa79163c6348452bb09f Mon Sep 17 00:00:00 2001 From: Harshdeep Dhatt Date: Thu, 8 Aug 2019 21:03:19 -0600 Subject: [PATCH 65/90] msm: kgsl: Correctly handle CP_INIT failure A stuck CP means that we will not be able to enter slumber because gmu to cp interaction is impacted. Therefore, take a gmu snapshot which also sets gmu fault. The gmu fault will indicate the clean up code to force a gmu_suspend() so as to set the stage for the next submission to start afresh. 
Change-Id: Ia90e6c447e9c1c87e04cf9ca3ed87eed5c17b07c Signed-off-by: Harshdeep Dhatt --- drivers/gpu/msm/adreno.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c index 1a28f59dd8ea..e041f87b73a6 100644 --- a/drivers/gpu/msm/adreno.c +++ b/drivers/gpu/msm/adreno.c @@ -3052,7 +3052,15 @@ void adreno_spin_idle_debug(struct adreno_device *adreno_dev, dev_err(device->dev, " hwfault=%8.8X\n", hwfault); - kgsl_device_snapshot(device, NULL, adreno_gmu_gpu_fault(adreno_dev)); + /* + * If CP is stuck, gmu may not perform as expected. So force a gmu + * snapshot which captures entire state as well as sets the gmu fault + * because things need to be reset anyway. + */ + if (gmu_core_isenabled(device)) + gmu_core_snapshot(device); + else + kgsl_device_snapshot(device, NULL, false); } /** From 21c4babb8d8ca6e5f959635a4b0a667ca0eb4694 Mon Sep 17 00:00:00 2001 From: Harshdeep Dhatt Date: Thu, 8 Aug 2019 20:58:55 -0600 Subject: [PATCH 66/90] msm: kgsl: Handle the very first gmu boot failure The very first boot starts from the INIT state. If the gmu fails to boot here, set the state to KGSL_STATE_RESET to effect a RESET -> INIT transition where we reset the GMU. Change-Id: I0fddf745d299b855aab0c1989997dcab5e77954a Signed-off-by: Harshdeep Dhatt --- drivers/gpu/msm/kgsl_pwrctrl.c | 41 ++++++++++++++++++++-------------- 1 file changed, 24 insertions(+), 17 deletions(-) diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c index 7830cce74d5a..0e41cca60468 100644 --- a/drivers/gpu/msm/kgsl_pwrctrl.c +++ b/drivers/gpu/msm/kgsl_pwrctrl.c @@ -2632,12 +2632,22 @@ static int _init(struct kgsl_device *device) int status = 0; switch (device->state) { + case KGSL_STATE_RESET: + if (gmu_core_isenabled(device)) { + /* + * If we fail a INIT -> AWARE transition, we will + * transition back to INIT. However, we must hard reset + * the GMU as we go back to INIT. 
This is done by + * forcing a RESET -> INIT transition. + */ + gmu_core_suspend(device); + kgsl_pwrctrl_set_state(device, KGSL_STATE_INIT); + } + break; case KGSL_STATE_NAP: /* Force power on to do the stop */ status = kgsl_pwrctrl_enable(device); case KGSL_STATE_ACTIVE: - /* fall through */ - case KGSL_STATE_RESET: kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF); del_timer_sync(&device->idle_timer); kgsl_pwrscale_midframe_timer_cancel(device); @@ -2749,12 +2759,6 @@ _aware(struct kgsl_device *device) status = gmu_core_start(device); break; case KGSL_STATE_INIT: - /* if GMU already in FAULT */ - if (gmu_core_isenabled(device) && - test_bit(GMU_FAULT, &device->gmu_core.flags)) { - status = -EINVAL; - break; - } status = kgsl_pwrctrl_enable(device); break; /* The following 3 cases shouldn't occur, but don't panic. */ @@ -2767,20 +2771,23 @@ _aware(struct kgsl_device *device) break; case KGSL_STATE_SLUMBER: status = kgsl_pwrctrl_enable(device); - if (status && gmu_core_isenabled(device)) - /* - * SLUMBER -> AWARE failed which means GMU boot failed. - * Make sure we reset the GMU while transitioning back - * to SLUMBER. - */ - kgsl_pwrctrl_set_state(device, KGSL_STATE_RESET); - break; default: status = -EINVAL; } - if (!status) + if (status && gmu_core_isenabled(device)) + /* + * If a SLUMBER/INIT -> AWARE fails, we transition back to + * SLUMBER/INIT state. We must hard reset the GMU while + * transitioning back to SLUMBER/INIT. A RESET -> AWARE + * transition is different. It happens when dispatcher is + * attempting reset/recovery as part of fault handling. If it + * fails, we should still transition back to RESET in case + * we want to attempt another reset/recovery. 
+ */ + kgsl_pwrctrl_set_state(device, KGSL_STATE_RESET); + else kgsl_pwrctrl_set_state(device, KGSL_STATE_AWARE); return status; From b7e01cc17059f3a643f67be0945c78c13b4fe7be Mon Sep 17 00:00:00 2001 From: Harshdeep Dhatt Date: Tue, 4 Dec 2018 13:55:44 -0700 Subject: [PATCH 67/90] msm: kgsl: Correctly handle gmu fault interrupts Send NMI to gmu so that we don't lose critical gmu state. We do not want to trigger dispatcher because we don't know whether the gpu was active or not. The best course of action is to wait for the next kgsl -> GMU interaction to timeout thus triggering GMU snapshot and appropriate recovery steps based on whether gpu was active or not. Change-Id: I17b4245f4e0113bfc902d7dae46bb24d0bc2b65d Signed-off-by: Harshdeep Dhatt --- drivers/gpu/msm/kgsl_gmu.c | 2 -- drivers/gpu/msm/kgsl_hfi.c | 9 +++------ 2 files changed, 3 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/msm/kgsl_gmu.c b/drivers/gpu/msm/kgsl_gmu.c index 7efe0278691a..1dc5b11e9b42 100644 --- a/drivers/gpu/msm/kgsl_gmu.c +++ b/drivers/gpu/msm/kgsl_gmu.c @@ -927,8 +927,6 @@ static irqreturn_t gmu_irq_handler(int irq, void *data) dev_err_ratelimited(&gmu->pdev->dev, "GMU watchdog expired interrupt received\n"); - adreno_set_gpu_fault(adreno_dev, ADRENO_GMU_FAULT); - adreno_dispatcher_schedule(device); } if (status & GMU_INT_HOST_AHB_BUS_ERR) dev_err_ratelimited(&gmu->pdev->dev, diff --git a/drivers/gpu/msm/kgsl_hfi.c b/drivers/gpu/msm/kgsl_hfi.c index cf82890114b9..ec5354857742 100644 --- a/drivers/gpu/msm/kgsl_hfi.c +++ b/drivers/gpu/msm/kgsl_hfi.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. 
*/ #include @@ -853,7 +853,6 @@ irqreturn_t hfi_irq_handler(int irq, void *data) struct kgsl_device *device = data; struct gmu_device *gmu = KGSL_GMU_DEVICE(device); struct kgsl_hfi *hfi = &gmu->hfi; - struct adreno_device *adreno_dev = ADRENO_DEVICE(device); unsigned int status = 0; adreno_read_gmureg(ADRENO_DEVICE(device), @@ -863,12 +862,10 @@ irqreturn_t hfi_irq_handler(int irq, void *data) if (status & HFI_IRQ_DBGQ_MASK) tasklet_hi_schedule(&hfi->tasklet); - if (status & HFI_IRQ_CM3_FAULT_MASK) { + if (status & HFI_IRQ_CM3_FAULT_MASK) dev_err_ratelimited(&gmu->pdev->dev, "GMU CM3 fault interrupt received\n"); - adreno_set_gpu_fault(adreno_dev, ADRENO_GMU_FAULT); - adreno_dispatcher_schedule(device); - } + if (status & ~HFI_IRQ_MASK) dev_err_ratelimited(&gmu->pdev->dev, "Unhandled HFI interrupts 0x%lx\n", From 811638a4fdfb1a74d0db3f64577c2ae012679302 Mon Sep 17 00:00:00 2001 From: Yue Ma Date: Mon, 27 Apr 2020 15:03:40 -0700 Subject: [PATCH 68/90] cnss2: Fix a few issues during platform reboot or shutdown Make unregister driver work unkillable to avoid WLAN host driver handling issues after it is killed by platform reboot or shutdown. Also add proper handlings and checks for platform reboot or shutdown to avoid unnecessary asserts for calibration cases. 
Change-Id: Ide137dc351ca48ef23573abb325db011b8460f54 Signed-off-by: Yue Ma --- drivers/net/wireless/cnss2/main.c | 2 ++ drivers/net/wireless/cnss2/pci.c | 10 ++++++++-- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/cnss2/main.c b/drivers/net/wireless/cnss2/main.c index 0b9417925852..4246f46f9598 100644 --- a/drivers/net/wireless/cnss2/main.c +++ b/drivers/net/wireless/cnss2/main.c @@ -1978,6 +1978,7 @@ static ssize_t shutdown_store(struct kobject *kobj, set_bit(CNSS_IN_REBOOT, &plat_priv->driver_state); del_timer(&plat_priv->fw_boot_timer); complete_all(&plat_priv->power_up_complete); + complete_all(&plat_priv->cal_complete); } cnss_pr_dbg("Received shutdown notification\n"); @@ -2119,6 +2120,7 @@ static int cnss_reboot_notifier(struct notifier_block *nb, set_bit(CNSS_IN_REBOOT, &plat_priv->driver_state); del_timer(&plat_priv->fw_boot_timer); complete_all(&plat_priv->power_up_complete); + complete_all(&plat_priv->cal_complete); cnss_pr_dbg("Reboot is in progress with action %d\n", action); return NOTIFY_DONE; diff --git a/drivers/net/wireless/cnss2/pci.c b/drivers/net/wireless/cnss2/pci.c index 5d7849ddf864..a4a51ecfa0ee 100644 --- a/drivers/net/wireless/cnss2/pci.c +++ b/drivers/net/wireless/cnss2/pci.c @@ -2058,7 +2058,8 @@ int cnss_wlan_register_driver(struct cnss_wlan_driver *driver_ops) msecs_to_jiffies(timeout) << 2); if (!ret) { cnss_pr_err("Timeout waiting for calibration to complete\n"); - CNSS_ASSERT(0); + if (!test_bit(CNSS_IN_REBOOT, &plat_priv->driver_state)) + CNSS_ASSERT(0); cal_info = kzalloc(sizeof(*cal_info), GFP_KERNEL); if (!cal_info) @@ -2070,6 +2071,11 @@ int cnss_wlan_register_driver(struct cnss_wlan_driver *driver_ops) 0, cal_info); } + if (test_bit(CNSS_IN_REBOOT, &plat_priv->driver_state)) { + cnss_pr_dbg("Reboot or shutdown is in progress, ignore register driver\n"); + return -EINVAL; + } + register_driver: ret = cnss_driver_event_post(plat_priv, CNSS_DRIVER_EVENT_REGISTER_DRIVER, @@ -2125,7 +2131,7 
@@ void cnss_wlan_unregister_driver(struct cnss_wlan_driver *driver_ops) skip_wait_recovery: cnss_driver_event_post(plat_priv, CNSS_DRIVER_EVENT_UNREGISTER_DRIVER, - CNSS_EVENT_SYNC_UNINTERRUPTIBLE, NULL); + CNSS_EVENT_SYNC_UNKILLABLE, NULL); } EXPORT_SYMBOL(cnss_wlan_unregister_driver); From 18cb1e36bebefabe92db708b8e4db60bb385dd3a Mon Sep 17 00:00:00 2001 From: priyankar Date: Wed, 25 Mar 2020 14:59:37 +0530 Subject: [PATCH 69/90] Add support for Moselle SoC (WCN6750) This patch contains changes for supporting new BT chip Moselle. Change-Id: Iecafc1ed4f1ca5147056502d4323837c5a34ff53 Signed-off-by: priyankar --- drivers/bluetooth/bluetooth-power.c | 39 +++++++++++++++++++---------- 1 file changed, 26 insertions(+), 13 deletions(-) diff --git a/drivers/bluetooth/bluetooth-power.c b/drivers/bluetooth/bluetooth-power.c index c7f41ab161ef..cb5670e785e0 100644 --- a/drivers/bluetooth/bluetooth-power.c +++ b/drivers/bluetooth/bluetooth-power.c @@ -41,6 +41,7 @@ static const struct of_device_id bt_power_match_table[] = { { .compatible = "qca,qca6174" }, { .compatible = "qca,wcn3990" }, { .compatible = "qca,qca6390" }, + { .compatible = "qca,wcn6750" }, {} }; @@ -271,10 +272,14 @@ static int bt_configure_gpios(int on) return rc; } msleep(50); - BT_PWR_ERR("BTON:Turn Bt Off bt-reset-gpio(%d) value(%d)\n", - bt_reset_gpio, gpio_get_value(bt_reset_gpio)); - BT_PWR_ERR("BTON:Turn Bt Off bt-sw-ctrl-gpio(%d) value(%d)\n", - bt_sw_ctrl_gpio, gpio_get_value(bt_sw_ctrl_gpio)); + BT_PWR_INFO("BTON:Turn Bt Off bt-reset-gpio(%d) value(%d)\n", + bt_reset_gpio, gpio_get_value(bt_reset_gpio)); + if (bt_sw_ctrl_gpio >= 0) { + BT_PWR_INFO("BTON:Turn Bt Off"); + BT_PWR_INFO("bt-sw-ctrl-gpio(%d) value(%d)", + bt_sw_ctrl_gpio, + gpio_get_value(bt_sw_ctrl_gpio)); + } rc = gpio_direction_output(bt_reset_gpio, 1); if (rc) { @@ -305,22 +310,30 @@ static int bt_configure_gpios(int on) BT_PWR_ERR("Prob: Set Debug-Gpio\n"); } } - BT_PWR_ERR("BTON:Turn Bt On bt-reset-gpio(%d) value(%d)\n", - 
bt_reset_gpio, gpio_get_value(bt_reset_gpio)); - BT_PWR_ERR("BTON:Turn Bt On bt-sw-ctrl-gpio(%d) value(%d)\n", - bt_sw_ctrl_gpio, gpio_get_value(bt_sw_ctrl_gpio)); + BT_PWR_INFO("BTON:Turn Bt On bt-reset-gpio(%d) value(%d)\n", + bt_reset_gpio, gpio_get_value(bt_reset_gpio)); + if (bt_sw_ctrl_gpio >= 0) { + BT_PWR_INFO("BTON:Turn Bt On"); + BT_PWR_INFO("bt-sw-ctrl-gpio(%d) value(%d)", + bt_sw_ctrl_gpio, + gpio_get_value(bt_sw_ctrl_gpio)); + } } else { gpio_set_value(bt_reset_gpio, 0); if (bt_debug_gpio >= 0) gpio_set_value(bt_debug_gpio, 0); msleep(100); - BT_PWR_ERR("BT-OFF:bt-reset-gpio(%d) value(%d)\n", - bt_reset_gpio, gpio_get_value(bt_reset_gpio)); - BT_PWR_ERR("BT-OFF:bt-sw-ctrl-gpio(%d) value(%d)\n", - bt_sw_ctrl_gpio, gpio_get_value(bt_sw_ctrl_gpio)); + BT_PWR_INFO("BT-OFF:bt-reset-gpio(%d) value(%d)\n", + bt_reset_gpio, gpio_get_value(bt_reset_gpio)); + + if (bt_sw_ctrl_gpio >= 0) { + BT_PWR_INFO("BT-OFF:bt-sw-ctrl-gpio(%d) value(%d)", + bt_sw_ctrl_gpio, + gpio_get_value(bt_sw_ctrl_gpio)); + } } - BT_PWR_ERR("bt_gpio= %d on: %d is successful", bt_reset_gpio, on); + BT_PWR_INFO("bt_gpio= %d on: %d is successful", bt_reset_gpio, on); return rc; } From 4966f856b9b82b6acc896dbf50fd1a05ed77e352 Mon Sep 17 00:00:00 2001 From: Udipto Goswami Date: Fri, 24 Apr 2020 15:46:03 +0530 Subject: [PATCH 70/90] usb: f_qdss: Remove QDSS read functionality as not in use Removing the APIs related to ctrl_read since those are not used by any client of f_qdss. 
Change-Id: Idf7f1ffd8afe8a72f83e036a7986d35f2f71bea8 Signed-off-by: Udipto Goswami --- .../hwtracing/coresight/coresight-byte-cntr.c | 2 +- drivers/soc/qcom/qdss_bridge.c | 2 +- drivers/usb/gadget/function/f_qdss.c | 92 +------------------ drivers/usb/gadget/function/f_qdss.h | 1 - include/linux/usb/usb_qdss.h | 14 +-- 5 files changed, 6 insertions(+), 105 deletions(-) diff --git a/drivers/hwtracing/coresight/coresight-byte-cntr.c b/drivers/hwtracing/coresight/coresight-byte-cntr.c index 48ca137650f0..d0b7a897e884 100644 --- a/drivers/hwtracing/coresight/coresight-byte-cntr.c +++ b/drivers/hwtracing/coresight/coresight-byte-cntr.c @@ -522,7 +522,7 @@ void usb_bypass_notifier(void *priv, unsigned int event, switch (event) { case USB_QDSS_CONNECT: - usb_qdss_alloc_req(ch, USB_BUF_NUM, 0); + usb_qdss_alloc_req(ch, USB_BUF_NUM); usb_bypass_start(drvdata); queue_work(drvdata->usb_wq, &(drvdata->read_work)); break; diff --git a/drivers/soc/qcom/qdss_bridge.c b/drivers/soc/qcom/qdss_bridge.c index 40a6d5c7a321..d9e14229ff31 100644 --- a/drivers/soc/qcom/qdss_bridge.c +++ b/drivers/soc/qcom/qdss_bridge.c @@ -455,7 +455,7 @@ static void usb_notifier(void *priv, unsigned int event, switch (event) { case USB_QDSS_CONNECT: - usb_qdss_alloc_req(ch, drvdata->nr_trbs, 0); + usb_qdss_alloc_req(ch, drvdata->nr_trbs); mhi_queue_read(drvdata); break; diff --git a/drivers/usb/gadget/function/f_qdss.c b/drivers/usb/gadget/function/f_qdss.c index 96a35288c409..c3c18e9ab58e 100644 --- a/drivers/usb/gadget/function/f_qdss.c +++ b/drivers/usb/gadget/function/f_qdss.c @@ -252,27 +252,6 @@ static void qdss_write_complete(struct usb_ep *ep, qdss->ch.notify(qdss->ch.priv, state, d_req, NULL); } -static void qdss_ctrl_read_complete(struct usb_ep *ep, - struct usb_request *req) -{ - struct f_qdss *qdss = ep->driver_data; - struct qdss_request *d_req = req->context; - unsigned long flags; - - qdss_log("%s\n", __func__); - - d_req->actual = req->actual; - d_req->status = req->status; - - 
spin_lock_irqsave(&qdss->lock, flags); - list_add_tail(&req->list, &qdss->ctrl_read_pool); - spin_unlock_irqrestore(&qdss->lock, flags); - - if (qdss->ch.notify) - qdss->ch.notify(qdss->ch.priv, USB_QDSS_CTRL_READ_DONE, d_req, - NULL); -} - void usb_qdss_free_req(struct usb_qdss_ch *ch) { struct f_qdss *qdss; @@ -298,17 +277,10 @@ void usb_qdss_free_req(struct usb_qdss_ch *ch) list_del(&req->list); usb_ep_free_request(qdss->port.ctrl_in, req); } - - list_for_each_safe(act, tmp, &qdss->ctrl_read_pool) { - req = list_entry(act, struct usb_request, list); - list_del(&req->list); - usb_ep_free_request(qdss->port.ctrl_out, req); - } } EXPORT_SYMBOL(usb_qdss_free_req); -int usb_qdss_alloc_req(struct usb_qdss_ch *ch, int no_write_buf, - int no_read_buf) +int usb_qdss_alloc_req(struct usb_qdss_ch *ch, int no_write_buf) { struct f_qdss *qdss = ch->priv_usb; struct usb_request *req; @@ -323,10 +295,8 @@ int usb_qdss_alloc_req(struct usb_qdss_ch *ch, int no_write_buf, return -ENODEV; } - if ((qdss->debug_inface_enabled && - (no_write_buf <= 0 || no_read_buf <= 0)) || - (!qdss->debug_inface_enabled && - (no_write_buf <= 0 || no_read_buf))) { + if ((qdss->debug_inface_enabled && no_write_buf <= 0) || + (!qdss->debug_inface_enabled && no_write_buf <= 0)) { pr_err("%s: missing params\n", __func__); return -ENODEV; } @@ -349,16 +319,6 @@ int usb_qdss_alloc_req(struct usb_qdss_ch *ch, int no_write_buf, list_add_tail(&req->list, list_pool); } - for (i = 0; i < no_read_buf; i++) { - req = usb_ep_alloc_request(qdss->port.ctrl_out, GFP_ATOMIC); - if (!req) { - pr_err("%s: ctrl_out allocation err\n", __func__); - goto fail; - } - req->complete = qdss_ctrl_read_complete; - list_add_tail(&req->list, &qdss->ctrl_read_pool); - } - return 0; fail: @@ -794,7 +754,6 @@ static struct f_qdss *alloc_usb_qdss(char *channel_name) spin_unlock_irqrestore(&qdss_lock, flags); spin_lock_init(&qdss->lock); - INIT_LIST_HEAD(&qdss->ctrl_read_pool); INIT_LIST_HEAD(&qdss->ctrl_write_pool); 
INIT_LIST_HEAD(&qdss->data_write_pool); INIT_LIST_HEAD(&qdss->queued_data_pool); @@ -804,51 +763,6 @@ static struct f_qdss *alloc_usb_qdss(char *channel_name) return qdss; } -int usb_qdss_ctrl_read(struct usb_qdss_ch *ch, struct qdss_request *d_req) -{ - struct f_qdss *qdss = ch->priv_usb; - unsigned long flags; - struct usb_request *req = NULL; - - qdss_log("%s\n", __func__); - - if (!qdss) - return -ENODEV; - - spin_lock_irqsave(&qdss->lock, flags); - - if (qdss->usb_connected == 0) { - spin_unlock_irqrestore(&qdss->lock, flags); - return -EIO; - } - - if (list_empty(&qdss->ctrl_read_pool)) { - spin_unlock_irqrestore(&qdss->lock, flags); - pr_err("error: %s list is empty\n", __func__); - return -EAGAIN; - } - - req = list_first_entry(&qdss->ctrl_read_pool, struct usb_request, list); - list_del(&req->list); - spin_unlock_irqrestore(&qdss->lock, flags); - - req->buf = d_req->buf; - req->length = d_req->length; - req->context = d_req; - - if (usb_ep_queue(qdss->port.ctrl_out, req, GFP_ATOMIC)) { - /* If error add the link to linked list again*/ - spin_lock_irqsave(&qdss->lock, flags); - list_add_tail(&req->list, &qdss->ctrl_read_pool); - spin_unlock_irqrestore(&qdss->lock, flags); - pr_err("qdss usb_ep_queue failed\n"); - return -EIO; - } - - return 0; -} -EXPORT_SYMBOL(usb_qdss_ctrl_read); - int usb_qdss_ctrl_write(struct usb_qdss_ch *ch, struct qdss_request *d_req) { struct f_qdss *qdss = ch->priv_usb; diff --git a/drivers/usb/gadget/function/f_qdss.h b/drivers/usb/gadget/function/f_qdss.h index fe847d2c947d..f25cd947dfde 100644 --- a/drivers/usb/gadget/function/f_qdss.h +++ b/drivers/usb/gadget/function/f_qdss.h @@ -50,7 +50,6 @@ struct f_qdss { bool debug_inface_enabled; struct usb_request *endless_req; struct usb_qdss_ch ch; - struct list_head ctrl_read_pool; struct list_head ctrl_write_pool; /* for mdm channel SW path */ diff --git a/include/linux/usb/usb_qdss.h b/include/linux/usb/usb_qdss.h index ae5cfa3abfc3..8af6f6e81eb0 100644 --- 
a/include/linux/usb/usb_qdss.h +++ b/include/linux/usb/usb_qdss.h @@ -46,12 +46,10 @@ struct usb_qdss_ch *usb_qdss_open(const char *name, void *priv, void (*notify)(void *priv, unsigned int event, struct qdss_request *d_req, struct usb_qdss_ch *ch)); void usb_qdss_close(struct usb_qdss_ch *ch); -int usb_qdss_alloc_req(struct usb_qdss_ch *ch, int n_write, int n_read); +int usb_qdss_alloc_req(struct usb_qdss_ch *ch, int n_write); void usb_qdss_free_req(struct usb_qdss_ch *ch); -int usb_qdss_read(struct usb_qdss_ch *ch, struct qdss_request *d_req); int usb_qdss_write(struct usb_qdss_ch *ch, struct qdss_request *d_req); int usb_qdss_ctrl_write(struct usb_qdss_ch *ch, struct qdss_request *d_req); -int usb_qdss_ctrl_read(struct usb_qdss_ch *ch, struct qdss_request *d_req); #else static inline struct usb_qdss_ch *usb_qdss_open(const char *name, void *priv, void (*n)(void *, unsigned int event, @@ -60,11 +58,6 @@ static inline struct usb_qdss_ch *usb_qdss_open(const char *name, void *priv, return ERR_PTR(-ENODEV); } -static inline int usb_qdss_read(struct usb_qdss_ch *c, struct qdss_request *d) -{ - return -ENODEV; -} - static inline int usb_qdss_write(struct usb_qdss_ch *c, struct qdss_request *d) { return -ENODEV; @@ -76,11 +69,6 @@ static inline int usb_qdss_ctrl_write(struct usb_qdss_ch *c, return -ENODEV; } -static inline int usb_qdss_ctrl_read(struct usb_qdss_ch *c, - struct qdss_request *d) -{ - return -ENODEV; -} static inline int usb_qdss_alloc_req(struct usb_qdss_ch *c, int n_wr, int n_rd) { return -ENODEV; From 0c7b6ed521dbd1b26ec4ac4efcb2813eba840fc3 Mon Sep 17 00:00:00 2001 From: Rajesh Bharathwaj Date: Fri, 27 Mar 2020 22:57:48 -0700 Subject: [PATCH 71/90] defconfig: add Maxim Fan controller Adding Maxim(MAX31760) Fan Controller configs to kona defconfigs. 
Change-Id: I27d1540cf49490ba620a5dd8935ca03ac58a7efa Signed-off-by: Rajesh Bharathwaj --- arch/arm64/configs/vendor/kona-perf_defconfig | 1 + arch/arm64/configs/vendor/kona_defconfig | 1 + 2 files changed, 2 insertions(+) diff --git a/arch/arm64/configs/vendor/kona-perf_defconfig b/arch/arm64/configs/vendor/kona-perf_defconfig index c60c9874543b..eee1eea0726a 100644 --- a/arch/arm64/configs/vendor/kona-perf_defconfig +++ b/arch/arm64/configs/vendor/kona-perf_defconfig @@ -285,6 +285,7 @@ CONFIG_UID_SYS_STATS=y CONFIG_OKL4_USER_VIRQ=y CONFIG_WIGIG_SENSING_SPI=m CONFIG_QTI_XR_SMRTVWR_MISC=y +CONFIG_QTI_MAXIM_FAN_CONTROLLER=y CONFIG_SCSI=y CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_SG=y diff --git a/arch/arm64/configs/vendor/kona_defconfig b/arch/arm64/configs/vendor/kona_defconfig index c7fe961d93a4..6d80aef5a681 100644 --- a/arch/arm64/configs/vendor/kona_defconfig +++ b/arch/arm64/configs/vendor/kona_defconfig @@ -297,6 +297,7 @@ CONFIG_UID_SYS_STATS=y CONFIG_OKL4_USER_VIRQ=y CONFIG_WIGIG_SENSING_SPI=m CONFIG_QTI_XR_SMRTVWR_MISC=y +CONFIG_QTI_MAXIM_FAN_CONTROLLER=y CONFIG_SCSI=y CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_SG=y From 0023a0f1485f551edffd169798272afa1d6b2205 Mon Sep 17 00:00:00 2001 From: Udipto Goswami Date: Fri, 10 Apr 2020 17:15:14 +0530 Subject: [PATCH 72/90] usb: f_qdss: Fix watchdog bark issue on wait_for_completion The coresight driver is freeing up d_req as part of notify from write_complete. Accessing write_done from qdss_close, wait_for_completion will be stuck there causing watchdog bark. Fix this by making f_qdss independent of coresight & bridge driver from initializing complete. Initialization of completion now occurs from alloc_req. 
Change-Id: Ie6b1cd445ed6bb38c1a555f6c4c41068090e4fac Signed-off-by: Udipto Goswami --- .../hwtracing/coresight/coresight-byte-cntr.c | 3 - drivers/soc/qcom/qdss_bridge.c | 1 - drivers/usb/gadget/function/f_qdss.c | 70 +++++++++++-------- include/linux/usb/usb_qdss.h | 8 ++- 4 files changed, 48 insertions(+), 34 deletions(-) diff --git a/drivers/hwtracing/coresight/coresight-byte-cntr.c b/drivers/hwtracing/coresight/coresight-byte-cntr.c index d0b7a897e884..6353106a048d 100644 --- a/drivers/hwtracing/coresight/coresight-byte-cntr.c +++ b/drivers/hwtracing/coresight/coresight-byte-cntr.c @@ -340,8 +340,6 @@ static int usb_transfer_small_packet(struct qdss_request *usb_req, goto out; } - init_completion(&usb_req->write_done); - actual = tmc_etr_buf_get_data(etr_buf, drvdata->offset, req_size, &usb_req->buf); usb_req->length = actual; @@ -425,7 +423,6 @@ static void usb_read_work_fn(struct work_struct *work) sizeof(*usb_req), GFP_KERNEL); if (!usb_req) return; - init_completion(&usb_req->write_done); usb_req->sg = devm_kzalloc(tmcdrvdata->dev, sizeof(*(usb_req->sg)) * req_sg_num, GFP_KERNEL); diff --git a/drivers/soc/qcom/qdss_bridge.c b/drivers/soc/qcom/qdss_bridge.c index d9e14229ff31..4d15790f486f 100644 --- a/drivers/soc/qcom/qdss_bridge.c +++ b/drivers/soc/qcom/qdss_bridge.c @@ -108,7 +108,6 @@ static int qdss_create_buf_tbl(struct qdss_bridge_drvdata *drvdata) buf = kzalloc(drvdata->mtu, GFP_KERNEL); usb_req = kzalloc(sizeof(*usb_req), GFP_KERNEL); - init_completion(&usb_req->write_done); entry->buf = buf; entry->usb_req = usb_req; diff --git a/drivers/usb/gadget/function/f_qdss.c b/drivers/usb/gadget/function/f_qdss.c index c3c18e9ab58e..dff1b5d45489 100644 --- a/drivers/usb/gadget/function/f_qdss.c +++ b/drivers/usb/gadget/function/f_qdss.c @@ -219,7 +219,8 @@ static void qdss_write_complete(struct usb_ep *ep, struct usb_request *req) { struct f_qdss *qdss = ep->driver_data; - struct qdss_request *d_req = req->context; + struct qdss_req *qreq = req->context; + 
struct qdss_request *d_req = qreq->qdss_req; struct usb_ep *in; struct list_head *list_pool; enum qdss_state state; @@ -239,9 +240,9 @@ static void qdss_write_complete(struct usb_ep *ep, spin_lock_irqsave(&qdss->lock, flags); if (!qdss->debug_inface_enabled) - list_del(&req->list); - list_add_tail(&req->list, list_pool); - complete(&d_req->write_done); + list_del(&qreq->list); + list_add_tail(&qreq->list, list_pool); + complete(&qreq->write_done); if (req->length != 0) { d_req->actual = req->actual; d_req->status = req->status; @@ -255,8 +256,8 @@ static void qdss_write_complete(struct usb_ep *ep, void usb_qdss_free_req(struct usb_qdss_ch *ch) { struct f_qdss *qdss; - struct usb_request *req; struct list_head *act, *tmp; + struct qdss_req *qreq; qdss = ch->priv_usb; if (!qdss) { @@ -267,15 +268,19 @@ void usb_qdss_free_req(struct usb_qdss_ch *ch) qdss_log("%s: channel name = %s\n", __func__, qdss->ch.name); list_for_each_safe(act, tmp, &qdss->data_write_pool) { - req = list_entry(act, struct usb_request, list); - list_del(&req->list); - usb_ep_free_request(qdss->port.data, req); + qreq = list_entry(act, struct qdss_req, list); + list_del(&qreq->list); + usb_ep_free_request(qdss->port.data, qreq->usb_req); + kfree(qreq); + } list_for_each_safe(act, tmp, &qdss->ctrl_write_pool) { - req = list_entry(act, struct usb_request, list); - list_del(&req->list); - usb_ep_free_request(qdss->port.ctrl_in, req); + qreq = list_entry(act, struct qdss_req, list); + list_del(&qreq->list); + usb_ep_free_request(qdss->port.ctrl_in, qreq->usb_req); + kfree(qreq); + } } EXPORT_SYMBOL(usb_qdss_free_req); @@ -287,6 +292,7 @@ int usb_qdss_alloc_req(struct usb_qdss_ch *ch, int no_write_buf) struct usb_ep *in; struct list_head *list_pool; int i; + struct qdss_req *qreq; qdss_log("%s\n", __func__); @@ -310,13 +316,17 @@ int usb_qdss_alloc_req(struct usb_qdss_ch *ch, int no_write_buf) } for (i = 0; i < no_write_buf; i++) { + qreq = kzalloc(sizeof(struct qdss_req), GFP_KERNEL); req = 
usb_ep_alloc_request(in, GFP_ATOMIC); if (!req) { pr_err("%s: ctrl_in allocation err\n", __func__); goto fail; } + qreq->usb_req = req; + req->context = qreq; req->complete = qdss_write_complete; - list_add_tail(&req->list, list_pool); + list_add_tail(&qreq->list, list_pool); + init_completion(&qreq->write_done); } return 0; @@ -768,6 +778,7 @@ int usb_qdss_ctrl_write(struct usb_qdss_ch *ch, struct qdss_request *d_req) struct f_qdss *qdss = ch->priv_usb; unsigned long flags; struct usb_request *req = NULL; + struct qdss_req *qreq; qdss_log("%s\n", __func__); @@ -787,17 +798,18 @@ int usb_qdss_ctrl_write(struct usb_qdss_ch *ch, struct qdss_request *d_req) return -EAGAIN; } - req = list_first_entry(&qdss->ctrl_write_pool, struct usb_request, + qreq = list_first_entry(&qdss->ctrl_write_pool, struct qdss_req, list); - list_del(&req->list); + list_del(&qreq->list); spin_unlock_irqrestore(&qdss->lock, flags); + qreq->qdss_req = d_req; + req = qreq->usb_req; req->buf = d_req->buf; req->length = d_req->length; - req->context = d_req; if (usb_ep_queue(qdss->port.ctrl_in, req, GFP_ATOMIC)) { spin_lock_irqsave(&qdss->lock, flags); - list_add_tail(&req->list, &qdss->ctrl_write_pool); + list_add_tail(&qreq->list, &qdss->ctrl_write_pool); spin_unlock_irqrestore(&qdss->lock, flags); pr_err("%s usb_ep_queue failed\n", __func__); return -EIO; @@ -812,6 +824,7 @@ int usb_qdss_write(struct usb_qdss_ch *ch, struct qdss_request *d_req) struct f_qdss *qdss = ch->priv_usb; unsigned long flags; struct usb_request *req = NULL; + struct qdss_req *qreq; qdss_log("usb_qdss_data_write\n"); @@ -831,23 +844,24 @@ int usb_qdss_write(struct usb_qdss_ch *ch, struct qdss_request *d_req) return -EAGAIN; } - req = list_first_entry(&qdss->data_write_pool, struct usb_request, + qreq = list_first_entry(&qdss->data_write_pool, struct qdss_req, list); - list_move_tail(&req->list, &qdss->queued_data_pool); + list_move_tail(&qreq->list, &qdss->queued_data_pool); spin_unlock_irqrestore(&qdss->lock, flags); + 
qreq->qdss_req = d_req; + req = qreq->usb_req; req->buf = d_req->buf; req->length = d_req->length; - req->context = d_req; req->sg = d_req->sg; req->num_sgs = d_req->num_sgs; req->num_mapped_sgs = d_req->num_mapped_sgs; - reinit_completion(&d_req->write_done); + reinit_completion(&qreq->write_done); if (usb_ep_queue(qdss->port.data, req, GFP_ATOMIC)) { spin_lock_irqsave(&qdss->lock, flags); /* Remove from queued pool and add back to data pool */ - list_move_tail(&req->list, &qdss->data_write_pool); - complete(&d_req->write_done); + list_move_tail(&qreq->list, &qdss->data_write_pool); + complete(&qreq->write_done); spin_unlock_irqrestore(&qdss->lock, flags); pr_err("qdss usb_ep_queue failed\n"); return -EIO; @@ -911,8 +925,7 @@ void usb_qdss_close(struct usb_qdss_ch *ch) struct usb_gadget *gadget; unsigned long flags; int status; - struct usb_request *req; - struct qdss_request *d_req; + struct qdss_req *qreq; qdss_log("%s\n", __func__); @@ -921,12 +934,11 @@ void usb_qdss_close(struct usb_qdss_ch *ch) goto close; qdss->qdss_close = true; while (!list_empty(&qdss->queued_data_pool)) { - req = list_first_entry(&qdss->queued_data_pool, - struct usb_request, list); - d_req = req->context; + qreq = list_first_entry(&qdss->queued_data_pool, + struct qdss_req, list); spin_unlock_irqrestore(&qdss_lock, flags); - usb_ep_dequeue(qdss->port.data, req); - wait_for_completion(&d_req->write_done); + usb_ep_dequeue(qdss->port.data, qreq->usb_req); + wait_for_completion(&qreq->write_done); spin_lock_irqsave(&qdss_lock, flags); } usb_qdss_free_req(ch); diff --git a/include/linux/usb/usb_qdss.h b/include/linux/usb/usb_qdss.h index 8af6f6e81eb0..645d6f68cc7c 100644 --- a/include/linux/usb/usb_qdss.h +++ b/include/linux/usb/usb_qdss.h @@ -20,7 +20,6 @@ struct qdss_request { struct scatterlist *sg; unsigned int num_sgs; unsigned int num_mapped_sgs; - struct completion write_done; }; struct usb_qdss_ch { @@ -41,6 +40,13 @@ enum qdss_state { USB_QDSS_CTRL_WRITE_DONE, }; +struct qdss_req 
{ + struct usb_request *usb_req; + struct completion write_done; + struct qdss_request *qdss_req; + struct list_head list; +}; + #if IS_ENABLED(CONFIG_USB_F_QDSS) struct usb_qdss_ch *usb_qdss_open(const char *name, void *priv, void (*notify)(void *priv, unsigned int event, From 4101687e420f96f74d273e6ff7916130a4f35f1d Mon Sep 17 00:00:00 2001 From: Prudhvi Yarlagadda Date: Tue, 25 Feb 2020 18:09:27 +0530 Subject: [PATCH 73/90] spi-geni-qcom: Add shared_ee dtsi property for spi driver When shared_ee dtsi flag is set then don't depend on the shared_se checks in prepare/unprepare transfer hardware and runtime resume/suspend APIs of spi driver. Also use runtime resume/suspend calls from prepare/unprepare message so that dual EE use case for spi will not be effected. This change is to help in the cases where spi can be used from secure and non-secure use cases. Spi framework calls unprepare transfer hardware from a kthread after the spi_pump_message call is completed and by that time spi driver might be getting used from a secure use case and at the same time unprepare transfer hardware is selecting the pinctrl to sleep state. Change-Id: Ic8ea126ca5cddd3ca45c080a39841dd6ec1f6760 Signed-off-by: Prudhvi Yarlagadda --- drivers/spi/spi-geni-qcom.c | 109 +++++++++++++++++++++++++++++------- 1 file changed, 90 insertions(+), 19 deletions(-) diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c index 373e5f00ec01..00bfed4ddfbd 100644 --- a/drivers/spi/spi-geni-qcom.c +++ b/drivers/spi/spi-geni-qcom.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. 
*/ @@ -148,7 +148,8 @@ struct spi_geni_master { int num_rx_eot; int num_xfers; void *ipc; - bool shared_se; + bool shared_se; /* GSI Mode */ + bool shared_ee; /* Dual EE use case */ bool dis_autosuspend; bool cmd_done; }; @@ -717,6 +718,32 @@ static int spi_geni_prepare_message(struct spi_master *spi, { int ret = 0; struct spi_geni_master *mas = spi_master_get_devdata(spi); + int count; + + if (mas->shared_ee) { + if (mas->setup) { + ret = pm_runtime_get_sync(mas->dev); + if (ret < 0) { + dev_err(mas->dev, + "%s:pm_runtime_get_sync failed %d\n", + __func__, ret); + pm_runtime_put_noidle(mas->dev); + goto exit_prepare_message; + } + ret = 0; + + if (mas->dis_autosuspend) { + count = + atomic_read(&mas->dev->power.usage_count); + if (count <= 0) + GENI_SE_ERR(mas->ipc, false, NULL, + "resume usage count mismatch:%d", + count); + } + } else { + mas->setup = true; + } + } mas->cur_xfer_mode = select_xfer_mode(spi, spi_msg); @@ -734,6 +761,7 @@ static int spi_geni_prepare_message(struct spi_master *spi, ret = setup_fifo_params(spi_msg->spi, spi); } +exit_prepare_message: return ret; } @@ -741,11 +769,27 @@ static int spi_geni_unprepare_message(struct spi_master *spi_mas, struct spi_message *spi_msg) { struct spi_geni_master *mas = spi_master_get_devdata(spi_mas); + int count = 0; mas->cur_speed_hz = 0; mas->cur_word_len = 0; if (mas->cur_xfer_mode == GSI_DMA) spi_geni_unmap_buf(mas, spi_msg); + + if (mas->shared_ee) { + if (mas->dis_autosuspend) { + pm_runtime_put_sync(mas->dev); + count = atomic_read(&mas->dev->power.usage_count); + if (count < 0) + GENI_SE_ERR(mas->ipc, false, NULL, + "suspend usage count mismatch:%d", + count); + } else { + pm_runtime_mark_last_busy(mas->dev); + pm_runtime_put_autosuspend(mas->dev); + } + } + return 0; } @@ -758,7 +802,7 @@ static int spi_geni_prepare_transfer_hardware(struct spi_master *spi) /* Adjust the IB based on the max speed of the slave.*/ rsc->ib = max_speed * DEFAULT_BUS_WIDTH; - if (mas->shared_se) { + if (mas->shared_se 
&& !mas->shared_ee) { struct se_geni_rsc *rsc; int ret = 0; @@ -770,20 +814,23 @@ static int spi_geni_prepare_transfer_hardware(struct spi_master *spi) "%s: Error %d pinctrl_select_state\n", __func__, ret); } - ret = pm_runtime_get_sync(mas->dev); - if (ret < 0) { - dev_err(mas->dev, "%s:Error enabling SE resources %d\n", + if (!mas->setup || !mas->shared_ee) { + ret = pm_runtime_get_sync(mas->dev); + if (ret < 0) { + dev_err(mas->dev, + "%s:pm_runtime_get_sync failed %d\n", __func__, ret); - pm_runtime_put_noidle(mas->dev); - goto exit_prepare_transfer_hardware; - } else { + pm_runtime_put_noidle(mas->dev); + goto exit_prepare_transfer_hardware; + } ret = 0; - } - if (mas->dis_autosuspend) { - count = atomic_read(&mas->dev->power.usage_count); - if (count <= 0) - GENI_SE_ERR(mas->ipc, false, NULL, + + if (mas->dis_autosuspend) { + count = atomic_read(&mas->dev->power.usage_count); + if (count <= 0) + GENI_SE_ERR(mas->ipc, false, NULL, "resume usage count mismatch:%d", count); + } } if (unlikely(!mas->setup)) { int proto = get_se_proto(mas->base); @@ -857,7 +904,8 @@ static int spi_geni_prepare_transfer_hardware(struct spi_master *spi) dev_info(mas->dev, "tx_fifo %d rx_fifo %d tx_width %d\n", mas->tx_fifo_depth, mas->rx_fifo_depth, mas->tx_fifo_width); - mas->setup = true; + if (!mas->shared_ee) + mas->setup = true; hw_ver = geni_se_qupv3_hw_version(mas->wrapper_dev, &major, &minor, &step); if (hw_ver) @@ -886,6 +934,9 @@ static int spi_geni_unprepare_transfer_hardware(struct spi_master *spi) struct spi_geni_master *mas = spi_master_get_devdata(spi); int count = 0; + if (mas->shared_ee) + return 0; + if (mas->shared_se) { struct se_geni_rsc *rsc; int ret = 0; @@ -908,6 +959,7 @@ static int spi_geni_unprepare_transfer_hardware(struct spi_master *spi) pm_runtime_mark_last_busy(mas->dev); pm_runtime_put_autosuspend(mas->dev); } + return 0; } @@ -1459,6 +1511,15 @@ static int spi_geni_probe(struct platform_device *pdev) geni_mas->dis_autosuspend = 
of_property_read_bool(pdev->dev.of_node, "qcom,disable-autosuspend"); + /* + * This property will be set when spi is being used from + * dual Execution Environments unlike shared_se flag + * which is set if SE is in GSI mode. + */ + geni_mas->shared_ee = + of_property_read_bool(pdev->dev.of_node, + "qcom,shared_ee"); + geni_mas->phys_addr = res->start; geni_mas->size = resource_size(res); geni_mas->base = devm_ioremap(&pdev->dev, res->start, @@ -1536,14 +1597,19 @@ static int spi_geni_runtime_suspend(struct device *dev) struct spi_master *spi = get_spi_master(dev); struct spi_geni_master *geni_mas = spi_master_get_devdata(spi); + if (geni_mas->shared_ee) + goto exit_rt_suspend; + if (geni_mas->shared_se) { ret = se_geni_clks_off(&geni_mas->spi_rsc); if (ret) GENI_SE_ERR(geni_mas->ipc, false, NULL, "%s: Error %d turning off clocks\n", __func__, ret); - } else { - ret = se_geni_resources_off(&geni_mas->spi_rsc); + return ret; } + +exit_rt_suspend: + ret = se_geni_resources_off(&geni_mas->spi_rsc); return ret; } @@ -1553,14 +1619,19 @@ static int spi_geni_runtime_resume(struct device *dev) struct spi_master *spi = get_spi_master(dev); struct spi_geni_master *geni_mas = spi_master_get_devdata(spi); + if (geni_mas->shared_ee) + goto exit_rt_resume; + if (geni_mas->shared_se) { ret = se_geni_clks_on(&geni_mas->spi_rsc); if (ret) GENI_SE_ERR(geni_mas->ipc, false, NULL, "%s: Error %d turning on clocks\n", __func__, ret); - } else { - ret = se_geni_resources_on(&geni_mas->spi_rsc); + return ret; } + +exit_rt_resume: + ret = se_geni_resources_on(&geni_mas->spi_rsc); return ret; } From 0ad90ad8b845701ea07bfefabcded3442505cd91 Mon Sep 17 00:00:00 2001 From: Pankaj Gupta Date: Mon, 20 Apr 2020 19:19:53 +0530 Subject: [PATCH 74/90] msm: kgsl: Print always on counters in gmu core snapshot Print always on counters before collecting gmu core snapshot. This will help to sync and analyze gmu traces with dmesg. 
Change-Id: Icd2223662cce503f8d464592eb4f2307b519f015 Signed-off-by: Pankaj Gupta --- drivers/gpu/msm/adreno_a6xx_gmu.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/gpu/msm/adreno_a6xx_gmu.c b/drivers/gpu/msm/adreno_a6xx_gmu.c index 3a20e0b7bd54..c580165ab7f9 100644 --- a/drivers/gpu/msm/adreno_a6xx_gmu.c +++ b/drivers/gpu/msm/adreno_a6xx_gmu.c @@ -1631,6 +1631,8 @@ static void a6xx_gmu_snapshot(struct kgsl_device *device, { unsigned int val; + dev_err(device->dev, "GMU snapshot started at 0x%llx ticks\n", + a6xx_gmu_read_ao_counter(device)); a6xx_gmu_snapshot_versions(device, snapshot); a6xx_gmu_snapshot_memories(device, snapshot); From d290a1fc288fa94ce83bb6e7139b08e8936eb961 Mon Sep 17 00:00:00 2001 From: Mohammed Siddiq Date: Mon, 27 Apr 2020 19:21:33 +0530 Subject: [PATCH 75/90] cnss2: Add code to update cnss soc info Add code to update cnss soc info from the info received as part of target capability response. Change-Id: I6b88192d772dd30a8aaeb90725141c7c38b2c50c Signed-off-by: Mohammed Siddiq --- drivers/net/wireless/cnss2/pci.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/cnss2/pci.c b/drivers/net/wireless/cnss2/pci.c index a4a51ecfa0ee..808b1585e878 100644 --- a/drivers/net/wireless/cnss2/pci.c +++ b/drivers/net/wireless/cnss2/pci.c @@ -3373,7 +3373,14 @@ int cnss_get_soc_info(struct device *dev, struct cnss_soc_info *info) info->va = pci_priv->bar; info->pa = pci_resource_start(pci_priv->pci_dev, PCI_BAR_NUM); - + info->chip_id = plat_priv->chip_info.chip_id; + info->chip_family = plat_priv->chip_info.chip_family; + info->board_id = plat_priv->board_info.board_id; + info->soc_id = plat_priv->soc_info.soc_id; + info->fw_version = plat_priv->fw_version_info.fw_version; + strlcpy(info->fw_build_timestamp, + plat_priv->fw_version_info.fw_build_timestamp, + sizeof(info->fw_build_timestamp)); memcpy(&info->device_version, &plat_priv->device_version, sizeof(info->device_version)); From 
d941ff957049cd3f0ecb95be82039b23eba814a3 Mon Sep 17 00:00:00 2001 From: Yue Ma Date: Thu, 30 Apr 2020 11:52:43 -0700 Subject: [PATCH 76/90] cnss2: Post register driver work as unkillable event Register driver work handler will only power up device, resume PCIe link and start MHI to download firmware. It may only block there for a timeout, so there is no need to post it as killable event. Change-Id: Ib82050358dc8df4a8163ecb43702aa86ed34f23d Signed-off-by: Yue Ma --- drivers/net/wireless/cnss2/pci.c | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/drivers/net/wireless/cnss2/pci.c b/drivers/net/wireless/cnss2/pci.c index a4a51ecfa0ee..972f65fe7f2b 100644 --- a/drivers/net/wireless/cnss2/pci.c +++ b/drivers/net/wireless/cnss2/pci.c @@ -2079,13 +2079,8 @@ int cnss_wlan_register_driver(struct cnss_wlan_driver *driver_ops) register_driver: ret = cnss_driver_event_post(plat_priv, CNSS_DRIVER_EVENT_REGISTER_DRIVER, - CNSS_EVENT_SYNC_UNINTERRUPTIBLE, + CNSS_EVENT_SYNC_UNKILLABLE, driver_ops); - if (ret == -EINTR) { - cnss_pr_dbg("Register driver work is killed\n"); - del_timer(&plat_priv->fw_boot_timer); - pci_priv->driver_ops = NULL; - } return ret; } From 548ba6d90755ff46d2f61ed58ec8e871f445ff19 Mon Sep 17 00:00:00 2001 From: Anirudh Ghayal Date: Wed, 29 Apr 2020 12:21:10 +0530 Subject: [PATCH 77/90] mfd: qcom-i2c-pmic: Reduce the stat-toggle delay Reduce the stat-toggle delays to 50ms (overall) to optimize boot time. 
Change-Id: I58027cb94500cf93eb42b44dcec9a5e68670b5e0 Signed-off-by: Anirudh Ghayal --- drivers/mfd/qcom-i2c-pmic.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/mfd/qcom-i2c-pmic.c b/drivers/mfd/qcom-i2c-pmic.c index d0ba0c5494f6..8c9c24987a57 100644 --- a/drivers/mfd/qcom-i2c-pmic.c +++ b/drivers/mfd/qcom-i2c-pmic.c @@ -552,7 +552,7 @@ static int i2c_pmic_toggle_stat(struct i2c_pmic *chip) goto exit; } - usleep_range(10000, 11000); + usleep_range(5000, 5500); rc = regmap_write(chip->regmap, chip->periph[0].addr | INT_TEST_VAL_OFFSET, @@ -570,7 +570,7 @@ static int i2c_pmic_toggle_stat(struct i2c_pmic *chip) goto exit; } - usleep_range(10000, 11000); + usleep_range(5000, 5500); } exit: regmap_write(chip->regmap, chip->periph[0].addr | INT_TEST_OFFSET, 0); From 379d3dbc9ed2714d8fb3910f12f964757c741e3d Mon Sep 17 00:00:00 2001 From: Bhaumik Bhatt Date: Fri, 3 Apr 2020 14:13:12 -0700 Subject: [PATCH 78/90] mhi: core: improve time synchronization events handling Validate event ring accesses before walking special event rings and update processing time synchronization such that a doorbell is rung and future requests are queued until a response is received. Also, ensure events are only processed when they are pending. 
Change-Id: Ib8c9385530f822af3e53efb96b82c0f26c3438b7 Signed-off-by: Bhaumik Bhatt --- drivers/bus/mhi/core/mhi_init.c | 2 +- drivers/bus/mhi/core/mhi_internal.h | 8 +- drivers/bus/mhi/core/mhi_main.c | 150 ++++++++++++++++------------ drivers/bus/mhi/core/mhi_pm.c | 3 + 4 files changed, 92 insertions(+), 71 deletions(-) diff --git a/drivers/bus/mhi/core/mhi_init.c b/drivers/bus/mhi/core/mhi_init.c index 9c8be597aba8..32d62856f8c5 100644 --- a/drivers/bus/mhi/core/mhi_init.c +++ b/drivers/bus/mhi/core/mhi_init.c @@ -1225,7 +1225,7 @@ static int of_parse_ev_cfg(struct mhi_controller *mhi_cntrl, mhi_event->process_event = mhi_process_ctrl_ev_ring; break; case MHI_ER_TSYNC_ELEMENT_TYPE: - mhi_event->process_event = mhi_process_tsync_event_ring; + mhi_event->process_event = mhi_process_tsync_ev_ring; break; case MHI_ER_BW_SCALE_ELEMENT_TYPE: mhi_event->process_event = mhi_process_bw_scale_ev_ring; diff --git a/drivers/bus/mhi/core/mhi_internal.h b/drivers/bus/mhi/core/mhi_internal.h index c885d63ab8e4..c2b99e802158 100644 --- a/drivers/bus/mhi/core/mhi_internal.h +++ b/drivers/bus/mhi/core/mhi_internal.h @@ -717,8 +717,6 @@ struct mhi_chan { struct tsync_node { struct list_head node; u32 sequence; - u32 int_sequence; - u64 local_time; u64 remote_time; struct mhi_device *mhi_dev; void (*cb_func)(struct mhi_device *mhi_dev, u32 sequence, @@ -728,7 +726,9 @@ struct tsync_node { struct mhi_timesync { void __iomem *time_reg; u32 int_sequence; + u64 local_time; bool db_support; + bool db_response_pending; spinlock_t lock; /* list protection */ struct list_head head; }; @@ -787,8 +787,8 @@ int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl, struct mhi_event *mhi_event, u32 event_quota); int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl, struct mhi_event *mhi_event, u32 event_quota); -int mhi_process_tsync_event_ring(struct mhi_controller *mhi_cntrl, - struct mhi_event *mhi_event, u32 event_quota); +int mhi_process_tsync_ev_ring(struct mhi_controller 
*mhi_cntrl, + struct mhi_event *mhi_event, u32 event_quota); int mhi_process_bw_scale_ev_ring(struct mhi_controller *mhi_cntrl, struct mhi_event *mhi_event, u32 event_quota); int mhi_send_cmd(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan, diff --git a/drivers/bus/mhi/core/mhi_main.c b/drivers/bus/mhi/core/mhi_main.c index 594284a45aac..59f49cdad092 100644 --- a/drivers/bus/mhi/core/mhi_main.c +++ b/drivers/bus/mhi/core/mhi_main.c @@ -1347,82 +1347,89 @@ int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl, return count; } -int mhi_process_tsync_event_ring(struct mhi_controller *mhi_cntrl, - struct mhi_event *mhi_event, - u32 event_quota) +int mhi_process_tsync_ev_ring(struct mhi_controller *mhi_cntrl, + struct mhi_event *mhi_event, + u32 event_quota) { - struct mhi_tre *dev_rp, *local_rp; + struct mhi_tre *dev_rp; struct mhi_ring *ev_ring = &mhi_event->ring; struct mhi_event_ctxt *er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index]; struct mhi_timesync *mhi_tsync = mhi_cntrl->mhi_tsync; - int count = 0; - u32 int_sequence, unit; + u32 sequence; u64 remote_time; + int ret = 0; - if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state))) { - MHI_LOG("No EV access, PM_STATE:%s\n", - to_mhi_pm_state_str(mhi_cntrl->pm_state)); - return -EIO; + spin_lock_bh(&mhi_event->lock); + dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp); + if (ev_ring->rp == dev_rp) { + spin_unlock_bh(&mhi_event->lock); + goto exit_tsync_process; } - dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp); - local_rp = ev_ring->rp; + /* if rp points to base, we need to wrap it around */ + if (dev_rp == ev_ring->base) + dev_rp = ev_ring->base + ev_ring->len; + dev_rp--; - while (dev_rp != local_rp) { - enum MHI_PKT_TYPE type = MHI_TRE_GET_EV_TYPE(local_rp); - struct tsync_node *tsync_node; + /* fast forward to currently processed element and recycle er */ + ev_ring->rp = dev_rp; + ev_ring->wp = dev_rp - 1; + if (ev_ring->wp < ev_ring->base) + ev_ring->wp = ev_ring->base + 
ev_ring->len - ev_ring->el_size; + mhi_recycle_fwd_ev_ring_element(mhi_cntrl, ev_ring); - MHI_VERB("Processing Event:0x%llx 0x%08x 0x%08x\n", - local_rp->ptr, local_rp->dword[0], local_rp->dword[1]); + MHI_ASSERT(MHI_TRE_GET_EV_TYPE(dev_rp) != MHI_PKT_TYPE_TSYNC_EVENT, + "!TSYNC event"); - MHI_ASSERT(type != MHI_PKT_TYPE_TSYNC_EVENT, "!TSYNC event"); + sequence = MHI_TRE_GET_EV_TSYNC_SEQ(dev_rp); + remote_time = MHI_TRE_GET_EV_TIME(dev_rp); - int_sequence = MHI_TRE_GET_EV_TSYNC_SEQ(local_rp); - unit = MHI_TRE_GET_EV_TSYNC_UNIT(local_rp); - remote_time = MHI_TRE_GET_EV_TIME(local_rp); + MHI_VERB("Received TSYNC event with seq:0x%llx time:0x%llx\n", + sequence, remote_time); - do { - spin_lock(&mhi_tsync->lock); - tsync_node = list_first_entry_or_null(&mhi_tsync->head, - struct tsync_node, node); - if (!tsync_node) { - spin_unlock(&mhi_tsync->lock); - break; - } + read_lock_bh(&mhi_cntrl->pm_lock); + if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) + mhi_ring_er_db(mhi_event); + read_unlock_bh(&mhi_cntrl->pm_lock); + spin_unlock_bh(&mhi_event->lock); + + mutex_lock(&mhi_cntrl->tsync_mutex); + + if (unlikely(mhi_tsync->int_sequence != sequence)) { + MHI_ASSERT(1, "Unexpected response:0x%llx Expected:0x%llx\n", + sequence, mhi_tsync->int_sequence); + mutex_unlock(&mhi_cntrl->tsync_mutex); + goto exit_tsync_process; + } + + do { + struct tsync_node *tsync_node; - list_del(&tsync_node->node); + spin_lock(&mhi_tsync->lock); + tsync_node = list_first_entry_or_null(&mhi_tsync->head, + struct tsync_node, node); + if (!tsync_node) { spin_unlock(&mhi_tsync->lock); + break; + } - /* - * device may not able to process all time sync commands - * host issue and only process last command it receive - */ - if (tsync_node->int_sequence == int_sequence) { - tsync_node->cb_func(tsync_node->mhi_dev, - tsync_node->sequence, - tsync_node->local_time, - remote_time); - kfree(tsync_node); - } else { - kfree(tsync_node); - } - } while (true); + list_del(&tsync_node->node); + 
spin_unlock(&mhi_tsync->lock); - mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring); - local_rp = ev_ring->rp; - dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp); - count++; - } + tsync_node->cb_func(tsync_node->mhi_dev, + tsync_node->sequence, + mhi_tsync->local_time, remote_time); + kfree(tsync_node); + } while (true); - read_lock_bh(&mhi_cntrl->pm_lock); - if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) - mhi_ring_er_db(mhi_event); - read_unlock_bh(&mhi_cntrl->pm_lock); + mhi_tsync->db_response_pending = false; + mutex_unlock(&mhi_cntrl->tsync_mutex); +exit_tsync_process: MHI_VERB("exit er_index:%u\n", mhi_event->er_index); - return count; + return ret; } int mhi_process_bw_scale_ev_ring(struct mhi_controller *mhi_cntrl, @@ -2575,7 +2582,7 @@ int mhi_get_remote_time(struct mhi_device *mhi_dev, struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; struct mhi_timesync *mhi_tsync = mhi_cntrl->mhi_tsync; struct tsync_node *tsync_node; - int ret; + int ret = 0; /* not all devices support all time features */ mutex_lock(&mhi_cntrl->tsync_mutex); @@ -2599,6 +2606,10 @@ int mhi_get_remote_time(struct mhi_device *mhi_dev, } read_unlock_bh(&mhi_cntrl->pm_lock); + MHI_LOG("Enter with pm_state:%s MHI_STATE:%s\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state), + TO_MHI_STATE_STR(mhi_cntrl->dev_state)); + /* * technically we can use GFP_KERNEL, but wants to avoid * # of times scheduling out @@ -2609,15 +2620,17 @@ int mhi_get_remote_time(struct mhi_device *mhi_dev, goto error_no_mem; } - mhi_tsync->int_sequence++; - if (mhi_tsync->int_sequence == 0xFFFFFFFF) - mhi_tsync->int_sequence = 0; - tsync_node->sequence = sequence; - tsync_node->int_sequence = mhi_tsync->int_sequence; tsync_node->cb_func = cb_func; tsync_node->mhi_dev = mhi_dev; + if (mhi_tsync->db_response_pending) + goto skip_tsync_db; + + mhi_tsync->int_sequence++; + if (mhi_tsync->int_sequence == 0xFFFFFFFF) + mhi_tsync->int_sequence = 0; + /* disable link level low power modes */ ret = mhi_cntrl->lpm_disable(mhi_cntrl, 
mhi_cntrl->priv_data); if (ret) { @@ -2626,10 +2639,6 @@ int mhi_get_remote_time(struct mhi_device *mhi_dev, goto error_invalid_state; } - spin_lock(&mhi_tsync->lock); - list_add_tail(&tsync_node->node, &mhi_tsync->head); - spin_unlock(&mhi_tsync->lock); - /* * time critical code, delay between these two steps should be * deterministic as possible. @@ -2637,9 +2646,9 @@ int mhi_get_remote_time(struct mhi_device *mhi_dev, preempt_disable(); local_irq_disable(); - tsync_node->local_time = + mhi_tsync->local_time = mhi_cntrl->time_get(mhi_cntrl, mhi_cntrl->priv_data); - writel_relaxed_no_log(tsync_node->int_sequence, mhi_cntrl->tsync_db); + writel_relaxed_no_log(mhi_tsync->int_sequence, mhi_cntrl->tsync_db); /* write must go thru immediately */ wmb(); @@ -2648,6 +2657,15 @@ int mhi_get_remote_time(struct mhi_device *mhi_dev, mhi_cntrl->lpm_enable(mhi_cntrl, mhi_cntrl->priv_data); + MHI_VERB("time DB request with seq:0x%llx\n", mhi_tsync->int_sequence); + + mhi_tsync->db_response_pending = true; + +skip_tsync_db: + spin_lock(&mhi_tsync->lock); + list_add_tail(&tsync_node->node, &mhi_tsync->head); + spin_unlock(&mhi_tsync->lock); + ret = 0; error_invalid_state: diff --git a/drivers/bus/mhi/core/mhi_pm.c b/drivers/bus/mhi/core/mhi_pm.c index 703dcbad8f87..a4e63bc9456b 100644 --- a/drivers/bus/mhi/core/mhi_pm.c +++ b/drivers/bus/mhi/core/mhi_pm.c @@ -824,6 +824,9 @@ void mhi_special_purpose_work(struct work_struct *work) TO_MHI_STATE_STR(mhi_cntrl->dev_state), TO_MHI_EXEC_STR(mhi_cntrl->ee)); + if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state))) + return; + /* check special purpose event rings and process events */ list_for_each_entry(mhi_event, &mhi_cntrl->sp_ev_rings, node) mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX); From 04edeb48d6997f61cd9e88ab02ee4cc3f2452921 Mon Sep 17 00:00:00 2001 From: Ashok Vuyyuru Date: Wed, 8 Apr 2020 18:07:45 +0530 Subject: [PATCH 79/90] msm: ipa4: Fix to exit from tasklet after processing max packet If TX packets 
processing continuously in tasklet other softIrqs are not able to run on that core which is leading to watchdog bark. For avoiding these scenarios exit from tasklet after reaching max TX limit. Change-Id: Ia5a8f20776b5cfdcb296311d72ffc48eda4970cf Signed-off-by: Ashok Vuyyuru --- drivers/platform/msm/ipa/ipa_v3/ipa_dp.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c index e897ae2ff6df..4fe58bd69ca3 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c @@ -87,6 +87,8 @@ #define IPA_QMAP_ID_BYTE 0 +#define IPA_TX_MAX_DESC (20) + static struct sk_buff *ipa3_get_skb_ipa_rx(unsigned int len, gfp_t flags); static void ipa3_replenish_wlan_rx_cache(struct ipa3_sys_context *sys); static void ipa3_replenish_rx_cache(struct ipa3_sys_context *sys); @@ -212,6 +214,7 @@ static void ipa3_tasklet_write_done(unsigned long data) struct ipa3_sys_context *sys; struct ipa3_tx_pkt_wrapper *this_pkt; bool xmit_done = false; + unsigned int max_tx_pkt = 0; sys = (struct ipa3_sys_context *)data; spin_lock_bh(&sys->spinlock); @@ -223,9 +226,17 @@ static void ipa3_tasklet_write_done(unsigned long data) spin_unlock_bh(&sys->spinlock); ipa3_wq_write_done_common(sys, this_pkt); spin_lock_bh(&sys->spinlock); + max_tx_pkt++; if (xmit_done) break; } + /* If TX packets processing continuously in tasklet other + * softirqs are not able to run on that core which is leading + * to watchdog bark. For avoiding these scenarios exit from + * tasklet after reaching max limit. + */ + if (max_tx_pkt == IPA_TX_MAX_DESC) + break; } spin_unlock_bh(&sys->spinlock); } From 704c7a2414658fb683452710c696b3e620ef7198 Mon Sep 17 00:00:00 2001 From: Mohammed Siddiq Date: Fri, 17 Apr 2020 15:27:57 +0530 Subject: [PATCH 80/90] icnss2: Add code to send exit power save over qmi Add code to send exit power save over qmi during wow exit. 
Change-Id: I926e85c17e16341446e56d8145de7333ac332aab Signed-off-by: Mohammed Siddiq --- drivers/soc/qcom/icnss2/main.c | 8 ++++ drivers/soc/qcom/icnss2/main.h | 3 ++ drivers/soc/qcom/icnss2/qmi.c | 73 ++++++++++++++++++++++++++++++++++ drivers/soc/qcom/icnss2/qmi.h | 6 +++ 4 files changed, 90 insertions(+) diff --git a/drivers/soc/qcom/icnss2/main.c b/drivers/soc/qcom/icnss2/main.c index 27c0484393f2..7836d52e9c7a 100644 --- a/drivers/soc/qcom/icnss2/main.c +++ b/drivers/soc/qcom/icnss2/main.c @@ -2763,6 +2763,14 @@ static int icnss_pm_resume(struct device *dev) !test_bit(ICNSS_DRIVER_PROBED, &priv->state)) goto out; + if (priv->device_id == WCN6750_DEVICE_ID) { + ret = wlfw_exit_power_save_send_msg(priv); + if (ret) { + priv->stats.pm_resume_err++; + return ret; + } + } + ret = priv->ops->pm_resume(dev); out: diff --git a/drivers/soc/qcom/icnss2/main.h b/drivers/soc/qcom/icnss2/main.h index f90ef2cf1011..c4311353c9e8 100644 --- a/drivers/soc/qcom/icnss2/main.h +++ b/drivers/soc/qcom/icnss2/main.h @@ -207,6 +207,9 @@ struct icnss_stats { uint32_t device_info_req; uint32_t device_info_resp; uint32_t device_info_err; + u32 exit_power_save_req; + u32 exit_power_save_resp; + u32 exit_power_save_err; }; #define WLFW_MAX_TIMESTAMP_LEN 32 diff --git a/drivers/soc/qcom/icnss2/qmi.c b/drivers/soc/qcom/icnss2/qmi.c index 1ed8df0efc37..21ccbe81888e 100644 --- a/drivers/soc/qcom/icnss2/qmi.c +++ b/drivers/soc/qcom/icnss2/qmi.c @@ -338,6 +338,79 @@ int wlfw_device_info_send_msg(struct icnss_priv *priv) return ret; } +int wlfw_exit_power_save_send_msg(struct icnss_priv *priv) +{ + int ret; + struct wlfw_exit_power_save_req_msg_v01 *req; + struct wlfw_exit_power_save_resp_msg_v01 *resp; + struct qmi_txn txn; + + if (!priv) + return -ENODEV; + + if (test_bit(ICNSS_FW_DOWN, &priv->state)) + return -EINVAL; + + icnss_pr_dbg("Sending exit power save, state: 0x%lx\n", + priv->state); + + req = kzalloc(sizeof(*req), GFP_KERNEL); + if (!req) + return -ENOMEM; + + resp = 
kzalloc(sizeof(*resp), GFP_KERNEL); + if (!resp) { + kfree(req); + return -ENOMEM; + } + + priv->stats.exit_power_save_req++; + + ret = qmi_txn_init(&priv->qmi, &txn, + wlfw_exit_power_save_resp_msg_v01_ei, resp); + if (ret < 0) { + icnss_qmi_fatal_err("Fail to init txn for exit power save%d\n", + ret); + goto out; + } + + ret = qmi_send_request(&priv->qmi, NULL, &txn, + QMI_WLFW_EXIT_POWER_SAVE_REQ_V01, + WLFW_EXIT_POWER_SAVE_REQ_MSG_V01_MAX_MSG_LEN, + wlfw_exit_power_save_req_msg_v01_ei, req); + if (ret < 0) { + qmi_txn_cancel(&txn); + icnss_qmi_fatal_err("Fail to send exit power save req %d\n", + ret); + goto out; + } + + ret = qmi_txn_wait(&txn, priv->ctrl_params.qmi_timeout); + if (ret < 0) { + icnss_qmi_fatal_err("Exit power save wait failed with ret %d\n", + ret); + goto out; + } else if (resp->resp.result != QMI_RESULT_SUCCESS_V01) { + icnss_qmi_fatal_err( + "QMI exit power save request rejected,result:%d error:%d\n", + resp->resp.result, resp->resp.error); + ret = -resp->resp.result; + goto out; + } + + priv->stats.exit_power_save_resp++; + + kfree(resp); + kfree(req); + return 0; + +out: + kfree(resp); + kfree(req); + priv->stats.exit_power_save_err++; + return ret; +} + int wlfw_ind_register_send_sync_msg(struct icnss_priv *priv) { int ret; diff --git a/drivers/soc/qcom/icnss2/qmi.h b/drivers/soc/qcom/icnss2/qmi.h index 9a053d9b7f9d..b088d966cefa 100644 --- a/drivers/soc/qcom/icnss2/qmi.h +++ b/drivers/soc/qcom/icnss2/qmi.h @@ -128,6 +128,11 @@ int wlfw_qdss_trace_mem_info_send_sync(struct icnss_priv *priv) { return 0; } + +int wlfw_exit_power_save_send_msg(struct icnss_priv *priv) +{ + return 0; +} #else int wlfw_ind_register_send_sync_msg(struct icnss_priv *priv); int icnss_connect_to_fw_server(struct icnss_priv *priv, void *data); @@ -163,6 +168,7 @@ int wlfw_wlan_mode_send_sync_msg(struct icnss_priv *priv, enum wlfw_driver_mode_enum_v01 mode); int icnss_wlfw_bdf_dnld_send_sync(struct icnss_priv *priv, u32 bdf_type); int 
wlfw_qdss_trace_mem_info_send_sync(struct icnss_priv *priv); +int wlfw_exit_power_save_send_msg(struct icnss_priv *priv); #endif #endif /* __ICNSS_QMI_H__*/ From 14fbbc7c4247b593a5d3f5a0bbf0eb68c3c74d6b Mon Sep 17 00:00:00 2001 From: Pratham Pratap Date: Wed, 15 Apr 2020 13:32:54 +0530 Subject: [PATCH 81/90] usb: phy: qusb2: Do not perform phy reset if EUD is enabled If EUD is enabled and phy reset is performed due to spoof connect, EUD enumeration fails. Fix this issue by reading EUD enable register before phy init. Check the status in and qusb_phy_dpdm_regulator_enable() which also performs phy reset. Change-Id: Ia2321d8608842036b9048767d72b485340b2c6ef Signed-off-by: Pratham Pratap --- drivers/usb/phy/phy-msm-qusb-v2.c | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/drivers/usb/phy/phy-msm-qusb-v2.c b/drivers/usb/phy/phy-msm-qusb-v2.c index b113be2785be..91b953fb05b0 100644 --- a/drivers/usb/phy/phy-msm-qusb-v2.c +++ b/drivers/usb/phy/phy-msm-qusb-v2.c @@ -96,6 +96,7 @@ struct qusb_phy { void __iomem *base; void __iomem *efuse_reg; void __iomem *refgen_north_bg_reg; + void __iomem *eud_enable_reg; struct clk *ref_clk_src; struct clk *ref_clk; @@ -518,6 +519,11 @@ static int qusb_phy_init(struct usb_phy *phy) dev_dbg(phy->dev, "%s\n", __func__); + if (qphy->eud_enable_reg && readl_relaxed(qphy->eud_enable_reg)) { + dev_err(qphy->phy.dev, "eud is enabled\n"); + return 0; + } + qusb_phy_reset(qphy); if (qphy->qusb_phy_host_init_seq && qphy->phy.flags & PHY_HOST_MODE) { @@ -825,6 +831,11 @@ static int qusb_phy_dpdm_regulator_enable(struct regulator_dev *rdev) dev_dbg(qphy->phy.dev, "%s dpdm_enable:%d\n", __func__, qphy->dpdm_enable); + if (qphy->eud_enable_reg && readl_relaxed(qphy->eud_enable_reg)) { + dev_err(qphy->phy.dev, "eud is enabled\n"); + return 0; + } + if (!qphy->dpdm_enable) { ret = qusb_phy_enable_power(qphy); if (ret < 0) { @@ -1000,6 +1011,16 @@ static int qusb_phy_probe(struct platform_device *pdev) qphy->refgen_north_bg_reg 
= devm_ioremap(dev, res->start, resource_size(res)); + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "eud_enable_reg"); + if (res) { + qphy->eud_enable_reg = devm_ioremap_resource(dev, res); + if (IS_ERR(qphy->eud_enable_reg)) { + dev_err(dev, "err getting eud_enable_reg address\n"); + return PTR_ERR(qphy->eud_enable_reg); + } + } + /* ref_clk_src is needed irrespective of SE_CLK or DIFF_CLK usage */ qphy->ref_clk_src = devm_clk_get(dev, "ref_clk_src"); if (IS_ERR(qphy->ref_clk_src)) { From 6d76c8cfc60d6008ff0a80633e60154f29c0c8b2 Mon Sep 17 00:00:00 2001 From: Naman Padhiar Date: Fri, 17 Apr 2020 21:50:20 +0530 Subject: [PATCH 82/90] icnss2: Add get info QMI messages support These QMI messages are used to get certain info from WLAN firmware. Results of the info request will be passed back to WLAN driver to do further processing. These QMI messages can be used to WMI command over QMI and get WMI event as QMI Indication and send it to WLAN driver. Change-Id: I8a7b513d95e9f10c815445b8ebd78f81607fb910 Signed-off-by: Naman Padhiar --- drivers/soc/qcom/icnss2/main.c | 26 ++++++++ drivers/soc/qcom/icnss2/main.h | 2 + drivers/soc/qcom/icnss2/qmi.c | 110 +++++++++++++++++++++++++++++++++ drivers/soc/qcom/icnss2/qmi.h | 8 +++ include/soc/qcom/icnss2.h | 3 + 5 files changed, 149 insertions(+) diff --git a/drivers/soc/qcom/icnss2/main.c b/drivers/soc/qcom/icnss2/main.c index 7836d52e9c7a..b869497d911c 100644 --- a/drivers/soc/qcom/icnss2/main.c +++ b/drivers/soc/qcom/icnss2/main.c @@ -1613,6 +1613,32 @@ static int icnss_enable_recovery(struct icnss_priv *priv) return 0; } +int icnss_qmi_send(struct device *dev, int type, void *cmd, + int cmd_len, void *cb_ctx, + int (*cb)(void *ctx, void *event, int event_len)) +{ + struct icnss_priv *priv = icnss_get_plat_priv(); + int ret; + + if (!priv) + return -ENODEV; + + if (!test_bit(ICNSS_WLFW_CONNECTED, &priv->state)) + return -EINVAL; + + priv->get_info_cb = cb; + priv->get_info_cb_ctx = cb_ctx; + + ret = 
icnss_wlfw_get_info_send_sync(priv, type, cmd, cmd_len); + if (ret) { + priv->get_info_cb = NULL; + priv->get_info_cb_ctx = NULL; + } + + return ret; +} +EXPORT_SYMBOL(icnss_qmi_send); + int __icnss_register_driver(struct icnss_driver_ops *ops, struct module *owner, const char *mod_name) { diff --git a/drivers/soc/qcom/icnss2/main.h b/drivers/soc/qcom/icnss2/main.h index c4311353c9e8..cd5d6dd244f3 100644 --- a/drivers/soc/qcom/icnss2/main.h +++ b/drivers/soc/qcom/icnss2/main.h @@ -340,6 +340,8 @@ struct icnss_priv { atomic_t is_shutdown; u32 qdss_mem_seg_len; struct icnss_fw_mem qdss_mem[QMI_WLFW_MAX_NUM_MEM_SEG]; + void *get_info_cb_ctx; + int (*get_info_cb)(void *ctx, void *event, int event_len); }; struct icnss_reg_info { diff --git a/drivers/soc/qcom/icnss2/qmi.c b/drivers/soc/qcom/icnss2/qmi.c index 21ccbe81888e..9a371e9dc834 100644 --- a/drivers/soc/qcom/icnss2/qmi.c +++ b/drivers/soc/qcom/icnss2/qmi.c @@ -460,6 +460,8 @@ int wlfw_ind_register_send_sync_msg(struct icnss_priv *priv) req->qdss_trace_save_enable = 1; req->qdss_trace_free_enable_valid = 1; req->qdss_trace_free_enable = 1; + req->respond_get_info_enable_valid = 1; + req->respond_get_info_enable = 1; } priv->stats.ind_register_req++; @@ -1712,6 +1714,32 @@ static void wlfw_qdss_trace_free_ind_cb(struct qmi_handle *qmi, icnss_driver_event_post(priv, ICNSS_DRIVER_EVENT_QDSS_TRACE_FREE, 0, NULL); } + +static void icnss_wlfw_respond_get_info_ind_cb(struct qmi_handle *qmi, + struct sockaddr_qrtr *sq, + struct qmi_txn *txn, + const void *data) +{ + struct icnss_priv *priv = container_of(qmi, struct icnss_priv, qmi); + const struct wlfw_respond_get_info_ind_msg_v01 *ind_msg = data; + + icnss_pr_vdbg("Received QMI WLFW respond get info indication\n"); + + if (!txn) { + icnss_pr_err("Spurious indication\n"); + return; + } + + icnss_pr_vdbg("Extract message with event length: %d, type: %d, is last: %d, seq no: %d\n", + ind_msg->data_len, ind_msg->type, + ind_msg->is_last, ind_msg->seq_no); + + if 
(priv->get_info_cb_ctx && priv->get_info_cb) + priv->get_info_cb(priv->get_info_cb_ctx, + (void *)ind_msg->data, + ind_msg->data_len); +} + static struct qmi_msg_handler wlfw_msg_handlers[] = { { .type = QMI_INDICATION, @@ -1780,6 +1808,14 @@ static struct qmi_msg_handler wlfw_msg_handlers[] = { sizeof(struct wlfw_qdss_trace_free_ind_msg_v01), .fn = wlfw_qdss_trace_free_ind_cb }, + { + .type = QMI_INDICATION, + .msg_id = QMI_WLFW_RESPOND_GET_INFO_IND_V01, + .ei = wlfw_respond_get_info_ind_msg_v01_ei, + .decoded_size = + sizeof(struct wlfw_respond_get_info_ind_msg_v01), + .fn = icnss_wlfw_respond_get_info_ind_cb + }, {} }; @@ -2141,3 +2177,77 @@ int wlfw_host_cap_send_sync(struct icnss_priv *priv) kfree(resp); return ret; } + +int icnss_wlfw_get_info_send_sync(struct icnss_priv *plat_priv, int type, + void *cmd, int cmd_len) +{ + struct wlfw_get_info_req_msg_v01 *req; + struct wlfw_get_info_resp_msg_v01 *resp; + struct qmi_txn txn; + int ret = 0; + + icnss_pr_dbg("Sending get info message, type: %d, cmd length: %d, state: 0x%lx\n", + type, cmd_len, plat_priv->state); + + if (cmd_len > QMI_WLFW_MAX_DATA_SIZE_V01) + return -EINVAL; + + if (test_bit(ICNSS_FW_DOWN, &priv->state)) + return -EINVAL; + + req = kzalloc(sizeof(*req), GFP_KERNEL); + if (!req) + return -ENOMEM; + + resp = kzalloc(sizeof(*resp), GFP_KERNEL); + if (!resp) { + kfree(req); + return -ENOMEM; + } + + req->type = type; + req->data_len = cmd_len; + memcpy(req->data, cmd, req->data_len); + + ret = qmi_txn_init(&plat_priv->qmi, &txn, + wlfw_get_info_resp_msg_v01_ei, resp); + if (ret < 0) { + icnss_pr_err("Failed to initialize txn for get info request, err: %d\n", + ret); + goto out; + } + + ret = qmi_send_request(&plat_priv->qmi, NULL, &txn, + QMI_WLFW_GET_INFO_REQ_V01, + WLFW_GET_INFO_REQ_MSG_V01_MAX_MSG_LEN, + wlfw_get_info_req_msg_v01_ei, req); + if (ret < 0) { + qmi_txn_cancel(&txn); + icnss_pr_err("Failed to send get info request, err: %d\n", + ret); + goto out; + } + + ret = qmi_txn_wait(&txn, 
plat_priv->ctrl_params.qmi_timeout); + if (ret < 0) { + icnss_pr_err("Failed to wait for response of get info request, err: %d\n", + ret); + goto out; + } + + if (resp->resp.result != QMI_RESULT_SUCCESS_V01) { + icnss_pr_err("Get info request failed, result: %d, err: %d\n", + resp->resp.result, resp->resp.error); + ret = -resp->resp.result; + goto out; + } + + kfree(req); + kfree(resp); + return 0; + +out: + kfree(req); + kfree(resp); + return ret; +} diff --git a/drivers/soc/qcom/icnss2/qmi.h b/drivers/soc/qcom/icnss2/qmi.h index b088d966cefa..c4c42cec5d4c 100644 --- a/drivers/soc/qcom/icnss2/qmi.h +++ b/drivers/soc/qcom/icnss2/qmi.h @@ -133,6 +133,12 @@ int wlfw_exit_power_save_send_msg(struct icnss_priv *priv) { return 0; } + +int icnss_wlfw_get_info_send_sync(struct icnss_priv *priv, int type, + void *cmd, int cmd_len) +{ + return 0; +} #else int wlfw_ind_register_send_sync_msg(struct icnss_priv *priv); int icnss_connect_to_fw_server(struct icnss_priv *priv, void *data); @@ -169,6 +175,8 @@ int wlfw_wlan_mode_send_sync_msg(struct icnss_priv *priv, int icnss_wlfw_bdf_dnld_send_sync(struct icnss_priv *priv, u32 bdf_type); int wlfw_qdss_trace_mem_info_send_sync(struct icnss_priv *priv); int wlfw_exit_power_save_send_msg(struct icnss_priv *priv); +int icnss_wlfw_get_info_send_sync(struct icnss_priv *priv, int type, + void *cmd, int cmd_len); #endif #endif /* __ICNSS_QMI_H__*/ diff --git a/include/soc/qcom/icnss2.h b/include/soc/qcom/icnss2.h index fca498fe20b7..64128de92986 100644 --- a/include/soc/qcom/icnss2.h +++ b/include/soc/qcom/icnss2.h @@ -164,4 +164,7 @@ extern int icnss_get_user_msi_assignment(struct device *dev, char *user_name, extern int icnss_get_msi_irq(struct device *dev, unsigned int vector); extern void icnss_get_msi_address(struct device *dev, u32 *msi_addr_low, u32 *msi_addr_high); +extern int icnss_qmi_send(struct device *dev, int type, void *cmd, + int cmd_len, void *cb_ctx, + int (*cb)(void *ctx, void *event, int event_len)); #endif /* 
_ICNSS_WLAN_H_ */ From 78db636fb3b05570155ac0e64c087b7f9f56c40f Mon Sep 17 00:00:00 2001 From: Naman Padhiar Date: Sat, 11 Apr 2020 20:06:18 +0530 Subject: [PATCH 83/90] icnss2: Correct bdf file name formation ICNSS2 platform driver form bdf file name based on board id and download bdf data to FW. Correct the formation of file name based on board id. Change-Id: I3a101d04f19199b10f9fa6d869f3f436eb88b4b2 Signed-off-by: Naman Padhiar --- drivers/soc/qcom/icnss2/qmi.c | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/drivers/soc/qcom/icnss2/qmi.c b/drivers/soc/qcom/icnss2/qmi.c index 21ccbe81888e..e65ce87b6d43 100644 --- a/drivers/soc/qcom/icnss2/qmi.c +++ b/drivers/soc/qcom/icnss2/qmi.c @@ -36,7 +36,9 @@ #define MAX_BDF_FILE_NAME 13 #define BDF_FILE_NAME_PREFIX "bdwlan" #define ELF_BDF_FILE_NAME "bdwlan.elf" +#define ELF_BDF_FILE_NAME_PREFIX "bdwlan.e" #define BIN_BDF_FILE_NAME "bdwlan.bin" +#define BIN_BDF_FILE_NAME_PREFIX "bdwlan.b" #define REGDB_FILE_NAME "regdb.bin" #define DUMMY_BDF_FILE_NAME "bdwlan.dmy" @@ -628,24 +630,26 @@ static int icnss_get_bdf_file_name(struct icnss_priv *priv, snprintf(filename, filename_len, ELF_BDF_FILE_NAME); else if (priv->board_id < 0xFF) snprintf(filename, filename_len, - BDF_FILE_NAME_PREFIX "e%02x", + ELF_BDF_FILE_NAME_PREFIX "%02x", priv->board_id); else snprintf(filename, filename_len, - BDF_FILE_NAME_PREFIX "%03x", - priv->board_id); + BDF_FILE_NAME_PREFIX "%02x.e%02x", + priv->board_id >> 8 & 0xFF, + priv->board_id & 0xFF); break; case ICNSS_BDF_BIN: if (priv->board_id == 0xFF) snprintf(filename, filename_len, BIN_BDF_FILE_NAME); else if (priv->board_id < 0xFF) snprintf(filename, filename_len, - BDF_FILE_NAME_PREFIX "b%02x", + BIN_BDF_FILE_NAME_PREFIX "%02x", priv->board_id); else snprintf(filename, filename_len, - BDF_FILE_NAME_PREFIX "%03x", - priv->board_id); + BDF_FILE_NAME_PREFIX "%02x.b%02x", + priv->board_id >> 8 & 0xFF, + priv->board_id & 0xFF); break; case ICNSS_BDF_REGDB: 
snprintf(filename, filename_len, REGDB_FILE_NAME); From e28cee8d25cf2a52584e81cefa1e4e214f6cd2dd Mon Sep 17 00:00:00 2001 From: Harshdeep Dhatt Date: Thu, 8 Aug 2019 21:01:28 -0600 Subject: [PATCH 84/90] msm: kgsl: Take GMU snapshot on GMU failures GMU can fail to turn on GX or it can fail the START HFI. So take snapshot and put GMU in NMI. When this happens, kgsl will call gmu_stop() and as GMU is already in fault aka NMI, reset the GMU and GPU. Change-Id: Iafc9b34063a7ff2415d3462dd289b52e425fbf3b Signed-off-by: Harshdeep Dhatt --- drivers/gpu/msm/adreno.c | 33 ++++++++++++++++++--------------- drivers/gpu/msm/kgsl_gmu.c | 11 +++++++++++ 2 files changed, 29 insertions(+), 15 deletions(-) diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c index 1a28f59dd8ea..0824ae79ed70 100644 --- a/drivers/gpu/msm/adreno.c +++ b/drivers/gpu/msm/adreno.c @@ -2042,12 +2042,16 @@ static int _adreno_start(struct adreno_device *adreno_dev) /* Send OOB request to turn on the GX */ status = gmu_core_dev_oob_set(device, oob_gpu); - if (status) + if (status) { + gmu_core_snapshot(device); goto error_mmu_off; + } status = gmu_core_dev_hfi_start_msg(device); - if (status) + if (status) { + gmu_core_snapshot(device); goto error_oob_clear; + } _set_secvid(device); @@ -2297,26 +2301,15 @@ static int adreno_stop(struct kgsl_device *device) error = gmu_core_dev_oob_set(device, oob_gpu); if (error) { gmu_core_dev_oob_clear(device, oob_gpu); - - if (gmu_core_regulator_isenabled(device)) { - /* GPU is on. 
Try recovery */ - set_bit(GMU_FAULT, &device->gmu_core.flags); gmu_core_snapshot(device); error = -EINVAL; - } + goto no_gx_power; } - adreno_dispatcher_stop(adreno_dev); - - adreno_ringbuffer_stop(adreno_dev); - kgsl_pwrscale_update_stats(device); adreno_irqctrl(adreno_dev, 0); - adreno_llc_deactivate_slice(adreno_dev->gpu_llc_slice); - adreno_llc_deactivate_slice(adreno_dev->gpuhtw_llc_slice); - /* Save active coresight registers if applicable */ adreno_coresight_stop(adreno_dev); @@ -2334,7 +2327,6 @@ static int adreno_stop(struct kgsl_device *device) */ if (!error && gmu_core_dev_wait_for_lowest_idle(device)) { - set_bit(GMU_FAULT, &device->gmu_core.flags); gmu_core_snapshot(device); /* * Assume GMU hang after 10ms without responding. @@ -2347,6 +2339,17 @@ static int adreno_stop(struct kgsl_device *device) adreno_clear_pending_transactions(device); +no_gx_power: + adreno_dispatcher_stop(adreno_dev); + + adreno_ringbuffer_stop(adreno_dev); + + if (!IS_ERR_OR_NULL(adreno_dev->gpu_llc_slice)) + llcc_slice_deactivate(adreno_dev->gpu_llc_slice); + + if (!IS_ERR_OR_NULL(adreno_dev->gpuhtw_llc_slice)) + llcc_slice_deactivate(adreno_dev->gpuhtw_llc_slice); + /* * The halt is not cleared in the above function if we have GBIF. 
* Clear it here if GMU is enabled as GMU stop needs access to diff --git a/drivers/gpu/msm/kgsl_gmu.c b/drivers/gpu/msm/kgsl_gmu.c index 1dc5b11e9b42..c763a6efe73f 100644 --- a/drivers/gpu/msm/kgsl_gmu.c +++ b/drivers/gpu/msm/kgsl_gmu.c @@ -1680,6 +1680,12 @@ static void gmu_stop(struct kgsl_device *device) if (!test_bit(GMU_CLK_ON, &device->gmu_core.flags)) return; + /* Force suspend if gmu is already in fault */ + if (test_bit(GMU_FAULT, &device->gmu_core.flags)) { + gmu_core_suspend(device); + return; + } + /* Wait for the lowest idle level we requested */ if (gmu_core_dev_wait_for_lowest_idle(device)) goto error; @@ -1713,6 +1719,11 @@ static void gmu_stop(struct kgsl_device *device) set_bit(GMU_FAULT, &device->gmu_core.flags); dev_err(&gmu->pdev->dev, "Failed to stop GMU\n"); gmu_core_snapshot(device); + /* + * We failed to stop the gmu successfully. Force a suspend + * to set things up for a fresh start. + */ + gmu_core_suspend(device); } static void gmu_remove(struct kgsl_device *device) From c68c41e0e3124953cd49cf5245339b5cdeec8dff Mon Sep 17 00:00:00 2001 From: Ashish Chavan Date: Fri, 1 May 2020 10:34:08 +0530 Subject: [PATCH 85/90] power: smb1398: Fix SOC based SMB enable condition The existing logic allows SMB's to enable even if the SOC is above the cutoff-threshold. Fix it by negating this logic. Change-Id: I294874754e79557f12b76c383a6e79d90008acc5 Signed-off-by: Ashish Chavan --- drivers/power/supply/qcom/smb1398-charger.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/power/supply/qcom/smb1398-charger.c b/drivers/power/supply/qcom/smb1398-charger.c index 3d6c3ae2a6a7..2c2e339ee7ba 100644 --- a/drivers/power/supply/qcom/smb1398-charger.c +++ b/drivers/power/supply/qcom/smb1398-charger.c @@ -1624,7 +1624,7 @@ static void smb1398_status_change_work(struct work_struct *work) * valid due to the battery discharging later, remove * vote from CUTOFF_SOC_VOTER. 
*/ - if (is_cutoff_soc_reached(chip)) + if (!is_cutoff_soc_reached(chip)) vote(chip->div2_cp_disable_votable, CUTOFF_SOC_VOTER, false, 0); rc = power_supply_get_property(chip->usb_psy, From 062908b3cf5cf125da58b5b1ec5da051789c11a9 Mon Sep 17 00:00:00 2001 From: Pankaj Gupta Date: Mon, 20 Apr 2020 17:47:37 +0530 Subject: [PATCH 86/90] msm: kgsl: Fix snapshot collection after preempt trigger failure Do not set FAULT_SKIP_SNAPSHOT in case of not collecting inline snapshot. Change-Id: I5f3de5881768ab07de989010096a58bea749c680 Signed-off-by: Pankaj Gupta --- drivers/gpu/msm/adreno_a6xx_preempt.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/msm/adreno_a6xx_preempt.c b/drivers/gpu/msm/adreno_a6xx_preempt.c index 065699886f81..434cd525578e 100644 --- a/drivers/gpu/msm/adreno_a6xx_preempt.c +++ b/drivers/gpu/msm/adreno_a6xx_preempt.c @@ -389,10 +389,14 @@ void a6xx_preemption_trigger(struct adreno_device *adreno_dev) return; err: /* If fenced write fails, take inline snapshot and trigger recovery */ - if (!in_interrupt()) + if (!in_interrupt()) { gmu_core_snapshot(device); + adreno_set_gpu_fault(adreno_dev, + ADRENO_GMU_FAULT_SKIP_SNAPSHOT); + } else { + adreno_set_gpu_fault(adreno_dev, ADRENO_GMU_FAULT); + } adreno_set_preempt_state(adreno_dev, ADRENO_PREEMPT_NONE); - adreno_set_gpu_fault(adreno_dev, ADRENO_GMU_FAULT_SKIP_SNAPSHOT); adreno_dispatcher_schedule(device); /* Clear the keep alive */ if (gmu_core_isenabled(device)) From 06c077e19cfe9437032ac6026987ef2e7096c4bb Mon Sep 17 00:00:00 2001 From: Harshdeep Dhatt Date: Thu, 8 Aug 2019 21:15:30 -0600 Subject: [PATCH 87/90] msm: kgsl: Restart a6xx gpu only once MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently, as part of fault recovery, we try to restart the a6xx gpu five times. This loop is for legacy reasons for a hardware condition that existed on a4xx. That doesn’t exist for a6xx so remove the loop. 
Change-Id: Ib6f7437bab2b424c4f632efca459ff8c3fd064e4 Signed-off-by: Harshdeep Dhatt --- drivers/gpu/msm/adreno_a6xx.c | 30 ++++++++---------------------- 1 file changed, 8 insertions(+), 22 deletions(-) diff --git a/drivers/gpu/msm/adreno_a6xx.c b/drivers/gpu/msm/adreno_a6xx.c index f0cbf2608aac..41ab0f7d957e 100644 --- a/drivers/gpu/msm/adreno_a6xx.c +++ b/drivers/gpu/msm/adreno_a6xx.c @@ -1103,8 +1103,7 @@ static int64_t a6xx_read_throttling_counters(struct adreno_device *adreno_dev) static int a6xx_reset(struct kgsl_device *device, int fault) { struct adreno_device *adreno_dev = ADRENO_DEVICE(device); - int ret = -EINVAL; - int i = 0; + int ret; /* Use the regular reset sequence for No GMU */ if (!gmu_core_isenabled(device)) @@ -1116,33 +1115,20 @@ static int a6xx_reset(struct kgsl_device *device, int fault) /* since device is officially off now clear start bit */ clear_bit(ADRENO_DEVICE_STARTED, &adreno_dev->priv); - /* Keep trying to start the device until it works */ - for (i = 0; i < NUM_TIMES_RESET_RETRY; i++) { - ret = adreno_start(device, 0); - if (!ret) - break; - - msleep(20); - } - + ret = adreno_start(device, 0); if (ret) return ret; - if (i != 0) - dev_warn(device->dev, - "Device hard reset tried %d tries\n", i); + kgsl_pwrctrl_change_state(device, KGSL_STATE_ACTIVE); /* - * If active_cnt is non-zero then the system was active before - * going into a reset - put it back in that state + * If active_cnt is zero, there is no need to keep the GPU active. So, + * we should transition to SLUMBER. 
*/ + if (!atomic_read(&device->active_cnt)) + kgsl_pwrctrl_change_state(device, KGSL_STATE_SLUMBER); - if (atomic_read(&device->active_cnt)) - kgsl_pwrctrl_change_state(device, KGSL_STATE_ACTIVE); - else - kgsl_pwrctrl_change_state(device, KGSL_STATE_NAP); - - return ret; + return 0; } static void a6xx_cp_hw_err_callback(struct adreno_device *adreno_dev, int bit) From 458eea1748912628f139d8987f33d6bf4a4a200d Mon Sep 17 00:00:00 2001 From: Mao Jinlong Date: Fri, 8 May 2020 10:44:44 +0800 Subject: [PATCH 88/90] qdss_bridge: Add status check in usb_notifier Add status check in usb_notifier to avoid the exceptions. Change-Id: I6f9f2fc66c0b26329153783dde2969ef60edc20a Signed-off-by: Mao Jinlong --- drivers/soc/qcom/qdss_bridge.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/soc/qcom/qdss_bridge.c b/drivers/soc/qcom/qdss_bridge.c index 4d15790f486f..92af4dcb6007 100644 --- a/drivers/soc/qcom/qdss_bridge.c +++ b/drivers/soc/qcom/qdss_bridge.c @@ -449,8 +449,12 @@ static void usb_notifier(void *priv, unsigned int event, { struct qdss_bridge_drvdata *drvdata = priv; - if (!drvdata) + if (!drvdata || drvdata->mode != MHI_TRANSFER_TYPE_USB + || drvdata->opened == DISABLE) { + pr_err_ratelimited("%s can't be called in invalid status.\n", + __func__); return; + } switch (event) { case USB_QDSS_CONNECT: From 37ff6dd39c3e1d533db93415c867570ff724a2a9 Mon Sep 17 00:00:00 2001 From: Udipto Goswami Date: Wed, 15 Apr 2020 14:35:52 +0530 Subject: [PATCH 89/90] usb: f_qdss: Setting channel to NULL in qdss_close When qdss_close is executed before disconnect_work there is possibility that from disconnect_work channel priv would be passed as NULL to the notifier resulting in page fault error. Fix this by setting notify to NULL on qdss_close. 
Change-Id: Ifdfc43733221fdc1dc3bcd8a40f32e1b021398a8 Signed-off-by: Udipto Goswami --- drivers/usb/gadget/function/f_qdss.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/usb/gadget/function/f_qdss.c b/drivers/usb/gadget/function/f_qdss.c index dff1b5d45489..d3e345e2585f 100644 --- a/drivers/usb/gadget/function/f_qdss.c +++ b/drivers/usb/gadget/function/f_qdss.c @@ -944,6 +944,7 @@ void usb_qdss_close(struct usb_qdss_ch *ch) usb_qdss_free_req(ch); close: ch->priv_usb = NULL; + ch->notify = NULL; if (!qdss || !qdss->usb_connected || !strcmp(qdss->ch.name, USB_QDSS_CH_MDM)) { ch->app_conn = 0; From f948f51813f1398af774f83767271ba17ea8be9e Mon Sep 17 00:00:00 2001 From: Anirudh Ghayal Date: Thu, 14 May 2020 21:33:33 +0530 Subject: [PATCH 90/90] power: smb5-lib: Use the unlocked version votable for FV Float voltage is read in the batt_health() callback. batt_health() is called from batt_status() and batt_status() is also called from the FV callback leading to a deadlock. Fix it by using the unlocked version of votable while reading FV. Change-Id: Ie6f9f683c048a2a1c64d46716433bcff359dc3b0 Signed-off-by: Anirudh Ghayal --- drivers/power/supply/qcom/smb5-lib.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/power/supply/qcom/smb5-lib.c b/drivers/power/supply/qcom/smb5-lib.c index 9cdd1e6522ee..7d0ec5b1e985 100644 --- a/drivers/power/supply/qcom/smb5-lib.c +++ b/drivers/power/supply/qcom/smb5-lib.c @@ -2177,7 +2177,8 @@ int smblib_get_prop_batt_health(struct smb_charger *chg, * If Vbatt is within 40mV above Vfloat, then don't * treat it as overvoltage. */ - effective_fv_uv = get_effective_result(chg->fv_votable); + effective_fv_uv = get_effective_result_locked( + chg->fv_votable); if (pval.intval >= effective_fv_uv + 40000) { val->intval = POWER_SUPPLY_HEALTH_OVERVOLTAGE; smblib_err(chg, "battery over-voltage vbat_fg = %duV, fv = %duV\n",