--- /dev/null
+/* Copyright (c) 2014, 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __ASM_DEBUGV8_H
+#define __ASM_DEBUGV8_H
+
+#include <linux/types.h>
+
+/* 32-bit system register reads on AArch64 */
+#define dbg_readl(reg) RSYSL_##reg()
+/* 64-bit system register reads on AArch64 */
+#define dbg_readq(reg) RSYSQ_##reg()
+/* 32- and 64-bit system register writes on AArch64 */
+#define dbg_write(val, reg) WSYS_##reg(val)
+
+/*
+ * Read a system register by name as a 32-bit value. Expands to a GNU
+ * statement expression that issues "mrs" and yields the register value.
+ */
+#define MRSL(reg) \
+({ \
+uint32_t val; \
+asm volatile("mrs %0, "#reg : "=r" (val)); \
+val; \
+})
+
+/*
+ * Read a system register by name as a 64-bit value. Expands to a GNU
+ * statement expression that issues "mrs" and yields the register value.
+ */
+#define MRSQ(reg) \
+({ \
+uint64_t val; \
+asm volatile("mrs %0, "#reg : "=r" (val)); \
+val; \
+})
+
+/* Write @val to the named system register via "msr". */
+#define MSR(val, reg) \
+({ \
+asm volatile("msr "#reg", %0" : : "r" (val)); \
+})
+
+/*
+ * Debug Feature Register
+ *
+ * Read only
+ */
+#define RSYSQ_ID_AA64DFR0_EL1() MRSQ(ID_AA64DFR0_EL1)
+
+/*
+ * Debug Registers
+ *
+ * Available only in DBGv8
+ *
+ * Read only
+ * MDCCSR_EL0, MDRAR_EL1, OSLSR_EL1, DBGDTRRX_EL0, DBGAUTHSTATUS_EL1
+ *
+ * Write only
+ * DBGDTRTX_EL0, OSLAR_EL1
+ */
+/* 32 bit registers */
+#define RSYSL_DBGDTRRX_EL0() MRSL(DBGDTRRX_EL0)
+#define RSYSL_MDCCSR_EL0() MRSL(MDCCSR_EL0)
+#define RSYSL_MDSCR_EL1() MRSL(MDSCR_EL1)
+#define RSYSL_OSDTRRX_EL1() MRSL(OSDTRRX_EL1)
+#define RSYSL_OSDTRTX_EL1() MRSL(OSDTRTX_EL1)
+#define RSYSL_OSDLR_EL1() MRSL(OSDLR_EL1)
+#define RSYSL_OSLSR_EL1() MRSL(OSLSR_EL1)
+#define RSYSL_MDCCINT_EL1() MRSL(MDCCINT_EL1)
+#define RSYSL_OSECCR_EL1() MRSL(OSECCR_EL1)
+#define RSYSL_DBGPRCR_EL1() MRSL(DBGPRCR_EL1)
+#define RSYSL_DBGBCR0_EL1() MRSL(DBGBCR0_EL1)
+#define RSYSL_DBGBCR1_EL1() MRSL(DBGBCR1_EL1)
+#define RSYSL_DBGBCR2_EL1() MRSL(DBGBCR2_EL1)
+#define RSYSL_DBGBCR3_EL1() MRSL(DBGBCR3_EL1)
+#define RSYSL_DBGBCR4_EL1() MRSL(DBGBCR4_EL1)
+#define RSYSL_DBGBCR5_EL1() MRSL(DBGBCR5_EL1)
+#define RSYSL_DBGBCR6_EL1() MRSL(DBGBCR6_EL1)
+#define RSYSL_DBGBCR7_EL1() MRSL(DBGBCR7_EL1)
+#define RSYSL_DBGBCR8_EL1() MRSL(DBGBCR8_EL1)
+#define RSYSL_DBGBCR9_EL1() MRSL(DBGBCR9_EL1)
+#define RSYSL_DBGBCR10_EL1() MRSL(DBGBCR10_EL1)
+#define RSYSL_DBGBCR11_EL1() MRSL(DBGBCR11_EL1)
+#define RSYSL_DBGBCR12_EL1() MRSL(DBGBCR12_EL1)
+#define RSYSL_DBGBCR13_EL1() MRSL(DBGBCR13_EL1)
+#define RSYSL_DBGBCR14_EL1() MRSL(DBGBCR14_EL1)
+#define RSYSL_DBGBCR15_EL1() MRSL(DBGBCR15_EL1)
+#define RSYSL_DBGWCR0_EL1() MRSL(DBGWCR0_EL1)
+#define RSYSL_DBGWCR1_EL1() MRSL(DBGWCR1_EL1)
+#define RSYSL_DBGWCR2_EL1() MRSL(DBGWCR2_EL1)
+#define RSYSL_DBGWCR3_EL1() MRSL(DBGWCR3_EL1)
+#define RSYSL_DBGWCR4_EL1() MRSL(DBGWCR4_EL1)
+#define RSYSL_DBGWCR5_EL1() MRSL(DBGWCR5_EL1)
+#define RSYSL_DBGWCR6_EL1() MRSL(DBGWCR6_EL1)
+#define RSYSL_DBGWCR7_EL1() MRSL(DBGWCR7_EL1)
+#define RSYSL_DBGWCR8_EL1() MRSL(DBGWCR8_EL1)
+#define RSYSL_DBGWCR9_EL1() MRSL(DBGWCR9_EL1)
+#define RSYSL_DBGWCR10_EL1() MRSL(DBGWCR10_EL1)
+#define RSYSL_DBGWCR11_EL1() MRSL(DBGWCR11_EL1)
+#define RSYSL_DBGWCR12_EL1() MRSL(DBGWCR12_EL1)
+#define RSYSL_DBGWCR13_EL1() MRSL(DBGWCR13_EL1)
+#define RSYSL_DBGWCR14_EL1() MRSL(DBGWCR14_EL1)
+#define RSYSL_DBGWCR15_EL1() MRSL(DBGWCR15_EL1)
+#define RSYSL_DBGCLAIMSET_EL1() MRSL(DBGCLAIMSET_EL1)
+#define RSYSL_DBGCLAIMCLR_EL1() MRSL(DBGCLAIMCLR_EL1)
+#define RSYSL_DBGAUTHSTATUS_EL1() MRSL(DBGAUTHSTATUS_EL1)
+#define RSYSL_DBGVCR32_EL2() MRSL(DBGVCR32_EL2)
+#define RSYSL_MDCR_EL2() MRSL(MDCR_EL2)
+#define RSYSL_MDCR_EL3() MRSL(MDCR_EL3)
+/* 64 bit registers */
+#define RSYSQ_DBGDTR_EL0() MRSQ(DBGDTR_EL0)
+#define RSYSQ_MDRAR_EL1() MRSQ(MDRAR_EL1)
+#define RSYSQ_DBGBVR0_EL1() MRSQ(DBGBVR0_EL1)
+#define RSYSQ_DBGBVR1_EL1() MRSQ(DBGBVR1_EL1)
+#define RSYSQ_DBGBVR2_EL1() MRSQ(DBGBVR2_EL1)
+#define RSYSQ_DBGBVR3_EL1() MRSQ(DBGBVR3_EL1)
+#define RSYSQ_DBGBVR4_EL1() MRSQ(DBGBVR4_EL1)
+#define RSYSQ_DBGBVR5_EL1() MRSQ(DBGBVR5_EL1)
+#define RSYSQ_DBGBVR6_EL1() MRSQ(DBGBVR6_EL1)
+#define RSYSQ_DBGBVR7_EL1() MRSQ(DBGBVR7_EL1)
+#define RSYSQ_DBGBVR8_EL1() MRSQ(DBGBVR8_EL1)
+#define RSYSQ_DBGBVR9_EL1() MRSQ(DBGBVR9_EL1)
+#define RSYSQ_DBGBVR10_EL1() MRSQ(DBGBVR10_EL1)
+#define RSYSQ_DBGBVR11_EL1() MRSQ(DBGBVR11_EL1)
+#define RSYSQ_DBGBVR12_EL1() MRSQ(DBGBVR12_EL1)
+#define RSYSQ_DBGBVR13_EL1() MRSQ(DBGBVR13_EL1)
+#define RSYSQ_DBGBVR14_EL1() MRSQ(DBGBVR14_EL1)
+#define RSYSQ_DBGBVR15_EL1() MRSQ(DBGBVR15_EL1)
+#define RSYSQ_DBGWVR0_EL1() MRSQ(DBGWVR0_EL1)
+#define RSYSQ_DBGWVR1_EL1() MRSQ(DBGWVR1_EL1)
+#define RSYSQ_DBGWVR2_EL1() MRSQ(DBGWVR2_EL1)
+#define RSYSQ_DBGWVR3_EL1() MRSQ(DBGWVR3_EL1)
+#define RSYSQ_DBGWVR4_EL1() MRSQ(DBGWVR4_EL1)
+#define RSYSQ_DBGWVR5_EL1() MRSQ(DBGWVR5_EL1)
+#define RSYSQ_DBGWVR6_EL1() MRSQ(DBGWVR6_EL1)
+#define RSYSQ_DBGWVR7_EL1() MRSQ(DBGWVR7_EL1)
+#define RSYSQ_DBGWVR8_EL1() MRSQ(DBGWVR8_EL1)
+#define RSYSQ_DBGWVR9_EL1() MRSQ(DBGWVR9_EL1)
+#define RSYSQ_DBGWVR10_EL1() MRSQ(DBGWVR10_EL1)
+#define RSYSQ_DBGWVR11_EL1() MRSQ(DBGWVR11_EL1)
+#define RSYSQ_DBGWVR12_EL1() MRSQ(DBGWVR12_EL1)
+#define RSYSQ_DBGWVR13_EL1() MRSQ(DBGWVR13_EL1)
+#define RSYSQ_DBGWVR14_EL1() MRSQ(DBGWVR14_EL1)
+#define RSYSQ_DBGWVR15_EL1() MRSQ(DBGWVR15_EL1)
+
+/* 32 bit registers */
+#define WSYS_DBGDTRTX_EL0(val) MSR(val, DBGDTRTX_EL0)
+#define WSYS_MDCCINT_EL1(val) MSR(val, MDCCINT_EL1)
+#define WSYS_MDSCR_EL1(val) MSR(val, MDSCR_EL1)
+#define WSYS_OSDTRRX_EL1(val) MSR(val, OSDTRRX_EL1)
+#define WSYS_OSDTRTX_EL1(val) MSR(val, OSDTRTX_EL1)
+#define WSYS_OSDLR_EL1(val) MSR(val, OSDLR_EL1)
+#define WSYS_OSECCR_EL1(val) MSR(val, OSECCR_EL1)
+#define WSYS_DBGPRCR_EL1(val) MSR(val, DBGPRCR_EL1)
+#define WSYS_DBGBCR0_EL1(val) MSR(val, DBGBCR0_EL1)
+#define WSYS_DBGBCR1_EL1(val) MSR(val, DBGBCR1_EL1)
+#define WSYS_DBGBCR2_EL1(val) MSR(val, DBGBCR2_EL1)
+#define WSYS_DBGBCR3_EL1(val) MSR(val, DBGBCR3_EL1)
+#define WSYS_DBGBCR4_EL1(val) MSR(val, DBGBCR4_EL1)
+#define WSYS_DBGBCR5_EL1(val) MSR(val, DBGBCR5_EL1)
+#define WSYS_DBGBCR6_EL1(val) MSR(val, DBGBCR6_EL1)
+#define WSYS_DBGBCR7_EL1(val) MSR(val, DBGBCR7_EL1)
+#define WSYS_DBGBCR8_EL1(val) MSR(val, DBGBCR8_EL1)
+#define WSYS_DBGBCR9_EL1(val) MSR(val, DBGBCR9_EL1)
+#define WSYS_DBGBCR10_EL1(val) MSR(val, DBGBCR10_EL1)
+#define WSYS_DBGBCR11_EL1(val) MSR(val, DBGBCR11_EL1)
+#define WSYS_DBGBCR12_EL1(val) MSR(val, DBGBCR12_EL1)
+#define WSYS_DBGBCR13_EL1(val) MSR(val, DBGBCR13_EL1)
+#define WSYS_DBGBCR14_EL1(val) MSR(val, DBGBCR14_EL1)
+#define WSYS_DBGBCR15_EL1(val) MSR(val, DBGBCR15_EL1)
+#define WSYS_DBGWCR0_EL1(val) MSR(val, DBGWCR0_EL1)
+#define WSYS_DBGWCR1_EL1(val) MSR(val, DBGWCR1_EL1)
+#define WSYS_DBGWCR2_EL1(val) MSR(val, DBGWCR2_EL1)
+#define WSYS_DBGWCR3_EL1(val) MSR(val, DBGWCR3_EL1)
+#define WSYS_DBGWCR4_EL1(val) MSR(val, DBGWCR4_EL1)
+#define WSYS_DBGWCR5_EL1(val) MSR(val, DBGWCR5_EL1)
+#define WSYS_DBGWCR6_EL1(val) MSR(val, DBGWCR6_EL1)
+#define WSYS_DBGWCR7_EL1(val) MSR(val, DBGWCR7_EL1)
+#define WSYS_DBGWCR8_EL1(val) MSR(val, DBGWCR8_EL1)
+#define WSYS_DBGWCR9_EL1(val) MSR(val, DBGWCR9_EL1)
+#define WSYS_DBGWCR10_EL1(val) MSR(val, DBGWCR10_EL1)
+#define WSYS_DBGWCR11_EL1(val) MSR(val, DBGWCR11_EL1)
+#define WSYS_DBGWCR12_EL1(val) MSR(val, DBGWCR12_EL1)
+#define WSYS_DBGWCR13_EL1(val) MSR(val, DBGWCR13_EL1)
+#define WSYS_DBGWCR14_EL1(val) MSR(val, DBGWCR14_EL1)
+#define WSYS_DBGWCR15_EL1(val) MSR(val, DBGWCR15_EL1)
+#define WSYS_DBGCLAIMSET_EL1(val) MSR(val, DBGCLAIMSET_EL1)
+#define WSYS_DBGCLAIMCLR_EL1(val) MSR(val, DBGCLAIMCLR_EL1)
+#define WSYS_OSLAR_EL1(val) MSR(val, OSLAR_EL1)
+#define WSYS_DBGVCR32_EL2(val) MSR(val, DBGVCR32_EL2)
+#define WSYS_MDCR_EL2(val) MSR(val, MDCR_EL2)
+#define WSYS_MDCR_EL3(val) MSR(val, MDCR_EL3)
+/* 64 bit registers */
+#define WSYS_DBGDTR_EL0(val) MSR(val, DBGDTR_EL0)
+#define WSYS_DBGBVR0_EL1(val) MSR(val, DBGBVR0_EL1)
+#define WSYS_DBGBVR1_EL1(val) MSR(val, DBGBVR1_EL1)
+#define WSYS_DBGBVR2_EL1(val) MSR(val, DBGBVR2_EL1)
+#define WSYS_DBGBVR3_EL1(val) MSR(val, DBGBVR3_EL1)
+#define WSYS_DBGBVR4_EL1(val) MSR(val, DBGBVR4_EL1)
+#define WSYS_DBGBVR5_EL1(val) MSR(val, DBGBVR5_EL1)
+#define WSYS_DBGBVR6_EL1(val) MSR(val, DBGBVR6_EL1)
+#define WSYS_DBGBVR7_EL1(val) MSR(val, DBGBVR7_EL1)
+#define WSYS_DBGBVR8_EL1(val) MSR(val, DBGBVR8_EL1)
+#define WSYS_DBGBVR9_EL1(val) MSR(val, DBGBVR9_EL1)
+#define WSYS_DBGBVR10_EL1(val) MSR(val, DBGBVR10_EL1)
+#define WSYS_DBGBVR11_EL1(val) MSR(val, DBGBVR11_EL1)
+#define WSYS_DBGBVR12_EL1(val) MSR(val, DBGBVR12_EL1)
+#define WSYS_DBGBVR13_EL1(val) MSR(val, DBGBVR13_EL1)
+#define WSYS_DBGBVR14_EL1(val) MSR(val, DBGBVR14_EL1)
+#define WSYS_DBGBVR15_EL1(val) MSR(val, DBGBVR15_EL1)
+#define WSYS_DBGWVR0_EL1(val) MSR(val, DBGWVR0_EL1)
+#define WSYS_DBGWVR1_EL1(val) MSR(val, DBGWVR1_EL1)
+#define WSYS_DBGWVR2_EL1(val) MSR(val, DBGWVR2_EL1)
+#define WSYS_DBGWVR3_EL1(val) MSR(val, DBGWVR3_EL1)
+#define WSYS_DBGWVR4_EL1(val) MSR(val, DBGWVR4_EL1)
+#define WSYS_DBGWVR5_EL1(val) MSR(val, DBGWVR5_EL1)
+#define WSYS_DBGWVR6_EL1(val) MSR(val, DBGWVR6_EL1)
+#define WSYS_DBGWVR7_EL1(val) MSR(val, DBGWVR7_EL1)
+#define WSYS_DBGWVR8_EL1(val) MSR(val, DBGWVR8_EL1)
+#define WSYS_DBGWVR9_EL1(val) MSR(val, DBGWVR9_EL1)
+#define WSYS_DBGWVR10_EL1(val) MSR(val, DBGWVR10_EL1)
+#define WSYS_DBGWVR11_EL1(val) MSR(val, DBGWVR11_EL1)
+#define WSYS_DBGWVR12_EL1(val) MSR(val, DBGWVR12_EL1)
+#define WSYS_DBGWVR13_EL1(val) MSR(val, DBGWVR13_EL1)
+#define WSYS_DBGWVR14_EL1(val) MSR(val, DBGWVR14_EL1)
+#define WSYS_DBGWVR15_EL1(val) MSR(val, DBGWVR15_EL1)
+
+#endif /* __ASM_DEBUGV8_H */
--- /dev/null
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __ASM_ETMV4X_H
+#define __ASM_ETMV4X_H
+
+#include <linux/types.h>
+
+/* 32 bit register reads for AArch64 */
+#define trc_readl(reg) RSYSL_##reg()
+/* 64 bit register reads for AArch64 */
+#define trc_readq(reg) RSYSQ_##reg()
+/* 32 and 64 bit register writes for AArch64 */
+#define trc_write(val, reg) WSYS_##reg(val)
+
+/*
+ * Generic system-instruction accessors. ETM registers have no assembler
+ * mnemonic, so the S<op0>_<op1>_<Cn>_<Cm>_<op2> encoded name is built by
+ * token pasting from the individual encoding fields.
+ */
+/* 32-bit encoded-register read; yields the value via a statement expression */
+#define MRSL(op0, op1, crn, crm, op2) \
+({ \
+uint32_t val; \
+asm volatile("mrs %0, S"#op0"_"#op1"_"#crn"_"#crm"_"#op2 : "=r" (val)); \
+val; \
+})
+
+/* 64-bit encoded-register read */
+#define MRSQ(op0, op1, crn, crm, op2) \
+({ \
+uint64_t val; \
+asm volatile("mrs %0, S"#op0"_"#op1"_"#crn"_"#crm"_"#op2 : "=r" (val)); \
+val; \
+})
+
+/* Encoded-register write of @val */
+#define MSR(val, op0, op1, crn, crm, op2) \
+({ \
+asm volatile("msr S"#op0"_"#op1"_"#crn"_"#crm"_"#op2", %0" : : "r" (val)); \
+})
+
+/* Clock and Power Management Register */
+#define RSYSL_CPMR_EL1() MRSL(3, 7, c15, c0, 5)
+#define WSYS_CPMR_EL1(val) MSR(val, 3, 7, c15, c0, 5)
+
+/*
+ * ETMv4 Registers
+ *
+ * Read only
+ * ETMAUTHSTATUS, ETMDEVARCH, ETMDEVID, ETMIDRn[0-13], ETMOSLSR, ETMSTATR
+ *
+ * Write only
+ * ETMOSLAR
+ */
+/* 32 bit registers */
+#define RSYSL_ETMAUTHSTATUS() MRSL(2, 1, c7, c14, 6)
+#define RSYSL_ETMAUXCTLR() MRSL(2, 1, c0, c6, 0)
+#define RSYSL_ETMCCCTLR() MRSL(2, 1, c0, c14, 0)
+#define RSYSL_ETMCIDCCTLR0() MRSL(2, 1, c3, c0, 2)
+#define RSYSL_ETMCNTCTLR0() MRSL(2, 1, c0, c4, 5)
+#define RSYSL_ETMCNTCTLR1() MRSL(2, 1, c0, c5, 5)
+#define RSYSL_ETMCNTCTLR2() MRSL(2, 1, c0, c6, 5)
+#define RSYSL_ETMCNTCTLR3() MRSL(2, 1, c0, c7, 5)
+#define RSYSL_ETMCNTRLDVR0() MRSL(2, 1, c0, c0, 5)
+#define RSYSL_ETMCNTRLDVR1() MRSL(2, 1, c0, c1, 5)
+#define RSYSL_ETMCNTRLDVR2() MRSL(2, 1, c0, c2, 5)
+#define RSYSL_ETMCNTRLDVR3() MRSL(2, 1, c0, c3, 5)
+#define RSYSL_ETMCNTVR0() MRSL(2, 1, c0, c8, 5)
+#define RSYSL_ETMCNTVR1() MRSL(2, 1, c0, c9, 5)
+#define RSYSL_ETMCNTVR2() MRSL(2, 1, c0, c10, 5)
+#define RSYSL_ETMCNTVR3() MRSL(2, 1, c0, c11, 5)
+#define RSYSL_ETMCONFIGR() MRSL(2, 1, c0, c4, 0)
+#define RSYSL_ETMDEVARCH() MRSL(2, 1, c7, c15, 6)
+#define RSYSL_ETMDEVID() MRSL(2, 1, c7, c2, 7)
+#define RSYSL_ETMEVENTCTL0R() MRSL(2, 1, c0, c8, 0)
+#define RSYSL_ETMEVENTCTL1R() MRSL(2, 1, c0, c9, 0)
+#define RSYSL_ETMEXTINSELR() MRSL(2, 1, c0, c8, 4)
+#define RSYSL_ETMIDR0() MRSL(2, 1, c0, c8, 7)
+#define RSYSL_ETMIDR1() MRSL(2, 1, c0, c9, 7)
+#define RSYSL_ETMIDR10() MRSL(2, 1, c0, c2, 6)
+#define RSYSL_ETMIDR11() MRSL(2, 1, c0, c3, 6)
+#define RSYSL_ETMIDR12() MRSL(2, 1, c0, c4, 6)
+#define RSYSL_ETMIDR13() MRSL(2, 1, c0, c5, 6)
+#define RSYSL_ETMIDR2() MRSL(2, 1, c0, c10, 7)
+#define RSYSL_ETMIDR3() MRSL(2, 1, c0, c11, 7)
+#define RSYSL_ETMIDR4() MRSL(2, 1, c0, c12, 7)
+#define RSYSL_ETMIDR5() MRSL(2, 1, c0, c13, 7)
+#define RSYSL_ETMIDR6() MRSL(2, 1, c0, c14, 7)
+#define RSYSL_ETMIDR7() MRSL(2, 1, c0, c15, 7)
+#define RSYSL_ETMIDR8() MRSL(2, 1, c0, c0, 6)
+#define RSYSL_ETMIDR9() MRSL(2, 1, c0, c1, 6)
+#define RSYSL_ETMIMSPEC0() MRSL(2, 1, c0, c0, 7)
+#define RSYSL_ETMOSLSR() MRSL(2, 1, c1, c1, 4)
+#define RSYSL_ETMPRGCTLR() MRSL(2, 1, c0, c1, 0)
+#define RSYSL_ETMRSCTLR10() MRSL(2, 1, c1, c10, 0)
+#define RSYSL_ETMRSCTLR11() MRSL(2, 1, c1, c11, 0)
+#define RSYSL_ETMRSCTLR12() MRSL(2, 1, c1, c12, 0)
+#define RSYSL_ETMRSCTLR13() MRSL(2, 1, c1, c13, 0)
+#define RSYSL_ETMRSCTLR14() MRSL(2, 1, c1, c14, 0)
+#define RSYSL_ETMRSCTLR15() MRSL(2, 1, c1, c15, 0)
+#define RSYSL_ETMRSCTLR2() MRSL(2, 1, c1, c2, 0)
+#define RSYSL_ETMRSCTLR3() MRSL(2, 1, c1, c3, 0)
+#define RSYSL_ETMRSCTLR4() MRSL(2, 1, c1, c4, 0)
+#define RSYSL_ETMRSCTLR5() MRSL(2, 1, c1, c5, 0)
+#define RSYSL_ETMRSCTLR6() MRSL(2, 1, c1, c6, 0)
+#define RSYSL_ETMRSCTLR7() MRSL(2, 1, c1, c7, 0)
+#define RSYSL_ETMRSCTLR8() MRSL(2, 1, c1, c8, 0)
+#define RSYSL_ETMRSCTLR9() MRSL(2, 1, c1, c9, 0)
+#define RSYSL_ETMRSCTLR16() MRSL(2, 1, c1, c0, 1)
+#define RSYSL_ETMRSCTLR17() MRSL(2, 1, c1, c1, 1)
+#define RSYSL_ETMRSCTLR18() MRSL(2, 1, c1, c2, 1)
+#define RSYSL_ETMRSCTLR19() MRSL(2, 1, c1, c3, 1)
+#define RSYSL_ETMRSCTLR20() MRSL(2, 1, c1, c4, 1)
+#define RSYSL_ETMRSCTLR21() MRSL(2, 1, c1, c5, 1)
+#define RSYSL_ETMRSCTLR22() MRSL(2, 1, c1, c6, 1)
+#define RSYSL_ETMRSCTLR23() MRSL(2, 1, c1, c7, 1)
+#define RSYSL_ETMRSCTLR24() MRSL(2, 1, c1, c8, 1)
+#define RSYSL_ETMRSCTLR25() MRSL(2, 1, c1, c9, 1)
+#define RSYSL_ETMRSCTLR26() MRSL(2, 1, c1, c10, 1)
+#define RSYSL_ETMRSCTLR27() MRSL(2, 1, c1, c11, 1)
+#define RSYSL_ETMRSCTLR28() MRSL(2, 1, c1, c12, 1)
+#define RSYSL_ETMRSCTLR29() MRSL(2, 1, c1, c13, 1)
+#define RSYSL_ETMRSCTLR30() MRSL(2, 1, c1, c14, 1)
+#define RSYSL_ETMRSCTLR31() MRSL(2, 1, c1, c15, 1)
+#define RSYSL_ETMSEQEVR0() MRSL(2, 1, c0, c0, 4)
+#define RSYSL_ETMSEQEVR1() MRSL(2, 1, c0, c1, 4)
+#define RSYSL_ETMSEQEVR2() MRSL(2, 1, c0, c2, 4)
+#define RSYSL_ETMSEQRSTEVR() MRSL(2, 1, c0, c6, 4)
+#define RSYSL_ETMSEQSTR() MRSL(2, 1, c0, c7, 4)
+#define RSYSL_ETMSTALLCTLR() MRSL(2, 1, c0, c11, 0)
+#define RSYSL_ETMSTATR() MRSL(2, 1, c0, c3, 0)
+#define RSYSL_ETMSYNCPR() MRSL(2, 1, c0, c13, 0)
+#define RSYSL_ETMTRACEIDR() MRSL(2, 1, c0, c0, 1)
+#define RSYSL_ETMTSCTLR() MRSL(2, 1, c0, c12, 0)
+#define RSYSL_ETMVICTLR() MRSL(2, 1, c0, c0, 2)
+#define RSYSL_ETMVIIECTLR() MRSL(2, 1, c0, c1, 2)
+#define RSYSL_ETMVISSCTLR() MRSL(2, 1, c0, c2, 2)
+#define RSYSL_ETMSSCCR0() MRSL(2, 1, c1, c0, 2)
+#define RSYSL_ETMSSCCR1() MRSL(2, 1, c1, c1, 2)
+#define RSYSL_ETMSSCCR2() MRSL(2, 1, c1, c2, 2)
+#define RSYSL_ETMSSCCR3() MRSL(2, 1, c1, c3, 2)
+#define RSYSL_ETMSSCCR4() MRSL(2, 1, c1, c4, 2)
+#define RSYSL_ETMSSCCR5() MRSL(2, 1, c1, c5, 2)
+#define RSYSL_ETMSSCCR6() MRSL(2, 1, c1, c6, 2)
+#define RSYSL_ETMSSCCR7() MRSL(2, 1, c1, c7, 2)
+#define RSYSL_ETMSSCSR0() MRSL(2, 1, c1, c8, 2)
+#define RSYSL_ETMSSCSR1() MRSL(2, 1, c1, c9, 2)
+#define RSYSL_ETMSSCSR2() MRSL(2, 1, c1, c10, 2)
+#define RSYSL_ETMSSCSR3() MRSL(2, 1, c1, c11, 2)
+#define RSYSL_ETMSSCSR4() MRSL(2, 1, c1, c12, 2)
+#define RSYSL_ETMSSCSR5() MRSL(2, 1, c1, c13, 2)
+#define RSYSL_ETMSSCSR6() MRSL(2, 1, c1, c14, 2)
+#define RSYSL_ETMSSCSR7() MRSL(2, 1, c1, c15, 2)
+#define RSYSL_ETMSSPCICR0() MRSL(2, 1, c1, c0, 3)
+#define RSYSL_ETMSSPCICR1() MRSL(2, 1, c1, c1, 3)
+#define RSYSL_ETMSSPCICR2() MRSL(2, 1, c1, c2, 3)
+#define RSYSL_ETMSSPCICR3() MRSL(2, 1, c1, c3, 3)
+#define RSYSL_ETMSSPCICR4() MRSL(2, 1, c1, c4, 3)
+#define RSYSL_ETMSSPCICR5() MRSL(2, 1, c1, c5, 3)
+#define RSYSL_ETMSSPCICR6() MRSL(2, 1, c1, c6, 3)
+#define RSYSL_ETMSSPCICR7() MRSL(2, 1, c1, c7, 3)
+
+/* 64 bit registers */
+#define RSYSQ_ETMACATR0() MRSQ(2, 1, c2, c0, 2)
+#define RSYSQ_ETMACATR1() MRSQ(2, 1, c2, c2, 2)
+#define RSYSQ_ETMACATR2() MRSQ(2, 1, c2, c4, 2)
+#define RSYSQ_ETMACATR3() MRSQ(2, 1, c2, c6, 2)
+#define RSYSQ_ETMACATR4() MRSQ(2, 1, c2, c8, 2)
+#define RSYSQ_ETMACATR5() MRSQ(2, 1, c2, c10, 2)
+#define RSYSQ_ETMACATR6() MRSQ(2, 1, c2, c12, 2)
+#define RSYSQ_ETMACATR7() MRSQ(2, 1, c2, c14, 2)
+#define RSYSQ_ETMACATR8() MRSQ(2, 1, c2, c0, 3)
+#define RSYSQ_ETMACATR9() MRSQ(2, 1, c2, c2, 3)
+#define RSYSQ_ETMACATR10() MRSQ(2, 1, c2, c4, 3)
+#define RSYSQ_ETMACATR11() MRSQ(2, 1, c2, c6, 3)
+#define RSYSQ_ETMACATR12() MRSQ(2, 1, c2, c8, 3)
+#define RSYSQ_ETMACATR13() MRSQ(2, 1, c2, c10, 3)
+#define RSYSQ_ETMACATR14() MRSQ(2, 1, c2, c12, 3)
+#define RSYSQ_ETMACATR15() MRSQ(2, 1, c2, c14, 3)
+#define RSYSQ_ETMCIDCVR0() MRSQ(2, 1, c3, c0, 0)
+#define RSYSQ_ETMCIDCVR1() MRSQ(2, 1, c3, c2, 0)
+#define RSYSQ_ETMCIDCVR2() MRSQ(2, 1, c3, c4, 0)
+#define RSYSQ_ETMCIDCVR3() MRSQ(2, 1, c3, c6, 0)
+#define RSYSQ_ETMCIDCVR4() MRSQ(2, 1, c3, c8, 0)
+#define RSYSQ_ETMCIDCVR5() MRSQ(2, 1, c3, c10, 0)
+#define RSYSQ_ETMCIDCVR6() MRSQ(2, 1, c3, c12, 0)
+#define RSYSQ_ETMCIDCVR7() MRSQ(2, 1, c3, c14, 0)
+#define RSYSQ_ETMACVR0() MRSQ(2, 1, c2, c0, 0)
+#define RSYSQ_ETMACVR1() MRSQ(2, 1, c2, c2, 0)
+#define RSYSQ_ETMACVR2() MRSQ(2, 1, c2, c4, 0)
+#define RSYSQ_ETMACVR3() MRSQ(2, 1, c2, c6, 0)
+#define RSYSQ_ETMACVR4() MRSQ(2, 1, c2, c8, 0)
+#define RSYSQ_ETMACVR5() MRSQ(2, 1, c2, c10, 0)
+#define RSYSQ_ETMACVR6() MRSQ(2, 1, c2, c12, 0)
+#define RSYSQ_ETMACVR7() MRSQ(2, 1, c2, c14, 0)
+#define RSYSQ_ETMACVR8() MRSQ(2, 1, c2, c0, 1)
+#define RSYSQ_ETMACVR9() MRSQ(2, 1, c2, c2, 1)
+#define RSYSQ_ETMACVR10() MRSQ(2, 1, c2, c4, 1)
+#define RSYSQ_ETMACVR11() MRSQ(2, 1, c2, c6, 1)
+#define RSYSQ_ETMACVR12() MRSQ(2, 1, c2, c8, 1)
+#define RSYSQ_ETMACVR13() MRSQ(2, 1, c2, c10, 1)
+#define RSYSQ_ETMACVR14() MRSQ(2, 1, c2, c12, 1)
+#define RSYSQ_ETMACVR15() MRSQ(2, 1, c2, c14, 1)
+#define RSYSQ_ETMVMIDCVR0() MRSQ(2, 1, c3, c0, 1)
+#define RSYSQ_ETMVMIDCVR1() MRSQ(2, 1, c3, c2, 1)
+#define RSYSQ_ETMVMIDCVR2() MRSQ(2, 1, c3, c4, 1)
+#define RSYSQ_ETMVMIDCVR3() MRSQ(2, 1, c3, c6, 1)
+#define RSYSQ_ETMVMIDCVR4() MRSQ(2, 1, c3, c8, 1)
+#define RSYSQ_ETMVMIDCVR5() MRSQ(2, 1, c3, c10, 1)
+#define RSYSQ_ETMVMIDCVR6() MRSQ(2, 1, c3, c12, 1)
+#define RSYSQ_ETMVMIDCVR7() MRSQ(2, 1, c3, c14, 1)
+#define RSYSQ_ETMDVCVR0() MRSQ(2, 1, c2, c0, 4)
+#define RSYSQ_ETMDVCVR1() MRSQ(2, 1, c2, c4, 4)
+#define RSYSQ_ETMDVCVR2() MRSQ(2, 1, c2, c8, 4)
+#define RSYSQ_ETMDVCVR3() MRSQ(2, 1, c2, c12, 4)
+#define RSYSQ_ETMDVCVR4() MRSQ(2, 1, c2, c0, 5)
+#define RSYSQ_ETMDVCVR5() MRSQ(2, 1, c2, c4, 5)
+#define RSYSQ_ETMDVCVR6() MRSQ(2, 1, c2, c8, 5)
+#define RSYSQ_ETMDVCVR7() MRSQ(2, 1, c2, c12, 5)
+#define RSYSQ_ETMDVCMR0() MRSQ(2, 1, c2, c0, 6)
+#define RSYSQ_ETMDVCMR1() MRSQ(2, 1, c2, c4, 6)
+#define RSYSQ_ETMDVCMR2() MRSQ(2, 1, c2, c8, 6)
+#define RSYSQ_ETMDVCMR3() MRSQ(2, 1, c2, c12, 6)
+#define RSYSQ_ETMDVCMR4() MRSQ(2, 1, c2, c0, 7)
+#define RSYSQ_ETMDVCMR5() MRSQ(2, 1, c2, c4, 7)
+#define RSYSQ_ETMDVCMR6() MRSQ(2, 1, c2, c8, 7)
+#define RSYSQ_ETMDVCMR7() MRSQ(2, 1, c2, c12, 7)
+
+/* 32 and 64 bit registers */
+#define WSYS_ETMAUXCTLR(val) MSR(val, 2, 1, c0, c6, 0)
+#define WSYS_ETMACATR0(val) MSR(val, 2, 1, c2, c0, 2)
+#define WSYS_ETMACATR1(val) MSR(val, 2, 1, c2, c2, 2)
+#define WSYS_ETMACATR2(val) MSR(val, 2, 1, c2, c4, 2)
+#define WSYS_ETMACATR3(val) MSR(val, 2, 1, c2, c6, 2)
+#define WSYS_ETMACATR4(val) MSR(val, 2, 1, c2, c8, 2)
+#define WSYS_ETMACATR5(val) MSR(val, 2, 1, c2, c10, 2)
+#define WSYS_ETMACATR6(val) MSR(val, 2, 1, c2, c12, 2)
+#define WSYS_ETMACATR7(val) MSR(val, 2, 1, c2, c14, 2)
+#define WSYS_ETMACATR8(val) MSR(val, 2, 1, c2, c0, 3)
+#define WSYS_ETMACATR9(val) MSR(val, 2, 1, c2, c2, 3)
+#define WSYS_ETMACATR10(val) MSR(val, 2, 1, c2, c4, 3)
+#define WSYS_ETMACATR11(val) MSR(val, 2, 1, c2, c6, 3)
+#define WSYS_ETMACATR12(val) MSR(val, 2, 1, c2, c8, 3)
+#define WSYS_ETMACATR13(val) MSR(val, 2, 1, c2, c10, 3)
+#define WSYS_ETMACATR14(val) MSR(val, 2, 1, c2, c12, 3)
+#define WSYS_ETMACATR15(val) MSR(val, 2, 1, c2, c14, 3)
+#define WSYS_ETMACVR0(val) MSR(val, 2, 1, c2, c0, 0)
+#define WSYS_ETMACVR1(val) MSR(val, 2, 1, c2, c2, 0)
+#define WSYS_ETMACVR2(val) MSR(val, 2, 1, c2, c4, 0)
+#define WSYS_ETMACVR3(val) MSR(val, 2, 1, c2, c6, 0)
+#define WSYS_ETMACVR4(val) MSR(val, 2, 1, c2, c8, 0)
+#define WSYS_ETMACVR5(val) MSR(val, 2, 1, c2, c10, 0)
+#define WSYS_ETMACVR6(val) MSR(val, 2, 1, c2, c12, 0)
+#define WSYS_ETMACVR7(val) MSR(val, 2, 1, c2, c14, 0)
+#define WSYS_ETMACVR8(val) MSR(val, 2, 1, c2, c0, 1)
+#define WSYS_ETMACVR9(val) MSR(val, 2, 1, c2, c2, 1)
+#define WSYS_ETMACVR10(val) MSR(val, 2, 1, c2, c4, 1)
+#define WSYS_ETMACVR11(val) MSR(val, 2, 1, c2, c6, 1)
+#define WSYS_ETMACVR12(val) MSR(val, 2, 1, c2, c8, 1)
+#define WSYS_ETMACVR13(val) MSR(val, 2, 1, c2, c10, 1)
+#define WSYS_ETMACVR14(val) MSR(val, 2, 1, c2, c12, 1)
+#define WSYS_ETMACVR15(val) MSR(val, 2, 1, c2, c14, 1)
+#define WSYS_ETMCCCTLR(val) MSR(val, 2, 1, c0, c14, 0)
+#define WSYS_ETMCIDCCTLR0(val) MSR(val, 2, 1, c3, c0, 2)
+#define WSYS_ETMCIDCVR0(val) MSR(val, 2, 1, c3, c0, 0)
+#define WSYS_ETMCIDCVR1(val) MSR(val, 2, 1, c3, c2, 0)
+#define WSYS_ETMCIDCVR2(val) MSR(val, 2, 1, c3, c4, 0)
+#define WSYS_ETMCIDCVR3(val) MSR(val, 2, 1, c3, c6, 0)
+#define WSYS_ETMCIDCVR4(val) MSR(val, 2, 1, c3, c8, 0)
+#define WSYS_ETMCIDCVR5(val) MSR(val, 2, 1, c3, c10, 0)
+#define WSYS_ETMCIDCVR6(val) MSR(val, 2, 1, c3, c12, 0)
+#define WSYS_ETMCIDCVR7(val) MSR(val, 2, 1, c3, c14, 0)
+#define WSYS_ETMCNTCTLR0(val) MSR(val, 2, 1, c0, c4, 5)
+#define WSYS_ETMCNTCTLR1(val) MSR(val, 2, 1, c0, c5, 5)
+#define WSYS_ETMCNTCTLR2(val) MSR(val, 2, 1, c0, c6, 5)
+#define WSYS_ETMCNTCTLR3(val) MSR(val, 2, 1, c0, c7, 5)
+#define WSYS_ETMCNTRLDVR0(val) MSR(val, 2, 1, c0, c0, 5)
+#define WSYS_ETMCNTRLDVR1(val) MSR(val, 2, 1, c0, c1, 5)
+#define WSYS_ETMCNTRLDVR2(val) MSR(val, 2, 1, c0, c2, 5)
+#define WSYS_ETMCNTRLDVR3(val) MSR(val, 2, 1, c0, c3, 5)
+#define WSYS_ETMCNTVR0(val) MSR(val, 2, 1, c0, c8, 5)
+#define WSYS_ETMCNTVR1(val) MSR(val, 2, 1, c0, c9, 5)
+#define WSYS_ETMCNTVR2(val) MSR(val, 2, 1, c0, c10, 5)
+#define WSYS_ETMCNTVR3(val) MSR(val, 2, 1, c0, c11, 5)
+#define WSYS_ETMCONFIGR(val) MSR(val, 2, 1, c0, c4, 0)
+#define WSYS_ETMEVENTCTL0R(val) MSR(val, 2, 1, c0, c8, 0)
+#define WSYS_ETMEVENTCTL1R(val) MSR(val, 2, 1, c0, c9, 0)
+#define WSYS_ETMEXTINSELR(val) MSR(val, 2, 1, c0, c8, 4)
+#define WSYS_ETMIMSPEC0(val) MSR(val, 2, 1, c0, c0, 7)
+#define WSYS_ETMOSLAR(val) MSR(val, 2, 1, c1, c0, 4)
+#define WSYS_ETMPRGCTLR(val) MSR(val, 2, 1, c0, c1, 0)
+#define WSYS_ETMRSCTLR10(val) MSR(val, 2, 1, c1, c10, 0)
+#define WSYS_ETMRSCTLR11(val) MSR(val, 2, 1, c1, c11, 0)
+#define WSYS_ETMRSCTLR12(val) MSR(val, 2, 1, c1, c12, 0)
+#define WSYS_ETMRSCTLR13(val) MSR(val, 2, 1, c1, c13, 0)
+#define WSYS_ETMRSCTLR14(val) MSR(val, 2, 1, c1, c14, 0)
+#define WSYS_ETMRSCTLR15(val) MSR(val, 2, 1, c1, c15, 0)
+#define WSYS_ETMRSCTLR2(val) MSR(val, 2, 1, c1, c2, 0)
+#define WSYS_ETMRSCTLR3(val) MSR(val, 2, 1, c1, c3, 0)
+#define WSYS_ETMRSCTLR4(val) MSR(val, 2, 1, c1, c4, 0)
+#define WSYS_ETMRSCTLR5(val) MSR(val, 2, 1, c1, c5, 0)
+#define WSYS_ETMRSCTLR6(val) MSR(val, 2, 1, c1, c6, 0)
+#define WSYS_ETMRSCTLR7(val) MSR(val, 2, 1, c1, c7, 0)
+#define WSYS_ETMRSCTLR8(val) MSR(val, 2, 1, c1, c8, 0)
+#define WSYS_ETMRSCTLR9(val) MSR(val, 2, 1, c1, c9, 0)
+#define WSYS_ETMRSCTLR16(val) MSR(val, 2, 1, c1, c0, 1)
+#define WSYS_ETMRSCTLR17(val) MSR(val, 2, 1, c1, c1, 1)
+#define WSYS_ETMRSCTLR18(val) MSR(val, 2, 1, c1, c2, 1)
+#define WSYS_ETMRSCTLR19(val) MSR(val, 2, 1, c1, c3, 1)
+#define WSYS_ETMRSCTLR20(val) MSR(val, 2, 1, c1, c4, 1)
+#define WSYS_ETMRSCTLR21(val) MSR(val, 2, 1, c1, c5, 1)
+#define WSYS_ETMRSCTLR22(val) MSR(val, 2, 1, c1, c6, 1)
+#define WSYS_ETMRSCTLR23(val) MSR(val, 2, 1, c1, c7, 1)
+#define WSYS_ETMRSCTLR24(val) MSR(val, 2, 1, c1, c8, 1)
+#define WSYS_ETMRSCTLR25(val) MSR(val, 2, 1, c1, c9, 1)
+#define WSYS_ETMRSCTLR26(val) MSR(val, 2, 1, c1, c10, 1)
+#define WSYS_ETMRSCTLR27(val) MSR(val, 2, 1, c1, c11, 1)
+#define WSYS_ETMRSCTLR28(val) MSR(val, 2, 1, c1, c12, 1)
+#define WSYS_ETMRSCTLR29(val) MSR(val, 2, 1, c1, c13, 1)
+#define WSYS_ETMRSCTLR30(val) MSR(val, 2, 1, c1, c14, 1)
+#define WSYS_ETMRSCTLR31(val) MSR(val, 2, 1, c1, c15, 1)
+#define WSYS_ETMSEQEVR0(val) MSR(val, 2, 1, c0, c0, 4)
+#define WSYS_ETMSEQEVR1(val) MSR(val, 2, 1, c0, c1, 4)
+#define WSYS_ETMSEQEVR2(val) MSR(val, 2, 1, c0, c2, 4)
+#define WSYS_ETMSEQRSTEVR(val) MSR(val, 2, 1, c0, c6, 4)
+#define WSYS_ETMSEQSTR(val) MSR(val, 2, 1, c0, c7, 4)
+#define WSYS_ETMSTALLCTLR(val) MSR(val, 2, 1, c0, c11, 0)
+#define WSYS_ETMSYNCPR(val) MSR(val, 2, 1, c0, c13, 0)
+#define WSYS_ETMTRACEIDR(val) MSR(val, 2, 1, c0, c0, 1)
+#define WSYS_ETMTSCTLR(val) MSR(val, 2, 1, c0, c12, 0)
+#define WSYS_ETMVICTLR(val) MSR(val, 2, 1, c0, c0, 2)
+#define WSYS_ETMVIIECTLR(val) MSR(val, 2, 1, c0, c1, 2)
+#define WSYS_ETMVISSCTLR(val) MSR(val, 2, 1, c0, c2, 2)
+#define WSYS_ETMVMIDCVR0(val) MSR(val, 2, 1, c3, c0, 1)
+#define WSYS_ETMVMIDCVR1(val) MSR(val, 2, 1, c3, c2, 1)
+#define WSYS_ETMVMIDCVR2(val) MSR(val, 2, 1, c3, c4, 1)
+#define WSYS_ETMVMIDCVR3(val) MSR(val, 2, 1, c3, c6, 1)
+#define WSYS_ETMVMIDCVR4(val) MSR(val, 2, 1, c3, c8, 1)
+#define WSYS_ETMVMIDCVR5(val) MSR(val, 2, 1, c3, c10, 1)
+#define WSYS_ETMVMIDCVR6(val) MSR(val, 2, 1, c3, c12, 1)
+#define WSYS_ETMVMIDCVR7(val) MSR(val, 2, 1, c3, c14, 1)
+#define WSYS_ETMDVCVR0(val) MSR(val, 2, 1, c2, c0, 4)
+#define WSYS_ETMDVCVR1(val) MSR(val, 2, 1, c2, c4, 4)
+#define WSYS_ETMDVCVR2(val) MSR(val, 2, 1, c2, c8, 4)
+#define WSYS_ETMDVCVR3(val) MSR(val, 2, 1, c2, c12, 4)
+#define WSYS_ETMDVCVR4(val) MSR(val, 2, 1, c2, c0, 5)
+#define WSYS_ETMDVCVR5(val) MSR(val, 2, 1, c2, c4, 5)
+#define WSYS_ETMDVCVR6(val) MSR(val, 2, 1, c2, c8, 5)
+#define WSYS_ETMDVCVR7(val) MSR(val, 2, 1, c2, c12, 5)
+#define WSYS_ETMDVCMR0(val) MSR(val, 2, 1, c2, c0, 6)
+#define WSYS_ETMDVCMR1(val) MSR(val, 2, 1, c2, c4, 6)
+#define WSYS_ETMDVCMR2(val) MSR(val, 2, 1, c2, c8, 6)
+#define WSYS_ETMDVCMR3(val) MSR(val, 2, 1, c2, c12, 6)
+#define WSYS_ETMDVCMR4(val) MSR(val, 2, 1, c2, c0, 7)
+#define WSYS_ETMDVCMR5(val) MSR(val, 2, 1, c2, c4, 7)
+#define WSYS_ETMDVCMR6(val) MSR(val, 2, 1, c2, c8, 7)
+#define WSYS_ETMDVCMR7(val) MSR(val, 2, 1, c2, c12, 7)
+#define WSYS_ETMSSCCR0(val) MSR(val, 2, 1, c1, c0, 2)
+#define WSYS_ETMSSCCR1(val) MSR(val, 2, 1, c1, c1, 2)
+#define WSYS_ETMSSCCR2(val) MSR(val, 2, 1, c1, c2, 2)
+#define WSYS_ETMSSCCR3(val) MSR(val, 2, 1, c1, c3, 2)
+#define WSYS_ETMSSCCR4(val) MSR(val, 2, 1, c1, c4, 2)
+#define WSYS_ETMSSCCR5(val) MSR(val, 2, 1, c1, c5, 2)
+#define WSYS_ETMSSCCR6(val) MSR(val, 2, 1, c1, c6, 2)
+#define WSYS_ETMSSCCR7(val) MSR(val, 2, 1, c1, c7, 2)
+#define WSYS_ETMSSCSR0(val) MSR(val, 2, 1, c1, c8, 2)
+#define WSYS_ETMSSCSR1(val) MSR(val, 2, 1, c1, c9, 2)
+#define WSYS_ETMSSCSR2(val) MSR(val, 2, 1, c1, c10, 2)
+#define WSYS_ETMSSCSR3(val) MSR(val, 2, 1, c1, c11, 2)
+#define WSYS_ETMSSCSR4(val) MSR(val, 2, 1, c1, c12, 2)
+#define WSYS_ETMSSCSR5(val) MSR(val, 2, 1, c1, c13, 2)
+#define WSYS_ETMSSCSR6(val) MSR(val, 2, 1, c1, c14, 2)
+#define WSYS_ETMSSCSR7(val) MSR(val, 2, 1, c1, c15, 2)
+#define WSYS_ETMSSPCICR0(val) MSR(val, 2, 1, c1, c0, 3)
+#define WSYS_ETMSSPCICR1(val) MSR(val, 2, 1, c1, c1, 3)
+#define WSYS_ETMSSPCICR2(val) MSR(val, 2, 1, c1, c2, 3)
+#define WSYS_ETMSSPCICR3(val) MSR(val, 2, 1, c1, c3, 3)
+#define WSYS_ETMSSPCICR4(val) MSR(val, 2, 1, c1, c4, 3)
+#define WSYS_ETMSSPCICR5(val) MSR(val, 2, 1, c1, c5, 3)
+#define WSYS_ETMSSPCICR6(val) MSR(val, 2, 1, c1, c6, 3)
+#define WSYS_ETMSSPCICR7(val) MSR(val, 2, 1, c1, c7, 3)
+
+#endif /* __ASM_ETMV4X_H */
This information is exported to userspace via sysfs entries, and userspace
algorithms use this information to decide when to turn the cpu cores on/off.
+config MSM_JTAGV8
+ bool "Debug and ETM trace support across power collapse for ARMv8"
+ help
+ Enables support for debugging (specifically breakpoints) and ETM
+ processor tracing across power collapse both for JTag and OS hosted
+ software running on ARMv8 target. Enabling this will ensure debug
+ and ETM registers are saved and restored across power collapse.
+
+ If unsure, say 'N' here to avoid potential power, performance and
+ memory penalty.
+
config MSM_BOOT_STATS
bool "Use MSM boot stats reporting"
help
obj-$(CONFIG_MSM_BOOT_STATS) += boot_stats.o
obj-$(CONFIG_MSM_AVTIMER) += avtimer.o
obj-$(CONFIG_HW_PERF_EVENTS) += perf_event_kryo.o
+obj-$(CONFIG_MSM_JTAGV8) += jtag-fuse.o jtagv8.o jtagv8-etm.o
obj-$(CONFIG_MSM_KERNEL_PROTECT) += kernel_protect.o
obj-$(CONFIG_MSM_RTB) += msm_rtb-hotplug.o
obj-$(CONFIG_QCOM_REMOTEQDSS) += remoteqdss.o
--- /dev/null
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/of_device.h>
+#include <soc/qcom/jtag.h>
+
+#define fuse_writel(drvdata, val, off) __raw_writel((val), drvdata->base + off)
+#define fuse_readl(drvdata, off) __raw_readl(drvdata->base + off)
+
+#define OEM_CONFIG0 (0x000)
+#define OEM_CONFIG1 (0x004)
+#define OEM_CONFIG2 (0x008)
+
+/* JTAG FUSE V1 */
+#define ALL_DEBUG_DISABLE BIT(21)
+#define APPS_DBGEN_DISABLE BIT(0)
+#define APPS_NIDEN_DISABLE BIT(1)
+#define APPS_SPIDEN_DISABLE BIT(2)
+#define APPS_SPNIDEN_DISABLE BIT(3)
+#define DAP_DEVICEEN_DISABLE BIT(8)
+
+/* JTAG FUSE V2 */
+#define ALL_DEBUG_DISABLE_V2 BIT(0)
+#define APPS_DBGEN_DISABLE_V2 BIT(10)
+#define APPS_NIDEN_DISABLE_V2 BIT(11)
+#define APPS_SPIDEN_DISABLE_V2 BIT(12)
+#define APPS_SPNIDEN_DISABLE_V2 BIT(13)
+#define DAP_DEVICEEN_DISABLE_V2 BIT(18)
+
+/* JTAG FUSE V3 */
+#define ALL_DEBUG_DISABLE_V3 BIT(29)
+#define APPS_DBGEN_DISABLE_V3 BIT(8)
+#define APPS_NIDEN_DISABLE_V3 BIT(21)
+#define APPS_SPIDEN_DISABLE_V3 BIT(5)
+#define APPS_SPNIDEN_DISABLE_V3 BIT(31)
+#define DAP_DEVICEEN_DISABLE_V3 BIT(7)
+
+#define JTAG_FUSE_VERSION_V1 "qcom,jtag-fuse"
+#define JTAG_FUSE_VERSION_V2 "qcom,jtag-fuse-v2"
+#define JTAG_FUSE_VERSION_V3 "qcom,jtag-fuse-v3"
+
+/*
+ * Per-device state for the JTAG fuse driver.  base is the mapped OEM
+ * config fuse block; fuse_v2/fuse_v3 select the fuse bit layout.
+ */
+struct fuse_drvdata {
+ void __iomem *base;
+ struct device *dev;
+ bool fuse_v2;
+ bool fuse_v3;
+};
+
+/* Single global instance, consulted by the exported query helper */
+static struct fuse_drvdata *fusedrvdata;
+
+/*
+ * msm_jtag_fuse_apps_access_disabled - report whether APPS debug access
+ * has been fused off.
+ *
+ * Reads the OEM config fuse words and returns true when any of the
+ * layout-specific disable bits (all-debug, DBGEN, NIDEN, SPIDEN,
+ * SPNIDEN, DAP device-enable) is blown.  Returns false when the fuse
+ * driver has not probed yet.  Which word each bit lives in differs per
+ * fuse map version -- e.g. v3 reads SPIDEN from OEM_CONFIG2 but the
+ * rest from CONFIG0/CONFIG1 (presumably per the SoC fuse map; confirm
+ * against the datasheet).
+ */
+bool msm_jtag_fuse_apps_access_disabled(void)
+{
+ struct fuse_drvdata *drvdata = fusedrvdata;
+ uint32_t config0, config1, config2;
+ bool ret = false;
+
+ if (!drvdata)
+ return false;
+
+ config0 = fuse_readl(drvdata, OEM_CONFIG0);
+ config1 = fuse_readl(drvdata, OEM_CONFIG1);
+
+ dev_dbg(drvdata->dev, "apps config0: %lx\n", (unsigned long)config0);
+ dev_dbg(drvdata->dev, "apps config1: %lx\n", (unsigned long)config1);
+
+ /* OEM_CONFIG2 only exists/matters on the v3 fuse map */
+ if (drvdata->fuse_v3) {
+ config2 = fuse_readl(drvdata, OEM_CONFIG2);
+ dev_dbg(drvdata->dev, "apps config2: %lx\n",
+ (unsigned long)config2);
+ }
+
+ if (drvdata->fuse_v3) {
+ if (config0 & ALL_DEBUG_DISABLE_V3)
+ ret = true;
+ else if (config1 & APPS_DBGEN_DISABLE_V3)
+ ret = true;
+ else if (config1 & APPS_NIDEN_DISABLE_V3)
+ ret = true;
+ else if (config2 & APPS_SPIDEN_DISABLE_V3)
+ ret = true;
+ else if (config1 & APPS_SPNIDEN_DISABLE_V3)
+ ret = true;
+ else if (config1 & DAP_DEVICEEN_DISABLE_V3)
+ ret = true;
+ } else if (drvdata->fuse_v2) {
+ if (config1 & ALL_DEBUG_DISABLE_V2)
+ ret = true;
+ else if (config1 & APPS_DBGEN_DISABLE_V2)
+ ret = true;
+ else if (config1 & APPS_NIDEN_DISABLE_V2)
+ ret = true;
+ else if (config1 & APPS_SPIDEN_DISABLE_V2)
+ ret = true;
+ else if (config1 & APPS_SPNIDEN_DISABLE_V2)
+ ret = true;
+ else if (config1 & DAP_DEVICEEN_DISABLE_V2)
+ ret = true;
+ } else {
+ if (config0 & ALL_DEBUG_DISABLE)
+ ret = true;
+ else if (config1 & APPS_DBGEN_DISABLE)
+ ret = true;
+ else if (config1 & APPS_NIDEN_DISABLE)
+ ret = true;
+ else if (config1 & APPS_SPIDEN_DISABLE)
+ ret = true;
+ else if (config1 & APPS_SPNIDEN_DISABLE)
+ ret = true;
+ else if (config1 & DAP_DEVICEEN_DISABLE)
+ ret = true;
+ }
+
+ if (ret)
+ dev_dbg(drvdata->dev, "apps fuse disabled\n");
+
+ return ret;
+}
+EXPORT_SYMBOL(msm_jtag_fuse_apps_access_disabled);
+
+/* Fuse map revisions this driver understands (v1 has no suffix) */
+static const struct of_device_id jtag_fuse_match[] = {
+ {.compatible = JTAG_FUSE_VERSION_V1 },
+ {.compatible = JTAG_FUSE_VERSION_V2 },
+ {.compatible = JTAG_FUSE_VERSION_V3 },
+ {}
+};
+
+/*
+ * jtag_fuse_probe - map the OEM fuse register block and publish state.
+ *
+ * Detects the fuse map revision from the matched compatible string and
+ * ioremaps the "fuse-base" region.  The global fusedrvdata pointer is
+ * assigned only after the mapping succeeds: the original code published
+ * it right after allocation, so a probe that failed at of_match_device
+ * or ioremap left msm_jtag_fuse_apps_access_disabled() reading through
+ * a drvdata whose ->base was still NULL.
+ */
+static int jtag_fuse_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct fuse_drvdata *drvdata;
+ struct resource *res;
+ const struct of_device_id *match;
+
+ drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata)
+ return -ENOMEM;
+ drvdata->dev = &pdev->dev;
+ platform_set_drvdata(pdev, drvdata);
+
+ match = of_match_device(jtag_fuse_match, dev);
+ if (!match)
+ return -EINVAL;
+
+ if (!strcmp(match->compatible, JTAG_FUSE_VERSION_V2))
+ drvdata->fuse_v2 = true;
+ else if (!strcmp(match->compatible, JTAG_FUSE_VERSION_V3))
+ drvdata->fuse_v3 = true;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fuse-base");
+ if (!res)
+ return -ENODEV;
+
+ drvdata->base = devm_ioremap(dev, res->start, resource_size(res));
+ if (!drvdata->base)
+ return -ENOMEM;
+
+ /* Publish only fully-initialized state to the exported helper */
+ fusedrvdata = drvdata;
+
+ dev_info(dev, "JTag Fuse initialized\n");
+ return 0;
+}
+
+/* Nothing to tear down: the mapping and drvdata are devm-managed */
+static int jtag_fuse_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static struct platform_driver jtag_fuse_driver = {
+ .probe = jtag_fuse_probe,
+ .remove = jtag_fuse_remove,
+ .driver = {
+ .name = "msm-jtag-fuse",
+ .owner = THIS_MODULE,
+ .of_match_table = jtag_fuse_match,
+ },
+};
+
+/*
+ * Registered at arch_initcall so the fuse state is available before
+ * later-initcall consumers query it.
+ */
+static int __init jtag_fuse_init(void)
+{
+ return platform_driver_register(&jtag_fuse_driver);
+}
+arch_initcall(jtag_fuse_init);
+
+static void __exit jtag_fuse_exit(void)
+{
+ platform_driver_unregister(&jtag_fuse_driver);
+}
+module_exit(jtag_fuse_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("JTag Fuse driver");
--- /dev/null
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/smp.h>
+#include <linux/export.h>
+#include <linux/printk.h>
+#include <linux/ratelimit.h>
+#include <linux/coresight.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/bitops.h>
+#include <linux/of.h>
+#include <linux/notifier.h>
+#include <linux/cpu.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <soc/qcom/scm.h>
+#include <soc/qcom/jtag.h>
+#include <asm/smp_plat.h>
+#include <asm/etmv4x.h>
+#include <soc/qcom/socinfo.h>
+
+#define CORESIGHT_LAR (0xFB0)
+
+#define TIMEOUT_US (100)
+
+#define BM(lsb, msb) ((BIT(msb) - BIT(lsb)) + BIT(msb))
+#define BMVAL(val, lsb, msb) ((val & BM(lsb, msb)) >> lsb)
+#define BVAL(val, n) ((val & BIT(n)) >> n)
+
+/*
+ * ETMv4 registers:
+ * 0x000 - 0x2FC: Trace registers
+ * 0x300 - 0x314: Management registers
+ * 0x318 - 0xEFC: Trace registers
+ * 0xF00: Management registers
+ * 0xFA0 - 0xFA4: Trace registers
+ * 0xFA8 - 0xFFC: Management registers
+ */
+
+/* Trace registers (0x000-0x2FC) */
+/* Main control and configuration registers */
+#define TRCPRGCTLR (0x004)
+#define TRCPROCSELR (0x008)
+#define TRCSTATR (0x00C)
+#define TRCCONFIGR (0x010)
+#define TRCAUXCTLR (0x018)
+#define TRCEVENTCTL0R (0x020)
+#define TRCEVENTCTL1R (0x024)
+#define TRCSTALLCTLR (0x02C)
+#define TRCTSCTLR (0x030)
+#define TRCSYNCPR (0x034)
+#define TRCCCCTLR (0x038)
+#define TRCBBCTLR (0x03C)
+#define TRCTRACEIDR (0x040)
+#define TRCQCTLR (0x044)
+/* Filtering control registers */
+#define TRCVICTLR (0x080)
+#define TRCVIIECTLR (0x084)
+#define TRCVISSCTLR (0x088)
+#define TRCVIPCSSCTLR (0x08C)
+#define TRCVDCTLR (0x0A0)
+#define TRCVDSACCTLR (0x0A4)
+#define TRCVDARCCTLR (0x0A8)
+/* Derived resources registers */
+#define TRCSEQEVRn(n) (0x100 + (n * 4))
+#define TRCSEQRSTEVR (0x118)
+#define TRCSEQSTR (0x11C)
+#define TRCEXTINSELR (0x120)
+#define TRCCNTRLDVRn(n) (0x140 + (n * 4))
+#define TRCCNTCTLRn(n) (0x150 + (n * 4))
+#define TRCCNTVRn(n) (0x160 + (n * 4))
+/* ID registers */
+#define TRCIDR8 (0x180)
+#define TRCIDR9 (0x184)
+#define TRCIDR10 (0x188)
+#define TRCIDR11 (0x18C)
+#define TRCIDR12 (0x190)
+#define TRCIDR13 (0x194)
+#define TRCIMSPEC0 (0x1C0)
+#define TRCIMSPECn(n) (0x1C0 + (n * 4))
+#define TRCIDR0 (0x1E0)
+#define TRCIDR1 (0x1E4)
+#define TRCIDR2 (0x1E8)
+#define TRCIDR3 (0x1EC)
+#define TRCIDR4 (0x1F0)
+#define TRCIDR5 (0x1F4)
+#define TRCIDR6 (0x1F8)
+#define TRCIDR7 (0x1FC)
+/* Resource selection registers */
+#define TRCRSCTLRn(n) (0x200 + (n * 4))
+/* Single-shot comparator registers */
+#define TRCSSCCRn(n) (0x280 + (n * 4))
+#define TRCSSCSRn(n) (0x2A0 + (n * 4))
+#define TRCSSPCICRn(n) (0x2C0 + (n * 4))
+/* Management registers (0x300-0x314) */
+#define TRCOSLAR (0x300)
+#define TRCOSLSR (0x304)
+#define TRCPDCR (0x310)
+#define TRCPDSR (0x314)
+/* Trace registers (0x318-0xEFC) */
+/* Comparator registers */
+#define TRCACVRn(n) (0x400 + (n * 8))
+#define TRCACATRn(n) (0x480 + (n * 8))
+#define TRCDVCVRn(n) (0x500 + (n * 16))
+#define TRCDVCMRn(n) (0x580 + (n * 16))
+#define TRCCIDCVRn(n) (0x600 + (n * 8))
+#define TRCVMIDCVRn(n) (0x640 + (n * 8))
+#define TRCCIDCCTLR0 (0x680)
+#define TRCCIDCCTLR1 (0x684)
+#define TRCVMIDCCTLR0 (0x688)
+#define TRCVMIDCCTLR1 (0x68C)
+/* Management register (0xF00) */
+/* Integration control registers */
+#define TRCITCTRL (0xF00)
+/* Trace registers (0xFA0-0xFA4) */
+/* Claim tag registers */
+#define TRCCLAIMSET (0xFA0)
+#define TRCCLAIMCLR (0xFA4)
+/* Management registers (0xFA8-0xFFC) */
+#define TRCDEVAFF0 (0xFA8)
+#define TRCDEVAFF1 (0xFAC)
+#define TRCLAR (0xFB0)
+#define TRCLSR (0xFB4)
+#define TRCAUTHSTATUS (0xFB8)
+#define TRCDEVARCH (0xFBC)
+#define TRCDEVID (0xFC8)
+#define TRCDEVTYPE (0xFCC)
+#define TRCPIDR4 (0xFD0)
+#define TRCPIDR5 (0xFD4)
+#define TRCPIDR6 (0xFD8)
+#define TRCPIDR7 (0xFDC)
+#define TRCPIDR0 (0xFE0)
+#define TRCPIDR1 (0xFE4)
+#define TRCPIDR2 (0xFE8)
+#define TRCPIDR3 (0xFEC)
+#define TRCCIDR0 (0xFF0)
+#define TRCCIDR1 (0xFF4)
+#define TRCCIDR2 (0xFF8)
+#define TRCCIDR3 (0xFFC)
+
+/* ETMv4 resources */
+#define ETM_MAX_NR_PE (8)
+#define ETM_MAX_CNTR (4)
+#define ETM_MAX_SEQ_STATES (4)
+#define ETM_MAX_EXT_INP_SEL (4)
+#define ETM_MAX_EXT_INP (256)
+#define ETM_MAX_EXT_OUT (4)
+#define ETM_MAX_SINGLE_ADDR_CMP (16)
+#define ETM_MAX_ADDR_RANGE_CMP (ETM_MAX_SINGLE_ADDR_CMP / 2)
+#define ETM_MAX_DATA_VAL_CMP (8)
+#define ETM_MAX_CTXID_CMP (8)
+#define ETM_MAX_VMID_CMP (8)
+#define ETM_MAX_PE_CMP (8)
+#define ETM_MAX_RES_SEL (32)
+#define ETM_MAX_SS_CMP (8)
+
+#define ETM_CPMR_CLKEN (0x4)
+#define ETM_ARCH_V4 (0x40)
+
+#define MAX_ETM_STATE_SIZE (165)
+
+#define TZ_DBG_ETM_FEAT_ID (0x8)
+#define TZ_DBG_ETM_VER (0x400000)
+#define HW_SOC_ID_M8953 (293)
+
+/* Raw memory-mapped accessors into an etm_ctx's register block */
+#define etm_writel(etm, val, off) \
+ __raw_writel(val, etm->base + off)
+#define etm_readl(etm, off) \
+ __raw_readl(etm->base + off)
+
+#define etm_writeq(etm, val, off) \
+ __raw_writeq(val, etm->base + off)
+#define etm_readq(etm, off) \
+ __raw_readq(etm->base + off)
+
+/*
+ * CoreSight lock access register helpers.  NOTE(review): despite the
+ * parameter name "base", these take the etm_ctx pointer, since they
+ * expand through etm_writel().
+ */
+#define ETM_LOCK(base) \
+do { \
+ mb(); /* ensure configuration take effect before we lock it */ \
+ etm_writel(base, 0x0, CORESIGHT_LAR); \
+} while (0)
+
+#define ETM_UNLOCK(base) \
+do { \
+ etm_writel(base, CORESIGHT_UNLOCK, CORESIGHT_LAR); \
+ mb(); /* ensure unlock take effect before we configure */ \
+} while (0)
+
+/*
+ * Per-cpu ETMv4 context: implemented-resource counts (sequencer states,
+ * counters, comparators, ...), operating flags, and the register
+ * snapshot buffer used across power collapse.
+ */
+struct etm_ctx {
+ uint8_t arch;
+ uint8_t nr_pe;
+ uint8_t nr_pe_cmp;
+ uint8_t nr_addr_cmp;
+ uint8_t nr_data_cmp;
+ uint8_t nr_cntr;
+ uint8_t nr_ext_inp;
+ uint8_t nr_ext_inp_sel;
+ uint8_t nr_ext_out;
+ uint8_t nr_ctxid_cmp;
+ uint8_t nr_vmid_cmp;
+ uint8_t nr_seq_state;
+ uint8_t nr_event;
+ uint8_t nr_resource;
+ uint8_t nr_ss_cmp;
+ bool si_enable;
+ bool save_restore_disabled;
+ bool save_restore_enabled;
+ bool os_lock_present;
+ bool init;
+ bool enable;
+ void __iomem *base;
+ struct device *dev;
+ uint64_t *state;
+ spinlock_t spinlock;
+ struct mutex mutex;
+};
+
+/* One context and clock handle per possible cpu; cnt = probed count */
+static struct etm_ctx *etm[NR_CPUS];
+static int cnt;
+
+static struct clk *clock[NR_CPUS];
+
+/* Chains invoked from inside the save/restore paths */
+ATOMIC_NOTIFIER_HEAD(etm_save_notifier_list);
+ATOMIC_NOTIFIER_HEAD(etm_restore_notifier_list);
+
+/* Register a callback to run while ETM state is being saved */
+int msm_jtag_save_register(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_register(&etm_save_notifier_list, nb);
+}
+EXPORT_SYMBOL(msm_jtag_save_register);
+
+int msm_jtag_save_unregister(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_unregister(&etm_save_notifier_list, nb);
+}
+EXPORT_SYMBOL(msm_jtag_save_unregister);
+
+/* Register a callback to run while ETM state is being restored */
+int msm_jtag_restore_register(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_register(&etm_restore_notifier_list, nb);
+}
+EXPORT_SYMBOL(msm_jtag_restore_register);
+
+int msm_jtag_restore_unregister(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_unregister(&etm_restore_notifier_list, nb);
+}
+EXPORT_SYMBOL(msm_jtag_restore_unregister);
+
+/*
+ * Take the OS lock (when implemented) so the trace unit's programmers'
+ * model is stable while its state is saved or restored.
+ */
+static void etm_os_lock(struct etm_ctx *etmdata)
+{
+ if (etmdata->os_lock_present) {
+ etm_writel(etmdata, 0x1, TRCOSLAR);
+ /* Ensure OS lock is set before proceeding */
+ mb();
+ }
+}
+
+/* Release the OS lock after all restore writes have landed */
+static void etm_os_unlock(struct etm_ctx *etmdata)
+{
+ if (etmdata->os_lock_present) {
+ /* Ensure all writes are complete before clearing OS lock */
+ mb();
+ etm_writel(etmdata, 0x0, TRCOSLAR);
+ }
+}
+
+/*
+ * etm_mm_save_state - save ETMv4 trace unit state via memory-mapped I/O
+ * before the core power collapses.
+ *
+ * Takes the OS lock, waits for the programmers' model to stabilize,
+ * snapshots the control/filter/resource/comparator registers into
+ * etmdata->state[] in the exact order etm_mm_restore_state() replays
+ * them, then waits for the trace unit to become idle.
+ *
+ * Fix: the data-value comparator mask was read with TRCDVCMRn(i) --
+ * the state-array cursor -- instead of TRCDVCMRn(j), the comparator
+ * index, so the wrong registers were captured; the restore path
+ * already indexed by j.
+ */
+static inline void etm_mm_save_state(struct etm_ctx *etmdata)
+{
+ int i, j, count;
+
+ i = 0;
+ mb(); /* ensure all register writes complete before saving them */
+ isb();
+ ETM_UNLOCK(etmdata);
+
+ switch (etmdata->arch) {
+ case ETM_ARCH_V4:
+ etm_os_lock(etmdata);
+
+ /* poll until programmers' model becomes stable */
+ for (count = TIMEOUT_US; (BVAL(etm_readl(etmdata, TRCSTATR), 1)
+ != 1) && count > 0; count--)
+ udelay(1);
+ if (count == 0)
+ pr_err_ratelimited("programmers model is not stable\n");
+
+ /* main control and configuration registers */
+ etmdata->state[i++] = etm_readl(etmdata, TRCPROCSELR);
+ etmdata->state[i++] = etm_readl(etmdata, TRCCONFIGR);
+ etmdata->state[i++] = etm_readl(etmdata, TRCAUXCTLR);
+ etmdata->state[i++] = etm_readl(etmdata, TRCEVENTCTL0R);
+ etmdata->state[i++] = etm_readl(etmdata, TRCEVENTCTL1R);
+ etmdata->state[i++] = etm_readl(etmdata, TRCSTALLCTLR);
+ etmdata->state[i++] = etm_readl(etmdata, TRCTSCTLR);
+ etmdata->state[i++] = etm_readl(etmdata, TRCSYNCPR);
+ etmdata->state[i++] = etm_readl(etmdata, TRCCCCTLR);
+ etmdata->state[i++] = etm_readl(etmdata, TRCBBCTLR);
+ etmdata->state[i++] = etm_readl(etmdata, TRCTRACEIDR);
+ etmdata->state[i++] = etm_readl(etmdata, TRCQCTLR);
+ /* filtering control registers */
+ etmdata->state[i++] = etm_readl(etmdata, TRCVICTLR);
+ etmdata->state[i++] = etm_readl(etmdata, TRCVIIECTLR);
+ etmdata->state[i++] = etm_readl(etmdata, TRCVISSCTLR);
+ etmdata->state[i++] = etm_readl(etmdata, TRCVIPCSSCTLR);
+ etmdata->state[i++] = etm_readl(etmdata, TRCVDCTLR);
+ etmdata->state[i++] = etm_readl(etmdata, TRCVDSACCTLR);
+ etmdata->state[i++] = etm_readl(etmdata, TRCVDARCCTLR);
+ /* derived resource registers */
+ for (j = 0; j < etmdata->nr_seq_state-1; j++)
+ etmdata->state[i++] = etm_readl(etmdata, TRCSEQEVRn(j));
+ etmdata->state[i++] = etm_readl(etmdata, TRCSEQRSTEVR);
+ etmdata->state[i++] = etm_readl(etmdata, TRCSEQSTR);
+ etmdata->state[i++] = etm_readl(etmdata, TRCEXTINSELR);
+ for (j = 0; j < etmdata->nr_cntr; j++) {
+ etmdata->state[i++] = etm_readl(etmdata,
+ TRCCNTRLDVRn(j));
+ etmdata->state[i++] = etm_readl(etmdata,
+ TRCCNTCTLRn(j));
+ etmdata->state[i++] = etm_readl(etmdata,
+ TRCCNTVRn(j));
+ }
+ /* resource selection registers */
+ for (j = 0; j < etmdata->nr_resource; j++)
+ etmdata->state[i++] = etm_readl(etmdata, TRCRSCTLRn(j));
+ /* comparator registers */
+ for (j = 0; j < etmdata->nr_addr_cmp * 2; j++) {
+ etmdata->state[i++] = etm_readq(etmdata, TRCACVRn(j));
+ etmdata->state[i++] = etm_readq(etmdata, TRCACATRn(j));
+ }
+ for (j = 0; j < etmdata->nr_data_cmp; j++) {
+ etmdata->state[i++] = etm_readq(etmdata, TRCDVCVRn(j));
+ /* was TRCDVCMRn(i): index by comparator j, not cursor */
+ etmdata->state[i++] = etm_readq(etmdata, TRCDVCMRn(j));
+ }
+ for (j = 0; j < etmdata->nr_ctxid_cmp; j++)
+ etmdata->state[i++] = etm_readq(etmdata, TRCCIDCVRn(j));
+ etmdata->state[i++] = etm_readl(etmdata, TRCCIDCCTLR0);
+ etmdata->state[i++] = etm_readl(etmdata, TRCCIDCCTLR1);
+ for (j = 0; j < etmdata->nr_vmid_cmp; j++)
+ etmdata->state[i++] = etm_readq(etmdata,
+ TRCVMIDCVRn(j));
+ etmdata->state[i++] = etm_readl(etmdata, TRCVMIDCCTLR0);
+ etmdata->state[i++] = etm_readl(etmdata, TRCVMIDCCTLR1);
+ /* single-shot comparator registers */
+ for (j = 0; j < etmdata->nr_ss_cmp; j++) {
+ etmdata->state[i++] = etm_readl(etmdata, TRCSSCCRn(j));
+ etmdata->state[i++] = etm_readl(etmdata, TRCSSCSRn(j));
+ etmdata->state[i++] = etm_readl(etmdata,
+ TRCSSPCICRn(j));
+ }
+ /* claim tag registers (restored through TRCCLAIMSET) */
+ etmdata->state[i++] = etm_readl(etmdata, TRCCLAIMCLR);
+ /* program ctrl register */
+ etmdata->state[i++] = etm_readl(etmdata, TRCPRGCTLR);
+
+ /* ensure trace unit is idle to be powered down */
+ for (count = TIMEOUT_US; (BVAL(etm_readl(etmdata, TRCSTATR), 0)
+ != 1) && count > 0; count--)
+ udelay(1);
+ if (count == 0)
+ pr_err_ratelimited("timeout waiting for idle state\n");
+
+ atomic_notifier_call_chain(&etm_save_notifier_list, 0, NULL);
+
+ break;
+ default:
+ pr_err_ratelimited("unsupported etm arch %d in %s\n",
+ etmdata->arch, __func__);
+ }
+
+ ETM_LOCK(etmdata);
+}
+
+/*
+ * etm_mm_restore_state - replay the snapshot taken by
+ * etm_mm_save_state() through memory-mapped I/O after power-up.
+ *
+ * The write order must exactly mirror the save order, since the state
+ * array is consumed by a single running cursor i.  The claim tags were
+ * saved from TRCCLAIMCLR and are re-asserted via TRCCLAIMSET.
+ */
+static inline void etm_mm_restore_state(struct etm_ctx *etmdata)
+{
+ int i, j;
+
+ i = 0;
+ ETM_UNLOCK(etmdata);
+
+ switch (etmdata->arch) {
+ case ETM_ARCH_V4:
+ atomic_notifier_call_chain(&etm_restore_notifier_list, 0, NULL);
+
+ /* check OS lock is locked */
+ if (BVAL(etm_readl(etmdata, TRCOSLSR), 1) != 1) {
+ pr_err_ratelimited("OS lock is unlocked\n");
+ etm_os_lock(etmdata);
+ }
+
+ /* main control and configuration registers */
+ etm_writel(etmdata, etmdata->state[i++], TRCPROCSELR);
+ etm_writel(etmdata, etmdata->state[i++], TRCCONFIGR);
+ etm_writel(etmdata, etmdata->state[i++], TRCAUXCTLR);
+ etm_writel(etmdata, etmdata->state[i++], TRCEVENTCTL0R);
+ etm_writel(etmdata, etmdata->state[i++], TRCEVENTCTL1R);
+ etm_writel(etmdata, etmdata->state[i++], TRCSTALLCTLR);
+ etm_writel(etmdata, etmdata->state[i++], TRCTSCTLR);
+ etm_writel(etmdata, etmdata->state[i++], TRCSYNCPR);
+ etm_writel(etmdata, etmdata->state[i++], TRCCCCTLR);
+ etm_writel(etmdata, etmdata->state[i++], TRCBBCTLR);
+ etm_writel(etmdata, etmdata->state[i++], TRCTRACEIDR);
+ etm_writel(etmdata, etmdata->state[i++], TRCQCTLR);
+ /* filtering control registers */
+ etm_writel(etmdata, etmdata->state[i++], TRCVICTLR);
+ etm_writel(etmdata, etmdata->state[i++], TRCVIIECTLR);
+ etm_writel(etmdata, etmdata->state[i++], TRCVISSCTLR);
+ etm_writel(etmdata, etmdata->state[i++], TRCVIPCSSCTLR);
+ etm_writel(etmdata, etmdata->state[i++], TRCVDCTLR);
+ etm_writel(etmdata, etmdata->state[i++], TRCVDSACCTLR);
+ etm_writel(etmdata, etmdata->state[i++], TRCVDARCCTLR);
+ /* derived resources registers */
+ for (j = 0; j < etmdata->nr_seq_state-1; j++)
+ etm_writel(etmdata, etmdata->state[i++], TRCSEQEVRn(j));
+ etm_writel(etmdata, etmdata->state[i++], TRCSEQRSTEVR);
+ etm_writel(etmdata, etmdata->state[i++], TRCSEQSTR);
+ etm_writel(etmdata, etmdata->state[i++], TRCEXTINSELR);
+ for (j = 0; j < etmdata->nr_cntr; j++) {
+ etm_writel(etmdata, etmdata->state[i++],
+ TRCCNTRLDVRn(j));
+ etm_writel(etmdata, etmdata->state[i++],
+ TRCCNTCTLRn(j));
+ etm_writel(etmdata, etmdata->state[i++], TRCCNTVRn(j));
+ }
+ /* resource selection registers */
+ for (j = 0; j < etmdata->nr_resource; j++)
+ etm_writel(etmdata, etmdata->state[i++], TRCRSCTLRn(j));
+ /* comparator registers */
+ for (j = 0; j < etmdata->nr_addr_cmp * 2; j++) {
+ etm_writeq(etmdata, etmdata->state[i++], TRCACVRn(j));
+ etm_writeq(etmdata, etmdata->state[i++], TRCACATRn(j));
+ }
+ for (j = 0; j < etmdata->nr_data_cmp; j++) {
+ etm_writeq(etmdata, etmdata->state[i++], TRCDVCVRn(j));
+ etm_writeq(etmdata, etmdata->state[i++], TRCDVCMRn(j));
+ }
+ for (j = 0; j < etmdata->nr_ctxid_cmp; j++)
+ etm_writeq(etmdata, etmdata->state[i++], TRCCIDCVRn(j));
+ etm_writel(etmdata, etmdata->state[i++], TRCCIDCCTLR0);
+ etm_writel(etmdata, etmdata->state[i++], TRCCIDCCTLR1);
+ for (j = 0; j < etmdata->nr_vmid_cmp; j++)
+ etm_writeq(etmdata, etmdata->state[i++],
+ TRCVMIDCVRn(j));
+ etm_writel(etmdata, etmdata->state[i++], TRCVMIDCCTLR0);
+ etm_writel(etmdata, etmdata->state[i++], TRCVMIDCCTLR1);
+ /* single-shot comparator registers */
+ for (j = 0; j < etmdata->nr_ss_cmp; j++) {
+ etm_writel(etmdata, etmdata->state[i++], TRCSSCCRn(j));
+ etm_writel(etmdata, etmdata->state[i++], TRCSSCSRn(j));
+ etm_writel(etmdata, etmdata->state[i++],
+ TRCSSPCICRn(j));
+ }
+ /* claim tag registers */
+ etm_writel(etmdata, etmdata->state[i++], TRCCLAIMSET);
+ /* program ctrl register */
+ etm_writel(etmdata, etmdata->state[i++], TRCPRGCTLR);
+
+ etm_os_unlock(etmdata);
+ break;
+ default:
+ pr_err_ratelimited("unsupported etm arch %d in %s\n",
+ etmdata->arch, __func__);
+ }
+
+ ETM_LOCK(etmdata);
+}
+
+/* Drop the CPMR vote that keeps the trace unit clock enabled */
+static inline void etm_clk_disable(void)
+{
+ uint32_t cpmr;
+
+ isb();
+ cpmr = trc_readl(CPMR_EL1);
+ cpmr &= ~ETM_CPMR_CLKEN;
+ trc_write(cpmr, CPMR_EL1);
+}
+
+/* Vote the trace unit clock on before touching ETM system registers */
+static inline void etm_clk_enable(void)
+{
+ uint32_t cpmr;
+
+ cpmr = trc_readl(CPMR_EL1);
+ cpmr |= ETM_CPMR_CLKEN;
+ trc_write(cpmr, CPMR_EL1);
+ isb();
+}
+
+/*
+ * Save sequencer state event register j (system-instruction interface;
+ * register names cannot be computed, hence the switch).  Returns the
+ * advanced state-array cursor i.
+ */
+static int etm_read_ssxr(uint64_t *state, int i, int j)
+{
+ switch (j) {
+ case 0:
+ state[i++] = trc_readl(ETMSEQEVR0);
+ break;
+ case 1:
+ state[i++] = trc_readl(ETMSEQEVR1);
+ break;
+ case 2:
+ state[i++] = trc_readl(ETMSEQEVR2);
+ break;
+ default:
+ pr_err_ratelimited("idx %d out of bounds in %s\n", j, __func__);
+ }
+ return i;
+}
+
+/*
+ * Save counter j's reload value, control, and current value registers.
+ * Returns the advanced state-array cursor i.
+ */
+static int etm_read_crxr(uint64_t *state, int i, int j)
+{
+ switch (j) {
+ case 0:
+ state[i++] = trc_readl(ETMCNTRLDVR0);
+ state[i++] = trc_readl(ETMCNTCTLR0);
+ state[i++] = trc_readl(ETMCNTVR0);
+ break;
+ case 1:
+ state[i++] = trc_readl(ETMCNTRLDVR1);
+ state[i++] = trc_readl(ETMCNTCTLR1);
+ state[i++] = trc_readl(ETMCNTVR1);
+ break;
+ case 2:
+ state[i++] = trc_readl(ETMCNTRLDVR2);
+ state[i++] = trc_readl(ETMCNTCTLR2);
+ state[i++] = trc_readl(ETMCNTVR2);
+ break;
+ case 3:
+ state[i++] = trc_readl(ETMCNTRLDVR3);
+ state[i++] = trc_readl(ETMCNTCTLR3);
+ state[i++] = trc_readl(ETMCNTVR3);
+ break;
+ default:
+ pr_err_ratelimited("idx %d out of bounds in %s\n", j, __func__);
+ }
+ return i;
+}
+
+/*
+ * Save resource selection control register j.  Cases start at 2 -- the
+ * caller passes j + 2 because selectors 0 and 1 have no programmable
+ * control register.  Returns the advanced state-array cursor i.
+ */
+static int etm_read_rsxr(uint64_t *state, int i, int j)
+{
+ switch (j) {
+ case 2:
+ state[i++] = trc_readl(ETMRSCTLR2);
+ break;
+ case 3:
+ state[i++] = trc_readl(ETMRSCTLR3);
+ break;
+ case 4:
+ state[i++] = trc_readl(ETMRSCTLR4);
+ break;
+ case 5:
+ state[i++] = trc_readl(ETMRSCTLR5);
+ break;
+ case 6:
+ state[i++] = trc_readl(ETMRSCTLR6);
+ break;
+ case 7:
+ state[i++] = trc_readl(ETMRSCTLR7);
+ break;
+ case 8:
+ state[i++] = trc_readl(ETMRSCTLR8);
+ break;
+ case 9:
+ state[i++] = trc_readl(ETMRSCTLR9);
+ break;
+ case 10:
+ state[i++] = trc_readl(ETMRSCTLR10);
+ break;
+ case 11:
+ state[i++] = trc_readl(ETMRSCTLR11);
+ break;
+ case 12:
+ state[i++] = trc_readl(ETMRSCTLR12);
+ break;
+ case 13:
+ state[i++] = trc_readl(ETMRSCTLR13);
+ break;
+ case 14:
+ state[i++] = trc_readl(ETMRSCTLR14);
+ break;
+ case 15:
+ state[i++] = trc_readl(ETMRSCTLR15);
+ break;
+ case 16:
+ state[i++] = trc_readl(ETMRSCTLR16);
+ break;
+ case 17:
+ state[i++] = trc_readl(ETMRSCTLR17);
+ break;
+ case 18:
+ state[i++] = trc_readl(ETMRSCTLR18);
+ break;
+ case 19:
+ state[i++] = trc_readl(ETMRSCTLR19);
+ break;
+ case 20:
+ state[i++] = trc_readl(ETMRSCTLR20);
+ break;
+ case 21:
+ state[i++] = trc_readl(ETMRSCTLR21);
+ break;
+ case 22:
+ state[i++] = trc_readl(ETMRSCTLR22);
+ break;
+ case 23:
+ state[i++] = trc_readl(ETMRSCTLR23);
+ break;
+ case 24:
+ state[i++] = trc_readl(ETMRSCTLR24);
+ break;
+ case 25:
+ state[i++] = trc_readl(ETMRSCTLR25);
+ break;
+ case 26:
+ state[i++] = trc_readl(ETMRSCTLR26);
+ break;
+ case 27:
+ state[i++] = trc_readl(ETMRSCTLR27);
+ break;
+ case 28:
+ state[i++] = trc_readl(ETMRSCTLR28);
+ break;
+ case 29:
+ state[i++] = trc_readl(ETMRSCTLR29);
+ break;
+ case 30:
+ state[i++] = trc_readl(ETMRSCTLR30);
+ break;
+ case 31:
+ state[i++] = trc_readl(ETMRSCTLR31);
+ break;
+ default:
+ pr_err_ratelimited("idx %d out of bounds in %s\n", j, __func__);
+ }
+ return i;
+}
+
+/*
+ * Save address comparator j's value and access-type registers (64-bit).
+ * Returns the advanced state-array cursor i.
+ */
+static int etm_read_acr(uint64_t *state, int i, int j)
+{
+ switch (j) {
+ case 0:
+ state[i++] = trc_readq(ETMACVR0);
+ state[i++] = trc_readq(ETMACATR0);
+ break;
+ case 1:
+ state[i++] = trc_readq(ETMACVR1);
+ state[i++] = trc_readq(ETMACATR1);
+ break;
+ case 2:
+ state[i++] = trc_readq(ETMACVR2);
+ state[i++] = trc_readq(ETMACATR2);
+ break;
+ case 3:
+ state[i++] = trc_readq(ETMACVR3);
+ state[i++] = trc_readq(ETMACATR3);
+ break;
+ case 4:
+ state[i++] = trc_readq(ETMACVR4);
+ state[i++] = trc_readq(ETMACATR4);
+ break;
+ case 5:
+ state[i++] = trc_readq(ETMACVR5);
+ state[i++] = trc_readq(ETMACATR5);
+ break;
+ case 6:
+ state[i++] = trc_readq(ETMACVR6);
+ state[i++] = trc_readq(ETMACATR6);
+ break;
+ case 7:
+ state[i++] = trc_readq(ETMACVR7);
+ state[i++] = trc_readq(ETMACATR7);
+ break;
+ case 8:
+ state[i++] = trc_readq(ETMACVR8);
+ state[i++] = trc_readq(ETMACATR8);
+ break;
+ case 9:
+ state[i++] = trc_readq(ETMACVR9);
+ state[i++] = trc_readq(ETMACATR9);
+ break;
+ case 10:
+ state[i++] = trc_readq(ETMACVR10);
+ state[i++] = trc_readq(ETMACATR10);
+ break;
+ case 11:
+ state[i++] = trc_readq(ETMACVR11);
+ state[i++] = trc_readq(ETMACATR11);
+ break;
+ case 12:
+ state[i++] = trc_readq(ETMACVR12);
+ state[i++] = trc_readq(ETMACATR12);
+ break;
+ case 13:
+ state[i++] = trc_readq(ETMACVR13);
+ state[i++] = trc_readq(ETMACATR13);
+ break;
+ case 14:
+ state[i++] = trc_readq(ETMACVR14);
+ state[i++] = trc_readq(ETMACATR14);
+ break;
+ case 15:
+ state[i++] = trc_readq(ETMACVR15);
+ state[i++] = trc_readq(ETMACATR15);
+ break;
+ default:
+ pr_err_ratelimited("idx %d out of bounds in %s\n", j, __func__);
+ }
+ return i;
+}
+
+/*
+ * Save data-value comparator j's value and mask registers (64-bit).
+ * Returns the advanced state-array cursor i.
+ */
+static int etm_read_dvcr(uint64_t *state, int i, int j)
+{
+ switch (j) {
+ case 0:
+ state[i++] = trc_readq(ETMDVCVR0);
+ state[i++] = trc_readq(ETMDVCMR0);
+ break;
+ case 1:
+ state[i++] = trc_readq(ETMDVCVR1);
+ state[i++] = trc_readq(ETMDVCMR1);
+ break;
+ case 2:
+ state[i++] = trc_readq(ETMDVCVR2);
+ state[i++] = trc_readq(ETMDVCMR2);
+ break;
+ case 3:
+ state[i++] = trc_readq(ETMDVCVR3);
+ state[i++] = trc_readq(ETMDVCMR3);
+ break;
+ case 4:
+ state[i++] = trc_readq(ETMDVCVR4);
+ state[i++] = trc_readq(ETMDVCMR4);
+ break;
+ case 5:
+ state[i++] = trc_readq(ETMDVCVR5);
+ state[i++] = trc_readq(ETMDVCMR5);
+ break;
+ case 6:
+ state[i++] = trc_readq(ETMDVCVR6);
+ state[i++] = trc_readq(ETMDVCMR6);
+ break;
+ case 7:
+ state[i++] = trc_readq(ETMDVCVR7);
+ state[i++] = trc_readq(ETMDVCMR7);
+ break;
+ default:
+ pr_err_ratelimited("idx %d out of bounds in %s\n", j, __func__);
+ }
+ return i;
+}
+
+/*
+ * Save context ID comparator value register j (64-bit).
+ * Returns the advanced state-array cursor i.
+ */
+static int etm_read_ccvr(uint64_t *state, int i, int j)
+{
+ switch (j) {
+ case 0:
+ state[i++] = trc_readq(ETMCIDCVR0);
+ break;
+ case 1:
+ state[i++] = trc_readq(ETMCIDCVR1);
+ break;
+ case 2:
+ state[i++] = trc_readq(ETMCIDCVR2);
+ break;
+ case 3:
+ state[i++] = trc_readq(ETMCIDCVR3);
+ break;
+ case 4:
+ state[i++] = trc_readq(ETMCIDCVR4);
+ break;
+ case 5:
+ state[i++] = trc_readq(ETMCIDCVR5);
+ break;
+ case 6:
+ state[i++] = trc_readq(ETMCIDCVR6);
+ break;
+ case 7:
+ state[i++] = trc_readq(ETMCIDCVR7);
+ break;
+ default:
+ pr_err_ratelimited("idx %d out of bounds in %s\n", j, __func__);
+ }
+ return i;
+}
+
+/*
+ * Save VMID comparator value register j (64-bit).
+ * Returns the advanced state-array cursor i.
+ */
+static int etm_read_vcvr(uint64_t *state, int i, int j)
+{
+ switch (j) {
+ case 0:
+ state[i++] = trc_readq(ETMVMIDCVR0);
+ break;
+ case 1:
+ state[i++] = trc_readq(ETMVMIDCVR1);
+ break;
+ case 2:
+ state[i++] = trc_readq(ETMVMIDCVR2);
+ break;
+ case 3:
+ state[i++] = trc_readq(ETMVMIDCVR3);
+ break;
+ case 4:
+ state[i++] = trc_readq(ETMVMIDCVR4);
+ break;
+ case 5:
+ state[i++] = trc_readq(ETMVMIDCVR5);
+ break;
+ case 6:
+ state[i++] = trc_readq(ETMVMIDCVR6);
+ break;
+ case 7:
+ state[i++] = trc_readq(ETMVMIDCVR7);
+ break;
+ default:
+ pr_err_ratelimited("idx %d out of bounds in %s\n", j, __func__);
+ }
+ return i;
+}
+
+/*
+ * Save single-shot comparator j's control, status, and PE comparator
+ * input control registers.  Returns the advanced state-array cursor i.
+ */
+static int etm_read_sscr(uint64_t *state, int i, int j)
+{
+ switch (j) {
+ case 0:
+ state[i++] = trc_readl(ETMSSCCR0);
+ state[i++] = trc_readl(ETMSSCSR0);
+ state[i++] = trc_readl(ETMSSPCICR0);
+ break;
+ case 1:
+ state[i++] = trc_readl(ETMSSCCR1);
+ state[i++] = trc_readl(ETMSSCSR1);
+ state[i++] = trc_readl(ETMSSPCICR1);
+ break;
+ case 2:
+ state[i++] = trc_readl(ETMSSCCR2);
+ state[i++] = trc_readl(ETMSSCSR2);
+ state[i++] = trc_readl(ETMSSPCICR2);
+ break;
+ case 3:
+ state[i++] = trc_readl(ETMSSCCR3);
+ state[i++] = trc_readl(ETMSSCSR3);
+ state[i++] = trc_readl(ETMSSPCICR3);
+ break;
+ case 4:
+ state[i++] = trc_readl(ETMSSCCR4);
+ state[i++] = trc_readl(ETMSSCSR4);
+ state[i++] = trc_readl(ETMSSPCICR4);
+ break;
+ case 5:
+ state[i++] = trc_readl(ETMSSCCR5);
+ state[i++] = trc_readl(ETMSSCSR5);
+ state[i++] = trc_readl(ETMSSPCICR5);
+ break;
+ case 6:
+ state[i++] = trc_readl(ETMSSCCR6);
+ state[i++] = trc_readl(ETMSSCSR6);
+ state[i++] = trc_readl(ETMSSPCICR6);
+ break;
+ case 7:
+ state[i++] = trc_readl(ETMSSCCR7);
+ state[i++] = trc_readl(ETMSSCSR7);
+ state[i++] = trc_readl(ETMSSPCICR7);
+ break;
+ default:
+ pr_err_ratelimited("idx %d out of bounds in %s\n", j, __func__);
+ }
+ return i;
+}
+
+/*
+ * etm_si_save_state - save ETMv4 state through the system-instruction
+ * (trc_readl/trc_readq) interface instead of memory-mapped I/O.
+ *
+ * Mirrors etm_mm_save_state() for the register subset reachable via
+ * system instructions; the save order must match the corresponding
+ * system-instruction restore path (not visible in this chunk).
+ * Resource selectors are read starting at index 2 (j + 2), as 0 and 1
+ * have no programmable control register.
+ */
+static inline void etm_si_save_state(struct etm_ctx *etmdata)
+{
+ int i, j, count;
+
+ i = 0;
+ /* Ensure all writes are complete before saving ETM registers */
+ mb();
+ isb();
+
+ /* Vote for ETM power/clock enable */
+ etm_clk_enable();
+
+ switch (etmdata->arch) {
+ case ETM_ARCH_V4:
+ trc_write(0x1, ETMOSLAR);
+ isb();
+
+ /* poll until programmers' model becomes stable */
+ for (count = TIMEOUT_US; (BVAL(trc_readl(ETMSTATR), 1)
+ != 1) && count > 0; count--)
+ udelay(1);
+ if (count == 0)
+ pr_err_ratelimited("programmers model is not stable\n");
+
+ /* main control and configuration registers */
+ etmdata->state[i++] = trc_readl(ETMCONFIGR);
+ etmdata->state[i++] = trc_readl(ETMEVENTCTL0R);
+ etmdata->state[i++] = trc_readl(ETMEVENTCTL1R);
+ etmdata->state[i++] = trc_readl(ETMSTALLCTLR);
+ etmdata->state[i++] = trc_readl(ETMTSCTLR);
+ etmdata->state[i++] = trc_readl(ETMSYNCPR);
+ etmdata->state[i++] = trc_readl(ETMCCCTLR);
+ etmdata->state[i++] = trc_readl(ETMTRACEIDR);
+ /* filtering control registers */
+ etmdata->state[i++] = trc_readl(ETMVICTLR);
+ etmdata->state[i++] = trc_readl(ETMVIIECTLR);
+ etmdata->state[i++] = trc_readl(ETMVISSCTLR);
+ /* derived resource registers */
+ for (j = 0; j < etmdata->nr_seq_state-1; j++)
+ i = etm_read_ssxr(etmdata->state, i, j);
+ etmdata->state[i++] = trc_readl(ETMSEQRSTEVR);
+ etmdata->state[i++] = trc_readl(ETMSEQSTR);
+ etmdata->state[i++] = trc_readl(ETMEXTINSELR);
+ for (j = 0; j < etmdata->nr_cntr; j++)
+ i = etm_read_crxr(etmdata->state, i, j);
+ /* resource selection registers */
+ for (j = 0; j < etmdata->nr_resource; j++)
+ i = etm_read_rsxr(etmdata->state, i, j + 2);
+ /* comparator registers */
+ for (j = 0; j < etmdata->nr_addr_cmp * 2; j++)
+ i = etm_read_acr(etmdata->state, i, j);
+ for (j = 0; j < etmdata->nr_data_cmp; j++)
+ i = etm_read_dvcr(etmdata->state, i, j);
+ for (j = 0; j < etmdata->nr_ctxid_cmp; j++)
+ i = etm_read_ccvr(etmdata->state, i, j);
+ etmdata->state[i++] = trc_readl(ETMCIDCCTLR0);
+ for (j = 0; j < etmdata->nr_vmid_cmp; j++)
+ i = etm_read_vcvr(etmdata->state, i, j);
+ /* single-shot comparator registers */
+ for (j = 0; j < etmdata->nr_ss_cmp; j++)
+ i = etm_read_sscr(etmdata->state, i, j);
+ /* program ctrl register */
+ etmdata->state[i++] = trc_readl(ETMPRGCTLR);
+
+ /* ensure trace unit is idle to be powered down */
+ for (count = TIMEOUT_US; (BVAL(trc_readl(ETMSTATR), 0)
+ != 1) && count > 0; count--)
+ udelay(1);
+ if (count == 0)
+ pr_err_ratelimited("timeout waiting for idle state\n");
+
+ atomic_notifier_call_chain(&etm_save_notifier_list, 0, NULL);
+
+ break;
+ default:
+ pr_err_ratelimited("unsupported etm arch %d in %s\n",
+ etmdata->arch, __func__);
+ }
+
+ /* Vote for ETM power/clock disable */
+ etm_clk_disable();
+}
+
+/*
+ * Restore sequencer event register j (ETMSEQEVR0..2) from state[i].
+ * The switch is required because the register name is pasted into a
+ * system instruction by trc_write() and cannot be indexed at runtime.
+ * Returns the updated state index.
+ */
+static int etm_write_ssxr(uint64_t *state, int i, int j)
+{
+ switch (j) {
+ case 0:
+ trc_write(state[i++], ETMSEQEVR0);
+ break;
+ case 1:
+ trc_write(state[i++], ETMSEQEVR1);
+ break;
+ case 2:
+ trc_write(state[i++], ETMSEQEVR2);
+ break;
+ default:
+ pr_err_ratelimited("idx %d out of bounds in %s\n", j, __func__);
+ }
+ return i;
+}
+
+/*
+ * Restore counter group j (reload value, control, current value) from
+ * three consecutive state[] slots. Returns the updated state index.
+ */
+static int etm_write_crxr(uint64_t *state, int i, int j)
+{
+ switch (j) {
+ case 0:
+ trc_write(state[i++], ETMCNTRLDVR0);
+ trc_write(state[i++], ETMCNTCTLR0);
+ trc_write(state[i++], ETMCNTVR0);
+ break;
+ case 1:
+ trc_write(state[i++], ETMCNTRLDVR1);
+ trc_write(state[i++], ETMCNTCTLR1);
+ trc_write(state[i++], ETMCNTVR1);
+ break;
+ case 2:
+ trc_write(state[i++], ETMCNTRLDVR2);
+ trc_write(state[i++], ETMCNTCTLR2);
+ trc_write(state[i++], ETMCNTVR2);
+ break;
+ case 3:
+ trc_write(state[i++], ETMCNTRLDVR3);
+ trc_write(state[i++], ETMCNTCTLR3);
+ trc_write(state[i++], ETMCNTVR3);
+ break;
+ default:
+ pr_err_ratelimited("idx %d out of bounds in %s\n", j, __func__);
+ }
+ return i;
+}
+
+/*
+ * Restore resource selection control register j (ETMRSCTLR2..31) from
+ * state[i]. Selectors 0 and 1 are not programmable, hence no case for
+ * them (callers pass j + 2). Returns the updated state index.
+ */
+static int etm_write_rsxr(uint64_t *state, int i, int j)
+{
+ switch (j) {
+ case 2:
+ trc_write(state[i++], ETMRSCTLR2);
+ break;
+ case 3:
+ trc_write(state[i++], ETMRSCTLR3);
+ break;
+ case 4:
+ trc_write(state[i++], ETMRSCTLR4);
+ break;
+ case 5:
+ trc_write(state[i++], ETMRSCTLR5);
+ break;
+ case 6:
+ trc_write(state[i++], ETMRSCTLR6);
+ break;
+ case 7:
+ trc_write(state[i++], ETMRSCTLR7);
+ break;
+ case 8:
+ trc_write(state[i++], ETMRSCTLR8);
+ break;
+ case 9:
+ trc_write(state[i++], ETMRSCTLR9);
+ break;
+ case 10:
+ trc_write(state[i++], ETMRSCTLR10);
+ break;
+ case 11:
+ trc_write(state[i++], ETMRSCTLR11);
+ break;
+ case 12:
+ trc_write(state[i++], ETMRSCTLR12);
+ break;
+ case 13:
+ trc_write(state[i++], ETMRSCTLR13);
+ break;
+ case 14:
+ trc_write(state[i++], ETMRSCTLR14);
+ break;
+ case 15:
+ trc_write(state[i++], ETMRSCTLR15);
+ break;
+ case 16:
+ trc_write(state[i++], ETMRSCTLR16);
+ break;
+ case 17:
+ trc_write(state[i++], ETMRSCTLR17);
+ break;
+ case 18:
+ trc_write(state[i++], ETMRSCTLR18);
+ break;
+ case 19:
+ trc_write(state[i++], ETMRSCTLR19);
+ break;
+ case 20:
+ trc_write(state[i++], ETMRSCTLR20);
+ break;
+ case 21:
+ trc_write(state[i++], ETMRSCTLR21);
+ break;
+ case 22:
+ trc_write(state[i++], ETMRSCTLR22);
+ break;
+ case 23:
+ trc_write(state[i++], ETMRSCTLR23);
+ break;
+ case 24:
+ trc_write(state[i++], ETMRSCTLR24);
+ break;
+ case 25:
+ trc_write(state[i++], ETMRSCTLR25);
+ break;
+ case 26:
+ trc_write(state[i++], ETMRSCTLR26);
+ break;
+ case 27:
+ trc_write(state[i++], ETMRSCTLR27);
+ break;
+ case 28:
+ trc_write(state[i++], ETMRSCTLR28);
+ break;
+ case 29:
+ trc_write(state[i++], ETMRSCTLR29);
+ break;
+ case 30:
+ trc_write(state[i++], ETMRSCTLR30);
+ break;
+ case 31:
+ trc_write(state[i++], ETMRSCTLR31);
+ break;
+ default:
+ pr_err_ratelimited("idx %d out of bounds in %s\n", j, __func__);
+ }
+ return i;
+}
+
+/*
+ * Restore address comparator pair j: value register (ETMACVRn) then
+ * access type register (ETMACATRn), from two consecutive state[] slots.
+ * Returns the updated state index.
+ */
+static int etm_write_acr(uint64_t *state, int i, int j)
+{
+ switch (j) {
+ case 0:
+ trc_write(state[i++], ETMACVR0);
+ trc_write(state[i++], ETMACATR0);
+ break;
+ case 1:
+ trc_write(state[i++], ETMACVR1);
+ trc_write(state[i++], ETMACATR1);
+ break;
+ case 2:
+ trc_write(state[i++], ETMACVR2);
+ trc_write(state[i++], ETMACATR2);
+ break;
+ case 3:
+ trc_write(state[i++], ETMACVR3);
+ trc_write(state[i++], ETMACATR3);
+ break;
+ case 4:
+ trc_write(state[i++], ETMACVR4);
+ trc_write(state[i++], ETMACATR4);
+ break;
+ case 5:
+ trc_write(state[i++], ETMACVR5);
+ trc_write(state[i++], ETMACATR5);
+ break;
+ case 6:
+ trc_write(state[i++], ETMACVR6);
+ trc_write(state[i++], ETMACATR6);
+ break;
+ case 7:
+ trc_write(state[i++], ETMACVR7);
+ trc_write(state[i++], ETMACATR7);
+ break;
+ case 8:
+ trc_write(state[i++], ETMACVR8);
+ trc_write(state[i++], ETMACATR8);
+ break;
+ case 9:
+ trc_write(state[i++], ETMACVR9);
+ trc_write(state[i++], ETMACATR9);
+ break;
+ case 10:
+ trc_write(state[i++], ETMACVR10);
+ trc_write(state[i++], ETMACATR10);
+ break;
+ case 11:
+ trc_write(state[i++], ETMACVR11);
+ trc_write(state[i++], ETMACATR11);
+ break;
+ case 12:
+ trc_write(state[i++], ETMACVR12);
+ trc_write(state[i++], ETMACATR12);
+ break;
+ case 13:
+ trc_write(state[i++], ETMACVR13);
+ trc_write(state[i++], ETMACATR13);
+ break;
+ case 14:
+ trc_write(state[i++], ETMACVR14);
+ trc_write(state[i++], ETMACATR14);
+ break;
+ case 15:
+ trc_write(state[i++], ETMACVR15);
+ trc_write(state[i++], ETMACATR15);
+ break;
+ default:
+ pr_err_ratelimited("idx %d out of bounds in %s\n", j, __func__);
+ }
+ return i;
+}
+
+/*
+ * Restore data value comparator pair j: value register (ETMDVCVRn) then
+ * mask register (ETMDVCMRn), from two consecutive state[] slots.
+ * Returns the updated state index.
+ */
+static int etm_write_dvcr(uint64_t *state, int i, int j)
+{
+ switch (j) {
+ case 0:
+ trc_write(state[i++], ETMDVCVR0);
+ trc_write(state[i++], ETMDVCMR0);
+ break;
+ case 1:
+ trc_write(state[i++], ETMDVCVR1);
+ trc_write(state[i++], ETMDVCMR1);
+ break;
+ case 2:
+ trc_write(state[i++], ETMDVCVR2);
+ trc_write(state[i++], ETMDVCMR2);
+ break;
+ case 3:
+ trc_write(state[i++], ETMDVCVR3);
+ trc_write(state[i++], ETMDVCMR3);
+ break;
+ case 4:
+ trc_write(state[i++], ETMDVCVR4);
+ trc_write(state[i++], ETMDVCMR4);
+ break;
+ case 5:
+ trc_write(state[i++], ETMDVCVR5);
+ trc_write(state[i++], ETMDVCMR5);
+ break;
+ case 6:
+ trc_write(state[i++], ETMDVCVR6);
+ trc_write(state[i++], ETMDVCMR6);
+ break;
+ case 7:
+ trc_write(state[i++], ETMDVCVR7);
+ trc_write(state[i++], ETMDVCMR7);
+ break;
+ default:
+ pr_err_ratelimited("idx %d out of bounds in %s\n", j, __func__);
+ }
+ return i;
+}
+
+/*
+ * Restore context ID comparator value register j (ETMCIDCVR0..7) from
+ * state[i]. Returns the updated state index.
+ */
+static int etm_write_ccvr(uint64_t *state, int i, int j)
+{
+ switch (j) {
+ case 0:
+ trc_write(state[i++], ETMCIDCVR0);
+ break;
+ case 1:
+ trc_write(state[i++], ETMCIDCVR1);
+ break;
+ case 2:
+ trc_write(state[i++], ETMCIDCVR2);
+ break;
+ case 3:
+ trc_write(state[i++], ETMCIDCVR3);
+ break;
+ case 4:
+ trc_write(state[i++], ETMCIDCVR4);
+ break;
+ case 5:
+ trc_write(state[i++], ETMCIDCVR5);
+ break;
+ case 6:
+ trc_write(state[i++], ETMCIDCVR6);
+ break;
+ case 7:
+ trc_write(state[i++], ETMCIDCVR7);
+ break;
+ default:
+ pr_err_ratelimited("idx %d out of bounds in %s\n", j, __func__);
+ }
+ return i;
+}
+
+/*
+ * Restore VMID comparator value register j (ETMVMIDCVR0..7) from
+ * state[i]. Returns the updated state index.
+ */
+static int etm_write_vcvr(uint64_t *state, int i, int j)
+{
+ switch (j) {
+ case 0:
+ trc_write(state[i++], ETMVMIDCVR0);
+ break;
+ case 1:
+ trc_write(state[i++], ETMVMIDCVR1);
+ break;
+ case 2:
+ trc_write(state[i++], ETMVMIDCVR2);
+ break;
+ case 3:
+ trc_write(state[i++], ETMVMIDCVR3);
+ break;
+ case 4:
+ trc_write(state[i++], ETMVMIDCVR4);
+ break;
+ case 5:
+ trc_write(state[i++], ETMVMIDCVR5);
+ break;
+ case 6:
+ trc_write(state[i++], ETMVMIDCVR6);
+ break;
+ case 7:
+ trc_write(state[i++], ETMVMIDCVR7);
+ break;
+ default:
+ pr_err_ratelimited("idx %d out of bounds in %s\n", j, __func__);
+ }
+ return i;
+}
+
+/*
+ * Restore single-shot comparator group j: control (ETMSSCCRn), status
+ * (ETMSSCSRn) and PE comparator input control (ETMSSPCICRn), from three
+ * consecutive state[] slots. Returns the updated state index.
+ */
+static int etm_write_sscr(uint64_t *state, int i, int j)
+{
+ switch (j) {
+ case 0:
+ trc_write(state[i++], ETMSSCCR0);
+ trc_write(state[i++], ETMSSCSR0);
+ trc_write(state[i++], ETMSSPCICR0);
+ break;
+ case 1:
+ trc_write(state[i++], ETMSSCCR1);
+ trc_write(state[i++], ETMSSCSR1);
+ trc_write(state[i++], ETMSSPCICR1);
+ break;
+ case 2:
+ trc_write(state[i++], ETMSSCCR2);
+ trc_write(state[i++], ETMSSCSR2);
+ trc_write(state[i++], ETMSSPCICR2);
+ break;
+ case 3:
+ trc_write(state[i++], ETMSSCCR3);
+ trc_write(state[i++], ETMSSCSR3);
+ trc_write(state[i++], ETMSSPCICR3);
+ break;
+ case 4:
+ trc_write(state[i++], ETMSSCCR4);
+ trc_write(state[i++], ETMSSCSR4);
+ trc_write(state[i++], ETMSSPCICR4);
+ break;
+ case 5:
+ trc_write(state[i++], ETMSSCCR5);
+ trc_write(state[i++], ETMSSCSR5);
+ trc_write(state[i++], ETMSSPCICR5);
+ break;
+ case 6:
+ trc_write(state[i++], ETMSSCCR6);
+ trc_write(state[i++], ETMSSCSR6);
+ trc_write(state[i++], ETMSSPCICR6);
+ break;
+ case 7:
+ trc_write(state[i++], ETMSSCCR7);
+ trc_write(state[i++], ETMSSCSR7);
+ trc_write(state[i++], ETMSSPCICR7);
+ break;
+ default:
+ pr_err_ratelimited("idx %d out of bounds in %s\n", j, __func__);
+ }
+ return i;
+}
+
+/*
+ * etm_si_restore_state - restore the current cpu's ETMv4 trace unit
+ * registers from etmdata->state[] using system-instruction accesses.
+ *
+ * Must consume state[] in exactly the order etm_si_save_state() filled
+ * it. The OS lock is expected to be set on exit from power collapse and
+ * is cleared only after all registers are written back.
+ */
+static inline void etm_si_restore_state(struct etm_ctx *etmdata)
+{
+ int i, j;
+
+ i = 0;
+
+ /* Vote for ETM power/clock enable */
+ etm_clk_enable();
+
+ switch (etmdata->arch) {
+ case ETM_ARCH_V4:
+ atomic_notifier_call_chain(&etm_restore_notifier_list, 0, NULL);
+
+ /* check OS lock is locked */
+ if (BVAL(trc_readl(ETMOSLSR), 1) != 1) {
+ pr_err_ratelimited("OS lock is unlocked\n");
+ trc_write(0x1, ETMOSLAR);
+ isb();
+ }
+
+ /* main control and configuration registers */
+ trc_write(etmdata->state[i++], ETMCONFIGR);
+ trc_write(etmdata->state[i++], ETMEVENTCTL0R);
+ trc_write(etmdata->state[i++], ETMEVENTCTL1R);
+ trc_write(etmdata->state[i++], ETMSTALLCTLR);
+ trc_write(etmdata->state[i++], ETMTSCTLR);
+ trc_write(etmdata->state[i++], ETMSYNCPR);
+ trc_write(etmdata->state[i++], ETMCCCTLR);
+ trc_write(etmdata->state[i++], ETMTRACEIDR);
+ /* filtering control registers */
+ trc_write(etmdata->state[i++], ETMVICTLR);
+ trc_write(etmdata->state[i++], ETMVIIECTLR);
+ trc_write(etmdata->state[i++], ETMVISSCTLR);
+ /* derived resources registers */
+ for (j = 0; j < etmdata->nr_seq_state-1; j++)
+ i = etm_write_ssxr(etmdata->state, i, j);
+ trc_write(etmdata->state[i++], ETMSEQRSTEVR);
+ trc_write(etmdata->state[i++], ETMSEQSTR);
+ trc_write(etmdata->state[i++], ETMEXTINSELR);
+ for (j = 0; j < etmdata->nr_cntr; j++)
+ i = etm_write_crxr(etmdata->state, i, j);
+ /* resource selection registers */
+ for (j = 0; j < etmdata->nr_resource; j++)
+ i = etm_write_rsxr(etmdata->state, i, j + 2);
+ /* comparator registers */
+ for (j = 0; j < etmdata->nr_addr_cmp * 2; j++)
+ i = etm_write_acr(etmdata->state, i, j);
+ for (j = 0; j < etmdata->nr_data_cmp; j++)
+ i = etm_write_dvcr(etmdata->state, i, j);
+ for (j = 0; j < etmdata->nr_ctxid_cmp; j++)
+ i = etm_write_ccvr(etmdata->state, i, j);
+ trc_write(etmdata->state[i++], ETMCIDCCTLR0);
+ for (j = 0; j < etmdata->nr_vmid_cmp; j++)
+ i = etm_write_vcvr(etmdata->state, i, j);
+ /* single-shot comparator registers */
+ for (j = 0; j < etmdata->nr_ss_cmp; j++)
+ i = etm_write_sscr(etmdata->state, i, j);
+ /* program ctrl register */
+ trc_write(etmdata->state[i++], ETMPRGCTLR);
+
+ /* release the OS lock only after all registers are restored */
+ isb();
+ trc_write(0x0, ETMOSLAR);
+ break;
+ default:
+ pr_err_ratelimited("unsupported etm arch %d in %s\n",
+ etmdata->arch, __func__);
+ }
+
+ /* Vote for ETM power/clock disable */
+ etm_clk_disable();
+}
+
+/*
+ * Save the ETM state of the cpu this is called on, choosing the
+ * system-instruction or memory-mapped path per device configuration.
+ * No-op when the cpu has no ETM context or save-restore is disabled
+ * or not (yet) enabled.
+ */
+void msm_jtag_etm_save_state(void)
+{
+ struct etm_ctx *etmdata = etm[raw_smp_processor_id()];
+
+ if (!etmdata || etmdata->save_restore_disabled)
+ return;
+
+ if (!etmdata->save_restore_enabled)
+ return;
+
+ if (etmdata->si_enable)
+ etm_si_save_state(etmdata);
+ else
+ etm_mm_save_state(etmdata);
+}
+EXPORT_SYMBOL(msm_jtag_etm_save_state);
+
+/*
+ * Restore the ETM state of the cpu this is called on, mirroring
+ * msm_jtag_etm_save_state(). The save_restore_enabled flag ensures we
+ * only restore when a save has been performed by the callee function.
+ */
+void msm_jtag_etm_restore_state(void)
+{
+ struct etm_ctx *etmdata = etm[raw_smp_processor_id()];
+
+ if (!etmdata || etmdata->save_restore_disabled)
+ return;
+
+ if (!etmdata->save_restore_enabled)
+ return;
+
+ if (etmdata->si_enable)
+ etm_si_restore_state(etmdata);
+ else
+ etm_mm_restore_state(etmdata);
+}
+EXPORT_SYMBOL(msm_jtag_etm_restore_state);
+
+/* Only ETMv4 is supported by this driver's save-restore paths. */
+static inline bool etm_arch_supported(uint8_t arch)
+{
+ return arch == ETM_ARCH_V4;
+}
+
+/*
+ * Record whether the trace unit implements the OS lock: treated as
+ * present only when TRCOSLSR bit 0 is clear and bit 3 is set.
+ */
+static void etm_os_lock_init(struct etm_ctx *etmdata)
+{
+ uint32_t oslsr = etm_readl(etmdata, TRCOSLSR);
+
+ etmdata->os_lock_present = (BVAL(oslsr, 0) == 0) && BVAL(oslsr, 3);
+}
+
+/*
+ * etm_init_arch_data - read the trace unit ID registers and cache the
+ * architecture version and resource counts used to size save/restore.
+ *
+ * @info: struct etm_ctx * for the cpu; runs on that cpu (called via
+ * smp_call_function_single() or from the CPU_STARTING notifier).
+ */
+static void etm_init_arch_data(void *info)
+{
+ uint32_t val;
+ struct etm_ctx *etmdata = info;
+
+ ETM_UNLOCK(etmdata);
+
+ etm_os_lock_init(etmdata);
+
+ /* architecture version field of TRCIDR1 */
+ val = etm_readl(etmdata, TRCIDR1);
+ etmdata->arch = BMVAL(val, 4, 11);
+
+ /* number of resources trace unit supports */
+ val = etm_readl(etmdata, TRCIDR4);
+ etmdata->nr_addr_cmp = BMVAL(val, 0, 3);
+ etmdata->nr_data_cmp = BMVAL(val, 4, 7);
+ etmdata->nr_resource = BMVAL(val, 16, 19);
+ etmdata->nr_ss_cmp = BMVAL(val, 20, 23);
+ etmdata->nr_ctxid_cmp = BMVAL(val, 24, 27);
+ etmdata->nr_vmid_cmp = BMVAL(val, 28, 31);
+
+ /* sequencer states and counters from TRCIDR5 */
+ val = etm_readl(etmdata, TRCIDR5);
+ etmdata->nr_seq_state = BMVAL(val, 25, 27);
+ etmdata->nr_cntr = BMVAL(val, 28, 30);
+
+ ETM_LOCK(etmdata);
+}
+
+/*
+ * CPU hotplug callback. At CPU_STARTING (runs on the incoming cpu,
+ * hence the spinlock) the per-cpu ETM arch data is read once. At
+ * CPU_ONLINE (process context, hence the mutex) save-restore is
+ * enabled in the kernel unless TZ already provides it.
+ */
+static int jtag_mm_etm_callback(struct notifier_block *nfb,
+ unsigned long action,
+ void *hcpu)
+{
+ unsigned int cpu = (unsigned long)hcpu;
+
+ if (!etm[cpu])
+ goto out;
+
+ switch (action & (~CPU_TASKS_FROZEN)) {
+ case CPU_STARTING:
+ spin_lock(&etm[cpu]->spinlock);
+ if (!etm[cpu]->init) {
+ etm_init_arch_data(etm[cpu]);
+ etm[cpu]->init = true;
+ }
+ spin_unlock(&etm[cpu]->spinlock);
+ break;
+
+ case CPU_ONLINE:
+ mutex_lock(&etm[cpu]->mutex);
+ if (etm[cpu]->enable) {
+ mutex_unlock(&etm[cpu]->mutex);
+ goto out;
+ }
+ if (etm_arch_supported(etm[cpu]->arch)) {
+ /* kernel save-restore only if TZ version is too old */
+ if (scm_get_feat_version(TZ_DBG_ETM_FEAT_ID) <
+ TZ_DBG_ETM_VER)
+ etm[cpu]->save_restore_enabled = true;
+ else
+ pr_info("etm save-restore supported by TZ\n");
+ } else
+ pr_info("etm arch %u not supported\n", etm[cpu]->arch);
+ etm[cpu]->enable = true;
+ mutex_unlock(&etm[cpu]->mutex);
+ break;
+ default:
+ break;
+ }
+out:
+ return NOTIFY_OK;
+}
+
+/* Hotplug notifier; registered once on first probe (see cnt in probe) */
+static struct notifier_block jtag_mm_etm_notifier = {
+ .notifier_call = jtag_mm_etm_callback,
+};
+
+/*
+ * Force-disable ETM save-restore on MSM8953 v1.0 silicon
+ * (presumably a chip-specific errata workaround - see probe).
+ */
+static bool skip_etm_save_restore(void)
+{
+ uint32_t soc_id = socinfo_get_id();
+ uint32_t soc_version = socinfo_get_version();
+
+ return soc_id == HW_SOC_ID_M8953 &&
+        SOCINFO_VERSION_MAJOR(soc_version) == 1 &&
+        SOCINFO_VERSION_MINOR(soc_version) == 0;
+}
+
+/*
+ * jtag_mm_etm_probe - set up the per-cpu ETM context: map the etm-base
+ * region, read DT options, allocate the state save buffer, register the
+ * hotplug notifier (first probe only) and read the arch data on the
+ * target cpu if it is online.
+ *
+ * Returns 0 on success or a negative errno. All allocations are devm-
+ * managed, so no explicit cleanup is needed on failure.
+ */
+static int jtag_mm_etm_probe(struct platform_device *pdev, uint32_t cpu)
+{
+ struct etm_ctx *etmdata;
+ struct resource *res;
+ struct device *dev = &pdev->dev;
+
+ /* Allocate memory per cpu */
+ etmdata = devm_kzalloc(dev, sizeof(struct etm_ctx), GFP_KERNEL);
+ if (!etmdata)
+ return -ENOMEM;
+
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "etm-base");
+ if (!res)
+ return -ENODEV;
+
+ etmdata->base = devm_ioremap(dev, res->start, resource_size(res));
+ if (!etmdata->base)
+ return -EINVAL;
+
+ /* DT knobs: system-instruction access and save-restore opt-out */
+ etmdata->si_enable = of_property_read_bool(pdev->dev.of_node,
+ "qcom,si-enable");
+ etmdata->save_restore_disabled = of_property_read_bool(
+ pdev->dev.of_node,
+ "qcom,save-restore-disable");
+
+ /* soc-specific override, independent of DT */
+ if (skip_etm_save_restore())
+ etmdata->save_restore_disabled = 1;
+
+ /* Allocate etm state save space per core */
+ etmdata->state = devm_kzalloc(dev,
+ MAX_ETM_STATE_SIZE * sizeof(uint64_t),
+ GFP_KERNEL);
+ if (!etmdata->state)
+ return -ENOMEM;
+
+ spin_lock_init(&etmdata->spinlock);
+ mutex_init(&etmdata->mutex);
+
+ /* register the hotplug notifier once, on the first probed instance */
+ if (cnt++ == 0)
+ register_hotcpu_notifier(&jtag_mm_etm_notifier);
+
+ get_online_cpus();
+
+ /* read arch data on the target cpu now if it is online */
+ if (!smp_call_function_single(cpu, etm_init_arch_data, etmdata,
+ 1))
+ etmdata->init = true;
+
+ etm[cpu] = etmdata;
+
+ put_online_cpus();
+
+ mutex_lock(&etmdata->mutex);
+ if (etmdata->init && !etmdata->enable) {
+ if (etm_arch_supported(etmdata->arch)) {
+ /* kernel save-restore only if TZ version is too old */
+ if (scm_get_feat_version(TZ_DBG_ETM_FEAT_ID) <
+ TZ_DBG_ETM_VER)
+ etmdata->save_restore_enabled = true;
+ else
+ pr_info("etm save-restore supported by TZ\n");
+ } else
+ pr_info("etm arch %u not supported\n", etmdata->arch);
+ etmdata->enable = true;
+ }
+ mutex_unlock(&etmdata->mutex);
+ return 0;
+}
+
+/*
+ * jtag_mm_probe - platform probe: resolve the cpu this instance is
+ * bound to from the "qcom,coresight-jtagmm-cpu" phandle, bring up its
+ * core clock at the trace rate and hand off to jtag_mm_etm_probe().
+ * The clock is disabled again if the etm probe fails.
+ */
+static int jtag_mm_probe(struct platform_device *pdev)
+{
+ int ret, i, cpu = -1;
+ struct device *dev = &pdev->dev;
+ struct device_node *cpu_node;
+
+ /* nothing to do if fuses block apps-side debug access */
+ if (msm_jtag_fuse_apps_access_disabled())
+ return -EPERM;
+
+ cpu_node = of_parse_phandle(pdev->dev.of_node,
+ "qcom,coresight-jtagmm-cpu", 0);
+ if (!cpu_node) {
+ dev_err(dev, "Jtag-mm cpu handle not specified\n");
+ return -ENODEV;
+ }
+ /* map the DT cpu node to a logical cpu number */
+ for_each_possible_cpu(i) {
+ if (cpu_node == of_get_cpu_node(i, NULL)) {
+ cpu = i;
+ break;
+ }
+ }
+ if (cpu == -1) {
+ dev_err(dev, "invalid Jtag-mm cpu handle\n");
+ return -EINVAL;
+ }
+
+ clock[cpu] = devm_clk_get(dev, "core_clk");
+ if (IS_ERR(clock[cpu])) {
+ ret = PTR_ERR(clock[cpu]);
+ return ret;
+ }
+
+ ret = clk_set_rate(clock[cpu], CORESIGHT_CLK_RATE_TRACE);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(clock[cpu]);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, clock[cpu]);
+
+ ret = jtag_mm_etm_probe(pdev, cpu);
+ if (ret)
+ clk_disable_unprepare(clock[cpu]);
+ return ret;
+}
+
+/* Undo the one-time hotplug notifier registration from probe */
+static void jtag_mm_etm_remove(void)
+{
+ unregister_hotcpu_notifier(&jtag_mm_etm_notifier);
+}
+
+/*
+ * Platform remove: drop the notifier when the last instance goes away
+ * (cnt mirrors the cnt++ in jtag_mm_etm_probe) and release the clock.
+ */
+static int jtag_mm_remove(struct platform_device *pdev)
+{
+ struct clk *clock = platform_get_drvdata(pdev);
+
+ if (--cnt == 0)
+ jtag_mm_etm_remove();
+ clk_disable_unprepare(clock);
+ return 0;
+}
+
+/* DT compatible string this driver binds to */
+static const struct of_device_id msm_qdss_mm_match[] = {
+ { .compatible = "qcom,jtagv8-mm"},
+ {}
+};
+
+/* Platform driver glue for the memory-mapped JTAGv8 instances */
+static struct platform_driver jtag_mm_driver = {
+ .probe = jtag_mm_probe,
+ .remove = jtag_mm_remove,
+ .driver = {
+ .name = "msm-jtagv8-mm",
+ .owner = THIS_MODULE,
+ .of_match_table = msm_qdss_mm_match,
+ },
+};
+
+/* Module entry point: register the platform driver */
+static int __init jtag_mm_init(void)
+{
+ return platform_driver_register(&jtag_mm_driver);
+}
+module_init(jtag_mm_init);
+
+/* Module exit point: unregister the platform driver */
+static void __exit jtag_mm_exit(void)
+{
+ platform_driver_unregister(&jtag_mm_driver);
+}
+module_exit(jtag_mm_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("CoreSight DEBUGv8 and ETMv4 save-restore driver");
--- /dev/null
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/smp.h>
+#include <linux/cpu.h>
+#include <linux/cpu_pm.h>
+#include <linux/export.h>
+#include <linux/printk.h>
+#include <linux/ratelimit.h>
+#include <linux/coresight.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/bitops.h>
+#include <soc/qcom/scm.h>
+#include <soc/qcom/jtag.h>
+#ifdef CONFIG_ARM64
+#include <asm/debugv8.h>
+#else
+#include <asm/hardware/debugv8.h>
+#endif
+
+/* polling budget, in microseconds, for register-status wait loops */
+#define TIMEOUT_US (100)
+
+/* BM(lsb, msb): mask with bits lsb..msb (inclusive) set;
+ * BMVAL extracts that field, BVAL extracts a single bit.
+ */
+#define BM(lsb, msb) ((BIT(msb) - BIT(lsb)) + BIT(msb))
+#define BMVAL(val, lsb, msb) ((val & BM(lsb, msb)) >> lsb)
+#define BVAL(val, n) ((val & BIT(n)) >> n)
+
+#ifdef CONFIG_ARM64
+#define ARM_DEBUG_ARCH_V8 (0x6)
+#endif
+
+/* fixed per-cpu slot count in the debug state buffer */
+#define MAX_DBG_REGS (66)
+#define MAX_DBG_STATE_SIZE (MAX_DBG_REGS * num_possible_cpus())
+
+#define OSLOCK_MAGIC (0xC5ACCE55)
+/* TZ feature id / minimum version for TZ-managed debug save-restore */
+#define TZ_DBG_ETM_FEAT_ID (0x8)
+#define TZ_DBG_ETM_VER (0x400000)
+
+/* per-cpu counts of save/restore invocations (diagnostic counters) */
+uint32_t msm_jtag_save_cntr[NR_CPUS];
+uint32_t msm_jtag_restore_cntr[NR_CPUS];
+
+/* access debug registers using system instructions */
+struct dbg_cpu_ctx {
+ uint32_t *state;
+};
+
+/*
+ * Global debug-register save-restore context shared by all cpus; each
+ * cpu indexes its MAX_DBG_REGS-sized slice of state[].
+ */
+struct dbg_ctx {
+ uint8_t arch;            /* debug architecture version from ID regs */
+ bool save_restore_enabled;
+ uint8_t nr_wp;           /* number of watchpoints implemented */
+ uint8_t nr_bp;           /* number of breakpoints implemented */
+ uint8_t nr_ctx_cmp;      /* number of context-aware breakpoints */
+#ifdef CONFIG_ARM64
+ uint64_t *state;
+#else
+ uint32_t *state;
+#endif
+};
+
+static struct dbg_ctx dbg;
+static struct notifier_block jtag_hotcpu_save_notifier;
+static struct notifier_block jtag_hotcpu_restore_notifier;
+static struct notifier_block jtag_cpu_pm_notifier;
+
+#ifdef CONFIG_ARM64
+/*
+ * Save breakpoint pair j: 64-bit value register DBGBVRn_EL1 then 32-bit
+ * control register DBGBCRn_EL1, into two consecutive state[] slots.
+ * Returns the updated state index.
+ */
+static int dbg_read_arch64_bxr(uint64_t *state, int i, int j)
+{
+ switch (j) {
+ case 0:
+ state[i++] = dbg_readq(DBGBVR0_EL1);
+ state[i++] = (uint64_t)dbg_readl(DBGBCR0_EL1);
+ break;
+ case 1:
+ state[i++] = dbg_readq(DBGBVR1_EL1);
+ state[i++] = (uint64_t)dbg_readl(DBGBCR1_EL1);
+ break;
+ case 2:
+ state[i++] = dbg_readq(DBGBVR2_EL1);
+ state[i++] = (uint64_t)dbg_readl(DBGBCR2_EL1);
+ break;
+ case 3:
+ state[i++] = dbg_readq(DBGBVR3_EL1);
+ state[i++] = (uint64_t)dbg_readl(DBGBCR3_EL1);
+ break;
+ case 4:
+ state[i++] = dbg_readq(DBGBVR4_EL1);
+ state[i++] = (uint64_t)dbg_readl(DBGBCR4_EL1);
+ break;
+ case 5:
+ state[i++] = dbg_readq(DBGBVR5_EL1);
+ state[i++] = (uint64_t)dbg_readl(DBGBCR5_EL1);
+ break;
+ case 6:
+ state[i++] = dbg_readq(DBGBVR6_EL1);
+ state[i++] = (uint64_t)dbg_readl(DBGBCR6_EL1);
+ break;
+ case 7:
+ state[i++] = dbg_readq(DBGBVR7_EL1);
+ state[i++] = (uint64_t)dbg_readl(DBGBCR7_EL1);
+ break;
+ case 8:
+ state[i++] = dbg_readq(DBGBVR8_EL1);
+ state[i++] = (uint64_t)dbg_readl(DBGBCR8_EL1);
+ break;
+ case 9:
+ state[i++] = dbg_readq(DBGBVR9_EL1);
+ state[i++] = (uint64_t)dbg_readl(DBGBCR9_EL1);
+ break;
+ case 10:
+ state[i++] = dbg_readq(DBGBVR10_EL1);
+ state[i++] = (uint64_t)dbg_readl(DBGBCR10_EL1);
+ break;
+ case 11:
+ state[i++] = dbg_readq(DBGBVR11_EL1);
+ state[i++] = (uint64_t)dbg_readl(DBGBCR11_EL1);
+ break;
+ case 12:
+ state[i++] = dbg_readq(DBGBVR12_EL1);
+ state[i++] = (uint64_t)dbg_readl(DBGBCR12_EL1);
+ break;
+ case 13:
+ state[i++] = dbg_readq(DBGBVR13_EL1);
+ state[i++] = (uint64_t)dbg_readl(DBGBCR13_EL1);
+ break;
+ case 14:
+ state[i++] = dbg_readq(DBGBVR14_EL1);
+ state[i++] = (uint64_t)dbg_readl(DBGBCR14_EL1);
+ break;
+ case 15:
+ state[i++] = dbg_readq(DBGBVR15_EL1);
+ state[i++] = (uint64_t)dbg_readl(DBGBCR15_EL1);
+ break;
+ default:
+ pr_err_ratelimited("idx %d out of bounds in %s\n", j, __func__);
+ }
+ return i;
+}
+
+/*
+ * Restore breakpoint pair j (DBGBVRn_EL1 then DBGBCRn_EL1) from two
+ * consecutive state[] slots, mirroring dbg_read_arch64_bxr().
+ * Returns the updated state index.
+ */
+static int dbg_write_arch64_bxr(uint64_t *state, int i, int j)
+{
+ switch (j) {
+ case 0:
+ dbg_write(state[i++], DBGBVR0_EL1);
+ dbg_write(state[i++], DBGBCR0_EL1);
+ break;
+ case 1:
+ dbg_write(state[i++], DBGBVR1_EL1);
+ dbg_write(state[i++], DBGBCR1_EL1);
+ break;
+ case 2:
+ dbg_write(state[i++], DBGBVR2_EL1);
+ dbg_write(state[i++], DBGBCR2_EL1);
+ break;
+ case 3:
+ dbg_write(state[i++], DBGBVR3_EL1);
+ dbg_write(state[i++], DBGBCR3_EL1);
+ break;
+ case 4:
+ dbg_write(state[i++], DBGBVR4_EL1);
+ dbg_write(state[i++], DBGBCR4_EL1);
+ break;
+ case 5:
+ dbg_write(state[i++], DBGBVR5_EL1);
+ dbg_write(state[i++], DBGBCR5_EL1);
+ break;
+ case 6:
+ dbg_write(state[i++], DBGBVR6_EL1);
+ dbg_write(state[i++], DBGBCR6_EL1);
+ break;
+ case 7:
+ dbg_write(state[i++], DBGBVR7_EL1);
+ dbg_write(state[i++], DBGBCR7_EL1);
+ break;
+ case 8:
+ dbg_write(state[i++], DBGBVR8_EL1);
+ dbg_write(state[i++], DBGBCR8_EL1);
+ break;
+ case 9:
+ dbg_write(state[i++], DBGBVR9_EL1);
+ dbg_write(state[i++], DBGBCR9_EL1);
+ break;
+ case 10:
+ dbg_write(state[i++], DBGBVR10_EL1);
+ dbg_write(state[i++], DBGBCR10_EL1);
+ break;
+ case 11:
+ dbg_write(state[i++], DBGBVR11_EL1);
+ dbg_write(state[i++], DBGBCR11_EL1);
+ break;
+ case 12:
+ dbg_write(state[i++], DBGBVR12_EL1);
+ dbg_write(state[i++], DBGBCR12_EL1);
+ break;
+ case 13:
+ dbg_write(state[i++], DBGBVR13_EL1);
+ dbg_write(state[i++], DBGBCR13_EL1);
+ break;
+ case 14:
+ dbg_write(state[i++], DBGBVR14_EL1);
+ dbg_write(state[i++], DBGBCR14_EL1);
+ break;
+ case 15:
+ dbg_write(state[i++], DBGBVR15_EL1);
+ dbg_write(state[i++], DBGBCR15_EL1);
+ break;
+ default:
+ pr_err_ratelimited("idx %d out of bounds in %s\n", j, __func__);
+ }
+ return i;
+}
+
+/*
+ * Save watchpoint pair j: 64-bit value register DBGWVRn_EL1 then 32-bit
+ * control register DBGWCRn_EL1, into two consecutive state[] slots.
+ * Returns the updated state index.
+ */
+static int dbg_read_arch64_wxr(uint64_t *state, int i, int j)
+{
+ switch (j) {
+ case 0:
+ state[i++] = dbg_readq(DBGWVR0_EL1);
+ state[i++] = (uint64_t)dbg_readl(DBGWCR0_EL1);
+ break;
+ case 1:
+ state[i++] = dbg_readq(DBGWVR1_EL1);
+ state[i++] = (uint64_t)dbg_readl(DBGWCR1_EL1);
+ break;
+ case 2:
+ state[i++] = dbg_readq(DBGWVR2_EL1);
+ state[i++] = (uint64_t)dbg_readl(DBGWCR2_EL1);
+ break;
+ case 3:
+ state[i++] = dbg_readq(DBGWVR3_EL1);
+ state[i++] = (uint64_t)dbg_readl(DBGWCR3_EL1);
+ break;
+ case 4:
+ state[i++] = dbg_readq(DBGWVR4_EL1);
+ state[i++] = (uint64_t)dbg_readl(DBGWCR4_EL1);
+ break;
+ case 5:
+ state[i++] = dbg_readq(DBGWVR5_EL1);
+ state[i++] = (uint64_t)dbg_readl(DBGWCR5_EL1);
+ break;
+ case 6:
+ state[i++] = dbg_readq(DBGWVR6_EL1);
+ state[i++] = (uint64_t)dbg_readl(DBGWCR6_EL1);
+ break;
+ case 7:
+ state[i++] = dbg_readq(DBGWVR7_EL1);
+ state[i++] = (uint64_t)dbg_readl(DBGWCR7_EL1);
+ break;
+ case 8:
+ state[i++] = dbg_readq(DBGWVR8_EL1);
+ state[i++] = (uint64_t)dbg_readl(DBGWCR8_EL1);
+ break;
+ case 9:
+ state[i++] = dbg_readq(DBGWVR9_EL1);
+ state[i++] = (uint64_t)dbg_readl(DBGWCR9_EL1);
+ break;
+ case 10:
+ state[i++] = dbg_readq(DBGWVR10_EL1);
+ state[i++] = (uint64_t)dbg_readl(DBGWCR10_EL1);
+ break;
+ case 11:
+ state[i++] = dbg_readq(DBGWVR11_EL1);
+ state[i++] = (uint64_t)dbg_readl(DBGWCR11_EL1);
+ break;
+ case 12:
+ state[i++] = dbg_readq(DBGWVR12_EL1);
+ state[i++] = (uint64_t)dbg_readl(DBGWCR12_EL1);
+ break;
+ case 13:
+ state[i++] = dbg_readq(DBGWVR13_EL1);
+ state[i++] = (uint64_t)dbg_readl(DBGWCR13_EL1);
+ break;
+ case 14:
+ state[i++] = dbg_readq(DBGWVR14_EL1);
+ state[i++] = (uint64_t)dbg_readl(DBGWCR14_EL1);
+ break;
+ case 15:
+ state[i++] = dbg_readq(DBGWVR15_EL1);
+ state[i++] = (uint64_t)dbg_readl(DBGWCR15_EL1);
+ break;
+ default:
+ pr_err_ratelimited("idx %d out of bounds in %s\n", j, __func__);
+ }
+ return i;
+}
+
+/*
+ * Restore watchpoint pair j (DBGWVRn_EL1 then DBGWCRn_EL1) from two
+ * consecutive state[] slots, mirroring dbg_read_arch64_wxr().
+ * Returns the updated state index.
+ *
+ * Fix: case 6 previously wrote the saved DBGWVR6 value to DBGWVR0_EL1
+ * (copy-paste error), clobbering watchpoint 0's address and leaving
+ * watchpoint 6's value register unrestored.
+ */
+static int dbg_write_arch64_wxr(uint64_t *state, int i, int j)
+{
+ switch (j) {
+ case 0:
+ dbg_write(state[i++], DBGWVR0_EL1);
+ dbg_write(state[i++], DBGWCR0_EL1);
+ break;
+ case 1:
+ dbg_write(state[i++], DBGWVR1_EL1);
+ dbg_write(state[i++], DBGWCR1_EL1);
+ break;
+ case 2:
+ dbg_write(state[i++], DBGWVR2_EL1);
+ dbg_write(state[i++], DBGWCR2_EL1);
+ break;
+ case 3:
+ dbg_write(state[i++], DBGWVR3_EL1);
+ dbg_write(state[i++], DBGWCR3_EL1);
+ break;
+ case 4:
+ dbg_write(state[i++], DBGWVR4_EL1);
+ dbg_write(state[i++], DBGWCR4_EL1);
+ break;
+ case 5:
+ dbg_write(state[i++], DBGWVR5_EL1);
+ dbg_write(state[i++], DBGWCR5_EL1);
+ break;
+ case 6:
+ dbg_write(state[i++], DBGWVR6_EL1);
+ dbg_write(state[i++], DBGWCR6_EL1);
+ break;
+ case 7:
+ dbg_write(state[i++], DBGWVR7_EL1);
+ dbg_write(state[i++], DBGWCR7_EL1);
+ break;
+ case 8:
+ dbg_write(state[i++], DBGWVR8_EL1);
+ dbg_write(state[i++], DBGWCR8_EL1);
+ break;
+ case 9:
+ dbg_write(state[i++], DBGWVR9_EL1);
+ dbg_write(state[i++], DBGWCR9_EL1);
+ break;
+ case 10:
+ dbg_write(state[i++], DBGWVR10_EL1);
+ dbg_write(state[i++], DBGWCR10_EL1);
+ break;
+ case 11:
+ dbg_write(state[i++], DBGWVR11_EL1);
+ dbg_write(state[i++], DBGWCR11_EL1);
+ break;
+ case 12:
+ dbg_write(state[i++], DBGWVR12_EL1);
+ dbg_write(state[i++], DBGWCR12_EL1);
+ break;
+ case 13:
+ dbg_write(state[i++], DBGWVR13_EL1);
+ dbg_write(state[i++], DBGWCR13_EL1);
+ break;
+ case 14:
+ dbg_write(state[i++], DBGWVR14_EL1);
+ dbg_write(state[i++], DBGWCR14_EL1);
+ break;
+ case 15:
+ dbg_write(state[i++], DBGWVR15_EL1);
+ dbg_write(state[i++], DBGWCR15_EL1);
+ break;
+ default:
+ pr_err_ratelimited("idx %d out of bounds in %s\n", j, __func__);
+ }
+ return i;
+}
+
+/*
+ * dbg_save_state - save this cpu's ARMv8 debug registers into its
+ * fixed MAX_DBG_REGS-sized slice of dbg.state[]. The OS lock is set
+ * first, the OS double lock last; dbg_restore_state() reverses both.
+ */
+static inline void dbg_save_state(int cpu)
+{
+ int i, j;
+
+ /* each cpu owns a fixed MAX_DBG_REGS-slot region of dbg.state */
+ i = cpu * MAX_DBG_REGS;
+
+ switch (dbg.arch) {
+ case ARM_DEBUG_ARCH_V8:
+ /* Set OS Lock to inform the debugger that the OS is in the
+ * process of saving debug registers. It prevents accidental
+ * modification of the debug regs by the external debugger.
+ */
+ dbg_write(0x1, OSLAR_EL1);
+ /* Ensure OS lock is set before proceeding */
+ isb();
+
+ dbg.state[i++] = (uint32_t)dbg_readl(MDSCR_EL1);
+ for (j = 0; j < dbg.nr_bp; j++)
+ i = dbg_read_arch64_bxr((uint64_t *)dbg.state, i, j);
+ for (j = 0; j < dbg.nr_wp; j++)
+ i = dbg_read_arch64_wxr((uint64_t *)dbg.state, i, j);
+ dbg.state[i++] = (uint32_t)dbg_readl(MDCCINT_EL1);
+ dbg.state[i++] = (uint32_t)dbg_readl(DBGCLAIMCLR_EL1);
+ dbg.state[i++] = (uint32_t)dbg_readl(OSECCR_EL1);
+ dbg.state[i++] = (uint32_t)dbg_readl(OSDTRRX_EL1);
+ dbg.state[i++] = (uint32_t)dbg_readl(OSDTRTX_EL1);
+
+ /* Set the OS double lock */
+ isb();
+ dbg_write(0x1, OSDLR_EL1);
+ isb();
+ break;
+ default:
+ pr_err_ratelimited("unsupported dbg arch %d in %s\n", dbg.arch,
+ __func__);
+ }
+}
+
+/*
+ * dbg_restore_state - restore this cpu's ARMv8 debug registers from its
+ * slice of dbg.state[], in the same order dbg_save_state() wrote them.
+ * Saved DBGCLAIMCLR bits are replayed through DBGCLAIMSET_EL1 so the
+ * claim tags that were set before power collapse are set again.
+ */
+static inline void dbg_restore_state(int cpu)
+{
+ int i, j;
+
+ i = cpu * MAX_DBG_REGS;
+
+ switch (dbg.arch) {
+ case ARM_DEBUG_ARCH_V8:
+ /* Clear the OS double lock */
+ isb();
+ dbg_write(0x0, OSDLR_EL1);
+ isb();
+
+ /* Set OS lock. Lock will already be set after power collapse
+ * but this write is included to ensure it is set.
+ */
+ dbg_write(0x1, OSLAR_EL1);
+ isb();
+
+ dbg_write(dbg.state[i++], MDSCR_EL1);
+ for (j = 0; j < dbg.nr_bp; j++)
+ i = dbg_write_arch64_bxr((uint64_t *)dbg.state, i, j);
+ for (j = 0; j < dbg.nr_wp; j++)
+ i = dbg_write_arch64_wxr((uint64_t *)dbg.state, i, j);
+ dbg_write(dbg.state[i++], MDCCINT_EL1);
+ dbg_write(dbg.state[i++], DBGCLAIMSET_EL1);
+ dbg_write(dbg.state[i++], OSECCR_EL1);
+ dbg_write(dbg.state[i++], OSDTRRX_EL1);
+ dbg_write(dbg.state[i++], OSDTRTX_EL1);
+
+ /* release the OS lock only after all registers are restored */
+ isb();
+ dbg_write(0x0, OSLAR_EL1);
+ isb();
+ break;
+ default:
+ pr_err_ratelimited("unsupported dbg arch %d in %s\n", dbg.arch,
+ __func__);
+ }
+}
+
+/*
+ * dbg_init_arch_data - cache the debug architecture version and the
+ * implemented breakpoint/watchpoint/context-comparator counts from
+ * ID_AA64DFR0_EL1 (fields hold count-minus-one, hence the +1).
+ */
+static void dbg_init_arch_data(void)
+{
+ uint64_t dbgfr;
+
+ /* This will run on core0 so use it to populate parameters */
+ dbgfr = dbg_readq(ID_AA64DFR0_EL1);
+ dbg.arch = BMVAL(dbgfr, 0, 3);
+ dbg.nr_bp = BMVAL(dbgfr, 12, 15) + 1;
+ dbg.nr_wp = BMVAL(dbgfr, 20, 23) + 1;
+ dbg.nr_ctx_cmp = BMVAL(dbgfr, 28, 31) + 1;
+}
+#else
+
+/*
+ * AArch32 variant: save breakpoint pair j (DBGBVRn then DBGBCRn) into
+ * two consecutive state[] slots. Returns the updated state index.
+ */
+static int dbg_read_arch32_bxr(uint32_t *state, int i, int j)
+{
+ switch (j) {
+ case 0:
+ state[i++] = dbg_read(DBGBVR0);
+ state[i++] = dbg_read(DBGBCR0);
+ break;
+ case 1:
+ state[i++] = dbg_read(DBGBVR1);
+ state[i++] = dbg_read(DBGBCR1);
+ break;
+ case 2:
+ state[i++] = dbg_read(DBGBVR2);
+ state[i++] = dbg_read(DBGBCR2);
+ break;
+ case 3:
+ state[i++] = dbg_read(DBGBVR3);
+ state[i++] = dbg_read(DBGBCR3);
+ break;
+ case 4:
+ state[i++] = dbg_read(DBGBVR4);
+ state[i++] = dbg_read(DBGBCR4);
+ break;
+ case 5:
+ state[i++] = dbg_read(DBGBVR5);
+ state[i++] = dbg_read(DBGBCR5);
+ break;
+ case 6:
+ state[i++] = dbg_read(DBGBVR6);
+ state[i++] = dbg_read(DBGBCR6);
+ break;
+ case 7:
+ state[i++] = dbg_read(DBGBVR7);
+ state[i++] = dbg_read(DBGBCR7);
+ break;
+ case 8:
+ state[i++] = dbg_read(DBGBVR8);
+ state[i++] = dbg_read(DBGBCR8);
+ break;
+ case 9:
+ state[i++] = dbg_read(DBGBVR9);
+ state[i++] = dbg_read(DBGBCR9);
+ break;
+ case 10:
+ state[i++] = dbg_read(DBGBVR10);
+ state[i++] = dbg_read(DBGBCR10);
+ break;
+ case 11:
+ state[i++] = dbg_read(DBGBVR11);
+ state[i++] = dbg_read(DBGBCR11);
+ break;
+ case 12:
+ state[i++] = dbg_read(DBGBVR12);
+ state[i++] = dbg_read(DBGBCR12);
+ break;
+ case 13:
+ state[i++] = dbg_read(DBGBVR13);
+ state[i++] = dbg_read(DBGBCR13);
+ break;
+ case 14:
+ state[i++] = dbg_read(DBGBVR14);
+ state[i++] = dbg_read(DBGBCR14);
+ break;
+ case 15:
+ state[i++] = dbg_read(DBGBVR15);
+ state[i++] = dbg_read(DBGBCR15);
+ break;
+ default:
+ pr_err_ratelimited("idx %d out of bounds in %s\n", j, __func__);
+ }
+ return i;
+}
+
+/*
+ * AArch32 variant: restore breakpoint pair j (DBGBVRn then DBGBCRn)
+ * from two consecutive state[] slots, mirroring dbg_read_arch32_bxr().
+ * Returns the updated state index.
+ */
+static int dbg_write_arch32_bxr(uint32_t *state, int i, int j)
+{
+ switch (j) {
+ case 0:
+ dbg_write(state[i++], DBGBVR0);
+ dbg_write(state[i++], DBGBCR0);
+ break;
+ case 1:
+ dbg_write(state[i++], DBGBVR1);
+ dbg_write(state[i++], DBGBCR1);
+ break;
+ case 2:
+ dbg_write(state[i++], DBGBVR2);
+ dbg_write(state[i++], DBGBCR2);
+ break;
+ case 3:
+ dbg_write(state[i++], DBGBVR3);
+ dbg_write(state[i++], DBGBCR3);
+ break;
+ case 4:
+ dbg_write(state[i++], DBGBVR4);
+ dbg_write(state[i++], DBGBCR4);
+ break;
+ case 5:
+ dbg_write(state[i++], DBGBVR5);
+ dbg_write(state[i++], DBGBCR5);
+ break;
+ case 6:
+ dbg_write(state[i++], DBGBVR6);
+ dbg_write(state[i++], DBGBCR6);
+ break;
+ case 7:
+ dbg_write(state[i++], DBGBVR7);
+ dbg_write(state[i++], DBGBCR7);
+ break;
+ case 8:
+ dbg_write(state[i++], DBGBVR8);
+ dbg_write(state[i++], DBGBCR8);
+ break;
+ case 9:
+ dbg_write(state[i++], DBGBVR9);
+ dbg_write(state[i++], DBGBCR9);
+ break;
+ case 10:
+ dbg_write(state[i++], DBGBVR10);
+ dbg_write(state[i++], DBGBCR10);
+ break;
+ case 11:
+ dbg_write(state[i++], DBGBVR11);
+ dbg_write(state[i++], DBGBCR11);
+ break;
+ case 12:
+ dbg_write(state[i++], DBGBVR12);
+ dbg_write(state[i++], DBGBCR12);
+ break;
+ case 13:
+ dbg_write(state[i++], DBGBVR13);
+ dbg_write(state[i++], DBGBCR13);
+ break;
+ case 14:
+ dbg_write(state[i++], DBGBVR14);
+ dbg_write(state[i++], DBGBCR14);
+ break;
+ case 15:
+ dbg_write(state[i++], DBGBVR15);
+ dbg_write(state[i++], DBGBCR15);
+ break;
+ default:
+ pr_err_ratelimited("idx %d out of bounds in %s\n", j, __func__);
+ }
+ return i;
+}
+
+/*
+ * dbg_read_arch32_wxr - save one aarch32 watchpoint register pair.
+ * @state: flat per-cpu save area for debug registers
+ * @i: current write index into @state
+ * @j: watchpoint number to save (0..15)
+ *
+ * Watchpoint counterpart of dbg_read_arch32_bxr(); saves the value
+ * and control registers for watchpoint @j. dbg_read() requires a
+ * literal register token, hence the per-watchpoint switch.
+ *
+ * Returns the updated index (advanced by two, or unchanged if @j is
+ * out of range).
+ */
+static int dbg_read_arch32_wxr(uint32_t *state, int i, int j)
+{
+ switch (j) {
+ case 0:
+ state[i++] = dbg_read(DBGWVR0);
+ state[i++] = dbg_read(DBGWCR0);
+ break;
+ case 1:
+ state[i++] = dbg_read(DBGWVR1);
+ state[i++] = dbg_read(DBGWCR1);
+ break;
+ case 2:
+ state[i++] = dbg_read(DBGWVR2);
+ state[i++] = dbg_read(DBGWCR2);
+ break;
+ case 3:
+ state[i++] = dbg_read(DBGWVR3);
+ state[i++] = dbg_read(DBGWCR3);
+ break;
+ case 4:
+ state[i++] = dbg_read(DBGWVR4);
+ state[i++] = dbg_read(DBGWCR4);
+ break;
+ case 5:
+ state[i++] = dbg_read(DBGWVR5);
+ state[i++] = dbg_read(DBGWCR5);
+ break;
+ case 6:
+ state[i++] = dbg_read(DBGWVR6);
+ state[i++] = dbg_read(DBGWCR6);
+ break;
+ case 7:
+ state[i++] = dbg_read(DBGWVR7);
+ state[i++] = dbg_read(DBGWCR7);
+ break;
+ case 8:
+ state[i++] = dbg_read(DBGWVR8);
+ state[i++] = dbg_read(DBGWCR8);
+ break;
+ case 9:
+ state[i++] = dbg_read(DBGWVR9);
+ state[i++] = dbg_read(DBGWCR9);
+ break;
+ case 10:
+ state[i++] = dbg_read(DBGWVR10);
+ state[i++] = dbg_read(DBGWCR10);
+ break;
+ case 11:
+ state[i++] = dbg_read(DBGWVR11);
+ state[i++] = dbg_read(DBGWCR11);
+ break;
+ case 12:
+ state[i++] = dbg_read(DBGWVR12);
+ state[i++] = dbg_read(DBGWCR12);
+ break;
+ case 13:
+ state[i++] = dbg_read(DBGWVR13);
+ state[i++] = dbg_read(DBGWCR13);
+ break;
+ case 14:
+ state[i++] = dbg_read(DBGWVR14);
+ state[i++] = dbg_read(DBGWCR14);
+ break;
+ case 15:
+ state[i++] = dbg_read(DBGWVR15);
+ state[i++] = dbg_read(DBGWCR15);
+ break;
+ default:
+ /* Caller iterates up to dbg.nr_wp, so this should not fire */
+ pr_err_ratelimited("idx %d out of bounds in %s\n", j, __func__);
+ }
+ return i;
+}
+
+/*
+ * dbg_write_arch32_wxr - restore one aarch32 watchpoint register pair.
+ * @state: flat per-cpu save area filled by dbg_read_arch32_wxr()
+ * @i: current read index into @state
+ * @j: watchpoint number to restore (0..15)
+ *
+ * Mirror of dbg_read_arch32_wxr(): writes the value register then the
+ * control register for watchpoint @j. dbg_write() requires a literal
+ * register token, hence the per-watchpoint switch.
+ *
+ * Returns the updated index (advanced by two, or unchanged if @j is
+ * out of range).
+ */
+static int dbg_write_arch32_wxr(uint32_t *state, int i, int j)
+{
+ switch (j) {
+ case 0:
+ dbg_write(state[i++], DBGWVR0);
+ dbg_write(state[i++], DBGWCR0);
+ break;
+ case 1:
+ dbg_write(state[i++], DBGWVR1);
+ dbg_write(state[i++], DBGWCR1);
+ break;
+ case 2:
+ dbg_write(state[i++], DBGWVR2);
+ dbg_write(state[i++], DBGWCR2);
+ break;
+ case 3:
+ dbg_write(state[i++], DBGWVR3);
+ dbg_write(state[i++], DBGWCR3);
+ break;
+ case 4:
+ dbg_write(state[i++], DBGWVR4);
+ dbg_write(state[i++], DBGWCR4);
+ break;
+ case 5:
+ dbg_write(state[i++], DBGWVR5);
+ dbg_write(state[i++], DBGWCR5);
+ break;
+ case 6:
+ dbg_write(state[i++], DBGWVR6);
+ dbg_write(state[i++], DBGWCR6);
+ break;
+ case 7:
+ dbg_write(state[i++], DBGWVR7);
+ dbg_write(state[i++], DBGWCR7);
+ break;
+ case 8:
+ dbg_write(state[i++], DBGWVR8);
+ dbg_write(state[i++], DBGWCR8);
+ break;
+ case 9:
+ dbg_write(state[i++], DBGWVR9);
+ dbg_write(state[i++], DBGWCR9);
+ break;
+ case 10:
+ dbg_write(state[i++], DBGWVR10);
+ dbg_write(state[i++], DBGWCR10);
+ break;
+ case 11:
+ dbg_write(state[i++], DBGWVR11);
+ dbg_write(state[i++], DBGWCR11);
+ break;
+ case 12:
+ dbg_write(state[i++], DBGWVR12);
+ dbg_write(state[i++], DBGWCR12);
+ break;
+ case 13:
+ dbg_write(state[i++], DBGWVR13);
+ dbg_write(state[i++], DBGWCR13);
+ break;
+ case 14:
+ dbg_write(state[i++], DBGWVR14);
+ dbg_write(state[i++], DBGWCR14);
+ break;
+ case 15:
+ dbg_write(state[i++], DBGWVR15);
+ dbg_write(state[i++], DBGWCR15);
+ break;
+ default:
+ /* Caller iterates up to dbg.nr_wp, so this should not fire */
+ pr_err_ratelimited("idx %d out of bounds in %s\n", j, __func__);
+ }
+ return i;
+}
+
+/*
+ * dbg_save_state - save this cpu's debug registers before power collapse.
+ * @cpu: cpu whose slice of dbg.state to fill (slice size MAX_DBG_REGS)
+ *
+ * Must be called with preemption off on @cpu (see msm_jtag_save_state).
+ * The save order here must match the restore order in dbg_restore_state().
+ */
+static inline void dbg_save_state(int cpu)
+{
+ int i, j;
+
+ i = cpu * MAX_DBG_REGS;
+
+ switch (dbg.arch) {
+ case ARM_DEBUG_ARCH_V8:
+ /* Set OS Lock to inform the debugger that the OS is in the
+ * process of saving debug registers. It prevents accidental
+ * modification of the debug regs by the external debugger.
+ */
+ dbg_write(OSLOCK_MAGIC, DBGOSLAR);
+ /* Ensure OS lock is set before proceeding */
+ isb();
+
+ dbg.state[i++] = dbg_read(DBGDSCRext);
+ for (j = 0; j < dbg.nr_bp; j++)
+ i = dbg_read_arch32_bxr(dbg.state, i, j);
+ for (j = 0; j < dbg.nr_wp; j++)
+ i = dbg_read_arch32_wxr(dbg.state, i, j);
+ dbg.state[i++] = dbg_read(DBGDCCINT);
+ /* NOTE(review): presumably reading DBGCLAIMCLR returns the
+ * currently-set claim tags, restored via DBGCLAIMSET below -
+ * confirm against the ARM ARM.
+ */
+ dbg.state[i++] = dbg_read(DBGCLAIMCLR);
+ dbg.state[i++] = dbg_read(DBGOSECCR);
+ dbg.state[i++] = dbg_read(DBGDTRRXext);
+ dbg.state[i++] = dbg_read(DBGDTRTXext);
+
+ /* Set the OS double lock */
+ isb();
+ dbg_write(0x1, DBGOSDLR);
+ isb();
+ break;
+ default:
+ pr_err_ratelimited("unsupported dbg arch %d in %s\n", dbg.arch,
+ __func__);
+ }
+}
+
+/*
+ * dbg_restore_state - restore this cpu's debug registers after power collapse.
+ * @cpu: cpu whose slice of dbg.state to read back (slice size MAX_DBG_REGS)
+ *
+ * Exact mirror of dbg_save_state(): clears the OS double lock, re-asserts
+ * the OS lock, writes the registers back in the same order they were
+ * saved, then releases the OS lock.
+ */
+static inline void dbg_restore_state(int cpu)
+{
+ int i, j;
+
+ i = cpu * MAX_DBG_REGS;
+
+ switch (dbg.arch) {
+ case ARM_DEBUG_ARCH_V8:
+ /* Clear the OS double lock */
+ isb();
+ dbg_write(0x0, DBGOSDLR);
+ isb();
+
+ /* Set OS lock. Lock will already be set after power collapse
+ * but this write is included to ensure it is set.
+ */
+ dbg_write(OSLOCK_MAGIC, DBGOSLAR);
+ isb();
+
+ dbg_write(dbg.state[i++], DBGDSCRext);
+ for (j = 0; j < dbg.nr_bp; j++)
+ i = dbg_write_arch32_bxr((uint32_t *)dbg.state, i, j);
+ for (j = 0; j < dbg.nr_wp; j++)
+ i = dbg_write_arch32_wxr((uint32_t *)dbg.state, i, j);
+ dbg_write(dbg.state[i++], DBGDCCINT);
+ /* Claim tags saved from DBGCLAIMCLR are re-asserted through
+ * DBGCLAIMSET (set/clear register pair).
+ */
+ dbg_write(dbg.state[i++], DBGCLAIMSET);
+ dbg_write(dbg.state[i++], DBGOSECCR);
+ dbg_write(dbg.state[i++], DBGDTRRXext);
+ dbg_write(dbg.state[i++], DBGDTRTXext);
+
+ /* Release the OS lock so the external debugger can reattach */
+ isb();
+ dbg_write(0x0, DBGOSLAR);
+ isb();
+ break;
+ default:
+ pr_err_ratelimited("unsupported dbg arch %d in %s\n", dbg.arch,
+ __func__);
+ }
+}
+
+/*
+ * dbg_init_arch_data - populate debug architecture parameters.
+ *
+ * Decodes the DBGDIDR fields into the global dbg descriptor: debug
+ * architecture version plus the implemented breakpoint, watchpoint
+ * and context-comparator counts (fields hold count minus one).
+ * Runs on core0 during init, so core0's view is used for all cpus.
+ */
+static void dbg_init_arch_data(void)
+{
+ uint32_t didr = dbg_read(DBGDIDR);
+
+ dbg.nr_wp = BMVAL(didr, 28, 31) + 1;
+ dbg.nr_bp = BMVAL(didr, 24, 27) + 1;
+ dbg.nr_ctx_cmp = BMVAL(didr, 20, 23) + 1;
+ dbg.arch = BMVAL(didr, 16, 19);
+}
+#endif
+
+/*
+ * msm_jtag_save_state - save debug registers
+ *
+ * Debug registers are saved before power collapse if the debug
+ * architecture is supported and TZ isn't handling the save and
+ * restore of debug registers.
+ *
+ * CONTEXT:
+ * Called with preemption off and interrupts locked from:
+ * 1. per_cpu idle thread context for idle power collapses
+ * or
+ * 2. per_cpu idle thread context for hotplug/suspend power collapse
+ * for nonboot cpus
+ * or
+ * 3. suspend thread context for suspend power collapse for core0
+ *
+ * In all cases we will run on the same cpu for the entire duration.
+ */
+void msm_jtag_save_state(void)
+{
+ int cpu;
+
+ /* Preemption is off in all documented call contexts, so the raw
+ * cpu id is stable for the duration of the save.
+ */
+ cpu = raw_smp_processor_id();
+
+ msm_jtag_save_cntr[cpu]++;
+ /* ensure counter is updated before moving forward */
+ mb();
+
+ /* ETM state first, then cpu debug registers (if TZ doesn't do it) */
+ msm_jtag_etm_save_state();
+ if (dbg.save_restore_enabled)
+ dbg_save_state(cpu);
+}
+EXPORT_SYMBOL(msm_jtag_save_state);
+
+/*
+ * msm_jtag_restore_state - restore debug registers after power collapse.
+ *
+ * Counterpart of msm_jtag_save_state(); restores in reverse order
+ * (debug registers first, then ETM state). Called with preemption off.
+ */
+void msm_jtag_restore_state(void)
+{
+ int cpu;
+
+ cpu = raw_smp_processor_id();
+
+ /* Attempt restore only if save has been done. If power collapse
+ * is disabled, hotplug off of non-boot core will result in WFI
+ * and hence msm_jtag_save_state will not occur. Subsequently,
+ * during hotplug on of non-boot core when msm_jtag_restore_state
+ * is called via msm_platform_secondary_init, this check will help
+ * bail us out without restoring.
+ */
+ if (msm_jtag_save_cntr[cpu] == msm_jtag_restore_cntr[cpu])
+ return;
+ else if (msm_jtag_save_cntr[cpu] != msm_jtag_restore_cntr[cpu] + 1)
+ pr_err_ratelimited("jtag imbalance, save:%lu, restore:%lu\n",
+ (unsigned long)msm_jtag_save_cntr[cpu],
+ (unsigned long)msm_jtag_restore_cntr[cpu]);
+
+ msm_jtag_restore_cntr[cpu]++;
+ /* ensure counter is updated before moving forward */
+ mb();
+
+ if (dbg.save_restore_enabled)
+ dbg_restore_state(cpu);
+ msm_jtag_etm_restore_state();
+}
+EXPORT_SYMBOL(msm_jtag_restore_state);
+
+/* Only the ARMv8 debug architecture has save/restore support here */
+static inline bool dbg_arch_supported(uint8_t arch)
+{
+ return arch == ARM_DEBUG_ARCH_V8;
+}
+
+/* Hotplug notifier: save debug state on the cpu as it goes down */
+static int jtag_hotcpu_save_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ if ((action & ~CPU_TASKS_FROZEN) == CPU_DYING)
+ msm_jtag_save_state();
+
+ return NOTIFY_OK;
+}
+
+/* Registered via register_hotcpu_notifier() in msm_jtag_dbg_init() */
+static struct notifier_block jtag_hotcpu_save_notifier = {
+ .notifier_call = jtag_hotcpu_save_callback,
+};
+
+/* Hotplug notifier: restore debug state on the cpu as it comes up */
+static int jtag_hotcpu_restore_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ if ((action & ~CPU_TASKS_FROZEN) == CPU_STARTING)
+ msm_jtag_restore_state();
+
+ return NOTIFY_OK;
+}
+
+/* Priority 1 so the restore runs before default-priority notifiers */
+static struct notifier_block jtag_hotcpu_restore_notifier = {
+ .notifier_call = jtag_hotcpu_restore_callback,
+ .priority = 1,
+};
+
+/* CPU PM notifier: save on low-power entry, restore on exit/failure */
+static int jtag_cpu_pm_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ if (action == CPU_PM_ENTER)
+ msm_jtag_save_state();
+ else if (action == CPU_PM_EXIT || action == CPU_PM_ENTER_FAILED)
+ msm_jtag_restore_state();
+
+ return NOTIFY_OK;
+}
+
+/* Registered via cpu_pm_register_notifier() in msm_jtag_dbg_init() */
+static struct notifier_block jtag_cpu_pm_notifier = {
+ .notifier_call = jtag_cpu_pm_callback,
+};
+
+/*
+ * msm_jtag_dbg_init - set up kernel-side debug register save/restore.
+ *
+ * Enables save/restore only when the fuse permits apps debug access,
+ * the debug architecture is supported, and TZ does not already handle
+ * the save/restore itself. Returns 0 on success or when save/restore
+ * is simply not needed; -EPERM / -ENOMEM on failure.
+ *
+ * Fix vs. original: dbg.save_restore_enabled was set to true before
+ * dbg.state was allocated, so a kcalloc() failure left the flag true
+ * with dbg.state == NULL and a later msm_jtag_save_state() (exported,
+ * called from PM code) would dereference the NULL save area. The flag
+ * is now set only after the allocation succeeds.
+ */
+static int __init msm_jtag_dbg_init(void)
+{
+ if (msm_jtag_fuse_apps_access_disabled())
+ return -EPERM;
+
+ /* This will run on core0 so use it to populate parameters */
+ dbg_init_arch_data();
+
+ if (!dbg_arch_supported(dbg.arch)) {
+ pr_info("dbg arch %u not supported\n", dbg.arch);
+ return 0;
+ }
+
+ if (scm_get_feat_version(TZ_DBG_ETM_FEAT_ID) >= TZ_DBG_ETM_VER) {
+ pr_info("dbg save-restore supported by TZ\n");
+ return 0;
+ }
+
+ /* Allocate dbg state save space */
+#ifdef CONFIG_ARM64
+ dbg.state = kcalloc(MAX_DBG_STATE_SIZE, sizeof(uint64_t), GFP_KERNEL);
+#else
+ dbg.state = kcalloc(MAX_DBG_STATE_SIZE, sizeof(uint32_t), GFP_KERNEL);
+#endif
+ if (!dbg.state)
+ return -ENOMEM;
+
+ /* Enable only once the save area exists; see comment above */
+ dbg.save_restore_enabled = true;
+
+ register_hotcpu_notifier(&jtag_hotcpu_save_notifier);
+ register_hotcpu_notifier(&jtag_hotcpu_restore_notifier);
+ cpu_pm_register_notifier(&jtag_cpu_pm_notifier);
+
+ return 0;
+}
+arch_initcall(msm_jtag_dbg_init);