Diffstat (limited to 'plat/nvidia')
-rw-r--r--  plat/nvidia/tegra/common/aarch64/tegra_helpers.S | 454
-rw-r--r--  plat/nvidia/tegra/common/drivers/flowctrl/flowctrl.c | 217
-rw-r--r--  plat/nvidia/tegra/common/drivers/memctrl/memctrl_v1.c | 206
-rw-r--r--  plat/nvidia/tegra/common/drivers/memctrl/memctrl_v2.c | 677
-rw-r--r--  plat/nvidia/tegra/common/drivers/pmc/pmc.c | 112
-rw-r--r--  plat/nvidia/tegra/common/drivers/smmu/smmu.c | 160
-rw-r--r--  plat/nvidia/tegra/common/tegra_bl31_setup.c | 381
-rw-r--r--  plat/nvidia/tegra/common/tegra_common.mk | 29
-rw-r--r--  plat/nvidia/tegra/common/tegra_delay_timer.c | 30
-rw-r--r--  plat/nvidia/tegra/common/tegra_fiq_glue.c | 139
-rw-r--r--  plat/nvidia/tegra/common/tegra_gic.c | 332
-rw-r--r--  plat/nvidia/tegra/common/tegra_platform.c | 151
-rw-r--r--  plat/nvidia/tegra/common/tegra_pm.c | 384
-rw-r--r--  plat/nvidia/tegra/common/tegra_sip_calls.c | 182
-rw-r--r--  plat/nvidia/tegra/common/tegra_topology.c | 46
-rw-r--r--  plat/nvidia/tegra/include/drivers/flowctrl.h | 61
-rw-r--r--  plat/nvidia/tegra/include/drivers/mce.h | 76
-rw-r--r--  plat/nvidia/tegra/include/drivers/memctrl.h | 17
-rw-r--r--  plat/nvidia/tegra/include/drivers/memctrl_v1.h | 56
-rw-r--r--  plat/nvidia/tegra/include/drivers/memctrl_v2.h | 478
-rw-r--r--  plat/nvidia/tegra/include/drivers/pmc.h | 43
-rw-r--r--  plat/nvidia/tegra/include/drivers/smmu.h | 708
-rw-r--r--  plat/nvidia/tegra/include/plat_macros.S | 61
-rw-r--r--  plat/nvidia/tegra/include/platform_def.h | 68
-rw-r--r--  plat/nvidia/tegra/include/t132/tegra_def.h | 102
-rw-r--r--  plat/nvidia/tegra/include/t186/tegra_def.h | 250
-rw-r--r--  plat/nvidia/tegra/include/t210/tegra_def.h | 127
-rw-r--r--  plat/nvidia/tegra/include/tegra_platform.h | 34
-rw-r--r--  plat/nvidia/tegra/include/tegra_private.h | 103
-rw-r--r--  plat/nvidia/tegra/platform.mk | 39
-rw-r--r--  plat/nvidia/tegra/soc/t132/plat_psci_handlers.c | 147
-rw-r--r--  plat/nvidia/tegra/soc/t132/plat_secondary.c | 73
-rw-r--r--  plat/nvidia/tegra/soc/t132/plat_setup.c | 95
-rw-r--r--  plat/nvidia/tegra/soc/t132/plat_sip_calls.c | 73
-rw-r--r--  plat/nvidia/tegra/soc/t132/platform_t132.mk | 28
-rw-r--r--  plat/nvidia/tegra/soc/t186/drivers/include/mce_private.h | 261
-rw-r--r--  plat/nvidia/tegra/soc/t186/drivers/include/t18x_ari.h | 437
-rw-r--r--  plat/nvidia/tegra/soc/t186/drivers/mce/aarch64/nvg_helpers.S | 31
-rw-r--r--  plat/nvidia/tegra/soc/t186/drivers/mce/ari.c | 558
-rw-r--r--  plat/nvidia/tegra/soc/t186/drivers/mce/mce.c | 523
-rw-r--r--  plat/nvidia/tegra/soc/t186/drivers/mce/nvg.c | 252
-rw-r--r--  plat/nvidia/tegra/soc/t186/plat_memctrl.c | 221
-rw-r--r--  plat/nvidia/tegra/soc/t186/plat_psci_handlers.c | 400
-rw-r--r--  plat/nvidia/tegra/soc/t186/plat_secondary.c | 71
-rw-r--r--  plat/nvidia/tegra/soc/t186/plat_setup.c | 276
-rw-r--r--  plat/nvidia/tegra/soc/t186/plat_sip_calls.c | 180
-rw-r--r--  plat/nvidia/tegra/soc/t186/plat_smmu.c | 326
-rw-r--r--  plat/nvidia/tegra/soc/t186/plat_trampoline.S | 82
-rw-r--r--  plat/nvidia/tegra/soc/t186/platform_t186.mk | 66
-rw-r--r--  plat/nvidia/tegra/soc/t210/plat_psci_handlers.c | 238
-rw-r--r--  plat/nvidia/tegra/soc/t210/plat_secondary.c | 40
-rw-r--r--  plat/nvidia/tegra/soc/t210/plat_setup.c | 101
-rw-r--r--  plat/nvidia/tegra/soc/t210/platform_t210.mk | 45
53 files changed, 10247 insertions, 0 deletions
diff --git a/plat/nvidia/tegra/common/aarch64/tegra_helpers.S b/plat/nvidia/tegra/common/aarch64/tegra_helpers.S
new file mode 100644
index 00000000..3c490d07
--- /dev/null
+++ b/plat/nvidia/tegra/common/aarch64/tegra_helpers.S
@@ -0,0 +1,454 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <arch.h>
+#include <asm_macros.S>
+#include <assert_macros.S>
+#include <cpu_macros.S>
+#include <cortex_a53.h>
+#include <cortex_a57.h>
+#include <platform_def.h>
+#include <tegra_def.h>
+
+#define MIDR_PN_CORTEX_A57 0xD07
+
+/*******************************************************************************
+ * Implementation defined ACTLR_EL3 bit definitions
+ ******************************************************************************/
+#define ACTLR_EL3_L2ACTLR_BIT (1 << 6)
+#define ACTLR_EL3_L2ECTLR_BIT (1 << 5)
+#define ACTLR_EL3_L2CTLR_BIT (1 << 4)
+#define ACTLR_EL3_CPUECTLR_BIT (1 << 1)
+#define ACTLR_EL3_CPUACTLR_BIT (1 << 0)
+#define ACTLR_EL3_ENABLE_ALL_ACCESS (ACTLR_EL3_L2ACTLR_BIT | \
+ ACTLR_EL3_L2ECTLR_BIT | \
+ ACTLR_EL3_L2CTLR_BIT | \
+ ACTLR_EL3_CPUECTLR_BIT | \
+ ACTLR_EL3_CPUACTLR_BIT)
+
+ /* Global functions */
+ .globl plat_is_my_cpu_primary
+ .globl plat_my_core_pos
+ .globl plat_get_my_entrypoint
+ .globl plat_secondary_cold_boot_setup
+ .globl platform_mem_init
+ .globl plat_crash_console_init
+ .globl plat_crash_console_putc
+ .globl tegra_secure_entrypoint
+ .globl plat_reset_handler
+
+ /* Global variables */
+ .globl tegra_sec_entry_point
+ .globl ns_image_entrypoint
+ .globl tegra_bl31_phys_base
+ .globl tegra_console_base
+ .globl tegra_enable_l2_ecc_parity_prot
+
+ /* ---------------------
+ * Common CPU init code
+ * ---------------------
+ */
+.macro cpu_init_common
+
+ /* ------------------------------------------------
+ * We enable processor retention, L2/CPUECTLR NS
+ * access and ECC/Parity protection for A57 CPUs
+ * ------------------------------------------------
+ */
+ mrs x0, midr_el1
+ mov x1, #(MIDR_PN_MASK << MIDR_PN_SHIFT)
+ and x0, x0, x1
+ lsr x0, x0, #MIDR_PN_SHIFT
+ cmp x0, #MIDR_PN_CORTEX_A57
+ b.ne 1f
+
+ /* ---------------------------
+ * Enable processor retention
+ * ---------------------------
+ */
+ mrs x0, CORTEX_A57_L2ECTLR_EL1
+ mov x1, #RETENTION_ENTRY_TICKS_512
+ bic x0, x0, #CORTEX_A57_L2ECTLR_RET_CTRL_MASK
+ orr x0, x0, x1
+ msr CORTEX_A57_L2ECTLR_EL1, x0
+ isb
+
+ mrs x0, CORTEX_A57_ECTLR_EL1
+ mov x1, #RETENTION_ENTRY_TICKS_512
+ bic x0, x0, #CORTEX_A57_ECTLR_CPU_RET_CTRL_MASK
+ orr x0, x0, x1
+ msr CORTEX_A57_ECTLR_EL1, x0
+ isb
+
+ /* -------------------------------------------------------
+ * Enable L2 and CPU ECTLR RW access from non-secure world
+ * -------------------------------------------------------
+ */
+ mov x0, #ACTLR_EL3_ENABLE_ALL_ACCESS
+ msr actlr_el3, x0
+ msr actlr_el2, x0
+ isb
+
+ /* -------------------------------------------------------
+ * Enable L2 ECC and Parity Protection
+ * -------------------------------------------------------
+ */
+ adr x0, tegra_enable_l2_ecc_parity_prot
+ ldr x0, [x0]
+ cbz x0, 1f
+ mrs x0, CORTEX_A57_L2CTLR_EL1
+ and x1, x0, #CORTEX_A57_L2_ECC_PARITY_PROTECTION_BIT
+ cbnz x1, 1f
+ orr x0, x0, #CORTEX_A57_L2_ECC_PARITY_PROTECTION_BIT
+ msr CORTEX_A57_L2CTLR_EL1, x0
+ isb
+
+ /* --------------------------------
+ * Enable the cycle count register
+ * --------------------------------
+ */
+1: mrs x0, pmcr_el0
+ ubfx x0, x0, #11, #5 // read PMCR.N field
+ mov x1, #1
+ lsl x0, x1, x0
+ sub x0, x0, #1 // mask of event counters
+ orr x0, x0, #0x80000000 // disable overflow intrs
+ msr pmintenclr_el1, x0
+ msr pmuserenr_el0, x1 // enable user mode access
+
+ /* ----------------------------------------------------------------
+ * Allow non-privileged access to CNTVCT: Set CNTKCTL (Counter-timer
+ * Kernel Control register), bit 1 (EL0VCTEN) to enable access to the
+ * CNTVCT/CNTFRQ registers from EL0.
+ * ----------------------------------------------------------------
+ */
+ mrs x0, cntkctl_el1
+ orr x0, x0, #EL0VCTEN_BIT
+ msr cntkctl_el1, x0
+.endm
+
+ /* -----------------------------------------------------
+ * unsigned int plat_is_my_cpu_primary(void);
+ *
+ * This function checks if this is the Primary CPU
+ * -----------------------------------------------------
+ */
+func plat_is_my_cpu_primary
+ mrs x0, mpidr_el1
+ and x0, x0, #(MPIDR_CLUSTER_MASK | MPIDR_CPU_MASK)
+ cmp x0, #TEGRA_PRIMARY_CPU
+ cset x0, eq
+ ret
+endfunc plat_is_my_cpu_primary
+
+ /* -----------------------------------------------------
+ * unsigned int plat_my_core_pos(void);
+ *
+ * result: CorePos = CoreId + (ClusterId << 2)
+ * -----------------------------------------------------
+ */
+func plat_my_core_pos
+ mrs x0, mpidr_el1
+ and x1, x0, #MPIDR_CPU_MASK
+ and x0, x0, #MPIDR_CLUSTER_MASK
+ add x0, x1, x0, LSR #6
+ ret
+endfunc plat_my_core_pos
+
+ /* -----------------------------------------------------
+ * unsigned long plat_get_my_entrypoint (void);
+ *
+ * Main job of this routine is to distinguish between
+ * a cold and warm boot. If the tegra_sec_entry_point for
+ * this CPU is present, then it's a warm boot.
+ *
+ * -----------------------------------------------------
+ */
+func plat_get_my_entrypoint
+ adr x1, tegra_sec_entry_point
+ ldr x0, [x1]
+ ret
+endfunc plat_get_my_entrypoint
+
+ /* -----------------------------------------------------
+ * int platform_get_core_pos(int mpidr);
+ *
+ * With this function: CorePos = (ClusterId * 4) +
+ * CoreId
+ * -----------------------------------------------------
+ */
+func platform_get_core_pos
+ and x1, x0, #MPIDR_CPU_MASK
+ and x0, x0, #MPIDR_CLUSTER_MASK
+ add x0, x1, x0, LSR #6
+ ret
+endfunc platform_get_core_pos
+
+ /* -----------------------------------------------------
+ * void plat_secondary_cold_boot_setup (void);
+ *
+ * This function performs any platform specific actions
+ * needed for a secondary cpu after a cold reset. Right
+ * now this is a stub function.
+ * -----------------------------------------------------
+ */
+func plat_secondary_cold_boot_setup
+ mov x0, #0
+ ret
+endfunc plat_secondary_cold_boot_setup
+
+ /* --------------------------------------------------------
+ * void platform_mem_init (void);
+ *
+ * Any memory init, relocation to be done before the
+ * platform boots. Called very early in the boot process.
+ * --------------------------------------------------------
+ */
+func platform_mem_init
+ mov x0, #0
+ ret
+endfunc platform_mem_init
+
+ /* ---------------------------------------------
+ * int plat_crash_console_init(void)
+ * Function to initialize the crash console
+ * without a C Runtime to print crash report.
+ * Clobber list : x0 - x4
+ * ---------------------------------------------
+ */
+func plat_crash_console_init
+ mov x0, #0
+ adr x1, tegra_console_base
+ ldr x1, [x1]
+ cbz x1, 1f
+ mov w0, #1
+1: ret
+endfunc plat_crash_console_init
+
+ /* ---------------------------------------------
+ * int plat_crash_console_putc(int c)
+ * Function to print a character on the crash
+ * console without a C Runtime.
+ * Clobber list : x1, x2
+ * ---------------------------------------------
+ */
+func plat_crash_console_putc
+ adr x1, tegra_console_base
+ ldr x1, [x1]
+ b console_core_putc
+endfunc plat_crash_console_putc
+
+ /* ---------------------------------------------------
+ * Function to handle a platform reset and store
+ * input parameters passed by BL2.
+ * ---------------------------------------------------
+ */
+func plat_reset_handler
+
+ /* ----------------------------------------------------
+ * Verify if we are running from BL31_BASE address
+ * ----------------------------------------------------
+ */
+ adr x18, bl31_entrypoint
+ mov x17, #BL31_BASE
+ cmp x18, x17
+ b.eq 1f
+
+ /* ----------------------------------------------------
+ * Copy the entire BL31 code to BL31_BASE if we are not
+ * running from it already
+ * ----------------------------------------------------
+ */
+ mov x0, x17
+ mov x1, x18
+ mov x2, #BL31_SIZE
+_loop16:
+ cmp x2, #16
+ b.lo _loop1
+ ldp x3, x4, [x1], #16
+ stp x3, x4, [x0], #16
+ sub x2, x2, #16
+ b _loop16
+ /* copy byte per byte */
+_loop1:
+ cbz x2, _end
+ ldrb w3, [x1], #1
+ strb w3, [x0], #1
+ subs x2, x2, #1
+ b.ne _loop1
+
+ /* ----------------------------------------------------
+ * Jump to BL31_BASE and start execution again
+ * ----------------------------------------------------
+ */
+_end: mov x0, x20
+ mov x1, x21
+ br x17
+1:
+
+ /* -----------------------------------
+ * derive and save the phys_base addr
+ * -----------------------------------
+ */
+ adr x17, tegra_bl31_phys_base
+ ldr x18, [x17]
+ cbnz x18, 1f
+ adr x18, bl31_entrypoint
+ str x18, [x17]
+
+1: cpu_init_common
+
+ ret
+endfunc plat_reset_handler
+
+ /* ----------------------------------------
+ * Secure entrypoint function for CPU boot
+ * ----------------------------------------
+ */
+func tegra_secure_entrypoint _align=6
+
+#if ERRATA_TEGRA_INVALIDATE_BTB_AT_BOOT
+
+ /* -------------------------------------------------------
+ * Invalidate BTB along with I$ to remove any stale
+ * entries from the branch predictor array.
+ * -------------------------------------------------------
+ */
+ mrs x0, CORTEX_A57_CPUACTLR_EL1
+ orr x0, x0, #1
+ msr CORTEX_A57_CPUACTLR_EL1, x0 /* invalidate BTB and I$ together */
+ dsb sy
+ isb
+ ic iallu /* actual invalidate */
+ dsb sy
+ isb
+
+ mrs x0, CORTEX_A57_CPUACTLR_EL1
+ bic x0, x0, #1
+ msr CORTEX_A57_CPUACTLR_EL1, x0 /* restore original CPUACTLR_EL1 */
+ dsb sy
+ isb
+
+ .rept 7
+ nop /* wait */
+ .endr
+
+ /* -----------------------------------------------
+ * Extract OSLK bit and check if it is '1'. This
+ * bit remains '0' for A53 on warm-resets. If '1',
+ * turn off regional clock gating and request warm
+ * reset.
+ * -----------------------------------------------
+ */
+ mrs x0, oslsr_el1
+ and x0, x0, #2
+ mrs x1, mpidr_el1
+ bics xzr, x0, x1, lsr #7 /* 0 = slow cluster or warm reset */
+ b.eq restore_oslock
+ mov x0, xzr
+ msr oslar_el1, x0 /* os lock stays 0 across warm reset */
+ mov x3, #3
+ movz x4, #0x8000, lsl #48
+ msr CORTEX_A57_CPUACTLR_EL1, x4 /* turn off RCG */
+ isb
+ msr rmr_el3, x3 /* request warm reset */
+ isb
+ dsb sy
+1: wfi
+ b 1b
+
+ /* --------------------------------------------------
+ * These nops are here so that speculative execution
+ * won't harm us before we are done with warm reset.
+ * --------------------------------------------------
+ */
+ .rept 65
+ nop
+ .endr
+
+ /* --------------------------------------------------
+ * Do not insert instructions here
+ * --------------------------------------------------
+ */
+#endif
+
+ /* --------------------------------------------------
+ * Restore OS Lock bit
+ * --------------------------------------------------
+ */
+restore_oslock:
+ mov x0, #1
+ msr oslar_el1, x0
+
+ cpu_init_common
+
+ /* ---------------------------------------------------------------------
+ * The initial state of the Architectural feature trap register
+ * (CPTR_EL3) is unknown and it must be set to a known state. All
+ * feature traps are disabled. Some bits in this register are marked as
+ * Reserved and should not be modified.
+ *
+ * CPTR_EL3.TCPAC: This causes a direct access to the CPACR_EL1 from EL1
+ * or the CPTR_EL2 from EL2 to trap to EL3 unless it is trapped at EL2.
+ * CPTR_EL3.TTA: This causes access to the Trace functionality to trap
+ * to EL3 when executed from EL0, EL1, EL2, or EL3. If system register
+ * access to trace functionality is not supported, this bit is RES0.
+ * CPTR_EL3.TFP: This causes instructions that access the registers
+ * associated with Floating Point and Advanced SIMD execution to trap
+ * to EL3 when executed from any exception level, unless trapped to EL1
+ * or EL2.
+ * ---------------------------------------------------------------------
+ */
+ mrs x1, cptr_el3
+ bic w1, w1, #TCPAC_BIT
+ bic w1, w1, #TTA_BIT
+ bic w1, w1, #TFP_BIT
+ msr cptr_el3, x1
+
+ /* --------------------------------------------------
+ * Get secure world's entry point and jump to it
+ * --------------------------------------------------
+ */
+ bl plat_get_my_entrypoint
+ br x0
+endfunc tegra_secure_entrypoint
+
+ .data
+ .align 3
+
+ /* --------------------------------------------------
+ * CPU Secure entry point - resume from suspend
+ * --------------------------------------------------
+ */
+tegra_sec_entry_point:
+ .quad 0
+
+ /* --------------------------------------------------
+ * NS world's cold boot entry point
+ * --------------------------------------------------
+ */
+ns_image_entrypoint:
+ .quad 0
+
+ /* --------------------------------------------------
+ * BL31's physical base address
+ * --------------------------------------------------
+ */
+tegra_bl31_phys_base:
+ .quad 0
+
+ /* --------------------------------------------------
+ * UART controller base for console init
+ * --------------------------------------------------
+ */
+tegra_console_base:
+ .quad 0
+
+ /* --------------------------------------------------
+ * Enable L2 ECC and Parity Protection
+ * --------------------------------------------------
+ */
+tegra_enable_l2_ecc_parity_prot:
+ .quad 0
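
For reference, the core-position arithmetic used by plat_my_core_pos and platform_get_core_pos above can be written out in C. This is an explanatory sketch only, not code from this patch: shifting the masked cluster field (MPIDR[15:8]) right by 6 is the same as extracting ClusterId and multiplying by 4.

#include <stdint.h>

/* illustration only: CorePos = CoreId + (ClusterId * 4) */
static unsigned int tegra_core_pos_sketch(uint64_t mpidr)
{
	uint64_t core_id    = mpidr & 0xFFU;           /* MPIDR_CPU_MASK */
	uint64_t cluster_id = (mpidr & 0xFF00U) >> 8;  /* MPIDR_CLUSTER_MASK >> 8 */

	/* same result as "add x0, x1, x0, LSR #6" on the masked cluster field */
	return (unsigned int)(core_id + (cluster_id << 2));
}

/* e.g. MPIDR 0x101 (cluster 1, core 1) -> core position 5 */
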
diff --git a/plat/nvidia/tegra/common/drivers/flowctrl/flowctrl.c b/plat/nvidia/tegra/common/drivers/flowctrl/flowctrl.c
new file mode 100644
index 00000000..2d827da8
--- /dev/null
+++ b/plat/nvidia/tegra/common/drivers/flowctrl/flowctrl.c
@@ -0,0 +1,217 @@
+/*
+ * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <cortex_a53.h>
+#include <debug.h>
+#include <delay_timer.h>
+#include <flowctrl.h>
+#include <mmio.h>
+#include <pmc.h>
+#include <tegra_def.h>
+
+#define CLK_RST_DEV_L_SET 0x300
+#define CLK_RST_DEV_L_CLR 0x304
+#define CLK_BPMP_RST (1 << 1)
+
+#define EVP_BPMP_RESET_VECTOR 0x200
+
+static const uint64_t flowctrl_offset_cpu_csr[4] = {
+ (TEGRA_FLOWCTRL_BASE + FLOWCTRL_CPU0_CSR),
+ (TEGRA_FLOWCTRL_BASE + FLOWCTRL_CPU1_CSR),
+ (TEGRA_FLOWCTRL_BASE + FLOWCTRL_CPU1_CSR + 8),
+ (TEGRA_FLOWCTRL_BASE + FLOWCTRL_CPU1_CSR + 16)
+};
+
+static const uint64_t flowctrl_offset_halt_cpu[4] = {
+ (TEGRA_FLOWCTRL_BASE + FLOWCTRL_HALT_CPU0_EVENTS),
+ (TEGRA_FLOWCTRL_BASE + FLOWCTRL_HALT_CPU1_EVENTS),
+ (TEGRA_FLOWCTRL_BASE + FLOWCTRL_HALT_CPU1_EVENTS + 8),
+ (TEGRA_FLOWCTRL_BASE + FLOWCTRL_HALT_CPU1_EVENTS + 16)
+};
+
+static const uint64_t flowctrl_offset_cc4_ctrl[4] = {
+ (TEGRA_FLOWCTRL_BASE + FLOWCTRL_CC4_CORE0_CTRL),
+ (TEGRA_FLOWCTRL_BASE + FLOWCTRL_CC4_CORE0_CTRL + 4),
+ (TEGRA_FLOWCTRL_BASE + FLOWCTRL_CC4_CORE0_CTRL + 8),
+ (TEGRA_FLOWCTRL_BASE + FLOWCTRL_CC4_CORE0_CTRL + 12)
+};
+
+static inline void tegra_fc_cc4_ctrl(int cpu_id, uint32_t val)
+{
+ mmio_write_32(flowctrl_offset_cc4_ctrl[cpu_id], val);
+ val = mmio_read_32(flowctrl_offset_cc4_ctrl[cpu_id]);
+}
+
+static inline void tegra_fc_cpu_csr(int cpu_id, uint32_t val)
+{
+ mmio_write_32(flowctrl_offset_cpu_csr[cpu_id], val);
+ val = mmio_read_32(flowctrl_offset_cpu_csr[cpu_id]);
+}
+
+static inline void tegra_fc_halt_cpu(int cpu_id, uint32_t val)
+{
+ mmio_write_32(flowctrl_offset_halt_cpu[cpu_id], val);
+ val = mmio_read_32(flowctrl_offset_halt_cpu[cpu_id]);
+}
+
+static void tegra_fc_prepare_suspend(int cpu_id, uint32_t csr)
+{
+ uint32_t val;
+
+ val = FLOWCTRL_HALT_GIC_IRQ | FLOWCTRL_HALT_GIC_FIQ |
+ FLOWCTRL_HALT_LIC_IRQ | FLOWCTRL_HALT_LIC_FIQ |
+ FLOWCTRL_WAITEVENT;
+ tegra_fc_halt_cpu(cpu_id, val);
+
+ val = FLOWCTRL_CSR_INTR_FLAG | FLOWCTRL_CSR_EVENT_FLAG |
+ FLOWCTRL_CSR_ENABLE | (FLOWCTRL_WAIT_WFI_BITMAP << cpu_id);
+ tegra_fc_cpu_csr(cpu_id, val | csr);
+}
+
+/*******************************************************************************
+ * Power down the current CPU
+ ******************************************************************************/
+void tegra_fc_cpu_powerdn(uint32_t mpidr)
+{
+ int cpu = mpidr & MPIDR_CPU_MASK;
+
+ VERBOSE("CPU%d powering down...\n", cpu);
+ tegra_fc_prepare_suspend(cpu, 0);
+}
+
+/*******************************************************************************
+ * Suspend the current CPU cluster
+ ******************************************************************************/
+void tegra_fc_cluster_idle(uint32_t mpidr)
+{
+ int cpu = mpidr & MPIDR_CPU_MASK;
+ uint32_t val;
+
+ VERBOSE("Entering cluster idle state...\n");
+
+ tegra_fc_cc4_ctrl(cpu, 0);
+
+ /* hardware L2 flush is faster for A53 only */
+ tegra_fc_write_32(FLOWCTRL_L2_FLUSH_CONTROL,
+ !!MPIDR_AFFLVL1_VAL(mpidr));
+
+ /* suspend the CPU cluster */
+ val = FLOWCTRL_PG_CPU_NONCPU << FLOWCTRL_ENABLE_EXT;
+ tegra_fc_prepare_suspend(cpu, val);
+}
+
+/*******************************************************************************
+ * Power down the current CPU cluster
+ ******************************************************************************/
+void tegra_fc_cluster_powerdn(uint32_t mpidr)
+{
+ int cpu = mpidr & MPIDR_CPU_MASK;
+ uint32_t val;
+
+ VERBOSE("Entering cluster powerdn state...\n");
+
+ tegra_fc_cc4_ctrl(cpu, 0);
+
+ /* hardware L2 flush is faster for A53 only */
+ tegra_fc_write_32(FLOWCTRL_L2_FLUSH_CONTROL,
+ read_midr() == CORTEX_A53_MIDR);
+
+ /* power down the CPU cluster */
+ val = FLOWCTRL_TURNOFF_CPURAIL << FLOWCTRL_ENABLE_EXT;
+ tegra_fc_prepare_suspend(cpu, val);
+}
+
+/*******************************************************************************
+ * Suspend the entire SoC
+ ******************************************************************************/
+void tegra_fc_soc_powerdn(uint32_t mpidr)
+{
+ int cpu = mpidr & MPIDR_CPU_MASK;
+ uint32_t val;
+
+ VERBOSE("Entering SoC powerdn state...\n");
+
+ tegra_fc_cc4_ctrl(cpu, 0);
+
+ tegra_fc_write_32(FLOWCTRL_L2_FLUSH_CONTROL, 1);
+
+ val = FLOWCTRL_TURNOFF_CPURAIL << FLOWCTRL_ENABLE_EXT;
+ tegra_fc_prepare_suspend(cpu, val);
+
+ /* overwrite HALT register */
+ tegra_fc_halt_cpu(cpu, FLOWCTRL_WAITEVENT);
+}
+
+/*******************************************************************************
+ * Power up the CPU
+ ******************************************************************************/
+void tegra_fc_cpu_on(int cpu)
+{
+ tegra_fc_cpu_csr(cpu, FLOWCTRL_CSR_ENABLE);
+ tegra_fc_halt_cpu(cpu, FLOWCTRL_WAITEVENT | FLOWCTRL_HALT_SCLK);
+}
+
+/*******************************************************************************
+ * Power down the CPU
+ ******************************************************************************/
+void tegra_fc_cpu_off(int cpu)
+{
+ uint32_t val;
+
+ /*
+ * Flow controller powers down the CPU during wfi. The CPU would be
+ * powered on when it receives any interrupt.
+ */
+ val = FLOWCTRL_CSR_INTR_FLAG | FLOWCTRL_CSR_EVENT_FLAG |
+ FLOWCTRL_CSR_ENABLE | (FLOWCTRL_WAIT_WFI_BITMAP << cpu);
+ tegra_fc_cpu_csr(cpu, val);
+ tegra_fc_halt_cpu(cpu, FLOWCTRL_WAITEVENT);
+ tegra_fc_cc4_ctrl(cpu, 0);
+}
+
+/*******************************************************************************
+ * Inform the BPMP that we have completed the cluster power up
+ ******************************************************************************/
+void tegra_fc_lock_active_cluster(void)
+{
+ uint32_t val;
+
+ val = tegra_fc_read_32(FLOWCTRL_BPMP_CLUSTER_CONTROL);
+ val |= FLOWCTRL_BPMP_CLUSTER_PWRON_LOCK;
+ tegra_fc_write_32(FLOWCTRL_BPMP_CLUSTER_CONTROL, val);
+ val = tegra_fc_read_32(FLOWCTRL_BPMP_CLUSTER_CONTROL);
+}
+
+/*******************************************************************************
+ * Reset BPMP processor
+ ******************************************************************************/
+void tegra_fc_reset_bpmp(void)
+{
+ uint32_t val;
+
+ /* halt BPMP */
+ tegra_fc_write_32(FLOWCTRL_HALT_BPMP_EVENTS, FLOWCTRL_WAITEVENT);
+
+ /* Assert BPMP reset */
+ mmio_write_32(TEGRA_CAR_RESET_BASE + CLK_RST_DEV_L_SET, CLK_BPMP_RST);
+
+ /* Restore reset address (stored in PMC_SCRATCH39) */
+ val = tegra_pmc_read_32(PMC_SCRATCH39);
+ mmio_write_32(TEGRA_EVP_BASE + EVP_BPMP_RESET_VECTOR, val);
+ while (val != mmio_read_32(TEGRA_EVP_BASE + EVP_BPMP_RESET_VECTOR))
+ ; /* wait till value reaches EVP_BPMP_RESET_VECTOR */
+
+ /* Wait for 2us before de-asserting the reset signal. */
+ udelay(2);
+
+ /* De-assert BPMP reset */
+ mmio_write_32(TEGRA_CAR_RESET_BASE + CLK_RST_DEV_L_CLR, CLK_BPMP_RST);
+
+ /* Un-halt BPMP */
+ tegra_fc_write_32(FLOWCTRL_HALT_BPMP_EVENTS, 0);
+}
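
For context, a minimal caller sketch, assuming a Tegra210-style PSCI power handler (the hook names below are assumptions for illustration, not part of this patch), showing how the flow-controller helpers are meant to be driven:

#include <arch.h>
#include <arch_helpers.h>
#include <flowctrl.h>
#include <psci.h>

/* hypothetical PSCI hooks -- names are assumptions, not from this patch */
static int soc_cpu_on_sketch(uint64_t mpidr)
{
	/* program the flow controller to power-ungate the target core */
	tegra_fc_cpu_on((int)(mpidr & MPIDR_CPU_MASK));
	return PSCI_E_SUCCESS;
}

static void soc_cpu_off_sketch(void)
{
	/* the flow controller powers the core down on its next wfi */
	tegra_fc_cpu_off((int)(read_mpidr() & MPIDR_CPU_MASK));
}
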
diff --git a/plat/nvidia/tegra/common/drivers/memctrl/memctrl_v1.c b/plat/nvidia/tegra/common/drivers/memctrl/memctrl_v1.c
new file mode 100644
index 00000000..9944e729
--- /dev/null
+++ b/plat/nvidia/tegra/common/drivers/memctrl/memctrl_v1.c
@@ -0,0 +1,206 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <debug.h>
+#include <memctrl.h>
+#include <memctrl_v1.h>
+#include <mmio.h>
+#include <string.h>
+#include <tegra_def.h>
+#include <utils.h>
+#include <xlat_tables_v2.h>
+
+/* Video Memory base and size (live values) */
+static uint64_t video_mem_base;
+static uint64_t video_mem_size;
+
+/*
+ * Init the Memory Controller and its SMMU during boot.
+ */
+void tegra_memctrl_setup(void)
+{
+ /*
+ * Setup the Memory controller to allow only secure accesses to
+ * the TZDRAM carveout
+ */
+ INFO("Tegra Memory Controller (v1)\n");
+
+ /* allow translations for all MC engines */
+ tegra_mc_write_32(MC_SMMU_TRANSLATION_ENABLE_0_0,
+ (unsigned int)MC_SMMU_TRANSLATION_ENABLE);
+ tegra_mc_write_32(MC_SMMU_TRANSLATION_ENABLE_1_0,
+ (unsigned int)MC_SMMU_TRANSLATION_ENABLE);
+ tegra_mc_write_32(MC_SMMU_TRANSLATION_ENABLE_2_0,
+ (unsigned int)MC_SMMU_TRANSLATION_ENABLE);
+ tegra_mc_write_32(MC_SMMU_TRANSLATION_ENABLE_3_0,
+ (unsigned int)MC_SMMU_TRANSLATION_ENABLE);
+ tegra_mc_write_32(MC_SMMU_TRANSLATION_ENABLE_4_0,
+ (unsigned int)MC_SMMU_TRANSLATION_ENABLE);
+
+ tegra_mc_write_32(MC_SMMU_ASID_SECURITY_0, MC_SMMU_ASID_SECURITY);
+
+ tegra_mc_write_32(MC_SMMU_TLB_CONFIG_0, MC_SMMU_TLB_CONFIG_0_RESET_VAL);
+ tegra_mc_write_32(MC_SMMU_PTC_CONFIG_0, MC_SMMU_PTC_CONFIG_0_RESET_VAL);
+
+ /* flush PTC and TLB */
+ tegra_mc_write_32(MC_SMMU_PTC_FLUSH_0, MC_SMMU_PTC_FLUSH_ALL);
+ (void)tegra_mc_read_32(MC_SMMU_CONFIG_0); /* read to flush writes */
+ tegra_mc_write_32(MC_SMMU_TLB_FLUSH_0, MC_SMMU_TLB_FLUSH_ALL);
+
+ /* enable SMMU */
+ tegra_mc_write_32(MC_SMMU_CONFIG_0,
+ MC_SMMU_CONFIG_0_SMMU_ENABLE_ENABLE);
+ (void)tegra_mc_read_32(MC_SMMU_CONFIG_0); /* read to flush writes */
+
+ /* video memory carveout */
+ tegra_mc_write_32(MC_VIDEO_PROTECT_BASE_HI,
+ (uint32_t)(video_mem_base >> 32));
+ tegra_mc_write_32(MC_VIDEO_PROTECT_BASE_LO, (uint32_t)video_mem_base);
+ tegra_mc_write_32(MC_VIDEO_PROTECT_SIZE_MB, video_mem_size);
+}
+
+/*
+ * Restore Memory Controller settings after "System Suspend"
+ */
+void tegra_memctrl_restore_settings(void)
+{
+ tegra_memctrl_setup();
+}
+
+/*
+ * Secure the BL31 DRAM aperture.
+ *
+ * phys_base = physical base of TZDRAM aperture
+ * size_in_bytes = size of aperture in bytes
+ */
+void tegra_memctrl_tzdram_setup(uint64_t phys_base, uint32_t size_in_bytes)
+{
+ /*
+ * Setup the Memory controller to allow only secure accesses to
+ * the TZDRAM carveout
+ */
+ INFO("Configuring TrustZone DRAM Memory Carveout\n");
+
+ tegra_mc_write_32(MC_SECURITY_CFG0_0, phys_base);
+ tegra_mc_write_32(MC_SECURITY_CFG1_0, size_in_bytes >> 20);
+}
+
+/*
+ * Secure the BL31 TZRAM aperture.
+ *
+ * phys_base = physical base of TZRAM aperture
+ * size_in_bytes = size of aperture in bytes
+ */
+void tegra_memctrl_tzram_setup(uint64_t phys_base, uint32_t size_in_bytes)
+{
+ /*
+ * The v1 hardware controller does not have any registers
+ * for setting up the on-chip TZRAM.
+ */
+}
+
+static void tegra_clear_videomem(uintptr_t non_overlap_area_start,
+ unsigned long long non_overlap_area_size)
+{
+ /*
+ * Map the NS memory first, clean it and then unmap it.
+ */
+ mmap_add_dynamic_region(non_overlap_area_start, /* PA */
+ non_overlap_area_start, /* VA */
+ non_overlap_area_size, /* size */
+ MT_NS | MT_RW | MT_EXECUTE_NEVER); /* attrs */
+
+ zeromem((void *)non_overlap_area_start, non_overlap_area_size);
+ flush_dcache_range(non_overlap_area_start, non_overlap_area_size);
+
+ mmap_remove_dynamic_region(non_overlap_area_start,
+ non_overlap_area_size);
+}
+
+/*
+ * Program the Video Memory carveout region
+ *
+ * phys_base = physical base of aperture
+ * size_in_bytes = size of aperture in bytes
+ */
+void tegra_memctrl_videomem_setup(uint64_t phys_base, uint32_t size_in_bytes)
+{
+ uintptr_t vmem_end_old = video_mem_base + (video_mem_size << 20);
+ uintptr_t vmem_end_new = phys_base + size_in_bytes;
+ unsigned long long non_overlap_area_size;
+
+ /*
+ * Setup the Memory controller to restrict CPU accesses to the Video
+ * Memory region
+ */
+ INFO("Configuring Video Memory Carveout\n");
+
+ /*
+ * Configure Memory Controller directly for the first time.
+ */
+ if (video_mem_base == 0)
+ goto done;
+
+ /*
+ * Clear the old regions now being exposed. The following cases
+ * can occur -
+ *
+ * 1. clear whole old region (no overlap with new region)
+ * 2. clear old sub-region below new base
+ * 3. clear old sub-region above new end
+ */
+ INFO("Cleaning previous Video Memory Carveout\n");
+
+ if (phys_base > vmem_end_old || video_mem_base > vmem_end_new) {
+ tegra_clear_videomem(video_mem_base, video_mem_size << 20);
+ } else {
+ if (video_mem_base < phys_base) {
+ non_overlap_area_size = phys_base - video_mem_base;
+ tegra_clear_videomem(video_mem_base, non_overlap_area_size);
+ }
+ if (vmem_end_old > vmem_end_new) {
+ non_overlap_area_size = vmem_end_old - vmem_end_new;
+ tegra_clear_videomem(vmem_end_new, non_overlap_area_size);
+ }
+ }
+
+done:
+ tegra_mc_write_32(MC_VIDEO_PROTECT_BASE_HI, (uint32_t)(phys_base >> 32));
+ tegra_mc_write_32(MC_VIDEO_PROTECT_BASE_LO, (uint32_t)phys_base);
+ tegra_mc_write_32(MC_VIDEO_PROTECT_SIZE_MB, size_in_bytes >> 20);
+
+ /* store new values */
+ video_mem_base = phys_base;
+ video_mem_size = size_in_bytes >> 20;
+}
+
+/*
+ * During boot, USB3 and flash media (SDMMC/SATA) devices need access to
+ * IRAM. Because these clients connect to the MC and do not have a direct
+ * path to the IRAM, the MC implements AHB redirection during boot to allow
+ * path to IRAM. In this mode, accesses to a programmed memory address aperture
+ * are directed to the AHB bus, allowing access to the IRAM. The AHB aperture
+ * is defined by the IRAM_BASE_LO and IRAM_BASE_HI registers, which are
+ * initialized to disable this aperture.
+ *
+ * Once bootup is complete, we must program IRAM base to 0xffffffff and
+ * IRAM top to 0x00000000, thus disabling access to IRAM. DRAM is then
+ * potentially accessible in this address range. These aperture registers
+ * also have an access_control/lock bit. After disabling the aperture, the
+ * access_control register should be programmed to lock the registers.
+ */
+void tegra_memctrl_disable_ahb_redirection(void)
+{
+ /* program the aperture registers */
+ tegra_mc_write_32(MC_IRAM_BASE_LO, 0xFFFFFFFF);
+ tegra_mc_write_32(MC_IRAM_TOP_LO, 0);
+ tegra_mc_write_32(MC_IRAM_BASE_TOP_HI, 0);
+
+ /* lock the aperture registers */
+ tegra_mc_write_32(MC_IRAM_REG_CTRL, MC_DISABLE_IRAM_CFG_WRITES);
+}
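
For context, tegra_memctrl_videomem_setup above is expected to be invoked from a higher layer (this patch series routes carveout resize requests from the non-secure world through a SiP call handler); a hedged caller sketch, with an assumed wrapper name:

#include <stdint.h>
#include <memctrl.h>

/* hypothetical wrapper -- the name is an assumption, not from this patch */
static void resize_videomem_carveout(uint64_t ns_base, uint64_t ns_size)
{
	/*
	 * The driver clears whatever part of the old carveout is no longer
	 * covered by [ns_base, ns_base + ns_size) before programming the
	 * new MC_VIDEO_PROTECT_* registers.
	 */
	tegra_memctrl_videomem_setup(ns_base, (uint32_t)ns_size);
}
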
diff --git a/plat/nvidia/tegra/common/drivers/memctrl/memctrl_v2.c b/plat/nvidia/tegra/common/drivers/memctrl/memctrl_v2.c
new file mode 100644
index 00000000..92fdadcf
--- /dev/null
+++ b/plat/nvidia/tegra/common/drivers/memctrl/memctrl_v2.c
@@ -0,0 +1,677 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <bl_common.h>
+#include <debug.h>
+#include <mce.h>
+#include <memctrl.h>
+#include <memctrl_v2.h>
+#include <mmio.h>
+#include <smmu.h>
+#include <string.h>
+#include <tegra_def.h>
+#include <tegra_platform.h>
+#include <utils.h>
+#include <xlat_tables_v2.h>
+
+/* Video Memory base and size (live values) */
+static uint64_t video_mem_base;
+static uint64_t video_mem_size_mb;
+
+static void tegra_memctrl_reconfig_mss_clients(void)
+{
+#if ENABLE_ROC_FOR_ORDERING_CLIENT_REQUESTS
+ uint32_t val, wdata_0, wdata_1;
+
+ /*
+ * Assert Memory Controller's HOTRESET_FLUSH_ENABLE signal for
+ * boot and strongly ordered MSS clients to flush existing memory
+ * traffic and stall future requests.
+ */
+ val = tegra_mc_read_32(MC_CLIENT_HOTRESET_CTRL0);
+ assert(val == MC_CLIENT_HOTRESET_CTRL0_RESET_VAL);
+
+ wdata_0 = MC_CLIENT_HOTRESET_CTRL0_HDA_FLUSH_ENB |
+#if ENABLE_AFI_DEVICE
+ MC_CLIENT_HOTRESET_CTRL0_AFI_FLUSH_ENB |
+#endif
+ MC_CLIENT_HOTRESET_CTRL0_SATA_FLUSH_ENB |
+ MC_CLIENT_HOTRESET_CTRL0_XUSB_HOST_FLUSH_ENB |
+ MC_CLIENT_HOTRESET_CTRL0_XUSB_DEV_FLUSH_ENB;
+ tegra_mc_write_32(MC_CLIENT_HOTRESET_CTRL0, wdata_0);
+
+ /* Wait for HOTRESET STATUS to indicate FLUSH_DONE */
+ do {
+ val = tegra_mc_read_32(MC_CLIENT_HOTRESET_STATUS0);
+ } while ((val & wdata_0) != wdata_0);
+
+ /* Wait one more time due to SW WAR for known legacy issue */
+ do {
+ val = tegra_mc_read_32(MC_CLIENT_HOTRESET_STATUS0);
+ } while ((val & wdata_0) != wdata_0);
+
+ val = tegra_mc_read_32(MC_CLIENT_HOTRESET_CTRL1);
+ assert(val == MC_CLIENT_HOTRESET_CTRL1_RESET_VAL);
+
+ wdata_1 = MC_CLIENT_HOTRESET_CTRL1_SDMMC4A_FLUSH_ENB |
+ MC_CLIENT_HOTRESET_CTRL1_APE_FLUSH_ENB |
+ MC_CLIENT_HOTRESET_CTRL1_SE_FLUSH_ENB |
+ MC_CLIENT_HOTRESET_CTRL1_ETR_FLUSH_ENB |
+ MC_CLIENT_HOTRESET_CTRL1_AXIS_FLUSH_ENB |
+ MC_CLIENT_HOTRESET_CTRL1_EQOS_FLUSH_ENB |
+ MC_CLIENT_HOTRESET_CTRL1_UFSHC_FLUSH_ENB |
+ MC_CLIENT_HOTRESET_CTRL1_BPMP_FLUSH_ENB |
+ MC_CLIENT_HOTRESET_CTRL1_AON_FLUSH_ENB |
+ MC_CLIENT_HOTRESET_CTRL1_SCE_FLUSH_ENB;
+ tegra_mc_write_32(MC_CLIENT_HOTRESET_CTRL1, wdata_1);
+
+ /* Wait for HOTRESET STATUS to indicate FLUSH_DONE */
+ do {
+ val = tegra_mc_read_32(MC_CLIENT_HOTRESET_STATUS1);
+ } while ((val & wdata_1) != wdata_1);
+
+ /* Wait one more time due to SW WAR for known legacy issue */
+ do {
+ val = tegra_mc_read_32(MC_CLIENT_HOTRESET_STATUS1);
+ } while ((val & wdata_1) != wdata_1);
+
+ /*
+ * Change MEMTYPE_OVERRIDE from SO_DEV -> PASSTHRU for boot and
+ * strongly ordered MSS clients. ROC needs to be single point
+ * of control on overriding the memory type. So, remove TSA's
+ * memtype override.
+ */
+#if ENABLE_AFI_DEVICE
+ mc_set_tsa_passthrough(AFIW);
+#endif
+ mc_set_tsa_passthrough(HDAW);
+ mc_set_tsa_passthrough(SATAW);
+ mc_set_tsa_passthrough(XUSB_HOSTW);
+ mc_set_tsa_passthrough(XUSB_DEVW);
+ mc_set_tsa_passthrough(SDMMCWAB);
+ mc_set_tsa_passthrough(APEDMAW);
+ mc_set_tsa_passthrough(SESWR);
+ mc_set_tsa_passthrough(ETRW);
+ mc_set_tsa_passthrough(AXISW);
+ mc_set_tsa_passthrough(EQOSW);
+ mc_set_tsa_passthrough(UFSHCW);
+ mc_set_tsa_passthrough(BPMPDMAW);
+ mc_set_tsa_passthrough(AONDMAW);
+ mc_set_tsa_passthrough(SCEDMAW);
+
+ /*
+ * Change COH_PATH_OVERRIDE_SO_DEV from NO_OVERRIDE -> FORCE_COHERENT
+ * for boot and strongly ordered MSS clients. This steers all sodev
+ * transactions to ROC.
+ *
+ * Change AXID_OVERRIDE/AXID_OVERRIDE_SO_DEV only for some clients
+ * whose AXI IDs we know and trust.
+ */
+
+#if ENABLE_AFI_DEVICE
+ /* Match AFIW */
+ mc_set_forced_coherent_so_dev_cfg(AFIR);
+#endif
+
+ /*
+ * See bug 200131110 comment #35 - there are no normal requests
+ * and AWID for SO/DEV requests is hardcoded in RTL for a
+ * particular PCIE controller
+ */
+#if ENABLE_AFI_DEVICE
+ mc_set_forced_coherent_so_dev_cfg(AFIW);
+#endif
+ mc_set_forced_coherent_cfg(HDAR);
+ mc_set_forced_coherent_cfg(HDAW);
+ mc_set_forced_coherent_cfg(SATAR);
+ mc_set_forced_coherent_cfg(SATAW);
+ mc_set_forced_coherent_cfg(XUSB_HOSTR);
+ mc_set_forced_coherent_cfg(XUSB_HOSTW);
+ mc_set_forced_coherent_cfg(XUSB_DEVR);
+ mc_set_forced_coherent_cfg(XUSB_DEVW);
+ mc_set_forced_coherent_cfg(SDMMCRAB);
+ mc_set_forced_coherent_cfg(SDMMCWAB);
+
+ /* Match APEDMAW */
+ mc_set_forced_coherent_axid_so_dev_cfg(APEDMAR);
+
+ /*
+ * See bug 200131110 comment #35 - AWID for normal requests
+ * is 0x80 and AWID for SO/DEV requests is 0x01
+ */
+ mc_set_forced_coherent_axid_so_dev_cfg(APEDMAW);
+ mc_set_forced_coherent_cfg(SESRD);
+ mc_set_forced_coherent_cfg(SESWR);
+ mc_set_forced_coherent_cfg(ETRR);
+ mc_set_forced_coherent_cfg(ETRW);
+ mc_set_forced_coherent_cfg(AXISR);
+ mc_set_forced_coherent_cfg(AXISW);
+ mc_set_forced_coherent_cfg(EQOSR);
+ mc_set_forced_coherent_cfg(EQOSW);
+ mc_set_forced_coherent_cfg(UFSHCR);
+ mc_set_forced_coherent_cfg(UFSHCW);
+ mc_set_forced_coherent_cfg(BPMPDMAR);
+ mc_set_forced_coherent_cfg(BPMPDMAW);
+ mc_set_forced_coherent_cfg(AONDMAR);
+ mc_set_forced_coherent_cfg(AONDMAW);
+ mc_set_forced_coherent_cfg(SCEDMAR);
+ mc_set_forced_coherent_cfg(SCEDMAW);
+
+ /*
+ * At this point, ordering can occur at ROC. So, remove PCFIFO's
+ * control over ordering requests.
+ *
+ * Change PCFIFO_*_ORDERED_CLIENT from ORDERED -> UNORDERED for
+ * boot and strongly ordered MSS clients
+ */
+ val = MC_PCFIFO_CLIENT_CONFIG1_RESET_VAL &
+#if ENABLE_AFI_DEVICE
+ mc_set_pcfifo_unordered_boot_so_mss(1, AFIW) &
+#endif
+ mc_set_pcfifo_unordered_boot_so_mss(1, HDAW) &
+ mc_set_pcfifo_unordered_boot_so_mss(1, SATAW);
+ tegra_mc_write_32(MC_PCFIFO_CLIENT_CONFIG1, val);
+
+ val = MC_PCFIFO_CLIENT_CONFIG2_RESET_VAL &
+ mc_set_pcfifo_unordered_boot_so_mss(2, XUSB_HOSTW) &
+ mc_set_pcfifo_unordered_boot_so_mss(2, XUSB_DEVW);
+ tegra_mc_write_32(MC_PCFIFO_CLIENT_CONFIG2, val);
+
+ val = MC_PCFIFO_CLIENT_CONFIG3_RESET_VAL &
+ mc_set_pcfifo_unordered_boot_so_mss(3, SDMMCWAB);
+ tegra_mc_write_32(MC_PCFIFO_CLIENT_CONFIG3, val);
+
+ val = MC_PCFIFO_CLIENT_CONFIG4_RESET_VAL &
+ mc_set_pcfifo_unordered_boot_so_mss(4, SESWR) &
+ mc_set_pcfifo_unordered_boot_so_mss(4, ETRW) &
+ mc_set_pcfifo_unordered_boot_so_mss(4, AXISW) &
+ mc_set_pcfifo_unordered_boot_so_mss(4, EQOSW) &
+ mc_set_pcfifo_unordered_boot_so_mss(4, UFSHCW) &
+ mc_set_pcfifo_unordered_boot_so_mss(4, BPMPDMAW) &
+ mc_set_pcfifo_unordered_boot_so_mss(4, AONDMAW) &
+ mc_set_pcfifo_unordered_boot_so_mss(4, SCEDMAW);
+ tegra_mc_write_32(MC_PCFIFO_CLIENT_CONFIG4, val);
+
+ val = MC_PCFIFO_CLIENT_CONFIG5_RESET_VAL &
+ mc_set_pcfifo_unordered_boot_so_mss(5, APEDMAW);
+ tegra_mc_write_32(MC_PCFIFO_CLIENT_CONFIG5, val);
+
+ /*
+ * At this point, ordering can occur at ROC. SMMU need not
+ * reorder any requests.
+ *
+ * Change SMMU_*_ORDERED_CLIENT from ORDERED -> UNORDERED
+ * for boot and strongly ordered MSS clients
+ */
+ val = MC_SMMU_CLIENT_CONFIG1_RESET_VAL &
+#if ENABLE_AFI_DEVICE
+ mc_set_smmu_unordered_boot_so_mss(1, AFIW) &
+#endif
+ mc_set_smmu_unordered_boot_so_mss(1, HDAW) &
+ mc_set_smmu_unordered_boot_so_mss(1, SATAW);
+ tegra_mc_write_32(MC_SMMU_CLIENT_CONFIG1, val);
+
+ val = MC_SMMU_CLIENT_CONFIG2_RESET_VAL &
+ mc_set_smmu_unordered_boot_so_mss(2, XUSB_HOSTW) &
+ mc_set_smmu_unordered_boot_so_mss(2, XUSB_DEVW);
+ tegra_mc_write_32(MC_SMMU_CLIENT_CONFIG2, val);
+
+ val = MC_SMMU_CLIENT_CONFIG3_RESET_VAL &
+ mc_set_smmu_unordered_boot_so_mss(3, SDMMCWAB);
+ tegra_mc_write_32(MC_SMMU_CLIENT_CONFIG3, val);
+
+ val = MC_SMMU_CLIENT_CONFIG4_RESET_VAL &
+ mc_set_smmu_unordered_boot_so_mss(4, SESWR) &
+ mc_set_smmu_unordered_boot_so_mss(4, ETRW) &
+ mc_set_smmu_unordered_boot_so_mss(4, AXISW) &
+ mc_set_smmu_unordered_boot_so_mss(4, EQOSW) &
+ mc_set_smmu_unordered_boot_so_mss(4, UFSHCW) &
+ mc_set_smmu_unordered_boot_so_mss(4, BPMPDMAW) &
+ mc_set_smmu_unordered_boot_so_mss(4, AONDMAW) &
+ mc_set_smmu_unordered_boot_so_mss(4, SCEDMAW);
+ tegra_mc_write_32(MC_SMMU_CLIENT_CONFIG4, val);
+
+ val = MC_SMMU_CLIENT_CONFIG5_RESET_VAL &
+ mc_set_smmu_unordered_boot_so_mss(5, APEDMAW);
+ tegra_mc_write_32(MC_SMMU_CLIENT_CONFIG5, val);
+
+ /*
+ * Deassert HOTRESET FLUSH_ENABLE for boot and strongly ordered MSS
+ * clients to allow memory traffic from all clients to start passing
+ * through ROC
+ */
+ val = tegra_mc_read_32(MC_CLIENT_HOTRESET_CTRL0);
+ assert(val == wdata_0);
+
+ wdata_0 = MC_CLIENT_HOTRESET_CTRL0_RESET_VAL;
+ tegra_mc_write_32(MC_CLIENT_HOTRESET_CTRL0, wdata_0);
+
+ val = tegra_mc_read_32(MC_CLIENT_HOTRESET_CTRL1);
+ assert(val == wdata_1);
+
+ wdata_1 = MC_CLIENT_HOTRESET_CTRL1_RESET_VAL;
+ tegra_mc_write_32(MC_CLIENT_HOTRESET_CTRL1, wdata_1);
+
+#endif
+}
+
+static void tegra_memctrl_set_overrides(void)
+{
+ tegra_mc_settings_t *plat_mc_settings = tegra_get_mc_settings();
+ const mc_txn_override_cfg_t *mc_txn_override_cfgs;
+ uint32_t num_txn_override_cfgs;
+ uint32_t i, val;
+
+ /* Get the settings from the platform */
+ assert(plat_mc_settings);
+ mc_txn_override_cfgs = plat_mc_settings->txn_override_cfg;
+ num_txn_override_cfgs = plat_mc_settings->num_txn_override_cfgs;
+
+ /*
+ * Set the MC_TXN_OVERRIDE registers for write clients.
+ */
+ if ((tegra_chipid_is_t186()) &&
+ (!tegra_platform_is_silicon() ||
+ (tegra_platform_is_silicon() && (tegra_get_chipid_minor() == 1)))) {
+
+ /*
+ * GPU and NVENC settings for Tegra186 simulation and
+ * Silicon rev. A01
+ */
+ val = tegra_mc_read_32(MC_TXN_OVERRIDE_CONFIG_GPUSWR);
+ val &= ~MC_TXN_OVERRIDE_CGID_TAG_MASK;
+ tegra_mc_write_32(MC_TXN_OVERRIDE_CONFIG_GPUSWR,
+ val | MC_TXN_OVERRIDE_CGID_TAG_ZERO);
+
+ val = tegra_mc_read_32(MC_TXN_OVERRIDE_CONFIG_GPUSWR2);
+ val &= ~MC_TXN_OVERRIDE_CGID_TAG_MASK;
+ tegra_mc_write_32(MC_TXN_OVERRIDE_CONFIG_GPUSWR2,
+ val | MC_TXN_OVERRIDE_CGID_TAG_ZERO);
+
+ val = tegra_mc_read_32(MC_TXN_OVERRIDE_CONFIG_NVENCSWR);
+ val &= ~MC_TXN_OVERRIDE_CGID_TAG_MASK;
+ tegra_mc_write_32(MC_TXN_OVERRIDE_CONFIG_NVENCSWR,
+ val | MC_TXN_OVERRIDE_CGID_TAG_CLIENT_AXI_ID);
+
+ } else {
+
+ /*
+ * Settings for Tegra186 silicon rev. A02 and onwards.
+ */
+ for (i = 0; i < num_txn_override_cfgs; i++) {
+ val = tegra_mc_read_32(mc_txn_override_cfgs[i].offset);
+ val &= ~MC_TXN_OVERRIDE_CGID_TAG_MASK;
+ tegra_mc_write_32(mc_txn_override_cfgs[i].offset,
+ val | mc_txn_override_cfgs[i].cgid_tag);
+ }
+ }
+}
+
+/*
+ * Init Memory controller during boot.
+ */
+void tegra_memctrl_setup(void)
+{
+ uint32_t val;
+ const uint32_t *mc_streamid_override_regs;
+ uint32_t num_streamid_override_regs;
+ const mc_streamid_security_cfg_t *mc_streamid_sec_cfgs;
+ uint32_t num_streamid_sec_cfgs;
+ tegra_mc_settings_t *plat_mc_settings = tegra_get_mc_settings();
+ uint32_t i;
+
+ INFO("Tegra Memory Controller (v2)\n");
+
+#if ENABLE_SMMU_DEVICE
+ /* Program the SMMU pagesize */
+ tegra_smmu_init();
+#endif
+ /* Get the settings from the platform */
+ assert(plat_mc_settings);
+ mc_streamid_override_regs = plat_mc_settings->streamid_override_cfg;
+ num_streamid_override_regs = plat_mc_settings->num_streamid_override_cfgs;
+ mc_streamid_sec_cfgs = plat_mc_settings->streamid_security_cfg;
+ num_streamid_sec_cfgs = plat_mc_settings->num_streamid_security_cfgs;
+
+ /* Program all the Stream ID overrides */
+ for (i = 0; i < num_streamid_override_regs; i++)
+ tegra_mc_streamid_write_32(mc_streamid_override_regs[i],
+ MC_STREAM_ID_MAX);
+
+ /* Program the security config settings for all Stream IDs */
+ for (i = 0; i < num_streamid_sec_cfgs; i++) {
+ val = mc_streamid_sec_cfgs[i].override_enable << 16 |
+ mc_streamid_sec_cfgs[i].override_client_inputs << 8 |
+ mc_streamid_sec_cfgs[i].override_client_ns_flag << 0;
+ tegra_mc_streamid_write_32(mc_streamid_sec_cfgs[i].offset, val);
+ }
+
+ /*
+ * All requests at boot time, and certain requests during
+ * normal run time, are physically addressed and must bypass
+ * the SMMU. The client hub logic implements a hardware bypass
+ * path around the Translation Buffer Units (TBU). During
+ * boot-time, the SMMU_BYPASS_CTRL register (which defaults to
+ * TBU_BYPASS mode) will be used to steer all requests around
+ * the uninitialized TBUs. During normal operation, this register
+ * is locked into TBU_BYPASS_SID config, which routes requests
+ * with special StreamID 0x7f on the bypass path and all others
+ * through the selected TBU. This is done to disable SMMU Bypass
+ * mode, as it could be used to circumvent SMMU security checks.
+ */
+ tegra_mc_write_32(MC_SMMU_BYPASS_CONFIG,
+ MC_SMMU_BYPASS_CONFIG_SETTINGS);
+
+ /*
+ * Re-configure MSS to allow ROC to deal with ordering of the
+ * Memory Controller traffic. This is needed as the Memory Controller
+ * boots with MSS having all control, but ROC provides a performance
+ * boost as compared to MSS.
+ */
+ tegra_memctrl_reconfig_mss_clients();
+
+ /* Program overrides for MC transactions */
+ tegra_memctrl_set_overrides();
+}
+
+/*
+ * Restore Memory Controller settings after "System Suspend"
+ */
+void tegra_memctrl_restore_settings(void)
+{
+ /*
+ * Re-configure MSS to allow ROC to deal with ordering of the
+ * Memory Controller traffic. This is needed as the Memory Controller
+ * resets during System Suspend with MSS having all control, but ROC
+ * provides a performance boost as compared to MSS.
+ */
+ tegra_memctrl_reconfig_mss_clients();
+
+ /* Program overrides for MC transactions */
+ tegra_memctrl_set_overrides();
+
+ /* video memory carveout region */
+ if (video_mem_base) {
+ tegra_mc_write_32(MC_VIDEO_PROTECT_BASE_LO,
+ (uint32_t)video_mem_base);
+ tegra_mc_write_32(MC_VIDEO_PROTECT_BASE_HI,
+ (uint32_t)(video_mem_base >> 32));
+ tegra_mc_write_32(MC_VIDEO_PROTECT_SIZE_MB, video_mem_size_mb);
+
+ /*
+ * MCE propagates the VideoMem configuration values across the
+ * CCPLEX.
+ */
+ mce_update_gsc_videomem();
+ }
+}
+
+/*
+ * Secure the BL31 DRAM aperture.
+ *
+ * phys_base = physical base of TZDRAM aperture
+ * size_in_bytes = size of aperture in bytes
+ */
+void tegra_memctrl_tzdram_setup(uint64_t phys_base, uint32_t size_in_bytes)
+{
+ /*
+ * Setup the Memory controller to allow only secure accesses to
+ * the TZDRAM carveout
+ */
+ INFO("Configuring TrustZone DRAM Memory Carveout\n");
+
+ tegra_mc_write_32(MC_SECURITY_CFG0_0, (uint32_t)phys_base);
+ tegra_mc_write_32(MC_SECURITY_CFG3_0, (uint32_t)(phys_base >> 32));
+ tegra_mc_write_32(MC_SECURITY_CFG1_0, size_in_bytes >> 20);
+
+ /*
+ * When TZ encryption is enabled, the TZDRAM aperture must be set up
+ * before the CPU accesses the TZ carveout; otherwise the CPU will
+ * fetch non-decrypted data. So save the TZDRAM settings for restore
+ * by the SC7 resume firmware.
+ */
+
+ mmio_write_32(TEGRA_SCRATCH_BASE + SECURE_SCRATCH_RSV55_LO,
+ tegra_mc_read_32(MC_SECURITY_CFG0_0));
+ mmio_write_32(TEGRA_SCRATCH_BASE + SECURE_SCRATCH_RSV55_HI,
+ tegra_mc_read_32(MC_SECURITY_CFG3_0));
+ mmio_write_32(TEGRA_SCRATCH_BASE + SECURE_SCRATCH_RSV54_HI,
+ tegra_mc_read_32(MC_SECURITY_CFG1_0));
+
+ /*
+ * MCE propagates the security configuration values across the
+ * CCPLEX.
+ */
+ mce_update_gsc_tzdram();
+}
+
+/*
+ * Secure the BL31 TZRAM aperture.
+ *
+ * phys_base = physical base of TZRAM aperture
+ * size_in_bytes = size of aperture in bytes
+ */
+void tegra_memctrl_tzram_setup(uint64_t phys_base, uint32_t size_in_bytes)
+{
+ uint32_t index;
+ uint32_t total_128kb_blocks = size_in_bytes >> 17;
+ uint32_t residual_4kb_blocks = (size_in_bytes & (uint32_t)0x1FFFF) >> 12;
+ uint32_t val;
+
+ INFO("Configuring TrustZone SRAM Memory Carveout\n");
+
+ /*
+ * Reset the access configuration registers to restrict access
+ * to the TZRAM aperture
+ */
+ for (index = MC_TZRAM_CLIENT_ACCESS_CFG0;
+ index < ((uint32_t)MC_TZRAM_CARVEOUT_CFG + (uint32_t)MC_GSC_CONFIG_REGS_SIZE);
+ index += 4U) {
+ tegra_mc_write_32(index, 0);
+ }
+
+ /*
+ * Set the TZRAM base. TZRAM base must be 4k aligned, at least.
+ */
+ assert((phys_base & (uint64_t)0xFFF) == 0U);
+ tegra_mc_write_32(MC_TZRAM_BASE_LO, (uint32_t)phys_base);
+ tegra_mc_write_32(MC_TZRAM_BASE_HI,
+ (uint32_t)(phys_base >> 32) & MC_GSC_BASE_HI_MASK);
+
+ /*
+ * Set the TZRAM size
+ *
+ * total size = (number of 128KB blocks) + (number of remaining 4KB
+ * blocks)
+ *
+ */
+ val = (residual_4kb_blocks << MC_GSC_SIZE_RANGE_4KB_SHIFT) |
+ total_128kb_blocks;
+ tegra_mc_write_32(MC_TZRAM_SIZE, val);
+
+ /*
+ * Lock the configuration settings by disabling TZ-only lock
+ * and locking the configuration against any future changes
+ * at all.
+ */
+ val = tegra_mc_read_32(MC_TZRAM_CARVEOUT_CFG);
+ val &= ~MC_GSC_ENABLE_TZ_LOCK_BIT;
+ val |= MC_GSC_LOCK_CFG_SETTINGS_BIT;
+ tegra_mc_write_32(MC_TZRAM_CARVEOUT_CFG, val);
+
+ /*
+ * MCE propagates the security configuration values across the
+ * CCPLEX.
+ */
+ mce_update_gsc_tzram();
+}
+
+static void tegra_lock_videomem_nonoverlap(uint64_t phys_base,
+ uint64_t size_in_bytes)
+{
+ uint32_t index;
+ uint64_t total_128kb_blocks = size_in_bytes >> 17;
+ uint64_t residual_4kb_blocks = (size_in_bytes & (uint32_t)0x1FFFF) >> 12;
+ uint64_t val;
+
+ /*
+ * Reset the access configuration registers to restrict access to
+ * old Videomem aperture
+ */
+ for (index = MC_VIDEO_PROTECT_CLEAR_ACCESS_CFG0;
+ index < ((uint32_t)MC_VIDEO_PROTECT_CLEAR_ACCESS_CFG0 + (uint32_t)MC_GSC_CONFIG_REGS_SIZE);
+ index += 4U) {
+ tegra_mc_write_32(index, 0);
+ }
+
+ /*
+ * Set the base. It must be 4k aligned, at least.
+ */
+ assert((phys_base & (uint64_t)0xFFF) == 0U);
+ tegra_mc_write_32(MC_VIDEO_PROTECT_CLEAR_BASE_LO, (uint32_t)phys_base);
+ tegra_mc_write_32(MC_VIDEO_PROTECT_CLEAR_BASE_HI,
+ (uint32_t)(phys_base >> 32) & (uint32_t)MC_GSC_BASE_HI_MASK);
+
+ /*
+ * Set the aperture size
+ *
+ * total size = (number of 128KB blocks) + (number of remaining 4KB
+ * blocks)
+ *
+ */
+ val = (uint32_t)((residual_4kb_blocks << MC_GSC_SIZE_RANGE_4KB_SHIFT) |
+ total_128kb_blocks);
+ tegra_mc_write_32(MC_VIDEO_PROTECT_CLEAR_SIZE, (uint32_t)val);
+
+ /*
+ * Lock the configuration settings by enabling TZ-only lock and
+ * locking the configuration against any future changes from NS
+ * world.
+ */
+ tegra_mc_write_32(MC_VIDEO_PROTECT_CLEAR_CFG,
+ (uint32_t)MC_GSC_ENABLE_TZ_LOCK_BIT);
+
+ /*
+ * MCE propagates the GSC configuration values across the
+ * CCPLEX.
+ */
+}
+
+static void tegra_unlock_videomem_nonoverlap(void)
+{
+ /* Clear the base */
+ tegra_mc_write_32(MC_VIDEO_PROTECT_CLEAR_BASE_LO, 0);
+ tegra_mc_write_32(MC_VIDEO_PROTECT_CLEAR_BASE_HI, 0);
+
+ /* Clear the size */
+ tegra_mc_write_32(MC_VIDEO_PROTECT_CLEAR_SIZE, 0);
+}
+
+static void tegra_clear_videomem(uintptr_t non_overlap_area_start,
+ unsigned long long non_overlap_area_size)
+{
+ /*
+ * Map the NS memory first, clean it and then unmap it.
+ */
+ mmap_add_dynamic_region(non_overlap_area_start, /* PA */
+ non_overlap_area_start, /* VA */
+ non_overlap_area_size, /* size */
+ MT_NS | MT_RW | MT_EXECUTE_NEVER); /* attrs */
+
+ zero_normalmem((void *)non_overlap_area_start, non_overlap_area_size);
+ flush_dcache_range(non_overlap_area_start, non_overlap_area_size);
+
+ mmap_remove_dynamic_region(non_overlap_area_start,
+ non_overlap_area_size);
+}
+
+/*
+ * Program the Video Memory carveout region
+ *
+ * phys_base = physical base of aperture
+ * size_in_bytes = size of aperture in bytes
+ */
+void tegra_memctrl_videomem_setup(uint64_t phys_base, uint32_t size_in_bytes)
+{
+ uintptr_t vmem_end_old = video_mem_base + (video_mem_size_mb << 20);
+ uintptr_t vmem_end_new = phys_base + size_in_bytes;
+ unsigned long long non_overlap_area_size;
+
+ /*
+ * Setup the Memory controller to restrict CPU accesses to the Video
+ * Memory region
+ */
+ INFO("Configuring Video Memory Carveout\n");
+
+ /*
+ * Configure Memory Controller directly for the first time.
+ */
+ if (video_mem_base == 0U)
+ goto done;
+
+ /*
+ * Lock the non-overlapping memory being cleared so that other masters
+ * do not accidentally write to it. The memory would be unlocked once
+ * the non overlapping region is cleared and the new memory
+ * settings take effect.
+ */
+ tegra_lock_videomem_nonoverlap(video_mem_base,
+ video_mem_size_mb << 20);
+
+ /*
+ * Clear the old regions now being exposed. The following cases
+ * can occur -
+ *
+ * 1. clear whole old region (no overlap with new region)
+ * 2. clear old sub-region below new base
+ * 3. clear old sub-region above new end
+ */
+ INFO("Cleaning previous Video Memory Carveout\n");
+
+ if (phys_base > vmem_end_old || video_mem_base > vmem_end_new) {
+ tegra_clear_videomem(video_mem_base,
+ (uint64_t)video_mem_size_mb << 20);
+ } else {
+ if (video_mem_base < phys_base) {
+ non_overlap_area_size = phys_base - video_mem_base;
+ tegra_clear_videomem(video_mem_base, non_overlap_area_size);
+ }
+ if (vmem_end_old > vmem_end_new) {
+ non_overlap_area_size = vmem_end_old - vmem_end_new;
+ tegra_clear_videomem(vmem_end_new, non_overlap_area_size);
+ }
+ }
+
+done:
+ /* program the Videomem aperture */
+ tegra_mc_write_32(MC_VIDEO_PROTECT_BASE_LO, (uint32_t)phys_base);
+ tegra_mc_write_32(MC_VIDEO_PROTECT_BASE_HI,
+ (uint32_t)(phys_base >> 32));
+ tegra_mc_write_32(MC_VIDEO_PROTECT_SIZE_MB, size_in_bytes >> 20);
+
+ /* unlock the previous locked nonoverlapping aperture */
+ tegra_unlock_videomem_nonoverlap();
+
+ /* store new values */
+ video_mem_base = phys_base;
+ video_mem_size_mb = size_in_bytes >> 20;
+
+ /*
+ * MCE propagates the VideoMem configuration values across the
+ * CCPLEX.
+ */
+ mce_update_gsc_videomem();
+}
+
+/*
+ * This feature exists only for v1 of the Tegra Memory Controller.
+ */
+void tegra_memctrl_disable_ahb_redirection(void)
+{
+ ; /* do nothing */
+}
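
As an aside, the carveout size encoding used by tegra_memctrl_tzram_setup and tegra_lock_videomem_nonoverlap above splits a byte count into 128KB blocks plus a residual count of 4KB blocks. A small illustrative sketch of that arithmetic (not part of this patch; it relies on MC_GSC_SIZE_RANGE_4KB_SHIFT from memctrl_v2.h in this series):

#include <stdint.h>
#include <memctrl_v2.h>

/* illustration only: mirrors the size split performed by the driver above */
static uint32_t mc_gsc_encode_size_sketch(uint32_t size_in_bytes)
{
	uint32_t blocks_128kb = size_in_bytes >> 17;
	uint32_t blocks_4kb   = (size_in_bytes & 0x1FFFFU) >> 12;

	return (blocks_4kb << MC_GSC_SIZE_RANGE_4KB_SHIFT) | blocks_128kb;
}

/* e.g. 0x62000 bytes (392KB) -> 3 x 128KB blocks + 2 x 4KB blocks */
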
diff --git a/plat/nvidia/tegra/common/drivers/pmc/pmc.c b/plat/nvidia/tegra/common/drivers/pmc/pmc.c
new file mode 100644
index 00000000..d8827e10
--- /dev/null
+++ b/plat/nvidia/tegra/common/drivers/pmc/pmc.c
@@ -0,0 +1,112 @@
+/*
+ * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <debug.h>
+#include <mmio.h>
+#include <pmc.h>
+#include <tegra_def.h>
+
+#define RESET_ENABLE 0x10U
+
+/* Module IDs used during power ungate procedure */
+static const uint32_t pmc_cpu_powergate_id[4] = {
+ 0, /* CPU 0 */
+ 9, /* CPU 1 */
+ 10, /* CPU 2 */
+ 11 /* CPU 3 */
+};
+
+/*******************************************************************************
+ * Power ungate CPU to start the boot process. CPU reset vectors must be
+ * populated before calling this function.
+ ******************************************************************************/
+void tegra_pmc_cpu_on(int32_t cpu)
+{
+ uint32_t val;
+
+ /*
+ * Check if CPU is already power ungated
+ */
+ val = tegra_pmc_read_32(PMC_PWRGATE_STATUS);
+ if ((val & (1U << pmc_cpu_powergate_id[cpu])) == 0U) {
+ /*
+ * The PMC deasserts the START bit when it starts the power
+ * ungate process. Loop till no power toggle is in progress.
+ */
+ do {
+ val = tegra_pmc_read_32(PMC_PWRGATE_TOGGLE);
+ } while ((val & PMC_TOGGLE_START) != 0U);
+
+ /*
+ * Start the power ungate procedure
+ */
+ val = pmc_cpu_powergate_id[cpu] | PMC_TOGGLE_START;
+ tegra_pmc_write_32(PMC_PWRGATE_TOGGLE, val);
+
+ /*
+ * The PMC deasserts the START bit when it starts the power
+ * ungate process. Loop till the powergate START bit is deasserted.
+ */
+ do {
+ val = tegra_pmc_read_32(PMC_PWRGATE_TOGGLE);
+ } while ((val & (1U << 8)) != 0U);
+
+ /* loop till the CPU is power ungated */
+ do {
+ val = tegra_pmc_read_32(PMC_PWRGATE_STATUS);
+ } while ((val & (1U << pmc_cpu_powergate_id[cpu])) == 0U);
+ }
+}
+
+/*******************************************************************************
+ * Setup CPU vectors for resume from deep sleep
+ ******************************************************************************/
+void tegra_pmc_cpu_setup(uint64_t reset_addr)
+{
+ uint32_t val;
+
+ tegra_pmc_write_32(PMC_SECURE_SCRATCH34,
+ ((uint32_t)reset_addr & 0xFFFFFFFFU) | 1U);
+ val = (uint32_t)(reset_addr >> 32U);
+ tegra_pmc_write_32(PMC_SECURE_SCRATCH35, val & 0x7FFU);
+}
+
+/*******************************************************************************
+ * Lock CPU vectors to restrict further writes
+ ******************************************************************************/
+void tegra_pmc_lock_cpu_vectors(void)
+{
+ uint32_t val;
+
+ /* lock PMC_SECURE_SCRATCH22 */
+ val = tegra_pmc_read_32(PMC_SECURE_DISABLE2);
+ val |= PMC_SECURE_DISABLE2_WRITE22_ON;
+ tegra_pmc_write_32(PMC_SECURE_DISABLE2, val);
+
+ /* lock PMC_SECURE_SCRATCH34/35 */
+ val = tegra_pmc_read_32(PMC_SECURE_DISABLE3);
+ val |= (PMC_SECURE_DISABLE3_WRITE34_ON |
+ PMC_SECURE_DISABLE3_WRITE35_ON);
+ tegra_pmc_write_32(PMC_SECURE_DISABLE3, val);
+}
+
+/*******************************************************************************
+ * Restart the system
+ ******************************************************************************/
+__dead2 void tegra_pmc_system_reset(void)
+{
+ uint32_t reg;
+
+ reg = tegra_pmc_read_32(PMC_CONFIG);
+ reg |= RESET_ENABLE; /* restart */
+ tegra_pmc_write_32(PMC_CONFIG, reg);
+ wfi();
+
+ ERROR("Tegra System Reset: operation not handled.\n");
+ panic();
+}
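
For context, a hedged sketch (assumed helper name, not part of this patch) of how the PMC helpers above combine when a platform powers on a secondary core and prepares its resume vector:

#include <stdint.h>
#include <pmc.h>

/* hypothetical helper -- the name is an assumption, not from this patch */
static void power_on_secondary_cpu_sketch(int32_t cpu, uint64_t sec_entrypoint)
{
	/* program the scratch-register vector used on resume from deep sleep */
	tegra_pmc_cpu_setup(sec_entrypoint);

	/* block further writes to the vector scratch registers */
	tegra_pmc_lock_cpu_vectors();

	/* power-ungate the core once its vectors are in place */
	tegra_pmc_cpu_on(cpu);
}
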
diff --git a/plat/nvidia/tegra/common/drivers/smmu/smmu.c b/plat/nvidia/tegra/common/drivers/smmu/smmu.c
new file mode 100644
index 00000000..fa3f0002
--- /dev/null
+++ b/plat/nvidia/tegra/common/drivers/smmu/smmu.c
@@ -0,0 +1,160 @@
+/*
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <bl_common.h>
+#include <debug.h>
+#include <platform_def.h>
+#include <smmu.h>
+#include <string.h>
+#include <tegra_private.h>
+
+/* SMMU IDs currently supported by the driver */
+enum {
+ TEGRA_SMMU0,
+ TEGRA_SMMU1,
+ TEGRA_SMMU2
+};
+
+static uint32_t tegra_smmu_read_32(uint32_t smmu_id, uint32_t off)
+{
+#if defined(TEGRA_SMMU0_BASE)
+ if (smmu_id == TEGRA_SMMU0)
+ return mmio_read_32(TEGRA_SMMU0_BASE + off);
+#endif
+
+#if defined(TEGRA_SMMU1_BASE)
+ if (smmu_id == TEGRA_SMMU1)
+ return mmio_read_32(TEGRA_SMMU1_BASE + off);
+#endif
+
+#if defined(TEGRA_SMMU2_BASE)
+ if (smmu_id == TEGRA_SMMU2)
+ return mmio_read_32(TEGRA_SMMU2_BASE + off);
+#endif
+
+ return 0;
+}
+
+static void tegra_smmu_write_32(uint32_t smmu_id,
+ uint32_t off, uint32_t val)
+{
+#if defined(TEGRA_SMMU0_BASE)
+ if (smmu_id == TEGRA_SMMU0)
+ mmio_write_32(TEGRA_SMMU0_BASE + off, val);
+#endif
+
+#if defined(TEGRA_SMMU1_BASE)
+ if (smmu_id == TEGRA_SMMU1)
+ mmio_write_32(TEGRA_SMMU1_BASE + off, val);
+#endif
+
+#if defined(TEGRA_SMMU2_BASE)
+ if (smmu_id == TEGRA_SMMU2)
+ mmio_write_32(TEGRA_SMMU2_BASE + off, val);
+#endif
+}
+
+/*
+ * Save SMMU settings before "System Suspend" to TZDRAM
+ */
+void tegra_smmu_save_context(uint64_t smmu_ctx_addr)
+{
+ uint32_t i, num_entries = 0;
+ smmu_regs_t *smmu_ctx_regs;
+ plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
+ uint64_t tzdram_base = params_from_bl2->tzdram_base;
+ uint64_t tzdram_end = tzdram_base + params_from_bl2->tzdram_size;
+ uint32_t reg_id1, pgshift, cb_size;
+
+	/* sanity check SMMU settings */
+ reg_id1 = mmio_read_32((TEGRA_SMMU0_BASE + SMMU_GNSR0_IDR1));
+ pgshift = (reg_id1 & ID1_PAGESIZE) ? 16 : 12;
+ cb_size = (2 << pgshift) * \
+ (1 << (((reg_id1 >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1));
+
+ assert(!((pgshift != PGSHIFT) || (cb_size != CB_SIZE)));
+ assert((smmu_ctx_addr >= tzdram_base) && (smmu_ctx_addr <= tzdram_end));
+
+ /* get SMMU context table */
+ smmu_ctx_regs = plat_get_smmu_ctx();
+ assert(smmu_ctx_regs);
+
+ /*
+	 * smmu_ctx_regs[0].val holds the number of entries in the context
+	 * table, excluding the terminating sentinel entry. Sanity check the
+	 * table size before we start the context save operation.
+ */
+ while (smmu_ctx_regs[num_entries].val != 0xFFFFFFFFU) {
+ num_entries++;
+ }
+
+ /* panic if the sizes do not match */
+ if (num_entries != smmu_ctx_regs[0].val)
+ panic();
+
+ /* save SMMU register values */
+ for (i = 1; i < num_entries; i++)
+ smmu_ctx_regs[i].val = mmio_read_32(smmu_ctx_regs[i].reg);
+
+ /* increment by 1 to take care of the last entry */
+ num_entries++;
+
+ /* Save SMMU config settings */
+ memcpy16((void *)(uintptr_t)smmu_ctx_addr, (void *)smmu_ctx_regs,
+ (sizeof(smmu_regs_t) * num_entries));
+
+ /* save the SMMU table address */
+ mmio_write_32(TEGRA_SCRATCH_BASE + SECURE_SCRATCH_RSV11_LO,
+ (uint32_t)smmu_ctx_addr);
+ mmio_write_32(TEGRA_SCRATCH_BASE + SECURE_SCRATCH_RSV11_HI,
+ (uint32_t)(smmu_ctx_addr >> 32));
+}
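The context-save loop above expects a specific table layout from plat_get_smmu_ctx(): entry 0 carries the entry count (sentinel excluded) in .val, the remaining entries name the registers to save, and a .val of 0xFFFFFFFF terminates the table. A minimal sketch of such a table follows; the two register offsets are placeholders, not real SMMU register addresses.

static smmu_regs_t soc_smmu_ctx_regs[] = {
	{ .reg = 0U, .val = 3U },				/* entry count */
	{ .reg = TEGRA_SMMU0_BASE + 0x10U, .val = 0U },		/* placeholder register */
	{ .reg = TEGRA_SMMU0_BASE + 0x14U, .val = 0U },		/* placeholder register */
	{ .reg = 0U, .val = 0xFFFFFFFFU },			/* end-of-table marker */
};

smmu_regs_t *plat_get_smmu_ctx(void)
{
	return soc_smmu_ctx_regs;
}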
+
+#define SMMU_NUM_CONTEXTS 64
+#define SMMU_CONTEXT_BANK_MAX_IDX 64
+
+/*
+ * Init SMMU during boot or "System Suspend" exit
+ */
+void tegra_smmu_init(void)
+{
+ uint32_t val, cb_idx, smmu_id, ctx_base;
+
+ for (smmu_id = 0; smmu_id < NUM_SMMU_DEVICES; smmu_id++) {
+ /* Program the SMMU pagesize and reset CACHE_LOCK bit */
+ val = tegra_smmu_read_32(smmu_id, SMMU_GSR0_SECURE_ACR);
+ val |= SMMU_GSR0_PGSIZE_64K;
+ val &= ~SMMU_ACR_CACHE_LOCK_ENABLE_BIT;
+ tegra_smmu_write_32(smmu_id, SMMU_GSR0_SECURE_ACR, val);
+
+ /* reset CACHE LOCK bit for NS Aux. Config. Register */
+ val = tegra_smmu_read_32(smmu_id, SMMU_GNSR_ACR);
+ val &= ~SMMU_ACR_CACHE_LOCK_ENABLE_BIT;
+ tegra_smmu_write_32(smmu_id, SMMU_GNSR_ACR, val);
+
+ /* disable TCU prefetch for all contexts */
+ ctx_base = (SMMU_GSR0_PGSIZE_64K * SMMU_NUM_CONTEXTS)
+ + SMMU_CBn_ACTLR;
+ for (cb_idx = 0; cb_idx < SMMU_CONTEXT_BANK_MAX_IDX; cb_idx++) {
+ val = tegra_smmu_read_32(smmu_id,
+ ctx_base + (SMMU_GSR0_PGSIZE_64K * cb_idx));
+ val &= ~SMMU_CBn_ACTLR_CPRE_BIT;
+ tegra_smmu_write_32(smmu_id, ctx_base +
+ (SMMU_GSR0_PGSIZE_64K * cb_idx), val);
+ }
+
+ /* set CACHE LOCK bit for NS Aux. Config. Register */
+ val = tegra_smmu_read_32(smmu_id, SMMU_GNSR_ACR);
+ val |= SMMU_ACR_CACHE_LOCK_ENABLE_BIT;
+ tegra_smmu_write_32(smmu_id, SMMU_GNSR_ACR, val);
+
+ /* set CACHE LOCK bit for S Aux. Config. Register */
+ val = tegra_smmu_read_32(smmu_id, SMMU_GSR0_SECURE_ACR);
+ val |= SMMU_ACR_CACHE_LOCK_ENABLE_BIT;
+ tegra_smmu_write_32(smmu_id, SMMU_GSR0_SECURE_ACR, val);
+ }
+}
diff --git a/plat/nvidia/tegra/common/tegra_bl31_setup.c b/plat/nvidia/tegra/common/tegra_bl31_setup.c
new file mode 100644
index 00000000..d5d3d530
--- /dev/null
+++ b/plat/nvidia/tegra/common/tegra_bl31_setup.c
@@ -0,0 +1,381 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <bl31.h>
+#include <bl_common.h>
+#include <console.h>
+#include <cortex_a53.h>
+#include <cortex_a57.h>
+#include <debug.h>
+#include <denver.h>
+#include <errno.h>
+#include <memctrl.h>
+#include <mmio.h>
+#include <platform.h>
+#include <platform_def.h>
+#include <stddef.h>
+#include <string.h>
+#include <tegra_def.h>
+#include <tegra_private.h>
+
+extern void zeromem16(void *mem, unsigned int length);
+
+/*******************************************************************************
+ * Declarations of linker defined symbols which will help us find the layout
+ * of trusted SRAM
+ ******************************************************************************/
+extern unsigned long __TEXT_START__;
+extern unsigned long __TEXT_END__;
+extern unsigned long __RW_START__;
+extern unsigned long __RW_END__;
+extern unsigned long __RODATA_START__;
+extern unsigned long __RODATA_END__;
+extern unsigned long __BL31_END__;
+
+extern uint64_t tegra_bl31_phys_base;
+extern uint64_t tegra_console_base;
+
+/*
+ * The following constants identify the extents of the code, RO data and RW
+ * data regions and the limit of the BL3-1 image. These addresses are used by
+ * the MMU setup code and therefore they must be page-aligned. It is the
+ * responsibility of the linker script to ensure that the __RW_START__,
+ * __RODATA_START__, __RODATA_END__ & __BL31_END__ linker symbols refer to
+ * page-aligned addresses.
+ */
+#define BL31_RW_START (unsigned long)(&__RW_START__)
+#define BL31_RW_END (unsigned long)(&__RW_END__)
+#define BL31_RODATA_BASE (unsigned long)(&__RODATA_START__)
+#define BL31_RODATA_END (unsigned long)(&__RODATA_END__)
+#define BL31_END (unsigned long)(&__BL31_END__)
+
+static entry_point_info_t bl33_image_ep_info, bl32_image_ep_info;
+static plat_params_from_bl2_t plat_bl31_params_from_bl2 = {
+ .tzdram_size = (uint64_t)TZDRAM_SIZE
+};
+
+/*******************************************************************************
+ * This variable holds the non-secure image entry address
+ ******************************************************************************/
+extern uint64_t ns_image_entrypoint;
+
+/*******************************************************************************
+ * The following platform setup functions are weakly defined. They
+ * provide typical implementations that will be overridden by a SoC.
+ ******************************************************************************/
+#pragma weak plat_early_platform_setup
+#pragma weak plat_get_bl31_params
+#pragma weak plat_get_bl31_plat_params
+
+void plat_early_platform_setup(void)
+{
+ ; /* do nothing */
+}
+
+bl31_params_t *plat_get_bl31_params(void)
+{
+ return NULL;
+}
+
+plat_params_from_bl2_t *plat_get_bl31_plat_params(void)
+{
+ return NULL;
+}
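For RESET_TO_BL31 configurations, a SoC port can override the weak handlers above to supply the information BL2 would otherwise have passed. A minimal sketch, with placeholder TZDRAM and UART values:

static plat_params_from_bl2_t soc_plat_params = {
	.tzdram_base = 0xB0000000ULL,	/* placeholder TZDRAM base */
	.tzdram_size = 0x00400000ULL,	/* placeholder 4MB aperture */
	.uart_id = 1,			/* placeholder UART instance */
};

plat_params_from_bl2_t *plat_get_bl31_plat_params(void)
{
	return &soc_plat_params;
}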
+
+/*******************************************************************************
+ * Return a pointer to the 'entry_point_info' structure of the next image for
+ * security state specified. BL33 corresponds to the non-secure image type
+ * while BL32 corresponds to the secure image type.
+ ******************************************************************************/
+entry_point_info_t *bl31_plat_get_next_image_ep_info(uint32_t type)
+{
+ if (type == NON_SECURE)
+ return &bl33_image_ep_info;
+
+ /* return BL32 entry point info if it is valid */
+ if (type == SECURE && bl32_image_ep_info.pc)
+ return &bl32_image_ep_info;
+
+ return NULL;
+}
+
+/*******************************************************************************
+ * Return a pointer to the 'plat_params_from_bl2_t' structure. The BL2 image
+ * passes this platform specific information.
+ ******************************************************************************/
+plat_params_from_bl2_t *bl31_get_plat_params(void)
+{
+ return &plat_bl31_params_from_bl2;
+}
+
+/*******************************************************************************
+ * Perform any BL31 specific platform actions. Populate the BL33 and BL32 image
+ * info.
+ ******************************************************************************/
+void bl31_early_platform_setup(bl31_params_t *from_bl2,
+ void *plat_params_from_bl2)
+{
+ plat_params_from_bl2_t *plat_params =
+ (plat_params_from_bl2_t *)plat_params_from_bl2;
+#if LOG_LEVEL >= LOG_LEVEL_INFO
+ int impl = (read_midr() >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK;
+#endif
+ image_info_t bl32_img_info = { {0} };
+ uint64_t tzdram_start, tzdram_end, bl32_start, bl32_end;
+
+ /*
+ * For RESET_TO_BL31 systems, BL31 is the first bootloader to run so
+ * there's no argument to relay from a previous bootloader. Platforms
+ * might use custom ways to get arguments, so provide handlers which
+ * they can override.
+ */
+ if (from_bl2 == NULL)
+ from_bl2 = plat_get_bl31_params();
+ if (plat_params == NULL)
+ plat_params = plat_get_bl31_plat_params();
+
+ /*
+ * Copy BL3-3, BL3-2 entry point information.
+ * They are stored in Secure RAM, in BL2's address space.
+ */
+ assert(from_bl2);
+ assert(from_bl2->bl33_ep_info);
+ bl33_image_ep_info = *from_bl2->bl33_ep_info;
+
+ if (from_bl2->bl32_ep_info)
+ bl32_image_ep_info = *from_bl2->bl32_ep_info;
+
+ /*
+ * Parse platform specific parameters - TZDRAM aperture base and size
+ */
+ assert(plat_params);
+ plat_bl31_params_from_bl2.tzdram_base = plat_params->tzdram_base;
+ plat_bl31_params_from_bl2.tzdram_size = plat_params->tzdram_size;
+ plat_bl31_params_from_bl2.uart_id = plat_params->uart_id;
+
+ /*
+	 * BL31 must execute from either the TZDRAM base or the TZSRAM base.
+	 * Add an explicit check here.
+ */
+ if ((plat_bl31_params_from_bl2.tzdram_base != BL31_BASE) &&
+ (TEGRA_TZRAM_BASE != BL31_BASE))
+ panic();
+
+ /*
+ * Get the base address of the UART controller to be used for the
+ * console
+ */
+ tegra_console_base = plat_get_console_from_id(plat_params->uart_id);
+
+ if (tegra_console_base != (uint64_t)0) {
+ /*
+ * Configure the UART port to be used as the console
+ */
+ console_init(tegra_console_base, TEGRA_BOOT_UART_CLK_IN_HZ,
+ TEGRA_CONSOLE_BAUDRATE);
+ }
+
+ /*
+ * Initialize delay timer
+ */
+ tegra_delay_timer_init();
+
+ /*
+ * Do initial security configuration to allow DRAM/device access.
+ */
+ tegra_memctrl_tzdram_setup(plat_bl31_params_from_bl2.tzdram_base,
+ plat_bl31_params_from_bl2.tzdram_size);
+
+ /*
+ * The previous bootloader might not have placed the BL32 image
+ * inside the TZDRAM. We check the BL32 image info to find out
+ * the base/PC values and relocate the image if necessary.
+ */
+ if (from_bl2->bl32_image_info) {
+
+ bl32_img_info = *from_bl2->bl32_image_info;
+
+ /* Relocate BL32 if it resides outside of the TZDRAM */
+ tzdram_start = plat_bl31_params_from_bl2.tzdram_base;
+ tzdram_end = plat_bl31_params_from_bl2.tzdram_base +
+ plat_bl31_params_from_bl2.tzdram_size;
+ bl32_start = bl32_img_info.image_base;
+ bl32_end = bl32_img_info.image_base + bl32_img_info.image_size;
+
+ assert(tzdram_end > tzdram_start);
+ assert(bl32_end > bl32_start);
+ assert(bl32_image_ep_info.pc > tzdram_start);
+ assert(bl32_image_ep_info.pc < tzdram_end);
+
+ /* relocate BL32 */
+ if (bl32_start >= tzdram_end || bl32_end <= tzdram_start) {
+
+ INFO("Relocate BL32 to TZDRAM\n");
+
+ memcpy16((void *)(uintptr_t)bl32_image_ep_info.pc,
+ (void *)(uintptr_t)bl32_start,
+ bl32_img_info.image_size);
+
+ /* clean up non-secure intermediate buffer */
+ zeromem16((void *)(uintptr_t)bl32_start,
+ bl32_img_info.image_size);
+ }
+ }
+
+ /* Early platform setup for Tegra SoCs */
+ plat_early_platform_setup();
+
+ INFO("BL3-1: Boot CPU: %s Processor [%lx]\n", (impl == DENVER_IMPL) ?
+ "Denver" : "ARM", read_mpidr());
+}
+
+/*******************************************************************************
+ * Initialize the gic, configure the SCR.
+ ******************************************************************************/
+void bl31_platform_setup(void)
+{
+ uint32_t tmp_reg;
+
+ /* Initialize the gic cpu and distributor interfaces */
+ plat_gic_setup();
+
+ /*
+ * Setup secondary CPU POR infrastructure.
+ */
+ plat_secondary_setup();
+
+ /*
+ * Initial Memory Controller configuration.
+ */
+ tegra_memctrl_setup();
+
+ /*
+ * Set up the TZRAM memory aperture to allow only secure world
+ * access
+ */
+ tegra_memctrl_tzram_setup(TEGRA_TZRAM_BASE, TEGRA_TZRAM_SIZE);
+
+ /* Set the next EL to be AArch64 */
+ tmp_reg = SCR_RES1_BITS | SCR_RW_BIT;
+ write_scr(tmp_reg);
+
+ INFO("BL3-1: Tegra platform setup complete\n");
+}
+
+/*******************************************************************************
+ * Perform any BL3-1 platform runtime setup prior to BL3-1 cold boot exit
+ ******************************************************************************/
+void bl31_plat_runtime_setup(void)
+{
+ /*
+ * During boot, USB3 and flash media (SDMMC/SATA) devices need
+ * access to IRAM. Because these clients connect to the MC and
+ * do not have a direct path to the IRAM, the MC implements AHB
+ * redirection during boot to allow path to IRAM. In this mode
+	 * redirection during boot to provide a path to the IRAM. In this mode
+ * to the AHB bus, allowing access to the IRAM. This mode must be
+ * disabled before we jump to the non-secure world.
+ */
+ tegra_memctrl_disable_ahb_redirection();
+}
+
+/*******************************************************************************
+ * Perform the very early platform specific architectural setup here. At the
+ * moment this only initializes the MMU in a quick and dirty way.
+ ******************************************************************************/
+void bl31_plat_arch_setup(void)
+{
+ unsigned long rw_start = BL31_RW_START;
+ unsigned long rw_size = BL31_RW_END - BL31_RW_START;
+ unsigned long rodata_start = BL31_RODATA_BASE;
+ unsigned long rodata_size = BL31_RODATA_END - BL31_RODATA_BASE;
+ unsigned long code_base = (unsigned long)(&__TEXT_START__);
+ unsigned long code_size = (unsigned long)(&__TEXT_END__) - code_base;
+ const mmap_region_t *plat_mmio_map = NULL;
+#if USE_COHERENT_MEM
+ unsigned long coh_start, coh_size;
+#endif
+ plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
+
+ /* add memory regions */
+ mmap_add_region(rw_start, rw_start,
+ rw_size,
+ MT_MEMORY | MT_RW | MT_SECURE);
+ mmap_add_region(rodata_start, rodata_start,
+ rodata_size,
+ MT_RO_DATA | MT_SECURE);
+ mmap_add_region(code_base, code_base,
+ code_size,
+ MT_CODE | MT_SECURE);
+
+ /* map TZDRAM used by BL31 as coherent memory */
+ if (TEGRA_TZRAM_BASE == tegra_bl31_phys_base) {
+ mmap_add_region(params_from_bl2->tzdram_base,
+ params_from_bl2->tzdram_base,
+ BL31_SIZE,
+ MT_DEVICE | MT_RW | MT_SECURE);
+ }
+
+#if USE_COHERENT_MEM
+	coh_start = BL_COHERENT_RAM_BASE;
+ coh_size = BL_COHERENT_RAM_END - BL_COHERENT_RAM_BASE;
+
+ mmap_add_region(coh_start, coh_start,
+ coh_size,
+ MT_DEVICE | MT_RW | MT_SECURE);
+#endif
+
+ /* map on-chip free running uS timer */
+ mmap_add_region(page_align((uint64_t)TEGRA_TMRUS_BASE, 0),
+ page_align((uint64_t)TEGRA_TMRUS_BASE, 0),
+ (uint64_t)TEGRA_TMRUS_SIZE,
+ MT_DEVICE | MT_RO | MT_SECURE);
+
+ /* add MMIO space */
+ plat_mmio_map = plat_get_mmio_map();
+ if (plat_mmio_map)
+ mmap_add(plat_mmio_map);
+ else
+ WARN("MMIO map not available\n");
+
+ /* set up translation tables */
+ init_xlat_tables();
+
+ /* enable the MMU */
+ enable_mmu_el3(0);
+
+ INFO("BL3-1: Tegra: MMU enabled\n");
+}
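plat_get_mmio_map() above is expected to return a zero-terminated mmap_region_t array describing the SoC's device apertures. A minimal sketch, where SOC_MMIO_BASE and SOC_MMIO_SIZE are placeholders for the SoC's register window:

static const mmap_region_t soc_mmio_map[] = {
	MAP_REGION_FLAT(SOC_MMIO_BASE, SOC_MMIO_SIZE,
			MT_DEVICE | MT_RW | MT_SECURE),
	{0}
};

const mmap_region_t *plat_get_mmio_map(void)
{
	return soc_mmio_map;
}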
+
+/*******************************************************************************
+ * Check if the given NS DRAM range is valid
+ ******************************************************************************/
+int bl31_check_ns_address(uint64_t base, uint64_t size_in_bytes)
+{
+ uint64_t end = base + size_in_bytes;
+
+ /*
+ * Check if the NS DRAM address is valid
+ */
+ if ((base < TEGRA_DRAM_BASE) || (end > TEGRA_DRAM_END)) {
+ ERROR("NS address is out-of-bounds!\n");
+ return -EFAULT;
+ }
+
+ /*
+ * TZDRAM aperture contains the BL31 and BL32 images, so we need
+ * to check if the NS DRAM range overlaps the TZDRAM aperture.
+ */
+ if ((base < TZDRAM_END) && (end > tegra_bl31_phys_base)) {
+ ERROR("NS address overlaps TZDRAM!\n");
+ return -ENOTSUP;
+ }
+
+ /* valid NS address */
+ return 0;
+}
diff --git a/plat/nvidia/tegra/common/tegra_common.mk b/plat/nvidia/tegra/common/tegra_common.mk
new file mode 100644
index 00000000..8f6c7b83
--- /dev/null
+++ b/plat/nvidia/tegra/common/tegra_common.mk
@@ -0,0 +1,29 @@
+#
+# Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+PLAT_INCLUDES := -Iplat/nvidia/tegra/include/drivers \
+ -Iplat/nvidia/tegra/include \
+ -Iplat/nvidia/tegra/include/${TARGET_SOC}
+
+include lib/xlat_tables_v2/xlat_tables.mk
+PLAT_BL_COMMON_SOURCES += ${XLAT_TABLES_LIB_SRCS}
+
+COMMON_DIR := plat/nvidia/tegra/common
+
+BL31_SOURCES += drivers/arm/gic/gic_v2.c \
+ drivers/console/aarch64/console.S \
+ drivers/delay_timer/delay_timer.c \
+ drivers/ti/uart/aarch64/16550_console.S \
+ ${COMMON_DIR}/aarch64/tegra_helpers.S \
+ ${COMMON_DIR}/drivers/pmc/pmc.c \
+ ${COMMON_DIR}/tegra_bl31_setup.c \
+ ${COMMON_DIR}/tegra_delay_timer.c \
+ ${COMMON_DIR}/tegra_fiq_glue.c \
+ ${COMMON_DIR}/tegra_gic.c \
+ ${COMMON_DIR}/tegra_platform.c \
+ ${COMMON_DIR}/tegra_pm.c \
+ ${COMMON_DIR}/tegra_sip_calls.c \
+ ${COMMON_DIR}/tegra_topology.c
diff --git a/plat/nvidia/tegra/common/tegra_delay_timer.c b/plat/nvidia/tegra/common/tegra_delay_timer.c
new file mode 100644
index 00000000..3bd2b0e2
--- /dev/null
+++ b/plat/nvidia/tegra/common/tegra_delay_timer.c
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <delay_timer.h>
+#include <mmio.h>
+#include <tegra_def.h>
+#include <tegra_private.h>
+
+static uint32_t tegra_timerus_get_value(void)
+{
+ return mmio_read_32(TEGRA_TMRUS_BASE);
+}
+
+/*
+ * Initialise the on-chip free-running microsecond counter as the
+ * delay timer.
+ */
+void tegra_delay_timer_init(void)
+{
+ static const timer_ops_t tegra_timer_ops = {
+ .get_timer_value = tegra_timerus_get_value,
+ .clk_mult = 1,
+ .clk_div = 1,
+ };
+
+ timer_init(&tegra_timer_ops);
+}
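Once timer_init() has run (tegra_delay_timer_init() is called from bl31_early_platform_setup() above), the generic udelay()/mdelay() helpers from delay_timer.h can be used for busy-wait delays. A trivial sketch; the 100us settle time is only a placeholder.

static void soc_device_settle(void)
{
	/* each tick of the TMRUS counter is 1us (clk_mult/clk_div = 1/1) */
	udelay(100);
}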
diff --git a/plat/nvidia/tegra/common/tegra_fiq_glue.c b/plat/nvidia/tegra/common/tegra_fiq_glue.c
new file mode 100644
index 00000000..2f439587
--- /dev/null
+++ b/plat/nvidia/tegra/common/tegra_fiq_glue.c
@@ -0,0 +1,139 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <bakery_lock.h>
+#include <bl_common.h>
+#include <context.h>
+#include <context_mgmt.h>
+#include <debug.h>
+#include <denver.h>
+#include <gic_v2.h>
+#include <interrupt_mgmt.h>
+#include <platform.h>
+#include <tegra_def.h>
+#include <tegra_private.h>
+
+static DEFINE_BAKERY_LOCK(tegra_fiq_lock);
+
+/*******************************************************************************
+ * Static variables
+ ******************************************************************************/
+static uint64_t ns_fiq_handler_addr;
+static uint32_t fiq_handler_active;
+static pcpu_fiq_state_t fiq_state[PLATFORM_CORE_COUNT];
+
+/*******************************************************************************
+ * Handler for FIQ interrupts
+ ******************************************************************************/
+static uint64_t tegra_fiq_interrupt_handler(uint32_t id,
+ uint32_t flags,
+ void *handle,
+ void *cookie)
+{
+ cpu_context_t *ctx = cm_get_context(NON_SECURE);
+ el3_state_t *el3state_ctx = get_el3state_ctx(ctx);
+ uint32_t cpu = plat_my_core_pos();
+ uint32_t irq;
+
+ bakery_lock_get(&tegra_fiq_lock);
+
+ /*
+ * The FIQ was generated when the execution was in the non-secure
+ * world. Save the context registers to start with.
+ */
+ cm_el1_sysregs_context_save(NON_SECURE);
+
+ /*
+ * Save elr_el3 and spsr_el3 from the saved context, and overwrite
+ * the context with the NS fiq_handler_addr and SPSR value.
+ */
+ fiq_state[cpu].elr_el3 = read_ctx_reg((el3state_ctx), (uint32_t)(CTX_ELR_EL3));
+ fiq_state[cpu].spsr_el3 = read_ctx_reg((el3state_ctx), (uint32_t)(CTX_SPSR_EL3));
+
+ /*
+ * Set the new ELR to continue execution in the NS world using the
+ * FIQ handler registered earlier.
+ */
+ assert(ns_fiq_handler_addr);
+ write_ctx_reg((el3state_ctx), (uint32_t)(CTX_ELR_EL3), (ns_fiq_handler_addr));
+
+ /*
+ * Mark this interrupt as complete to avoid a FIQ storm.
+ */
+ irq = plat_ic_acknowledge_interrupt();
+ if (irq < 1022U) {
+ plat_ic_end_of_interrupt(irq);
+ }
+
+ bakery_lock_release(&tegra_fiq_lock);
+
+ return 0;
+}
+
+/*******************************************************************************
+ * Setup handler for FIQ interrupts
+ ******************************************************************************/
+void tegra_fiq_handler_setup(void)
+{
+ uint32_t flags;
+ int32_t rc;
+
+ /* return if already registered */
+ if (fiq_handler_active == 0U) {
+ /*
+ * Register an interrupt handler for FIQ interrupts generated for
+ * NS interrupt sources
+ */
+ flags = 0U;
+ set_interrupt_rm_flag((flags), (NON_SECURE));
+ rc = register_interrupt_type_handler(INTR_TYPE_EL3,
+ tegra_fiq_interrupt_handler,
+ flags);
+ if (rc != 0) {
+ panic();
+ }
+
+ /* handler is now active */
+ fiq_handler_active = 1;
+ }
+}
+
+/*******************************************************************************
+ * Validate and store NS world's entrypoint for FIQ interrupts
+ ******************************************************************************/
+void tegra_fiq_set_ns_entrypoint(uint64_t entrypoint)
+{
+ ns_fiq_handler_addr = entrypoint;
+}
+
+/*******************************************************************************
+ * Handler to return the NS EL1/EL0 CPU context
+ ******************************************************************************/
+int32_t tegra_fiq_get_intr_context(void)
+{
+ cpu_context_t *ctx = cm_get_context(NON_SECURE);
+ gp_regs_t *gpregs_ctx = get_gpregs_ctx(ctx);
+ const el1_sys_regs_t *el1state_ctx = get_sysregs_ctx(ctx);
+ uint32_t cpu = plat_my_core_pos();
+ uint64_t val;
+
+ /*
+ * We store the ELR_EL3, SPSR_EL3, SP_EL0 and SP_EL1 registers so
+ * that el3_exit() sends these values back to the NS world.
+ */
+ write_ctx_reg((gpregs_ctx), (uint32_t)(CTX_GPREG_X0), (fiq_state[cpu].elr_el3));
+ write_ctx_reg((gpregs_ctx), (uint32_t)(CTX_GPREG_X1), (fiq_state[cpu].spsr_el3));
+
+ val = read_ctx_reg((gpregs_ctx), (uint32_t)(CTX_GPREG_SP_EL0));
+ write_ctx_reg((gpregs_ctx), (uint32_t)(CTX_GPREG_X2), (val));
+
+ val = read_ctx_reg((el1state_ctx), (uint32_t)(CTX_SP_EL1));
+ write_ctx_reg((gpregs_ctx), (uint32_t)(CTX_GPREG_X3), (val));
+
+ return 0;
+}
diff --git a/plat/nvidia/tegra/common/tegra_gic.c b/plat/nvidia/tegra/common/tegra_gic.c
new file mode 100644
index 00000000..3ace554d
--- /dev/null
+++ b/plat/nvidia/tegra/common/tegra_gic.c
@@ -0,0 +1,332 @@
+/*
+ * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <arm_gic.h>
+#include <assert.h>
+#include <bl_common.h>
+#include <debug.h>
+#include <gic_v2.h>
+#include <interrupt_mgmt.h>
+#include <platform.h>
+#include <stdint.h>
+#include <tegra_def.h>
+#include <tegra_private.h>
+
+/* Value used to initialize Non-Secure IRQ priorities four at a time */
+#define GICD_IPRIORITYR_DEF_VAL \
+ (GIC_HIGHEST_NS_PRIORITY | \
+ (GIC_HIGHEST_NS_PRIORITY << 8) | \
+ (GIC_HIGHEST_NS_PRIORITY << 16) | \
+ (GIC_HIGHEST_NS_PRIORITY << 24))
+
+static const irq_sec_cfg_t *g_irq_sec_ptr;
+static uint32_t g_num_irqs;
+
+/*******************************************************************************
+ * Place the cpu interface in a state where it can never make a cpu exit wfi as
+ * a result of an asserted interrupt. This is critical for powering down a cpu.
+ ******************************************************************************/
+void tegra_gic_cpuif_deactivate(void)
+{
+ uint32_t val;
+
+ /* Disable secure, non-secure interrupts and disable their bypass */
+ val = gicc_read_ctlr(TEGRA_GICC_BASE);
+ val &= ~(ENABLE_GRP0 | ENABLE_GRP1);
+ val |= FIQ_BYP_DIS_GRP1 | FIQ_BYP_DIS_GRP0;
+ val |= IRQ_BYP_DIS_GRP0 | IRQ_BYP_DIS_GRP1;
+ gicc_write_ctlr(TEGRA_GICC_BASE, val);
+}
+
+/*******************************************************************************
+ * Enable secure interrupts and set the priority mask register to allow all
+ * interrupts to trickle in.
+ ******************************************************************************/
+static void tegra_gic_cpuif_setup(uint32_t gicc_base)
+{
+ uint32_t val;
+
+ val = ENABLE_GRP0 | ENABLE_GRP1 | FIQ_EN | FIQ_BYP_DIS_GRP0;
+ val |= IRQ_BYP_DIS_GRP0 | FIQ_BYP_DIS_GRP1 | IRQ_BYP_DIS_GRP1;
+
+ gicc_write_ctlr(gicc_base, val);
+ gicc_write_pmr(gicc_base, GIC_PRI_MASK);
+}
+
+/*******************************************************************************
+ * Per cpu gic distributor setup which will be done by all cpus after a cold
+ * boot/hotplug. It marks the PPIs/SGIs as non-secure and programs their
+ * priorities.
+ ******************************************************************************/
+static void tegra_gic_pcpu_distif_setup(uint32_t gicd_base)
+{
+ uint32_t index, sec_ppi_sgi_mask = 0;
+
+ assert(gicd_base != 0U);
+
+ /* Setup PPI priorities doing four at a time */
+ for (index = 0U; index < 32U; index += 4U) {
+ gicd_write_ipriorityr(gicd_base, index,
+ GICD_IPRIORITYR_DEF_VAL);
+ }
+
+ /*
+ * Invert the bitmask to create a mask for non-secure PPIs and
+ * SGIs. Program the GICD_IGROUPR0 with this bit mask. This write will
+ * update the GICR_IGROUPR0 as well in case we are running on a GICv3
+ * system. This is critical if GICD_CTLR.ARE_NS=1.
+ */
+ gicd_write_igroupr(gicd_base, 0, ~sec_ppi_sgi_mask);
+}
+
+/*******************************************************************************
+ * Global gic distributor setup which will be done by the primary cpu after a
+ * cold boot. It marks out the non secure SPIs, PPIs & SGIs and enables them.
+ * It then enables the secure GIC distributor interface.
+ ******************************************************************************/
+static void tegra_gic_distif_setup(uint32_t gicd_base)
+{
+ uint32_t index, num_ints, irq_num;
+ uint8_t target_cpus;
+ uint32_t val;
+
+ /*
+ * Mark out non-secure interrupts. Calculate number of
+ * IGROUPR registers to consider. Will be equal to the
+ * number of IT_LINES
+ */
+ num_ints = gicd_read_typer(gicd_base) & IT_LINES_NO_MASK;
+ num_ints = (num_ints + 1U) << 5;
+ for (index = MIN_SPI_ID; index < num_ints; index += 32U) {
+ gicd_write_igroupr(gicd_base, index, 0xFFFFFFFFU);
+ }
+
+ /* Setup SPI priorities doing four at a time */
+ for (index = MIN_SPI_ID; index < num_ints; index += 4U) {
+ gicd_write_ipriorityr(gicd_base, index,
+ GICD_IPRIORITYR_DEF_VAL);
+ }
+
+ /* Configure SPI secure interrupts now */
+ if (g_irq_sec_ptr != NULL) {
+
+ for (index = 0U; index < g_num_irqs; index++) {
+ irq_num = g_irq_sec_ptr[index].irq;
+ target_cpus = (uint8_t)g_irq_sec_ptr[index].target_cpus;
+
+ if (irq_num >= MIN_SPI_ID) {
+
+ /* Configure as a secure interrupt */
+ gicd_clr_igroupr(gicd_base, irq_num);
+
+ /* Configure SPI priority */
+ mmio_write_8((uint64_t)gicd_base +
+ (uint64_t)GICD_IPRIORITYR +
+ (uint64_t)irq_num,
+ GIC_HIGHEST_SEC_PRIORITY &
+ GIC_PRI_MASK);
+
+ /* Configure as level triggered */
+ val = gicd_read_icfgr(gicd_base, irq_num);
+ val |= (3U << ((irq_num & 0xFU) << 1U));
+ gicd_write_icfgr(gicd_base, irq_num, val);
+
+ /* Route SPI to the target CPUs */
+ gicd_set_itargetsr(gicd_base, irq_num,
+ target_cpus);
+
+ /* Enable this interrupt */
+ gicd_set_isenabler(gicd_base, irq_num);
+ }
+ }
+ }
+
+ /*
+	 * Configure the SGIs and PPIs. This is done in a separate function
+ * because each CPU is responsible for initializing its own private
+ * interrupts.
+ */
+ tegra_gic_pcpu_distif_setup(gicd_base);
+
+ /* enable distributor */
+ gicd_write_ctlr(gicd_base, ENABLE_GRP0 | ENABLE_GRP1);
+}
+
+void tegra_gic_setup(const irq_sec_cfg_t *irq_sec_ptr, uint32_t num_irqs)
+{
+ g_irq_sec_ptr = irq_sec_ptr;
+ g_num_irqs = num_irqs;
+
+ tegra_gic_cpuif_setup(TEGRA_GICC_BASE);
+ tegra_gic_distif_setup(TEGRA_GICD_BASE);
+}
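A SoC port supplies the secure interrupt table when it implements plat_gic_setup() (called from bl31_platform_setup()). A minimal sketch; the SPI number and CPU mask below are placeholders.

static const irq_sec_cfg_t soc_sec_irqs[] = {
	{
		.irq = 32U,		/* placeholder SPI number */
		.target_cpus = 0x1U,	/* route to CPU0 */
		.type = INTR_TYPE_EL3,	/* handle in EL3 */
	},
};

void plat_gic_setup(void)
{
	tegra_gic_setup(soc_sec_irqs,
			(uint32_t)(sizeof(soc_sec_irqs) / sizeof(soc_sec_irqs[0])));
}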
+
+/*******************************************************************************
+ * An ARM processor signals interrupt exceptions through the IRQ and FIQ pins.
+ * The interrupt controller knows which pin/line it uses to signal a type of
+ * interrupt. This function provides a common implementation of
+ * plat_interrupt_type_to_line() in an ARM GIC environment for optional re-use
+ * across platforms. It lets the interrupt management framework determine
+ * for a type of interrupt and security state, which line should be used in the
+ * SCR_EL3 to control its routing to EL3. The interrupt line is represented as
+ * the bit position of the IRQ or FIQ bit in the SCR_EL3.
+ ******************************************************************************/
+static uint32_t tegra_gic_interrupt_type_to_line(uint32_t type,
+ uint32_t security_state)
+{
+ assert((type == INTR_TYPE_S_EL1) ||
+ (type == INTR_TYPE_EL3) ||
+ (type == INTR_TYPE_NS));
+
+ assert(sec_state_is_valid(security_state));
+
+ /*
+ * We ignore the security state parameter under the assumption that
+ * both normal and secure worlds are using ARM GICv2. This parameter
+ * will be used when the secure world starts using GICv3.
+ */
+#if ARM_GIC_ARCH == 2
+ return gicv2_interrupt_type_to_line(TEGRA_GICC_BASE, type);
+#else
+#error "Invalid ARM GIC architecture version specified for platform port"
+#endif /* ARM_GIC_ARCH */
+}
+
+#if ARM_GIC_ARCH == 2
+/*******************************************************************************
+ * This function returns the type of the highest priority pending interrupt at
+ * the GIC cpu interface. INTR_TYPE_INVAL is returned when there is no
+ * interrupt pending.
+ ******************************************************************************/
+static uint32_t tegra_gic_get_pending_interrupt_type(void)
+{
+ uint32_t id;
+ uint32_t index;
+ uint32_t ret = INTR_TYPE_NS;
+
+ id = gicc_read_hppir(TEGRA_GICC_BASE) & INT_ID_MASK;
+
+ /* get the interrupt type */
+ if (id < 1022U) {
+ for (index = 0U; index < g_num_irqs; index++) {
+ if (id == g_irq_sec_ptr[index].irq) {
+ ret = g_irq_sec_ptr[index].type;
+ break;
+ }
+ }
+ } else {
+ if (id == GIC_SPURIOUS_INTERRUPT) {
+ ret = INTR_TYPE_INVAL;
+ }
+ }
+
+ return ret;
+}
+
+/*******************************************************************************
+ * This function returns the id of the highest priority pending interrupt at
+ * the GIC cpu interface. INTR_ID_UNAVAILABLE is returned when there is no
+ * interrupt pending.
+ ******************************************************************************/
+static uint32_t tegra_gic_get_pending_interrupt_id(void)
+{
+ uint32_t id, ret;
+
+ id = gicc_read_hppir(TEGRA_GICC_BASE) & INT_ID_MASK;
+
+ if (id < 1022U) {
+ ret = id;
+ } else if (id == 1023U) {
+ ret = 0xFFFFFFFFU; /* INTR_ID_UNAVAILABLE */
+ } else {
+ /*
+ * Find out which non-secure interrupt it is under the assumption that
+ * the GICC_CTLR.AckCtl bit is 0.
+ */
+ ret = gicc_read_ahppir(TEGRA_GICC_BASE) & INT_ID_MASK;
+ }
+
+ return ret;
+}
+
+/*******************************************************************************
+ * This functions reads the GIC cpu interface Interrupt Acknowledge register
+ * to start handling the pending interrupt. It returns the contents of the IAR.
+ ******************************************************************************/
+static uint32_t tegra_gic_acknowledge_interrupt(void)
+{
+ return gicc_read_IAR(TEGRA_GICC_BASE);
+}
+
+/*******************************************************************************
+ * This functions writes the GIC cpu interface End Of Interrupt register with
+ * the passed value to finish handling the active interrupt
+ ******************************************************************************/
+static void tegra_gic_end_of_interrupt(uint32_t id)
+{
+ gicc_write_EOIR(TEGRA_GICC_BASE, id);
+}
+
+/*******************************************************************************
+ * This function returns the type of the interrupt id depending upon the group
+ * this interrupt has been configured under by the interrupt controller i.e.
+ * group0 or group1.
+ ******************************************************************************/
+static uint32_t tegra_gic_get_interrupt_type(uint32_t id)
+{
+ uint32_t group;
+ uint32_t index;
+ uint32_t ret = INTR_TYPE_NS;
+
+ group = gicd_get_igroupr(TEGRA_GICD_BASE, id);
+
+ /* get the interrupt type */
+ if (group == GRP0) {
+ for (index = 0U; index < g_num_irqs; index++) {
+ if (id == g_irq_sec_ptr[index].irq) {
+ ret = g_irq_sec_ptr[index].type;
+ break;
+ }
+ }
+ }
+
+ return ret;
+}
+
+#else
+#error "Invalid ARM GIC architecture version specified for platform port"
+#endif /* ARM_GIC_ARCH */
+
+uint32_t plat_ic_get_pending_interrupt_id(void)
+{
+ return tegra_gic_get_pending_interrupt_id();
+}
+
+uint32_t plat_ic_get_pending_interrupt_type(void)
+{
+ return tegra_gic_get_pending_interrupt_type();
+}
+
+uint32_t plat_ic_acknowledge_interrupt(void)
+{
+ return tegra_gic_acknowledge_interrupt();
+}
+
+uint32_t plat_ic_get_interrupt_type(uint32_t id)
+{
+ return tegra_gic_get_interrupt_type(id);
+}
+
+void plat_ic_end_of_interrupt(uint32_t id)
+{
+ tegra_gic_end_of_interrupt(id);
+}
+
+uint32_t plat_interrupt_type_to_line(uint32_t type,
+ uint32_t security_state)
+{
+ return tegra_gic_interrupt_type_to_line(type, security_state);
+}
diff --git a/plat/nvidia/tegra/common/tegra_platform.c b/plat/nvidia/tegra/common/tegra_platform.c
new file mode 100644
index 00000000..6a906ae0
--- /dev/null
+++ b/plat/nvidia/tegra/common/tegra_platform.c
@@ -0,0 +1,151 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <mmio.h>
+#include <tegra_def.h>
+#include <tegra_platform.h>
+#include <tegra_private.h>
+
+/*******************************************************************************
+ * Tegra platforms
+ ******************************************************************************/
+typedef enum tegra_platform {
+ TEGRA_PLATFORM_SILICON = 0,
+ TEGRA_PLATFORM_QT,
+ TEGRA_PLATFORM_FPGA,
+ TEGRA_PLATFORM_EMULATION,
+ TEGRA_PLATFORM_MAX,
+} tegra_platform_t;
+
+/*******************************************************************************
+ * Tegra macros defining all the SoC minor versions
+ ******************************************************************************/
+#define TEGRA_MINOR_QT 0
+#define TEGRA_MINOR_FPGA 1
+#define TEGRA_MINOR_EMULATION_MIN 2
+#define TEGRA_MINOR_EMULATION_MAX 10
+
+/*******************************************************************************
+ * Tegra major, minor version helper macros
+ ******************************************************************************/
+#define MAJOR_VERSION_SHIFT 0x4
+#define MAJOR_VERSION_MASK 0xF
+#define MINOR_VERSION_SHIFT 0x10
+#define MINOR_VERSION_MASK 0xF
+#define CHIP_ID_SHIFT 8
+#define CHIP_ID_MASK 0xFF
+
+/*******************************************************************************
+ * Tegra chip ID values
+ ******************************************************************************/
+typedef enum tegra_chipid {
+ TEGRA_CHIPID_TEGRA13 = 0x13,
+ TEGRA_CHIPID_TEGRA21 = 0x21,
+ TEGRA_CHIPID_TEGRA18 = 0x18,
+} tegra_chipid_t;
+
+/*
+ * Read the chip ID value
+ */
+static uint32_t tegra_get_chipid(void)
+{
+ return mmio_read_32(TEGRA_MISC_BASE + HARDWARE_REVISION_OFFSET);
+}
+
+/*
+ * Read the chip's major version from chip ID value
+ */
+uint32_t tegra_get_chipid_major(void)
+{
+ return (tegra_get_chipid() >> MAJOR_VERSION_SHIFT) & MAJOR_VERSION_MASK;
+}
+
+/*
+ * Read the chip's minor version from the chip ID value
+ */
+uint32_t tegra_get_chipid_minor(void)
+{
+ return (tegra_get_chipid() >> MINOR_VERSION_SHIFT) & MINOR_VERSION_MASK;
+}
+
+uint8_t tegra_chipid_is_t132(void)
+{
+ uint32_t chip_id = (tegra_get_chipid() >> CHIP_ID_SHIFT) & CHIP_ID_MASK;
+
+ return (chip_id == TEGRA_CHIPID_TEGRA13);
+}
+
+uint8_t tegra_chipid_is_t210(void)
+{
+ uint32_t chip_id = (tegra_get_chipid() >> CHIP_ID_SHIFT) & CHIP_ID_MASK;
+
+ return (chip_id == TEGRA_CHIPID_TEGRA21);
+}
+
+uint8_t tegra_chipid_is_t186(void)
+{
+ uint32_t chip_id = (tegra_get_chipid() >> CHIP_ID_SHIFT) & CHIP_ID_MASK;
+
+ return (chip_id == TEGRA_CHIPID_TEGRA18);
+}
+
+/*
+ * Read the chip ID value and derive the platform
+ */
+static tegra_platform_t tegra_get_platform(void)
+{
+ uint32_t major = tegra_get_chipid_major();
+ uint32_t minor = tegra_get_chipid_minor();
+
+ /* Actual silicon platforms have a non-zero major version */
+ if (major > 0)
+ return TEGRA_PLATFORM_SILICON;
+
+ /*
+ * The minor version number is used by simulation platforms
+ */
+
+ /*
+ * Cadence's QuickTurn emulation system is a Solaris-based
+ * chip emulation system
+ */
+ if (minor == TEGRA_MINOR_QT)
+ return TEGRA_PLATFORM_QT;
+
+ /*
+ * FPGAs are used during early software/hardware development
+ */
+ if (minor == TEGRA_MINOR_FPGA)
+ return TEGRA_PLATFORM_FPGA;
+
+ /* Minor version reserved for other emulation platforms */
+ if ((minor > TEGRA_MINOR_FPGA) && (minor <= TEGRA_MINOR_EMULATION_MAX))
+ return TEGRA_PLATFORM_EMULATION;
+
+ /* unsupported platform */
+ return TEGRA_PLATFORM_MAX;
+}
+
+uint8_t tegra_platform_is_silicon(void)
+{
+ return (tegra_get_platform() == TEGRA_PLATFORM_SILICON);
+}
+
+uint8_t tegra_platform_is_qt(void)
+{
+ return (tegra_get_platform() == TEGRA_PLATFORM_QT);
+}
+
+uint8_t tegra_platform_is_fpga(void)
+{
+ return (tegra_get_platform() == TEGRA_PLATFORM_FPGA);
+}
+
+uint8_t tegra_platform_is_emulation(void)
+{
+ return (tegra_get_platform() == TEGRA_PLATFORM_EMULATION);
+}
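To make the bit layout above concrete, here is a worked decode of a hypothetical raw chip ID value; 0x00002113 is illustrative, not a real register dump, and <assert.h> is assumed for the checks.

static void tegra_chipid_decode_example(void)
{
	uint32_t val = 0x00002113U;

	/* bits [15:8] = chip id, bits [7:4] = major, bits [19:16] = minor */
	assert(((val >> CHIP_ID_SHIFT) & CHIP_ID_MASK) == TEGRA_CHIPID_TEGRA21);
	assert(((val >> MAJOR_VERSION_SHIFT) & MAJOR_VERSION_MASK) == 1U);
	assert(((val >> MINOR_VERSION_SHIFT) & MINOR_VERSION_MASK) == 0U);
}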
diff --git a/plat/nvidia/tegra/common/tegra_pm.c b/plat/nvidia/tegra/common/tegra_pm.c
new file mode 100644
index 00000000..86021ba9
--- /dev/null
+++ b/plat/nvidia/tegra/common/tegra_pm.c
@@ -0,0 +1,384 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <bl_common.h>
+#include <console.h>
+#include <context.h>
+#include <context_mgmt.h>
+#include <debug.h>
+#include <memctrl.h>
+#include <mmio.h>
+#include <platform.h>
+#include <platform_def.h>
+#include <pmc.h>
+#include <psci.h>
+#include <tegra_def.h>
+#include <tegra_private.h>
+
+extern uint64_t tegra_bl31_phys_base;
+extern uint64_t tegra_sec_entry_point;
+extern uint64_t tegra_console_base;
+
+/*
+ * tegra_fake_system_suspend is a boolean flag that selects between the
+ * fake system suspend path and the normal system suspend path. It is set
+ * from the SiP call handler when the kernel issues a SiP call to set the
+ * suspend debug flags.
+ */
+uint8_t tegra_fake_system_suspend;
+
+/*
+ * The following platform setup functions are weakly defined. They
+ * provide typical implementations that will be overridden by a SoC.
+ */
+#pragma weak tegra_soc_pwr_domain_suspend_pwrdown_early
+#pragma weak tegra_soc_pwr_domain_suspend
+#pragma weak tegra_soc_pwr_domain_on
+#pragma weak tegra_soc_pwr_domain_off
+#pragma weak tegra_soc_pwr_domain_on_finish
+#pragma weak tegra_soc_pwr_domain_power_down_wfi
+#pragma weak tegra_soc_prepare_system_reset
+#pragma weak tegra_soc_prepare_system_off
+#pragma weak tegra_soc_get_target_pwr_state
+
+int tegra_soc_pwr_domain_suspend_pwrdown_early(const psci_power_state_t *target_state)
+{
+ return PSCI_E_NOT_SUPPORTED;
+}
+
+int tegra_soc_pwr_domain_suspend(const psci_power_state_t *target_state)
+{
+ return PSCI_E_NOT_SUPPORTED;
+}
+
+int tegra_soc_pwr_domain_on(u_register_t mpidr)
+{
+ return PSCI_E_SUCCESS;
+}
+
+int tegra_soc_pwr_domain_off(const psci_power_state_t *target_state)
+{
+ return PSCI_E_SUCCESS;
+}
+
+int tegra_soc_pwr_domain_on_finish(const psci_power_state_t *target_state)
+{
+ return PSCI_E_SUCCESS;
+}
+
+int tegra_soc_pwr_domain_power_down_wfi(const psci_power_state_t *target_state)
+{
+ return PSCI_E_SUCCESS;
+}
+
+int tegra_soc_prepare_system_reset(void)
+{
+ return PSCI_E_SUCCESS;
+}
+
+__dead2 void tegra_soc_prepare_system_off(void)
+{
+ ERROR("Tegra System Off: operation not handled.\n");
+ panic();
+}
+
+plat_local_state_t tegra_soc_get_target_pwr_state(unsigned int lvl,
+ const plat_local_state_t *states,
+ unsigned int ncpu)
+{
+ plat_local_state_t target = PLAT_MAX_OFF_STATE, temp;
+
+ assert(ncpu);
+
+ do {
+ temp = *states++;
+ if ((temp < target))
+ target = temp;
+ } while (--ncpu);
+
+ return target;
+}
+
+/*******************************************************************************
+ * This handler is called by the PSCI implementation during the `SYSTEM_SUSPEND`
+ * call to get the `power_state` parameter. This allows the platform to encode
+ * the appropriate State-ID field within the `power_state` parameter which can
+ * be utilized in `pwr_domain_suspend()` to suspend to system affinity level.
+******************************************************************************/
+void tegra_get_sys_suspend_power_state(psci_power_state_t *req_state)
+{
+ /* all affinities use system suspend state id */
+ for (uint32_t i = MPIDR_AFFLVL0; i <= PLAT_MAX_PWR_LVL; i++)
+ req_state->pwr_domain_state[i] = PSTATE_ID_SOC_POWERDN;
+}
+
+/*******************************************************************************
+ * Handler called when an affinity instance is about to enter standby.
+ ******************************************************************************/
+void tegra_cpu_standby(plat_local_state_t cpu_state)
+{
+ /*
+ * Enter standby state
+ * dsb is good practice before using wfi to enter low power states
+ */
+ dsb();
+ wfi();
+}
+
+/*******************************************************************************
+ * Handler called when an affinity instance is about to be turned on. The
+ * level and mpidr determine the affinity instance.
+ ******************************************************************************/
+int tegra_pwr_domain_on(u_register_t mpidr)
+{
+ return tegra_soc_pwr_domain_on(mpidr);
+}
+
+/*******************************************************************************
+ * Handler called when a power domain is about to be turned off. The
+ * target_state encodes the power state that each level should transition to.
+ ******************************************************************************/
+void tegra_pwr_domain_off(const psci_power_state_t *target_state)
+{
+ tegra_soc_pwr_domain_off(target_state);
+}
+
+/*******************************************************************************
+ * Handler called when a power domain is about to be suspended. The
+ * target_state encodes the power state that each level should transition to.
+ * This handler is called with SMP and data cache enabled, when
+ * HW_ASSISTED_COHERENCY = 0
+ ******************************************************************************/
+void tegra_pwr_domain_suspend_pwrdown_early(const psci_power_state_t *target_state)
+{
+ tegra_soc_pwr_domain_suspend_pwrdown_early(target_state);
+}
+
+/*******************************************************************************
+ * Handler called when a power domain is about to be suspended. The
+ * target_state encodes the power state that each level should transition to.
+ ******************************************************************************/
+void tegra_pwr_domain_suspend(const psci_power_state_t *target_state)
+{
+ tegra_soc_pwr_domain_suspend(target_state);
+
+ /* Disable console if we are entering deep sleep. */
+ if (target_state->pwr_domain_state[PLAT_MAX_PWR_LVL] ==
+ PSTATE_ID_SOC_POWERDN)
+ console_uninit();
+
+ /* disable GICC */
+ tegra_gic_cpuif_deactivate();
+}
+
+/*******************************************************************************
+ * Handler called at the end of the power domain suspend sequence. The
+ * target_state encodes the power state that each level should transition to.
+ ******************************************************************************/
+__dead2 void tegra_pwr_domain_power_down_wfi(const psci_power_state_t
+ *target_state)
+{
+ uint8_t pwr_state = target_state->pwr_domain_state[PLAT_MAX_PWR_LVL];
+ uint64_t rmr_el3 = 0;
+
+ /* call the chip's power down handler */
+ tegra_soc_pwr_domain_power_down_wfi(target_state);
+
+ /*
+	 * In fake system suspend mode, request a warm reset so that execution
+	 * loops back towards the system suspend exit path instead of entering
+	 * WFI. Otherwise, just call WFI to enter the low power state.
+ */
+ if ((tegra_fake_system_suspend != 0U) &&
+ (pwr_state == (uint8_t)PSTATE_ID_SOC_POWERDN)) {
+
+ /* warm reboot */
+ rmr_el3 = read_rmr_el3();
+ write_rmr_el3(rmr_el3 | RMR_WARM_RESET_CPU);
+
+ } else {
+ /* enter power down state */
+ wfi();
+ }
+
+ /* we can never reach here */
+ panic();
+}
+
+/*******************************************************************************
+ * Handler called when a power domain has just been powered on after
+ * being turned off earlier. The target_state encodes the low power state that
+ * each level has woken up from.
+ ******************************************************************************/
+void tegra_pwr_domain_on_finish(const psci_power_state_t *target_state)
+{
+ plat_params_from_bl2_t *plat_params;
+
+ /*
+ * Initialize the GIC cpu and distributor interfaces
+ */
+ plat_gic_setup();
+
+ /*
+ * Check if we are exiting from deep sleep.
+ */
+ if (target_state->pwr_domain_state[PLAT_MAX_PWR_LVL] ==
+ PSTATE_ID_SOC_POWERDN) {
+
+ /* Initialize the runtime console */
+ if (tegra_console_base != (uint64_t)0) {
+ console_init(tegra_console_base, TEGRA_BOOT_UART_CLK_IN_HZ,
+ TEGRA_CONSOLE_BAUDRATE);
+ }
+
+ /*
+ * Restore Memory Controller settings as it loses state
+ * during system suspend.
+ */
+ tegra_memctrl_restore_settings();
+
+ /*
+ * Security configuration to allow DRAM/device access.
+ */
+ plat_params = bl31_get_plat_params();
+ tegra_memctrl_tzdram_setup(plat_params->tzdram_base,
+ plat_params->tzdram_size);
+
+ /*
+ * Set up the TZRAM memory aperture to allow only secure world
+ * access
+ */
+ tegra_memctrl_tzram_setup(TEGRA_TZRAM_BASE, TEGRA_TZRAM_SIZE);
+ }
+
+ /*
+ * Reset hardware settings.
+ */
+ tegra_soc_pwr_domain_on_finish(target_state);
+}
+
+/*******************************************************************************
+ * Handler called when a power domain has just been powered on after
+ * having been suspended earlier. The target_state encodes the low power state
+ * that each level has woken up from.
+ ******************************************************************************/
+void tegra_pwr_domain_suspend_finish(const psci_power_state_t *target_state)
+{
+ tegra_pwr_domain_on_finish(target_state);
+}
+
+/*******************************************************************************
+ * Handler called when the system wants to be powered off
+ ******************************************************************************/
+__dead2 void tegra_system_off(void)
+{
+ INFO("Powering down system...\n");
+
+ tegra_soc_prepare_system_off();
+}
+
+/*******************************************************************************
+ * Handler called when the system wants to be restarted.
+ ******************************************************************************/
+__dead2 void tegra_system_reset(void)
+{
+ INFO("Restarting system...\n");
+
+ /* per-SoC system reset handler */
+ tegra_soc_prepare_system_reset();
+
+ /*
+ * Program the PMC in order to restart the system.
+ */
+ tegra_pmc_system_reset();
+}
+
+/*******************************************************************************
+ * Handler called to check the validity of the power state parameter.
+ ******************************************************************************/
+int32_t tegra_validate_power_state(unsigned int power_state,
+ psci_power_state_t *req_state)
+{
+ assert(req_state);
+
+ return tegra_soc_validate_power_state(power_state, req_state);
+}
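tegra_soc_validate_power_state() is not given a weak default in this file, so every SoC port must provide one. A minimal sketch that accepts only a core-level power-down state; PSTATE_ID_CORE_POWERDN is a placeholder SoC-specific state ID and the exact states supported are chip dependent.

int32_t tegra_soc_validate_power_state(unsigned int power_state,
				       psci_power_state_t *req_state)
{
	unsigned int state_id = psci_get_pstate_id(power_state);

	switch (state_id) {
	case PSTATE_ID_CORE_POWERDN:
		/* core power-down only affects affinity level 0 */
		req_state->pwr_domain_state[MPIDR_AFFLVL0] =
			(plat_local_state_t)state_id;
		break;
	default:
		ERROR("%s: unsupported state id (%u)\n", __func__, state_id);
		return PSCI_E_INVALID_PARAMS;
	}

	return PSCI_E_SUCCESS;
}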
+
+/*******************************************************************************
+ * Platform handler called to check the validity of the non secure entrypoint.
+ ******************************************************************************/
+int tegra_validate_ns_entrypoint(uintptr_t entrypoint)
+{
+ /*
+ * Check if the non secure entrypoint lies within the non
+ * secure DRAM.
+ */
+ if ((entrypoint >= TEGRA_DRAM_BASE) && (entrypoint <= TEGRA_DRAM_END))
+ return PSCI_E_SUCCESS;
+
+ return PSCI_E_INVALID_ADDRESS;
+}
+
+/*******************************************************************************
+ * Export the platform handlers to enable psci to invoke them
+ ******************************************************************************/
+static const plat_psci_ops_t tegra_plat_psci_ops = {
+ .cpu_standby = tegra_cpu_standby,
+ .pwr_domain_on = tegra_pwr_domain_on,
+ .pwr_domain_off = tegra_pwr_domain_off,
+ .pwr_domain_suspend_pwrdown_early = tegra_pwr_domain_suspend_pwrdown_early,
+ .pwr_domain_suspend = tegra_pwr_domain_suspend,
+ .pwr_domain_on_finish = tegra_pwr_domain_on_finish,
+ .pwr_domain_suspend_finish = tegra_pwr_domain_suspend_finish,
+ .pwr_domain_pwr_down_wfi = tegra_pwr_domain_power_down_wfi,
+ .system_off = tegra_system_off,
+ .system_reset = tegra_system_reset,
+ .validate_power_state = tegra_validate_power_state,
+ .validate_ns_entrypoint = tegra_validate_ns_entrypoint,
+ .get_sys_suspend_power_state = tegra_get_sys_suspend_power_state,
+};
+
+/*******************************************************************************
+ * Export the platform specific power ops and initialize Power Controller
+ ******************************************************************************/
+int plat_setup_psci_ops(uintptr_t sec_entrypoint,
+ const plat_psci_ops_t **psci_ops)
+{
+ psci_power_state_t target_state = { { PSCI_LOCAL_STATE_RUN } };
+
+ /*
+ * Flush entrypoint variable to PoC since it will be
+ * accessed after a reset with the caches turned off.
+ */
+ tegra_sec_entry_point = sec_entrypoint;
+ flush_dcache_range((uint64_t)&tegra_sec_entry_point, sizeof(uint64_t));
+
+ /*
+ * Reset hardware settings.
+ */
+ tegra_soc_pwr_domain_on_finish(&target_state);
+
+ /*
+ * Initialize PSCI ops struct
+ */
+ *psci_ops = &tegra_plat_psci_ops;
+
+ return 0;
+}
+
+/*******************************************************************************
+ * Platform handler to calculate the proper target power level at the
+ * specified affinity level
+ ******************************************************************************/
+plat_local_state_t plat_get_target_pwr_state(unsigned int lvl,
+ const plat_local_state_t *states,
+ unsigned int ncpu)
+{
+ return tegra_soc_get_target_pwr_state(lvl, states, ncpu);
+}
diff --git a/plat/nvidia/tegra/common/tegra_sip_calls.c b/plat/nvidia/tegra/common/tegra_sip_calls.c
new file mode 100644
index 00000000..d96ce7a0
--- /dev/null
+++ b/plat/nvidia/tegra/common/tegra_sip_calls.c
@@ -0,0 +1,182 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <bl_common.h>
+#include <debug.h>
+#include <errno.h>
+#include <memctrl.h>
+#include <mmio.h>
+#include <runtime_svc.h>
+#include <tegra_platform.h>
+#include <tegra_private.h>
+
+/*******************************************************************************
+ * Common Tegra SiP SMCs
+ ******************************************************************************/
+#define TEGRA_SIP_NEW_VIDEOMEM_REGION 0x82000003
+#define TEGRA_SIP_FIQ_NS_ENTRYPOINT 0x82000005
+#define TEGRA_SIP_FIQ_NS_GET_CONTEXT 0x82000006
+#define TEGRA_SIP_ENABLE_FAKE_SYSTEM_SUSPEND 0xC2000007
+
+/*******************************************************************************
+ * Fake system suspend mode control var
+ ******************************************************************************/
+extern uint8_t tegra_fake_system_suspend;
+
+
+/*******************************************************************************
+ * SoC specific SiP handler
+ ******************************************************************************/
+#pragma weak plat_sip_handler
+int plat_sip_handler(uint32_t smc_fid,
+ uint64_t x1,
+ uint64_t x2,
+ uint64_t x3,
+ uint64_t x4,
+ void *cookie,
+ void *handle,
+ uint64_t flags)
+{
+ return -ENOTSUP;
+}
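A SoC port can override the weak handler above to service chip-specific function IDs before the common switch in tegra_sip_handler() runs; returning a non-zero value hands the call back to the common handler. A minimal sketch, where SOC_SIP_EXAMPLE_FID is a placeholder:

int plat_sip_handler(uint32_t smc_fid,
		     uint64_t x1,
		     uint64_t x2,
		     uint64_t x3,
		     uint64_t x4,
		     void *cookie,
		     void *handle,
		     uint64_t flags)
{
	int ret = 0;

	switch (smc_fid) {
	case SOC_SIP_EXAMPLE_FID:	/* placeholder function ID */
		/* perform the SoC specific action for this SMC here */
		break;
	default:
		/* let tegra_sip_handler() process the common SiP calls */
		ret = -ENOTSUP;
		break;
	}

	return ret;
}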
+
+/*******************************************************************************
+ * This function is responsible for handling all SiP calls
+ ******************************************************************************/
+uint64_t tegra_sip_handler(uint32_t smc_fid,
+ uint64_t x1,
+ uint64_t x2,
+ uint64_t x3,
+ uint64_t x4,
+ void *cookie,
+ void *handle,
+ uint64_t flags)
+{
+ uint32_t regval;
+ int err;
+
+ /* Check if this is a SoC specific SiP */
+ err = plat_sip_handler(smc_fid, x1, x2, x3, x4, cookie, handle, flags);
+ if (err == 0)
+ SMC_RET1(handle, (uint64_t)err);
+
+ switch (smc_fid) {
+
+ case TEGRA_SIP_NEW_VIDEOMEM_REGION:
+
+ /* clean up the high bits */
+ x2 = (uint32_t)x2;
+
+ /*
+ * Check if Video Memory overlaps TZDRAM (contains bl31/bl32)
+ * or falls outside of the valid DRAM range
+ */
+ err = bl31_check_ns_address(x1, x2);
+ if (err)
+ SMC_RET1(handle, err);
+
+ /*
+ * Check if Video Memory is aligned to 1MB.
+ */
+ if ((x1 & 0xFFFFF) || (x2 & 0xFFFFF)) {
+ ERROR("Unaligned Video Memory base address!\n");
+ SMC_RET1(handle, -ENOTSUP);
+ }
+
+ /*
+ * The GPU is the user of the Video Memory region. In order to
+ * transition to the new memory region smoothly, we program the
+ * new base/size ONLY if the GPU is in reset mode.
+ */
+ regval = mmio_read_32(TEGRA_CAR_RESET_BASE +
+ TEGRA_GPU_RESET_REG_OFFSET);
+ if ((regval & GPU_RESET_BIT) == 0U) {
+ ERROR("GPU not in reset! Video Memory setup failed\n");
+ SMC_RET1(handle, -ENOTSUP);
+ }
+
+ /* new video memory carveout settings */
+ tegra_memctrl_videomem_setup(x1, x2);
+
+ SMC_RET1(handle, 0);
+ break;
+
+ /*
+ * The NS world registers the address of its handler to be
+ * used for processing the FIQ. This is normally used by the
+ * NS FIQ debugger driver to detect system hangs by programming
+ * a watchdog timer to fire a FIQ interrupt.
+ */
+ case TEGRA_SIP_FIQ_NS_ENTRYPOINT:
+
+ if (!x1)
+ SMC_RET1(handle, SMC_UNK);
+
+ /*
+ * TODO: Check if x1 contains a valid DRAM address
+ */
+
+ /* store the NS world's entrypoint */
+ tegra_fiq_set_ns_entrypoint(x1);
+
+ SMC_RET1(handle, 0);
+ break;
+
+ /*
+ * The NS world's FIQ handler issues this SMC to get the NS EL1/EL0
+ * CPU context when the FIQ interrupt was triggered. This allows the
+ * NS world to understand the CPU state when the watchdog interrupt
+ * triggered.
+ */
+ case TEGRA_SIP_FIQ_NS_GET_CONTEXT:
+
+ /* retrieve context registers when FIQ triggered */
+ tegra_fiq_get_intr_context();
+
+ SMC_RET0(handle);
+ break;
+
+ case TEGRA_SIP_ENABLE_FAKE_SYSTEM_SUSPEND:
+ /*
+ * System suspend fake mode is set if we are on VDK and we make
+		 * a debug SIP call. This mode ensures that we exercise the debug
+ * path instead of the regular code path to suit the pre-silicon
+ * platform needs. These include replacing the call to WFI by
+ * a warm reset request.
+ */
+ if (tegra_platform_is_emulation() != 0U) {
+
+ tegra_fake_system_suspend = 1;
+ SMC_RET1(handle, 0);
+ }
+
+ /*
+		 * If we are not running on VDK, return to the caller as if
+		 * this SiP call is not implemented.
+ */
+ break;
+
+ default:
+ ERROR("%s: unhandled SMC (0x%x)\n", __func__, smc_fid);
+ break;
+ }
+
+ SMC_RET1(handle, SMC_UNK);
+}
+
+/* Define a runtime service descriptor for fast SMC calls */
+DECLARE_RT_SVC(
+ tegra_sip_fast,
+
+ OEN_SIP_START,
+ OEN_SIP_END,
+ SMC_TYPE_FAST,
+ NULL,
+ tegra_sip_handler
+);
diff --git a/plat/nvidia/tegra/common/tegra_topology.c b/plat/nvidia/tegra/common/tegra_topology.c
new file mode 100644
index 00000000..05930535
--- /dev/null
+++ b/plat/nvidia/tegra/common/tegra_topology.c
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <platform_def.h>
+#include <psci.h>
+
+extern const unsigned char tegra_power_domain_tree_desc[];
+#pragma weak plat_core_pos_by_mpidr
+
+/*******************************************************************************
+ * This function returns the Tegra default topology tree information.
+ ******************************************************************************/
+const unsigned char *plat_get_power_domain_tree_desc(void)
+{
+ return tegra_power_domain_tree_desc;
+}
+
+/*******************************************************************************
+ * This function implements a part of the critical interface between the psci
+ * generic layer and the platform that allows the former to query the platform
+ * to convert an MPIDR to a unique linear index. An error code (-1) is returned
+ * in case the MPIDR is invalid.
+ ******************************************************************************/
+int plat_core_pos_by_mpidr(u_register_t mpidr)
+{
+ unsigned int cluster_id, cpu_id;
+
+ cluster_id = (mpidr >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK;
+ cpu_id = (mpidr >> MPIDR_AFF0_SHIFT) & MPIDR_AFFLVL_MASK;
+
+ if (cluster_id >= PLATFORM_CLUSTER_COUNT)
+ return PSCI_E_NOT_PRESENT;
+
+	/*
+	 * Validate cpu_id by checking whether it represents a valid CPU
+	 * index within a cluster on this platform.
+	 */
+	if (cpu_id >= PLATFORM_MAX_CPUS_PER_CLUSTER)
+		return PSCI_E_NOT_PRESENT;
+
+	return (cpu_id + (cluster_id * PLATFORM_MAX_CPUS_PER_CLUSTER));
+}
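
Since plat_core_pos_by_mpidr() simply packs the cluster and core fields into a
flat index, a tiny host-side illustration may help; it assumes a
4-CPUs-per-cluster layout purely for the example and is not part of the
platform port.

/* Illustration only: assumes 2 clusters x 4 CPUs per cluster. */
#include <assert.h>

#define EX_MAX_CPUS_PER_CLUSTER	4U

static unsigned int ex_linear_index(unsigned int cluster_id,
				    unsigned int cpu_id)
{
	return cpu_id + (cluster_id * EX_MAX_CPUS_PER_CLUSTER);
}

int main(void)
{
	/* CPU2 of cluster1 lands at index 6 in the flat topology */
	assert(ex_linear_index(1U, 2U) == 6U);
	return 0;
}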
diff --git a/plat/nvidia/tegra/include/drivers/flowctrl.h b/plat/nvidia/tegra/include/drivers/flowctrl.h
new file mode 100644
index 00000000..2e3bcf04
--- /dev/null
+++ b/plat/nvidia/tegra/include/drivers/flowctrl.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __FLOWCTRL_H__
+#define __FLOWCTRL_H__
+
+#include <mmio.h>
+#include <tegra_def.h>
+
+#define FLOWCTRL_HALT_CPU0_EVENTS 0x0U
+#define FLOWCTRL_WAITEVENT (2U << 29)
+#define FLOWCTRL_WAIT_FOR_INTERRUPT (4U << 29)
+#define FLOWCTRL_JTAG_RESUME (1U << 28)
+#define FLOWCTRL_HALT_SCLK (1U << 27)
+#define FLOWCTRL_HALT_LIC_IRQ (1U << 11)
+#define FLOWCTRL_HALT_LIC_FIQ (1U << 10)
+#define FLOWCTRL_HALT_GIC_IRQ (1U << 9)
+#define FLOWCTRL_HALT_GIC_FIQ (1U << 8)
+#define FLOWCTRL_HALT_BPMP_EVENTS 0x4U
+#define FLOWCTRL_CPU0_CSR 0x8U
+#define FLOW_CTRL_CSR_PWR_OFF_STS (1U << 16)
+#define FLOWCTRL_CSR_INTR_FLAG (1U << 15)
+#define FLOWCTRL_CSR_EVENT_FLAG (1U << 14)
+#define FLOWCTRL_CSR_IMMEDIATE_WAKE (1U << 3)
+#define FLOWCTRL_CSR_ENABLE (1U << 0)
+#define FLOWCTRL_HALT_CPU1_EVENTS 0x14U
+#define FLOWCTRL_CPU1_CSR 0x18U
+#define FLOWCTRL_CC4_CORE0_CTRL 0x6cU
+#define FLOWCTRL_WAIT_WFI_BITMAP 0x100U
+#define FLOWCTRL_L2_FLUSH_CONTROL 0x94U
+#define FLOWCTRL_BPMP_CLUSTER_CONTROL 0x98U
+#define FLOWCTRL_BPMP_CLUSTER_PWRON_LOCK (1U << 2)
+
+#define FLOWCTRL_ENABLE_EXT 12U
+#define FLOWCTRL_ENABLE_EXT_MASK 3U
+#define FLOWCTRL_PG_CPU_NONCPU 0x1U
+#define FLOWCTRL_TURNOFF_CPURAIL 0x2U
+
+static inline uint32_t tegra_fc_read_32(uint32_t off)
+{
+ return mmio_read_32(TEGRA_FLOWCTRL_BASE + off);
+}
+
+static inline void tegra_fc_write_32(uint32_t off, uint32_t val)
+{
+ mmio_write_32(TEGRA_FLOWCTRL_BASE + off, val);
+}
+
+void tegra_fc_cluster_idle(uint32_t midr);
+void tegra_fc_cpu_powerdn(uint32_t mpidr);
+void tegra_fc_cluster_powerdn(uint32_t midr);
+void tegra_fc_soc_powerdn(uint32_t midr);
+void tegra_fc_cpu_on(int cpu);
+void tegra_fc_cpu_off(int cpu);
+void tegra_fc_lock_active_cluster(void);
+void tegra_fc_reset_bpmp(void);
+
+#endif /* __FLOWCTRL_H__ */
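
The accessors above are all that the flow-controller interface exposes at this
level. The following is a hedged sketch of how they might be combined to park
a secondary core; the helper is hypothetical and the production sequencing
lives in flowctrl.c.

/*
 * Sketch only: arms CPU1's flow-controller CSR and programs a
 * wait-for-event halt using the accessors declared above.
 */
#include <flowctrl.h>

static void example_halt_cpu1(void)
{
	uint32_t csr = tegra_fc_read_32(FLOWCTRL_CPU1_CSR);

	/* clear stale interrupt/event flags and enable the CSR */
	csr |= FLOWCTRL_CSR_INTR_FLAG | FLOWCTRL_CSR_EVENT_FLAG |
	       FLOWCTRL_CSR_ENABLE;
	tegra_fc_write_32(FLOWCTRL_CPU1_CSR, csr);

	/* halt CPU1 until an event arrives */
	tegra_fc_write_32(FLOWCTRL_HALT_CPU1_EVENTS, FLOWCTRL_WAITEVENT);
}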
diff --git a/plat/nvidia/tegra/include/drivers/mce.h b/plat/nvidia/tegra/include/drivers/mce.h
new file mode 100644
index 00000000..c7867a50
--- /dev/null
+++ b/plat/nvidia/tegra/include/drivers/mce.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __MCE_H__
+#define __MCE_H__
+
+#include <mmio.h>
+#include <tegra_def.h>
+
+/*******************************************************************************
+ * MCE commands
+ ******************************************************************************/
+typedef enum mce_cmd {
+ MCE_CMD_ENTER_CSTATE = 0U,
+ MCE_CMD_UPDATE_CSTATE_INFO = 1U,
+ MCE_CMD_UPDATE_CROSSOVER_TIME = 2U,
+ MCE_CMD_READ_CSTATE_STATS = 3U,
+ MCE_CMD_WRITE_CSTATE_STATS = 4U,
+ MCE_CMD_IS_SC7_ALLOWED = 5U,
+ MCE_CMD_ONLINE_CORE = 6U,
+ MCE_CMD_CC3_CTRL = 7U,
+ MCE_CMD_ECHO_DATA = 8U,
+ MCE_CMD_READ_VERSIONS = 9U,
+ MCE_CMD_ENUM_FEATURES = 10U,
+ MCE_CMD_ROC_FLUSH_CACHE_TRBITS = 11U,
+ MCE_CMD_ENUM_READ_MCA = 12U,
+ MCE_CMD_ENUM_WRITE_MCA = 13U,
+ MCE_CMD_ROC_FLUSH_CACHE = 14U,
+ MCE_CMD_ROC_CLEAN_CACHE = 15U,
+ MCE_CMD_ENABLE_LATIC = 16U,
+ MCE_CMD_UNCORE_PERFMON_REQ = 17U,
+ MCE_CMD_MISC_CCPLEX = 18U,
+ MCE_CMD_IS_CCX_ALLOWED = 0xFEU,
+ MCE_CMD_MAX = 0xFFU,
+} mce_cmd_t;
+
+#define MCE_CMD_MASK 0xFFU
+
+/*******************************************************************************
+ * Timeout value used to powerdown a core
+ ******************************************************************************/
+#define MCE_CORE_SLEEP_TIME_INFINITE 0xFFFFFFFFU
+
+/*******************************************************************************
+ * Struct to prepare UPDATE_CSTATE_INFO request
+ ******************************************************************************/
+typedef struct mce_cstate_info {
+ /* cluster cstate value */
+ uint32_t cluster;
+ /* ccplex cstate value */
+ uint32_t ccplex;
+ /* system cstate value */
+ uint32_t system;
+ /* force system state? */
+ uint8_t system_state_force;
+ /* wake mask value */
+ uint32_t wake_mask;
+ /* update the wake mask? */
+ uint8_t update_wake_mask;
+} mce_cstate_info_t;
+
+/* public interfaces */
+int mce_command_handler(uint64_t cmd, uint64_t arg0, uint64_t arg1,
+ uint64_t arg2);
+int mce_update_reset_vector(void);
+int mce_update_gsc_videomem(void);
+int mce_update_gsc_tzdram(void);
+int mce_update_gsc_tzram(void);
+__dead2 void mce_enter_ccplex_state(uint32_t state_idx);
+void mce_update_cstate_info(const mce_cstate_info_t *cstate);
+void mce_verify_firmware_version(void);
+
+#endif /* __MCE_H__ */
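
To illustrate how an UPDATE_CSTATE_INFO request is typically assembled, here is
a minimal sketch built only on the types declared above. The cstate encodings
themselves are SoC-specific and are treated as caller-supplied values rather
than constants from this header.

/*
 * Sketch only: requests a cluster power state and an updated wake
 * mask via the MCE using the public interface above.
 */
#include <mce.h>
#include <string.h>

static void example_request_cluster_cstate(uint32_t cluster_cstate,
					   uint32_t wake_mask)
{
	mce_cstate_info_t info;

	(void)memset(&info, 0, sizeof(info));
	info.cluster = cluster_cstate;	/* SoC-specific encoding */
	info.update_wake_mask = 1U;
	info.wake_mask = wake_mask;

	mce_update_cstate_info(&info);
}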
diff --git a/plat/nvidia/tegra/include/drivers/memctrl.h b/plat/nvidia/tegra/include/drivers/memctrl.h
new file mode 100644
index 00000000..8413299d
--- /dev/null
+++ b/plat/nvidia/tegra/include/drivers/memctrl.h
@@ -0,0 +1,17 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __MEMCTRL_H__
+#define __MEMCTRL_H__
+
+void tegra_memctrl_setup(void);
+void tegra_memctrl_restore_settings(void);
+void tegra_memctrl_tzdram_setup(uint64_t phys_base, uint32_t size_in_bytes);
+void tegra_memctrl_tzram_setup(uint64_t phys_base, uint32_t size_in_bytes);
+void tegra_memctrl_videomem_setup(uint64_t phys_base, uint32_t size_in_bytes);
+void tegra_memctrl_disable_ahb_redirection(void);
+
+#endif /* __MEMCTRL_H__ */
diff --git a/plat/nvidia/tegra/include/drivers/memctrl_v1.h b/plat/nvidia/tegra/include/drivers/memctrl_v1.h
new file mode 100644
index 00000000..78ee2e76
--- /dev/null
+++ b/plat/nvidia/tegra/include/drivers/memctrl_v1.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __MEMCTRLV1_H__
+#define __MEMCTRLV1_H__
+
+#include <mmio.h>
+#include <tegra_def.h>
+
+/* SMMU registers */
+#define MC_SMMU_CONFIG_0 0x10U
+#define MC_SMMU_CONFIG_0_SMMU_ENABLE_DISABLE 0U
+#define MC_SMMU_CONFIG_0_SMMU_ENABLE_ENABLE 1U
+#define MC_SMMU_TLB_CONFIG_0 0x14U
+#define MC_SMMU_TLB_CONFIG_0_RESET_VAL 0x20000010U
+#define MC_SMMU_PTC_CONFIG_0 0x18U
+#define MC_SMMU_PTC_CONFIG_0_RESET_VAL 0x2000003fU
+#define MC_SMMU_TLB_FLUSH_0 0x30U
+#define TLB_FLUSH_VA_MATCH_ALL 0U
+#define TLB_FLUSH_ASID_MATCH_DISABLE 0U
+#define TLB_FLUSH_ASID_MATCH_SHIFT 31U
+#define MC_SMMU_TLB_FLUSH_ALL \
+ (TLB_FLUSH_VA_MATCH_ALL | \
+ (TLB_FLUSH_ASID_MATCH_DISABLE << TLB_FLUSH_ASID_MATCH_SHIFT))
+#define MC_SMMU_PTC_FLUSH_0 0x34U
+#define MC_SMMU_PTC_FLUSH_ALL 0U
+#define MC_SMMU_ASID_SECURITY_0 0x38U
+#define MC_SMMU_ASID_SECURITY 0U
+#define MC_SMMU_TRANSLATION_ENABLE_0_0 0x228U
+#define MC_SMMU_TRANSLATION_ENABLE_1_0 0x22cU
+#define MC_SMMU_TRANSLATION_ENABLE_2_0 0x230U
+#define MC_SMMU_TRANSLATION_ENABLE_3_0 0x234U
+#define MC_SMMU_TRANSLATION_ENABLE_4_0 0xb98U
+#define MC_SMMU_TRANSLATION_ENABLE (~0)
+
+/* MC IRAM aperture registers */
+#define MC_IRAM_BASE_LO 0x65CU
+#define MC_IRAM_TOP_LO 0x660U
+#define MC_IRAM_BASE_TOP_HI 0x980U
+#define MC_IRAM_REG_CTRL 0x964U
+#define MC_DISABLE_IRAM_CFG_WRITES 1U
+
+static inline uint32_t tegra_mc_read_32(uint32_t off)
+{
+ return mmio_read_32(TEGRA_MC_BASE + off);
+}
+
+static inline void tegra_mc_write_32(uint32_t off, uint32_t val)
+{
+ mmio_write_32(TEGRA_MC_BASE + off, val);
+}
+
+#endif /* __MEMCTRLV1_H__ */
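
As a usage sketch of the v1 accessors, the hypothetical helper below flushes
the SMMU TLB and PTC caches; the real memctrl_v1.c driver performs this as
part of a larger SMMU setup sequence.

/*
 * Sketch only: invalidates the SMMU TLB and PTC caches through the
 * v1 memory controller registers defined above.
 */
#include <memctrl_v1.h>

static void example_flush_smmu_caches(void)
{
	tegra_mc_write_32(MC_SMMU_PTC_FLUSH_0, MC_SMMU_PTC_FLUSH_ALL);
	tegra_mc_write_32(MC_SMMU_TLB_FLUSH_0, MC_SMMU_TLB_FLUSH_ALL);

	/* read back to make sure the flush writes reached the MC */
	(void)tegra_mc_read_32(MC_SMMU_CONFIG_0);
}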
diff --git a/plat/nvidia/tegra/include/drivers/memctrl_v2.h b/plat/nvidia/tegra/include/drivers/memctrl_v2.h
new file mode 100644
index 00000000..60c8a040
--- /dev/null
+++ b/plat/nvidia/tegra/include/drivers/memctrl_v2.h
@@ -0,0 +1,478 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __MEMCTRLV2_H__
+#define __MEMCTRLV2_H__
+
+#include <tegra_def.h>
+
+#ifndef __ASSEMBLY__
+
+#include <sys/types.h>
+
+/*******************************************************************************
+ * StreamID to indicate no SMMU translations (requests to be steered on the
+ * SMMU bypass path)
+ ******************************************************************************/
+#define MC_STREAM_ID_MAX 0x7F
+
+/*******************************************************************************
+ * Stream ID Override Config registers
+ ******************************************************************************/
+#define MC_STREAMID_OVERRIDE_CFG_PTCR 0x000
+#define MC_STREAMID_OVERRIDE_CFG_AFIR 0x070
+#define MC_STREAMID_OVERRIDE_CFG_HDAR 0x0A8
+#define MC_STREAMID_OVERRIDE_CFG_HOST1XDMAR 0x0B0
+#define MC_STREAMID_OVERRIDE_CFG_NVENCSRD 0x0E0
+#define MC_STREAMID_OVERRIDE_CFG_SATAR 0x0F8
+#define MC_STREAMID_OVERRIDE_CFG_MPCORER 0x138
+#define MC_STREAMID_OVERRIDE_CFG_NVENCSWR 0x158
+#define MC_STREAMID_OVERRIDE_CFG_AFIW 0x188
+#define MC_STREAMID_OVERRIDE_CFG_HDAW 0x1A8
+#define MC_STREAMID_OVERRIDE_CFG_MPCOREW 0x1C8
+#define MC_STREAMID_OVERRIDE_CFG_SATAW 0x1E8
+#define MC_STREAMID_OVERRIDE_CFG_ISPRA 0x220
+#define MC_STREAMID_OVERRIDE_CFG_ISPWA 0x230
+#define MC_STREAMID_OVERRIDE_CFG_ISPWB 0x238
+#define MC_STREAMID_OVERRIDE_CFG_XUSB_HOSTR 0x250
+#define MC_STREAMID_OVERRIDE_CFG_XUSB_HOSTW 0x258
+#define MC_STREAMID_OVERRIDE_CFG_XUSB_DEVR 0x260
+#define MC_STREAMID_OVERRIDE_CFG_XUSB_DEVW 0x268
+#define MC_STREAMID_OVERRIDE_CFG_TSECSRD 0x2A0
+#define MC_STREAMID_OVERRIDE_CFG_TSECSWR 0x2A8
+#define MC_STREAMID_OVERRIDE_CFG_GPUSRD 0x2C0
+#define MC_STREAMID_OVERRIDE_CFG_GPUSWR 0x2C8
+#define MC_STREAMID_OVERRIDE_CFG_SDMMCRA 0x300
+#define MC_STREAMID_OVERRIDE_CFG_SDMMCRAA 0x308
+#define MC_STREAMID_OVERRIDE_CFG_SDMMCR 0x310
+#define MC_STREAMID_OVERRIDE_CFG_SDMMCRAB 0x318
+#define MC_STREAMID_OVERRIDE_CFG_SDMMCWA 0x320
+#define MC_STREAMID_OVERRIDE_CFG_SDMMCWAA 0x328
+#define MC_STREAMID_OVERRIDE_CFG_SDMMCW 0x330
+#define MC_STREAMID_OVERRIDE_CFG_SDMMCWAB 0x338
+#define MC_STREAMID_OVERRIDE_CFG_VICSRD 0x360
+#define MC_STREAMID_OVERRIDE_CFG_VICSWR 0x368
+#define MC_STREAMID_OVERRIDE_CFG_VIW 0x390
+#define MC_STREAMID_OVERRIDE_CFG_NVDECSRD 0x3C0
+#define MC_STREAMID_OVERRIDE_CFG_NVDECSWR 0x3C8
+#define MC_STREAMID_OVERRIDE_CFG_APER 0x3D0
+#define MC_STREAMID_OVERRIDE_CFG_APEW 0x3D8
+#define MC_STREAMID_OVERRIDE_CFG_NVJPGSRD 0x3F0
+#define MC_STREAMID_OVERRIDE_CFG_NVJPGSWR 0x3F8
+#define MC_STREAMID_OVERRIDE_CFG_SESRD 0x400
+#define MC_STREAMID_OVERRIDE_CFG_SESWR 0x408
+#define MC_STREAMID_OVERRIDE_CFG_ETRR 0x420
+#define MC_STREAMID_OVERRIDE_CFG_ETRW 0x428
+#define MC_STREAMID_OVERRIDE_CFG_TSECSRDB 0x430
+#define MC_STREAMID_OVERRIDE_CFG_TSECSWRB 0x438
+#define MC_STREAMID_OVERRIDE_CFG_GPUSRD2 0x440
+#define MC_STREAMID_OVERRIDE_CFG_GPUSWR2 0x448
+#define MC_STREAMID_OVERRIDE_CFG_AXISR 0x460
+#define MC_STREAMID_OVERRIDE_CFG_AXISW 0x468
+#define MC_STREAMID_OVERRIDE_CFG_EQOSR 0x470
+#define MC_STREAMID_OVERRIDE_CFG_EQOSW 0x478
+#define MC_STREAMID_OVERRIDE_CFG_UFSHCR 0x480
+#define MC_STREAMID_OVERRIDE_CFG_UFSHCW 0x488
+#define MC_STREAMID_OVERRIDE_CFG_NVDISPLAYR 0x490
+#define MC_STREAMID_OVERRIDE_CFG_BPMPR 0x498
+#define MC_STREAMID_OVERRIDE_CFG_BPMPW 0x4A0
+#define MC_STREAMID_OVERRIDE_CFG_BPMPDMAR 0x4A8
+#define MC_STREAMID_OVERRIDE_CFG_BPMPDMAW 0x4B0
+#define MC_STREAMID_OVERRIDE_CFG_AONR 0x4B8
+#define MC_STREAMID_OVERRIDE_CFG_AONW 0x4C0
+#define MC_STREAMID_OVERRIDE_CFG_AONDMAR 0x4C8
+#define MC_STREAMID_OVERRIDE_CFG_AONDMAW 0x4D0
+#define MC_STREAMID_OVERRIDE_CFG_SCER 0x4D8
+#define MC_STREAMID_OVERRIDE_CFG_SCEW 0x4E0
+#define MC_STREAMID_OVERRIDE_CFG_SCEDMAR 0x4E8
+#define MC_STREAMID_OVERRIDE_CFG_SCEDMAW 0x4F0
+#define MC_STREAMID_OVERRIDE_CFG_APEDMAR 0x4F8
+#define MC_STREAMID_OVERRIDE_CFG_APEDMAW 0x500
+#define MC_STREAMID_OVERRIDE_CFG_NVDISPLAYR1 0x508
+#define MC_STREAMID_OVERRIDE_CFG_VICSRD1 0x510
+#define MC_STREAMID_OVERRIDE_CFG_NVDECSRD1 0x518
+
+/*******************************************************************************
+ * Macro to calculate Security cfg register addr from StreamID Override register
+ ******************************************************************************/
+#define MC_STREAMID_OVERRIDE_TO_SECURITY_CFG(addr) ((addr) + sizeof(uint32_t))
+
+/*******************************************************************************
+ * Memory Controller transaction override config registers
+ ******************************************************************************/
+#define MC_TXN_OVERRIDE_CONFIG_HDAR 0x10a8
+#define MC_TXN_OVERRIDE_CONFIG_BPMPW 0x14a0
+#define MC_TXN_OVERRIDE_CONFIG_PTCR 0x1000
+#define MC_TXN_OVERRIDE_CONFIG_NVDISPLAYR 0x1490
+#define MC_TXN_OVERRIDE_CONFIG_EQOSW 0x1478
+#define MC_TXN_OVERRIDE_CONFIG_NVJPGSWR 0x13f8
+#define MC_TXN_OVERRIDE_CONFIG_ISPRA 0x1220
+#define MC_TXN_OVERRIDE_CONFIG_SDMMCWAA 0x1328
+#define MC_TXN_OVERRIDE_CONFIG_VICSRD 0x1360
+#define MC_TXN_OVERRIDE_CONFIG_MPCOREW 0x11c8
+#define MC_TXN_OVERRIDE_CONFIG_GPUSRD 0x12c0
+#define MC_TXN_OVERRIDE_CONFIG_AXISR 0x1460
+#define MC_TXN_OVERRIDE_CONFIG_SCEDMAW 0x14f0
+#define MC_TXN_OVERRIDE_CONFIG_SDMMCW 0x1330
+#define MC_TXN_OVERRIDE_CONFIG_EQOSR 0x1470
+#define MC_TXN_OVERRIDE_CONFIG_APEDMAR 0x14f8
+#define MC_TXN_OVERRIDE_CONFIG_NVENCSRD 0x10e0
+#define MC_TXN_OVERRIDE_CONFIG_SDMMCRAB 0x1318
+#define MC_TXN_OVERRIDE_CONFIG_VICSRD1 0x1510
+#define MC_TXN_OVERRIDE_CONFIG_BPMPDMAR 0x14a8
+#define MC_TXN_OVERRIDE_CONFIG_VIW 0x1390
+#define MC_TXN_OVERRIDE_CONFIG_SDMMCRAA 0x1308
+#define MC_TXN_OVERRIDE_CONFIG_AXISW 0x1468
+#define MC_TXN_OVERRIDE_CONFIG_XUSB_DEVR 0x1260
+#define MC_TXN_OVERRIDE_CONFIG_UFSHCR 0x1480
+#define MC_TXN_OVERRIDE_CONFIG_TSECSWR 0x12a8
+#define MC_TXN_OVERRIDE_CONFIG_GPUSWR 0x12c8
+#define MC_TXN_OVERRIDE_CONFIG_SATAR 0x10f8
+#define MC_TXN_OVERRIDE_CONFIG_XUSB_HOSTW 0x1258
+#define MC_TXN_OVERRIDE_CONFIG_TSECSWRB 0x1438
+#define MC_TXN_OVERRIDE_CONFIG_GPUSRD2 0x1440
+#define MC_TXN_OVERRIDE_CONFIG_SCEDMAR 0x14e8
+#define MC_TXN_OVERRIDE_CONFIG_GPUSWR2 0x1448
+#define MC_TXN_OVERRIDE_CONFIG_AONDMAW 0x14d0
+#define MC_TXN_OVERRIDE_CONFIG_APEDMAW 0x1500
+#define MC_TXN_OVERRIDE_CONFIG_AONW 0x14c0
+#define MC_TXN_OVERRIDE_CONFIG_HOST1XDMAR 0x10b0
+#define MC_TXN_OVERRIDE_CONFIG_ETRR 0x1420
+#define MC_TXN_OVERRIDE_CONFIG_SESWR 0x1408
+#define MC_TXN_OVERRIDE_CONFIG_NVJPGSRD 0x13f0
+#define MC_TXN_OVERRIDE_CONFIG_NVDECSRD 0x13c0
+#define MC_TXN_OVERRIDE_CONFIG_TSECSRDB 0x1430
+#define MC_TXN_OVERRIDE_CONFIG_BPMPDMAW 0x14b0
+#define MC_TXN_OVERRIDE_CONFIG_APER 0x13d0
+#define MC_TXN_OVERRIDE_CONFIG_NVDECSRD1 0x1518
+#define MC_TXN_OVERRIDE_CONFIG_XUSB_HOSTR 0x1250
+#define MC_TXN_OVERRIDE_CONFIG_ISPWA 0x1230
+#define MC_TXN_OVERRIDE_CONFIG_SESRD 0x1400
+#define MC_TXN_OVERRIDE_CONFIG_SCER 0x14d8
+#define MC_TXN_OVERRIDE_CONFIG_AONR 0x14b8
+#define MC_TXN_OVERRIDE_CONFIG_MPCORER 0x1138
+#define MC_TXN_OVERRIDE_CONFIG_SDMMCWA 0x1320
+#define MC_TXN_OVERRIDE_CONFIG_HDAW 0x11a8
+#define MC_TXN_OVERRIDE_CONFIG_NVDECSWR 0x13c8
+#define MC_TXN_OVERRIDE_CONFIG_UFSHCW 0x1488
+#define MC_TXN_OVERRIDE_CONFIG_AONDMAR 0x14c8
+#define MC_TXN_OVERRIDE_CONFIG_SATAW 0x11e8
+#define MC_TXN_OVERRIDE_CONFIG_ETRW 0x1428
+#define MC_TXN_OVERRIDE_CONFIG_VICSWR 0x1368
+#define MC_TXN_OVERRIDE_CONFIG_NVENCSWR 0x1158
+#define MC_TXN_OVERRIDE_CONFIG_AFIR 0x1070
+#define MC_TXN_OVERRIDE_CONFIG_SDMMCWAB 0x1338
+#define MC_TXN_OVERRIDE_CONFIG_SDMMCRA 0x1300
+#define MC_TXN_OVERRIDE_CONFIG_NVDISPLAYR1 0x1508
+#define MC_TXN_OVERRIDE_CONFIG_ISPWB 0x1238
+#define MC_TXN_OVERRIDE_CONFIG_BPMPR 0x1498
+#define MC_TXN_OVERRIDE_CONFIG_APEW 0x13d8
+#define MC_TXN_OVERRIDE_CONFIG_SDMMCR 0x1310
+#define MC_TXN_OVERRIDE_CONFIG_XUSB_DEVW 0x1268
+#define MC_TXN_OVERRIDE_CONFIG_TSECSRD 0x12a0
+#define MC_TXN_OVERRIDE_CONFIG_AFIW 0x1188
+#define MC_TXN_OVERRIDE_CONFIG_SCEW 0x14e0
+
+/*******************************************************************************
+ * Structure to hold the transaction override settings to use to override
+ * client inputs
+ ******************************************************************************/
+typedef struct mc_txn_override_cfg {
+ uint32_t offset;
+ uint8_t cgid_tag;
+} mc_txn_override_cfg_t;
+
+#define mc_make_txn_override_cfg(off, val) \
+ { \
+ .offset = MC_TXN_OVERRIDE_CONFIG_ ## off, \
+ .cgid_tag = MC_TXN_OVERRIDE_ ## val \
+ }
+
+/*******************************************************************************
+ * Structure to hold the Stream ID to use to override client inputs
+ ******************************************************************************/
+typedef struct mc_streamid_override_cfg {
+ uint32_t offset;
+ uint8_t stream_id;
+} mc_streamid_override_cfg_t;
+
+/*******************************************************************************
+ * Structure to hold the Stream ID Security Configuration settings
+ ******************************************************************************/
+typedef struct mc_streamid_security_cfg {
+ char *name;
+ uint32_t offset;
+ int override_enable;
+ int override_client_inputs;
+ int override_client_ns_flag;
+} mc_streamid_security_cfg_t;
+
+#define OVERRIDE_DISABLE 1
+#define OVERRIDE_ENABLE 0
+#define CLIENT_FLAG_SECURE 0
+#define CLIENT_FLAG_NON_SECURE 1
+#define CLIENT_INPUTS_OVERRIDE 1
+#define CLIENT_INPUTS_NO_OVERRIDE 0
+
+#define mc_make_sec_cfg(off, ns, ovrrd, access) \
+ { \
+ .name = # off, \
+ .offset = MC_STREAMID_OVERRIDE_TO_SECURITY_CFG( \
+ MC_STREAMID_OVERRIDE_CFG_ ## off), \
+ .override_client_ns_flag = CLIENT_FLAG_ ## ns, \
+ .override_client_inputs = CLIENT_INPUTS_ ## ovrrd, \
+ .override_enable = OVERRIDE_ ## access \
+ }
+
+/*******************************************************************************
+ * Structure to hold Memory Controller's Configuration settings
+ ******************************************************************************/
+typedef struct tegra_mc_settings {
+ const uint32_t *streamid_override_cfg;
+ uint32_t num_streamid_override_cfgs;
+ const mc_streamid_security_cfg_t *streamid_security_cfg;
+ uint32_t num_streamid_security_cfgs;
+ const mc_txn_override_cfg_t *txn_override_cfg;
+ uint32_t num_txn_override_cfgs;
+} tegra_mc_settings_t;
+
+#endif /* __ASSEMBLY__ */
+
+/*******************************************************************************
+ * Memory Controller SMMU Bypass config register
+ ******************************************************************************/
+#define MC_SMMU_BYPASS_CONFIG 0x1820
+#define MC_SMMU_BYPASS_CTRL_MASK 0x3
+#define MC_SMMU_BYPASS_CTRL_SHIFT 0
+#define MC_SMMU_CTRL_TBU_BYPASS_ALL (0 << MC_SMMU_BYPASS_CTRL_SHIFT)
+#define MC_SMMU_CTRL_TBU_RSVD (1 << MC_SMMU_BYPASS_CTRL_SHIFT)
+#define MC_SMMU_CTRL_TBU_BYPASS_SPL_STREAMID (2 << MC_SMMU_BYPASS_CTRL_SHIFT)
+#define MC_SMMU_CTRL_TBU_BYPASS_NONE (3 << MC_SMMU_BYPASS_CTRL_SHIFT)
+#define MC_SMMU_BYPASS_CONFIG_WRITE_ACCESS_BIT (1 << 31)
+#define MC_SMMU_BYPASS_CONFIG_SETTINGS (MC_SMMU_BYPASS_CONFIG_WRITE_ACCESS_BIT | \
+ MC_SMMU_CTRL_TBU_BYPASS_SPL_STREAMID)
+
+#define MC_TXN_OVERRIDE_CONFIG_AXID_OVERRIDE_CGID (1 << 0)
+#define MC_TXN_OVERRIDE_CONFIG_COH_PATH_OVERRIDE_SO_DEV (2 << 4)
+#define MC_TXN_OVERRIDE_CONFIG_AXID_OVERRIDE_SO_DEV_CGID_SO_DEV_CLIENT (1 << 12)
+
+/*******************************************************************************
+ * Non-SO_DEV transactions override values for CGID_TAG bitfield for the
+ * MC_TXN_OVERRIDE_CONFIG_{module} registers
+ ******************************************************************************/
+#define MC_TXN_OVERRIDE_CGID_TAG_DEFAULT 0
+#define MC_TXN_OVERRIDE_CGID_TAG_CLIENT_AXI_ID 1
+#define MC_TXN_OVERRIDE_CGID_TAG_ZERO 2
+#define MC_TXN_OVERRIDE_CGID_TAG_ADR 3
+#define MC_TXN_OVERRIDE_CGID_TAG_MASK 3
+
+/*******************************************************************************
+ * Memory Controller Reset Control registers
+ ******************************************************************************/
+#define MC_CLIENT_HOTRESET_CTRL0 0x200
+#define MC_CLIENT_HOTRESET_CTRL0_RESET_VAL 0
+#define MC_CLIENT_HOTRESET_CTRL0_AFI_FLUSH_ENB (1 << 0)
+#define MC_CLIENT_HOTRESET_CTRL0_HC_FLUSH_ENB (1 << 6)
+#define MC_CLIENT_HOTRESET_CTRL0_HDA_FLUSH_ENB (1 << 7)
+#define MC_CLIENT_HOTRESET_CTRL0_ISP2_FLUSH_ENB (1 << 8)
+#define MC_CLIENT_HOTRESET_CTRL0_MPCORE_FLUSH_ENB (1 << 9)
+#define MC_CLIENT_HOTRESET_CTRL0_NVENC_FLUSH_ENB (1 << 11)
+#define MC_CLIENT_HOTRESET_CTRL0_SATA_FLUSH_ENB (1 << 15)
+#define MC_CLIENT_HOTRESET_CTRL0_VI_FLUSH_ENB (1 << 17)
+#define MC_CLIENT_HOTRESET_CTRL0_VIC_FLUSH_ENB (1 << 18)
+#define MC_CLIENT_HOTRESET_CTRL0_XUSB_HOST_FLUSH_ENB (1 << 19)
+#define MC_CLIENT_HOTRESET_CTRL0_XUSB_DEV_FLUSH_ENB (1 << 20)
+#define MC_CLIENT_HOTRESET_CTRL0_TSEC_FLUSH_ENB (1 << 22)
+#define MC_CLIENT_HOTRESET_CTRL0_SDMMC1A_FLUSH_ENB (1 << 29)
+#define MC_CLIENT_HOTRESET_CTRL0_SDMMC2A_FLUSH_ENB (1 << 30)
+#define MC_CLIENT_HOTRESET_CTRL0_SDMMC3A_FLUSH_ENB (1 << 31)
+#define MC_CLIENT_HOTRESET_STATUS0 0x204
+#define MC_CLIENT_HOTRESET_CTRL1 0x970
+#define MC_CLIENT_HOTRESET_CTRL1_RESET_VAL 0
+#define MC_CLIENT_HOTRESET_CTRL1_SDMMC4A_FLUSH_ENB (1 << 0)
+#define MC_CLIENT_HOTRESET_CTRL1_GPU_FLUSH_ENB (1 << 2)
+#define MC_CLIENT_HOTRESET_CTRL1_NVDEC_FLUSH_ENB (1 << 5)
+#define MC_CLIENT_HOTRESET_CTRL1_APE_FLUSH_ENB (1 << 6)
+#define MC_CLIENT_HOTRESET_CTRL1_SE_FLUSH_ENB (1 << 7)
+#define MC_CLIENT_HOTRESET_CTRL1_NVJPG_FLUSH_ENB (1 << 8)
+#define MC_CLIENT_HOTRESET_CTRL1_ETR_FLUSH_ENB (1 << 12)
+#define MC_CLIENT_HOTRESET_CTRL1_TSECB_FLUSH_ENB (1 << 13)
+#define MC_CLIENT_HOTRESET_CTRL1_AXIS_FLUSH_ENB (1 << 18)
+#define MC_CLIENT_HOTRESET_CTRL1_EQOS_FLUSH_ENB (1 << 19)
+#define MC_CLIENT_HOTRESET_CTRL1_UFSHC_FLUSH_ENB (1 << 20)
+#define MC_CLIENT_HOTRESET_CTRL1_NVDISPLAY_FLUSH_ENB (1 << 21)
+#define MC_CLIENT_HOTRESET_CTRL1_BPMP_FLUSH_ENB (1 << 22)
+#define MC_CLIENT_HOTRESET_CTRL1_AON_FLUSH_ENB (1 << 23)
+#define MC_CLIENT_HOTRESET_CTRL1_SCE_FLUSH_ENB (1 << 24)
+#define MC_CLIENT_HOTRESET_STATUS1 0x974
+
+/*******************************************************************************
+ * Memory Controller's PCFIFO client configuration registers
+ ******************************************************************************/
+#define MC_PCFIFO_CLIENT_CONFIG1 0xdd4
+#define MC_PCFIFO_CLIENT_CONFIG1_RESET_VAL 0x20000
+#define MC_PCFIFO_CLIENT_CONFIG1_PCFIFO_AFIW_UNORDERED (0 << 17)
+#define MC_PCFIFO_CLIENT_CONFIG1_PCFIFO_AFIW_MASK (1 << 17)
+#define MC_PCFIFO_CLIENT_CONFIG1_PCFIFO_HDAW_UNORDERED (0 << 21)
+#define MC_PCFIFO_CLIENT_CONFIG1_PCFIFO_HDAW_MASK (1 << 21)
+#define MC_PCFIFO_CLIENT_CONFIG1_PCFIFO_SATAW_UNORDERED (0 << 29)
+#define MC_PCFIFO_CLIENT_CONFIG1_PCFIFO_SATAW_MASK (1 << 29)
+
+#define MC_PCFIFO_CLIENT_CONFIG2 0xdd8
+#define MC_PCFIFO_CLIENT_CONFIG2_RESET_VAL 0x20000
+#define MC_PCFIFO_CLIENT_CONFIG2_PCFIFO_XUSB_HOSTW_UNORDERED (0 << 11)
+#define MC_PCFIFO_CLIENT_CONFIG2_PCFIFO_XUSB_HOSTW_MASK (1 << 11)
+#define MC_PCFIFO_CLIENT_CONFIG2_PCFIFO_XUSB_DEVW_UNORDERED (0 << 13)
+#define MC_PCFIFO_CLIENT_CONFIG2_PCFIFO_XUSB_DEVW_MASK (1 << 13)
+
+#define MC_PCFIFO_CLIENT_CONFIG3 0xddc
+#define MC_PCFIFO_CLIENT_CONFIG3_RESET_VAL 0
+#define MC_PCFIFO_CLIENT_CONFIG3_PCFIFO_SDMMCWAB_UNORDERED (0 << 7)
+#define MC_PCFIFO_CLIENT_CONFIG3_PCFIFO_SDMMCWAB_MASK (1 << 7)
+
+#define MC_PCFIFO_CLIENT_CONFIG4 0xde0
+#define MC_PCFIFO_CLIENT_CONFIG4_RESET_VAL 0
+#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_SESWR_UNORDERED (0 << 1)
+#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_SESWR_MASK (1 << 1)
+#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_ETRW_UNORDERED (0 << 5)
+#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_ETRW_MASK (1 << 5)
+#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_AXISW_UNORDERED (0 << 13)
+#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_AXISW_MASK (1 << 13)
+#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_EQOSW_UNORDERED (0 << 15)
+#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_EQOSW_MASK (1 << 15)
+#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_UFSHCW_UNORDERED (0 << 17)
+#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_UFSHCW_MASK (1 << 17)
+#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_BPMPDMAW_UNORDERED (0 << 22)
+#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_BPMPDMAW_MASK (1 << 22)
+#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_AONDMAW_UNORDERED (0 << 26)
+#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_AONDMAW_MASK (1 << 26)
+#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_SCEDMAW_UNORDERED (0 << 30)
+#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_SCEDMAW_MASK (1 << 30)
+
+#define MC_PCFIFO_CLIENT_CONFIG5 0xbf4
+#define MC_PCFIFO_CLIENT_CONFIG5_RESET_VAL 0
+#define MC_PCFIFO_CLIENT_CONFIG5_PCFIFO_APEDMAW_UNORDERED (0 << 0)
+#define MC_PCFIFO_CLIENT_CONFIG5_PCFIFO_APEDMAW_MASK (1 << 0)
+
+/*******************************************************************************
+ * Memory Controller's SMMU client configuration registers
+ ******************************************************************************/
+#define MC_SMMU_CLIENT_CONFIG1 0x44
+#define MC_SMMU_CLIENT_CONFIG1_RESET_VAL 0x20000
+#define MC_SMMU_CLIENT_CONFIG1_AFIW_UNORDERED (0 << 17)
+#define MC_SMMU_CLIENT_CONFIG1_AFIW_MASK (1 << 17)
+#define MC_SMMU_CLIENT_CONFIG1_HDAW_UNORDERED (0 << 21)
+#define MC_SMMU_CLIENT_CONFIG1_HDAW_MASK (1 << 21)
+#define MC_SMMU_CLIENT_CONFIG1_SATAW_UNORDERED (0 << 29)
+#define MC_SMMU_CLIENT_CONFIG1_SATAW_MASK (1 << 29)
+
+#define MC_SMMU_CLIENT_CONFIG2 0x48
+#define MC_SMMU_CLIENT_CONFIG2_RESET_VAL 0x20000
+#define MC_SMMU_CLIENT_CONFIG2_XUSB_HOSTW_UNORDERED (0 << 11)
+#define MC_SMMU_CLIENT_CONFIG2_XUSB_HOSTW_MASK (1 << 11)
+#define MC_SMMU_CLIENT_CONFIG2_XUSB_DEVW_UNORDERED (0 << 13)
+#define MC_SMMU_CLIENT_CONFIG2_XUSB_DEVW_MASK (1 << 13)
+
+#define MC_SMMU_CLIENT_CONFIG3 0x4c
+#define MC_SMMU_CLIENT_CONFIG3_RESET_VAL 0
+#define MC_SMMU_CLIENT_CONFIG3_SDMMCWAB_UNORDERED (0 << 7)
+#define MC_SMMU_CLIENT_CONFIG3_SDMMCWAB_MASK (1 << 7)
+
+#define MC_SMMU_CLIENT_CONFIG4 0xb9c
+#define MC_SMMU_CLIENT_CONFIG4_RESET_VAL 0
+#define MC_SMMU_CLIENT_CONFIG4_SESWR_UNORDERED (0 << 1)
+#define MC_SMMU_CLIENT_CONFIG4_SESWR_MASK (1 << 1)
+#define MC_SMMU_CLIENT_CONFIG4_ETRW_UNORDERED (0 << 5)
+#define MC_SMMU_CLIENT_CONFIG4_ETRW_MASK (1 << 5)
+#define MC_SMMU_CLIENT_CONFIG4_AXISW_UNORDERED (0 << 13)
+#define MC_SMMU_CLIENT_CONFIG4_AXISW_MASK (1 << 13)
+#define MC_SMMU_CLIENT_CONFIG4_EQOSW_UNORDERED (0 << 15)
+#define MC_SMMU_CLIENT_CONFIG4_EQOSW_MASK (1 << 15)
+#define MC_SMMU_CLIENT_CONFIG4_UFSHCW_UNORDERED (0 << 17)
+#define MC_SMMU_CLIENT_CONFIG4_UFSHCW_MASK (1 << 17)
+#define MC_SMMU_CLIENT_CONFIG4_BPMPDMAW_UNORDERED (0 << 22)
+#define MC_SMMU_CLIENT_CONFIG4_BPMPDMAW_MASK (1 << 22)
+#define MC_SMMU_CLIENT_CONFIG4_AONDMAW_UNORDERED (0 << 26)
+#define MC_SMMU_CLIENT_CONFIG4_AONDMAW_MASK (1 << 26)
+#define MC_SMMU_CLIENT_CONFIG4_SCEDMAW_UNORDERED (0 << 30)
+#define MC_SMMU_CLIENT_CONFIG4_SCEDMAW_MASK (1 << 30)
+
+#define MC_SMMU_CLIENT_CONFIG5 0xbac
+#define MC_SMMU_CLIENT_CONFIG5_RESET_VAL 0
+#define MC_SMMU_CLIENT_CONFIG5_APEDMAW_UNORDERED (0 << 0)
+#define MC_SMMU_CLIENT_CONFIG5_APEDMAW_MASK (1 << 0)
+
+#ifndef __ASSEMBLY__
+
+#include <mmio.h>
+
+static inline uint32_t tegra_mc_read_32(uint32_t off)
+{
+ return mmio_read_32(TEGRA_MC_BASE + off);
+}
+
+static inline void tegra_mc_write_32(uint32_t off, uint32_t val)
+{
+ mmio_write_32(TEGRA_MC_BASE + off, val);
+}
+
+static inline uint32_t tegra_mc_streamid_read_32(uint32_t off)
+{
+ return mmio_read_32(TEGRA_MC_STREAMID_BASE + off);
+}
+
+static inline void tegra_mc_streamid_write_32(uint32_t off, uint32_t val)
+{
+ mmio_write_32(TEGRA_MC_STREAMID_BASE + off, val);
+}
+
+#define mc_set_pcfifo_unordered_boot_so_mss(id, client) \
+ (~MC_PCFIFO_CLIENT_CONFIG##id##_PCFIFO_##client##_MASK | \
+ MC_PCFIFO_CLIENT_CONFIG##id##_PCFIFO_##client##_UNORDERED)
+
+#define mc_set_smmu_unordered_boot_so_mss(id, client) \
+	(~MC_SMMU_CLIENT_CONFIG##id##_##client##_MASK | \
+	MC_SMMU_CLIENT_CONFIG##id##_##client##_UNORDERED)
+
+#define mc_set_tsa_passthrough(client) \
+ { \
+ mmio_write_32(TEGRA_TSA_BASE + TSA_CONFIG_STATIC0_CSW_##client, \
+ (TSA_CONFIG_STATIC0_CSW_##client##_RESET & \
+ ~TSA_CONFIG_CSW_MEMTYPE_OVERRIDE_MASK) | \
+ TSA_CONFIG_CSW_MEMTYPE_OVERRIDE_PASTHRU); \
+ }
+
+#define mc_set_forced_coherent_cfg(client) \
+ { \
+ tegra_mc_write_32(MC_TXN_OVERRIDE_CONFIG_##client, \
+ MC_TXN_OVERRIDE_CONFIG_COH_PATH_OVERRIDE_SO_DEV); \
+ }
+
+#define mc_set_forced_coherent_so_dev_cfg(client) \
+ { \
+ tegra_mc_write_32(MC_TXN_OVERRIDE_CONFIG_##client, \
+ MC_TXN_OVERRIDE_CONFIG_COH_PATH_OVERRIDE_SO_DEV | \
+ MC_TXN_OVERRIDE_CONFIG_AXID_OVERRIDE_SO_DEV_CGID_SO_DEV_CLIENT); \
+ }
+
+#define mc_set_forced_coherent_axid_so_dev_cfg(client) \
+ { \
+ tegra_mc_write_32(MC_TXN_OVERRIDE_CONFIG_##client, \
+ MC_TXN_OVERRIDE_CONFIG_COH_PATH_OVERRIDE_SO_DEV | \
+ MC_TXN_OVERRIDE_CONFIG_AXID_OVERRIDE_CGID | \
+ MC_TXN_OVERRIDE_CONFIG_AXID_OVERRIDE_SO_DEV_CGID_SO_DEV_CLIENT); \
+ }
+
+/*******************************************************************************
+ * Handler to read memory configuration settings
+ *
+ * Implemented by SoCs under tegra/soc/txxx
+ ******************************************************************************/
+tegra_mc_settings_t *tegra_get_mc_settings(void);
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __MEMCTRLV2_H__ */
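
An SoC port is expected to return its configuration tables through
tegra_get_mc_settings(). The snippet below is a minimal, hypothetical example
of such a table using the helper macros above; the real tables under
soc/t186 cover every memory client and also fill the stream ID arrays.

/*
 * Sketch only: a two-entry transaction override table exposed via the
 * SoC hook declared above.
 */
#include <memctrl_v2.h>

static const mc_txn_override_cfg_t example_txn_override_cfgs[] = {
	mc_make_txn_override_cfg(HDAR, CGID_TAG_DEFAULT),
	mc_make_txn_override_cfg(SATAR, CGID_TAG_DEFAULT),
};

static tegra_mc_settings_t example_mc_settings = {
	.txn_override_cfg = example_txn_override_cfgs,
	.num_txn_override_cfgs = sizeof(example_txn_override_cfgs) /
				 sizeof(example_txn_override_cfgs[0]),
};

/* hypothetical SoC hook; real ports also populate the stream ID tables */
tegra_mc_settings_t *tegra_get_mc_settings(void)
{
	return &example_mc_settings;
}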
diff --git a/plat/nvidia/tegra/include/drivers/pmc.h b/plat/nvidia/tegra/include/drivers/pmc.h
new file mode 100644
index 00000000..ea9392b6
--- /dev/null
+++ b/plat/nvidia/tegra/include/drivers/pmc.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __PMC_H__
+#define __PMC_H__
+
+#include <mmio.h>
+#include <tegra_def.h>
+#include <utils_def.h>
+
+#define PMC_CONFIG U(0x0)
+#define PMC_PWRGATE_STATUS U(0x38)
+#define PMC_PWRGATE_TOGGLE U(0x30)
+#define PMC_TOGGLE_START U(0x100)
+#define PMC_SCRATCH39 U(0x138)
+#define PMC_SECURE_DISABLE2 U(0x2c4)
+#define PMC_SECURE_DISABLE2_WRITE22_ON (U(1) << 28)
+#define PMC_SECURE_SCRATCH22 U(0x338)
+#define PMC_SECURE_DISABLE3 U(0x2d8)
+#define PMC_SECURE_DISABLE3_WRITE34_ON (U(1) << 20)
+#define PMC_SECURE_DISABLE3_WRITE35_ON (U(1) << 22)
+#define PMC_SECURE_SCRATCH34 U(0x368)
+#define PMC_SECURE_SCRATCH35 U(0x36c)
+
+static inline uint32_t tegra_pmc_read_32(uint32_t off)
+{
+ return mmio_read_32(TEGRA_PMC_BASE + off);
+}
+
+static inline void tegra_pmc_write_32(uint32_t off, uint32_t val)
+{
+ mmio_write_32(TEGRA_PMC_BASE + off, val);
+}
+
+void tegra_pmc_cpu_setup(uint64_t reset_addr);
+void tegra_pmc_lock_cpu_vectors(void);
+void tegra_pmc_cpu_on(int32_t cpu);
+__dead2 void tegra_pmc_system_reset(void);
+
+#endif /* __PMC_H__ */
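
The PMC accessors above are typically used for power-gate control. Below is a
hedged sketch of an ungate helper; the partition ID is a caller-supplied value
and the completion polling done by the real pmc.c driver is omitted.

/*
 * Sketch only: power-ungates a partition through the PMC toggle
 * register if its status bit shows it is currently gated.
 */
#include <pmc.h>

static void example_pmc_power_ungate(uint32_t partition)
{
	if ((tegra_pmc_read_32(PMC_PWRGATE_STATUS) &
	     (1U << partition)) == 0U) {
		tegra_pmc_write_32(PMC_PWRGATE_TOGGLE,
				   PMC_TOGGLE_START | partition);
	}
}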
diff --git a/plat/nvidia/tegra/include/drivers/smmu.h b/plat/nvidia/tegra/include/drivers/smmu.h
new file mode 100644
index 00000000..86e911a2
--- /dev/null
+++ b/plat/nvidia/tegra/include/drivers/smmu.h
@@ -0,0 +1,708 @@
+/*
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __SMMU_H
+#define __SMMU_H
+
+#include <memctrl_v2.h>
+#include <mmio.h>
+#include <tegra_def.h>
+
+/*******************************************************************************
+ * SMMU Register constants
+ ******************************************************************************/
+#define SMMU_CBn_SCTLR (0x0U)
+#define SMMU_CBn_SCTLR_STAGE2 (0x0U)
+#define SMMU_CBn_ACTLR (0x4U)
+#define SMMU_CBn_RESUME (0x8U)
+#define SMMU_CBn_TCR2 (0x10U)
+#define SMMU_CBn_TTBR0_LO (0x20U)
+#define SMMU_CBn_TTBR0_HI (0x24U)
+#define SMMU_CBn_TTBR1_LO (0x28U)
+#define SMMU_CBn_TTBR1_HI (0x2cU)
+#define SMMU_CBn_TCR_LPAE (0x30U)
+#define SMMU_CBn_TCR (0x30U)
+#define SMMU_CBn_TCR_EAE_1 (0x30U)
+#define SMMU_CBn_CONTEXTIDR (0x34U)
+#define SMMU_CBn_CONTEXTIDR_EAE_1 (0x34U)
+#define SMMU_CBn_PRRR_MAIR0 (0x38U)
+#define SMMU_CBn_NMRR_MAIR1 (0x3cU)
+#define SMMU_CBn_SMMU_CBn_PAR (0x50U)
+#define SMMU_CBn_SMMU_CBn_PAR0 (0x50U)
+#define SMMU_CBn_SMMU_CBn_PAR1 (0x54U)
+/* SMMU_CBn_SMMU_CBn_PAR0_Fault (0x50U) */
+/* SMMU_CBn_SMMU_CBn_PAR0_Fault (0x54U) */
+#define SMMU_CBn_FSR (0x58U)
+#define SMMU_CBn_FSRRESTORE (0x5cU)
+#define SMMU_CBn_FAR_LO (0x60U)
+#define SMMU_CBn_FAR_HI (0x64U)
+#define SMMU_CBn_FSYNR0 (0x68U)
+#define SMMU_CBn_IPAFAR_LO (0x70U)
+#define SMMU_CBn_IPAFAR_HI (0x74U)
+#define SMMU_CBn_TLBIVA_LO (0x600U)
+#define SMMU_CBn_TLBIVA_HI (0x604U)
+#define SMMU_CBn_TLBIVA_AARCH_32 (0x600U)
+#define SMMU_CBn_TLBIVAA_LO (0x608U)
+#define SMMU_CBn_TLBIVAA_HI (0x60cU)
+#define SMMU_CBn_TLBIVAA_AARCH_32 (0x608U)
+#define SMMU_CBn_TLBIASID (0x610U)
+#define SMMU_CBn_TLBIALL (0x618U)
+#define SMMU_CBn_TLBIVAL_LO (0x620U)
+#define SMMU_CBn_TLBIVAL_HI (0x624U)
+#define SMMU_CBn_TLBIVAL_AARCH_32 (0x618U)
+#define SMMU_CBn_TLBIVAAL_LO (0x628U)
+#define SMMU_CBn_TLBIVAAL_HI (0x62cU)
+#define SMMU_CBn_TLBIVAAL_AARCH_32 (0x628U)
+#define SMMU_CBn_TLBIIPAS2_LO (0x630U)
+#define SMMU_CBn_TLBIIPAS2_HI (0x634U)
+#define SMMU_CBn_TLBIIPAS2L_LO (0x638U)
+#define SMMU_CBn_TLBIIPAS2L_HI (0x63cU)
+#define SMMU_CBn_TLBSYNC (0x7f0U)
+#define SMMU_CBn_TLBSTATUS (0x7f4U)
+#define SMMU_CBn_ATSR (0x800U)
+#define SMMU_CBn_PMEVCNTR0 (0xe00U)
+#define SMMU_CBn_PMEVCNTR1 (0xe04U)
+#define SMMU_CBn_PMEVCNTR2 (0xe08U)
+#define SMMU_CBn_PMEVCNTR3 (0xe0cU)
+#define SMMU_CBn_PMEVTYPER0 (0xe80U)
+#define SMMU_CBn_PMEVTYPER1 (0xe84U)
+#define SMMU_CBn_PMEVTYPER2 (0xe88U)
+#define SMMU_CBn_PMEVTYPER3 (0xe8cU)
+#define SMMU_CBn_PMCFGR (0xf00U)
+#define SMMU_CBn_PMCR (0xf04U)
+#define SMMU_CBn_PMCEID (0xf20U)
+#define SMMU_CBn_PMCNTENSE (0xf40U)
+#define SMMU_CBn_PMCNTENCLR (0xf44U)
+#define SMMU_CBn_PMCNTENSET (0xf48U)
+#define SMMU_CBn_PMINTENCLR (0xf4cU)
+#define SMMU_CBn_PMOVSCLR (0xf50U)
+#define SMMU_CBn_PMOVSSET (0xf58U)
+#define SMMU_CBn_PMAUTHSTATUS (0xfb8U)
+#define SMMU_GNSR0_CR0 (0x0U)
+#define SMMU_GNSR0_CR2 (0x8U)
+#define SMMU_GNSR0_ACR (0x10U)
+#define SMMU_GNSR0_IDR0 (0x20U)
+#define SMMU_GNSR0_IDR1 (0x24U)
+#define SMMU_GNSR0_IDR2 (0x28U)
+#define SMMU_GNSR0_IDR7 (0x3cU)
+#define SMMU_GNSR0_GFAR_LO (0x40U)
+#define SMMU_GNSR0_GFAR_HI (0x44U)
+#define SMMU_GNSR0_GFSR (0x48U)
+#define SMMU_GNSR0_GFSRRESTORE (0x4cU)
+#define SMMU_GNSR0_GFSYNR0 (0x50U)
+#define SMMU_GNSR0_GFSYNR1 (0x54U)
+#define SMMU_GNSR0_GFSYNR1_v2 (0x54U)
+#define SMMU_GNSR0_TLBIVMID (0x64U)
+#define SMMU_GNSR0_TLBIALLNSNH (0x68U)
+#define SMMU_GNSR0_TLBIALLH (0x6cU)
+#define SMMU_GNSR0_TLBGSYNC (0x70U)
+#define SMMU_GNSR0_TLBGSTATUS (0x74U)
+#define SMMU_GNSR0_TLBIVAH_LO (0x78U)
+#define SMMU_GNSR0_TLBIVALH64_LO (0xb0U)
+#define SMMU_GNSR0_TLBIVALH64_HI (0xb4U)
+#define SMMU_GNSR0_TLBIVMIDS1 (0xb8U)
+#define SMMU_GNSR0_TLBIVAH64_LO (0xc0U)
+#define SMMU_GNSR0_TLBIVAH64_HI (0xc4U)
+#define SMMU_GNSR0_SMR0 (0x800U)
+#define SMMU_GNSR0_SMRn (0x800U)
+#define SMMU_GNSR0_SMR1 (0x804U)
+#define SMMU_GNSR0_SMR2 (0x808U)
+#define SMMU_GNSR0_SMR3 (0x80cU)
+#define SMMU_GNSR0_SMR4 (0x810U)
+#define SMMU_GNSR0_SMR5 (0x814U)
+#define SMMU_GNSR0_SMR6 (0x818U)
+#define SMMU_GNSR0_SMR7 (0x81cU)
+#define SMMU_GNSR0_SMR8 (0x820U)
+#define SMMU_GNSR0_SMR9 (0x824U)
+#define SMMU_GNSR0_SMR10 (0x828U)
+#define SMMU_GNSR0_SMR11 (0x82cU)
+#define SMMU_GNSR0_SMR12 (0x830U)
+#define SMMU_GNSR0_SMR13 (0x834U)
+#define SMMU_GNSR0_SMR14 (0x838U)
+#define SMMU_GNSR0_SMR15 (0x83cU)
+#define SMMU_GNSR0_SMR16 (0x840U)
+#define SMMU_GNSR0_SMR17 (0x844U)
+#define SMMU_GNSR0_SMR18 (0x848U)
+#define SMMU_GNSR0_SMR19 (0x84cU)
+#define SMMU_GNSR0_SMR20 (0x850U)
+#define SMMU_GNSR0_SMR21 (0x854U)
+#define SMMU_GNSR0_SMR22 (0x858U)
+#define SMMU_GNSR0_SMR23 (0x85cU)
+#define SMMU_GNSR0_SMR24 (0x860U)
+#define SMMU_GNSR0_SMR25 (0x864U)
+#define SMMU_GNSR0_SMR26 (0x868U)
+#define SMMU_GNSR0_SMR27 (0x86cU)
+#define SMMU_GNSR0_SMR28 (0x870U)
+#define SMMU_GNSR0_SMR29 (0x874U)
+#define SMMU_GNSR0_SMR30 (0x878U)
+#define SMMU_GNSR0_SMR31 (0x87cU)
+#define SMMU_GNSR0_SMR32 (0x880U)
+#define SMMU_GNSR0_SMR33 (0x884U)
+#define SMMU_GNSR0_SMR34 (0x888U)
+#define SMMU_GNSR0_SMR35 (0x88cU)
+#define SMMU_GNSR0_SMR36 (0x890U)
+#define SMMU_GNSR0_SMR37 (0x894U)
+#define SMMU_GNSR0_SMR38 (0x898U)
+#define SMMU_GNSR0_SMR39 (0x89cU)
+#define SMMU_GNSR0_SMR40 (0x8a0U)
+#define SMMU_GNSR0_SMR41 (0x8a4U)
+#define SMMU_GNSR0_SMR42 (0x8a8U)
+#define SMMU_GNSR0_SMR43 (0x8acU)
+#define SMMU_GNSR0_SMR44 (0x8b0U)
+#define SMMU_GNSR0_SMR45 (0x8b4U)
+#define SMMU_GNSR0_SMR46 (0x8b8U)
+#define SMMU_GNSR0_SMR47 (0x8bcU)
+#define SMMU_GNSR0_SMR48 (0x8c0U)
+#define SMMU_GNSR0_SMR49 (0x8c4U)
+#define SMMU_GNSR0_SMR50 (0x8c8U)
+#define SMMU_GNSR0_SMR51 (0x8ccU)
+#define SMMU_GNSR0_SMR52 (0x8d0U)
+#define SMMU_GNSR0_SMR53 (0x8d4U)
+#define SMMU_GNSR0_SMR54 (0x8d8U)
+#define SMMU_GNSR0_SMR55 (0x8dcU)
+#define SMMU_GNSR0_SMR56 (0x8e0U)
+#define SMMU_GNSR0_SMR57 (0x8e4U)
+#define SMMU_GNSR0_SMR58 (0x8e8U)
+#define SMMU_GNSR0_SMR59 (0x8ecU)
+#define SMMU_GNSR0_SMR60 (0x8f0U)
+#define SMMU_GNSR0_SMR61 (0x8f4U)
+#define SMMU_GNSR0_SMR62 (0x8f8U)
+#define SMMU_GNSR0_SMR63 (0x8fcU)
+#define SMMU_GNSR0_SMR64 (0x900U)
+#define SMMU_GNSR0_SMR65 (0x904U)
+#define SMMU_GNSR0_SMR66 (0x908U)
+#define SMMU_GNSR0_SMR67 (0x90cU)
+#define SMMU_GNSR0_SMR68 (0x910U)
+#define SMMU_GNSR0_SMR69 (0x914U)
+#define SMMU_GNSR0_SMR70 (0x918U)
+#define SMMU_GNSR0_SMR71 (0x91cU)
+#define SMMU_GNSR0_SMR72 (0x920U)
+#define SMMU_GNSR0_SMR73 (0x924U)
+#define SMMU_GNSR0_SMR74 (0x928U)
+#define SMMU_GNSR0_SMR75 (0x92cU)
+#define SMMU_GNSR0_SMR76 (0x930U)
+#define SMMU_GNSR0_SMR77 (0x934U)
+#define SMMU_GNSR0_SMR78 (0x938U)
+#define SMMU_GNSR0_SMR79 (0x93cU)
+#define SMMU_GNSR0_SMR80 (0x940U)
+#define SMMU_GNSR0_SMR81 (0x944U)
+#define SMMU_GNSR0_SMR82 (0x948U)
+#define SMMU_GNSR0_SMR83 (0x94cU)
+#define SMMU_GNSR0_SMR84 (0x950U)
+#define SMMU_GNSR0_SMR85 (0x954U)
+#define SMMU_GNSR0_SMR86 (0x958U)
+#define SMMU_GNSR0_SMR87 (0x95cU)
+#define SMMU_GNSR0_SMR88 (0x960U)
+#define SMMU_GNSR0_SMR89 (0x964U)
+#define SMMU_GNSR0_SMR90 (0x968U)
+#define SMMU_GNSR0_SMR91 (0x96cU)
+#define SMMU_GNSR0_SMR92 (0x970U)
+#define SMMU_GNSR0_SMR93 (0x974U)
+#define SMMU_GNSR0_SMR94 (0x978U)
+#define SMMU_GNSR0_SMR95 (0x97cU)
+#define SMMU_GNSR0_SMR96 (0x980U)
+#define SMMU_GNSR0_SMR97 (0x984U)
+#define SMMU_GNSR0_SMR98 (0x988U)
+#define SMMU_GNSR0_SMR99 (0x98cU)
+#define SMMU_GNSR0_SMR100 (0x990U)
+#define SMMU_GNSR0_SMR101 (0x994U)
+#define SMMU_GNSR0_SMR102 (0x998U)
+#define SMMU_GNSR0_SMR103 (0x99cU)
+#define SMMU_GNSR0_SMR104 (0x9a0U)
+#define SMMU_GNSR0_SMR105 (0x9a4U)
+#define SMMU_GNSR0_SMR106 (0x9a8U)
+#define SMMU_GNSR0_SMR107 (0x9acU)
+#define SMMU_GNSR0_SMR108 (0x9b0U)
+#define SMMU_GNSR0_SMR109 (0x9b4U)
+#define SMMU_GNSR0_SMR110 (0x9b8U)
+#define SMMU_GNSR0_SMR111 (0x9bcU)
+#define SMMU_GNSR0_SMR112 (0x9c0U)
+#define SMMU_GNSR0_SMR113 (0x9c4U)
+#define SMMU_GNSR0_SMR114 (0x9c8U)
+#define SMMU_GNSR0_SMR115 (0x9ccU)
+#define SMMU_GNSR0_SMR116 (0x9d0U)
+#define SMMU_GNSR0_SMR117 (0x9d4U)
+#define SMMU_GNSR0_SMR118 (0x9d8U)
+#define SMMU_GNSR0_SMR119 (0x9dcU)
+#define SMMU_GNSR0_SMR120 (0x9e0U)
+#define SMMU_GNSR0_SMR121 (0x9e4U)
+#define SMMU_GNSR0_SMR122 (0x9e8U)
+#define SMMU_GNSR0_SMR123 (0x9ecU)
+#define SMMU_GNSR0_SMR124 (0x9f0U)
+#define SMMU_GNSR0_SMR125 (0x9f4U)
+#define SMMU_GNSR0_SMR126 (0x9f8U)
+#define SMMU_GNSR0_SMR127 (0x9fcU)
+#define SMMU_GNSR0_S2CR0 (0xc00U)
+#define SMMU_GNSR0_S2CRn (0xc00U)
+#define SMMU_GNSR0_S2CR1 (0xc04U)
+#define SMMU_GNSR0_S2CR2 (0xc08U)
+#define SMMU_GNSR0_S2CR3 (0xc0cU)
+#define SMMU_GNSR0_S2CR4 (0xc10U)
+#define SMMU_GNSR0_S2CR5 (0xc14U)
+#define SMMU_GNSR0_S2CR6 (0xc18U)
+#define SMMU_GNSR0_S2CR7 (0xc1cU)
+#define SMMU_GNSR0_S2CR8 (0xc20U)
+#define SMMU_GNSR0_S2CR9 (0xc24U)
+#define SMMU_GNSR0_S2CR10 (0xc28U)
+#define SMMU_GNSR0_S2CR11 (0xc2cU)
+#define SMMU_GNSR0_S2CR12 (0xc30U)
+#define SMMU_GNSR0_S2CR13 (0xc34U)
+#define SMMU_GNSR0_S2CR14 (0xc38U)
+#define SMMU_GNSR0_S2CR15 (0xc3cU)
+#define SMMU_GNSR0_S2CR16 (0xc40U)
+#define SMMU_GNSR0_S2CR17 (0xc44U)
+#define SMMU_GNSR0_S2CR18 (0xc48U)
+#define SMMU_GNSR0_S2CR19 (0xc4cU)
+#define SMMU_GNSR0_S2CR20 (0xc50U)
+#define SMMU_GNSR0_S2CR21 (0xc54U)
+#define SMMU_GNSR0_S2CR22 (0xc58U)
+#define SMMU_GNSR0_S2CR23 (0xc5cU)
+#define SMMU_GNSR0_S2CR24 (0xc60U)
+#define SMMU_GNSR0_S2CR25 (0xc64U)
+#define SMMU_GNSR0_S2CR26 (0xc68U)
+#define SMMU_GNSR0_S2CR27 (0xc6cU)
+#define SMMU_GNSR0_S2CR28 (0xc70U)
+#define SMMU_GNSR0_S2CR29 (0xc74U)
+#define SMMU_GNSR0_S2CR30 (0xc78U)
+#define SMMU_GNSR0_S2CR31 (0xc7cU)
+#define SMMU_GNSR0_S2CR32 (0xc80U)
+#define SMMU_GNSR0_S2CR33 (0xc84U)
+#define SMMU_GNSR0_S2CR34 (0xc88U)
+#define SMMU_GNSR0_S2CR35 (0xc8cU)
+#define SMMU_GNSR0_S2CR36 (0xc90U)
+#define SMMU_GNSR0_S2CR37 (0xc94U)
+#define SMMU_GNSR0_S2CR38 (0xc98U)
+#define SMMU_GNSR0_S2CR39 (0xc9cU)
+#define SMMU_GNSR0_S2CR40 (0xca0U)
+#define SMMU_GNSR0_S2CR41 (0xca4U)
+#define SMMU_GNSR0_S2CR42 (0xca8U)
+#define SMMU_GNSR0_S2CR43 (0xcacU)
+#define SMMU_GNSR0_S2CR44 (0xcb0U)
+#define SMMU_GNSR0_S2CR45 (0xcb4U)
+#define SMMU_GNSR0_S2CR46 (0xcb8U)
+#define SMMU_GNSR0_S2CR47 (0xcbcU)
+#define SMMU_GNSR0_S2CR48 (0xcc0U)
+#define SMMU_GNSR0_S2CR49 (0xcc4U)
+#define SMMU_GNSR0_S2CR50 (0xcc8U)
+#define SMMU_GNSR0_S2CR51 (0xcccU)
+#define SMMU_GNSR0_S2CR52 (0xcd0U)
+#define SMMU_GNSR0_S2CR53 (0xcd4U)
+#define SMMU_GNSR0_S2CR54 (0xcd8U)
+#define SMMU_GNSR0_S2CR55 (0xcdcU)
+#define SMMU_GNSR0_S2CR56 (0xce0U)
+#define SMMU_GNSR0_S2CR57 (0xce4U)
+#define SMMU_GNSR0_S2CR58 (0xce8U)
+#define SMMU_GNSR0_S2CR59 (0xcecU)
+#define SMMU_GNSR0_S2CR60 (0xcf0U)
+#define SMMU_GNSR0_S2CR61 (0xcf4U)
+#define SMMU_GNSR0_S2CR62 (0xcf8U)
+#define SMMU_GNSR0_S2CR63 (0xcfcU)
+#define SMMU_GNSR0_S2CR64 (0xd00U)
+#define SMMU_GNSR0_S2CR65 (0xd04U)
+#define SMMU_GNSR0_S2CR66 (0xd08U)
+#define SMMU_GNSR0_S2CR67 (0xd0cU)
+#define SMMU_GNSR0_S2CR68 (0xd10U)
+#define SMMU_GNSR0_S2CR69 (0xd14U)
+#define SMMU_GNSR0_S2CR70 (0xd18U)
+#define SMMU_GNSR0_S2CR71 (0xd1cU)
+#define SMMU_GNSR0_S2CR72 (0xd20U)
+#define SMMU_GNSR0_S2CR73 (0xd24U)
+#define SMMU_GNSR0_S2CR74 (0xd28U)
+#define SMMU_GNSR0_S2CR75 (0xd2cU)
+#define SMMU_GNSR0_S2CR76 (0xd30U)
+#define SMMU_GNSR0_S2CR77 (0xd34U)
+#define SMMU_GNSR0_S2CR78 (0xd38U)
+#define SMMU_GNSR0_S2CR79 (0xd3cU)
+#define SMMU_GNSR0_S2CR80 (0xd40U)
+#define SMMU_GNSR0_S2CR81 (0xd44U)
+#define SMMU_GNSR0_S2CR82 (0xd48U)
+#define SMMU_GNSR0_S2CR83 (0xd4cU)
+#define SMMU_GNSR0_S2CR84 (0xd50U)
+#define SMMU_GNSR0_S2CR85 (0xd54U)
+#define SMMU_GNSR0_S2CR86 (0xd58U)
+#define SMMU_GNSR0_S2CR87 (0xd5cU)
+#define SMMU_GNSR0_S2CR88 (0xd60U)
+#define SMMU_GNSR0_S2CR89 (0xd64U)
+#define SMMU_GNSR0_S2CR90 (0xd68U)
+#define SMMU_GNSR0_S2CR91 (0xd6cU)
+#define SMMU_GNSR0_S2CR92 (0xd70U)
+#define SMMU_GNSR0_S2CR93 (0xd74U)
+#define SMMU_GNSR0_S2CR94 (0xd78U)
+#define SMMU_GNSR0_S2CR95 (0xd7cU)
+#define SMMU_GNSR0_S2CR96 (0xd80U)
+#define SMMU_GNSR0_S2CR97 (0xd84U)
+#define SMMU_GNSR0_S2CR98 (0xd88U)
+#define SMMU_GNSR0_S2CR99 (0xd8cU)
+#define SMMU_GNSR0_S2CR100 (0xd90U)
+#define SMMU_GNSR0_S2CR101 (0xd94U)
+#define SMMU_GNSR0_S2CR102 (0xd98U)
+#define SMMU_GNSR0_S2CR103 (0xd9cU)
+#define SMMU_GNSR0_S2CR104 (0xda0U)
+#define SMMU_GNSR0_S2CR105 (0xda4U)
+#define SMMU_GNSR0_S2CR106 (0xda8U)
+#define SMMU_GNSR0_S2CR107 (0xdacU)
+#define SMMU_GNSR0_S2CR108 (0xdb0U)
+#define SMMU_GNSR0_S2CR109 (0xdb4U)
+#define SMMU_GNSR0_S2CR110 (0xdb8U)
+#define SMMU_GNSR0_S2CR111 (0xdbcU)
+#define SMMU_GNSR0_S2CR112 (0xdc0U)
+#define SMMU_GNSR0_S2CR113 (0xdc4U)
+#define SMMU_GNSR0_S2CR114 (0xdc8U)
+#define SMMU_GNSR0_S2CR115 (0xdccU)
+#define SMMU_GNSR0_S2CR116 (0xdd0U)
+#define SMMU_GNSR0_S2CR117 (0xdd4U)
+#define SMMU_GNSR0_S2CR118 (0xdd8U)
+#define SMMU_GNSR0_S2CR119 (0xddcU)
+#define SMMU_GNSR0_S2CR120 (0xde0U)
+#define SMMU_GNSR0_S2CR121 (0xde4U)
+#define SMMU_GNSR0_S2CR122 (0xde8U)
+#define SMMU_GNSR0_S2CR123 (0xdecU)
+#define SMMU_GNSR0_S2CR124 (0xdf0U)
+#define SMMU_GNSR0_S2CR125 (0xdf4U)
+#define SMMU_GNSR0_S2CR126 (0xdf8U)
+#define SMMU_GNSR0_S2CR127 (0xdfcU)
+#define SMMU_GNSR0_PIDR0 (0xfe0U)
+#define SMMU_GNSR0_PIDR1 (0xfe4U)
+#define SMMU_GNSR0_PIDR2 (0xfe8U)
+#define SMMU_GNSR0_PIDR3 (0xfecU)
+#define SMMU_GNSR0_PIDR4 (0xfd0U)
+#define SMMU_GNSR0_PIDR5 (0xfd4U)
+#define SMMU_GNSR0_PIDR6 (0xfd8U)
+#define SMMU_GNSR0_PIDR7 (0xfdcU)
+#define SMMU_GNSR0_CIDR0 (0xff0U)
+#define SMMU_GNSR0_CIDR1 (0xff4U)
+#define SMMU_GNSR0_CIDR2 (0xff8U)
+#define SMMU_GNSR0_CIDR3 (0xffcU)
+#define SMMU_GNSR1_CBAR0 (0x0U)
+#define SMMU_GNSR1_CBARn (0x0U)
+#define SMMU_GNSR1_CBFRSYNRA0 (0x400U)
+#define SMMU_GNSR1_CBA2R0 (0x800U)
+#define SMMU_GNSR1_CBAR1 (0x4U)
+#define SMMU_GNSR1_CBFRSYNRA1 (0x404U)
+#define SMMU_GNSR1_CBA2R1 (0x804U)
+#define SMMU_GNSR1_CBAR2 (0x8U)
+#define SMMU_GNSR1_CBFRSYNRA2 (0x408U)
+#define SMMU_GNSR1_CBA2R2 (0x808U)
+#define SMMU_GNSR1_CBAR3 (0xcU)
+#define SMMU_GNSR1_CBFRSYNRA3 (0x40cU)
+#define SMMU_GNSR1_CBA2R3 (0x80cU)
+#define SMMU_GNSR1_CBAR4 (0x10U)
+#define SMMU_GNSR1_CBFRSYNRA4 (0x410U)
+#define SMMU_GNSR1_CBA2R4 (0x810U)
+#define SMMU_GNSR1_CBAR5 (0x14U)
+#define SMMU_GNSR1_CBFRSYNRA5 (0x414U)
+#define SMMU_GNSR1_CBA2R5 (0x814U)
+#define SMMU_GNSR1_CBAR6 (0x18U)
+#define SMMU_GNSR1_CBFRSYNRA6 (0x418U)
+#define SMMU_GNSR1_CBA2R6 (0x818U)
+#define SMMU_GNSR1_CBAR7 (0x1cU)
+#define SMMU_GNSR1_CBFRSYNRA7 (0x41cU)
+#define SMMU_GNSR1_CBA2R7 (0x81cU)
+#define SMMU_GNSR1_CBAR8 (0x20U)
+#define SMMU_GNSR1_CBFRSYNRA8 (0x420U)
+#define SMMU_GNSR1_CBA2R8 (0x820U)
+#define SMMU_GNSR1_CBAR9 (0x24U)
+#define SMMU_GNSR1_CBFRSYNRA9 (0x424U)
+#define SMMU_GNSR1_CBA2R9 (0x824U)
+#define SMMU_GNSR1_CBAR10 (0x28U)
+#define SMMU_GNSR1_CBFRSYNRA10 (0x428U)
+#define SMMU_GNSR1_CBA2R10 (0x828U)
+#define SMMU_GNSR1_CBAR11 (0x2cU)
+#define SMMU_GNSR1_CBFRSYNRA11 (0x42cU)
+#define SMMU_GNSR1_CBA2R11 (0x82cU)
+#define SMMU_GNSR1_CBAR12 (0x30U)
+#define SMMU_GNSR1_CBFRSYNRA12 (0x430U)
+#define SMMU_GNSR1_CBA2R12 (0x830U)
+#define SMMU_GNSR1_CBAR13 (0x34U)
+#define SMMU_GNSR1_CBFRSYNRA13 (0x434U)
+#define SMMU_GNSR1_CBA2R13 (0x834U)
+#define SMMU_GNSR1_CBAR14 (0x38U)
+#define SMMU_GNSR1_CBFRSYNRA14 (0x438U)
+#define SMMU_GNSR1_CBA2R14 (0x838U)
+#define SMMU_GNSR1_CBAR15 (0x3cU)
+#define SMMU_GNSR1_CBFRSYNRA15 (0x43cU)
+#define SMMU_GNSR1_CBA2R15 (0x83cU)
+#define SMMU_GNSR1_CBAR16 (0x40U)
+#define SMMU_GNSR1_CBFRSYNRA16 (0x440U)
+#define SMMU_GNSR1_CBA2R16 (0x840U)
+#define SMMU_GNSR1_CBAR17 (0x44U)
+#define SMMU_GNSR1_CBFRSYNRA17 (0x444U)
+#define SMMU_GNSR1_CBA2R17 (0x844U)
+#define SMMU_GNSR1_CBAR18 (0x48U)
+#define SMMU_GNSR1_CBFRSYNRA18 (0x448U)
+#define SMMU_GNSR1_CBA2R18 (0x848U)
+#define SMMU_GNSR1_CBAR19 (0x4cU)
+#define SMMU_GNSR1_CBFRSYNRA19 (0x44cU)
+#define SMMU_GNSR1_CBA2R19 (0x84cU)
+#define SMMU_GNSR1_CBAR20 (0x50U)
+#define SMMU_GNSR1_CBFRSYNRA20 (0x450U)
+#define SMMU_GNSR1_CBA2R20 (0x850U)
+#define SMMU_GNSR1_CBAR21 (0x54U)
+#define SMMU_GNSR1_CBFRSYNRA21 (0x454U)
+#define SMMU_GNSR1_CBA2R21 (0x854U)
+#define SMMU_GNSR1_CBAR22 (0x58U)
+#define SMMU_GNSR1_CBFRSYNRA22 (0x458U)
+#define SMMU_GNSR1_CBA2R22 (0x858U)
+#define SMMU_GNSR1_CBAR23 (0x5cU)
+#define SMMU_GNSR1_CBFRSYNRA23 (0x45cU)
+#define SMMU_GNSR1_CBA2R23 (0x85cU)
+#define SMMU_GNSR1_CBAR24 (0x60U)
+#define SMMU_GNSR1_CBFRSYNRA24 (0x460U)
+#define SMMU_GNSR1_CBA2R24 (0x860U)
+#define SMMU_GNSR1_CBAR25 (0x64U)
+#define SMMU_GNSR1_CBFRSYNRA25 (0x464U)
+#define SMMU_GNSR1_CBA2R25 (0x864U)
+#define SMMU_GNSR1_CBAR26 (0x68U)
+#define SMMU_GNSR1_CBFRSYNRA26 (0x468U)
+#define SMMU_GNSR1_CBA2R26 (0x868U)
+#define SMMU_GNSR1_CBAR27 (0x6cU)
+#define SMMU_GNSR1_CBFRSYNRA27 (0x46cU)
+#define SMMU_GNSR1_CBA2R27 (0x86cU)
+#define SMMU_GNSR1_CBAR28 (0x70U)
+#define SMMU_GNSR1_CBFRSYNRA28 (0x470U)
+#define SMMU_GNSR1_CBA2R28 (0x870U)
+#define SMMU_GNSR1_CBAR29 (0x74U)
+#define SMMU_GNSR1_CBFRSYNRA29 (0x474U)
+#define SMMU_GNSR1_CBA2R29 (0x874U)
+#define SMMU_GNSR1_CBAR30 (0x78U)
+#define SMMU_GNSR1_CBFRSYNRA30 (0x478U)
+#define SMMU_GNSR1_CBA2R30 (0x878U)
+#define SMMU_GNSR1_CBAR31 (0x7cU)
+#define SMMU_GNSR1_CBFRSYNRA31 (0x47cU)
+#define SMMU_GNSR1_CBA2R31 (0x87cU)
+#define SMMU_GNSR1_CBAR32 (0x80U)
+#define SMMU_GNSR1_CBFRSYNRA32 (0x480U)
+#define SMMU_GNSR1_CBA2R32 (0x880U)
+#define SMMU_GNSR1_CBAR33 (0x84U)
+#define SMMU_GNSR1_CBFRSYNRA33 (0x484U)
+#define SMMU_GNSR1_CBA2R33 (0x884U)
+#define SMMU_GNSR1_CBAR34 (0x88U)
+#define SMMU_GNSR1_CBFRSYNRA34 (0x488U)
+#define SMMU_GNSR1_CBA2R34 (0x888U)
+#define SMMU_GNSR1_CBAR35 (0x8cU)
+#define SMMU_GNSR1_CBFRSYNRA35 (0x48cU)
+#define SMMU_GNSR1_CBA2R35 (0x88cU)
+#define SMMU_GNSR1_CBAR36 (0x90U)
+#define SMMU_GNSR1_CBFRSYNRA36 (0x490U)
+#define SMMU_GNSR1_CBA2R36 (0x890U)
+#define SMMU_GNSR1_CBAR37 (0x94U)
+#define SMMU_GNSR1_CBFRSYNRA37 (0x494U)
+#define SMMU_GNSR1_CBA2R37 (0x894U)
+#define SMMU_GNSR1_CBAR38 (0x98U)
+#define SMMU_GNSR1_CBFRSYNRA38 (0x498U)
+#define SMMU_GNSR1_CBA2R38 (0x898U)
+#define SMMU_GNSR1_CBAR39 (0x9cU)
+#define SMMU_GNSR1_CBFRSYNRA39 (0x49cU)
+#define SMMU_GNSR1_CBA2R39 (0x89cU)
+#define SMMU_GNSR1_CBAR40 (0xa0U)
+#define SMMU_GNSR1_CBFRSYNRA40 (0x4a0U)
+#define SMMU_GNSR1_CBA2R40 (0x8a0U)
+#define SMMU_GNSR1_CBAR41 (0xa4U)
+#define SMMU_GNSR1_CBFRSYNRA41 (0x4a4U)
+#define SMMU_GNSR1_CBA2R41 (0x8a4U)
+#define SMMU_GNSR1_CBAR42 (0xa8U)
+#define SMMU_GNSR1_CBFRSYNRA42 (0x4a8U)
+#define SMMU_GNSR1_CBA2R42 (0x8a8U)
+#define SMMU_GNSR1_CBAR43 (0xacU)
+#define SMMU_GNSR1_CBFRSYNRA43 (0x4acU)
+#define SMMU_GNSR1_CBA2R43 (0x8acU)
+#define SMMU_GNSR1_CBAR44 (0xb0U)
+#define SMMU_GNSR1_CBFRSYNRA44 (0x4b0U)
+#define SMMU_GNSR1_CBA2R44 (0x8b0U)
+#define SMMU_GNSR1_CBAR45 (0xb4U)
+#define SMMU_GNSR1_CBFRSYNRA45 (0x4b4U)
+#define SMMU_GNSR1_CBA2R45 (0x8b4U)
+#define SMMU_GNSR1_CBAR46 (0xb8U)
+#define SMMU_GNSR1_CBFRSYNRA46 (0x4b8U)
+#define SMMU_GNSR1_CBA2R46 (0x8b8U)
+#define SMMU_GNSR1_CBAR47 (0xbcU)
+#define SMMU_GNSR1_CBFRSYNRA47 (0x4bcU)
+#define SMMU_GNSR1_CBA2R47 (0x8bcU)
+#define SMMU_GNSR1_CBAR48 (0xc0U)
+#define SMMU_GNSR1_CBFRSYNRA48 (0x4c0U)
+#define SMMU_GNSR1_CBA2R48 (0x8c0U)
+#define SMMU_GNSR1_CBAR49 (0xc4U)
+#define SMMU_GNSR1_CBFRSYNRA49 (0x4c4U)
+#define SMMU_GNSR1_CBA2R49 (0x8c4U)
+#define SMMU_GNSR1_CBAR50 (0xc8U)
+#define SMMU_GNSR1_CBFRSYNRA50 (0x4c8U)
+#define SMMU_GNSR1_CBA2R50 (0x8c8U)
+#define SMMU_GNSR1_CBAR51 (0xccU)
+#define SMMU_GNSR1_CBFRSYNRA51 (0x4ccU)
+#define SMMU_GNSR1_CBA2R51 (0x8ccU)
+#define SMMU_GNSR1_CBAR52 (0xd0U)
+#define SMMU_GNSR1_CBFRSYNRA52 (0x4d0U)
+#define SMMU_GNSR1_CBA2R52 (0x8d0U)
+#define SMMU_GNSR1_CBAR53 (0xd4U)
+#define SMMU_GNSR1_CBFRSYNRA53 (0x4d4U)
+#define SMMU_GNSR1_CBA2R53 (0x8d4U)
+#define SMMU_GNSR1_CBAR54 (0xd8U)
+#define SMMU_GNSR1_CBFRSYNRA54 (0x4d8U)
+#define SMMU_GNSR1_CBA2R54 (0x8d8U)
+#define SMMU_GNSR1_CBAR55 (0xdcU)
+#define SMMU_GNSR1_CBFRSYNRA55 (0x4dcU)
+#define SMMU_GNSR1_CBA2R55 (0x8dcU)
+#define SMMU_GNSR1_CBAR56 (0xe0U)
+#define SMMU_GNSR1_CBFRSYNRA56 (0x4e0U)
+#define SMMU_GNSR1_CBA2R56 (0x8e0U)
+#define SMMU_GNSR1_CBAR57 (0xe4U)
+#define SMMU_GNSR1_CBFRSYNRA57 (0x4e4U)
+#define SMMU_GNSR1_CBA2R57 (0x8e4U)
+#define SMMU_GNSR1_CBAR58 (0xe8U)
+#define SMMU_GNSR1_CBFRSYNRA58 (0x4e8U)
+#define SMMU_GNSR1_CBA2R58 (0x8e8U)
+#define SMMU_GNSR1_CBAR59 (0xecU)
+#define SMMU_GNSR1_CBFRSYNRA59 (0x4ecU)
+#define SMMU_GNSR1_CBA2R59 (0x8ecU)
+#define SMMU_GNSR1_CBAR60 (0xf0U)
+#define SMMU_GNSR1_CBFRSYNRA60 (0x4f0U)
+#define SMMU_GNSR1_CBA2R60 (0x8f0U)
+#define SMMU_GNSR1_CBAR61 (0xf4U)
+#define SMMU_GNSR1_CBFRSYNRA61 (0x4f4U)
+#define SMMU_GNSR1_CBA2R61 (0x8f4U)
+#define SMMU_GNSR1_CBAR62 (0xf8U)
+#define SMMU_GNSR1_CBFRSYNRA62 (0x4f8U)
+#define SMMU_GNSR1_CBA2R62 (0x8f8U)
+#define SMMU_GNSR1_CBAR63 (0xfcU)
+#define SMMU_GNSR1_CBFRSYNRA63 (0x4fcU)
+#define SMMU_GNSR1_CBA2R63 (0x8fcU)
+
+/*******************************************************************************
+ * SMMU Global Secure Aux. Configuration Register
+ ******************************************************************************/
+#define SMMU_GSR0_SECURE_ACR 0x10U
+#define SMMU_GNSR_ACR (SMMU_GSR0_SECURE_ACR + 0x400U)
+#define SMMU_GSR0_PGSIZE_SHIFT 16U
+#define SMMU_GSR0_PGSIZE_4K (0U << SMMU_GSR0_PGSIZE_SHIFT)
+#define SMMU_GSR0_PGSIZE_64K (1U << SMMU_GSR0_PGSIZE_SHIFT)
+#define SMMU_ACR_CACHE_LOCK_ENABLE_BIT (1U << 26)
+
+/*******************************************************************************
+ * SMMU Global Aux. Control Register
+ ******************************************************************************/
+#define SMMU_CBn_ACTLR_CPRE_BIT (1U << 1)
+
+/*******************************************************************************
+ * SMMU configuration constants
+ ******************************************************************************/
+#define ID1_PAGESIZE (1U << 31)
+#define ID1_NUMPAGENDXB_SHIFT 28U
+#define ID1_NUMPAGENDXB_MASK 7U
+#define ID1_NUMS2CB_SHIFT 16U
+#define ID1_NUMS2CB_MASK 0xffU
+#define ID1_NUMCB_SHIFT 0U
+#define ID1_NUMCB_MASK 0xffU
+#define PGSHIFT 16U
+#define CB_SIZE 0x800000U
+
+typedef struct smmu_regs {
+ uint32_t reg;
+ uint32_t val;
+} smmu_regs_t;
+
+#define mc_make_sid_override_cfg(name) \
+ { \
+ .reg = TEGRA_MC_STREAMID_BASE + MC_STREAMID_OVERRIDE_CFG_ ## name, \
+ .val = 0x00000000U, \
+ }
+
+#define mc_make_sid_security_cfg(name) \
+ { \
+ .reg = TEGRA_MC_STREAMID_BASE + MC_STREAMID_OVERRIDE_TO_SECURITY_CFG(MC_STREAMID_OVERRIDE_CFG_ ## name), \
+ .val = 0x00000000U, \
+ }
+
+#define smmu_make_gnsr0_sec_cfg(name) \
+ { \
+ .reg = TEGRA_SMMU0_BASE + SMMU_GNSR0_ ## name, \
+ .val = 0x00000000U, \
+ }
+
+/*
+ * On the ARM SMMU, the conditional offset to access the secure alias of a
+ * non-secure register is 0x400, so add it to the register address.
+ */
+#define smmu_make_gnsr0_nsec_cfg(name) \
+ { \
+ .reg = TEGRA_SMMU0_BASE + 0x400U + SMMU_GNSR0_ ## name, \
+ .val = 0x00000000U, \
+ }
+
+#define smmu_make_gnsr0_smr_cfg(n) \
+ { \
+ .reg = TEGRA_SMMU0_BASE + SMMU_GNSR0_SMR ## n, \
+ .val = 0x00000000U, \
+ }
+
+#define smmu_make_gnsr0_s2cr_cfg(n) \
+ { \
+ .reg = TEGRA_SMMU0_BASE + SMMU_GNSR0_S2CR ## n, \
+ .val = 0x00000000U, \
+ }
+
+#define smmu_make_gnsr1_cbar_cfg(n) \
+ { \
+ .reg = TEGRA_SMMU0_BASE + (1U << PGSHIFT) + SMMU_GNSR1_CBAR ## n, \
+ .val = 0x00000000U, \
+ }
+
+#define smmu_make_gnsr1_cba2r_cfg(n) \
+ { \
+ .reg = TEGRA_SMMU0_BASE + (1U << PGSHIFT) + SMMU_GNSR1_CBA2R ## n, \
+ .val = 0x00000000U, \
+ }
+
+#define make_smmu_cb_cfg(name, n) \
+ { \
+ .reg = TEGRA_SMMU0_BASE + (CB_SIZE >> 1) + (n * (1 << PGSHIFT)) \
+ + SMMU_CBn_ ## name, \
+ .val = 0x00000000U, \
+ }
+
+#define smmu_make_smrg_group(n) \
+ smmu_make_gnsr0_smr_cfg(n), \
+ smmu_make_gnsr0_s2cr_cfg(n), \
+ smmu_make_gnsr1_cbar_cfg(n), \
+ smmu_make_gnsr1_cba2r_cfg(n) /* don't put "," here. */
+
+#define smmu_make_cb_group(n) \
+ make_smmu_cb_cfg(SCTLR, n), \
+ make_smmu_cb_cfg(TCR2, n), \
+ make_smmu_cb_cfg(TTBR0_LO, n), \
+ make_smmu_cb_cfg(TTBR0_HI, n), \
+ make_smmu_cb_cfg(TCR, n), \
+ make_smmu_cb_cfg(PRRR_MAIR0, n),\
+ make_smmu_cb_cfg(FSR, n), \
+ make_smmu_cb_cfg(FAR_LO, n), \
+ make_smmu_cb_cfg(FAR_HI, n), \
+ make_smmu_cb_cfg(FSYNR0, n) /* don't put "," here. */
+
+#define smmu_bypass_cfg \
+ { \
+ .reg = TEGRA_MC_BASE + MC_SMMU_BYPASS_CONFIG, \
+ .val = 0x00000000U, \
+ }
+
+#define _START_OF_TABLE_ \
+ { \
+ .reg = 0xCAFE05C7U, \
+ .val = 0x00000000U, \
+ }
+
+#define _END_OF_TABLE_ \
+ { \
+ .reg = 0xFFFFFFFFU, \
+ .val = 0xFFFFFFFFU, \
+ }
+
+
+void tegra_smmu_init(void);
+void tegra_smmu_save_context(uint64_t smmu_ctx_addr);
+smmu_regs_t *plat_get_smmu_ctx(void);
+
+#endif /* __SMMU_H */
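
Finally, a minimal, hypothetical example of the register-save table that
plat_get_smmu_ctx() is expected to return, built from the table macros above.
The SoC ports enumerate every SMR/S2CR/context-bank group rather than just
index 0 as shown here.

/*
 * Sketch only: a tiny SMMU/MC register table bracketed by the
 * start/end markers expected by the common SMMU driver.
 */
#include <smmu.h>

static smmu_regs_t example_smmu_ctx[] = {
	_START_OF_TABLE_,
	mc_make_sid_override_cfg(MPCORER),
	mc_make_sid_security_cfg(MPCORER),
	smmu_make_smrg_group(0),
	smmu_make_cb_group(0),
	smmu_bypass_cfg,	/* TBU bypass settings */
	_END_OF_TABLE_,
};

/* hypothetical SoC hook returning the table above */
smmu_regs_t *plat_get_smmu_ctx(void)
{
	return example_smmu_ctx;
}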
diff --git a/plat/nvidia/tegra/include/plat_macros.S b/plat/nvidia/tegra/include/plat_macros.S
new file mode 100644
index 00000000..f54e1686
--- /dev/null
+++ b/plat/nvidia/tegra/include/plat_macros.S
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __PLAT_MACROS_S__
+#define __PLAT_MACROS_S__
+
+#include <gic_v2.h>
+#include <tegra_def.h>
+
+.section .rodata.gic_reg_name, "aS"
+gicc_regs:
+ .asciz "gicc_hppir", "gicc_ahppir", "gicc_ctlr", ""
+gicd_pend_reg:
+ .asciz "gicd_ispendr regs (Offsets 0x200 - 0x278)\n Offset:\t\t\tvalue\n"
+newline:
+ .asciz "\n"
+spacer:
+ .asciz ":\t\t0x"
+
+/* ---------------------------------------------
+ * The below macro prints out relevant GIC
+ * registers whenever an unhandled exception is
+ * taken in BL31.
+ * ---------------------------------------------
+ */
+.macro plat_crash_print_regs
+ mov_imm x16, TEGRA_GICC_BASE
+
+ /* gicc base address is now in x16 */
+ adr x6, gicc_regs /* Load the gicc reg list to x6 */
+ /* Load the gicc regs to gp regs used by str_in_crash_buf_print */
+ ldr w8, [x16, #GICC_HPPIR]
+ ldr w9, [x16, #GICC_AHPPIR]
+ ldr w10, [x16, #GICC_CTLR]
+	/* Store to the crash buf and print to console */
+ bl str_in_crash_buf_print
+
+ /* Print the GICD_ISPENDR regs */
+ mov_imm x16, TEGRA_GICD_BASE
+ add x7, x16, #GICD_ISPENDR
+ adr x4, gicd_pend_reg
+ bl asm_print_str
+2:
+ sub x4, x7, x16
+ cmp x4, #0x280
+ b.eq 1f
+ bl asm_print_hex
+ adr x4, spacer
+ bl asm_print_str
+ ldr x4, [x7], #8
+ bl asm_print_hex
+ adr x4, newline
+ bl asm_print_str
+ b 2b
+1:
+.endm
+
+#endif /* __PLAT_MACROS_S__ */
diff --git a/plat/nvidia/tegra/include/platform_def.h b/plat/nvidia/tegra/include/platform_def.h
new file mode 100644
index 00000000..4894442a
--- /dev/null
+++ b/plat/nvidia/tegra/include/platform_def.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __PLATFORM_DEF_H__
+#define __PLATFORM_DEF_H__
+
+#include <arch.h>
+#include <common_def.h>
+#include <tegra_def.h>
+#include <utils_def.h>
+
+/*******************************************************************************
+ * Generic platform constants
+ ******************************************************************************/
+
+/* Size of cacheable stacks */
+#ifdef IMAGE_BL31
+#define PLATFORM_STACK_SIZE U(0x400)
+#endif
+
+#define TEGRA_PRIMARY_CPU U(0x0)
+
+#define PLAT_MAX_PWR_LVL MPIDR_AFFLVL2
+#define PLATFORM_CORE_COUNT (PLATFORM_CLUSTER_COUNT * \
+ PLATFORM_MAX_CPUS_PER_CLUSTER)
+#define PLAT_NUM_PWR_DOMAINS (PLATFORM_CORE_COUNT + \
+ PLATFORM_CLUSTER_COUNT + 1)
+
+/*******************************************************************************
+ * Platform console related constants
+ ******************************************************************************/
+#define TEGRA_CONSOLE_BAUDRATE U(115200)
+#define TEGRA_BOOT_UART_CLK_IN_HZ U(408000000)
+
+/*******************************************************************************
+ * Platform memory map related constants
+ ******************************************************************************/
+/* Size of trusted dram */
+#define TZDRAM_SIZE U(0x00400000)
+#define TZDRAM_END (TZDRAM_BASE + TZDRAM_SIZE)
+
+/*******************************************************************************
+ * BL31 specific defines.
+ ******************************************************************************/
+#define BL31_SIZE U(0x40000)
+#define BL31_BASE TZDRAM_BASE
+#define BL31_LIMIT (TZDRAM_BASE + BL31_SIZE - 1)
+#define BL32_BASE (TZDRAM_BASE + BL31_SIZE)
+#define BL32_LIMIT TZDRAM_END
+
+/*******************************************************************************
+ * Platform specific page table and MMU setup constants
+ ******************************************************************************/
+#define PLAT_PHY_ADDR_SPACE_SIZE (ULL(1) << 35)
+#define PLAT_VIRT_ADDR_SPACE_SIZE (ULL(1) << 35)
+
+/*******************************************************************************
+ * Some data must be aligned on the biggest cache line size in the platform.
+ * This is known only to the platform as it might have a combination of
+ * integrated and external caches.
+ ******************************************************************************/
+#define CACHE_WRITEBACK_SHIFT 6
+#define CACHE_WRITEBACK_GRANULE (U(1) << CACHE_WRITEBACK_SHIFT)
+
+#endif /* __PLATFORM_DEF_H__ */
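As a worked example of the derived counts: with the T132 build settings further down in this patch (PLATFORM_CLUSTER_COUNT = 1, PLATFORM_MAX_CPUS_PER_CLUSTER = 2), PLATFORM_CORE_COUNT evaluates to 2 and PLAT_NUM_PWR_DOMAINS to 2 + 1 + 1 = 4, i.e. two core domains, one cluster domain and one system-level domain.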
diff --git a/plat/nvidia/tegra/include/t132/tegra_def.h b/plat/nvidia/tegra/include/t132/tegra_def.h
new file mode 100644
index 00000000..ae00fb5c
--- /dev/null
+++ b/plat/nvidia/tegra/include/t132/tegra_def.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __TEGRA_DEF_H__
+#define __TEGRA_DEF_H__
+
+#include <utils_def.h>
+
+/*******************************************************************************
+ * This value is used by the PSCI implementation during the `SYSTEM_SUSPEND`
+ * call as the `state-id` field in the 'power state' parameter.
+ ******************************************************************************/
+#define PSTATE_ID_SOC_POWERDN U(0xD)
+
+/*******************************************************************************
+ * Platform power states (used by PSCI framework)
+ *
+ * - PLAT_MAX_RET_STATE should be less than lowest PSTATE_ID
+ * - PLAT_MAX_OFF_STATE should be greater than the highest PSTATE_ID
+ ******************************************************************************/
+#define PLAT_MAX_RET_STATE U(1)
+#define PLAT_MAX_OFF_STATE (PSTATE_ID_SOC_POWERDN + U(1))
+
+/*******************************************************************************
+ * GIC memory map
+ ******************************************************************************/
+#define TEGRA_GICD_BASE U(0x50041000)
+#define TEGRA_GICC_BASE U(0x50042000)
+
+/*******************************************************************************
+ * Tegra micro-seconds timer constants
+ ******************************************************************************/
+#define TEGRA_TMRUS_BASE U(0x60005010)
+#define TEGRA_TMRUS_SIZE U(0x1000)
+
+/*******************************************************************************
+ * Tegra Clock and Reset Controller constants
+ ******************************************************************************/
+#define TEGRA_CAR_RESET_BASE U(0x60006000)
+#define TEGRA_GPU_RESET_REG_OFFSET U(0x28C)
+#define GPU_RESET_BIT (U(1) << 24)
+
+/*******************************************************************************
+ * Tegra Flow Controller constants
+ ******************************************************************************/
+#define TEGRA_FLOWCTRL_BASE U(0x60007000)
+
+/*******************************************************************************
+ * Tegra Secure Boot Controller constants
+ ******************************************************************************/
+#define TEGRA_SB_BASE U(0x6000C200)
+
+/*******************************************************************************
+ * Tegra Exception Vectors constants
+ ******************************************************************************/
+#define TEGRA_EVP_BASE U(0x6000F000)
+
+/*******************************************************************************
+ * Tegra Miscellaneous register constants
+ ******************************************************************************/
+#define TEGRA_MISC_BASE U(0x70000000)
+#define HARDWARE_REVISION_OFFSET U(0x804)
+
+/*******************************************************************************
+ * Tegra UART controller base addresses
+ ******************************************************************************/
+#define TEGRA_UARTA_BASE U(0x70006000)
+#define TEGRA_UARTB_BASE U(0x70006040)
+#define TEGRA_UARTC_BASE U(0x70006200)
+#define TEGRA_UARTD_BASE U(0x70006300)
+#define TEGRA_UARTE_BASE U(0x70006400)
+
+/*******************************************************************************
+ * Tegra Power Mgmt Controller constants
+ ******************************************************************************/
+#define TEGRA_PMC_BASE U(0x7000E400)
+
+/*******************************************************************************
+ * Tegra Memory Controller constants
+ ******************************************************************************/
+#define TEGRA_MC_BASE U(0x70019000)
+
+/* TZDRAM carveout configuration registers */
+#define MC_SECURITY_CFG0_0 U(0x70)
+#define MC_SECURITY_CFG1_0 U(0x74)
+#define MC_SECURITY_CFG3_0 U(0x9BC)
+
+/* Video Memory carveout configuration registers */
+#define MC_VIDEO_PROTECT_BASE_HI U(0x978)
+#define MC_VIDEO_PROTECT_BASE_LO U(0x648)
+#define MC_VIDEO_PROTECT_SIZE_MB U(0x64c)
+
+/*******************************************************************************
+ * Tegra TZRAM constants
+ ******************************************************************************/
+#define TEGRA_TZRAM_BASE U(0x7C010000)
+#define TEGRA_TZRAM_SIZE U(0x10000)
+
+#endif /* __TEGRA_DEF_H__ */
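A hedged sketch of how the video-memory (VPR) carveout registers above are typically programmed; the helper name and exact sequencing are illustrative assumptions, the real logic being in memctrl_v1.c of this patch:

#include <stdint.h>
#include <mmio.h>

static void example_videomem_setup(uint64_t phys_base, uint64_t size_in_bytes)
{
	/* Shrink the carveout to zero while the base is being moved */
	mmio_write_32(TEGRA_MC_BASE + MC_VIDEO_PROTECT_SIZE_MB, 0U);
	mmio_write_32(TEGRA_MC_BASE + MC_VIDEO_PROTECT_BASE_LO,
		      (uint32_t)phys_base);
	mmio_write_32(TEGRA_MC_BASE + MC_VIDEO_PROTECT_BASE_HI,
		      (uint32_t)(phys_base >> 32));
	/* The size register is expressed in megabytes */
	mmio_write_32(TEGRA_MC_BASE + MC_VIDEO_PROTECT_SIZE_MB,
		      (uint32_t)(size_in_bytes >> 20));
}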
diff --git a/plat/nvidia/tegra/include/t186/tegra_def.h b/plat/nvidia/tegra/include/t186/tegra_def.h
new file mode 100644
index 00000000..d0331472
--- /dev/null
+++ b/plat/nvidia/tegra/include/t186/tegra_def.h
@@ -0,0 +1,250 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __TEGRA_DEF_H__
+#define __TEGRA_DEF_H__
+
+#include <utils_def.h>
+
+/*******************************************************************************
+ * MCE apertures used by the ARI interface
+ *
+ * Aperture 0 - Cpu0 (ARM Cortex A-57)
+ * Aperture 1 - Cpu1 (ARM Cortex A-57)
+ * Aperture 2 - Cpu2 (ARM Cortex A-57)
+ * Aperture 3 - Cpu3 (ARM Cortex A-57)
+ * Aperture 4 - Cpu4 (Denver15)
+ * Aperture 5 - Cpu5 (Denver15)
+ ******************************************************************************/
+#define MCE_ARI_APERTURE_0_OFFSET U(0x0)
+#define MCE_ARI_APERTURE_1_OFFSET U(0x10000)
+#define MCE_ARI_APERTURE_2_OFFSET U(0x20000)
+#define MCE_ARI_APERTURE_3_OFFSET U(0x30000)
+#define MCE_ARI_APERTURE_4_OFFSET U(0x40000)
+#define MCE_ARI_APERTURE_5_OFFSET U(0x50000)
+#define MCE_ARI_APERTURE_OFFSET_MAX	MCE_ARI_APERTURE_5_OFFSET
+
+/* number of apertures */
+#define MCE_ARI_APERTURES_MAX U(6)
+
+/* each ARI aperture is 64KB */
+#define MCE_ARI_APERTURE_SIZE U(0x10000)
+
+/*******************************************************************************
+ * CPU core id macros for the MCE_ONLINE_CORE ARI
+ ******************************************************************************/
+#define MCE_CORE_ID_MAX U(8)
+#define MCE_CORE_ID_MASK U(0x7)
+
+/*******************************************************************************
+ * These values are used by the PSCI implementation during the `CPU_SUSPEND`
+ * and `SYSTEM_SUSPEND` calls as the `state-id` field in the 'power state'
+ * parameter.
+ ******************************************************************************/
+#define PSTATE_ID_CORE_IDLE U(6)
+#define PSTATE_ID_CORE_POWERDN U(7)
+#define PSTATE_ID_SOC_POWERDN U(2)
+
+/*******************************************************************************
+ * Platform power states (used by PSCI framework)
+ *
+ * - PLAT_MAX_RET_STATE should be less than lowest PSTATE_ID
+ * - PLAT_MAX_OFF_STATE should be greater than the highest PSTATE_ID
+ ******************************************************************************/
+#define PLAT_MAX_RET_STATE U(1)
+#define PLAT_MAX_OFF_STATE U(8)
+
+/*******************************************************************************
+ * Secure IRQ definitions
+ ******************************************************************************/
+#define TEGRA186_TOP_WDT_IRQ U(49)
+#define TEGRA186_AON_WDT_IRQ U(50)
+
+#define TEGRA186_SEC_IRQ_TARGET_MASK	U(0xF3) /* 4 A57 + 2 Denver */
+
+/*******************************************************************************
+ * Tegra Miscellaneous register constants
+ ******************************************************************************/
+#define TEGRA_MISC_BASE U(0x00100000)
+#define HARDWARE_REVISION_OFFSET U(0x4)
+
+#define MISCREG_PFCFG U(0x200C)
+
+/*******************************************************************************
+ * Tegra TSA Controller constants
+ ******************************************************************************/
+#define TEGRA_TSA_BASE U(0x02400000)
+
+/*******************************************************************************
+ * TSA configuration registers
+ ******************************************************************************/
+#define TSA_CONFIG_STATIC0_CSW_SESWR U(0x4010)
+#define TSA_CONFIG_STATIC0_CSW_SESWR_RESET U(0x1100)
+#define TSA_CONFIG_STATIC0_CSW_ETRW U(0x4038)
+#define TSA_CONFIG_STATIC0_CSW_ETRW_RESET U(0x1100)
+#define TSA_CONFIG_STATIC0_CSW_SDMMCWAB U(0x5010)
+#define TSA_CONFIG_STATIC0_CSW_SDMMCWAB_RESET U(0x1100)
+#define TSA_CONFIG_STATIC0_CSW_AXISW U(0x7008)
+#define TSA_CONFIG_STATIC0_CSW_AXISW_RESET U(0x1100)
+#define TSA_CONFIG_STATIC0_CSW_HDAW U(0xA008)
+#define TSA_CONFIG_STATIC0_CSW_HDAW_RESET U(0x100)
+#define TSA_CONFIG_STATIC0_CSW_AONDMAW U(0xB018)
+#define TSA_CONFIG_STATIC0_CSW_AONDMAW_RESET U(0x1100)
+#define TSA_CONFIG_STATIC0_CSW_SCEDMAW U(0xD018)
+#define TSA_CONFIG_STATIC0_CSW_SCEDMAW_RESET U(0x1100)
+#define TSA_CONFIG_STATIC0_CSW_BPMPDMAW U(0xD028)
+#define TSA_CONFIG_STATIC0_CSW_BPMPDMAW_RESET U(0x1100)
+#define TSA_CONFIG_STATIC0_CSW_APEDMAW U(0x12018)
+#define TSA_CONFIG_STATIC0_CSW_APEDMAW_RESET U(0x1100)
+#define TSA_CONFIG_STATIC0_CSW_UFSHCW U(0x13008)
+#define TSA_CONFIG_STATIC0_CSW_UFSHCW_RESET U(0x1100)
+#define TSA_CONFIG_STATIC0_CSW_AFIW U(0x13018)
+#define TSA_CONFIG_STATIC0_CSW_AFIW_RESET U(0x1100)
+#define TSA_CONFIG_STATIC0_CSW_SATAW U(0x13028)
+#define TSA_CONFIG_STATIC0_CSW_SATAW_RESET U(0x1100)
+#define TSA_CONFIG_STATIC0_CSW_EQOSW U(0x13038)
+#define TSA_CONFIG_STATIC0_CSW_EQOSW_RESET U(0x1100)
+#define TSA_CONFIG_STATIC0_CSW_XUSB_DEVW U(0x15008)
+#define TSA_CONFIG_STATIC0_CSW_XUSB_DEVW_RESET U(0x1100)
+#define TSA_CONFIG_STATIC0_CSW_XUSB_HOSTW U(0x15018)
+#define TSA_CONFIG_STATIC0_CSW_XUSB_HOSTW_RESET U(0x1100)
+
+#define TSA_CONFIG_CSW_MEMTYPE_OVERRIDE_MASK (U(0x3) << 11)
+#define TSA_CONFIG_CSW_MEMTYPE_OVERRIDE_PASTHRU (U(0) << 11)
+
+/*******************************************************************************
+ * Tegra Memory Controller constants
+ ******************************************************************************/
+#define TEGRA_MC_STREAMID_BASE U(0x02C00000)
+#define TEGRA_MC_BASE U(0x02C10000)
+
+/* General Security Carveout register macros */
+#define MC_GSC_CONFIG_REGS_SIZE U(0x40)
+#define MC_GSC_LOCK_CFG_SETTINGS_BIT (U(1) << 1)
+#define MC_GSC_ENABLE_TZ_LOCK_BIT (U(1) << 0)
+#define MC_GSC_SIZE_RANGE_4KB_SHIFT U(27)
+#define MC_GSC_BASE_LO_SHIFT U(12)
+#define MC_GSC_BASE_LO_MASK U(0xFFFFF)
+#define MC_GSC_BASE_HI_SHIFT U(0)
+#define MC_GSC_BASE_HI_MASK U(3)
+
+/* TZDRAM carveout configuration registers */
+#define MC_SECURITY_CFG0_0 U(0x70)
+#define MC_SECURITY_CFG1_0 U(0x74)
+#define MC_SECURITY_CFG3_0 U(0x9BC)
+
+/* Video Memory carveout configuration registers */
+#define MC_VIDEO_PROTECT_BASE_HI U(0x978)
+#define MC_VIDEO_PROTECT_BASE_LO U(0x648)
+#define MC_VIDEO_PROTECT_SIZE_MB U(0x64C)
+
+/*
+ * Carveout (MC_SECURITY_CARVEOUT24) registers used to clear the
+ * non-overlapping Video memory region
+ */
+#define MC_VIDEO_PROTECT_CLEAR_CFG U(0x25A0)
+#define MC_VIDEO_PROTECT_CLEAR_BASE_LO U(0x25A4)
+#define MC_VIDEO_PROTECT_CLEAR_BASE_HI U(0x25A8)
+#define MC_VIDEO_PROTECT_CLEAR_SIZE U(0x25AC)
+#define MC_VIDEO_PROTECT_CLEAR_ACCESS_CFG0 U(0x25B0)
+
+/* TZRAM carveout (MC_SECURITY_CARVEOUT11) configuration registers */
+#define MC_TZRAM_CARVEOUT_CFG U(0x2190)
+#define MC_TZRAM_BASE_LO U(0x2194)
+#define MC_TZRAM_BASE_HI U(0x2198)
+#define MC_TZRAM_SIZE U(0x219C)
+#define MC_TZRAM_CLIENT_ACCESS_CFG0 U(0x21A0)
+
+/*******************************************************************************
+ * Tegra UART Controller constants
+ ******************************************************************************/
+#define TEGRA_UARTA_BASE U(0x03100000)
+#define TEGRA_UARTB_BASE U(0x03110000)
+#define TEGRA_UARTC_BASE U(0x0C280000)
+#define TEGRA_UARTD_BASE U(0x03130000)
+#define TEGRA_UARTE_BASE U(0x03140000)
+#define TEGRA_UARTF_BASE U(0x03150000)
+#define TEGRA_UARTG_BASE U(0x0C290000)
+
+/*******************************************************************************
+ * Tegra Fuse Controller related constants
+ ******************************************************************************/
+#define TEGRA_FUSE_BASE U(0x03820000)
+#define OPT_SUBREVISION U(0x248)
+#define SUBREVISION_MASK U(0xFF)
+
+/*******************************************************************************
+ * GICv2 & interrupt handling related constants
+ ******************************************************************************/
+#define TEGRA_GICD_BASE U(0x03881000)
+#define TEGRA_GICC_BASE U(0x03882000)
+
+/*******************************************************************************
+ * Security Engine related constants
+ ******************************************************************************/
+#define TEGRA_SE0_BASE U(0x03AC0000)
+#define SE_MUTEX_WATCHDOG_NS_LIMIT U(0x6C)
+#define TEGRA_PKA1_BASE U(0x03AD0000)
+#define PKA_MUTEX_WATCHDOG_NS_LIMIT U(0x8144)
+#define TEGRA_RNG1_BASE U(0x03AE0000)
+#define RNG_MUTEX_WATCHDOG_NS_LIMIT U(0xFE0)
+
+/*******************************************************************************
+ * Tegra Clock and Reset Controller constants
+ ******************************************************************************/
+#define TEGRA_CAR_RESET_BASE U(0x05000000)
+#define TEGRA_GPU_RESET_REG_OFFSET U(0x30)
+#define GPU_RESET_BIT (U(1) << 0)
+
+/*******************************************************************************
+ * Tegra micro-seconds timer constants
+ ******************************************************************************/
+#define TEGRA_TMRUS_BASE U(0x0C2E0000)
+#define TEGRA_TMRUS_SIZE U(0x1000)
+
+/*******************************************************************************
+ * Tegra Power Mgmt Controller constants
+ ******************************************************************************/
+#define TEGRA_PMC_BASE U(0x0C360000)
+
+/*******************************************************************************
+ * Tegra scratch registers constants
+ ******************************************************************************/
+#define TEGRA_SCRATCH_BASE U(0x0C390000)
+#define SECURE_SCRATCH_RSV1_LO U(0x658)
+#define SECURE_SCRATCH_RSV1_HI U(0x65C)
+#define SECURE_SCRATCH_RSV6 U(0x680)
+#define SECURE_SCRATCH_RSV11_LO U(0x6A8)
+#define SECURE_SCRATCH_RSV11_HI U(0x6AC)
+#define SECURE_SCRATCH_RSV53_LO U(0x7F8)
+#define SECURE_SCRATCH_RSV53_HI U(0x7FC)
+#define SECURE_SCRATCH_RSV54_HI U(0x804)
+#define SECURE_SCRATCH_RSV55_LO U(0x808)
+#define SECURE_SCRATCH_RSV55_HI U(0x80C)
+
+/*******************************************************************************
+ * Tegra Memory Mapped Control Register Access constants
+ ******************************************************************************/
+#define TEGRA_MMCRAB_BASE U(0x0E000000)
+
+/*******************************************************************************
+ * Tegra Memory Mapped Activity Monitor Register Access constants
+ ******************************************************************************/
+#define TEGRA_ARM_ACTMON_CTR_BASE U(0x0E060000)
+#define TEGRA_DENVER_ACTMON_CTR_BASE U(0x0E070000)
+
+/*******************************************************************************
+ * Tegra SMMU Controller constants
+ ******************************************************************************/
+#define TEGRA_SMMU0_BASE U(0x12000000)
+
+/*******************************************************************************
+ * Tegra TZRAM constants
+ ******************************************************************************/
+#define TEGRA_TZRAM_BASE U(0x30000000)
+#define TEGRA_TZRAM_SIZE U(0x40000)
+
+#endif /* __TEGRA_DEF_H__ */
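The MC_GSC_BASE_* macros above describe how a General Security Carveout base address is split across its BASE_LO/BASE_HI registers. The helper below is hypothetical and shown only to illustrate the field layout (4KB-aligned address bits 31:12 into BASE_LO[31:12], the two high address bits into BASE_HI[1:0]); the carveout programming actually shipped in this patch is in memctrl_v2.c and plat_memctrl.c:

#include <stdint.h>
#include <mmio.h>

static void example_gsc_set_base(uint32_t lo_off, uint32_t hi_off,
				 uint64_t phys_base)
{
	uint32_t lo = (uint32_t)((phys_base >> MC_GSC_BASE_LO_SHIFT) &
			MC_GSC_BASE_LO_MASK) << MC_GSC_BASE_LO_SHIFT;
	uint32_t hi = ((uint32_t)(phys_base >> 32) & MC_GSC_BASE_HI_MASK) <<
			MC_GSC_BASE_HI_SHIFT;

	mmio_write_32(TEGRA_MC_BASE + lo_off, lo);
	mmio_write_32(TEGRA_MC_BASE + hi_off, hi);
}

For the TZRAM carveout, for instance, lo_off/hi_off would be MC_TZRAM_BASE_LO and MC_TZRAM_BASE_HI.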
diff --git a/plat/nvidia/tegra/include/t210/tegra_def.h b/plat/nvidia/tegra/include/t210/tegra_def.h
new file mode 100644
index 00000000..454c666d
--- /dev/null
+++ b/plat/nvidia/tegra/include/t210/tegra_def.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __TEGRA_DEF_H__
+#define __TEGRA_DEF_H__
+
+#include <utils_def.h>
+
+/*******************************************************************************
+ * Power down state IDs
+ ******************************************************************************/
+#define PSTATE_ID_CORE_POWERDN U(7)
+#define PSTATE_ID_CLUSTER_IDLE U(16)
+#define PSTATE_ID_CLUSTER_POWERDN U(17)
+#define PSTATE_ID_SOC_POWERDN U(27)
+
+/*******************************************************************************
+ * This value is used by the PSCI implementation during the `SYSTEM_SUSPEND`
+ * call as the `state-id` field in the 'power state' parameter.
+ ******************************************************************************/
+#define PLAT_SYS_SUSPEND_STATE_ID PSTATE_ID_SOC_POWERDN
+
+/*******************************************************************************
+ * Platform power states (used by PSCI framework)
+ *
+ * - PLAT_MAX_RET_STATE should be less than lowest PSTATE_ID
+ * - PLAT_MAX_OFF_STATE should be greater than the highest PSTATE_ID
+ ******************************************************************************/
+#define PLAT_MAX_RET_STATE U(1)
+#define PLAT_MAX_OFF_STATE (PSTATE_ID_SOC_POWERDN + U(1))
+
+/*******************************************************************************
+ * GIC memory map
+ ******************************************************************************/
+#define TEGRA_GICD_BASE U(0x50041000)
+#define TEGRA_GICC_BASE U(0x50042000)
+
+/*******************************************************************************
+ * Tegra Memory Select Switch Controller constants
+ ******************************************************************************/
+#define TEGRA_MSELECT_BASE U(0x50060000)
+
+#define MSELECT_CONFIG U(0x0)
+#define ENABLE_WRAP_INCR_MASTER2_BIT (U(1) << U(29))
+#define ENABLE_WRAP_INCR_MASTER1_BIT (U(1) << U(28))
+#define ENABLE_WRAP_INCR_MASTER0_BIT (U(1) << U(27))
+#define UNSUPPORTED_TX_ERR_MASTER2_BIT (U(1) << U(25))
+#define UNSUPPORTED_TX_ERR_MASTER1_BIT (U(1) << U(24))
+#define ENABLE_UNSUP_TX_ERRORS (UNSUPPORTED_TX_ERR_MASTER2_BIT | \
+ UNSUPPORTED_TX_ERR_MASTER1_BIT)
+#define ENABLE_WRAP_TO_INCR_BURSTS (ENABLE_WRAP_INCR_MASTER2_BIT | \
+ ENABLE_WRAP_INCR_MASTER1_BIT | \
+ ENABLE_WRAP_INCR_MASTER0_BIT)
+
+/*******************************************************************************
+ * Tegra micro-seconds timer constants
+ ******************************************************************************/
+#define TEGRA_TMRUS_BASE U(0x60005010)
+#define TEGRA_TMRUS_SIZE U(0x1000)
+
+/*******************************************************************************
+ * Tegra Clock and Reset Controller constants
+ ******************************************************************************/
+#define TEGRA_CAR_RESET_BASE U(0x60006000)
+#define TEGRA_GPU_RESET_REG_OFFSET U(0x28C)
+#define GPU_RESET_BIT (U(1) << 24)
+
+/*******************************************************************************
+ * Tegra Flow Controller constants
+ ******************************************************************************/
+#define TEGRA_FLOWCTRL_BASE U(0x60007000)
+
+/*******************************************************************************
+ * Tegra Secure Boot Controller constants
+ ******************************************************************************/
+#define TEGRA_SB_BASE U(0x6000C200)
+
+/*******************************************************************************
+ * Tegra Exception Vectors constants
+ ******************************************************************************/
+#define TEGRA_EVP_BASE U(0x6000F000)
+
+/*******************************************************************************
+ * Tegra Miscellaneous register constants
+ ******************************************************************************/
+#define TEGRA_MISC_BASE U(0x70000000)
+#define HARDWARE_REVISION_OFFSET U(0x804)
+
+/*******************************************************************************
+ * Tegra UART controller base addresses
+ ******************************************************************************/
+#define TEGRA_UARTA_BASE U(0x70006000)
+#define TEGRA_UARTB_BASE U(0x70006040)
+#define TEGRA_UARTC_BASE U(0x70006200)
+#define TEGRA_UARTD_BASE U(0x70006300)
+#define TEGRA_UARTE_BASE U(0x70006400)
+
+/*******************************************************************************
+ * Tegra Power Mgmt Controller constants
+ ******************************************************************************/
+#define TEGRA_PMC_BASE U(0x7000E400)
+
+/*******************************************************************************
+ * Tegra Memory Controller constants
+ ******************************************************************************/
+#define TEGRA_MC_BASE U(0x70019000)
+
+/* TZDRAM carveout configuration registers */
+#define MC_SECURITY_CFG0_0 U(0x70)
+#define MC_SECURITY_CFG1_0 U(0x74)
+#define MC_SECURITY_CFG3_0 U(0x9BC)
+
+/* Video Memory carveout configuration registers */
+#define MC_VIDEO_PROTECT_BASE_HI U(0x978)
+#define MC_VIDEO_PROTECT_BASE_LO U(0x648)
+#define MC_VIDEO_PROTECT_SIZE_MB U(0x64c)
+
+/*******************************************************************************
+ * Tegra TZRAM constants
+ ******************************************************************************/
+#define TEGRA_TZRAM_BASE U(0x7C010000)
+#define TEGRA_TZRAM_SIZE U(0x10000)
+
+#endif /* __TEGRA_DEF_H__ */
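The MSELECT_CONFIG bit groups above are most naturally applied with a read-modify-write; the snippet below is an illustrative assumption about the intended polarity (convert WRAP bursts to INCR, mask unsupported-transaction errors), not a quote from the platform code in this patch:

#include <stdint.h>
#include <mmio.h>

static void example_mselect_setup(void)
{
	uint32_t val = mmio_read_32(TEGRA_MSELECT_BASE + MSELECT_CONFIG);

	val &= ~ENABLE_UNSUP_TX_ERRORS;		/* don't flag unsupported transactions */
	val |= ENABLE_WRAP_TO_INCR_BURSTS;	/* convert WRAP bursts to INCR bursts */
	mmio_write_32(TEGRA_MSELECT_BASE + MSELECT_CONFIG, val);
}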
diff --git a/plat/nvidia/tegra/include/tegra_platform.h b/plat/nvidia/tegra/include/tegra_platform.h
new file mode 100644
index 00000000..fbaad6e7
--- /dev/null
+++ b/plat/nvidia/tegra/include/tegra_platform.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __TEGRA_PLATFORM_H__
+#define __TEGRA_PLATFORM_H__
+
+#include <stdint.h>
+#include <sys/cdefs.h>
+
+/*
+ * Tegra chip major/minor version
+ */
+uint32_t tegra_get_chipid_major(void);
+uint32_t tegra_get_chipid_minor(void);
+
+/*
+ * Tegra chip identifiers
+ */
+uint8_t tegra_chipid_is_t132(void);
+uint8_t tegra_chipid_is_t210(void);
+uint8_t tegra_chipid_is_t186(void);
+
+
+/*
+ * Tegra platform identifiers
+ */
+uint8_t tegra_platform_is_silicon(void);
+uint8_t tegra_platform_is_qt(void);
+uint8_t tegra_platform_is_emulation(void);
+uint8_t tegra_platform_is_fpga(void);
+
+#endif /* __TEGRA_PLATFORM_H__ */
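Each predicate returns a non-zero value when the condition holds; a short, hypothetical use that gates behaviour on chip and platform type:

	if ((tegra_chipid_is_t186() != 0U) &&
	    (tegra_platform_is_silicon() != 0U)) {
		/* apply T186 silicon-only settings here */
	}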
diff --git a/plat/nvidia/tegra/include/tegra_private.h b/plat/nvidia/tegra/include/tegra_private.h
new file mode 100644
index 00000000..ec7a277c
--- /dev/null
+++ b/plat/nvidia/tegra/include/tegra_private.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __TEGRA_PRIVATE_H__
+#define __TEGRA_PRIVATE_H__
+
+#include <arch.h>
+#include <platform_def.h>
+#include <psci.h>
+#include <xlat_tables.h>
+
+/*******************************************************************************
+ * Tegra DRAM memory base address
+ ******************************************************************************/
+#define TEGRA_DRAM_BASE ULL(0x80000000)
+#define TEGRA_DRAM_END ULL(0x27FFFFFFF)
+
+/*******************************************************************************
+ * Struct for parameters received from BL2
+ ******************************************************************************/
+typedef struct plat_params_from_bl2 {
+ /* TZ memory size */
+ uint64_t tzdram_size;
+ /* TZ memory base */
+ uint64_t tzdram_base;
+ /* UART port ID */
+ int uart_id;
+} plat_params_from_bl2_t;
+
+/*******************************************************************************
+ * Per-CPU struct describing FIQ state to be stored
+ ******************************************************************************/
+typedef struct pcpu_fiq_state {
+ uint64_t elr_el3;
+ uint64_t spsr_el3;
+} pcpu_fiq_state_t;
+
+/*******************************************************************************
+ * Struct describing per-FIQ configuration settings
+ ******************************************************************************/
+typedef struct irq_sec_cfg {
+ /* IRQ number */
+ unsigned int irq;
+ /* Target CPUs servicing this interrupt */
+ unsigned int target_cpus;
+ /* type = INTR_TYPE_S_EL1 or INTR_TYPE_EL3 */
+ uint32_t type;
+} irq_sec_cfg_t;
+
+/* Declarations for plat_psci_handlers.c */
+int32_t tegra_soc_validate_power_state(unsigned int power_state,
+ psci_power_state_t *req_state);
+
+/* Declarations for plat_setup.c */
+const mmap_region_t *plat_get_mmio_map(void);
+uint32_t plat_get_console_from_id(int id);
+void plat_gic_setup(void);
+bl31_params_t *plat_get_bl31_params(void);
+plat_params_from_bl2_t *plat_get_bl31_plat_params(void);
+
+/* Declarations for plat_secondary.c */
+void plat_secondary_setup(void);
+int plat_lock_cpu_vectors(void);
+
+/* Declarations for tegra_fiq_glue.c */
+void tegra_fiq_handler_setup(void);
+int tegra_fiq_get_intr_context(void);
+void tegra_fiq_set_ns_entrypoint(uint64_t entrypoint);
+
+/* Declarations for tegra_gic.c */
+void tegra_gic_cpuif_deactivate(void);
+void tegra_gic_setup(const irq_sec_cfg_t *irq_sec_ptr, uint32_t num_irqs);
+
+/* Declarations for tegra_security.c */
+void tegra_security_setup(void);
+void tegra_security_setup_videomem(uintptr_t base, uint64_t size);
+
+/* Declarations for tegra_pm.c */
+extern uint8_t tegra_fake_system_suspend;
+
+void tegra_pm_system_suspend_entry(void);
+void tegra_pm_system_suspend_exit(void);
+int tegra_system_suspended(void);
+
+/* Declarations for tegraXXX_pm.c */
+int tegra_prepare_cpu_suspend(unsigned int id, unsigned int afflvl);
+int tegra_prepare_cpu_on_finish(unsigned long mpidr);
+
+/* Declarations for tegra_bl31_setup.c */
+plat_params_from_bl2_t *bl31_get_plat_params(void);
+int bl31_check_ns_address(uint64_t base, uint64_t size_in_bytes);
+void plat_early_platform_setup(void);
+
+/* Declarations for tegra_delay_timer.c */
+void tegra_delay_timer_init(void);
+
+void tegra_secure_entrypoint(void);
+void tegra186_cpu_reset_handler(void);
+
+#endif /* __TEGRA_PRIVATE_H__ */
diff --git a/plat/nvidia/tegra/platform.mk b/plat/nvidia/tegra/platform.mk
new file mode 100644
index 00000000..9a9e79e2
--- /dev/null
+++ b/plat/nvidia/tegra/platform.mk
@@ -0,0 +1,39 @@
+#
+# Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+SOC_DIR := plat/nvidia/tegra/soc/${TARGET_SOC}
+
+# dump the state on crash console
+CRASH_REPORTING := 1
+$(eval $(call add_define,CRASH_REPORTING))
+
+# enable assert() for release/debug builds
+ENABLE_ASSERTIONS := 1
+
+# Disable the PSCI platform compatibility layer
+ENABLE_PLAT_COMPAT := 0
+
+# enable dynamic memory mapping
+PLAT_XLAT_TABLES_DYNAMIC := 1
+$(eval $(call add_define,PLAT_XLAT_TABLES_DYNAMIC))
+
+# Enable PSCI v1.0 extended state ID format
+PSCI_EXTENDED_STATE_ID := 1
+
+# code and read-only data should be put on separate memory pages
+SEPARATE_CODE_AND_RODATA := 1
+
+# do not use coherent memory
+USE_COHERENT_MEM := 0
+
+include plat/nvidia/tegra/common/tegra_common.mk
+include ${SOC_DIR}/platform_${TARGET_SOC}.mk
+
+# modify BUILD_PLAT to point to SoC specific build directory
+BUILD_PLAT := ${BUILD_BASE}/${PLAT}/${TARGET_SOC}/${BUILD_TYPE}
+
+# enable signed comparison checks
+TF_CFLAGS += -Wsign-compare
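For context, TARGET_SOC both selects SOC_DIR above and becomes part of BUILD_PLAT, so this single platform makefile serves all three SoCs. A typical invocation of the standard TF-A top-level Makefile would look something like the following (options beyond PLAT and TARGET_SOC are illustrative):

	make PLAT=tegra TARGET_SOC=t186 DEBUG=1 bl31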
diff --git a/plat/nvidia/tegra/soc/t132/plat_psci_handlers.c b/plat/nvidia/tegra/soc/t132/plat_psci_handlers.c
new file mode 100644
index 00000000..1cffb741
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t132/plat_psci_handlers.c
@@ -0,0 +1,147 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <debug.h>
+#include <delay_timer.h>
+#include <denver.h>
+#include <flowctrl.h>
+#include <mmio.h>
+#include <platform_def.h>
+#include <pmc.h>
+#include <psci.h>
+#include <tegra_def.h>
+#include <tegra_private.h>
+
+/*
+ * Register used to clear CPU reset signals. Each CPU has two reset
+ * signals: CPU reset (3:0) and Core reset (19:16)
+ */
+#define CPU_CMPLX_RESET_CLR 0x344
+#define CPU_CORE_RESET_MASK 0x10001
+
+/* Clock and Reset controller registers for system clock's settings */
+#define SCLK_RATE 0x30
+#define SCLK_BURST_POLICY 0x28
+#define SCLK_BURST_POLICY_DEFAULT 0x10000000
+
+static int cpu_powergate_mask[PLATFORM_MAX_CPUS_PER_CLUSTER];
+
+int32_t tegra_soc_validate_power_state(unsigned int power_state,
+ psci_power_state_t *req_state)
+{
+ int state_id = psci_get_pstate_id(power_state);
+ int cpu = read_mpidr() & MPIDR_CPU_MASK;
+
+ /*
+ * Sanity check the requested state id, power level and CPU number.
+	 * Currently, T132 only supports SYSTEM_SUSPEND on the last standing
+	 * CPU, i.e. CPU 0.
+ */
+ if ((state_id != PSTATE_ID_SOC_POWERDN) || (cpu != 0)) {
+ ERROR("unsupported state id @ power level\n");
+ return PSCI_E_INVALID_PARAMS;
+ }
+
+ /* Set lower power states to PLAT_MAX_OFF_STATE */
+ for (uint32_t i = MPIDR_AFFLVL0; i < PLAT_MAX_PWR_LVL; i++)
+ req_state->pwr_domain_state[i] = PLAT_MAX_OFF_STATE;
+
+ /* Set the SYSTEM_SUSPEND state-id */
+ req_state->pwr_domain_state[PLAT_MAX_PWR_LVL] =
+ PSTATE_ID_SOC_POWERDN;
+
+ return PSCI_E_SUCCESS;
+}
+
+int tegra_soc_pwr_domain_on(u_register_t mpidr)
+{
+ int cpu = mpidr & MPIDR_CPU_MASK;
+ uint32_t mask = CPU_CORE_RESET_MASK << cpu;
+
+ if (cpu_powergate_mask[cpu] == 0) {
+
+ /* Deassert CPU reset signals */
+ mmio_write_32(TEGRA_CAR_RESET_BASE + CPU_CMPLX_RESET_CLR, mask);
+
+ /* Power on CPU using PMC */
+ tegra_pmc_cpu_on(cpu);
+
+ /* Fill in the CPU powergate mask */
+ cpu_powergate_mask[cpu] = 1;
+
+ } else {
+ /* Power on CPU using Flow Controller */
+ tegra_fc_cpu_on(cpu);
+ }
+
+ return PSCI_E_SUCCESS;
+}
+
+int tegra_soc_pwr_domain_on_finish(const psci_power_state_t *target_state)
+{
+ /*
+ * Lock scratch registers which hold the CPU vectors
+ */
+ tegra_pmc_lock_cpu_vectors();
+
+ return PSCI_E_SUCCESS;
+}
+
+int tegra_soc_pwr_domain_off(const psci_power_state_t *target_state)
+{
+ tegra_fc_cpu_off(read_mpidr() & MPIDR_CPU_MASK);
+
+ /* Disable DCO operations */
+ denver_disable_dco();
+
+ /* Power down the CPU */
+ write_actlr_el1(DENVER_CPU_STATE_POWER_DOWN);
+
+ return PSCI_E_SUCCESS;
+}
+
+int tegra_soc_pwr_domain_suspend(const psci_power_state_t *target_state)
+{
+#if ENABLE_ASSERTIONS
+ int cpu = read_mpidr() & MPIDR_CPU_MASK;
+
+ /* SYSTEM_SUSPEND only on CPU0 */
+ assert(cpu == 0);
+#endif
+
+ /* Allow restarting CPU #1 using PMC on suspend exit */
+ cpu_powergate_mask[1] = 0;
+
+ /* Program FC to enter suspend state */
+ tegra_fc_cpu_powerdn(read_mpidr());
+
+ /* Disable DCO operations */
+ denver_disable_dco();
+
+ /* Program the suspend state ID */
+ write_actlr_el1(target_state->pwr_domain_state[PLAT_MAX_PWR_LVL]);
+
+ return PSCI_E_SUCCESS;
+}
+
+int tegra_soc_prepare_system_reset(void)
+{
+ /*
+ * Set System Clock (SCLK) to POR default so that the clock source
+ * for the PMC APB clock would not be changed due to system reset.
+ */
+ mmio_write_32((uintptr_t)TEGRA_CAR_RESET_BASE + SCLK_BURST_POLICY,
+ SCLK_BURST_POLICY_DEFAULT);
+ mmio_write_32((uintptr_t)TEGRA_CAR_RESET_BASE + SCLK_RATE, 0);
+
+ /* Wait 1 ms to make sure clock source/device logic is stabilized. */
+ mdelay(1);
+
+ return PSCI_E_SUCCESS;
+}
diff --git a/plat/nvidia/tegra/soc/t132/plat_secondary.c b/plat/nvidia/tegra/soc/t132/plat_secondary.c
new file mode 100644
index 00000000..d5ca30c9
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t132/plat_secondary.c
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <debug.h>
+#include <denver.h>
+#include <mmio.h>
+#include <platform.h>
+#include <pmc.h>
+#include <psci.h>
+#include <tegra_def.h>
+
+#define SB_CSR 0x0
+#define SB_CSR_NS_RST_VEC_WR_DIS (1 << 1)
+
+/* AARCH64 CPU reset vector */
+#define SB_AA64_RESET_LOW 0x30 /* width = 31:0 */
+#define SB_AA64_RESET_HI 0x34 /* width = 11:0 */
+
+/* AARCH32 CPU reset vector */
+#define EVP_CPU_RESET_VECTOR 0x100
+
+extern void tegra_secure_entrypoint(void);
+
+/*
+ * For T132, CPUs reset to AARCH32, so the reset vector is first
+ * armv8_trampoline which does a warm reset to AARCH64 and starts
+ * execution at the address in SB_AA64_RESET_LOW/SB_AA64_RESET_HI.
+ */
+__aligned(8) const uint32_t armv8_trampoline[] = {
+ 0xE3A00003, /* mov r0, #3 */
+ 0xEE0C0F50, /* mcr p15, 0, r0, c12, c0, 2 */
+ 0xEAFFFFFE, /* b . */
+};
+
+/*******************************************************************************
+ * Setup secondary CPU vectors
+ ******************************************************************************/
+void plat_secondary_setup(void)
+{
+ uint32_t val;
+ uint64_t reset_addr = (uint64_t)tegra_secure_entrypoint;
+
+ /*
+ * For T132, CPUs reset to AARCH32, so the reset vector is first
+ * armv8_trampoline, which does a warm reset to AARCH64 and starts
+ * execution at the address in SCRATCH34/SCRATCH35.
+ */
+ INFO("Setting up T132 CPU boot\n");
+
+ /* initial AARCH32 reset address */
+ tegra_pmc_write_32(PMC_SECURE_SCRATCH22,
+ (unsigned long)&armv8_trampoline);
+
+ /* set AARCH32 exception vector (read to flush) */
+ mmio_write_32(TEGRA_EVP_BASE + EVP_CPU_RESET_VECTOR,
+ (unsigned long)&armv8_trampoline);
+ val = mmio_read_32(TEGRA_EVP_BASE + EVP_CPU_RESET_VECTOR);
+
+ /* setup secondary CPU vector */
+ mmio_write_32(TEGRA_SB_BASE + SB_AA64_RESET_LOW,
+ (reset_addr & 0xFFFFFFFF) | 1);
+ val = reset_addr >> 32;
+ mmio_write_32(TEGRA_SB_BASE + SB_AA64_RESET_HI, val & 0x7FF);
+
+ /* configure PMC */
+ tegra_pmc_cpu_setup(reset_addr);
+ tegra_pmc_lock_cpu_vectors();
+}
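For reference, the trampoline works by writing the AArch32 RMR register (encoded as p15, 0, c12, c0, 2): bit 0 (AA64) requests that the next warm reset enter AArch64 and bit 1 (RR) requests that warm reset, so loading r0 with 3 triggers the switch immediately, while the final `b .` simply parks the core until the reset takes effect and execution restarts at the address programmed into SB_AA64_RESET_LOW/HI.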
diff --git a/plat/nvidia/tegra/soc/t132/plat_setup.c b/plat/nvidia/tegra/soc/t132/plat_setup.c
new file mode 100644
index 00000000..24199654
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t132/plat_setup.c
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <bl_common.h>
+#include <tegra_def.h>
+#include <tegra_private.h>
+#include <xlat_tables.h>
+
+/*******************************************************************************
+ * The Tegra power domain tree has a single system level power domain i.e. a
+ * single root node. The first entry in the power domain descriptor specifies
+ * the number of power domains at the highest power level.
+ ******************************************************************************/
+const unsigned char tegra_power_domain_tree_desc[] = {
+ /* No of root nodes */
+ 1,
+ /* No of clusters */
+ PLATFORM_CLUSTER_COUNT,
+ /* No of CPU cores */
+ PLATFORM_CORE_COUNT,
+};
+
+/* sets of MMIO ranges setup */
+#define MMIO_RANGE_0_ADDR 0x50000000
+#define MMIO_RANGE_1_ADDR 0x60000000
+#define MMIO_RANGE_2_ADDR 0x70000000
+#define MMIO_RANGE_SIZE 0x200000
+
+/*
+ * Table of regions to map using the MMU.
+ */
+static const mmap_region_t tegra_mmap[] = {
+ MAP_REGION_FLAT(MMIO_RANGE_0_ADDR, MMIO_RANGE_SIZE,
+ MT_DEVICE | MT_RW | MT_SECURE),
+ MAP_REGION_FLAT(MMIO_RANGE_1_ADDR, MMIO_RANGE_SIZE,
+ MT_DEVICE | MT_RW | MT_SECURE),
+ MAP_REGION_FLAT(MMIO_RANGE_2_ADDR, MMIO_RANGE_SIZE,
+ MT_DEVICE | MT_RW | MT_SECURE),
+ {0}
+};
+
+/*******************************************************************************
+ * Set up the pagetables as per the platform memory map & initialize the MMU
+ ******************************************************************************/
+const mmap_region_t *plat_get_mmio_map(void)
+{
+ /* MMIO space */
+ return tegra_mmap;
+}
+
+unsigned int plat_get_syscnt_freq2(void)
+{
+ return 12000000;
+}
+
+/*******************************************************************************
+ * Maximum supported UART controllers
+ ******************************************************************************/
+#define TEGRA132_MAX_UART_PORTS 5
+
+/*******************************************************************************
+ * This variable holds the UART port base addresses
+ ******************************************************************************/
+static uint32_t tegra132_uart_addresses[TEGRA132_MAX_UART_PORTS + 1] = {
+ 0, /* undefined - treated as an error case */
+ TEGRA_UARTA_BASE,
+ TEGRA_UARTB_BASE,
+ TEGRA_UARTC_BASE,
+ TEGRA_UARTD_BASE,
+ TEGRA_UARTE_BASE,
+};
+
+/*******************************************************************************
+ * Retrieve the UART controller base to be used as the console
+ ******************************************************************************/
+uint32_t plat_get_console_from_id(int id)
+{
+	if ((id < 0) || (id > TEGRA132_MAX_UART_PORTS))
+		return 0;
+
+ return tegra132_uart_addresses[id];
+}
+
+/*******************************************************************************
+ * Initialize the GIC and SGIs
+ ******************************************************************************/
+void plat_gic_setup(void)
+{
+ tegra_gic_setup(NULL, 0);
+}
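As a usage sketch (the surrounding setup code is assumed, not part of this file), the UART ID handed over by BL2 in plat_params_from_bl2_t is resolved to a base address through this helper before the console is registered:

	uint32_t uart_base = plat_get_console_from_id(plat_params->uart_id);

	if (uart_base != 0U) {
		/* initialise the console at uart_base */
	}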
diff --git a/plat/nvidia/tegra/soc/t132/plat_sip_calls.c b/plat/nvidia/tegra/soc/t132/plat_sip_calls.c
new file mode 100644
index 00000000..adc1c712
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t132/plat_sip_calls.c
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <bl_common.h>
+#include <context_mgmt.h>
+#include <debug.h>
+#include <errno.h>
+#include <tegra_private.h>
+
+#define NS_SWITCH_AARCH32 1
+#define SCR_RW_BITPOS __builtin_ctz(SCR_RW_BIT)
+
+/*******************************************************************************
+ * Tegra132 SiP SMCs
+ ******************************************************************************/
+#define TEGRA_SIP_AARCH_SWITCH 0x82000004
+
+/*******************************************************************************
+ * SPSR settings for AARCH32/AARCH64 modes
+ ******************************************************************************/
+#define SPSR32 SPSR_MODE32(MODE32_svc, SPSR_T_ARM, SPSR_E_LITTLE, \
+ DAIF_FIQ_BIT | DAIF_IRQ_BIT | DAIF_ABT_BIT)
+#define SPSR64 SPSR_64(MODE_EL2, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS)
+
+/*******************************************************************************
+ * This function is responsible for handling all T132 SiP calls
+ ******************************************************************************/
+int plat_sip_handler(uint32_t smc_fid,
+ uint64_t x1,
+ uint64_t x2,
+ uint64_t x3,
+ uint64_t x4,
+ void *cookie,
+ void *handle,
+ uint64_t flags)
+{
+ switch (smc_fid) {
+
+ case TEGRA_SIP_AARCH_SWITCH:
+
+ /* clean up the high bits */
+ x1 = (uint32_t)x1;
+ x2 = (uint32_t)x2;
+
+ if (!x1 || x2 > NS_SWITCH_AARCH32) {
+ ERROR("%s: invalid parameters\n", __func__);
+ return -EINVAL;
+ }
+
+ /* x1 = ns entry point */
+ cm_set_elr_spsr_el3(NON_SECURE, x1,
+ (x2 == NS_SWITCH_AARCH32) ? SPSR32 : SPSR64);
+
+ /* switch NS world mode */
+ cm_write_scr_el3_bit(NON_SECURE, SCR_RW_BITPOS, !x2);
+
+ INFO("CPU switched to AARCH%s mode\n",
+ (x2 == NS_SWITCH_AARCH32) ? "32" : "64");
+ return 0;
+
+ default:
+ ERROR("%s: unhandled SMC (0x%x)\n", __func__, smc_fid);
+ break;
+ }
+
+ return -ENOTSUP;
+}
diff --git a/plat/nvidia/tegra/soc/t132/platform_t132.mk b/plat/nvidia/tegra/soc/t132/platform_t132.mk
new file mode 100644
index 00000000..8b3d238f
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t132/platform_t132.mk
@@ -0,0 +1,28 @@
+#
+# Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+TZDRAM_BASE := 0xF5C00000
+$(eval $(call add_define,TZDRAM_BASE))
+
+PLATFORM_CLUSTER_COUNT := 1
+$(eval $(call add_define,PLATFORM_CLUSTER_COUNT))
+
+PLATFORM_MAX_CPUS_PER_CLUSTER := 2
+$(eval $(call add_define,PLATFORM_MAX_CPUS_PER_CLUSTER))
+
+MAX_XLAT_TABLES := 3
+$(eval $(call add_define,MAX_XLAT_TABLES))
+
+MAX_MMAP_REGIONS := 8
+$(eval $(call add_define,MAX_MMAP_REGIONS))
+
+BL31_SOURCES += lib/cpus/aarch64/denver.S \
+ ${COMMON_DIR}/drivers/flowctrl/flowctrl.c \
+ ${COMMON_DIR}/drivers/memctrl/memctrl_v1.c \
+ ${SOC_DIR}/plat_psci_handlers.c \
+ ${SOC_DIR}/plat_sip_calls.c \
+ ${SOC_DIR}/plat_setup.c \
+ ${SOC_DIR}/plat_secondary.c
diff --git a/plat/nvidia/tegra/soc/t186/drivers/include/mce_private.h b/plat/nvidia/tegra/soc/t186/drivers/include/mce_private.h
new file mode 100644
index 00000000..26197e97
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t186/drivers/include/mce_private.h
@@ -0,0 +1,261 @@
+/*
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __MCE_PRIVATE_H__
+#define __MCE_PRIVATE_H__
+
+#include <mmio.h>
+#include <tegra_def.h>
+
+/*******************************************************************************
+ * Macros to prepare CSTATE info request
+ ******************************************************************************/
+/* Description of the parameters for UPDATE_CSTATE_INFO request */
+#define CLUSTER_CSTATE_MASK ULL(0x7)
+#define CLUSTER_CSTATE_SHIFT U(0)
+#define CLUSTER_CSTATE_UPDATE_BIT (ULL(1) << 7)
+#define CCPLEX_CSTATE_MASK ULL(0x3)
+#define CCPLEX_CSTATE_SHIFT ULL(8)
+#define CCPLEX_CSTATE_UPDATE_BIT (ULL(1) << 15)
+#define SYSTEM_CSTATE_MASK ULL(0xF)
+#define SYSTEM_CSTATE_SHIFT ULL(16)
+#define SYSTEM_CSTATE_FORCE_UPDATE_SHIFT ULL(22)
+#define SYSTEM_CSTATE_FORCE_UPDATE_BIT (ULL(1) << 22)
+#define SYSTEM_CSTATE_UPDATE_BIT (ULL(1) << 23)
+#define CSTATE_WAKE_MASK_UPDATE_BIT (ULL(1) << 31)
+#define CSTATE_WAKE_MASK_SHIFT ULL(32)
+#define CSTATE_WAKE_MASK_CLEAR U(0xFFFFFFFF)
+
+/*******************************************************************************
+ * Auto-CC3 control macros
+ ******************************************************************************/
+#define MCE_AUTO_CC3_FREQ_MASK U(0x1FF)
+#define MCE_AUTO_CC3_FREQ_SHIFT U(0)
+#define MCE_AUTO_CC3_VTG_MASK U(0x7F)
+#define MCE_AUTO_CC3_VTG_SHIFT U(16)
+#define MCE_AUTO_CC3_ENABLE_BIT (U(1) << 31)
+
+/*******************************************************************************
+ * Macros for the 'IS_SC7_ALLOWED' command
+ ******************************************************************************/
+#define MCE_SC7_ALLOWED_MASK U(0x7)
+#define MCE_SC7_WAKE_TIME_SHIFT U(32)
+
+/*******************************************************************************
+ * Macros for 'read/write cstate stats' commands
+ ******************************************************************************/
+#define MCE_CSTATE_STATS_TYPE_SHIFT ULL(32)
+#define MCE_CSTATE_WRITE_DATA_LO_MASK U(0xF)
+
+/*******************************************************************************
+ * Macros for 'update crossover threshold' command
+ ******************************************************************************/
+#define MCE_CROSSOVER_THRESHOLD_TIME_SHIFT U(32)
+
+/*******************************************************************************
+ * MCA argument macros
+ ******************************************************************************/
+#define MCA_ARG_ERROR_MASK U(0xFF)
+#define MCA_ARG_FINISH_SHIFT U(24)
+#define MCA_ARG_FINISH_MASK U(0xFF)
+
+/*******************************************************************************
+ * Uncore PERFMON ARI struct
+ ******************************************************************************/
+#define UNCORE_PERFMON_CMD_READ U(0)
+#define UNCORE_PERFMON_CMD_WRITE U(1)
+
+#define UNCORE_PERFMON_CMD_MASK U(0xFF)
+#define UNCORE_PERFMON_CMD_SHIFT U(24)
+#define UNCORE_PERFMON_UNIT_GRP_MASK U(0xF)
+#define UNCORE_PERFMON_SELECTOR_MASK U(0xF)
+#define UNCORE_PERFMON_REG_MASK U(0xFF)
+#define UNCORE_PERFMON_CTR_MASK U(0xFF)
+#define UNCORE_PERFMON_RESP_STATUS_MASK U(0xFF)
+#define UNCORE_PERFMON_RESP_STATUS_SHIFT U(24)
+
+/*******************************************************************************
+ * Structure populated by arch specific code to export routines which perform
+ * common low level MCE functions
+ ******************************************************************************/
+typedef struct arch_mce_ops {
+ /*
+ * This ARI request sets up the MCE to start execution on assertion
+ * of STANDBYWFI, update the core power state and expected wake time,
+ * then determine the proper power state to enter.
+ */
+ int32_t (*enter_cstate)(uint32_t ari_base, uint32_t state,
+ uint32_t wake_time);
+ /*
+ * This ARI request allows updating of the CLUSTER_CSTATE,
+ * CCPLEX_CSTATE, and SYSTEM_CSTATE register values.
+ */
+ int32_t (*update_cstate_info)(uint32_t ari_base,
+ uint32_t cluster,
+ uint32_t ccplex,
+ uint32_t system,
+ uint8_t sys_state_force,
+ uint32_t wake_mask,
+ uint8_t update_wake_mask);
+ /*
+ * This ARI request allows updating of power state crossover
+ * threshold times. An index value specifies which crossover
+ * state is being updated.
+ */
+ int32_t (*update_crossover_time)(uint32_t ari_base,
+ uint32_t type,
+ uint32_t time);
+ /*
+ * This ARI request allows read access to statistical information
+ * related to power states.
+ */
+ uint64_t (*read_cstate_stats)(uint32_t ari_base,
+ uint32_t state);
+ /*
+ * This ARI request allows write access to statistical information
+ * related to power states.
+ */
+ int32_t (*write_cstate_stats)(uint32_t ari_base,
+ uint32_t state,
+ uint32_t stats);
+ /*
+ * This ARI request allows the CPU to understand the features
+ * supported by the MCE firmware.
+ */
+ uint64_t (*call_enum_misc)(uint32_t ari_base, uint32_t cmd,
+ uint32_t data);
+ /*
+ * This ARI request allows querying the CCPLEX to determine if
+ * the CCx state is allowed given a target core C-state and wake
+ * time. If the CCx state is allowed, the response indicates CCx
+ * must be entered. If the CCx state is not allowed, the response
+ * indicates CC6/CC7 can't be entered
+ */
+ int32_t (*is_ccx_allowed)(uint32_t ari_base, uint32_t state,
+ uint32_t wake_time);
+ /*
+ * This ARI request allows querying the CCPLEX to determine if
+ * the SC7 state is allowed given a target core C-state and wake
+ * time. If the SC7 state is allowed, all cores but the associated
+ * core are offlined (WAKE_EVENTS are set to 0) and the response
+ * indicates SC7 must be entered. If the SC7 state is not allowed,
+ * the response indicates SC7 can't be entered
+ */
+ int32_t (*is_sc7_allowed)(uint32_t ari_base, uint32_t state,
+ uint32_t wake_time);
+ /*
+ * This ARI request allows a core to bring another offlined core
+ * back online to the C0 state. Note that a core is offlined by
+ * entering a C-state where the WAKE_MASK is all 0.
+ */
+ int32_t (*online_core)(uint32_t ari_base, uint32_t cpuid);
+ /*
+ * This ARI request allows the CPU to enable/disable Auto-CC3 idle
+ * state.
+ */
+ int32_t (*cc3_ctrl)(uint32_t ari_base,
+ uint32_t freq,
+ uint32_t volt,
+ uint8_t enable);
+ /*
+ * This ARI request allows updating the reset vector register for
+ * D15 and A57 CPUs.
+ */
+ int32_t (*update_reset_vector)(uint32_t ari_base);
+ /*
+ * This ARI request instructs the ROC to flush A57 data caches in
+ * order to maintain coherency with the Denver cluster.
+ */
+ int32_t (*roc_flush_cache)(uint32_t ari_base);
+ /*
+ * This ARI request instructs the ROC to flush A57 data caches along
+ * with the caches covering ARM code in order to maintain coherency
+ * with the Denver cluster.
+ */
+ int32_t (*roc_flush_cache_trbits)(uint32_t ari_base);
+ /*
+ * This ARI request instructs the ROC to clean A57 data caches along
+ * with the caches covering ARM code in order to maintain coherency
+ * with the Denver cluster.
+ */
+ int32_t (*roc_clean_cache)(uint32_t ari_base);
+ /*
+ * This ARI request reads/writes the Machine Check Arch. (MCA)
+ * registers.
+ */
+ uint64_t (*read_write_mca)(uint32_t ari_base,
+ uint64_t cmd,
+ uint64_t *data);
+ /*
+ * Some MC GSC (General Security Carveout) register values are
+ * expected to be changed by TrustZone secure ARM code after boot.
+ * Since there is no hardware mechanism for the CCPLEX to know
+ * that an MC GSC register has changed to allow it to update its
+ * own internal GSC register, there needs to be a mechanism that
+ * can be used by ARM code to cause the CCPLEX to update its GSC
+ * register value. This ARI request allows updating the GSC register
+ * value for a certain carveout in the CCPLEX.
+ */
+ int32_t (*update_ccplex_gsc)(uint32_t ari_base, uint32_t gsc_idx);
+ /*
+ * This ARI request instructs the CCPLEX to either shutdown or
+ * reset the entire system
+ */
+ void (*enter_ccplex_state)(uint32_t ari_base, uint32_t state_idx);
+ /*
+ * This ARI request reads/writes data from/to Uncore PERFMON
+ * registers
+ */
+ int32_t (*read_write_uncore_perfmon)(uint32_t ari_base,
+ uint64_t req, uint64_t *data);
+ /*
+ * This ARI implements ARI_MISC_CCPLEX commands. This can be
+ * used to enable/disable coresight clock gating.
+ */
+ void (*misc_ccplex)(uint32_t ari_base, uint32_t index,
+ uint32_t value);
+} arch_mce_ops_t;
+
+/* declarations for ARI/NVG handler functions */
+int32_t ari_enter_cstate(uint32_t ari_base, uint32_t state, uint32_t wake_time);
+int32_t ari_update_cstate_info(uint32_t ari_base, uint32_t cluster, uint32_t ccplex,
+ uint32_t system, uint8_t sys_state_force, uint32_t wake_mask,
+ uint8_t update_wake_mask);
+int32_t ari_update_crossover_time(uint32_t ari_base, uint32_t type, uint32_t time);
+uint64_t ari_read_cstate_stats(uint32_t ari_base, uint32_t state);
+int32_t ari_write_cstate_stats(uint32_t ari_base, uint32_t state, uint32_t stats);
+uint64_t ari_enumeration_misc(uint32_t ari_base, uint32_t cmd, uint32_t data);
+int32_t ari_is_ccx_allowed(uint32_t ari_base, uint32_t state, uint32_t wake_time);
+int32_t ari_is_sc7_allowed(uint32_t ari_base, uint32_t state, uint32_t wake_time);
+int32_t ari_online_core(uint32_t ari_base, uint32_t core);
+int32_t ari_cc3_ctrl(uint32_t ari_base, uint32_t freq, uint32_t volt, uint8_t enable);
+int32_t ari_reset_vector_update(uint32_t ari_base);
+int32_t ari_roc_flush_cache_trbits(uint32_t ari_base);
+int32_t ari_roc_flush_cache(uint32_t ari_base);
+int32_t ari_roc_clean_cache(uint32_t ari_base);
+uint64_t ari_read_write_mca(uint32_t ari_base, uint64_t cmd, uint64_t *data);
+int32_t ari_update_ccplex_gsc(uint32_t ari_base, uint32_t gsc_idx);
+void ari_enter_ccplex_state(uint32_t ari_base, uint32_t state_idx);
+int32_t ari_read_write_uncore_perfmon(uint32_t ari_base,
+ uint64_t req, uint64_t *data);
+void ari_misc_ccplex(uint32_t ari_base, uint32_t index, uint32_t value);
+
+int32_t nvg_enter_cstate(uint32_t ari_base, uint32_t state, uint32_t wake_time);
+int32_t nvg_update_cstate_info(uint32_t ari_base, uint32_t cluster, uint32_t ccplex,
+ uint32_t system, uint8_t sys_state_force, uint32_t wake_mask,
+ uint8_t update_wake_mask);
+int32_t nvg_update_crossover_time(uint32_t ari_base, uint32_t type, uint32_t time);
+uint64_t nvg_read_cstate_stats(uint32_t ari_base, uint32_t state);
+int32_t nvg_write_cstate_stats(uint32_t ari_base, uint32_t state, uint32_t stats);
+int32_t nvg_is_ccx_allowed(uint32_t ari_base, uint32_t state, uint32_t wake_time);
+int32_t nvg_is_sc7_allowed(uint32_t ari_base, uint32_t state, uint32_t wake_time);
+int32_t nvg_online_core(uint32_t ari_base, uint32_t core);
+int32_t nvg_cc3_ctrl(uint32_t ari_base, uint32_t freq, uint32_t volt, uint8_t enable);
+
+extern void nvg_set_request_data(uint64_t req, uint64_t data);
+extern void nvg_set_request(uint64_t req);
+extern uint64_t nvg_get_result(void);
+#endif /* __MCE_PRIVATE_H__ */
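A hedged sketch of how the UPDATE_CSTATE_INFO field macros above combine into a single ARI request word; the helper is hypothetical and unconditionally sets every UPDATE bit, whereas the real driver (ari.c/nvg.c in this patch) only sets them for the fields actually being changed:

#include <stdint.h>

static uint64_t example_pack_cstate_info(uint32_t cluster, uint32_t ccplex,
					 uint32_t system, uint32_t wake_mask)
{
	uint64_t val = 0;

	/* cluster, ccplex and system C-state fields plus their update bits */
	val |= ((uint64_t)cluster & CLUSTER_CSTATE_MASK) << CLUSTER_CSTATE_SHIFT;
	val |= CLUSTER_CSTATE_UPDATE_BIT;
	val |= ((uint64_t)ccplex & CCPLEX_CSTATE_MASK) << CCPLEX_CSTATE_SHIFT;
	val |= CCPLEX_CSTATE_UPDATE_BIT;
	val |= ((uint64_t)system & SYSTEM_CSTATE_MASK) << SYSTEM_CSTATE_SHIFT;
	val |= SYSTEM_CSTATE_UPDATE_BIT;

	/* wake mask lives in the upper 32 bits of the request word */
	val |= ((uint64_t)wake_mask << CSTATE_WAKE_MASK_SHIFT);
	val |= CSTATE_WAKE_MASK_UPDATE_BIT;

	return val;
}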
diff --git a/plat/nvidia/tegra/soc/t186/drivers/include/t18x_ari.h b/plat/nvidia/tegra/soc/t186/drivers/include/t18x_ari.h
new file mode 100644
index 00000000..8c6f30c8
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t186/drivers/include/t18x_ari.h
@@ -0,0 +1,437 @@
+/*
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef T18X_TEGRA_ARI_H
+#define T18X_TEGRA_ARI_H
+
+/*
+ * ----------------------------------------------------------------------------
+ * t18x_ari.h
+ *
+ * Global ARI definitions.
+ * ----------------------------------------------------------------------------
+ */
+
+enum {
+ TEGRA_ARI_VERSION_MAJOR = 3U,
+ TEGRA_ARI_VERSION_MINOR = 1U,
+};
+
+typedef enum {
+ /* indexes below get the core lock */
+ TEGRA_ARI_MISC = 0U,
+ /* index 1 is deprecated */
+ /* index 2 is deprecated */
+ /* index 3 is deprecated */
+ TEGRA_ARI_ONLINE_CORE = 4U,
+
+ /* indexes below need cluster lock */
+ TEGRA_ARI_MISC_CLUSTER = 41U,
+ TEGRA_ARI_IS_CCX_ALLOWED = 42U,
+ TEGRA_ARI_CC3_CTRL = 43U,
+
+ /* indexes below need ccplex lock */
+ TEGRA_ARI_ENTER_CSTATE = 80U,
+ TEGRA_ARI_UPDATE_CSTATE_INFO = 81U,
+ TEGRA_ARI_IS_SC7_ALLOWED = 82U,
+ /* index 83 is deprecated */
+ TEGRA_ARI_PERFMON = 84U,
+ TEGRA_ARI_UPDATE_CCPLEX_GSC = 85U,
+	/* index 86 is deprecated */
+ /* index 87 is deprecated */
+ TEGRA_ARI_ROC_FLUSH_CACHE_ONLY = 88U,
+ TEGRA_ARI_ROC_FLUSH_CACHE_TRBITS = 89U,
+ TEGRA_ARI_MISC_CCPLEX = 90U,
+ TEGRA_ARI_MCA = 91U,
+ TEGRA_ARI_UPDATE_CROSSOVER = 92U,
+ TEGRA_ARI_CSTATE_STATS = 93U,
+ TEGRA_ARI_WRITE_CSTATE_STATS = 94U,
+ TEGRA_ARI_COPY_MISCREG_AA64_RST = 95U,
+ TEGRA_ARI_ROC_CLEAN_CACHE_ONLY = 96U,
+} tegra_ari_req_id_t;
+
+typedef enum {
+ TEGRA_ARI_MISC_ECHO = 0U,
+ TEGRA_ARI_MISC_VERSION = 1U,
+ TEGRA_ARI_MISC_FEATURE_LEAF_0 = 2U,
+} tegra_ari_misc_index_t;
+
+typedef enum {
+ TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_POWER_OFF = 0U,
+ TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_REBOOT = 1U,
+ TEGRA_ARI_MISC_CCPLEX_CORESIGHT_CG_CTRL = 2U,
+ TEGRA_ARI_MISC_CCPLEX_EDBGREQ = 3U,
+} tegra_ari_misc_ccplex_index_t;
+
+typedef enum {
+ TEGRA_ARI_CORE_C0 = 0U,
+ TEGRA_ARI_CORE_C1 = 1U,
+ TEGRA_ARI_CORE_C6 = 6U,
+ TEGRA_ARI_CORE_C7 = 7U,
+ TEGRA_ARI_CORE_WARMRSTREQ = 8U,
+} tegra_ari_core_sleep_state_t;
+
+typedef enum {
+ TEGRA_ARI_CLUSTER_CC0 = 0U,
+ TEGRA_ARI_CLUSTER_CC1 = 1U,
+ TEGRA_ARI_CLUSTER_CC6 = 6U,
+ TEGRA_ARI_CLUSTER_CC7 = 7U,
+} tegra_ari_cluster_sleep_state_t;
+
+typedef enum {
+ TEGRA_ARI_CCPLEX_CCP0 = 0U,
+ TEGRA_ARI_CCPLEX_CCP1 = 1U,
+ TEGRA_ARI_CCPLEX_CCP3 = 3U, /* obsoleted */
+} tegra_ari_ccplex_sleep_state_t;
+
+typedef enum {
+ TEGRA_ARI_SYSTEM_SC0 = 0U,
+ TEGRA_ARI_SYSTEM_SC1 = 1U, /* obsoleted */
+ TEGRA_ARI_SYSTEM_SC2 = 2U, /* obsoleted */
+ TEGRA_ARI_SYSTEM_SC3 = 3U, /* obsoleted */
+ TEGRA_ARI_SYSTEM_SC4 = 4U, /* obsoleted */
+ TEGRA_ARI_SYSTEM_SC7 = 7U,
+ TEGRA_ARI_SYSTEM_SC8 = 8U,
+} tegra_ari_system_sleep_state_t;
+
+typedef enum {
+ TEGRA_ARI_CROSSOVER_C1_C6 = 0U,
+ TEGRA_ARI_CROSSOVER_CC1_CC6 = 1U,
+ TEGRA_ARI_CROSSOVER_CC1_CC7 = 2U,
+ TEGRA_ARI_CROSSOVER_CCP1_CCP3 = 3U, /* obsoleted */
+ TEGRA_ARI_CROSSOVER_CCP3_SC2 = 4U, /* obsoleted */
+ TEGRA_ARI_CROSSOVER_CCP3_SC3 = 5U, /* obsoleted */
+ TEGRA_ARI_CROSSOVER_CCP3_SC4 = 6U, /* obsoleted */
+ TEGRA_ARI_CROSSOVER_CCP3_SC7 = 7U, /* obsoleted */
+ TEGRA_ARI_CROSSOVER_SC0_SC7 = 7U,
+ TEGRA_ARI_CROSSOVER_CCP3_SC1 = 8U, /* obsoleted */
+} tegra_ari_crossover_index_t;
+
+typedef enum {
+ TEGRA_ARI_CSTATE_STATS_CLEAR = 0U,
+ TEGRA_ARI_CSTATE_STATS_SC7_ENTRIES = 1U,
+ TEGRA_ARI_CSTATE_STATS_SC4_ENTRIES, /* obsoleted */
+ TEGRA_ARI_CSTATE_STATS_SC3_ENTRIES, /* obsoleted */
+ TEGRA_ARI_CSTATE_STATS_SC2_ENTRIES, /* obsoleted */
+ TEGRA_ARI_CSTATE_STATS_CCP3_ENTRIES, /* obsoleted */
+ TEGRA_ARI_CSTATE_STATS_A57_CC6_ENTRIES,
+ TEGRA_ARI_CSTATE_STATS_A57_CC7_ENTRIES,
+ TEGRA_ARI_CSTATE_STATS_D15_CC6_ENTRIES,
+ TEGRA_ARI_CSTATE_STATS_D15_CC7_ENTRIES,
+ TEGRA_ARI_CSTATE_STATS_D15_0_C6_ENTRIES,
+ TEGRA_ARI_CSTATE_STATS_D15_1_C6_ENTRIES,
+ TEGRA_ARI_CSTATE_STATS_D15_0_C7_ENTRIES = 14U,
+ TEGRA_ARI_CSTATE_STATS_D15_1_C7_ENTRIES,
+ TEGRA_ARI_CSTATE_STATS_A57_0_C7_ENTRIES = 18U,
+ TEGRA_ARI_CSTATE_STATS_A57_1_C7_ENTRIES,
+ TEGRA_ARI_CSTATE_STATS_A57_2_C7_ENTRIES,
+ TEGRA_ARI_CSTATE_STATS_A57_3_C7_ENTRIES,
+ TEGRA_ARI_CSTATE_STATS_LAST_CSTATE_ENTRY_D15_0,
+ TEGRA_ARI_CSTATE_STATS_LAST_CSTATE_ENTRY_D15_1,
+ TEGRA_ARI_CSTATE_STATS_LAST_CSTATE_ENTRY_A57_0 = 26U,
+ TEGRA_ARI_CSTATE_STATS_LAST_CSTATE_ENTRY_A57_1,
+ TEGRA_ARI_CSTATE_STATS_LAST_CSTATE_ENTRY_A57_2,
+ TEGRA_ARI_CSTATE_STATS_LAST_CSTATE_ENTRY_A57_3,
+} tegra_ari_cstate_stats_index_t;
+
+typedef enum {
+ TEGRA_ARI_GSC_ALL = 0U,
+ TEGRA_ARI_GSC_BPMP = 6U,
+ TEGRA_ARI_GSC_APE = 7U,
+ TEGRA_ARI_GSC_SPE = 8U,
+ TEGRA_ARI_GSC_SCE = 9U,
+ TEGRA_ARI_GSC_APR = 10U,
+ TEGRA_ARI_GSC_TZRAM = 11U,
+ TEGRA_ARI_GSC_SE = 12U,
+ TEGRA_ARI_GSC_BPMP_TO_SPE = 16U,
+ TEGRA_ARI_GSC_SPE_TO_BPMP = 17U,
+ TEGRA_ARI_GSC_CPU_TZ_TO_BPMP = 18U,
+ TEGRA_ARI_GSC_BPMP_TO_CPU_TZ = 19U,
+ TEGRA_ARI_GSC_CPU_NS_TO_BPMP = 20U,
+ TEGRA_ARI_GSC_BPMP_TO_CPU_NS = 21U,
+ TEGRA_ARI_GSC_IPC_SE_SPE_SCE_BPMP = 22U,
+ TEGRA_ARI_GSC_SC7_RESUME_FW = 23U,
+ TEGRA_ARI_GSC_TZ_DRAM_IDX = 34U,
+ TEGRA_ARI_GSC_VPR_IDX = 35U,
+} tegra_ari_gsc_index_t;
+
+/* This macro will produce enums for __name##_LSB and __name##_MSB */
+#define TEGRA_ARI_ENUM_MASK_LSB_MSB(__name, __lsb, __msb) __name##_LSB = __lsb, __name##_MSB = __msb
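A field mask is not generated by the macro itself; when one is needed it can be derived from the _LSB/_MSB pair that the macro does produce. A minimal sketch, illustrative only and not part of this patch (the ARI_FIELD_* names are invented for the example):

	/* Mask covering bits name##_LSB .. name##_MSB, inclusive. */
	#define ARI_FIELD_MASK(name) \
		((~0ULL >> (63U - (name##_MSB))) & ~((1ULL << (name##_LSB)) - 1ULL))

	/* Extract a field from a raw 64-bit ARI request/response word. */
	#define ARI_FIELD_GET(val, name) \
		(((val) & ARI_FIELD_MASK(name)) >> (name##_LSB))

	/* e.g. ARI_FIELD_GET(stat, TEGRA_ARI_MCA_ASERR0_STAT_SERR_ERR_CODE) */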
+
+typedef enum {
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_UPDATE_CSTATE_INFO__CLUSTER_CSTATE, 0U, 2U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_UPDATE_CSTATE_INFO__CLUSTER_CSTATE_PRESENT, 7U, 7U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_UPDATE_CSTATE_INFO__CCPLEX_CSTATE, 8U, 9U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_UPDATE_CSTATE_INFO__CCPLEX_CSTATE_PRESENT, 15U, 15U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_UPDATE_CSTATE_INFO__SYSTEM_CSTATE, 16U, 19U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_UPDATE_CSTATE_INFO__IGNORE_CROSSOVERS, 22U, 22U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_UPDATE_CSTATE_INFO__SYSTEM_CSTATE_PRESENT, 23U, 23U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_UPDATE_CSTATE_INFO__WAKE_MASK_PRESENT, 31U, 31U),
+} tegra_ari_update_cstate_info_bitmasks_t;
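The enumerators above give the bit layout of the low word of an UPDATE_CSTATE_INFO request. As a worked example (sketch only, not part of this patch), a request that asks for cluster state CC6 and flags the cluster field as present could be composed as:

	uint32_t req = ((uint32_t)TEGRA_ARI_CLUSTER_CC6 <<
			TEGRA_ARI_UPDATE_CSTATE_INFO__CLUSTER_CSTATE_LSB) |
		       (1U << TEGRA_ARI_UPDATE_CSTATE_INFO__CLUSTER_CSTATE_PRESENT_LSB);

This mirrors what the MCE drivers later in this patch build with their CLUSTER_CSTATE_* helper macros and pass as the low data word of a TEGRA_ARI_UPDATE_CSTATE_INFO request.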
+
+typedef enum {
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MISC_CCPLEX_CORESIGHT_CG_CTRL__EN, 0U, 0U),
+} tegra_ari_misc_ccplex_bitmasks_t;
+
+typedef enum {
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_CC3_CTRL__IDLE_FREQ, 0U, 8U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_CC3_CTRL__IDLE_VOLT, 16U, 23U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_CC3_CTRL__ENABLE, 31U, 31U),
+} tegra_ari_cc3_ctrl_bitmasks_t;
+
+typedef enum {
+ TEGRA_ARI_MCA_NOP = 0U,
+ TEGRA_ARI_MCA_READ_SERR = 1U,
+ TEGRA_ARI_MCA_WRITE_SERR = 2U,
+ TEGRA_ARI_MCA_CLEAR_SERR = 4U,
+ TEGRA_ARI_MCA_REPORT_SERR = 5U,
+ TEGRA_ARI_MCA_READ_INTSTS = 6U,
+ TEGRA_ARI_MCA_WRITE_INTSTS = 7U,
+ TEGRA_ARI_MCA_READ_PREBOOT_SERR = 8U,
+} tegra_ari_mca_commands_t;
+
+typedef enum {
+ TEGRA_ARI_MCA_RD_WR_DPMU = 0U,
+ TEGRA_ARI_MCA_RD_WR_IOB = 1U,
+ TEGRA_ARI_MCA_RD_WR_MCB = 2U,
+ TEGRA_ARI_MCA_RD_WR_CCE = 3U,
+ TEGRA_ARI_MCA_RD_WR_CQX = 4U,
+ TEGRA_ARI_MCA_RD_WR_CTU = 5U,
+ TEGRA_ARI_MCA_RD_WR_JSR_MTS = 7U,
+ TEGRA_ARI_MCA_RD_BANK_INFO = 0x0fU,
+ TEGRA_ARI_MCA_RD_BANK_TEMPLATE = 0x10U,
+ TEGRA_ARI_MCA_RD_WR_SECURE_ACCESS_REGISTER = 0x11U,
+ TEGRA_ARI_MCA_RD_WR_GLOBAL_CONFIG_REGISTER = 0x12U,
+} tegra_ari_mca_rd_wr_indexes_t;
+
+typedef enum {
+ TEGRA_ARI_MCA_RD_WR_ASERRX_CTRL = 0U,
+ TEGRA_ARI_MCA_RD_WR_ASERRX_STATUS = 1U,
+ TEGRA_ARI_MCA_RD_WR_ASERRX_ADDR = 2U,
+ TEGRA_ARI_MCA_RD_WR_ASERRX_MISC1 = 3U,
+ TEGRA_ARI_MCA_RD_WR_ASERRX_MISC2 = 4U,
+} tegra_ari_mca_read_asserx_subindexes_t;
+
+typedef enum {
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_SECURE_REGISTER_SETTING_ENABLES_NS_PERMITTED, 0U, 0U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_SECURE_REGISTER_READING_STATUS_NS_PERMITTED, 1U, 1U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_SECURE_REGISTER_PENDING_MCA_ERRORS_NS_PERMITTED, 2U, 2U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_SECURE_REGISTER_CLEARING_MCA_INTERRUPTS_NS_PERMITTED, 3U, 3U),
+} tegra_ari_mca_secure_register_bitmasks_t;
+
+typedef enum {
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR0_STAT_SERR_ERR_CODE, 0U, 15U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR0_STAT_PWM_ERR, 16U, 16U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR0_STAT_CRAB_ERR, 17U, 17U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR0_STAT_RD_WR_N, 18U, 18U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR0_STAT_UCODE_ERR, 19U, 19U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR0_STAT_PWM, 20U, 23U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR0_STAT_AV, 58U, 58U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR0_STAT_MV, 59U, 59U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR0_STAT_EN, 60U, 60U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR0_STAT_UC, 61U, 61U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR0_STAT_OVF, 62U, 62U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR0_STAT_VAL, 63U, 63U),
+
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR0_ADDR_ADDR, 0U, 41U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR0_ADDR_UCODE_ERRCD, 42U, 52U),
+
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR0_CTRL_EN_PWM_ERR, 0U, 0U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR0_CTRL_EN_CRAB_ERR, 1U, 1U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR0_CTRL_EN_UCODE_ERR, 3U, 3U),
+} tegra_ari_mca_aserr0_bitmasks_t;
+
+typedef enum {
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_SERR_ERR_CODE, 0U, 15U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_MSI_ERR, 16U, 16U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_IHI_ERR, 17U, 17U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_CRI_ERR, 18U, 18U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_MMCRAB_ERR, 19U, 19U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_CSI_ERR, 20U, 20U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_RD_WR_N, 21U, 21U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_REQ_ERRT, 22U, 23U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_RESP_ERRT, 24U, 25U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_AV, 58U, 58U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_MV, 59U, 59U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_EN, 60U, 60U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_UC, 61U, 61U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_OVF, 62U, 62U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_VAL, 63U, 63U),
+
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_AXI_ID, 0U, 7U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_CQX_ID, 8U, 27U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_CQX_CID, 28U, 31U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_STAT_CQX_CMD, 32U, 35U),
+
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_CTRL_EN_MSI_ERR, 0U, 0U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_CTRL_EN_IHI_ERR, 1U, 1U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_CTRL_EN_CRI_ERR, 2U, 2U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_CTRL_EN_MMCRAB_ERR, 3U, 3U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_CTRL_EN_CSI_ERR, 4U, 4U),
+
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR1_MISC_ADDR, 0U, 41U),
+} tegra_ari_mca_aserr1_bitmasks_t;
+
+typedef enum {
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR2_STAT_SERR_ERR_CODE, 0U, 15U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR2_STAT_MC_ERR, 16U, 16U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR2_STAT_SYSRAM_ERR, 17U, 17U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR2_STAT_CLIENT_ID, 18U, 19U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR2_STAT_AV, 58U, 58U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR2_STAT_MV, 59U, 59U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR2_STAT_EN, 60U, 60U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR2_STAT_UC, 61U, 61U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR2_STAT_OVF, 62U, 62U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR2_STAT_VAL, 63U, 63U),
+
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR2_ADDR_ID, 0U, 17U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR2_ADDR_CMD, 18U, 21U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR2_ADDR_ADDR, 22U, 53U),
+
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR2_CTRL_EN_MC_ERR, 0U, 0U),
+} tegra_ari_mca_aserr2_bitmasks_t;
+
+typedef enum {
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_STAT_SERR_ERR_CODE, 0U, 15U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_STAT_TO_ERR, 16U, 16U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_STAT_STAT_ERR, 17U, 17U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_STAT_DST_ERR, 18U, 18U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_STAT_UNC_ERR, 19U, 19U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_STAT_MH_ERR, 20U, 20U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_STAT_PERR, 21U, 21U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_STAT_PSN_ERR, 22U, 22U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_STAT_AV, 58U, 58U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_STAT_MV, 59U, 59U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_STAT_EN, 60U, 60U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_STAT_UC, 61U, 61U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_STAT_OVF, 62U, 62U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_STAT_VAL, 63U, 63U),
+
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_ADDR_CMD, 0U, 5U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_ADDR_ADDR, 6U, 47U),
+
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_MISC1_TO, 0U, 0U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_MISC1_DIV4, 1U, 1U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_MISC1_TLIMIT, 2U, 11U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_MISC1_PSN_ERR_CORR_MSK, 12U, 25U),
+
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_MISC2_MORE_INFO, 0U, 17U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_MISC2_TO_INFO, 18U, 43U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_MISC2_SRC, 44U, 45U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_MISC2_TID, 46U, 52U),
+
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_CTRL_EN_TO_ERR, 0U, 0U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_CTRL_EN_STAT_ERR, 1U, 1U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_CTRL_EN_DST_ERR, 2U, 2U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_CTRL_EN_UNC_ERR, 3U, 3U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_CTRL_EN_MH_ERR, 4U, 4U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_CTRL_EN_PERR, 5U, 5U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR3_CTRL_EN_PSN_ERR, 6U, 19U),
+} tegra_ari_mca_aserr3_bitmasks_t;
+
+typedef enum {
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR4_STAT_SERR_ERR_CODE, 0U, 15U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR4_STAT_SRC_ERR, 16U, 16U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR4_STAT_DST_ERR, 17U, 17U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR4_STAT_REQ_ERR, 18U, 18U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR4_STAT_RSP_ERR, 19U, 19U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR4_STAT_AV, 58U, 58U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR4_STAT_MV, 59U, 59U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR4_STAT_EN, 60U, 60U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR4_STAT_UC, 61U, 61U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR4_STAT_OVF, 62U, 62U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR4_STAT_VAL, 63U, 63U),
+
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR4_CTRL_EN_CPE_ERR, 0U, 0U),
+} tegra_ari_mca_aserr4_bitmasks_t;
+
+typedef enum {
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR5_STAT_SERR_ERR_CODE, 0U, 15U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR5_STAT_CTUPAR, 16U, 16U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR5_STAT_MULTI, 17U, 17U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR5_STAT_AV, 58U, 58U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR5_STAT_MV, 59U, 59U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR5_STAT_EN, 60U, 60U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR5_STAT_UC, 61U, 61U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR5_STAT_OVF, 62U, 62U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR5_STAT_VAL, 63U, 63U),
+
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR5_ADDR_SRC, 0U, 7U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR5_ADDR_ID, 8U, 15U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR5_ADDR_DATA, 16U, 26U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR5_ADDR_CMD, 32U, 35U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR5_ADDR_ADDR, 36U, 45U),
+
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_ASERR5_CTRL_EN_CTUPAR, 0U, 0U),
+} tegra_ari_mca_aserr5_bitmasks_t;
+
+typedef enum {
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_SERR1_STAT_SERR_ERR_CODE, 0U, 15U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_SERR1_STAT_AV, 58U, 58U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_SERR1_STAT_MV, 59U, 59U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_SERR1_STAT_EN, 60U, 60U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_SERR1_STAT_UC, 61U, 61U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_SERR1_STAT_OVF, 62U, 62U),
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_SERR1_STAT_VAL, 63U, 63U),
+
+ TEGRA_ARI_ENUM_MASK_LSB_MSB(TEGRA_ARI_MCA_SERR1_ADDR_TBD_INFO, 0U, 63U),
+} tegra_ari_mca_serr1_bitmasks_t;
+
+#undef TEGRA_ARI_ENUM_MASK_LSB_MSB
+
+typedef enum {
+ TEGRA_NVG_CHANNEL_PMIC = 0U,
+ TEGRA_NVG_CHANNEL_POWER_PERF = 1U,
+ TEGRA_NVG_CHANNEL_POWER_MODES = 2U,
+ TEGRA_NVG_CHANNEL_WAKE_TIME = 3U,
+ TEGRA_NVG_CHANNEL_CSTATE_INFO = 4U,
+ TEGRA_NVG_CHANNEL_CROSSOVER_C1_C6 = 5U,
+ TEGRA_NVG_CHANNEL_CROSSOVER_CC1_CC6 = 6U,
+ TEGRA_NVG_CHANNEL_CROSSOVER_CC1_CC7 = 7U,
+ TEGRA_NVG_CHANNEL_CROSSOVER_CCP1_CCP3 = 8U, /* obsoleted */
+ TEGRA_NVG_CHANNEL_CROSSOVER_CCP3_SC2 = 9U, /* obsoleted */
+ TEGRA_NVG_CHANNEL_CROSSOVER_CCP3_SC3 = 10U, /* obsoleted */
+ TEGRA_NVG_CHANNEL_CROSSOVER_CCP3_SC4 = 11U, /* obsoleted */
+ TEGRA_NVG_CHANNEL_CROSSOVER_CCP3_SC7 = 12U, /* obsoleted */
+ TEGRA_NVG_CHANNEL_CROSSOVER_SC0_SC7 = 12U,
+ TEGRA_NVG_CHANNEL_CSTATE_STATS_CLEAR = 13U,
+ TEGRA_NVG_CHANNEL_CSTATE_STATS_SC7_ENTRIES = 14U,
+ TEGRA_NVG_CHANNEL_CSTATE_STATS_SC4_ENTRIES = 15U, /* obsoleted */
+ TEGRA_NVG_CHANNEL_CSTATE_STATS_SC3_ENTRIES = 16U, /* obsoleted */
+ TEGRA_NVG_CHANNEL_CSTATE_STATS_SC2_ENTRIES = 17U, /* obsoleted */
+ TEGRA_NVG_CHANNEL_CSTATE_STATS_CCP3_ENTRIES = 18U, /* obsoleted */
+ TEGRA_NVG_CHANNEL_CSTATE_STATS_A57_CC6_ENTRIES = 19U,
+ TEGRA_NVG_CHANNEL_CSTATE_STATS_A57_CC7_ENTRIES = 20U,
+ TEGRA_NVG_CHANNEL_CSTATE_STATS_D15_CC6_ENTRIES = 21U,
+ TEGRA_NVG_CHANNEL_CSTATE_STATS_D15_CC7_ENTRIES = 22U,
+ TEGRA_NVG_CHANNEL_CSTATE_STATS_D15_0_C6_ENTRIES = 23U,
+ TEGRA_NVG_CHANNEL_CSTATE_STATS_D15_1_C6_ENTRIES = 24U,
+ TEGRA_NVG_CHANNEL_CSTATE_STATS_D15_2_C6_ENTRIES = 25U, /* Reserved (for Denver15 core 2) */
+ TEGRA_NVG_CHANNEL_CSTATE_STATS_D15_3_C6_ENTRIES = 26U, /* Reserved (for Denver15 core 3) */
+ TEGRA_NVG_CHANNEL_CSTATE_STATS_D15_0_C7_ENTRIES = 27U,
+ TEGRA_NVG_CHANNEL_CSTATE_STATS_D15_1_C7_ENTRIES = 28U,
+ TEGRA_NVG_CHANNEL_CSTATE_STATS_D15_2_C7_ENTRIES = 29U, /* Reserved (for Denver15 core 2) */
+ TEGRA_NVG_CHANNEL_CSTATE_STATS_D15_3_C7_ENTRIES = 30U, /* Reserved (for Denver15 core 3) */
+ TEGRA_NVG_CHANNEL_CSTATE_STATS_A57_0_C7_ENTRIES = 31U,
+ TEGRA_NVG_CHANNEL_CSTATE_STATS_A57_1_C7_ENTRIES = 32U,
+ TEGRA_NVG_CHANNEL_CSTATE_STATS_A57_2_C7_ENTRIES = 33U,
+ TEGRA_NVG_CHANNEL_CSTATE_STATS_A57_3_C7_ENTRIES = 34U,
+ TEGRA_NVG_CHANNEL_CSTATE_STATS_LAST_CSTATE_ENTRY_D15_0 = 35U,
+ TEGRA_NVG_CHANNEL_CSTATE_STATS_LAST_CSTATE_ENTRY_D15_1 = 36U,
+ TEGRA_NVG_CHANNEL_CSTATE_STATS_LAST_CSTATE_ENTRY_D15_2 = 37U, /* Reserved (for Denver15 core 2) */
+ TEGRA_NVG_CHANNEL_CSTATE_STATS_LAST_CSTATE_ENTRY_D15_3 = 38U, /* Reserved (for Denver15 core 3) */
+ TEGRA_NVG_CHANNEL_CSTATE_STATS_LAST_CSTATE_ENTRY_A57_0 = 39U,
+ TEGRA_NVG_CHANNEL_CSTATE_STATS_LAST_CSTATE_ENTRY_A57_1 = 40U,
+ TEGRA_NVG_CHANNEL_CSTATE_STATS_LAST_CSTATE_ENTRY_A57_2 = 41U,
+ TEGRA_NVG_CHANNEL_CSTATE_STATS_LAST_CSTATE_ENTRY_A57_3 = 42U,
+ TEGRA_NVG_CHANNEL_IS_SC7_ALLOWED = 43U,
+ TEGRA_NVG_CHANNEL_ONLINE_CORE = 44U,
+ TEGRA_NVG_CHANNEL_CC3_CTRL = 45U,
+ TEGRA_NVG_CHANNEL_CROSSOVER_CCP3_SC1 = 46U, /* obsoleted */
+ TEGRA_NVG_CHANNEL_LAST_INDEX,
+} tegra_nvg_channel_id_t;
+
+#endif /* T18X_TEGRA_ARI_H */
diff --git a/plat/nvidia/tegra/soc/t186/drivers/mce/aarch64/nvg_helpers.S b/plat/nvidia/tegra/soc/t186/drivers/mce/aarch64/nvg_helpers.S
new file mode 100644
index 00000000..e3591cec
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t186/drivers/mce/aarch64/nvg_helpers.S
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+
+ .globl nvg_set_request_data
+ .globl nvg_set_request
+ .globl nvg_get_result
+
+/* void nvg_set_request_data(uint64_t req, uint64_t data) */
+func nvg_set_request_data
+ msr s3_0_c15_c1_2, x0
+ msr s3_0_c15_c1_3, x1
+ ret
+endfunc nvg_set_request_data
+
+/* void nvg_set_request(uint64_t req) */
+func nvg_set_request
+ msr s3_0_c15_c1_2, x0
+ ret
+endfunc nvg_set_request
+
+/* uint64_t nvg_get_result(void) */
+func nvg_get_result
+ mrs x0, s3_0_c15_c1_3
+ ret
+endfunc nvg_get_result
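These helpers expose the Denver NVG mailbox: the request index is written to s3_0_c15_c1_2 and the data/result travels through s3_0_c15_c1_3. A minimal C-side sketch of a read-style query, assuming mce_private.h and t18x_ari.h are included (the wrapper name is invented for illustration and not part of this patch):

	/* Read the SC7 entry count through the NVG cstate-statistics channel. */
	static uint64_t nvg_query_sc7_entry_count(void)
	{
		nvg_set_request(TEGRA_NVG_CHANNEL_CSTATE_STATS_SC7_ENTRIES);
		return nvg_get_result();
	}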
diff --git a/plat/nvidia/tegra/soc/t186/drivers/mce/ari.c b/plat/nvidia/tegra/soc/t186/drivers/mce/ari.c
new file mode 100644
index 00000000..7eb6c6c8
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t186/drivers/mce/ari.c
@@ -0,0 +1,558 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <debug.h>
+#include <delay_timer.h>
+#include <denver.h>
+#include <mce_private.h>
+#include <mmio.h>
+#include <platform.h>
+#include <sys/errno.h>
+#include <t18x_ari.h>
+
+/*******************************************************************************
+ * Register offsets for ARI request/results
+ ******************************************************************************/
+#define ARI_REQUEST 0x0U
+#define ARI_REQUEST_EVENT_MASK 0x4U
+#define ARI_STATUS 0x8U
+#define ARI_REQUEST_DATA_LO 0xCU
+#define ARI_REQUEST_DATA_HI 0x10U
+#define ARI_RESPONSE_DATA_LO 0x14U
+#define ARI_RESPONSE_DATA_HI 0x18U
+
+/* Status values for the current request */
+#define ARI_REQ_PENDING 1U
+#define ARI_REQ_ONGOING 3U
+#define ARI_REQUEST_VALID_BIT (1U << 8)
+#define ARI_EVT_MASK_STANDBYWFI_BIT (1U << 7)
+
+/* default timeout (ms) to wait for ARI completion */
+#define ARI_MAX_RETRY_COUNT 2000
+
+/*******************************************************************************
+ * ARI helper functions
+ ******************************************************************************/
+static inline uint32_t ari_read_32(uint32_t ari_base, uint32_t reg)
+{
+ return mmio_read_32((uint64_t)ari_base + (uint64_t)reg);
+}
+
+static inline void ari_write_32(uint32_t ari_base, uint32_t val, uint32_t reg)
+{
+ mmio_write_32((uint64_t)ari_base + (uint64_t)reg, val);
+}
+
+static inline uint32_t ari_get_request_low(uint32_t ari_base)
+{
+ return ari_read_32(ari_base, ARI_REQUEST_DATA_LO);
+}
+
+static inline uint32_t ari_get_request_high(uint32_t ari_base)
+{
+ return ari_read_32(ari_base, ARI_REQUEST_DATA_HI);
+}
+
+static inline uint32_t ari_get_response_low(uint32_t ari_base)
+{
+ return ari_read_32(ari_base, ARI_RESPONSE_DATA_LO);
+}
+
+static inline uint32_t ari_get_response_high(uint32_t ari_base)
+{
+ return ari_read_32(ari_base, ARI_RESPONSE_DATA_HI);
+}
+
+static inline void ari_clobber_response(uint32_t ari_base)
+{
+ ari_write_32(ari_base, 0, ARI_RESPONSE_DATA_LO);
+ ari_write_32(ari_base, 0, ARI_RESPONSE_DATA_HI);
+}
+
+static int32_t ari_request_wait(uint32_t ari_base, uint32_t evt_mask, uint32_t req,
+ uint32_t lo, uint32_t hi)
+{
+ uint32_t retries = ARI_MAX_RETRY_COUNT;
+ uint32_t status;
+ int32_t ret = 0;
+
+ /* program the request, event_mask, hi and lo registers */
+ ari_write_32(ari_base, lo, ARI_REQUEST_DATA_LO);
+ ari_write_32(ari_base, hi, ARI_REQUEST_DATA_HI);
+ ari_write_32(ari_base, evt_mask, ARI_REQUEST_EVENT_MASK);
+ ari_write_32(ari_base, req | ARI_REQUEST_VALID_BIT, ARI_REQUEST);
+
+ /*
+ * For commands that have an event trigger, we should bypass
+ * ARI_STATUS polling, since MCE is waiting for SW to trigger
+ * the event.
+ */
+ if (evt_mask != 0U) {
+ ret = 0;
+ } else {
+		/* For shutdown/reboot commands, we don't have to check for timeouts */
+ if ((req == (uint32_t)TEGRA_ARI_MISC_CCPLEX) &&
+ ((lo == (uint32_t)TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_POWER_OFF) ||
+ (lo == (uint32_t)TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_REBOOT))) {
+ ret = 0;
+ } else {
+ /*
+ * Wait for the command response for not more than the timeout
+ */
+ while (retries != 0U) {
+
+ /* read the command status */
+ status = ari_read_32(ari_base, ARI_STATUS);
+ if ((status & (ARI_REQ_ONGOING | ARI_REQ_PENDING)) == 0U) {
+ break;
+ }
+
+ /* delay 1 ms */
+ mdelay(1);
+
+ /* decrement the retry count */
+ retries--;
+ }
+
+ /* assert if the command timed out */
+ if (retries == 0U) {
+ ERROR("ARI request timed out: req %d on CPU %d\n",
+ req, plat_my_core_pos());
+ assert(retries != 0U);
+ }
+ }
+ }
+
+ return ret;
+}
+
+int32_t ari_enter_cstate(uint32_t ari_base, uint32_t state, uint32_t wake_time)
+{
+ int32_t ret = 0;
+
+ /* check for allowed power state */
+ if ((state != TEGRA_ARI_CORE_C0) &&
+ (state != TEGRA_ARI_CORE_C1) &&
+ (state != TEGRA_ARI_CORE_C6) &&
+ (state != TEGRA_ARI_CORE_C7)) {
+ ERROR("%s: unknown cstate (%d)\n", __func__, state);
+ ret = EINVAL;
+ } else {
+ /* clean the previous response state */
+ ari_clobber_response(ari_base);
+
+ /* Enter the cstate, to be woken up after wake_time (TSC ticks) */
+ ret = ari_request_wait(ari_base, ARI_EVT_MASK_STANDBYWFI_BIT,
+ TEGRA_ARI_ENTER_CSTATE, state, wake_time);
+ }
+
+ return ret;
+}
+
+int32_t ari_update_cstate_info(uint32_t ari_base, uint32_t cluster, uint32_t ccplex,
+ uint32_t system, uint8_t sys_state_force, uint32_t wake_mask,
+ uint8_t update_wake_mask)
+{
+ uint32_t val = 0U;
+
+ /* clean the previous response state */
+ ari_clobber_response(ari_base);
+
+ /* update CLUSTER_CSTATE? */
+ if (cluster != 0U) {
+ val |= (cluster & (uint32_t)CLUSTER_CSTATE_MASK) |
+ (uint32_t)CLUSTER_CSTATE_UPDATE_BIT;
+ }
+
+ /* update CCPLEX_CSTATE? */
+ if (ccplex != 0U) {
+ val |= ((ccplex & (uint32_t)CCPLEX_CSTATE_MASK) << (uint32_t)CCPLEX_CSTATE_SHIFT) |
+ (uint32_t)CCPLEX_CSTATE_UPDATE_BIT;
+ }
+
+ /* update SYSTEM_CSTATE? */
+ if (system != 0U) {
+ val |= ((system & (uint32_t)SYSTEM_CSTATE_MASK) << (uint32_t)SYSTEM_CSTATE_SHIFT) |
+ (((uint32_t)sys_state_force << SYSTEM_CSTATE_FORCE_UPDATE_SHIFT) |
+ (uint32_t)SYSTEM_CSTATE_UPDATE_BIT);
+ }
+
+ /* update wake mask value? */
+ if (update_wake_mask != 0U) {
+ val |= (uint32_t)CSTATE_WAKE_MASK_UPDATE_BIT;
+ }
+
+ /* set the updated cstate info */
+ return ari_request_wait(ari_base, 0U, TEGRA_ARI_UPDATE_CSTATE_INFO, val,
+ wake_mask);
+}
+
+int32_t ari_update_crossover_time(uint32_t ari_base, uint32_t type, uint32_t time)
+{
+ int32_t ret = 0;
+
+ /* sanity check crossover type */
+ if ((type == TEGRA_ARI_CROSSOVER_C1_C6) ||
+ (type > TEGRA_ARI_CROSSOVER_CCP3_SC1)) {
+ ret = EINVAL;
+ } else {
+ /* clean the previous response state */
+ ari_clobber_response(ari_base);
+
+ /* update crossover threshold time */
+ ret = ari_request_wait(ari_base, 0U, TEGRA_ARI_UPDATE_CROSSOVER,
+ type, time);
+ }
+
+ return ret;
+}
+
+uint64_t ari_read_cstate_stats(uint32_t ari_base, uint32_t state)
+{
+ int32_t ret;
+ uint64_t result;
+
+	/* sanity check cstate stats type */
+ if (state == 0U) {
+ result = EINVAL;
+ } else {
+ /* clean the previous response state */
+ ari_clobber_response(ari_base);
+
+ ret = ari_request_wait(ari_base, 0U, TEGRA_ARI_CSTATE_STATS, state, 0U);
+ if (ret != 0) {
+ result = EINVAL;
+ } else {
+ result = (uint64_t)ari_get_response_low(ari_base);
+ }
+ }
+ return result;
+}
+
+int32_t ari_write_cstate_stats(uint32_t ari_base, uint32_t state, uint32_t stats)
+{
+ /* clean the previous response state */
+ ari_clobber_response(ari_base);
+
+ /* write the cstate stats */
+ return ari_request_wait(ari_base, 0U, TEGRA_ARI_WRITE_CSTATE_STATS, state,
+ stats);
+}
+
+uint64_t ari_enumeration_misc(uint32_t ari_base, uint32_t cmd, uint32_t data)
+{
+ uint64_t resp;
+ int32_t ret;
+ uint32_t local_data = data;
+
+ /* clean the previous response state */
+ ari_clobber_response(ari_base);
+
+ /* ARI_REQUEST_DATA_HI is reserved for commands other than 'ECHO' */
+ if (cmd != TEGRA_ARI_MISC_ECHO) {
+ local_data = 0U;
+ }
+
+ ret = ari_request_wait(ari_base, 0U, TEGRA_ARI_MISC, cmd, local_data);
+ if (ret != 0) {
+ resp = (uint64_t)ret;
+ } else {
+ /* get the command response */
+ resp = ari_get_response_low(ari_base);
+ resp |= ((uint64_t)ari_get_response_high(ari_base) << 32);
+ }
+
+ return resp;
+}
+
+int32_t ari_is_ccx_allowed(uint32_t ari_base, uint32_t state, uint32_t wake_time)
+{
+ int32_t ret;
+ uint32_t result;
+
+ /* clean the previous response state */
+ ari_clobber_response(ari_base);
+
+ ret = ari_request_wait(ari_base, 0U, TEGRA_ARI_IS_CCX_ALLOWED, state & 0x7U,
+ wake_time);
+ if (ret != 0) {
+ ERROR("%s: failed (%d)\n", __func__, ret);
+ result = 0U;
+ } else {
+ result = ari_get_response_low(ari_base) & 0x1U;
+ }
+
+ /* 1 = CCx allowed, 0 = CCx not allowed */
+ return (int32_t)result;
+}
+
+int32_t ari_is_sc7_allowed(uint32_t ari_base, uint32_t state, uint32_t wake_time)
+{
+ int32_t ret, result;
+
+ /* check for allowed power state */
+ if ((state != TEGRA_ARI_CORE_C0) &&
+ (state != TEGRA_ARI_CORE_C1) &&
+ (state != TEGRA_ARI_CORE_C6) &&
+ (state != TEGRA_ARI_CORE_C7)) {
+ ERROR("%s: unknown cstate (%d)\n", __func__, state);
+ result = EINVAL;
+ } else {
+ /* clean the previous response state */
+ ari_clobber_response(ari_base);
+
+ ret = ari_request_wait(ari_base, 0U, TEGRA_ARI_IS_SC7_ALLOWED, state,
+ wake_time);
+ if (ret != 0) {
+ ERROR("%s: failed (%d)\n", __func__, ret);
+ result = 0;
+ } else {
+ /* 1 = SC7 allowed, 0 = SC7 not allowed */
+ result = (ari_get_response_low(ari_base) != 0U) ? 1 : 0;
+ }
+ }
+
+ return result;
+}
+
+int32_t ari_online_core(uint32_t ari_base, uint32_t core)
+{
+ uint64_t cpu = read_mpidr() & (uint64_t)(MPIDR_CPU_MASK);
+ uint64_t cluster = (read_mpidr() & (uint64_t)(MPIDR_CLUSTER_MASK)) >>
+ (uint64_t)(MPIDR_AFFINITY_BITS);
+ uint64_t impl = (read_midr() >> (uint64_t)MIDR_IMPL_SHIFT) & (uint64_t)MIDR_IMPL_MASK;
+ int32_t ret;
+
+ /* construct the current CPU # */
+ cpu |= (cluster << 2);
+
+ /* sanity check target core id */
+ if ((core >= MCE_CORE_ID_MAX) || (cpu == (uint64_t)core)) {
+ ERROR("%s: unsupported core id (%d)\n", __func__, core);
+ ret = EINVAL;
+ } else {
+ /*
+ * The Denver cluster has 2 CPUs only - 0, 1.
+ */
+ if ((impl == (uint32_t)DENVER_IMPL) &&
+ ((core == 2U) || (core == 3U))) {
+ ERROR("%s: unknown core id (%d)\n", __func__, core);
+ ret = EINVAL;
+ } else {
+ /* clean the previous response state */
+ ari_clobber_response(ari_base);
+ ret = ari_request_wait(ari_base, 0U, TEGRA_ARI_ONLINE_CORE, core, 0U);
+ }
+ }
+
+ return ret;
+}
+
+int32_t ari_cc3_ctrl(uint32_t ari_base, uint32_t freq, uint32_t volt, uint8_t enable)
+{
+ uint32_t val;
+
+ /* clean the previous response state */
+ ari_clobber_response(ari_base);
+
+ /*
+ * If the enable bit is cleared, Auto-CC3 will be disabled by setting
+ * the SW visible voltage/frequency request registers for all non
+ * floorswept cores valid independent of StandbyWFI and disabling
+ * the IDLE voltage/frequency request register. If set, Auto-CC3
+ * will be enabled by setting the ARM SW visible voltage/frequency
+ * request registers for all non floorswept cores to be enabled by
+ * StandbyWFI or the equivalent signal, and always keeping the IDLE
+ * voltage/frequency request register enabled.
+ */
+ val = (((freq & MCE_AUTO_CC3_FREQ_MASK) << MCE_AUTO_CC3_FREQ_SHIFT) |\
+ ((volt & MCE_AUTO_CC3_VTG_MASK) << MCE_AUTO_CC3_VTG_SHIFT) |\
+ ((enable != 0U) ? MCE_AUTO_CC3_ENABLE_BIT : 0U));
+
+ return ari_request_wait(ari_base, 0U, TEGRA_ARI_CC3_CTRL, val, 0U);
+}
+
+int32_t ari_reset_vector_update(uint32_t ari_base)
+{
+ /* clean the previous response state */
+ ari_clobber_response(ari_base);
+
+ /*
+ * Need to program the CPU reset vector one time during cold boot
+ * and SC7 exit
+ */
+ (void)ari_request_wait(ari_base, 0U, TEGRA_ARI_COPY_MISCREG_AA64_RST, 0U, 0U);
+
+ return 0;
+}
+
+int32_t ari_roc_flush_cache_trbits(uint32_t ari_base)
+{
+ /* clean the previous response state */
+ ari_clobber_response(ari_base);
+
+ return ari_request_wait(ari_base, 0U, TEGRA_ARI_ROC_FLUSH_CACHE_TRBITS,
+ 0U, 0U);
+}
+
+int32_t ari_roc_flush_cache(uint32_t ari_base)
+{
+ /* clean the previous response state */
+ ari_clobber_response(ari_base);
+
+ return ari_request_wait(ari_base, 0U, TEGRA_ARI_ROC_FLUSH_CACHE_ONLY,
+ 0U, 0U);
+}
+
+int32_t ari_roc_clean_cache(uint32_t ari_base)
+{
+ /* clean the previous response state */
+ ari_clobber_response(ari_base);
+
+ return ari_request_wait(ari_base, 0U, TEGRA_ARI_ROC_CLEAN_CACHE_ONLY,
+ 0U, 0U);
+}
+
+uint64_t ari_read_write_mca(uint32_t ari_base, uint64_t cmd, uint64_t *data)
+{
+ uint64_t mca_arg_data, result = 0;
+ uint32_t resp_lo, resp_hi;
+ uint32_t mca_arg_err, mca_arg_finish;
+ int32_t ret;
+
+ /* Set data (write) */
+ mca_arg_data = (data != NULL) ? *data : 0ULL;
+
+ /* Set command */
+ ari_write_32(ari_base, (uint32_t)cmd, ARI_RESPONSE_DATA_LO);
+ ari_write_32(ari_base, (uint32_t)(cmd >> 32U), ARI_RESPONSE_DATA_HI);
+
+ ret = ari_request_wait(ari_base, 0U, TEGRA_ARI_MCA,
+ (uint32_t)mca_arg_data,
+ (uint32_t)(mca_arg_data >> 32U));
+ if (ret == 0) {
+ resp_lo = ari_get_response_low(ari_base);
+ resp_hi = ari_get_response_high(ari_base);
+
+ mca_arg_err = resp_lo & MCA_ARG_ERROR_MASK;
+ mca_arg_finish = (resp_hi >> MCA_ARG_FINISH_SHIFT) &
+ MCA_ARG_FINISH_MASK;
+
+ if (mca_arg_finish == 0U) {
+ result = (uint64_t)mca_arg_err;
+ } else {
+ if (data != NULL) {
+ resp_lo = ari_get_request_low(ari_base);
+ resp_hi = ari_get_request_high(ari_base);
+ *data = ((uint64_t)resp_hi << 32U) |
+ (uint64_t)resp_lo;
+ }
+ }
+ }
+
+ return result;
+}
+
+int32_t ari_update_ccplex_gsc(uint32_t ari_base, uint32_t gsc_idx)
+{
+ int32_t ret = 0;
+ /* sanity check GSC ID */
+ if (gsc_idx > (uint32_t)TEGRA_ARI_GSC_VPR_IDX) {
+ ret = EINVAL;
+ } else {
+ /* clean the previous response state */
+ ari_clobber_response(ari_base);
+
+ /*
+		 * The MCE code will read the GSC carveout value, corresponding to
+ * the ID, from the MC registers and update the internal GSC registers
+ * of the CCPLEX.
+ */
+ (void)ari_request_wait(ari_base, 0U, TEGRA_ARI_UPDATE_CCPLEX_GSC, gsc_idx, 0U);
+ }
+
+ return ret;
+}
+
+void ari_enter_ccplex_state(uint32_t ari_base, uint32_t state_idx)
+{
+ /* clean the previous response state */
+ ari_clobber_response(ari_base);
+
+ /*
+ * The MCE will shutdown or restart the entire system
+ */
+ (void)ari_request_wait(ari_base, 0U, TEGRA_ARI_MISC_CCPLEX, state_idx, 0U);
+}
+
+int32_t ari_read_write_uncore_perfmon(uint32_t ari_base, uint64_t req,
+ uint64_t *data)
+{
+ int32_t ret, result;
+ uint32_t val;
+ uint8_t req_cmd, req_status;
+
+ req_cmd = (uint8_t)(req >> UNCORE_PERFMON_CMD_SHIFT);
+
+ /* clean the previous response state */
+ ari_clobber_response(ari_base);
+
+ /* sanity check input parameters */
+ if ((req_cmd == UNCORE_PERFMON_CMD_READ) && (data == NULL)) {
+ ERROR("invalid parameters\n");
+ result = EINVAL;
+ } else {
+ /*
+ * For "write" commands get the value that has to be written
+ * to the uncore perfmon registers
+ */
+ val = (req_cmd == UNCORE_PERFMON_CMD_WRITE) ?
+ (uint32_t)*data : 0U;
+
+ ret = ari_request_wait(ari_base, 0U, TEGRA_ARI_PERFMON, val,
+ (uint32_t)req);
+ if (ret != 0) {
+ result = ret;
+ } else {
+ /* read the command status value */
+ req_status = (uint8_t)ari_get_response_high(ari_base) &
+ UNCORE_PERFMON_RESP_STATUS_MASK;
+
+ /*
+ * For "read" commands get the data from the uncore
+ * perfmon registers
+ */
+ req_status >>= UNCORE_PERFMON_RESP_STATUS_SHIFT;
+ if ((req_status == 0U) && (req_cmd == UNCORE_PERFMON_CMD_READ)) {
+ *data = ari_get_response_low(ari_base);
+ }
+ result = (int32_t)req_status;
+ }
+ }
+
+ return result;
+}
+
+void ari_misc_ccplex(uint32_t ari_base, uint32_t index, uint32_t value)
+{
+ /*
+	 * This invokes the ARI_MISC_CCPLEX commands, which can be used to
+	 * enable/disable coresight clock gating.
+ */
+
+ if ((index > TEGRA_ARI_MISC_CCPLEX_EDBGREQ) ||
+ ((index == TEGRA_ARI_MISC_CCPLEX_CORESIGHT_CG_CTRL) &&
+ (value > 1U))) {
+		ERROR("%s: invalid parameters\n", __func__);
+ } else {
+ /* clean the previous response state */
+ ari_clobber_response(ari_base);
+ (void)ari_request_wait(ari_base, 0U, TEGRA_ARI_MISC_CCPLEX, index, value);
+ }
+}
diff --git a/plat/nvidia/tegra/soc/t186/drivers/mce/mce.c b/plat/nvidia/tegra/soc/t186/drivers/mce/mce.c
new file mode 100644
index 00000000..5435ce6e
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t186/drivers/mce/mce.c
@@ -0,0 +1,523 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <bl_common.h>
+#include <context.h>
+#include <context_mgmt.h>
+#include <debug.h>
+#include <denver.h>
+#include <mce.h>
+#include <mce_private.h>
+#include <mmio.h>
+#include <string.h>
+#include <sys/errno.h>
+#include <t18x_ari.h>
+#include <tegra_def.h>
+#include <tegra_platform.h>
+
+/* NVG functions handlers */
+static arch_mce_ops_t nvg_mce_ops = {
+ .enter_cstate = nvg_enter_cstate,
+ .update_cstate_info = nvg_update_cstate_info,
+ .update_crossover_time = nvg_update_crossover_time,
+ .read_cstate_stats = nvg_read_cstate_stats,
+ .write_cstate_stats = nvg_write_cstate_stats,
+ .call_enum_misc = ari_enumeration_misc,
+ .is_ccx_allowed = nvg_is_ccx_allowed,
+ .is_sc7_allowed = nvg_is_sc7_allowed,
+ .online_core = nvg_online_core,
+ .cc3_ctrl = nvg_cc3_ctrl,
+ .update_reset_vector = ari_reset_vector_update,
+ .roc_flush_cache = ari_roc_flush_cache,
+ .roc_flush_cache_trbits = ari_roc_flush_cache_trbits,
+ .roc_clean_cache = ari_roc_clean_cache,
+ .read_write_mca = ari_read_write_mca,
+ .update_ccplex_gsc = ari_update_ccplex_gsc,
+ .enter_ccplex_state = ari_enter_ccplex_state,
+ .read_write_uncore_perfmon = ari_read_write_uncore_perfmon,
+ .misc_ccplex = ari_misc_ccplex
+};
+
+/* ARI functions handlers */
+static arch_mce_ops_t ari_mce_ops = {
+ .enter_cstate = ari_enter_cstate,
+ .update_cstate_info = ari_update_cstate_info,
+ .update_crossover_time = ari_update_crossover_time,
+ .read_cstate_stats = ari_read_cstate_stats,
+ .write_cstate_stats = ari_write_cstate_stats,
+ .call_enum_misc = ari_enumeration_misc,
+ .is_ccx_allowed = ari_is_ccx_allowed,
+ .is_sc7_allowed = ari_is_sc7_allowed,
+ .online_core = ari_online_core,
+ .cc3_ctrl = ari_cc3_ctrl,
+ .update_reset_vector = ari_reset_vector_update,
+ .roc_flush_cache = ari_roc_flush_cache,
+ .roc_flush_cache_trbits = ari_roc_flush_cache_trbits,
+ .roc_clean_cache = ari_roc_clean_cache,
+ .read_write_mca = ari_read_write_mca,
+ .update_ccplex_gsc = ari_update_ccplex_gsc,
+ .enter_ccplex_state = ari_enter_ccplex_state,
+ .read_write_uncore_perfmon = ari_read_write_uncore_perfmon,
+ .misc_ccplex = ari_misc_ccplex
+};
+
+typedef struct {
+ uint32_t ari_base;
+ arch_mce_ops_t *ops;
+} mce_config_t;
+
+/* Table to hold the per-CPU ARI base address and function handlers */
+static mce_config_t mce_cfg_table[MCE_ARI_APERTURES_MAX] = {
+ {
+ /* A57 Core 0 */
+ .ari_base = TEGRA_MMCRAB_BASE + MCE_ARI_APERTURE_0_OFFSET,
+ .ops = &ari_mce_ops,
+ },
+ {
+ /* A57 Core 1 */
+ .ari_base = TEGRA_MMCRAB_BASE + MCE_ARI_APERTURE_1_OFFSET,
+ .ops = &ari_mce_ops,
+ },
+ {
+ /* A57 Core 2 */
+ .ari_base = TEGRA_MMCRAB_BASE + MCE_ARI_APERTURE_2_OFFSET,
+ .ops = &ari_mce_ops,
+ },
+ {
+ /* A57 Core 3 */
+ .ari_base = TEGRA_MMCRAB_BASE + MCE_ARI_APERTURE_3_OFFSET,
+ .ops = &ari_mce_ops,
+ },
+ {
+ /* D15 Core 0 */
+ .ari_base = TEGRA_MMCRAB_BASE + MCE_ARI_APERTURE_4_OFFSET,
+ .ops = &nvg_mce_ops,
+ },
+ {
+ /* D15 Core 1 */
+ .ari_base = TEGRA_MMCRAB_BASE + MCE_ARI_APERTURE_5_OFFSET,
+ .ops = &nvg_mce_ops,
+ }
+};
+
+static uint32_t mce_get_curr_cpu_ari_base(void)
+{
+ uint64_t mpidr = read_mpidr();
+ uint64_t cpuid = mpidr & (uint64_t)MPIDR_CPU_MASK;
+ uint64_t impl = (read_midr() >> (uint64_t)MIDR_IMPL_SHIFT) & (uint64_t)MIDR_IMPL_MASK;
+
+ /*
+	 * T186 has 2 CPU clusters, one with Denver CPUs and the other with
+	 * ARM Cortex-A57 CPUs. CPU numbers within each cluster start from 0
+	 * and the first four entries of mce_cfg_table belong to the A57
+	 * cluster, so the Denver CPU ids have to be converted to indices
+	 * 4 and 5 to pick up the proper ARI base address from the
+	 * mce_cfg_table array.
+ */
+ if (impl == DENVER_IMPL) {
+ cpuid |= 0x4U;
+ }
+
+ return mce_cfg_table[cpuid].ari_base;
+}
+
+static arch_mce_ops_t *mce_get_curr_cpu_ops(void)
+{
+ uint64_t mpidr = read_mpidr();
+ uint64_t cpuid = mpidr & (uint64_t)MPIDR_CPU_MASK;
+ uint64_t impl = (read_midr() >> (uint64_t)MIDR_IMPL_SHIFT) &
+ (uint64_t)MIDR_IMPL_MASK;
+
+ /*
+	 * T186 has 2 CPU clusters, one with Denver CPUs and the other with
+	 * ARM Cortex-A57 CPUs. CPU numbers within each cluster start from 0
+	 * and the first four entries of mce_cfg_table belong to the A57
+	 * cluster, so the Denver CPU ids have to be converted to indices
+	 * 4 and 5 to pick up the proper arch_mce_ops_t struct from the
+	 * mce_cfg_table array.
+ */
+ if (impl == DENVER_IMPL) {
+ cpuid |= 0x4U;
+ }
+
+ return mce_cfg_table[cpuid].ops;
+}
+
+/*******************************************************************************
+ * Common handler for all MCE commands
+ ******************************************************************************/
+int32_t mce_command_handler(uint64_t cmd, uint64_t arg0, uint64_t arg1,
+ uint64_t arg2)
+{
+ const arch_mce_ops_t *ops;
+ gp_regs_t *gp_regs = get_gpregs_ctx(cm_get_context(NON_SECURE));
+ uint32_t cpu_ari_base;
+ uint64_t ret64 = 0, arg3, arg4, arg5;
+ int32_t ret = 0;
+
+ assert(gp_regs != NULL);
+
+ /* get a pointer to the CPU's arch_mce_ops_t struct */
+ ops = mce_get_curr_cpu_ops();
+
+ /* get the CPU's ARI base address */
+ cpu_ari_base = mce_get_curr_cpu_ari_base();
+
+ switch (cmd) {
+ case MCE_CMD_ENTER_CSTATE:
+ ret = ops->enter_cstate(cpu_ari_base, arg0, arg1);
+ if (ret < 0) {
+ ERROR("%s: enter_cstate failed(%d)\n", __func__, ret);
+ }
+
+ break;
+
+ case MCE_CMD_UPDATE_CSTATE_INFO:
+ /*
+ * get the parameters required for the update cstate info
+ * command
+ */
+ arg3 = read_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X4));
+ arg4 = read_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X5));
+ arg5 = read_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X6));
+
+ ret = ops->update_cstate_info(cpu_ari_base, (uint32_t)arg0,
+ (uint32_t)arg1, (uint32_t)arg2, (uint8_t)arg3,
+ (uint32_t)arg4, (uint8_t)arg5);
+ if (ret < 0) {
+ ERROR("%s: update_cstate_info failed(%d)\n",
+ __func__, ret);
+ }
+
+ write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X4), (0));
+ write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X5), (0));
+ write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X6), (0));
+
+ break;
+
+ case MCE_CMD_UPDATE_CROSSOVER_TIME:
+ ret = ops->update_crossover_time(cpu_ari_base, arg0, arg1);
+ if (ret < 0) {
+ ERROR("%s: update_crossover_time failed(%d)\n",
+ __func__, ret);
+ }
+
+ break;
+
+ case MCE_CMD_READ_CSTATE_STATS:
+ ret64 = ops->read_cstate_stats(cpu_ari_base, arg0);
+
+ /* update context to return cstate stats value */
+ write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X1), (ret64));
+ write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X2), (ret64));
+
+ break;
+
+ case MCE_CMD_WRITE_CSTATE_STATS:
+ ret = ops->write_cstate_stats(cpu_ari_base, arg0, arg1);
+ if (ret < 0) {
+ ERROR("%s: write_cstate_stats failed(%d)\n",
+ __func__, ret);
+ }
+
+ break;
+
+ case MCE_CMD_IS_CCX_ALLOWED:
+ ret = ops->is_ccx_allowed(cpu_ari_base, arg0, arg1);
+ if (ret < 0) {
+ ERROR("%s: is_ccx_allowed failed(%d)\n", __func__, ret);
+ break;
+ }
+
+ /* update context to return CCx status value */
+ write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X1),
+ (uint64_t)(ret));
+
+ break;
+
+ case MCE_CMD_IS_SC7_ALLOWED:
+ ret = ops->is_sc7_allowed(cpu_ari_base, arg0, arg1);
+ if (ret < 0) {
+ ERROR("%s: is_sc7_allowed failed(%d)\n", __func__, ret);
+ break;
+ }
+
+ /* update context to return SC7 status value */
+ write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X1),
+ (uint64_t)(ret));
+ write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X3),
+ (uint64_t)(ret));
+
+ break;
+
+ case MCE_CMD_ONLINE_CORE:
+ ret = ops->online_core(cpu_ari_base, arg0);
+ if (ret < 0) {
+ ERROR("%s: online_core failed(%d)\n", __func__, ret);
+ }
+
+ break;
+
+ case MCE_CMD_CC3_CTRL:
+ ret = ops->cc3_ctrl(cpu_ari_base, arg0, arg1, arg2);
+ if (ret < 0) {
+ ERROR("%s: cc3_ctrl failed(%d)\n", __func__, ret);
+ }
+
+ break;
+
+ case MCE_CMD_ECHO_DATA:
+ ret64 = ops->call_enum_misc(cpu_ari_base, TEGRA_ARI_MISC_ECHO,
+ arg0);
+
+ /* update context to return if echo'd data matched source */
+ write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X1),
+ ((ret64 == arg0) ? 1ULL : 0ULL));
+ write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X2),
+ ((ret64 == arg0) ? 1ULL : 0ULL));
+
+ break;
+
+ case MCE_CMD_READ_VERSIONS:
+ ret64 = ops->call_enum_misc(cpu_ari_base, TEGRA_ARI_MISC_VERSION,
+ arg0);
+
+ /*
+ * version = minor(63:32) | major(31:0). Update context
+ * to return major and minor version number.
+ */
+ write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X1),
+ (ret64));
+ write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X2),
+ (ret64 >> 32ULL));
+
+ break;
+
+ case MCE_CMD_ENUM_FEATURES:
+ ret64 = ops->call_enum_misc(cpu_ari_base,
+ TEGRA_ARI_MISC_FEATURE_LEAF_0, arg0);
+
+ /* update context to return features value */
+ write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X1), (ret64));
+
+ break;
+
+ case MCE_CMD_ROC_FLUSH_CACHE_TRBITS:
+ ret = ops->roc_flush_cache_trbits(cpu_ari_base);
+ if (ret < 0) {
+ ERROR("%s: flush cache_trbits failed(%d)\n", __func__,
+ ret);
+ }
+
+ break;
+
+ case MCE_CMD_ROC_FLUSH_CACHE:
+ ret = ops->roc_flush_cache(cpu_ari_base);
+ if (ret < 0) {
+ ERROR("%s: flush cache failed(%d)\n", __func__, ret);
+ }
+
+ break;
+
+ case MCE_CMD_ROC_CLEAN_CACHE:
+ ret = ops->roc_clean_cache(cpu_ari_base);
+ if (ret < 0) {
+ ERROR("%s: clean cache failed(%d)\n", __func__, ret);
+ }
+
+ break;
+
+ case MCE_CMD_ENUM_READ_MCA:
+ ret64 = ops->read_write_mca(cpu_ari_base, arg0, &arg1);
+
+ /* update context to return MCA data/error */
+ write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X1), (ret64));
+ write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X2), (arg1));
+ write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X3), (ret64));
+
+ break;
+
+ case MCE_CMD_ENUM_WRITE_MCA:
+ ret64 = ops->read_write_mca(cpu_ari_base, arg0, &arg1);
+
+ /* update context to return MCA error */
+ write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X1), (ret64));
+ write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X3), (ret64));
+
+ break;
+
+#if ENABLE_CHIP_VERIFICATION_HARNESS
+ case MCE_CMD_ENABLE_LATIC:
+ /*
+ * This call is not for production use. The constant value,
+		 * 0xFFFF0000, is specific to enabling LATIC on
+		 * pre-production parts for the chip verification harness.
+		 *
+		 * Enabling LATIC allows S/W to read the MINI ISMs in the
+		 * CCPLEX. The ISMs are used for various measurements relevant
+ * to particular locations in the Silicon. They are small
+ * counters which can be polled to determine how fast a
+ * particular location in the Silicon is.
+ */
+ ops->enter_ccplex_state(mce_get_curr_cpu_ari_base(),
+ 0xFFFF0000);
+
+ break;
+#endif
+
+ case MCE_CMD_UNCORE_PERFMON_REQ:
+ ret = ops->read_write_uncore_perfmon(cpu_ari_base, arg0, &arg1);
+
+ /* update context to return data */
+ write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X1), (arg1));
+ break;
+
+ case MCE_CMD_MISC_CCPLEX:
+ ops->misc_ccplex(cpu_ari_base, arg0, arg1);
+
+ break;
+
+ default:
+ ERROR("unknown MCE command (%lu)\n", cmd);
+ ret = EINVAL;
+ break;
+ }
+
+ return ret;
+}
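Callers normally reach mce_command_handler() from the platform's SiP SMC dispatcher, which pulls the MCE command index out of the SMC function ID and forwards x1-x3. A hedged sketch of that call path; MCE_CMD_MASK and the dispatcher shape are assumptions for illustration, not part of this patch:

	static int32_t forward_mce_smc(uint32_t smc_fid, uint64_t x1,
				       uint64_t x2, uint64_t x3)
	{
		/* the low bits of the function ID select the MCE command (assumed mask) */
		uint64_t mce_cmd = smc_fid & MCE_CMD_MASK;

		return mce_command_handler(mce_cmd, x1, x2, x3);
	}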
+
+/*******************************************************************************
+ * Handler to update the reset vector for CPUs
+ ******************************************************************************/
+int32_t mce_update_reset_vector(void)
+{
+ const arch_mce_ops_t *ops = mce_get_curr_cpu_ops();
+
+ ops->update_reset_vector(mce_get_curr_cpu_ari_base());
+
+ return 0;
+}
+
+static int32_t mce_update_ccplex_gsc(tegra_ari_gsc_index_t gsc_idx)
+{
+ const arch_mce_ops_t *ops = mce_get_curr_cpu_ops();
+
+ ops->update_ccplex_gsc(mce_get_curr_cpu_ari_base(), gsc_idx);
+
+ return 0;
+}
+
+/*******************************************************************************
+ * Handler to update carveout values for Video Memory Carveout region
+ ******************************************************************************/
+int32_t mce_update_gsc_videomem(void)
+{
+ return mce_update_ccplex_gsc(TEGRA_ARI_GSC_VPR_IDX);
+}
+
+/*******************************************************************************
+ * Handler to update carveout values for TZDRAM aperture
+ ******************************************************************************/
+int32_t mce_update_gsc_tzdram(void)
+{
+ return mce_update_ccplex_gsc(TEGRA_ARI_GSC_TZ_DRAM_IDX);
+}
+
+/*******************************************************************************
+ * Handler to update carveout values for TZ SysRAM aperture
+ ******************************************************************************/
+int32_t mce_update_gsc_tzram(void)
+{
+ return mce_update_ccplex_gsc(TEGRA_ARI_GSC_TZRAM);
+}
+
+/*******************************************************************************
+ * Handler to shutdown/reset the entire system
+ ******************************************************************************/
+__dead2 void mce_enter_ccplex_state(uint32_t state_idx)
+{
+ const arch_mce_ops_t *ops = mce_get_curr_cpu_ops();
+
+ /* sanity check state value */
+ if ((state_idx != TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_POWER_OFF) &&
+ (state_idx != TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_REBOOT)) {
+ panic();
+ }
+
+ ops->enter_ccplex_state(mce_get_curr_cpu_ari_base(), state_idx);
+
+ /* wait till the CCPLEX powers down */
+ for (;;) {
+ ;
+ }
+
+}
+
+/*******************************************************************************
+ * Handler to issue the UPDATE_CSTATE_INFO request
+ ******************************************************************************/
+void mce_update_cstate_info(const mce_cstate_info_t *cstate)
+{
+ const arch_mce_ops_t *ops = mce_get_curr_cpu_ops();
+
+ /* issue the UPDATE_CSTATE_INFO request */
+ ops->update_cstate_info(mce_get_curr_cpu_ari_base(), cstate->cluster,
+ cstate->ccplex, cstate->system, cstate->system_state_force,
+ cstate->wake_mask, cstate->update_wake_mask);
+}
+
+/*******************************************************************************
+ * Handler to read the MCE firmware version and check if it is compatible
+ * with the interface header that BL3-1 was compiled against
+ ******************************************************************************/
+void mce_verify_firmware_version(void)
+{
+ const arch_mce_ops_t *ops;
+ uint32_t cpu_ari_base;
+ uint64_t version;
+ uint32_t major, minor;
+
+ /*
+	 * MCE firmware is not supported on emulation platforms.
+ */
+ if (tegra_platform_is_emulation()) {
+
+ INFO("MCE firmware is not supported\n");
+
+ } else {
+ /* get a pointer to the CPU's arch_mce_ops_t struct */
+ ops = mce_get_curr_cpu_ops();
+
+ /* get the CPU's ARI base address */
+ cpu_ari_base = mce_get_curr_cpu_ari_base();
+
+ /*
+ * Read the MCE firmware version and extract the major and minor
+ * version fields
+ */
+ version = ops->call_enum_misc(cpu_ari_base, TEGRA_ARI_MISC_VERSION, 0);
+ major = (uint32_t)version;
+ minor = (uint32_t)(version >> 32);
+
+ INFO("MCE Version - HW=%d:%d, SW=%d:%d\n", major, minor,
+ TEGRA_ARI_VERSION_MAJOR, TEGRA_ARI_VERSION_MINOR);
+
+ /*
+ * Verify that the MCE firmware version and the interface header
+ * match
+ */
+ if (major != TEGRA_ARI_VERSION_MAJOR) {
+ ERROR("ARI major version mismatch\n");
+ panic();
+ }
+
+ if (minor < TEGRA_ARI_VERSION_MINOR) {
+ ERROR("ARI minor version mismatch\n");
+ panic();
+ }
+ }
+}
diff --git a/plat/nvidia/tegra/soc/t186/drivers/mce/nvg.c b/plat/nvidia/tegra/soc/t186/drivers/mce/nvg.c
new file mode 100644
index 00000000..243c8f3a
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t186/drivers/mce/nvg.c
@@ -0,0 +1,252 @@
+/*
+ * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <debug.h>
+#include <denver.h>
+#include <mce_private.h>
+#include <mmio.h>
+#include <sys/errno.h>
+#include <t18x_ari.h>
+
+int32_t nvg_enter_cstate(uint32_t ari_base, uint32_t state, uint32_t wake_time)
+{
+ int32_t ret = 0;
+
+ (void)ari_base;
+
+ /* check for allowed power state */
+ if ((state != TEGRA_ARI_CORE_C0) && (state != TEGRA_ARI_CORE_C1) &&
+ (state != TEGRA_ARI_CORE_C6) && (state != TEGRA_ARI_CORE_C7)) {
+ ERROR("%s: unknown cstate (%d)\n", __func__, state);
+ ret = EINVAL;
+ } else {
+ /* time (TSC ticks) until the core is expected to get a wake event */
+ nvg_set_request_data(TEGRA_NVG_CHANNEL_WAKE_TIME, wake_time);
+
+ /* set the core cstate */
+ write_actlr_el1(state);
+ }
+
+ return ret;
+}
+
+/*
+ * This request allows updating of CLUSTER_CSTATE, CCPLEX_CSTATE and
+ * SYSTEM_CSTATE values.
+ */
+int32_t nvg_update_cstate_info(uint32_t ari_base, uint32_t cluster, uint32_t ccplex,
+ uint32_t system, uint8_t sys_state_force, uint32_t wake_mask,
+ uint8_t update_wake_mask)
+{
+ uint64_t val = 0ULL;
+
+ (void)ari_base;
+
+ /* update CLUSTER_CSTATE? */
+ if (cluster != 0U) {
+ val |= ((uint64_t)cluster & CLUSTER_CSTATE_MASK) |
+ CLUSTER_CSTATE_UPDATE_BIT;
+ }
+
+ /* update CCPLEX_CSTATE? */
+ if (ccplex != 0U) {
+ val |= (((uint64_t)ccplex & CCPLEX_CSTATE_MASK) << CCPLEX_CSTATE_SHIFT) |
+ CCPLEX_CSTATE_UPDATE_BIT;
+ }
+
+ /* update SYSTEM_CSTATE? */
+ if (system != 0U) {
+ val |= (((uint64_t)system & SYSTEM_CSTATE_MASK) << SYSTEM_CSTATE_SHIFT) |
+ (((uint64_t)sys_state_force << SYSTEM_CSTATE_FORCE_UPDATE_SHIFT) |
+ SYSTEM_CSTATE_UPDATE_BIT);
+ }
+
+ /* update wake mask value? */
+ if (update_wake_mask != 0U) {
+ val |= CSTATE_WAKE_MASK_UPDATE_BIT;
+ }
+
+ /* set the wake mask */
+ val &= CSTATE_WAKE_MASK_CLEAR;
+ val |= ((uint64_t)wake_mask << CSTATE_WAKE_MASK_SHIFT);
+
+ /* set the updated cstate info */
+ nvg_set_request_data(TEGRA_NVG_CHANNEL_CSTATE_INFO, val);
+
+ return 0;
+}
+
+int32_t nvg_update_crossover_time(uint32_t ari_base, uint32_t type, uint32_t time)
+{
+ int32_t ret = 0;
+
+ (void)ari_base;
+
+ /* sanity check crossover type */
+ if (type > TEGRA_ARI_CROSSOVER_CCP3_SC1) {
+ ret = EINVAL;
+ } else {
+ /*
+		 * The crossover threshold limit types start from
+		 * TEGRA_ARI_CROSSOVER_C1_C6 up to TEGRA_ARI_CROSSOVER_CCP3_SC1.
+		 * The NVG channel for updating a given threshold is generated
+		 * by adding the type to the TEGRA_NVG_CHANNEL_CROSSOVER_C1_C6
+		 * channel index.
+ */
+ nvg_set_request_data((TEGRA_NVG_CHANNEL_CROSSOVER_C1_C6 +
+ (uint64_t)type), (uint64_t)time);
+ }
+
+ return ret;
+}
+
+uint64_t nvg_read_cstate_stats(uint32_t ari_base, uint32_t state)
+{
+ uint64_t ret;
+
+ (void)ari_base;
+
+ /* sanity check state */
+ if (state == 0U) {
+ ret = EINVAL;
+ } else {
+ /*
+		 * The cstate stats types start from
+		 * TEGRA_ARI_CSTATE_STATS_SC7_ENTRIES up to
+		 * TEGRA_ARI_CSTATE_STATS_LAST_CSTATE_ENTRY_A57_3. The NVG
+		 * channel for reading a given statistic is generated by adding
+		 * the type to the TEGRA_NVG_CHANNEL_CSTATE_STATS_CLEAR channel
+		 * index.
+ */
+ nvg_set_request((TEGRA_NVG_CHANNEL_CSTATE_STATS_CLEAR +
+ (uint64_t)state));
+ ret = nvg_get_result();
+ }
+
+ return ret;
+}
+
+int32_t nvg_write_cstate_stats(uint32_t ari_base, uint32_t state, uint32_t stats)
+{
+ uint64_t val;
+
+ (void)ari_base;
+
+ /*
+ * The only difference between a CSTATE_STATS_WRITE and
+	 * CSTATE_STATS_READ is the usage of bits 63:32 in the request.
+	 * Bits 63:32 are set to '0' for a read, while a write contains the
+ * actual stats value to be written.
+ */
+ val = ((uint64_t)stats << MCE_CSTATE_STATS_TYPE_SHIFT) | state;
+
+ /*
+	 * The cstate stats types start from
+	 * TEGRA_ARI_CSTATE_STATS_SC7_ENTRIES up to
+	 * TEGRA_ARI_CSTATE_STATS_LAST_CSTATE_ENTRY_A57_3. The NVG channel
+	 * for writing a given statistic is generated by adding the type to
+	 * the TEGRA_NVG_CHANNEL_CSTATE_STATS_CLEAR channel index.
+ */
+ nvg_set_request_data((TEGRA_NVG_CHANNEL_CSTATE_STATS_CLEAR +
+ (uint64_t)state), val);
+
+ return 0;
+}
+
+int32_t nvg_is_ccx_allowed(uint32_t ari_base, uint32_t state, uint32_t wake_time)
+{
+ (void)ari_base;
+ (void)state;
+ (void)wake_time;
+
+ /* This does not apply to the Denver cluster */
+ return 0;
+}
+
+int32_t nvg_is_sc7_allowed(uint32_t ari_base, uint32_t state, uint32_t wake_time)
+{
+ uint64_t val;
+ int32_t ret;
+
+ (void)ari_base;
+
+ /* check for allowed power state */
+ if ((state != TEGRA_ARI_CORE_C0) && (state != TEGRA_ARI_CORE_C1) &&
+ (state != TEGRA_ARI_CORE_C6) && (state != TEGRA_ARI_CORE_C7)) {
+ ERROR("%s: unknown cstate (%d)\n", __func__, state);
+ ret = EINVAL;
+ } else {
+ /*
+ * Request format -
+ * 63:32 = wake time
+ * 31:0 = C-state for this core
+ */
+ val = ((uint64_t)wake_time << MCE_SC7_WAKE_TIME_SHIFT) |
+ ((uint64_t)state & MCE_SC7_ALLOWED_MASK);
+
+ /* issue command to check if SC7 is allowed */
+ nvg_set_request_data(TEGRA_NVG_CHANNEL_IS_SC7_ALLOWED, val);
+
+ /* 1 = SC7 allowed, 0 = SC7 not allowed */
+ ret = (nvg_get_result() != 0ULL) ? 1 : 0;
+ }
+
+ return ret;
+}
+
+int32_t nvg_online_core(uint32_t ari_base, uint32_t core)
+{
+ uint64_t cpu = read_mpidr() & (uint64_t)MPIDR_CPU_MASK;
+ uint64_t impl = (read_midr() >> (uint64_t)MIDR_IMPL_SHIFT) &
+ (uint64_t)MIDR_IMPL_MASK;
+ int32_t ret = 0;
+
+ (void)ari_base;
+
+ /* sanity check core id */
+ if ((core >= (uint32_t)MCE_CORE_ID_MAX) || (cpu == core)) {
+ ERROR("%s: unsupported core id (%d)\n", __func__, core);
+ ret = EINVAL;
+ } else {
+ /*
+ * The Denver cluster has 2 CPUs only - 0, 1.
+ */
+ if ((impl == DENVER_IMPL) && ((core == 2U) || (core == 3U))) {
+ ERROR("%s: unknown core id (%d)\n", __func__, core);
+ ret = EINVAL;
+ } else {
+ /* get a core online */
+ nvg_set_request_data(TEGRA_NVG_CHANNEL_ONLINE_CORE,
+ ((uint64_t)core & MCE_CORE_ID_MASK));
+ }
+ }
+
+ return ret;
+}
+
+int32_t nvg_cc3_ctrl(uint32_t ari_base, uint32_t freq, uint32_t volt, uint8_t enable)
+{
+ uint32_t val;
+
+ (void)ari_base;
+
+ /*
+ * If the enable bit is cleared, Auto-CC3 is disabled: the SW visible
+ * voltage/frequency request registers for all non-floorswept cores are
+ * marked valid independent of StandbyWFI, and the IDLE voltage/frequency
+ * request register is disabled. If the enable bit is set, Auto-CC3 is
+ * enabled: the ARM SW visible voltage/frequency request registers for
+ * all non-floorswept cores are gated by StandbyWFI (or the equivalent
+ * signal), and the IDLE voltage/frequency request register is kept
+ * enabled.
+ */
+ val = (((freq & MCE_AUTO_CC3_FREQ_MASK) << MCE_AUTO_CC3_FREQ_SHIFT) |\
+ ((volt & MCE_AUTO_CC3_VTG_MASK) << MCE_AUTO_CC3_VTG_SHIFT) |\
+ ((enable != 0U) ? MCE_AUTO_CC3_ENABLE_BIT : 0U));
+
+ nvg_set_request_data(TEGRA_NVG_CHANNEL_CC3_CTRL, (uint64_t)val);
+
+ return 0;
+}
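
A caller higher in the MCE stack is expected to pack a cluster operating point into this request. A minimal sketch of such a call, assuming placeholder frequency/voltage values (not real Tegra186 operating points):

/* Minimal sketch: enable Auto-CC3 with placeholder operating-point values. */
static void example_enable_auto_cc3(void)
{
	uint32_t ndiv = 0x2aU;		/* placeholder frequency request */
	uint32_t vindex = 0x10U;	/* placeholder voltage index */

	/* ari_base is ignored on the NVG path, so 0 is sufficient here */
	(void)nvg_cc3_ctrl(0U, ndiv, vindex, 1U);
}
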
diff --git a/plat/nvidia/tegra/soc/t186/plat_memctrl.c b/plat/nvidia/tegra/soc/t186/plat_memctrl.c
new file mode 100644
index 00000000..957ecf18
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t186/plat_memctrl.c
@@ -0,0 +1,221 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <bl_common.h>
+#include <memctrl_v2.h>
+
+/*******************************************************************************
+ * Array to hold stream_id override config register offsets
+ ******************************************************************************/
+static const uint32_t tegra186_streamid_override_regs[] = {
+ MC_STREAMID_OVERRIDE_CFG_PTCR,
+ MC_STREAMID_OVERRIDE_CFG_AFIR,
+ MC_STREAMID_OVERRIDE_CFG_HDAR,
+ MC_STREAMID_OVERRIDE_CFG_HOST1XDMAR,
+ MC_STREAMID_OVERRIDE_CFG_NVENCSRD,
+ MC_STREAMID_OVERRIDE_CFG_SATAR,
+ MC_STREAMID_OVERRIDE_CFG_MPCORER,
+ MC_STREAMID_OVERRIDE_CFG_NVENCSWR,
+ MC_STREAMID_OVERRIDE_CFG_AFIW,
+ MC_STREAMID_OVERRIDE_CFG_HDAW,
+ MC_STREAMID_OVERRIDE_CFG_MPCOREW,
+ MC_STREAMID_OVERRIDE_CFG_SATAW,
+ MC_STREAMID_OVERRIDE_CFG_ISPRA,
+ MC_STREAMID_OVERRIDE_CFG_ISPWA,
+ MC_STREAMID_OVERRIDE_CFG_ISPWB,
+ MC_STREAMID_OVERRIDE_CFG_XUSB_HOSTR,
+ MC_STREAMID_OVERRIDE_CFG_XUSB_HOSTW,
+ MC_STREAMID_OVERRIDE_CFG_XUSB_DEVR,
+ MC_STREAMID_OVERRIDE_CFG_XUSB_DEVW,
+ MC_STREAMID_OVERRIDE_CFG_TSECSRD,
+ MC_STREAMID_OVERRIDE_CFG_TSECSWR,
+ MC_STREAMID_OVERRIDE_CFG_GPUSRD,
+ MC_STREAMID_OVERRIDE_CFG_GPUSWR,
+ MC_STREAMID_OVERRIDE_CFG_SDMMCRA,
+ MC_STREAMID_OVERRIDE_CFG_SDMMCRAA,
+ MC_STREAMID_OVERRIDE_CFG_SDMMCR,
+ MC_STREAMID_OVERRIDE_CFG_SDMMCRAB,
+ MC_STREAMID_OVERRIDE_CFG_SDMMCWA,
+ MC_STREAMID_OVERRIDE_CFG_SDMMCWAA,
+ MC_STREAMID_OVERRIDE_CFG_SDMMCW,
+ MC_STREAMID_OVERRIDE_CFG_SDMMCWAB,
+ MC_STREAMID_OVERRIDE_CFG_VICSRD,
+ MC_STREAMID_OVERRIDE_CFG_VICSWR,
+ MC_STREAMID_OVERRIDE_CFG_VIW,
+ MC_STREAMID_OVERRIDE_CFG_NVDECSRD,
+ MC_STREAMID_OVERRIDE_CFG_NVDECSWR,
+ MC_STREAMID_OVERRIDE_CFG_APER,
+ MC_STREAMID_OVERRIDE_CFG_APEW,
+ MC_STREAMID_OVERRIDE_CFG_NVJPGSRD,
+ MC_STREAMID_OVERRIDE_CFG_NVJPGSWR,
+ MC_STREAMID_OVERRIDE_CFG_SESRD,
+ MC_STREAMID_OVERRIDE_CFG_SESWR,
+ MC_STREAMID_OVERRIDE_CFG_ETRR,
+ MC_STREAMID_OVERRIDE_CFG_ETRW,
+ MC_STREAMID_OVERRIDE_CFG_TSECSRDB,
+ MC_STREAMID_OVERRIDE_CFG_TSECSWRB,
+ MC_STREAMID_OVERRIDE_CFG_GPUSRD2,
+ MC_STREAMID_OVERRIDE_CFG_GPUSWR2,
+ MC_STREAMID_OVERRIDE_CFG_AXISR,
+ MC_STREAMID_OVERRIDE_CFG_AXISW,
+ MC_STREAMID_OVERRIDE_CFG_EQOSR,
+ MC_STREAMID_OVERRIDE_CFG_EQOSW,
+ MC_STREAMID_OVERRIDE_CFG_UFSHCR,
+ MC_STREAMID_OVERRIDE_CFG_UFSHCW,
+ MC_STREAMID_OVERRIDE_CFG_NVDISPLAYR,
+ MC_STREAMID_OVERRIDE_CFG_BPMPR,
+ MC_STREAMID_OVERRIDE_CFG_BPMPW,
+ MC_STREAMID_OVERRIDE_CFG_BPMPDMAR,
+ MC_STREAMID_OVERRIDE_CFG_BPMPDMAW,
+ MC_STREAMID_OVERRIDE_CFG_AONR,
+ MC_STREAMID_OVERRIDE_CFG_AONW,
+ MC_STREAMID_OVERRIDE_CFG_AONDMAR,
+ MC_STREAMID_OVERRIDE_CFG_AONDMAW,
+ MC_STREAMID_OVERRIDE_CFG_SCER,
+ MC_STREAMID_OVERRIDE_CFG_SCEW,
+ MC_STREAMID_OVERRIDE_CFG_SCEDMAR,
+ MC_STREAMID_OVERRIDE_CFG_SCEDMAW,
+ MC_STREAMID_OVERRIDE_CFG_APEDMAR,
+ MC_STREAMID_OVERRIDE_CFG_APEDMAW,
+ MC_STREAMID_OVERRIDE_CFG_NVDISPLAYR1,
+ MC_STREAMID_OVERRIDE_CFG_VICSRD1,
+ MC_STREAMID_OVERRIDE_CFG_NVDECSRD1
+};
+
+/*******************************************************************************
+ * Array to hold the security configs for stream IDs
+ ******************************************************************************/
+static const mc_streamid_security_cfg_t tegra186_streamid_sec_cfgs[] = {
+ mc_make_sec_cfg(SCEW, NON_SECURE, NO_OVERRIDE, ENABLE),
+ mc_make_sec_cfg(AFIR, NON_SECURE, OVERRIDE, ENABLE),
+ mc_make_sec_cfg(AFIW, NON_SECURE, OVERRIDE, ENABLE),
+ mc_make_sec_cfg(NVDISPLAYR1, NON_SECURE, OVERRIDE, ENABLE),
+ mc_make_sec_cfg(XUSB_DEVR, NON_SECURE, OVERRIDE, ENABLE),
+ mc_make_sec_cfg(VICSRD1, NON_SECURE, NO_OVERRIDE, ENABLE),
+ mc_make_sec_cfg(NVENCSWR, NON_SECURE, NO_OVERRIDE, ENABLE),
+ mc_make_sec_cfg(TSECSRDB, NON_SECURE, NO_OVERRIDE, ENABLE),
+ mc_make_sec_cfg(AXISW, SECURE, NO_OVERRIDE, DISABLE),
+ mc_make_sec_cfg(SDMMCWAB, NON_SECURE, OVERRIDE, ENABLE),
+ mc_make_sec_cfg(AONDMAW, NON_SECURE, NO_OVERRIDE, ENABLE),
+ mc_make_sec_cfg(GPUSWR2, SECURE, NO_OVERRIDE, DISABLE),
+ mc_make_sec_cfg(SATAW, NON_SECURE, OVERRIDE, ENABLE),
+ mc_make_sec_cfg(UFSHCW, NON_SECURE, OVERRIDE, ENABLE),
+ mc_make_sec_cfg(SDMMCR, NON_SECURE, OVERRIDE, ENABLE),
+ mc_make_sec_cfg(SCEDMAW, NON_SECURE, NO_OVERRIDE, ENABLE),
+ mc_make_sec_cfg(UFSHCR, NON_SECURE, OVERRIDE, ENABLE),
+ mc_make_sec_cfg(SDMMCWAA, NON_SECURE, OVERRIDE, ENABLE),
+ mc_make_sec_cfg(SESWR, NON_SECURE, NO_OVERRIDE, ENABLE),
+ mc_make_sec_cfg(MPCORER, NON_SECURE, OVERRIDE, ENABLE),
+ mc_make_sec_cfg(PTCR, NON_SECURE, OVERRIDE, ENABLE),
+ mc_make_sec_cfg(BPMPW, NON_SECURE, NO_OVERRIDE, ENABLE),
+ mc_make_sec_cfg(ETRW, NON_SECURE, OVERRIDE, ENABLE),
+ mc_make_sec_cfg(GPUSRD, SECURE, NO_OVERRIDE, DISABLE),
+ mc_make_sec_cfg(VICSWR, NON_SECURE, NO_OVERRIDE, ENABLE),
+ mc_make_sec_cfg(SCEDMAR, NON_SECURE, NO_OVERRIDE, ENABLE),
+ mc_make_sec_cfg(HDAW, NON_SECURE, OVERRIDE, ENABLE),
+ mc_make_sec_cfg(ISPWA, NON_SECURE, OVERRIDE, ENABLE),
+ mc_make_sec_cfg(EQOSW, NON_SECURE, OVERRIDE, ENABLE),
+ mc_make_sec_cfg(XUSB_HOSTW, NON_SECURE, OVERRIDE, ENABLE),
+ mc_make_sec_cfg(TSECSWR, NON_SECURE, NO_OVERRIDE, ENABLE),
+ mc_make_sec_cfg(SDMMCRAA, NON_SECURE, OVERRIDE, ENABLE),
+ mc_make_sec_cfg(VIW, NON_SECURE, OVERRIDE, ENABLE),
+ mc_make_sec_cfg(AXISR, SECURE, NO_OVERRIDE, DISABLE),
+ mc_make_sec_cfg(SDMMCW, NON_SECURE, OVERRIDE, ENABLE),
+ mc_make_sec_cfg(BPMPDMAW, NON_SECURE, NO_OVERRIDE, ENABLE),
+ mc_make_sec_cfg(ISPRA, NON_SECURE, OVERRIDE, ENABLE),
+ mc_make_sec_cfg(NVDECSWR, NON_SECURE, NO_OVERRIDE, ENABLE),
+ mc_make_sec_cfg(XUSB_DEVW, NON_SECURE, OVERRIDE, ENABLE),
+ mc_make_sec_cfg(NVDECSRD, NON_SECURE, NO_OVERRIDE, ENABLE),
+ mc_make_sec_cfg(MPCOREW, NON_SECURE, OVERRIDE, ENABLE),
+ mc_make_sec_cfg(NVDISPLAYR, NON_SECURE, OVERRIDE, ENABLE),
+ mc_make_sec_cfg(BPMPDMAR, NON_SECURE, NO_OVERRIDE, ENABLE),
+ mc_make_sec_cfg(NVJPGSWR, NON_SECURE, NO_OVERRIDE, ENABLE),
+ mc_make_sec_cfg(NVDECSRD1, NON_SECURE, NO_OVERRIDE, ENABLE),
+ mc_make_sec_cfg(TSECSRD, NON_SECURE, NO_OVERRIDE, ENABLE),
+ mc_make_sec_cfg(NVJPGSRD, NON_SECURE, NO_OVERRIDE, ENABLE),
+ mc_make_sec_cfg(SDMMCWA, NON_SECURE, OVERRIDE, ENABLE),
+ mc_make_sec_cfg(SCER, NON_SECURE, NO_OVERRIDE, ENABLE),
+ mc_make_sec_cfg(XUSB_HOSTR, NON_SECURE, OVERRIDE, ENABLE),
+ mc_make_sec_cfg(VICSRD, NON_SECURE, NO_OVERRIDE, ENABLE),
+ mc_make_sec_cfg(AONDMAR, NON_SECURE, NO_OVERRIDE, ENABLE),
+ mc_make_sec_cfg(AONW, NON_SECURE, NO_OVERRIDE, ENABLE),
+ mc_make_sec_cfg(SDMMCRA, NON_SECURE, OVERRIDE, ENABLE),
+ mc_make_sec_cfg(HOST1XDMAR, NON_SECURE, NO_OVERRIDE, ENABLE),
+ mc_make_sec_cfg(EQOSR, NON_SECURE, OVERRIDE, ENABLE),
+ mc_make_sec_cfg(SATAR, NON_SECURE, OVERRIDE, ENABLE),
+ mc_make_sec_cfg(BPMPR, NON_SECURE, NO_OVERRIDE, ENABLE),
+ mc_make_sec_cfg(HDAR, NON_SECURE, OVERRIDE, ENABLE),
+ mc_make_sec_cfg(SDMMCRAB, NON_SECURE, OVERRIDE, ENABLE),
+ mc_make_sec_cfg(ETRR, NON_SECURE, OVERRIDE, ENABLE),
+ mc_make_sec_cfg(AONR, NON_SECURE, NO_OVERRIDE, ENABLE),
+ mc_make_sec_cfg(SESRD, NON_SECURE, NO_OVERRIDE, ENABLE),
+ mc_make_sec_cfg(NVENCSRD, NON_SECURE, NO_OVERRIDE, ENABLE),
+ mc_make_sec_cfg(GPUSWR, SECURE, NO_OVERRIDE, DISABLE),
+ mc_make_sec_cfg(TSECSWRB, NON_SECURE, NO_OVERRIDE, ENABLE),
+ mc_make_sec_cfg(ISPWB, NON_SECURE, OVERRIDE, ENABLE),
+ mc_make_sec_cfg(GPUSRD2, SECURE, NO_OVERRIDE, DISABLE),
+ mc_make_sec_cfg(APEDMAW, NON_SECURE, NO_OVERRIDE, ENABLE),
+ mc_make_sec_cfg(APER, NON_SECURE, NO_OVERRIDE, ENABLE),
+ mc_make_sec_cfg(APEW, NON_SECURE, NO_OVERRIDE, ENABLE),
+ mc_make_sec_cfg(APEDMAR, NON_SECURE, NO_OVERRIDE, ENABLE),
+};
+
+/*******************************************************************************
+ * Array to hold the transaction override configs
+ ******************************************************************************/
+static const mc_txn_override_cfg_t tegra186_txn_override_cfgs[] = {
+ mc_make_txn_override_cfg(BPMPW, CGID_TAG_ADR),
+ mc_make_txn_override_cfg(EQOSW, CGID_TAG_ADR),
+ mc_make_txn_override_cfg(NVJPGSWR, CGID_TAG_ADR),
+ mc_make_txn_override_cfg(SDMMCWAA, CGID_TAG_ADR),
+ mc_make_txn_override_cfg(MPCOREW, CGID_TAG_ADR),
+ mc_make_txn_override_cfg(SCEDMAW, CGID_TAG_ADR),
+ mc_make_txn_override_cfg(SDMMCW, CGID_TAG_ADR),
+ mc_make_txn_override_cfg(AXISW, CGID_TAG_ADR),
+ mc_make_txn_override_cfg(TSECSWR, CGID_TAG_ADR),
+ mc_make_txn_override_cfg(GPUSWR, CGID_TAG_ADR),
+ mc_make_txn_override_cfg(XUSB_HOSTW, CGID_TAG_ADR),
+ mc_make_txn_override_cfg(TSECSWRB, CGID_TAG_ADR),
+ mc_make_txn_override_cfg(GPUSWR2, CGID_TAG_ADR),
+ mc_make_txn_override_cfg(AONDMAW, CGID_TAG_ADR),
+ mc_make_txn_override_cfg(AONW, CGID_TAG_ADR),
+ mc_make_txn_override_cfg(SESWR, CGID_TAG_ADR),
+ mc_make_txn_override_cfg(BPMPDMAW, CGID_TAG_ADR),
+ mc_make_txn_override_cfg(SDMMCWA, CGID_TAG_ADR),
+ mc_make_txn_override_cfg(HDAW, CGID_TAG_ADR),
+ mc_make_txn_override_cfg(NVDECSWR, CGID_TAG_ADR),
+ mc_make_txn_override_cfg(UFSHCW, CGID_TAG_ADR),
+ mc_make_txn_override_cfg(SATAW, CGID_TAG_ADR),
+ mc_make_txn_override_cfg(ETRW, CGID_TAG_ADR),
+ mc_make_txn_override_cfg(VICSWR, CGID_TAG_ADR),
+ mc_make_txn_override_cfg(NVENCSWR, CGID_TAG_ADR),
+ mc_make_txn_override_cfg(SDMMCWAB, CGID_TAG_ADR),
+ mc_make_txn_override_cfg(ISPWB, CGID_TAG_ADR),
+ mc_make_txn_override_cfg(APEW, CGID_TAG_ADR),
+ mc_make_txn_override_cfg(XUSB_DEVW, CGID_TAG_ADR),
+ mc_make_txn_override_cfg(AFIW, CGID_TAG_ADR),
+ mc_make_txn_override_cfg(SCEW, CGID_TAG_ADR),
+};
+
+/*******************************************************************************
+ * Struct to hold the memory controller settings
+ ******************************************************************************/
+static tegra_mc_settings_t tegra186_mc_settings = {
+ .streamid_override_cfg = tegra186_streamid_override_regs,
+ .num_streamid_override_cfgs = ARRAY_SIZE(tegra186_streamid_override_regs),
+ .streamid_security_cfg = tegra186_streamid_sec_cfgs,
+ .num_streamid_security_cfgs = ARRAY_SIZE(tegra186_streamid_sec_cfgs),
+ .txn_override_cfg = tegra186_txn_override_cfgs,
+ .num_txn_override_cfgs = ARRAY_SIZE(tegra186_txn_override_cfgs)
+};
+
+/*******************************************************************************
+ * Handler to return the pointer to the memory controller's settings struct
+ ******************************************************************************/
+tegra_mc_settings_t *tegra_get_mc_settings(void)
+{
+ return &tegra186_mc_settings;
+}
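
The common memctrl_v2 driver is expected to walk each table up to its count. A simplified sketch of that walk, assuming mmio_write_32() programming relative to TEGRA_MC_STREAMID_BASE and a placeholder override value (not the driver's exact code):

/* Sketch only: walk the stream ID override table and program each register. */
static void example_apply_streamid_overrides(void)
{
	const tegra_mc_settings_t *cfg = tegra_get_mc_settings();
	uint32_t i;

	for (i = 0U; i < cfg->num_streamid_override_cfgs; i++) {
		/* 0x7FU is a placeholder stream ID, not the real default */
		mmio_write_32(TEGRA_MC_STREAMID_BASE +
			      cfg->streamid_override_cfg[i], 0x7FU);
	}
}
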
diff --git a/plat/nvidia/tegra/soc/t186/plat_psci_handlers.c b/plat/nvidia/tegra/soc/t186/plat_psci_handlers.c
new file mode 100644
index 00000000..095614e4
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t186/plat_psci_handlers.c
@@ -0,0 +1,400 @@
+/*
+ * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <bl_common.h>
+#include <context.h>
+#include <context_mgmt.h>
+#include <debug.h>
+#include <denver.h>
+#include <mce.h>
+#include <platform.h>
+#include <psci.h>
+#include <smmu.h>
+#include <string.h>
+#include <t18x_ari.h>
+#include <tegra_private.h>
+
+extern void prepare_cpu_pwr_dwn(void);
+extern void tegra186_cpu_reset_handler(void);
+extern uint32_t __tegra186_cpu_reset_handler_end,
+ __tegra186_smmu_context;
+
+/* state id mask */
+#define TEGRA186_STATE_ID_MASK 0xF
+/* constants to get power state's wake time */
+#define TEGRA186_WAKE_TIME_MASK 0x0FFFFFF0
+#define TEGRA186_WAKE_TIME_SHIFT 4
+/* default core wake mask for CPU_SUSPEND */
+#define TEGRA186_CORE_WAKE_MASK 0x180c
+/* context size to save during system suspend */
+#define TEGRA186_SE_CONTEXT_SIZE 3
+
+static uint32_t se_regs[TEGRA186_SE_CONTEXT_SIZE];
+static struct t18x_psci_percpu_data {
+ unsigned int wake_time;
+} __aligned(CACHE_WRITEBACK_GRANULE) percpu_data[PLATFORM_CORE_COUNT];
+
+/* System power down state */
+uint32_t tegra186_system_powerdn_state = TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_POWER_OFF;
+
+int32_t tegra_soc_validate_power_state(unsigned int power_state,
+ psci_power_state_t *req_state)
+{
+ int state_id = psci_get_pstate_id(power_state) & TEGRA186_STATE_ID_MASK;
+ int cpu = plat_my_core_pos();
+
+ /* save the core wake time (in TSC ticks)*/
+ percpu_data[cpu].wake_time = (power_state & TEGRA186_WAKE_TIME_MASK)
+ << TEGRA186_WAKE_TIME_SHIFT;
+
+ /*
+ * Clean percpu_data[cpu] to DRAM. This needs to be done to ensure that
+ * the correct value is read in tegra_soc_pwr_domain_suspend(), which
+ * is called with caches disabled. It is possible to read a stale value
+ * from DRAM in that function, because the L2 cache is not flushed
+ * unless the cluster is entering CC6/CC7.
+ */
+ clean_dcache_range((uint64_t)&percpu_data[cpu],
+ sizeof(percpu_data[cpu]));
+
+ /* Sanity check the requested state id */
+ switch (state_id) {
+ case PSTATE_ID_CORE_IDLE:
+ case PSTATE_ID_CORE_POWERDN:
+
+ /* Core idle or powerdown request */
+ req_state->pwr_domain_state[MPIDR_AFFLVL0] = state_id;
+ req_state->pwr_domain_state[MPIDR_AFFLVL1] = state_id;
+
+ break;
+
+ default:
+ ERROR("%s: unsupported state id (%d)\n", __func__, state_id);
+ return PSCI_E_INVALID_PARAMS;
+ }
+
+ return PSCI_E_SUCCESS;
+}
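
A sketch of how the fields above decompose, mirroring the extraction already done in this function (the INFO() output is for illustration only):

/* Illustrative only: decompose a power_state into its Tegra186 fields. */
static void example_decode_power_state(unsigned int power_state)
{
	unsigned int state_id = psci_get_pstate_id(power_state) &
				TEGRA186_STATE_ID_MASK;
	unsigned int wake_time = (power_state & TEGRA186_WAKE_TIME_MASK) <<
				 TEGRA186_WAKE_TIME_SHIFT;

	INFO("state_id=%u wake_time=%u ticks\n", state_id, wake_time);
}
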
+
+int tegra_soc_pwr_domain_suspend(const psci_power_state_t *target_state)
+{
+ const plat_local_state_t *pwr_domain_state;
+ unsigned int stateid_afflvl0, stateid_afflvl2;
+ int cpu = plat_my_core_pos();
+ plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
+ mce_cstate_info_t cstate_info = { 0 };
+ uint64_t smmu_ctx_base;
+ uint32_t val;
+
+ /* get the state ID */
+ pwr_domain_state = target_state->pwr_domain_state;
+ stateid_afflvl0 = pwr_domain_state[MPIDR_AFFLVL0] &
+ TEGRA186_STATE_ID_MASK;
+ stateid_afflvl2 = pwr_domain_state[PLAT_MAX_PWR_LVL] &
+ TEGRA186_STATE_ID_MASK;
+
+ if ((stateid_afflvl0 == PSTATE_ID_CORE_IDLE) ||
+ (stateid_afflvl0 == PSTATE_ID_CORE_POWERDN)) {
+
+ /* Enter CPU idle/powerdown */
+ val = (stateid_afflvl0 == PSTATE_ID_CORE_IDLE) ?
+ TEGRA_ARI_CORE_C6 : TEGRA_ARI_CORE_C7;
+ (void)mce_command_handler(MCE_CMD_ENTER_CSTATE, val,
+ percpu_data[cpu].wake_time, 0);
+
+ } else if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {
+
+ /* save SE registers */
+ se_regs[0] = mmio_read_32(TEGRA_SE0_BASE +
+ SE_MUTEX_WATCHDOG_NS_LIMIT);
+ se_regs[1] = mmio_read_32(TEGRA_RNG1_BASE +
+ RNG_MUTEX_WATCHDOG_NS_LIMIT);
+ se_regs[2] = mmio_read_32(TEGRA_PKA1_BASE +
+ PKA_MUTEX_WATCHDOG_NS_LIMIT);
+
+ /* save 'Secure Boot' Processor Feature Config Register */
+ val = mmio_read_32(TEGRA_MISC_BASE + MISCREG_PFCFG);
+ mmio_write_32(TEGRA_SCRATCH_BASE + SECURE_SCRATCH_RSV6, val);
+
+ /* save SMMU context to TZDRAM */
+ smmu_ctx_base = params_from_bl2->tzdram_base +
+ ((uintptr_t)&__tegra186_smmu_context -
+ (uintptr_t)tegra186_cpu_reset_handler);
+ tegra_smmu_save_context((uintptr_t)smmu_ctx_base);
+
+ /* Prepare for system suspend */
+ cstate_info.cluster = TEGRA_ARI_CLUSTER_CC7;
+ cstate_info.system = TEGRA_ARI_SYSTEM_SC7;
+ cstate_info.system_state_force = 1;
+ cstate_info.update_wake_mask = 1;
+ mce_update_cstate_info(&cstate_info);
+
+ /* Loop until system suspend is allowed */
+ do {
+ val = mce_command_handler(MCE_CMD_IS_SC7_ALLOWED,
+ TEGRA_ARI_CORE_C7,
+ MCE_CORE_SLEEP_TIME_INFINITE,
+ 0);
+ } while (val == 0);
+
+ /* Instruct the MCE to enter system suspend state */
+ (void)mce_command_handler(MCE_CMD_ENTER_CSTATE,
+ TEGRA_ARI_CORE_C7, MCE_CORE_SLEEP_TIME_INFINITE, 0);
+ }
+
+ return PSCI_E_SUCCESS;
+}
+
+/*******************************************************************************
+ * Platform handler to calculate the proper target power level at the
+ * specified affinity level
+ ******************************************************************************/
+plat_local_state_t tegra_soc_get_target_pwr_state(unsigned int lvl,
+ const plat_local_state_t *states,
+ unsigned int ncpu)
+{
+ plat_local_state_t target = *states;
+ int cpu = plat_my_core_pos(), ret, cluster_powerdn = 1;
+ int core_pos = read_mpidr() & MPIDR_CPU_MASK;
+ mce_cstate_info_t cstate_info = { 0 };
+
+ /* get the power state at this level */
+ if (lvl == MPIDR_AFFLVL1)
+ target = *(states + core_pos);
+ if (lvl == MPIDR_AFFLVL2)
+ target = *(states + cpu);
+
+ /* CPU suspend */
+ if (lvl == MPIDR_AFFLVL1 && target == PSTATE_ID_CORE_POWERDN) {
+
+ /* Program default wake mask */
+ cstate_info.wake_mask = TEGRA186_CORE_WAKE_MASK;
+ cstate_info.update_wake_mask = 1;
+ mce_update_cstate_info(&cstate_info);
+
+ /* Check if CCx state is allowed. */
+ ret = mce_command_handler(MCE_CMD_IS_CCX_ALLOWED,
+ TEGRA_ARI_CORE_C7, percpu_data[cpu].wake_time,
+ 0);
+ if (ret)
+ return PSTATE_ID_CORE_POWERDN;
+ }
+
+ /* CPU off */
+ if (lvl == MPIDR_AFFLVL1 && target == PLAT_MAX_OFF_STATE) {
+
+ /* find out the number of ON cpus in the cluster */
+ do {
+ target = *states++;
+ if (target != PLAT_MAX_OFF_STATE)
+ cluster_powerdn = 0;
+ } while (--ncpu);
+
+ /* Enable cluster powerdn from last CPU in the cluster */
+ if (cluster_powerdn) {
+
+ /* Enable CC7 state and turn off wake mask */
+ cstate_info.cluster = TEGRA_ARI_CLUSTER_CC7;
+ cstate_info.update_wake_mask = 1;
+ mce_update_cstate_info(&cstate_info);
+
+ /* Check if CCx state is allowed. */
+ ret = mce_command_handler(MCE_CMD_IS_CCX_ALLOWED,
+ TEGRA_ARI_CORE_C7,
+ MCE_CORE_SLEEP_TIME_INFINITE,
+ 0);
+ if (ret)
+ return PSTATE_ID_CORE_POWERDN;
+
+ } else {
+
+ /* Turn off wake_mask */
+ cstate_info.update_wake_mask = 1;
+ mce_update_cstate_info(&cstate_info);
+ }
+ }
+
+ /* System Suspend */
+ if (((lvl == MPIDR_AFFLVL2) || (lvl == MPIDR_AFFLVL1)) &&
+ (target == PSTATE_ID_SOC_POWERDN))
+ return PSTATE_ID_SOC_POWERDN;
+
+ /* default state */
+ return PSCI_LOCAL_STATE_RUN;
+}
+
+int tegra_soc_pwr_domain_power_down_wfi(const psci_power_state_t *target_state)
+{
+ const plat_local_state_t *pwr_domain_state =
+ target_state->pwr_domain_state;
+ plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
+ unsigned int stateid_afflvl2 = pwr_domain_state[PLAT_MAX_PWR_LVL] &
+ TEGRA186_STATE_ID_MASK;
+ uint64_t val;
+
+ if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {
+ /*
+ * The TZRAM loses power when we enter system suspend. To
+ * allow graceful exit from system suspend, we need to copy
+ * BL3-1 over to TZDRAM.
+ */
+ val = params_from_bl2->tzdram_base +
+ ((uintptr_t)&__tegra186_cpu_reset_handler_end -
+ (uintptr_t)tegra186_cpu_reset_handler);
+ memcpy16((void *)(uintptr_t)val, (void *)(uintptr_t)BL31_BASE,
+ (uintptr_t)&__BL31_END__ - (uintptr_t)BL31_BASE);
+ }
+
+ return PSCI_E_SUCCESS;
+}
+
+int tegra_soc_pwr_domain_on(u_register_t mpidr)
+{
+ uint32_t target_cpu = mpidr & MPIDR_CPU_MASK;
+ uint32_t target_cluster = (mpidr & MPIDR_CLUSTER_MASK) >>
+ MPIDR_AFFINITY_BITS;
+
+ if (target_cluster > MPIDR_AFFLVL1) {
+ ERROR("%s: unsupported CPU (0x%lx)\n", __func__, mpidr);
+ return PSCI_E_NOT_PRESENT;
+ }
+
+ /* construct the target CPU # */
+ target_cpu |= (target_cluster << 2);
+
+ mce_command_handler(MCE_CMD_ONLINE_CORE, target_cpu, 0, 0);
+
+ return PSCI_E_SUCCESS;
+}
+
+int tegra_soc_pwr_domain_on_finish(const psci_power_state_t *target_state)
+{
+ int stateid_afflvl2 = target_state->pwr_domain_state[PLAT_MAX_PWR_LVL];
+ int stateid_afflvl0 = target_state->pwr_domain_state[MPIDR_AFFLVL0];
+ mce_cstate_info_t cstate_info = { 0 };
+
+ /*
+ * Reset the power state info for CPUs when onlining. We set the
+ * deepest power state when offlining a core, but that may not be
+ * what the non-secure software controlling idle states requests.
+ * The non-secure software re-initializes this info when the core
+ * comes back online.
+ */
+ if (stateid_afflvl0 == PLAT_MAX_OFF_STATE) {
+
+ cstate_info.cluster = TEGRA_ARI_CLUSTER_CC1;
+ cstate_info.update_wake_mask = 1;
+ mce_update_cstate_info(&cstate_info);
+ }
+
+ /*
+ * Check if we are exiting from deep sleep and restore SE
+ * context if we are.
+ */
+ if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {
+
+ mmio_write_32(TEGRA_SE0_BASE + SE_MUTEX_WATCHDOG_NS_LIMIT,
+ se_regs[0]);
+ mmio_write_32(TEGRA_RNG1_BASE + RNG_MUTEX_WATCHDOG_NS_LIMIT,
+ se_regs[1]);
+ mmio_write_32(TEGRA_PKA1_BASE + PKA_MUTEX_WATCHDOG_NS_LIMIT,
+ se_regs[2]);
+
+ /* Init SMMU */
+ tegra_smmu_init();
+
+ /*
+ * Reset the power state info for the last core doing SC7
+ * entry and exit. We set the deepest power states (CC7
+ * and SC7) for SC7 entry, which may not be what the
+ * non-secure SW controlling idle states requests.
+ */
+ cstate_info.cluster = TEGRA_ARI_CLUSTER_CC7;
+ cstate_info.system = TEGRA_ARI_SYSTEM_SC1;
+ cstate_info.update_wake_mask = 1;
+ mce_update_cstate_info(&cstate_info);
+ }
+
+ return PSCI_E_SUCCESS;
+}
+
+int tegra_soc_pwr_domain_off(const psci_power_state_t *target_state)
+{
+ int impl = (read_midr() >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK;
+
+ /* Disable Denver's DCO operations */
+ if (impl == DENVER_IMPL)
+ denver_disable_dco();
+
+ /* Turn off CPU */
+ (void)mce_command_handler(MCE_CMD_ENTER_CSTATE, TEGRA_ARI_CORE_C7,
+ MCE_CORE_SLEEP_TIME_INFINITE, 0);
+
+ return PSCI_E_SUCCESS;
+}
+
+__dead2 void tegra_soc_prepare_system_off(void)
+{
+ mce_cstate_info_t cstate_info = { 0 };
+ uint32_t val;
+
+ if (tegra186_system_powerdn_state == TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_POWER_OFF) {
+
+ /* power off the entire system */
+ mce_enter_ccplex_state(tegra186_system_powerdn_state);
+
+ } else if (tegra186_system_powerdn_state == TEGRA_ARI_SYSTEM_SC8) {
+
+ /* Prepare for quasi power down */
+ cstate_info.cluster = TEGRA_ARI_CLUSTER_CC7;
+ cstate_info.system = TEGRA_ARI_SYSTEM_SC8;
+ cstate_info.system_state_force = 1;
+ cstate_info.update_wake_mask = 1;
+ mce_update_cstate_info(&cstate_info);
+
+ /* loop until other CPUs power down */
+ do {
+ val = mce_command_handler(MCE_CMD_IS_SC7_ALLOWED,
+ TEGRA_ARI_CORE_C7,
+ MCE_CORE_SLEEP_TIME_INFINITE,
+ 0);
+ } while (val == 0);
+
+ /* Enter quasi power down state */
+ (void)mce_command_handler(MCE_CMD_ENTER_CSTATE,
+ TEGRA_ARI_CORE_C7, MCE_CORE_SLEEP_TIME_INFINITE, 0);
+
+ /* disable GICC */
+ tegra_gic_cpuif_deactivate();
+
+ /* power down core */
+ prepare_cpu_pwr_dwn();
+
+ /* flush L1/L2 data caches */
+ dcsw_op_all(DCCISW);
+
+ } else {
+ ERROR("%s: unsupported power down state (%d)\n", __func__,
+ tegra186_system_powerdn_state);
+ }
+
+ wfi();
+
+ /* wait for the system to power down */
+ for (;;) {
+ ;
+ }
+}
+
+int tegra_soc_prepare_system_reset(void)
+{
+ mce_enter_ccplex_state(TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_REBOOT);
+
+ return PSCI_E_SUCCESS;
+}
diff --git a/plat/nvidia/tegra/soc/t186/plat_secondary.c b/plat/nvidia/tegra/soc/t186/plat_secondary.c
new file mode 100644
index 00000000..52daab21
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t186/plat_secondary.c
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <debug.h>
+#include <mce.h>
+#include <mmio.h>
+#include <string.h>
+#include <tegra_def.h>
+#include <tegra_private.h>
+
+#define MISCREG_CPU_RESET_VECTOR 0x2000
+#define MISCREG_AA64_RST_LOW 0x2004
+#define MISCREG_AA64_RST_HIGH 0x2008
+
+#define SCRATCH_SECURE_RSV1_SCRATCH_0 0x658
+#define SCRATCH_SECURE_RSV1_SCRATCH_1 0x65C
+
+#define CPU_RESET_MODE_AA64 1
+
+extern uint64_t tegra_bl31_phys_base;
+extern uint64_t __tegra186_cpu_reset_handler_end;
+
+/*******************************************************************************
+ * Setup secondary CPU vectors
+ ******************************************************************************/
+void plat_secondary_setup(void)
+{
+ uint32_t addr_low, addr_high;
+ plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
+ uint64_t cpu_reset_handler_base;
+
+ INFO("Setting up secondary CPU boot\n");
+
+ if ((tegra_bl31_phys_base >= TEGRA_TZRAM_BASE) &&
+ (tegra_bl31_phys_base <= (TEGRA_TZRAM_BASE + TEGRA_TZRAM_SIZE))) {
+
+ /*
+ * The BL31 code resides in the TZSRAM which loses state
+ * when we enter System Suspend. Copy the wakeup trampoline
+ * code to TZDRAM to help us exit from System Suspend.
+ */
+ cpu_reset_handler_base = params_from_bl2->tzdram_base;
+ memcpy16((void *)((uintptr_t)cpu_reset_handler_base),
+ (void *)(uintptr_t)tegra186_cpu_reset_handler,
+ (uintptr_t)&__tegra186_cpu_reset_handler_end -
+ (uintptr_t)tegra186_cpu_reset_handler);
+
+ } else {
+ cpu_reset_handler_base = (uintptr_t)tegra_secure_entrypoint;
+ }
+
+ addr_low = (uint32_t)cpu_reset_handler_base | CPU_RESET_MODE_AA64;
+ addr_high = (uint32_t)((cpu_reset_handler_base >> 32) & 0x7ff);
+
+ /* write lower 32 bits first, then the upper 11 bits */
+ mmio_write_32(TEGRA_MISC_BASE + MISCREG_AA64_RST_LOW, addr_low);
+ mmio_write_32(TEGRA_MISC_BASE + MISCREG_AA64_RST_HIGH, addr_high);
+
+ /* save reset vector to be used during SYSTEM_SUSPEND exit */
+ mmio_write_32(TEGRA_SCRATCH_BASE + SCRATCH_SECURE_RSV1_SCRATCH_0,
+ addr_low);
+ mmio_write_32(TEGRA_SCRATCH_BASE + SCRATCH_SECURE_RSV1_SCRATCH_1,
+ addr_high);
+
+ /* update reset vector address to the CCPLEX */
+ mce_update_reset_vector();
+}
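
As a worked example of the split above, a handler copied to a hypothetical TZDRAM base of 0x30000000 programs the two registers as follows:

/* Worked example with a placeholder handler address of 0x30000000. */
static void example_reset_vector_split(void)
{
	uint64_t handler = 0x30000000ULL;			/* placeholder */
	uint32_t lo = (uint32_t)handler | CPU_RESET_MODE_AA64;	/* 0x30000001 */
	uint32_t hi = (uint32_t)((handler >> 32) & 0x7ff);	/* 0x0 */

	mmio_write_32(TEGRA_MISC_BASE + MISCREG_AA64_RST_LOW, lo);
	mmio_write_32(TEGRA_MISC_BASE + MISCREG_AA64_RST_HIGH, hi);
}
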
diff --git a/plat/nvidia/tegra/soc/t186/plat_setup.c b/plat/nvidia/tegra/soc/t186/plat_setup.c
new file mode 100644
index 00000000..ba245790
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t186/plat_setup.c
@@ -0,0 +1,276 @@
+/*
+ * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <bl31.h>
+#include <bl_common.h>
+#include <console.h>
+#include <context.h>
+#include <context_mgmt.h>
+#include <cortex_a57.h>
+#include <debug.h>
+#include <denver.h>
+#include <interrupt_mgmt.h>
+#include <mce.h>
+#include <platform.h>
+#include <tegra_def.h>
+#include <tegra_platform.h>
+#include <tegra_private.h>
+#include <xlat_tables.h>
+
+DEFINE_RENAME_SYSREG_RW_FUNCS(l2ctlr_el1, CORTEX_A57_L2CTLR_EL1)
+extern uint64_t tegra_enable_l2_ecc_parity_prot;
+
+/*******************************************************************************
+ * Tegra186 CPU numbers in cluster #0
+ *******************************************************************************
+ */
+#define TEGRA186_CLUSTER0_CORE2 2
+#define TEGRA186_CLUSTER0_CORE3 3
+
+/*******************************************************************************
+ * The Tegra power domain tree has a single system level power domain i.e. a
+ * single root node. The first entry in the power domain descriptor specifies
+ * the number of power domains at the highest power level.
+ *******************************************************************************
+ */
+const unsigned char tegra_power_domain_tree_desc[] = {
+ /* No of root nodes */
+ 1,
+ /* No of clusters */
+ PLATFORM_CLUSTER_COUNT,
+ /* No of CPU cores - cluster0 */
+ PLATFORM_MAX_CPUS_PER_CLUSTER,
+ /* No of CPU cores - cluster1 */
+ PLATFORM_MAX_CPUS_PER_CLUSTER
+};
+
+/*
+ * Table of regions to map using the MMU.
+ */
+static const mmap_region_t tegra_mmap[] = {
+ MAP_REGION_FLAT(TEGRA_MISC_BASE, 0x10000, /* 64KB */
+ MT_DEVICE | MT_RW | MT_SECURE),
+ MAP_REGION_FLAT(TEGRA_TSA_BASE, 0x20000, /* 128KB */
+ MT_DEVICE | MT_RW | MT_SECURE),
+ MAP_REGION_FLAT(TEGRA_MC_STREAMID_BASE, 0x10000, /* 64KB */
+ MT_DEVICE | MT_RW | MT_SECURE),
+ MAP_REGION_FLAT(TEGRA_MC_BASE, 0x10000, /* 64KB */
+ MT_DEVICE | MT_RW | MT_SECURE),
+ MAP_REGION_FLAT(TEGRA_UARTA_BASE, 0x20000, /* 128KB - UART A, B*/
+ MT_DEVICE | MT_RW | MT_SECURE),
+ MAP_REGION_FLAT(TEGRA_UARTC_BASE, 0x20000, /* 128KB - UART C, G */
+ MT_DEVICE | MT_RW | MT_SECURE),
+ MAP_REGION_FLAT(TEGRA_UARTD_BASE, 0x30000, /* 192KB - UART D, E, F */
+ MT_DEVICE | MT_RW | MT_SECURE),
+ MAP_REGION_FLAT(TEGRA_FUSE_BASE, 0x10000, /* 64KB */
+ MT_DEVICE | MT_RW | MT_SECURE),
+ MAP_REGION_FLAT(TEGRA_GICD_BASE, 0x20000, /* 128KB */
+ MT_DEVICE | MT_RW | MT_SECURE),
+ MAP_REGION_FLAT(TEGRA_SE0_BASE, 0x10000, /* 64KB */
+ MT_DEVICE | MT_RW | MT_SECURE),
+ MAP_REGION_FLAT(TEGRA_PKA1_BASE, 0x10000, /* 64KB */
+ MT_DEVICE | MT_RW | MT_SECURE),
+ MAP_REGION_FLAT(TEGRA_RNG1_BASE, 0x10000, /* 64KB */
+ MT_DEVICE | MT_RW | MT_SECURE),
+ MAP_REGION_FLAT(TEGRA_CAR_RESET_BASE, 0x10000, /* 64KB */
+ MT_DEVICE | MT_RW | MT_SECURE),
+ MAP_REGION_FLAT(TEGRA_PMC_BASE, 0x40000, /* 256KB */
+ MT_DEVICE | MT_RW | MT_SECURE),
+ MAP_REGION_FLAT(TEGRA_SCRATCH_BASE, 0x10000, /* 64KB */
+ MT_DEVICE | MT_RW | MT_SECURE),
+ MAP_REGION_FLAT(TEGRA_MMCRAB_BASE, 0x60000, /* 384KB */
+ MT_DEVICE | MT_RW | MT_SECURE),
+ MAP_REGION_FLAT(TEGRA_ARM_ACTMON_CTR_BASE, 0x20000, /* 128KB - ARM/Denver */
+ MT_DEVICE | MT_RW | MT_SECURE),
+ MAP_REGION_FLAT(TEGRA_SMMU0_BASE, 0x1000000, /* 16MB */
+ MT_DEVICE | MT_RW | MT_SECURE),
+ {0}
+};
+
+/*******************************************************************************
+ * Set up the pagetables as per the platform memory map & initialize the MMU
+ ******************************************************************************/
+const mmap_region_t *plat_get_mmio_map(void)
+{
+ /* MMIO space */
+ return tegra_mmap;
+}
+
+/*******************************************************************************
+ * Handler to get the System Counter Frequency
+ ******************************************************************************/
+unsigned int plat_get_syscnt_freq2(void)
+{
+ return 31250000;
+}
+
+/*******************************************************************************
+ * Maximum supported UART controllers
+ ******************************************************************************/
+#define TEGRA186_MAX_UART_PORTS 7
+
+/*******************************************************************************
+ * This variable holds the UART port base addresses
+ ******************************************************************************/
+static uint32_t tegra186_uart_addresses[TEGRA186_MAX_UART_PORTS + 1] = {
+ 0, /* undefined - treated as an error case */
+ TEGRA_UARTA_BASE,
+ TEGRA_UARTB_BASE,
+ TEGRA_UARTC_BASE,
+ TEGRA_UARTD_BASE,
+ TEGRA_UARTE_BASE,
+ TEGRA_UARTF_BASE,
+ TEGRA_UARTG_BASE,
+};
+
+/*******************************************************************************
+ * Retrieve the UART controller base to be used as the console
+ ******************************************************************************/
+uint32_t plat_get_console_from_id(int id)
+{
+ if (id > TEGRA186_MAX_UART_PORTS)
+ return 0;
+
+ return tegra186_uart_addresses[id];
+}
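
A caller should treat the returned 0 as "no console selected". A hedged sketch of such use, assuming the generic console_init() interface with placeholder clock and baud-rate values:

/* Sketch only: resolve and register a console for a boot-arg UART id. */
static void example_setup_console(int uart_id)
{
	uint32_t base = plat_get_console_from_id(uart_id);

	if (base == 0U) {
		return;		/* no valid UART selected */
	}

	/* 408 MHz UART clock and 115200 baud are placeholder values */
	(void)console_init(base, 408000000U, 115200U);
}
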
+
+/* represent chip-version as concatenation of major (15:12), minor (11:8) and subrev (7:0) */
+#define TEGRA186_VER_A02P 0x1201
+
+/*******************************************************************************
+ * Handler for early platform setup
+ ******************************************************************************/
+void plat_early_platform_setup(void)
+{
+ int impl = (read_midr() >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK;
+ uint32_t chip_subrev, val;
+
+ /* sanity check MCE firmware compatibility */
+ mce_verify_firmware_version();
+
+ /*
+ * Enable ECC and Parity Protection for Cortex-A57 CPUs
+ * for Tegra A02p SKUs
+ */
+ if (impl != DENVER_IMPL) {
+
+ /* get the major, minor and sub-version values */
+ chip_subrev = mmio_read_32(TEGRA_FUSE_BASE + OPT_SUBREVISION) &
+ SUBREVISION_MASK;
+
+ /* prepare chip version number */
+ val = (tegra_get_chipid_major() << 12) |
+ (tegra_get_chipid_minor() << 8) |
+ chip_subrev;
+
+ /* enable L2 ECC for Tegra186 A02P and beyond */
+ if (val >= TEGRA186_VER_A02P) {
+
+ val = read_l2ctlr_el1();
+ val |= CORTEX_A57_L2_ECC_PARITY_PROTECTION_BIT;
+ write_l2ctlr_el1(val);
+
+ /*
+ * Set the flag to enable ECC/Parity Protection
+ * when we exit System Suspend or Cluster Powerdn
+ */
+ tegra_enable_l2_ecc_parity_prot = 1;
+ }
+ }
+}
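
Decomposing the TEGRA186_VER_A02P constant shows how the comparison above works:

/*
 * Worked example of the chip-version packing used above:
 *   major  = 0x1  -> bits 15:12
 *   minor  = 0x2  -> bits 11:8
 *   subrev = 0x01 -> bits 7:0
 *   val    = (0x1 << 12) | (0x2 << 8) | 0x01 = 0x1201 = TEGRA186_VER_A02P
 */
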
+
+/* Secure IRQs for Tegra186 */
+static const irq_sec_cfg_t tegra186_sec_irqs[] = {
+ {
+ TEGRA186_TOP_WDT_IRQ,
+ TEGRA186_SEC_IRQ_TARGET_MASK,
+ INTR_TYPE_EL3,
+ },
+ {
+ TEGRA186_AON_WDT_IRQ,
+ TEGRA186_SEC_IRQ_TARGET_MASK,
+ INTR_TYPE_EL3,
+ },
+};
+
+/*******************************************************************************
+ * Initialize the GIC and SGIs
+ ******************************************************************************/
+void plat_gic_setup(void)
+{
+ tegra_gic_setup(tegra186_sec_irqs,
+ sizeof(tegra186_sec_irqs) / sizeof(tegra186_sec_irqs[0]));
+
+ /*
+ * Initialize the FIQ handler only if the platform supports any
+ * FIQ interrupt sources.
+ */
+ if (sizeof(tegra186_sec_irqs) > 0)
+ tegra_fiq_handler_setup();
+}
+
+/*******************************************************************************
+ * Return pointer to the BL31 params from previous bootloader
+ ******************************************************************************/
+bl31_params_t *plat_get_bl31_params(void)
+{
+ uint32_t val;
+
+ val = mmio_read_32(TEGRA_SCRATCH_BASE + SECURE_SCRATCH_RSV53_LO);
+
+ return (bl31_params_t *)(uintptr_t)val;
+}
+
+/*******************************************************************************
+ * Return pointer to the BL31 platform params from previous bootloader
+ ******************************************************************************/
+plat_params_from_bl2_t *plat_get_bl31_plat_params(void)
+{
+ uint32_t val;
+
+ val = mmio_read_32(TEGRA_SCRATCH_BASE + SECURE_SCRATCH_RSV53_HI);
+
+ return (plat_params_from_bl2_t *)(uintptr_t)val;
+}
+
+/*******************************************************************************
+ * This function implements a part of the critical interface between the psci
+ * generic layer and the platform that allows the former to query the platform
+ * to convert an MPIDR to a unique linear index. An error code (-1) is returned
+ * in case the MPIDR is invalid.
+ ******************************************************************************/
+int plat_core_pos_by_mpidr(u_register_t mpidr)
+{
+ unsigned int cluster_id, cpu_id, pos;
+
+ cluster_id = (mpidr >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK;
+ cpu_id = (mpidr >> MPIDR_AFF0_SHIFT) & MPIDR_AFFLVL_MASK;
+
+ /*
+ * Validate cluster_id by checking whether it represents
+ * one of the two clusters present on the platform.
+ */
+ if (cluster_id >= PLATFORM_CLUSTER_COUNT)
+ return PSCI_E_NOT_PRESENT;
+
+ /*
+ * Validate cpu_id by checking whether it represents a CPU in
+ * one of the two clusters present on the platform.
+ */
+ if (cpu_id >= PLATFORM_MAX_CPUS_PER_CLUSTER)
+ return PSCI_E_NOT_PRESENT;
+
+ /* calculate the core position */
+ pos = cpu_id + (cluster_id << 2);
+
+ /* check for non-existent CPUs */
+ if (pos == TEGRA186_CLUSTER0_CORE2 || pos == TEGRA186_CLUSTER0_CORE3)
+ return PSCI_E_NOT_PRESENT;
+
+ return pos;
+}
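
Worked examples of the mapping, following pos = cpu_id + (cluster_id << 2), with cluster 0 being the two-core cluster guarded by TEGRA186_CLUSTER0_CORE2/3:

/*
 * Aff1 (cluster)  Aff0 (cpu)  pos  result
 *      0              0        0   valid
 *      0              1        1   valid
 *      0              2        2   PSCI_E_NOT_PRESENT (non-existent core)
 *      1              0        4   valid
 *      1              3        7   valid
 */
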
diff --git a/plat/nvidia/tegra/soc/t186/plat_sip_calls.c b/plat/nvidia/tegra/soc/t186/plat_sip_calls.c
new file mode 100644
index 00000000..dfe1c7db
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t186/plat_sip_calls.c
@@ -0,0 +1,180 @@
+/*
+ * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <bl_common.h>
+#include <context_mgmt.h>
+#include <debug.h>
+#include <denver.h>
+#include <errno.h>
+#include <mce.h>
+#include <memctrl.h>
+#include <runtime_svc.h>
+#include <t18x_ari.h>
+#include <tegra_private.h>
+
+extern uint32_t tegra186_system_powerdn_state;
+
+/*******************************************************************************
+ * Offset to read the ref_clk counter value
+ ******************************************************************************/
+#define REF_CLK_OFFSET 4
+
+/*******************************************************************************
+ * Tegra186 SiP SMCs
+ ******************************************************************************/
+#define TEGRA_SIP_SYSTEM_SHUTDOWN_STATE 0xC2FFFE01
+#define TEGRA_SIP_GET_ACTMON_CLK_COUNTERS 0xC2FFFE02
+#define TEGRA_SIP_MCE_CMD_ENTER_CSTATE 0xC2FFFF00
+#define TEGRA_SIP_MCE_CMD_UPDATE_CSTATE_INFO 0xC2FFFF01
+#define TEGRA_SIP_MCE_CMD_UPDATE_CROSSOVER_TIME 0xC2FFFF02
+#define TEGRA_SIP_MCE_CMD_READ_CSTATE_STATS 0xC2FFFF03
+#define TEGRA_SIP_MCE_CMD_WRITE_CSTATE_STATS 0xC2FFFF04
+#define TEGRA_SIP_MCE_CMD_IS_SC7_ALLOWED 0xC2FFFF05
+#define TEGRA_SIP_MCE_CMD_ONLINE_CORE 0xC2FFFF06
+#define TEGRA_SIP_MCE_CMD_CC3_CTRL 0xC2FFFF07
+#define TEGRA_SIP_MCE_CMD_ECHO_DATA 0xC2FFFF08
+#define TEGRA_SIP_MCE_CMD_READ_VERSIONS 0xC2FFFF09
+#define TEGRA_SIP_MCE_CMD_ENUM_FEATURES 0xC2FFFF0A
+#define TEGRA_SIP_MCE_CMD_ROC_FLUSH_CACHE_TRBITS 0xC2FFFF0B
+#define TEGRA_SIP_MCE_CMD_ENUM_READ_MCA 0xC2FFFF0C
+#define TEGRA_SIP_MCE_CMD_ENUM_WRITE_MCA 0xC2FFFF0D
+#define TEGRA_SIP_MCE_CMD_ROC_FLUSH_CACHE 0xC2FFFF0E
+#define TEGRA_SIP_MCE_CMD_ROC_CLEAN_CACHE 0xC2FFFF0F
+#define TEGRA_SIP_MCE_CMD_ENABLE_LATIC 0xC2FFFF10
+#define TEGRA_SIP_MCE_CMD_UNCORE_PERFMON_REQ 0xC2FFFF11
+#define TEGRA_SIP_MCE_CMD_MISC_CCPLEX 0xC2FFFF12
+
+/*******************************************************************************
+ * This function is responsible for handling all T186 SiP calls
+ ******************************************************************************/
+int plat_sip_handler(uint32_t smc_fid,
+ uint64_t x1,
+ uint64_t x2,
+ uint64_t x3,
+ uint64_t x4,
+ void *cookie,
+ void *handle,
+ uint64_t flags)
+{
+ int mce_ret;
+ int impl, cpu;
+ uint32_t base, core_clk_ctr, ref_clk_ctr;
+
+ if (((smc_fid >> FUNCID_CC_SHIFT) & FUNCID_CC_MASK) == SMC_32) {
+ /* 32-bit function, clear top parameter bits */
+
+ x1 = (uint32_t)x1;
+ x2 = (uint32_t)x2;
+ x3 = (uint32_t)x3;
+ }
+
+ /*
+ * Convert SMC FID to SMC64, to support SMC32/SMC64 configurations
+ */
+ smc_fid |= (SMC_64 << FUNCID_CC_SHIFT);
+
+ switch (smc_fid) {
+ /*
+ * Micro Coded Engine (MCE) commands reside in the 0x82FFFF00 -
+ * 0x82FFFFFF SiP SMC space
+ */
+ case TEGRA_SIP_MCE_CMD_ENTER_CSTATE:
+ case TEGRA_SIP_MCE_CMD_UPDATE_CSTATE_INFO:
+ case TEGRA_SIP_MCE_CMD_UPDATE_CROSSOVER_TIME:
+ case TEGRA_SIP_MCE_CMD_READ_CSTATE_STATS:
+ case TEGRA_SIP_MCE_CMD_WRITE_CSTATE_STATS:
+ case TEGRA_SIP_MCE_CMD_IS_SC7_ALLOWED:
+ case TEGRA_SIP_MCE_CMD_CC3_CTRL:
+ case TEGRA_SIP_MCE_CMD_ECHO_DATA:
+ case TEGRA_SIP_MCE_CMD_READ_VERSIONS:
+ case TEGRA_SIP_MCE_CMD_ENUM_FEATURES:
+ case TEGRA_SIP_MCE_CMD_ROC_FLUSH_CACHE_TRBITS:
+ case TEGRA_SIP_MCE_CMD_ENUM_READ_MCA:
+ case TEGRA_SIP_MCE_CMD_ENUM_WRITE_MCA:
+ case TEGRA_SIP_MCE_CMD_ROC_FLUSH_CACHE:
+ case TEGRA_SIP_MCE_CMD_ROC_CLEAN_CACHE:
+ case TEGRA_SIP_MCE_CMD_ENABLE_LATIC:
+ case TEGRA_SIP_MCE_CMD_UNCORE_PERFMON_REQ:
+ case TEGRA_SIP_MCE_CMD_MISC_CCPLEX:
+
+ /* clean up the high bits */
+ smc_fid &= MCE_CMD_MASK;
+
+ /* execute the command and store the result */
+ mce_ret = mce_command_handler(smc_fid, x1, x2, x3);
+ write_ctx_reg(get_gpregs_ctx(handle), CTX_GPREG_X0,
+ (uint64_t)mce_ret);
+
+ return 0;
+
+ case TEGRA_SIP_SYSTEM_SHUTDOWN_STATE:
+
+ /* clean up the high bits */
+ x1 = (uint32_t)x1;
+
+ /*
+ * SC8 is a special Tegra186 system state where the CPUs and
+ * DRAM are powered down but the other subsystems are still
+ * alive.
+ */
+ if ((x1 == TEGRA_ARI_SYSTEM_SC8) ||
+ (x1 == TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_POWER_OFF)) {
+
+ tegra186_system_powerdn_state = x1;
+ flush_dcache_range(
+ (uintptr_t)&tegra186_system_powerdn_state,
+ sizeof(tegra186_system_powerdn_state));
+
+ } else {
+
+ ERROR("%s: unhandled powerdn state (%d)\n", __func__,
+ (uint32_t)x1);
+ return -ENOTSUP;
+ }
+
+ return 0;
+
+ /*
+ * This function ID reads the Activity monitor's core/ref clock
+ * counter values for a core/cluster.
+ *
+ * x1 = MPIDR of the target core
+ * x2 = MIDR of the target core
+ */
+ case TEGRA_SIP_GET_ACTMON_CLK_COUNTERS:
+
+ cpu = (uint32_t)x1 & MPIDR_CPU_MASK;
+ impl = ((uint32_t)x2 >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK;
+
+ /* sanity check target CPU number */
+ if (cpu > PLATFORM_MAX_CPUS_PER_CLUSTER)
+ return -EINVAL;
+
+ /* get the base address for the current CPU */
+ base = (impl == DENVER_IMPL) ? TEGRA_DENVER_ACTMON_CTR_BASE :
+ TEGRA_ARM_ACTMON_CTR_BASE;
+
+ /* read the clock counter values */
+ core_clk_ctr = mmio_read_32(base + (8 * cpu));
+ ref_clk_ctr = mmio_read_32(base + (8 * cpu) + REF_CLK_OFFSET);
+
+ /* return the counter values as two different parameters */
+ write_ctx_reg(get_gpregs_ctx(handle), CTX_GPREG_X1,
+ (uint64_t)core_clk_ctr);
+ write_ctx_reg(get_gpregs_ctx(handle), CTX_GPREG_X2,
+ (uint64_t)ref_clk_ctr);
+
+ return 0;
+
+ default:
+ break;
+ }
+
+ return -ENOTSUP;
+}
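
The SMC32-to-SMC64 promotion above means both calling conventions land on the same case labels; for example:

/*
 * SMC32 FID 0x82FFFF03 | (SMC_64 << FUNCID_CC_SHIFT)
 *   = 0x82FFFF03 | 0x40000000
 *   = 0xC2FFFF03 (TEGRA_SIP_MCE_CMD_READ_CSTATE_STATS)
 */
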
diff --git a/plat/nvidia/tegra/soc/t186/plat_smmu.c b/plat/nvidia/tegra/soc/t186/plat_smmu.c
new file mode 100644
index 00000000..4a8e1bee
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t186/plat_smmu.c
@@ -0,0 +1,326 @@
+/*
+ * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <bl_common.h>
+#include <smmu.h>
+#include <tegra_def.h>
+
+/*******************************************************************************
+ * Array to hold SMMU context for Tegra186
+ ******************************************************************************/
+static __attribute__((aligned(16))) smmu_regs_t tegra186_smmu_context[] = {
+ _START_OF_TABLE_,
+ mc_make_sid_security_cfg(SCEW),
+ mc_make_sid_security_cfg(AFIR),
+ mc_make_sid_security_cfg(NVDISPLAYR1),
+ mc_make_sid_security_cfg(XUSB_DEVR),
+ mc_make_sid_security_cfg(VICSRD1),
+ mc_make_sid_security_cfg(NVENCSWR),
+ mc_make_sid_security_cfg(TSECSRDB),
+ mc_make_sid_security_cfg(AXISW),
+ mc_make_sid_security_cfg(SDMMCWAB),
+ mc_make_sid_security_cfg(AONDMAW),
+ mc_make_sid_security_cfg(GPUSWR2),
+ mc_make_sid_security_cfg(SATAW),
+ mc_make_sid_security_cfg(UFSHCW),
+ mc_make_sid_security_cfg(AFIW),
+ mc_make_sid_security_cfg(SDMMCR),
+ mc_make_sid_security_cfg(SCEDMAW),
+ mc_make_sid_security_cfg(UFSHCR),
+ mc_make_sid_security_cfg(SDMMCWAA),
+ mc_make_sid_security_cfg(APEDMAW),
+ mc_make_sid_security_cfg(SESWR),
+ mc_make_sid_security_cfg(MPCORER),
+ mc_make_sid_security_cfg(PTCR),
+ mc_make_sid_security_cfg(BPMPW),
+ mc_make_sid_security_cfg(ETRW),
+ mc_make_sid_security_cfg(GPUSRD),
+ mc_make_sid_security_cfg(VICSWR),
+ mc_make_sid_security_cfg(SCEDMAR),
+ mc_make_sid_security_cfg(HDAW),
+ mc_make_sid_security_cfg(ISPWA),
+ mc_make_sid_security_cfg(EQOSW),
+ mc_make_sid_security_cfg(XUSB_HOSTW),
+ mc_make_sid_security_cfg(TSECSWR),
+ mc_make_sid_security_cfg(SDMMCRAA),
+ mc_make_sid_security_cfg(APER),
+ mc_make_sid_security_cfg(VIW),
+ mc_make_sid_security_cfg(APEW),
+ mc_make_sid_security_cfg(AXISR),
+ mc_make_sid_security_cfg(SDMMCW),
+ mc_make_sid_security_cfg(BPMPDMAW),
+ mc_make_sid_security_cfg(ISPRA),
+ mc_make_sid_security_cfg(NVDECSWR),
+ mc_make_sid_security_cfg(XUSB_DEVW),
+ mc_make_sid_security_cfg(NVDECSRD),
+ mc_make_sid_security_cfg(MPCOREW),
+ mc_make_sid_security_cfg(NVDISPLAYR),
+ mc_make_sid_security_cfg(BPMPDMAR),
+ mc_make_sid_security_cfg(NVJPGSWR),
+ mc_make_sid_security_cfg(NVDECSRD1),
+ mc_make_sid_security_cfg(TSECSRD),
+ mc_make_sid_security_cfg(NVJPGSRD),
+ mc_make_sid_security_cfg(SDMMCWA),
+ mc_make_sid_security_cfg(SCER),
+ mc_make_sid_security_cfg(XUSB_HOSTR),
+ mc_make_sid_security_cfg(VICSRD),
+ mc_make_sid_security_cfg(AONDMAR),
+ mc_make_sid_security_cfg(AONW),
+ mc_make_sid_security_cfg(SDMMCRA),
+ mc_make_sid_security_cfg(HOST1XDMAR),
+ mc_make_sid_security_cfg(EQOSR),
+ mc_make_sid_security_cfg(SATAR),
+ mc_make_sid_security_cfg(BPMPR),
+ mc_make_sid_security_cfg(HDAR),
+ mc_make_sid_security_cfg(SDMMCRAB),
+ mc_make_sid_security_cfg(ETRR),
+ mc_make_sid_security_cfg(AONR),
+ mc_make_sid_security_cfg(APEDMAR),
+ mc_make_sid_security_cfg(SESRD),
+ mc_make_sid_security_cfg(NVENCSRD),
+ mc_make_sid_security_cfg(GPUSWR),
+ mc_make_sid_security_cfg(TSECSWRB),
+ mc_make_sid_security_cfg(ISPWB),
+ mc_make_sid_security_cfg(GPUSRD2),
+ mc_make_sid_override_cfg(APER),
+ mc_make_sid_override_cfg(VICSRD),
+ mc_make_sid_override_cfg(NVENCSRD),
+ mc_make_sid_override_cfg(NVJPGSWR),
+ mc_make_sid_override_cfg(AONW),
+ mc_make_sid_override_cfg(BPMPR),
+ mc_make_sid_override_cfg(BPMPW),
+ mc_make_sid_override_cfg(HDAW),
+ mc_make_sid_override_cfg(NVDISPLAYR1),
+ mc_make_sid_override_cfg(APEDMAR),
+ mc_make_sid_override_cfg(AFIR),
+ mc_make_sid_override_cfg(AXISR),
+ mc_make_sid_override_cfg(VICSRD1),
+ mc_make_sid_override_cfg(TSECSRD),
+ mc_make_sid_override_cfg(BPMPDMAW),
+ mc_make_sid_override_cfg(MPCOREW),
+ mc_make_sid_override_cfg(XUSB_HOSTR),
+ mc_make_sid_override_cfg(GPUSWR),
+ mc_make_sid_override_cfg(XUSB_DEVR),
+ mc_make_sid_override_cfg(UFSHCW),
+ mc_make_sid_override_cfg(XUSB_HOSTW),
+ mc_make_sid_override_cfg(SDMMCWAB),
+ mc_make_sid_override_cfg(SATAW),
+ mc_make_sid_override_cfg(SCEDMAR),
+ mc_make_sid_override_cfg(HOST1XDMAR),
+ mc_make_sid_override_cfg(SDMMCWA),
+ mc_make_sid_override_cfg(APEDMAW),
+ mc_make_sid_override_cfg(SESWR),
+ mc_make_sid_override_cfg(AXISW),
+ mc_make_sid_override_cfg(AONDMAW),
+ mc_make_sid_override_cfg(TSECSWRB),
+ mc_make_sid_override_cfg(MPCORER),
+ mc_make_sid_override_cfg(ISPWB),
+ mc_make_sid_override_cfg(AONR),
+ mc_make_sid_override_cfg(BPMPDMAR),
+ mc_make_sid_override_cfg(HDAR),
+ mc_make_sid_override_cfg(SDMMCRA),
+ mc_make_sid_override_cfg(ETRW),
+ mc_make_sid_override_cfg(GPUSWR2),
+ mc_make_sid_override_cfg(EQOSR),
+ mc_make_sid_override_cfg(TSECSWR),
+ mc_make_sid_override_cfg(ETRR),
+ mc_make_sid_override_cfg(NVDECSRD),
+ mc_make_sid_override_cfg(TSECSRDB),
+ mc_make_sid_override_cfg(SDMMCRAA),
+ mc_make_sid_override_cfg(NVDECSRD1),
+ mc_make_sid_override_cfg(SDMMCR),
+ mc_make_sid_override_cfg(NVJPGSRD),
+ mc_make_sid_override_cfg(SCEDMAW),
+ mc_make_sid_override_cfg(SDMMCWAA),
+ mc_make_sid_override_cfg(APEW),
+ mc_make_sid_override_cfg(AONDMAR),
+ mc_make_sid_override_cfg(PTCR),
+ mc_make_sid_override_cfg(SCER),
+ mc_make_sid_override_cfg(ISPRA),
+ mc_make_sid_override_cfg(ISPWA),
+ mc_make_sid_override_cfg(VICSWR),
+ mc_make_sid_override_cfg(SESRD),
+ mc_make_sid_override_cfg(SDMMCW),
+ mc_make_sid_override_cfg(SDMMCRAB),
+ mc_make_sid_override_cfg(EQOSW),
+ mc_make_sid_override_cfg(GPUSRD2),
+ mc_make_sid_override_cfg(SCEW),
+ mc_make_sid_override_cfg(GPUSRD),
+ mc_make_sid_override_cfg(NVDECSWR),
+ mc_make_sid_override_cfg(XUSB_DEVW),
+ mc_make_sid_override_cfg(SATAR),
+ mc_make_sid_override_cfg(NVDISPLAYR),
+ mc_make_sid_override_cfg(VIW),
+ mc_make_sid_override_cfg(UFSHCR),
+ mc_make_sid_override_cfg(NVENCSWR),
+ mc_make_sid_override_cfg(AFIW),
+ smmu_make_gnsr0_nsec_cfg(CR0),
+ smmu_make_gnsr0_sec_cfg(IDR0),
+ smmu_make_gnsr0_sec_cfg(IDR1),
+ smmu_make_gnsr0_sec_cfg(IDR2),
+ smmu_make_gnsr0_nsec_cfg(GFSR),
+ smmu_make_gnsr0_nsec_cfg(GFSYNR0),
+ smmu_make_gnsr0_nsec_cfg(GFSYNR1),
+ smmu_make_gnsr0_nsec_cfg(TLBGSTATUS),
+ smmu_make_gnsr0_nsec_cfg(PIDR2),
+ smmu_make_smrg_group(0),
+ smmu_make_smrg_group(1),
+ smmu_make_smrg_group(2),
+ smmu_make_smrg_group(3),
+ smmu_make_smrg_group(4),
+ smmu_make_smrg_group(5),
+ smmu_make_smrg_group(6),
+ smmu_make_smrg_group(7),
+ smmu_make_smrg_group(8),
+ smmu_make_smrg_group(9),
+ smmu_make_smrg_group(10),
+ smmu_make_smrg_group(11),
+ smmu_make_smrg_group(12),
+ smmu_make_smrg_group(13),
+ smmu_make_smrg_group(14),
+ smmu_make_smrg_group(15),
+ smmu_make_smrg_group(16),
+ smmu_make_smrg_group(17),
+ smmu_make_smrg_group(18),
+ smmu_make_smrg_group(19),
+ smmu_make_smrg_group(20),
+ smmu_make_smrg_group(21),
+ smmu_make_smrg_group(22),
+ smmu_make_smrg_group(23),
+ smmu_make_smrg_group(24),
+ smmu_make_smrg_group(25),
+ smmu_make_smrg_group(26),
+ smmu_make_smrg_group(27),
+ smmu_make_smrg_group(28),
+ smmu_make_smrg_group(29),
+ smmu_make_smrg_group(30),
+ smmu_make_smrg_group(31),
+ smmu_make_smrg_group(32),
+ smmu_make_smrg_group(33),
+ smmu_make_smrg_group(34),
+ smmu_make_smrg_group(35),
+ smmu_make_smrg_group(36),
+ smmu_make_smrg_group(37),
+ smmu_make_smrg_group(38),
+ smmu_make_smrg_group(39),
+ smmu_make_smrg_group(40),
+ smmu_make_smrg_group(41),
+ smmu_make_smrg_group(42),
+ smmu_make_smrg_group(43),
+ smmu_make_smrg_group(44),
+ smmu_make_smrg_group(45),
+ smmu_make_smrg_group(46),
+ smmu_make_smrg_group(47),
+ smmu_make_smrg_group(48),
+ smmu_make_smrg_group(49),
+ smmu_make_smrg_group(50),
+ smmu_make_smrg_group(51),
+ smmu_make_smrg_group(52),
+ smmu_make_smrg_group(53),
+ smmu_make_smrg_group(54),
+ smmu_make_smrg_group(55),
+ smmu_make_smrg_group(56),
+ smmu_make_smrg_group(57),
+ smmu_make_smrg_group(58),
+ smmu_make_smrg_group(59),
+ smmu_make_smrg_group(60),
+ smmu_make_smrg_group(61),
+ smmu_make_smrg_group(62),
+ smmu_make_smrg_group(63),
+ smmu_make_cb_group(0),
+ smmu_make_cb_group(1),
+ smmu_make_cb_group(2),
+ smmu_make_cb_group(3),
+ smmu_make_cb_group(4),
+ smmu_make_cb_group(5),
+ smmu_make_cb_group(6),
+ smmu_make_cb_group(7),
+ smmu_make_cb_group(8),
+ smmu_make_cb_group(9),
+ smmu_make_cb_group(10),
+ smmu_make_cb_group(11),
+ smmu_make_cb_group(12),
+ smmu_make_cb_group(13),
+ smmu_make_cb_group(14),
+ smmu_make_cb_group(15),
+ smmu_make_cb_group(16),
+ smmu_make_cb_group(17),
+ smmu_make_cb_group(18),
+ smmu_make_cb_group(19),
+ smmu_make_cb_group(20),
+ smmu_make_cb_group(21),
+ smmu_make_cb_group(22),
+ smmu_make_cb_group(23),
+ smmu_make_cb_group(24),
+ smmu_make_cb_group(25),
+ smmu_make_cb_group(26),
+ smmu_make_cb_group(27),
+ smmu_make_cb_group(28),
+ smmu_make_cb_group(29),
+ smmu_make_cb_group(30),
+ smmu_make_cb_group(31),
+ smmu_make_cb_group(32),
+ smmu_make_cb_group(33),
+ smmu_make_cb_group(34),
+ smmu_make_cb_group(35),
+ smmu_make_cb_group(36),
+ smmu_make_cb_group(37),
+ smmu_make_cb_group(38),
+ smmu_make_cb_group(39),
+ smmu_make_cb_group(40),
+ smmu_make_cb_group(41),
+ smmu_make_cb_group(42),
+ smmu_make_cb_group(43),
+ smmu_make_cb_group(44),
+ smmu_make_cb_group(45),
+ smmu_make_cb_group(46),
+ smmu_make_cb_group(47),
+ smmu_make_cb_group(48),
+ smmu_make_cb_group(49),
+ smmu_make_cb_group(50),
+ smmu_make_cb_group(51),
+ smmu_make_cb_group(52),
+ smmu_make_cb_group(53),
+ smmu_make_cb_group(54),
+ smmu_make_cb_group(55),
+ smmu_make_cb_group(56),
+ smmu_make_cb_group(57),
+ smmu_make_cb_group(58),
+ smmu_make_cb_group(59),
+ smmu_make_cb_group(60),
+ smmu_make_cb_group(61),
+ smmu_make_cb_group(62),
+ smmu_make_cb_group(63),
+ smmu_bypass_cfg, /* TBU settings */
+ _END_OF_TABLE_,
+};
+
+/*******************************************************************************
+ * Handler to return the pointer to the SMMU's context struct
+ ******************************************************************************/
+smmu_regs_t *plat_get_smmu_ctx(void)
+{
+ /* index of _END_OF_TABLE_ */
+ tegra186_smmu_context[0].val = ARRAY_SIZE(tegra186_smmu_context) - 1;
+
+ return tegra186_smmu_context;
+}
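
A simplified sketch of how a save routine could walk this table, assuming the reg/val pair layout produced by the macros above (not the common driver's exact code):

/* Sketch only: capture the current value of every register in the table. */
static void example_capture_smmu_ctx(void)
{
	smmu_regs_t *ctx = plat_get_smmu_ctx();
	uint32_t i, num_entries = ctx[0].val;

	for (i = 1U; i <= num_entries; i++) {
		ctx[i].val = mmio_read_32(ctx[i].reg);
	}
}
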
diff --git a/plat/nvidia/tegra/soc/t186/plat_trampoline.S b/plat/nvidia/tegra/soc/t186/plat_trampoline.S
new file mode 100644
index 00000000..6a17c332
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t186/plat_trampoline.S
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <common_def.h>
+#include <memctrl_v2.h>
+#include <tegra_def.h>
+
+#define TEGRA186_SMMU_CTX_SIZE 0x420
+
+ .globl tegra186_cpu_reset_handler
+
+/* CPU reset handler routine */
+func tegra186_cpu_reset_handler _align=4
+ /*
+ * The TZRAM loses state during System Suspend. We use this
+ * information to decide if the reset handler is running after a
+ * System Suspend. Resume from system suspend requires restoring
+ * the entire state from TZDRAM to TZRAM.
+ */
+ mov x0, #BL31_BASE
+ ldr x0, [x0]
+ cbnz x0, boot_cpu
+
+ /* resume from system suspend */
+ mov x0, #BL31_BASE
+ adr x1, __tegra186_cpu_reset_handler_end
+ adr x2, __tegra186_cpu_reset_handler_data
+ ldr x2, [x2, #8]
+
+ /* memcpy16 */
+m_loop16:
+ cmp x2, #16
+ b.lt m_loop1
+ ldp x3, x4, [x1], #16
+ stp x3, x4, [x0], #16
+ sub x2, x2, #16
+ b m_loop16
+ /* copy byte per byte */
+m_loop1:
+ cbz x2, boot_cpu
+ ldrb w3, [x1], #1
+ strb w3, [x0], #1
+ subs x2, x2, #1
+ b.ne m_loop1
+
+boot_cpu:
+ adr x0, __tegra186_cpu_reset_handler_data
+ ldr x0, [x0]
+ br x0
+endfunc tegra186_cpu_reset_handler
+
+ /*
+ * Tegra186 reset data (offset 0x0 - 0x430)
+ *
+ * 0x000: secure world's entrypoint
+ * 0x008: BL31 size (RO + RW)
+ * 0x00C: SMMU context start
+ * 0x42C: SMMU context end
+ */
+
+ .align 4
+ .type __tegra186_cpu_reset_handler_data, %object
+ .globl __tegra186_cpu_reset_handler_data
+__tegra186_cpu_reset_handler_data:
+ .quad tegra_secure_entrypoint
+ .quad __BL31_END__ - BL31_BASE
+ .globl __tegra186_smmu_context
+__tegra186_smmu_context:
+ .rept TEGRA186_SMMU_CTX_SIZE
+ .quad 0
+ .endr
+ .size __tegra186_cpu_reset_handler_data, \
+ . - __tegra186_cpu_reset_handler_data
+
+ .align 4
+ .globl __tegra186_cpu_reset_handler_end
+__tegra186_cpu_reset_handler_end:
diff --git a/plat/nvidia/tegra/soc/t186/platform_t186.mk b/plat/nvidia/tegra/soc/t186/platform_t186.mk
new file mode 100644
index 00000000..c9053238
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t186/platform_t186.mk
@@ -0,0 +1,66 @@
+#
+# Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+# platform configs
+ENABLE_AFI_DEVICE := 1
+$(eval $(call add_define,ENABLE_AFI_DEVICE))
+
+ENABLE_ROC_FOR_ORDERING_CLIENT_REQUESTS := 1
+$(eval $(call add_define,ENABLE_ROC_FOR_ORDERING_CLIENT_REQUESTS))
+
+RELOCATE_TO_BL31_BASE := 1
+$(eval $(call add_define,RELOCATE_TO_BL31_BASE))
+
+ENABLE_CHIP_VERIFICATION_HARNESS := 0
+$(eval $(call add_define,ENABLE_CHIP_VERIFICATION_HARNESS))
+
+ENABLE_SMMU_DEVICE := 1
+$(eval $(call add_define,ENABLE_SMMU_DEVICE))
+
+NUM_SMMU_DEVICES := 1
+$(eval $(call add_define,NUM_SMMU_DEVICES))
+
+RESET_TO_BL31 := 1
+
+PROGRAMMABLE_RESET_ADDRESS := 1
+
+COLD_BOOT_SINGLE_CPU := 1
+
+# platform settings
+TZDRAM_BASE := 0x30000000
+$(eval $(call add_define,TZDRAM_BASE))
+
+PLATFORM_CLUSTER_COUNT := 2
+$(eval $(call add_define,PLATFORM_CLUSTER_COUNT))
+
+PLATFORM_MAX_CPUS_PER_CLUSTER := 4
+$(eval $(call add_define,PLATFORM_MAX_CPUS_PER_CLUSTER))
+
+MAX_XLAT_TABLES := 24
+$(eval $(call add_define,MAX_XLAT_TABLES))
+
+MAX_MMAP_REGIONS := 24
+$(eval $(call add_define,MAX_MMAP_REGIONS))
+
+# platform files
+PLAT_INCLUDES += -I${SOC_DIR}/drivers/include
+
+BL31_SOURCES += lib/cpus/aarch64/denver.S \
+ lib/cpus/aarch64/cortex_a57.S \
+ ${COMMON_DIR}/drivers/memctrl/memctrl_v2.c \
+ ${COMMON_DIR}/drivers/smmu/smmu.c \
+ ${SOC_DIR}/drivers/mce/mce.c \
+ ${SOC_DIR}/drivers/mce/ari.c \
+ ${SOC_DIR}/drivers/mce/nvg.c \
+ ${SOC_DIR}/drivers/mce/aarch64/nvg_helpers.S \
+ ${SOC_DIR}/plat_memctrl.c \
+ ${SOC_DIR}/plat_psci_handlers.c \
+ ${SOC_DIR}/plat_setup.c \
+ ${SOC_DIR}/plat_secondary.c \
+ ${SOC_DIR}/plat_sip_calls.c \
+ ${SOC_DIR}/plat_smmu.c \
+ ${SOC_DIR}/plat_trampoline.S
+
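Each add_define call above typically surfaces its make variable as a -D<NAME>=<value> preprocessor define on the compiler command line, so platform C code can consume the values at build time. A small hypothetical illustration; the values are mirrored here only so the snippet stands alone, and PLATFORM_CORE_COUNT is a derived constant shown for the arithmetic:

#include <stdio.h>

/* In the real build these arrive via add_define, e.g. as
 * -DPLATFORM_CLUSTER_COUNT=2; they are mirrored here for a
 * standalone example. */
#ifndef PLATFORM_CLUSTER_COUNT
#define PLATFORM_CLUSTER_COUNT		2
#endif
#ifndef PLATFORM_MAX_CPUS_PER_CLUSTER
#define PLATFORM_MAX_CPUS_PER_CLUSTER	4
#endif

/* Derived constant of the kind the platform headers build from these. */
#define PLATFORM_CORE_COUNT \
	(PLATFORM_CLUSTER_COUNT * PLATFORM_MAX_CPUS_PER_CLUSTER)

int main(void)
{
	printf("maximum cores provisioned: %d\n", PLATFORM_CORE_COUNT);
	return 0;
}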
diff --git a/plat/nvidia/tegra/soc/t210/plat_psci_handlers.c b/plat/nvidia/tegra/soc/t210/plat_psci_handlers.c
new file mode 100644
index 00000000..f77746ca
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t210/plat_psci_handlers.c
@@ -0,0 +1,238 @@
+/*
+ * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <debug.h>
+#include <delay_timer.h>
+#include <flowctrl.h>
+#include <mmio.h>
+#include <platform.h>
+#include <platform_def.h>
+#include <pmc.h>
+#include <psci.h>
+#include <tegra_def.h>
+#include <tegra_private.h>
+
+/*
+ * Register used to clear CPU reset signals. Each CPU has two reset
+ * signals: CPU reset (3:0) and Core reset (19:16).
+ */
+#define CPU_CMPLX_RESET_CLR 0x454
+#define CPU_CORE_RESET_MASK 0x10001
+
+/* Clock and Reset controller registers for system clock's settings */
+#define SCLK_RATE 0x30
+#define SCLK_BURST_POLICY 0x28
+#define SCLK_BURST_POLICY_DEFAULT 0x10000000
+
+static int cpu_powergate_mask[PLATFORM_MAX_CPUS_PER_CLUSTER];
+
+int32_t tegra_soc_validate_power_state(unsigned int power_state,
+ psci_power_state_t *req_state)
+{
+ int state_id = psci_get_pstate_id(power_state);
+
+ /* Sanity check the requested state id */
+ switch (state_id) {
+ case PSTATE_ID_CORE_POWERDN:
+ /*
+ * Core powerdown request only for afflvl 0
+ */
+ req_state->pwr_domain_state[MPIDR_AFFLVL0] = state_id & 0xff;
+
+ break;
+
+ case PSTATE_ID_CLUSTER_IDLE:
+ case PSTATE_ID_CLUSTER_POWERDN:
+ /*
+ * Cluster powerdown/idle request only for afflvl 1
+ */
+ req_state->pwr_domain_state[MPIDR_AFFLVL1] = state_id;
+ req_state->pwr_domain_state[MPIDR_AFFLVL0] = state_id;
+
+ break;
+
+ case PSTATE_ID_SOC_POWERDN:
+ /*
+ * System powerdown request only for afflvl 2
+ */
+ for (uint32_t i = MPIDR_AFFLVL0; i < PLAT_MAX_PWR_LVL; i++)
+ req_state->pwr_domain_state[i] = PLAT_MAX_OFF_STATE;
+
+ req_state->pwr_domain_state[PLAT_MAX_PWR_LVL] =
+ PLAT_SYS_SUSPEND_STATE_ID;
+
+ break;
+
+ default:
+ ERROR("%s: unsupported state id (%d)\n", __func__, state_id);
+ return PSCI_E_INVALID_PARAMS;
+ }
+
+ return PSCI_E_SUCCESS;
+}
+
+/*******************************************************************************
+ * Platform handler to calculate the proper target power level at the
+ * specified affinity level
+ ******************************************************************************/
+plat_local_state_t tegra_soc_get_target_pwr_state(unsigned int lvl,
+ const plat_local_state_t *states,
+ unsigned int ncpu)
+{
+ plat_local_state_t target = *states;
+ int cpu = plat_my_core_pos();
+ int core_pos = read_mpidr() & MPIDR_CPU_MASK;
+
+ /* get the power state at this level */
+ if (lvl == MPIDR_AFFLVL1)
+ target = *(states + core_pos);
+ if (lvl == MPIDR_AFFLVL2)
+ target = *(states + cpu);
+
+ /* Cluster idle/power-down */
+ if ((lvl == MPIDR_AFFLVL1) && ((target == PSTATE_ID_CLUSTER_IDLE) ||
+ (target == PSTATE_ID_CLUSTER_POWERDN))) {
+ return target;
+ }
+
+ /* System Suspend */
+ if (((lvl == MPIDR_AFFLVL2) || (lvl == MPIDR_AFFLVL1)) &&
+ (target == PSTATE_ID_SOC_POWERDN))
+ return PSTATE_ID_SOC_POWERDN;
+
+ /* default state */
+ return PSCI_LOCAL_STATE_RUN;
+}
+
+int tegra_soc_pwr_domain_suspend(const psci_power_state_t *target_state)
+{
+ u_register_t mpidr = read_mpidr();
+ const plat_local_state_t *pwr_domain_state =
+ target_state->pwr_domain_state;
+ unsigned int stateid_afflvl2 = pwr_domain_state[MPIDR_AFFLVL2];
+ unsigned int stateid_afflvl1 = pwr_domain_state[MPIDR_AFFLVL1];
+ unsigned int stateid_afflvl0 = pwr_domain_state[MPIDR_AFFLVL0];
+
+ if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {
+
+ assert((stateid_afflvl0 == PLAT_MAX_OFF_STATE) ||
+ (stateid_afflvl0 == PSTATE_ID_SOC_POWERDN));
+ assert((stateid_afflvl1 == PLAT_MAX_OFF_STATE) ||
+ (stateid_afflvl1 == PSTATE_ID_SOC_POWERDN));
+
+ /* suspend the entire soc */
+ tegra_fc_soc_powerdn(mpidr);
+
+ } else if (stateid_afflvl1 == PSTATE_ID_CLUSTER_IDLE) {
+
+ assert(stateid_afflvl0 == PSTATE_ID_CLUSTER_IDLE);
+
+ /* Prepare for cluster idle */
+ tegra_fc_cluster_idle(mpidr);
+
+ } else if (stateid_afflvl1 == PSTATE_ID_CLUSTER_POWERDN) {
+
+ assert(stateid_afflvl0 == PSTATE_ID_CLUSTER_POWERDN);
+
+ /* Prepare for cluster powerdn */
+ tegra_fc_cluster_powerdn(mpidr);
+
+ } else if (stateid_afflvl0 == PSTATE_ID_CORE_POWERDN) {
+
+ /* Prepare for cpu powerdn */
+ tegra_fc_cpu_powerdn(mpidr);
+
+ } else {
+ ERROR("%s: Unknown state id\n", __func__);
+ return PSCI_E_NOT_SUPPORTED;
+ }
+
+ return PSCI_E_SUCCESS;
+}
+
+int tegra_soc_pwr_domain_on_finish(const psci_power_state_t *target_state)
+{
+ uint32_t val;
+
+ /*
+ * Check if we are exiting from SOC_POWERDN.
+ */
+ if (target_state->pwr_domain_state[PLAT_MAX_PWR_LVL] ==
+ PLAT_SYS_SUSPEND_STATE_ID) {
+
+ /*
+ * Lock scratch registers which hold the CPU vectors
+ */
+ tegra_pmc_lock_cpu_vectors();
+
+ /*
+ * Enable WRAP to INCR burst type conversions for
+ * incoming requests on the AXI slave ports.
+ */
+ val = mmio_read_32(TEGRA_MSELECT_BASE + MSELECT_CONFIG);
+ val &= ~ENABLE_UNSUP_TX_ERRORS;
+ val |= ENABLE_WRAP_TO_INCR_BURSTS;
+ mmio_write_32(TEGRA_MSELECT_BASE + MSELECT_CONFIG, val);
+
+ /*
+ * Restore Boot and Power Management Processor (BPMP) reset
+ * address and reset it.
+ */
+ tegra_fc_reset_bpmp();
+ }
+
+ /*
+ * T210 has a dedicated ARMv7 boot and power management processor, the
+ * BPMP. Inform it that the cluster power-up has completed.
+ */
+ tegra_fc_lock_active_cluster();
+
+ return PSCI_E_SUCCESS;
+}
+
+int tegra_soc_pwr_domain_on(u_register_t mpidr)
+{
+ int cpu = mpidr & MPIDR_CPU_MASK;
+ uint32_t mask = CPU_CORE_RESET_MASK << cpu;
+
+ /* Deassert CPU reset signals */
+ mmio_write_32(TEGRA_CAR_RESET_BASE + CPU_CMPLX_RESET_CLR, mask);
+
+ /* Turn on CPU using flow controller or PMC */
+ if (cpu_powergate_mask[cpu] == 0) {
+ tegra_pmc_cpu_on(cpu);
+ cpu_powergate_mask[cpu] = 1;
+ } else {
+ tegra_fc_cpu_on(cpu);
+ }
+
+ return PSCI_E_SUCCESS;
+}
+
+int tegra_soc_pwr_domain_off(const psci_power_state_t *target_state)
+{
+ tegra_fc_cpu_off(read_mpidr() & MPIDR_CPU_MASK);
+ return PSCI_E_SUCCESS;
+}
+
+int tegra_soc_prepare_system_reset(void)
+{
+ /*
+ * Set System Clock (SCLK) to its POR default so that the clock source
+ * for the PMC APB clock is not changed by the system reset.
+ */
+ mmio_write_32((uintptr_t)TEGRA_CAR_RESET_BASE + SCLK_BURST_POLICY,
+ SCLK_BURST_POLICY_DEFAULT);
+ mmio_write_32((uintptr_t)TEGRA_CAR_RESET_BASE + SCLK_RATE, 0);
+
+ /* Wait 1 ms for the clock source/device logic to stabilize. */
+ mdelay(1);
+
+ return PSCI_E_SUCCESS;
+}
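The write in tegra_soc_pwr_domain_on above clears one bit in each of the two reset fields described at the top of this file: CPU reset in bits 3:0 and core reset in bits 19:16. A standalone sketch of the mask arithmetic; the helper name is hypothetical:

#include <stdint.h>
#include <stdio.h>

#define CPU_CORE_RESET_MASK	0x10001	/* bit 0: CPU reset, bit 16: core reset */

/* For CPU n, set bit n (CPU reset field, bits 3:0) and bit n+16
 * (core reset field, bits 19:16) of CPU_CMPLX_RESET_CLR. */
static uint32_t cpu_reset_clear_mask(unsigned int cpu)
{
	return (uint32_t)CPU_CORE_RESET_MASK << cpu;
}

int main(void)
{
	/* e.g. CPU 2 -> 0x40004: clears CPU reset bit 2 and core reset bit 18 */
	for (unsigned int cpu = 0; cpu < 4; cpu++)
		printf("cpu%u mask: 0x%x\n", cpu,
		       (unsigned int)cpu_reset_clear_mask(cpu));
	return 0;
}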
diff --git a/plat/nvidia/tegra/soc/t210/plat_secondary.c b/plat/nvidia/tegra/soc/t210/plat_secondary.c
new file mode 100644
index 00000000..ecb258b9
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t210/plat_secondary.c
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <debug.h>
+#include <mmio.h>
+#include <pmc.h>
+#include <tegra_def.h>
+
+#define SB_CSR 0x0
+#define SB_CSR_NS_RST_VEC_WR_DIS (1 << 1)
+
+/* CPU reset vector */
+#define SB_AA64_RESET_LOW 0x30 /* width = 31:0 */
+#define SB_AA64_RESET_HI 0x34 /* width = 11:0 */
+
+extern void tegra_secure_entrypoint(void);
+
+/*******************************************************************************
+ * Setup secondary CPU vectors
+ ******************************************************************************/
+void plat_secondary_setup(void)
+{
+ uint32_t val;
+ uint64_t reset_addr = (uint64_t)tegra_secure_entrypoint;
+
+ INFO("Setting up secondary CPU boot\n");
+
+ /* setup secondary CPU vector */
+ mmio_write_32(TEGRA_SB_BASE + SB_AA64_RESET_LOW,
+ (reset_addr & 0xFFFFFFFF) | 1);
+ val = reset_addr >> 32;
+ mmio_write_32(TEGRA_SB_BASE + SB_AA64_RESET_HI, val & 0x7FF);
+
+ /* configure PMC */
+ tegra_pmc_cpu_setup(reset_addr);
+ tegra_pmc_lock_cpu_vectors();
+}
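plat_secondary_setup above splits the 64-bit entry point across the two vector registers: the low word is written with bit 0 set, and the bits above 31 are masked before going into the high register. A minimal sketch of that split, using a hypothetical helper name and an arbitrary example address:

#include <stdint.h>
#include <stdio.h>

/* Split a 64-bit reset address the way plat_secondary_setup does:
 * low 32 bits with bit 0 set, upper bits masked with 0x7FF. */
static void split_reset_vector(uint64_t addr, uint32_t *lo, uint32_t *hi)
{
	*lo = (uint32_t)(addr & 0xFFFFFFFFU) | 1U;
	*hi = (uint32_t)(addr >> 32) & 0x7FFU;
}

int main(void)
{
	uint32_t lo, hi;

	/* Arbitrary example address, purely for illustration. */
	split_reset_vector(0x000000018000A000ULL, &lo, &hi);
	printf("SB_AA64_RESET_LOW = 0x%08x, SB_AA64_RESET_HI = 0x%03x\n", lo, hi);
	return 0;
}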
diff --git a/plat/nvidia/tegra/soc/t210/plat_setup.c b/plat/nvidia/tegra/soc/t210/plat_setup.c
new file mode 100644
index 00000000..b058bed4
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t210/plat_setup.c
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <bl_common.h>
+#include <console.h>
+#include <tegra_def.h>
+#include <tegra_private.h>
+#include <xlat_tables.h>
+
+/*******************************************************************************
+ * The Tegra power domain tree has a single system level power domain i.e. a
+ * single root node. The first entry in the power domain descriptor specifies
+ * the number of power domains at the highest power level.
+ ******************************************************************************/
+const unsigned char tegra_power_domain_tree_desc[] = {
+ /* No of root nodes */
+ 1,
+ /* No of clusters */
+ PLATFORM_CLUSTER_COUNT,
+ /* No of CPU cores - cluster0 */
+ PLATFORM_MAX_CPUS_PER_CLUSTER,
+ /* No of CPU cores - cluster1 */
+ PLATFORM_MAX_CPUS_PER_CLUSTER
+};
+
+/* MMIO address ranges to be mapped for device access */
+#define MMIO_RANGE_0_ADDR 0x50000000
+#define MMIO_RANGE_1_ADDR 0x60000000
+#define MMIO_RANGE_2_ADDR 0x70000000
+#define MMIO_RANGE_SIZE 0x200000
+
+/*
+ * Table of regions to map using the MMU.
+ */
+static const mmap_region_t tegra_mmap[] = {
+ MAP_REGION_FLAT(MMIO_RANGE_0_ADDR, MMIO_RANGE_SIZE,
+ MT_DEVICE | MT_RW | MT_SECURE),
+ MAP_REGION_FLAT(MMIO_RANGE_1_ADDR, MMIO_RANGE_SIZE,
+ MT_DEVICE | MT_RW | MT_SECURE),
+ MAP_REGION_FLAT(MMIO_RANGE_2_ADDR, MMIO_RANGE_SIZE,
+ MT_DEVICE | MT_RW | MT_SECURE),
+ {0}
+};
+
+/*******************************************************************************
+ * Handler to return the MMIO map used to set up the page tables and the MMU
+ ******************************************************************************/
+const mmap_region_t *plat_get_mmio_map(void)
+{
+ /* MMIO space */
+ return tegra_mmap;
+}
+
+/*******************************************************************************
+ * Handler to get the System Counter Frequency
+ ******************************************************************************/
+unsigned int plat_get_syscnt_freq2(void)
+{
+ return 19200000;
+}
+
+/*******************************************************************************
+ * Maximum supported UART controllers
+ ******************************************************************************/
+#define TEGRA210_MAX_UART_PORTS 5
+
+/*******************************************************************************
+ * This variable holds the UART port base addresses
+ ******************************************************************************/
+static uint32_t tegra210_uart_addresses[TEGRA210_MAX_UART_PORTS + 1] = {
+ 0, /* undefined - treated as an error case */
+ TEGRA_UARTA_BASE,
+ TEGRA_UARTB_BASE,
+ TEGRA_UARTC_BASE,
+ TEGRA_UARTD_BASE,
+ TEGRA_UARTE_BASE,
+};
+
+/*******************************************************************************
+ * Retrieve the UART controller base to be used as the console
+ ******************************************************************************/
+uint32_t plat_get_console_from_id(int id)
+{
+ /* ids outside 1..TEGRA210_MAX_UART_PORTS do not map to a UART */
+ if ((id < 0) || (id > TEGRA210_MAX_UART_PORTS))
+ return 0;
+
+ return tegra210_uart_addresses[id];
+}
+
+/*******************************************************************************
+ * Initialize the GIC and SGIs
+ ******************************************************************************/
+void plat_gic_setup(void)
+{
+ tegra_gic_setup(NULL, 0);
+}
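plat_get_console_from_id above maps a 1-based port id onto the address table, with slot 0 reserved as the "no console" value. A standalone sketch of the same lookup rule, using placeholder base addresses instead of the tegra_def.h constants:

#include <stdint.h>
#include <stdio.h>

#define MAX_UART_PORTS	5

/* Placeholder base addresses standing in for TEGRA_UARTA_BASE, etc. */
static const uint32_t uart_bases[MAX_UART_PORTS + 1] = {
	0,		/* id 0: reserved, treated as "no console" */
	0x70006000,	/* UARTA */
	0x70006040,	/* UARTB */
	0x70006200,	/* UARTC */
	0x70006300,	/* UARTD */
	0x70006400,	/* UARTE */
};

/* Mirrors the lookup rule of plat_get_console_from_id. */
static uint32_t console_base_from_id(int id)
{
	if ((id < 0) || (id > MAX_UART_PORTS))
		return 0;

	return uart_bases[id];
}

int main(void)
{
	printf("id 1 -> 0x%x, id 9 -> 0x%x\n",
	       (unsigned int)console_base_from_id(1),
	       (unsigned int)console_base_from_id(9));
	return 0;
}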
diff --git a/plat/nvidia/tegra/soc/t210/platform_t210.mk b/plat/nvidia/tegra/soc/t210/platform_t210.mk
new file mode 100644
index 00000000..97ca3f1d
--- /dev/null
+++ b/plat/nvidia/tegra/soc/t210/platform_t210.mk
@@ -0,0 +1,45 @@
+#
+# Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+TZDRAM_BASE := 0xFF800000
+$(eval $(call add_define,TZDRAM_BASE))
+
+ERRATA_TEGRA_INVALIDATE_BTB_AT_BOOT := 1
+$(eval $(call add_define,ERRATA_TEGRA_INVALIDATE_BTB_AT_BOOT))
+
+PLATFORM_CLUSTER_COUNT := 2
+$(eval $(call add_define,PLATFORM_CLUSTER_COUNT))
+
+PLATFORM_MAX_CPUS_PER_CLUSTER := 4
+$(eval $(call add_define,PLATFORM_MAX_CPUS_PER_CLUSTER))
+
+MAX_XLAT_TABLES := 4
+$(eval $(call add_define,MAX_XLAT_TABLES))
+
+MAX_MMAP_REGIONS := 8
+$(eval $(call add_define,MAX_MMAP_REGIONS))
+
+BL31_SOURCES += lib/cpus/aarch64/cortex_a53.S \
+ lib/cpus/aarch64/cortex_a57.S \
+ ${COMMON_DIR}/drivers/flowctrl/flowctrl.c \
+ ${COMMON_DIR}/drivers/memctrl/memctrl_v1.c \
+ ${SOC_DIR}/plat_psci_handlers.c \
+ ${SOC_DIR}/plat_setup.c \
+ ${SOC_DIR}/plat_secondary.c
+
+# Enable workarounds for selected Cortex-A57 errata.
+A57_DISABLE_NON_TEMPORAL_HINT := 1
+ERRATA_A57_826974 := 1
+ERRATA_A57_826977 := 1
+ERRATA_A57_828024 := 1
+ERRATA_A57_829520 := 1
+ERRATA_A57_833471 := 1
+
+# Enable workarounds for selected Cortex-A53 errata.
+A53_DISABLE_NON_TEMPORAL_HINT := 1
+ERRATA_A53_826319 := 1
+ERRATA_A53_836870 := 1
+ERRATA_A53_855873 := 1