diff --git a/.gitignore b/.gitignore
new file mode 100644
index 00000000000..855c9f8ec83
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,5 @@
+*.swp
+cscope.*
+out
+.cproject
+.project
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 00000000000..fe903ec064a
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,27 @@
+Unless it has its own copyright/license embedded in its body, each source file
+is subject to the following license terms:
+
+Copyright (c) 2014, STMicroelectronics International N.V.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice,
+this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice,
+this list of conditions and the following disclaimer in the documentation
+and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
diff --git a/Makefile b/Makefile
new file mode 100644
index 00000000000..6b3358a9aa0
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,43 @@
+SHELL = /bin/bash
+
+.PHONY: all
+all:
+
+# Make these the defaults for now
+ARCH ?= arm32
+PLATFORM ?= orly2
+O ?= out/$(ARCH)-plat-$(PLATFORM)
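+# Example invocations (a sketch): plain "make" builds with the defaults above,
+# while "make PLATFORM=<plat> V=1" overrides the platform and enables verbose output.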
+
+arch_$(ARCH) := y
+
+cmd-fixdep := ./scripts/fixdep
+
+ifneq ($O,)
+out-dir := $O/
+endif
+
+ifneq ($V,1)
+q := @
+cmd-echo := true
+else
+q :=
+cmd-echo := echo
+endif
+
+include core/core.mk
+
+include ta/ta.mk
+
+.PHONY: clean
+clean:
+ @echo Cleaning
+ ${q}rm -f $(cleanfiles)
+
+.PHONY: cscope
+cscope:
+ @echo Creating cscope database
+ ${q}rm -f cscope.*
+ ${q}find $(PWD) -name "*.[chSs]" > cscope.files
+ ${q}cscope -b -q -k
diff --git a/Notice.md b/Notice.md
new file mode 100755
index 00000000000..6293a23b83b
--- /dev/null
+++ b/Notice.md
@@ -0,0 +1,35 @@
+OP-TEE
+=======
+
+This is the repository of OP-TEE (Open Portable Trusted Execution Environment), the open-source TEE maintained by STMicroelectronics, with initial contributions from STMicroelectronics, Ericsson, and the Linaro industry association.
+
+What OP-TEE is
+------
+
+OP-TEE is designed primarily to rely on the ARM TrustZone(R) technology as the underlying hardware isolation mechanism. However, it has been structured to be compatible with any isolation technology suitable for the TEE concept and goals, such as running as a virtual machine or on a dedicated CPU.
+
+The main design goals for OP-TEE are:
+- Isolation - the TEE provides isolation from the Rich OS (typically, Linux/Android) and it protects the Trusted Applications (TAs) it executes from each other, using underlying HW support,
+- Small footprint - the TEE should remain small enough so that the TEE core, including all the code and data required to provide isolation, can reside in a reasonable amount of on-chip memory,
+- Portability - the TEE must be easily pluggable to different architectures and available HW, and it has to support various setups such as multiple TEEs or multiple client OSes.
+
+Repository structure
+------
+
+OP-TEE is composed of three git repositories:
+- The optee_client git, containing the source code for the TEE client library in Linux. This component provides the TEE Client API as defined by the GlobalPlatform TEE standard. It is distributed under the BSD 2-clause open-source license.
+- The optee_os git, containing the source code for the TEE OS itself. This component provides the TEE Internal APIs as defined by the GlobalPlatform TEE standard to the Trusted Applications that it executes. It is distributed under the BSD 2-clause open-source license.
+- The optee_linuxdriver git, containing the source code for the TEE driver in Linux. This component implements a generic TEE driver, designed primarily for TEE implementations that rely on the ARM TrustZone(R) technology. It is distributed under the GPLv2 open-source license. Please note that re-distribution under other versions of the GPL license is not allowed. The rationale behind this limitation is to ensure that this code may be used on products which have security devices which prevent reloading the code. Such security devices would be incompatible with some licenses such as GPLv3 and so distribution under those licenses would be inconsistent with this goal. Therefore it is recommended that care be taken before redistributing any of the components under other license terms than those provided here.
+
+Contributions
+------
+
+Contributions to OP-TEE are managed by the OP-TEE gatekeepers, whose contact email is op-tee-support[at]st[.]com.
+
+Contributions must be original work of the contributor. In order to preserve the rights of the contributor while allowing distribution to and protection of the recipients of OP-TEE, the contributor must complete, sign and send the Contribution Agreement or a scanned copy to ST for counter-signature, prior to any contribution. The address to which the agreement should be sent, along with other details, will be provided upon contact with the OP-TEE gatekeepers.
+Once the Contribution Agreement is complete, the contributor may propose contributions to the OP-TEE gatekeepers. Proposed Contributions are reviewed for acceptance by the OP-TEE gatekeepers and the OP-TEE community.
+
+Submission of non-original work
+------
+
+You may submit work that is not your original creation separately from any Contribution, identifying the complete details of its source and of any license or other restriction of which you are personally aware. Such submissions are not subject to the Contribution Agreement. They are reviewed for acceptance by the OP-TEE gatekeepers and the OP-TEE community.
diff --git a/core/arch/arm32/include/arm32.h b/core/arch/arm32/include/arm32.h
new file mode 100644
index 00000000000..2c5e963aa7f
--- /dev/null
+++ b/core/arch/arm32/include/arm32.h
@@ -0,0 +1,167 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef ARM32_H
+#define ARM32_H
+
+#ifndef ASM
+#include <stdint.h>
+#endif
+
+#define CPSR_MODE_MASK 0x1f
+#define CPSR_MODE_USR 0x10
+#define CPSR_MODE_FIQ 0x11
+#define CPSR_MODE_IRQ 0x12
+#define CPSR_MODE_SVC 0x13
+#define CPSR_MODE_MON 0x16
+#define CPSR_MODE_ABT 0x17
+#define CPSR_MODE_UND 0x1b
+#define CPSR_MODE_SYS 0x1f
+
+#define CPSR_T (1 << 5)
+#define CPSR_F (1 << 6)
+#define CPSR_I (1 << 7)
+
+#define MPIDR_CPU_MASK 0xff
+#define MPIDR_CLUSTER_MASK (0xff << 8)
+
+#define SCR_NS (1 << 0)
+#define SCR_IRQ (1 << 1)
+#define SCR_FIQ (1 << 2)
+#define SCR_EA (1 << 3)
+#define SCR_FW (1 << 4)
+#define SCR_AW (1 << 5)
+#define SCR_NET (1 << 6)
+#define SCR_SCD (1 << 7)
+#define SCR_HCE (1 << 8)
+#define SCR_SIF (1 << 9)
+
+#define SCTLR_M (1 << 0)
+#define SCTLR_A (1 << 1)
+#define SCTLR_C (1 << 2)
+#define SCTLR_CP15BEN (1 << 5)
+#define SCTLR_SW (1 << 10)
+#define SCTLR_Z (1 << 11)
+#define SCTLR_I (1 << 12)
+#define SCTLR_V (1 << 13)
+#define SCTLR_RR (1 << 14)
+#define SCTLR_HA (1 << 17)
+#define SCTLR_WXN (1 << 19)
+#define SCTLR_UWXN (1 << 20)
+#define SCTLR_FI (1 << 21)
+#define SCTLR_VE (1 << 24)
+#define SCTLR_EE (1 << 25)
+#define SCTLR_NMFI (1 << 26)
+#define SCTLR_TRE (1 << 28)
+#define SCTLR_AFE (1 << 29)
+#define SCTLR_TE (1 << 30)
+
+#ifndef ASM
+static inline uint32_t read_mpidr(void)
+{
+ uint32_t mpidr;
+
+ asm ("mrc p15, 0, %[mpidr], c0, c0, 5"
+ : [mpidr] "=r" (mpidr)
+ );
+
+ return mpidr;
+}
+
+static inline uint32_t read_sctlr(void)
+{
+ uint32_t sctlr;
+
+ asm ("mrc p15, 0, %[sctlr], c1, c0, 0"
+ : [sctlr] "=r" (sctlr)
+ );
+
+ return sctlr;
+}
+
+static inline void write_sctlr(uint32_t sctlr)
+{
+ asm ("mcr p15, 0, %[sctlr], c1, c0, 0"
+ : : [sctlr] "r" (sctlr)
+ );
+}
+
+static inline void write_ttbr0(uint32_t ttbr0)
+{
+ asm ("mcr p15, 0, %[ttbr0], c2, c0, 0"
+ : : [ttbr0] "r" (ttbr0)
+ );
+}
+
+static inline void write_dacr(uint32_t dacr)
+{
+ asm ("mcr p15, 0, %[dacr], c3, c0, 0"
+ : : [dacr] "r" (dacr)
+ );
+}
+
+static inline void isb(void)
+{
+ asm ("isb");
+}
+
+static inline void dsb(void)
+{
+ asm ("dsb");
+}
+
+static inline void write_tlbiallis(void)
+{
+ /* Invalidate entire unified TLB Inner Shareable, r0 ignored */
+ asm ("mcr p15, 0, r0, c8, c3, 0");
+}
+
+static inline uint32_t read_cpsr(void)
+{
+ uint32_t cpsr;
+
+ asm ("mrs %[cpsr], cpsr"
+ : [cpsr] "=r" (cpsr)
+ );
+ return cpsr;
+}
+
+static inline void write_cpsr(uint32_t cpsr)
+{
+ asm ("msr cpsr, %[cpsr]"
+ : : [cpsr] "r" (cpsr)
+ );
+}
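+/*
+ * Usage sketch (not from the original sources): enable the MMU by setting
+ * SCTLR.M, then synchronize the context:
+ *   write_sctlr(read_sctlr() | SCTLR_M);
+ *   isb();
+ */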
+#endif
+
+#endif /*ARM32_H*/
diff --git a/core/arch/arm32/include/arm32_macros.S b/core/arch/arm32/include/arm32_macros.S
new file mode 100644
index 00000000000..ae85a479a8b
--- /dev/null
+++ b/core/arch/arm32/include/arm32_macros.S
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+ .macro read_sctlr reg
+ mrc p15, 0, \reg, c1, c0, 0
+ .endm
+
+ .macro write_sctlr reg
+ mcr p15, 0, \reg, c1, c0, 0
+ .endm
+
+ .macro read_scr reg
+ mrc p15, 0, \reg, c1, c1, 0
+ .endm
+
+ .macro write_scr reg
+ mcr p15, 0, \reg, c1, c1, 0
+ .endm
+
+ .macro read_mpidr reg
+ mrc p15, 0, \reg, c0, c0, 5
+ .endm
+
+ .macro write_vbar reg
+ mcr p15, 0, \reg, c12, c0, 0
+ .endm
+
+ .macro write_mvbar reg
+ mcr p15, 0, \reg, c12, c0, 1
+ .endm
diff --git a/core/arch/arm32/include/kernel/arch_debug.h b/core/arch/arm32/include/kernel/arch_debug.h
new file mode 100644
index 00000000000..1774b202452
--- /dev/null
+++ b/core/arch/arm32/include/kernel/arch_debug.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef KERNEL_ARCH_DEBUG_H
+#define KERNEL_ARCH_DEBUG_H
+
+void check_canaries(void);
+
+#endif /*KERNEL_ARCH_DEBUG_H*/
diff --git a/core/arch/arm32/include/kernel/asc.h b/core/arch/arm32/include/kernel/asc.h
new file mode 100644
index 00000000000..f942c6613ba
--- /dev/null
+++ b/core/arch/arm32/include/kernel/asc.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef ASC_H
+#define ASC_H
+
+extern int asc_init(void);
+extern int __asc_xmit_char(const char p);
+extern int __asc_xmit(const char *s);
+extern void __asc_flush(void);
+
+#endif
diff --git a/core/arch/arm32/include/kernel/kta_types.h b/core/arch/arm32/include/kernel/kta_types.h
new file mode 100644
index 00000000000..c308cfd8219
--- /dev/null
+++ b/core/arch/arm32/include/kernel/kta_types.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * \file kta_types.h
+ * \brief This file contains types related to the secure library API.
+ * @{
+ */
+
+#ifndef KTA_TYPES_H
+#define KTA_TYPES_H
+
+#include <stdint.h>
+
+/*
+ * KTA return value type
+ */
+
+/* Return code type */
+typedef uint32_t t_kta_return_value;
+
+/*
+ * Structure of return type
+ * -----------------------------------------------------------------
+ * | flags | domain | code |
+ * -----------------------------------------------------------------
+ * 31 28 27 16 15 0
+ */
+/* flags: 0x0 = success / 0x8 = failure */
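+/*
+ * Example decode (illustrative): KTA_RET_NON_SUPPORTED_APPL == 0x90000002
+ * has flags = 0x9 (failure), domain = 0x000 and code = 0x0002.
+ */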
+
+/* Success codes (domain = Y, error code = X) */
+/* #define KTA_RET_OK_REASON_X (0x000Y000X) */
+#define KTA_RET_OK ((t_kta_return_value)0x00000001)
+#define KTA_RET_BUSY ((t_kta_return_value)0x00000003)
+
+/* Failure codes (domain = Y, error code = X) */
+/* #define KTA_RET_FAIL_ERROR_X (0x900Y000X) */
+#define KTA_RET_FAIL ((t_kta_return_value)0x90000001)
+#define KTA_RET_NON_SUPPORTED_APPL ((t_kta_return_value)0x90000002)
+#define KTA_RET_NON_VALID_ADDRESS ((t_kta_return_value)0x90000003)
+#define KTA_RET_MMU_TRANSLATION_FAULT ((t_kta_return_value)0x90000004)
+#define KTA_RET_INVALID_ARGS ((t_kta_return_value)0x90000005)
+
+typedef struct kta_signed_header {
+ uint32_t magic;
+ uint16_t size_of_signed_header;
+ uint16_t size_of_signature;
+ uint32_t sign_hash_type; /* see t_hash_type */
+ uint32_t signature_type; /* see t_signature_type */
+ uint32_t hash_type; /* see t_hash_type */
+ uint32_t payload_type; /* see enum kta_payload_type */
+ uint32_t flags; /* reserved */
+ uint32_t size_of_payload;
+ uint32_t sw_vers_nbr;
+ uint32_t load_address;
+ uint32_t startup_address;
+ uint32_t spare; /* reserved */
+} kta_signed_header_t;
+
+#endif /* End of kta_types.h */
+
+/** @} */
diff --git a/core/arch/arm32/include/kernel/misc.h b/core/arch/arm32/include/kernel/misc.h
new file mode 100644
index 00000000000..f084eabe401
--- /dev/null
+++ b/core/arch/arm32/include/kernel/misc.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef KERNEL_MISC_H
+#define KERNEL_MISC_H
+
+#include <stddef.h>
+
+size_t get_core_pos(void);
+
+#endif /*KERNEL_MISC_H*/
+
diff --git a/core/arch/arm32/include/kernel/tee_l2cc_mutex.h b/core/arch/arm32/include/kernel/tee_l2cc_mutex.h
new file mode 100644
index 00000000000..8cc99240ac3
--- /dev/null
+++ b/core/arch/arm32/include/kernel/tee_l2cc_mutex.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+TEE_Result tee_l2cc_mutex_configure(uint32_t service_id, uint32_t *mutex);
diff --git a/core/arch/arm32/include/kernel/tee_misc.h b/core/arch/arm32/include/kernel/tee_misc.h
new file mode 100644
index 00000000000..00e91c6413f
--- /dev/null
+++ b/core/arch/arm32/include/kernel/tee_misc.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef TEE_MISC_H
+#define TEE_MISC_H
+
+#include <stdint.h>
+
+/*
+ * Macro to derive hex string buffer size from binary buffer size & the
+ * reverse
+ */
+#define TEE_B2HS_HSBUF_SIZE(x) ((x) * 2 + 1)
+#define TEE_HS2B_BBUF_SIZE(x) (((x) + 1) >> 1)
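+/*
+ * Example: TEE_B2HS_HSBUF_SIZE(4) == 9 (8 hex digits plus a NUL byte);
+ * conversely TEE_HS2B_BBUF_SIZE(8) == 4.
+ */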
+
+/*
+ * binary to hex string buffer
+ * Returns the number of data bytes written to the hex string
+ */
+uint32_t tee_b2hs(uint8_t *b, uint8_t *hs, uint32_t blen, uint32_t hslen);
+
+/*
+ * hex string to binary buffer
+ * Returns the number of data bytes written to the bin buffer
+ */
+uint32_t tee_hs2b(uint8_t *hs, uint8_t *b, uint32_t hslen, uint32_t blen);
+
+#endif /* TEE_MISC_H */
diff --git a/core/arch/arm32/include/kernel/thread.h b/core/arch/arm32/include/kernel/thread.h
new file mode 100644
index 00000000000..e4cb17dfb60
--- /dev/null
+++ b/core/arch/arm32/include/kernel/thread.h
@@ -0,0 +1,230 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef KERNEL_THREAD_H
+#define KERNEL_THREAD_H
+
+#include <stdint.h>
+#include <stdbool.h>
+#include <stddef.h>
+
+#define THREAD_ID_0 0
+#define THREAD_ABT_STACK 0xfffffffe
+#define THREAD_TMP_STACK 0xffffffff
+
+struct thread_smc_args {
+ uint32_t a0;
+ uint32_t a1;
+ uint32_t a2;
+ uint32_t a3;
+ uint32_t a4; /* Thread ID when returning from RPC */
+ uint32_t a5;
+ uint32_t a6; /* Optional session ID */
+ uint32_t a7; /* Hypervisor Client ID */
+};
+
+
+struct thread_abort_regs {
+ uint32_t spsr;
+ uint32_t pad;
+ uint32_t r0;
+ uint32_t r1;
+ uint32_t r2;
+ uint32_t r3;
+ uint32_t ip;
+ uint32_t lr;
+};
+typedef void (*thread_abort_handler_t)(uint32_t abort_type,
+ struct thread_abort_regs *regs);
+struct thread_svc_regs {
+ uint32_t r0;
+ uint32_t r1;
+ uint32_t r2;
+ uint32_t r3;
+ uint32_t r4;
+ uint32_t r5;
+ uint32_t lr;
+ uint32_t spsr;
+};
+typedef void (*thread_svc_handler_t)(struct thread_svc_regs *regs);
+typedef void (*thread_call_handler_t)(struct thread_smc_args *args);
+typedef void (*thread_fiq_handler_t)(void);
+struct thread_handlers {
+ /*
+ * stdcall and fastcall are called as regular functions and
+ * normal ARM Calling Convention applies. Return values are passed
+ * in args->param{1-3} and forwarded into r0-r3 when returning to
+ * non-secure world.
+ *
+ * stdcall handles calls which can be preempted from non-secure
+ * world. This handler is executed with a large stack.
+ *
+ * fastcall handles fast calls which can't be preempted. This
+ * handler is executed with a limited stack. This handler must not
+ * cause any aborts or re-enable FIQs which are temporarily masked
+ * while executing this handler.
+ *
+ * TODO execute fastcalls and FIQs on different stacks allowing
+ * FIQs to be enabled during a fastcall.
+ */
+ thread_call_handler_t stdcall;
+ thread_call_handler_t fastcall;
+
+ /*
+ * fiq is called as a regular function and normal ARM Calling
+ * Convention applies.
+ *
+ * This handler handles FIQs which can't be preempted. This handler
+ * is executed with a limited stack. This handler must not cause
+ * any aborts or re-enable FIQs which are temporarily masked while
+ * executing this handler.
+ */
+ thread_fiq_handler_t fiq;
+
+ /*
+ * The SVC handler is called as a normal function and should do
+ * a normal return. Note that IRQ is masked when this function
+ * is called, it's permitted for the function to unmask IRQ.
+ */
+ thread_svc_handler_t svc;
+
+ /*
+ * The abort handler is called as a normal function and should do
+ * a normal return. The abort handler is called when an undefined,
+ * prefetch abort, or data abort exception is received. In all
+ * cases the abort handler is executing in abort mode. If IRQ is
+ * unmasked in the abort handler it has to have separate abort
+ * stacks for each thread.
+ */
+ thread_abort_handler_t abort;
+};
+void thread_init_handlers(const struct thread_handlers *handlers);
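+/*
+ * Usage sketch with hypothetical handler functions (not part of this API):
+ *   static const struct thread_handlers handlers = {
+ *           .stdcall = my_stdcall, .fastcall = my_fastcall,
+ *           .fiq = my_fiq, .svc = my_svc, .abort = my_abort,
+ *   };
+ *   thread_init_handlers(&handlers);
+ */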
+
+/*
+ * Sets the stacks to be used by the different threads. Use THREAD_ID_0 for
+ * the first stack, THREAD_ID_0 + 1 for the next and so on.
+ *
+ * If stack_id == THREAD_TMP_STACK the temporary stack used by the current
+ * CPU is selected.
+ * If stack_id == THREAD_ABT_STACK the abort stack used by the current CPU
+ * is selected.
+ *
+ * Returns true on success and false on errors.
+ */
+bool thread_init_stack(uint32_t stack_id, vaddr_t sp);
+
+/*
+ * Set Thread Specific Data (TSD) pointer together with a function
+ * to free the TSD on thread_exit.
+ */
+typedef void (*thread_tsd_free_t)(void *tsd);
+void thread_set_tsd(void *tsd, thread_tsd_free_t free_func);
+
+/* Returns Thread Specific Data (TSD) pointer. */
+void *thread_get_tsd(void);
+
+/*
+ * Sets IRQ status for current thread, must only be called from an
+ * active thread context.
+ *
+ * enable == true -> enable IRQ
+ * enable == false -> disable IRQ
+ */
+void thread_set_irq(bool enable);
+
+/*
+ * Restores the IRQ status (in CPSR) for current thread, must only be called
+ * from an active thread context.
+ */
+void thread_restore_irq(void);
+
+/**
+ * Allocates data for struct teesmc32_arg.
+ *
+ * @size: size in bytes of struct teesmc32_arg
+ *
+ * @returns 0 on failure or a physical pointer to a struct teesmc32_arg buffer
+ * on success.
+ */
+paddr_t thread_rpc_alloc_arg(size_t size);
+
+/**
+ * Allocates data for a payload buffer.
+ *
+ * @size: size in bytes of payload buffer
+ *
+ * @returns 0 on failure or a physical pointer to a payload buffer on success.
+ */
+paddr_t thread_rpc_alloc_payload(size_t size);
+
+/**
+ * Free physical memory previously allocated with thread_rpc_alloc_arg()
+ *
+ * @arg: physical pointer to struct teesmc32_arg buffer
+ */
+void thread_rpc_free_arg(paddr_t arg);
+
+/**
+ * Free physical memory previously allocated with thread_rpc_alloc_payload()
+ *
+ * @payload: physical pointer to payload buffer
+ */
+void thread_rpc_free_payload(paddr_t payload);
+
+/**
+ * Does an RPC with a physical pointer to a struct teesmc32_arg
+ *
+ * @arg: physical pointer to struct teesmc32_arg
+ */
+void thread_rpc_cmd(paddr_t arg);
+
+/**
+ * Extension: Allocates data for payload buffers.
+ *
+ * @size: size in bytes of payload buffer
+ * @payload: returned physical pointer to payload buffer
+ * @cookie: returned cookie used when freeing the buffer
+ */
+void thread_st_rpc_alloc_payload(size_t size, paddr_t *payload,
+ paddr_t *cookie);
+
+/**
+ * Extension: Free physical memory previously allocated with thread_st_rpc_alloc_payload()
+ *
+ * @cookie: cookie received when allocating the payload buffer
+ */
+void thread_st_rpc_free_payload(paddr_t cookie);
+
+#endif /*KERNEL_THREAD_H*/
diff --git a/core/arch/arm32/include/kernel/thread_defs.h b/core/arch/arm32/include/kernel/thread_defs.h
new file mode 100644
index 00000000000..b3b984704a0
--- /dev/null
+++ b/core/arch/arm32/include/kernel/thread_defs.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef KERNEL_THREAD_DEFS_H
+#define KERNEL_THREAD_DEFS_H
+
+#define THREAD_FLAGS_COPY_ARGS_ON_RETURN (1 << 0)
+#define THREAD_FLAGS_IRQ_ENABLE (1 << 1)
+
+#define THREAD_ABORT_UNDEF 0
+#define THREAD_ABORT_PREFETCH 1
+#define THREAD_ABORT_DATA 2
+
+#endif /*KERNEL_THREAD_DEFS_H*/
diff --git a/core/arch/arm32/include/kernel/tz_proc.h b/core/arch/arm32/include/kernel/tz_proc.h
new file mode 100644
index 00000000000..95eb58261fb
--- /dev/null
+++ b/core/arch/arm32/include/kernel/tz_proc.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TZ_PROC_H
+#define TZ_PROC_H
+
+void cpu_dsb(void);
+void cpu_dmb(void);
+void cpu_isb(void);
+void cpu_wfe(void);
+void cpu_sev(void);
+
+void cpu_disable_its(void);
+void cpu_enable_its(void);
+unsigned int cpu_read_cpsr(void);
+void cpu_write_cpsr(unsigned int cpsr);
+
+unsigned int cpu_read_ttbr0(void);
+void cpu_write_ttbr0(unsigned int ttbr0);
+
+void cpu_spin_lock(unsigned int *lock);
+unsigned int cpu_spin_trylock(unsigned int *lock);
+void cpu_spin_unlock(unsigned int *lock);
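+/*
+ * Usage sketch (hypothetical caller): serialize access to shared state:
+ *   static unsigned int my_lock;   (zero-initialized means unlocked)
+ *   cpu_spin_lock(&my_lock);
+ *   ...critical section...
+ *   cpu_spin_unlock(&my_lock);
+ */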
+
+void mmu_enable(void);
+void mmu_enable_icache(void);
+void mmu_enable_dcache(void);
+
+#endif
diff --git a/core/arch/arm32/include/kernel/tz_proc_def.h b/core/arch/arm32/include/kernel/tz_proc_def.h
new file mode 100644
index 00000000000..2b3ec8def9c
--- /dev/null
+++ b/core/arch/arm32/include/kernel/tz_proc_def.h
@@ -0,0 +1,167 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*
+ * Description: This file contains constant assembly definitions for the
+ * Orly2 configuration.
+ */
+
+/*
+ * General constants
+ */
+
+/*
+ * CP15 Multiprocessor Affinity register (MPIDR)
+ */
+#define CP15_CONFIG_CPU_ID_MASK 0x00000003
+#define CPU_ID0 0x00000000
+#define CPU_ID1 0x00000001
+
+/*
+ * CP15 Secure configuration register
+ */
+#define CP15_CONFIG_NS_MASK 0x00000001
+#define CP15_CONFIG_IRQ_MASK 0x00000002
+#define CP15_CONFIG_FIQ_MASK 0x00000004
+#define CP15_CONFIG_EA_MASK 0x00000008
+#define CP15_CONFIG_FW_MASK 0x00000010
+#define CP15_CONFIG_AW_MASK 0x00000020
+#define CP15_CONFIG_nET_MASK 0x00000040
+
+/*
+ * CP15 Control register
+ */
+#define CP15_CONTROL_M_MASK 0x00000001
+#define CP15_CONTROL_C_MASK 0x00000004
+#define CP15_CONTROL_Z_MASK 0x00000800
+#define CP15_CONTROL_I_MASK 0x00001000
+#define CP15_CONTROL_V_MASK 0x00002000
+#define CP15_CONTROL_HA_MASK 0x00020000
+#define CP15_CONTROL_EE_MASK 0x02000000
+#define CP15_CONTROL_NMFI_MASK 0x08000000
+#define CP15_CONTROL_TRE_MASK 0x10000000
+#define CP15_CONTROL_AFE_MASK 0x20000000
+#define CP15_CONTROL_TE_MASK 0x40000000
+
+/*
+ * CP15 Auxiliary Control register
+ */
+#define CP15_CONTROL_SMP_MASK 0x00000040
+#define CP15_CONTROL_EXCL_MASK 0x00000080
+
+/*
+ * CP15 Non secure access control register
+ */
+#define CP15_NSAC_TL_MASK 0x10000
+#define CP15_NSAC_CL_MASK 0x20000
+#define CP15_NSAC_CPN_MASK 0x3FFF
+
+/*
+ * CP15 Cache register
+ */
+#define CP15_CACHE_ADDR_R_BIT 12
+#define CP15_CACHE_ADDR_L_BIT (32-CP15_CACHE_ADDR_R_BIT)
+#define CP15_CACHE_RESULT_MASK 0x00000001
+
+/*
+ * CP15 TCM register
+ *
+ * ITCM configuration (4kbytes, @0x20100000, enabled)
+ * DTCM configuration (4kbytes, @0x20101000, enabled)
+ */
+#define CP15_TCM_ENABLE_MASK 0x00000001
+#define CP15_TCM_INSTR_TCM 0x2010000C
+#define CP15_TCM_DATA_TCM 0x2010100C
+
+/*
+ * CP15 cache lockdown register
+ *
+ * ITCM configuration (4kbytes, @0x20100000, enabled)
+ * DTCM configuration (4kbytes, @0x20101000, enabled)
+ */
+#define CP15_CACHE_LOCK_ALLWAYS_MASK 0x0000000F
+
+/*
+ * CP15 cache cleaning constant definition
+ */
+/* start of line number field offset in way/index format */
+#define LINE_FIELD_OFFSET 5
+/* Warning: this assumes a 256 lines/way cache (32kB cache) */
+#define LINE_FIELD_OVERFLOW 13
+/* start of way number field offset in way/index format */
+#define WAY_FIELD_OFFSET 30
+
+/*
+ * CPSR definitions
+ */
+
+ /* Values */
+#define CPSR_USER_MODE 0x10 /* 10000b */
+#define CPSR_FIQ_MODE 0x11 /* 10001b */
+#define CPSR_IRQ_MODE 0x12 /* 10010b */
+#define CPSR_SVC_MODE 0x13 /* 10011b */
+#define CPSR_MON_MODE 0x16 /* 10110b */
+#define CPSR_ABORT_MODE 0x17 /* 10111b */
+#define CPSR_UNDEF_MODE 0x1B /* 11011b */
+#define CPSR_SYSTEM_MODE 0x1F /* 11111b */
+#define CPSR_CLR_MASK_MODE 0x1F /* 11111b */
+
+ /* Masks */
+#define CPSR_ARM_THUMB_MODE_MASK 0x020 /* 100000b ARM = 0, THUMB = 1 */
+#define CPSR_FIQ_IRQ_MASK 0x0C0 /* 11000000b */
+#define CPSR_FIQ_MASK 0x040 /* 01000000b */
+#define CPSR_IRQ_MASK 0x080 /* 10000000b */
+#define CPSR_A_MASK 0x100 /* 100000000b */
+#define CPSR_INST_MODE_FIQ_IRQ_MASK 0x0E0 /* 11100000b */
+
+ /* Bits */
+#define CPSR_FIQ_BIT 0x6
+#define CPSR_IRQ_BIT 0x7
+
+/*
+ * SCU related definitions
+ */
+#define SCU_INV_ALL_WAYS_CPU0 0xF
+#define SCU_INV_ALL_WAYS_CPU1 0xF0
+#define SCU_ENABLE_MASK 1
+#define SCU_FILTERING_ENABLE_MASK 2
+#define END_FILTERING_SCU_ADDR 0x3FFFFFFF
+
+/* Stack OFFSET definition */
+#define TWO_REGS_OFFSET 0x08
+#define THREE_REGS_OFFSET 0x0C
+#define FOUR_REGS_OFFSET 0x10
+#define SIX_REGS_OFFSET 0x18
+#define SEVEN_REGS_OFFSET 0x1C
+#define EIGHT_REGS_OFFSET 0x20
+#define TEN_REGS_OFFSET 0x28
+#define LOCK_INIT 2
+#define LOCK 1
+#define UNLOCK 0
+#define ARM_STORE_DONE 0
+#define ARM_STORE_FAILED 1
diff --git a/core/arch/arm32/include/kernel/tz_ssvce.h b/core/arch/arm32/include/kernel/tz_ssvce.h
new file mode 100644
index 00000000000..81127fbaaa0
--- /dev/null
+++ b/core/arch/arm32/include/kernel/tz_ssvce.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TZ_SSVCE_H
+#define TZ_SSVCE_H
+
+unsigned int secure_get_cpu_id(void);
+
+void arm_cl1_d_cleanbysetway(void);
+void arm_cl1_d_invbysetway(void);
+void arm_cl1_d_cleaninvbysetway(void);
+void arm_cl1_d_cleanbypa(unsigned long start, unsigned long end);
+void arm_cl1_d_invbypa(unsigned long start, unsigned long end);
+void arm_cl1_d_cleaninvbypa(unsigned long start, unsigned long end);
+
+void arm_cl1_i_inv_all(void);
+void arm_cl1_i_inv(unsigned long start, unsigned long end);
+
+void arm_cl2_cleaninvbyway(void);
+void arm_cl2_invbyway(void);
+void arm_cl2_cleanbyway(void);
+void arm_cl2_cleanbypa(unsigned long start, unsigned long end);
+void arm_cl2_invbypa(unsigned long start, unsigned long end);
+void arm_cl2_cleaninvbypa(unsigned long start, unsigned long end);
+
+void secure_mmu_datatlbinvall(void);
+void secure_mmu_unifiedtlbinvall(void);
+void secure_mmu_unifiedtlbinvbymva(unsigned long addr);
+void secure_mmu_unifiedtlbinv_curasid(void);
+void secure_mmu_unifiedtlbinv_byasid(unsigned long asid);
+
+void secure_mmu_disable(void);
+
+#endif
diff --git a/core/arch/arm32/include/kernel/tz_ssvce_def.h b/core/arch/arm32/include/kernel/tz_ssvce_def.h
new file mode 100644
index 00000000000..99f4e766f54
--- /dev/null
+++ b/core/arch/arm32/include/kernel/tz_ssvce_def.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * ARMv7 Secure Services library
+ */
+
+#define CPSR_OFFSET 0x00
+#define STACK_INT_USAGE 0x04
+
+/*
+ * tee service IDs (TODO: align with the service ID list).
+ * Set by NSec in R4 before SMC to request a TEE service.
+ */
+#define SSAPI_RET_FROM_INT_SERV 4
+#define SSAPI_RET_FROM_RPC_SERV 5
+
+/*
+ * TEE monitor: status returned by the routine that checks the entry
+ * reason (valid Service ID / secure context).
+ */
+#define SEC_INVALID_ENTRY 0
+#define SEC_PRE_INIT_ENTRY 1
+#define SEC_RET_FROM_INT_ENTRY 2
+#define SEC_RET_FROM_RPC_ENTRY 3
+#define SEC_NORMAL_ENTRY 4
+
+/*
+ * teecore exit reason.
+ * Set by Secure in R4 before SMC to request a switch to NSec.
+ */
+#define SEC_EXIT_NORMAL 1
+#define SEC_EXIT_START_EXT_CODE 2
+#define SEC_EXIT_INT 3
+#define SEC_EXIT_RPC_CALL 4
+#define SEC_EXIT_FIRST 5
+#define SEC_EXIT_DEEP_SLEEP 6
+
+/* misc */
+
+#define SEC_UNDEF_STACK_OFFSET 4
+#define SEC_ABORT_STACK_OFFSET 12
+
+#define SEC_ENTRY_STATUS_NOK 0
+#define SEC_ENTRY_STATUS_OK 1
diff --git a/core/arch/arm32/include/mm/core_memprot.h b/core/arch/arm32/include/mm/core_memprot.h
new file mode 100644
index 00000000000..1a04c6db8ab
--- /dev/null
+++ b/core/arch/arm32/include/mm/core_memprot.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef CORE_MEMPROT_H
+#define CORE_MEMPROT_H
+
+#include <stdint.h>
+#include <stdbool.h>
+#include <stddef.h>
+
+/*
+ * "pbuf_is" support.
+ *
+ * core_vbuf_is()/core_pbuf_is() can be used to check if a teecore mapped
+ * virtual address or a physical address is "Secure", "Unsecure", "external
+ * RAM" and some other fancy attributes.
+ *
+ * DO NOT use 'buf_is(Secure, buffer) == false' to conclude that a buffer is
+ * unsecure! This is NOT a valid assumption! A buffer is certified unsecure
+ * only if 'buf_is(UnSecure, buffer) == true'.
+ */
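+/*
+ * Example (sketch): before treating a physical buffer of length len at
+ * address pa as non-secure, require an explicit positive check:
+ *   if (!core_pbuf_is(CORE_MEM_NON_SEC, pa, len))
+ *           reject the buffer;
+ */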
+
+/* memory attributes */
+enum buf_is_attr {
+ CORE_MEM_SEC,
+ CORE_MEM_NON_SEC,
+ CORE_MEM_TEE_RAM,
+ CORE_MEM_TA_RAM,
+ CORE_MEM_NSEC_SHM,
+ CORE_MEM_MULTPURPOSE,
+ CORE_MEM_EXTRAM,
+ CORE_MEM_INTRAM,
+ CORE_MEM_CACHED,
+};
+
+/* redirect legacy tee_vbuf_is() and tee_pbuf_is() to our routines */
+#define tee_pbuf_is core_pbuf_is
+#define tee_vbuf_is core_vbuf_is
+
+/* Convenience macros */
+#define tee_pbuf_is_non_sec(buf, len) \
+ core_pbuf_is(CORE_MEM_NON_SEC, (tee_paddr_t)(buf), (len))
+
+#define tee_pbuf_is_sec(buf, len) \
+ core_pbuf_is(CORE_MEM_SEC, (tee_paddr_t)(buf), (len))
+
+#define tee_vbuf_is_non_sec(buf, len) \
+ core_vbuf_is(CORE_MEM_NON_SEC, (void *)(buf), (len))
+
+#define tee_vbuf_is_sec(buf, len) \
+ core_vbuf_is(CORE_MEM_SEC, (void *)(buf), (len))
+
+/* See kta_mem.h for flags to tee_pbuf_is() and tee_kbuf_is() */
+
+/*
+ * This function returns true if the buffer complies with the supplied flags.
+ * If it returns false, the buffer doesn't comply with the supplied flags or
+ * something went wrong.
+ *
+ * Note that returning false doesn't guarantee that buf complies with
+ * the complement of the supplied flags.
+ */
+bool core_pbuf_is(uint32_t flags, tee_paddr_t pbuf, size_t len);
+
+/*
+ * Translates the supplied virtual address to a physical address and uses
+ * core_pbuf_is() to check the compliance of the buffer.
+ */
+bool core_vbuf_is(uint32_t flags, const void *vbuf, size_t len);
+
+#endif /* CORE_MEMPROT_H */
diff --git a/core/arch/arm32/include/mm/core_mmu.h b/core/arch/arm32/include/mm/core_mmu.h
new file mode 100644
index 00000000000..26bd60af226
--- /dev/null
+++ b/core/arch/arm32/include/mm/core_mmu.h
@@ -0,0 +1,149 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef CORE_MMU_H
+#define CORE_MMU_H
+
+#include <stdint.h>
+#include <stdbool.h>
+#include <stddef.h>
+
+/*
+ * @type: enumerated value specifying the purpose of the memory area
+ * @pa: memory area physical start address
+ * @size: memory area size in bytes
+ * @va: virtual start address (0 if memory is not mapped)
+ * @region_size: size of the mapping region used (4k, 64K, 1MB)
+ * @secure: true if the memory area is inside an A9 secure area
+ */
+struct map_area {
+ unsigned int type;
+ unsigned int pa;
+ size_t size;
+ /* below here are core_mmu.c internal data */
+ unsigned int va;
+ unsigned int region_size;
+ bool secure;
+ bool cached;
+ bool device;
+ bool rw;
+ bool exec;
+};
+
+/*
+ * Memory area type:
+ * MEM_AREA_NOTYPE: Undefined type. Used as end of table.
+ * MEM_AREA_TEE_RAM: teecore execution RAM (secure, reserved to TEEtz, unused)
+ * MEM_AREA_TEE_COHERENT: teecore coherent RAM (secure, reserved to TEEtz)
+ * MEM_AREA_TA_RAM: Secure RAM where teecore loads/exec TA instances.
+ * MEM_AREA_NSEC_SHM: NonSecure shared RAM between NSec and TEEtz.
+ * MEM_AREA_KEYVAULT: Secure RAM storing some secrets
+ * MEM_AREA_IO_SEC: Secure HW mapped registers
+ * MEM_AREA_IO_NSEC: NonSecure HW mapped registers
+ * MEM_AREA_MAXTYPE: first invalid 'type' value
+ */
+enum teecore_memtypes {
+ MEM_AREA_NOTYPE = 0,
+ MEM_AREA_TEE_RAM,
+ MEM_AREA_TEE_COHERENT,
+ MEM_AREA_TA_RAM,
+ MEM_AREA_NSEC_SHM,
+ MEM_AREA_KEYVAULT,
+ MEM_AREA_IO_SEC,
+ MEM_AREA_IO_NSEC,
+ MEM_AREA_MAXTYPE
+};
+
+/* Default NSec shared memory allocated from NSec world */
+extern unsigned long default_nsec_shm_paddr;
+extern unsigned long default_nsec_shm_size;
+
+uint32_t core_map_area_flag(void *p, size_t l);
+unsigned int core_init_mmu(unsigned int core_ttbr0, unsigned int ta_ttbr0);
+
+int core_mmu_map(unsigned long paddr, size_t size, unsigned long flags);
+int core_mmu_unmap(unsigned long paddr, size_t size);
+
+void core_mmu_get_mem_by_type(unsigned int type, unsigned int *s,
+ unsigned int *e);
+
+int core_va2pa(uint32_t va, uint32_t *pa);
+int core_pa2va(uint32_t pa, uint32_t *va);
+
+/* get phys base addr of MMU L1 table used for tee core */
+uint32_t core_mmu_get_ttbr0_base(void);
+uint32_t core_mmu_get_ttbr0_attr(void);
+uint32_t core_mmu_get_ttbr0(void);
+
+/* get virt and phys base addr of MMU L1 table used for TAs */
+uint32_t core_mmu_get_ta_ul1_va(void);
+uint32_t core_mmu_get_ta_ul1_pa(void);
+
+/* routines to retrieve shared mem configuration */
+bool core_mmu_is_shm_cached(void);
+
+/* L1/L2 cache maintenance (op: see t_cache_operation_id below) */
+unsigned int core_cache_maintenance(int op, void *start, size_t len);
+void core_l2cc_mutex_set(void *mutex);
+void core_l2cc_mutex_activate(bool en);
+
+/* various invalidate secure TLB */
+enum teecore_tlb_op {
+ TLBINV_DATATLB, /* invalidate data tlb */
+ TLBINV_UNIFIEDTLB, /* invalidate unified tlb */
+ TLBINV_CURRENT_ASID, /* invalidate unified tlb for current ASID */
+ TLBINV_BY_ASID, /* invalidate unified tlb by ASID */
+ TLBINV_BY_MVA, /* invalidate unified tlb by MVA */
+};
+
+struct map_area *bootcfg_get_memory(void);
+int core_tlb_maintenance(int op, unsigned int a);
+unsigned long bootcfg_get_pbuf_is_handler(void);
+
+/* Cache maintenance operation type */
+typedef enum {
+ DCACHE_CLEAN = 0x1,
+ DCACHE_AREA_CLEAN = 0x2,
+ DCACHE_INVALIDATE = 0x3,
+ DCACHE_AREA_INVALIDATE = 0x4,
+ ICACHE_INVALIDATE = 0x5,
+ ICACHE_AREA_INVALIDATE = 0x6,
+ WRITE_BUFFER_DRAIN = 0x7,
+ DCACHE_CLEAN_INV = 0x8,
+ DCACHE_AREA_CLEAN_INV = 0x9,
+ L2CACHE_INVALIDATE = 0xA,
+ L2CACHE_AREA_INVALIDATE = 0xB,
+ L2CACHE_CLEAN = 0xC,
+ L2CACHE_AREA_CLEAN = 0xD,
+ L2CACHE_CLEAN_INV = 0xE,
+ L2CACHE_AREA_CLEAN_INV = 0xF
+} t_cache_operation_id;
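+/*
+ * Example (sketch): clean and invalidate a buffer in the L1 data cache:
+ *   core_cache_maintenance(DCACHE_AREA_CLEAN_INV, buf, len);
+ */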
+
+#endif /* CORE_MMU_H */
diff --git a/core/arch/arm32/include/mm/tee_mm_def.h b/core/arch/arm32/include/mm/tee_mm_def.h
new file mode 100644
index 00000000000..1647364896c
--- /dev/null
+++ b/core/arch/arm32/include/mm/tee_mm_def.h
@@ -0,0 +1,143 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TEE_MM_DEF_H
+#define TEE_MM_DEF_H
+
+#define SMALL_PAGE_SHIFT 12
+#define SMALL_PAGE_MASK 0xfff
+#define SMALL_PAGE_SIZE 0x1000
+
+#define SECTION_SHIFT 20
+#define SECTION_MASK 0x000fffff
+#define SECTION_SIZE 0x00100000
+
+#define TEE_VMEM_START 0x40000000
+#define TEE_VMEM_SIZE (1024 * 1024)
+
+/* virtual addresses of ROM code variables and L2 MMU tables */
+#define SEC_VIRT_MMU_L2_BASE 0x40000000
+
+/* Paged virtual memory defines */
+#define TEE_PVMEM_PSIZE (TEE_VMEM_SIZE / SMALL_PAGE_SIZE)
+
+#define TEE_PVMEM_LO TEE_VMEM_START
+
+/* define section to load */
+#define TEE_DDR_VLOFFSET 0x1
+
+/* Reset error code */
+#define TEE_RESET_INVALID_PAGE_ERROR 0xBADB7000
+
+/*
+ * MMU related values
+ */
+#define TEE_VIRT_MMU_L2_BASE TEE_VMEM_START
+#define TEE_VIRT_MMU_L2_SIZE 0x400
+#define TEE_MMU_UL1_BASE core_mmu_get_ta_ul1_va()
+#define TEE_MMU_UL1_PA_BASE core_mmu_get_ta_ul1_pa()
+
+#define TEE_MMU_DEFAULT_ATTRS \
+ (TEE_MMU_TTB_S | TEE_MMU_TTB_IRGN_WBWA | TEE_MMU_TTB_RNG_WBWA)
+
+/* Page attributes */
+
+/*
+ * Small pages [31:12]PA, not Global, Sharable, Access Permission,
+ * Memory region attribute [8:6], Access permissions [5:4],
+ * C, B, Small page, Outer and Inner Write-Back, Write-Allocate
+ */
+#define TEE_PAGER_PAGE_UNLOADED \
+ (TEE_MMU_L2SP_SMALL_PAGE | TEE_MMU_L2SP_WBWA | TEE_MMU_L2SP_S)
+
+#define TEE_PAGER_PAGE_LOADED \
+ (TEE_PAGER_PAGE_UNLOADED | TEE_MMU_L2SP_PRIV_ACC)
+
+#define TEE_PAGER_STACKS_ATTRIBUTES \
+ (TEE_PAGER_PAGE_LOADED | TEE_MMU_L2SP_XN)
+
+#define TEE_PAGER_NO_ACCESS_ATTRIBUTES 0x00000000
+
+#define TEE_ROM_AREA_START TEE_VMEM_START
+#define TEE_ROM_AREA_SIZE 0x2000
+
+#define TEE_HOLE_START (TEE_ROM_AREA_START + TEE_ROM_AREA_SIZE)
+#define TEE_HOLE_SIZE 0x2000
+
+/* Has to be kept in sync with elf_arm.x */
+#define TEE_STACK_AREA_START (TEE_HOLE_START + TEE_HOLE_SIZE)
+/* Stack is not physically contiguous. */
+#define TEE_STACK_AREA_START0 TEE_STACK_AREA_START
+#define TEE_STACK_AREA_SIZE0 0x3000
+#define TEE_STACK_AREA_SIZE TEE_STACK_AREA_SIZE0
+
+/* Has to be kept in sync with elf_arm.x */
+#define TEE_CODE_START (TEE_STACK_AREA_START + TEE_STACK_AREA_SIZE)
+#define TEE_CODE_SIZE 0xA000
+
+#define TEE_HEAP_START (TEE_CODE_START + TEE_CODE_SIZE)
+/*
+ * This address has to be 16KB aligned as the first few bytes are
+ * used to hold the L1 MMU descriptor for the user mode mapping.
+ */
+#define TEE_HEAP_START0 TEE_HEAP_START
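+
+/*
+ * Resulting virtual layout with the values above:
+ *	ROM area:	0x40000000 - 0x40002000
+ *	hole:		0x40002000 - 0x40004000
+ *	stack area:	0x40004000 - 0x40007000
+ *	code:		0x40007000 - 0x40011000
+ *	heap:		0x40011000 and up, within TEE_VMEM
+ */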
+
+
+/*
+ * Register addresses related to time
+ * RTT = Real-Time Timer
+ * RTT0 = Real-Time Timer 0
+ * RTT1 = Real-Time Timer 1
+ */
+#define RTT_CR_EN 0x2
+#define RTT_CR_ENS 0x4
+#define RTT_IMSC_IMSC 0x1
+#define RTT_MIS_MIS 0x1
+
+/* RTT0 definition */
+#define RTT0_REG_START_ADDR 0x80152000
+#define RTT0_CTCR (RTT0_REG_START_ADDR)
+#define RTT0_IMSC (RTT0_REG_START_ADDR + 0x04)
+#define RTT0_RIS (RTT0_REG_START_ADDR + 0x08)
+#define RTT0_MIS (RTT0_REG_START_ADDR + 0x0C)
+#define RTT0_ICR (RTT0_REG_START_ADDR + 0x10)
+#define RTT0_DR (RTT0_REG_START_ADDR + 0x14)
+#define RTT0_LR (RTT0_REG_START_ADDR + 0x18)
+#define RTT0_CR (RTT0_REG_START_ADDR + 0x1c)
+
+/* RTT1 definition */
+#define RTT1_REG_START_ADDR 0x80153000
+#define RTT1_CTCR (RTT1_REG_START_ADDR)
+#define RTT1_IMSC (RTT1_REG_START_ADDR + 0x04)
+#define RTT1_RIS (RTT1_REG_START_ADDR + 0x08)
+#define RTT1_MIS (RTT1_REG_START_ADDR + 0x0C)
+#define RTT1_ICR (RTT1_REG_START_ADDR + 0x10)
+#define RTT1_DR (RTT1_REG_START_ADDR + 0x14)
+#define RTT1_LR (RTT1_REG_START_ADDR + 0x18)
+#define RTT1_CR (RTT1_REG_START_ADDR + 0x1c)
+
+#endif
diff --git a/core/arch/arm32/include/mm/tee_mmu_defs.h b/core/arch/arm32/include/mm/tee_mmu_defs.h
new file mode 100644
index 00000000000..82241d26c04
--- /dev/null
+++ b/core/arch/arm32/include/mm/tee_mmu_defs.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef TEE_MMU_DEFS_H
+#define TEE_MMU_DEFS_H
+
+/* Number of sections in ttbr0 */
+#define TEE_MMU_UL1_NUM_ENTRIES 32
+#define TEE_MMU_UL1_NUM_USER_ENTRIES (TEE_MMU_UL1_NUM_ENTRIES / 2)
+#define TEE_MMU_UL1_NUM_KERN_ENTRIES (TEE_MMU_UL1_NUM_ENTRIES - \
+ TEE_MMU_UL1_NUM_USER_ENTRIES)
+
+#define TEE_MMU_UL1_SIZE (TEE_MMU_UL1_NUM_ENTRIES * sizeof(uint32_t))
+#define TEE_MMU_UL1_USER_SIZE (TEE_MMU_UL1_NUM_USER_ENTRIES * sizeof(uint32_t))
+#define TEE_MMU_UL1_KERN_SIZE (TEE_MMU_UL1_NUM_KERN_ENTRIES * sizeof(uint32_t))
+
+#define TEE_MMU_UL1_KERN_BASE (TEE_MMU_UL1_BASE + TEE_MMU_UL1_USER_SIZE)
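+
+/*
+ * With 32 first-level entries of one 1MB section each, the table covers
+ * a 32MB address range and occupies 32 * 4 = 128 bytes; the lower 16
+ * entries (64 bytes) map user space, the upper 16 kernel space.
+ */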
+
+/* TTB attributes */
+
+/* Mask for all attributes */
+/* #define TEE_MMU_TTB_ATTR_MASK ((1 << 7) - 1) */
+
+/* Sharable */
+#define TEE_MMU_TTB_S (1 << 1)
+
+/* Not Outer Sharable */
+#define TEE_MMU_TTB_NOS (1 << 5)
+
+/* Normal memory, Inner Non-cacheable */
+#define TEE_MMU_TTB_IRGN_NC 0
+
+/* Normal memory, Inner Write-Back Write-Allocate Cacheable */
+#define TEE_MMU_TTB_IRGN_WBWA (1 << 6)
+
+/* Normal memory, Inner Write-Through Cacheable */
+#define TEE_MMU_TTB_IRGN_WT 1
+
+/* Normal memory, Inner Write-Back no Write-Allocate Cacheable */
+#define TEE_MMU_TTB_IRGN_WB (1 | (1 << 6))
+
+/* Normal memory, Outer Write-Back Write-Allocate Cacheable */
+#define TEE_MMU_TTB_RNG_WBWA (1 << 3)
+
+#define TEE_MMU_TTBRX_TTBX_MASK (((1 << 18) - 1) << 14)
+#define TEE_MMU_TTBRX_ATTR_MASK ((1 << 14) - 1)
+
+/*
+ * Second-level descriptor Small page table Attributes
+ */
+
+/* Small page */
+#define TEE_MMU_L2SP_SMALL_PAGE (1 << 1)
+
+/* Execute never */
+#define TEE_MMU_L2SP_XN 1
+
+/* Normal memory, Outer Write-Back Write-Allocate Cacheable */
+#define TEE_MMU_L2SP_WBWA ((1 << 6) | (1 << 3) | (1 << 2))
+
+/* Not global */
+#define TEE_MMU_L2SP_NG (1 << 11)
+
+/* Sharable */
+#define TEE_MMU_L2SP_S (1 << 10)
+
+/* Privileged access only */
+#define TEE_MMU_L2SP_PRIV_ACC (1 << 4)
+
+/* Clear access from attribute */
+#define TEE_MMU_L2SP_CLEAR_ACC(attr) ((attr) & ~((1 << 5) | (1 << 4)))
+
+#endif /* TEE_MMU_DEFS_H */
diff --git a/core/arch/arm32/include/mm/tee_pager_defines.h b/core/arch/arm32/include/mm/tee_pager_defines.h
new file mode 100644
index 00000000000..30cd3181f58
--- /dev/null
+++ b/core/arch/arm32/include/mm/tee_pager_defines.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef TEE_PAGER_DEFINES_H
+#define TEE_PAGER_DEFINES_H
+
+#define TEE_PAGER_NORMAL_RETURN 0
+#define TEE_PAGER_USER_TA_PANIC 1
+
+#define TEE_PAGER_SPSR_MODE_MASK 0x1F
+#define TEE_PAGER_SPSR_MODE_USR 0x10
+#define TEE_PAGER_SPSR_MODE_SVC 0x13
+#define TEE_PAGER_SPSR_MODE_ABT 0x17
+#define TEE_PAGER_SPSR_MODE_MON 0x16
+
+#define TEE_PAGER_DATA_ABORT 0x00000000
+#define TEE_PAGER_PREF_ABORT 0x00000001
+#define TEE_PAGER_UNDEF_ABORT 0x00000002
+
+#endif /* TEE_PAGER_DEFINES_H */
diff --git a/core/arch/arm32/include/mm/tee_pager_unpg.h b/core/arch/arm32/include/mm/tee_pager_unpg.h
new file mode 100644
index 00000000000..b91b684509b
--- /dev/null
+++ b/core/arch/arm32/include/mm/tee_pager_unpg.h
@@ -0,0 +1,112 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * This header file holds shared internal definitions for tee_pager*.[cs]
+ * and should not be included in other files.
+ */
+
+#ifndef TEE_PAGER_UNPG_H
+#define TEE_PAGER_UNPG_H
+
+#include <stdint.h>
+#include <sys/queue.h>
+#include <kernel/thread.h>
+#include <mm/tee_mm_def.h>
+
+/* Interesting aborts for TEE pager */
+#define TEE_FSR_FS_MASK 0x040F
+#define TEE_FSR_FS_ALIGNMENT_FAULT 0x0001 /* DFSR[10,3:0] 0b00001 */
+#define TEE_FSR_FS_DEBUG_EVENT 0x0002 /* DFSR[10,3:0] 0b00010 */
+#define TEE_FSR_FS_ASYNC_EXTERNAL_ABORT 0x0406 /* DFSR[10,3:0] 0b10110 */
+#define TEE_FSR_FS_PERMISSION_FAULT_SECTION 0x000D /* DFSR[10,3:0] 0b01101 */
+#define TEE_FSR_FS_PERMISSION_FAULT_PAGE 0x000F /* DFSR[10,3:0] 0b01111 */
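+
+/*
+ * The fault status is split over DFSR bits [10] and [3:0], hence the
+ * 0x040F mask above. For example 0x0406 decodes as bit 10 set plus
+ * 0b0110 in bits [3:0], i.e. fault status 0b10110 (the async external
+ * abort).
+ */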
+
+/*
+ * Represents a physical page used for paging.
+ *
+ * mmu_entry points to the currently used MMU entry. The actual physical
+ * address is stored here so even if the page isn't mapped, there's always
+ * an MMU entry holding the physical address.
+ *
+ * ctx_handle is a pointer returned by tee_ta_load_page() and later
+ * used when saving rw-data.
+ */
+struct tee_pager_pmem {
+ uint32_t *mmu_entry;
+ void *ctx_handle;
+ TAILQ_ENTRY(tee_pager_pmem) link;
+};
+
+TAILQ_HEAD(tee_pager_pmem_head, tee_pager_pmem);
+
+/* Head of registered physical pages */
+extern struct tee_pager_pmem_head tee_pager_pmem_head;
+/* Number of registered physical pages, used when hiding pages. */
+extern uint8_t tee_pager_npages;
+
+void tee_pager_abort_handler(uint32_t abort_type,
+ struct thread_abort_regs *regs);
+
+/* Returns the IFAR register */
+uint32_t TEE_PAGER_GET_IFAR_asm(void);
+/* Returns the DFAR register */
+uint32_t TEE_PAGER_GET_DFAR_asm(void);
+/* Returns the DFSR register */
+uint32_t TEE_PAGER_GET_DFSR_asm(void);
+uint32_t TEE_PAGER_GET_CPUID_asm(void);
+/* Returns the IFSR register */
+uint32_t TEE_PAGER_GET_IFSR_asm(void);
+/* Returns the SPSR register */
+uint32_t tee_pager_get_spsr(void);
+
+/* Called for each core to set up that core for the tee pager. */
+extern void TEE_PAGER_INIT_asm(void);
+
+/* Get VA from L2 MMU entry address */
+#define TEE_PAGER_GET_VA(a) \
+ (((((uint32_t)a) - SEC_VIRT_MMU_L2_BASE) << \
+ (SMALL_PAGE_SHIFT - 2)) + TEE_VMEM_START)
+
+/* Get L2 MMU entry address from virtual address */
+static inline uint32_t *tee_pager_get_mmu_entry(tee_vaddr_t va)
+{
+ tee_vaddr_t addr = va & ~SMALL_PAGE_MASK;
+ size_t mmu_entry_offset = (addr - TEE_VMEM_START) >> SMALL_PAGE_SHIFT;
+
+ return (uint32_t *)(TEE_VIRT_MMU_L2_BASE +
+ mmu_entry_offset * sizeof(uint32_t));
+}
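+
+/*
+ * Worked example using the constants from tee_mm_def.h: for
+ * va = TEE_VMEM_START + 0x5000 the entry offset is 5, so the entry is
+ * at TEE_VIRT_MMU_L2_BASE + 5 * sizeof(uint32_t), and TEE_PAGER_GET_VA()
+ * applied to that entry address gives back 0x40005000.
+ */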
+
+/* Returns true if the exception originated from user mode */
+bool tee_pager_is_user_exception(void);
+/* Returns true if the exception originated from abort mode */
+bool tee_pager_is_abort_in_abort_handler(void);
+
+void tee_pager_restore_irq(void);
+
+#endif /* TEE_PAGER_UNPG_H */
diff --git a/core/arch/arm32/include/sm/sm.h b/core/arch/arm32/include/sm/sm.h
new file mode 100644
index 00000000000..a0327d83058
--- /dev/null
+++ b/core/arch/arm32/include/sm/sm.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef SM_SM_H
+#define SM_SM_H
+
+#include <stdint.h>
+
+struct sm_nsec_ctx {
+ uint32_t usr_sp;
+ uint32_t usr_lr;
+ uint32_t irq_spsr;
+ uint32_t irq_sp;
+ uint32_t irq_lr;
+ uint32_t svc_spsr;
+ uint32_t svc_sp;
+ uint32_t svc_lr;
+ uint32_t abt_spsr;
+ uint32_t abt_sp;
+ uint32_t abt_lr;
+ uint32_t und_spsr;
+ uint32_t und_sp;
+ uint32_t und_lr;
+ uint32_t mon_lr;
+ uint32_t mon_spsr;
+ uint32_t r4;
+ uint32_t r5;
+ uint32_t r6;
+ uint32_t r7;
+ uint32_t r8;
+ uint32_t r9;
+ uint32_t r10;
+ uint32_t r11;
+ uint32_t r12;
+};
+
+struct sm_sec_ctx {
+ uint32_t usr_sp;
+ uint32_t usr_lr;
+ uint32_t irq_spsr;
+ uint32_t irq_sp;
+ uint32_t irq_lr;
+ uint32_t svc_spsr;
+ uint32_t svc_sp;
+ uint32_t svc_lr;
+ uint32_t abt_spsr;
+ uint32_t abt_sp;
+ uint32_t abt_lr;
+ uint32_t und_spsr;
+ uint32_t und_sp;
+ uint32_t und_lr;
+ uint32_t mon_lr;
+ uint32_t mon_spsr;
+};
+
+/* Returns storage location of non-secure context for current CPU */
+struct sm_nsec_ctx *sm_get_nsec_ctx(void);
+
+/* Returns storage location of secure context for current CPU */
+struct sm_sec_ctx *sm_get_sec_ctx(void);
+
+/* Returns stack pointer to use in monitor mode for current CPU */
+void *sm_get_sp(void);
+
+
+/*
+ * Initializes secure monitor, must be called by each CPU
+ */
+void sm_init(vaddr_t stack_pointer);
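+
+/*
+ * Example (sketch): each CPU would typically pass the top of its own
+ * monitor stack, sized by SM_STACK_SIZE from sm_defs.h. sm_stack is a
+ * placeholder for a per-CPU stack array:
+ *
+ *	sm_init((vaddr_t)&sm_stack[get_core_pos()][SM_STACK_SIZE]);
+ */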
+
+#endif /*SM_SM_H*/
diff --git a/core/arch/arm32/include/sm/sm_defs.h b/core/arch/arm32/include/sm/sm_defs.h
new file mode 100644
index 00000000000..eae324300e3
--- /dev/null
+++ b/core/arch/arm32/include/sm/sm_defs.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef SM_SM_DEFS_H
+#define SM_SM_DEFS_H
+
+/*
+ * sm_smc_entry uses 6 * 4 bytes
+ * sm_fiq_entry uses 6 * 4 bytes
+ *
+ * Add some spare space since a couple of C functions are called using this
+ * stack. These functions can sometimes use more stack depending on
+ * compiler options.
+ */
+#define SM_STACK_SIZE (12 * 4)
+
+#endif /*SM_SM_DEFS_H*/
diff --git a/core/arch/arm32/include/sm/tee_mon.h b/core/arch/arm32/include/sm/tee_mon.h
new file mode 100644
index 00000000000..9e546d64f9a
--- /dev/null
+++ b/core/arch/arm32/include/sm/tee_mon.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef TEE_MON_H
+#define TEE_MON_H
+
+#include
+#include
+#include
+#include "tee_api_types.h"
+#include "user_ta_header.h"
+
+extern ta_static_head_t __start_ta_head_section;
+extern ta_static_head_t __stop_ta_head_section;
+extern TEE_Result init_teecore(void);
+
+/* teecore heap address/size is defined in scatter file */
+extern unsigned char teecore_heap_start;
+extern unsigned char teecore_heap_end;
+
+#endif /* TEE_MON_H */
diff --git a/core/arch/arm32/include/sm/teesmc.h b/core/arch/arm32/include/sm/teesmc.h
new file mode 100644
index 00000000000..11ba4f94ae9
--- /dev/null
+++ b/core/arch/arm32/include/sm/teesmc.h
@@ -0,0 +1,716 @@
+/*
+ * Copyright (c) 2014, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef TEESMC_H
+#define TEESMC_H
+
+#ifndef ASM
+/*
+ * This section depends on uint64_t, uint32_t and uint8_t already being
+ * defined. Since this file is used in several different environments
+ * (secure world OS and normal world Linux kernel to start with) where
+ * stdint.h may not be available it's the responsibility of the one
+ * including this file to provide those types.
+ */
+
+/*
+ * Trusted OS SMC interface.
+ *
+ * The SMC interface follows SMC Calling Convention
+ * (ARM_DEN0028A_SMC_Calling_Convention).
+ *
+ * The primary objective of this API is to provide a transport layer on
+ * which a Global Platform compliant TEE interface can be deployed. But the
+ * interface can also be used for other implementations.
+ *
+ * This file is divided in two parts.
+ * Part 1 deals with passing parameters to Trusted Applications running in
+ * a trusted OS in secure world.
+ * Part 2 deals with the lower level handling of the SMC.
+ */
+
+/*
+ *******************************************************************************
+ * Part 1 - passing parameters to Trusted Applications
+ *******************************************************************************
+ */
+
+/*
+ * Same values as TEE_PARAM_* from TEE Internal API
+ */
+#define TEESMC_ATTR_TYPE_NONE 0
+#define TEESMC_ATTR_TYPE_VALUE_INPUT 1
+#define TEESMC_ATTR_TYPE_VALUE_OUTPUT 2
+#define TEESMC_ATTR_TYPE_VALUE_INOUT 3
+#define TEESMC_ATTR_TYPE_MEMREF_INPUT 5
+#define TEESMC_ATTR_TYPE_MEMREF_OUTPUT 6
+#define TEESMC_ATTR_TYPE_MEMREF_INOUT 7
+
+#define TEESMC_ATTR_TYPE_MASK 0x7
+
+/*
+ * Meta parameter to be absorbed by the Secure OS and not passed
+ * to the Trusted Application.
+ *
+ * One example of this is a struct teesmc_meta_open_session which
+ * is added to TEESMC{32,64}_CMD_OPEN_SESSION.
+ */
+#define TEESMC_ATTR_META 0x8
+
+/*
+ * Used as an indication from normal world of compatible cache usage.
+ * 'I' stands for inner cache and 'O' for outer cache.
+ */
+#define TEESMC_ATTR_CACHE_I_NONCACHE 0x0
+#define TEESMC_ATTR_CACHE_I_WRITE_THR 0x1
+#define TEESMC_ATTR_CACHE_I_WRITE_BACK 0x2
+#define TEESMC_ATTR_CACHE_O_NONCACHE 0x0
+#define TEESMC_ATTR_CACHE_O_WRITE_THR 0x4
+#define TEESMC_ATTR_CACHE_O_WRITE_BACK 0x8
+
+#define TEESMC_ATTR_CACHE_DEFAULT (TEESMC_ATTR_CACHE_I_WRITE_BACK | \
+ TEESMC_ATTR_CACHE_O_WRITE_BACK)
+
+#define TEESMC_ATTR_CACHE_SHIFT 4
+#define TEESMC_ATTR_CACHE_MASK 0xf
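+
+/*
+ * Example: under the encoding above, the attr word for a cached input
+ * memref would be built as TEESMC_ATTR_TYPE_MEMREF_INPUT |
+ * (TEESMC_ATTR_CACHE_DEFAULT << TEESMC_ATTR_CACHE_SHIFT), i.e.
+ * 0x5 | (0xa << 4) = 0xa5.
+ */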
+
+#define TEESMC_CMD_OPEN_SESSION 0
+#define TEESMC_CMD_INVOKE_COMMAND 1
+#define TEESMC_CMD_CLOSE_SESSION 2
+#define TEESMC_CMD_CANCEL 3
+
+/**
+ * struct teesmc32_param_memref - memory reference
+ * @buf_ptr: Address of the buffer
+ * @size: Size of the buffer
+ *
+ * Secure and normal world communicate pointers via physical addresses
+ * instead of the virtual addresses usually used for pointers. This is
+ * because secure and normal world have completely independent memory
+ * mappings. Normal world can even have a hypervisor which needs to
+ * translate the guest physical address (AKA IPA in ARM lingo) to a real
+ * physical address before passing the structure to secure world.
+ */
+struct teesmc32_param_memref {
+ uint32_t buf_ptr;
+ uint32_t size;
+};
+
+/**
+ * struct teesmc64_param_memref - memory reference
+ * @buf_ptr: Address of the buffer
+ * @size: Size of the buffer
+ *
+ * See description of struct teesmc32_param_memref.
+ */
+struct teesmc64_param_memref {
+ uint64_t buf_ptr;
+ uint64_t size;
+};
+
+/**
+ * struct teesmc32_param_value - values
+ * @a: first value
+ * @b: second value
+ */
+struct teesmc32_param_value {
+ uint32_t a;
+ uint32_t b;
+};
+
+/**
+ * struct teesmc64_param_value - values
+ * @a: first value
+ * @b: second value
+ */
+struct teesmc64_param_value {
+ uint64_t a;
+ uint64_t b;
+};
+
+/**
+ * struct teesmc32_param - parameter
+ * @attr: attributes
+ * @memref: a memory reference
+ * @value: a value
+ *
+ * attr & TEESMC_ATTR_TYPE_MASK indicates if memref or value is used in the
+ * union. TEESMC_ATTR_TYPE_VALUE_* indicates value and
+ * TEESMC_ATTR_TYPE_MEMREF_* indicates memref. TEESMC_ATTR_TYPE_NONE
+ * indicates that none of the members are used.
+ */
+struct teesmc32_param {
+ uint32_t attr;
+ union {
+ struct teesmc32_param_memref memref;
+ struct teesmc32_param_value value;
+ } u;
+};
+
+/**
+ * struct teesmc64_param - parameter
+ * @attr: attributes
+ * @memref: a memory reference
+ * @value: a value
+ *
+ * See description of struct teesmc32_param.
+ */
+struct teesmc64_param {
+ uint64_t attr;
+ union {
+ struct teesmc64_param_memref memref;
+ struct teesmc64_param_value value;
+ } u;
+};
+
+/**
+ * struct teesmc32_arg - SMC argument for Trusted OS
+ * @cmd: Command, one of TEESMC_CMD_*
+ * @ta_func: Trusted Application function, specific to the Trusted Application,
+ * used if cmd == TEESMC_CMD_INVOKE_COMMAND
+ * @session: In parameter for all TEESMC_CMD_* except
+ * TEESMC_CMD_OPEN_SESSION where it's an output parameter instead
+ * @ret: return value
+ * @ret_origin: origin of the return value
+ * @num_params: number of parameters supplied to the OS Command
+ * @params: the parameters supplied to the OS Command
+ *
+ * All normal SMC calls to the Trusted OS use this struct. If cmd requires
+ * further information than what these fields hold it can be passed as a
+ * parameter tagged as meta (setting the TEESMC_ATTR_META bit in the
+ * attr of the corresponding param). This is used for TEESMC_CMD_OPEN_SESSION
+ * to pass a struct teesmc_meta_open_session which is needed to find the
+ * Trusted Application and to indicate the credentials of the client.
+ */
+struct teesmc32_arg {
+ uint32_t cmd;
+ uint32_t ta_func;
+ uint32_t session;
+ uint32_t ret;
+ uint32_t ret_origin;
+ uint32_t num_params;
+#if 0
+ /*
+ * Commented out elements used to visualize the layout dynamic part
+ * of the struct. Note that these fields are not available at all
+ * if num_params == 0.
+ *
+ * params is accessed through the macro TEESMC32_GET_PARAMS
+ */
+
+ struct teesmc32_param params[num_params];
+#endif
+};
+
+/**
+ * TEESMC32_GET_PARAMS - return pointer to struct teesmc32_param
+ *
+ * @x: Pointer to a struct teesmc32_arg
+ *
+ * Returns a pointer to the params[] inside a struct teesmc32_arg.
+ */
+#define TEESMC32_GET_PARAMS(x) \
+ (struct teesmc32_param *)(((struct teesmc32_arg *)(x)) + 1)
+
+/**
+ * TEESMC32_GET_ARG_SIZE - return size of struct teesmc32_arg
+ *
+ * @num_params: Number of parameters embedded in the struct teesmc32_arg
+ *
+ * Returns the size of the struct teesmc32_arg together with the number
+ * of embedded parameters.
+ */
+#define TEESMC32_GET_ARG_SIZE(num_params) \
+ (sizeof(struct teesmc32_arg) + \
+ sizeof(struct teesmc32_param) * (num_params))
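+
+/*
+ * Example (sketch): filling an argument struct for
+ * TEESMC_CMD_INVOKE_COMMAND with one value parameter. alloc_arg() is a
+ * hypothetical allocator returning suitable argument memory; session
+ * and func are placeholders:
+ *
+ *	struct teesmc32_arg *arg = alloc_arg(TEESMC32_GET_ARG_SIZE(1));
+ *	struct teesmc32_param *params = TEESMC32_GET_PARAMS(arg);
+ *
+ *	arg->cmd = TEESMC_CMD_INVOKE_COMMAND;
+ *	arg->session = session;
+ *	arg->ta_func = func;
+ *	arg->num_params = 1;
+ *	params[0].attr = TEESMC_ATTR_TYPE_VALUE_INPUT;
+ *	params[0].u.value.a = 42;
+ *	params[0].u.value.b = 0;
+ */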
+
+/**
+ * struct teesmc64_arg - SMC argument for Trusted OS
+ * @cmd: OS Command, one of TEESMC_CMD_*
+ * @ta_func: Trusted Application function, specific to the Trusted Application
+ * @session: In parameter for all TEESMC_CMD_* except
+ * TEESMC_CMD_OPEN_SESSION where it's an output parameter instead
+ * @ret: return value
+ * @ret_origin: origin of the return value
+ * @num_params: number of parameters supplied to the OS Command
+ * @params: the parameters supplied to the OS Command
+ *
+ * See description of struct teesmc32_arg.
+ */
+struct teesmc64_arg {
+ uint64_t cmd;
+ uint64_t ta_func;
+ uint64_t session;
+ uint64_t ret;
+ uint64_t ret_origin;
+ uint64_t num_params;
+#if 0
+ /*
+ * Commented out element used to visualize the layout of the dynamic
+ * part of the struct. Note that this field is not available at all
+ * if num_params == 0.
+ *
+ * params is accessed through the macro TEESMC64_GET_PARAMS
+ */
+
+ struct teesmc64_param params[num_params];
+#endif
+};
+
+/**
+ * TEESMC64_GET_PARAMS - return pointer to struct teesmc64_param
+ *
+ * @x: Pointer to a struct teesmc64_arg
+ *
+ * Returns a pointer to the params[] inside a struct teesmc64_arg.
+ */
+#define TEESMC64_GET_PARAMS(x) \
+ (struct teesmc64_param *)(((struct teesmc64_arg *)(x)) + 1)
+
+/**
+ * TEESMC64_GET_ARG_SIZE - return size of struct teesmc64_arg
+ *
+ * @num_params: Number of parameters embedded in the struct teesmc64_arg
+ *
+ * Returns the size of the struct teesmc64_arg together with the number
+ * of embedded parameters.
+ */
+#define TEESMC64_GET_ARG_SIZE(num_params) \
+ (sizeof(struct teesmc64_arg) + \
+ sizeof(struct teesmc64_param) * (num_params))
+
+#define TEESMC_UUID_LEN 16
+
+/**
+ * struct teesmc_meta_open_session - additional parameters for
+ * TEESMC32_CMD_OPEN_SESSION and
+ * TEESMC64_CMD_OPEN_SESSION
+ * @uuid: UUID of the Trusted Application
+ * @clnt_uuid: UUID of client
+ * @clnt_login: Login class of client, TEE_LOGIN_* if Global Platform
+ * compliant
+ *
+ * This struct is passed in the first parameter as an input memref tagged
+ * as meta on a TEESMC{32,64}_CMD_OPEN_SESSION cmd. It's important
+ * that it really is the first parameter to make it easy for an eventual
+ * hypervisor to inspect and possibly update clnt_* values.
+ */
+struct teesmc_meta_open_session {
+ uint8_t uuid[TEESMC_UUID_LEN];
+ uint8_t clnt_uuid[TEESMC_UUID_LEN];
+ uint32_t clnt_login;
+};
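+
+/*
+ * Example (sketch): an open-session request carries this struct as its
+ * first parameter, an input memref tagged as meta. meta_pa is assumed
+ * to be the physical address of a filled-in struct
+ * teesmc_meta_open_session:
+ *
+ *	arg->cmd = TEESMC_CMD_OPEN_SESSION;
+ *	arg->num_params = 1;
+ *	params[0].attr = TEESMC_ATTR_TYPE_MEMREF_INPUT | TEESMC_ATTR_META;
+ *	params[0].u.memref.buf_ptr = meta_pa;
+ *	params[0].u.memref.size = sizeof(struct teesmc_meta_open_session);
+ */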
+
+
+#endif /*!ASM*/
+
+/*
+ *******************************************************************************
+ * Part 2 - low level SMC interaction
+ *******************************************************************************
+ */
+
+#define TEESMC_32 0
+#define TEESMC_64 0x40000000
+#define TEESMC_FAST_CALL 0x80000000
+#define TEESMC_STD_CALL 0
+
+#define TEESMC_OWNER_MASK 0x3F
+#define TEESMC_OWNER_SHIFT 24
+
+#define TEESMC_FUNC_MASK 0xFFFF
+
+#define TEESMC_IS_FAST_CALL(smc_val) ((smc_val) & TEESMC_FAST_CALL)
+#define TEESMC_IS_64(smc_val) ((smc_val) & TEESMC_64)
+#define TEESMC_FUNC_NUM(smc_val) ((smc_val) & TEESMC_FUNC_MASK)
+#define TEESMC_OWNER_NUM(smc_val) (((smc_val) >> TEESMC_OWNER_SHIFT) & \
+ TEESMC_OWNER_MASK)
+
+#define TEESMC_CALL_VAL(type, calling_convention, owner, func_num) \
+ ((type) | (calling_convention) | \
+ (((owner) & TEESMC_OWNER_MASK) << TEESMC_OWNER_SHIFT) |\
+ ((func_num) & TEESMC_FUNC_MASK))
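+
+/*
+ * For example TEESMC32_CALLS_UID below expands to
+ * TEESMC_FAST_CALL | (TEESMC_OWNER_TRUSTED_OS_API << TEESMC_OWNER_SHIFT) |
+ * TEESMC32_FUNCID_CALLS_UID = 0x80000000 | 0x3f000000 | 0xff01 = 0xbf00ff01.
+ */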
+
+#define TEESMC_OWNER_ARCH 0
+#define TEESMC_OWNER_CPU 1
+#define TEESMC_OWNER_SIP 2
+#define TEESMC_OWNER_OEM 3
+#define TEESMC_OWNER_STANDARD 4
+#define TEESMC_OWNER_TRUSTED_APP 48
+#define TEESMC_OWNER_TRUSTED_OS 50
+
+#define TEESMC_OWNER_TRUSTED_OS_API 63
+
+/*
+ * Function specified by SMC Calling convention.
+ */
+#define TEESMC32_FUNCID_CALLS_COUNT 0xFF00
+#define TEESMC32_CALLS_COUNT \
+ TEESMC_CALL_VAL(TEESMC_32, TEESMC_FAST_CALL, \
+ TEESMC_OWNER_TRUSTED_OS_API, \
+ TEESMC32_FUNCID_CALLS_COUNT)
+
+/*
+ * Function specified by SMC Calling convention
+ *
+ * Return one of the following UIDs if using API specified in this file
+ * without further extensions:
+ * 65cb6b93-af0c-4617-8ed6-644a8d1140f8 : Only 32 bit calls are supported
+ * 65cb6b93-af0c-4617-8ed6-644a8d1140f9 : Both 32 and 64 bit calls are supported
+ */
+#define TEESMC_UID_R0 0x65cb6b93
+#define TEESMC_UID_R1 0xaf0c4617
+#define TEESMC_UID_R2 0x8ed6644a
+#define TEESMC_UID32_R3 0x8d1140f8
+#define TEESMC_UID64_R3 0x8d1140f9
+#define TEESMC32_FUNCID_CALLS_UID 0xFF01
+#define TEESMC32_CALLS_UID \
+ TEESMC_CALL_VAL(TEESMC_32, TEESMC_FAST_CALL, \
+ TEESMC_OWNER_TRUSTED_OS_API, \
+ TEESMC32_FUNCID_CALLS_UID)
+
+/*
+ * Function specified by SMC Calling convention
+ *
+ * Returns 1.0 if using API specified in this file without further extensions.
+ */
+#define TEESMC_REVISION_MAJOR 1
+#define TEESMC_REVISION_MINOR 0
+#define TEESMC32_FUNCID_CALLS_REVISION 0xFF03
+#define TEESMC32_CALLS_REVISION \
+ TEESMC_CALL_VAL(TEESMC_32, TEESMC_FAST_CALL, \
+ TEESMC_OWNER_TRUSTED_OS_API, \
+ TEESMC32_FUNCID_CALLS_REVISION)
+
+/*
+ * Get UUID of Trusted OS.
+ *
+ * Used by non-secure world to figure out which Trusted OS is installed.
+ * Note that returned UUID is the UUID of the Trusted OS, not of the API.
+ *
+ * Returns UUID in r0-3/w0-3 in the same way as TEESMC32_CALLS_UID
+ * described above.
+ */
+#define TEESMC_FUNCID_GET_OS_UUID 0
+#define TEESMC32_CALL_GET_OS_UUID \
+ TEESMC_CALL_VAL(TEESMC_32, TEESMC_FAST_CALL, TEESMC_OWNER_TRUSTED_OS, \
+ TEESMC_FUNCID_GET_OS_UUID)
+
+/*
+ * Get revision of Trusted OS.
+ *
+ * Used by non-secure world to figure out which version of the Trusted OS
+ * is installed. Note that the returned revision is the revision of the
+ * Trusted OS, not of the API.
+ *
+ * Returns revision in r0-1/w0-1 in the same way as TEESMC32_CALLS_REVISION
+ * described above.
+ */
+#define TEESMC_FUNCID_GET_OS_REVISION 1
+#define TEESMC32_CALL_GET_OS_REVISION \
+ TEESMC_CALL_VAL(TEESMC_32, TEESMC_FAST_CALL, TEESMC_OWNER_TRUSTED_OS, \
+ TEESMC_FUNCID_GET_OS_REVISION)
+
+
+
+/*
+ * Call with struct teesmc32_arg as argument
+ *
+ * Call register usage:
+ * r0/x0 SMC Function ID, TEESMC32_CALL_WITH_ARG
+ * r1/x1 Physical pointer to a struct teesmc32_arg
+ * r2-6/x2-6 Not used
+ * r7/x7 Hypervisor Client ID register
+ *
+ * Normal return register usage:
+ * r0/x0 Return value, TEESMC_RETURN_*
+ * r1-3/x1-3 Not used
+ * r4-7/x4-7 Preserved
+ *
+ * Ebusy return register usage:
+ * r0/x0 Return value, TEESMC_RETURN_EBUSY
+ * r1-3/x1-3 Preserved
+ * r4-7/x4-7 Preserved
+ *
+ * RPC return register usage:
+ * r0/x0 Return value, TEESMC_RETURN_IS_RPC(val)
+ * r1-2/x1-2 RPC parameters
+ * r3-7/x3-7 Resume information, must be preserved
+ *
+ * Possible return values:
+ * TEESMC_RETURN_UNKNOWN_FUNCTION Trusted OS does not recognize this
+ * function.
+ * TEESMC_RETURN_OK Call completed, result updated in
+ * the previously supplied struct
+ * teesmc32_arg.
+ * TEESMC_RETURN_EBUSY Trusted OS busy, try again later.
+ * TEESMC_RETURN_EBADADDR Bad physical pointer to struct
+ * teesmc32_arg.
+ * TEESMC_RETURN_EBADCMD Bad/unknown cmd in struct teesmc32_arg
+ * TEESMC_RETURN_IS_RPC() Call suspended by RPC call to normal
+ * world.
+ */
+#define TEESMC_FUNCID_CALL_WITH_ARG 2
+#define TEESMC32_CALL_WITH_ARG \
+ TEESMC_CALL_VAL(TEESMC_32, TEESMC_STD_CALL, TEESMC_OWNER_TRUSTED_OS, \
+ TEESMC_FUNCID_CALL_WITH_ARG)
+/* Same as TEESMC32_CALL_WITH_ARG but a "fast call". */
+#define TEESMC32_FASTCALL_WITH_ARG \
+ TEESMC_CALL_VAL(TEESMC_32, TEESMC_FAST_CALL, TEESMC_OWNER_TRUSTED_OS, \
+ TEESMC_FUNCID_CALL_WITH_ARG)
+
+/*
+ * Call with struct teesmc64_arg as argument
+ *
+ * See description of TEESMC32_CALL_WITH_ARG above, uses struct
+ * teesmc64_arg in x1 instead.
+ */
+#define TEESMC64_CALL_WITH_ARG \
+ TEESMC_CALL_VAL(TEESMC_64, TEESMC_STD_CALL, TEESMC_OWNER_TRUSTED_OS, \
+ TEESMC_FUNCID_CALL_WITH_ARG)
+/* Same as TEESMC64_CALL_WITH_ARG but a "fast call". */
+#define TEESMC64_FASTCALL_WITH_ARG \
+ TEESMC_CALL_VAL(TEESMC_64, TEESMC_FAST_CALL, TEESMC_OWNER_TRUSTED_OS, \
+ TEESMC_FUNCID_CALL_WITH_ARG)
+
+/*
+ * Resume from RPC (for example after processing an IRQ)
+ *
+ * Call register usage:
+ * r0/x0 SMC Function ID,
+ * TEESMC32_CALL_RETURN_FROM_RPC or
+ * TEESMC32_FASTCALL_RETURN_FROM_RPC
+ * r1-3/x1-3 Value of r1-3/x1-3 when TEESMC32_CALL_WITH_ARG returned
+ * a TEESMC_RETURN_IS_RPC() value in r0/x0
+ *
+ * Return register usage is the same as for TEESMC32_CALL_WITH_ARG above.
+ *
+ * Possible return values
+ * TEESMC_RETURN_UNKNOWN_FUNCTION Trusted OS does not recognize this
+ * function.
+ * TEESMC_RETURN_OK Original call completed, result
+ * updated in the previously supplied
+ * struct teesmc32_arg.
+ * TEESMC_RETURN_IS_RPC() Call suspended by RPC call to normal
+ * world.
+ * TEESMC_RETURN_EBUSY Trusted OS busy, try again later.
+ * TEESMC_RETURN_ERESUME Resume failed, the opaque resume
+ * information was corrupt.
+ */
+#define TEESMC_FUNCID_RETURN_FROM_RPC 3
+#define TEESMC32_CALL_RETURN_FROM_RPC \
+ TEESMC_CALL_VAL(TEESMC_32, TEESMC_STD_CALL, TEESMC_OWNER_TRUSTED_OS, \
+ TEESMC_FUNCID_RETURN_FROM_RPC)
+/* Same as TEESMC32_CALL_RETURN_FROM_RPC but a "fast call". */
+#define TEESMC32_FASTCALL_RETURN_FROM_RPC \
+ TEESMC_CALL_VAL(TEESMC_32, TEESMC_FAST_CALL, TEESMC_OWNER_TRUSTED_OS, \
+ TEESMC_FUNCID_RETURN_FROM_RPC)
+
+/*
+ * Resume from RPC (for example after processing an IRQ)
+ *
+ * See description of TEESMC32_CALL_RETURN_FROM_RPC above, used when
+ * it's a 64bit call that has returned.
+ */
+#define TEESMC64_CALL_RETURN_FROM_RPC \
+ TEESMC_CALL_VAL(TEESMC_64, TEESMC_STD_CALL, TEESMC_OWNER_TRUSTED_OS, \
+ TEESMC_FUNCID_RETURN_FROM_RPC)
+/* Same as TEESMC64_CALL_RETURN_FROM_RPC but a "fast call". */
+#define TEESMC64_FASTCALL_RETURN_FROM_RPC \
+ TEESMC_CALL_VAL(TEESMC_64, TEESMC_FAST_CALL, TEESMC_OWNER_TRUSTED_OS, \
+ TEESMC_FUNCID_RETURN_FROM_RPC)
+
+/*
+ * From secure monitor to Trusted OS, handle FIQ
+ *
+ * A virtual call which is injected by the Secure Monitor when an FIQ is
+ * raised while in normal world (SCR_NS is set). The monitor restores
+ * secure architecture registers and the secure SP_EL1, then jumps to the
+ * previous secure ELR_EL3. The Trusted OS should preserve all general purpose
+ * registers.
+ *
+ * Call register usage:
+ * r0/x0 SMC Function ID, TEESMC32_CALL_HANDLE_FIQ
+ * r1-7/x1-7 Not used, but must be preserved
+ *
+ * Return register usage:
+ * Not used
+ */
+#define TEESMC_FUNCID_CALL_HANDLE_FIQ 0xf000
+#define TEESMC32_CALL_HANDLE_FIQ \
+ TEESMC_CALL_VAL(TEESMC_32, TEESMC_FAST_CALL, TEESMC_OWNER_TRUSTED_OS, \
+ TEESMC_FUNCID_CALL_HANDLE_FIQ)
+
+#define TEESMC_RETURN_RPC_PREFIX_MASK 0xFFFF0000
+#define TEESMC_RETURN_RPC_PREFIX 0xFFFF0000
+#define TEESMC_RETURN_RPC_FUNC_MASK 0x0000FFFF
+
+#define TEESMC_RETURN_GET_RPC_FUNC(ret) ((ret) & TEESMC_RETURN_RPC_FUNC_MASK)
+
+#define TEESMC_RPC_VAL(func) ((func) | TEESMC_RETURN_RPC_PREFIX)
+
+/*
+ * Allocate argument memory for RPC parameter passing.
+ * Argument memory is used to hold a struct teesmc32_arg.
+ *
+ * "Call" register usage:
+ * r0/x0 This value, TEESMC_RETURN_RPC_ALLOC_ARG
+ * r1/x1 Size in bytes of required argument memory
+ * r2-7/x2-7 Resume information, must be preserved
+ *
+ * "Return" register usage:
+ * r0/x0 SMC Function ID, TEESMC32_CALL_RETURN_FROM_RPC if it was an
+ * AArch32 SMC return or TEESMC64_CALL_RETURN_FROM_RPC for
+ * AArch64 SMC return
+ * r1/x1 Physical pointer to allocated argument memory, 0 if size
+ * was 0 or if memory can't be allocated
+ * r2-7/x2-7 Preserved
+ */
+#define TEESMC_RPC_FUNC_ALLOC_ARG 0
+#define TEESMC_RETURN_RPC_ALLOC_ARG \
+ TEESMC_RPC_VAL(TEESMC_RPC_FUNC_ALLOC_ARG)
+
+/*
+ * Allocate payload memory for RPC parameter passing.
+ * Payload memory is used to hold the memory referred to by struct
+ * teesmc32_param_memref.
+ *
+ * "Call" register usage:
+ * r0/x0 This value, TEESMC_RETURN_RPC_ALLOC_PAYLOAD
+ * r1/x1 Size in bytes of required payload memory
+ * r2-7/x2-7 Resume information, must be preserved
+ *
+ * "Return" register usage:
+ * r0/x0 SMC Function ID, TEESMC32_CALL_RETURN_FROM_RPC if it was an
+ * AArch32 SMC return or TEESMC64_CALL_RETURN_FROM_RPC for
+ * AArch64 SMC return
+ * r1/x1 Physical pointer to allocated payload memory, 0 if size
+ * was 0 or if memory can't be allocated
+ * r2-7/x2-7 Preserved
+ */
+#define TEESMC_RPC_FUNC_ALLOC_PAYLOAD 1
+#define TEESMC_RETURN_RPC_ALLOC_PAYLOAD \
+ TEESMC_RPC_VAL(TEESMC_RPC_FUNC_ALLOC_PAYLOAD)
+
+/*
+ * Free memory previously allocated by TEESMC_RETURN_RPC_ALLOC_ARG.
+ *
+ * "Call" register usage:
+ * r0/x0 This value, TEESMC_RETURN_RPC_FREE_ARG
+ * r1/x1 Physical pointer to previously allocated argument memory
+ * r2-7/x2-7 Resume information, must be preserved
+ *
+ * "Return" register usage:
+ * r0/x0 SMC Function ID, TEESMC32_CALL_RETURN_FROM_RPC if it was an
+ * AArch32 SMC return or TEESMC64_CALL_RETURN_FROM_RPC for
+ * AArch64 SMC return
+ * r1/x1 Not used
+ * r2-7/x2-7 Preserved
+ */
+#define TEESMC_RPC_FUNC_FREE_ARG 2
+#define TEESMC_RETURN_RPC_FREE_ARG TEESMC_RPC_VAL(TEESMC_RPC_FUNC_FREE_ARG)
+
+/*
+ * Free memory previously allocated by TEESMC_RETURN_RPC_ALLOC_PAYLOAD.
+ *
+ * "Call" register usage:
+ * r0/x0 This value, TEESMC_RETURN_RPC_FREE_PAYLOAD
+ * r1/x1 Physical pointer to previously allocated payload memory
+ * r3-7/x3-7 Resume information, must be preserved
+ *
+ * "Return" register usage:
+ * r0/x0 SMC Function ID, TEESMC32_CALL_RETURN_FROM_RPC if it was an
+ * AArch32 SMC return or TEESMC64_CALL_RETURN_FROM_RPC for
+ * AArch64 SMC return
+ * r1-2/x1-2 Not used
+ * r3-7/x3-7 Preserved
+ */
+#define TEESMC_RPC_FUNC_FREE_PAYLOAD 3
+#define TEESMC_RETURN_RPC_FREE_PAYLOAD \
+ TEESMC_RPC_VAL(TEESMC_RPC_FUNC_FREE_PAYLOAD)
+
+/*
+ * Deliver an IRQ in normal world.
+ *
+ * "Call" register usage:
+ * r0/x0 TEESMC_RETURN_RPC_IRQ
+ * r1-7/x1-7 Resume information, must be preserved
+ *
+ * "Return" register usage:
+ * r0/x0 SMC Function ID, TEESMC32_CALL_RETURN_FROM_RPC if it was an
+ * AArch32 SMC return or TEESMC64_CALL_RETURN_FROM_RPC for
+ * AArch64 SMC return
+ * r1-7/x1-7 Preserved
+ */
+#define TEESMC_RPC_FUNC_IRQ 4
+#define TEESMC_RETURN_RPC_IRQ TEESMC_RPC_VAL(TEESMC_RPC_FUNC_IRQ)
+
+/*
+ * Do an RPC request. The supplied struct teesmc{32,64}_arg tells which
+ * request to do and the parameters for the request. The following fields
+ * are used (the rest are unused):
+ * - cmd the Request ID
+ * - ret return value of the request, filled in by normal world
+ * - num_params number of parameters for the request
+ * - params the parameters, each carrying its attributes in attr
+ *
+ * "Call" register usage:
+ * r0/x0 TEESMC_RETURN_RPC_CMD
+ * r1/x1 Physical pointer to a struct teesmc32_arg if returning from
+ * a AArch32 SMC or a struct teesmc64_arg if returning from a
+ * AArch64 SMC, must be preserved, only the data should
+ * be updated
+ * r2-7/x2-7 Resume information, must be preserved
+ *
+ * "Return" register usage:
+ * r0/x0 SMC Function ID, TEESMC32_CALL_RETURN_FROM_RPC if it was an
+ * AArch32 SMC return or TEESMC64_CALL_RETURN_FROM_RPC for
+ * AArch64 SMC return
+ * r1-7/x1-7 Preserved
+ */
+#define TEESMC_RPC_FUNC_CMD 5
+#define TEESMC_RETURN_RPC_CMD TEESMC_RPC_VAL(TEESMC_RPC_FUNC_CMD)
+
+
+/* Returned in r0 */
+#define TEESMC_RETURN_UNKNOWN_FUNCTION 0xFFFFFFFF
+
+/* Returned in r0 only from Trusted OS functions */
+#define TEESMC_RETURN_OK 0x0
+#define TEESMC_RETURN_EBUSY 0x1
+#define TEESMC_RETURN_ERESUME 0x2
+#define TEESMC_RETURN_EBADADDR 0x3
+#define TEESMC_RETURN_EBADCMD 0x4
+#define TEESMC_RETURN_IS_RPC(ret) \
+ (((ret) & TEESMC_RETURN_RPC_PREFIX_MASK) == TEESMC_RETURN_RPC_PREFIX)
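+
+/*
+ * Example (sketch): a minimal normal world call loop. smc() is a
+ * hypothetical helper issuing the SMC with r0-7 taken from regs[0..7]
+ * and writing the returned registers back into regs; handle_rpc() is a
+ * hypothetical RPC dispatcher:
+ *
+ *	regs[0] = TEESMC32_CALL_WITH_ARG;
+ *	regs[1] = arg_pa;
+ *	while (true) {
+ *		smc(regs);
+ *		if (!TEESMC_RETURN_IS_RPC(regs[0]))
+ *			break;
+ *		handle_rpc(TEESMC_RETURN_GET_RPC_FUNC(regs[0]), regs);
+ *		regs[0] = TEESMC32_CALL_RETURN_FROM_RPC;
+ *	}
+ */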
+
+/*
+ * Returned in r1 by Trusted OS functions if TEESMC_RETURN_IS_RPC(r0)
+ */
+#define TEESMC_RPC_REQUEST_IRQ 0x0
+
+#endif /* TEESMC_H */
diff --git a/core/arch/arm32/include/sm/teesmc_st.h b/core/arch/arm32/include/sm/teesmc_st.h
new file mode 100644
index 00000000000..16acb3e9a1b
--- /dev/null
+++ b/core/arch/arm32/include/sm/teesmc_st.h
@@ -0,0 +1,161 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef TEESMC_ST_H
+#define TEESMC_ST_H
+
+#define TEESMC_ST_RETURN_NOTAVAIL 0x5700
+
+/*
+ * Get Shared Memory Config
+ *
+ * Returns the Secure/Non-secure shared memory config.
+ *
+ * Call register usage:
+ * r0 SMC Function ID, TEESMC32_ST_FASTCALL_GET_SHM_CONFIG
+ * r1-6 Not used
+ * r7 Hypervisor Client ID register
+ *
+ * Have config return register usage:
+ * r0 TEESMC_RETURN_OK
+ * r1 Physical address of start of SHM
+ * r2 Size of SHM
+ * r3 1 if SHM is cached, 0 if uncached.
+ * r4-7 Preserved
+ *
+ * Not available register usage:
+ * r0 TEESMC_ST_RETURN_NOTAVAIL
+ * r1-3 Not used
+ * r4-7 Preserved
+ */
+#define TEESMC_ST_FUNCID_GET_SHM_CONFIG 0x5700
+#define TEESMC32_ST_FASTCALL_GET_SHM_CONFIG \
+ TEESMC_CALL_VAL(TEESMC_32, TEESMC_FAST_CALL, TEESMC_OWNER_TRUSTED_OS, \
+ TEESMC_ST_FUNCID_GET_SHM_CONFIG)
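+
+/*
+ * Example (sketch), assuming a hypothetical smc() helper that issues
+ * the SMC with r0-7 taken from regs[0..7] and writes the returned
+ * registers back into regs:
+ *
+ *	regs[0] = TEESMC32_ST_FASTCALL_GET_SHM_CONFIG;
+ *	smc(regs);
+ *	if (regs[0] == TEESMC_RETURN_OK) {
+ *		shm_pa = regs[1];
+ *		shm_size = regs[2];
+ *		shm_is_cached = regs[3];
+ *	}
+ */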
+
+/*
+ * Configures L2CC mutex
+ *
+ * Disables or enables usage of the L2CC mutex. Returns or sets the
+ * physical address of the L2CC mutex.
+ *
+ * Call register usage:
+ * r0 SMC Function ID, TEESMC32_ST_FASTCALL_L2CC_MUTEX
+ * r1 TEESMC_ST_L2CC_MUTEX_GET_ADDR Get physical address of mutex
+ * TEESMC_ST_L2CC_MUTEX_SET_ADDR Set physical address of mutex
+ * TEESMC_ST_L2CC_MUTEX_ENABLE Enable usage of mutex
+ * TEESMC_ST_L2CC_MUTEX_DISABLE Disable usage of mutex
+ * r2 if r1 == TEESMC_ST_L2CC_MUTEX_SET_ADDR, physical address of mutex
+ * r3-6 Not used
+ * r7 Hypervisor Client ID register
+ *
+ * Normal return register usage:
+ * r0 TEESMC_RETURN_OK
+ * r1 Preserved
+ * r2 if r1 == TEESMC_ST_L2CC_MUTEX_GET_ADDR, physical address of mutex
+ * r3-7 Preserved
+ *
+ * Error return register usage:
+ * r0 TEESMC_ST_RETURN_NOTAVAIL Physical address not available
+ * TEESMC_RETURN_EBADADDR Bad supplied physical address
+ * TEESMC_RETURN_EBADCMD Unsupported value in r1
+ * r1-7 Preserved
+ */
+#define TEESMC_ST_L2CC_MUTEX_GET_ADDR 0
+#define TEESMC_ST_L2CC_MUTEX_SET_ADDR 1
+#define TEESMC_ST_L2CC_MUTEX_ENABLE 2
+#define TEESMC_ST_L2CC_MUTEX_DISABLE 3
+#define TEESMC_ST_FUNCID_L2CC_MUTEX 0x5701
+#define TEESMC32_ST_FASTCALL_L2CC_MUTEX \
+ TEESMC_CALL_VAL(TEESMC_32, TEESMC_FAST_CALL, TEESMC_OWNER_TRUSTED_OS, \
+ TEESMC_ST_FUNCID_L2CC_MUTEX)
+
+/*
+ * Allocate payload memory for RPC parameter passing.
+ *
+ * "Call" register usage:
+ * r0/x0 This value, TEESMC_RETURN_ST_RPC_ALLOC_PAYLOAD
+ * r1/x1 Size in bytes of required payload memory
+ * r2/x2 Not used
+ * r3-7/x3-7 Resume information, must be preserved
+ *
+ * "Return" register usage:
+ * r0/x0 SMC Function ID, TEESMC32_CALL_RETURN_FROM_RPC if it was an
+ * AArch32 SMC return or TEESMC64_CALL_RETURN_FROM_RPC for
+ * AArch64 SMC return
+ * r1/x1 Physical pointer to allocated payload memory, 0 if size
+ * was 0 or if memory can't be allocated
+ * r2/x2 Shared memory cookie used when freeing the memory
+ * r3-7/x3-7 Preserved
+ */
+#define TEESMC_ST_RPC_FUNC_ALLOC_PAYLOAD 0x5700
+#define TEESMC_RETURN_ST_RPC_ALLOC_PAYLOAD \
+ TEESMC_RPC_VAL(TEESMC_ST_RPC_FUNC_ALLOC_PAYLOAD)
+
+
+/*
+ * Free memory previously allocated by TEESMC_RETURN_ST_RPC_ALLOC_PAYLOAD
+ *
+ * "Call" register usage:
+ * r0/x0 This value, TEESMC_RETURN_ST_RPC_FREE_PAYLOAD
+ * r1/x1 Shared memory cookie belonging to this payload memory
+ * r2-7/x2-7 Resume information, must be preserved
+ *
+ * "Return" register usage:
+ * r0/x0 SMC Function ID, TEESMC32_CALL_RETURN_FROM_RPC if it was an
+ * AArch32 SMC return or TEESMC64_CALL_RETURN_FROM_RPC for
+ * AArch64 SMC return
+ * r2-7/x2-7 Preserved
+ */
+#define TEESMC_ST_RPC_FUNC_FREE_PAYLOAD 0x5701
+#define TEESMC_RETURN_ST_RPC_FREE_PAYLOAD \
+ TEESMC_RPC_VAL(TEESMC_ST_RPC_FUNC_FREE_PAYLOAD)
+
+/*
+ * Overriding default UID since the interface is extended
+ * 384fb3e0-e7f8-11e3-af63-0002a5d5c51b
+ */
+#define TEESMC_ST_UID_R0 0x384fb3e0
+#define TEESMC_ST_UID_R1 0xe7f811e3
+#define TEESMC_ST_UID_R2 0xaf630002
+#define TEESMC_ST_UID32_R3 0xa5d5c51b
+#define TEESMC_ST_UID64_R3 0xa5d5c51c
+
+#define TEESMC_ST_REVISION_MAJOR 1
+#define TEESMC_ST_REVISION_MINOR 0
+
+/*
+ * UUID for OP-TEE
+ * 486178e0-e7f8-11e3-bc5e-0002a5d5c51b
+ */
+#define TEESMC_OS_OPTEE_UUID_R0 0x486178e0
+#define TEESMC_OS_OPTEE_UUID_R1 0xe7f811e3
+#define TEESMC_OS_OPTEE_UUID_R2 0xbc5e0002
+#define TEESMC_OS_OPTEE_UUID_R3 0xa5d5c51b
+
+#define TEESMC_OS_OPTEE_REVISION_MAJOR 1
+#define TEESMC_OS_OPTEE_REVISION_MINOR 0
+
+#endif /*TEESMC_ST_H*/
diff --git a/core/arch/arm32/include/tee/entry.h b/core/arch/arm32/include/tee/entry.h
new file mode 100644
index 00000000000..0976432166b
--- /dev/null
+++ b/core/arch/arm32/include/tee/entry.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TEE_ENTRY_H
+#define TEE_ENTRY_H
+
+#include <kernel/thread.h>
+
+/* These functions are overridable by the specific target */
+void tee_entry_get_api_call_count(struct thread_smc_args *args);
+void tee_entry_get_api_uuid(struct thread_smc_args *args);
+void tee_entry_get_api_revision(struct thread_smc_args *args);
+void tee_entry_get_os_uuid(struct thread_smc_args *args);
+void tee_entry_get_os_revision(struct thread_smc_args *args);
+
+/*
+ * Returns the number of calls recognized by tee_entry(). Used by the
+ * specific target to calculate the total number of supported calls when
+ * overriding tee_entry_get_api_call_count().
+ */
+size_t tee_entry_generic_get_api_call_count(void);
+
+void tee_entry(struct thread_smc_args *args);
+
+#endif /*TEE_ENTRY_H*/
diff --git a/core/arch/arm32/kernel/chip_services.c b/core/arch/arm32/kernel/chip_services.c
new file mode 100644
index 00000000000..e9d753767ee
--- /dev/null
+++ b/core/arch/arm32/kernel/chip_services.c
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include
+#include
+#include
+#include
+#include
+
+#include
+
+/*
+ * enable_secure_wd() - enable the secure watchdog
+ */
+void enable_secure_wd(void)
+{
+ /*
+ * Only a stub.
+ * Real implementation is missing.
+ */
+}
diff --git a/core/arch/arm32/kernel/misc.S b/core/arch/arm32/kernel/misc.S
new file mode 100644
index 00000000000..15ff5fd3998
--- /dev/null
+++ b/core/arch/arm32/kernel/misc.S
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include
+#include
+#include
+
+FUNC get_core_pos , :
+ read_mpidr r0
+ /* Calculate CorePos = (ClusterId * 4) + CoreId */
+ and r1, r0, #MPIDR_CPU_MASK
+ and r0, r0, #MPIDR_CLUSTER_MASK
+ add r0, r1, r0, LSR #6
+ bx lr
+END_FUNC get_core_pos
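For reference, a C sketch of the computation get_core_pos performs, assuming the usual MPIDR layout implied by the masks (CPU id in the low bits, cluster id at bit 8, so shifting the masked cluster field right by 6 multiplies the cluster id by 4):

    static inline uint32_t core_pos_from_mpidr(uint32_t mpidr)
    {
    	return (mpidr & MPIDR_CPU_MASK) +
    	       ((mpidr & MPIDR_CLUSTER_MASK) >> 6);
    }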
diff --git a/core/arch/arm32/kernel/sub.mk b/core/arch/arm32/kernel/sub.mk
new file mode 100644
index 00000000000..7bc483efc75
--- /dev/null
+++ b/core/arch/arm32/kernel/sub.mk
@@ -0,0 +1,27 @@
+srcs-y += tee_core_trace.c
+cflags-tee_core_trace.c-y += -Wno-format
+cflags-tee_core_trace.c-y += -Wno-format-nonliteral -Wno-format-security
+
+srcs-y += tee_ta_manager.c
+cflags-tee_ta_manager.c-y += -Wno-declaration-after-statement -Wno-format
+cflags-tee_ta_manager.c-y += -Wno-unused-parameter
+cflags-tee_ta_manager.c-y += -Wno-format-nonliteral -Wno-format-security
+
+
+srcs-y += tee_sleep_services.c
+cflags-tee_sleep_services.c-y += -Wno-unused-parameter
+
+srcs-y += tee_time.c
+cflags-tee_time.c-y += -Wno-unused-parameter
+
+srcs-y += chip_services.c
+
+srcs-y += tee_misc.c
+srcs-y += tee_time_unpg.c
+srcs-y += tz_proc.S
+srcs-y += tz_ssvce.S
+srcs-y += tee_l2cc_mutex.c
+
+srcs-y += thread_asm.S
+srcs-y += thread.c
+srcs-y += misc.S
diff --git a/core/arch/arm32/kernel/tee_core_trace.c b/core/arch/arm32/kernel/tee_core_trace.c
new file mode 100644
index 00000000000..7b8bc09b60c
--- /dev/null
+++ b/core/arch/arm32/kernel/tee_core_trace.c
@@ -0,0 +1,215 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdarg.h>
+#include <stdio.h>
+#include <string.h>
+
+#define STR_TRACE_CORE "TEE-CORE-TZ"
+#include <kernel/tee_core_trace.h>
+
+#ifdef WITH_UART_DRV
+#include <drivers/uart.h>
+#include <platform_config.h>
+#else
+#include <asc.h>
+#endif
+
+/*****************************************************************************/
+
+/* Default trace level */
+int _trace_level = CFG_TEE_CORE_LOG_LEVEL;
+
+
+#ifdef WITH_UART_DRV
+static void output_string(const char *str)
+{
+ const char *p = str;
+
+ while (*p) {
+ uart_putc(*p, UART1_BASE);
+ p++;
+ }
+}
+
+static void output_flush(void)
+{
+ uart_flush_tx_fifo(UART1_BASE);
+}
+#else
+#define output_string(x) __asc_xmit(x)
+#define output_flush() __asc_flush()
+#endif
+
+/*****************************************************************************/
+
+void core_trace_test(void)
+{
+ INMSG("level: [%d]", _trace_level);
+ IMSG("current trace level = %d", _trace_level);
+ IMSG("Without args");
+ AMSG("[%d] and [%s]", TRACE_ALWAYS, "TRACE_ALWAYS");
+ EMSG("[%d] and [%s]", TRACE_ERROR, "TRACE_ERROR");
+ IMSG("[%d] and [%s]", TRACE_INFO, "TRACE_INFO");
+ DMSG("[%d] and [%s]", TRACE_DEBUG, "TRACE_DEBUG");
+ FMSG("[%d] and [%s]", TRACE_FLOW, "TRACE_FLOW");
+ AMSG_RAW("Raw trace in TEE CORE with level [%s]", "TRACE_ALWAYS");
+ AMSG_RAW(" __ end of raw trace\n");
+ DMSG_RAW("Raw trace in TEE CORE with level [%s]", "TRACE_DEBUG");
+ DMSG_RAW(" __ end of raw trace\n");
+ OUTMSG("");
+}
+
+/*****************************************************************************/
+
+void set_trace_level(int level)
+{
+ if (((int)level >= TRACE_MIN) && (level <= TRACE_MAX))
+ _trace_level = level;
+ else
+ AMSG("Can't set level [%d]", level);
+
+ core_trace_test();
+ AMSG_RAW("\nLevel set to [%d]\n", _trace_level);
+}
+
+int get_trace_level(void)
+{
+ return _trace_level;
+}
+
+/*****************************************************************************/
+
+static const char * const _trace_level_to_string[] = {
+ "NONE", "ALW", "ERR", "INF", "DBG", "FLW" };
+
+/* Format trace of user ta. Inline with kernel ta */
+static int format_trace(const char *function, int line, int level,
+ const char *prefix, const char *in, char *out)
+{
+ int nb_char = MAX_PRINT_SIZE;
+ const char *func;
+ int thread_id = 0;
+
+ if (function) {
+#ifdef TRACE_FUNC_LENGTH_CST
+ char func_buf[MAX_FUNC_PRINT_SIZE];
+ int flen = strlen(function);
+
+ /* Limit the function name to MAX_FUNC_PRINT_SIZE characters. */
+ strncpy(func_buf, function, flen > MAX_FUNC_PRINT_SIZE ?
+ (MAX_FUNC_PRINT_SIZE - 1) : flen);
+ if (flen < (MAX_FUNC_PRINT_SIZE - 1)) {
+ memset(func_buf + flen, 0x20,
+ (MAX_FUNC_PRINT_SIZE - flen));
+ }
+ func_buf[MAX_FUNC_PRINT_SIZE - 1] = '\0';
+ func = func_buf;
+#else
+ func = function;
+#endif
+
+ nb_char =
+ snprintf(out, MAX_PRINT_SIZE, "%s [%p] %s:%s:%d: %s\n",
+ _trace_level_to_string[level], thread_id, prefix,
+ func, line, in);
+ } else {
+ memcpy(out, in, MAX_PRINT_SIZE);
+
+		/*
+		 * We need to add a \n and a \0 at the end of the string if
+		 * not already present. We also set nb_char to the string
+		 * length, including the appended chars.
+		 */
+#if (MAX_PRINT_SIZE <= 2)
+#error "cannot support MAX_PRINT_SIZE lesser than 3!"
+#endif
+ nb_char = 0;
+ while (*out) {
+ out++;
+ if (++nb_char == MAX_PRINT_SIZE) {
+ /* force the 2 last bytes */
+ *(out - 2) = '\n';
+ *(out - 1) = '\0';
+ return MAX_PRINT_SIZE - 1;
+ }
+ }
+		if (nb_char == 0 || *(out - 1) != '\n') {
+			/* force a trailing \n without overflowing out[] */
+			if (nb_char == MAX_PRINT_SIZE - 1) {
+				*(out - 1) = '\n';
+			} else {
+				*out = '\n';
+				*(out + 1) = '\0';
+				nb_char++;
+			}
+		}
+ }
+ return nb_char;
+}
+
+
+int _dprintf(const char *function, int line, int level, const char *prefix,
+ const char *fmt, ...)
+{
+ char to_format[MAX_PRINT_SIZE];
+ char formatted[MAX_PRINT_SIZE];
+ va_list ap;
+ int nb;
+
+ va_start(ap, fmt);
+ (void)vsnprintf(to_format, sizeof(to_format), fmt, ap);
+ va_end(ap);
+
+ nb = format_trace(function, line, level, prefix, to_format, formatted);
+
+ /*
+ * dprint is making use of the uart.
+ * a shared mem / circular buffer based trace could be used instead
+ */
+ output_string(formatted);
+
+ return nb;
+}
+
+int _dprintf_hwsync(const char *function, int line, const char *fmt, ...)
+{
+ char to_format[MAX_PRINT_SIZE];
+ char formatted[MAX_PRINT_SIZE];
+ va_list ap;
+ int nb;
+
+ va_start(ap, fmt);
+ (void)vsnprintf(to_format, sizeof(to_format), fmt, ap);
+ va_end(ap);
+
+ nb = format_trace(function, line, TRACE_ALWAYS, "HWSYNC", to_format,
+ formatted);
+
+	/* Note: no contention or synchronization handling with other CPU cores! */
+ output_flush();
+ output_string(formatted);
+ output_flush();
+
+ return nb;
+}
+
+/*****************************************************************************/
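The DMSG/EMSG/... macros used above come from the trace header included after STR_TRACE_CORE is defined. As a sketch only (the real definition lives in that header), such a macro is assumed to reduce to a level-guarded _dprintf() call:

    #define DMSG_SKETCH(...) \
    	do { \
    		if (_trace_level >= TRACE_DEBUG) \
    			_dprintf(__func__, __LINE__, TRACE_DEBUG, \
    				 STR_TRACE_CORE, __VA_ARGS__); \
    	} while (0)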
diff --git a/core/arch/arm32/kernel/tee_l2cc_mutex.c b/core/arch/arm32/kernel/tee_l2cc_mutex.c
new file mode 100644
index 00000000000..6dedcf2e02c
--- /dev/null
+++ b/core/arch/arm32/kernel/tee_l2cc_mutex.c
@@ -0,0 +1,141 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+/*
+ * l2cc_mutex_va holds teecore virtual address of TZ L2CC mutex or NULL.
+ *
+ * l2cc_mutex_pa holds the TZ L2CC mutex physical address. It is relevant
+ * only if 'l2cc_mutex_va' holds a non-NULL address.
+ *
+ * l2cc_mutex_mm holds the teecore mm structure used to allocate the TZ
+ * L2CC mutex, if allocated. Otherwise, it is NULL.
+ */
+#define MUTEX_SZ sizeof(uint32_t)
+
+static uint32_t *l2cc_mutex_va;
+static uint32_t l2cc_mutex_pa;
+static tee_mm_entry_t *l2cc_mutex_mm;
+
+/*
+ * Allocate public RAM for an L2CC mutex shared with NSec.
+ * Return 0 on success.
+ */
+static int alloc_l2cc_mutex(void)
+{
+ uint32_t va;
+
+ if ((l2cc_mutex_va != NULL) || (l2cc_mutex_mm != NULL))
+ return -1;
+
+ l2cc_mutex_mm = tee_mm_alloc(&tee_mm_pub_ddr, MUTEX_SZ);
+ if (l2cc_mutex_mm == NULL)
+ return -1;
+
+ l2cc_mutex_pa = tee_mm_get_smem(l2cc_mutex_mm);
+
+ if (core_pa2va(l2cc_mutex_pa, &va))
+ return -1;
+
+ *(uint32_t *)va = 0;
+ l2cc_mutex_va = (uint32_t *)va;
+ return 0;
+}
+
+/*
+ * tee_l2cc_mutex_configure - Handle L2 mutex configuration requests from NSec
+ *
+ * Policy:
+ * - if NSec did not register a L2 mutex, default allocate it in public RAM.
+ * - if NSec disables L2 mutex, disable the current mutex and unregister it.
+ */
+TEE_Result tee_l2cc_mutex_configure(uint32_t service_id, uint32_t *mutex)
+{
+ uint32_t addr, va;
+ int ret = TEE_SUCCESS;
+
+ /*
+	 * Enable L2CC: NSec allows teecore to run safe outer maintenance
+	 *		with the shared mutex.
+	 * Disable L2CC: NSec will run outer maintenance while locking the
+	 *		shared mutex. teecore cannot run outer maintenance.
+	 * Set L2CC: NSec proposes a Shared Memory location for the outer
+	 *		maintenance shared mutex.
+	 * Get L2CC: NSec requests the outer maintenance shared mutex
+	 *		location. If NSec has successfully registered one,
+	 *		return its location; otherwise, allocate one in public
+	 *		RAM and provide NSec with its physical location.
+ */
+ switch (service_id) {
+ case SERVICEID_ENABLE_L2CC_MUTEX:
+		if (l2cc_mutex_va == NULL) {
+ ret = alloc_l2cc_mutex();
+ if (ret)
+ return TEE_ERROR_GENERIC;
+ }
+ core_l2cc_mutex_set(l2cc_mutex_va);
+ break;
+ case SERVICEID_DISABLE_L2CC_MUTEX:
+ if (l2cc_mutex_mm) {
+ tee_mm_free(l2cc_mutex_mm);
+ l2cc_mutex_mm = NULL;
+ }
+ l2cc_mutex_va = NULL;
+ core_l2cc_mutex_set(NULL);
+ break;
+ case SERVICEID_GET_L2CC_MUTEX:
+ if (l2cc_mutex_va == NULL) {
+ ret = alloc_l2cc_mutex();
+ if (ret)
+ return TEE_ERROR_GENERIC;
+ }
+ *mutex = l2cc_mutex_pa;
+ break;
+ case SERVICEID_SET_L2CC_MUTEX:
+ if (l2cc_mutex_va != NULL)
+ return TEE_ERROR_BAD_PARAMETERS;
+ addr = *mutex;
+ if (core_pbuf_is(CORE_MEM_NSEC_SHM, addr, MUTEX_SZ) == false)
+ return TEE_ERROR_BAD_PARAMETERS;
+ if (core_pa2va(addr, (uint32_t *)&va))
+ return TEE_ERROR_BAD_PARAMETERS;
+ l2cc_mutex_pa = addr;
+ l2cc_mutex_va = (uint32_t *)va;
+ break;
+ default:
+ return TEE_ERROR_GENERIC;
+ }
+
+ return ret;
+}
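A sketch of the two most common call sequences under the policy above; the caller code is hypothetical, only the service IDs and the function itself come from this file:

    TEE_Result res;
    uint32_t mutex_pa = 0;

    /* NSec asks for the mutex location; teecore allocates one if needed */
    res = tee_l2cc_mutex_configure(SERVICEID_GET_L2CC_MUTEX, &mutex_pa);

    /* ... or NSec proposes its own location in non-secure shared memory */
    res = tee_l2cc_mutex_configure(SERVICEID_SET_L2CC_MUTEX, &mutex_pa);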
diff --git a/core/arch/arm32/kernel/tee_misc.c b/core/arch/arm32/kernel/tee_misc.c
new file mode 100644
index 00000000000..0ad2576ac5a
--- /dev/null
+++ b/core/arch/arm32/kernel/tee_misc.c
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include
+#include
+#include
+#include
+#include
+
+
+#include
+#include
+
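+/* Map a nibble (0-15) to its ASCII hex digit ('0' is 48, 'A' - 10 is 55) */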
+static uint8_t tee_b2hs_add_base(uint8_t in)
+{
+ if (in > 9)
+ return in + 55;
+ else
+ return in + 48;
+}
+
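+/* Map an ASCII hex digit ('0'-'9', 'A'-'F') back to its nibble value */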
+static int tee_hs2b_rem_base(uint8_t in, uint8_t *out)
+{
+ if (in < 48 || in > 70 || (in > 57 && in < 65))
+ return -1;
+
+ if (in < 58)
+ *out = in - 48;
+ else
+ *out = in - 55;
+
+ return 0;
+}
+
+uint32_t tee_b2hs(uint8_t *b, uint8_t *hs, uint32_t blen, uint32_t hslen)
+{
+ uint32_t i = 0;
+
+ if (blen * 2 + 1 > hslen)
+ return 0;
+
+ for (; i < blen; i++) {
+ hs[i * 2 + 1] = tee_b2hs_add_base(b[i] & 0xf);
+ hs[i * 2] = tee_b2hs_add_base(b[i] >> 4);
+ }
+ hs[blen * 2] = 0;
+
+ return blen * 2;
+}
+
+uint32_t tee_hs2b(uint8_t *hs, uint8_t *b, uint32_t hslen, uint32_t blen)
+{
+ uint32_t i = 0;
+ uint32_t len = TEE_HS2B_BBUF_SIZE(hslen);
+ uint8_t hi;
+ uint8_t lo;
+
+ if (len > blen)
+ return 0;
+
+ for (; i < len; i++) {
+ if (tee_hs2b_rem_base(hs[i * 2], &hi))
+ return 0;
+ if (tee_hs2b_rem_base(hs[i * 2 + 1], &lo))
+ return 0;
+ b[i] = (hi << 4) + lo;
+ }
+
+ return len;
+}
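A usage sketch of the two helpers above, assuming TEE_HS2B_BBUF_SIZE(hslen) evaluates to the number of bytes encoded by hslen hex digits (its use in tee_hs2b() implies hslen / 2, rounded up):

    uint8_t bin[4] = { 0xde, 0xad, 0xbe, 0xef };
    uint8_t hex[2 * sizeof(bin) + 1];

    tee_b2hs(bin, hex, sizeof(bin), sizeof(hex));	/* hex = "DEADBEEF" */
    tee_hs2b(hex, bin, 8, sizeof(bin));			/* back to the raw bytes */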
diff --git a/core/arch/arm32/kernel/tee_sleep_services.c b/core/arch/arm32/kernel/tee_sleep_services.c
new file mode 100644
index 00000000000..03cdfa5eeee
--- /dev/null
+++ b/core/arch/arm32/kernel/tee_sleep_services.c
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include
+
+
+#ifdef DMAC_PREFOT_REG_ADDR
+static uint32_t dmac_prefot;
+#endif
+
+#if defined(DMAC_SWSEG_START_ADDR) && defined(DMAC_SWDEG_START_ADDR)
+static uint32_t dmac_swseg[DMAC_SWSREG_NUM_REGS];
+static uint32_t dmac_swdeg[DMAC_SWDREG_NUM_REGS];
+#endif
+
+TEE_Result tee_sleep_save_restore_vape(bool save)
+{
+#ifdef DMAC_PREFOT_REG_ADDR
+ if (save)
+ dmac_prefot = IO(DMAC_PREFOT_REG_ADDR);
+ else
+ IO(DMAC_PREFOT_REG_ADDR) = dmac_prefot;
+#endif
+
+#if defined(DMAC_SWSEG_START_ADDR) && defined(DMAC_SWDEG_START_ADDR)
+ {
+ uint32_t i;
+
+ if (save) {
+ for (i = 0; i < DMAC_SWSREG_NUM_REGS; i++)
+ dmac_swseg[i] =
+ IO(DMAC_SWSEG_START_ADDR + i * 4);
+ for (i = 0; i < DMAC_SWDREG_NUM_REGS; i++)
+ dmac_swdeg[i] =
+ IO(DMAC_SWDEG_START_ADDR + i * 4);
+ } else {
+ for (i = 0; i < DMAC_SWSREG_NUM_REGS; i++)
+ IO(DMAC_SWSEG_START_ADDR + i * 4) =
+ dmac_swseg[i];
+ for (i = 0; i < DMAC_SWDREG_NUM_REGS; i++)
+ IO(DMAC_SWDEG_START_ADDR + i * 4) =
+ dmac_swdeg[i];
+ }
+ }
+#endif
+
+ return TEE_SUCCESS;
+}
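The intended call pattern on a suspend/resume path is symmetric; a sketch (the surrounding power-management hooks are hypothetical):

    (void)tee_sleep_save_restore_vape(true);	/* save before sleeping */
    /* ... power down, later wake up ... */
    (void)tee_sleep_save_restore_vape(false);	/* restore after resume */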
diff --git a/core/arch/arm32/kernel/tee_ta_manager.c b/core/arch/arm32/kernel/tee_ta_manager.c
new file mode 100644
index 00000000000..1ddbb54d443
--- /dev/null
+++ b/core/arch/arm32/kernel/tee_ta_manager.c
@@ -0,0 +1,1700 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include "user_ta_header.h"
+#include
+#include
+#include
+#include
+#include
+
+
+/*
+ * Use this invalid ID for a static TA, since a session
+ * is not needed when calling a static TA.
+ */
+#define TEE_SESSION_ID_STATIC_TA 0xFFFFFFFF
+
+#define TEE_TA_STACK_ALIGNMENT 8
+
+enum tee_user_ta_func {
+ USER_TA_FUNC_OPEN_CLIENT_SESSION = 0,
+ USER_TA_FUNC_CLOSE_CLIENT_SESSION,
+ USER_TA_FUNC_INVOKE_COMMAND
+};
+
+typedef enum {
+ COMMAND_INVOKE_COMMAND = 0,
+ COMMAND_OPEN_SESSION,
+ COMMAND_CREATE_ENTRY_POINT,
+ COMMAND_CLOSE_SESSION,
+ COMMAND_DESTROY_ENTRY_POINT,
+} command_t;
+
+/* Only one session is running in the single threaded solution */
+static struct tee_ta_session *tee_rs;
+
+/* Enters a user TA */
+static TEE_Result tee_user_ta_enter(TEE_ErrorOrigin *err,
+ struct tee_ta_session *session,
+ enum tee_user_ta_func func,
+ uint32_t cancel_req_to, uint32_t cmd,
+ struct tee_ta_param *param);
+
+static TEE_Result tee_ta_param_pa2va(struct tee_ta_session *sess,
+ struct tee_ta_param *param);
+
+struct param_ta {
+ struct tee_ta_session *sess;
+ uint32_t cmd;
+ struct tee_ta_param *param;
+ TEE_Result res;
+};
+
+static TEE_Result tee_ta_rpc_free(struct tee_ta_nwumap *map);
+
+static void jumper_invokecommand(void *voidargs)
+{
+ struct param_ta *args = (struct param_ta *)voidargs;
+
+ INMSG("");
+ args->res = args->sess->ctx->static_ta->invoke_command_entry_point(
+ (void *)args->sess->user_ctx,
+ (uint32_t)args->cmd,
+ (uint32_t)args->param->types,
+ (TEE_Param *)args->param->params);
+ OUTMSG("%lx", args->res);
+}
+
+static void jumper_opensession(void *voidargs)
+{
+ struct param_ta *args = (struct param_ta *)voidargs;
+
+ INMSG("");
+ args->res = args->sess->ctx->static_ta->open_session_entry_point(
+ (uint32_t)args->param->types,
+ (TEE_Param *)args->param->params,
+ (void **)&args->sess->user_ctx);
+ OUTMSG("%lx", args->res);
+}
+
+static void jumper_createentrypoint(void *voidargs)
+{
+ struct param_ta *args = (struct param_ta *)voidargs;
+
+ INMSG("");
+ args->res = args->sess->ctx->static_ta->create_entry_point();
+ OUTMSG("%lx", args->res);
+}
+
+static void jumper_closesession(void *voidargs)
+{
+ struct param_ta *args = (struct param_ta *)voidargs;
+
+ INMSG("");
+ args->sess->ctx->static_ta->close_session_entry_point(
+ (void *)args->sess->user_ctx);
+ args->res = TEE_SUCCESS;
+ OUTMSG("%lx", args->res);
+}
+
+static void jumper_destroyentrypoint(void *voidargs)
+{
+ struct param_ta *args = (struct param_ta *)voidargs;
+
+ INMSG("");
+ args->sess->ctx->static_ta->destroy_entry_point();
+ args->res = TEE_SUCCESS;
+ OUTMSG("%lx", args->res);
+}
+
+/*
+ * The stack size is adjusted to take into account
+ * the needs of the TEE internal libraries.
+ */
+
+static TEE_Result invoke_ta(struct tee_ta_session *sess, uint32_t cmd,
+ struct tee_ta_param *param, command_t commandtype)
+{
+ struct param_ta ptas;
+
+ ptas.sess = sess;
+ ptas.cmd = cmd;
+ ptas.param = param;
+ ptas.res = TEE_ERROR_TARGET_DEAD;
+
+ tee_rs = sess;
+
+ switch (commandtype) {
+ case COMMAND_INVOKE_COMMAND:
+ jumper_invokecommand(&ptas);
+ break;
+ case COMMAND_OPEN_SESSION:
+ jumper_opensession(&ptas);
+ break;
+ case COMMAND_CREATE_ENTRY_POINT:
+ jumper_createentrypoint(&ptas);
+ break;
+ case COMMAND_CLOSE_SESSION:
+ jumper_closesession(&ptas);
+ break;
+ case COMMAND_DESTROY_ENTRY_POINT:
+ jumper_destroyentrypoint(&ptas);
+ break;
+ default:
+ EMSG("Do not know how to run the command %d", commandtype);
+ ptas.res = TEE_ERROR_GENERIC;
+ break;
+ }
+
+ tee_rs = NULL;
+
+ OUTRMSG(ptas.res);
+ return ptas.res;
+}
+
+/* set trace level for all installed TAs (TA generic code) */
+int tee_ta_set_trace_level(int level)
+{
+ struct tee_ta_ctx *ctx;
+
+	if ((level > TRACE_MAX) || (level < TRACE_MIN))
+ return -1;
+
+ TAILQ_FOREACH(ctx, &tee_ctxes, link) {
+ if (ctx->static_ta)
+ ctx->static_ta->prop_tracelevel = level;
+
+ /* non-static TA should be done too */
+ }
+ return 0;
+}
+
+/*-----------------------------------------------------------------------------
+ * Find a TA context based on a UUID (input)
+ * Returns a pointer to the context, or NULL if not found
+ *---------------------------------------------------------------------------*/
+static struct tee_ta_ctx *tee_ta_context_find(const TEE_UUID *uuid)
+{
+ struct tee_ta_ctx *ctx;
+
+ TAILQ_FOREACH(ctx, &tee_ctxes, link) {
+ if (memcmp(&ctx->head->uuid, uuid, sizeof(TEE_UUID)) == 0)
+ return ctx;
+ }
+
+ return NULL;
+}
+
+static void tee_ta_init_got(struct tee_ta_ctx *const ctx)
+{
+ uint32_t *ptr;
+ uint32_t *end_ptr;
+ uint32_t va_start;
+
+ /*
+	 * GOT and find_service_addr follow right after the ro section.
+ */
+ if ((TA_HEAD_GOT_MASK & ctx->head->rel_dyn_got_size) == 0)
+ return;
+
+ va_start = ctx->load_addr;
+
+ ptr = (uint32_t *)(tee_ta_get_exec(ctx) + ctx->head->ro_size);
+ end_ptr = (uint32_t *)((uint32_t) ptr +
+ (TA_HEAD_GOT_MASK & ctx->head->rel_dyn_got_size));
+
+ while (ptr < end_ptr) {
+ *ptr += va_start;
+#ifdef PAGER_DEBUG_PRINT
+ DMSG("GOT [0x%x] = 0x%x", ptr, *ptr);
+#endif
+ ptr++;
+ }
+}
+
+static void tee_ta_init_zi(struct tee_ta_ctx *const ctx)
+{
+ /* setup ZI data */
+ uint32_t start = tee_ta_get_exec(ctx) +
+ ctx->head->rw_size + ctx->head->ro_size;
+
+ memset((void *)start, 0, ctx->head->zi_size);
+}
+
+static void tee_ta_init_serviceaddr(struct tee_ta_ctx *const ctx)
+{
+ /*
+ * Kernel TA
+ *
+ * Find service follows right after GOT.
+ */
+ uint32_t saddr = tee_ta_get_exec(ctx) + ctx->head->ro_size +
+ (ctx->head->rel_dyn_got_size & TA_HEAD_GOT_MASK);
+ uint32_t *fsaddr = (uint32_t *)saddr;
+
+ *fsaddr = 0; /* we do not have any services */
+
+#ifdef PAGER_DEBUG_PRINT
+ DMSG("find_service_addr [0x%x] = 0x%x", fsaddr, *fsaddr);
+#endif
+}
+
+/*
+ * Process rel.dyn
+ */
+static void tee_ta_init_reldyn(struct tee_ta_ctx *const ctx)
+{
+ uint32_t rel_dyn_size = ctx->head->rel_dyn_got_size >> 16;
+ uint32_t n;
+ uint32_t saddr =
+ tee_ta_get_exec(ctx) + ctx->head->ro_size - rel_dyn_size;
+
+ for (n = 0; n < rel_dyn_size; n += sizeof(struct ta_rel_dyn)) {
+ struct ta_rel_dyn *rel_dyn = (struct ta_rel_dyn *)(saddr + n);
+ uint32_t *data;
+
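+		/* 0x17 is R_ARM_RELATIVE: add the load address to the word */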
+ if (rel_dyn->info != 0x17) {
+ DMSG("Unknown rel_dyn info 0x%x", rel_dyn->info);
+ TEE_ASSERT(0);
+ }
+
+ data = (uint32_t *)(ctx->load_addr + rel_dyn->addr);
+ *data += ctx->load_addr;
+#ifdef PAGER_DEBUG_PRINT
+ DMSG("rel.dyn [0x%x] = 0x%x", data, *data);
+#endif
+ }
+}
+
+/*
+ * Setup global variables initialized from TEE Core
+ */
+static void tee_ta_init_heap(struct tee_ta_ctx *const ctx, uint32_t heap_size)
+{
+ uint32_t *data;
+ tee_uaddr_t heap_start_addr;
+
+ /*
+ * User TA
+ *
+ * Heap base follows right after GOT
+ */
+
+ /* XXX this function shouldn't know this mapping */
+ heap_start_addr = ((TEE_DDR_VLOFFSET + 1) << SECTION_SHIFT) - heap_size;
+
+ data = (uint32_t *)(tee_ta_get_exec(ctx) + ctx->head->ro_size +
+ (ctx->head->rel_dyn_got_size & TA_HEAD_GOT_MASK));
+
+ *data = heap_start_addr;
+#ifdef PAGER_DEBUG_PRINT
+ DMSG("heap_base [0x%x] = 0x%x", data, *data);
+#endif
+}
+
+/*-----------------------------------------------------------------------------
+ * Loads TA header and hashes.
+ * Verifies the TA signature.
+ * Returns session ptr and TEE_Result.
+ *---------------------------------------------------------------------------*/
+static TEE_Result tee_ta_load(const kta_signed_header_t *signed_ta,
+ struct tee_ta_ctx **ta_ctx)
+{
+	/* ta & ta_session are assumed to be != NULL from previous checks */
+ TEE_Result res;
+ uint32_t size;
+ size_t nbr_hashes;
+ int head_size;
+ uint32_t hash_type_size;
+ uint32_t hash_size;
+ void *head = NULL;
+ void *ptr = NULL;
+ uint32_t heap_size = 0; /* gcc warning */
+ struct tee_ta_ctx *ctx = NULL;
+ ta_head_t *ta =
+ (void *)((uint8_t *)signed_ta + signed_ta->size_of_signed_header);
+
+ /*
+ * ------------------------------------------------------------------
+	 * 1st step: load in secure memory and check consistency, signature.
+	 * Note: this step defines the user/kernel privilege of the TA.
+ * ------------------------------------------------------------------
+ */
+
+ /*
+ * Check that the GOT ends up at a properly aligned address.
+ * See tee_ta_load_page() for update of GOT.
+ */
+ if ((ta->ro_size % 4) != 0) {
+ DMSG("Bad ro_size %u", ta->ro_size);
+ return TEE_ERROR_BAD_FORMAT;
+ }
+
+ nbr_hashes = ((ta->ro_size + ta->rw_size) >> SMALL_PAGE_SHIFT) + 1;
+ if (nbr_hashes > TEE_PVMEM_PSIZE)
+ return TEE_ERROR_OUT_OF_MEMORY;
+
+#ifdef CFG_NO_TA_HASH_SIGN
+ hash_type_size = 0;
+#else
+ /* COPY HEADERS & HASHES: ta_head + ta_func_head(s) + hashes */
+ if (tee_hash_get_digest_size(ta->hash_type, &hash_type_size) !=
+ TEE_SUCCESS) {
+ DMSG("warning: invalid signed header: invalid hash id found!");
+ return TEE_ERROR_SECURITY;
+ }
+#endif
+ hash_size = hash_type_size * nbr_hashes;
+ head_size =
+ sizeof(ta_head_t) +
+ ta->nbr_func * sizeof(ta_func_head_t) + hash_size;
+
+ head = malloc(head_size);
+ if (head == NULL)
+ return TEE_ERROR_OUT_OF_MEMORY;
+
+	/* copy headers from normal world memory */
+ memcpy(head, ta, head_size - hash_size);
+
+	/* copy hashes from normal world memory */
+ ptr =
+ (void *)((uint8_t *)head +
+ sizeof(ta_head_t) + ta->nbr_func * sizeof(ta_func_head_t));
+
+ memcpy(ptr, (void *)((uint8_t *)ta + sizeof(ta_head_t) +
+ ta->nbr_func * sizeof(ta_func_head_t) +
+ ta->ro_size + ta->rw_size), hash_size);
+
+ /* COPY SIGNATURE: alloc signature */
+ ptr = malloc(signed_ta->size_of_signed_header);
+ if (ptr == NULL) {
+ res = TEE_ERROR_OUT_OF_MEMORY;
+ goto error_return;
+ }
+
+	/* copy signature to secure memory */
+ memcpy(ptr, signed_ta, signed_ta->size_of_signed_header);
+
+ /*
+ * We may check signed TAs in this place
+ */
+
+
+ /*
+ * End of check of signed header from secure:
+ * hashes are safe and validated.
+ */
+
+ free(ptr);
+ ptr = NULL;
+
+ /*
+ * ------------------------------------------------------------------
+ * 2nd step: Register context
+	 * Alloc and init the ta context structure, alloc physical/virtual
+ * memories to store/map the TA.
+ * ------------------------------------------------------------------
+ */
+
+ /*
+ * Register context
+ */
+
+ /* code below must be protected by mutex (multi-threaded) */
+ ctx = calloc(1, sizeof(struct tee_ta_ctx));
+ if (ctx == NULL) {
+ res = TEE_ERROR_OUT_OF_MEMORY;
+ goto error_return;
+ }
+ TAILQ_INIT(&ctx->open_sessions);
+ TAILQ_INIT(&ctx->cryp_states);
+ TAILQ_INIT(&ctx->objects);
+ TAILQ_INIT(&ctx->storage_enums);
+ ctx->head = (ta_head_t *)head;
+
+ /* by default NSec DDR: starts at TA function code. */
+ ctx->nmem = (void *)((uint32_t) ta + sizeof(ta_head_t) +
+ ta->nbr_func * sizeof(ta_func_head_t));
+
+ ctx->num_res_funcs = ctx->head->zi_size >> 20;
+ ctx->head->zi_size &= 0xfffff;
+ if (ctx->num_res_funcs > ctx->head->nbr_func) {
+ res = TEE_ERROR_BAD_FORMAT;
+ goto error_return;
+ }
+
+ /* full required execution size (not stack etc...) */
+ size = ctx->head->ro_size + ctx->head->rw_size + ctx->head->zi_size;
+
+ if (ctx->num_res_funcs == 2) {
+ ta_func_head_t *ta_func_head =
+ (ta_func_head_t *)((uint32_t) ctx->head +
+ sizeof(ta_head_t));
+
+ struct user_ta_sub_head *sub_head =
+ (struct user_ta_sub_head *)&ta_func_head[ctx->head->
+ nbr_func -
+ ctx->
+ num_res_funcs];
+ /* man_flags: mandatory flags */
+ uint32_t man_flags = TA_FLAG_USER_MODE | TA_FLAG_EXEC_DDR;
+ uint32_t opt_flags = man_flags | TA_FLAG_SINGLE_INSTANCE |
+ TA_FLAG_MULTI_SESSION | TA_FLAG_UNSAFE_NW_PARAMS;
+
+ /*
+		 * sub_head occupies the last two func_head entries (the two
+		 * 'resident func' entries); the sub_head structure is exactly
+		 * twice the size of the func_head struct.
+		 * sub_head stores the flags, heap_size and stack_size.
+ */
+ TEE_ASSERT((sizeof(struct user_ta_sub_head)) ==
+ (2 * sizeof(struct user_ta_func_head)));
+
+ /*
+		 * As we support only user TAs: assume all TAs are user TAs!
+ */
+ sub_head->flags |= TA_FLAG_USER_MODE | TA_FLAG_EXEC_DDR;
+
+ /* check input flags bitmask consistency and save flags */
+ if ((sub_head->flags & opt_flags) != sub_head->flags ||
+ (sub_head->flags & man_flags) != man_flags) {
+ EMSG("TA flag issue: flags=%x opt=%X man=%X",
+ sub_head->flags, opt_flags, man_flags);
+ res = TEE_ERROR_BAD_FORMAT;
+ goto error_return;
+ }
+
+ ctx->flags = sub_head->flags;
+
+ /* Check if multi instance && single session config */
+ if (((ctx->flags & TA_FLAG_SINGLE_INSTANCE) == 0) &&
+ ((ctx->flags & TA_FLAG_MULTI_SESSION) == 0)) {
+ /*
+ * assume MultiInstance/SingleSession,
+ * same as MultiInstance/MultiSession
+ */
+ ctx->flags |= TA_FLAG_MULTI_SESSION;
+ }
+
+		/* Ensure proper alignment of stack */
+ ctx->stack_size = TEE_ROUNDUP(sub_head->stack_size,
+ TEE_TA_STACK_ALIGNMENT);
+
+ heap_size = sub_head->heap_size;
+
+ if (ctx->stack_size + heap_size > SECTION_SIZE) {
+ EMSG("Too large combined stack and HEAP");
+ res = TEE_ERROR_OUT_OF_MEMORY;
+ goto error_return;
+ }
+
+ /*
+ * Allocate heap and stack
+ */
+ ctx->mm_heap_stack =
+ tee_mm_alloc(&tee_mm_sec_ddr, SECTION_SIZE);
+ if (ctx->mm_heap_stack == 0) {
+ EMSG("Failed to allocate %u bytes\n", SECTION_SIZE);
+ EMSG(" of memory for user heap and stack\n");
+ res = TEE_ERROR_OUT_OF_MEMORY;
+ goto error_return;
+ }
+
+ } else if (ctx->num_res_funcs != 0) {
+ /* Unknown sub header */
+ res = TEE_ERROR_BAD_FORMAT;
+ goto error_return;
+ }
+
+ if ((ctx->flags & TA_FLAG_EXEC_DDR) != 0) {
+ /*
+		 * Note that only user TAs can be supported in DDR.
+		 * When executing in DDR, the size of the execution area must
+		 * also cover the headers and the rel.dyn/GOT data.
+ */
+ size +=
+ sizeof(ta_head_t) + ta->nbr_func * sizeof(ta_func_head_t) +
+ (ta->rel_dyn_got_size & TA_HEAD_GOT_MASK);
+
+ ctx->mm = tee_mm_alloc(&tee_mm_sec_ddr, size);
+
+ if (ctx->mm != NULL) {
+ /* cpy ddr TA into reserved memory space */
+ struct tee_ta_param param = { 0 };
+ void *dst;
+
+
+ res = tee_mmu_init(ctx);
+ if (res != TEE_SUCCESS)
+ goto error_return;
+
+			res = tee_mmu_map(ctx, &param);
+ if (res != TEE_SUCCESS) {
+ EMSG("call tee_mmu_map_uta() failed %X", res);
+ goto error_return;
+ }
+
+ tee_mmu_set_ctx(ctx);
+
+ dst = (void *)tee_mmu_get_load_addr(ctx);
+ if (!tee_vbuf_is_non_sec(ta, size)) {
+ EMSG("User TA isn't in non-secure memory");
+ res = TEE_ERROR_SECURITY;
+ goto error_return;
+ }
+ memcpy(dst, ta, size);
+
+ core_cache_maintenance(DCACHE_AREA_CLEAN, dst, size);
+ core_cache_maintenance(ICACHE_AREA_INVALIDATE, dst,
+ size);
+ }
+
+ } else {
+ size_t len = ctx->head->ro_size + ctx->head->rw_size;
+
+ SMSG("no TA is currently supported in TEE RAM: abort.");
+ res = TEE_ERROR_NOT_SUPPORTED;
+ goto error_return;
+
+ /*
+ * 'nmem' is normal world memory: saved read-only
+ * bytes of TA in pub DDR. They are protected by the related
+ * hashes saved in tee RAM.
+ */
+ if (!tee_vbuf_is(TEE_MEM_NOT_RES_MMU_UL1, ctx->nmem, len)) {
+ tee_mm_entry_t *mm_nmem;
+ void *new_nmem;
+ /*
+			 * At least part of the memory backing the paging of
+			 * the TA resides in memory that will not be reachable
+			 * when a user TA is loaded.
+ *
+ * Allocate TEE core phys DDR and copy TA to that
+ * instead. This is similar to what's done when making
+ * a kernel TA resident.
+ */
+
+ mm_nmem = tee_mm_alloc(&tee_mm_pub_ddr, len);
+ if (mm_nmem == NULL) {
+ EMSG("Out of pub DDR, cannot allocate %u", len);
+ res = TEE_ERROR_OUT_OF_MEMORY;
+ goto error_return;
+ }
+ new_nmem = (void *)tee_mm_get_smem(mm_nmem);
+ memcpy(new_nmem, ctx->nmem, len);
+ ctx->nmem = new_nmem;
+ }
+ ctx->mm = tee_mm_alloc(&tee_mm_vcore, size);
+ }
+
+ if (ctx->mm == NULL) {
+ res = TEE_ERROR_OUT_OF_MEMORY;
+ goto error_return;
+ }
+
+ /* XXX is this used for a user TA in DDR? */
+ ctx->smem_size = size;
+
+ if ((ctx->flags & TA_FLAG_EXEC_DDR) == 0) {
+ /*
+ * HANDLE RW DATA
+ * Allocate data here and not in the abort handler to
+ * avoid running out of memory in abort mode.
+ */
+ ctx->rw_data =
+ (uint32_t) (char *)malloc(ctx->head->zi_size +
+ ctx->head->rw_size);
+ if (ctx->rw_data == 0) {
+ res = TEE_ERROR_OUT_OF_MEMORY;
+ goto error_return;
+ }
+ ctx->rw_data_usage = 0;
+ }
+
+ if ((ctx->flags & TA_FLAG_EXEC_DDR) != 0) {
+ ctx->load_addr = tee_mmu_get_load_addr(ctx);
+ } else {
+ ctx->load_addr =
+ ((ctx->mm->offset << SMALL_PAGE_SHIFT) + TEE_PVMEM_LO) -
+ sizeof(ta_head_t) -
+ ctx->head->nbr_func * sizeof(ta_func_head_t);
+ }
+
+ ctx->ref_count = 1;
+
+ TAILQ_INSERT_TAIL(&tee_ctxes, ctx, link);
+ *ta_ctx = ctx;
+ /*
+ * Note that the setup below will cause at least one page fault so it's
+ * important that the session is fully registered at this stage.
+ */
+
+ /* Init rel.dyn, GOT, Service ptr, ZI and heap */
+ tee_ta_init_reldyn(ctx);
+ tee_ta_init_got(ctx);
+ if ((ctx->flags & TA_FLAG_USER_MODE) != 0)
+ tee_ta_init_heap(ctx, heap_size);
+ else
+ tee_ta_init_serviceaddr(ctx);
+ tee_ta_init_zi(ctx);
+
+ DMSG("Loaded TA at 0x%x, ro_size %u, rw_size %u, zi_size %u",
+ tee_mm_get_smem(ctx->mm), ctx->head->ro_size,
+ ctx->head->rw_size, ctx->head->zi_size);
+ DMSG("ELF load address 0x%x", ctx->load_addr);
+
+ tee_mmu_set_ctx(NULL);
+ /* end thread protection (multi-threaded) */
+
+ return TEE_SUCCESS;
+
+error_return:
+ tee_mmu_set_ctx(NULL);
+ free(head);
+ free(ptr);
+ if (ctx != NULL) {
+ if ((ctx->flags & TA_FLAG_USER_MODE) != 0)
+ tee_mmu_final(ctx);
+ tee_mm_free(ctx->mm_heap_stack);
+ tee_mm_free(ctx->mm);
+ /* If pub DDR was allocated for nmem free it */
+ tee_mm_free(tee_mm_find
+ (&tee_mm_pub_ddr, (uintptr_t) ctx->nmem));
+ free(ctx);
+ }
+ return res;
+}
+
+/* Maps kernel TA params */
+static TEE_Result tee_ta_param_pa2va(struct tee_ta_session *sess,
+ struct tee_ta_param *param)
+{
+ size_t n;
+ void *va;
+
+ /*
+ * If kernel TA is called from another TA the mapping
+ * of that TA is borrowed and the addresses are already
+ * virtual.
+ */
+ if (sess != NULL && sess->calling_sess != NULL)
+ return TEE_SUCCESS;
+
+ for (n = 0; n < 4; n++) {
+ switch (TEE_PARAM_TYPE_GET(param->types, n)) {
+ case TEE_PARAM_TYPE_MEMREF_INPUT:
+ case TEE_PARAM_TYPE_MEMREF_OUTPUT:
+ case TEE_PARAM_TYPE_MEMREF_INOUT:
+ if (core_pa2va
+ ((uint32_t) param->params[n].memref.buffer,
+ (uint32_t *)&va))
+ return TEE_ERROR_BAD_PARAMETERS;
+ param->params[n].memref.buffer = va;
+ break;
+
+ default:
+ continue;
+ }
+ }
+
+ return TEE_SUCCESS;
+}
+
+/*-----------------------------------------------------------------------------
+ * Initialises a session based on the UUID or ptr to the ta
+ * Returns ptr to the session (ta_session) and a TEE_Result
+ *---------------------------------------------------------------------------*/
+static TEE_Result tee_ta_init_session(uint32_t *session_id,
+ struct tee_ta_session_head *open_sessions,
+ const TEE_UUID *uuid,
+ const kta_signed_header_t *signed_ta,
+ struct tee_ta_session **ta_session)
+{
+ TEE_Result res;
+ struct tee_ta_session *s;
+
+ if (*session_id != 0) {
+ /* Session specified */
+ res = tee_ta_verify_session_pointer((struct tee_ta_session *)
+ *session_id, open_sessions);
+
+ if (res == TEE_SUCCESS)
+ *ta_session = (struct tee_ta_session *)*session_id;
+
+ DMSG(" ... Re-open session => %p", (void *)*ta_session);
+ return res;
+ }
+
+ if (uuid != NULL) {
+ /* Session not specified, find one based on uuid */
+ struct tee_ta_ctx *ctx = NULL;
+
+ ctx = tee_ta_context_find(uuid);
+ if (ctx == NULL)
+ goto load_ta;
+
+ if ((ctx->flags & TA_FLAG_SINGLE_INSTANCE) == 0)
+ goto load_ta;
+
+ if ((ctx->flags & TA_FLAG_MULTI_SESSION) == 0)
+ return TEE_ERROR_BUSY;
+
+ DMSG(" ... Re-open TA %08lx-%04x-%04x",
+ ctx->head->uuid.timeLow,
+ ctx->head->uuid.timeMid, ctx->head->uuid.timeHiAndVersion);
+
+ s = calloc(1, sizeof(struct tee_ta_session));
+ if (s == NULL)
+ return TEE_ERROR_OUT_OF_MEMORY;
+
+ ctx->ref_count++;
+ s->ctx = ctx;
+ s->cancel_mask = true;
+ *ta_session = s;
+ *session_id = (uint32_t) s;
+ TAILQ_INSERT_TAIL(open_sessions, s, link);
+ return TEE_SUCCESS;
+ }
+
+load_ta:
+ s = calloc(1, sizeof(struct tee_ta_session));
+ if (s == NULL)
+ return TEE_ERROR_OUT_OF_MEMORY;
+
+ res = TEE_ERROR_ITEM_NOT_FOUND;
+ if (signed_ta != NULL) {
+ DMSG(" Load dynamic TA");
+ /* load and verify */
+ res = tee_ta_load(signed_ta, &s->ctx);
+ } else if (uuid != NULL) {
+ DMSG(" Lookup for Static TA %08lx-%04x-%04x",
+ uuid->timeLow, uuid->timeMid, uuid->timeHiAndVersion);
+ /* Load Static TA */
+ ta_static_head_t *ta;
+ for (ta = &__start_ta_head_section;
+ ta < &__stop_ta_head_section; ta++) {
+ if (memcmp(&ta->uuid, uuid, sizeof(TEE_UUID)) == 0) {
+ /* Load a new TA and create a session */
+ DMSG(" Open %s", ta->name);
+ s->ctx = calloc(1, sizeof(struct tee_ta_ctx));
+ if (s->ctx == NULL) {
+ free(s);
+ return TEE_ERROR_OUT_OF_MEMORY;
+ }
+ TAILQ_INIT(&s->ctx->open_sessions);
+ TAILQ_INIT(&s->ctx->cryp_states);
+ TAILQ_INIT(&s->ctx->objects);
+ s->ctx->num_res_funcs = 0;
+ s->ctx->ref_count = 1;
+ s->ctx->flags = TA_FLAG_MULTI_SESSION;
+ s->ctx->head = (ta_head_t *)ta;
+ s->ctx->static_ta = ta;
+ TAILQ_INSERT_TAIL(&tee_ctxes, s->ctx, link);
+ res = TEE_SUCCESS;
+ }
+ }
+ }
+
+ if (res != TEE_SUCCESS) {
+ if (uuid != NULL)
+ EMSG(" ... Not found %08lx-%04x-%04x",
+ ((uuid) ? uuid->timeLow : 0xDEAD),
+ ((uuid) ? uuid->timeMid : 0xDEAD),
+ ((uuid) ? uuid->timeHiAndVersion : 0xDEAD));
+ else
+ EMSG(" ... Not found");
+ free(s);
+ return res;
+ } else
+ DMSG(" %s : %08lx-%04x-%04x",
+ s->ctx->static_ta ? s->ctx->static_ta->name : "dyn TA",
+ s->ctx->head->uuid.timeLow,
+ s->ctx->head->uuid.timeMid,
+ s->ctx->head->uuid.timeHiAndVersion);
+
+ s->cancel_mask = true;
+ *ta_session = s;
+ *session_id = (uint32_t) s;
+ TAILQ_INSERT_TAIL(open_sessions, s, link);
+
+ /*
+	 * Call create_entry_point for the static TA (to be cleaned up).
+	 * Here, we should call the TA "create" entry point, if the TA
+	 * supports it. Else, there is no TA code to call here.
+	 * Note that this could be moved to open_session so that static TAs
+	 * and user TAs behave the same.
+ */
+ if ((s->ctx->static_ta != NULL) &&
+ (s->ctx->static_ta->create_entry_point != NULL)) {
+ DMSG(" Call create_entry_point");
+ res = invoke_ta(s, 0, 0, COMMAND_CREATE_ENTRY_POINT);
+ if (res != TEE_SUCCESS) {
+ EMSG(" => (ret=%lx)", res);
+ tee_ta_close_session((uint32_t) s, open_sessions);
+ }
+ }
+
+ return res;
+}
+
+static void tee_ta_set_invoke_timeout(struct tee_ta_session *sess,
+ uint32_t cancel_req_to)
+{
+ TEE_Time current_time;
+ TEE_Time cancel_time = { UINT32_MAX, UINT32_MAX };
+
+ if (cancel_req_to == TEE_TIMEOUT_INFINITE)
+ goto out;
+
+	if (tee_time_get_sys_time(&current_time) != TEE_SUCCESS)
+ goto out;
+
+ /* Check that it doesn't wrap */
+ if (current_time.seconds + (cancel_req_to / 1000) >=
+ current_time.seconds) {
+ cancel_time.seconds =
+ current_time.seconds + cancel_req_to / 1000;
+ cancel_time.millis = current_time.millis + cancel_req_to % 1000;
+		if (cancel_time.millis >= 1000) {
+ cancel_time.seconds++;
+ cancel_time.millis -= 1000;
+ }
+ }
+
+out:
+ sess->cancel_time = cancel_time;
+}
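For context, a sketch of how a cancellation test against the cancel_time set above could look; the helper itself is hypothetical, but the session fields are the ones this file uses:

    static bool is_cancelled(struct tee_ta_session *s, const TEE_Time *now)
    {
    	if (s->cancel_mask)	/* cancel requests currently masked */
    		return false;
    	if (s->cancel)		/* explicit cancel request */
    		return true;
    	return now->seconds > s->cancel_time.seconds ||
    	       (now->seconds == s->cancel_time.seconds &&
    		now->millis >= s->cancel_time.millis);
    }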
+
+static TEE_Result tee_user_ta_enter(TEE_ErrorOrigin *err,
+ struct tee_ta_session *session,
+ enum tee_user_ta_func func,
+ uint32_t cancel_req_to, uint32_t cmd,
+ struct tee_ta_param *param)
+{
+ TEE_Result res;
+ TEE_Param *usr_params;
+ tee_paddr_t usr_stack;
+ tee_uaddr_t stack_uaddr;
+ tee_uaddr_t start_uaddr;
+ struct tee_ta_ctx *ctx = session->ctx;
+ ta_func_head_t *ta_func_head =
+ (ta_func_head_t *)((uint32_t) ctx->head + sizeof(ta_head_t));
+ tee_uaddr_t params_uaddr;
+ TEE_ErrorOrigin serr = TEE_ORIGIN_TEE;
+
+ TEE_ASSERT((ctx->flags & TA_FLAG_EXEC_DDR) != 0);
+
+ TEE_ASSERT((uint32_t) func <=
+ (ctx->head->nbr_func - ctx->num_res_funcs));
+
+ /* Set timeout of entry */
+ tee_ta_set_invoke_timeout(session, cancel_req_to);
+
+ /* Map user space memory */
+ res = tee_mmu_map(ctx, param);
+ if (res != TEE_SUCCESS)
+ goto cleanup_return;
+
+ /* Switch to user ctx */
+ tee_mmu_set_ctx(ctx);
+
+ /* Make room for usr_params at top of stack */
+	usr_stack = tee_mm_get_smem(ctx->mm_heap_stack) + ctx->stack_size;
+	usr_stack -= sizeof(param->params);
+	usr_params = (TEE_Param *)usr_stack;
+	memcpy(usr_params, param->params, sizeof(param->params));
+
+ res = tee_mmu_kernel_to_user(ctx, (tee_vaddr_t)usr_params,
+				    &params_uaddr);
+ if (res != TEE_SUCCESS)
+ goto cleanup_return;
+
+ res = tee_mmu_kernel_to_user(ctx, usr_stack, &stack_uaddr);
+ if (res != TEE_SUCCESS)
+ goto cleanup_return;
+
+ start_uaddr = ctx->load_addr + ta_func_head[func].start;
+ /* tee_thread_set_sess(session); */
+ tee_rs = session;
+
+ switch (func) {
+ case USER_TA_FUNC_OPEN_CLIENT_SESSION:
+ res =
+ tee_svc_enter_user_mode(param->types, params_uaddr,
+ (uint32_t) session, 0, stack_uaddr,
+ start_uaddr, &ctx->panicked,
+ &ctx->panic_code);
+
+ /*
+		 * According to GP spec the origin should always be set to the
+ * TA after TA execution
+ */
+ serr = TEE_ORIGIN_TRUSTED_APP;
+ break;
+
+ case USER_TA_FUNC_CLOSE_CLIENT_SESSION:
+ res = tee_svc_enter_user_mode((uint32_t) session, 0, 0, 0,
+ stack_uaddr, start_uaddr,
+ &ctx->panicked, &ctx->panic_code);
+
+ serr = TEE_ORIGIN_TRUSTED_APP;
+ break;
+
+ case USER_TA_FUNC_INVOKE_COMMAND:
+ res =
+ tee_svc_enter_user_mode(cmd, param->types, params_uaddr,
+ (uint32_t) session, stack_uaddr,
+ start_uaddr, &ctx->panicked,
+ &ctx->panic_code);
+
+ serr = TEE_ORIGIN_TRUSTED_APP;
+ break;
+
+ default:
+ serr = TEE_ORIGIN_TEE;
+ res = TEE_ERROR_BAD_STATE;
+ }
+
+ /* tee_thread_set_sess(NULL); */
+ tee_rs = NULL;
+
+ if (ctx->panicked) {
+ DMSG("tee_user_ta_enter: TA panicked with code 0x%x\n",
+ ctx->panic_code);
+ serr = TEE_ORIGIN_TEE;
+ res = TEE_ERROR_TARGET_DEAD;
+ }
+
+ /* Copy out value results */
+ memcpy(param->params, usr_params, sizeof(param->params));
+
+cleanup_return:
+ /* Restore original ROM mapping */
+ tee_mmu_set_ctx(NULL);
+
+ /*
+ * Clear the cancel state now that the user TA has returned. The next
+ * time the TA will be invoked will be with a new operation and should
+ * not have an old cancellation pending.
+ */
+ session->cancel = false;
+
+ /*
+ * Can't update *err until now since it may point to an address
+ * mapped for the user mode TA.
+ */
+ *err = serr;
+
+ return res;
+}
+
+/*-----------------------------------------------------------------------------
+ * Sets up virtual memory for the service
+ *---------------------------------------------------------------------------*/
+static TEE_Result tee_ta_func_execute(TEE_ErrorOrigin *err,
+ struct tee_ta_session *const session,
+ const uint32_t cmd,
+ struct tee_ta_param *param)
+{
+ TEE_Result res;
+ TEE_Result res2;
+ struct tee_ta_ctx *const ctx = session->ctx;
+ ta_func_head_t *ta_func_head =
+ (ta_func_head_t *)((uint32_t) ctx->head + sizeof(ta_head_t));
+ uint32_t offset;
+ TEE_Operation op;
+ uint32_t n;
+
+ res = tee_compat_param_new_to_old(param, &op);
+ if (res != TEE_SUCCESS) {
+ *err = TEE_ORIGIN_TEE;
+ return res;
+ }
+
+ /* search for ta function */
+ n = 0;
+ while (n < (ctx->head->nbr_func - ctx->num_res_funcs) &&
+ cmd != ta_func_head->cmd_id) {
+ ta_func_head++;
+ n++;
+ }
+ if (cmd != ta_func_head->cmd_id) {
+		/* service not found */
+ return TEE_ERROR_ITEM_NOT_FOUND;
+ }
+
+ /* call service */
+ offset =
+ ta_func_head->start - sizeof(ta_head_t) -
+ ctx->head->nbr_func * sizeof(ta_func_head_t) +
+ (ctx->mm->offset << SMALL_PAGE_SHIFT);
+
+ tee_rs = session;
+ res = ((uint32_t(*)(TEE_Operation *))
+ ((uint32_t) (TEE_PVMEM_LO + offset))) (&op);
+ tee_rs = NULL;
+ /*
+	 * According to GP spec the origin should always be set to the TA after
+ * TA execution
+ */
+ *err = TEE_ORIGIN_TRUSTED_APP;
+
+ res2 = tee_compat_param_old_to_new(&op, param);
+ if (res == TEE_SUCCESS && res2 != TEE_SUCCESS) {
+ *err = TEE_ORIGIN_TEE;
+ return res2;
+ }
+
+ return res;
+}
+
+/*
+ * Load a TA via RPC with UUID defined by input param uuid. The virtual
+ * address of the TA is received in out parameter ta
+ *
+ * Function is not thread safe
+ */
+TEE_Result tee_ta_rpc_load(const TEE_UUID *uuid, kta_signed_header_t **ta,
+ struct tee_ta_nwumap *map, uint32_t *ret_orig)
+{
+ TEE_Result res;
+ struct teesmc32_arg *arg;
+ struct teesmc32_param *params;
+ paddr_t pharg = 0;
+ paddr_t phpayload = 0;
+ paddr_t cookie = 0;
+ struct tee_rpc_load_ta_cmd *cmd_load_ta;
+ struct tee_ta_nwumap nwunmap;
+
+ if (uuid == NULL || ta == NULL || ret_orig == NULL)
+ return TEE_ERROR_BAD_PARAMETERS;
+
+ /* get a rpc buffer */
+ pharg = thread_rpc_alloc_arg(TEESMC32_GET_ARG_SIZE(2));
+ thread_st_rpc_alloc_payload(sizeof(struct tee_rpc_load_ta_cmd),
+ &phpayload, &cookie);
+ if (!pharg || !phpayload) {
+ *ret_orig = TEE_ORIGIN_TEE;
+ res = TEE_ERROR_OUT_OF_MEMORY;
+ goto out;
+ }
+
+ if (!TEE_ALIGNMENT_IS_OK(pharg, struct teesmc32_arg) ||
+ !TEE_ALIGNMENT_IS_OK(phpayload, struct tee_rpc_load_ta_cmd)) {
+ *ret_orig = TEE_ORIGIN_TEE;
+ res = TEE_ERROR_GENERIC;
+ goto out;
+ }
+
+ if (core_pa2va(pharg, (uint32_t *)&arg) ||
+ core_pa2va(phpayload, (uint32_t *)&cmd_load_ta)) {
+ *ret_orig = TEE_ORIGIN_TEE;
+ res = TEE_ERROR_GENERIC;
+ goto out;
+ }
+
+ arg->cmd = TEE_RPC_LOAD_TA;
+ arg->num_params = 2;
+	/* Set a suitable error code in case our request is ignored. */
+ arg->ret = TEE_ERROR_NOT_IMPLEMENTED;
+ params = TEESMC32_GET_PARAMS(arg);
+ params[0].attr = TEESMC_ATTR_TYPE_MEMREF_INOUT |
+ TEESMC_ATTR_CACHE_DEFAULT << TEESMC_ATTR_CACHE_SHIFT;
+ params[1].attr = TEESMC_ATTR_TYPE_MEMREF_OUTPUT |
+ TEESMC_ATTR_CACHE_DEFAULT << TEESMC_ATTR_CACHE_SHIFT;
+
+ params[0].u.memref.buf_ptr = phpayload;
+ params[0].u.memref.size = sizeof(struct tee_rpc_load_ta_cmd);
+ params[1].u.memref.buf_ptr = 0;
+ params[1].u.memref.size = 0;
+
+ memset(cmd_load_ta, 0, sizeof(struct tee_rpc_load_ta_cmd));
+ memcpy(&cmd_load_ta->uuid, uuid, sizeof(TEE_UUID));
+
+ thread_rpc_cmd(pharg);
+ res = arg->ret;
+
+ if (res != TEE_SUCCESS) {
+ *ret_orig = TEE_ORIGIN_COMMS;
+ goto out;
+ }
+
+ nwunmap.ph = (paddr_t)cmd_load_ta->va;
+ nwunmap.size = params[1].u.memref.size;
+ if (core_pa2va(params[1].u.memref.buf_ptr, (uint32_t *)ta)) {
+ tee_ta_rpc_free(&nwunmap);
+ *ret_orig = TEE_ORIGIN_TEE;
+ res = TEE_ERROR_GENERIC;
+ goto out;
+ }
+ *map = nwunmap;
+
+out:
+ thread_rpc_free_arg(pharg);
+ thread_st_rpc_free_payload(cookie);
+ return res;
+}
+
+static TEE_Result tee_ta_rpc_free(struct tee_ta_nwumap *map)
+{
+ TEE_Result res;
+ struct teesmc32_arg *arg;
+ struct teesmc32_param *params;
+ paddr_t pharg = 0;
+
+ /* get a rpc buffer */
+ pharg = thread_rpc_alloc_arg(TEESMC32_GET_ARG_SIZE(1));
+ if (!pharg) {
+ res = TEE_ERROR_OUT_OF_MEMORY;
+ goto out;
+ }
+
+ if (!TEE_ALIGNMENT_IS_OK(pharg, struct teesmc32_arg)) {
+ res = TEE_ERROR_GENERIC;
+ goto out;
+ }
+
+ if (core_pa2va(pharg, (uint32_t *)&arg)) {
+ res = TEE_ERROR_GENERIC;
+ goto out;
+ }
+
+ arg->cmd = TEE_RPC_FREE_TA;
+ arg->num_params = 1;
+	/* Set a suitable error code in case our request is ignored. */
+ arg->ret = TEE_ERROR_NOT_IMPLEMENTED;
+ params = TEESMC32_GET_PARAMS(arg);
+ params[0].attr = TEESMC_ATTR_TYPE_MEMREF_INPUT |
+ TEESMC_ATTR_CACHE_DEFAULT << TEESMC_ATTR_CACHE_SHIFT;
+
+ params[0].u.memref.buf_ptr = map->ph;
+ params[0].u.memref.size = map->size;
+
+ thread_rpc_cmd(pharg);
+ res = arg->ret;
+out:
+ thread_rpc_free_arg(pharg);
+ return res;
+}
+
+/*-----------------------------------------------------------------------------
+ * Close a Trusted Application and free available resources
+ *---------------------------------------------------------------------------*/
+TEE_Result tee_ta_close_session(uint32_t id,
+ struct tee_ta_session_head *open_sessions)
+{
+ struct tee_ta_session *sess, *next;
+ TEE_Result res = TEE_SUCCESS;
+
+ DMSG("tee_ta_close_session(%x)", (unsigned int)id);
+
+ if (id == 0)
+ return TEE_ERROR_ITEM_NOT_FOUND;
+
+ TAILQ_FOREACH(sess, open_sessions, link) {
+ if (id == (uint32_t) sess) {
+ struct tee_ta_ctx *ctx = sess->ctx;
+
+ DMSG(" ... Destroy session");
+
+ if (ctx->locked)
+ return TEE_ERROR_BUSY;
+
+ if (ctx->busy)
+ return TEE_STE_ERROR_SYSTEM_BUSY;
+ ctx->busy = true;
+
+ if ((ctx->static_ta != NULL) &&
+ (ctx->static_ta->close_session_entry_point
+ != NULL) &&
+ (!ctx->panicked)) {
+ DMSG(" ... close_session_entry_point");
+ res =
+ invoke_ta(sess, 0, 0,
+ COMMAND_CLOSE_SESSION);
+
+ } else if (((ctx->flags & TA_FLAG_USER_MODE) != 0) &&
+ (!ctx->panicked)) {
+ TEE_ErrorOrigin err;
+ struct tee_ta_param param = { 0 };
+
+ tee_user_ta_enter(
+ &err, sess,
+ USER_TA_FUNC_CLOSE_CLIENT_SESSION,
+ TEE_TIMEOUT_INFINITE, 0,
+					&param);
+ }
+
+ TAILQ_REMOVE(open_sessions, sess, link);
+
+ ctx->busy = false;
+
+ TEE_ASSERT(ctx->ref_count > 0);
+ ctx->ref_count--;
+ if (ctx->ref_count > 0) {
+ free(sess);
+ sess = NULL;
+ return TEE_SUCCESS;
+ }
+
+ /*
+ * Clean all traces of the TA, both RO and RW data.
+ * No L2 cache maintenance to avoid sync problems
+ */
+ if ((ctx->flags & TA_FLAG_EXEC_DDR) != 0) {
+ void *pa;
+ void *va;
+ uint32_t s;
+
+ tee_mmu_set_ctx(ctx);
+
+ if (ctx->mm != NULL) {
+ pa = (void *)tee_mm_get_smem(ctx->mm);
+ if (tee_mmu_user_pa2va(ctx, pa, &va) ==
+ TEE_SUCCESS) {
+ s = tee_mm_get_bytes(ctx->mm);
+ memset(va, 0, s);
+ core_cache_maintenance
+ (DCACHE_AREA_CLEAN, va, s);
+ }
+ }
+
+ if (ctx->mm_heap_stack != NULL) {
+ pa = (void *)tee_mm_get_smem
+ (ctx->mm_heap_stack);
+ if (tee_mmu_user_pa2va(ctx, pa, &va) ==
+ TEE_SUCCESS) {
+ s = tee_mm_get_bytes
+ (ctx->mm_heap_stack);
+ memset(va, 0, s);
+ core_cache_maintenance
+ (DCACHE_AREA_CLEAN, va, s);
+ }
+ }
+ tee_mmu_set_ctx(NULL);
+ }
+
+ DMSG(" ... Destroy TA ctx");
+
+ TAILQ_REMOVE(&tee_ctxes, ctx, link);
+
+ /*
+ * Close sessions opened by this TA
+ * TAILQ_FOREACH() macro cannot be used as the element
+ * is removed inside tee_ta_close_session
+ */
+
+ for (struct tee_ta_session *linked_sess =
+ TAILQ_FIRST(&ctx->open_sessions); linked_sess;
+ linked_sess = next) {
+ next = linked_sess->link.tqe_next;
+ (void)tee_ta_close_session((uint32_t)
+ linked_sess,
+ &ctx->open_sessions);
+ }
+
+ if ((ctx->static_ta != NULL) &&
+ (ctx->static_ta->destroy_entry_point != NULL) &&
+ (!ctx->panicked)) {
+ DMSG(" ... destroy_entry_point");
+ res =
+ invoke_ta(sess, 0, 0,
+ COMMAND_DESTROY_ENTRY_POINT);
+ }
+
+ free(sess);
+ sess = NULL;
+
+ /* If TA was loaded in reserved DDR free the alloc. */
+ tee_mm_free(tee_mm_find
+ (&tee_mm_pub_ddr, (uintptr_t) ctx->nmem));
+
+ if (ctx->nwumap.size != 0)
+ tee_ta_rpc_free(&ctx->nwumap);
+
+ if ((ctx->flags & TA_FLAG_USER_MODE) != 0) {
+ tee_mmu_final(ctx);
+ tee_mm_free(ctx->mm_heap_stack);
+ }
+ if (ctx->static_ta == NULL) {
+ tee_mm_free(ctx->mm);
+ free((void *)ctx->rw_data);
+ free(ctx->head);
+ }
+
+ /* Free cryp states created by this TA */
+ tee_svc_cryp_free_states(ctx);
+ /* Close cryp objects opened by this TA */
+ tee_obj_close_all(ctx);
+ /* Free emums created by this TA */
+ tee_svc_storage_close_all_enum(ctx);
+
+ free(ctx);
+
+ return res;
+ }
+ }
+
+	EMSG(" .... Session %p to be removed was not found", (void *)sess);
+ return TEE_ERROR_ITEM_NOT_FOUND;
+}
+
+TEE_Result tee_ta_make_current_session_resident(void)
+{
+ tee_mm_entry_t *mm;
+ void *addr;
+ size_t len;
+ struct tee_ta_ctx *ctx = tee_rs->ctx;
+
+ /*
+ * Below reserved DDR is allocated for the backing memory of the TA
+ * and then the backing memory is copied to the new location and
+ * the pointer to normal world memory is updated.
+ */
+
+ if (tee_mm_addr_is_within_range(&tee_mm_pub_ddr, (uintptr_t) ctx->nmem))
+ /* The backing pages are already in reserved DDR */
+ goto func_ret;
+
+ len = ctx->head->ro_size + ctx->head->rw_size;
+ mm = tee_mm_alloc(&tee_mm_pub_ddr, len);
+ if (mm == NULL) {
+ DMSG("Out of pub DDR, cannot allocate %u", len);
+ return TEE_ERROR_OUT_OF_MEMORY;
+ }
+ addr = (void *)tee_mm_get_smem(mm);
+
+ memcpy(addr, ctx->nmem, len);
+ ctx->nmem = addr;
+
+func_ret:
+ ctx->locked = true;
+ return TEE_SUCCESS;
+}
+
+void tee_ta_unlock_current_session(void)
+{
+ struct tee_ta_ctx *ctx = tee_rs->ctx;
+
+ ctx->locked = false;
+}
+
+static TEE_Result tee_ta_verify_param(struct tee_ta_session *sess,
+ struct tee_ta_param *param)
+{
+ tee_paddr_t p;
+ size_t l;
+ int n;
+
+ for (n = 0; n < TEE_NUM_PARAMS; n++) {
+ switch (TEE_PARAM_TYPE_GET(param->types, n)) {
+ case TEE_PARAM_TYPE_MEMREF_OUTPUT:
+ case TEE_PARAM_TYPE_MEMREF_INOUT:
+ case TEE_PARAM_TYPE_MEMREF_INPUT:
+
+ p = (tee_paddr_t)param->params[n].memref.buffer;
+ l = param->params[n].memref.size;
+
+ if (core_pbuf_is(CORE_MEM_NSEC_SHM, p, l))
+ break;
+ if ((sess->ctx->flags & TA_FLAG_UNSAFE_NW_PARAMS) &&
+ core_pbuf_is(CORE_MEM_MULTPURPOSE, p, l))
+ break;
+ if ((sess->clnt_id.login == TEE_LOGIN_TRUSTED_APP) &&
+ core_pbuf_is(CORE_MEM_TA_RAM, p, l))
+ break;
+
+ return TEE_ERROR_SECURITY;
+ default:
+ break;
+ }
+ }
+ return TEE_SUCCESS;
+}
+
+TEE_Result tee_ta_open_session(TEE_ErrorOrigin *err,
+ struct tee_ta_session **sess,
+ struct tee_ta_session_head *open_sessions,
+ const TEE_UUID *uuid,
+ const kta_signed_header_t *ta,
+ const TEE_Identity *clnt_id,
+ uint32_t cancel_req_to,
+ struct tee_ta_param *param)
+{
+ TEE_Result res;
+ uint32_t id = (uint32_t) *sess;
+ struct tee_ta_session *s = 0;
+ bool sess_inited = (*sess != NULL);
+ struct tee_ta_ctx *ctx;
+
+ res = tee_ta_init_session(&id, open_sessions, uuid, ta, &s);
+ if (res != TEE_SUCCESS) {
+ EMSG("tee_ta_init_session() failed with error 0x%lx", res);
+ *err = TEE_ORIGIN_TEE;
+ return res;
+ }
+
+ ctx = s->ctx;
+ ctx->nwumap.size = 0;
+
+ if (ctx->panicked) {
+		EMSG("Calling tee_ta_close_session()");
+ tee_ta_close_session(id, open_sessions);
+ *err = TEE_ORIGIN_TEE;
+ return TEE_ERROR_TARGET_DEAD;
+ }
+
+ *sess = s;
+	/* Save identity of the owner of the session */
+ s->clnt_id = *clnt_id;
+
+ /*
+ * Session context is ready.
+ */
+ if (sess_inited)
+ goto out;
+
+ if (((ctx->flags & TA_FLAG_USER_MODE) != 0 || ctx->static_ta != NULL) &&
+ (!sess_inited)) {
+		/* Only user TAs have a callback for open session */
+
+ res = tee_ta_verify_param(s, param);
+ if (res == TEE_SUCCESS) {
+			/* Case of a static TA */
+			if ((ctx->static_ta != NULL) &&
+			    (ctx->static_ta->open_session_entry_point !=
+			     NULL)) {
+				res = invoke_ta(s, 0, param,
+						COMMAND_OPEN_SESSION);
+
+ /*
+ * Clear the cancel state now that the user TA
+ * has returned. The next time the TA will be
+ * invoked will be with a new operation and
+ * should not have an old cancellation pending.
+ */
+ s->cancel = false;
+ } else {
+ res = tee_user_ta_enter(
+ err, s,
+ USER_TA_FUNC_OPEN_CLIENT_SESSION,
+ cancel_req_to, 0, param);
+ }
+ }
+
+ if (ctx->panicked || (res != TEE_SUCCESS))
+ tee_ta_close_session(id, open_sessions);
+ }
+
+out:
+ /*
+	 * The error origin is TEE_ORIGIN_TRUSTED_APP for a "regular"
+	 * error, except when the TA has panicked.
+ */
+ if (ctx->panicked)
+ *err = TEE_ORIGIN_TEE;
+ else
+ *err = TEE_ORIGIN_TRUSTED_APP;
+
+ if (res != TEE_SUCCESS)
+ EMSG("Failed. Return error 0x%lx", res);
+
+ return res;
+}
+
+TEE_Result tee_ta_invoke_command(TEE_ErrorOrigin *err,
+ struct tee_ta_session *sess,
+ const TEE_Identity *clnt_id,
+ uint32_t cancel_req_to, uint32_t cmd,
+ struct tee_ta_param *param)
+{
+ TEE_Result res;
+
+ if (sess->ctx->panicked) {
+ EMSG(" Panicked !");
+ *err = TEE_ORIGIN_TEE;
+ OUTRMSG(TEE_ERROR_TARGET_DEAD);
+ }
+
+ if (sess->ctx->busy) {
+ *err = TEE_ORIGIN_TEE;
+ return TEE_STE_ERROR_SYSTEM_BUSY;
+ }
+ sess->ctx->busy = true;
+
+ res = tee_ta_verify_param(sess, param);
+ if (res != TEE_SUCCESS) {
+ *err = TEE_ORIGIN_TEE;
+ goto function_exit;
+ }
+
+ if ((sess->ctx->static_ta != NULL) &&
+ (sess->ctx->static_ta->invoke_command_entry_point != NULL)) {
+ res = tee_ta_param_pa2va(sess, param);
+ if (res != TEE_SUCCESS) {
+ *err = TEE_ORIGIN_TEE;
+ goto function_exit;
+ }
+
+ /* Set timeout of entry */
+ tee_ta_set_invoke_timeout(sess, cancel_req_to);
+
+ DMSG(" invoke_command_entry_point(%p)", sess->user_ctx);
+ res = invoke_ta(sess, cmd, param, COMMAND_INVOKE_COMMAND);
+
+ /*
+ * Clear the cancel state now that the user TA has returned.
+ * The next time the TA will be invoked will be with a new
+ * operation and should not have an old cancellation pending.
+ */
+ sess->cancel = false;
+
+ /*
+		 * According to the GP spec the origin should always be set to the
+ * TA after TA execution
+ */
+ *err = TEE_ORIGIN_TRUSTED_APP;
+ } else if ((sess->ctx->flags & TA_FLAG_USER_MODE) != 0) {
+ res = tee_user_ta_enter(err, sess, USER_TA_FUNC_INVOKE_COMMAND,
+ cancel_req_to, cmd, param);
+ } else {
+ res = tee_ta_param_pa2va(sess, param);
+ if (res != TEE_SUCCESS) {
+ *err = TEE_ORIGIN_TEE;
+ goto function_exit;
+ }
+ res = tee_ta_func_execute(err, sess, cmd, param);
+ }
+
+ if (sess->ctx->panicked) {
+ *err = TEE_ORIGIN_TEE;
+ res = TEE_ERROR_TARGET_DEAD;
+ }
+
+function_exit:
+ sess->ctx->busy = false;
+ if (res != TEE_SUCCESS)
+		EMSG(" => Error: 0x%lx, origin %ld\n", res, *err);
+ return res;
+}
+
+TEE_Result tee_ta_cancel_command(TEE_ErrorOrigin *err,
+ struct tee_ta_session *sess,
+ const TEE_Identity *clnt_id)
+{
+ *err = TEE_ORIGIN_TEE;
+
+ sess->cancel = true;
+ return TEE_SUCCESS;
+}
+
+TEE_Result tee_ta_get_current_session(struct tee_ta_session **sess)
+{
+ if (tee_rs == NULL)
+ return TEE_ERROR_BAD_STATE;
+ *sess = tee_rs;
+ return TEE_SUCCESS;
+}
+
+void tee_ta_set_current_session(struct tee_ta_session *sess)
+{
+ if (tee_rs != sess) {
+ struct tee_ta_ctx *ctx = NULL;
+
+ if (sess != NULL)
+ ctx = sess->ctx;
+
+ tee_rs = sess;
+ tee_mmu_set_ctx(ctx);
+ }
+}
+
+TEE_Result tee_ta_get_client_id(TEE_Identity *id)
+{
+ TEE_Result res;
+ struct tee_ta_session *sess;
+
+ res = tee_ta_get_current_session(&sess);
+ if (res != TEE_SUCCESS)
+ return res;
+
+ if (id == NULL)
+ return TEE_ERROR_BAD_PARAMETERS;
+
+ *id = sess->clnt_id;
+ return TEE_SUCCESS;
+}
+
+uintptr_t tee_ta_get_exec(const struct tee_ta_ctx *const ctx)
+{
+ if ((ctx->flags & TA_FLAG_EXEC_DDR) == 0) {
+ return tee_mm_get_smem(ctx->mm);
+ } else {
+ return tee_mmu_get_load_addr(ctx) + sizeof(ta_head_t) +
+ ctx->head->nbr_func * sizeof(ta_func_head_t);
+ }
+}
+
+TEE_Result tee_ta_verify_session_pointer(struct tee_ta_session *sess,
+ struct tee_ta_session_head
+ *open_sessions)
+{
+ struct tee_ta_session *s;
+
+ if (sess == (struct tee_ta_session *)TEE_SESSION_ID_STATIC_TA)
+ return TEE_SUCCESS;
+
+ TAILQ_FOREACH(s, open_sessions, link) {
+ if (s == sess)
+ return TEE_SUCCESS;
+ }
+ return TEE_ERROR_BAD_PARAMETERS;
+}
diff --git a/core/arch/arm32/kernel/tee_time.c b/core/arch/arm32/kernel/tee_time.c
new file mode 100644
index 00000000000..a98e8d4f309
--- /dev/null
+++ b/core/arch/arm32/kernel/tee_time.c
@@ -0,0 +1,198 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include
+#include
+
+
+#include
+#include
+#include
+#include
+
+#include
+
+#include
+
+#define TEE_TIME_SHIFT 5
+
+#define TEE_RTT0_HZ 32768UL
+
+#define TEE_RTT0_TICKS_PER_SECOND (TEE_RTT0_HZ)
+#define TEE_RTT0_TICKS_PER_MINUTE (TEE_RTT0_TICKS_PER_SECOND * 60)
+#define TEE_RTT0_TICKS_PER_HOUR (TEE_RTT0_TICKS_PER_MINUTE * 60)
+
+/* We'll receive one interrupt per hour */
+#define TEE_RTT0_WRAP_TICKS TEE_RTT0_TICKS_PER_HOUR
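+/*
+ * Worked out: at 32768 Hz one hour is 32768 * 60 * 60 = 117964800 ticks,
+ * which still fits comfortably in the 32-bit load register.
+ */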
+
+#define TEE_RTT1_HZ 10UL
+#define TEE_RTT1_WRAP_TICKS 0xffffffff
+
+/*
+ * The following is example code that could be used to activate time
+ * functionality in the TEE for arm32
+ *
+TEE_Result tee_time_init(void)
+{
+ - Disable timer and later change to 32kHz
+ IO(RTT0_CR) &= ~RTT_CR_EN;
+
+ if (!(IO(RTT1_CR) & RTT_CR_EN)) {
+ IO(RTT1_IMSC) |= RTT_IMSC_IMSC; - disable interrupts
+ IO(RTT1_LR) = TEE_RTT1_WRAP_TICKS; - start the timer
+
+ TEE_COMPILE_TIME_ASSERT(TEE_RTT1_HZ == TEE_TIME_BOOT_TICKS_HZ);
+ }
+
+ return TEE_SUCCESS;
+}
+
+uint32_t tee_time_get_boot_ticks(void)
+{
+ return TEE_RTT1_WRAP_TICKS - IO(RTT1_DR);
+}
+
+uint32_t tee_time_get_boot_time_in_seconds(void)
+{
+ return tee_time_get_boot_ticks() / TEE_RTT1_HZ;
+}
+*/
+
+static void tee_time_rtt0_init(void)
+{
+ static bool inited; /* initialized to false */
+
+ if (!inited) {
+ volatile uint32_t *cr = (uint32_t *)RTT0_CR;
+ volatile uint32_t *ctcr = (uint32_t *)RTT0_CTCR;
+ volatile uint32_t *lr = (uint32_t *)RTT0_LR;
+ volatile uint32_t *imsc = (uint32_t *)RTT0_IMSC;
+
+ DMSG("tee_time_rtt0_init: First call may take a few secs");
+
+ /*
+		 * Make sure the timer is disabled. RTT_CR_EN alone is not
+		 * accurate since enabling can still be in progress.
+		 * Checking *ctcr takes care of that, as updates to ctcr
+		 * only propagate once the timer really is disabled.
+ */
+ while (*ctcr != 0 || (*cr & (RTT_CR_EN | RTT_CR_ENS)) != 0) {
+ *cr &= ~RTT_CR_EN;
+ *ctcr = 0;
+ }
+
+ /* Change to 32kHz */
+ *ctcr = 0;
+
+ /* Enable interrupts on wrap */
+ *imsc |= RTT_IMSC_IMSC;
+
+ /* Start with the desired interrupt interval */
+ *lr = TEE_RTT0_WRAP_TICKS;
+
+ inited = true;
+ }
+}
+
+
+/*
+ * The following is example code that could be used to activate time
+ * functionality in the TEE for arm32
+ *
+TEE_Result tee_time_stamp(uint32_t *stamp)
+{
+ tee_time_rtt0_init();
+
+ *stamp = IO(RTT0_DR);
+
+ return TEE_SUCCESS;
+}
+
+TEE_Result tee_time_get(uint32_t stamp, uint32_t *time)
+{
+ TEE_Result res;
+ uint32_t val;
+
+ res = tee_time_stamp(&val);
+ if (res != TEE_SUCCESS)
+ return res;
+
+ *time = (stamp - val) >> TEE_TIME_SHIFT;
+
+ return TEE_SUCCESS;
+}
+
+TEE_Result tee_time_secure_rtc_update(const void *time, uint32_t time_size)
+{
+ return TEE_SUCCESS;
+}
+
+TEE_Result tee_time_secure_rtc_update_check(bool *ok)
+{
+ *ok = true;
+ return TEE_SUCCESS;
+}
+*/
+
+TEE_Result tee_time_get_sys_time(TEE_Time *time)
+{
+ uint32_t wrap0;
+ uint32_t wrap;
+ uint32_t timer;
+
+ /* Stub system time support until a HW secure timer is supported */
+ return tee_time_get_ree_time(time);
+
+ tee_time_rtt0_init();
+
+ /*
+	 * Read wrap before and after reading DR to be able to detect
+	 * whether the timer wrapped while we were reading it.
+ */
+ do {
+ wrap0 = tee_time_rtt0_wrap;
+ timer = TEE_RTT0_WRAP_TICKS - IO(RTT0_DR);
+ wrap = tee_time_rtt0_wrap;
+ } while (wrap0 != wrap);
+
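+	/*
+	 * Each wrap accounts for TEE_RTT0_WRAP_TICKS / TEE_RTT0_HZ =
+	 * 3600 seconds, so seconds is wrap * 3600 plus the whole seconds
+	 * of the current timer value.
+	 */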
+ time->seconds = wrap * TEE_RTT0_WRAP_TICKS / TEE_RTT0_HZ +
+ timer / TEE_RTT0_HZ;
+ time->millis =
+ (timer % TEE_RTT0_HZ) / (TEE_RTT0_HZ / TEE_TIME_MILLIS_BASE);
+
+ return TEE_SUCCESS;
+}
+
+void tee_wait_specific(uint32_t milliseconds_delay)
+{
+ /*
+	 * Any implementation must check that it is secure and robust
+	 * against idle states of the ARM core
+ */
+ /* usleep to be implemented */
+ /* usleep(milliseconds_delay * 1000); */
+}
diff --git a/core/arch/arm32/kernel/tee_time_unpg.c b/core/arch/arm32/kernel/tee_time_unpg.c
new file mode 100644
index 00000000000..314532eef39
--- /dev/null
+++ b/core/arch/arm32/kernel/tee_time_unpg.c
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include
+#include
+#include
+
+volatile uint32_t tee_time_rtt0_wrap;
+
+void tee_time_rtt_interrupt(void)
+{
+ if (IO(RTT0_MIS) & RTT_MIS_MIS)
+ tee_time_rtt0_wrap++;
+
+ /* No need to clear the interrupt as ROM code is handling that. */
+}
diff --git a/core/arch/arm32/kernel/thread.c b/core/arch/arm32/kernel/thread.c
new file mode 100644
index 00000000000..b4daca961be
--- /dev/null
+++ b/core/arch/arm32/kernel/thread.c
@@ -0,0 +1,447 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include
+#include
+#include "thread_private.h"
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+
+static struct thread_ctx threads[NUM_THREADS];
+
+static struct thread_core_local thread_core_local[CFG_TEE_CORE_NB_CORE];
+
+thread_call_handler_t thread_stdcall_handler_ptr;
+static thread_call_handler_t thread_fastcall_handler_ptr;
+thread_fiq_handler_t thread_fiq_handler_ptr;
+thread_svc_handler_t thread_svc_handler_ptr;
+thread_abort_handler_t thread_abort_handler_ptr;
+
+static unsigned int thread_global_lock = UNLOCK;
+
+static void lock_global(void)
+{
+ cpu_spin_lock(&thread_global_lock);
+}
+
+static void unlock_global(void)
+{
+ cpu_spin_unlock(&thread_global_lock);
+}
+
+static struct thread_core_local *get_core_local(void)
+{
+ uint32_t cpu_id = get_core_pos();
+
+ /*
+ * IRQs must be disabled before playing with core_local since
+	 * we otherwise may be rescheduled to a different core in the
+ * middle of this function.
+ */
+ assert(read_cpsr() & CPSR_I);
+
+ assert(cpu_id < CFG_TEE_CORE_NB_CORE);
+ return &thread_core_local[cpu_id];
+}
+
+static bool have_one_active_thread(void)
+{
+ size_t n;
+
+ for (n = 0; n < NUM_THREADS; n++) {
+ if (threads[n].state == THREAD_STATE_ACTIVE)
+ return true;
+ }
+
+ return false;
+}
+
+static void thread_alloc_and_run(struct thread_smc_args *args)
+{
+ size_t n;
+ struct thread_core_local *l = get_core_local();
+ bool found_thread = false;
+
+ assert(l->curr_thread == -1);
+
+ lock_global();
+
+ if (!have_one_active_thread()) {
+ for (n = 0; n < NUM_THREADS; n++) {
+ if (threads[n].state == THREAD_STATE_FREE) {
+ threads[n].state = THREAD_STATE_ACTIVE;
+ found_thread = true;
+ break;
+ }
+ }
+ }
+
+ unlock_global();
+
+ if (!found_thread) {
+ args->a0 = TEESMC_RETURN_EBUSY;
+ args->a1 = 0;
+ args->a2 = 0;
+ args->a3 = 0;
+ return;
+ }
+
+ l->curr_thread = n;
+
+ threads[n].regs.pc = (uint32_t)thread_stdcall_entry;
+	/* Stdcalls start in SVC mode with masked IRQ and unmasked FIQ */
+ threads[n].regs.cpsr = CPSR_MODE_SVC | CPSR_I;
+ threads[n].flags = 0;
+ /* Enable thumb mode if it's a thumb instruction */
+ if (threads[n].regs.pc & 1)
+ threads[n].regs.cpsr |= CPSR_T;
+ /* Reinitialize stack pointer */
+ threads[n].regs.svc_sp = threads[n].stack_va_end;
+
+ /*
+ * Copy arguments into context. This will make the
+ * arguments appear in r0-r7 when thread is started.
+ */
+ threads[n].regs.r0 = args->a0;
+ threads[n].regs.r1 = args->a1;
+ threads[n].regs.r2 = args->a2;
+ threads[n].regs.r3 = args->a3;
+ threads[n].regs.r4 = args->a4;
+ threads[n].regs.r5 = args->a5;
+ threads[n].regs.r6 = args->a6;
+ threads[n].regs.r7 = args->a7;
+
+ /* Save Hypervisor Client ID */
+ threads[n].hyp_clnt_id = args->a7;
+
+ thread_resume(&threads[n].regs);
+}
+
+static void thread_resume_from_rpc(struct thread_smc_args *args)
+{
+ size_t n = args->a3; /* thread id */
+ struct thread_core_local *l = get_core_local();
+ uint32_t rv = 0;
+
+ assert(l->curr_thread == -1);
+
+ lock_global();
+
+ if (have_one_active_thread()) {
+ rv = TEESMC_RETURN_EBUSY;
+ } else if (n < NUM_THREADS &&
+ threads[n].state == THREAD_STATE_SUSPENDED &&
+ args->a7 == threads[n].hyp_clnt_id) {
+ threads[n].state = THREAD_STATE_ACTIVE;
+ } else {
+ rv = TEESMC_RETURN_ERESUME;
+ }
+
+ unlock_global();
+
+ if (rv) {
+ args->a0 = rv;
+ args->a1 = 0;
+ args->a2 = 0;
+ args->a3 = 0;
+ return;
+ }
+
+ l->curr_thread = n;
+
+ if (threads[n].have_user_map)
+ tee_mmu_set_map(&threads[n].user_map);
+
+ /*
+ * Return from RPC to request service of an IRQ must not
+ * get parameters from non-secure world.
+ */
+ if (threads[n].flags & THREAD_FLAGS_COPY_ARGS_ON_RETURN) {
+ /*
+ * Update returned values from RPC, values will appear in
+ * r0-r3 when thread is resumed.
+ */
+ threads[n].regs.r0 = args->a0;
+ threads[n].regs.r1 = args->a1;
+ threads[n].regs.r2 = args->a2;
+ threads[n].regs.r3 = args->a3;
+ threads[n].flags &= ~THREAD_FLAGS_COPY_ARGS_ON_RETURN;
+ }
+
+ thread_resume(&threads[n].regs);
+}
+
+void thread_handle_smc_call(struct thread_smc_args *args)
+{
+ check_canaries();
+
+ if (TEESMC_IS_FAST_CALL(args->a0)) {
+ thread_fastcall_handler_ptr(args);
+ } else {
+ if (args->a0 == TEESMC32_CALL_RETURN_FROM_RPC)
+ thread_resume_from_rpc(args);
+ else
+ thread_alloc_and_run(args);
+ }
+}
+
+void *thread_get_tmp_sp(void)
+{
+ struct thread_core_local *l = get_core_local();
+
+ return (void *)l->tmp_stack_va_end;
+}
+
+void thread_state_free(void)
+{
+ struct thread_core_local *l = get_core_local();
+
+ assert(l->curr_thread != -1);
+
+ lock_global();
+
+ assert(threads[l->curr_thread].state == THREAD_STATE_ACTIVE);
+ threads[l->curr_thread].state = THREAD_STATE_FREE;
+ threads[l->curr_thread].flags = 0;
+ l->curr_thread = -1;
+
+ unlock_global();
+}
+
+int thread_state_suspend(uint32_t flags, uint32_t cpsr, uint32_t pc)
+{
+ struct thread_core_local *l = get_core_local();
+ int ct = l->curr_thread;
+
+ assert(ct != -1);
+
+ check_canaries();
+
+ lock_global();
+
+ assert(threads[ct].state == THREAD_STATE_ACTIVE);
+ threads[ct].flags &= ~THREAD_FLAGS_COPY_ARGS_ON_RETURN;
+ threads[ct].flags |= flags & THREAD_FLAGS_COPY_ARGS_ON_RETURN;
+ threads[ct].regs.cpsr = cpsr;
+ threads[ct].regs.pc = pc;
+ threads[ct].state = THREAD_STATE_SUSPENDED;
+
+ threads[ct].have_user_map = !tee_mmu_is_kernel_mapping();
+ if (threads[ct].have_user_map) {
+ tee_mmu_get_map(&threads[ct].user_map);
+ tee_mmu_set_map(NULL);
+ }
+
+
+ l->curr_thread = -1;
+
+ unlock_global();
+
+ return ct;
+}
+
+
+bool thread_init_stack(uint32_t thread_id, vaddr_t sp)
+{
+ switch (thread_id) {
+ case THREAD_TMP_STACK: {
+ struct thread_core_local *l = get_core_local();
+
+ l->tmp_stack_va_end = sp;
+ l->curr_thread = -1;
+
+ thread_set_irq_sp(sp);
+ thread_set_fiq_sp(sp);
+ break;
+ }
+
+ case THREAD_ABT_STACK:
+ thread_set_abt_sp(sp);
+ break;
+
+ default:
+ if (thread_id >= NUM_THREADS)
+ return false;
+ if (threads[thread_id].state != THREAD_STATE_FREE)
+ return false;
+
+ threads[thread_id].stack_va_end = sp;
+ }
+
+ return true;
+}
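+
+/*
+ * Example boot-time usage (a sketch only; the stack-top symbols are
+ * assumptions, not names defined in this file):
+ *
+ *	thread_init_stack(THREAD_TMP_STACK, (vaddr_t)tmp_stack_top);
+ *	thread_init_stack(THREAD_ABT_STACK, (vaddr_t)abt_stack_top);
+ *	for (n = 0; n < NUM_THREADS; n++)
+ *		thread_init_stack(n, (vaddr_t)stack_tops[n]);
+ */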
+
+void thread_init_handlers(const struct thread_handlers *handlers)
+{
+ thread_stdcall_handler_ptr = handlers->stdcall;
+ thread_fastcall_handler_ptr = handlers->fastcall;
+ thread_fiq_handler_ptr = handlers->fiq;
+ thread_svc_handler_ptr = handlers->svc;
+ thread_abort_handler_ptr = handlers->abort;
+ thread_init_vbar();
+}
+
+void thread_set_tsd(void *tsd, thread_tsd_free_t free_func)
+{
+ struct thread_core_local *l = get_core_local();
+
+ assert(l->curr_thread != -1);
+ assert(threads[l->curr_thread].state == THREAD_STATE_ACTIVE);
+ threads[l->curr_thread].tsd = tsd;
+ threads[l->curr_thread].tsd_free = free_func;
+}
+
+void *thread_get_tsd(void)
+{
+ struct thread_core_local *l = get_core_local();
+ int ct = l->curr_thread;
+
+ if (ct == -1 || threads[ct].state != THREAD_STATE_ACTIVE)
+ return NULL;
+ else
+ return threads[ct].tsd;
+}
+
+struct thread_ctx_regs *thread_get_ctx_regs(void)
+{
+ struct thread_core_local *l = get_core_local();
+
+ assert(l->curr_thread != -1);
+ return &threads[l->curr_thread].regs;
+}
+
+void thread_set_irq(bool enable)
+{
+ struct thread_core_local *l;
+ uint32_t cpsr = read_cpsr();
+
+ /* get_core_local() requires IRQs to be disabled */
+ write_cpsr(cpsr | CPSR_I);
+
+ l = get_core_local();
+
+ assert(l->curr_thread != -1);
+
+ if (enable) {
+ threads[l->curr_thread].flags |= THREAD_FLAGS_IRQ_ENABLE;
+ write_cpsr(cpsr & ~CPSR_I);
+ } else {
+ /*
+ * No need to disable IRQ here since it's already disabled
+ * above.
+ */
+ threads[l->curr_thread].flags &= ~THREAD_FLAGS_IRQ_ENABLE;
+ }
+}
+
+void thread_restore_irq(void)
+{
+ struct thread_core_local *l;
+ uint32_t cpsr = read_cpsr();
+
+ /* get_core_local() requires IRQs to be disabled */
+ write_cpsr(cpsr | CPSR_I);
+
+ l = get_core_local();
+
+ assert(l->curr_thread != -1);
+
+ if (threads[l->curr_thread].flags & THREAD_FLAGS_IRQ_ENABLE)
+ write_cpsr(cpsr & ~CPSR_I);
+}
+
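+/*
+ * The RPC helpers below share one convention: rpc_args[0] carries a
+ * TEESMC_RETURN_RPC_* request code out to normal world via thread_rpc()
+ * and, once the thread is resumed, the answer written by normal world
+ * is found in rpc_args[1..2].
+ */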
+paddr_t thread_rpc_alloc_arg(size_t size)
+{
+ uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = {
+ TEESMC_RETURN_RPC_ALLOC_ARG, size};
+
+ thread_rpc(rpc_args);
+ return rpc_args[1];
+}
+
+paddr_t thread_rpc_alloc_payload(size_t size)
+{
+ uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = {
+ TEESMC_RETURN_RPC_ALLOC_PAYLOAD, size};
+
+ thread_rpc(rpc_args);
+ return rpc_args[1];
+}
+
+void thread_rpc_free_arg(paddr_t arg)
+{
+ if (arg) {
+ uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = {
+ TEESMC_RETURN_RPC_FREE_ARG, arg};
+
+ thread_rpc(rpc_args);
+ }
+}
+
+void thread_rpc_free_payload(paddr_t payload)
+{
+ if (payload) {
+ uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = {
+ TEESMC_RETURN_RPC_FREE_PAYLOAD, payload};
+
+ thread_rpc(rpc_args);
+ }
+}
+
+void thread_rpc_cmd(paddr_t arg)
+{
+ uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = {TEESMC_RETURN_RPC_CMD, arg};
+
+ thread_rpc(rpc_args);
+}
+
+void thread_st_rpc_alloc_payload(size_t size, paddr_t *payload, paddr_t *cookie)
+{
+ uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = {
+ TEESMC_RETURN_ST_RPC_ALLOC_PAYLOAD, size};
+
+ thread_rpc(rpc_args);
+ if (payload)
+ *payload = rpc_args[1];
+ if (cookie)
+ *cookie = rpc_args[2];
+}
+
+void thread_st_rpc_free_payload(paddr_t cookie)
+{
+	uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = {
+ TEESMC_RETURN_ST_RPC_FREE_PAYLOAD, cookie};
+
+ thread_rpc(rpc_args);
+}
diff --git a/core/arch/arm32/kernel/thread_asm.S b/core/arch/arm32/kernel/thread_asm.S
new file mode 100644
index 00000000000..2a988bf8b2a
--- /dev/null
+++ b/core/arch/arm32/kernel/thread_asm.S
@@ -0,0 +1,397 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include
+#include
+#include
+#include
+#include
+
+FUNC thread_set_abt_sp , :
+ mrs r1, cpsr
+ cps #CPSR_MODE_ABT
+ mov sp, r0
+ msr cpsr, r1
+ bx lr
+END_FUNC thread_set_abt_sp
+
+FUNC thread_set_irq_sp , :
+ mrs r1, cpsr
+ cps #CPSR_MODE_IRQ
+ mov sp, r0
+ msr cpsr, r1
+ bx lr
+END_FUNC thread_set_irq_sp
+
+
+FUNC thread_set_fiq_sp , :
+ mrs r1, cpsr
+ cps #CPSR_MODE_FIQ
+ mov sp, r0
+ msr cpsr, r1
+ bx lr
+END_FUNC thread_set_fiq_sp
+
+FUNC thread_recv_smc_call , :
+ ldr r9, =TEESMC32_CALL_HANDLE_FIQ
+ cmp r0, r9
+ bne .recv_smc
+ /*
+	 * FIQ raised while in non-secure world, unmask FIQ temporarily to
+	 * receive the FIQ as it is normally received when secure world
+	 * is active at the time the FIQ is raised.
+ */
+ cpsie f
+ nop
+ nop
+ nop
+	nop	/* We should have received it by now... */
+ cpsid f
+ mov r0, r1
+ mov r1, r2
+ mov r2, r3
+ mov r3, r4
+ b thread_issue_smc
+.recv_smc:
+ push {r0-r7}
+ mov r0, sp
+ bl thread_handle_smc_call
+ /*
+ * Normally thread_handle_smc_call() should return via
+	 * thread_exit() or thread_rpc(), but if thread_handle_smc_call()
+ * hasn't switched stack (fast call, FIQ, error detected) it will
+ * do a normal "C" return.
+ */
+ pop {r0-r7}
+thread_issue_smc:
+ smc #0
+ b thread_recv_smc_call /* Next entry to secure world is here */
+END_FUNC thread_recv_smc_call
+
+/* void thread_resume(struct thread_ctx_regs *regs) */
+FUNC thread_resume , :
+ add r12, r0, #(13 * 4) /* Restore registers r0-r12 later */
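+	/*
+	 * r0 points at a struct thread_ctx_regs (thread_private.h):
+	 * r0-r12 first, then banked spsr/sp/lr per mode, then pc and
+	 * cpsr, so r12 now points at the usr_spsr member.
+	 */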
+
+ cps #CPSR_MODE_SYS
+ ldm r12!, {r1, sp, lr}
+ msr spsr, r1
+
+#ifdef THREAD_LOCAL_EXCEPTION_SPS
+ cps #CPSR_MODE_IRQ
+ ldm r12!, {r1, sp, lr}
+ msr spsr, r1
+#endif /*THREAD_LOCAL_EXCEPTION_SPS*/
+
+ cps #CPSR_MODE_SVC
+ ldm r12!, {r1, sp, lr}
+ msr spsr, r1
+
+#ifdef THREAD_LOCAL_EXCEPTION_SPS
+ cps #CPSR_MODE_ABT
+ ldm r12!, {r1, sp, lr}
+ msr spsr, r1
+
+ cps #CPSR_MODE_UND
+ ldm r12!, {r1, sp, lr}
+ msr spsr, r1
+#endif /*THREAD_LOCAL_EXCEPTION_SPS*/
+
+ cps #CPSR_MODE_SVC
+ ldm r12, {r1, r2}
+ push {r1, r2}
+
+ ldm r0, {r0-r12}
+
+
+ /* Restore CPSR and jump to the instruction to resume at */
+ rfefd sp!
+END_FUNC thread_resume
+
+/*
+ * Disables IRQ and FIQ and saves state of thread, returns original
+ * CPSR.
+ */
+LOCAL_FUNC thread_save_state , :
+ push {r12, lr}
+ /*
+ * Uses stack for temporary storage, while storing needed
+ * context in the thread context struct.
+ */
+
+ mrs r12, cpsr
+
+ cpsid if /* Disable IRQ and FIQ */
+
+ push {r4-r7}
+ push {r0-r3}
+
+ mov r5, r12 /* Save CPSR in a preserved register */
+ mrs r6, cpsr /* Save current CPSR */
+
+ bl thread_get_ctx_regs
+
+ pop {r1-r4} /* r0-r3 pushed above */
+ stm r0!, {r1-r4}
+ pop {r1-r4} /* r4-r7 pushed above */
+ stm r0!, {r1-r4}
+ stm r0!, {r8-r11}
+
+ pop {r12, lr}
+ stm r0!, {r12}
+
+ cps #CPSR_MODE_SYS
+ mrs r1, spsr
+ stm r0!, {r1, sp, lr}
+
+#ifdef THREAD_LOCAL_EXCEPTION_SPS
+ cps #CPSR_MODE_IRQ
+ mrs r1, spsr
+ stm r0!, {r1, sp, lr}
+#endif /*THREAD_LOCAL_EXCEPTION_SPS*/
+
+ cps #CPSR_MODE_SVC
+ mrs r1, spsr
+ stm r0!, {r1, sp, lr}
+
+#ifdef THREAD_LOCAL_EXCEPTION_SPS
+ cps #CPSR_MODE_ABT
+ mrs r1, spsr
+ stm r0!, {r1, sp, lr}
+
+ cps #CPSR_MODE_UND
+ mrs r1, spsr
+ stm r0!, {r1, sp, lr}
+#endif /*THREAD_LOCAL_EXCEPTION_SPS*/
+
+ msr cpsr, r6 /* Restore mode */
+
+ mov r0, r5 /* Return original CPSR */
+ bx lr
+END_FUNC thread_save_state
+
+FUNC thread_stdcall_entry , :
+ /* Pass r0-r7 in a struct thread_smc_args */
+ push {r0-r7}
+ mov r0, sp
+ ldr lr, =thread_stdcall_handler_ptr
+ ldr lr, [lr]
+ blx lr
+ /*
+ * Load the returned r0-r3 into preserved registers and skip the
+ * "returned" r4-r7 since they will not be returned to normal
+ * world.
+ */
+ pop {r4-r7}
+ add sp, #(4 * 4)
+
+ /* Disable interrupts before switching to temporary stack */
+ cpsid if
+ bl thread_get_tmp_sp
+ mov sp, r0
+
+ bl thread_state_free
+
+ mov r0, r4
+ mov r1, r5
+ mov r2, r6
+ mov r3, r7
+ b thread_issue_smc
+END_FUNC thread_stdcall_entry
+
+
+/*
+ * void thread_rpc(uint32_t rv[THREAD_RPC_NUM_ARGS])
+ */
+FUNC thread_rpc , :
+ push {lr}
+ push {r0}
+
+ bl thread_save_state
+ mov r4, r0 /* Save original CPSR */
+
+ /*
+ * Switch to temporary stack and SVC mode. Save CPSR to resume into.
+ */
+ bl thread_get_tmp_sp
+ ldr r5, [sp] /* Get pointer to rv[] */
+ cps #CPSR_MODE_SVC /* Change to SVC mode */
+ mov sp, r0 /* Switch to tmp stack */
+
+ mov r0, #THREAD_FLAGS_COPY_ARGS_ON_RETURN
+ mov r1, r4 /* CPSR to restore */
+ ldr r2, =.thread_rpc_return
+ bl thread_state_suspend
+ mov r3, r0 /* Supply thread index */
+ ldm r5, {r0-r2} /* Load rv[] into r0-r2 */
+ b thread_issue_smc
+
+.thread_rpc_return:
+ /*
+	 * At this point the stack pointer has been restored to the value
+	 * it had when thread_save_state() was called above.
+	 *
+	 * Execution jumps here from thread_resume above when the RPC has
+	 * returned. The IRQ and FIQ bits are restored to what they were
+	 * when this function was originally entered.
+ */
+ pop {r12} /* Get pointer to rv[] */
+ stm r12, {r0-r2} /* Store r0-r2 into rv[] */
+ pop {pc}
+END_FUNC thread_rpc
+
+LOCAL_FUNC thread_fiq_handler , :
+ /* FIQ has a +4 offset for lr compared to preferred return address */
+ sub lr, lr, #4
+ push {r0-r12, lr}
+ bl check_canaries
+ ldr lr, =thread_fiq_handler_ptr
+ ldr lr, [lr]
+ blx lr
+ pop {r0-r12, lr}
+ movs pc, lr
+END_FUNC thread_fiq_handler
+
+LOCAL_FUNC thread_irq_handler , :
+ /*
+ * IRQ mode is set up to use tmp stack so FIQ has to be
+ * disabled before touching the stack. We can also assign
+ * SVC sp from IRQ sp to get SVC mode into the state we
+ * need when doing the SMC below.
+ */
+ cpsid f /* Disable FIQ also */
+ sub lr, lr, #4
+ push {lr}
+ push {r12}
+
+ bl thread_save_state
+
+ mov r0, #0
+ mrs r1, spsr
+ pop {r12}
+ pop {r2}
+ blx thread_state_suspend
+ mov r3, r0 /* Supply thread index */
+
+ /*
+ * Switch to SVC mode and copy current stack pointer as it already
+ * is the tmp stack.
+ */
+ mov r0, sp
+ cps #CPSR_MODE_SVC
+ mov sp, r0
+
+ ldr r0, =TEESMC_RETURN_RPC_IRQ
+ mov r1, #0
+ mov r2, #0
+ /* r3 is already filled in above */
+ b thread_issue_smc
+END_FUNC thread_irq_handler
+
+FUNC thread_init_vbar , :
+ /* Set vector (VBAR) */
+ ldr r0, =thread_vect_table
+ write_vbar r0
+ bx lr
+END_FUNC thread_init_vbar
+
+LOCAL_FUNC thread_abort_handler , :
+thread_und_handler:
+ /*
+ * Switch to abort mode to use that stack instead.
+ */
+ cps #CPSR_MODE_ABT
+ sub lr, lr, #4
+ push {r0-r3, ip, lr}
+ cps #CPSR_MODE_UND
+ mrs r0, spsr
+ cps #CPSR_MODE_ABT
+ push {r0, r1}
+ msr spsr, r0 /* In case some code reads spsr directly */
+ mov r0, #THREAD_ABORT_UNDEF
+ b .thread_abort_generic
+
+thread_dabort_handler:
+ sub lr, lr, #8
+ push {r0-r3, ip, lr}
+ mrs r0, spsr
+ push {r0, r1}
+ mov r0, #THREAD_ABORT_DATA
+ b .thread_abort_generic
+
+thread_pabort_handler:
+ sub lr, lr, #4
+ push {r0-r3, ip, lr}
+ mrs r0, spsr
+ push {r0, r1}
+ mov r0, #THREAD_ABORT_PREFETCH
+ b .thread_abort_generic
+
+.thread_abort_generic:
+ mov r1, sp
+	ldr lr, =thread_abort_handler_ptr
+ ldr lr, [lr]
+ blx lr
+ pop {r0, r1}
+ msr spsr, r0
+ pop {r0-r3, ip, lr}
+ movs pc, lr
+END_FUNC thread_abort_handler
+
+LOCAL_FUNC thread_svc_handler , :
+/*
+ * TODO figure out an efficient way of redesigning tee_svc_syscall to allow
+ * usage of this code instead.
+ */
+#if 0
+ push {r0-r5, lr}
+ mrs r0, spsr
+ push {r0}
+ mov r0, sp
+	ldr lr, =thread_svc_handler_ptr
+ ldr lr, [lr]
+ blx lr
+ pop {r0}
+ msr spsr, r0
+ pop {r0-r5, lr}
+ movs pc, lr
+#else
+ ldr pc, =tee_svc_syscall
+#endif
+END_FUNC thread_svc_handler
+
+ .align 5
+LOCAL_FUNC thread_vect_table , :
+ b . /* Reset */
+ b thread_und_handler /* Undefined instruction */
+ b thread_svc_handler /* System call */
+ b thread_pabort_handler /* Prefetch abort */
+ b thread_dabort_handler /* Data abort */
+ b . /* Reserved */
+ b thread_irq_handler /* IRQ */
+ b thread_fiq_handler /* FIQ */
+END_FUNC thread_vect_table
diff --git a/core/arch/arm32/kernel/thread_private.h b/core/arch/arm32/kernel/thread_private.h
new file mode 100644
index 00000000000..9e6dfef265d
--- /dev/null
+++ b/core/arch/arm32/kernel/thread_private.h
@@ -0,0 +1,158 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef THREAD_PRIVATE_H
+#define THREAD_PRIVATE_H
+
+#include
+
+enum thread_state {
+ THREAD_STATE_FREE,
+ THREAD_STATE_SUSPENDED,
+ THREAD_STATE_ACTIVE,
+};
+
+struct thread_ctx_regs {
+ uint32_t r0;
+ uint32_t r1;
+ uint32_t r2;
+ uint32_t r3;
+ uint32_t r4;
+ uint32_t r5;
+ uint32_t r6;
+ uint32_t r7;
+ uint32_t r8;
+ uint32_t r9;
+ uint32_t r10;
+ uint32_t r11;
+ uint32_t r12;
+ uint32_t usr_spsr;
+ uint32_t usr_sp;
+ uint32_t usr_lr;
+#ifdef THREAD_LOCAL_EXCEPTION_SPS
+ uint32_t irq_spsr;
+ uint32_t irq_sp;
+ uint32_t irq_lr;
+#endif /*THREAD_LOCAL_EXCEPTION_SPS*/
+ uint32_t svc_spsr;
+ uint32_t svc_sp;
+ uint32_t svc_lr;
+#ifdef THREAD_LOCAL_EXCEPTION_SPS
+ uint32_t abt_spsr;
+ uint32_t abt_sp;
+ uint32_t abt_lr;
+ uint32_t und_spsr;
+ uint32_t und_sp;
+ uint32_t und_lr;
+#endif /*THREAD_LOCAL_EXCEPTION_SPS*/
+ uint32_t pc;
+ uint32_t cpsr;
+};
+
+struct thread_ctx {
+ enum thread_state state;
+ vaddr_t stack_va_end;
+ void *tsd;
+ thread_tsd_free_t tsd_free;
+ uint32_t hyp_clnt_id;
+ uint32_t flags;
+ struct thread_ctx_regs regs;
+ struct tee_mmu_mapping user_map;
+ bool have_user_map;
+};
+
+struct thread_core_local {
+ vaddr_t tmp_stack_va_end;
+ int curr_thread;
+};
+
+
+/*
+ * Initializes VBAR for the current CPU (called by thread_init_handlers())
+ */
+void thread_init_vbar(void);
+
+/* Handles a stdcall, r0-r7 holds the parameters */
+void thread_stdcall_entry(void);
+
+/*
+ * Resumes execution of currently active thread by restoring context and
+ * jumping to the instruction where to continue execution.
+ *
+ * Arguments supplied by non-secure world will be copied into the saved
+ * context of the current thread if THREAD_FLAGS_COPY_ARGS_ON_RETURN is set
+ * in the flags field in the thread context.
+ */
+void thread_resume(struct thread_ctx_regs *regs);
+
+/*
+ * Private functions made available for thread_asm.S
+ */
+
+/* Returns the temp stack for current CPU */
+void *thread_get_tmp_sp(void);
+
+/* Handles an SMC call by dispatching to the correct handler */
+void thread_handle_smc_call(struct thread_smc_args *args);
+
+/*
+ * Marks the current thread as suspended and updates the flags
+ * for the thread context (see thread_resume() for the use of flags).
+ * Returns the thread index of the thread that was suspended.
+ */
+int thread_state_suspend(uint32_t flags, uint32_t cpsr, uint32_t pc);
+
+/*
+ * Marks the current thread as free.
+ */
+void thread_state_free(void);
+
+/* Returns a pointer to the saved registers in current thread context. */
+struct thread_ctx_regs *thread_get_ctx_regs(void);
+
+/* Sets sp for abort mode */
+void thread_set_abt_sp(vaddr_t sp);
+
+/* Sets sp for irq mode */
+void thread_set_irq_sp(vaddr_t sp);
+
+/* Sets sp for fiq mode */
+void thread_set_fiq_sp(vaddr_t sp);
+
+extern thread_call_handler_t thread_stdcall_handler_ptr;
+
+/*
+ * Suspends the current thread and temporarily exits to non-secure world.
+ * This function returns later when non-secure world returns.
+ *
+ * The purpose of this function is to request services from non-secure
+ * world.
+ */
+#define THREAD_RPC_NUM_ARGS 3
+void thread_rpc(uint32_t rv[THREAD_RPC_NUM_ARGS]);
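+
+/*
+ * Typical usage, mirroring thread_rpc_alloc_arg() in thread.c: fill
+ * rv[0] with a TEESMC_RETURN_RPC_* request code, call thread_rpc() and
+ * read the answer written back by normal world:
+ *
+ *	uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = {
+ *		TEESMC_RETURN_RPC_ALLOC_ARG, size };
+ *
+ *	thread_rpc(rpc_args);
+ *	return rpc_args[1];
+ */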
+
+#endif /*THREAD_PRIVATE_H*/
diff --git a/core/arch/arm32/kernel/tz_proc.S b/core/arch/arm32/kernel/tz_proc.S
new file mode 100644
index 00000000000..7794f336828
--- /dev/null
+++ b/core/arch/arm32/kernel/tz_proc.S
@@ -0,0 +1,204 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * ARMv7 core support routines
+ */
+
+#include
+
+.global cpu_dsb
+.global cpu_dmb
+.global cpu_isb
+.global cpu_wfe
+.global cpu_sev
+.global cpu_disable_its
+.global cpu_enable_its
+.global cpu_read_cpsr
+.global cpu_write_cpsr
+.global cpu_read_ttbr0
+.global cpu_write_ttbr0
+.global cpu_spin_lock
+.global cpu_spin_trylock
+.global cpu_spin_unlock
+.global mmu_enable
+.global mmu_enable_icache
+.global mmu_enable_dcache
+
+.section .text
+.balign 4
+.code 32
+
+/* void cpu_dmb(void); */
+cpu_dmb:
+ dmb
+ bx lr
+
+/* void cpu_dsb(void); */
+cpu_dsb:
+ dsb
+ bx lr
+
+/* void cpu_isb(void); */
+cpu_isb:
+ isb
+ bx lr
+
+/* void cpu_wfe(void); */
+cpu_wfe:
+ wfe
+ bx lr
+
+/* void cpu_sev(void); */
+cpu_sev:
+ sev
+ bx lr
+
+/* void cpu_disable_its(void) - disable interrupts on the local core */
+cpu_disable_its:
+ cpsid if
+ mov pc, lr
+
+/* void cpu_enable_its(void) - enable interrupts on the local core */
+cpu_enable_its:
+ cpsie if
+ mov pc, lr
+
+/* unsigned int cpu_read_cpsr(void) - return CPSR in R0 */
+cpu_read_cpsr:
+ MRS R0, CPSR
+ BX LR
+
+/* void cpu_write_cpsr(cpsr) - write R0 content to CPSR */
+cpu_write_cpsr:
+ MSR CPSR_cxsf, R0
+ BX LR
+
+/* uint32_t cpu_read_ttbr0(void) - read CP15 TTBR0 */
+cpu_read_ttbr0:
+ mrc p15, 0, r0, c2, c0 ,0
+ bx lr
+
+/* void cpu_write_ttbr0(uint32_t ttbr0) - write CP15 TTBR0 */
+cpu_write_ttbr0:
+ mcr p15, 0, r0, c2, c0 ,0
+ bx lr
+
+/* void cpu_spin_lock(lock address) - lock mutex */
+cpu_spin_lock:
+ mov r2, #LOCK
+_spinlock_loop:
+ ldrex r1, [r0]
+ cmp r1, #UNLOCK
+ wfene
+ bne _spinlock_loop
+ strex r1, r2, [r0]
+ cmp r1, #0
+ wfene
+ bne _spinlock_loop
+ dmb
+ bx lr
+
+/* int cpu_spin_trylock(lock address) - return 0 on success */
+cpu_spin_trylock:
+ mov r2, #LOCK
+ mov r1, r0
+_trylock_loop:
+ ldrex r0, [r1]
+ cmp r0, #0
+ bne _trylock_out
+ strex r0, r2, [r1]
+ cmp r0, #0
+ bne _trylock_loop
+ dmb
+ bx lr
+_trylock_out:
+ clrex
+ dmb
+ bx lr
+
+/* void cpu_spin_unlock(lock address) - unlock mutex */
+cpu_spin_unlock:
+ dmb
+ mov r1, #UNLOCK
+ str r1, [r0]
+ dsb
+ sev
+ bx lr
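+
+/*
+ * Typical C-side usage (see lock_global()/unlock_global() in
+ * core/arch/arm32/kernel/thread.c):
+ *
+ *	static unsigned int lock = UNLOCK;
+ *
+ *	cpu_spin_lock(&lock);
+ *	... critical section ...
+ *	cpu_spin_unlock(&lock);
+ */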
+
+/*
+ * void mmu_enable(void) - enable MMU
+ *
+ * TLBs are invalidated before the MMU is enabled.
+ * A DSB and an ISB ensure the MMU is enabled before the routine returns.
+ */
+mmu_enable:
+
+	MCR p15, 0, R0, c8, c7, 0		/* TLBIALL: invalidate TLBs */
+
+	MRC p15, 0, R0, c1, c0, 0		/* read SCTLR */
+
+	ORR R0, R0, #CP15_CONTROL_M_MASK	/* set the MMU enable bit */
+	MCR p15, 0, R0, c1, c0, 0		/* write SCTLR */
+
+ DSB
+ ISB
+
+ MOV PC, LR
+
+/* void mmu_enable_icache(void) - enable instruction cache */
+mmu_enable_icache:
+
+ MOV R1, #0
+	MCR p15, 0, R1, c7, c5, 0 /* ICIALLU: invalidate entire ICache */
+
+ MOV R1, #0
+	MCR p15, 0, R1, c7, c5, 6 /* BPIALL: invalidate branch predictor */
+
+ MRC p15, 0, R1, c1, c0 , 0 /* read control reg */
+ ORR R1, R1, #CP15_CONTROL_I_MASK /* set ICache enable bit */
+ MCR p15, 0, R1, c1, c0 , 0 /* write control reg */
+
+ DSB
+ ISB
+
+ MOV PC, LR
+
+/* void mmu_enable_dcache(void) - enable data cache */
+mmu_enable_dcache:
+
+ PUSH {R4,LR}
+
+
+ MRC p15, 0, R1, c1, c0 , 0 /* read control reg */
+ ORR R1, R1, #CP15_CONTROL_C_MASK /* set DCache enable bit */
+ MCR p15, 0, R1, c1, c0 , 0 /* write control reg */
+
+ DSB
+ ISB
+
+ POP {R4,PC}
diff --git a/core/arch/arm32/kernel/tz_ssvce.S b/core/arch/arm32/kernel/tz_ssvce.S
new file mode 100644
index 00000000000..981f06eaa45
--- /dev/null
+++ b/core/arch/arm32/kernel/tz_ssvce.S
@@ -0,0 +1,846 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * ARMv7 Secure Services library
+ */
+
+/*
+ * Variable(s)
+ */
+
+#include
+#include
+
+/* tee init/monitor services */
+.global ssvce_monitormutex
+.global secure_get_cpu_id
+.global secure_setstacks
+.global secure_restorecontext
+.global secure_savecontext
+.global secure_savecontext_reenter
+.global ssvce_topoftempstack
+
+/* mmu init */
+.global secure_mmu_init
+.global secure_mmu_init_cpuN
+.global secure_mmu_disable /* TODO: align with mmu_enable() */
+
+/* TLB maintenance */
+.global secure_mmu_datatlbinvall
+.global secure_mmu_unifiedtlbinvall
+.global secure_mmu_unifiedtlbinvbymva
+.global secure_mmu_unifiedtlbinv_curasid
+.global secure_mmu_unifiedtlbinv_byasid
+
+/* cache maintenance */
+.global arm_cl1_d_cleanbysetway
+.global arm_cl1_d_invbysetway
+.global arm_cl1_d_cleaninvbysetway
+.global arm_cl1_d_cleanbypa
+.global arm_cl1_d_invbypa
+.global arm_cl1_d_cleaninvbypa
+.global arm_cl1_i_inv_all
+.global arm_cl1_i_inv
+.global arm_cl2_cleaninvbyway
+.global arm_cl2_invbyway
+.global arm_cl2_cleanbyway
+.global arm_cl2_cleanbypa
+.global arm_cl2_invbypa
+.global arm_cl2_cleaninvbypa
+
+/*
+ * Get the CPU ID: macro for local calls.
+ * Exported as unsigned long secure_get_cpu_id(void).
+ */
+.macro GET_CPU_ID reg
+ MRC p15, 0, \reg, c0, c0, 5 @ ; read MPIDR
+ AND \reg, #0x3 @ ; Get CPU ID
+.endm
+
+.code 32
+.section .text
+.balign 4
+
+secure_get_cpu_id:
+ GET_CPU_ID R0
+ MOV PC, LR
+
+
+/*
+ * Store TTBR0 base address for tee core and TAs.
+ * These are defined in the scatter file and resolved at link time.
+ * Currently all cores use the same MMU L1 tables (core and TAs).
+ * Maybe some day, each CPU will use its own MMU table.
+ */
+CORE0_TTBR0_ADDR:
+ .word SEC_MMU_TTB_FLD
+CORE0_TA_TTBR0_ADDR:
+ .word SEC_TA_MMU_TTB_FLD
+
+/*
+ * secure_mmu_init - init MMU for primary cpu
+ */
+secure_mmu_init:
+	MRC p15, 0, r0, c1, c0, 0 @ read SCTLR (system control register) into r0
+ BIC r0, r0, #0x00004 @ disable data cache. (BIC = bit clear)
+ BIC r0, r0, #0x01000 @ disable instruction cache.
+ MCR p15, 0, r0, c1, c0, 0
+
+ MOV r0, #0x05 @ domain 0: teecore, domain 1: TA
+ MCR p15, 0, r0, c3, c0, 0
+
+ /* load tee core default mapping */
+ push {lr}
+ LDR r0, CORE0_TTBR0_ADDR
+ LDR r1, CORE0_TA_TTBR0_ADDR
+ BL core_init_mmu
+ pop {lr}
+
+/*
+ * Set Translation Table Base Control Register (TTBCR)
+ * ---------------------------------
+ * 31:6 - SBZ
+ * 5 - PD[1], whether misses in TTBR1 causes a table walk
+ * 4 - PD[0], whether misses in TTBR0 causes a table walk
+ * 3 - SBZ
+ * 2:0 - N, split between TTBR0 and TTBR1
+ */
+ MOV r0,#0x0 @ N=0 => no TTBR1 used
+ MCR p15, 0, r0, c2, c0, 2
+
+ MOV PC, LR
+
+/*
+ * void secure_mmu_disable(void);
+ */
+secure_mmu_disable:
+ MRC p15, 0, R0, c1, c0, 0
+
+ BIC R0, R0, #CP15_CONTROL_M_MASK
+ MCR p15, 0, R0, c1, c0, 0
+
+ DSB
+ ISB
+
+ MOV PC, LR
+
+.equ SEC_MMU_TTB_FLD_SN_SHM , 0x00011c02 @ 0x00011c0e to have memory cached
+ @ 0x00011c02 to have memory uncached
+
+.equ SEC_MMU_TTB_FLD_SN_DEV , 0x00001c02 @ device memory (iomem)
+
+/* @ ; r0 = base address (physical address) */
+/* @ ; Add a section of 1MBytes. */
+/* @ ; Base address r0 is aligned on 1MB */
+secure_mmu_addsection:
+
+ MRC p15, 0, R1, c2, c0 ,0 /* Get TTBR0 location */
+
+	LSR R0, R0, #20 /* Clear bottom 20 bits, to find which 1MB block it's in */
+ LSL R2, R0, #2 /* Make a copy, and multiply by four. This gives offset into the page tables */
+ LSL R0, R0, #20 /* Put back in address format */
+
+ LDR R3, =SEC_MMU_TTB_FLD_SN_SHM /* Descriptor template */
+ ORR R0, R0, R3 /* Combine address and template */
+ STR R0, [R1, R2]
+
+ MOV PC, LR
+
+secure_mmu_addsectiondevice:
+
+ MRC p15, 0, R1, c2, c0 ,0 /* Get TTBR0 location */
+
+	LSR R0, R0, #20 /* Clear bottom 20 bits, to find which 1MB block it's in */
+ LSL R2, R0, #2 /* Make a copy, and multiply by four. This gives offset into the page tables */
+ LSL R0, R0, #20 /* Put back in address format */
+
+ LDR R3, =SEC_MMU_TTB_FLD_SN_DEV /* Descriptor template */
+ ORR R0, R0, R3 /* Combine address and template */
+ STR R0, [R1, R2]
+
+ MOV PC, LR
+
+secure_mmu_removesection:
+
+ MRC p15, 0, R1, c2, c0 ,0 /* Get TTBR0 location */
+
+	LSR R0, R0, #20 /* Clear bottom 20 bits, to find which 1MB block it's in */
+ LSL R2, R0, #2 /* Make a copy, and multiply by four. This gives offset into the page tables */
+ LSL R0, R0, #20 /* Put back in address format */
+
+ MOV R3, #0 /* Descriptor template */
+ ORR R0, R0, R3 /* Combine address and template */
+ STR R0, [R1, R2]
+
+ MOV PC, LR
+
+.equ SEC_MMU_TTB_FLD_PT_SHM , 0xbfed0001 @ Template descriptor
+
+.equ SEC_MMU_TTB_SLD_SP_SHM , 0x00000473 @ 0x0000047f to have memory cached
+ @ 0x00000433 to have memory strongly ordered
+ @ 0x00000473 to have memory uncached
+
+/* @ ; r0 = base address (physical address) */
+/* @ ; Add a small page of 4KB. */
+/* @ ; Base address r0 is aligned on 4KB */
+secure_mmu_addsmallpage:
+ PUSH {R4, R5}
+
+ LDR R1, =SEC_MMU_TTB_SLD
+
+ MOVW R2, #0x0000
+ MOVT R2, #0xFFF0
+
+ BIC R2, R0, R2
+
+	LSR R2, R2, #12 /* Clear bottom 12 bits, to find which 4KB block it's in */
+ LSL R2, R2, #2 /* Make a copy, and multiply by four. This gives offset into the page tables */
+
+ LDR R3, =SEC_MMU_TTB_SLD_SP_SHM /* Descriptor template */
+ ORR R0, R0, R3 /* Combine address and template */
+ STR R0, [R1, R2]
+
+ LDR R5, =SEC_MMU_TTB_SLD
+ LDR R4, [R5]
+
+ MRC p15, 0, R1, c2, c0 ,0 /* Get TTBR0 location */
+
+	LSR R0, R0, #20 /* Clear bottom 20 bits, to find which 1MB block it's in */
+ LSL R2, R0, #2 /* Make a copy, and multiply by four. This gives offset into the page tables */
+ LSL R0, R0, #20 /* Put back in address format */
+
+ LDR R3, =SEC_MMU_TTB_FLD_PT_SHM /* Descriptor template */
+ STR R3, [R1, R2]
+
+ POP {R4, R5}
+ MOV PC, LR
+
+secure_mmu_removesmallpage:
+
+ LDR R1, =SEC_MMU_TTB_SLD
+
+ MOVW R2, #0x0000
+ MOVT R2, #0xFFF0
+
+ BIC R2, R0, R2
+
+	LSR R2, R2, #12 /* Clear bottom 12 bits, to find which 4KB block it's in */
+ LSL R2, R2, #2 /* Make a copy, and multiply by four. This gives offset into the page tables */
+
+ MOV R3, #0 /* Descriptor template */
+ ORR R0, R0, R3 /* Combine address and template */
+ STR R0, [R1, R2]
+
+ LDR R5, =SEC_MMU_TTB_SLD
+ LDR R4, [R5]
+
+ MRC p15, 0, R1, c2, c0 ,0 /* Get TTBR0 location */
+
+	LSR R0, R0, #20 /* Clear bottom 20 bits, to find which 1MB block it's in */
+ LSL R2, R0, #2 /* Make a copy, and multiply by four. This gives offset into the page tables */
+ LSL R0, R0, #20 /* Put back in address format */
+
+ LDR R3, =SEC_MMU_TTB_FLD_PT_SHM /* Descriptor template */
+ STR R3, [R1, R2]
+
+ MOV PC, LR
+
+/*
+ * - MMU maintenance support ---------------------------------------------
+ */
+
+/*
+ * void secure_mmu_datatlbinvall(void);
+ */
+secure_mmu_datatlbinvall:
+
+ MCR p15, 0, R0, c8, c6, 0
+
+ DSB
+ ISB
+
+ MOV PC, LR
+
+/*
+ * void secure_mmu_instrtlbinvall(void);
+ */
+secure_mmu_instrtlbinvall:
+
+ MCR p15, 0, R0, c8, c5, 0
+
+ DSB
+ ISB
+
+ MOV PC, LR
+
+/*
+ * void secure_mmu_unifiedtlbinvall(void);
+ */
+secure_mmu_unifiedtlbinvall:
+
+ MCR p15, 0, R0, c8, c7, 0
+
+ DSB
+ ISB
+
+ MOV PC, LR
+
+/*
+ * void secure_mmu_unifiedtlbinvbymva(mva);
+ *
+ * Combine VA and current ASID, and invalidate matching TLB
+ */
+secure_mmu_unifiedtlbinvbymva:
+
+	b . @ Deliberate dead loop: fix/review this routine before using it
+
+ MRC p15, 0, R1, c13, c0, 1 /* Read CP15 Context ID Register (CONTEXTIDR) */
+ ANDS R1, R1, #0xFF /* Get current ASID */
+ ORR R1, R1, R0 /* Combine MVA and ASID */
+
+ MCR p15, 0, R1, c8, c7, 1 /* Invalidate Unified TLB entry by MVA */
+
+ DSB
+ ISB
+
+ MOV PC, LR
+/*
+ * void secure_mmu_unifiedtlbinv_curasid(void)
+ *
+ * Invalidate TLB matching current ASID
+ */
+secure_mmu_unifiedtlbinv_curasid:
+
+ MRC p15, 0, R0, c13, c0, 1 /* Read CP15 Context ID Register (CONTEXTIDR) */
+ AND R0, R0, #0xFF /* Get current ASID */
+ MCR p15, 0, R0, c8, c7, 2 /* Invalidate Unified TLB entry by ASID */
+ DSB
+ ISB
+ MOV PC, LR
+
+/*
+ * void secure_mmu_unifiedtlbinv_byasid(unsigned int asid)
+ *
+ * Invalidate TLB entries matching the given ASID
+ */
+secure_mmu_unifiedtlbinv_byasid:
+
+	AND R0, R0, #0xFF /* Keep only the ASID bits of the parameter */
+ MCR p15, 0, R0, c8, c7, 2 /* Invalidate Unified TLB entry by ASID */
+ DSB
+ ISB
+ MOV PC, LR
+
+/*
+ * void arm_cl1_d_cleanbysetway(void)
+ */
+arm_cl1_d_cleanbysetway:
+
+ MOV R0, #0 @ ; write the Cache Size selection register to be
+ MCR p15, 2, R0, c0, c0, 0 @ ; sure we address the data cache
+ ISB @ ; ISB to sync the change to the CacheSizeID reg
+
+ MOV R0, #0 @ ; set way number to 0
+_cl_nextWay:
+ MOV R1, #0 @ ; set line number (=index) to 0
+_cl_nextLine:
+ ORR R2, R0, R1 @ ; construct way/index value
+ MCR p15, 0, R2, c7, c10, 2 @ ; DCCSW Clean data or unified cache line by set/way
+ ADD R1, R1, #1 << LINE_FIELD_OFFSET @ ; increment the index
+ CMP R1, #1 << LINE_FIELD_OVERFLOW @ ; look for overflow out of set field
+ BNE _cl_nextLine
+ ADD R0, R0, #1 << WAY_FIELD_OFFSET @ ; increment the way number
+ CMP R0, #0 @ ; look for overflow out of way field
+ BNE _cl_nextWay
+
+ DSB @ ; synchronise
+ MOV PC, LR
+
+arm_cl1_d_invbysetway:
+
+ MOV R0, #0 @ ; write the Cache Size selection register to be
+ MCR p15, 2, R0, c0, c0, 0 @ ; sure we address the data cache
+ ISB @ ; ISB to sync the change to the CacheSizeID reg
+
+_inv_dcache_off:
+ MOV R0, #0 @ ; set way number to 0
+_inv_nextWay:
+ MOV R1, #0 @ ; set line number (=index) to 0
+_inv_nextLine:
+ ORR R2, R0, R1 @ ; construct way/index value
+ MCR p15, 0, R2, c7, c6, 2 @ ; DCISW Invalidate data or unified cache line by set/way
+ ADD R1, R1, #1 << LINE_FIELD_OFFSET @ ; increment the index
+ CMP R1, #1 << LINE_FIELD_OVERFLOW @ ; look for overflow out of set field
+ BNE _inv_nextLine
+ ADD R0, R0, #1 << WAY_FIELD_OFFSET @ ; increment the way number
+ CMP R0, #0 @ ; look for overflow out of way field
+ BNE _inv_nextWay
+
+ DSB @ ; synchronise
+ MOV PC, LR
+
+arm_cl1_d_cleaninvbysetway:
+
+ MOV R0, #0 @ ; write the Cache Size selection register to be
+ MCR p15, 2, R0, c0, c0, 0 @ ; sure we address the data cache
+ ISB @ ; ISB to sync the change to the CacheSizeID reg
+
+ MOV R0, #0 @ ; set way number to 0
+_cli_nextWay:
+ MOV R1, #0 @ ; set line number (=index) to 0
+_cli_nextLine:
+ ORR R2, R0, R1 @ ; construct way/index value
+ MCR p15, 0, R2, c7, c14, 2 @ ; DCCISW Clean and Invalidate data or unified cache line by set/way
+ ADD R1, R1, #1 << LINE_FIELD_OFFSET @ ; increment the index
+ CMP R1, #1 << LINE_FIELD_OVERFLOW @ ; look for overflow out of set field
+ BNE _cli_nextLine
+ ADD R0, R0, #1 << WAY_FIELD_OFFSET @ ; increment the way number
+ CMP R0, #0 @ ; look for overflow out of way field
+ BNE _cli_nextWay
+
+ DSB @ ; synchronise
+ MOV PC, LR
+
+/*
+ * void arm_cl1_d_cleanbypa(unsigned long s, unsigned long e);
+ */
+arm_cl1_d_cleanbypa:
+
+ CMP R0, R1 @ ; check that end >= start. Otherwise return.
+ BHI _cl_area_exit
+
+ MOV R2, #0 @ ; write the Cache Size selection register to be
+ MCR p15, 2, R2, c0, c0, 0 @ ; sure we address the data cache
+ ISB @ ; ISB to sync the change to the CacheSizeID reg
+
+ BIC R0, R0, #0x1F @ ; Mask 5 LSBits
+_cl_area_nextLine:
+ MCR p15, 0, R0, c7, c10, 1 @ ; Clean data or unified cache line by MVA to PoC
+ ADD R0, R0, #1 << LINE_FIELD_OFFSET @ ; Next cache line
+ CMP R1, R0
+ BPL _cl_area_nextLine
+
+_cl_area_exit:
+
+ DSB @ ; synchronise
+ MOV PC, LR
+
+/*
+ * void arm_cl1_d_invbypa(unsigned long s, unsigned long e);
+ */
+arm_cl1_d_invbypa:
+
+ CMP R0, R1 @ ; check that end >= start. Otherwise return.
+ BHI _inv_area_dcache_exit
+
+ MOV R2, #0 @ ; write the Cache Size selection register to be
+ MCR p15, 2, R2, c0, c0, 0 @ ; sure we address the data cache
+ ISB @ ; ISB to sync the change to the CacheSizeID reg
+
+_inv_area_dcache_off:
+ BIC R0, R0, #0x1F @ ; Mask 5 LSBits
+_inv_area_dcache_nl:
+ MCR p15, 0, R0, c7, c6, 1 @ ; Invalidate data or unified cache line by MVA to PoC
+ ADD R0, R0, #1 << LINE_FIELD_OFFSET @ ; Next cache line
+ CMP R1, R0
+ BPL _inv_area_dcache_nl
+
+_inv_area_dcache_exit:
+ DSB
+ MOV PC, LR
+
+/*
+ * void arm_cl1_d_cleaninvbypa(unsigned long s, unsigned long e);
+ */
+arm_cl1_d_cleaninvbypa:
+
+ CMP R0, R1 @ ; check that end >= start. Otherwise return.
+ BHI _cli_area_exit
+
+ MOV R2, #0 @ ; write the Cache Size selection register to be
+ MCR p15, 2, R2, c0, c0, 0 @ ; sure we address the data cache
+ ISB @ ; ISB to sync the change to the CacheSizeID reg
+
+ BIC R0, R0, #0x1F @ ; Mask 5 LSBits
+_cli_area_nextLine:
+ MCR p15, 0, R0, c7, c14, 1 @ ; Clean and Invalidate data or unified cache line by MVA to PoC
+ ADD R0, R0, #1 << LINE_FIELD_OFFSET @ ; Next cache line
+ CMP R1, R0
+ BPL _cli_area_nextLine
+
+_cli_area_exit:
+ DSB @ ; synchronise
+ MOV PC, LR
+
+/*
+ * void arm_cl1_i_inv_all( void );
+ *
+ * Invalidates the whole instruction cache.
+ * It also invalidates the BTAC.
+ */
+arm_cl1_i_inv_all:
+
+ /* Invalidate Entire Instruction Cache */
+ MOV R0, #0
+ MCR p15, 0, R0, c7, c5, 0
+ DSB
+
+ /* Flush entire branch target cache */
+ MOV R1, #0
+ MCR p15, 0, R1, c7, c5, 6 /* write to Cache operations register */
+
+ DSB /* ensure that maintenance operations are seen */
+ ISB /* by the instructions right after the ISB */
+
+ BX LR
+
+/*
+ * void arm_cl1_i_inv(unsigned long start, unsigned long p_end);
+ *
+ * Invalidates instruction cache area whose (physical) limits are given in parameters.
+ * It also invalidates the BTAC.
+ */
+arm_cl1_i_inv:
+
+ CMP R0, R1 /* Check that end >= start. Otherwise return. */
+ BHI _inv_icache_exit
+
+ BIC R0, R0, #0x1F /* Mask 5 LSBits */
+_inv_icache_nextLine:
+ MCR p15, 0, R0, c7, c5, 1 /* Invalidate ICache single entry (MVA) */
+ ADD R0, R0, #1 << LINE_FIELD_OFFSET /* Next cache line */
+ CMP R1, R0
+ BPL _inv_icache_nextLine
+ DSB
+
+ /* Flush entire branch target cache */
+ MOV R1, #0
+ MCR p15, 0, R1, c7, c5, 6 /* write to Cache operations register */
+ DSB /* ensure that maintenance operations are seen */
+ ISB /* by the instructions right after the ISB */
+
+_inv_icache_exit:
+ BX LR
+
+/*
+ * void arm_cl2_cleaninvbyway(void) - clean & invalidate the whole L2 cache.
+ */
+arm_cl2_cleaninvbyway:
+
+ /* Clean and invalidate all cache ways */
+ movw r0, #0x27FC
+ movt r0, #0xFFFE
+ movw r1, #0x00FF
+ movt r1, #0x0000
+ str r1, [r0]
+
+ /* Wait for all cache ways to be cleaned and invalidated */
+loop_cli_way_done:
+ ldr r2, [r0]
+ and r2,r2,r1
+ cmp r2, #0
+ bne loop_cli_way_done
+
+ /* Cache Sync */
+ movw r0, #0x2730
+ movt r0, #0xFFFE
+
+ /* Wait for writing cache sync */
+loop_cli_sync:
+ ldr r1, [r0]
+ cmp r1, #0
+ bne loop_cli_sync
+
+ movw r1, #0x0001
+ movt r1, #0x0000
+ str r1, [r0]
+
+loop_cli_sync_done:
+ ldr r1, [r0]
+ cmp r1, #0
+ bne loop_cli_sync_done
+
+ mov pc, lr
+
+/* void arm_cl2_invbyway(void) */
+arm_cl2_invbyway:
+
+ /* Invalidate by Way */
+ movw r0, #0x277C
+ movt r0, #0xFFFE
+ movw r1, #0x00FF /* assumes here 8-way L2 cache (orly) */
+ movt r1, #0x0000
+ str r1, [r0]
+
+ /* Wait end of Invalidate by Way */
+loop_inv_way_done:
+ ldr r2, [r0]
+ and r2,r2,r1
+ cmp r2, #0
+ bne loop_inv_way_done
+
+ /* Cache Sync */
+ movw r0, #0x2730
+ movt r0, #0xFFFE
+
+ /* Wait for writing cache sync */
+loop_inv_way_sync:
+ ldr r1, [r0]
+ cmp r1, #0
+ bne loop_inv_way_sync
+
+ movw r1, #0x0001
+ movt r1, #0x0000
+ str r1, [r0]
+
+ /* Wait end of Cache Sync */
+loop_inv_way_sync_done:
+ ldr r1, [r0]
+ cmp r1, #0
+ bne loop_inv_way_sync_done
+
+ mov pc, lr
+
+/* void arm_cl2_cleanbyway(void) */
+arm_cl2_cleanbyway:
+
+ /* Clean by Way */
+ movw r0, #0x27BC
+ movt r0, #0xFFFE
+ movw r1, #0x00FF
+ movt r1, #0x0000
+ str r1, [r0]
+
+ /* Wait end of Clean by Way */
+loop_cl_way_done:
+ ldr r2, [r0]
+ and r2,r2,r1
+ cmp r2, #0
+ bne loop_cl_way_done
+
+ /* Cache Sync */
+ movw r0, #0x2730
+ movt r0, #0xFFFE
+
+ /* Wait for writing cache sync */
+loop_cl_way_sync:
+ ldr r1, [r0]
+ cmp r1, #0
+ bne loop_cl_way_sync
+
+ movw r1, #0x0001
+ movt r1, #0x0000
+ str r1, [r0]
+
+ /* Wait end of Cache Sync */
+loop_cl_way_sync_done:
+ ldr r1, [r0]
+ cmp r1, #0
+ bne loop_cl_way_sync_done
+
+ mov pc, lr
+
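All the L2 routines above share one PL310-style pattern: write a way mask (or an address) to a maintenance register under base 0xFFFE2000, poll until the controller clears it, then issue a Cache Sync at offset 0x730 and poll that too. A hedged C rendering of arm_cl2_cleaninvbyway, reusing the register offsets visible in the movw/movt pairs:

```c
#include <stdint.h>

#define PL310_BASE          0xFFFE2000u	/* base behind the movw/movt pairs */
#define PL310_CACHE_SYNC    0x730u
#define PL310_CLEAN_INV_WAY 0x7FCu
#define L2_WAY_MASK         0x00FFu	/* 8-way L2, as assumed above */

static inline volatile uint32_t *reg(uint32_t off)
{
	return (volatile uint32_t *)(PL310_BASE + off);
}

/* Sketch of arm_cl2_cleaninvbyway: start the by-way operation, wait
 * for the way bits to clear, then drain with a Cache Sync. */
static void l2_clean_inv_all(void)
{
	*reg(PL310_CLEAN_INV_WAY) = L2_WAY_MASK;
	while (*reg(PL310_CLEAN_INV_WAY) & L2_WAY_MASK)
		;			/* wait for all ways done */

	while (*reg(PL310_CACHE_SYNC))
		;			/* wait until sync is writable */
	*reg(PL310_CACHE_SYNC) = 1;
	while (*reg(PL310_CACHE_SYNC))
		;			/* wait for sync completion */
}
```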
+/*
+ * void arm_cl2_cleanbypa(unsigned long start, unsigned long end);
+ *
+ * clean L2 cache by physical address range.
+ */
+arm_cl2_cleanbypa:
+
+ /*
+ * ARM ERRATA #764369
+ * Undocumented SCU Diagnostic Control Register
+ */
+ MOVW R2, #0x0030 /* LSB */
+ MOVT R2, #0xFFFE /* MSB */
+ MOVW R3, #0x0001
+ MOVT R3, #0x0000
+ STR R3, [R2]
+
+ DSB
+ /*
+ * ARM ERRATA #764369
+ * Undocumented SCU Diagnostic Control Register
+ */
+
+ /* Clean PA */
+loop_cl2_clean_by_pa:
+ movw R2, #0x27B0
+ movt R2, #0xFFFE
+ str R0, [R2]
+
+ /* Wait for PA to be cleaned */
+loop_cl_pa_done:
+ ldr R3, [R2]
+ and R3,R3,R0
+ cmp R3, #0
+ bne loop_cl_pa_done
+
+ add R0, R0, #32
+ cmp R1, R0
+ bne loop_cl2_clean_by_pa
+
+ /* Cache Sync */
+ movw R2, #0x2730
+ movt R2, #0xFFFE
+
+ /* Wait for writing cache sync */
+loop_cl_pa_sync:
+ ldr R0, [R2]
+ cmp R0, #0
+ bne loop_cl_pa_sync
+
+ movw R0, #0x0001
+ movt R0, #0x0000
+ str R0, [R2]
+
+loop_cl_pa_sync_done:
+ ldr R0, [R2]
+ cmp R0, #0
+ bne loop_cl_pa_sync_done
+
+ mov pc, lr
+
+/*
+ * void arm_cl2_invbypa(unsigned long start, unsigned long end);
+ *
+ * invalidate L2 cache by physical address range.
+ */
+arm_cl2_invbypa:
+
+ /*
+ * ARM ERRATA #764369
+ * Undocumented SCU Diagnostic Control Register
+ */
+ MOVW R2, #0x0030 /* LSB */
+ MOVT R2, #0xFFFE /* MSB */
+ MOVW R3, #0x0001
+ MOVT R3, #0x0000
+ STR R3, [R2]
+
+ DSB
+ /*
+ * ARM ERRATA #764369
+ * Undocumented SCU Diagnostic Control Register
+ */
+
+ /* Invalidate PA */
+loop_cl2_inv_by_pa:
+ MOVW R2, #0x2770
+ MOVT R2, #0xFFFE
+ STR R0, [R2]
+
+ /* Wait for PA to be invalidated */
+loop_inv_pa_done:
+ LDR R3, [R2]
+ AND R3,R3,R0
+ CMP R3, #0
+ BNE loop_inv_pa_done
+
+ ADD R0, R0, #32
+ CMP R1, R0
+ BNE loop_cl2_inv_by_pa
+
+
+ /* Cache Sync */
+ MOVW R2, #0x2730
+ MOVT R2, #0xFFFE
+
+ /* Wait for writing cache sync */
+loop_inv_pa_sync:
+ LDR R0, [R2]
+ CMP R0, #0
+ BNE loop_inv_pa_sync
+
+ MOVW R0, #0x0001
+ MOVT R0, #0x0000
+ STR R0, [R2]
+
+loop_inv_pa_sync_done:
+ LDR R0, [R2]
+ CMP R0, #0
+ BNE loop_inv_pa_sync_done
+
+ MOV PC, LR
+
+/*
+ * void arm_cl2_cleaninvbypa(unsigned long start, unsigned long end);
+ *
+ * clean and invalidate L2 cache by physical address range.
+ */
+arm_cl2_cleaninvbypa:
+
+ /*
+ * ARM ERRATA #764369
+ * Undocumented SCU Diagnostic Control Register
+ */
+ MOVW R0, #0x0030 /* LSB */
+ MOVT R0, #0xFFFE /* MSB */
+ MOVW R1, #0x0001
+ MOVT R1, #0x0000
+ STR R1, [R0]
+
+ DSB
+ /*
+ * ARM ERRATA #764369
+ * Undocumented SCU Diagnostic Control Register
+ */
+
+ /* Invalidate PA */
+ movw r0, #0x27F0
+ movt r0, #0xFFFE
+ mov r1, r12 // CeCh
+ str r1, [r0]
+
+ /* Wait for PA to be invalidated */
+loop_cli_pa_done:
+ ldr r2, [r0]
+ and r2,r2,r1
+ cmp r2, #0
+ bne loop_cli_pa_done
+
+ /* Cache Sync */
+ movw r0, #0x2730
+ movt r0, #0xFFFE
+
+ /* Wait for writing cache sync */
+loop_cli_pa_sync:
+ ldr r1, [r0]
+ cmp r1, #0
+ bne loop_cli_pa_sync
+
+ movw r1, #0x0001
+ movt r1, #0x0000
+ str r1, [r0]
+
+loop_cli_pa_sync_done:
+ ldr r1, [r0]
+ cmp r1, #0
+ bne loop_cli_pa_sync_done
+
+ mov pc, lr
diff --git a/core/arch/arm32/mm/core_mmu.c b/core/arch/arm32/mm/core_mmu.c
new file mode 100644
index 00000000000..55133fbee0e
--- /dev/null
+++ b/core/arch/arm32/mm/core_mmu.c
@@ -0,0 +1,622 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * This core mmu supports static section mapping (1MByte).
+ * It should also support finer mappings (4kByte coarse pages).
+ * It should also allow core to map/unmap (and va/pa) at run-time.
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+/* Default NSec shared memory allocated from NSec world */
+unsigned long default_nsec_shm_paddr;
+unsigned long default_nsec_shm_size;
+
+/* platform handler for core_pbuf_is() */
+static unsigned long bootcfg_pbuf_is = 1; /* NOT in BSS */
+typedef bool(*platform_pbuf_is_t) (unsigned long attr, unsigned long paddr,
+ size_t size);
+
+/*
+ * WARNING: resources accessed during the initialization
+ * (sequence core_init_mmu()) are accessed *before* BSS is zero-initialised.
+ * Be careful NOT to load data that can be 'reset' to zero after
+ * core_init_mmu(), due to BSS init loop.
+ */
+static struct map_area *static_memory_map = (void *)1; /* not in BSS */
+static struct map_area *map_tee_ram = (void *)1; /* not in BSS */
+static struct map_area *map_ta_ram = (void *)1; /* not in BSS */
+static struct map_area *map_nsec_shm = (void *)1; /* not in BSS */
+
+/*
+ * Save TTBR0 per CPU core running ARM-TZ. (Actually only 1 CPU runs TEE)
+ * Save TTBR0 used for TA mapping (kTA or uTA).
+ * Currently not in BSS since BSS is initialized after MMU setup.
+ */
+static unsigned int core_ttbr0[CFG_TEE_CORE_NB_CORE] = { ~0, ~0 };
+static unsigned int coreta_ttbr0_pa[CFG_TEE_CORE_NB_CORE] = { ~0, ~0 };
+static unsigned int coreta_ttbr0_va[CFG_TEE_CORE_NB_CORE] = { ~0, ~0 };
+
+/* BSS is not initialized yet: default value must be non-zero */
+static bool memmap_notinit[CFG_TEE_CORE_NB_CORE] = { true, true };
+
+#define MEMLAYOUT_NOT_INIT 1
+#define MEMLAYOUT_INIT 2
+static int memlayout_init = MEMLAYOUT_NOT_INIT;
+
+/* check if target buffer fits in a core default map area */
+static bool pbuf_inside_map_area(unsigned long p, size_t l,
+ struct map_area *map)
+{
+ if ((map->size == 0) ||
+ (((uint32_t) p + l) < (uint32_t) p) ||
+ ((uint32_t) p < map->pa) ||
+ (((uint32_t) p + l) > (map->pa + map->size)))
+ return false;
+ return true;
+}
+
+static struct map_area *find_map_by_va(void *va)
+{
+ struct map_area *map = static_memory_map;
+ unsigned long a = (unsigned long)va;
+
+ while (map->type != MEM_AREA_NOTYPE) {
+ if ((a >= map->va) && (a < (map->va + map->size)))
+ return map;
+ map++;
+ }
+ return NULL;
+}
+
+static struct map_area *find_map_by_pa(unsigned long pa)
+{
+ struct map_area *map = static_memory_map;
+
+ while (map->type != MEM_AREA_NOTYPE) {
+ if ((pa >= map->pa) && (pa < (map->pa + map->size)))
+ return map;
+ map++;
+ }
+ return NULL;
+}
+
+/* armv7 memory mapping attributes: section mapping */
+#define SECTION_SECURE (0 << 19)
+#define SECTION_NOTSECURE (1 << 19)
+#define SECTION_SHARED (1 << 16)
+#define SECTION_NOTGLOBAL (1 << 17)
+#define SECTION_RW ((0 << 15) | (1 << 10))
+#define SECTION_RO ((1 << 15) | (1 << 10))
+#define SECTION_TEXCB(tex, c, b) ((tex << 12) | (c << 3) | (b << 2))
+#define SECTION_DEVICE SECTION_TEXCB(0, 0, 1)
+#define SECTION_NORMAL SECTION_TEXCB(1, 0, 0)
+#define SECTION_NORMAL_CACHED SECTION_TEXCB(1, 1, 1)
+#define SECTION_NO_EXEC (1 << 4)
+#define SECTION_SECTION (2 << 0)
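
As a worked example of how these bits compose (an illustration, not code from this file): a shared, non-global, normal-cached, read/write section attribute word evaluates as checked below.

```c
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t attr = (1u << 16)	/* SECTION_SHARED        */
		      | (1u << 17)	/* SECTION_NOTGLOBAL     */
		      | (2u << 0)	/* SECTION_SECTION       */
		      | (1u << 12) | (1u << 3) | (1u << 2)
					/* SECTION_NORMAL_CACHED */
		      | (1u << 10);	/* SECTION_RW            */
	assert(attr == 0x3140Eu);
	return 0;
}
```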
+/*
+ * memarea_not_mapped - check memory not already (partially) mapped
+ * A finer mapping must be supported. Currently section mapping only!
+ */
+static bool memarea_not_mapped(struct map_area *map, void *ttbr0)
+{
+ uint32_t m, n;
+
+ m = (map->pa >> 20) * 4; /* assumes pa=va */
+ n = map->size >> 20;
+ while (n--) {
+ if (*((uint32_t *)((uint32_t)ttbr0 + m)) != 0)
+ return false;
+ m += 4;
+ }
+ return true;
+}
+
+/*
+* map_memarea - load mapping in target L1 table
+* A finer mapping must be supported. Currently section mapping only!
+*/
+static int map_memarea(struct map_area *map, void *ttbr0)
+{
+ uint32_t m, n;
+ unsigned long attr;
+
+ /*
+ * invalid area config
+ * - only section mapping currently supported
+ * - first section cannot be mapped (safety)
+ */
+ if ((map == NULL) ||
+ (ttbr0 == NULL) ||
+ (map->va != 0) ||
+ (map->region_size != 0) ||
+ (map->pa == 0) ||
+ ((map->pa + map->size - 1) < map->pa) ||
+ (map->size == 0) ||
+ (map->size & 0x000FFFFF) ||
+ (map->pa & 0x000FFFFF) || (map->va & 0x000FFFFF)) {
+ while (1)
+ ;
+ return 1;
+ }
+
+ attr = SECTION_SHARED | SECTION_NOTGLOBAL | SECTION_SECTION;
+
+ if (map->device == true)
+ attr |= SECTION_DEVICE;
+ else if (map->cached == true)
+ attr |= SECTION_NORMAL_CACHED;
+ else
+ attr |= SECTION_NORMAL;
+
+ if (map->rw == true)
+ attr |= SECTION_RW;
+ else
+ attr |= SECTION_RO;
+
+ if (map->exec == false)
+ attr |= SECTION_NO_EXEC;
+ if (map->secure == false)
+ attr |= SECTION_NOTSECURE;
+
+ map->va = map->pa; /* 1-to-1 pa=va mapping */
+ map->region_size = 1 << 20; /* 1MB section mapping */
+
+ m = (map->pa >> 20) * 4;
+ n = map->size >> 20;
+ while (n--) {
+ *((uint32_t *)((uint32_t)ttbr0 + m)) = (m << 18) | attr;
+ m += 4;
+ }
+
+ return 0;
+}
+
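The store `(m << 18) | attr` in map_memarea() works because m is both the byte offset of the L1 entry (m = (pa >> 20) * 4) and, shifted left by 18, the 1MB-aligned base pa & 0xFFF00000 that belongs in the descriptor. A quick standalone check of that identity, with an arbitrary example address:

```c
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t pa = 0x40100000u;	/* arbitrary example address */
	uint32_t m = (pa >> 20) * 4;	/* byte offset of the L1 entry */

	/* shifted back, m is the 1MB-aligned section base */
	assert((m << 18) == (pa & 0xFFF00000u));
	return 0;
}
```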
+/* load_bootcfg_mapping - attempt to map the teecore static mapping */
+static void load_bootcfg_mapping(void *ttbr0)
+{
+ struct map_area *map, *in;
+ uint32_t *p, n;
+
+ /* get memory bootcfg from system */
+ in = bootcfg_get_memory();
+ bootcfg_pbuf_is = (unsigned long)bootcfg_get_pbuf_is_handler();
+ if (bootcfg_pbuf_is == 0) {
+ EMSG("invalid platform handler for pbuf_is");
+ assert(0);
+ }
+
+ /* we must find at least a TEE_RAM, a TA_RAM and a NSEC_SHM area */
+ map_tee_ram = NULL;
+ map_ta_ram = NULL;
+ map_nsec_shm = NULL;
+
+ /* reset L1 table */
+ for (p = (uint32_t *)ttbr0, n = 4096; n > 0; n--)
+ *(p++) = 0;
+
+ /* map what needs to be mapped (non-null size and non INTRAM/EXTRAM) */
+ map = in;
+ while (map->type != MEM_AREA_NOTYPE) {
+ if (memarea_not_mapped(map, ttbr0) == false) {
+ EMSG("overlapping mapping ! trap CPU");
+ assert(0);
+ }
+
+ if (map_memarea(map, ttbr0)) {
+ EMSG("mapping failed ! trap CPU");
+ assert(0);
+ }
+
+ if (map->type == MEM_AREA_TEE_RAM)
+ map_tee_ram = map;
+ else if (map->type == MEM_AREA_TA_RAM)
+ map_ta_ram = map;
+ else if (map->type == MEM_AREA_NSEC_SHM)
+ map_nsec_shm = map;
+
+ map++;
+ }
+
+ if ((map_tee_ram == NULL) || (map_ta_ram == NULL) ||
+ (map_nsec_shm == NULL)) {
+ EMSG("mapping area missing");
+ assert(0);
+ }
+
+ static_memory_map = in;
+}
+
+/*
+ * core_init_mmu - init tee core default memory mapping
+ *
+ * location of target MMU L1 table is provided as argument.
+ * this routine sets the static default tee core mapping.
+ *
+ * If an error happens, core_init_mmu() is expected to reset.
+ */
+unsigned int core_init_mmu(unsigned int ttbr0, unsigned int ta_ttbr0)
+{
+ uint32_t n;
+
+ if (secure_get_cpu_id() >= CFG_TEE_CORE_NB_CORE) {
+ EMSG("invalid core ID %d. teecore supports %d cores.",
+ secure_get_cpu_id(), CFG_TEE_CORE_NB_CORE);
+ assert(0);
+ }
+
+ if ((ttbr0 & TEE_MMU_TTBRX_ATTR_MASK) ||
+ (ta_ttbr0 & TEE_MMU_TTBRX_ATTR_MASK)) {
+ EMSG("invalid MMU L1 addr: core=0x%X TA=0x%X", ttbr0, ta_ttbr0);
+ assert(0);
+ }
+
+ if (memlayout_init == MEMLAYOUT_INIT)
+ goto skip_mmu_fill;
+
+ /* Note that the initialization of the mmu may depend on the cutID */
+ load_bootcfg_mapping((void *)ttbr0);
+ memlayout_init = MEMLAYOUT_INIT;
+
+skip_mmu_fill:
+ /* All CPUs currently use the same mapping, even on SMP */
+ n = secure_get_cpu_id();
+ ttbr0 |= TEE_MMU_DEFAULT_ATTRS;
+ core_ttbr0[n] = ttbr0;
+ cpu_write_ttbr0(ttbr0);
+
+ memmap_notinit[n] = false;
+
+ /* prepare TA mmu table handling */
+ /* Support for 1 TA MMU table location per CPU core must still be implemented */
+ if (core_pa2va(ta_ttbr0, (uint32_t *)&(coreta_ttbr0_va[n]))) {
+ EMSG("failed to get virtual address of ta_ttbr0 0x%X",
+ ta_ttbr0);
+ assert(0);
+ }
+ coreta_ttbr0_pa[n] = ta_ttbr0;
+
+ return 0;
+}
+
+/* return the tee core CP15 TTBR0 */
+uint32_t core_mmu_get_ttbr0(void)
+{
+ return core_ttbr0[secure_get_cpu_id()];
+}
+
+/* return the tee core mmu L1 table base address */
+uint32_t core_mmu_get_ttbr0_base(void)
+{
+ return core_mmu_get_ttbr0() & TEE_MMU_TTBRX_TTBX_MASK;
+}
+
+/* return the tee core mmu L1 attributes */
+uint32_t core_mmu_get_ttbr0_attr(void)
+{
+ return core_mmu_get_ttbr0() & TEE_MMU_TTBRX_ATTR_MASK;
+}
+
+/* return the physical address of the MMU L1 table used for TA mapping */
+uint32_t core_mmu_get_ta_ul1_pa(void)
+{
+ return coreta_ttbr0_pa[secure_get_cpu_id()];
+}
+
+/* return the virtual address of the MMU L1 table used for TA mapping */
+uint32_t core_mmu_get_ta_ul1_va(void)
+{
+ return coreta_ttbr0_va[secure_get_cpu_id()];
+}
+
+/* routines to retrieve shared mem configuration */
+bool core_mmu_is_shm_cached(void)
+{
+ return map_nsec_shm ? map_nsec_shm->cached : false;
+}
+
+/*
+ * test attributes of target physical buffer
+ *
+ * Flags: pbuf_is(SECURE, NOT_SECURE, RAM, IOMEM, KEYVAULT).
+ *
+ */
+bool core_pbuf_is(uint32_t attr, tee_paddr_t pbuf, size_t len)
+{
+ struct map_area *map;
+
+ /* Empty buffers comply with anything */
+ if (len == 0)
+ return true;
+
+ switch (attr) {
+ case CORE_MEM_SEC:
+ return ((platform_pbuf_is_t) bootcfg_pbuf_is) (attr, pbuf, len);
+ case CORE_MEM_NON_SEC:
+ return ((platform_pbuf_is_t) bootcfg_pbuf_is) (attr, pbuf, len);
+ case CORE_MEM_TEE_RAM:
+ return pbuf_inside_map_area(pbuf, len, map_tee_ram);
+ case CORE_MEM_TA_RAM:
+ return pbuf_inside_map_area(pbuf, len, map_ta_ram);
+ case CORE_MEM_NSEC_SHM:
+ return pbuf_inside_map_area(pbuf, len, map_nsec_shm);
+ /* MultiPurpose and External RAM tests are platform specific */
+ case CORE_MEM_MULTPURPOSE:
+ return ((platform_pbuf_is_t) bootcfg_pbuf_is) (attr, pbuf, len);
+ case CORE_MEM_EXTRAM:
+ return ((platform_pbuf_is_t) bootcfg_pbuf_is) (attr, pbuf, len);
+ case CORE_MEM_CACHED:
+ map = find_map_by_pa(pbuf);
+ if (map == NULL || !pbuf_inside_map_area(pbuf, len, map))
+ return false;
+ return map->cached;
+ default:
+ return false;
+ }
+}
+
+/* test attributes of target virtual buffer (in core mapping) */
+bool core_vbuf_is(uint32_t attr, const void *vbuf, size_t len)
+{
+ uint32_t p;
+
+ /* Empty buffers comply with anything */
+ if (len == 0)
+ return true;
+
+ if (core_va2pa((uint32_t) vbuf, &p))
+ return false;
+
+ return core_pbuf_is(attr, (tee_paddr_t) p, len);
+}
+
+/*
+ * Return true if MMU is initialized for the current core.
+ * Note that this is for DEBUG only, to help prevent
+ * use of pa2va/va2pa before the mmu table is set up!
+ */
+static bool is_coremap_init(void)
+{
+ return !memmap_notinit[secure_get_cpu_id()];
+}
+
+/* core_va2pa - teecore exported service */
+int core_va2pa(uint32_t va, uint32_t *pa)
+{
+ struct map_area *map;
+
+ if (!is_coremap_init())
+ return -1;
+
+ map = find_map_by_va((void *)va);
+ if (map == NULL)
+ return -1;
+
+ *pa = (va & (map->region_size - 1)) |
+ ((map->pa + va - map->va) & ~(map->region_size - 1));
+ return 0;
+}
+
+/* core_pa2va - teecore exported service */
+int core_pa2va(uint32_t pa, uint32_t *va)
+{
+ struct map_area *map;
+
+ if (!is_coremap_init())
+ return -1;
+
+ map = find_map_by_pa((unsigned long)pa);
+ if (map == NULL)
+ return -1;
+
+ *va = (pa & (map->region_size - 1)) |
+ (((map->va + pa - map->pa)) & ~(map->region_size - 1));
+ return 0;
+}
+
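Both translations use the same arithmetic: keep the offset inside one region_size granule and rebase the upper bits between the two address spaces. A standalone check with a hypothetical 1:1 map entry, where the formula reduces to the identity:

```c
#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* Hypothetical 1:1 map entry with 1MB granularity. */
	uint32_t map_pa = 0x80000000u, map_va = 0x80000000u;
	uint32_t region_size = 1u << 20;

	uint32_t va = 0x80012345u;
	uint32_t pa = (va & (region_size - 1)) |
		      ((map_pa + va - map_va) & ~(region_size - 1));

	assert(pa == va);	/* identity for a pa == va mapping */
	return 0;
}
```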
+/*
+ * teecore gets some memory area definitions
+ */
+void core_mmu_get_mem_by_type(unsigned int type, unsigned int *s,
+ unsigned int *e)
+{
+ struct map_area *map;
+
+ /* first scan the bootcfg memory layout */
+ map = static_memory_map;
+ while (map->type != MEM_AREA_NOTYPE) {
+ if (map->type == type) {
+ *s = map->va;
+ *e = map->va + map->size;
+ return;
+ }
+ map++;
+ }
+ *s = 0;
+ *e = 0;
+}
+
+int core_tlb_maintenance(int op, unsigned int a)
+{
+ switch (op) {
+ case TLBINV_DATATLB:
+ secure_mmu_datatlbinvall(); /* ??? */
+ break;
+ case TLBINV_UNIFIEDTLB:
+ secure_mmu_unifiedtlbinvall();
+ break;
+ case TLBINV_CURRENT_ASID:
+ secure_mmu_unifiedtlbinv_curasid();
+ break;
+ case TLBINV_BY_ASID:
+ SMSG("TLBINV_BY_ASID is not yet supproted. Trap CPU!");
+ while (1)
+ ;
+ secure_mmu_unifiedtlbinv_byasid(a);
+ break;
+ case TLBINV_BY_MVA:
+ SMSG("TLB_INV_SECURE_MVA is not yet supported!");
+ while (1)
+ ;
+ secure_mmu_unifiedtlbinvbymva(a);
+ break;
+ default:
+ return 1;
+ }
+ return 0;
+}
+
+static unsigned int cache_maintenance_l1(int op, void *start, size_t len)
+{
+ switch (op) {
+ case DCACHE_CLEAN:
+ arm_cl1_d_cleanbysetway();
+ break;
+ case DCACHE_AREA_CLEAN:
+ arm_cl1_d_cleanbysetway();
+ break;
+ case DCACHE_INVALIDATE:
+ arm_cl1_d_invbysetway();
+ break;
+ case DCACHE_AREA_INVALIDATE:
+ arm_cl1_d_invbysetway();
+ break;
+ case ICACHE_INVALIDATE:
+ arm_cl1_i_inv_all();
+ break;
+ case ICACHE_AREA_INVALIDATE:
+ arm_cl1_i_inv_all();
+ break;
+ case WRITE_BUFFER_DRAIN:
+ DMSG("unsupported operation 0x%X (WRITE_BUFFER_DRAIN)",
+ (unsigned int)op);
+ return -1;
+ case DCACHE_CLEAN_INV:
+ arm_cl1_d_cleaninvbysetway();
+ break;
+ case DCACHE_AREA_CLEAN_INV:
+ arm_cl1_d_cleaninvbysetway();
+ break;
+ default:
+ return TEE_ERROR_NOT_IMPLEMENTED;
+ }
+ return TEE_SUCCESS;
+
+}
+
+/*
+ * Outer cache maintenance mutex shared with NSec.
+ *
+ * At boot, teecore does not need a shared mutex with NSec.
+ * Once the core has entered the NSec state, teecore is not allowed to run
+ * outer cache maintenance sequences unless it has negotiated with NSec a
+ * shared mutex to spin-lock on.
+ *
+ * In some situations (i.e. boot, hibernation), teecore natively synchronises
+ * the cores and hence does not need to rely on the NSec shared mutex. This
+ * can happen whether or not NSec has previously negotiated a shared mutex.
+ * Thus if teecore "disables" the outer (l2cc) shared mutex, it must be able
+ * to back up the registered one when enabling the shared mutex again.
+ *
+ * Currently there is no multi-cpu lock synchronisation: teecore runs
+ * exclusively on 1 core at a given time.
+ */
+static unsigned int *l2cc_mutex;
+static bool l2cc_mutex_required; /* default false */
+
+void core_l2cc_mutex_set(void *mutex)
+{
+ l2cc_mutex = (unsigned int *)mutex;
+}
+void core_l2cc_mutex_activate(bool en)
+{
+ l2cc_mutex_required = en;
+}
+
+static unsigned int cache_maintenance_l2(int op, void *start, size_t len)
+{
+ unsigned int ret;
+
+ /* is shared mutex configured */
+ if (l2cc_mutex_required && (l2cc_mutex == NULL))
+ return TEE_ERROR_GENERIC;
+ if (l2cc_mutex_required)
+ cpu_spin_lock(l2cc_mutex);
+
+ ret = TEE_SUCCESS;
+ switch (op) {
+ case L2CACHE_INVALIDATE:
+ arm_cl2_invbyway();
+ break;
+ case L2CACHE_AREA_INVALIDATE:
+ arm_cl2_invbyway();
+ break;
+ case L2CACHE_CLEAN:
+ arm_cl2_cleanbyway();
+ break;
+ case L2CACHE_AREA_CLEAN:
+ arm_cl2_cleanbyway();
+ break;
+ case L2CACHE_CLEAN_INV:
+ arm_cl2_cleaninvbyway();
+ break;
+ case L2CACHE_AREA_CLEAN_INV:
+ arm_cl2_cleaninvbyway();
+ break;
+ default:
+ ret = TEE_ERROR_NOT_IMPLEMENTED;
+ }
+
+ if (l2cc_mutex_required)
+ cpu_spin_unlock(l2cc_mutex);
+ return ret;
+}
+
+unsigned int core_cache_maintenance(int op, void *start, size_t len)
+{
+ unsigned int ret;
+
+ ret = cache_maintenance_l1(op, start, len);
+ if (ret != TEE_ERROR_NOT_IMPLEMENTED)
+ return ret;
+
+ ret = cache_maintenance_l2(op, start, len);
+ if (ret != TEE_ERROR_NOT_IMPLEMENTED)
+ return ret;
+
+ EMSG("unsupported operation 0x%X", (unsigned int)op);
+ return TEE_ERROR_GENERIC;
+}
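
Callers thus use a single entry point: the op is offered to the L1 handler first and falls through to L2 only when L1 answers TEE_ERROR_NOT_IMPLEMENTED. A hypothetical call site (the constants and EMSG come from headers this file already includes):

```c
/* Hypothetical call site for core_cache_maintenance(). */
static void flush_caches_example(void)
{
	if (core_cache_maintenance(DCACHE_CLEAN, NULL, 0) != TEE_SUCCESS)
		EMSG("L1 clean failed");

	/* falls through cache_maintenance_l1() into the L2 handler */
	if (core_cache_maintenance(L2CACHE_CLEAN, NULL, 0) != TEE_SUCCESS)
		EMSG("L2 clean failed");
}
```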
diff --git a/core/arch/arm32/mm/kta_table_unpg_asm.S b/core/arch/arm32/mm/kta_table_unpg_asm.S
new file mode 100644
index 00000000000..e7ae339b00f
--- /dev/null
+++ b/core/arch/arm32/mm/kta_table_unpg_asm.S
@@ -0,0 +1,124 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+.section .text
+
+.global translate_va2pa
+.global invalidate_mmu_tlb
+
+ .global WriteSCTLR
+ .global ReadSCTLR
+ .global WriteACTLR
+ .global ReadACTLR
+ .global WriteDIAGR
+ .global ReadDIAGR
+
+
+.func translate_va2pa
+translate_va2pa:
+ // R0 contains the original logical address
+ // Use a privileged-read VA to PA translation
+ MCR p15,0,R0,c7,c8,0
+ ISB
+
+ // Read the PAR value
+ MRC p15,0,R0,c7,c4,0
+ ISB
+
+ // return to caller
+ BX LR
+.endfunc
+
+
+.func invalidate_mmu_tlb
+invalidate_mmu_tlb:
+ /* Invalidate entire Unified TLB Inner Shareable. Data in R0 is ignored */
+ MCR p15, 0, R0, c8, c3, 0
+
+
+ // Ensure completion of the invalidate TLB operation
+ DSB
+
+ // Ensure table changes visible to instruction fetch
+ ISB
+
+ BX LR
+.endfunc
+
+#ifdef __GRANT_RESTRICTED_ACCESS_SECURE_REGS
+
+/* __asm void WriteSCTLR(uint32_t regVal) */
+ .func WriteSCTLR
+WriteSCTLR:
+ MCR p15, 0, r0, c1, c0, 0
+ BX LR
+ .endfunc
+
+
+/* __asm uint32_t ReadSCTLR( void ) */
+ .func ReadSCTLR
+ReadSCTLR:
+ MRC p15, 0, r0, c1, c0, 0
+ BX LR
+ .endfunc
+
+/* __asm void WriteACTLR(uint32_t regVal) */
+ .func WriteACTLR
+WriteACTLR:
+ MCR p15, 0, r0, c1, c0, 1
+ BX LR
+ .endfunc
+
+
+/* __asm uint32_t ReadACTLR( void ) */
+ .func ReadACTLR
+ReadACTLR:
+ MRC p15, 0, r0, c1, c0, 1
+ BX LR
+ .endfunc
+
+
+/* __asm void WriteDIAGR(uint32_t regVal) */
+ .func WriteDIAGR
+WriteDIAGR:
+ MCR p15, 0, r0, c15, c0, 1
+ BX LR
+ .endfunc
+
+/* __asm uint32_t ReadDIAGR( void ) */
+ .func ReadDIAGR
+ReadDIAGR:
+ MRC p15, 0, r0, c15, c0, 1
+ BX LR
+ .endfunc
+
+#endif
+
+
+/*-----------------------------------------------------------------------------
+ GLOBAL VARIABLES
+ *---------------------------------------------------------------------------*/
diff --git a/core/arch/arm32/mm/sub.mk b/core/arch/arm32/mm/sub.mk
new file mode 100644
index 00000000000..71f2dae03f9
--- /dev/null
+++ b/core/arch/arm32/mm/sub.mk
@@ -0,0 +1,20 @@
+srcs-y += core_mmu.c
+cflags-core_mmu.c-y += -Wno-strict-aliasing -Wno-unused-parameter
+
+srcs-y += tee_pager_unpg.c
+cflags-tee_pager_unpg.c-y += -Wno-unused-parameter
+
+
+srcs-y += tee_mmu.c
+cflags-tee_mmu.c-y += -Wno-unused-parameter
+
+srcs-y += kta_table_unpg_asm.S
+srcs-y += tee_mm.c
+cflags-tee_mm.c-y += -Wno-format
+cflags-tee_mm.c-y += -Wno-format-nonliteral -Wno-format-security
+
+srcs-y += tee_mm_unpg.c
+srcs-y += tee_mmu_unpg_asm.S
+srcs-y += tee_mmu_unpg.c
+srcs-y += tee_pager.c
+srcs-y += tee_pager_unpg_asm.S
diff --git a/core/arch/arm32/mm/tee_mm.c b/core/arch/arm32/mm/tee_mm.c
new file mode 100644
index 00000000000..fcb8abfe5c8
--- /dev/null
+++ b/core/arch/arm32/mm/tee_mm.c
@@ -0,0 +1,259 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include
+#include
+
+#include
+#include
+
+bool tee_mm_init(tee_mm_pool_t *pool, uint32_t lo, uint32_t hi, uint8_t shift,
+ uint32_t flags)
+{
+ if (pool == NULL)
+ return false;
+
+ pool->lo = lo;
+ pool->hi = hi;
+ pool->shift = shift;
+ pool->flags = flags;
+ pool->entry = calloc(1, sizeof(tee_mm_entry_t));
+
+ if (pool->entry == NULL)
+ return false;
+
+ if (pool->flags & TEE_MM_POOL_HI_ALLOC)
+ pool->entry->offset = ((hi - lo - 1) >> shift) + 1;
+ pool->entry->pool = pool;
+
+ return true;
+}
+
+void tee_mm_final(tee_mm_pool_t *pool)
+{
+ if (pool == NULL || pool->entry == NULL)
+ return;
+
+ while (pool->entry->next != NULL)
+ tee_mm_free(pool->entry->next);
+ free(pool->entry);
+ pool->entry = NULL;
+}
+
+static tee_mm_entry_t *tee_mm_add(tee_mm_entry_t *p)
+{
+ /* add to list */
+ if (p->next == NULL) {
+ p->next = malloc(sizeof(tee_mm_entry_t));
+ if (p->next == NULL)
+ return NULL;
+ p->next->next = NULL;
+ } else {
+ tee_mm_entry_t *nn = malloc(sizeof(tee_mm_entry_t));
+ if (nn == NULL)
+ return NULL;
+ nn->next = p->next;
+ p->next = nn;
+ }
+ return p->next;
+}
+
+tee_mm_entry_t *tee_mm_alloc(tee_mm_pool_t *pool, uint32_t size)
+{
+ uint32_t psize;
+ tee_mm_entry_t *entry;
+ tee_mm_entry_t *nn;
+
+ /* Check that pool is initialized */
+ if (!pool || !pool->entry)
+ return NULL;
+
+ entry = pool->entry;
+ if (size == 0)
+ psize = 0;
+ else
+ psize = ((size - 1) >> pool->shift) + 1;
+ /* Protect with mutex (multi thread) */
+
+ /* find free slot */
+ if (pool->flags & TEE_MM_POOL_HI_ALLOC) {
+ while (entry->next != NULL && psize >
+ (entry->offset - entry->next->offset -
+ entry->next->size))
+ entry = entry->next;
+ } else {
+ while (entry->next != NULL && psize >
+ (entry->next->offset - entry->size - entry->offset))
+ entry = entry->next;
+ }
+
+ /* check if we have enough memory */
+ if (pool->flags & TEE_MM_POOL_HI_ALLOC) {
+ if (((entry->offset << pool->shift) - pool->lo) < size)
+ /* out of memory */
+ return NULL;
+ } else {
+ if (((entry->offset << pool->shift) - pool->lo) < size)
+ /* out of memory */
+ return NULL;
+ if ((((entry->offset + entry->size) << pool->shift) + size) >=
+ (pool->hi - pool->lo))
+ /* out of memory */
+ return NULL;
+ }
+
+ nn = tee_mm_add(entry);
+ if (nn == NULL)
+ return NULL;
+
+ if (pool->flags & TEE_MM_POOL_HI_ALLOC)
+ nn->offset = entry->offset - psize;
+ else
+ nn->offset = entry->offset + entry->size;
+ nn->size = psize;
+ nn->pool = pool;
+
+ /* Protect with mutex end (multi thread) */
+
+ return nn;
+}
+
+static inline bool fit_in_gap(tee_mm_pool_t *pool, tee_mm_entry_t *e,
+ uint32_t offslo, uint32_t offshi)
+{
+ if (pool->flags & TEE_MM_POOL_HI_ALLOC) {
+ if (offshi > e->offset ||
+ (e->next != NULL &&
+ (offslo < e->next->offset + e->next->size)) ||
+ (offshi << pool->shift) - 1 > (pool->hi - pool->lo))
+ /* memory not available */
+ return false;
+ } else {
+ if (offslo < (e->offset + e->size) ||
+ (e->next != NULL && (offshi > e->next->offset)) ||
+ (offshi << pool->shift) > (pool->hi - pool->lo))
+ /* memory not available */
+ return false;
+ }
+
+ return true;
+}
+
+tee_mm_entry_t *tee_mm_alloc2(tee_mm_pool_t *pool, tee_vaddr_t base,
+ size_t size)
+{
+ tee_mm_entry_t *entry;
+ uint32_t offslo;
+ uint32_t offshi;
+ tee_mm_entry_t *mm;
+
+ /* Check that pool is initialized */
+ if (!pool || !pool->entry)
+ return NULL;
+
+ /* Wrapping and sanity check */
+ if ((base + size) < base || base < pool->lo)
+ return NULL;
+
+ entry = pool->entry;
+ offslo = (base - pool->lo) >> pool->shift;
+ offshi = ((base - pool->lo + size - 1) >> pool->shift) + 1;
+
+ /* find slot */
+ if (pool->flags & TEE_MM_POOL_HI_ALLOC) {
+ while (entry->next != NULL &&
+ offshi < entry->next->offset + entry->next->size)
+ entry = entry->next;
+ } else {
+ while (entry->next != NULL && offslo > entry->next->offset)
+ entry = entry->next;
+ }
+
+ /* Check that memory is available */
+ if (!fit_in_gap(pool, entry, offslo, offshi))
+ return NULL;
+
+ mm = tee_mm_add(entry);
+ if (mm == NULL)
+ return NULL;
+
+ mm->offset = offslo;
+ mm->size = offshi - offslo;
+ mm->pool = pool;
+
+ return mm;
+}
+
+void tee_mm_free(tee_mm_entry_t *p)
+{
+ tee_mm_entry_t *entry;
+
+ if (!p || !p->pool)
+ return;
+
+ entry = p->pool->entry;
+
+ /* Protect with mutex (multi thread) */
+
+ /* remove entry from list */
+ while (entry->next != NULL && entry->next != p)
+ entry = entry->next;
+
+ if (entry->next == NULL) {
+ DMSG("invalid mm_entry %p", p);
+ TEE_ASSERT(0);
+ }
+ entry->next = entry->next->next;
+
+ if (p->pool->flags & TEE_MM_POOL_PAGED) {
+ /* unmap entry */
+ tee_pager_unmap((uint32_t) (p->offset << p->pool->shift) +
+ p->pool->lo, p->size);
+ }
+
+ free(p);
+
+ /* Protect with mutex end (multi thread) */
+}
+
+size_t tee_mm_get_bytes(const tee_mm_entry_t *mm)
+{
+ if (!mm || !mm->pool)
+ return 0;
+ else
+ return mm->size << mm->pool->shift;
+}
+
+bool tee_mm_addr_is_within_range(tee_mm_pool_t *pool, uint32_t addr)
+{
+ return (pool && ((addr >= pool->lo) && (addr <= pool->hi)));
+}
+
+bool tee_mm_is_empty(tee_mm_pool_t *pool)
+{
+ return pool == NULL || pool->entry == NULL || pool->entry->next == NULL;
+}
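
Put together, typical use of this pool looks like the sketch below. It assumes the tee_mm.h header declaring the API above is on the include path; the pool bounds are hypothetical.

```c
/* Assumes the tee_mm.h header declaring the API above is available. */
#include <stddef.h>
#include <stdint.h>
#include <tee_mm.h>

static tee_mm_pool_t pool;

static void pool_example(void)
{
	/* hypothetical bounds; shift 20 means 1MB allocation units */
	if (!tee_mm_init(&pool, 0x80000000, 0x81000000, 20,
			 TEE_MM_POOL_NO_FLAGS))
		return;

	tee_mm_entry_t *mm = tee_mm_alloc(&pool, 3 * 1024 * 1024);
	if (mm) {
		uintptr_t base = tee_mm_get_smem(mm);	/* start address */
		size_t bytes = tee_mm_get_bytes(mm);	/* 3MB, rounded up */

		(void)base;
		(void)bytes;
		tee_mm_free(mm);
	}
	tee_mm_final(&pool);
}
```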
diff --git a/core/arch/arm32/mm/tee_mm_unpg.c b/core/arch/arm32/mm/tee_mm_unpg.c
new file mode 100644
index 00000000000..91f4416a31d
--- /dev/null
+++ b/core/arch/arm32/mm/tee_mm_unpg.c
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include
+#include
+
+/* Physical Public DDR pool */
+tee_mm_pool_t tee_mm_pub_ddr;
+
+/* Physical Secure DDR pool */
+tee_mm_pool_t tee_mm_sec_ddr;
+
+/* Virtual eSRAM pool */
+tee_mm_pool_t tee_mm_vcore;
+
+tee_mm_entry_t *tee_mm_find(const tee_mm_pool_t *pool, uint32_t addr)
+{
+ tee_mm_entry_t *entry = pool->entry;
+ uint16_t offset = (addr - pool->lo) >> pool->shift;
+
+ if (addr > pool->hi || addr < pool->lo)
+ return NULL;
+
+ while (entry->next != NULL) {
+ entry = entry->next;
+
+ if ((offset >= entry->offset) &&
+ (offset < (entry->offset + entry->size))) {
+ return entry;
+ }
+ }
+
+ return NULL;
+}
+
+uintptr_t tee_mm_get_smem(const tee_mm_entry_t *mm)
+{
+ return (mm->offset << mm->pool->shift) + mm->pool->lo;
+}
diff --git a/core/arch/arm32/mm/tee_mmu.c b/core/arch/arm32/mm/tee_mmu.c
new file mode 100644
index 00000000000..ded29793c53
--- /dev/null
+++ b/core/arch/arm32/mm/tee_mmu.c
@@ -0,0 +1,897 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include
+#include
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include "tee_api_types.h"
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#define TEE_MMU_PAGE_TEX_SHIFT 6
+
+/* MMU table page flags */
+#define TEE_MMU_PAGE_NG (1 << 11)
+#define TEE_MMU_PAGE_S (1 << 10)
+#define TEE_MMU_PAGE_AP2 (1 << 9)
+#define TEE_MMU_PAGE_TEX(x) (x << TEE_MMU_PAGE_TEX_SHIFT)
+#define TEE_MMU_PAGE_AP1 (1 << 5)
+#define TEE_MMU_PAGE_AP0 (1 << 4)
+#define TEE_MMU_PAGE_C (1 << 3)
+#define TEE_MMU_PAGE_B (1 << 2)
+#define TEE_MMU_PAGE (1 << 1)
+#define TEE_MMU_PAGE_XN (1 << 0)
+
+#define TEE_MMU_PAGE_CACHE_MASK \
+ (TEE_MMU_PAGE_TEX(7) | TEE_MMU_PAGE_C | TEE_MMU_PAGE_B)
+
+#define TEE_MMU_PAGE_MASK ((1 << 12) - 1)
+
+/* For legacy */
+#define TEE_MMU_PAGE_LEGACY 0
+
+/* MMU table section flags */
+#define TEE_MMU_SECTION_NS (1 << 19)
+#define TEE_MMU_SECTION_NG (1 << 17)
+#define TEE_MMU_SECTION_S (1 << 16)
+#define TEE_MMU_SECTION_AP2 (1 << 15)
+#define TEE_MMU_SECTION_TEX(x) (x << 12)
+#define TEE_MMU_SECTION_AP1 (1 << 11)
+#define TEE_MMU_SECTION_AP0 (1 << 10)
+#define TEE_MMU_SECTION_DOMAIN(x) (x << 5)
+#define TEE_MMU_SECTION_XN (1 << 4)
+#define TEE_MMU_SECTION_C (1 << 3)
+#define TEE_MMU_SECTION_B (1 << 2)
+#define TEE_MMU_SECTION (1 << 1)
+
+/* User data, no cache attributes */
+#define TEE_MMU_SECTION_UDATA \
+ (TEE_MMU_SECTION_NG | TEE_MMU_SECTION_S | \
+ TEE_MMU_SECTION_AP1 | TEE_MMU_SECTION_AP0 | TEE_MMU_SECTION_XN |\
+ TEE_MMU_SECTION_DOMAIN(1) | TEE_MMU_SECTION)
+
+/* User code, no cache attributes */
+#define TEE_MMU_SECTION_UCODE \
+ (TEE_MMU_SECTION_NG | TEE_MMU_SECTION_S | \
+ TEE_MMU_SECTION_AP1 | TEE_MMU_SECTION_AP0 | \
+ TEE_MMU_SECTION_DOMAIN(1) | TEE_MMU_SECTION)
+
+/* Kernel data, global, priv-only access, no exec, no cache attributes */
+#define TEE_MMU_SECTION_KDATA \
+ (TEE_MMU_SECTION_S | \
+ TEE_MMU_SECTION_AP0 | TEE_MMU_SECTION_XN | \
+ TEE_MMU_SECTION_DOMAIN(1) | TEE_MMU_SECTION)
+
+/* Kernel code, global, priv-only access, no cache attributes */
+#define TEE_MMU_SECTION_KCODE \
+ (TEE_MMU_SECTION_S | \
+ TEE_MMU_SECTION_AP0 | \
+ TEE_MMU_SECTION_DOMAIN(1) | TEE_MMU_SECTION)
+
+/* Outer & Inner Write-Back, Write-Allocate. Default cache settings */
+#define TEE_MMU_SECTION_CACHEMASK \
+ (TEE_MMU_SECTION_TEX(7) | TEE_MMU_SECTION_C | TEE_MMU_SECTION_B)
+#define TEE_MMU_SECTION_OIWBWA \
+ (TEE_MMU_SECTION_TEX(1) | TEE_MMU_SECTION_C | TEE_MMU_SECTION_B)
+#define TEE_MMU_SECTION_NOCACHE \
+ TEE_MMU_SECTION_TEX(1)
+
+#define TEE_MMU_KL2_ENTRY(page_num) \
+ (*(uint32_t *)(SEC_VIRT_MMU_L2_BASE + ((uint32_t)(page_num)) * 4))
+
+#define TEE_MMU_UL1_ENTRY(page_num) \
+ (*(uint32_t *)(TEE_MMU_UL1_BASE + ((uint32_t)(page_num)) * 4))
+
+/* Extract AP[2] and AP[1:0] */
+#define TEE_MMU_L1_AP(e) (((e >> 13) & 1) | ((e >> 10) & 3))
+
+#define TEE_MMU_AP_USER_RO 0x02
+#define TEE_MMU_AP_USER_RW 0x03
+
+/* Support for 31 concurrent sessions */
+static uint32_t g_asid = 0xffffffff;
+static uint32_t g_current_context;
+
+static tee_mm_pool_t tee_mmu_virt_kmap;
+
+static uint32_t tee_mmu_get_io_size(const struct tee_ta_param *param)
+{
+ uint32_t i;
+ uint32_t res = 0;
+
+ for (i = 0; i < 4; i++) {
+ uint32_t param_type = TEE_PARAM_TYPE_GET(param->types, i);
+
+ if ((param_type == TEE_PARAM_TYPE_MEMREF_INPUT ||
+ param_type == TEE_PARAM_TYPE_MEMREF_OUTPUT ||
+ param_type == TEE_PARAM_TYPE_MEMREF_INOUT) &&
+ param->params[i].memref.size != 0) {
+ res +=
+ ((((uint32_t) param->params[i].memref.
+ buffer & SECTION_MASK) +
+ param->params[i].memref.size) >> SECTION_SHIFT) +
+ 1;
+ }
+ }
+
+ return res;
+}
+
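The per-parameter expression (((buf & SECTION_MASK) + size) >> SECTION_SHIFT) + 1 counts every 1MB section a buffer touches, including partial first and last sections (it may over-count by one when a range ends exactly on a section boundary, which errs on the safe side). A standalone check, assuming 1MB sections (SECTION_SHIFT == 20), consistent with the section mapping used here:

```c
#include <assert.h>
#include <stdint.h>

#define SECTION_SHIFT 20
#define SECTION_MASK  ((1u << SECTION_SHIFT) - 1)

static uint32_t sections_spanned(uint32_t buf, uint32_t size)
{
	return (((buf & SECTION_MASK) + size) >> SECTION_SHIFT) + 1;
}

int main(void)
{
	/* an 8KB buffer crossing a 1MB boundary touches two sections */
	assert(sections_spanned(0x400FF000u, 0x2000u) == 2);
	/* the same size fully inside one section touches one */
	assert(sections_spanned(0x40000000u, 0x2000u) == 1);
	return 0;
}
```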
+/*
+ * tee_mmu_is_mapped - Check if range defined by input params is mapped.
+ */
+static bool tee_mmu_is_mapped(const struct tee_ta_ctx *ctx, const uint32_t addr,
+ const uint32_t length, const uint32_t type)
+{
+ uint32_t i = 0;
+ uint32_t nbr_sections = (((addr & SECTION_MASK) + length)
+ >> SECTION_SHIFT) + 1;
+ bool ret = false;
+
+ if (ctx == NULL || ctx->mmu == NULL || ctx->mmu->table == NULL ||
+ ctx->mmu->size < ((addr + length) >> SECTION_SHIFT))
+ return ret;
+
+ while (i < ctx->mmu->size && !ret) {
+ if (addr > (ctx->mmu->table[i] & ~SECTION_MASK) &&
+ addr < ((ctx->mmu->table[i] & ~SECTION_MASK)
+ + (1 << SECTION_SHIFT)) &&
+ ((ctx->mmu->table[i] & SECTION_MASK) == type)) {
+ uint32_t section = 1;
+ while (section < nbr_sections) {
+ if ((ctx->mmu->table[i] >> SECTION_SHIFT) +
+ section !=
+ (ctx->mmu->
+ table[i + section] >> SECTION_SHIFT) ||
+ ((ctx->mmu->
+ table[i + section] & SECTION_MASK) !=
+ type))
+ break;
+ section++;
+ }
+ if (section == nbr_sections)
+ ret = true;
+ }
+ i++;
+ }
+
+ return ret;
+}
+
+TEE_Result tee_mmu_init(struct tee_ta_ctx *ctx)
+{
+ uint32_t asid = 1;
+
+ if (ctx->context == 0) {
+ ctx->context = 1;
+
+ /* Find available ASID */
+ while (!(asid & g_asid) && (asid != 0)) {
+ ctx->context++;
+ asid = asid << 1;
+ }
+
+ if (asid == 0) {
+ DMSG("Failed to allocate ASID");
+ return TEE_ERROR_GENERIC;
+ }
+ g_asid &= ~asid;
+ }
+
+ ctx->mmu = malloc(sizeof(tee_mmu_info_t));
+ if (ctx->mmu) {
+ tee_mmu_info_t *p = ctx->mmu;
+ p->table = 0;
+ p->size = 0;
+ } else {
+ return TEE_ERROR_OUT_OF_MEMORY;
+ }
+
+ return TEE_SUCCESS;
+}
+
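The ASID scan above is a one-hot bitmask allocator: g_asid keeps one free bit per ASID and the loop walks to the lowest set bit. A standalone sketch of the same scheme (names are illustrative, not from this file):

```c
#include <assert.h>
#include <stdint.h>

static uint32_t free_asids = 0xffffffffu;	/* 1 bit = one free ASID */

/* Return 1..32 on success, 0 when every ASID is taken. */
static unsigned int alloc_asid(void)
{
	uint32_t bit = 1;
	unsigned int id = 1;

	while (bit && !(bit & free_asids)) {
		bit <<= 1;
		id++;
	}
	if (!bit)
		return 0;
	free_asids &= ~bit;	/* mark as allocated */
	return id;
}

static void free_asid(unsigned int id)
{
	free_asids |= 1u << (id - 1);	/* give the bit back */
}

int main(void)
{
	unsigned int a = alloc_asid(), b = alloc_asid();

	assert(a == 1 && b == 2);
	free_asid(a);
	assert(alloc_asid() == 1);	/* lowest free bit is reused */
	return 0;
}
```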
+static TEE_Result tee_mmu_map_io(struct tee_ta_ctx *ctx, uint32_t **buffer,
+ const uint32_t vio, struct tee_ta_param *param)
+{
+ uint32_t i;
+ uint32_t vi_offset = vio;
+ TEE_Result res = TEE_SUCCESS;
+ uint32_t nbr_sections, py_offset, v, section, sect_prot;
+
+ /* Map IO buffers in public memory */
+ for (i = 0; i < 4; i++) {
+ uint32_t param_type = TEE_PARAM_TYPE_GET(param->types, i);
+ TEE_Param *p = &param->params[i];
+
+ if ((!((param_type == TEE_PARAM_TYPE_MEMREF_INPUT) ||
+ (param_type == TEE_PARAM_TYPE_MEMREF_OUTPUT) ||
+ (param_type == TEE_PARAM_TYPE_MEMREF_INOUT))) ||
+ (p->memref.size == 0))
+ continue;
+
+ nbr_sections =
+ ((((uint32_t) p->memref.buffer & SECTION_MASK) +
+ p->memref.size) >> SECTION_SHIFT) + 1;
+ py_offset = (uint32_t) p->memref.buffer >> SECTION_SHIFT;
+ v = ((vi_offset << SECTION_SHIFT) +
+ ((uint32_t) p->memref.buffer & SECTION_MASK));
+ section = 0;
+
+ if ((ctx->flags & TA_FLAG_USER_MODE) ==
+ TA_FLAG_USER_MODE) {
+ sect_prot = TEE_MMU_SECTION_UDATA;
+ } else {
+ sect_prot = TEE_MMU_SECTION_KDATA;
+ }
+#ifdef PAGER_DEBUG_PRINT
+ DMSG("tee_mmu_map: i 0x%x ph %p -> v %p\n nbr_sections %u", i,
+ p->memref.buffer, v, nbr_sections);
+#endif
+ /* Set NS bit if buffer is not secure */
+ if (tee_pbuf_is_non_sec
+ (p->memref.buffer, p->memref.size) == true) {
+ sect_prot |= TEE_MMU_SECTION_NS;
+ } else {
+ /*
+ * If secure, check here if security level is
+ * reached. This operation is likely to be
+ * platform dependent.
+ */
+
+ /* case STTEE on Orly2: it has to be TEE external DDR */
+ if (core_pbuf_is(CORE_MEM_EXTRAM,
+ (tee_paddr_t) p->memref.buffer,
+ p->memref.size) == false)
+ return TEE_ERROR_SECURITY;
+ }
+
+ /*
+ * Configure inner and outer cache settings.
+ */
+ sect_prot &= ~TEE_MMU_SECTION_CACHEMASK;
+ sect_prot |= TEE_MMU_SECTION_TEX(4);
+ if (param->param_attr[i] & TEESMC_ATTR_CACHE_O_WRITE_THR)
+ sect_prot |= TEE_MMU_SECTION_TEX(2);
+ if (param->param_attr[i] & TEESMC_ATTR_CACHE_I_WRITE_BACK)
+ sect_prot |= TEE_MMU_SECTION_TEX(1);
+ if (param->param_attr[i] & TEESMC_ATTR_CACHE_O_WRITE_THR)
+ sect_prot |= TEE_MMU_SECTION_C;
+ if (param->param_attr[i] & TEESMC_ATTR_CACHE_O_WRITE_BACK)
+ sect_prot |= TEE_MMU_SECTION_B;
+
+ if (((sect_prot & TEE_MMU_SECTION_NS) == TEE_MMU_SECTION_NS) &&
+ ((sect_prot & TEE_MMU_SECTION_XN) == 0)) {
+ EMSG("invalid map config: nsec mem map as executable!");
+ sect_prot |= TEE_MMU_SECTION_XN;
+ }
+
+ if (tee_mmu_is_mapped(ctx, (uint32_t) p->memref.buffer,
+ p->memref.size, sect_prot)) {
+ res = tee_mmu_user_pa2va(ctx, p->memref.buffer,
+ &p->memref.buffer);
+ if (res != TEE_SUCCESS)
+ return res;
+ } else {
+ p->memref.buffer = (void *)v;
+
+ while (section < nbr_sections) {
+ **buffer =
+ ((section + py_offset) << SECTION_SHIFT) |
+ sect_prot;
+ (*buffer)++;
+ section++;
+ }
+
+ vi_offset += nbr_sections;
+ }
+ }
+
+ return res;
+}
+
+/*
+ * tee_mmu_map - alloc and fill mmu mapping table for a user TA (uTA).
+ *
+ * param - Contains the physical addr of the input buffers
+ * Returns logical addresses
+ *
+ * Allocate a table to store the N first section entries of the MMU L1 table
+ * used to map the target user TA, and clear table to 0.
+ * Load mapping for the TA stack_heap area, code area and params area (params
+ * are the 4 GP TEE TA invoke parameters buffer).
+ */
+TEE_Result tee_mmu_map(struct tee_ta_ctx *ctx, struct tee_ta_param *param)
+{
+ TEE_Result res = TEE_SUCCESS;
+ uint32_t py_offset;
+ void *p;
+ uintptr_t smem;
+ uint32_t *buffer;
+ uint32_t section = 0, section_cnt = 0;
+
+ TEE_ASSERT((ctx->flags & TA_FLAG_EXEC_DDR) != 0);
+
+ ctx->mmu->size = tee_mm_get_size(ctx->mm_heap_stack) +
+ tee_mm_get_size(ctx->mm) + tee_mmu_get_io_size(param) +
+ TEE_DDR_VLOFFSET;
+
+ if (ctx->mmu->size > TEE_MMU_UL1_NUM_USER_ENTRIES) {
+ res = TEE_ERROR_EXCESS_DATA;
+ goto exit;
+ }
+
+ if (ctx->mmu->table)
+ free(ctx->mmu->table);
+
+ ctx->mmu->table = malloc(ctx->mmu->size * 4);
+ if (ctx->mmu->table == NULL) {
+ res = TEE_ERROR_OUT_OF_MEMORY;
+ goto exit;
+ }
+ memset(ctx->mmu->table, 0, ctx->mmu->size * 4);
+
+ /*
+ * Map heap and stack
+ */
+ smem = tee_mm_get_smem(ctx->mm_heap_stack);
+ if (core_va2pa((uint32_t)smem, (uint32_t *)&p)) {
+ res = TEE_ERROR_SECURITY;
+ goto exit;
+ }
+
+ py_offset = (uint32_t)p >> SECTION_SHIFT;
+
+ buffer = (uint32_t *)ctx->mmu->table + TEE_DDR_VLOFFSET;
+ while (section < tee_mm_get_size(ctx->mm_heap_stack)) {
+ *buffer++ = ((section++ + py_offset) << SECTION_SHIFT) |
+ TEE_MMU_SECTION_UDATA | TEE_MMU_SECTION_OIWBWA;
+ section_cnt++;
+ }
+
+ /*
+ * Map code
+ */
+ smem = tee_mm_get_smem(ctx->mm);
+ if (core_va2pa((uint32_t)smem, (uint32_t *)&p)) {
+ res = TEE_ERROR_SECURITY;
+ goto exit;
+ }
+
+ py_offset = (uint32_t) p >> SECTION_SHIFT;
+
+ section = 0;
+ while (section < tee_mm_get_size(ctx->mm)) {
+ *buffer++ = ((section++ + py_offset) << SECTION_SHIFT) |
+ (TEE_MMU_SECTION_UCODE | TEE_MMU_SECTION_OIWBWA);
+ section_cnt++;
+ }
+
+ ctx->mmu->ta_private_vmem_start = TEE_DDR_VLOFFSET << SECTION_SHIFT;
+ ctx->mmu->ta_private_vmem_end = (TEE_DDR_VLOFFSET + section_cnt) <<
+ SECTION_SHIFT;
+
+ /*
+ * Map io parameters
+ */
+ res =
+ tee_mmu_map_io(ctx, &buffer,
+ ((uint32_t) buffer - (uint32_t) ctx->mmu->table) / 4,
+ param);
+
+exit:
+ if (res != TEE_SUCCESS) {
+ free(ctx->mmu->table);
+ ctx->mmu->table = NULL;
+ ctx->mmu->size = 0;
+ }
+
+ return res;
+}
+
+/*
+ * tee_mmu_final - finalise and free ctx mmu
+ */
+void tee_mmu_final(struct tee_ta_ctx *ctx)
+{
+ uint32_t asid = 1 << ((ctx->context - 1) & 0xff);
+
+ /* return ASID */
+ g_asid |= asid;
+ g_current_context = 0;
+
+ /* clear MMU entries to avoid clash when asid is reused */
+ tee_mmu_invtlb_asid(ctx->context & 0xff);
+ ctx->context = 0;
+
+ if (ctx->mmu != NULL) {
+ tee_mmu_info_t *p = ctx->mmu;
+ free(p->table);
+ free(ctx->mmu);
+ }
+ ctx->mmu = NULL;
+}
+
+/* return true only if buffer fits inside TA private memory */
+bool tee_mmu_is_vbuf_inside_ta_private(const struct tee_ta_ctx *ctx,
+ const uint32_t va, size_t size)
+{
+ if ((va + size < va) ||
+ (va < ctx->mmu->ta_private_vmem_start) ||
+ ((va + size) > ctx->mmu->ta_private_vmem_end))
+ return false;
+ return true;
+}
+
+/* return true only if buffer fits outside TA private memory */
+bool tee_mmu_is_vbuf_outside_ta_private(const struct tee_ta_ctx *ctx,
+ const uint32_t va, size_t size)
+{
+ if (va + size < va)
+ return false;
+ if ((va < ctx->mmu->ta_private_vmem_start) &&
+ ((va + size) > ctx->mmu->ta_private_vmem_start))
+ return false;
+ if ((va < ctx->mmu->ta_private_vmem_end) &&
+ ((va + size) > ctx->mmu->ta_private_vmem_end))
+ return false;
+ return true;
+}
+
+TEE_Result tee_mmu_kernel_to_user(const struct tee_ta_ctx *ctx,
+ const uint32_t kaddr, uint32_t *uaddr)
+{
+ uint32_t i = 0;
+ uint32_t pa;
+
+ if (core_va2pa(kaddr, &pa))
+ return TEE_ERROR_SECURITY;
+
+ while (i < ctx->mmu->size) {
+ if ((pa & (~SECTION_MASK)) ==
+ (ctx->mmu->table[i] & (~SECTION_MASK))) {
+ *uaddr = (i << SECTION_SHIFT) + (kaddr & SECTION_MASK);
+ return TEE_SUCCESS;
+ }
+ i++;
+ }
+
+ return TEE_ERROR_ITEM_NOT_FOUND;
+}
+
+TEE_Result tee_mmu_user_va2pa_helper(const struct tee_ta_ctx *ctx, void *ua,
+ void **pa)
+{
+ uint32_t n = (uint32_t) ua >> SECTION_SHIFT;
+
+ if (n >= ctx->mmu->size)
+ return TEE_ERROR_ACCESS_DENIED;
+
+ *pa = (void *)((ctx->mmu->table[n] & ~SECTION_MASK) |
+ ((uint32_t) ua & SECTION_MASK));
+ return TEE_SUCCESS;
+}
+
+TEE_Result tee_mmu_user_pa2va_helper(struct tee_ta_ctx *ctx, void *pa,
+ void **va)
+{
+ uint32_t i = 0;
+
+ while (i < ctx->mmu->size) {
+ if (ctx->mmu->table[i] != 0 &&
+ (uint32_t) pa >= (ctx->mmu->table[i] & ~SECTION_MASK) &&
+ (uint32_t) pa < ((ctx->mmu->table[i] & ~SECTION_MASK)
+ + (1 << SECTION_SHIFT))) {
+ *va = (void *)((i << SECTION_SHIFT) +
+ ((uint32_t) pa & SECTION_MASK));
+ return TEE_SUCCESS;
+ }
+ i++;
+ }
+ return TEE_ERROR_ACCESS_DENIED;
+}
+
+TEE_Result tee_mmu_check_access_rights(struct tee_ta_ctx *ctx,
+ uint32_t flags, tee_uaddr_t uaddr,
+ size_t len)
+{
+ tee_uaddr_t a;
+ uint32_t param_section;
+
+ /* Address wrap */
+ if (uaddr + len < uaddr)
+ return TEE_ERROR_ACCESS_DENIED;
+
+ param_section = TEE_DDR_VLOFFSET +
+ tee_mm_get_size(ctx->mm_heap_stack) + tee_mm_get_size(ctx->mm);
+
+ for (a = uaddr; a < (uaddr + len); a += SECTION_SIZE) {
+ uint32_t n = a >> SECTION_SHIFT;
+
+ if (n >= ctx->mmu->size)
+ return TEE_ERROR_ACCESS_DENIED;
+
+ if ((flags & TEE_MEMORY_ACCESS_ANY_OWNER) !=
+ TEE_MEMORY_ACCESS_ANY_OWNER && n >= param_section) {
+ void *pa;
+ TEE_Result res =
+ tee_mmu_user_va2pa(ctx, (void *)a, &pa);
+
+ if (res != TEE_SUCCESS)
+ return res;
+ /*
+ * Parameters are shared with normal world if they
+ * aren't in secure DDR.
+ *
+ * If the parameters are in secure DDR it's because one
+ * TA is invoking another TA and in that case there's
+ * new memory allocated privately for the parameters to
+ * this TA.
+ */
+ if (!tee_mm_addr_is_within_range
+ (&tee_mm_sec_ddr, (uint32_t) pa))
+ return TEE_ERROR_ACCESS_DENIED;
+ }
+
+ /* Check Access Protection from L1 entry */
+ switch (TEE_MMU_L1_AP(ctx->mmu->table[n])) {
+ case TEE_MMU_AP_USER_RO:
+ if ((flags & TEE_MEMORY_ACCESS_WRITE) != 0)
+ return TEE_ERROR_ACCESS_DENIED;
+ break;
+ case TEE_MMU_AP_USER_RW:
+ break;
+ default:
+ return TEE_ERROR_ACCESS_DENIED;
+ }
+ }
+
+ return TEE_SUCCESS;
+}
+
+void tee_mmu_set_ctx(struct tee_ta_ctx *ctx)
+{
+ if (ctx == NULL) {
+ tee_mmu_switch(core_mmu_get_ttbr0(), 0);
+ } else {
+ uint32_t base, va, i;
+
+ base = TEE_MMU_UL1_BASE;
+
+ if (core_pa2va(core_mmu_get_ttbr0_base(), &va)) {
+ EMSG("unmapped teecore mmu table! trap CPU!");
+ assert(0);
+ }
+ /* TODO: why not use the same L1 table and play only on the ASID
+ * to identify tee_mmu_is_kmapping()?
+ * Does TEEcore need to protect against a TA changing the mapping?
+ */
+
+ /* copy teecore mapping (privileged mapping only) */
+ memcpy((void *)base, (void *)va, 16 * 1024);
+
+ /* check the first entries are not mapped: we will map the uTA in! */
+ for (i = 0; i < (ctx->mmu->size * 4); i += 4) {
+ if (*(uint32_t *)(base + i) != 0) {
+ EMSG("mmu table is not clean: cannot add map");
+ assert(0);
+ }
+ }
+
+ /* copy the uTA mapping at the beginning of the mmu table */
+ memcpy((void *)base, ctx->mmu->table, ctx->mmu->size * 4);
+
+ /* Change ASID to new value */
+ tee_mmu_switch(TEE_MMU_UL1_PA_BASE | TEE_MMU_DEFAULT_ATTRS,
+ ctx->context);
+ }
+ core_tlb_maintenance(TLBINV_CURRENT_ASID, 0);
+}
+
+uintptr_t tee_mmu_get_load_addr(const struct tee_ta_ctx *const ctx)
+{
+ TEE_ASSERT((ctx->flags & TA_FLAG_EXEC_DDR) != 0);
+
+ return (TEE_DDR_VLOFFSET + tee_mm_get_size(ctx->mm_heap_stack)) <<
+ SECTION_SHIFT;
+}
+
+/*
+ * tee_mmu_kmap_init - init TA mapping support
+ *
+ * TAs are mapped in the virtual address range [0, 32MB).
+ * The TA MMU L1 table is always located at TEE_MMU_UL1_BASE.
+ * The MMU table for a target TA instance will be copied to this address
+ * when tee core sets up TA context.
+ */
+void tee_mmu_kmap_init(void)
+{
+ tee_vaddr_t s = TEE_MMU_UL1_NUM_USER_ENTRIES << SECTION_SHIFT;
+ tee_vaddr_t e = s + (TEE_MMU_UL1_NUM_KERN_ENTRIES << SECTION_SHIFT);
+
+ if ((TEE_MMU_UL1_PA_BASE % TEE_MMU_UL1_SIZE) != 0) {
+ DMSG("Bad MMU addr va 0x%x pa 0x%x 0x%x\n",
+ TEE_MMU_UL1_BASE, TEE_MMU_UL1_PA_BASE,
+ TEE_MMU_UL1_PA_BASE % TEE_MMU_UL1_SIZE);
+ assert(0);
+ }
+
+ /* Configure MMU UL1 */
+ memset((void *)TEE_MMU_UL1_BASE, 0, TEE_MMU_UL1_SIZE);
+
+ if (!tee_mm_init(&tee_mmu_virt_kmap, s, e, SECTION_SHIFT,
+ TEE_MM_POOL_NO_FLAGS)) {
+ DMSG("Failed to init kmap. Trap CPU!");
+ assert(0);
+ }
+}
+
+TEE_Result tee_mmu_kmap_helper(tee_paddr_t pa, size_t len, void **va)
+{
+ tee_mm_entry_t *mm;
+ size_t n;
+ uint32_t *l1 = (uint32_t *)TEE_MMU_UL1_KERN_BASE;
+ uint32_t py_offset = (uint32_t) pa >> SECTION_SHIFT;
+ uint32_t pa_s = TEE_ROUNDDOWN(pa, SECTION_SIZE);
+ uint32_t pa_e = TEE_ROUNDUP(pa + len, SECTION_SIZE);
+ uint32_t flags;
+
+ mm = tee_mm_alloc(&tee_mmu_virt_kmap, pa_e - pa_s);
+ if (mm == NULL)
+ return TEE_ERROR_OUT_OF_MEMORY;
+
+ /*
+ * check memory attributes (must be either secure or non-secure)
+ *
+ * Warning: platform dependency: what is cached and what is uncached.
+ */
+ flags = TEE_MMU_SECTION_KDATA;
+ if (tee_pbuf_is_sec(pa, len) == true) {
+ flags |= TEE_MMU_SECTION_OIWBWA;
+ } else if (tee_pbuf_is_non_sec(pa, len) == true) {
+ flags |= TEE_MMU_SECTION_NS;
+ if (core_mmu_is_shm_cached())
+ flags |= TEE_MMU_SECTION_OIWBWA;
+ else
+ flags |= TEE_MMU_SECTION_NOCACHE;
+ } else {
+ return TEE_ERROR_GENERIC;
+ }
+
+ for (n = 0; n < tee_mm_get_size(mm); n++)
+ l1[n + tee_mm_get_offset(mm)] =
+ ((n + py_offset) << SECTION_SHIFT) | flags;
+
+ core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
+
+ *va = (void *)(tee_mm_get_smem(mm) + (pa & SECTION_MASK));
+ return TEE_SUCCESS;
+}
+
+void tee_mmu_kunmap(void *va, size_t len)
+{
+ size_t n;
+ tee_mm_entry_t *mm;
+ uint32_t *l1 = (uint32_t *)TEE_MMU_UL1_KERN_BASE;
+
+ mm = tee_mm_find(&tee_mmu_virt_kmap, (uint32_t) va);
+ if (mm == NULL || len > tee_mm_get_bytes(mm))
+ return; /* Invalid range, not much to do */
+
+ /* Clear the mmu entries */
+ for (n = 0; n < tee_mm_get_size(mm); n++)
+ l1[n + tee_mm_get_offset(mm)] = 0;
+
+ core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
+ tee_mm_free(mm);
+}
+
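A typical kmap round-trip then looks like the sketch below; it assumes a tee_mmu.h header declaring the helpers above, and calls tee_mmu_kmap_helper() directly (whether a wrapper exists is not shown in this file):

```c
/* Assumes a tee_mmu.h header declaring the helpers above. */
#include <stddef.h>
#include <tee_mmu.h>

static void kmap_example(tee_paddr_t pa, size_t len)
{
	void *va;

	if (tee_mmu_kmap_helper(pa, len, &va) != TEE_SUCCESS)
		return;

	/* ... access the physical buffer through va ... */

	tee_mmu_kunmap(va, len);
}
```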
+TEE_Result tee_mmu_kmap_pa2va_helper(void *pa, void **va)
+{
+ size_t n;
+
+ for (n = TEE_MMU_UL1_NUM_USER_ENTRIES;
+ n < TEE_MMU_UL1_NUM_ENTRIES;
+ n++) {
+ if (TEE_MMU_UL1_ENTRY(n) != 0 &&
+ (uint32_t) pa >= (TEE_MMU_UL1_ENTRY(n) & ~SECTION_MASK) &&
+ (uint32_t) pa < ((TEE_MMU_UL1_ENTRY(n) & ~SECTION_MASK)
+ + (1 << SECTION_SHIFT))) {
+ *va = (void *)((n << SECTION_SHIFT) +
+ ((uint32_t) pa & SECTION_MASK));
+ return TEE_SUCCESS;
+ }
+ }
+ return TEE_ERROR_ACCESS_DENIED;
+}
+
+TEE_Result tee_mmu_kmap_va2pa_helper(void *va, void **pa)
+{
+ uint32_t n = (uint32_t) va >> SECTION_SHIFT;
+
+ /* only kernel-map entries can be translated here */
+ if (n < TEE_MMU_UL1_NUM_USER_ENTRIES || n >= TEE_MMU_UL1_NUM_ENTRIES)
+ return TEE_ERROR_ACCESS_DENIED;
+ *pa = (void *)((TEE_MMU_UL1_ENTRY(n) & ~SECTION_MASK) |
+ ((uint32_t) va & SECTION_MASK));
+ return TEE_SUCCESS;
+}
+
+bool tee_mmu_kmap_is_mapped(void *va, size_t len)
+{
+ tee_vaddr_t a = (tee_vaddr_t) va;
+ tee_mm_entry_t *mm = tee_mm_find(&tee_mmu_virt_kmap, a);
+
+ if (mm == NULL)
+ return false;
+
+ if ((a + len) > (tee_mm_get_smem(mm) + tee_mm_get_bytes(mm)))
+ return false;
+
+ return true;
+}
+
+bool tee_mmu_is_kernel_mapping(void)
+{
+ return (tee_mmu_get_ttbr0() == core_mmu_get_ttbr0());
+}
+
+void teecore_init_ta_ram(void)
+{
+ unsigned int s, e;
+
+ /* get virtual addr/size of RAM where TAs are loaded/executed */
+ core_mmu_get_mem_by_type(MEM_AREA_TA_RAM, &s, &e);
+
+ TEE_ASSERT((s & (SECTION_SIZE - 1)) == 0);
+ TEE_ASSERT((e & (SECTION_SIZE - 1)) == 0);
+ /* extra check: we could rely on core_mmu_get_mem_by_type() */
+ TEE_ASSERT(tee_vbuf_is_sec(s, e - s) == true);
+
+ TEE_ASSERT(tee_mm_is_empty(&tee_mm_sec_ddr));
+
+ /* remove previous config and init TA ddr memory pool */
+ tee_mm_final(&tee_mm_sec_ddr);
+ tee_mm_init(&tee_mm_sec_ddr, s, e, SECTION_SHIFT, TEE_MM_POOL_NO_FLAGS);
+}
+
+void teecore_init_pub_ram(void)
+{
+ unsigned int s, e;
+ unsigned int nsec_tee_size = 32 * 1024;
+
+ /* get virtual addr/size of NSec shared mem allocated from teecore */
+ core_mmu_get_mem_by_type(MEM_AREA_NSEC_SHM, &s, &e);
+
+ TEE_ASSERT(s < e);
+ TEE_ASSERT((s & (SECTION_SIZE - 1)) == 0);
+ TEE_ASSERT((e & (SECTION_SIZE - 1)) == 0);
+ /* extra check: we could rely on core_mmu_get_mem_by_type() */
+ TEE_ASSERT(tee_vbuf_is_non_sec(s, e - s) == true);
+
+ /*
+ * The first 32kBytes are allocated from teecore.
+ * The remainder is under control of the NSec allocator.
+ */
+ TEE_ASSERT((e - s) > nsec_tee_size);
+
+ TEE_ASSERT(tee_mm_is_empty(&tee_mm_pub_ddr));
+ tee_mm_final(&tee_mm_pub_ddr);
+ tee_mm_init(&tee_mm_pub_ddr, s, s + nsec_tee_size, SMALL_PAGE_SHIFT,
+ TEE_MM_POOL_NO_FLAGS);
+
+ s += nsec_tee_size;
+ default_nsec_shm_paddr = s;
+ default_nsec_shm_size = e - s;
+}
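+
+/*
+ * Editor's worked example (not in the original code): for a 1MB
+ * MEM_AREA_NSEC_SHM starting at s, [s, s + 32KB) feeds the tee_mm_pub_ddr
+ * pool used by teecore, while the remaining 1MB - 32KB starting at
+ * s + 32KB is published as default_nsec_shm_paddr/size for the
+ * non-secure world allocator.
+ */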
+
+void *tee_mmu_ioremap(tee_paddr_t pa, size_t len)
+{
+ /* return (void *)ioremap((void *)pa, len); */
+ return (void *)NULL;
+}
+
+void tee_mmu_iounmap(void *va)
+{
+ /* linux API */
+ /* iounmap(va); */
+}
+
+static uint32_t section_to_teesmc_cache_attr(uint32_t sect)
+{
+
+ if (sect & TEE_MMU_SECTION_TEX(4)) {
+ uint32_t attr = 0;
+
+ if (sect & TEE_MMU_SECTION_TEX(2))
+ attr |= TEESMC_ATTR_CACHE_O_WRITE_THR;
+ if (sect & TEE_MMU_SECTION_TEX(1))
+ attr |= TEESMC_ATTR_CACHE_I_WRITE_BACK;
+ if (sect & TEE_MMU_SECTION_C)
+ attr |= TEESMC_ATTR_CACHE_O_WRITE_THR;
+ if (sect & TEE_MMU_SECTION_B)
+ attr |= TEESMC_ATTR_CACHE_O_WRITE_BACK;
+ assert(attr == TEESMC_ATTR_CACHE_DEFAULT);
+ return attr;
+ }
+
+ switch (sect & TEE_MMU_SECTION_CACHEMASK) {
+ /* outer and inner write-back */
+ /* no write-allocate */
+ case TEE_MMU_SECTION_TEX(0) | TEE_MMU_SECTION_B:
+ /* write-allocate */
+ case TEE_MMU_SECTION_TEX(1) | TEE_MMU_SECTION_B | TEE_MMU_SECTION_C:
+ return TEESMC_ATTR_CACHE_I_WRITE_BACK |
+ TEESMC_ATTR_CACHE_O_WRITE_BACK;
+
+ /* outer and inner write-through */
+ case TEE_MMU_SECTION_TEX(0) | TEE_MMU_SECTION_C:
+ panic();
+ return TEESMC_ATTR_CACHE_I_WRITE_THR |
+ TEESMC_ATTR_CACHE_O_WRITE_THR;
+
+ /* outer and inner no-cache */
+ case TEE_MMU_SECTION_TEX(1):
+ panic();
+ return TEESMC_ATTR_CACHE_I_NONCACHE |
+ TEESMC_ATTR_CACHE_O_NONCACHE;
+ default:
+ panic();
+ }
+}
+
+uint32_t tee_mmu_kmap_get_cache_attr(void *va)
+{
+ uint32_t n = (vaddr_t)va >> SECTION_SHIFT;
+
+ assert(n >= TEE_MMU_UL1_NUM_USER_ENTRIES &&
+ n < TEE_MMU_UL1_NUM_ENTRIES);
+
+ return section_to_teesmc_cache_attr(TEE_MMU_UL1_ENTRY(n));
+}
+
+uint32_t tee_mmu_user_get_cache_attr(struct tee_ta_ctx *ctx, void *va)
+{
+ uint32_t n = (vaddr_t)va >> SECTION_SHIFT;
+
+ assert(n < ctx->mmu->size);
+
+ return section_to_teesmc_cache_attr(ctx->mmu->table[n]);
+}
diff --git a/core/arch/arm32/mm/tee_mmu_unpg.c b/core/arch/arm32/mm/tee_mmu_unpg.c
new file mode 100644
index 00000000000..73da99f3963
--- /dev/null
+++ b/core/arch/arm32/mm/tee_mmu_unpg.c
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include
+#include
+#include
+
+void tee_mmu_get_map(struct tee_mmu_mapping *map)
+{
+ if (map == NULL)
+ return;
+
+ map->ttbr0 = tee_mmu_get_ttbr0();
+ map->ctxid = tee_mmu_get_context();
+}
+
+void tee_mmu_set_map(struct tee_mmu_mapping *map)
+{
+ if (map == NULL)
+ tee_mmu_switch(core_mmu_get_ttbr0(), 0);
+ else
+ tee_mmu_switch(map->ttbr0, map->ctxid);
+
+ invalidate_mmu_tlb();
+}
diff --git a/core/arch/arm32/mm/tee_mmu_unpg_asm.S b/core/arch/arm32/mm/tee_mmu_unpg_asm.S
new file mode 100644
index 00000000000..1867b02c217
--- /dev/null
+++ b/core/arch/arm32/mm/tee_mmu_unpg_asm.S
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include
+
+ .global tee_mmu_switch
+ .global tee_mmu_set_context
+ .global tee_mmu_invtlb_asid
+ .global tee_mmu_get_ttbr0
+ .global tee_mmu_get_context
+
+ .section .text
+ .balign 4
+ .code 32
+
+ /*
+ * INPUT - r0, ttbr0 base
+ * r1, Context ID
+ *
+ * Disable IRQ/FIQs during the operation (atomic ASID/TTBR0 loading).
+ */
+ .func tee_mmu_switch
+tee_mmu_switch:
+
+ /* save/mask IRQs/FIQs */
+ mrs r2, cpsr
+ and r3, r2, #CPSR_FIQ_IRQ_MASK
+ orr r2, r2, #CPSR_FIQ_IRQ_MASK
+ msr cpsr_cxsf, r2
+ /* set reserved context id */
+ dsb /* ARM erratum 754322 */
+ mov r2, #0
+ mcr p15, 0, r2, c13, c0, 1
+ isb
+ /* set ttbr0 */
+ mcr p15, 0, r0, c2, c0, 0
+ isb
+ /* set context id */
+ mcr p15, 0, r1, c13, c0, 1
+ isb
+ /* restore irq/fiq mask */
+ mrs r1, cpsr
+ bic r1, r1, #CPSR_FIQ_IRQ_MASK
+ orr r1, r1, r3
+ msr cpsr_cxsf, r1
+
+ bx lr
+ .endfunc
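+
+ /*
+ * Editor's note (not in the original code): writing the reserved
+ * ASID (0) first prevents speculative TLB fills from tagging
+ * entries of the old translation tables with the new ASID while
+ * TTBR0 and CONTEXTIDR are updated in two separate, non-atomic
+ * steps (cf. the ARM erratum 754322 workaround above).
+ */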
+
+ /*
+ * INPUT - r0, Context ID
+ */
+ .func tee_mmu_set_context
+tee_mmu_set_context:
+ dsb
+ mcr p15, 0, r0, c13, c0, 1
+ isb
+ bx lr
+ .endfunc
+
+ /*
+ * INPUT - r0, ASID to be invalidated
+ */
+ .func tee_mmu_invtlb_asid
+tee_mmu_invtlb_asid:
+ mcr p15, 0, r0, c8, c3, 2
+ dsb
+ /* No isb due to that we know we will take an exception
+ * before we need a clean TLB */
+ bx lr
+ .endfunc
+
+ /*
+ * OUTPUT - r0, ttbr0 base
+ */
+ .func tee_mmu_get_ttbr0
+tee_mmu_get_ttbr0:
+ mrc p15, 0, r0, c2, c0, 0
+ bx lr
+ .endfunc
+
+ /*
+ * INPUT - r0, Context ID
+ */
+ .func tee_mmu_get_context
+tee_mmu_get_context:
+ mrc p15, 0, r0, c13, c0, 1
+ bx lr
+ .endfunc
diff --git a/core/arch/arm32/mm/tee_pager.c b/core/arch/arm32/mm/tee_pager.c
new file mode 100644
index 00000000000..ed68b9aef10
--- /dev/null
+++ b/core/arch/arm32/mm/tee_pager.c
@@ -0,0 +1,134 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include
+#include
+#include
+#include
+#include
+
+void tee_pager_add_pages(tee_vaddr_t vaddr, size_t npages)
+{
+ size_t n;
+
+ /* setup memory */
+ for (n = 0; n < npages; n++) {
+ struct tee_pager_pmem *apage;
+ tee_vaddr_t va = vaddr + n * SMALL_PAGE_SIZE;
+ uint32_t *mmu_entry = tee_pager_get_mmu_entry(va);
+
+ /* Ignore unmapped entries */
+ if (*mmu_entry == 0)
+ continue;
+
+ apage = malloc(sizeof(struct tee_pager_pmem));
+ if (apage == NULL) {
+ DMSG("Can't allocate memory");
+ while (1)
+ ;
+ }
+
+ apage->mmu_entry = (uint32_t *)mmu_entry;
+
+ /*
+ * Set to TEE_PAGER_NO_ACCESS_ATTRIBUTES and not
+ * TEE_PAGER_PAGE_UNLOADED since the pager would mistake it for a
+ * hidden page in case the virtual address was reused before
+ * the physical page was used for another virtual page.
+ */
+ *mmu_entry = (*mmu_entry & ~SMALL_PAGE_MASK) |
+ TEE_PAGER_NO_ACCESS_ATTRIBUTES;
+ apage->ctx_handle = NULL;
+
+ TAILQ_INSERT_TAIL(&tee_pager_pmem_head, apage, link);
+ tee_pager_npages++;
+ }
+
+ /* Invalidate secure TLB */
+ core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
+}
+
+void tee_pager_unmap(uint32_t page, uint8_t psize)
+{
+ int i;
+
+ if ((page & 0xFFF) != 0) {
+ EMSG("Invalid page address");
+ while (1)
+ ;
+ }
+
+ for (i = 0; i < psize; i++) {
+ uint32_t addr = page + (i << SMALL_PAGE_SHIFT);
+ uint32_t *mmu_entry = tee_pager_get_mmu_entry(addr);
+
+ if (*mmu_entry != 0) {
+ struct tee_pager_pmem *apage;
+
+ /* Invalidate mmu_entry */
+ *mmu_entry &= ~SMALL_PAGE_MASK;
+
+ /*
+ * Unregister the session from the page entry using
+ * this mmu_entry.
+ */
+ TAILQ_FOREACH(apage, &tee_pager_pmem_head, link) {
+ if (apage->mmu_entry == (uint32_t *)mmu_entry) {
+ apage->ctx_handle = NULL;
+ break;
+ }
+ }
+
+ if (apage == NULL) {
+ EMSG("Physical page to unmap not found");
+ while (1)
+ ;
+ }
+ }
+ }
+
+ /* Invalidate secure TLB */
+ core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
+}
+
+void tee_pager_unhide_all_pages(void)
+{
+ struct tee_pager_pmem *apage;
+ bool has_hidden_page = false;
+
+ TAILQ_FOREACH(apage, &tee_pager_pmem_head, link) {
+ if ((*apage->mmu_entry & 0xfff) == TEE_PAGER_PAGE_UNLOADED) {
+ /* Page is hidden, unhide it */
+ has_hidden_page = true;
+ *apage->mmu_entry |= 0x10;
+ }
+ }
+
+ /* Only invalidate secure TLB if something was changed */
+ if (has_hidden_page)
+ core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
+}
diff --git a/core/arch/arm32/mm/tee_pager_unpg.c b/core/arch/arm32/mm/tee_pager_unpg.c
new file mode 100644
index 00000000000..9c81efa4291
--- /dev/null
+++ b/core/arch/arm32/mm/tee_pager_unpg.c
@@ -0,0 +1,386 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include
+#include
+#include
+#include
+#include
+
+#include
+
+#include
+#include
+#include
+#include
+#include
+
+/* Dummies to allow the macros to be left at their current places below */
+#define TEE_PAGER_RECORD_FAULT(x) do { } while (0)
+#define TEE_PAGER_SET_OLD_VA(x) do { } while (0)
+#define TEE_PAGER_SET_PA(x) do { } while (0)
+#define TEE_PAGER_SET_COPY(x) do { } while (0)
+#define TEE_PAGER_SET_UNHIDE(x) do { } while (0)
+#define TEE_PAGER_DUMP_RECORDING() do { } while (0)
+#define TEE_PRINT_SAVED_REGS() do { } while (0)
+
+/* The list of physical pages. The first page in the list is the oldest */
+struct tee_pager_pmem_head tee_pager_pmem_head =
+TAILQ_HEAD_INITIALIZER(tee_pager_pmem_head);
+
+/* number of pages hidden */
+#define TEE_PAGER_NHIDE (tee_pager_npages / 3)
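+/*
+ * Editor's example (not in the original code): with tee_pager_npages == 12,
+ * each handled abort re-hides the 4 pages at the head of the list (the
+ * oldest ones), so later accesses to them fault again and refresh their
+ * position in the LRU-like ordering.
+ */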
+
+/* number of pages */
+uint8_t tee_pager_npages;
+
+static bool tee_pager_is_monitor_exception(void)
+{
+ return (tee_pager_get_spsr() & TEE_PAGER_SPSR_MODE_MASK) ==
+ TEE_PAGER_SPSR_MODE_MON;
+}
+
+bool tee_pager_is_user_exception(void)
+{
+ return (tee_pager_get_spsr() & TEE_PAGER_SPSR_MODE_MASK) ==
+ TEE_PAGER_SPSR_MODE_USR;
+}
+
+bool tee_pager_is_abort_in_abort_handler(void)
+{
+ return (tee_pager_get_spsr() & TEE_PAGER_SPSR_MODE_MASK) ==
+ TEE_PAGER_SPSR_MODE_ABT;
+}
+
+static void tee_pager_print_abort(const uint32_t addr, const uint32_t fsr,
+ const uint32_t pc, const uint32_t flags,
+ const uint32_t dbgpcsr)
+{
+ DMSG("%s at 0x%x: FSR 0x%x PC 0x%x TTBR0 0x%X CONTEXIDR 0x%X",
+ (flags == TEE_PAGER_DATA_ABORT) ? "data-abort" :
+ (flags == TEE_PAGER_PREF_ABORT) ? "prefetch-abort" : "undef-abort",
+ addr, fsr, pc, tee_mmu_get_ttbr0(), tee_mmu_get_context());
+ DMSG("CPUID %dd DBGPCSR 0x%x SPSR_abt 0x%x",
+ TEE_PAGER_GET_CPUID_asm(), dbgpcsr, tee_pager_get_spsr());
+}
+
+static void tee_pager_print_error_abort(const uint32_t addr, const uint32_t fsr,
+ const uint32_t pc, const uint32_t flags,
+ const uint32_t dbgpcsr)
+{
+ EMSG("%s at 0x%x\n"
+ "FSR 0x%x PC 0x%x TTBR0 0x%X CONTEXIDR 0x%X\n"
+ "CPUID 0x%x DBGPCSR 0x%x CPSR 0x%x (read from SPSR)",
+ (flags == TEE_PAGER_DATA_ABORT) ? "data-abort" :
+ (flags == TEE_PAGER_PREF_ABORT) ? "prefetch-abort" : "undef-abort",
+ addr, fsr, pc, tee_mmu_get_ttbr0(), tee_mmu_get_context(),
+ TEE_PAGER_GET_CPUID_asm(), dbgpcsr, tee_pager_get_spsr());
+}
+
+static uint32_t tee_pager_handle_abort(const uint32_t flags, const uint32_t pc,
+ const uint32_t dbgpcsr)
+{
+ struct tee_pager_pmem *apage;
+ uint32_t addr;
+ uint32_t w_addr;
+ uint32_t i;
+ uint32_t fsr;
+
+ if (flags == TEE_PAGER_DATA_ABORT) {
+ fsr = TEE_PAGER_GET_DFSR_asm();
+ addr = TEE_PAGER_GET_DFAR_asm();
+ } else {
+ if (flags == TEE_PAGER_PREF_ABORT) {
+ fsr = TEE_PAGER_GET_IFSR_asm();
+ addr = TEE_PAGER_GET_IFAR_asm();
+ } else {
+ fsr = 0;
+ addr = pc;
+ }
+ }
+
+ w_addr = addr;
+
+ /*
+ * w_addr is the address for which we intend to handle the page
+ * fault. This is normally the same as addr, except when a Thumb
+ * instruction is spread over two pages and the first page already
+ * is available. In that case addr will still be the beginning of
+ * the instruction even if the fault really is for the second page.
+ */
+
+ /* In case of multithreaded version, this section must be protected */
+
+ if (tee_pager_is_user_exception()) {
+ tee_pager_print_error_abort(addr, fsr, pc, flags, dbgpcsr);
+ EMSG("[TEE_PAGER] abort in User mode (TA will panic)");
+ return TEE_PAGER_USER_TA_PANIC;
+ }
+
+ if (tee_pager_is_monitor_exception())
+ EMSG("[TEE_PAGER] abort in monitor!");
+
+ if (tee_pager_is_abort_in_abort_handler()) {
+ tee_pager_print_error_abort(addr, fsr, pc, flags, dbgpcsr);
+ EMSG("[TEE_PAGER] abort in abort handler (trap CPU)");
+ while (1)
+ ;
+ }
+
+ if (flags == TEE_PAGER_UNDEF_ABORT) {
+ tee_pager_print_error_abort(addr, fsr, pc, flags, dbgpcsr);
+ EMSG("[TEE_PAGER] undefined abort (trap CPU)");
+ while (1)
+ ;
+ }
+
+ switch (fsr & TEE_FSR_FS_MASK) {
+ case TEE_FSR_FS_ALIGNMENT_FAULT: /* Only possible for data abort */
+ tee_pager_print_error_abort(addr, fsr, pc, flags, dbgpcsr);
+ EMSG("[TEE_PAGER] alignement fault! (trap CPU)");
+ while (1)
+ ;
+
+ case TEE_FSR_FS_DEBUG_EVENT:
+ tee_pager_print_abort(addr, fsr, pc, flags, dbgpcsr);
+ DMSG("[TEE_PAGER] Ignoring debug event!");
+ return TEE_PAGER_NORMAL_RETURN;
+
+ case TEE_FSR_FS_ASYNC_EXTERNAL_ABORT: /* Only possible for data abort */
+ tee_pager_print_abort(addr, fsr, pc, flags, dbgpcsr);
+ DMSG("[TEE_PAGER] Ignoring async external abort!");
+ return TEE_PAGER_NORMAL_RETURN;
+
+ default:
+#ifdef PAGER_DEBUG_PRINT
+ tee_pager_print_abort(addr, fsr, pc, flags, dbgpcsr);
+#endif
+ break;
+ }
+
+#ifndef CFG_TEE_PAGER
+ /*
+ * Until PAGER is supported, trap CPU here.
+ */
+ tee_pager_print_error_abort(addr, fsr, pc, flags, dbgpcsr);
+ EMSG("Unexpected page fault! Trap CPU");
+ while (1)
+ ;
+#endif
+
+ TEE_PAGER_RECORD_FAULT(addr);
+
+ /* check if the access is valid */
+ if (!tee_mm_validate(&tee_mm_vcore, w_addr)) {
+ tee_pager_print_abort(addr, fsr, pc, flags, dbgpcsr);
+ DMSG("Invalid addr 0x%" PRIx32, addr);
+ TEE_PRINT_SAVED_REGS();
+ TEE_PAGER_DUMP_RECORDING();
+ while (1)
+ ;
+ }
+
+ /* check if page is hidden */
+ TAILQ_FOREACH(apage, &tee_pager_pmem_head, link) {
+ if (((*apage->mmu_entry & 0xFFF) == TEE_PAGER_PAGE_UNLOADED) &&
+ apage->ctx_handle != NULL &&
+ w_addr >= TEE_PAGER_GET_VA(apage->mmu_entry) &&
+ w_addr <
+ TEE_PAGER_GET_VA(apage->mmu_entry) + SMALL_PAGE_SIZE) {
+ /* page is hidden, show and move to back */
+ *(apage->mmu_entry) |= TEE_MMU_L2SP_PRIV_ACC;
+ TEE_PAGER_SET_UNHIDE(1);
+ TEE_PAGER_SET_PA((*(apage->mmu_entry)) & 0xFFFFF000);
+
+ TAILQ_REMOVE(&tee_pager_pmem_head, apage, link);
+ TAILQ_INSERT_TAIL(&tee_pager_pmem_head, apage, link);
+
+ w_addr = 0;
+
+ break;
+ }
+ }
+
+ if (apage == NULL) {
+ /* the page wasn't hidden */
+ uint32_t pa;
+ uint32_t *mmu_entry =
+ (uint32_t *)tee_pager_get_mmu_entry((tee_vaddr_t) w_addr);
+
+ if (*mmu_entry != 0) {
+ /*
+ * There's a pmem entry using this mmu entry, let's use
+ * that entry in the new mapping.
+ */
+ TAILQ_FOREACH(apage, &tee_pager_pmem_head, link) {
+ if (apage->mmu_entry == mmu_entry)
+ break;
+ }
+ if (apage == NULL) {
+ tee_pager_print_abort(addr, fsr, pc, flags,
+ dbgpcsr);
+ DMSG("Couldn't find pmem for mmu_entry %p",
+ (void *)mmu_entry);
+ while (1)
+ ;
+ }
+ } else {
+ apage = TAILQ_FIRST(&tee_pager_pmem_head);
+ if (apage == NULL) {
+ tee_pager_print_abort(addr, fsr, pc, flags,
+ dbgpcsr);
+ DMSG("No pmem entries");
+ while (1)
+ ;
+ }
+ }
+
+ TEE_PAGER_SET_OLD_VA(TEE_PAGER_GET_VA(apage->mmu_entry));
+
+ /* save rw data if needed */
+ if ((*apage->mmu_entry & 0xFFF) != 0 &&
+ tee_ta_check_rw(TEE_PAGER_GET_VA(apage->mmu_entry),
+ apage->ctx_handle)) {
+ /* make sure the page is accessible */
+ if (((*apage->mmu_entry & 0xFFF) ==
+ TEE_PAGER_PAGE_UNLOADED)) {
+ *apage->mmu_entry |= TEE_MMU_L2SP_PRIV_ACC;
+
+ /* Invalidate secure TLB */
+ core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
+ }
+
+ tee_ta_save_rw(TEE_PAGER_GET_VA(apage->mmu_entry),
+ apage->ctx_handle);
+ }
+
+ /* move page to back */
+ TAILQ_REMOVE(&tee_pager_pmem_head, apage, link);
+ TAILQ_INSERT_TAIL(&tee_pager_pmem_head, apage, link);
+
+ /* add page to mmu table, small pages [31:12]PA */
+ pa = *apage->mmu_entry & 0xFFFFF000;
+ TEE_PAGER_SET_PA(pa);
+
+ *apage->mmu_entry = 0;
+ apage->mmu_entry = mmu_entry;
+
+ *apage->mmu_entry = pa | TEE_PAGER_PAGE_LOADED;
+
+#ifdef PAGER_DEBUG_PRINT
+ DMSG("Mapped %p -> %p", w_addr & 0xFFFFF000, pa);
+#endif
+ }
+
+ /* Hide */
+ {
+ struct tee_pager_pmem *bpage;
+
+ i = 0;
+ TAILQ_FOREACH(bpage, &tee_pager_pmem_head, link) {
+ if (i >= TEE_PAGER_NHIDE)
+ break;
+ i++;
+ *bpage->mmu_entry =
+ TEE_MMU_L2SP_CLEAR_ACC(*bpage->mmu_entry);
+ }
+ }
+
+ /* Invalidate secure TLB */
+ core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
+
+ if (w_addr) {
+ /* load page code & data */
+ apage->ctx_handle = tee_ta_load_page(w_addr);
+ TEE_PAGER_SET_COPY(1);
+
+ core_cache_maintenance(DCACHE_AREA_CLEAN,
+ (void *)(w_addr & 0xFFFFF000),
+ SMALL_PAGE_SIZE);
+
+ core_cache_maintenance(ICACHE_AREA_INVALIDATE,
+ (void *)(w_addr & 0xFFFFF000),
+ SMALL_PAGE_SIZE);
+ }
+
+ /* end protect (multithreded version) */
+
+ /*
+ * Until now we've been running with IRQ blocked. Let's enable IRQ now
+ * when it should be safe to do further processing with them enabled.
+ *
+ * It should be possible to enable IRQ earlier, but MMU updates and
+ * cache maintenance may need some tweaking to guarantee coherency in
+ * case we switch CPU in the middle of an operation.
+ */
+ tee_pager_restore_irq();
+
+ return TEE_PAGER_NORMAL_RETURN;
+}
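+
+/*
+ * Editor's summary of the fault path above (not in the original code):
+ * 1) classify the abort and trap the CPU on fatal cases,
+ * 2) if the page was merely hidden, restore its access rights,
+ * 3) otherwise evict the oldest physical page (saving RW data if needed)
+ *    and remap it at the faulting address,
+ * 4) re-hide the TEE_PAGER_NHIDE oldest pages,
+ * 5) invalidate the TLB and, for a newly mapped page, load its contents
+ *    and clean/invalidate the caches.
+ */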
+
+void tee_pager_abort_handler(uint32_t abort_type,
+ struct thread_abort_regs *regs)
+{
+ static const uint32_t abort_type_to_flags[] = {
+ TEE_PAGER_UNDEF_ABORT,
+ TEE_PAGER_PREF_ABORT,
+ TEE_PAGER_DATA_ABORT,
+ };
+ uint32_t res;
+
+ res = tee_pager_handle_abort(abort_type_to_flags[abort_type],
+ regs->lr, 0);
+ if (res == TEE_PAGER_USER_TA_PANIC) {
+ /*
+ * It was a user exception, stop user execution and return
+ * to TEE Core.
+ */
+ regs->r0 = 0xdeadbeef;
+ regs->lr = (uint32_t)tee_svc_user_ta_panic_from_pager;
+ regs->spsr = read_cpsr();
+ regs->spsr &= ~TEE_PAGER_SPSR_MODE_MASK;
+ regs->spsr |= TEE_PAGER_SPSR_MODE_SVC;
+ /* Select Thumb or ARM mode */
+ if (regs->lr & 1)
+ regs->spsr |= CPSR_T;
+ else
+ regs->spsr &= ~CPSR_T;
+ }
+}
+
+void tee_pager_restore_irq(void)
+{
+ /*
+ * Restores the settings of IRQ as saved when entering secure
+ * world, using something like
+ * INTERRUPT_ENABLE(SEC_ENV_SETTINGS_READ() & SEC_ROM_IRQ_ENABLE_MASK);
+ */
+
+ /* Make a crash on purpose as this is not implemented yet */
+ int *p = 0;
+ *p = 1;
+}
diff --git a/core/arch/arm32/mm/tee_pager_unpg_asm.S b/core/arch/arm32/mm/tee_pager_unpg_asm.S
new file mode 100644
index 00000000000..ed0a09547d3
--- /dev/null
+++ b/core/arch/arm32/mm/tee_pager_unpg_asm.S
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include
+#include
+
+.global TEE_PAGER_INIT_asm
+.global TEE_PAGER_VECT_TABLE
+.global TEE_PAGER_GET_IFAR_asm
+.global TEE_PAGER_GET_DFAR_asm
+.global TEE_PAGER_GET_DFSR_asm
+.global TEE_PAGER_GET_IFSR_asm
+.global TEE_PAGER_GET_CPUID_asm
+.global tee_pager_get_spsr
+.global TEE_PAGER_RES_asm
+
+
+.set TEE_PAGER_reset_addr, 0xffff0000
+.set TEE_PAGER_res_addr, 0xffff0014
+.set TEE_PAGER_fiq_addr, 0xffff001c
+
+.section .text
+.balign 4
+.code 32
+
+.func TEE_PAGER_GET_CPUID_asm
+TEE_PAGER_GET_CPUID_asm:
+ mrc p15,0,r0,c0,c0,5 @ Read Multiprocessor Affinity Register
+ bx lr
+.endfunc
+
+.func TEE_PAGER_GET_IFAR_asm
+TEE_PAGER_GET_IFAR_asm:
+ mrc p15, 0, r0, c6, c0, 2
+ bx lr
+.endfunc
+
+.func TEE_PAGER_GET_DFAR_asm
+TEE_PAGER_GET_DFAR_asm:
+ mrc p15, 0, r0, c6, c0, 0
+ bx lr
+.endfunc
+
+.func TEE_PAGER_GET_DFSR_asm
+TEE_PAGER_GET_DFSR_asm:
+ mrc p15, 0, r0, c5, c0, 0
+ bx lr
+.endfunc
+
+.func TEE_PAGER_GET_IFSR_asm
+TEE_PAGER_GET_IFSR_asm:
+ mrc p15, 0, r0, c5, c0, 1
+ bx lr
+.endfunc
+
+/* uint32_t tee_pager_get_spsr(void); */
+.func tee_pager_get_spsr
+tee_pager_get_spsr:
+ mrs r0, spsr
+ bx lr
+.endfunc
+
+.func TEE_PAGER_INIT_asm
+TEE_PAGER_INIT_asm:
+ push {r4, lr}
+
+ @ Update configuration of TTBR0
+ mrc p15, 0, r4, c2, c0, 0
+ mcr p15, 0, r4, c2, c0, 0
+
+ @ Change to TTBR1
+ mcr p15, 0, r4, c2, c0, 1
+ mrc p15, 0, r4, c2, c0, 2
+ orr r4, r4, #7
+ mcr p15, 0, r4, c2, c0, 2
+
+ pop {r4, pc}
+
+.endfunc
+
diff --git a/core/arch/arm32/plat-orly2/.gitignore b/core/arch/arm32/plat-orly2/.gitignore
new file mode 100644
index 00000000000..49b7bb9eb2b
--- /dev/null
+++ b/core/arch/arm32/plat-orly2/.gitignore
@@ -0,0 +1 @@
+System.map
diff --git a/core/arch/arm32/plat-orly2/asc.S b/core/arch/arm32/plat-orly2/asc.S
new file mode 100644
index 00000000000..4cd3e364092
--- /dev/null
+++ b/core/arch/arm32/plat-orly2/asc.S
@@ -0,0 +1,181 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef STXH416_LPM_PERIPH_BASE
+#define STXH416_LPM_PERIPH_BASE 0xFE400000
+#endif
+
+#define ASC_NUM 21
+
+#ifndef ST_ASC21_REGS_BASE
+#define ST_ASC21_REGS_BASE (STXH416_LPM_PERIPH_BASE + 0x00131000)
+#endif
+
+#define ST_32BIT_REG(address) (address)
+
+/* Asynchronous Serial Controller control registers */
+#ifndef ST_ASC_REGS_BASE
+#define ST_ASC_REGS_BASE(n) ST_ASC##n##_REGS_BASE
+#endif /* !ST_ASC_REGS_BASE */
+#define ST_ASC_BAUDRATE(n) ST_32BIT_REG(ST_ASC_REGS_BASE(n) + 0x00)
+#define ST_ASC_TXBUFFER(n) ST_32BIT_REG(ST_ASC_REGS_BASE(n) + 0x04)
+#define ST_ASC_RXBUFFER(n) ST_32BIT_REG(ST_ASC_REGS_BASE(n) + 0x08)
+#define ST_ASC_CONTROL(n) ST_32BIT_REG(ST_ASC_REGS_BASE(n) + 0x0c)
+#define ST_ASC_INTENABLE(n) ST_32BIT_REG(ST_ASC_REGS_BASE(n) + 0x10)
+#define ST_ASC_STATUS(n) ST_32BIT_REG(ST_ASC_REGS_BASE(n) + 0x14)
+#define ST_ASC_GUARDTIME(n) ST_32BIT_REG(ST_ASC_REGS_BASE(n) + 0x18)
+#define ST_ASC_TIMEOUT(n) ST_32BIT_REG(ST_ASC_REGS_BASE(n) + 0x1c)
+#define ST_ASC_TXRESET(n) ST_32BIT_REG(ST_ASC_REGS_BASE(n) + 0x20)
+#define ST_ASC_RXRESET(n) ST_32BIT_REG(ST_ASC_REGS_BASE(n) + 0x24)
+#define ST_ASC_RETRIES(n) ST_32BIT_REG(ST_ASC_REGS_BASE(n) + 0x28)
+
+#define BOARD_ASC_BAUDRATE_REG ST_ASC_BAUDRATE(ASC_NUM)
+#define BOARD_ASC_CONTROL_REG ST_ASC_CONTROL(ASC_NUM)
+#define BOARD_ASC_TXBUFFER_REG ST_ASC_TXBUFFER(ASC_NUM)
+#define BOARD_ASC_STATUS_REG ST_ASC_STATUS(ASC_NUM)
+#define BOARD_ASC_TXRESET_REG ST_ASC_TXRESET(ASC_NUM)
+#define BOARD_ASC_RXRESET_REG ST_ASC_RXRESET(ASC_NUM)
+
+.section .text
+.align 5
+
+/*
+ * int asc_init(void) - init ASC driver.
+ *
+ * Currently this only maps (via the MMU) the ASC register addresses.
+ * We rely on some other SW layer to enable the ASC IP (power/clamps/clocks/...)
+ */
+.global asc_init
+.type asc_init, %function
+
+asc_init:
+ /* TODO: ensure ASC is mapped (check against core_init_mmu()/core_mmu.c) */
+ ldr r0, =0
+ bx lr
+
+/*
+ * int __asc_xmit(char*) - Transmit a null-terminated string.
+ *
+ * R0 is pointer to null-terminated string
+ * Clobbers r0-r3
+ */
+ .global __asc_xmit
+ .type __asc_xmit, %function
+
+__asc_xmit:
+ LDR r2, =BOARD_ASC_TXBUFFER_REG
+ LDR r3, =BOARD_ASC_STATUS_REG
+
+ /* Output byte */
+nextchr:
+ /* Spin until TX FIFO ready */
+crwait:
+ LDR r1, [r3]
+ ANDS r1, r1, #0x04 /* AND TX FIFO HALF EMPTY flag */
+ BEQ crwait /* ANDS should have set Z bit if zero */
+
+ LDRB r1, [r0], #1
+ ANDS r1, r1, r1
+ BEQ asc_exit
+ CMP r1, #0xa /* r1 == \n (line feed) ? */
+ BNE notlf
+
+ /* Transmit an extra carriage return before each line feed */
+ LDR r1, =0x0d
+ STR r1, [r2]
+
+ LDR r1, =0x0a /* replace line feed */
+
+notlf:
+ /* Transmit character */
+ STR r1, [r2]
+
+ /* Keep going */
+ B nextchr
+asc_exit:
+ LDR r0, =0
+ BX lr
+
+/*
+ * void __asc_flush(void) - flush ASC tx fifo.
+ *
+ * Clobbers r0-r3
+ */
+ .global __asc_flush
+ .type __asc_flush, %function
+
+__asc_flush:
+ LDR r3, =BOARD_ASC_STATUS_REG
+
+flush_wait:
+ LDR r1, [r3]
+ ANDS r1, r1, #0x02 /* AND TX FIFO EMPTY flag */
+ BEQ flush_wait /* ANDS should have set Z bit if zero */
+
+ LDR r0, =0
+ BX lr
+
+/*
+ * int __asc_xmit_char(char) - Transmit a single character.
+ *
+ * R0 is the 1-byte character to be transmitted
+ * Clobbers r0-r3
+ */
+ .global __asc_xmit_char
+ .type __asc_xmit_char, %function
+
+__asc_xmit_char:
+ LDR r2, =BOARD_ASC_TXBUFFER_REG
+ LDR r3, =BOARD_ASC_STATUS_REG
+
+ /* Output byte */
+
+ /* Spin until TX FIFO ready */
+__asc_char_crwait:
+ LDR r1, [r3]
+ ANDS r1, r1, #0x04 /* AND TX FIFO HALF EMPTY flag */
+ BEQ __asc_char_crwait /* ANDS should have set Z bit if zero */
+
+ MOVS r1, r0
+ LDR r0, =0xFF
+ AND r1, r1, r0
+ BEQ __asc_char_exit
+ CMP r1, #0xa /* r1 == \n (line feed) ? */
+ BNE __asc_char_notlf
+
+ /* Transmit an extra carriage return before each line feed */
+ LDR r1, =0x0d
+ STR r1, [r2]
+
+ LDR r1, =0x0a /* replace line feed */
+
+__asc_char_notlf:
+ /* Transmit character */
+ STR r1, [r2]
+
+__asc_char_exit:
+ LDR r0, =0
+ BX lr
\ No newline at end of file
diff --git a/core/arch/arm32/plat-orly2/conf.mk b/core/arch/arm32/plat-orly2/conf.mk
new file mode 100644
index 00000000000..893faae617a
--- /dev/null
+++ b/core/arch/arm32/plat-orly2/conf.mk
@@ -0,0 +1,69 @@
+CROSS_PREFIX ?= armv7-linux
+CROSS_COMPILE ?= $(CROSS_PREFIX)-
+include mk/gcc.mk
+
+platform-cpuarch = cortex-a9
+platform-cflags = -mcpu=$(platform-cpuarch) -mthumb
+platform-cflags += -pipe -mthumb-interwork -mlong-calls
+platform-cflags += -fno-short-enums -mno-apcs-float -fno-common
+platform-aflags = -mcpu=$(platform-cpuarch)
+core-platform-cppflags = -I$(arch-dir)/include
+core-platform-cppflags += -DNUM_THREADS=2
+core-platform-cppflags += -DWITH_STACK_CANARIES=1
+user_ta-platform-cflags = -fpie
+
+DEBUG ?= 1
+ifeq ($(DEBUG),1)
+platform-cflags += -O0
+else
+platform-cflags += -Os
+endif
+platform-cflags += -g
+platform-aflags += -g
+
+core-platform-subdirs += \
+ $(addprefix $(arch-dir)/, kernel mm sm tee sta) $(platform-dir)
+
+libutil_with_isoc := y
+
+include mk/config.mk
+include $(platform-dir)/system_config.in
+
+core-platform-cppflags += -DCFG_TEE_CORE_NB_CORE=$(CFG_TEE_CORE_NB_CORE)
+
+CFG_TEE_CORE_EMBED_INTERNAL_TESTS?=1
+core-platform-cppflags += \
+ -DCFG_TEE_CORE_EMBED_INTERNAL_TESTS=$(CFG_TEE_CORE_EMBED_INTERNAL_TESTS)
+
+core-platform-cppflags += \
+ -DCFG_DDR_TEETZ_RESERVED_START=$(CFG_DDR_TEETZ_RESERVED_START) \
+ -DCFG_DDR_TEETZ_RESERVED_SIZE=$(CFG_DDR_TEETZ_RESERVED_SIZE)
+
+core-platform-cppflags += -DTEE_USE_DLMALLOC
+core-platform-cppflags += -D_USE_SLAPORT_LIB
+
+
+# define flag to support booting from GDB
+core-platform-cppflags += -DCONFIG_TEE_GDB_BOOT
+core-platform-cppflags += -DCFG_NO_TA_HASH_SIGN
+
+core-platform-cppflags += -DSTACK_TMP_SIZE=$(STACK_TMP_SIZE)
+core-platform-cppflags += -DSTACK_ABT_SIZE=$(STACK_ABT_SIZE)
+core-platform-cppflags += -DSTACK_THREAD_SIZE=$(STACK_THREAD_SIZE)
+
+ifdef DDR_PHYS_START
+core-platform-cppflags += -DCFG_DDR_START=$(DDR_PHYS_START)
+core-platform-cppflags += -DCFG_DDR_SIZE=$(DDR_SIZE)
+endif
+ifdef DDR1_PHYS_START
+core-platform-cppflags += -DCFG_DDR1_START=$(DDR1_PHYS_START)
+core-platform-cppflags += -DCFG_DDR1_SIZE=$(DDR1_SIZE)
+endif
+
+
+PRIMARY_STARTUP_PHYS = \
+ 0x$(shell grep stext $(platform-dir)/System.map | grep -v _stext | \
+ cut -d' ' -f 1)
+SECONDARY_STARTUP_PHYS = \
+ 0x$(shell grep stm_secondary_startup $(platform-dir)/System.map | \
+ cut -d' ' -f 1)
diff --git a/core/arch/arm32/plat-orly2/core_bootcfg.c b/core/arch/arm32/plat-orly2/core_bootcfg.c
new file mode 100644
index 00000000000..9e14799e4bc
--- /dev/null
+++ b/core/arch/arm32/plat-orly2/core_bootcfg.c
@@ -0,0 +1,307 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include
+#include
+#include
+
+#ifndef CFG_DDR_TEETZ_RESERVED_START
+#error "TEETZ reserved DDR start address undef: CFG_DDR_TEETZ_RESERVED_START"
+#endif
+#ifndef CFG_DDR_TEETZ_RESERVED_SIZE
+#error "TEETZ reserved DDR siez undefined: CFG_DDR_TEETZ_RESERVED_SIZE"
+#endif
+
+/*
+ * TEE/TZ RAM layout:
+ *
+ * +-----------------------------------------+ <- CFG_DDR_TEETZ_RESERVED_START
+ * | TEETZ private RAM | TEE_RAM | ^
+ * | +--------------------+ |
+ * | | TA_RAM | |
+ * +-----------------------------------------+ | CFG_DDR_TEETZ_RESERVED_SIZE
+ * | | teecore alloc | |
+ * | TEE/TZ and NSec | PUB_RAM --------| |
+ * | shared memory | NSec alloc | |
+ * +-----------------------------------------+ v
+ *
+ * TEE_RAM : 1MByte
+ * PUB_RAM : 1MByte
+ * TA_RAM : all that is left (at least 2MBytes!)
+ */
+
+/* define the sizes of the several memory areas */
+#if (CFG_DDR_TEETZ_RESERVED_SIZE < (4 * 1024 * 1024))
+#error "Invalid CFG_DDR_TEETZ_RESERVED_SIZE: at least 4MB expected"
+#endif
+
+#define CFG_PUB_RAM_SIZE (1 * 1024 * 1024)
+#define CFG_TEE_RAM_SIZE (1 * 1024 * 1024)
+#define CFG_TA_RAM_SIZE (CFG_DDR_TEETZ_RESERVED_SIZE - \
+ CFG_TEE_RAM_SIZE - CFG_PUB_RAM_SIZE)
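+
+/*
+ * Editor's example (not in the original code): with
+ * CFG_DDR_TEETZ_RESERVED_SIZE == 8MB this yields TEE_RAM = 1MB,
+ * PUB_RAM = 1MB and TA_RAM = 6MB, matching the layout pictured above.
+ */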
+
+/* define the secure/non-secure memory areas */
+#define CFG_DDR_ARMTZ_ONLY_START (CFG_DDR_TEETZ_RESERVED_START)
+#define CFG_DDR_ARMTZ_ONLY_SIZE (CFG_TEE_RAM_SIZE + CFG_TA_RAM_SIZE)
+
+#define CFG_DDR_ARM_ARMTZ_START \
+ (CFG_DDR_ARMTZ_ONLY_START + CFG_DDR_ARMTZ_ONLY_SIZE)
+#define CFG_DDR_ARM_ARMTZ_SIZE (CFG_PUB_RAM_SIZE)
+
+/* define the memory areas (TEE_RAM must start at the reserved DDR start addr) */
+#define CFG_TEE_RAM_START (CFG_DDR_ARMTZ_ONLY_START)
+#define CFG_TA_RAM_START (CFG_TEE_RAM_START + CFG_TEE_RAM_SIZE)
+#define CFG_PUB_RAM_START (CFG_TA_RAM_START + CFG_TA_RAM_SIZE)
+
+
+/*
+ * define the platform memory Secure layout
+ */
+struct memaccess_area {
+ unsigned long paddr;
+ size_t size;
+};
+#define MEMACCESS_AREA(a, s) { .paddr = a, .size = s }
+
+static struct memaccess_area ddr[] = {
+ MEMACCESS_AREA(CFG_DDR_START, CFG_DDR_SIZE),
+#ifdef CFG_DDR1_START
+ MEMACCESS_AREA(CFG_DDR1_START, CFG_DDR1_SIZE),
+#endif
+};
+
+static struct memaccess_area secure_only =
+MEMACCESS_AREA(CFG_DDR_ARMTZ_ONLY_START, CFG_DDR_ARMTZ_ONLY_SIZE);
+
+static struct memaccess_area nsec_shared =
+MEMACCESS_AREA(CFG_DDR_ARM_ARMTZ_START, CFG_DDR_ARM_ARMTZ_SIZE);
+
+/*
+ * buf_inside_area - return true if the buffer fits in the target area
+ *
+ * @bp: buffer physical address
+ * @bs: buffer size in bytes
+ * @ap: memory physical address
+ * @as: memory size in bytes
+ */
+static bool buf_inside_area(unsigned long bp, size_t bs, unsigned long ap,
+ size_t as)
+{
+ /* not malformed input data */
+ if (((bp + bs - 1) < bp) ||
+ ((ap + as - 1) < ap) ||
+ (bs == 0) ||
+ (as == 0))
+ return false;
+
+ if ((bp < ap) || ((bp + bs) > (ap + as)))
+ return false;
+
+ return true;
+}
+
+/*
+ * buf_overlaps_area - return true if the buffer overlaps the target area
+ *
+ * @bp: buffer physical address
+ * @bs: buffer size in bytes
+ * @ap: memory physical address
+ * @as: memory size in bytes
+ */
+static bool buf_overlaps_area(unsigned long bp, size_t bs, unsigned long ap,
+ size_t as)
+{
+ /* not malformed input data */
+ if (((bp + bs - 1) < bp) ||
+ ((ap + as - 1) < ap) ||
+ (bs == 0) ||
+ (as == 0))
+ return false;
+
+ if ((bp + bs) <= ap)
+ return false;
+
+ if (bp >= (ap + as))
+ return false;
+
+ return true;
+}
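+
+/*
+ * Editor's example (not in the original code): bp=0x1000/bs=0x2000
+ * overlaps ap=0x2000/as=0x1000 since bp + bs (0x3000) > ap and
+ * bp (0x1000) < ap + as (0x3000).
+ */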
+
+static bool pbuf_is_ddr(unsigned long paddr, size_t size)
+{
+ int i = sizeof(ddr) / sizeof(*ddr);
+
+ while (i--) {
+ if (buf_inside_area(paddr, size, ddr[i].paddr, ddr[i].size))
+ return true;
+ }
+ return false;
+}
+
+static bool pbuf_is_multipurpose(unsigned long paddr, size_t size)
+{
+ if (buf_overlaps_area(paddr, size, secure_only.paddr, secure_only.size))
+ return false;
+ if (buf_overlaps_area(paddr, size, nsec_shared.paddr, nsec_shared.size))
+ return false;
+
+ return pbuf_is_ddr(paddr, size);
+}
+
+/*
+ * Wrapper for the platform specific pbuf_is() service.
+ */
+static bool pbuf_is(enum buf_is_attr attr, unsigned long paddr, size_t size)
+{
+ switch (attr) {
+ case CORE_MEM_SEC:
+ if (buf_inside_area
+ (paddr, size, secure_only.paddr, secure_only.size))
+ return true;
+ return false;
+
+ case CORE_MEM_NON_SEC:
+ return buf_inside_area(paddr, size, nsec_shared.paddr,
+ nsec_shared.size);
+
+ case CORE_MEM_MULTPURPOSE:
+ return pbuf_is_multipurpose(paddr, size);
+
+ case CORE_MEM_EXTRAM:
+ return pbuf_is_ddr(paddr, size);
+
+ default:
+ EMSG("unpexted request: attr=%X", attr);
+ return false;
+ }
+}
+
+static struct map_area bootcfg_stih416_memory[] = {
+ { /* teecore execution RAM */
+ .type = MEM_AREA_TEE_RAM,
+ .pa = CFG_TEE_RAM_START, .size = CFG_TEE_RAM_SIZE,
+ .cached = true, .secure = true, .rw = true, .exec = true,
+ },
+
+ { /* teecore TA load/exec RAM - Secure, exec user only! */
+ .type = MEM_AREA_TA_RAM,
+ .pa = CFG_TA_RAM_START, .size = CFG_TA_RAM_SIZE,
+ .cached = true, .secure = true, .rw = true, .exec = false,
+ },
+
+ { /* teecore public RAM - NonSecure, non-exec. */
+ .type = MEM_AREA_NSEC_SHM,
+ .pa = CFG_PUB_RAM_START, .size = SECTION_SIZE,
+ .cached = true, .secure = false, .rw = true, .exec = false,
+ },
+
+ { /* CPU mem map HW registers */
+ .type = MEM_AREA_IO_NSEC,
+ .pa = 0xFFF00000 & ~SECTION_MASK, .size = SECTION_SIZE,
+ .device = true, .secure = true, .rw = true,
+ },
+
+ { /* ASC IP for UART HW tracing */
+ .type = MEM_AREA_IO_NSEC,
+ .pa = (0xFE400000 + 0x00131000) & ~SECTION_MASK, .size = SECTION_SIZE,
+ .device = true, .secure = false, .rw = true,
+ },
+
+ { /* RNG IP for some random support */
+ .type = MEM_AREA_IO_SEC,
+ .pa = (0xFEE80000) & ~SECTION_MASK, .size = SECTION_SIZE,
+ .device = true, .secure = true, .rw = true,
+ },
+
+ {.type = MEM_AREA_NOTYPE}
+};
+
+/*
+ * bootcfg_get_pbuf_is_handler - return the platform specific pbuf_is
+ */
+unsigned long bootcfg_get_pbuf_is_handler(void)
+{
+ return (unsigned long)pbuf_is;
+}
+
+/*
+ * This routine is called before the MMU and core memory management are initialized.
+ */
+struct map_area *bootcfg_get_memory(void)
+{
+ struct map_area *map;
+ struct memaccess_area *a, *a2;
+ struct map_area *ret = bootcfg_stih416_memory;
+
+ /* check defined memory access layout */
+ a = (struct memaccess_area *)&secure_only;
+ a2 = (struct memaccess_area *)&nsec_shared;
+ if (buf_overlaps_area(a->paddr, a->size, a2->paddr, a2->size)) {
+ EMSG("invalid memory access configuration: sec/nsec");
+ ret = NULL;
+ }
+ if (ret == NULL)
+ return ret;
+
+ /* check defined mapping (overlapping will be tested later) */
+ map = bootcfg_stih416_memory;
+ while (map->type != MEM_AREA_NOTYPE) {
+ switch (map->type) {
+ case MEM_AREA_TEE_RAM:
+ a = (struct memaccess_area *)&secure_only;
+ if (buf_inside_area
+ (map->pa, map->size, a->paddr, a->size) == false) {
+ EMSG("TEE_RAM does not fit in secure_only");
+ ret = NULL;
+ }
+ break;
+ case MEM_AREA_TA_RAM:
+ a = (struct memaccess_area *)&secure_only;
+ if (buf_inside_area
+ (map->pa, map->size, a->paddr, a->size) == false) {
+ EMSG("TEE_RAM does not fit in secure_only");
+ ret = NULL;
+ }
+ break;
+ case MEM_AREA_NSEC_SHM:
+ a = (struct memaccess_area *)&nsec_shared;
+ if (buf_inside_area
+ (map->pa, map->size, a->paddr, a->size) == false) {
+ EMSG("TEE_RAM does not fit in secure_only");
+ ret = NULL;
+ }
+ break;
+ default:
+ /* other mapped areas are not checked */
+ break;
+ }
+ map++;
+ }
+
+ return ret;
+}
diff --git a/core/arch/arm32/plat-orly2/core_chip.c b/core/arch/arm32/plat-orly2/core_chip.c
new file mode 100644
index 00000000000..6d82709c02d
--- /dev/null
+++ b/core/arch/arm32/plat-orly2/core_chip.c
@@ -0,0 +1,123 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include
+
+#include
+#include
+#include
+
+/* Cut information for Orly2 */
+/* SYSTEM_STATUS2600 = SAS Device ID :
+ SYSCFGBaseAddress(0xFE830000)+ 0x00000960 */
+/* SYSTEM_STATUS9516 = MPE Device ID :
+ SYSCFGBaseAddress(0xFD690000)+ 0x00000810 */
+/* [31:28] VERSION: Version
+ [27:22] GROUP_ID: Group ID
+ [21:12] DEVICE_ID: Device ID
+ [11:1] MANUFACTURER_ID: Manufacturer ID
+ [0] JTAG_BIT: JTAG bit */
+
+/* SYSTEM_STATUS9516 */
+#define CUT_MPE_MAJOR_ADDR 0xfd690810
+#define CUT_MPE_MAJOR_MASK 0xf0000000
+#define CUT_MPE_MAJOR_SHIFT 28
+#define ORLY2_MPE_MASK 0xFFFFFFFE
+#define ORLY2_MPE_VALUE 0x0D44D040
+
+/* SYSTEM_STATUS2600 */
+#define CUT_SAS_MAJOR_ADDR 0xfe830960
+#define CUT_SAS_MAJOR_MASK 0xf0000000
+#define CUT_SAS_MAJOR_SHIFT 28
+#define ORLY2_SAS_MASK 0xFFFFFFFE
+#define ORLY2_SAS_VALUE 0x0D44C040
+
+/* FUSE = MPE SAFMEM : 0xfd6d5000 */
+/* 0x9C: eng_metal_fix_nb<3:0>
+ * => ST Engineering setting. */
+#define CUT_MPE_MINOR_ADDR 0xfd6d509c
+#define CUT_MPE_MINOR_MASK 0xf
+#define CUT_MPE_MINOR_SHIFT 0
+
+uint32_t tee_get_cutid(void)
+{
+ uint32_t sas_major = 0, mpe_minor = 0, mpe_major = 0;
+ uint32_t sas_major_val = 0, mpe_minor_val = 0, mpe_major_val = 0;
+ uint32_t *sas_major_reg, *mpe_minor_reg, *mpe_major_reg;
+ uint32_t result;
+
+ /* Map major and minor registers */
+ mpe_major_reg = tee_mmu_ioremap(CUT_MPE_MAJOR_ADDR, 4);
+ sas_major_reg = tee_mmu_ioremap(CUT_SAS_MAJOR_ADDR, 4);
+ mpe_minor_reg = tee_mmu_ioremap(CUT_MPE_MINOR_ADDR, 4);
+
+ if ((mpe_major_reg != NULL) &&
+ (sas_major_reg != NULL) && (mpe_minor_reg != NULL)) {
+ mpe_major_val = *mpe_major_reg;
+ sas_major_val = *sas_major_reg;
+ mpe_minor_val = *mpe_minor_reg;
+
+ /* Read major revision */
+ mpe_major = ((mpe_major_val & CUT_MPE_MAJOR_MASK) >>
+ CUT_MPE_MAJOR_SHIFT);
+
+ /* Read major revision */
+ sas_major = ((sas_major_val & CUT_SAS_MAJOR_MASK) >>
+ CUT_SAS_MAJOR_SHIFT);
+
+ /* Read minor revision */
+ mpe_minor = ((mpe_minor_val & CUT_MPE_MINOR_MASK) >>
+ CUT_MPE_MINOR_SHIFT);
+ }
+
+ /* Unmap */
+ tee_mmu_iounmap(mpe_major_reg);
+ tee_mmu_iounmap(sas_major_reg);
+ tee_mmu_iounmap(mpe_minor_reg);
+
+ DMSG("mpe_major_reg = 0x%x : 0x%x", (unsigned int)mpe_major_reg,
+ (unsigned int)mpe_major_val);
+ DMSG("sas_major_reg = 0x%x : 0x%x", (unsigned int)sas_major_reg,
+ (unsigned int)sas_major_val);
+ DMSG("mpe_minor_reg = 0x%x : 0x%x", (unsigned int)mpe_minor_reg,
+ (unsigned int)mpe_minor_val);
+
+ /* Return a hex word where
+ * [31:16] is chip name : 0x416 for orly2
+ * [15:12] is 0
+ * [11: 8] is [A-F] indicating MPE major number
+ * [ 7: 4] is [A-F] indicating SAS major number,
+ * [ 3: 0] is [0-9] indicating MPE minor number */
+ if ((ORLY2_MPE_VALUE == (mpe_major_val & ORLY2_MPE_MASK)) &&
+ (ORLY2_SAS_VALUE == (sas_major_val & ORLY2_SAS_MASK)))
+ result = ORLY2_CUTID_VAL + ((mpe_major * 0x100) +
+ ((sas_major * 0x10) + mpe_minor));
+ else
+ result = 0xFFFFFFFF;
+
+ return result;
+}
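+
+/*
+ * Editor's example (not in the original code): MPE major 0xB, SAS major
+ * 0xA and MPE minor 0x1 yield ORLY2_CUTID_VAL + 0xB00 + 0xA0 + 0x1.
+ */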
diff --git a/core/arch/arm32/plat-orly2/link.mk b/core/arch/arm32/plat-orly2/link.mk
new file mode 100644
index 00000000000..8e7fef233b0
--- /dev/null
+++ b/core/arch/arm32/plat-orly2/link.mk
@@ -0,0 +1,41 @@
+link-out-dir = $(out-dir)/core/
+
+link-script = $(platform-dir)/tz-template.lds
+link-script-pp = $(link-out-dir)/tz.lds
+
+all: $(link-out-dir)tee.elf $(link-out-dir)tee.dmp $(link-out-dir)tee.bin
+all: $(link-out-dir)tee.symb_sizes
+cleanfiles += $(link-out-dir)tee.elf $(link-out-dir)tee.dmp $(link-out-dir)tee.map
+cleanfiles += $(link-out-dir)tee.bin
+cleanfiles += $(link-out-dir)tee.symb_sizes
+cleanfiles += $(link-script-pp)
+
+link-ldflags = $(LDFLAGS)
+link-ldflags += -T $(link-script-pp) -Map=$(link-out-dir)tee.map
+link-ldflags += --sort-section=alignment
+
+link-ldadd = $(LDADD)
+link-ldadd += $(libfiles)
+ldargs-tee.elf := $(link-ldflags) $(objs) $(link-ldadd) $(libgcc)
+
+
+$(link-script-pp): $(link-script) $(MAKEFILE_LIST)
+ @echo PP $<
+ $(q)sed -e "s/%in_TEE_SCATTER_START%/$(TEE_SCATTER_START)/g" < $< > $@
+
+
+$(link-out-dir)tee.elf: $(objs) $(libdeps) $(link-script-pp)
+ @echo LD $@
+ $(q)$(LD) $(ldargs-tee.elf) -o $@
+
+$(link-out-dir)tee.dmp: $(link-out-dir)tee.elf
+ @echo OBJDUMP $@
+ $(q)$(OBJDUMP) -l -x -d $< > $@
+
+$(link-out-dir)tee.bin: $(link-out-dir)tee.elf
+ @echo OBJCOPY $@
+ $(q)$(OBJCOPY) -O binary $< $@
+
+$(link-out-dir)tee.symb_sizes: $(link-out-dir)tee.elf
+ @echo Symb sizes $@
+ $(q)$(NM) --print-size --reverse-sort --size-sort $< > $@
diff --git a/core/arch/arm32/plat-orly2/main.c b/core/arch/arm32/plat-orly2/main.c
new file mode 100644
index 00000000000..009fb3d79b7
--- /dev/null
+++ b/core/arch/arm32/plat-orly2/main.c
@@ -0,0 +1,264 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include
+#include
+
+#include
+#include
+#include
+#include
+#include
+
+#include
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+
+#ifdef WITH_STACK_CANARIES
+#define STACK_CANARY_SIZE (4 * sizeof(uint32_t))
+#define START_CANARY_VALUE 0xdededede
+#define END_CANARY_VALUE 0xabababab
+#define GET_START_CANARY(name, stack_num) name[stack_num][0]
+#define GET_END_CANARY(name, stack_num) \
+ name[stack_num][sizeof(name[stack_num]) / sizeof(uint32_t) - 1]
+#else
+#define STACK_CANARY_SIZE 0
+#endif
+
+#define STACK_ALIGNMENT 8
+
+#define DECLARE_STACK(name, num_stacks, stack_size) \
+ static uint32_t name[num_stacks][(stack_size + STACK_CANARY_SIZE) / \
+ sizeof(uint32_t)] \
+ __attribute__((section(".bss.prebss.stack"), \
+ aligned(STACK_ALIGNMENT)))
+
+#define GET_STACK(stack) \
+ ((vaddr_t)(stack) + sizeof(stack) - STACK_CANARY_SIZE / 2)
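+
+/*
+ * Editor's note (not in the original code): each stack is laid out as
+ * [start canary][usable stack][end canary], with STACK_CANARY_SIZE / 2
+ * bytes reserved at each end. GET_STACK() returns the address just below
+ * the end-canary region, i.e. the initial stack pointer of a
+ * full-descending stack.
+ */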
+
+
+DECLARE_STACK(stack_tmp, CFG_TEE_CORE_NB_CORE, STACK_TMP_SIZE);
+DECLARE_STACK(stack_abt, CFG_TEE_CORE_NB_CORE, STACK_ABT_SIZE);
+DECLARE_STACK(stack_sm, CFG_TEE_CORE_NB_CORE, SM_STACK_SIZE);
+DECLARE_STACK(stack_thread, NUM_THREADS, STACK_THREAD_SIZE);
+
+const vaddr_t stack_tmp_top[CFG_TEE_CORE_NB_CORE] = {
+ GET_STACK(stack_tmp[0]),
+#if CFG_TEE_CORE_NB_CORE > 1
+ GET_STACK(stack_tmp[1]),
+#endif
+#if CFG_TEE_CORE_NB_CORE > 2
+ GET_STACK(stack_tmp[2]),
+#endif
+#if CFG_TEE_CORE_NB_CORE > 3
+ GET_STACK(stack_tmp[3]),
+#endif
+#if CFG_TEE_CORE_NB_CORE > 4
+#error "Top of tmp stacks aren't defined for more than 4 CPUS"
+#endif
+};
+
+static void main_fiq(void);
+static void main_tee_entry(struct thread_smc_args *args);
+
+static void init_canaries(void)
+{
+ size_t n;
+#define INIT_CANARY(name) \
+ for (n = 0; n < ARRAY_SIZE(name); n++) { \
+ uint32_t *start_canary = &GET_START_CANARY(name, n); \
+ uint32_t *end_canary = &GET_END_CANARY(name, n); \
+ \
+ *start_canary = START_CANARY_VALUE; \
+ *end_canary = END_CANARY_VALUE; \
+ }
+
+ INIT_CANARY(stack_tmp);
+ INIT_CANARY(stack_abt);
+ INIT_CANARY(stack_sm);
+ INIT_CANARY(stack_thread);
+}
+
+void check_canaries(void)
+{
+#ifdef WITH_STACK_CANARIES
+ size_t n;
+
+#define ASSERT_STACK_CANARIES(name) \
+ do { \
+ for (n = 0; n < ARRAY_SIZE(name); n++) { \
+ assert(GET_START_CANARY(name, n) == START_CANARY_VALUE);\
+ assert(GET_END_CANARY(name, n) == END_CANARY_VALUE); \
+ } \
+ } while (0)
+
+ ASSERT_STACK_CANARIES(stack_tmp);
+ ASSERT_STACK_CANARIES(stack_abt);
+ ASSERT_STACK_CANARIES(stack_sm);
+ ASSERT_STACK_CANARIES(stack_thread);
+#endif /*WITH_STACK_CANARIES*/
+}
+
+static const struct thread_handlers handlers = {
+ .stdcall = main_tee_entry,
+ .fastcall = main_tee_entry,
+ .fiq = main_fiq,
+ .svc = NULL, /* XXX currently using hardcoded svc handler */
+ .abort = tee_pager_abort_handler
+};
+
+void main_init(uint32_t nsec_entry); /* called from assembly only */
+void main_init(uint32_t nsec_entry)
+{
+ struct sm_nsec_ctx *nsec_ctx;
+ size_t pos = get_core_pos();
+
+ /*
+ * Mask IRQ and FIQ before switching to the thread vector as the
+ * thread handler requires IRQ and FIQ to be masked while executing
+ * with the temporary stack. The thread subsystem also asserts that
+ * IRQ is blocked when using most of its functions.
+ */
+ write_cpsr(read_cpsr() | CPSR_F | CPSR_I);
+
+ if (pos == 0) {
+ size_t n;
+
+ /* Initialize canaries around the stacks */
+ init_canaries();
+
+ /* Assign the thread stacks */
+ for (n = 0; n < NUM_THREADS; n++) {
+ if (!thread_init_stack(n, GET_STACK(stack_thread[n])))
+ panic();
+ }
+ }
+
+ if (!thread_init_stack(THREAD_TMP_STACK, GET_STACK(stack_tmp[pos])))
+ panic();
+ if (!thread_init_stack(THREAD_ABT_STACK, GET_STACK(stack_abt[pos])))
+ panic();
+
+ thread_init_handlers(&handlers);
+
+ /* Initialize secure monitor */
+ sm_init(GET_STACK(stack_sm[pos]));
+ nsec_ctx = sm_get_nsec_ctx();
+ nsec_ctx->mon_lr = nsec_entry;
+ nsec_ctx->mon_spsr = CPSR_MODE_SVC | CPSR_I;
+}
+
+static void main_fiq(void)
+{
+ panic();
+}
+
+
+static void main_tee_entry(struct thread_smc_args *args)
+{
+ /*
+ * This function first catches all ST specific SMC functions;
+ * if none matches, the generic tee_entry is called.
+ */
+
+ /* TODO move to main_init() */
+ if (init_teecore() != TEE_SUCCESS)
+ panic();
+
+ if (args->a0 == TEESMC32_ST_FASTCALL_GET_SHM_CONFIG) {
+ args->a0 = TEESMC_RETURN_OK;
+ args->a1 = default_nsec_shm_paddr;
+ args->a2 = default_nsec_shm_size;
+ /* Should this be TEESMC cache attributes instead? */
+ args->a3 = core_mmu_is_shm_cached();
+ return;
+ }
+
+ if (args->a0 == TEESMC32_ST_FASTCALL_L2CC_MUTEX) {
+ switch (args->a1) {
+ case TEESMC_ST_L2CC_MUTEX_GET_ADDR:
+ case TEESMC_ST_L2CC_MUTEX_SET_ADDR:
+ case TEESMC_ST_L2CC_MUTEX_ENABLE:
+ case TEESMC_ST_L2CC_MUTEX_DISABLE:
+ /* TODO call the appropriate internal functions */
+ args->a0 = TEESMC_RETURN_UNKNOWN_FUNCTION;
+ return;
+ default:
+ args->a0 = TEESMC_RETURN_EBADCMD;
+ return;
+ }
+ }
+
+ tee_entry(args);
+}
+
+
+/* Override weak function in tee/entry.c */
+void tee_entry_get_api_call_count(struct thread_smc_args *args)
+{
+ args->a0 = tee_entry_generic_get_api_call_count() + 2;
+}
+
+/* Override weak function in tee/entry.c */
+void tee_entry_get_api_uuid(struct thread_smc_args *args)
+{
+ args->a0 = TEESMC_ST_UID_R0;
+ args->a1 = TEESMC_ST_UID_R1;
+ args->a2 = TEESMC_ST_UID_R2;
+ args->a3 = TEESMC_ST_UID32_R3;
+}
+
+/* Override weak function in tee/entry.c */
+void tee_entry_get_api_revision(struct thread_smc_args *args)
+{
+ args->a0 = TEESMC_ST_REVISION_MAJOR;
+ args->a1 = TEESMC_ST_REVISION_MINOR;
+}
+
+/* Override weak function in tee/entry.c */
+void tee_entry_get_os_uuid(struct thread_smc_args *args)
+{
+ args->a0 = TEESMC_OS_OPTEE_UUID_R0;
+ args->a1 = TEESMC_OS_OPTEE_UUID_R1;
+ args->a2 = TEESMC_OS_OPTEE_UUID_R2;
+ args->a3 = TEESMC_OS_OPTEE_UUID_R3;
+}
+
+/* Override weak function in tee/entry.c */
+void tee_entry_get_os_revision(struct thread_smc_args *args)
+{
+ args->a0 = TEESMC_OS_OPTEE_REVISION_MAJOR;
+ args->a1 = TEESMC_OS_OPTEE_REVISION_MINOR;
+}
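The canary scheme above brackets every stack with guard words: init_canaries() writes them and check_canaries() asserts they are still intact. A minimal stand-alone C sketch of the idea (guard values and names here are illustrative, not the kernel's):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define START_GUARD 0x5a5a5a5aU /* illustrative values */
    #define END_GUARD   0xa5a5a5a5U

    /* A stack buffer with one guard word at each end */
    static uint32_t stack[1 + 64 + 1];

    static void guard_init(void)
    {
            stack[0] = START_GUARD;
            stack[(sizeof(stack) / sizeof(stack[0])) - 1] = END_GUARD;
    }

    static void guard_check(void)
    {
            /* An overflow at either end of the stack clobbers a guard word */
            assert(stack[0] == START_GUARD);
            assert(stack[(sizeof(stack) / sizeof(stack[0])) - 1] == END_GUARD);
    }

    int main(void)
    {
            guard_init();
            /* ... run code that uses the guarded buffer as a stack ... */
            guard_check();
            printf("canaries intact\n");
            return 0;
    }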
diff --git a/core/arch/arm32/plat-orly2/rng_support.c b/core/arch/arm32/plat-orly2/rng_support.c
new file mode 100644
index 00000000000..e3ebe2d8d31
--- /dev/null
+++ b/core/arch/arm32/plat-orly2/rng_support.c
@@ -0,0 +1,239 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include
+#include
+#include
+
+/* Use the RNG of the HW on this platform */
+#define USE_RNG_HW
+
+#define USE_FULLY_RNG_HW_IMP 0
+#define USE_SW_DELAY 0
+
+/*
+ * If a HW issue is detected, an infinite loop is entered until valid data
+ * are available (a user-side timeout is expected to detect the issue).
+ * Otherwise an error is logged and 0x00 is returned.
+ */
+#define USE_USER_TIMEOUT 1
+
+#ifdef USE_RNG_HW
+
+/*
+ * Base address of the RNG on Orly-2, taken from document
+ * http://wave.st.com/chd/SOC_HW_Design/default.aspx
+ * SOC HW Design > ORLY > ORLY2_cut1.0 > SoC_Reg_Spec
+ * top_mpe42.xls
+ */
+#define RNG_BASE_ADDRESS 0xFEE80000
+
+/* Address of the register to read in the RNG IP */
+#define RNG_VAL (RNG_BASE_ADDRESS + 0x24)
+#define RNG_STATUS (RNG_BASE_ADDRESS + 0x20)
+
+static volatile uint32_t *_p_addr_val = (uint32_t *)RNG_VAL;
+static volatile uint32_t *_p_addr_status = (uint32_t *)RNG_STATUS;
+
+static inline int hwrng_waithost_fifo_full(void)
+{
+ int res = 0;
+ volatile uint32_t status;
+
+ /* Wait HOST FIFO FULL (see rng_fspec_revG_120720.pdf) */
+ do {
+ status = *_p_addr_status;
+ } while ((status & 0x20) != 0x20);
+
+ /* Check STATUS (see rng_fspec_revG_120720.pdf) */
+ if ((status & 0x3) != 0) {
+ EMSG("generated HW random data are not valid");
+ res = -1;
+ }
+
+#if (USE_USER_TIMEOUT == 1)
+ if (res != 0)
+ while (1)
+ ;
+#endif
+
+ return res;
+}
+
+uint8_t hw_get_random_byte(void)
+#if (USE_FULLY_RNG_HW_IMP == 1)
+{
+ /*
+ * Only the HW RNG IP is used to generate the value through the
+ * HOST interface.
+ *
+ * @see the document rng_fspec_revG_120720.pdf for details
+ *
+ * - HOST FIFO size = 8x8b (64b)
+	 * - the LSBs (16b) of the RNG_VAL register provide 16b of random data
+	 * - bit5 of the RNG_STATUS register indicates whether the HOST
+	 *   FIFO is full or not.
+	 * - bits 1,0 of the RNG_STATUS register indicate whether the
+	 *   data are valid.
+ *
+	 * Main principle:
+	 * For performance reasons, a local SW fifo is used to store the
+	 * content of the HOST FIFO (max size = 8 bytes). When a random
+	 * value is expected, this SW fifo is used to return a stored value.
+	 * When the local SW fifo is empty, it is refilled from the HOST FIFO
+	 * according to the following sequence:
+	 *
+	 * - wait for HOST FIFO full
+	 *   o indicates that max 8 bytes (64b) are available
+	 *   o this is mandatory to guarantee that valid data are
+	 *     available: no STATUS bit indicates that the HOST FIFO
+	 *     is empty.
+	 * - check the STATUS bits
+	 * - update the local SW fifo from the HOST FIFO
+	 *
+	 * This avoids waiting for a valid random value at each iteration.
+	 * _LOCAL_FIFO_SIZE indicates the size of the local SW fifo.
+ *
+ */
+
+
+#define _LOCAL_FIFO_SIZE	8	/* even value: 2, 4, 6 or 8 */
+
+ static uint8_t lfifo[_LOCAL_FIFO_SIZE]; /* local fifo */
+ static int pos = -1;
+
+ static int nbcall; /* debug purpose - 0 is the initial value*/
+
+ volatile uint32_t tmpval[_LOCAL_FIFO_SIZE/2];
+ uint8_t value;
+ int i;
+ int res;
+
+ nbcall++;
+
+ /* Retrieve data from local fifo */
+ if (pos >= 0) {
+ pos++;
+ value = lfifo[pos];
+ if (pos == (_LOCAL_FIFO_SIZE - 1))
+ pos = -1;
+ return value;
+ }
+
+ /* Wait HOST FIFO full */
+ res = hwrng_waithost_fifo_full();
+ if (res < 0)
+ return 0x00;
+
+	/* Read the FIFO according to the number of expected elements */
+ for (i = 0; i < _LOCAL_FIFO_SIZE / 2; i++) {
+ tmpval[i] = *_p_addr_val & 0xFFFF;
+#if (USE_SW_DELAY == 1)
+ /* Wait 0.667 us (fcpu = 600Mhz -> 400 cycles) @see doc */
+ volatile int ll = 200;
+ while (ll--)
+ ;
+#endif
+ }
+ /* Update the local SW fifo for next request */
+ pos = 0;
+ for (i = 0; i < _LOCAL_FIFO_SIZE / 2; i++) {
+ lfifo[pos] = tmpval[i] & 0xFF;
+ pos++;
+ lfifo[pos] = (tmpval[i] >> 8) & 0xFF;
+ pos++;
+ };
+
+ pos = 0;
+ return lfifo[pos];
+}
+#else /* USE_FULLY_RNG_HW_IMP != 1 */
+{
+ /*
+	 * The HW RNG IP is used to reseed the generator periodically
+	 * (every MAX_SOFT_RNG calls) through the HOST interface.
+	 *
+	 * @see the document rng_fspec_revG_120720.pdf for details
+	 *
+	 * - A pseudo-random SW generator produces the returned
+	 *   values.
+ */
+
+ static uint32_t _lcg_state;
+ static uint32_t _nb_soft; /* 0 is the initial value */
+ int res;
+
+#define MAX_SOFT_RNG 512
+
+ static const uint32_t _a = 1664525;
+ static const uint32_t _c = 1013904223;
+
+ if (_nb_soft == 0) {
+		/* Update the seed with a "real" HW-generated random number */
+ do {
+ res = hwrng_waithost_fifo_full();
+ if (res < 0)
+ return 0x00;
+ _lcg_state = *_p_addr_val & 0xFFFF;
+ _lcg_state <<= 16;
+
+#if (USE_SW_DELAY == 1)
+ /*
+ * Wait 0.667 us (fcpu = 600Mhz -> 400 cycles)
+ * @see doc
+ */
+ volatile int ll = 200;
+ while (ll--)
+ ;
+#endif
+ _lcg_state |= *_p_addr_val & 0xFFFF;
+ } while (_lcg_state == 0);
+ }
+ _nb_soft = (_nb_soft + 1) % MAX_SOFT_RNG;
+ _lcg_state = (_a * _lcg_state + _c);
+
+ return (uint8_t) (_lcg_state >> 24);
+}
+#endif
+
+#else
+/* Software fallback: uses the C library pseudo-random generator */
+uint8_t hw_get_random_byte(void)
+{
+ static uint8_t value = 1;
+ static uint32_t ite; /* 0 is the initial value */
+
+ ite++;
+ srand(ite);
+	/* scale to [0, 255]: using 255 avoids the out-of-range value 256 */
+	value = (uint8_t)(255 * ((double)rand() / RAND_MAX));
+ /* AMSG("SW Random value = 0x%02x", value); */
+ return value;
+}
+
+#endif
+
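The mixed implementation above reseeds from the HW RNG and then runs a 32-bit linear congruential generator, x(n+1) = a*x(n) + c mod 2^32, with the well-known constants a = 1664525 and c = 1013904223 (Numerical Recipes); only the top byte of the state is returned, since the low bits of an LCG have short periods. A stand-alone sketch (the seed here is arbitrary; the driver derives it from the HW FIFO):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t lcg_state = 0x12345678; /* arbitrary seed for the sketch */

    static uint8_t lcg_byte(void)
    {
            /* x(n+1) = a*x(n) + c, modulo 2^32 via uint32_t wrap-around */
            lcg_state = 1664525u * lcg_state + 1013904223u;
            /* the high byte is the best-distributed part of the LCG state */
            return (uint8_t)(lcg_state >> 24);
    }

    int main(void)
    {
            for (int i = 0; i < 8; i++)
                    printf("%02x ", lcg_byte());
            printf("\n");
            return 0;
    }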
diff --git a/core/arch/arm32/plat-orly2/sub.mk b/core/arch/arm32/plat-orly2/sub.mk
new file mode 100644
index 00000000000..b177fa460cd
--- /dev/null
+++ b/core/arch/arm32/plat-orly2/sub.mk
@@ -0,0 +1,17 @@
+srcs-y += tee_common_otp.c
+cflags-tee_common_otp.c-y += -Wno-unused-parameter
+
+srcs-y += core_bootcfg.c
+srcs-y += core_chip.c
+srcs-y += rng_support.c
+
+srcs-y += asc.S
+srcs-y += tz_a9init.S
+srcs-y += main.c
+
+srcs-y += tz_sinit.S
+aflags-tz_sinit.S-y += \
+ -Xassembler --defsym \
+ -Xassembler STM_SECONDARY_STARTUP=$(SECONDARY_STARTUP_PHYS) \
+ -Xassembler --defsym \
+ -Xassembler STEXT=$(PRIMARY_STARTUP_PHYS)
diff --git a/core/arch/arm32/plat-orly2/system_config.in b/core/arch/arm32/plat-orly2/system_config.in
new file mode 100644
index 00000000000..00302fc96ec
--- /dev/null
+++ b/core/arch/arm32/plat-orly2/system_config.in
@@ -0,0 +1,40 @@
+# This script defines platform-specific settings for the TEE ARMv7 build
+
+DDR_PHYS_START := 0x40000000
+DDR_SIZE := 0x40000000
+
+DDR1_PHYS_START := 0x80000000
+DDR1_SIZE := 0x40000000
+
+
+# Hard coded NB cores
+CFG_TEE_CORE_NB_CORE := 2
+
+# Static configuration of DDR reserved to TEE/TZ
+# Provide here only start address and size. TEEcore will adapt.
+# Allow these settings to be overridden.
+CFG_DDR_TEETZ_RESERVED_START ?= 0xBF800000
+CFG_DDR_TEETZ_RESERVED_SIZE ?= 0x00800000
+
+TEE_SCATTER_START := $(CFG_DDR_TEETZ_RESERVED_START)
+
+
+# Platform/Project/Board specific static configuration
+#
+ifeq ($(BUILD_FOR_ANDROID),true)
+
+# STDroid setup
+LINUX_PAGE_OFFSET := 0x80000000
+LINUX_LOAD_ADDR := 0x70000000
+
+else
+
+# SDK2 setup
+LINUX_PAGE_OFFSET := 0x40000000
+LINUX_LOAD_ADDR := 0x40000000
+
+endif
+
+STACK_TMP_SIZE := 1024 # TODO minimize
+STACK_ABT_SIZE := 1024
+STACK_THREAD_SIZE := 8192
diff --git a/core/arch/arm32/plat-orly2/tee_common_otp.c b/core/arch/arm32/plat-orly2/tee_common_otp.c
new file mode 100644
index 00000000000..9b4705daa79
--- /dev/null
+++ b/core/arch/arm32/plat-orly2/tee_common_otp.c
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include
+#include
+#include
+#include
+
+#define SHA256_HASH_SIZE 32
+uint8_t hw_key_digest[SHA256_HASH_SIZE];
+
+/*---------------------------------------------------------------------------*/
+/* tee_otp_get_hw_unique_key */
+/*---------------------------------------------------------------------------*/
+/*
+  This function reads out a HW unique key.
+
+  \param[out] hwkey placeholder for the key data read
+  \return None.
+
+ */
+/*---------------------------------------------------------------------------*/
+void tee_otp_get_hw_unique_key(struct tee_hw_unique_key *hwkey)
+{
+ /* Copy the first part of the new hw key */
+ memcpy(&hwkey->data[0], &hw_key_digest[0],
+ sizeof(struct tee_hw_unique_key));
+}
+
+int tee_otp_get_die_id(uint8_t *buffer, size_t len)
+{
+ size_t i;
+
+ char pattern[4] = { 'B', 'E', 'E', 'F' };
+ for (i = 0; i < len; i++)
+ buffer[i] = pattern[i % 4];
+
+ return 0;
+}
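The memcpy in tee_otp_get_hw_unique_key() stays within hw_key_digest only if the HW unique key is no larger than a SHA-256 digest. A compile-time guard placed in this file would make that assumption explicit; a sketch using the negative-array-size trick, since the code base predates C11 static_assert:

    /* Fails to compile if struct tee_hw_unique_key outgrows the digest */
    typedef char hwkey_fits_in_digest[
            (sizeof(struct tee_hw_unique_key) <= SHA256_HASH_SIZE) ? 1 : -1];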
diff --git a/core/arch/arm32/plat-orly2/tz-template.lds b/core/arch/arm32/plat-orly2/tz-template.lds
new file mode 100644
index 00000000000..ac49b0df8c4
--- /dev/null
+++ b/core/arch/arm32/plat-orly2/tz-template.lds
@@ -0,0 +1,62 @@
+OUTPUT_ARCH(arm)
+ENTRY(tz_sinit)
+
+MEMORY
+{
+	/* 1 MByte total is reserved for teecore (exec, stacks, contexts, MMU tables) */
+ EXEC_MEM (rw) : ORIGIN = (%in_TEE_SCATTER_START% + 0x00000000), LENGTH = 0x000D0000
+ STACKS_MEM (rw) : ORIGIN = (%in_TEE_SCATTER_START% + 0x000D0000), LENGTH = 0x00010000
+ CTX_MEM (rw) : ORIGIN = (%in_TEE_SCATTER_START% + 0x000E0000), LENGTH = 0x00010000
+ MMU_MEM (rw) : ORIGIN = (%in_TEE_SCATTER_START% + 0x000F0000), LENGTH = 0x00010000
+}
+
+SECTIONS
+{
+ .teecore_exec :
+ {
+ *(.vector_table)
+ *(.text); *(.text.*)
+ *(.rodata); *(.rodata.*)
+ *(.got); *(.got.*)
+ *(.data); *(.data.*)
+
+ __start_ta_head_section = . ;
+ *(ta_head_section)
+ __stop_ta_head_section = . ;
+
+ __bss_start = (.) ;
+ *(.bss)
+ __bss_end = (.) ;
+
+ *(COMMON)
+
+ } > EXEC_MEM
+
+ .stacks :
+ {
+ __stacks_start = (.);
+ *(.bss.prebss.stack)
+ } > STACKS_MEM
+
+ /* NSec and Secure context storage (TODO: move in C-code) */
+ .context_backup :
+ {
+ PUB_CONTEXT_BACKUP = (. + 0x000); /* Public context backup area */
+ SEC_CONTEXT_BACKUP = (. + 0x400); /* Secure context backup area */
+ } > CTX_MEM
+
+ /* MMU pages tables (TODO: should we keep them cachable?) */
+ .pagestables :
+ {
+ SEC_MMU_TTB_FLD = (.); /* MMU L1 table for teecore: 16kB */
+		SEC_MMU_TTB_SLD = (. + 0x4000);	/* MMU L2 tables for teecore: 16*1kB (16MB mappable) */
+		SEC_TA_MMU_TTB_FLD = (. + 0x8000);	/* MMU L1 table for TAs: 16kB */
+		SEC_TA_MMU_TTB_SLD = (. + 0xC000);	/* MMU L2 tables for TAs: 16*1kB (16MB mappable) */
+ SEC_MMU_TTB_END = (. + 0x10000); /* end of teecore and TAs MMU tables */
+ } > MMU_MEM
+}
+
+ /* 64kB aligned heap start between BSS and secure stacks */
+ PROVIDE(teecore_heap_start = __bss_end);
+ PROVIDE(teecore_heap_end = __stacks_start);
+
diff --git a/core/arch/arm32/plat-orly2/tz_a9init.S b/core/arch/arm32/plat-orly2/tz_a9init.S
new file mode 100644
index 00000000000..1dad99c787e
--- /dev/null
+++ b/core/arch/arm32/plat-orly2/tz_a9init.S
@@ -0,0 +1,425 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Entry points for the A9 inits, A9 revision specific or not.
+ * It is assumed that no stack is available when these routines are called.
+ * It is assumed that each routine is called with the return address in LR
+ * and with ARM registers R0, R1, R2 and R3 being scratchable.
+ */
+
+.global arm_secboot_identify_cpu
+.global arm_secboot_early
+.global arm_secboot_errata
+.global arm_cl2_config
+.global arm_cl2_enable
+.global arm_secboot_late
+
+#define CPUID_A9_R2P2_H 0x412f
+#define CPUID_A9_R2P2_L 0xc092
+
+#define CPUID_A9_R3P0_H 0x413f
+#define CPUID_A9_R3P0_L 0xc090
+
+.section .text
+.balign 4
+.code 32
+
+/*
+ * arm_secboot_identify_cpu - identify and save CPU version
+ *
+ * Uses scratch registers R0-R3.
+ * No stack usage.
+ * LR stores the return address.
+ * Traps the CPU in case of error.
+ */
+.func arm_secboot_identify_cpu
+arm_secboot_identify_cpu:
+
+ mrc p15, 0, r0, c0, c0, 0 /* read A9 ID */
+ movw r1, #CPUID_A9_R2P2_L
+ movt r1, #CPUID_A9_R2P2_H
+ cmp r0, r1
+ beq _ident_a9_r2p2
+ movw r1, #CPUID_A9_R3P0_L
+ movt r1, #CPUID_A9_R3P0_H
+ cmp r0, r1
+ beq _ident_a9_r3p0
+ b . /* TODO: unknown id: reset? log? */
+
+_ident_a9_r2p2:
+ /* unsupported version. TODO: needs to be supported */
+ b . /* TODO: unknown id: reset? log? */
+
+_ident_a9_r3p0:
+ mov pc, lr /* back to tzinit */
+
+.endfunc
+
+/*
+ * Memory Cache Level2 Configuration Function
+ *
+ * Uses scratch registers R0-R3.
+ * No stack usage.
+ * LR stores the return address.
+ * Traps the CPU in case of error.
+ */
+.func arm_cl2_config
+arm_cl2_config:
+
+ mrc p15, 0, r0, c0, c0, 0 /* read A9 ID */
+ movw r1, #CPUID_A9_R3P0_L
+ movt r1, #CPUID_A9_R3P0_H
+ cmp r0, r1
+ beq _config_l2cc_r3p0
+ b . /* TODO: unknown id: reset? log? */
+
+_config_l2cc_r3p0:
+ /*
+ * reg1_tag_ram_control (cache_l2x0.c)
+ * bit[10:8]:1 - 2 cycle of write accesses latency
+ * bit[6:4]:1 - 2 cycle of read accesses latency
+ * bit[2:0]:1 - 2 cycle of setup latency
+ */
+ movw r0, #0x2108
+ movt r0, #0xFFFE
+ ldr r2, [r0]
+ movw r1, #0xf888
+ movt r1, #0xffff
+ and r2,r2,r1
+ movw r1, #0xf999
+ movt r1, #0xffff
+ orr r2,r2,r1
+ str r2, [r0]
+
+ /*
+ * reg1_data_ram_control (cache_l2x0.c)
+ * bit[10:8]:2 - 3 cycle of write accesses latency
+ * bit[6:4]:2 - 3 cycle of read accesses latency
+ * bit[2:0]:2 - 3 cycle of setup latency
+ */
+ movw r0, #0x210C
+ movt r0, #0xFFFE
+ ldr r2, [r0]
+ movw r1, #0xf888
+ movt r1, #0xffff
+ and r2,r2,r1
+ movw r1, #0xfaaa
+ movt r1, #0xffff
+ orr r2,r2,r1
+ str r2, [r0]
+
+ /*
+ * reg1_aux_control
+ */
+ movw r0, #0x2104
+ movt r0, #0xFFFE
+ movw r1, #0x0801
+ movt r1, #0x3C48
+ str r1, [r0]
+
+ /*
+ * reg15_prefetch_ctrl
+ */
+ movw r0, #0x2F60
+ movt r0, #0xFFFE
+ movw r1, #0x0000
+ movt r1, #0x3100
+ str r1, [r0]
+
+ /*
+ * reg15_power_ctrl
+ */
+ movw r0, #0x2F80
+ movt r0, #0xFFFE
+ movw r1, #0x0003
+ movt r1, #0x0000
+ str r1, [r0]
+
+	/* invalidate all cache ways (PL310_BASE + invalidate-by-way offset = 0xFFFE2000 + 0x77C) */
+ movw r0, #0x277C
+ movt r0, #0xFFFE
+ movw r1, #0x00FF
+ movt r1, #0x0000
+ str r1, [r0]
+
+ mov pc, lr
+
+.endfunc
+/* End of arm_cl2_config */
+
+
+/*
+ * Memory Cache Level2 Enable Function
+ *
+ * Uses scratch registers R0-R3.
+ * No stack usage.
+ * LR stores the return address.
+ * Traps the CPU in case of error.
+ */
+.func arm_cl2_enable
+arm_cl2_enable:
+
+ /* Enable L2 ctrl (PL310_BASE + L2x0_CTRL = 0xFFFE2000 + 0x100) --> only set lsb bit */
+ movw r0, #0x2100
+ movt r0, #0xFFFE
+ movw r1, #0x0001
+ movt r1, #0x0000
+ str r1, [r0]
+
+ mov pc, lr
+
+.endfunc
+/* End of arm_cl2_enable */
+
+/*
+ * Cortex A9 early configuration
+ *
+ * Uses scratch registers R0-R3.
+ * No stack usage.
+ * LR stores the return address.
+ * Traps the CPU in case of error.
+ */
+.func arm_secboot_early
+arm_secboot_early:
+
+ /* only r3p0 is supported */
+ mrc p15, 0, r0, c0, c0, 0 /* read A9 ID */
+ movw r1, #CPUID_A9_R3P0_L
+ movt r1, #CPUID_A9_R3P0_H
+ cmp r0, r1
+ beq _early_a9_r3p0
+ b . /* TODO: unknown id: reset? log? */
+
+_early_a9_r3p0:
+ /*
+ * Register SCTLR
+ * RR Bit[14]=1
+ */
+ movw r0, #0x4000
+ movt r0, #0x0000
+ mcr p15, 0, r0, c1, c0, 0
+
+ /*
+ * Register ACTLR
+ * FW Bit[0]=1
+ * WFLZ Bit[3]=1
+ * SMP Bit[6]=1
+ */
+ movw r0, #0x0049
+ movt r0, #0x0000
+ mcr p15, 0, r0, c1, c0, 1
+
+ /*
+ * Register NSACR
+ * TL Bit[17]=1
+ * CPU11 Bit[11]=1
+ * CPU10 Bit[10]=1
+ */
+ movw r0, #0x0C00
+ movt r0, #0x0002
+ mcr p15, 0, r0, c1, c1, 2
+
+ /*
+ * Register PCR
+ * ECG Bit[0]=1
+ */
+ movw r0, #0x0000
+ movt r0, #0x0001
+ mcr p15, 0, r0, c15, c0, 0
+
+ /*
+ * GIC configuration
+ */
+ /*
+ * Register ICDISR0
+ */
+ movw r0, #0x1080
+ movt r0, #0xFFFE
+ mov r1, #0xFFFFFFFF
+ str r1, [r0]
+
+ /*
+ * Register ICCPMR
+ */
+ movw r0, #0x0104
+ movt r0, #0xFFFE
+ mov r1, #0xFFFFFFFF
+ str r1, [r0]
+
+ mov pc, lr /* back to tzinit */
+
+.endfunc
+
+/*
+ * arm_secboot_errata - arm errata, specific per core revision
+ *
+ * Uses scratch registers R0-R3.
+ * No stack usage.
+ * LR stores the return address.
+ * Traps the CPU in case of error.
+ */
+.func arm_secboot_errata
+arm_secboot_errata:
+
+ mrc p15, 0, r0, c0, c0, 0 /* read A9 ID */
+ movw r1, #CPUID_A9_R2P2_L
+ movt r1, #CPUID_A9_R2P2_H
+ cmp r0, r1
+ beq _errata_a9_r2p2
+ movw r1, #CPUID_A9_R3P0_L
+ movt r1, #CPUID_A9_R3P0_H
+ cmp r0, r1
+ beq _errata_a9_r3p0
+ b . /* TODO: unknown id: reset? log? */
+
+_errata_a9_r2p2:
+ /* unsupported version. TODO: needs to be supported */
+ b . /* TODO: unknown id: reset? log? */
+
+_errata_a9_r3p0:
+ mov pc, lr
+
+.endfunc
+
+/*
+ * A9 secure config, needed only from a single core
+ *
+ * Uses scratch registers R0-R3.
+ * No stack usage.
+ * LR stores the return address.
+ * Traps the CPU in case of error.
+ */
+.func arm_secboot_late
+arm_secboot_late:
+
+ /*
+ * Snoop Control Unit configuration
+ *
+ * SCU is enabled with filtering off.
+ * Both Secure/Unsecure can access SCU and timers
+ *
+	 * 0x00 SCUControl = 0x00000060 (TODO: should be 0x5 - to clean up)
+	 * 0x04 SCUConfiguration = ??? (TODO: to clean up)
+	 * 0x0C SCUInvalidateAll (Secure cfg)
+	 * 0x40 FilteringStartAddress = 0x40000000
+	 * 0x44 FilteringEndAddress = 0x80000000
+ * 0x50 SCUAccessControl
+ * 0x54 SCUSecureAccessControl
+ */
+
+ /*
+	 * Register SAC: both CPUs (in Secure state) may access the SCU
+ */
+ movw r0, #0x0050 /* LSB */
+ movt r0, #0xFFFE /* MSB */
+ movw r1, #0x0003
+ movt r1, #0x0000
+ str r1, [r0]
+
+ /*
+	 * Register SNSAC: both non-secure CPUs may access the SCU and the private and global timers
+ */
+ movw r0, #0x0054 /* LSB */
+ movt r0, #0xFFFE /* MSB */
+ movw r1, #0x0333
+ movt r1, #0x0000
+ str r1, [r0]
+
+ /*
+ * Register SFEA
+ */
+ movw r0, #0x0044 /* LSB */
+ movt r0, #0xFFFE /* MSB */
+ movw r1, #0x0000
+ movt r1, #0x8000
+ str r1, [r0]
+
+ /*
+ * Register SFSA
+ */
+ movw r0, #0x0040 /* LSB */
+ movt r0, #0xFFFE /* MSB */
+ movw r1, #0x0000
+ movt r1, #0x4000
+ str r1, [r0]
+
+ /*
+ * Register SCU_CTRL
+ */
+ movw r0, #0x0000 /* LSB */
+ movt r0, #0xFFFE /* MSB */
+ movw r1, #0x0065
+ movt r1, #0x0000
+ str r1, [r0]
+
+ /*- GIC secure configuration ---*/
+
+ /*
+	 * Registers ICDISR[1-31] (ICDISR0 is configured in arm_secboot_early)
+ */
+ movw r0, #0x1084
+ movt r0, #0xFFFE
+ mov r2, #0xFFFFFFFF
+ mov r1, #31 /* Nb of loop rounds */
+loop_1:
+ str r2, [r0]
+ add r0, #4
+ sub r1, r1, #1
+ cmp r1, #0
+ bne loop_1
+
+
+ /*- L2 Memory Controller (Note: should be done with NS=1) ---*/
+
+ /*
+ * reg12_addr_filtering_end
+ */
+ movw r0, #0x2C04
+ movt r0, #0xFFFE
+ movw r1, #0x0000
+ movt r1, #0x8000
+ str r1, [r0]
+
+ /*
+ * reg12_addr_filtering_start
+ */
+ movw r0, #0x2C00
+ movt r0, #0xFFFE
+ movw r1, #0x0001
+ movt r1, #0x4000
+ str r1, [r0]
+
+ /* Allow NSec to manage FIQ/Imprecise abort */
+ mrc p15, 0, r0, c1, c1, 0 /* read Secure Configuration Register */
+ orr r0, r0, #0x30 /* SCR[FW]=1, SCR[AW]=1 */
+ mcr p15, 0, r0, c1, c1, 0 /* write updated value in Secure Configuration Register */
+
+ mov pc, lr
+
+.endfunc
+
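The movw/movt pairs above assemble 32-bit MMIO addresses in the 0xFFFExxxx private peripheral region (SCU at offset 0x0000, GIC at 0x0100/0x1000, PL310 at 0x2000). A C-level sketch of the SCU access setup, for reference (the base address is inferred from the immediates above; register offsets are from the Cortex-A9 MPCore TRM):

    #include <stdint.h>

    #define PERIPH_BASE 0xFFFE0000u             /* from the movt #0xFFFE above */
    #define SCU_SAC     (PERIPH_BASE + 0x50)    /* SCU Access Control */
    #define SCU_SNSAC   (PERIPH_BASE + 0x54)    /* SCU Non-secure Access Control */

    static inline void write32(uint32_t val, uintptr_t addr)
    {
            *(volatile uint32_t *)addr = val;   /* volatile: real device write */
    }

    static void scu_access_cfg(void)
    {
            write32(0x00000003, SCU_SAC);       /* both CPUs may access the SCU */
            write32(0x00000333, SCU_SNSAC);     /* NS access to SCU + timers */
    }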
diff --git a/core/arch/arm32/plat-orly2/tz_sinit.S b/core/arch/arm32/plat-orly2/tz_sinit.S
new file mode 100644
index 00000000000..f25b675995a
--- /dev/null
+++ b/core/arch/arm32/plat-orly2/tz_sinit.S
@@ -0,0 +1,294 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Include(s)
+ */
+#include
+#include
+
+/*
+ * Booting on 1 or 2 cores?
+ * - Currently teecore boots on both cores. It expects 2 cores and
+ *   synchronises their execution during the boot sequence, mainly to
+ *   enable the MMU and caches in a coherent way.
+ *
+ * - If FORCE_UNIPROC is set (internal switch, below), once both cores are
+ *   booted, only the primary CPU will branch to the next boot stage.
+ */
+
+/*
+ * Booting linux
+ * -------------
+ *
+ * Current tz init routine is designed to init Secure Mode/Monitor, switch to
+ * NSec world and boot a linux kernel image pre-loaded in RAM.
+ *
+ * 1) jumping into linux entries
+ *
+ * physical address of linux entry routine =
+ * virtual address of the linux entry routine read from vmlinux
+ * - linux configuration value for CONFIG_PAGE_OFFSET
+ * + physical address where linux is loaded by bootloader or GDB
+ *
+ * PRIMARY_STARTUP_PHYS is set by the build env and defines the linux boot
+ * CPU physical entry address. SECONDARY_STARTUP_PHYS is set by the build
+ * env and defines the linux secondary CPUs' physical entry address.
+ *
+ * 2) linux boot arguments (CONFIG_TEE_GDB_BOOT)
+ *
+ * In the current setup (GDB boot), when we get here, DDR is ready, the linux
+ * and tee images are loaded in DDR, and the linux boot arguments are stored
+ * in DDR. CPU0 registers are: R0 = 0, R1 = linux machine ID,
+ * R2 = devtree/atags address.
+ *
+ * With the current GDB boot, the contents of registers R0, R1 and R2 must be
+ * preserved (backed up) and restored once the Secure inits are done and the
+ * CPUs jump to the linux kernel entry in NSec mode. To do so, CPU0 saves
+ * R0/R1/R2 to R10/R11/R12, which all init code preserves until a stack is set up.
+ */
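
With the SDK2 values from system_config.in, the address translation works out as in the sketch below (the virtual entry address is assumed here for illustration; the real one is read from vmlinux):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t entry_virt = 0x40008000;   /* assumed vmlinux entry address */
            uint32_t page_offset = 0x40000000;  /* LINUX_PAGE_OFFSET (SDK2) */
            uint32_t load_addr = 0x40000000;    /* LINUX_LOAD_ADDR (SDK2) */

            uint32_t entry_phys = entry_virt - page_offset + load_addr;
            printf("PRIMARY_STARTUP_PHYS = 0x%08x\n", entry_phys); /* 0x40008000 */
            return 0;
    }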
+
+/* Image */
+.equ stext, STEXT
+.equ stm_secondary_startup, STM_SECONDARY_STARTUP
+
+/* Global Functions */
+.global tz_sinit
+
+
+/*
+ * Semaphore to let the primary CPU run its inits
+ * before the secondary CPUs run their own inits
+.equ SEM_NOT_READY, 2
+.equ SEM_CPU0_READY, 4
+.equ SEM_CPU1_READY, 8
+
+.section .data
+.balign 4
+
+sem_cpu_sync:
+ .word SEM_NOT_READY;
+
+#ifdef CONFIG_TEE_GDB_BOOT
+gdb_bootargs:
+ .word 0
+ .word 0
+ .word 0
+ .word 0
+#endif
+
+ .section .vector_table
+ .align 5
+ .code 32
+
+initial_vector:
+ b tz_sinit
+ b . /* Undef */
+ b . /* Syscall */
+ b . /* Prefetch abort */
+ b . /* Data abort */
+ b . /* Reserved */
+ b . /* IRQ */
+ b . /* FIQ */
+
+/*
+ * Vector Table Initialization Function
+ *
+ * Uses scratch registers R0-R3.
+ * No stack usage; the stack is set up later by tz_sinit.
+ * LR stores the return address.
+ */
+.section .text
+.code 32
+.balign 4
+
+secure_init_vector_table:
+ /* Set initial vector table base registers (VBAR and MVBAR) */
+ ldr r0, =initial_vector
+
+ /* write VBAR (Secure Mode Vector Base Address Register) */
+ mcr p15, 0, r0, c12, c0, 0
+
+ /* write MVBAR (Monitor Vector Base Address Register) */
+ mcr p15, 0, r0, c12, c0, 1
+
+ bx lr
+/* End of secure_init_vector_table */
+
+
+/*
+ * TrustZone Initialization Function
+ */
+.section .text
+.code 32
+.balign 4
+
+tz_sinit:
+
+#ifdef CONFIG_TEE_GDB_BOOT
+ /* boot preloaded linux: save linux boot args from GDB */
+ mov r10, r0
+ mov r11, r1
+ mov r12, r2
+ /* all following routines, until stack is setup, preserve R10/R11/R12 */
+#endif
+
+ /*
+ * Primary CPU and secondary CPUs internal initialization
+ */
+ bl arm_secboot_identify_cpu
+ bl arm_secboot_early
+ bl arm_secboot_errata
+ bl secure_init_vector_table
+
+ /* Setup tmp stack */
+ bl get_core_pos
+ lsl r0, #2
+ ldr r1, =stack_tmp_top
+ ldr sp, [r1, r0]
+
+ /*
+	 * Case primary CPU: proceed with the common config
+	 * Case secondary CPUs: wait until CPU0 is done
+ */
+ bl secure_get_cpu_id
+ cmp r0, #CPU_ID0
+ bne _BootCPU1
+
+_BootCPU0:
+
+ /* complete ARM secure MP common configuration */
+ bl arm_secboot_late
+
+#ifdef CONFIG_TEE_GDB_BOOT
+ ldr r0, =gdb_bootargs
+ str r10, [r0, #0]
+ str r11, [r0, #4]
+ str r12, [r0, #8]
+#endif
+
+ /* Cache/MMU Initialization */
+ bl arm_cl2_config
+ bl arm_cl1_d_invbysetway
+ bl arm_cl2_invbyway
+ bl arm_cl2_enable
+
+ bl secure_mmu_init
+ bl mmu_enable
+ bl mmu_enable_icache
+ bl mmu_enable_dcache
+
+ /* init BSS */
+init_bss:
+ ldr r0, =__bss_start
+ ldr r2, =__bss_end
+ sub r2, r2, r0
+ ldr r1, =0
+ bl memset
+
+ /* Initialize thread handling and secure monitor */
+ ldr r0, =stext
+ bl main_init
+
+ /* release secondary CPUs */
+ ldr r0,=sem_cpu_sync
+ ldr r1, =SEM_CPU0_READY
+ str r1, [r0]
+ dsb
+
+ /* Flush all caches before secondary CPUs setup */
+ bl arm_cl1_d_cleaninvbysetway
+ bl arm_cl2_cleaninvbyway
+ bl arm_cl1_d_cleaninvbysetway
+
+ sev
+
+	/* Primary CPU waits for the secondary */
+ ldr r0,=sem_cpu_sync
+ mov r2, #SEM_CPU1_READY
+_wait_cpu1:
+ ldr r1, [r0]
+ cmp r1, r2
+ wfene
+ bne _wait_cpu1
+
+ /* TODO: call teecore inits */
+
+ /* ask monitor to enter NSec from TEE boot sequence */
+#ifdef CONFIG_TEE_GDB_BOOT
+ /* restore linux boot arguments */
+ ldr r4, =gdb_bootargs
+ ldr r0, [r4, #0]
+ ldr r1, [r4, #4]
+ ldr r2, [r4, #8]
+#endif
+
+ smc #0
+ b thread_recv_smc_call
+
+_BootCPU1:
+
+	/* secondary CPUs wait until the primary boot CPU has completed the MMU/BSS inits */
+ ldr r0,=sem_cpu_sync
+ mov r2, #SEM_CPU0_READY
+_wait_cpu0:
+ ldr r1, [r0]
+ cmp r1, r2
+ wfene
+ bne _wait_cpu0
+
+ bl arm_cl1_d_cleaninvbysetway
+ bl arm_cl2_cleaninvbyway
+
+ bl secure_mmu_init
+ bl mmu_enable
+
+ bl mmu_enable_icache
+ bl mmu_enable_dcache
+
+	/* synchronise with CPU0 */
+ ldr r0,=sem_cpu_sync
+ ldr r1, =SEM_CPU1_READY
+ str r1, [r0]
+ dsb
+ sev
+
+	/* Uncomment next line to force booting NSec on 1 core only */
+//#define FORCE_UNIPROC
+#ifdef FORCE_UNIPROC
+ b .
+#endif
+
+ /* Initialize thread handling and secure monitor */
+ ldr r0, =stm_secondary_startup
+ bl main_init
+
+ smc #0
+ b thread_recv_smc_call
+
+/* End of tz_sinit */
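The sem_cpu_sync handshake above amounts to two spin-waits on a shared word, with sev/wfe used to sleep between polls and dsb to order the store. A C-level sketch of the protocol (C11 atomics stand in for the barrier and event instructions, which have no portable equivalent):

    #include <stdatomic.h>

    #define SEM_NOT_READY  2
    #define SEM_CPU0_READY 4
    #define SEM_CPU1_READY 8

    static atomic_int sem_cpu_sync = SEM_NOT_READY;

    static void cpu0_boot(void)
    {
            /* ... primary-only inits: MMU, BSS, main_init() ... */
            atomic_store(&sem_cpu_sync, SEM_CPU0_READY);    /* release CPU1 */
            while (atomic_load(&sem_cpu_sync) != SEM_CPU1_READY)
                    ;   /* the asm sleeps in wfe here instead of busy-polling */
    }

    static void cpu1_boot(void)
    {
            while (atomic_load(&sem_cpu_sync) != SEM_CPU0_READY)
                    ;   /* wait until CPU0 has set up MMU/BSS */
            /* ... secondary inits: MMU enable, main_init() ... */
            atomic_store(&sem_cpu_sync, SEM_CPU1_READY);    /* signal CPU0 */
    }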
diff --git a/core/arch/arm32/sm/sm.c b/core/arch/arm32/sm/sm.c
new file mode 100644
index 00000000000..835fa2fb951
--- /dev/null
+++ b/core/arch/arm32/sm/sm.c
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include
+
+#include
+
+#include
+
+
+static struct sm_nsec_ctx sm_nsec_ctx[CFG_TEE_CORE_NB_CORE];
+static struct sm_sec_ctx sm_sec_ctx[CFG_TEE_CORE_NB_CORE];
+
+struct sm_nsec_ctx *sm_get_nsec_ctx(void)
+{
+ return &sm_nsec_ctx[get_core_pos()];
+}
+
+struct sm_sec_ctx *sm_get_sec_ctx(void)
+{
+ return &sm_sec_ctx[get_core_pos()];
+}
diff --git a/core/arch/arm32/sm/sm_asm.S b/core/arch/arm32/sm/sm_asm.S
new file mode 100644
index 00000000000..6747159e8a0
--- /dev/null
+++ b/core/arch/arm32/sm/sm_asm.S
@@ -0,0 +1,215 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include
+#include
+#include
+#include
+
+LOCAL_FUNC sm_save_modes_regs , :
+	/* User mode registers have to be saved from system mode */
+ cps #CPSR_MODE_SYS
+ stm r0!, {sp, lr}
+
+ cps #CPSR_MODE_IRQ
+ mrs r2, spsr
+ stm r0!, {r2, sp, lr}
+
+ cps #CPSR_MODE_SVC
+ mrs r2, spsr
+ stm r0!, {r2, sp, lr}
+
+ cps #CPSR_MODE_ABT
+ mrs r2, spsr
+ stm r0!, {r2, sp, lr}
+
+ cps #CPSR_MODE_UND
+ mrs r2, spsr
+ stm r0!, {r2, sp, lr}
+
+ cps #CPSR_MODE_MON
+ ldm r1, {r2-r3} /* Load SPSR and LR from the stack */
+ stm r0!, {r2-r3} /* Store SPSR and LR in context */
+ bx lr
+END_FUNC sm_save_modes_regs
+
+/* Restores the mode specific registers */
+LOCAL_FUNC sm_restore_modes_regs , :
+	/* User mode registers have to be restored from system mode */
+ cps #CPSR_MODE_SYS
+ ldm r0!, {sp, lr}
+
+ cps #CPSR_MODE_IRQ
+ ldm r0!, {r2, sp, lr}
+ msr spsr, r2
+
+ cps #CPSR_MODE_SVC
+ ldm r0!, {r2, sp, lr}
+ msr spsr, r2
+
+ cps #CPSR_MODE_ABT
+ ldm r0!, {r2, sp, lr}
+ msr spsr, r2
+
+ cps #CPSR_MODE_UND
+ ldm r0!, {r2, sp, lr}
+ msr spsr, r2
+
+ cps #CPSR_MODE_MON
+ ldm r0!, {r2-r3} /* Load SPSR and LR from context */
+ stm r1, {r2-r3} /* Store SPSR and LR in stack */
+ bx lr
+END_FUNC sm_restore_modes_regs
+
+LOCAL_FUNC sm_smc_entry , :
+ srsdb sp!, #CPSR_MODE_MON
+ push {r0-r3}
+/* Positions relative to stack pointer */
+#define SMC_ENTRY_R0R3_OFFS 0
+#define SMC_ENTRY_SRS_OFFS (4 * 4 + SMC_ENTRY_R0R3_OFFS)
+
+ /* Clear the exclusive monitor */
+ clrex
+
+	/* Find out if we're doing a secure or non-secure entry */
+ read_scr r1
+ tst r1, #SCR_NS
+ bne .smc_ret_to_sec
+
+.smc_ret_to_nsec:
+ /* Save secure context */
+ bl sm_get_sec_ctx
+ add r1, sp, #SMC_ENTRY_SRS_OFFS /* Where srsdb wrote */
+ bl sm_save_modes_regs
+
+ /* Restore non-secure context */
+ bl sm_get_nsec_ctx
+ add r1, sp, #SMC_ENTRY_SRS_OFFS /* Where srsdb wrote */
+ bl sm_restore_modes_regs
+ ldm r0!, {r4-r12}
+
+ /* Update SCR */
+ read_scr r0
+ orr r0, r0, #(SCR_NS | SCR_FIQ) /* Set NS and FIQ bit in SCR */
+ write_scr r0
+
+ b .smc_exit
+
+.smc_ret_to_sec:
+ bic r1, r1, #(SCR_NS | SCR_FIQ)/* Clear NS and FIQ bit in SCR */
+ write_scr r1
+
+ /* Save non-secure context */
+ push {r12, lr}
+ bl sm_get_nsec_ctx
+ pop {r12, lr}
+ add r1, sp, #SMC_ENTRY_SRS_OFFS /* Where srsdb wrote */
+ bl sm_save_modes_regs
+ stm r0!, {r4-r12}
+
+ /* Restore secure context */
+ bl sm_get_sec_ctx
+ add r1, sp, #SMC_ENTRY_SRS_OFFS /* Where srsdb wrote */
+ bl sm_restore_modes_regs
+
+.smc_exit:
+ pop {r0-r3}
+ rfefd sp!
+END_FUNC sm_smc_entry
+
+/*
+ * FIQ handling
+ *
+ * Saves CPU context in per core structure sm_pre_fiq_ctx which
+ * later will be restored in the smc handler when handling a return
+ * from FIQ.
+ */
+LOCAL_FUNC sm_fiq_entry , :
+ /* FIQ has a +4 offset for lr compared to preferred return address */
+ sub lr, lr, #4
+ srsdb sp!, #CPSR_MODE_MON
+ push {r0-r3}
+/* Positions relative to stack pointer */
+#define FIQ_ENTRY_R0R3_OFFS 0
+#define FIQ_ENTRY_SRS_OFFS	(4 * 4 + FIQ_ENTRY_R0R3_OFFS)
+
+ /* Update SCR */
+ read_scr r1
+	bic	r1, r1, #(SCR_NS | SCR_FIQ)	/* Clear NS and FIQ bits in SCR */
+ write_scr r1
+
+ /* Save non-secure context */
+ push {r12, lr}
+ bl sm_get_nsec_ctx
+ pop {r12, lr}
+ add r1, sp, #FIQ_ENTRY_SRS_OFFS /* Where srsdb wrote */
+ bl sm_save_modes_regs
+ stm r0!, {r4-r12}
+
+ /* Restore secure context */
+ bl sm_get_sec_ctx
+ add r1, sp, #FIQ_ENTRY_SRS_OFFS /* Where srsdb wrote */
+ bl sm_restore_modes_regs
+
+ /*
+ * FIQ handling (return from TEESMC_CALL_HANDLE_FIQ) has to return
+ * supplied R1-R4 in R0-R3 since R0-R3 aren't saved anywhere else.
+ */
+ pop {r1-r4} /* R0-R3 pushed at entry */
+ ldr r0, =TEESMC32_CALL_HANDLE_FIQ /* Let Trusted OS handle FIQ */
+
+ rfefd sp!
+END_FUNC sm_fiq_entry
+
+ .align 5
+LOCAL_FUNC sm_vect_table , :
+ b . /* Reset */
+ b . /* Undefined instruction */
+ b sm_smc_entry /* Secure monitor call */
+ b . /* Prefetch abort */
+ b . /* Data abort */
+ b . /* Reserved */
+ b . /* IRQ */
+ b sm_fiq_entry /* FIQ */
+END_FUNC sm_vect_table
+
+/* void sm_init(vaddr_t stack_pointer); */
+FUNC sm_init , :
+ push {r0, lr}
+
+ /* Set monitor stack */
+ mrs r1, cpsr
+ cps #CPSR_MODE_MON
+ mov sp, r0
+ msr cpsr, r1
+
+ /* Set monitor vector (MVBAR) */
+ ldr r0, =sm_vect_table
+ write_mvbar r0
+
+ pop {r0, pc}
+END_FUNC sm_init
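At C level, sm_smc_entry implements a symmetric swap: save the banked registers of the world that trapped into its per-core context, restore the other world's context, and flip SCR.NS. A data-flow sketch (the context layout is illustrative; the real one is fixed by the stm/ldm order in sm_save_modes_regs/sm_restore_modes_regs):

    #include <stdbool.h>
    #include <stdint.h>

    /* Illustrative per-world register file */
    struct world_ctx {
            uint32_t sys_sp, sys_lr;
            uint32_t irq_spsr, irq_sp, irq_lr;
            uint32_t svc_spsr, svc_sp, svc_lr;
            uint32_t abt_spsr, abt_sp, abt_lr;
            uint32_t und_spsr, und_sp, und_lr;
            uint32_t mon_spsr, mon_lr;
            uint32_t r4_r12[9];
    };

    static struct world_ctx sec_ctx, nsec_ctx;

    static void smc_entry(bool from_nsec, struct world_ctx *live)
    {
            if (from_nsec) {
                    nsec_ctx = *live;   /* save non-secure banked registers */
                    *live = sec_ctx;    /* restore secure banked registers */
                    /* ...then SCR.NS and SCR.FIQ are cleared: FIQs trap to monitor */
            } else {
                    sec_ctx = *live;    /* save secure banked registers */
                    *live = nsec_ctx;   /* restore non-secure banked registers */
                    /* ...then SCR.NS and SCR.FIQ are set before returning */
            }
    }

    int main(void)
    {
            struct world_ctx live = { 0 };

            smc_entry(true, &live);     /* non-secure world issued an SMC */
            smc_entry(false, &live);    /* secure world returns via SMC */
            return 0;
    }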
diff --git a/core/arch/arm32/sm/sub.mk b/core/arch/arm32/sm/sub.mk
new file mode 100644
index 00000000000..a4f055f2727
--- /dev/null
+++ b/core/arch/arm32/sm/sub.mk
@@ -0,0 +1,5 @@
+srcs-y += tee_mon.c
+cflags-tee_mon.c-y += -Wno-unused-parameter
+
+srcs-y += sm_asm.S
+srcs-y += sm.c
diff --git a/core/arch/arm32/sm/tee_mon.c b/core/arch/arm32/sm/tee_mon.c
new file mode 100644
index 00000000000..c67cc6d764e
--- /dev/null
+++ b/core/arch/arm32/sm/tee_mon.c
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include
+#include
+#include /* required for inits */
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+#ifndef WITH_UART_DRV
+#include
+#endif
+
+#define TEE_MON_MAX_NUM_ARGS 8
+
+TEE_Result init_teecore(void)
+{
+ static int is_first = 1;
+ unsigned long a, s;
+
+	/* (DEBUG) run the inits only once, at the 1st TEE service, when the UART is set up */
+ if (!is_first)
+ return TEE_SUCCESS;
+ is_first = 0;
+
+#ifndef WITH_UART_DRV
+ /* UART tracing support */
+ asc_init();
+ IMSG("teecore: uart trace init");
+#endif
+
+ /* core malloc pool init */
+#ifdef CFG_TEE_MALLOC_START
+ a = CFG_TEE_MALLOC_START;
+ s = CFG_TEE_MALLOC_SIZE;
+#else
+ a = (unsigned long)&teecore_heap_start;
+ s = (unsigned long)&teecore_heap_end;
+ a = ((a + 1) & ~0x0FFFF) + 0x10000; /* 64kB aligned */
+ s = s & ~0x0FFFF; /* 64kB aligned */
+ s = s - a;
+#endif
+ IMSG("teecore heap: paddr=0x%lX size=0x%lX (%ldkB)", a, s, s / 1024);
+ malloc_init((void *)a, s);
+
+	/* init support for future mapping of TAs */
+ tee_mmu_kmap_init();
+ teecore_init_ta_ram();
+ teecore_init_pub_ram();
+
+ /* Libtomcrypt initialization */
+ tee_ltc_init();
+
+ IMSG("teecore inits done");
+ return TEE_SUCCESS;
+}
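The 64kB alignment arithmetic in init_teecore() rounds the heap start up to the next 64kB boundary and the heap end down. A worked sketch with assumed addresses:

    #include <stdio.h>

    int main(void)
    {
            unsigned long a = 0xBF812345;   /* assumed __bss_end */
            unsigned long s = 0xBF8D0000;   /* assumed __stacks_start */

            a = ((a + 1) & ~0x0FFFFul) + 0x10000;   /* round start up: 0xBF820000 */
            s = s & ~0x0FFFFul;                     /* round end down: 0xBF8D0000 */
            s = s - a;                              /* size: 0xB0000 = 704kB */
            printf("heap: paddr=0x%lX size=0x%lX\n", a, s);
            return 0;
    }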
diff --git a/core/arch/arm32/sta/core_dirty_tests.c b/core/arch/arm32/sta/core_dirty_tests.c
new file mode 100644
index 00000000000..44e4d5c6dc4
--- /dev/null
+++ b/core/arch/arm32/sta/core_dirty_tests.c
@@ -0,0 +1,246 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include
+#include
+#include
+
+#include "core_dirty_tests.h"
+
+/*
+ * Define the LOG macro one way or the other to enable/disable the dirty
+ * test traces:
+ *
+ * #define LOG SMSG
+ * #define LOG(...)
+ */
+#define LOG(...)
+
+static int dirty_test_division(void);
+static int dirty_test_malloc(void);
+
+/* exported entry points for some basic test */
+TEE_Result core_dirty_tests(uint32_t nParamTypes,
+ TEE_Param pParams[TEE_NUM_PARAMS])
+{
+ (void)&nParamTypes;
+ (void)&pParams;
+
+ if (dirty_test_division() || dirty_test_malloc()) {
+ EMSG("some dirty_test_xxx failed! you should enable local LOG");
+ return TEE_ERROR_GENERIC;
+ }
+ return TEE_SUCCESS;
+}
+
+/* test division support. resulting trace shall be manually checked */
+static int dirty_test_division(void)
+{
+ signed a, b, c, d;
+ bool r;
+ int ret = 0;
+
+ LOG("");
+ LOG("division tests (division and modulo):");
+ /* get some unpredicted values to prevent compilation optimizations: */
+ /* => use the stack address */
+
+ LOG("- test with unsigned small integers:");
+ a = (signed)((unsigned)(&a) & 0xFFFFF);
+ b = (signed)((unsigned)(&b) & 0x00FFF) + 1;
+ c = a / b;
+ d = a % b;
+ r = ((b * c + d) == a);
+ if (!r)
+ ret = -1;
+ LOG(" 0x%08x / 0x%08x = %u / %u = %u = 0x%x)",
+ (unsigned)a, (unsigned)b, (unsigned)a, (unsigned)b, (unsigned)c,
+ (unsigned)c);
+ LOG(" 0x%08x %% 0x%08x = %u %% %u = %u = 0x%x)", (unsigned)a,
+ (unsigned)b, (unsigned)a, (unsigned)b, (unsigned)d, (unsigned)d);
+ LOG(" check results => %s", r ? "ok" : "FAILED !!!");
+ LOG("");
+
+ LOG("- test with signed small integers, negative numerator:");
+ a = (signed)(&a);
+ b = (signed)((unsigned)(&b) & 0x00FFF) - 1;
+ c = a / b;
+ d = a % b;
+ r = ((b * c + d) == a);
+ if (!r)
+ ret = -1;
+ LOG(" 0x%08x / 0x%08x = %d / %d = %d = 0x%x)",
+ (unsigned)a, (unsigned)b, (signed)a, (signed)b, (signed)c,
+ (unsigned)c);
+ LOG(" 0x%08x %% 0x%08x = %d %% %d = %d = 0x%x)", (unsigned)a,
+ (unsigned)b, (signed)a, (signed)b, (signed)d, (unsigned)d);
+ LOG(" check results => %s", r ? "ok" : "FAILED !!!");
+ LOG("");
+
+ LOG("- test with signed small integers, negative denominator:");
+ a = (signed)((unsigned)(&a) & 0xFFFFF);
+ b = -(signed)((unsigned)(&b) & 0x00FFF) + 1;
+ c = a / b;
+ d = a % b;
+
+ LOG("- test with unsigned integers, big numerator (> 0x80000000):");
+ a = (signed)(&a);
+ b = (signed)((unsigned)(&b) & 0x00FFF) + 1;
+ c = (signed)((unsigned)a / (unsigned)b);
+ d = (signed)((unsigned)a % (unsigned)b);
+ r = (((unsigned)b * (unsigned)c + (unsigned)d) == (unsigned)a);
+ if (!r)
+ ret = -1;
+ LOG(" 0x%08x / 0x%08x = %u / %u = %u = 0x%x)",
+ (unsigned)a, (unsigned)b, (unsigned)a, (unsigned)b, (unsigned)c,
+ (unsigned)c);
+ LOG(" 0x%08x %% 0x%08x = %u %% %u = %u = 0x%x)", (unsigned)a,
+ (unsigned)b, (unsigned)a, (unsigned)b, (unsigned)d, (unsigned)d);
+ LOG(" check results => %s", r ? "ok" : "FAILED !!!");
+ LOG("");
+
+ LOG("- test with unsigned integers, big num. & denom. (> 0x80000000):");
+ a = (signed)(&a);
+ b = (signed)((unsigned)(&a) - 1);
+ c = (signed)((unsigned)a / (unsigned)b);
+ d = (signed)((unsigned)a % (unsigned)b);
+ r = (((unsigned)b * (unsigned)c + (unsigned)d) == (unsigned)a);
+ if (!r)
+ ret = -1;
+ LOG(" 0x%08x / 0x%08x = %u / %u = %u = 0x%x)",
+ (unsigned)a, (unsigned)b, (unsigned)a, (unsigned)b, (unsigned)c,
+ (unsigned)c);
+ LOG(" 0x%08x %% 0x%08x = %u %% %u = %u = 0x%x)", (unsigned)a,
+ (unsigned)b, (unsigned)a, (unsigned)b, (unsigned)d, (unsigned)d);
+ LOG(" check results => %s", r ? "ok" : "FAILED !!!");
+ LOG("");
+
+ return ret;
+}
+
+/* test malloc support. resulting trace shall be manually checked */
+static int dirty_test_malloc(void)
+{
+ char *p1 = NULL, *p2 = NULL;
+ int *p3 = NULL, *p4 = NULL;
+ bool r;
+ int ret = 0;
+
+ LOG("malloc tests (malloc, free, calloc, realloc, memalign):");
+ LOG(" p1=%p p2=%p p3=%p p4=%p", p1, p2, p3, p4);
+ /* test malloc */
+ p1 = malloc(1024);
+ LOG("- p1 = malloc(1024)");
+ p2 = malloc(1024);
+ LOG("- p2 = malloc(1024)");
+ LOG(" p1=%p p2=%p p3=%p p4=%p", p1, p2, p3, p4);
+ r = (p1 && p2);
+ if (!r)
+ ret = -1;
+ LOG(" => test %s", r ? "ok" : "FAILED");
+ LOG("");
+
+ /* test realloc */
+ p1 = realloc(p1, 3 * 1024);
+ LOG("- p1 = realloc(p1, 3*1024)");
+ LOG("- free p2");
+ free(p2);
+ p2 = malloc(1024);
+ LOG("- p2 = malloc(1024)");
+ LOG(" p1=%p p2=%p p3=%p p4=%p", p1, p2, p3, p4);
+ r = (p1 && p2);
+ if (!r)
+ ret = -1;
+ LOG(" => test %s", r ? "ok" : "FAILED");
+ LOG("");
+ LOG("- free p1, p2");
+ free(p1);
+ free(p2);
+ p1 = NULL;
+ p2 = NULL;
+
+ /* test calloc */
+ p3 = calloc(0x10, 1024);
+ p4 = calloc(0x100, 1024 * 1024);
+ LOG("- p3 = calloc(0x100, 1024)");
+ LOG("- p4 = calloc(0x100, 1024*1024) too big: should fail!");
+ LOG(" p1=%p p2=%p p3=%p p4=%p", p1, p2, p3, p4);
+ r = (p3 && !p4);
+ if (!r)
+ ret = -1;
+ LOG(" => test %s", r ? "ok" : "FAILED");
+ LOG("");
+ LOG("- free p3, p4");
+ free(p3);
+ free(p4);
+ p3 = NULL;
+ p4 = NULL;
+
+ /* test memalign */
+ p3 = memalign(0x10000, 1024);
+ LOG("- p3 = memalign(%d, 1024)", 0x10000);
+ p1 = malloc(1024);
+ LOG("- p1 = malloc(1024)");
+ p4 = memalign(0x10000, 1024);
+ LOG("- p4 = memalign(%d, 1024)", 0x10000);
+ LOG(" p1=%p p2=%p p3=%p p4=%p", p1, p2, p3, p4);
+ r = (p1 && p3 && p4);
+ if (!r)
+ ret = -1;
+ LOG(" => test %s", r ? "ok" : "FAILED");
+ LOG("");
+ LOG("- free p1, p3, p4");
+ free(p1);
+ free(p3);
+ free(p4);
+ p1 = NULL;
+ p3 = NULL;
+ p4 = NULL;
+
+ /* test memalign with invalid alignments */
+ p3 = memalign(100, 1024);
+ LOG("- p3 = memalign(%d, 1024)", 100);
+ p4 = memalign(0, 1024);
+ LOG("- p4 = memalign(%d, 1024)", 0);
+ LOG(" p1=%p p2=%p p3=%p p4=%p", p1, p2, p3, p4);
+ r = (!p3 && !p4);
+ if (!r)
+ ret = -1;
+ LOG(" => test %s", r ? "ok" : "FAILED");
+ LOG("");
+ LOG("- free p3, p4");
+ free(p3);
+ free(p4);
+ p3 = NULL;
+ p4 = NULL;
+
+ /* test free(NULL) */
+ LOG("- free NULL");
+ free(NULL);
+ LOG("");
+ LOG("malloc test done");
+
+ return ret;
+}
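All the division checks above rely on the C identity (a/b)*b + a%b == a, which holds for any b != 0 (C99 division truncates toward zero, so it also covers negative operands, barring the INT_MIN / -1 overflow case). A compact sketch:

    #include <assert.h>
    #include <stdio.h>

    static void check_div_identity(int a, int b)
    {
            /* holds for any b != 0; C99 division truncates toward zero */
            assert((a / b) * b + a % b == a);
    }

    int main(void)
    {
            check_div_identity(7, 3);   /*  2*3  +  1 ==  7 */
            check_div_identity(-7, 3);  /* -2*3  + -1 == -7 */
            check_div_identity(7, -3);  /* -2*-3 +  1 ==  7 */
            printf("division identity holds\n");
            return 0;
    }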
diff --git a/core/arch/arm32/sta/core_dirty_tests.h b/core/arch/arm32/sta/core_dirty_tests.h
new file mode 100644
index 00000000000..0a9cc7b155c
--- /dev/null
+++ b/core/arch/arm32/sta/core_dirty_tests.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef CORE_DIRTY_TESTS_H
+#define CORE_DIRTY_TESTS_H
+
+#include
+#include
+
+/* basic run-time tests */
+TEE_Result core_dirty_tests(uint32_t nParamTypes,
+ TEE_Param pParams[TEE_NUM_PARAMS]);
+
+#endif /*CORE_DIRTY_TESTS_H*/
diff --git a/core/arch/arm32/sta/sta_helloworld.c b/core/arch/arm32/sta/sta_helloworld.c
new file mode 100644
index 00000000000..b49ae454bfc
--- /dev/null
+++ b/core/arch/arm32/sta/sta_helloworld.c
@@ -0,0 +1,251 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include
+#include
+#include
+#include
+#include "core_dirty_tests.h"
+
+#define TA_NAME "sta_helloworld.ta"
+
+#define STA_HELLOWORLD_UUID \
+ { 0xd96a5b40, 0xc3e5, 0x21e3, \
+ { 0x87, 0x94, 0x10, 0x02, 0xa5, 0xd5, 0xc6, 0x1b } }
+
+#define CMD_TRACE 0
+#define CMD_PARAMS 1
+#define CMD_DIRTY_TESTS 2
+
+static TEE_Result test_trace(uint32_t param_types, TEE_Param params[4])
+{
+ IMSG("static TA \"%s\" says \"Hello world !\"", TA_NAME);
+
+ return TEE_SUCCESS;
+}
+
+/*
+ * Supported tests on parameters
+ * (I, J, K, L refer to param index)
+ *
+ * Case 1: command parameter types are: 1 in/out value, 3 empty.
+ *         => process outI.a = inI.a + inI.b
+ * Case 2: command parameter types are: 3 input values, 1 output value
+ *         => process outI.a = inJ.a + inK.a + inL.a
+ * Case 3: command parameter types are: 1 in/out memref, 3 empty.
+ *         => process outI[0] = sum(inI[0..len-1])
+ */
+static TEE_Result test_entry_params(uint32_t type, TEE_Param p[4])
+{
+ size_t i;
+ uint8_t d8, *in;
+
+ /* case 1a: 1 input/output value argument */
+ if ((TEE_PARAM_TYPE_GET(type, 0) == TEE_PARAM_TYPE_VALUE_INOUT) &&
+ (TEE_PARAM_TYPE_GET(type, 1) == TEE_PARAM_TYPE_NONE) &&
+ (TEE_PARAM_TYPE_GET(type, 2) == TEE_PARAM_TYPE_NONE) &&
+ (TEE_PARAM_TYPE_GET(type, 3) == TEE_PARAM_TYPE_NONE)) {
+ p[0].value.a = p[0].value.a + p[0].value.b;
+ return TEE_SUCCESS;
+ }
+ /* case 1b: 1 input/output value argument */
+ if ((TEE_PARAM_TYPE_GET(type, 0) == TEE_PARAM_TYPE_NONE) &&
+ (TEE_PARAM_TYPE_GET(type, 1) == TEE_PARAM_TYPE_VALUE_INOUT) &&
+ (TEE_PARAM_TYPE_GET(type, 2) == TEE_PARAM_TYPE_NONE) &&
+ (TEE_PARAM_TYPE_GET(type, 3) == TEE_PARAM_TYPE_NONE)) {
+ p[1].value.a = p[1].value.a + p[1].value.b;
+ return TEE_SUCCESS;
+ }
+ /* case 1c: 1 input/output value argument */
+ if ((TEE_PARAM_TYPE_GET(type, 0) == TEE_PARAM_TYPE_NONE) &&
+ (TEE_PARAM_TYPE_GET(type, 1) == TEE_PARAM_TYPE_NONE) &&
+ (TEE_PARAM_TYPE_GET(type, 2) == TEE_PARAM_TYPE_VALUE_INOUT) &&
+ (TEE_PARAM_TYPE_GET(type, 3) == TEE_PARAM_TYPE_NONE)) {
+ p[2].value.a = p[2].value.a + p[2].value.b;
+ return TEE_SUCCESS;
+ }
+ /* case 1d: 1 input/output value argument */
+ if ((TEE_PARAM_TYPE_GET(type, 0) == TEE_PARAM_TYPE_NONE) &&
+ (TEE_PARAM_TYPE_GET(type, 1) == TEE_PARAM_TYPE_NONE) &&
+ (TEE_PARAM_TYPE_GET(type, 2) == TEE_PARAM_TYPE_NONE) &&
+ (TEE_PARAM_TYPE_GET(type, 3) == TEE_PARAM_TYPE_VALUE_INOUT)) {
+ p[3].value.a = p[3].value.a + p[3].value.b;
+ return TEE_SUCCESS;
+ }
+
+ /* case 2a: 3 input value arguments, 1 output value argument */
+ if ((TEE_PARAM_TYPE_GET(type, 0) == TEE_PARAM_TYPE_VALUE_OUTPUT) &&
+ (TEE_PARAM_TYPE_GET(type, 1) == TEE_PARAM_TYPE_VALUE_INPUT) &&
+ (TEE_PARAM_TYPE_GET(type, 2) == TEE_PARAM_TYPE_VALUE_INPUT) &&
+ (TEE_PARAM_TYPE_GET(type, 3) == TEE_PARAM_TYPE_VALUE_INPUT)) {
+ p[0].value.a = p[1].value.a + p[2].value.a + p[3].value.a;
+ p[0].value.b = p[1].value.b + p[2].value.b + p[3].value.b;
+ return TEE_SUCCESS;
+ }
+	/* case 2b: 3 input value arguments, 1 output value argument */
+ if ((TEE_PARAM_TYPE_GET(type, 0) == TEE_PARAM_TYPE_VALUE_INPUT) &&
+ (TEE_PARAM_TYPE_GET(type, 1) == TEE_PARAM_TYPE_VALUE_OUTPUT) &&
+ (TEE_PARAM_TYPE_GET(type, 2) == TEE_PARAM_TYPE_VALUE_INPUT) &&
+ (TEE_PARAM_TYPE_GET(type, 3) == TEE_PARAM_TYPE_VALUE_INPUT)) {
+ p[1].value.a = p[0].value.a + p[2].value.a + p[3].value.a;
+ p[1].value.b = p[0].value.b + p[2].value.b + p[3].value.b;
+ return TEE_SUCCESS;
+ }
+ /* case 2c: 3 input value arguments, 1 output value argument */
+ if ((TEE_PARAM_TYPE_GET(type, 0) == TEE_PARAM_TYPE_VALUE_INPUT) &&
+ (TEE_PARAM_TYPE_GET(type, 1) == TEE_PARAM_TYPE_VALUE_INPUT) &&
+ (TEE_PARAM_TYPE_GET(type, 2) == TEE_PARAM_TYPE_VALUE_OUTPUT) &&
+ (TEE_PARAM_TYPE_GET(type, 3) == TEE_PARAM_TYPE_VALUE_INPUT)) {
+ p[2].value.a = p[0].value.a + p[1].value.a + p[3].value.a;
+ p[2].value.b = p[0].value.b + p[1].value.b + p[3].value.b;
+ return TEE_SUCCESS;
+ }
+ /* case 2d: 3 input value arguments, 1 output value argument */
+ if ((TEE_PARAM_TYPE_GET(type, 0) == TEE_PARAM_TYPE_VALUE_INPUT) &&
+ (TEE_PARAM_TYPE_GET(type, 1) == TEE_PARAM_TYPE_VALUE_INPUT) &&
+ (TEE_PARAM_TYPE_GET(type, 2) == TEE_PARAM_TYPE_VALUE_INPUT) &&
+ (TEE_PARAM_TYPE_GET(type, 3) == TEE_PARAM_TYPE_VALUE_OUTPUT)) {
+ p[3].value.a = p[0].value.a + p[1].value.a + p[2].value.a;
+ p[3].value.b = p[0].value.b + p[1].value.b + p[2].value.b;
+ return TEE_SUCCESS;
+ }
+
+ DMSG("expect memref params: %p/%u - %p/%u - %p/%u - %p/%u",
+ p[0].memref.buffer, p[0].memref.size,
+ p[1].memref.buffer, p[1].memref.size,
+ p[2].memref.buffer, p[2].memref.size,
+ p[3].memref.buffer, p[3].memref.size);
+
+ /* case 3a: 1 in/out memref argument */
+ if ((TEE_PARAM_TYPE_GET(type, 0) == TEE_PARAM_TYPE_MEMREF_INOUT) &&
+ (TEE_PARAM_TYPE_GET(type, 1) == TEE_PARAM_TYPE_NONE) &&
+ (TEE_PARAM_TYPE_GET(type, 2) == TEE_PARAM_TYPE_NONE) &&
+ (TEE_PARAM_TYPE_GET(type, 3) == TEE_PARAM_TYPE_NONE)) {
+ in = (uint8_t *)p[0].memref.buffer;
+ d8 = 0;
+ for (i = 0; i < p[0].memref.size; i++)
+ d8 += in[i];
+ *(uint8_t *)p[0].memref.buffer = d8;
+ return TEE_SUCCESS;
+ }
+ /* case 3b: 1 in/out memref argument */
+ if ((TEE_PARAM_TYPE_GET(type, 0) == TEE_PARAM_TYPE_NONE) &&
+ (TEE_PARAM_TYPE_GET(type, 1) == TEE_PARAM_TYPE_MEMREF_INOUT) &&
+ (TEE_PARAM_TYPE_GET(type, 2) == TEE_PARAM_TYPE_NONE) &&
+ (TEE_PARAM_TYPE_GET(type, 3) == TEE_PARAM_TYPE_NONE)) {
+ in = (uint8_t *)p[1].memref.buffer;
+ d8 = 0;
+ for (i = 0; i < p[1].memref.size; i++)
+ d8 += in[i];
+ *(uint8_t *)p[1].memref.buffer = d8;
+ return TEE_SUCCESS;
+ }
+ /* case 3c: 1 in/out memref argument */
+ if ((TEE_PARAM_TYPE_GET(type, 0) == TEE_PARAM_TYPE_NONE) &&
+ (TEE_PARAM_TYPE_GET(type, 1) == TEE_PARAM_TYPE_NONE) &&
+ (TEE_PARAM_TYPE_GET(type, 2) == TEE_PARAM_TYPE_MEMREF_INOUT) &&
+ (TEE_PARAM_TYPE_GET(type, 3) == TEE_PARAM_TYPE_NONE)) {
+ in = (uint8_t *)p[2].memref.buffer;
+ d8 = 0;
+ for (i = 0; i < p[2].memref.size; i++)
+ d8 += in[i];
+ *(uint8_t *)p[2].memref.buffer = d8;
+ return TEE_SUCCESS;
+ }
+ /* case 3d: 1 in/out memref argument */
+ if ((TEE_PARAM_TYPE_GET(type, 0) == TEE_PARAM_TYPE_NONE) &&
+ (TEE_PARAM_TYPE_GET(type, 1) == TEE_PARAM_TYPE_NONE) &&
+ (TEE_PARAM_TYPE_GET(type, 2) == TEE_PARAM_TYPE_NONE) &&
+ (TEE_PARAM_TYPE_GET(type, 3) == TEE_PARAM_TYPE_MEMREF_INOUT)) {
+ in = (uint8_t *)p[3].memref.buffer;
+ d8 = 0;
+ for (i = 0; i < p[3].memref.size; i++)
+ d8 += in[i];
+ *(uint8_t *)p[3].memref.buffer = d8;
+ return TEE_SUCCESS;
+ }
+
+ EMSG("unexpected parameters");
+ return TEE_ERROR_BAD_PARAMETERS;
+}
+
+/*
+ * Trusted Application Entry Points
+ */
+
+static TEE_Result create_ta(void)
+{
+ DMSG("create entry point for static ta \"%s\"", TA_NAME);
+ return TEE_SUCCESS;
+}
+
+static void destroy_ta(void)
+{
+ DMSG("destroy entry point for static ta \"%s\"", TA_NAME);
+}
+
+static TEE_Result open_session(uint32_t nParamTypes, TEE_Param pParams[4],
+ void **ppSessionContext)
+{
+ DMSG("open entry point for static ta \"%s\"", TA_NAME);
+ return TEE_SUCCESS;
+}
+
+static void close_session(void *pSessionContext)
+{
+ DMSG("close entry point for static ta \"%s\"", TA_NAME);
+}
+
+static TEE_Result invoke_command(void *pSessionContext, uint32_t nCommandID,
+ uint32_t nParamTypes, TEE_Param pParams[4])
+{
+ DMSG("command entry point for static ta \"%s\"", TA_NAME);
+
+ switch (nCommandID) {
+ case CMD_TRACE:
+ return test_trace(nParamTypes, pParams);
+ case CMD_PARAMS:
+ return test_entry_params(nParamTypes, pParams);
+ case CMD_DIRTY_TESTS:
+ return core_dirty_tests(nParamTypes, pParams);
+ default:
+ break;
+ }
+ return TEE_ERROR_BAD_PARAMETERS;
+}
+
+__attribute__ ((section("ta_head_section")))
+ const ta_static_head_t sta_helloworld_head = {
+
+ .uuid = STA_HELLOWORLD_UUID,
+ .name = (char *)TA_NAME,
+ .create_entry_point = create_ta,
+ .destroy_entry_point = destroy_ta,
+ .open_session_entry_point = open_session,
+ .close_session_entry_point = close_session,
+ .invoke_command_entry_point = invoke_command,
+
+};
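
As an aside on the parameter handling above: the 4-bit-per-slot encoding behind TEE_PARAM_TYPES()/TEE_PARAM_TYPE_GET() follows the GlobalPlatform TEE Internal API. Below is a minimal standalone sketch of that packing; the macro values match the GP specification, and the main() harness is purely illustrative.

    #include <stdint.h>
    #include <stdio.h>

    /* GP TEE Internal API parameter-type values */
    #define TEE_PARAM_TYPE_NONE          0
    #define TEE_PARAM_TYPE_VALUE_INPUT   1
    #define TEE_PARAM_TYPE_VALUE_OUTPUT  2
    #define TEE_PARAM_TYPE_VALUE_INOUT   3

    /* 4 bits per slot, slot 0 in the least significant nibble */
    #define TEE_PARAM_TYPES(t0, t1, t2, t3) \
        ((t0) | ((t1) << 4) | ((t2) << 8) | ((t3) << 12))
    #define TEE_PARAM_TYPE_GET(t, i) (((t) >> ((i) * 4)) & 0xF)

    int main(void)
    {
        /* Case 2a above: output in slot 0, inputs in slots 1..3 */
        uint32_t t = TEE_PARAM_TYPES(TEE_PARAM_TYPE_VALUE_OUTPUT,
                                     TEE_PARAM_TYPE_VALUE_INPUT,
                                     TEE_PARAM_TYPE_VALUE_INPUT,
                                     TEE_PARAM_TYPE_VALUE_INPUT);

        printf("types word: 0x%04x, slot 0 type: %u\n",
               (unsigned)t, (unsigned)TEE_PARAM_TYPE_GET(t, 0));
        return 0;
    }
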
diff --git a/core/arch/arm32/sta/sub.mk b/core/arch/arm32/sta/sub.mk
new file mode 100644
index 00000000000..a1a2c4b2890
--- /dev/null
+++ b/core/arch/arm32/sta/sub.mk
@@ -0,0 +1,6 @@
+srcs-y += sta_helloworld.c
+cflags-sta_helloworld.c-y += -Wno-unused-parameter
+
+srcs-y += core_dirty_tests.c
+cflags-core_dirty_tests.c-y += -Wno-format
+cflags-core_dirty_tests.c-y += -Wno-format-nonliteral -Wno-format-security
diff --git a/core/arch/arm32/tee/entry.c b/core/arch/arm32/tee/entry.c
new file mode 100644
index 00000000000..710ee134f0f
--- /dev/null
+++ b/core/arch/arm32/tee/entry.c
@@ -0,0 +1,448 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string.h>                 /* assumed: memcpy() */
+#include <trace.h>                  /* assumed: DMSG()/EMSG() */
+#include <sm/teesmc.h>              /* assumed: teesmc32_arg, TEESMC_* */
+#include <kernel/thread.h>          /* assumed: thread_smc_args, thread_set_irq() */
+#include <kernel/tee_dispatch.h>    /* assumed: tee_dispatch_*() */
+#include <kernel/tee_common_unpg.h> /* assumed: tee_pbuf_is_non_sec() */
+#include <mm/core_mmu.h>            /* assumed: core_pa2va(), core_mmu_is_shm_cached() */
+
+#include <tee/entry.h>              /* own interface (assumed path) */
+
+#define SHM_CACHE_ATTRS \
+ (core_mmu_is_shm_cached() ? \
+ (TEESMC_ATTR_CACHE_DEFAULT << TEESMC_ATTR_CACHE_SHIFT) : 0 )
+
+static bool copy_in_params(const struct teesmc32_param *params,
+ uint32_t num_params, uint32_t *param_types,
+ uint32_t param_attr[TEE_NUM_PARAMS],
+ TEE_Param tee_params[TEE_NUM_PARAMS])
+{
+ size_t n;
+ uint8_t pt[4];
+
+ *param_types = 0;
+
+ if (num_params > TEE_NUM_PARAMS)
+ return false;
+
+ for (n = 0; n < num_params; n++) {
+ if (params[n].attr & TEESMC_ATTR_META)
+ return false;
+
+ pt[n] = params[n].attr & TEESMC_ATTR_TYPE_MASK;
+
+ param_attr[n] = (params[n].attr >> TEESMC_ATTR_CACHE_SHIFT) &
+ TEESMC_ATTR_CACHE_MASK;
+
+ if ((params[n].attr & TEESMC_ATTR_TYPE_MASK) ==
+ TEESMC_ATTR_TYPE_NONE) {
+ tee_params[n].value.a = 0;
+ tee_params[n].value.b = 0;
+ } else {
+ tee_params[n].value.a = params[n].u.value.a;
+ tee_params[n].value.b = params[n].u.value.b;
+ }
+ }
+ for (; n < TEE_NUM_PARAMS; n++) {
+ pt[n] = TEE_PARAM_TYPE_NONE;
+ param_attr[n] = 0;
+ tee_params[n].value.a = 0;
+ tee_params[n].value.b = 0;
+ }
+
+ *param_types = TEE_PARAM_TYPES(pt[0], pt[1], pt[2], pt[3]);
+
+ return true;
+}
+
+static void copy_out_param(const TEE_Param tee_params[TEE_NUM_PARAMS],
+ uint32_t param_types, uint32_t num_params,
+ struct teesmc32_param *params)
+{
+ size_t n;
+
+ for (n = 0; n < num_params; n++) {
+ switch (TEE_PARAM_TYPE_GET(param_types, n)) {
+ case TEE_PARAM_TYPE_MEMREF_OUTPUT:
+ case TEE_PARAM_TYPE_MEMREF_INOUT:
+ params[n].u.memref.size = tee_params[n].memref.size;
+ break;
+ case TEE_PARAM_TYPE_VALUE_OUTPUT:
+ case TEE_PARAM_TYPE_VALUE_INOUT:
+ params[n].u.value.a = tee_params[n].value.a;
+ params[n].u.value.b = tee_params[n].value.b;
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+/*
+ * Extracts the mandatory parameter for open session.
+ *
+ * Returns
+ * false : mandatory parameter wasn't found or was malformed
+ * true : parameter found and OK
+ */
+static bool get_open_session_meta(struct teesmc32_arg *arg32,
+ uint32_t num_params, size_t *num_meta,
+ struct teesmc_meta_open_session **meta)
+{
+ struct teesmc32_param *params = TEESMC32_GET_PARAMS(arg32);
+ uint32_t phmeta;
+ const uint8_t req_attr = TEESMC_ATTR_META |
+ TEESMC_ATTR_TYPE_MEMREF_INPUT |
+ SHM_CACHE_ATTRS;
+
+ if (num_params < (*num_meta + 1))
+ return false;
+
+ if (params[*num_meta].attr != req_attr)
+ return false;
+
+ if (params[*num_meta].u.memref.size !=
+ sizeof(struct teesmc_meta_open_session))
+ return false;
+
+ phmeta = params[*num_meta].u.memref.buf_ptr;
+ if (!tee_pbuf_is_non_sec(phmeta,
+ sizeof(struct teesmc_meta_open_session)))
+ return false;
+
+ if (core_pa2va(phmeta, (void *)meta))
+ return false;
+
+ (*num_meta)++;
+ return true;
+}
+
+/*
+ * Extracts the optional pointer to a Trusted Application.
+ *
+ * Returns
+ * false : malformed TA parameter
+ * true : TA parameter wasn't found, or it was found and is OK
+ */
+static bool get_open_session_ta(struct teesmc32_arg *arg32, size_t num_params,
+ size_t *num_meta, kta_signed_header_t **ta)
+{
+ struct teesmc32_param *params = TEESMC32_GET_PARAMS(arg32);
+ uint32_t ph;
+ size_t len;
+ const uint8_t req_attr = TEESMC_ATTR_META |
+ TEESMC_ATTR_TYPE_MEMREF_INPUT |
+ SHM_CACHE_ATTRS;
+
+ if (num_params < (*num_meta + 1))
+ return false;
+
+ if (!(params[*num_meta].attr & TEESMC_ATTR_META))
+ return true;
+
+ if (params[*num_meta].attr != req_attr)
+ return false;
+
+ ph = params[*num_meta].u.memref.buf_ptr;
+ if (params[*num_meta].u.memref.size < sizeof(kta_signed_header_t))
+ return false;
+
+ if (!tee_pbuf_is_non_sec(ph, sizeof(kta_signed_header_t)))
+ return false;
+
+ if (core_pa2va(ph, (void *)ta))
+ return false;
+
+ len = (*ta)->size_of_signed_header + (*ta)->size_of_payload;
+ if (params[*num_meta].u.memref.size < len)
+ return false;
+
+ if (!tee_pbuf_is_non_sec(ph, len))
+ return false;
+
+ (*num_meta)++;
+ return true;
+}
+
+static void entry_open_session(struct thread_smc_args *args,
+ struct teesmc32_arg *arg32, uint32_t num_params)
+{
+ struct tee_dispatch_open_session_in in;
+ struct tee_dispatch_open_session_out out;
+ struct teesmc_meta_open_session *meta;
+ struct teesmc32_param *params = TEESMC32_GET_PARAMS(arg32);
+ size_t num_meta = 0;
+
+ if (!get_open_session_meta(arg32, num_params, &num_meta, &meta))
+ goto bad_params;
+
+ in.ta = NULL;
+ if (!get_open_session_ta(arg32, num_params, &num_meta, &in.ta))
+ goto bad_params;
+
+ TEE_COMPILE_TIME_ASSERT(sizeof(TEE_UUID) == TEESMC_UUID_LEN);
+ memcpy(&in.uuid, &meta->uuid, sizeof(TEE_UUID));
+ memcpy(&in.clnt_id.uuid, &meta->clnt_uuid, sizeof(TEE_UUID));
+ in.clnt_id.login = meta->clnt_login;
+
+ if (!copy_in_params(params + num_meta, num_params - num_meta,
+ &in.param_types, in.param_attr, in.params))
+ goto bad_params;
+
+ (void)tee_dispatch_open_session(&in, &out);
+ if (out.msg.res == TEE_STE_ERROR_SYSTEM_BUSY) {
+ args->a0 = TEESMC_RETURN_EBUSY;
+ return;
+ }
+
+ copy_out_param(out.params, in.param_types, num_params - num_meta,
+ params + num_meta);
+
+ arg32->session = (uint32_t)out.sess;
+ arg32->ret = out.msg.res;
+ arg32->ret_origin = out.msg.err;
+ args->a0 = TEESMC_RETURN_OK;
+ return;
+
+bad_params:
+ DMSG("Bad params");
+ arg32->ret = TEE_ERROR_BAD_PARAMETERS;
+ arg32->ret_origin = TEE_ORIGIN_TEE;
+ args->a0 = TEESMC_RETURN_OK;
+}
+
+static void entry_close_session(struct thread_smc_args *args,
+ struct teesmc32_arg *arg32, uint32_t num_params)
+{
+ if (num_params == 0) {
+ struct tee_close_session_in in;
+ uint32_t ret;
+
+ in.sess = arg32->session;
+ ret = tee_dispatch_close_session(&in);
+ if (ret == TEE_STE_ERROR_SYSTEM_BUSY) {
+ args->a0 = TEESMC_RETURN_EBUSY;
+ return;
+ }
+ arg32->ret = ret;
+ } else {
+ arg32->ret = TEE_ERROR_BAD_PARAMETERS;
+ }
+
+ arg32->ret_origin = TEE_ORIGIN_TEE;
+ args->a0 = TEESMC_RETURN_OK;
+}
+
+static void entry_invoke_command(struct thread_smc_args *args,
+ struct teesmc32_arg *arg32, uint32_t num_params)
+{
+ struct tee_dispatch_invoke_command_in in;
+ struct tee_dispatch_invoke_command_out out;
+ struct teesmc32_param *params = TEESMC32_GET_PARAMS(arg32);
+
+ if (!copy_in_params(params, num_params,
+ &in.param_types, in.param_attr, in.params)) {
+ arg32->ret = TEE_ERROR_BAD_PARAMETERS;
+ arg32->ret_origin = TEE_ORIGIN_TEE;
+ args->a0 = TEESMC_RETURN_OK;
+ return;
+ }
+
+ in.sess = (TEE_Session *)arg32->session;
+ in.cmd = arg32->ta_func;
+ (void)tee_dispatch_invoke_command(&in, &out);
+ if (out.msg.res == TEE_STE_ERROR_SYSTEM_BUSY) {
+ args->a0 = TEESMC_RETURN_EBUSY;
+ return;
+ }
+
+ copy_out_param(out.params, in.param_types, num_params, params);
+
+ arg32->ret = out.msg.res;
+ arg32->ret_origin = out.msg.err;
+ args->a0 = TEESMC_RETURN_OK;
+}
+
+static void entry_cancel(struct thread_smc_args *args,
+ struct teesmc32_arg *arg32, uint32_t num_params)
+{
+ if (num_params == 0) {
+ struct tee_dispatch_cancel_command_in in;
+ struct tee_dispatch_cancel_command_out out;
+
+ in.sess = (TEE_Session *)arg32->session;
+ (void)tee_dispatch_cancel_command(&in, &out);
+
+ if (out.msg.res == TEE_STE_ERROR_SYSTEM_BUSY) {
+ args->a0 = TEESMC_RETURN_EBUSY;
+ return;
+ }
+
+ arg32->ret = out.msg.res;
+ arg32->ret_origin = out.msg.err;
+ } else {
+ arg32->ret = TEE_ERROR_BAD_PARAMETERS;
+ arg32->ret_origin = TEE_ORIGIN_TEE;
+ }
+
+ args->a0 = TEESMC_RETURN_OK;
+}
+
+
+static void tee_entry_call_with_arg(struct thread_smc_args *args)
+{
+ struct teesmc32_arg *arg32;
+ uint32_t num_params;
+
+ if (args->a0 != TEESMC32_CALL_WITH_ARG &&
+ args->a0 != TEESMC32_FASTCALL_WITH_ARG) {
+ EMSG("Unknown SMC 0x%x\n", args->a0);
+ DMSG("Expected 0x%x or 0x%x\n",
+ TEESMC32_CALL_WITH_ARG, TEESMC32_FASTCALL_WITH_ARG);
+ args->a0 = TEESMC_RETURN_UNKNOWN_FUNCTION;
+ return;
+ }
+
+ if (args->a0 == TEESMC32_CALL_WITH_ARG)
+ thread_set_irq(true); /* Enable IRQ for STD calls */
+
+ if (!tee_pbuf_is_non_sec(args->a1, sizeof(struct teesmc32_arg)) ||
+ !TEE_ALIGNMENT_IS_OK(args->a1, struct teesmc32_arg) ||
+ core_pa2va(args->a1, (void *)&arg32)) {
+ EMSG("Bad arg address 0x%x\n", args->a1);
+ args->a0 = TEESMC_RETURN_EBADADDR;
+ return;
+ }
+
+ num_params = arg32->num_params;
+ if (!tee_pbuf_is_non_sec(args->a1, TEESMC32_GET_ARG_SIZE(num_params))) {
+ EMSG("Bad arg address 0x%x\n", args->a1);
+ args->a0 = TEESMC_RETURN_EBADADDR;
+ return;
+ }
+
+ if (args->a0 == TEESMC32_CALL_WITH_ARG) {
+ switch (arg32->cmd) {
+ case TEESMC_CMD_OPEN_SESSION:
+ entry_open_session(args, arg32, num_params);
+ break;
+ case TEESMC_CMD_CLOSE_SESSION:
+ entry_close_session(args, arg32, num_params);
+ break;
+ case TEESMC_CMD_INVOKE_COMMAND:
+ entry_invoke_command(args, arg32, num_params);
+ break;
+ case TEESMC_CMD_CANCEL:
+ entry_cancel(args, arg32, num_params);
+ break;
+ default:
+ EMSG("Unknown cmd 0x%x\n", arg32->cmd);
+ args->a0 = TEESMC_RETURN_EBADCMD;
+ }
+ } else {
+ EMSG("Unknown fastcall cmd 0x%x\n", arg32->cmd);
+ args->a0 = TEESMC_RETURN_EBADCMD;
+ }
+}
+
+void tee_entry(struct thread_smc_args *args)
+{
+ switch (args->a0) {
+ case TEESMC32_CALLS_COUNT:
+ tee_entry_get_api_call_count(args);
+ break;
+ case TEESMC32_CALLS_UID:
+ tee_entry_get_api_uuid(args);
+ break;
+ case TEESMC32_CALLS_REVISION:
+ tee_entry_get_api_revision(args);
+ break;
+ case TEESMC32_CALL_GET_OS_UUID:
+ tee_entry_get_os_uuid(args);
+ break;
+ case TEESMC32_CALL_GET_OS_REVISION:
+ tee_entry_get_os_revision(args);
+ break;
+ case TEESMC32_CALL_WITH_ARG:
+ case TEESMC64_CALL_WITH_ARG:
+ tee_entry_call_with_arg(args);
+ break;
+ default:
+ args->a0 = TEESMC_RETURN_UNKNOWN_FUNCTION;
+ break;
+ }
+}
+
+size_t tee_entry_generic_get_api_call_count(void)
+{
+ /*
+ * All the different calls handled in this file. If a specific
+ * target has additional calls, it calls this function and adds
+ * its own number of calls on top of the count returned here.
+ */
+ return 7;
+}
+
+void __attribute__((weak)) tee_entry_get_api_call_count(
+ struct thread_smc_args *args)
+{
+ args->a0 = tee_entry_generic_get_api_call_count();
+}
+
+void __attribute__((weak)) tee_entry_get_api_uuid(struct thread_smc_args *args)
+{
+ args->a0 = TEESMC_UID_R0;
+ args->a1 = TEESMC_UID_R1;
+ args->a2 = TEESMC_UID_R2;
+ args->a3 = TEESMC_UID32_R3;
+}
+
+void __attribute__((weak)) tee_entry_get_api_revision(
+ struct thread_smc_args *args)
+{
+ args->a0 = TEESMC_REVISION_MAJOR;
+ args->a1 = TEESMC_REVISION_MINOR;
+}
+
+void __attribute__((weak)) tee_entry_get_os_uuid(struct thread_smc_args *args)
+{
+ /* Not implemented */
+ args->a0 = TEESMC_RETURN_UNKNOWN_FUNCTION;
+}
+
+void __attribute__((weak)) tee_entry_get_os_revision(
+ struct thread_smc_args *args)
+{
+ /* Not implemented */
+ args->a0 = TEESMC_RETURN_UNKNOWN_FUNCTION;
+}
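
To make the copy-in/copy-out contract above concrete: copy_out_param() writes back only OUTPUT/INOUT slots, so a misbehaving caller or TA cannot use INPUT slots to move data to normal-world memory. A small standalone sketch of that filtering follows; the struct and macro names are simplified stand-ins, not the real teesmc32 types.

    #include <stdint.h>
    #include <stdio.h>

    #define TYPE_GET(t, i) (((t) >> ((i) * 4)) & 0xF)
    #define VALUE_INPUT  1
    #define VALUE_OUTPUT 2

    struct val { uint32_t a, b; };

    int main(void)
    {
        uint32_t types = VALUE_INPUT | (VALUE_OUTPUT << 4); /* slot 0 in, slot 1 out */
        struct val tee[2] = { { 7, 8 }, { 9, 10 } };  /* secure-world results */
        struct val shm[2] = { { 0, 0 }, { 0, 0 } };   /* shared memory        */
        unsigned n;

        for (n = 0; n < 2; n++)
            if (TYPE_GET(types, n) == VALUE_OUTPUT) /* as in copy_out_param() */
                shm[n] = tee[n];

        printf("slot 0 (input, untouched): %u; slot 1 (output): %u\n",
               (unsigned)shm[0].a, (unsigned)shm[1].a);
        return 0;
    }
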
diff --git a/core/arch/arm32/tee/sub.mk b/core/arch/arm32/tee/sub.mk
new file mode 100644
index 00000000000..9b9b940c908
--- /dev/null
+++ b/core/arch/arm32/tee/sub.mk
@@ -0,0 +1,2 @@
+srcs-y += tee_svc_asm.S
+srcs-y += entry.c
diff --git a/core/arch/arm32/tee/tee_svc_asm.S b/core/arch/arm32/tee/tee_svc_asm.S
new file mode 100644
index 00000000000..a4156d496b4
--- /dev/null
+++ b/core/arch/arm32/tee/tee_svc_asm.S
@@ -0,0 +1,257 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "tee_syscall_numbers.h"
+
+.global tee_svc_enter_user_mode
+.global tee_svc_sys_return
+.global tee_svc_syscall
+
+.set SPSR_T_BIT, (1 << 5)
+.set SPSR_IT_MASK, 0x0600FC00
+.set SPSR_IT_MASK1, 0x06000000
+.set SPSR_IT_MASK2, 0x0000FC00
+
+.section .text
+.balign 4
+.code 32
+
+/*
+ * Function called from the vector table when a svc exception is received.
+ * This function handles entry and exit from a system call.
+ */
+.func tee_svc_syscall
+tee_svc_syscall:
+ push {r4 - r12, lr}
+ mov r8, sp
+
+ /* Restore IRQs, which are disabled on exception entry */
+ push {r0-r3}
+ blx thread_restore_irq
+ pop {r0-r3}
+
+ /*
+ * Copy any arguments passed on the user stack.
+ *
+ * r5 holds the address of the first word
+ * r6 holds the number of words
+ *
+ * TODO figure out how to avoid stack overflow because of too much data
+ * passed on the stack.
+ */
+ sub sp, sp, r6, lsl #2
+ cmp r6, #0
+ beq .Lno_args
+ push {r0}
+ push {r1-r3}
+ mov r0, #0
+ mov r2, r5
+ add r1, sp, #(4 * 4)
+ mov r3, r6, lsl #2
+ ldr lr, =tee_svc_copy_from_user
+ blx lr
+
+ /* If copy failed return the error */
+ cmp r0, #0
+ pop {r1-r3}
+ addne sp, sp, #4
+ popeq {r0}
+ bne .Lret
+.Lno_args:
+
+ /*
+ * Find the system call and call the function.
+ *
+ * System call number is passed in r7.
+ */
+ ldr r12, =tee_svc_syscall_table
+ cmp r7, #TEE_SCN_MAX
+ /* Each syscall function returns to the cleanup code at .Lret */
+ ldr lr, =.Lret
+ ldrls pc, [r12, r7, lsl #2] /* if valid syscall number */
+ ldr pc, =tee_svc_sys_nocall /* if invalid syscall number */
+.Lret:
+ mov sp, r8
+ pop {r4 - r12, lr}
+ movs pc, lr
+.endfunc
+
+
+
+@ TEE_Result tee_svc_enter_user_mode(
+@ uint32_t a0, uint32_t a1, uint32_t a2, uint32_t a3,
+@ tee_uaddr_t sp, tee_uaddr_t user_func,
+@ uint32_t *panicked, uint32_t *panic_code);
+.func tee_svc_enter_user_mode
+tee_svc_enter_user_mode:
+ /*
+ * Save all registers to allow tee_svc_sys_return() to
+ * resume execution as if this function had returned.
+ * This is also used in tee_svc_sys_panic().
+ *
+ * If the stack usage of this function changes, tee_svc_sys_return()
+ * and tee_svc_sys_panic() have to be updated.
+ */
+ push {r4-r12,lr}
+
+ ldr r4, [sp, #(10 * 0x4)] /* user stack pointer */
+ ldr r5, [sp, #(11 * 0x4)] /* user function */
+
+ /* Save user sp */
+ stmfd sp, {sp}^ /* store r13 user mode */
+ nop
+ sub sp, sp, #4 /* update stack pointer */
+
+ /* Switch from Supervisor mode to System mode */
+ mrs r6, cpsr
+ mrs r7, cpsr
+ orr r6, #0xF
+ msr cpsr, r6
+
+ /* Setup user stack */
+ mov sp, r4
+
+ /* Switch back to Supervisor mode to have a spsr to modify */
+ msr cpsr, r7
+
+ /*
+ * Set the saved Processor Status Register to user mode to allow entry
+ * of user mode through the movs below. Also update the Thumb state,
+ * since movs doesn't do that automatically.
+ */
+ bic r6, #0xF
+ tst r5, #1 /* If it's odd we should switch to Thumb mode */
+ orrne r6, #SPSR_T_BIT /* Enable Thumb mode */
+ biceq r6, #SPSR_T_BIT /* Disable Thumb mode */
+ bicne r6, #SPSR_IT_MASK1 /* Clear IT state for Thumb mode */
+ bicne r6, #SPSR_IT_MASK2 /* Clear IT state for Thumb mode */
+ msr spsr_cxsf, r6
+
+ /*
+ * Don't allow return from this function, return is done through
+ * tee_svc_sys_return() below.
+ */
+ mov lr, #0
+ /* Call the user function with its arguments */
+ movs pc, r5
+.endfunc
+
+@ tee_svc_sys_return(uint32_t ret, uint32_t param_types, void *params);
+.func tee_svc_sys_return
+tee_svc_sys_return:
+ mov sp, r8 /* Restore sp in case extra parameters were passed */
+ pop {r4-r12,lr} /* Match the push in tee_svc_syscall() */
+
+ /* Restore user sp */
+ ldmfd sp, {sp}^ /* load r13 user mode */
+ nop
+ add sp, sp, #4 /* update stack pointer */
+
+ pop {r4-r12,pc} /* Match the push in tee_svc_enter_user_mode() */
+.endfunc
+
+@ void tee_svc_sys_panic(uint32_t code);
+.func tee_svc_sys_panic
+tee_svc_sys_panic:
+ mov sp, r8 /* Restore sp in case extra parameters were passed */
+ pop {r4-r12,lr} /* Match the push in tee_svc_syscall() */
+
+.global tee_svc_user_ta_panic_from_pager
+tee_svc_user_ta_panic_from_pager:
+ ldr r1, [sp, #(13 * 0x4)] /* &session->panicked */
+ mov r2, #1 /* true */
+ str r2, [r1] /* update session->panicked */
+
+ ldr r1, [sp, #(14 * 0x4)] /* &session->panic_code */
+ str r0, [r1] /* update session->panic_code */
+
+ /* Restore user sp */
+ ldmfd sp, {sp}^ /* load r13 user mode */
+ nop
+ add sp, sp, #4 /* update stack pointer */
+
+ pop {r4-r12,pc} /* Match the push in tee_svc_enter_user_mode() */
+.endfunc
+
+
+ .section .rodata
+tee_svc_syscall_table:
+.word tee_svc_sys_return
+.word tee_svc_sys_log
+.word tee_svc_sys_panic
+.word tee_svc_sys_dummy
+.word tee_svc_sys_dummy_7args
+.word tee_svc_sys_get_property
+.word tee_svc_open_ta_session
+.word tee_svc_close_ta_session
+.word tee_svc_invoke_ta_command
+.word tee_svc_check_access_rights
+.word tee_svc_get_cancellation_flag
+.word tee_svc_unmask_cancellation
+.word tee_svc_mask_cancellation
+.word tee_svc_wait
+.word tee_svc_get_time
+.word tee_svc_set_ta_time
+.word tee_svc_cryp_state_alloc
+.word tee_svc_cryp_state_copy
+.word tee_svc_cryp_state_free
+.word tee_svc_hash_init
+.word tee_svc_hash_update
+.word tee_svc_hash_final
+.word tee_svc_cipher_init
+.word tee_svc_cipher_update
+.word tee_svc_cipher_final
+.word tee_svc_cryp_obj_get_info
+.word tee_svc_cryp_obj_restrict_usage
+.word tee_svc_cryp_obj_get_attr
+.word tee_svc_cryp_obj_alloc
+.word tee_svc_cryp_obj_close
+.word tee_svc_cryp_obj_reset
+.word tee_svc_cryp_obj_populate
+.word tee_svc_cryp_obj_copy
+.word tee_svc_cryp_derive_key
+.word tee_svc_cryp_random_number_generate
+.word tee_svc_authenc_init
+.word tee_svc_authenc_update_aad
+.word tee_svc_authenc_update_payload
+.word tee_svc_authenc_enc_final
+.word tee_svc_authenc_dec_final
+.word tee_svc_asymm_operate
+.word tee_svc_asymm_verify
+.word tee_svc_storage_obj_open
+.word tee_svc_storage_obj_create
+.word tee_svc_storage_obj_del
+.word tee_svc_storage_obj_rename
+.word tee_svc_storage_alloc_enum
+.word tee_svc_storage_free_enum
+.word tee_svc_storage_reset_enum
+.word tee_svc_storage_start_enum
+.word tee_svc_storage_next_enum
+.word tee_svc_storage_obj_read
+.word tee_svc_storage_obj_write
+.word tee_svc_storage_obj_trunc
+.word tee_svc_storage_obj_seek
+.word tee_svc_obj_generate_key
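
The table above is consumed by the `ldrls pc, [r12, r7, lsl #2]` sequence in tee_svc_syscall. A C rendering of that dispatch is sketched below, under the simplifying assumption that every entry takes no arguments (the real handlers receive theirs in r0-r3 plus the words copied from the user stack); all names here are hypothetical.

    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t (*syscall_fn)(void);

    static uint32_t sys_return(void) { return 0; }
    static uint32_t sys_log(void)    { return 0; }
    static uint32_t sys_nocall(void) { return 0xFFFF0009; /* TEE_ERROR_NOT_IMPLEMENTED */ }

    /* Indexed by syscall number, like the .rodata table above */
    static const syscall_fn table[] = { sys_return, sys_log };
    #define SCN_MAX (sizeof(table) / sizeof(table[0]) - 1)

    static uint32_t dispatch(uint32_t scn)
    {
        /* Mirrors: cmp r7, #TEE_SCN_MAX; ldrls pc, [r12, r7, lsl #2] */
        if (scn <= SCN_MAX)
            return table[scn]();
        return sys_nocall(); /* invalid syscall number */
    }

    int main(void)
    {
        printf("scn 1 -> 0x%x, scn 99 -> 0x%x\n",
               (unsigned)dispatch(1), (unsigned)dispatch(99));
        return 0;
    }
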
diff --git a/core/core.mk b/core/core.mk
new file mode 100644
index 00000000000..40ed5721a26
--- /dev/null
+++ b/core/core.mk
@@ -0,0 +1,51 @@
+include mk/cleanvars.mk
+
+# Set current submodule (used for module specific flags compile result etc)
+sm := core
+sm-$(sm) := y
+
+arch-dir := core/arch/$(ARCH)
+platform-dir := $(arch-dir)/plat-$(PLATFORM)
+include $(platform-dir)/conf.mk
+
+cppflags$(sm) += -Icore/include $(platform-cppflags) $(core-platform-cppflags)
+cflags$(sm) += $(platform-cflags) $(core-platform-cflags)
+aflags$(sm) += $(platform-aflags) $(core-platform-aflags)
+
+# Config flags from mk/config.mk
+cppflags$(sm) += -DCFG_TEE_TA_LOG_LEVEL=$(CFG_TEE_TA_LOG_LEVEL)
+cppflags$(sm) += -DCFG_TEE_FW_DEBUG=$(CFG_TEE_FW_DEBUG)
+cppflags$(sm) += -DCFG_TEE_CORE_LOG_LEVEL=$(CFG_TEE_CORE_LOG_LEVEL)
+cppflags$(sm) += -DCFG_TEE_CORE_DYNAMIC_SUPPORT=$(CFG_TEE_CORE_DYNAMIC_SUPPORT)
+
+cppflags$(sm) += -Ilib/libutee/include
+
+#
+# Do libraries
+#
+
+# Set a prefix to avoid conflicts with user TAs that will use the same
+# source but with different flags below
+base-prefix := $(sm)-
+libname = utils
+libdir = lib/libutils
+include mk/lib.mk
+
+libname = mpa
+libdir = lib/libmpa
+include mk/lib.mk
+base-prefix :=
+
+libname = tomcrypt
+libdir = core/lib/libtomcrypt
+include mk/lib.mk
+
+#
+# Do main source
+#
+subdirs = $(core-platform-subdirs) core
+include mk/subdir.mk
+include mk/compile.mk
+include $(platform-dir)/link.mk
+
diff --git a/core/default_signing.properties b/core/default_signing.properties
new file mode 100644
index 00000000000..fd6c7dbfeb6
--- /dev/null
+++ b/core/default_signing.properties
@@ -0,0 +1,11 @@
+PAYLOAD_HASH_TYPE=SHA256_HASH
+LOAD_ADDRESS=0x0
+START_ADDRESS=0x0
+DMA_FLAG=false
+PKA_FLAG=false
+BUFFER_SIZE=8k
+SW_VERSION=0
+MAJOR_BUILD_VERSION=0
+MINOR_BUILD_VERSION=0
+FLAGS=0
+TRUSTED_APP_HASH_TYPE=SHA256_HASH
diff --git a/core/drivers/gic.c b/core/drivers/gic.c
new file mode 100644
index 00000000000..918297ff381
--- /dev/null
+++ b/core/drivers/gic.c
@@ -0,0 +1,213 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <assert.h>      /* assert() */
+#include <drivers/gic.h> /* own interface */
+#include <io.h>          /* read32()/write32() */
+
+#include <trace.h>       /* assumed: DMSG() */
+
+/* Offsets from gic.gicc_base */
+#define GICC_CTLR (0x000)
+#define GICC_IAR (0x00C)
+#define GICC_EOIR (0x010)
+
+#define GICC_CTLR_ENABLEGRP0 (1 << 0)
+#define GICC_CTLR_ENABLEGRP1 (1 << 1)
+#define GICC_CTLR_FIQEN (1 << 3)
+
+/* Offsets from gic.gicd_base */
+#define GICD_CTLR (0x000)
+#define GICD_TYPER (0x004)
+#define GICD_IGROUPR(n) (0x080 + (n) * 4)
+#define GICD_ISENABLER(n) (0x100 + (n) * 4)
+#define GICD_ICENABLER(n) (0x180 + (n) * 4)
+#define GICD_ICPENDR(n) (0x280 + (n) * 4)
+#define GICD_IPRIORITYR(n) (0x400 + (n) * 4)
+#define GICD_ITARGETSR(n) (0x800 + (n) * 4)
+
+#define GICD_CTLR_ENABLEGRP0 (1 << 0)
+#define GICD_CTLR_ENABLEGRP1 (1 << 1)
+
+/* Maximum number of interrupts a GIC can support */
+#define GIC_MAX_INTS 1020
+
+
+static struct {
+ vaddr_t gicc_base;
+ vaddr_t gicd_base;
+ size_t max_it;
+} gic;
+
+static size_t probe_max_it(void)
+{
+ int i;
+ uint32_t old_ctlr;
+ size_t ret = 0;
+
+ /*
+ * Probe which interrupt number is the largest.
+ */
+ old_ctlr = read32(gic.gicc_base + GICC_CTLR);
+ write32(0, gic.gicc_base + GICC_CTLR);
+ for (i = GIC_MAX_INTS / 32; i > 0; i--) {
+ uint32_t old_reg;
+ uint32_t reg;
+ int b;
+
+ old_reg = read32(gic.gicd_base + GICD_ISENABLER(i));
+ write32(0xffffffff, gic.gicd_base + GICD_ISENABLER(i));
+ reg = read32(gic.gicd_base + GICD_ISENABLER(i));
+ write32(old_reg, gic.gicd_base + GICD_ICENABLER(i));
+ for (b = 31; b > 0; b--) {
+ if ((1 << b) & reg) {
+ ret = i * 32 + b;
+ goto out;
+ }
+ }
+ }
+out:
+ write32(old_ctlr, gic.gicc_base + GICC_CTLR);
+ return ret;
+}
+
+void gic_init(vaddr_t gicc_base, vaddr_t gicd_base)
+{
+ size_t n;
+
+ gic.gicc_base = gicc_base;
+ gic.gicd_base = gicd_base;
+ gic.max_it = probe_max_it();
+
+ for (n = 0; n <= gic.max_it / 32; n++) {
+ /* Disable interrupts */
+ write32(0xffffffff, gic.gicd_base + GICD_ICENABLER(n));
+
+ /* Make interrupts non-pending */
+ write32(0xffffffff, gic.gicd_base + GICD_ICPENDR(n));
+
+ /* Mark interrupts non-secure */
+ write32(0xffffffff, gic.gicd_base + GICD_IGROUPR(n));
+ }
+
+ /* Enable GIC */
+ write32(GICC_CTLR_ENABLEGRP0 | GICC_CTLR_ENABLEGRP1 | GICC_CTLR_FIQEN,
+ gic.gicc_base + GICC_CTLR);
+ write32(GICD_CTLR_ENABLEGRP0 | GICD_CTLR_ENABLEGRP1,
+ gic.gicd_base + GICD_CTLR);
+}
+
+void gic_it_add(size_t it)
+{
+ size_t idx = it / 32;
+ uint32_t mask = 1 << (it % 32);
+
+ assert(it <= gic.max_it); /* Not too large */
+
+ /* Disable the interrupt */
+ write32(mask, gic.gicd_base + GICD_ICENABLER(idx));
+ /* Make it non-pending */
+ write32(mask, gic.gicd_base + GICD_ICPENDR(idx));
+ /* Assign it to group0 */
+ write32(read32(gic.gicd_base + GICD_IGROUPR(idx)) & ~mask,
+ gic.gicd_base + GICD_IGROUPR(idx));
+}
+
+void gic_it_set_cpu_mask(size_t it, uint8_t cpu_mask)
+{
+ size_t idx = it / 32;
+ uint32_t mask = 1 << (it % 32);
+ uint32_t target;
+
+ assert(it <= gic.max_it); /* Not too large */
+ /* Assigned to group0 */
+ assert(!(read32(gic.gicd_base + GICD_IGROUPR(idx)) & mask));
+
+ /* Route it to selected CPUs */
+ target = read32(gic.gicd_base + GICD_ITARGETSR(it / 4));
+ target &= ~(0xff << ((it % 4) * 8));
+ target |= cpu_mask << ((it % 4) * 8);
+ DMSG("cpu_mask: writing 0x%x to 0x%x\n",
+ target, gic.gicd_base + GICD_ITARGETSR(it / 4));
+ write32(target, gic.gicd_base + GICD_ITARGETSR(it / 4));
+ DMSG("cpu_mask: 0x%x\n",
+ read32(gic.gicd_base + GICD_ITARGETSR(it / 4)));
+}
+
+void gic_it_set_prio(size_t it, uint8_t prio)
+{
+ size_t idx = it / 32;
+ uint32_t mask = 1 << (it % 32);
+
+ assert(it <= gic.max_it); /* Not too large */
+ /* Assigned to group0 */
+ assert(!(read32(gic.gicd_base + GICD_IGROUPR(idx)) & mask));
+
+ /* Set the priority of the selected interrupt */
+ DMSG("prio: writing 0x%x to 0x%x\n",
+ prio, gic.gicd_base + GICD_IPRIORITYR(0) + it);
+ write8(prio, gic.gicd_base + GICD_IPRIORITYR(0) + it);
+}
+
+void gic_it_enable(size_t it)
+{
+ size_t idx = it / 32;
+ uint32_t mask = 1 << (it % 32);
+
+ assert(it <= gic.max_it); /* Not too large */
+ /* Assigned to group0 */
+ assert(!(read32(gic.gicd_base + GICD_IGROUPR(idx)) & mask));
+ /* Not enabled yet */
+ assert(!(read32(gic.gicd_base + GICD_ISENABLER(idx)) & mask));
+
+ /* Enable the interrupt */
+ write32(mask, gic.gicd_base + GICD_ISENABLER(idx));
+}
+
+void gic_it_disable(size_t it)
+{
+ size_t idx = it / 32;
+ uint32_t mask = 1 << (it % 32);
+
+ assert(it <= gic.max_it); /* Not too large */
+ /* Assigned to group0 */
+ assert(!(read32(gic.gicd_base + GICD_IGROUPR(idx)) & mask));
+
+ /* Disable the interrupt */
+ write32(mask, gic.gicd_base + GICD_ICENABLER(idx));
+}
+
+uint32_t gic_read_iar(void)
+{
+ return read32(gic.gicc_base + GICC_IAR);
+}
+
+void gic_write_eoir(uint32_t eoir)
+{
+ write32(eoir, gic.gicc_base + GICC_EOIR);
+}
+
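A quick worked example of the register arithmetic used throughout this driver: the 1-bit-per-interrupt registers (GICD_ISENABLER, GICD_ICPENDR, GICD_IGROUPR) cover 32 interrupt IDs each, while GICD_ITARGETSR holds one byte per interrupt, four per register. Standalone and purely illustrative; interrupt 42 is a hypothetical ID.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int it = 42;             /* hypothetical interrupt ID */
        unsigned int idx = it / 32;       /* which 32-bit register     */
        uint32_t mask = 1u << (it % 32);  /* which bit inside it       */

        /* GICD_ITARGETSR: one byte per interrupt, 4 per register */
        unsigned int treg = it / 4;
        unsigned int tshift = (it % 4) * 8;

        printf("IT %u: ISENABLER(%u) bit 0x%08x, ITARGETSR(%u) shift %u\n",
               it, idx, (unsigned)mask, treg, tshift);
        return 0;
    }
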
diff --git a/core/drivers/sub.mk b/core/drivers/sub.mk
new file mode 100644
index 00000000000..4562e6da2a0
--- /dev/null
+++ b/core/drivers/sub.mk
@@ -0,0 +1,2 @@
+srcs-y += uart.c
+srcs-y += gic.c
diff --git a/core/drivers/uart.c b/core/drivers/uart.c
new file mode 100644
index 00000000000..f4448c6440e
--- /dev/null
+++ b/core/drivers/uart.c
@@ -0,0 +1,136 @@
+/*
+ * Copyright (c) 2014, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <drivers/uart.h> /* own interface */
+#include <io.h>           /* read32()/write32() */
+
+#define UART_DR 0x00 /* data register */
+#define UART_RSR_ECR 0x04 /* receive status or error clear */
+#define UART_DMAWM 0x08 /* DMA watermark configure */
+#define UART_TIMEOUT 0x0C /* Timeout period */
+/* reserved space */
+#define UART_FR 0x18 /* flag register */
+#define UART_LCRH_RX 0x1C /* receive line control */
+#define UART_ILPR 0x20 /* IrDA low-power */
+#define UART_IBRD 0x24 /* integer baud register */
+#define UART_FBRD 0x28 /* fractional baud register */
+#define UART_LCRH_TX 0x2C /* transmit line control */
+#define UART_CR 0x30 /* control register */
+#define UART_IFLS 0x34 /* interrupt FIFO level select */
+#define UART_IMSC 0x38 /* interrupt mask set/clear */
+#define UART_RIS 0x3C /* raw interrupt register */
+#define UART_MIS 0x40 /* masked interrupt register */
+#define UART_ICR 0x44 /* interrupt clear register */
+#define UART_DMACR 0x48 /* DMA control register */
+
+/* flag register bits */
+#define UART_FR_RTXDIS (1 << 13)
+#define UART_FR_TERI (1 << 12)
+#define UART_FR_DDCD (1 << 11)
+#define UART_FR_DDSR (1 << 10)
+#define UART_FR_DCTS (1 << 9)
+#define UART_FR_RI (1 << 8)
+#define UART_FR_TXFE (1 << 7)
+#define UART_FR_RXFF (1 << 6)
+#define UART_FR_TXFF (1 << 5)
+#define UART_FR_RXFE (1 << 4)
+#define UART_FR_BUSY (1 << 3)
+#define UART_FR_DCD (1 << 2)
+#define UART_FR_DSR (1 << 1)
+#define UART_FR_CTS (1 << 0)
+
+/* transmit/receive line register bits */
+#define UART_LCRH_SPS (1 << 7)
+#define UART_LCRH_WLEN_8 (3 << 5)
+#define UART_LCRH_WLEN_7 (2 << 5)
+#define UART_LCRH_WLEN_6 (1 << 5)
+#define UART_LCRH_WLEN_5 (0 << 5)
+#define UART_LCRH_FEN (1 << 4)
+#define UART_LCRH_STP2 (1 << 3)
+#define UART_LCRH_EPS (1 << 2)
+#define UART_LCRH_PEN (1 << 1)
+#define UART_LCRH_BRK (1 << 0)
+
+/* control register bits */
+#define UART_CR_CTSEN (1 << 15)
+#define UART_CR_RTSEN (1 << 14)
+#define UART_CR_OUT2 (1 << 13)
+#define UART_CR_OUT1 (1 << 12)
+#define UART_CR_RTS (1 << 11)
+#define UART_CR_DTR (1 << 10)
+#define UART_CR_RXE (1 << 9)
+#define UART_CR_TXE (1 << 8)
+#define UART_CR_LPE (1 << 7)
+#define UART_CR_OVSFACT (1 << 3)
+#define UART_CR_UARTEN (1 << 0)
+
+#define UART_IMSC_RXIM (1 << 4)
+
+void uart_flush_tx_fifo(vaddr_t base)
+{
+ while (!(read32(base + UART_FR) & UART_FR_TXFE))
+ ;
+}
+
+void uart_init(vaddr_t base)
+{
+ write32(0, base + UART_RSR_ECR);
+
+ /* Configure TX to 8 bits, 1 stop bit, no parity, FIFO enabled. */
+ write32(UART_LCRH_WLEN_8 | UART_LCRH_FEN, base + UART_LCRH_TX);
+
+ write32(UART_IMSC_RXIM, base + UART_IMSC);
+
+ /* Enable UART and TX */
+ write32(UART_CR_UARTEN | UART_CR_TXE | UART_CR_RXE, base + UART_CR);
+
+ uart_flush_tx_fifo(base);
+}
+
+void uart_putc(int ch, vaddr_t base)
+{
+ /*
+ * Wait until there is space in the FIFO
+ */
+ while (read32(base + UART_FR) & UART_FR_TXFF)
+ ;
+
+ /* Send the character */
+ write32(ch, base + UART_DR);
+}
+
+bool uart_have_rx_data(vaddr_t base)
+{
+ return !(read32(base + UART_FR) & UART_FR_RXFE);
+}
+
+int uart_getchar(vaddr_t base)
+{
+ while (!uart_have_rx_data(base))
+ ;
+ return read32(base + UART_DR) & 0xff;
+}
+
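A sketch of how this driver might serve as a console, using only the functions declared in drivers/uart.h above; uart_puts(), console_banner() and CONSOLE_UART_BASE are hypothetical, and real platforms would take the base address from their platform configuration.

    #include <drivers/uart.h>

    /* Hypothetical console base address */
    #define CONSOLE_UART_BASE 0x101f1000

    /* Built only on uart_putc(); the CR-before-LF translation is a
     * common convention for serial consoles. */
    static void uart_puts(vaddr_t base, const char *s)
    {
        while (*s) {
            if (*s == '\n')
                uart_putc('\r', base);
            uart_putc(*s++, base);
        }
    }

    void console_banner(void)
    {
        uart_init(CONSOLE_UART_BASE);
        uart_puts(CONSOLE_UART_BASE, "OP-TEE console up\n");
    }
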
diff --git a/core/include/asm.S b/core/include/asm.S
new file mode 100644
index 00000000000..0a654bdb868
--- /dev/null
+++ b/core/include/asm.S
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+ .macro FUNC name colon
+ .global \name
+ .type \name , %function
+ \name \colon
+ .endm
+
+ .macro DATA name colon
+ .global \name
+ .type \name , %object
+ \name \colon
+ .endm
+
+ .macro LOCAL_FUNC name colon
+ .type \name , %function
+ \name \colon
+ .endm
+
+ .macro LOCAL_DATA name colon
+ .type \name , %object
+ \name \colon
+ .endm
+
+ .macro END_DATA name
+ .size \name , .-\name
+ .endm
+
+ .macro END_FUNC name
+ .size \name , .-\name
+ .endm
diff --git a/core/include/core_serviceid.h b/core/include/core_serviceid.h
new file mode 100644
index 00000000000..4356464c469
--- /dev/null
+++ b/core/include/core_serviceid.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * Secure service identification numbers
+ *
+ * These are the identifiers the dispatcher uses to route a user request
+ * to the corresponding secure service.
+ */
+typedef enum {
+ /* For TEE Client API 1.0 */
+ SERVICEID_TEE_OPEN_SESSION = 0x11000008,
+ SERVICEID_TEE_CLOSE_SESSION = 0x11000009,
+ SERVICEID_TEE_INVOKE_COMMAND = 0x1100000a,
+ SERVICEID_TEE_REGISTER_RPC = 0x1100000b,
+ SERVICEID_TEE_SET_SEC_DDR = 0x1100000c,
+ SERVICEID_TEE_CANCEL_COMMAND = 0x1100000d,
+ SERVICEID_TEE_REGISTER_MEMORY = 0x1100000e,
+ SERVICEID_TEE_UNREGISTER_MEMORY = 0x1100000f,
+
+ /* teecore logs and restarts */
+ SERVICEID_TEE_DEINIT_CPU = 0x11000010,
+ SERVICEID_TEE_CRASH_CPU = 0x11000011,
+ SERVICEID_TEE_SET_CORE_TRACE_LEVEL = 0x11000012,
+ SERVICEID_TEE_GET_CORE_TRACE_LEVEL = 0x11000013,
+ SERVICEID_TEE_SET_TA_TRACE_LEVEL = 0x11000014,
+ SERVICEID_TEE_GET_TA_TRACE_LEVEL = 0x11000015,
+ SERVICEID_TEE_GET_CORE_STATUS = 0x11000016,
+
+ /* teecore configurations */
+ SERVICEID_REGISTER_DEFAULT_SHMEM = 0x11000020,
+ SERVICEID_UNREGISTER_DEFAULT_SHMEM = 0x11000021,
+ SERVICEID_TEE_REGISTER_IRQFWD = 0x11000022,
+ SERVICEID_TEE_UNREGISTER_IRQFWD = 0x11000023,
+ SERVICEID_GET_SHMEM_START = 0x11000024,
+ SERVICEID_GET_SHMEM_SIZE = 0x11000025,
+ SERVICEID_GET_SHMEM_CACHED = 0x11000026,
+
+ SERVICEID_ENABLE_L2CC_MUTEX = 0x20000000,
+ SERVICEID_DISABLE_L2CC_MUTEX = 0x20000001,
+ SERVICEID_GET_L2CC_MUTEX = 0x20000002,
+ SERVICEID_SET_L2CC_MUTEX = 0x20000003,
+
+ SERVICEID_LOAD_TEE = 0x20000004
+
+} t_service_id;
+
diff --git a/core/include/drivers/gic.h b/core/include/drivers/gic.h
new file mode 100644
index 00000000000..1bf5523670e
--- /dev/null
+++ b/core/include/drivers/gic.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef GIC_H
+#define GIC_H
+#include <stdint.h> /* assumed; vaddr_t and size_t come via the platform types headers */
+
+void gic_init(vaddr_t gicc_base, vaddr_t gicd_base);
+
+void gic_it_add(size_t it);
+void gic_it_set_cpu_mask(size_t it, uint8_t cpu_mask);
+void gic_it_set_prio(size_t it, uint8_t prio);
+void gic_it_enable(size_t it);
+void gic_it_disable(size_t it);
+
+uint32_t gic_read_iar(void);
+void gic_write_eoir(uint32_t eoir);
+
+#endif /*GIC_H*/
+
diff --git a/core/include/drivers/uart.h b/core/include/drivers/uart.h
new file mode 100644
index 00000000000..ca263a5773a
--- /dev/null
+++ b/core/include/drivers/uart.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2014, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef UART_H
+#define UART_H
+
+#include <stdbool.h> /* assumed: bool; vaddr_t comes via the platform types headers */
+
+void uart_init(vaddr_t base);
+
+void uart_putc(int ch, vaddr_t base);
+
+void uart_flush_tx_fifo(vaddr_t base);
+
+bool uart_have_rx_data(vaddr_t base);
+
+int uart_getchar(vaddr_t base);
+
+#endif /*UART_H*/
+
diff --git a/core/include/io.h b/core/include/io.h
new file mode 100644
index 00000000000..be3702749ae
--- /dev/null
+++ b/core/include/io.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2014, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef IO_H
+#define IO_H
+
+static inline void write8(uint8_t val, vaddr_t addr)
+{
+ *(volatile uint8_t *)addr = val;
+}
+
+static inline void write16(uint16_t val, vaddr_t addr)
+{
+ *(volatile uint16_t *)addr = val;
+}
+
+static inline void write32(uint32_t val, vaddr_t addr)
+{
+ *(volatile uint32_t *)addr = val;
+}
+
+static inline uint8_t read8(vaddr_t addr)
+{
+ return *(volatile uint8_t *)addr;
+}
+
+static inline uint16_t read16(vaddr_t addr)
+{
+ return *(volatile uint16_t *)addr;
+}
+
+static inline uint32_t read32(vaddr_t addr)
+{
+ return *(volatile uint32_t *)addr;
+}
+
+#endif /*IO_H*/
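
Drivers such as gic.c open-code read-modify-write sequences on top of these accessors; a hypothetical helper capturing that pattern is sketched below, assuming vaddr_t and the fixed-width types are in scope, as io.h itself assumes.

    #include <io.h>

    /* Hypothetical convenience wrapper: clear the bits in 'clear',
     * then set the bits in 'set', as gic.c does when moving an
     * interrupt between groups. */
    static inline void io_mask32(vaddr_t addr, uint32_t set, uint32_t clear)
    {
        write32((read32(addr) & ~clear) | set, addr);
    }
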
diff --git a/core/include/kernel/chip_services.h b/core/include/kernel/chip_services.h
new file mode 100644
index 00000000000..41586d165aa
--- /dev/null
+++ b/core/include/kernel/chip_services.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef CHIP_SERVICES_H
+#define CHIP_SERVICES_H
+
+#include <stdint.h> /* assumed */
+#include <stddef.h> /* assumed */
+
+
+#include <kernel/tee_common.h> /* assumed */
+
+/* Die ID */
+#define FVR_DIE_ID_NUM_REGS 3
+
+/*-----------------------------------------------------------------------------
+ PUBLIC FUNCTION PROTOTYPES
+ *---------------------------------------------------------------------------*/
+void enable_secure_wd(void);
+
+#endif
diff --git a/core/include/kernel/kernel.h b/core/include/kernel/kernel.h
new file mode 100644
index 00000000000..7bad5b837e3
--- /dev/null
+++ b/core/include/kernel/kernel.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef KERNEL_KERNEL_H
+#define KERNEL_KERNEL_H
+
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+
+/* Round up to the next multiple of size; size must be a power of 2 */
+#define ROUNDUP(v, size) (((v) + (size - 1)) & ~(size - 1))
+
+/* Round down to a multiple of size; size must be a power of 2 */
+#define ROUNDDOWN(v, size) ((v) & ~(size - 1))
+
+#endif /*KERNEL_KERNEL_H*/
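
A worked example of the mask arithmetic above (macros restated so the snippet runs standalone): rounding 0x1234 to a 4 KiB (0x1000) boundary gives 0x2000 up and 0x1000 down.

    #include <stdio.h>

    #define ROUNDUP(v, size)   (((v) + (size - 1)) & ~(size - 1))
    #define ROUNDDOWN(v, size) ((v) & ~(size - 1))

    int main(void)
    {
        /* 0x1234 rounded to a 4 KiB (0x1000) boundary */
        printf("up: 0x%lx, down: 0x%lx\n",
               ROUNDUP(0x1234UL, 0x1000UL), ROUNDDOWN(0x1234UL, 0x1000UL));
        return 0; /* prints up: 0x2000, down: 0x1000 */
    }
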
diff --git a/core/include/kernel/kta_mem.h b/core/include/kernel/kta_mem.h
new file mode 100644
index 00000000000..1291038a56b
--- /dev/null
+++ b/core/include/kernel/kta_mem.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef KTA_MEM_H
+#define KTA_MEM_H
+
+#include
+#include
+
+#define TEE_MEM_SEC_ESRAM_SHIFT 0
+#define TEE_MEM_SEC_CLASS0_DDR_SHIFT 2
+#define TEE_MEM_SEC_CLASS1_DDR_SHIFT 4
+#define TEE_MEM_DDR_SHIFT 6
+#define TEE_MEM_SEC_HW_SHIFT 8
+#define TEE_MEM_RES_MMU_UL1_SHIFT 10
+#define TEE_MEM_MM_ESRAM_FW_SHIFT 12
+
+/* Checking for Secure eSRAM */
+#define TEE_MEM_SEC_ESRAM (1 << TEE_MEM_SEC_ESRAM_SHIFT)
+#define TEE_MEM_NOT_SEC_ESRAM (TEE_MEM_SEC_ESRAM << 1)
+/* Checking for class0 firewalled DDR */
+#define TEE_MEM_SEC_CLASS0_DDR (1 << TEE_MEM_SEC_CLASS0_DDR_SHIFT)
+#define TEE_MEM_NOT_SEC_CLASS0_DDR (TEE_MEM_SEC_CLASS0_DDR << 1)
+/* Checking for class1 firewalled DDR */
+#define TEE_MEM_SEC_CLASS1_DDR (1 << TEE_MEM_SEC_CLASS1_DDR_SHIFT)
+#define TEE_MEM_NOT_SEC_CLASS1_DDR (TEE_MEM_SEC_CLASS1_DDR << 1)
+/* Checking for DDR */
+#define TEE_MEM_DDR (1 << TEE_MEM_DDR_SHIFT)
+#define TEE_MEM_NOT_DDR (TEE_MEM_DDR << 1)
+/*
+ * Checking for secure resources based on ROM:ed MMU mapping with a few
+ * exceptions.
+ */
+#define TEE_MEM_SEC_HW (1 << TEE_MEM_SEC_HW_SHIFT)
+#define TEE_MEM_NOT_SEC_HW (TEE_MEM_SEC_HW << 1)
+
+#define TEE_MEM_RES_MMU_UL1 (1 << TEE_MEM_RES_MMU_UL1_SHIFT)
+#define TEE_MEM_NOT_RES_MMU_UL1 (TEE_MEM_RES_MMU_UL1 << 1)
+#define TEE_MEM_MM_ESRAM_FW (1 << TEE_MEM_MM_ESRAM_FW_SHIFT)
+#define TEE_MEM_NOT_MM_ESRAM_FW (TEE_MEM_MM_ESRAM_FW << 1)
+
+/* Buffer is non-secure; writing to it can't compromise security */
+#define TEE_MEM_NON_SEC (TEE_MEM_NOT_SEC_ESRAM | \
+ TEE_MEM_NOT_SEC_CLASS0_DDR | \
+ TEE_MEM_NOT_SEC_CLASS1_DDR | \
+ TEE_MEM_NOT_SEC_HW | \
+ TEE_MEM_NOT_MM_ESRAM_FW)
+
+/* Buffer is secure; its data can't be accessed by the normal world */
+#define TEE_MEM_SEC (TEE_MEM_SEC_ESRAM | TEE_MEM_SEC_CLASS0_DDR)
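+/*
+ * Illustrative check (sketch, attr filled in by some memory-attribute
+ * lookup): a buffer may be treated as non-secure only when every category
+ * is explicitly confirmed:
+ *   if ((attr & TEE_MEM_NON_SEC) == TEE_MEM_NON_SEC) { ... safe ... }
+ */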
+
+/* IO access macro */
+#define IO(addr) (*((volatile unsigned long *)(addr)))
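+/*
+ * Example (hypothetical register address):
+ *   IO(0xA0410000) |= 1;    volatile read-modify-write of a register
+ */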
+
+#endif /* KTA_MEM_H */
diff --git a/core/include/kernel/panic.h b/core/include/kernel/panic.h
new file mode 100644
index 00000000000..9ebc1212829
--- /dev/null
+++ b/core/include/kernel/panic.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef KERNEL_PANIC_H
+#define KERNEL_PANIC_H
+
+#define panic() __panic(__FILE__, __LINE__, __func__)
+
+void __panic(const char *file, int line, const char *func)
+ __attribute__((noreturn));
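+
+/*
+ * Illustrative use: panic() never returns, so it can terminate an
+ * unrecoverable error path:
+ *   if (!ptr)
+ *           panic();
+ */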
+
+#endif /*KERNEL_PANIC_H*/
diff --git a/core/include/kernel/tee_common.h b/core/include/kernel/tee_common.h
new file mode 100644
index 00000000000..9b4c4248c9a
--- /dev/null
+++ b/core/include/kernel/tee_common.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef TEE_COMMON_H
+#define TEE_COMMON_H
+
+#include <stdint.h>
+#include <kernel/tee_common_unpg.h>
+
+#ifdef MEASURE_TIME
+/*
+ * Initializes time measurement. Initializes RTT0 to the highest possible
+ * resolution.
+ */
+void tee_mtime_init(void);
+
+/*
+ * Adds a time stamp together with the description. Note that only the
+ * pointer is copied, not the contents, to minimize overhead.
+ */
+void tee_mtime_stamp(const char *descr);
+
+/*
+ * Prints a report of measured times and clears the table of saved time
+ * stamps.
+ */
+void tee_mtime_report(void);
+
+void tee_mtime_perftest(void);
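+
+/*
+ * Illustrative usage (sketch):
+ *   tee_mtime_init();
+ *   tee_mtime_stamp("start");    use string literals: only the pointer
+ *   ...measured work...          is stored, not a copy
+ *   tee_mtime_stamp("end");
+ *   tee_mtime_report();
+ */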
+#else
+/* Empty macros so there is no impact on the code when not measuring time */
+#define tee_mtime_init() do { } while (0)
+#define tee_mtime_stamp(descr) do { } while (0)
+#define tee_mtime_report() do { } while (0)
+#define tee_mtime_perftest() do { } while (0)
+#endif
+
+/*
+ * Adds physical pages for the pager to use. The supplied virtual address
+ * range is searched for mapped physical pages; unmapped pages are ignored.
+ *
+ * vaddr is the first virtual address
+ * npages is the number of pages to add
+ */
+void tee_pager_add_pages(tee_vaddr_t vaddr, size_t npages);
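+/*
+ * Example (sketch, assuming 4 KiB small pages):
+ *   tee_pager_add_pages(start_va, (end_va - start_va) / 4096);
+ */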
+
+void tee_pager_unhide_all_pages(void);
+
+void tee_pager_unmap(uint32_t page, uint8_t psize);
+
+#endif /* TEE_COMMON_H */
diff --git a/core/include/kernel/tee_common_otp.h b/core/include/kernel/tee_common_otp.h
new file mode 100644
index 00000000000..9d291016e59
--- /dev/null
+++ b/core/include/kernel/tee_common_otp.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef TEE_COMMON_OTP_H
+#define TEE_COMMON_OTP_H
+
+#include "stddef.h"
+#include "stdint.h"
+#include <utee_defines.h>
+
+struct tee_hw_unique_key {
+ uint8_t data[HW_UNIQUE_KEY_LENGTH];
+};
+
+/* exposed to let tee_init set the key */
+extern uint8_t hw_key_digest[TEE_SHA256_HASH_SIZE];
+
+void tee_otp_get_hw_unique_key(struct tee_hw_unique_key *hwkey);
+int tee_otp_get_die_id(uint8_t *buffer, size_t len);
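+/*
+ * Illustrative call sequence (sketch):
+ *   struct tee_hw_unique_key hwkey;
+ *   tee_otp_get_hw_unique_key(&hwkey);
+ *   hwkey.data now holds the HW_UNIQUE_KEY_LENGTH-byte hardware unique key
+ */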
+
+#endif /* TEE_COMMON_OTP_H */
diff --git a/core/include/kernel/tee_common_unpg.h b/core/include/kernel/tee_common_unpg.h
new file mode 100644
index 00000000000..e03d9b98da9
--- /dev/null
+++ b/core/include/kernel/tee_common_unpg.h
@@ -0,0 +1,162 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef TEE_COMMON_UNPG_H
+#define TEE_COMMON_UNPG_H
+
+#include <stdint.h>
+#include <stddef.h>
+#include <stdbool.h>