about summary refs log tree commit diff
diff options
context:
space:
mode:
authorNicolas Royer <nroyer@baylibre.com>2020-09-27 17:06:50 +0200
committernicola-mazzucato-arm <42373140+nicola-mazzucato-arm@users.noreply.github.com>2020-10-15 17:45:38 +0100
commit084f9d4edade54160d16265b0108942709b2e7aa (patch)
tree2867fb045d52e03f2ddd36fc4820005a5872afd1
parentdb2d011984a280afbfdcde930ba1965db0d2b8d5 (diff)
rcar: add rcar platform
This patch adds support for the rcar platform running on FreeRTOS.
FreeRTOS source code is temporarily included into rcar product directory
until armv8-a support is added to CMSIS-FreeRTOS repository.

Change-Id: Ia828f903d52df236922fe7f6f548bce06ee131cc
Signed-off-by: Tsutomu Muroya <tsutomu.muroya.jy@bp.renesas.com>
Signed-off-by: Nicolas Royer <nroyer@baylibre.com>
-rw-r--r--product/rcar/include/rcar_common.h19
-rw-r--r--product/rcar/include/rcar_core.h15
-rw-r--r--product/rcar/include/rcar_def.h97
-rw-r--r--product/rcar/include/rcar_irq.h31
-rw-r--r--product/rcar/include/rcar_mfismh.h17
-rw-r--r--product/rcar/include/rcar_mmap.h37
-rw-r--r--product/rcar/include/rcar_mmap_scp.h14
-rw-r--r--product/rcar/include/rcar_scmi.h25
-rw-r--r--product/rcar/include/rcar_scmi_id.h46
-rw-r--r--product/rcar/include/software_mmap.h130
-rw-r--r--product/rcar/include/system_clock.h28
-rw-r--r--product/rcar/include/system_mmap.h21
-rw-r--r--product/rcar/include/system_mmap_scp.h16
-rw-r--r--product/rcar/module/rcar_clock/include/mod_rcar_clock.h1
-rw-r--r--product/rcar/module/rcar_power_domain/include/mod_rcar_power_domain.h4
-rw-r--r--product/rcar/module/rcar_reg_sensor/include/mod_rcar_reg_sensor.h2
-rw-r--r--product/rcar/product.mk9
-rw-r--r--product/rcar/scp_ramfw/FreeRTOSConfig.h49
-rw-r--r--product/rcar/scp_ramfw/clock_devices.h208
-rw-r--r--product/rcar/scp_ramfw/clock_mstp_devices.h169
-rw-r--r--product/rcar/scp_ramfw/clock_sd_devices.h75
-rw-r--r--product/rcar/scp_ramfw/config_clock.c1765
-rw-r--r--product/rcar/scp_ramfw/config_scmi.c98
-rw-r--r--product/rcar/scp_ramfw/config_scmi_apcore.c30
-rw-r--r--product/rcar/scp_ramfw/config_scmi_clock.c980
-rw-r--r--product/rcar/scp_ramfw/config_scmi_power_domain.c11
-rw-r--r--product/rcar/scp_ramfw/config_sensor.c115
-rw-r--r--product/rcar/scp_ramfw/config_smt.c79
-rw-r--r--product/rcar/scp_ramfw/firmware.mk92
-rw-r--r--product/rcar/scp_ramfw/fmw_memory.h23
-rw-r--r--product/rcar/src/CMSIS-FreeRTOS/CMSIS/RTOS2/FreeRTOS/Source/cmsis_os2_tiny4scp.c537
-rw-r--r--product/rcar/src/CMSIS-FreeRTOS/Source/include/FreeRTOS.h1317
-rw-r--r--product/rcar/src/CMSIS-FreeRTOS/Source/include/deprecated_definitions.h277
-rw-r--r--product/rcar/src/CMSIS-FreeRTOS/Source/include/list.h465
-rw-r--r--product/rcar/src/CMSIS-FreeRTOS/Source/include/mpu_wrappers.h195
-rw-r--r--product/rcar/src/CMSIS-FreeRTOS/Source/include/portable.h236
-rw-r--r--product/rcar/src/CMSIS-FreeRTOS/Source/include/projdefs.h126
-rw-r--r--product/rcar/src/CMSIS-FreeRTOS/Source/include/queue.h1783
-rw-r--r--product/rcar/src/CMSIS-FreeRTOS/Source/include/stack_macros.h133
-rw-r--r--product/rcar/src/CMSIS-FreeRTOS/Source/include/task.h2729
-rw-r--r--product/rcar/src/CMSIS-FreeRTOS/Source/include/timers.h1401
-rw-r--r--product/rcar/src/CMSIS-FreeRTOS/Source/list.c205
-rw-r--r--product/rcar/src/CMSIS-FreeRTOS/Source/portable/GCC/ARM_CA53_64_Rcar/port.c299
-rw-r--r--product/rcar/src/CMSIS-FreeRTOS/Source/portable/GCC/ARM_CA53_64_Rcar/portASM.S352
-rw-r--r--product/rcar/src/CMSIS-FreeRTOS/Source/portable/GCC/ARM_CA53_64_Rcar/portmacro.h116
-rw-r--r--product/rcar/src/CMSIS-FreeRTOS/Source/portable/MemMang/heap_1.c144
-rw-r--r--product/rcar/src/CMSIS-FreeRTOS/Source/queue.c2826
-rw-r--r--product/rcar/src/CMSIS-FreeRTOS/Source/tasks.c5356
-rw-r--r--product/rcar/src/CMSIS-FreeRTOS/Source/timers.c1200
-rw-r--r--product/rcar/src/rcar_core.c23
-rw-r--r--tools/build_system/firmware.mk12
-rw-r--r--tools/cppcheck_suppress_list.txt3
52 files changed, 23933 insertions, 8 deletions
diff --git a/product/rcar/include/rcar_common.h b/product/rcar/include/rcar_common.h
new file mode 100644
index 00000000..472731dc
--- /dev/null
+++ b/product/rcar/include/rcar_common.h
@@ -0,0 +1,19 @@
+/*
+ * Renesas SCP/MCP Software
+ * Copyright (c) 2020, Renesas Electronics Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef RCAR_COMMON_H
+#define RCAR_COMMON_H
+
+#include <stdint.h>
+
+void mstpcr_write(uint32_t mstpcr, uint32_t mstpsr, uint32_t target_bit);
+void cpg_write(uintptr_t regadr, uint32_t regval);
+
+void udelay(unsigned long usec);
+void mdelay(unsigned long msecs);
+
+#endif /* RCAR_COMMON_H */
diff --git a/product/rcar/include/rcar_core.h b/product/rcar/include/rcar_core.h
new file mode 100644
index 00000000..0c5e0eb0
--- /dev/null
+++ b/product/rcar/include/rcar_core.h
@@ -0,0 +1,15 @@
+/*
+ * Renesas SCP/MCP Software
+ * Copyright (c) 2020, Renesas Electronics Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef RCAR_CORE_H
+#define RCAR_CORE_H
+
+#define RCAR_CORE_PER_CLUSTER_MAX 8
+
+unsigned int rcar_core_get_count(void);
+
+#endif /* RCAR_CORE_H */
diff --git a/product/rcar/include/rcar_def.h b/product/rcar/include/rcar_def.h
new file mode 100644
index 00000000..c0b2b2b5
--- /dev/null
+++ b/product/rcar/include/rcar_def.h
@@ -0,0 +1,97 @@
+/*
+ * Renesas SCP/MCP Software
+ * Copyright (c) 2020, Renesas Electronics Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef RCAR_DEF_H
+#define RCAR_DEF_H
+
+/* Reset */
+#define RCAR_CPGWPR UINT32_C(0xE6150900) /* CPG write protect */
+#define RCAR_MODEMR UINT32_C(0xE6160060) /* Mode pin */
+#define RCAR_CA57RESCNT UINT32_C(0xE6160040) /* Reset control A57 */
+#define RCAR_CA53RESCNT UINT32_C(0xE6160044) /* Reset control A53 */
+#define RCAR_SRESCR UINT32_C(0xE6160110) /* Soft Power On Reset */
+#define RCAR_CA53WUPCR UINT32_C(0xE6151010) /* Wake-up control A53 */
+#define RCAR_CA57WUPCR UINT32_C(0xE6152010) /* Wake-up control A57 */
+#define RCAR_CA53PSTR UINT32_C(0xE6151040) /* Power status A53 */
+#define RCAR_CA57PSTR UINT32_C(0xE6152040) /* Power status A57 */
+#define RCAR_CA53CPU0CR UINT32_C(0xE6151100) /* CPU control A53 */
+#define RCAR_CA57CPU0CR UINT32_C(0xE6152100) /* CPU control A57 */
+#define RCAR_CA53CPUCMCR UINT32_C(0xE6151184) /* Common power A53 */
+#define RCAR_CA57CPUCMCR UINT32_C(0xE6152184) /* Common power A57 */
+#define RCAR_WUPMSKCA57 UINT32_C(0xE6180014) /* Wake-up mask A57 */
+#define RCAR_WUPMSKCA53 UINT32_C(0xE6180018) /* Wake-up mask A53 */
+
+/* SYSC */
+#define RCAR_PWRSR3 UINT32_C(0xE6180140) /* Power stat A53-SCU */
+#define RCAR_PWRSR5 UINT32_C(0xE61801C0) /* Power stat A57-SCU */
+#define RCAR_SYSCIER UINT32_C(0xE618000C) /* Interrupt enable */
+#define RCAR_SYSCIMR UINT32_C(0xE6180010) /* Interrupt mask */
+#define RCAR_SYSCSR UINT32_C(0xE6180000) /* SYSC status */
+#define RCAR_PWRONCR3 UINT32_C(0xE618014C) /* Power resume A53-SCU */
+#define RCAR_PWRONCR5 UINT32_C(0xE61801CC) /* Power resume A57-SCU */
+#define RCAR_PWROFFCR3 UINT32_C(0xE6180144) /* Power shutof A53-SCU */
+#define RCAR_PWROFFCR5 UINT32_C(0xE61801C4) /* Power shutof A57-SCU */
+#define RCAR_PWRER3 UINT32_C(0xE6180154) /* shutoff/resume error */
+#define RCAR_PWRER5 UINT32_C(0xE61801D4) /* shutoff/resume error */
+#define RCAR_SYSCISR UINT32_C(0xE6180004) /* Interrupt status */
+#define RCAR_SYSCISCR UINT32_C(0xE6180008) /* Interrupt stat clear */
+
+/* Product register */
+#define RCAR_PRR UINT32_C(0xFFF00044)
+#define RCAR_PRODUCT_MASK UINT32_C(0x00007F00)
+#define RCAR_CUT_MASK UINT32_C(0x000000FF)
+#define RCAR_PRODUCT_H3 UINT32_C(0x00004F00)
+#define RCAR_PRODUCT_M3 UINT32_C(0x00005200)
+#define RCAR_PRODUCT_M3N UINT32_C(0x00005500)
+#define RCAR_PRODUCT_E3 UINT32_C(0x00005700)
+#define RCAR_CUT_VER10 UINT32_C(0x00000000)
+#define RCAR_CUT_VER11 UINT32_C(0x00000001) /* H3/M3N/E3 Ver.1.1 */
+#define RCAR_M3_CUT_VER11 UINT32_C(0x00000010) /* M3 Ver.1.1/Ver.1.2 */
+#define RCAR_CUT_VER20 UINT32_C(0x00000010)
+#define RCAR_CUT_VER30 UINT32_C(0x00000020)
+#define RCAR_MAJOR_MASK UINT32_C(0x000000F0)
+#define RCAR_MINOR_MASK UINT32_C(0x0000000F)
+#define RCAR_PRODUCT_SHIFT UINT32_C(8)
+#define RCAR_MAJOR_SHIFT UINT32_C(4)
+#define RCAR_MINOR_SHIFT UINT32_C(0)
+#define RCAR_MAJOR_OFFSET UINT32_C(1)
+#define RCAR_M3_MINOR_OFFSET UINT32_C(2)
+#define RCAR_PRODUCT_H3_CUT10 (RCAR_PRODUCT_H3 | UINT32_C(0x00)) /* 1.0 */
+#define RCAR_PRODUCT_H3_CUT11 (RCAR_PRODUCT_H3 | UINT32_C(0x01)) /* 1.1 */
+#define RCAR_PRODUCT_H3_CUT20 (RCAR_PRODUCT_H3 | UINT32_C(0x10)) /* 2.0 */
+#define RCAR_PRODUCT_M3_CUT10 (RCAR_PRODUCT_M3 | UINT32_C(0x00)) /* 1.0 */
+#define RCAR_PRODUCT_M3_CUT11 (RCAR_PRODUCT_M3 | UINT32_C(0x10))
+#define RCAR_CPU_MASK_CA57 UINT32_C(0x80000000)
+#define RCAR_CPU_MASK_CA53 UINT32_C(0x04000000)
+#define RCAR_CPU_HAVE_CA57 UINT32_C(0x00000000)
+#define RCAR_CPU_HAVE_CA53 UINT32_C(0x00000000)
+#define RCAR_SSCG_MASK UINT32_C(0x1000) /* MD12 */
+#define RCAR_SSCG_ENABLE UINT32_C(0x1000)
+
+/* MD pin information */
+#define MODEMR_BOOT_PLL_MASK UINT32_C(0x00006000)
+#define MODEMR_BOOT_PLL_SHIFT UINT32_C(13)
+/* MODEMR PLL masks and bitfield values */
+#define CHECK_MD13_MD14 UINT32_C(0x6000)
+#define MD14_MD13_TYPE_0 UINT32_C(0x0000) /* MD14=0 MD13=0 */
+#define MD14_MD13_TYPE_1 UINT32_C(0x2000) /* MD14=0 MD13=1 */
+#define MD14_MD13_TYPE_2 UINT32_C(0x4000) /* MD14=1 MD13=0 */
+#define MD14_MD13_TYPE_3 UINT32_C(0x6000) /* MD14=1 MD13=1 */
+/* Frequency of EXTAL(Hz) */
+#define EXTAL_MD14_MD13_TYPE_0 UINT32_C(8333300) /* MD14=0 MD13=0 */
+#define EXTAL_MD14_MD13_TYPE_1 UINT32_C(10000000) /* MD14=0 MD13=1 */
+#define EXTAL_MD14_MD13_TYPE_2 UINT32_C(12500000) /* MD14=1 MD13=0 */
+#define EXTAL_MD14_MD13_TYPE_3 UINT32_C(16666600) /* MD14=1 MD13=1 */
+#define EXTAL_SALVATOR_XS UINT32_C(8320000) /* Salvator-XS */
+#define EXTAL_EBISU UINT32_C(24000000) /* Ebisu */
+
+/* CPG Secure Module Stop Control Register 9 */
+#define SCMSTPCR9 (CPG_BASE + 0x0B44U)
+/* CPG stop status 9 */
+#define CPG_MSTPSR9 (CPG_BASE + 0x09A4U)
+
+#endif /* RCAR_DEF_H */
diff --git a/product/rcar/include/rcar_irq.h b/product/rcar/include/rcar_irq.h
new file mode 100644
index 00000000..6b4b9b1a
--- /dev/null
+++ b/product/rcar/include/rcar_irq.h
@@ -0,0 +1,31 @@
+/*
+ * Renesas SCP/MCP Software
+ * Copyright (c) 2020, Renesas Electronics Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef RCAR_IRQ_H
+#define RCAR_IRQ_H
+
+#include <fwk_interrupt.h>
+
+enum rcar_irq {
+ /**/
+ MIN_IRQ = 16,
+ VIRTUAL_TIMER_IRQ = 27,
+ /**/
+ MFIS_AREICR1_IRQ = 257,
+ MFIS_AREICR2_IRQ = 258,
+ MFIS_AREICR3_IRQ = 259,
+ /**/ /* The following numbers are virtual IID */
+ SMCMH_IRQ_START = 1024,
+ SMCMH_SECURE_IRQ = SMCMH_IRQ_START, /* SMC Secure */
+ SMCMH_LOW_PRIO_IRQ, /* SMC Low Priority */
+ SMCMH_HIGH_PRIO_IRQ, /* SMC High Priority */
+ SMCMH_IRQ_END,
+ MAX_IRQ = SMCMH_IRQ_END,
+ /**/
+};
+
+#endif /* RCAR_IRQ_H */
diff --git a/product/rcar/include/rcar_mfismh.h b/product/rcar/include/rcar_mfismh.h
new file mode 100644
index 00000000..b0397d2a
--- /dev/null
+++ b/product/rcar/include/rcar_mfismh.h
@@ -0,0 +1,17 @@
+/*
+ * Renesas SCP/MCP Software
+ * Copyright (c) 2020, Renesas Electronics Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef RCAR_MFISMH_H
+#define RCAR_MFISMH_H
+
+enum rcar_smcmh_device_idx {
+ RCAR_MFISMH_DEVICE_IDX_S,
+ RCAR_MFISMH_DEVICE_IDX_NS_L,
+ RCAR_MFISMH_DEVICE_IDX_COUNT,
+};
+
+#endif /* RCAR_MFISMH_H */
diff --git a/product/rcar/include/rcar_mmap.h b/product/rcar/include/rcar_mmap.h
new file mode 100644
index 00000000..d0ce17c5
--- /dev/null
+++ b/product/rcar/include/rcar_mmap.h
@@ -0,0 +1,37 @@
+/*
+ * Renesas SCP/MCP Software
+ * Copyright (c) 2020, Renesas Electronics Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef RCAR_MMAP_H
+#define RCAR_MMAP_H
+
+#include <rcar_def.h>
+
+#include <stdint.h>
+
+/*
+ * Top-level base addresses
+ */
+#define PERIPHERAL_BASE UINT64_C(0xE6000000)
+
+/*
+ * Peripherals
+ */
+#define CPG_BASE (PERIPHERAL_BASE + 0x00150000)
+#define MFIS_BASE (PERIPHERAL_BASE + 0x00260000)
+
+#define MFISAREICR_BASE (MFIS_BASE + 0x0400)
+
+/* Memory */
+#define TRUSTED_RAM_BASE UINT64_C(0x040000000)
+#define NONTRUSTED_RAM_BASE UINT64_C(0x040002000)
+
+#define SCP_SRAM_BASE (0xE6302000U)
+#define SCP_SRAM_SIZE (0x00001000U)
+#define SCP_SRAM_STACK_BASE (SCP_SRAM_BASE + SCP_SRAM_SIZE)
+#define SCP_SRAM_STACK_SIZE (0x00001000U)
+
+#endif /* RCAR_MMAP_H */
diff --git a/product/rcar/include/rcar_mmap_scp.h b/product/rcar/include/rcar_mmap_scp.h
new file mode 100644
index 00000000..390f17d3
--- /dev/null
+++ b/product/rcar/include/rcar_mmap_scp.h
@@ -0,0 +1,14 @@
+/*
+ * Renesas SCP/MCP Software
+ * Copyright (c) 2020, Renesas Electronics Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef RCAR_MMAP_SCP_H
+#define RCAR_MMAP_SCP_H
+
+#define SCP_ROM_BASE 0x00000000
+#define SCP_RAM_BASE 0x44200000
+
+#endif /* RCAR_MMAP_SCP_H */
diff --git a/product/rcar/include/rcar_scmi.h b/product/rcar/include/rcar_scmi.h
new file mode 100644
index 00000000..cf4773dd
--- /dev/null
+++ b/product/rcar/include/rcar_scmi.h
@@ -0,0 +1,25 @@
+/*
+ * Renesas SCP/MCP Software
+ * Copyright (c) 2020, Renesas Electronics Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef RCAR_SCMI_H
+#define RCAR_SCMI_H
+
+/* SCMI agent identifiers */
+enum rcar_scmi_agent_id {
+ /* 0 is reserved for the platform */
+ SCMI_AGENT_ID_OSPM = 1,
+ SCMI_AGENT_ID_PSCI,
+ SCMI_AGENT_ID_COUNT,
+};
+
+/* SCMI service indexes */
+enum rcar_scmi_service_idx {
+ RCAR_SCMI_SERVICE_IDX_PSCI,
+ RCAR_SCMI_SERVICE_IDX_OSPM_0,
+ RCAR_SCMI_SERVICE_IDX_COUNT,
+};
+#endif /* RCAR_SCMI_H */
diff --git a/product/rcar/include/rcar_scmi_id.h b/product/rcar/include/rcar_scmi_id.h
new file mode 100644
index 00000000..39784001
--- /dev/null
+++ b/product/rcar/include/rcar_scmi_id.h
@@ -0,0 +1,46 @@
+/*
+ * Renesas SCP/MCP Software
+ * Copyright (c) 2020, Renesas Electronics Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef RCAR_SCMI_ID_H
+#define RCAR_SCMI_ID_H
+
+/* SCMI PowerDomain Domain indexes */
+enum rcar_scmi_pd_domain_id {
+ PD_RCAR_CLUS0CORE0 = 0,
+ PD_RCAR_CLUS0CORE1,
+ PD_RCAR_CLUS0CORE2,
+ PD_RCAR_CLUS0CORE3,
+ PD_RCAR_CLUS1CORE0,
+ PD_RCAR_CLUS1CORE1,
+ PD_RCAR_CLUS1CORE2,
+ PD_RCAR_CLUS1CORE3,
+ PD_RCAR_CLUSTER0,
+ PD_RCAR_CLUSTER1,
+ PD_RCAR_A3IR,
+ PD_RCAR_3DGE,
+ PD_RCAR_3DGD,
+ PD_RCAR_3DGC,
+ PD_RCAR_3DGB,
+ PD_RCAR_3DGA,
+ PD_RCAR_A2VC1,
+ PD_RCAR_A3VC,
+ PD_RCAR_CR7,
+ PD_RCAR_A3VP,
+ PD_RCAR_COUNT,
+};
+
+/* SCMI Clock indexes */
+enum rcar_scmi_clock_id {
+ CLOCK_RCAR_COUNT,
+};
+
+/* SCMI Sensor indexes */
+enum rcar_scmi_sensor_id {
+ SENSOR_RCAR_COUNT,
+};
+
+#endif /* RCAR_SCMI_ID_H */
diff --git a/product/rcar/include/software_mmap.h b/product/rcar/include/software_mmap.h
new file mode 100644
index 00000000..f1ef55d2
--- /dev/null
+++ b/product/rcar/include/software_mmap.h
@@ -0,0 +1,130 @@
+/*
+ * Renesas SCP/MCP Software
+ * Copyright (c) 2020, Renesas Electronics Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SOFTWARE_MMAP_H
+#define SOFTWARE_MMAP_H
+
+#include <system_mmap.h>
+
+#include <fwk_macros.h>
+
+/*
+ * The 4KiB AP/SCP Shared memory at the base of Trusted SRAM is used for several
+ * purposes. These are: the Shared Data Storage (SDS) Memory Region, the SCMI
+ * secure payload areas, and the context area for Application Processor
+ * firmware.
+ *
+ * Shared Data Storage (SDS) Memory Region: Used for structured storage of data
+ * that is shared between SCP Firmware and Application Processor firmware. The
+ * SDS Memory Region occupies the area between the context region base and
+ * the SCMI Secure Payload base.
+ *
+ * SCMI Secure Payload Areas: Storage for SCMI message contents in both the
+ * Agent->Platform and Platform->Agent directions.
+ *
+ * Application Processor Context Area: The usage of this area is defined by the
+ * firmware running on the Application Processors. The SCP Firmware must zero
+ * this memory before releasing any Application Processors. This area must
+ * always be located in the top 64 bytes of the 4KiB reserved region.
+ *
+ * +-----------------------+ 4096
+ * | |
+ * 64B | AP Context Area |
+ * | |
+ * +-----------------------+
+ * | |
+ * 256B | Unused |
+ * | |
+ * +-----------------------+
+ * | |
+ * | SCMI Sec. Payload |
+ * 128B | Platform to Agent |
+ * | |
+ * +-----------------------+
+ * | |
+ * 128B | SCMI Sec. Payload |
+ * | Agent to Platform |
+ * | |
+ * +-----------------------+
+ * | |
+ * 3520B | SDS Memory Region |
+ * | |
+ * +-----------------------+ 0
+ */
+
+/* Secure shared memory at the base of Trusted SRAM */
+#define SHARED_SECURE_BASE (TRUSTED_RAM_BASE)
+#define SHARED_SECURE_SIZE (4 * FWK_KIB)
+
+/* SDS Memory Region */
+#define SDS_MEM_BASE (SHARED_SECURE_BASE)
+#if 0 /* Tentative */
+# define SDS_MEM_SIZE (3520)
+#else
+# define SDS_MEM_SIZE (0)
+#endif
+
+/* AP Context Area */
+#define AP_CONTEXT_BASE \
+ (SHARED_SECURE_BASE + SHARED_SECURE_SIZE - AP_CONTEXT_SIZE)
+#define AP_CONTEXT_SIZE (64)
+
+/* SCMI Secure Payload Areas */
+#define SCMI_PAYLOAD_SIZE (128)
+#define SCMI_PAYLOAD_S_A2P_BASE (SDS_MEM_BASE + SDS_MEM_SIZE)
+#define SCMI_PAYLOAD_S_P2A_BASE (SCMI_PAYLOAD_S_A2P_BASE + SCMI_PAYLOAD_SIZE)
+
+/*
+ * The 4KiB AP/SCP Shared memory at the base of Non-trusted SRAM is used for the
+ * SCMI non-secure payload areas.
+ *
+ * Two SCMI non-Secure Payload Areas: Storage for SCMI message contents in both
+ * the Agent->Platform and Platform->Agent directions.
+ *
+ * +-----------------------+ 4096
+ * 3584B | Unused |
+ * +-----------------------+
+ * | |
+ * | Non-Sec. Channel 1 |
+ * | SCMI non-Sec. Payload |
+ * 128B | Platform to Agent |
+ * | |
+ * +-----------------------+
+ * | |
+ * | Non-Sec. Channel 1 |
+ * 128B | SCMI non-Sec. Payload |
+ * | Agent to Platform |
+ * | |
+ * +-----------------------+
+ * | |
+ * | Non-Sec. Channel 0 |
+ * | SCMI non-Sec. Payload |
+ * 128B | Platform to Agent |
+ * | |
+ * +-----------------------+
+ * | |
+ * | Non-Sec. Channel 0 |
+ * 128B | SCMI non-Sec. Payload |
+ * | Agent to Platform |
+ * | |
+ * +-----------------------+ 0
+ */
+
+/* Non-secure shared memory at the base of Non-trusted SRAM */
+#define SHARED_NONSECURE_BASE (NONTRUSTED_RAM_BASE)
+#define SHARED_NONSECURE_SIZE (4 * FWK_KIB)
+
+/* SCMI Non-Secure Payload Areas */
+#define SCMI_PAYLOAD0_NS_A2P_BASE (SHARED_NONSECURE_BASE)
+#define SCMI_PAYLOAD0_NS_P2A_BASE \
+ (SCMI_PAYLOAD0_NS_A2P_BASE + SCMI_PAYLOAD_SIZE)
+#define SCMI_PAYLOAD1_NS_A2P_BASE \
+ (SCMI_PAYLOAD0_NS_P2A_BASE + SCMI_PAYLOAD_SIZE)
+#define SCMI_PAYLOAD1_NS_P2A_BASE \
+ (SCMI_PAYLOAD1_NS_A2P_BASE + SCMI_PAYLOAD_SIZE)
+
+#endif /* SOFTWARE_MMAP_H */
diff --git a/product/rcar/include/system_clock.h b/product/rcar/include/system_clock.h
new file mode 100644
index 00000000..444c2419
--- /dev/null
+++ b/product/rcar/include/system_clock.h
@@ -0,0 +1,28 @@
+/*
+ * Renesas SCP/MCP Software
+ * Copyright (c) 2020, Renesas Electronics Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SYSTEM_CLOCK_H
+#define SYSTEM_CLOCK_H
+
+#include <fwk_macros.h>
+
+/*!
+ * \brief Calculates the necessary divider for obtaining a target frequency
+ * from a given clock.
+ *
+ * \param CLOCK_RATE The tick rate of the clock to be divided.
+ *
+ * \param TARGET_FREQ The target frequency to be obtained by the division.
+ *
+ * \return The divider needed to obtain TARGET_FREQ from CLOCK_RATE.
+ */
+#define DIV_FROM_CLOCK(CLOCK_RATE, TARGET_FREQ) ((CLOCK_RATE) / (TARGET_FREQ))
+
+#define CLOCK_RATE_REFCLK (50UL * FWK_MHZ)
+#define CLOCK_RATE_SYSPLLCLK (2000UL * FWK_MHZ)
+
+#endif /* SYSTEM_CLOCK_H */
diff --git a/product/rcar/include/system_mmap.h b/product/rcar/include/system_mmap.h
new file mode 100644
index 00000000..85af2398
--- /dev/null
+++ b/product/rcar/include/system_mmap.h
@@ -0,0 +1,21 @@
+/*
+ * Renesas SCP/MCP Software
+ * Copyright (c) 2020, Renesas Electronics Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SYSTEM_MMAP_H
+#define SYSTEM_MMAP_H
+
+#include <rcar_mmap.h>
+
+#define BOARD_UART1_BASE (PERIPHERAL_BASE + 0xe68000)
+#define BOARD_UART2_BASE (PERIPHERAL_BASE + 0xe88000)
+
+#define SENSOR_SOC_TEMP1 (PERIPHERAL_BASE + 0x198000)
+#define SENSOR_SOC_TEMP2 (PERIPHERAL_BASE + 0x1a0000)
+#define SENSOR_SOC_TEMP3 (PERIPHERAL_BASE + 0x1a8000)
+#define SENSOR_SOC_TEMP (SENSOR_SOC_TEMP1)
+
+#endif /* SYSTEM_MMAP_H */
diff --git a/product/rcar/include/system_mmap_scp.h b/product/rcar/include/system_mmap_scp.h
new file mode 100644
index 00000000..d87281d4
--- /dev/null
+++ b/product/rcar/include/system_mmap_scp.h
@@ -0,0 +1,16 @@
+/*
+ * Renesas SCP/MCP Software
+ * Copyright (c) 2020, Renesas Electronics Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SYSTEM_MMAP_SCP_H
+#define SYSTEM_MMAP_SCP_H
+
+#include <rcar_mmap_scp.h>
+
+#define SCP_ROM_SIZE (64 * 1024) /* for SCP romfw */
+#define SCP_RAM_SIZE (256 * 1024) /* for SCP ramfw */
+
+#endif /* SYSTEM_MMAP_SCP_H */
diff --git a/product/rcar/module/rcar_clock/include/mod_rcar_clock.h b/product/rcar/module/rcar_clock/include/mod_rcar_clock.h
index 1bbc1bd5..188fdf73 100644
--- a/product/rcar/module/rcar_clock/include/mod_rcar_clock.h
+++ b/product/rcar/module/rcar_clock/include/mod_rcar_clock.h
@@ -9,6 +9,7 @@
#define MOD_RCAR_CLOCK_H
#include <rcar_mmap.h>
+#include <utils_def.h>
#include <mod_clock.h>
diff --git a/product/rcar/module/rcar_power_domain/include/mod_rcar_power_domain.h b/product/rcar/module/rcar_power_domain/include/mod_rcar_power_domain.h
index 03b8e7e3..e49dc6cb 100644
--- a/product/rcar/module/rcar_power_domain/include/mod_rcar_power_domain.h
+++ b/product/rcar/module/rcar_power_domain/include/mod_rcar_power_domain.h
@@ -857,8 +857,8 @@ static const fwk_id_t mod_pd_notification_id_power_state_pre_transition =
* @cond
*/
-#define CPU_PWR_OFF U(0x00000003)
-#define MODE_L2_DOWN (0x00000002U)
+#define CPU_PWR_OFF UINT32_C(0x00000003)
+#define MODE_L2_DOWN UINT32_C(0x00000002)
/*
* Module and power domain contexts
diff --git a/product/rcar/module/rcar_reg_sensor/include/mod_rcar_reg_sensor.h b/product/rcar/module/rcar_reg_sensor/include/mod_rcar_reg_sensor.h
index 5c57fec5..02a372f3 100644
--- a/product/rcar/module/rcar_reg_sensor/include/mod_rcar_reg_sensor.h
+++ b/product/rcar/module/rcar_reg_sensor/include/mod_rcar_reg_sensor.h
@@ -8,6 +8,8 @@
#ifndef MOD_RCAR_REG_SENSOR_H
#define MOD_RCAR_REG_SENSOR_H
+#include <utils_def.h>
+
#include <mod_sensor.h>
#include <stdint.h>
diff --git a/product/rcar/product.mk b/product/rcar/product.mk
new file mode 100644
index 00000000..b5f86297
--- /dev/null
+++ b/product/rcar/product.mk
@@ -0,0 +1,9 @@
+#
+# Renesas SCP/MCP Software
+# Copyright (c) 2020, Renesas Electronics Corporation. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+BS_PRODUCT_NAME := rcar
+BS_FIRMWARE_LIST := scp_ramfw
diff --git a/product/rcar/scp_ramfw/FreeRTOSConfig.h b/product/rcar/scp_ramfw/FreeRTOSConfig.h
new file mode 100644
index 00000000..0400d223
--- /dev/null
+++ b/product/rcar/scp_ramfw/FreeRTOSConfig.h
@@ -0,0 +1,49 @@
+/*
+ * Renesas SCP/MCP Software
+ * Copyright (c) 2020, Renesas Electronics Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef FREERTOS_CONFIG_H
+#define FREERTOS_CONFIG_H
+
+#define configUSE_IDLE_HOOK 1
+#define configUSE_TICK_HOOK 0
+#define configMINIMAL_STACK_SIZE ((unsigned short)200)
+#define configTOTAL_HEAP_SIZE ((size_t)(20 * 1024))
+
+/* Software timer definitions. */
+#define configUSE_TIMERS 1
+#define configTIMER_TASK_PRIORITY (configMAX_PRIORITIES - 1)
+#define configTIMER_QUEUE_LENGTH 5
+#define configTIMER_TASK_STACK_DEPTH (configMINIMAL_STACK_SIZE * 2)
+
+/* Set the following definitions to 1 to include the API function, or zero
+to exclude the API function. */
+#define INCLUDE_vTaskDelay 1
+
+#define INCLUDE_xSemaphoreGetMutexHolder 1
+
+void vConfigureTickInterrupt(void);
+#define configSETUP_TICK_INTERRUPT() vConfigureTickInterrupt()
+void vClearTickInterrupt(void);
+#define configCLEAR_TICK_INTERRUPT() vClearTickInterrupt()
+
+/* Defines needed by FreeRTOS to implement CMSIS RTOS2 API. Do not change! */
+#define configTICK_RATE_HZ ((TickType_t)1000 /* 1ms */)
+#define configSUPPORT_STATIC_ALLOCATION 1
+#define configSUPPORT_DYNAMIC_ALLOCATION 1
+#define configUSE_PREEMPTION 0
+#define configUSE_TIMERS 1
+#define configUSE_MUTEXES 1
+#define configUSE_RECURSIVE_MUTEXES 1
+#define configUSE_COUNTING_SEMAPHORES 1
+#define configUSE_TASK_NOTIFICATIONS 1
+#define configUSE_TRACE_FACILITY 1
+#define configUSE_16_BIT_TICKS 0
+#define configUSE_PORT_OPTIMISED_TASK_SELECTION 0
+#define configMAX_PRIORITIES 56
+#define configUSE_TASK_NOTIFICATIONS 1
+#define configUSE_OS2_THREAD_FLAGS 1
+#endif /* FREERTOS_CONFIG_H */
diff --git a/product/rcar/scp_ramfw/clock_devices.h b/product/rcar/scp_ramfw/clock_devices.h
new file mode 100644
index 00000000..77c04038
--- /dev/null
+++ b/product/rcar/scp_ramfw/clock_devices.h
@@ -0,0 +1,208 @@
+/*
+ * Renesas SCP/MCP Software
+ * Copyright (c) 2020, Renesas Electronics Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef CLOCK_DEVICES_H
+#define CLOCK_DEVICES_H
+
+/*!
+ * \brief Clock device indexes.
+ */
+enum clock_dev_idx {
+ CLOCK_DEV_IDX_BIG,
+ CLOCK_DEV_IDX_LITTLE,
+ CLOCK_DEV_IDX_ZTR,
+ CLOCK_DEV_IDX_ZTRD2,
+ CLOCK_DEV_IDX_ZT,
+ CLOCK_DEV_IDX_ZX,
+ CLOCK_DEV_IDX_S0D1,
+ CLOCK_DEV_IDX_S0D2,
+ CLOCK_DEV_IDX_S0D3,
+ CLOCK_DEV_IDX_S0D4,
+ CLOCK_DEV_IDX_S0D6,
+ CLOCK_DEV_IDX_S0D8,
+ CLOCK_DEV_IDX_S0D12,
+ CLOCK_DEV_IDX_S1D1,
+ CLOCK_DEV_IDX_S1D2,
+ CLOCK_DEV_IDX_S1D4,
+ CLOCK_DEV_IDX_S2D1,
+ CLOCK_DEV_IDX_S2D2,
+ CLOCK_DEV_IDX_S2D4,
+ CLOCK_DEV_IDX_S3D1,
+ CLOCK_DEV_IDX_S3D2,
+ CLOCK_DEV_IDX_S3D4,
+ CLOCK_DEV_IDX_SD0,
+ CLOCK_DEV_IDX_SD1,
+ CLOCK_DEV_IDX_SD2,
+ CLOCK_DEV_IDX_SD3,
+ CLOCK_DEV_IDX_CL,
+ CLOCK_DEV_IDX_CR,
+ CLOCK_DEV_IDX_CP,
+ CLOCK_DEV_IDX_CPEX,
+ CLOCK_DEV_IDX_CANFD,
+ CLOCK_DEV_IDX_CSI0,
+ CLOCK_DEV_IDX_MSO,
+ CLOCK_DEV_IDX_HDMI,
+ CLOCK_DEV_IDX_OSC,
+ CLOCK_DEV_IDX_R,
+ CLOCK_DEV_IDX_S0,
+ CLOCK_DEV_IDX_S1,
+ CLOCK_DEV_IDX_S2,
+ CLOCK_DEV_IDX_S3,
+ CLOCK_DEV_IDX_SDSRC,
+ CLOCK_DEV_IDX_RINT,
+ CLOCK_DEV_IDX_FDP1_1,
+ CLOCK_DEV_IDX_FDP1_0,
+ CLOCK_DEV_IDX_SCIF5,
+ CLOCK_DEV_IDX_SCIF4,
+ CLOCK_DEV_IDX_SCIF3,
+ CLOCK_DEV_IDX_SCIF1,
+ CLOCK_DEV_IDX_SCIF0,
+ CLOCK_DEV_IDX_MSIOF3,
+ CLOCK_DEV_IDX_MSIOF2,
+ CLOCK_DEV_IDX_MSIOF1,
+ CLOCK_DEV_IDX_MSIOF0,
+ CLOCK_DEV_IDX_SYS_DMAC2,
+ CLOCK_DEV_IDX_SYS_DMAC1,
+ CLOCK_DEV_IDX_SYS_DMAC0,
+ CLOCK_DEV_IDX_SCEG_PUB,
+ CLOCK_DEV_IDX_CMT3,
+ CLOCK_DEV_IDX_CMT2,
+ CLOCK_DEV_IDX_CMT1,
+ CLOCK_DEV_IDX_CMT0,
+ CLOCK_DEV_IDX_TPU0,
+ CLOCK_DEV_IDX_SCIF2,
+ CLOCK_DEV_IDX_SDIF3,
+ CLOCK_DEV_IDX_SDIF2,
+ CLOCK_DEV_IDX_SDIF1,
+ CLOCK_DEV_IDX_SDIF0,
+ CLOCK_DEV_IDX_PCIE1,
+ CLOCK_DEV_IDX_PCIE0,
+ CLOCK_DEV_IDX_USB_DMAC30,
+ CLOCK_DEV_IDX_USB3_IF0,
+ CLOCK_DEV_IDX_USB_DMAC31,
+ CLOCK_DEV_IDX_USB_DMAC0,
+ CLOCK_DEV_IDX_USB_DMAC1,
+ CLOCK_DEV_IDX_RWDT,
+ CLOCK_DEV_IDX_INTC_EX,
+ CLOCK_DEV_IDX_INTC_AP,
+ CLOCK_DEV_IDX_AUDMAC1,
+ CLOCK_DEV_IDX_AUDMAC0,
+ CLOCK_DEV_IDX_DRIF31,
+ CLOCK_DEV_IDX_DRIF30,
+ CLOCK_DEV_IDX_DRIF21,
+ CLOCK_DEV_IDX_DRIF20,
+ CLOCK_DEV_IDX_DRIF11,
+ CLOCK_DEV_IDX_DRIF10,
+ CLOCK_DEV_IDX_DRIF01,
+ CLOCK_DEV_IDX_DRIF00,
+ CLOCK_DEV_IDX_HSCIF4,
+ CLOCK_DEV_IDX_HSCIF3,
+ CLOCK_DEV_IDX_HSCIF2,
+ CLOCK_DEV_IDX_HSCIF1,
+ CLOCK_DEV_IDX_HSCIF0,
+ CLOCK_DEV_IDX_THERMAL,
+ CLOCK_DEV_IDX_PWM,
+ CLOCK_DEV_IDX_FCPVD2,
+ CLOCK_DEV_IDX_FCPVD1,
+ CLOCK_DEV_IDX_FCPVD0,
+ CLOCK_DEV_IDX_FCPVB1,
+ CLOCK_DEV_IDX_FCPVB0,
+ CLOCK_DEV_IDX_FCPVI1,
+ CLOCK_DEV_IDX_FCPVI0,
+ CLOCK_DEV_IDX_FCPF1,
+ CLOCK_DEV_IDX_FCPF0,
+ CLOCK_DEV_IDX_FCPCS,
+ CLOCK_DEV_IDX_VSPD2,
+ CLOCK_DEV_IDX_VSPD1,
+ CLOCK_DEV_IDX_VSPD0,
+ CLOCK_DEV_IDX_VSPBC,
+ CLOCK_DEV_IDX_VSPBD,
+ CLOCK_DEV_IDX_VSPI1,
+ CLOCK_DEV_IDX_VSPI0,
+ CLOCK_DEV_IDX_EHCI3,
+ CLOCK_DEV_IDX_EHCI2,
+ CLOCK_DEV_IDX_EHCI1,
+ CLOCK_DEV_IDX_EHCI0,
+ CLOCK_DEV_IDX_HSUSB,
+ CLOCK_DEV_IDX_HSUSB3,
+ CLOCK_DEV_IDX_CMM3,
+ CLOCK_DEV_IDX_CMM2,
+ CLOCK_DEV_IDX_CMM1,
+ CLOCK_DEV_IDX_CMM0,
+ CLOCK_DEV_IDX_CSI20,
+ CLOCK_DEV_IDX_CSI41,
+ CLOCK_DEV_IDX_CSI40,
+ CLOCK_DEV_IDX_DU3,
+ CLOCK_DEV_IDX_DU2,
+ CLOCK_DEV_IDX_DU1,
+ CLOCK_DEV_IDX_DU0,
+ CLOCK_DEV_IDX_LVDS,
+ CLOCK_DEV_IDX_HDMI1,
+ CLOCK_DEV_IDX_HDMI0,
+ CLOCK_DEV_IDX_VIN7,
+ CLOCK_DEV_IDX_VIN6,
+ CLOCK_DEV_IDX_VIN5,
+ CLOCK_DEV_IDX_VIN4,
+ CLOCK_DEV_IDX_VIN3,
+ CLOCK_DEV_IDX_VIN2,
+ CLOCK_DEV_IDX_VIN1,
+ CLOCK_DEV_IDX_VIN0,
+ CLOCK_DEV_IDX_ETHERAVB,
+ CLOCK_DEV_IDX_SATA0,
+ CLOCK_DEV_IDX_IMR3,
+ CLOCK_DEV_IDX_IMR2,
+ CLOCK_DEV_IDX_IMR1,
+ CLOCK_DEV_IDX_IMR0,
+ CLOCK_DEV_IDX_GPIO7,
+ CLOCK_DEV_IDX_GPIO6,
+ CLOCK_DEV_IDX_GPIO5,
+ CLOCK_DEV_IDX_GPIO4,
+ CLOCK_DEV_IDX_GPIO3,
+ CLOCK_DEV_IDX_GPIO2,
+ CLOCK_DEV_IDX_GPIO1,
+ CLOCK_DEV_IDX_GPIO0,
+ CLOCK_DEV_IDX_CAN_FD,
+ CLOCK_DEV_IDX_CAN_IF1,
+ CLOCK_DEV_IDX_CAN_IF0,
+ CLOCK_DEV_IDX_I2C6,
+ CLOCK_DEV_IDX_I2C5,
+ CLOCK_DEV_IDX_I2C_DVFS,
+ CLOCK_DEV_IDX_I2C4,
+ CLOCK_DEV_IDX_I2C3,
+ CLOCK_DEV_IDX_I2C2,
+ CLOCK_DEV_IDX_I2C1,
+ CLOCK_DEV_IDX_I2C0,
+ CLOCK_DEV_IDX_SSI_ALL,
+ CLOCK_DEV_IDX_SSI9,
+ CLOCK_DEV_IDX_SSI8,
+ CLOCK_DEV_IDX_SSI7,
+ CLOCK_DEV_IDX_SSI6,
+ CLOCK_DEV_IDX_SSI5,
+ CLOCK_DEV_IDX_SSI4,
+ CLOCK_DEV_IDX_SSI3,
+ CLOCK_DEV_IDX_SSI2,
+ CLOCK_DEV_IDX_SSI1,
+ CLOCK_DEV_IDX_SSI0,
+ CLOCK_DEV_IDX_SCU_ALL,
+ CLOCK_DEV_IDX_SCU_DVC1,
+ CLOCK_DEV_IDX_SCU_DVC0,
+ CLOCK_DEV_IDX_SCU_CTU0_MIX1,
+ CLOCK_DEV_IDX_SCU_CTU0_MIX0,
+ CLOCK_DEV_IDX_SCU_SRC9,
+ CLOCK_DEV_IDX_SCU_SRC8,
+ CLOCK_DEV_IDX_SCU_SRC7,
+ CLOCK_DEV_IDX_SCU_SRC6,
+ CLOCK_DEV_IDX_SCU_SRC5,
+ CLOCK_DEV_IDX_SCU_SRC4,
+ CLOCK_DEV_IDX_SCU_SRC3,
+ CLOCK_DEV_IDX_SCU_SRC2,
+ CLOCK_DEV_IDX_SCU_SRC1,
+ CLOCK_DEV_IDX_SCU_SRC0,
+ CLOCK_DEV_IDX_COUNT
+};
+
+#endif /* CLOCK_DEVICES_H */
diff --git a/product/rcar/scp_ramfw/clock_mstp_devices.h b/product/rcar/scp_ramfw/clock_mstp_devices.h
new file mode 100644
index 00000000..95c162b4
--- /dev/null
+++ b/product/rcar/scp_ramfw/clock_mstp_devices.h
@@ -0,0 +1,169 @@
+/*
+ * Renesas SCP/MCP Software
+ * Copyright (c) 2020, Renesas Electronics Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef CLOCK_MSTP_DEVICES_H
+#define CLOCK_MSTP_DEVICES_H
+
+/*!
+ * \brief MSTP clock device indexes; the order fixes each device's element index.
+ */
+enum clock_mstp_dev_idx {
+    CLOCK_MSTP_DEV_IDX_FDP1_1,
+    CLOCK_MSTP_DEV_IDX_FDP1_0,
+    CLOCK_MSTP_DEV_IDX_SCIF5,
+    CLOCK_MSTP_DEV_IDX_SCIF4,
+    CLOCK_MSTP_DEV_IDX_SCIF3,
+    CLOCK_MSTP_DEV_IDX_SCIF1,
+    CLOCK_MSTP_DEV_IDX_SCIF0,
+    CLOCK_MSTP_DEV_IDX_MSIOF3,
+    CLOCK_MSTP_DEV_IDX_MSIOF2,
+    CLOCK_MSTP_DEV_IDX_MSIOF1,
+    CLOCK_MSTP_DEV_IDX_MSIOF0,
+    CLOCK_MSTP_DEV_IDX_SYS_DMAC2,
+    CLOCK_MSTP_DEV_IDX_SYS_DMAC1,
+    CLOCK_MSTP_DEV_IDX_SYS_DMAC0,
+    CLOCK_MSTP_DEV_IDX_SCEG_PUB,
+    CLOCK_MSTP_DEV_IDX_CMT3,
+    CLOCK_MSTP_DEV_IDX_CMT2,
+    CLOCK_MSTP_DEV_IDX_CMT1,
+    CLOCK_MSTP_DEV_IDX_CMT0,
+    CLOCK_MSTP_DEV_IDX_TPU0,
+    CLOCK_MSTP_DEV_IDX_SCIF2,
+    CLOCK_MSTP_DEV_IDX_SDIF3,
+    CLOCK_MSTP_DEV_IDX_SDIF2,
+    CLOCK_MSTP_DEV_IDX_SDIF1,
+    CLOCK_MSTP_DEV_IDX_SDIF0,
+    CLOCK_MSTP_DEV_IDX_PCIE1,
+    CLOCK_MSTP_DEV_IDX_PCIE0,
+    CLOCK_MSTP_DEV_IDX_USB_DMAC30,
+    CLOCK_MSTP_DEV_IDX_USB3_IF0,
+    CLOCK_MSTP_DEV_IDX_USB_DMAC31,
+    CLOCK_MSTP_DEV_IDX_USB_DMAC0,
+    CLOCK_MSTP_DEV_IDX_USB_DMAC1,
+    CLOCK_MSTP_DEV_IDX_RWDT,
+    CLOCK_MSTP_DEV_IDX_INTC_EX,
+    CLOCK_MSTP_DEV_IDX_INTC_AP,
+    CLOCK_MSTP_DEV_IDX_AUDMAC1,
+    CLOCK_MSTP_DEV_IDX_AUDMAC0,
+    CLOCK_MSTP_DEV_IDX_DRIF31,
+    CLOCK_MSTP_DEV_IDX_DRIF30,
+    CLOCK_MSTP_DEV_IDX_DRIF21,
+    CLOCK_MSTP_DEV_IDX_DRIF20,
+    CLOCK_MSTP_DEV_IDX_DRIF11,
+    CLOCK_MSTP_DEV_IDX_DRIF10,
+    CLOCK_MSTP_DEV_IDX_DRIF01,
+    CLOCK_MSTP_DEV_IDX_DRIF00,
+    CLOCK_MSTP_DEV_IDX_HSCIF4,
+    CLOCK_MSTP_DEV_IDX_HSCIF3,
+    CLOCK_MSTP_DEV_IDX_HSCIF2,
+    CLOCK_MSTP_DEV_IDX_HSCIF1,
+    CLOCK_MSTP_DEV_IDX_HSCIF0,
+    CLOCK_MSTP_DEV_IDX_THERMAL,
+    CLOCK_MSTP_DEV_IDX_PWM,
+    CLOCK_MSTP_DEV_IDX_FCPVD2,
+    CLOCK_MSTP_DEV_IDX_FCPVD1,
+    CLOCK_MSTP_DEV_IDX_FCPVD0,
+    CLOCK_MSTP_DEV_IDX_FCPVB1,
+    CLOCK_MSTP_DEV_IDX_FCPVB0,
+    CLOCK_MSTP_DEV_IDX_FCPVI1,
+    CLOCK_MSTP_DEV_IDX_FCPVI0,
+    CLOCK_MSTP_DEV_IDX_FCPF1,
+    CLOCK_MSTP_DEV_IDX_FCPF0,
+    CLOCK_MSTP_DEV_IDX_FCPCS,
+    CLOCK_MSTP_DEV_IDX_VSPD2,
+    CLOCK_MSTP_DEV_IDX_VSPD1,
+    CLOCK_MSTP_DEV_IDX_VSPD0,
+    CLOCK_MSTP_DEV_IDX_VSPBC,
+    CLOCK_MSTP_DEV_IDX_VSPBD,
+    CLOCK_MSTP_DEV_IDX_VSPI1,
+    CLOCK_MSTP_DEV_IDX_VSPI0,
+    CLOCK_MSTP_DEV_IDX_EHCI3,
+    CLOCK_MSTP_DEV_IDX_EHCI2,
+    CLOCK_MSTP_DEV_IDX_EHCI1,
+    CLOCK_MSTP_DEV_IDX_EHCI0,
+    CLOCK_MSTP_DEV_IDX_HSUSB,
+    CLOCK_MSTP_DEV_IDX_HSUSB3,
+    CLOCK_MSTP_DEV_IDX_CMM3,
+    CLOCK_MSTP_DEV_IDX_CMM2,
+    CLOCK_MSTP_DEV_IDX_CMM1,
+    CLOCK_MSTP_DEV_IDX_CMM0,
+    CLOCK_MSTP_DEV_IDX_CSI20,
+    CLOCK_MSTP_DEV_IDX_CSI41,
+    CLOCK_MSTP_DEV_IDX_CSI40,
+    CLOCK_MSTP_DEV_IDX_DU3,
+    CLOCK_MSTP_DEV_IDX_DU2,
+    CLOCK_MSTP_DEV_IDX_DU1,
+    CLOCK_MSTP_DEV_IDX_DU0,
+    CLOCK_MSTP_DEV_IDX_LVDS,
+    CLOCK_MSTP_DEV_IDX_HDMI1,
+    CLOCK_MSTP_DEV_IDX_HDMI0,
+    CLOCK_MSTP_DEV_IDX_VIN7,
+    CLOCK_MSTP_DEV_IDX_VIN6,
+    CLOCK_MSTP_DEV_IDX_VIN5,
+    CLOCK_MSTP_DEV_IDX_VIN4,
+    CLOCK_MSTP_DEV_IDX_VIN3,
+    CLOCK_MSTP_DEV_IDX_VIN2,
+    CLOCK_MSTP_DEV_IDX_VIN1,
+    CLOCK_MSTP_DEV_IDX_VIN0,
+    CLOCK_MSTP_DEV_IDX_ETHERAVB,
+    CLOCK_MSTP_DEV_IDX_SATA0,
+    CLOCK_MSTP_DEV_IDX_IMR3,
+    CLOCK_MSTP_DEV_IDX_IMR2,
+    CLOCK_MSTP_DEV_IDX_IMR1,
+    CLOCK_MSTP_DEV_IDX_IMR0,
+    CLOCK_MSTP_DEV_IDX_GPIO7,
+    CLOCK_MSTP_DEV_IDX_GPIO6,
+    CLOCK_MSTP_DEV_IDX_GPIO5,
+    CLOCK_MSTP_DEV_IDX_GPIO4,
+    CLOCK_MSTP_DEV_IDX_GPIO3,
+    CLOCK_MSTP_DEV_IDX_GPIO2,
+    CLOCK_MSTP_DEV_IDX_GPIO1,
+    CLOCK_MSTP_DEV_IDX_GPIO0,
+    CLOCK_MSTP_DEV_IDX_CAN_FD,
+    CLOCK_MSTP_DEV_IDX_CAN_IF1,
+    CLOCK_MSTP_DEV_IDX_CAN_IF0,
+    CLOCK_MSTP_DEV_IDX_I2C6,
+    CLOCK_MSTP_DEV_IDX_I2C5,
+    CLOCK_MSTP_DEV_IDX_I2C_DVFS,
+    CLOCK_MSTP_DEV_IDX_I2C4,
+    CLOCK_MSTP_DEV_IDX_I2C3,
+    CLOCK_MSTP_DEV_IDX_I2C2,
+    CLOCK_MSTP_DEV_IDX_I2C1,
+    CLOCK_MSTP_DEV_IDX_I2C0,
+    CLOCK_MSTP_DEV_IDX_SSI_ALL,
+    CLOCK_MSTP_DEV_IDX_SSI9,
+    CLOCK_MSTP_DEV_IDX_SSI8,
+    CLOCK_MSTP_DEV_IDX_SSI7,
+    CLOCK_MSTP_DEV_IDX_SSI6,
+    CLOCK_MSTP_DEV_IDX_SSI5,
+    CLOCK_MSTP_DEV_IDX_SSI4,
+    CLOCK_MSTP_DEV_IDX_SSI3,
+    CLOCK_MSTP_DEV_IDX_SSI2,
+    CLOCK_MSTP_DEV_IDX_SSI1,
+    CLOCK_MSTP_DEV_IDX_SSI0,
+    CLOCK_MSTP_DEV_IDX_SCU_ALL,
+    CLOCK_MSTP_DEV_IDX_SCU_DVC1,
+    CLOCK_MSTP_DEV_IDX_SCU_DVC0,
+    CLOCK_MSTP_DEV_IDX_SCU_CTU0_MIX1,
+    CLOCK_MSTP_DEV_IDX_SCU_CTU0_MIX0,
+    CLOCK_MSTP_DEV_IDX_SCU_SRC9,
+    CLOCK_MSTP_DEV_IDX_SCU_SRC8,
+    CLOCK_MSTP_DEV_IDX_SCU_SRC7,
+    CLOCK_MSTP_DEV_IDX_SCU_SRC6,
+    CLOCK_MSTP_DEV_IDX_SCU_SRC5,
+    CLOCK_MSTP_DEV_IDX_SCU_SRC4,
+    CLOCK_MSTP_DEV_IDX_SCU_SRC3,
+    CLOCK_MSTP_DEV_IDX_SCU_SRC2,
+    CLOCK_MSTP_DEV_IDX_SCU_SRC1,
+    CLOCK_MSTP_DEV_IDX_SCU_SRC0,
+    CLOCK_MSTP_DEV_IDX_COUNT /* Number of MSTP clock devices. */
+};
+
+#define CLK_ID_MSTP_START CLOCK_MSTP_DEV_IDX_FDP1_1 /* First MSTP index (inclusive). */
+#define CLK_ID_MSTP_END CLOCK_MSTP_DEV_IDX_COUNT /* One past the last MSTP index (exclusive). */
+
+#endif /* CLOCK_MSTP_DEVICES_H */
diff --git a/product/rcar/scp_ramfw/clock_sd_devices.h b/product/rcar/scp_ramfw/clock_sd_devices.h
new file mode 100644
index 00000000..9312dc4e
--- /dev/null
+++ b/product/rcar/scp_ramfw/clock_sd_devices.h
@@ -0,0 +1,75 @@
+/*
+ * Renesas SCP/MCP Software
+ * Copyright (c) 2020, Renesas Electronics Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef CLOCK_SD_DEVICES_H
+#define CLOCK_SD_DEVICES_H
+
+/*!
+ * \brief SD clock parent (source clock) indexes.
+ */
+enum clock_sd_parent_idx {
+    CLK_EXTAL,
+    CLK_OSC_EXTAL,
+    CLK_PLL1,
+    CLK_PLL1_DIV2,
+    CLK_PLL1_DIV4,
+    CLK_S0,
+    CLK_S1,
+    CLK_S2,
+    CLK_S3,
+    CLK_SDSRC,
+    CLOCK_PARENT_IDX_COUNT /* Number of parent clocks. */
+};
+
+enum clock_sd_dev_idx { /*! SD clock device indexes; the order fixes each device's element index. */
+    CLOCK_SD_DEV_IDX_ZTR,
+    CLOCK_SD_DEV_IDX_ZTRD2,
+    CLOCK_SD_DEV_IDX_ZT,
+    CLOCK_SD_DEV_IDX_ZX,
+    CLOCK_SD_DEV_IDX_S0D1,
+    CLOCK_SD_DEV_IDX_S0D2,
+    CLOCK_SD_DEV_IDX_S0D3,
+    CLOCK_SD_DEV_IDX_S0D4,
+    CLOCK_SD_DEV_IDX_S0D6,
+    CLOCK_SD_DEV_IDX_S0D8,
+    CLOCK_SD_DEV_IDX_S0D12,
+    CLOCK_SD_DEV_IDX_S1D1,
+    CLOCK_SD_DEV_IDX_S1D2,
+    CLOCK_SD_DEV_IDX_S1D4,
+    CLOCK_SD_DEV_IDX_S2D1,
+    CLOCK_SD_DEV_IDX_S2D2,
+    CLOCK_SD_DEV_IDX_S2D4,
+    CLOCK_SD_DEV_IDX_S3D1,
+    CLOCK_SD_DEV_IDX_S3D2,
+    CLOCK_SD_DEV_IDX_S3D4,
+    CLOCK_SD_DEV_IDX_SD0,
+    CLOCK_SD_DEV_IDX_SD1,
+    CLOCK_SD_DEV_IDX_SD2,
+    CLOCK_SD_DEV_IDX_SD3,
+    CLOCK_SD_DEV_IDX_CL,
+    CLOCK_SD_DEV_IDX_CR,
+    CLOCK_SD_DEV_IDX_CP,
+    CLOCK_SD_DEV_IDX_CPEX,
+    CLOCK_SD_DEV_IDX_CANFD,
+    CLOCK_SD_DEV_IDX_CSI0,
+    CLOCK_SD_DEV_IDX_MSO,
+    CLOCK_SD_DEV_IDX_HDMI,
+    CLOCK_SD_DEV_IDX_OSC,
+    CLOCK_SD_DEV_IDX_R,
+    CLOCK_SD_DEV_IDX_S0,
+    CLOCK_SD_DEV_IDX_S1,
+    CLOCK_SD_DEV_IDX_S2,
+    CLOCK_SD_DEV_IDX_S3,
+    CLOCK_SD_DEV_IDX_SDSRC,
+    CLOCK_SD_DEV_IDX_RINT,
+    CLOCK_SD_DEV_IDX_COUNT /* Number of SD clock devices. */
+};
+
+#define CLK_ID_SD_START CLOCK_SD_DEV_IDX_ZTR /* First SD index (inclusive). */
+#define CLK_ID_SD_END CLOCK_SD_DEV_IDX_COUNT /* One past the last SD index (exclusive). */
+
+#endif /* CLOCK_SD_DEVICES_H */
diff --git a/product/rcar/scp_ramfw/config_clock.c b/product/rcar/scp_ramfw/config_clock.c
new file mode 100644
index 00000000..e6a4e889
--- /dev/null
+++ b/product/rcar/scp_ramfw/config_clock.c
@@ -0,0 +1,1765 @@
+/*
+ * Renesas SCP/MCP Software
+ * Copyright (c) 2020, Renesas Electronics Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <clock_devices.h>
+#include <clock_mstp_devices.h>
+#include <clock_sd_devices.h>
+#include <config_rcar_power_domain.h>
+#include <rcar_core.h>
+
+#include <mod_clock.h>
+#include <mod_rcar_clock.h>
+#include <mod_rcar_power_domain.h>
+
+#include <fwk_element.h>
+#include <fwk_module.h>
+#include <fwk_module_idx.h>
+
+#include <stddef.h>
+
+static struct fwk_element clock_dev_desc_table[] = {
+ [CLOCK_DEV_IDX_BIG] = {
+ .name = "CPU_GROUP_BIG",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_CLOCK, 0),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_LITTLE] = {
+ .name = "CPU_GROUP_LITTLE",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_CLOCK, 1),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_ZTR] = {
+ .name = "ztr",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_SD_CLOCK,
+ CLOCK_SD_DEV_IDX_ZTR),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_SD_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_ZTRD2] = {
+ .name = "ztrd2",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_SD_CLOCK,
+ CLOCK_SD_DEV_IDX_ZTRD2),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_SD_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_ZT] = {
+ .name = "zt",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_SD_CLOCK,
+ CLOCK_SD_DEV_IDX_ZT),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_SD_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_ZX] = {
+ .name = "zx",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_SD_CLOCK,
+ CLOCK_SD_DEV_IDX_ZX),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_SD_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_S0D1] = {
+ .name = "s0d1",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_SD_CLOCK,
+ CLOCK_SD_DEV_IDX_S0D1),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_SD_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_S0D2] = {
+ .name = "s0d2",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_SD_CLOCK,
+ CLOCK_SD_DEV_IDX_S0D2),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_SD_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_S0D3] = {
+ .name = "s0d3",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_SD_CLOCK,
+ CLOCK_SD_DEV_IDX_S0D3),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_SD_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_S0D4] = {
+ .name = "s0d4",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_SD_CLOCK,
+ CLOCK_SD_DEV_IDX_S0D4),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_SD_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_S0D6] = {
+ .name = "s0d6",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_SD_CLOCK,
+ CLOCK_SD_DEV_IDX_S0D6),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_SD_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_S0D8] = {
+ .name = "s0d8",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_SD_CLOCK,
+ CLOCK_SD_DEV_IDX_S0D8),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_SD_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_S0D12] = {
+ .name = "s0d12",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_SD_CLOCK,
+ CLOCK_SD_DEV_IDX_S0D12),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_SD_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_S1D1] = {
+ .name = "s1d1",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_SD_CLOCK,
+ CLOCK_SD_DEV_IDX_S1D1),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_SD_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_S1D2] = {
+ .name = "s1d2",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_SD_CLOCK,
+ CLOCK_SD_DEV_IDX_S1D2),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_SD_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_S1D4] = {
+ .name = "s1d4",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_SD_CLOCK,
+ CLOCK_SD_DEV_IDX_S1D4),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_SD_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_S2D1] = {
+ .name = "s2d1",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_SD_CLOCK,
+ CLOCK_SD_DEV_IDX_S2D1),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_SD_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_S2D2] = {
+ .name = "s2d2",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_SD_CLOCK,
+ CLOCK_SD_DEV_IDX_S2D2),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_SD_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_S2D4] = {
+ .name = "s2d4",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_SD_CLOCK,
+ CLOCK_SD_DEV_IDX_S2D4),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_SD_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_S3D1] = {
+ .name = "s3d1",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_SD_CLOCK,
+ CLOCK_SD_DEV_IDX_S3D1),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_SD_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_S3D2] = {
+ .name = "s3d2",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_SD_CLOCK,
+ CLOCK_SD_DEV_IDX_S3D2),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_SD_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_S3D4] = {
+ .name = "s3d4",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_SD_CLOCK,
+ CLOCK_SD_DEV_IDX_S3D4),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_SD_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_SD0] = {
+ .name = "sd0",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_SD_CLOCK,
+ CLOCK_SD_DEV_IDX_SD0),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_SD_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_SD1] = {
+ .name = "sd1",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_SD_CLOCK,
+ CLOCK_SD_DEV_IDX_SD1),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_SD_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_SD2] = {
+ .name = "sd2",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_SD_CLOCK,
+ CLOCK_SD_DEV_IDX_SD2),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_SD_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_SD3] = {
+ .name = "sd3",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_SD_CLOCK,
+ CLOCK_SD_DEV_IDX_SD3),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_SD_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_CL] = {
+ .name = "cl",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_SD_CLOCK,
+ CLOCK_SD_DEV_IDX_CL),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_SD_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_CR] = {
+ .name = "cr",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_SD_CLOCK,
+ CLOCK_SD_DEV_IDX_CR),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_SD_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_CP] = {
+ .name = "cp",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_SD_CLOCK,
+ CLOCK_SD_DEV_IDX_CP),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_SD_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_CPEX] = {
+ .name = "cpex",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_SD_CLOCK,
+ CLOCK_SD_DEV_IDX_CPEX),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_SD_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_CANFD] = {
+ .name = "canfd",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_SD_CLOCK,
+ CLOCK_SD_DEV_IDX_CANFD),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_SD_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_CSI0] = {
+ .name = "csi0",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_SD_CLOCK,
+ CLOCK_SD_DEV_IDX_CSI0),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_SD_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_MSO] = {
+ .name = "mso",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_SD_CLOCK,
+ CLOCK_SD_DEV_IDX_MSO),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_SD_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_HDMI] = {
+ .name = "hdmi",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_SD_CLOCK,
+ CLOCK_SD_DEV_IDX_HDMI),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_SD_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_OSC] = {
+ .name = "osc",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_SD_CLOCK,
+ CLOCK_SD_DEV_IDX_OSC),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_SD_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_R] = {
+ .name = "r",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_SD_CLOCK,
+ CLOCK_SD_DEV_IDX_R),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_SD_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_S0] = {
+ .name = "s0",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_SD_CLOCK,
+ CLOCK_SD_DEV_IDX_S0),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_SD_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_S1] = {
+ .name = "s1",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_SD_CLOCK,
+ CLOCK_SD_DEV_IDX_S1),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_SD_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_S2] = {
+ .name = "s2",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_SD_CLOCK,
+ CLOCK_SD_DEV_IDX_S2),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_SD_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_S3] = {
+ .name = "s3",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_SD_CLOCK,
+ CLOCK_SD_DEV_IDX_S3),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_SD_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_SDSRC] = {
+ .name = "sdsrc",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_SD_CLOCK,
+ CLOCK_SD_DEV_IDX_SDSRC),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_SD_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_RINT] = {
+ .name = "rint",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_SD_CLOCK,
+ CLOCK_SD_DEV_IDX_RINT),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_SD_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_FDP1_1] = {
+ .name = "fdp1-1",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_FDP1_1),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_FDP1_0] = {
+ .name = "fdp1-0",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_FDP1_0),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_SCIF5] = {
+ .name = "scif5",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_SCIF5),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_SCIF4] = {
+ .name = "scif4",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_SCIF4),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_SCIF3] = {
+ .name = "scif3",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_SCIF3),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_SCIF1] = {
+ .name = "scif1",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_SCIF1),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_SCIF0] = {
+ .name = "scif0",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_SCIF0),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_MSIOF3] = {
+ .name = "msiof3",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_MSIOF3),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_MSIOF2] = {
+ .name = "msiof2",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_MSIOF2),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_MSIOF1] = {
+ .name = "msiof1",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_MSIOF1),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_MSIOF0] = {
+ .name = "msiof0",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_MSIOF0),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_SYS_DMAC2] = {
+ .name = "sys-dmac2",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_SYS_DMAC2),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_SYS_DMAC1] = {
+ .name = "sys-dmac1",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_SYS_DMAC1),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_SYS_DMAC0] = {
+ .name = "sys-dmac0",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_SYS_DMAC0),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_SCEG_PUB] = {
+ .name = "sceg-pub",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_SCEG_PUB),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_CMT3] = {
+ .name = "cmt3",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_CMT3),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_CMT2] = {
+ .name = "cmt2",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_CMT2),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_CMT1] = {
+ .name = "cmt1",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_CMT1),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_CMT0] = {
+ .name = "cmt0",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_CMT0),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_TPU0] = {
+ .name = "tpu0",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_TPU0),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_SCIF2] = {
+ .name = "scif2",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_SCIF2),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_SDIF3] = {
+ .name = "sdif3",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_SDIF3),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_SDIF2] = {
+ .name = "sdif2",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_SDIF2),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_SDIF1] = {
+ .name = "sdif1",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_SDIF1),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_SDIF0] = {
+ .name = "sdif0",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_SDIF0),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_PCIE1] = {
+ .name = "pcie1",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_PCIE1),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_PCIE0] = {
+ .name = "pcie0",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_PCIE0),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_USB_DMAC30] = {
+ .name = "usb-dmac30",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_USB_DMAC30),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_USB3_IF0] = {
+ .name = "usb3-if0",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_USB3_IF0),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_USB_DMAC31] = {
+ .name = "usb-dmac31",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_USB_DMAC31),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_USB_DMAC0] = {
+ .name = "usb-dmac0",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_USB_DMAC0),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_USB_DMAC1] = {
+ .name = "usb-dmac1",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_USB_DMAC1),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_RWDT] = {
+ .name = "rwdt",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_RWDT),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_INTC_EX] = {
+ .name = "intc-ex",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_INTC_EX),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_INTC_AP] = {
+ .name = "intc-ap",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_INTC_AP),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_AUDMAC1] = {
+ .name = "audmac1",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_AUDMAC1),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_AUDMAC0] = {
+ .name = "audmac0",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_AUDMAC0),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_DRIF31] = {
+ .name = "drif31",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_DRIF31),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_DRIF30] = {
+ .name = "drif30",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_DRIF30),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_DRIF21] = {
+ .name = "drif21",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_DRIF21),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_DRIF20] = {
+ .name = "drif20",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_DRIF20),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_DRIF11] = {
+ .name = "drif11",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_DRIF11),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_DRIF10] = {
+ .name = "drif10",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_DRIF10),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_DRIF01] = {
+ .name = "drif01",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_DRIF01),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_DRIF00] = {
+ .name = "drif00",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_DRIF00),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_HSCIF4] = {
+ .name = "hscif4",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_HSCIF4),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_HSCIF3] = {
+ .name = "hscif3",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_HSCIF3),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_HSCIF2] = {
+ .name = "hscif2",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_HSCIF2),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_HSCIF1] = {
+ .name = "hscif1",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_HSCIF1),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_HSCIF0] = {
+ .name = "hscif0",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_HSCIF0),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_THERMAL] = {
+ .name = "thermal",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_THERMAL),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_PWM] = {
+ .name = "pwm",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_PWM),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_FCPVD2] = {
+ .name = "fcpvd2",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_FCPVD2),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_FCPVD1] = {
+ .name = "fcpvd1",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_FCPVD1),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_FCPVD0] = {
+ .name = "fcpvd0",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_FCPVD0),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_FCPVB1] = {
+ .name = "fcpvb1",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_FCPVB1),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_FCPVB0] = {
+ .name = "fcpvb0",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_FCPVB0),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_FCPVI1] = {
+ .name = "fcpvi1",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_FCPVI1),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_FCPVI0] = {
+ .name = "fcpvi0",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_FCPVI0),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_FCPF1] = {
+ .name = "fcpf1",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_FCPF1),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_FCPF0] = {
+ .name = "fcpf0",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_FCPF0),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_FCPCS] = {
+ .name = "fcpcs",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_FCPCS),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_VSPD2] = {
+ .name = "vspd2",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_VSPD2),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_VSPD1] = {
+ .name = "vspd1",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_VSPD1),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_VSPD0] = {
+ .name = "vspd0",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_VSPD0),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_VSPBC] = {
+ .name = "vspbc",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_VSPBC),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_VSPBD] = {
+ .name = "vspbd",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_VSPBD),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_VSPI1] = {
+ .name = "vspi1",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_VSPI1),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_VSPI0] = {
+ .name = "vspi0",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_VSPI0),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_EHCI3] = {
+ .name = "ehci3",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_EHCI3),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_EHCI2] = {
+ .name = "ehci2",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_EHCI2),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_EHCI1] = {
+ .name = "ehci1",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_EHCI1),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_EHCI0] = {
+ .name = "ehci0",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_EHCI0),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_HSUSB] = {
+ .name = "hsusb",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_HSUSB),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_HSUSB3] = {
+ .name = "hsusb3",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_HSUSB3),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_CMM3] = {
+ .name = "cmm3",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_CMM3),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_CMM2] = {
+ .name = "cmm2",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_CMM2),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_CMM1] = {
+ .name = "cmm1",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_CMM1),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_CMM0] = {
+ .name = "cmm0",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_CMM0),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_CSI20] = {
+ .name = "csi20",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_CSI20),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_CSI41] = {
+ .name = "csi41",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_CSI41),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_CSI40] = {
+ .name = "csi40",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_CSI40),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_DU3] = {
+ .name = "du3",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_DU3),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_DU2] = {
+ .name = "du2",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_DU2),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_DU1] = {
+ .name = "du1",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_DU1),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_DU0] = {
+ .name = "du0",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_DU0),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_LVDS] = {
+ .name = "lvds",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_LVDS),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_HDMI1] = {
+ .name = "hdmi1",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_HDMI1),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_HDMI0] = {
+ .name = "hdmi0",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_HDMI0),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_VIN7] = {
+ .name = "vin7",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_VIN7),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_VIN6] = {
+ .name = "vin6",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_VIN6),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_VIN5] = {
+ .name = "vin5",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_VIN5),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_VIN4] = {
+ .name = "vin4",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_VIN4),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_VIN3] = {
+ .name = "vin3",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_VIN3),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_VIN2] = {
+ .name = "vin2",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_VIN2),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_VIN1] = {
+ .name = "vin1",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_VIN1),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_VIN0] = {
+ .name = "vin0",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_VIN0),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_ETHERAVB] = {
+ .name = "etheravb",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_ETHERAVB),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_SATA0] = {
+ .name = "sata0",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_SATA0),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_IMR3] = {
+ .name = "imr3",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_IMR3),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_IMR2] = {
+ .name = "imr2",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_IMR2),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_IMR1] = {
+ .name = "imr1",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_IMR1),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_IMR0] = {
+ .name = "imr0",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_IMR0),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_GPIO7] = {
+ .name = "gpio7",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_GPIO7),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_GPIO6] = {
+ .name = "gpio6",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_GPIO6),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_GPIO5] = {
+ .name = "gpio5",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_GPIO5),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_GPIO4] = {
+ .name = "gpio4",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_GPIO4),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_GPIO3] = {
+ .name = "gpio3",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_GPIO3),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_GPIO2] = {
+ .name = "gpio2",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_GPIO2),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_GPIO1] = {
+ .name = "gpio1",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_GPIO1),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_GPIO0] = {
+ .name = "gpio0",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_GPIO0),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_CAN_FD] = {
+ .name = "can-fd",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_CAN_FD),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_CAN_IF1] = {
+ .name = "can-if1",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_CAN_IF1),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_CAN_IF0] = {
+ .name = "can-if0",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_CAN_IF0),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_I2C6] = {
+ .name = "i2c6",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_I2C6),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_I2C5] = {
+ .name = "i2c5",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_I2C5),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_I2C_DVFS] = {
+ .name = "i2c-dvfs",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_I2C_DVFS),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_I2C4] = {
+ .name = "i2c4",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_I2C4),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_I2C3] = {
+ .name = "i2c3",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_I2C3),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_I2C2] = {
+ .name = "i2c2",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_I2C2),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_I2C1] = {
+ .name = "i2c1",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_I2C1),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_I2C0] = {
+ .name = "i2c0",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_I2C0),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_SSI_ALL] = {
+ .name = "ssi-all",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_SSI_ALL),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_SSI9] = {
+ .name = "ssi9",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_SSI9),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_SSI8] = {
+ .name = "ssi8",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_SSI8),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_SSI7] = {
+ .name = "ssi7",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_SSI7),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_SSI6] = {
+ .name = "ssi6",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_SSI6),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_SSI5] = {
+ .name = "ssi5",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_SSI5),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_SSI4] = {
+ .name = "ssi4",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_SSI4),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_SSI3] = {
+ .name = "ssi3",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_SSI3),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_SSI2] = {
+ .name = "ssi2",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_SSI2),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_SSI1] = {
+ .name = "ssi1",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_SSI1),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_SSI0] = {
+ .name = "ssi0",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_SSI0),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_SCU_ALL] = {
+ .name = "scu-all",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_SCU_ALL),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_SCU_DVC1] = {
+ .name = "scu-dvc1",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_SCU_DVC1),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_SCU_DVC0] = {
+ .name = "scu-dvc0",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_SCU_DVC0),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_SCU_CTU0_MIX1] = {
+ .name = "scu-ctu1-mix1",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_SCU_CTU0_MIX1),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_SCU_CTU0_MIX0] = {
+ .name = "scu-ctu0-mix0",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_SCU_CTU0_MIX0),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_SCU_SRC9] = {
+ .name = "scu-src9",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_SCU_SRC9),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_SCU_SRC8] = {
+ .name = "scu-src8",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_SCU_SRC8),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_SCU_SRC7] = {
+ .name = "scu-src7",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_SCU_SRC7),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_SCU_SRC6] = {
+ .name = "scu-src6",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_SCU_SRC6),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_SCU_SRC5] = {
+ .name = "scu-src5",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_SCU_SRC5),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_SCU_SRC4] = {
+ .name = "scu-src4",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_SCU_SRC4),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_SCU_SRC3] = {
+ .name = "scu-src3",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_SCU_SRC3),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_SCU_SRC2] = {
+ .name = "scu-src2",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_SCU_SRC2),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_SCU_SRC1] = {
+ .name = "scu-src1",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_SCU_SRC1),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_SCU_SRC0] = {
+ .name = "scu-src0",
+ .data = &((struct mod_clock_dev_config) {
+ .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ CLOCK_MSTP_DEV_IDX_SCU_SRC0),
+ .api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_RCAR_MSTP_CLOCK,
+ MOD_RCAR_CLOCK_API_TYPE_CLOCK),
+ }),
+ },
+ [CLOCK_DEV_IDX_COUNT] = { 0 }, /* Termination description. */
+};
+
+static const struct fwk_element *clock_get_dev_desc_table(fwk_id_t module_id)
+{
+ unsigned int i;
+ unsigned int core_count;
+ struct mod_clock_dev_config *dev_config;
+
+ core_count = rcar_core_get_count();
+
+ /* Configure all clocks to respond to changes in SYSTOP power state */
+ for (i = 0; i < CLOCK_DEV_IDX_COUNT; i++) {
+ dev_config =
+ (struct mod_clock_dev_config *)clock_dev_desc_table[i].data;
+ dev_config->pd_source_id = FWK_ID_ELEMENT(
+ FWK_MODULE_IDX_RCAR_POWER_DOMAIN,
+ CONFIG_POWER_DOMAIN_CHILD_COUNT + core_count);
+ }
+ return clock_dev_desc_table;
+}
+
/*
 * Clock module configuration.
 *
 * Elements are produced at runtime by clock_get_dev_desc_table() because
 * each device's power-domain source index depends on the probed core count.
 */
struct fwk_module_config config_clock = {
    .elements = FWK_MODULE_DYNAMIC_ELEMENTS(clock_get_dev_desc_table),
    .data = &((struct mod_clock_config){
        /* Notification delivered after a power-domain state transition. */
        .pd_transition_notification_id = FWK_ID_NOTIFICATION_INIT(
            FWK_MODULE_IDX_RCAR_POWER_DOMAIN,
            MOD_PD_NOTIFICATION_IDX_POWER_STATE_TRANSITION),
        /* Notification delivered before a power-domain state transition. */
        .pd_pre_transition_notification_id = FWK_ID_NOTIFICATION_INIT(
            FWK_MODULE_IDX_RCAR_POWER_DOMAIN,
            MOD_PD_NOTIFICATION_IDX_POWER_STATE_PRE_TRANSITION),
    }),
};
diff --git a/product/rcar/scp_ramfw/config_scmi.c b/product/rcar/scp_ramfw/config_scmi.c
new file mode 100644
index 00000000..a2de8fe3
--- /dev/null
+++ b/product/rcar/scp_ramfw/config_scmi.c
@@ -0,0 +1,98 @@
+/*
+ * Renesas SCP/MCP Software
+ * Copyright (c) 2020, Renesas Electronics Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <rcar_scmi.h>
+
+#include <internal/scmi.h>
+
+#include <mod_scmi.h>
+#include <mod_smt.h>
+
+#include <fwk_element.h>
+#include <fwk_id.h>
+#include <fwk_macros.h>
+#include <fwk_module.h>
+#include <fwk_module_idx.h>
+
+/*
+ * SCMI service table: one service per transport channel. The PSCI
+ * service rides the secure SMT channel; OSPM 0 rides the non-secure
+ * one (see config_smt.c). The zeroed entry at IDX_COUNT terminates
+ * the table.
+ */
+static const struct fwk_element element_table[] = {
+    [RCAR_SCMI_SERVICE_IDX_PSCI] = {
+        .name = "PSCI",
+        .data = &(struct mod_scmi_service_config) {
+            .transport_id = FWK_ID_ELEMENT_INIT(
+                FWK_MODULE_IDX_SMT,
+                RCAR_SCMI_SERVICE_IDX_PSCI),
+            .transport_api_id = FWK_ID_API_INIT(
+                FWK_MODULE_IDX_SMT,
+                MOD_SMT_API_IDX_SCMI_TRANSPORT),
+            .transport_notification_init_id =
+                FWK_ID_NOTIFICATION_INIT(FWK_MODULE_IDX_SMT,
+                    MOD_SMT_NOTIFICATION_IDX_INITIALIZED),
+            .scmi_agent_id = SCMI_AGENT_ID_PSCI,
+        },
+    },
+
+    [RCAR_SCMI_SERVICE_IDX_OSPM_0] = {
+        .name = "OSPM 0",
+        .data = &(struct mod_scmi_service_config) {
+            .transport_id = FWK_ID_ELEMENT_INIT(
+                FWK_MODULE_IDX_SMT,
+                RCAR_SCMI_SERVICE_IDX_OSPM_0),
+            .transport_api_id = FWK_ID_API_INIT(
+                FWK_MODULE_IDX_SMT,
+                MOD_SMT_API_IDX_SCMI_TRANSPORT),
+            .transport_notification_init_id =
+                FWK_ID_NOTIFICATION_INIT(FWK_MODULE_IDX_SMT,
+                    MOD_SMT_NOTIFICATION_IDX_INITIALIZED),
+            .scmi_agent_id = SCMI_AGENT_ID_OSPM,
+        },
+    },
+/*
+ * NOTE(review): second OSPM service left disabled -- presumably only one
+ * non-secure mailbox channel is wired up on this platform; confirm before
+ * enabling (it would also need an SMT channel and an entry in rcar_scmi.h).
+ */
+/*
+    [RCAR_SCMI_SERVICE_IDX_OSPM_1] = {
+        .name = "OSPM 1",
+        .data = &(struct mod_scmi_service_config) {
+            .transport_id = FWK_ID_ELEMENT_INIT(
+                FWK_MODULE_IDX_SMT,
+                RCAR_SCMI_SERVICE_IDX_OSPM_1),
+            .transport_api_id = FWK_ID_API_INIT(
+                FWK_MODULE_IDX_SMT,
+                MOD_SMT_API_IDX_SCMI_TRANSPORT),
+            .transport_notification_init_id =
+                FWK_ID_NOTIFICATION_INIT(FWK_MODULE_IDX_SMT,
+                    MOD_SMT_NOTIFICATION_IDX_INITIALIZED),
+            .scmi_agent_id = SCMI_AGENT_ID_OSPM,
+        },
+    },
+*/
+    [RCAR_SCMI_SERVICE_IDX_COUNT] = { 0 },
+};
+
+/* Return the SCMI service table; identical for any module id. */
+static const struct fwk_element *get_element_table(fwk_id_t module_id)
+{
+    return element_table;
+}
+
+/*
+ * SCMI agent descriptors, indexed by agent id. Slot 0 is implicitly
+ * zeroed: the SCMI specification reserves agent id 0 for the platform
+ * itself (assumes SCMI_AGENT_ID_* start at 1 -- see rcar_scmi.h).
+ */
+static const struct mod_scmi_agent agent_table[] = {
+    [SCMI_AGENT_ID_OSPM] = {
+        .type = SCMI_AGENT_TYPE_OSPM,
+        .name = "OSPM",
+    },
+    [SCMI_AGENT_ID_PSCI] = {
+        .type = SCMI_AGENT_TYPE_PSCI,
+        .name = "PSCI",
+    },
+};
+
+struct fwk_module_config config_scmi = {
+    .elements = FWK_MODULE_DYNAMIC_ELEMENTS(get_element_table),
+    .data = &((struct mod_scmi_config){
+        .protocol_count_max = 9,
+        /*
+         * The "- 1" excludes the unused slot 0 of agent_table (agent id
+         * 0 is reserved for the platform) -- assumes SCMI_AGENT_ID_*
+         * start at 1; confirm against rcar_scmi.h.
+         */
+        .agent_count = FWK_ARRAY_SIZE(agent_table) - 1,
+        .agent_table = agent_table,
+        .vendor_identifier = "arm",
+        .sub_vendor_identifier = "arm",
+    }),
+};
diff --git a/product/rcar/scp_ramfw/config_scmi_apcore.c b/product/rcar/scp_ramfw/config_scmi_apcore.c
new file mode 100644
index 00000000..8ee97df2
--- /dev/null
+++ b/product/rcar/scp_ramfw/config_scmi_apcore.c
@@ -0,0 +1,30 @@
+/*
+ * Renesas SCP/MCP Software
+ * Copyright (c) 2020, Renesas Electronics Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <rcar_core.h>
+
+#include <mod_scmi_apcore.h>
+
+#include <fwk_element.h>
+#include <fwk_macros.h>
+#include <fwk_module.h>
+
+/*
+ * Reset-address (entry point) register layout exposed via the SCMI
+ * apcore protocol: one group holding a register per core in a cluster.
+ * NOTE(review): .base_register = 0 assumes the apcore module tolerates
+ * (or later patches) a zero base address -- confirm against
+ * mod_scmi_apcore.
+ */
+static const struct mod_scmi_apcore_reset_register_group
+    reset_reg_group_table[] = {
+        {
+            .base_register = 0,
+            .register_count = RCAR_CORE_PER_CLUSTER_MAX,
+        },
+    };
+
+/* SCMI apcore protocol configuration: 64-bit reset registers, one group. */
+const struct fwk_module_config config_scmi_apcore = {
+    .data = &((struct mod_scmi_apcore_config){
+        .reset_register_width = MOD_SCMI_APCORE_REG_WIDTH_64,
+        .reset_register_group_count = FWK_ARRAY_SIZE(reset_reg_group_table),
+        .reset_register_group_table = &reset_reg_group_table[0],
+    }),
+};
diff --git a/product/rcar/scp_ramfw/config_scmi_clock.c b/product/rcar/scp_ramfw/config_scmi_clock.c
new file mode 100644
index 00000000..48d1340a
--- /dev/null
+++ b/product/rcar/scp_ramfw/config_scmi_clock.c
@@ -0,0 +1,980 @@
+/*
+ * Renesas SCP/MCP Software
+ * Copyright (c) 2020, Renesas Electronics Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <clock_devices.h>
+#include <rcar_scmi.h>
+
+#include <mod_scmi_clock.h>
+
+#include <fwk_macros.h>
+#include <fwk_module.h>
+#include <fwk_module_idx.h>
+
+static const struct mod_scmi_clock_device agent_device_table_ospm[] = {
+ /* Core Clock */
+ {
+ /* ztr */
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_ZTR),
+ },
+ {
+ /* ztrd2 */
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_ZTRD2),
+ },
+ {
+ /* zt */
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_ZT),
+ },
+ {
+ /* zx */
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_ZX),
+ },
+ {
+ /* s0d1 */
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_S0D1),
+ },
+ {
+ /* s0d2 */
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_S0D2),
+ },
+ {
+ /* s0d3 */
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_S0D3),
+ },
+ {
+ /* s0d4 */
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_S0D4),
+ },
+ {
+ /* s0d6 */
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_S0D6),
+ },
+ {
+ /* s0d8 */
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_S0D8),
+ },
+ {
+ /* s0d12 */
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_S0D12),
+ },
+ {
+ /* s1d1 */
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_S1D1),
+ },
+ {
+ /* s1d2 */
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_S1D2),
+ },
+ {
+ /* s1d4 */
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_S1D4),
+ },
+ {
+ /* s2d1 */
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_S2D1),
+ },
+ {
+ /* s2d2 */
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_S2D2),
+ },
+ {
+ /* s2d4 */
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_S2D4),
+ },
+ {
+ /* s3d1 */
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_S3D1),
+ },
+ {
+ /* s3d2 */
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_S3D2),
+ },
+ {
+ /* s3d4 */
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_S3D4),
+ },
+ {
+ /* sd0 */
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_SD0),
+ },
+ {
+ /* sd1 */
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_SD1),
+ },
+ {
+ /* sd2 */
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_SD2),
+ },
+ {
+ /* sd3 */
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_SD3),
+ },
+ {
+ /* cl */
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_CL),
+ },
+ {
+ /* cr */
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_CR),
+ },
+ {
+ /* cp */
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_CP),
+ },
+ {
+ /* cpex */
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_CPEX),
+ },
+ {
+ /* canfd */
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_CANFD),
+ },
+ {
+ /* csi0 */
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_CSI0),
+
+ },
+ {
+ /* mso */
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_MSO),
+ },
+ {
+ /* hdmi */
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_HDMI),
+ },
+ {
+ /* osc */
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_OSC),
+ },
+ {
+ /* r */
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_R),
+ },
+ {
+ /* s0 */
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_S0),
+ },
+ {
+ /* s1 */
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_S1),
+ },
+ {
+ /* s2 */
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_S2),
+ },
+ {
+ /* s3 */
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_S3),
+ },
+ {
+ /* sdsrc */
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_SDSRC),
+ },
+ {
+ /* rint */
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_RINT),
+ },
+
+ /* mstp Clock */
+ {
+ /* fdp1-1 */
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_FDP1_1),
+ },
+ {
+ /* fdp1-0 */
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_FDP1_0),
+ },
+ {
+ /* scif5*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_SCIF5),
+ },
+ {
+ /* scif4*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_SCIF4),
+ },
+ {
+ /* scif3*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_SCIF3),
+ },
+ {
+ /* scif1*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_SCIF1),
+ },
+ {
+ /* scif0*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_SCIF0),
+ },
+ {
+ /* msiof3*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_MSIOF3),
+ },
+ {
+ /* msiof2*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_MSIOF2),
+ },
+ {
+ /* msiof1*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_MSIOF1),
+ },
+ {
+ /* msiof0*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_MSIOF0),
+ },
+ {
+ /* sys-dmac2*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_SYS_DMAC2),
+ },
+ {
+ /* sys-dmac1*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_SYS_DMAC1),
+ },
+ {
+ /* sys-dmac0*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_SYS_DMAC0),
+ },
+ {
+ /* sceg-pub*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_SCEG_PUB),
+ },
+ {
+ /* cmt3*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_CMT3),
+ },
+ {
+ /* cmt2*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_CMT2),
+ },
+ {
+ /* cmt1*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_CMT1),
+ },
+ {
+ /* cmt0*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_CMT0),
+ },
+ {
+ /* tpu0*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_TPU0),
+ },
+ {
+ /* scif2*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_SCIF2),
+ },
+ {
+ /* sdif3*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_SDIF3),
+ },
+ {
+ /* sdif2*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_SDIF2),
+ },
+ {
+ /* sdif1*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_SDIF1),
+ },
+ {
+ /* sdif0*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_SDIF0),
+ },
+ {
+ /* pcie1*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_PCIE1),
+ },
+ {
+ /* pcie0*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_PCIE0),
+ },
+ {
+ /* usb-dmac30*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_USB_DMAC30),
+ },
+ {
+ /* usb3-if0*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_USB3_IF0),
+ },
+ {
+ /* usb-dmac31*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_USB_DMAC31),
+ },
+ {
+ /* usb-dmac0*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_USB_DMAC0),
+ },
+ {
+ /* usb-dmac1*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_USB_DMAC1),
+ },
+ {
+ /* rwdt*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_RWDT),
+ },
+ {
+ /* intc-ex*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_INTC_EX),
+ },
+ {
+ /* intc-ap*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_INTC_AP),
+ },
+ {
+ /* audmac1*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_AUDMAC1),
+ },
+ {
+ /* audmac0*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_AUDMAC0),
+ },
+ {
+ /* drif31*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_DRIF31),
+ },
+ {
+ /* drif30*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_DRIF30),
+ },
+ {
+ /* drif21*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_DRIF21),
+ },
+ {
+ /* drif20*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_DRIF20),
+ },
+ {
+ /* drif11*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_DRIF11),
+ },
+ {
+ /* drif10*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_DRIF10),
+ },
+ {
+ /* drif01*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_DRIF01),
+ },
+ {
+ /* drif00*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_DRIF00),
+ },
+ {
+ /* hscif4*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_HSCIF4),
+ },
+ {
+ /* hscif3*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_HSCIF3),
+ },
+ {
+ /* hscif2*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_HSCIF2),
+ },
+ {
+ /* hscif1*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_HSCIF1),
+ },
+ {
+ /* hscif0*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_HSCIF0),
+ },
+ {
+ /* thermal*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_THERMAL),
+ },
+ {
+ /* pwm*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_PWM),
+ },
+ {
+ /* fcpvd2*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_FCPVD2),
+ },
+ {
+ /* fcpvd1*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_FCPVD1),
+ },
+ {
+ /* fcpvd0*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_FCPVD0),
+ },
+ {
+ /* fcpvb1*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_FCPVB1),
+ },
+ {
+ /* fcpvb0*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_FCPVB0),
+ },
+ {
+ /* fcpvi1*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_FCPVI1),
+ },
+ {
+ /* fcpvi0*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_FCPVI0),
+ },
+ {
+ /* fcpf1*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_FCPF1),
+ },
+ {
+ /* fcpf0*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_FCPF0),
+ },
+ {
+ /* fcpcs*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_FCPCS),
+ },
+ {
+ /* vspd2*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_VSPD2),
+ },
+ {
+ /* vspd1*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_VSPD1),
+ },
+ {
+ /* vspd0*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_VSPD0),
+ },
+ {
+ /* vspbc*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_VSPBC),
+ },
+ {
+ /* vspbd*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_VSPBD),
+ },
+ {
+ /* vspi1*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_VSPI1),
+ },
+ {
+ /* vspi0*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_VSPI0),
+ },
+ {
+ /* ehci3*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_EHCI3),
+ },
+ {
+ /* ehci2*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_EHCI2),
+ },
+ {
+ /* ehci1*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_EHCI1),
+ },
+ {
+ /* ehci0*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_EHCI0),
+ },
+ {
+ /* hsusb*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_HSUSB),
+ },
+ {
+ /* hsusb3*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_HSUSB3),
+ },
+ {
+ /* cmm3*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_CMM3),
+ },
+ {
+ /* cmm2*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_CMM2),
+ },
+ {
+ /* cmm1*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_CMM1),
+ },
+ {
+ /* cmm0*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_CMM0),
+ },
+ {
+ /* csi20*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_CSI20),
+ },
+ {
+ /* csi41*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_CSI41),
+ },
+ {
+ /* csi40*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_CSI40),
+ },
+ {
+ /* du3*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_DU3),
+ },
+ {
+ /* du2*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_DU2),
+ },
+ {
+ /* du1*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_DU1),
+ },
+ {
+ /* du0*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_DU0),
+ },
+ {
+ /* lvds*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_LVDS),
+ },
+ {
+ /* hdmi1*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_HDMI1),
+ },
+ {
+ /* hdmi0*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_HDMI0),
+ },
+ {
+ /* vin7*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_VIN7),
+ },
+ {
+ /* vin6*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_VIN6),
+ },
+ {
+ /* vin5*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_VIN5),
+ },
+ {
+ /* vin4*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_VIN4),
+ },
+ {
+ /* vin3*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_VIN3),
+ },
+ {
+ /* vin2*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_VIN2),
+ },
+ {
+ /* vin1*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_VIN1),
+ },
+ {
+ /* vin0*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_VIN0),
+ },
+ {
+ /* etheravb*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_ETHERAVB),
+ },
+ {
+ /* sata0*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_SATA0),
+ },
+ {
+ /* imr3*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_IMR3),
+ },
+ {
+ /* imr2*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_IMR2),
+ },
+ {
+ /* imr1*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_IMR1),
+ },
+ {
+ /* imr0*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_IMR0),
+ },
+ {
+ /* gpio7*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_GPIO7),
+ },
+ {
+ /* gpio6*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_GPIO6),
+ },
+ {
+ /* gpio5*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_GPIO5),
+ },
+ {
+ /* gpio4*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_GPIO4),
+ },
+ {
+ /* gpio3*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_GPIO3),
+ },
+ {
+ /* gpio2*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_GPIO2),
+ },
+ {
+ /* gpio1*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_GPIO1),
+ },
+ {
+ /* gpio0*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_GPIO0),
+ },
+ {
+ /* can-fd*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_CAN_FD),
+ },
+ {
+ /* can-if1*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_CAN_IF1),
+ },
+ {
+ /* can-if0*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_CAN_IF0),
+ },
+ {
+ /* i2c6*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_I2C6),
+ },
+ {
+ /* i2c5*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_I2C5),
+ },
+ {
+ /* i2c-dvfs*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_I2C_DVFS),
+ },
+ {
+ /* i2c4*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_I2C4),
+ },
+ {
+ /* i2c3*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_I2C3),
+ },
+ {
+ /* i2c2*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_I2C2),
+ },
+ {
+ /* i2c1*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_I2C1),
+ },
+ {
+ /* i2c0*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_I2C0),
+ },
+ {
+ /* ssi-all*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_SSI_ALL),
+ },
+ {
+ /* ssi9*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_SSI9),
+ },
+ {
+ /* ssi8*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_SSI8),
+ },
+ {
+ /* ssi7*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_SSI7),
+ },
+ {
+ /* ssi6*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_SSI6),
+ },
+ {
+ /* ssi5*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_SSI5),
+ },
+ {
+ /* ssi4*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_SSI4),
+ },
+ {
+ /* ssi3*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_SSI3),
+ },
+ {
+ /* ssi2*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_SSI2),
+ },
+ {
+ /* ssi1*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_SSI1),
+ },
+ {
+ /* ssi0*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_SSI0),
+ },
+ {
+ /* scu-all*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_SCU_ALL),
+ },
+ {
+ /* scu-dvc1*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_SCU_DVC1),
+ },
+ {
+ /* scu-dvc0*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_SCU_DVC0),
+ },
+ {
+        /* scu-ctu1-mix1 -- NOTE(review): comment says ctu1 but the code
+         * uses CLOCK_DEV_IDX_SCU_CTU0_MIX1; confirm which is intended. */
+ .element_id = FWK_ID_ELEMENT_INIT(
+ FWK_MODULE_IDX_CLOCK,
+ CLOCK_DEV_IDX_SCU_CTU0_MIX1),
+ },
+ {
+ /* scu-ctu0-mix0*/
+ .element_id = FWK_ID_ELEMENT_INIT(
+ FWK_MODULE_IDX_CLOCK,
+ CLOCK_DEV_IDX_SCU_CTU0_MIX0),
+ },
+ {
+ /* scu-src9*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_SCU_SRC9),
+ },
+ {
+ /* scu-src8*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_SCU_SRC8),
+ },
+ {
+ /* scu-src7*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_SCU_SRC7),
+ },
+ {
+ /* scu-src6*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_SCU_SRC6),
+ },
+ {
+ /* scu-src5*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_SCU_SRC5),
+ },
+ {
+ /* scu-src4*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_SCU_SRC4),
+ },
+ {
+ /* scu-src3*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_SCU_SRC3),
+ },
+ {
+ /* scu-src2*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_SCU_SRC2),
+ },
+ {
+ /* scu-src1*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_SCU_SRC1),
+ },
+ {
+ /* scu-src0*/
+ .element_id =
+ FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_CLOCK, CLOCK_DEV_IDX_SCU_SRC0),
+ },
+};
+
+/*
+ * Per-agent clock access: the PSCI agent gets no clock access; the OSPM
+ * agent may control every device listed in agent_device_table_ospm.
+ */
+static const struct mod_scmi_clock_agent agent_table[] = {
+    [SCMI_AGENT_ID_PSCI] = { 0 /* No access */ },
+    [SCMI_AGENT_ID_OSPM] = {
+        .device_table = agent_device_table_ospm,
+        .device_count = FWK_ARRAY_SIZE(agent_device_table_ospm),
+    },
+};
+
+struct fwk_module_config config_scmi_clock = {
+    .data = &((struct mod_scmi_clock_config){
+        /*
+         * NOTE(review): 0 presumably means "no limit on concurrent
+         * clock transactions" -- confirm against mod_scmi_clock.
+         */
+        .max_pending_transactions = 0,
+        .agent_table = agent_table,
+        .agent_count = FWK_ARRAY_SIZE(agent_table),
+    }),
+};
diff --git a/product/rcar/scp_ramfw/config_scmi_power_domain.c b/product/rcar/scp_ramfw/config_scmi_power_domain.c
new file mode 100644
index 00000000..c0c4ec5f
--- /dev/null
+++ b/product/rcar/scp_ramfw/config_scmi_power_domain.c
@@ -0,0 +1,11 @@
+/*
+ * Renesas SCP/MCP Software
+ * Copyright (c) 2020, Renesas Electronics Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <fwk_module.h>
+
+/*
+ * The SCMI power-domain protocol needs no product-specific data:
+ * no elements and no module configuration.
+ */
+struct fwk_module_config config_scmi_power_domain = { 0 };
diff --git a/product/rcar/scp_ramfw/config_sensor.c b/product/rcar/scp_ramfw/config_sensor.c
new file mode 100644
index 00000000..fe05940b
--- /dev/null
+++ b/product/rcar/scp_ramfw/config_sensor.c
@@ -0,0 +1,115 @@
+/*
+ * Renesas SCP/MCP Software
+ * Copyright (c) 2020, Renesas Electronics Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <system_mmap.h>
+
+#include <mod_reg_sensor.h>
+#include <mod_sensor.h>
+
+#include <fwk_element.h>
+#include <fwk_module.h>
+#include <fwk_module_idx.h>
+
+#include <stddef.h>
+
+/* Driver-side element indices for the register-sensor module. */
+enum REG_SENSOR_DEVICES {
+    REG_SENSOR_DEV_SOC_TEMP1,
+    REG_SENSOR_DEV_SOC_TEMP2,
+    REG_SENSOR_DEV_SOC_TEMP3,
+    REG_SENSOR_DEV_COUNT,
+};
+/* HAL-side element indices for the generic sensor module (R8A7795 SoC). */
+enum SENSOR_DEVICES {
+    R8A7795_SNSR_THERMAL1,
+    R8A7795_SNSR_THERMAL2,
+    R8A7795_SNSR_THERMAL3,
+    R8A7795_SNSR_COUNT,
+};
+
+/*
+ * Register Sensor driver config
+ */
+/*
+ * Shared descriptor for all three SoC thermal sensors. A unit
+ * multiplier of -3 scales readings by 10^-3, i.e. values are reported
+ * in millidegrees Celsius. Zero update intervals: no periodic sampling
+ * is advertised.
+ */
+static struct mod_sensor_info info_soc_temperature = {
+    .type = MOD_SENSOR_TYPE_DEGREES_C,
+    .update_interval = 0,
+    .update_interval_multiplier = 0,
+    .unit_multiplier = -3,
+};
+
+/*
+ * Register-sensor elements: each entry maps one memory-mapped thermal
+ * sensor register (addresses from system_mmap.h) to a device sharing
+ * the common info descriptor above.
+ */
+static const struct fwk_element reg_sensor_element_table[] = {
+    [REG_SENSOR_DEV_SOC_TEMP1] = {
+        .name = "thermal1",
+        .data = &((struct mod_reg_sensor_dev_config) {
+            .reg = (uintptr_t)(SENSOR_SOC_TEMP1),
+            .info = &info_soc_temperature,
+        }),
+    },
+    [REG_SENSOR_DEV_SOC_TEMP2] = {
+        .name = "thermal2",
+        .data = &((struct mod_reg_sensor_dev_config) {
+            .reg = (uintptr_t)(SENSOR_SOC_TEMP2),
+            .info = &info_soc_temperature,
+        }),
+    },
+    [REG_SENSOR_DEV_SOC_TEMP3] = {
+        .name = "thermal3",
+        .data = &((struct mod_reg_sensor_dev_config) {
+            .reg = (uintptr_t)(SENSOR_SOC_TEMP3),
+            .info = &info_soc_temperature,
+        }),
+    },
+    [REG_SENSOR_DEV_COUNT] = { 0 },
+};
+
+/* Return the register-sensor element table; identical for any id. */
+static const struct fwk_element *get_reg_sensor_element_table(fwk_id_t id)
+{
+    return reg_sensor_element_table;
+}
+
+/*
+ * Driver (rcar_reg_sensor) module configuration.
+ * NOTE(review): this file includes <mod_reg_sensor.h> and the HAL
+ * elements bind drivers via FWK_MODULE_IDX_REG_SENSOR, while
+ * firmware.mk builds the module as rcar_reg_sensor -- confirm the
+ * module-index name is consistent with the generated fwk_module_idx.h.
+ */
+struct fwk_module_config config_rcar_reg_sensor = {
+    .elements = FWK_MODULE_DYNAMIC_ELEMENTS(get_reg_sensor_element_table),
+};
+
+/*
+ * Sensor module config
+ */
+/*
+ * Sensor HAL elements: each HAL sensor is backed by the matching
+ * register-sensor driver element. API index 0 is the driver API
+ * (presumably MOD_REG_SENSOR_API_IDX_DRIVER -- confirm in the driver
+ * header). See also the NOTE on config_rcar_reg_sensor regarding the
+ * REG_SENSOR vs. RCAR_REG_SENSOR module-index naming.
+ */
+static const struct fwk_element sensor_element_table[] = {
+    [R8A7795_SNSR_THERMAL1] = {
+        .name = "thermal1",
+        .data = &((const struct mod_sensor_dev_config) {
+            .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_REG_SENSOR,
+                                             REG_SENSOR_DEV_SOC_TEMP1),
+            .driver_api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_REG_SENSOR, 0),
+        }),
+    },
+    [R8A7795_SNSR_THERMAL2] = {
+        .name = "thermal2",
+        .data = &((const struct mod_sensor_dev_config) {
+            .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_REG_SENSOR,
+                                             REG_SENSOR_DEV_SOC_TEMP2),
+            .driver_api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_REG_SENSOR, 0),
+        }),
+    },
+    [R8A7795_SNSR_THERMAL3] = {
+        .name = "thermal3",
+        .data = &((const struct mod_sensor_dev_config) {
+            .driver_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_REG_SENSOR,
+                                             REG_SENSOR_DEV_SOC_TEMP3),
+            .driver_api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_REG_SENSOR, 0),
+        }),
+    },
+    [R8A7795_SNSR_COUNT] = { 0 },
+};
+
+/* Return the sensor HAL element table; identical for any module id. */
+static const struct fwk_element *get_sensor_element_table(fwk_id_t module_id)
+{
+    return sensor_element_table;
+}
+
+/* Sensor HAL module configuration: elements only, no module data. */
+struct fwk_module_config config_sensor = {
+    .elements = FWK_MODULE_DYNAMIC_ELEMENTS(get_sensor_element_table),
+    .data = NULL,
+};
diff --git a/product/rcar/scp_ramfw/config_smt.c b/product/rcar/scp_ramfw/config_smt.c
new file mode 100644
index 00000000..ebf3b2f7
--- /dev/null
+++ b/product/rcar/scp_ramfw/config_smt.c
@@ -0,0 +1,79 @@
+/*
+ * Renesas SCP/MCP Software
+ * Copyright (c) 2020, Renesas Electronics Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <config_rcar_power_domain.h>
+#include <rcar_core.h>
+#include <rcar_mfismh.h>
+#include <rcar_scmi.h>
+#include <software_mmap.h>
+
+#include <mod_rcar_power_domain.h>
+#include <mod_smt.h>
+
+#include <fwk_element.h>
+#include <fwk_id.h>
+#include <fwk_module.h>
+#include <fwk_module_idx.h>
+
+#include <stdint.h>
+
+/*
+ * SMT mailbox channels, one per SCMI service: a secure channel for the
+ * PSCI agent and a non-secure channel for OSPM 0, each backed by an
+ * MFIS mailbox sub-element. pd_source_id is filled in at runtime by
+ * smt_get_element_table().
+ */
+static const struct fwk_element smt_element_table[] = {
+    [RCAR_SCMI_SERVICE_IDX_PSCI] = { .name = "PSCI",
+                                     .data = &((struct mod_smt_channel_config){
+                                         .type = MOD_SMT_CHANNEL_TYPE_SLAVE,
+                                         .policies =
+                                             MOD_SMT_POLICY_INIT_MAILBOX |
+                                             MOD_SMT_POLICY_SECURE,
+                                         .mailbox_address =
+                                             (uintptr_t)SCMI_PAYLOAD_S_A2P_BASE,
+                                         .mailbox_size = SCMI_PAYLOAD_SIZE,
+                                         .driver_id = FWK_ID_SUB_ELEMENT_INIT(
+                                             FWK_MODULE_IDX_RCAR_MFISMH,
+                                             RCAR_MFISMH_DEVICE_IDX_S,
+                                             0),
+                                         .driver_api_id = FWK_ID_API_INIT(
+                                             FWK_MODULE_IDX_RCAR_MFISMH,
+                                             0),
+                                     }) },
+    [RCAR_SCMI_SERVICE_IDX_OSPM_0] = { .name = "OSPM0",
+                                       .data = &((
+                                           struct mod_smt_channel_config){
+                                           .type = MOD_SMT_CHANNEL_TYPE_SLAVE,
+                                           .policies =
+                                               MOD_SMT_POLICY_INIT_MAILBOX,
+                                           .mailbox_address = (uintptr_t)
+                                               SCMI_PAYLOAD0_NS_A2P_BASE,
+                                           .mailbox_size = SCMI_PAYLOAD_SIZE,
+                                           .driver_id = FWK_ID_SUB_ELEMENT_INIT(
+                                               FWK_MODULE_IDX_RCAR_MFISMH,
+                                               RCAR_MFISMH_DEVICE_IDX_NS_L,
+                                               0),
+                                           .driver_api_id = FWK_ID_API_INIT(
+                                               FWK_MODULE_IDX_RCAR_MFISMH,
+                                               0),
+                                       }) },
+    [RCAR_SCMI_SERVICE_IDX_COUNT] = { 0 },
+};
+
+/*
+ * Bind every SMT channel to the SYSTOP power domain and return the
+ * channel table.
+ *
+ * Changes vs. original: rcar_core_get_count() is hoisted out of the
+ * loop (it is loop-invariant; config_clock.c already follows this
+ * pattern), and entries with a NULL .data pointer are skipped so a
+ * service index without a configured channel (e.g. a disabled OSPM_1)
+ * cannot cause a NULL dereference.
+ */
+static const struct fwk_element *smt_get_element_table(fwk_id_t module_id)
+{
+    unsigned int idx;
+    unsigned int core_count;
+    struct mod_smt_channel_config *config;
+
+    core_count = rcar_core_get_count();
+
+    for (idx = 0; idx < RCAR_SCMI_SERVICE_IDX_COUNT; idx++) {
+        config = (struct mod_smt_channel_config *)(smt_element_table[idx].data);
+        if (config == NULL)
+            continue;
+        config->pd_source_id = FWK_ID_ELEMENT(
+            FWK_MODULE_IDX_RCAR_POWER_DOMAIN,
+            CONFIG_POWER_DOMAIN_CHILD_COUNT + core_count);
+    }
+
+    return smt_element_table;
+}
+
+/* SMT module configuration: channel table finalized at runtime. */
+struct fwk_module_config config_smt = {
+    .elements = FWK_MODULE_DYNAMIC_ELEMENTS(smt_get_element_table),
+};
diff --git a/product/rcar/scp_ramfw/firmware.mk b/product/rcar/scp_ramfw/firmware.mk
new file mode 100644
index 00000000..707138c8
--- /dev/null
+++ b/product/rcar/scp_ramfw/firmware.mk
@@ -0,0 +1,92 @@
+#
+# Renesas SCP/MCP Software
+# Copyright (c) 2020, Renesas Electronics Corporation. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+BS_FIRMWARE_CPU := cortex-a57.cortex-a53
+BS_FIRMWARE_HAS_MULTITHREADING := yes
+BS_FIRMWARE_HAS_NOTIFICATION := yes
+BS_FIRMWARE_HAS_RESOURCE_PERMISSIONS := no
+
+# Modules linked into this firmware image. NOTE(review): in SCP-firmware
+# the list order presumably defines the generated FWK_MODULE_IDX_*
+# values and module init order -- confirm before reordering.
+BS_FIRMWARE_MODULES := \
+    rcar_scif \
+    rcar_system \
+    scmi \
+    smt \
+    clock \
+    rcar_clock \
+    rcar_sd_clock \
+    rcar_mstp_clock \
+    rcar_system_power \
+    rcar_dvfs \
+    rcar_pmic \
+    rcar_mock_pmic \
+    rcar_mfismh \
+    rcar_power_domain \
+    rcar_pd_sysc \
+    rcar_pd_core \
+    rcar_reg_sensor \
+    sensor \
+    scmi_power_domain \
+    scmi_clock \
+    scmi_sensor \
+    scmi_apcore
+
+ifeq ($(BS_FIRMWARE_HAS_RESOURCE_PERMISSIONS),yes)
+    BS_FIRMWARE_MODULES += resource_perms
+endif
+
+# One config_*.c per module above, plus product support sources.
+BS_FIRMWARE_SOURCES := \
+    rcar_core.c \
+    config_rcar_scif.c \
+    config_rcar_power_domain.c \
+    config_rcar_pd_sysc.c \
+    config_rcar_pd_core.c \
+    config_sensor.c \
+    config_clock.c \
+    config_rcar_clock.c \
+    config_rcar_sd_clock.c \
+    config_rcar_mstp_clock.c \
+    config_rcar_dvfs.c \
+    config_rcar_pmic.c \
+    config_rcar_mock_pmic.c \
+    config_rcar_mfismh.c \
+    config_smt.c \
+    config_scmi.c \
+    config_scmi_clock.c \
+    config_scmi_apcore.c \
+    config_scmi_power_domain.c \
+    config_rcar_system_power.c \
+    config_rcar_system.c
+
+ifeq ($(BS_FIRMWARE_HAS_RESOURCE_PERMISSIONS),yes)
+    BS_FIRMWARE_SOURCES += config_resource_perms.c
+endif
+
+#
+# Temporary source code until CMSIS-FreeRTOS is updated
+#
+BS_FIRMWARE_SOURCES += \
+    portASM.S \
+    port.c \
+    list.c \
+    queue.c \
+    tasks.c \
+    timers.c \
+    heap_1.c \
+    cmsis_os2_tiny4scp.c
+
+# Search paths so make can locate the in-tree FreeRTOS sources above.
+vpath %.c $(PRODUCT_DIR)/src/CMSIS-FreeRTOS/Source/portable/GCC/ARM_CA53_64_Rcar
+vpath %.S $(PRODUCT_DIR)/src/CMSIS-FreeRTOS/Source/portable/GCC/ARM_CA53_64_Rcar
+vpath %.c $(PRODUCT_DIR)/src/CMSIS-FreeRTOS/Source
+vpath %.c $(PRODUCT_DIR)/src/CMSIS-FreeRTOS/Source/portable/MemMang
+vpath %.c $(PRODUCT_DIR)/src/CMSIS-FreeRTOS/CMSIS/RTOS2/FreeRTOS/Source
+
+#
+# Temporary directory until CMSIS-FreeRTOS is updated
+#
+FREERTOS_DIR := $(PRODUCT_DIR)/src/CMSIS-FreeRTOS/CMSIS/RTOS2
+
+include $(BS_DIR)/firmware.mk
diff --git a/product/rcar/scp_ramfw/fmw_memory.h b/product/rcar/scp_ramfw/fmw_memory.h
new file mode 100644
index 00000000..7ebc1d69
--- /dev/null
+++ b/product/rcar/scp_ramfw/fmw_memory.h
@@ -0,0 +1,23 @@
+/*
+ * Renesas SCP/MCP Software
+ * Copyright (c) 2020, Renesas Electronics Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef FMW_MEMORY_H
+#define FMW_MEMORY_H
+
+#include <system_mmap_scp.h>
+
+#define FMW_MEM_MODE ARCH_MEM_MODE_SINGLE_REGION
+
+/*
+ * RAM memory
+ */
+#define FMW_MEM0_SIZE SCP_RAM_SIZE
+#define FMW_MEM0_BASE SCP_RAM_BASE
+
+#define FMW_STACK_SIZE (4 * 1024)
+
+#endif /* FMW_MEMORY_H */
diff --git a/product/rcar/src/CMSIS-FreeRTOS/CMSIS/RTOS2/FreeRTOS/Source/cmsis_os2_tiny4scp.c b/product/rcar/src/CMSIS-FreeRTOS/CMSIS/RTOS2/FreeRTOS/Source/cmsis_os2_tiny4scp.c
new file mode 100644
index 00000000..b20e8f8f
--- /dev/null
+++ b/product/rcar/src/CMSIS-FreeRTOS/CMSIS/RTOS2/FreeRTOS/Source/cmsis_os2_tiny4scp.c
@@ -0,0 +1,537 @@
+/* --------------------------------------------------------------------------
+ * Copyright (c) 2013-2019 Arm Limited. All rights reserved.
+ * Copyright (c) 2020, Renesas Electronics Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the License); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Name: cmsis_os2_tiny4scp.c
+ * Purpose: CMSIS RTOS2 wrapper for FreeRTOS
+ *
+ *---------------------------------------------------------------------------*/
+
+#include "FreeRTOS.h" // ARM.FreeRTOS::RTOS:Core
+#include "cmsis_os2.h" // ::CMSIS:RTOS2
+#include "task.h" // ARM.FreeRTOS::RTOS:Core
+
+#include <string.h>
+
+#define __ARM_ARCH_8A__ (1)
+
+#define IS_IRQ() is_irq()
+
+/* Kernel version and identification string definition
+ (major.minor.rev: mmnnnrrrr dec) */
+#define KERNEL_VERSION \
+ (((uint32_t)tskKERNEL_VERSION_MAJOR * 10000000UL) | \
+ ((uint32_t)tskKERNEL_VERSION_MINOR * 10000UL) | \
+ ((uint32_t)tskKERNEL_VERSION_BUILD * 1UL))
+
+#define KERNEL_ID ("FreeRTOS " tskKERNEL_VERSION_NUMBER)
+
+#define portYIELD_FROM_ISR(n)
+#define __STATIC_INLINE static inline
+/* Limits */
+#define MAX_BITS_TASK_NOTIFY 31U
+#define MAX_BITS_EVENT_GROUPS 24U
+
+#define THREAD_FLAGS_INVALID_BITS (~((1UL << MAX_BITS_TASK_NOTIFY) - 1U))
+#define EVENT_FLAGS_INVALID_BITS (~((1UL << MAX_BITS_EVENT_GROUPS) - 1U))
+
+static inline uint32_t is_irq(void)
+{
+ uint32_t val;
+ __asm__ volatile("mrs %0, spsel" : "=r"(val));
+ return val & 0x01;
+}
+
+/*
+ Setup SVC to reset value.
+*/
+__STATIC_INLINE void SVC_Setup(void)
+{
+#if ((__ARM_ARCH_7A__ == 0U) && (__ARM_ARCH_8A__ == 0U))
+ /* Service Call interrupt might be configured before kernel start */
+ /* and when its priority is lower or equal to BASEPRI, svc instruction */
+ /* causes a Hard Fault. */
+ NVIC_SetPriority(SVCall_IRQn, 0U);
+#endif
+}
+
+/* Kernel initialization state */
+static osKernelState_t KernelState = osKernelInactive;
+
+osStatus_t osKernelInitialize(void)
+{
+ osStatus_t stat;
+
+ if (IS_IRQ()) {
+ stat = osErrorISR;
+ } else {
+ if (KernelState == osKernelInactive) {
+#if defined(RTE_Compiler_EventRecorder)
+ EvrFreeRTOSSetup(0U);
+#endif
+#if defined(RTE_RTOS_FreeRTOS_HEAP_5) && (HEAP_5_REGION_SETUP == 1)
+ vPortDefineHeapRegions(configHEAP_5_REGIONS);
+#endif
+ KernelState = osKernelReady;
+ stat = osOK;
+ } else {
+ stat = osError;
+ }
+ }
+
+ return (stat);
+}
+
+osStatus_t osKernelGetInfo(osVersion_t *version, char *id_buf, uint32_t id_size)
+{
+ if (version != NULL) {
+ /* Version encoding is major.minor.rev: mmnnnrrrr dec */
+ version->api = KERNEL_VERSION;
+ version->kernel = KERNEL_VERSION;
+ }
+
+ if ((id_buf != NULL) && (id_size != 0U)) {
+ if (id_size > sizeof(KERNEL_ID)) {
+ id_size = sizeof(KERNEL_ID);
+ }
+ memcpy(id_buf, KERNEL_ID, id_size);
+ }
+
+ return (osOK);
+}
+
+osKernelState_t osKernelGetState(void)
+{
+ osKernelState_t state;
+
+ switch (xTaskGetSchedulerState()) {
+ case taskSCHEDULER_RUNNING:
+ state = osKernelRunning;
+ break;
+
+ case taskSCHEDULER_SUSPENDED:
+ state = osKernelLocked;
+ break;
+
+ case taskSCHEDULER_NOT_STARTED:
+ default:
+ if (KernelState == osKernelReady) {
+ state = osKernelReady;
+ } else {
+ state = osKernelInactive;
+ }
+ break;
+ }
+
+ return (state);
+}
+
+osStatus_t osKernelStart(void)
+{
+ osStatus_t stat;
+
+ if (IS_IRQ()) {
+ stat = osErrorISR;
+ } else {
+ if (KernelState == osKernelReady) {
+ /* Ensure SVC priority is at the reset value */
+ SVC_Setup();
+ /* Change state to enable IRQ masking check */
+ KernelState = osKernelRunning;
+ /* Start the kernel scheduler */
+ vTaskStartScheduler();
+ stat = osOK;
+ } else {
+ stat = osError;
+ }
+ }
+
+ return (stat);
+}
+
+int32_t osKernelLock(void)
+{
+ int32_t lock;
+
+ if (IS_IRQ()) {
+ lock = (int32_t)osErrorISR;
+ } else {
+ switch (xTaskGetSchedulerState()) {
+ case taskSCHEDULER_SUSPENDED:
+ lock = 1;
+ break;
+
+ case taskSCHEDULER_RUNNING:
+ vTaskSuspendAll();
+ lock = 0;
+ break;
+
+ case taskSCHEDULER_NOT_STARTED:
+ default:
+ lock = (int32_t)osError;
+ break;
+ }
+ }
+
+ return (lock);
+}
+
+int32_t osKernelUnlock(void)
+{
+ int32_t lock;
+
+ if (IS_IRQ()) {
+ lock = (int32_t)osErrorISR;
+ } else {
+ switch (xTaskGetSchedulerState()) {
+ case taskSCHEDULER_SUSPENDED:
+ lock = 1;
+
+ if (xTaskResumeAll() != pdTRUE) {
+ if (xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED) {
+ lock = (int32_t)osError;
+ }
+ }
+ break;
+
+ case taskSCHEDULER_RUNNING:
+ lock = 0;
+ break;
+
+ case taskSCHEDULER_NOT_STARTED:
+ default:
+ lock = (int32_t)osError;
+ break;
+ }
+ }
+
+ return (lock);
+}
+
+int32_t osKernelRestoreLock(int32_t lock)
+{
+ if (IS_IRQ()) {
+ lock = (int32_t)osErrorISR;
+ } else {
+ switch (xTaskGetSchedulerState()) {
+ case taskSCHEDULER_SUSPENDED:
+ case taskSCHEDULER_RUNNING:
+ if (lock == 1) {
+ vTaskSuspendAll();
+ } else {
+ if (lock != 0) {
+ lock = (int32_t)osError;
+ } else {
+ if (xTaskResumeAll() != pdTRUE) {
+ if (xTaskGetSchedulerState() != taskSCHEDULER_RUNNING) {
+ lock = (int32_t)osError;
+ }
+ }
+ }
+ }
+ break;
+
+ case taskSCHEDULER_NOT_STARTED:
+ default:
+ lock = (int32_t)osError;
+ break;
+ }
+ }
+
+ return (lock);
+}
+
+uint32_t osKernelGetTickCount(void)
+{
+ TickType_t ticks;
+
+ if (IS_IRQ()) {
+ ticks = xTaskGetTickCountFromISR();
+ } else {
+ ticks = xTaskGetTickCount();
+ }
+
+ return (ticks);
+}
+
+uint32_t osKernelGetTickFreq(void)
+{
+ return (configTICK_RATE_HZ);
+}
+
+/*---------------------------------------------------------------------------*/
+
+osThreadId_t osThreadNew(
+ osThreadFunc_t func,
+ void *argument,
+ const osThreadAttr_t *attr)
+{
+ const char *name;
+ uint32_t stack;
+ TaskHandle_t hTask;
+ UBaseType_t prio;
+ int32_t mem;
+
+ hTask = NULL;
+
+ if (!IS_IRQ() && (func != NULL)) {
+ stack = configMINIMAL_STACK_SIZE;
+ prio = (UBaseType_t)osPriorityNormal;
+
+ name = NULL;
+ mem = -1;
+
+ if (attr != NULL) {
+ if (attr->name != NULL) {
+ name = attr->name;
+ }
+ if (attr->priority != osPriorityNone) {
+ prio = (UBaseType_t)attr->priority;
+ }
+
+ if ((prio < osPriorityIdle) || (prio > osPriorityISR) ||
+ ((attr->attr_bits & osThreadJoinable) == osThreadJoinable)) {
+ return (NULL);
+ }
+
+ if (attr->stack_size > 0U) {
+ /* In FreeRTOS stack is not in bytes, */
+ /* but in sizeof(StackType_t) which is 4 on ARM ports. */
+ /* Stack size should be therefore 4 byte aligned in order to */
+ /* avoid division caused side effects */
+ stack = attr->stack_size / sizeof(StackType_t);
+ }
+
+ if ((attr->cb_mem != NULL) &&
+ (attr->cb_size >= sizeof(StaticTask_t)) &&
+ (attr->stack_mem != NULL) && (attr->stack_size > 0U)) {
+ mem = 1;
+ } else {
+ if ((attr->cb_mem == NULL) && (attr->cb_size == 0U) &&
+ (attr->stack_mem == NULL)) {
+ mem = 0;
+ }
+ }
+ } else {
+ mem = 0;
+ }
+
+ if (mem == 1) {
+ hTask = xTaskCreateStatic(
+ (TaskFunction_t)func,
+ name,
+ stack,
+ argument,
+ prio,
+ (StackType_t *)attr->stack_mem,
+ (StaticTask_t *)attr->cb_mem);
+ } else {
+ if (mem == 0) {
+ if (xTaskCreate(
+ (TaskFunction_t)func,
+ name,
+ (uint16_t)stack,
+ argument,
+ prio,
+ &hTask) != pdPASS) {
+ hTask = NULL;
+ }
+ }
+ }
+ }
+
+ return ((osThreadId_t)hTask);
+}
+
+uint32_t osThreadFlagsSet(osThreadId_t thread_id, uint32_t flags)
+{
+ TaskHandle_t hTask = (TaskHandle_t)thread_id;
+ uint32_t rflags;
+ BaseType_t yield;
+
+ if ((hTask == NULL) || ((flags & THREAD_FLAGS_INVALID_BITS) != 0U)) {
+ rflags = (uint32_t)osErrorParameter;
+ } else {
+ rflags = (uint32_t)osError;
+
+ if (IS_IRQ()) {
+ yield = pdFALSE;
+
+ (void)xTaskNotifyFromISR(hTask, flags, eSetBits, &yield);
+ (void)xTaskNotifyAndQueryFromISR(
+ hTask, 0, eNoAction, &rflags, NULL);
+
+ portYIELD_FROM_ISR(yield);
+ } else {
+ (void)xTaskNotify(hTask, flags, eSetBits);
+ (void)xTaskNotifyAndQuery(hTask, 0, eNoAction, &rflags);
+ }
+ }
+ /* Return flags after setting */
+ return (rflags);
+}
+
+uint32_t osThreadFlagsClear(uint32_t flags)
+{
+ TaskHandle_t hTask;
+ uint32_t rflags, cflags;
+
+ if (IS_IRQ()) {
+ rflags = (uint32_t)osErrorISR;
+ } else if ((flags & THREAD_FLAGS_INVALID_BITS) != 0U) {
+ rflags = (uint32_t)osErrorParameter;
+ } else {
+ hTask = xTaskGetCurrentTaskHandle();
+
+ if (xTaskNotifyAndQuery(hTask, 0, eNoAction, &cflags) == pdPASS) {
+ rflags = cflags;
+ cflags &= ~flags;
+
+ if (xTaskNotify(hTask, cflags, eSetValueWithOverwrite) != pdPASS) {
+ rflags = (uint32_t)osError;
+ }
+ } else {
+ rflags = (uint32_t)osError;
+ }
+ }
+
+ /* Return flags before clearing */
+ return (rflags);
+}
+
+uint32_t osThreadFlagsWait(uint32_t flags, uint32_t options, uint32_t timeout)
+{
+ uint32_t rflags, nval;
+ uint32_t clear;
+ TickType_t t0, td, tout;
+ BaseType_t rval;
+
+ if (IS_IRQ()) {
+ rflags = (uint32_t)osErrorISR;
+ } else if ((flags & THREAD_FLAGS_INVALID_BITS) != 0U) {
+ rflags = (uint32_t)osErrorParameter;
+ } else {
+ if ((options & osFlagsNoClear) == osFlagsNoClear) {
+ clear = 0U;
+ } else {
+ clear = flags;
+ }
+
+ rflags = 0U;
+ tout = timeout;
+
+ t0 = xTaskGetTickCount();
+ do {
+ rval = xTaskNotifyWait(0, clear, &nval, tout);
+
+ if (rval == pdPASS) {
+ rflags &= flags;
+ rflags |= nval;
+
+ if ((options & osFlagsWaitAll) == osFlagsWaitAll) {
+ if ((flags & rflags) == flags) {
+ break;
+ } else {
+ if (timeout == 0U) {
+ rflags = (uint32_t)osErrorResource;
+ break;
+ }
+ }
+ } else {
+ if ((flags & rflags) != 0) {
+ break;
+ } else {
+ if (timeout == 0U) {
+ rflags = (uint32_t)osErrorResource;
+ break;
+ }
+ }
+ }
+
+ /* Update timeout */
+ td = xTaskGetTickCount() - t0;
+
+ if (td > tout) {
+ tout = 0;
+ } else {
+ tout -= td;
+ }
+ } else {
+ if (timeout == 0) {
+ rflags = (uint32_t)osErrorResource;
+ } else {
+ rflags = (uint32_t)osErrorTimeout;
+ }
+ }
+ } while (rval != pdFAIL);
+ }
+
+ /* Return flags before clearing */
+ return (rflags);
+}
+
+/*---------------------------------------------------------------------------*/
+
+/* External Idle and Timer task static memory allocation functions */
+
+
+extern void vApplicationGetIdleTaskMemory(
+ StaticTask_t **ppxIdleTaskTCBBuffer,
+ StackType_t **ppxIdleTaskStackBuffer,
+ uint32_t *pulIdleTaskStackSize);
+extern void vApplicationGetTimerTaskMemory(
+ StaticTask_t **ppxTimerTaskTCBBuffer,
+ StackType_t **ppxTimerTaskStackBuffer,
+ uint32_t *pulTimerTaskStackSize);
+
+/* Idle task control block and stack */
+static StaticTask_t Idle_TCB;
+static StackType_t Idle_Stack[configMINIMAL_STACK_SIZE];
+
+/* Timer task control block and stack */
+static StaticTask_t Timer_TCB;
+static StackType_t Timer_Stack[configTIMER_TASK_STACK_DEPTH];
+
+/*
+ vApplicationGetIdleTaskMemory gets called when
+ configSUPPORT_STATIC_ALLOCATION
+ equals to 1 and is required for static memory allocation support.
+*/
+void vApplicationGetIdleTaskMemory(
+ StaticTask_t **ppxIdleTaskTCBBuffer,
+ StackType_t **ppxIdleTaskStackBuffer,
+ uint32_t *pulIdleTaskStackSize)
+{
+ *ppxIdleTaskTCBBuffer = &Idle_TCB;
+ *ppxIdleTaskStackBuffer = &Idle_Stack[0];
+ *pulIdleTaskStackSize = (uint32_t)configMINIMAL_STACK_SIZE;
+}
+
+/*
+ vApplicationGetTimerTaskMemory gets called when
+ configSUPPORT_STATIC_ALLOCATION
+ equals to 1 and is required for static memory allocation support.
+*/
+void vApplicationGetTimerTaskMemory(
+ StaticTask_t **ppxTimerTaskTCBBuffer,
+ StackType_t **ppxTimerTaskStackBuffer,
+ uint32_t *pulTimerTaskStackSize)
+{
+ *ppxTimerTaskTCBBuffer = &Timer_TCB;
+ *ppxTimerTaskStackBuffer = &Timer_Stack[0];
+ *pulTimerTaskStackSize = (uint32_t)configTIMER_TASK_STACK_DEPTH;
+}
diff --git a/product/rcar/src/CMSIS-FreeRTOS/Source/include/FreeRTOS.h b/product/rcar/src/CMSIS-FreeRTOS/Source/include/FreeRTOS.h
new file mode 100644
index 00000000..c8d2497e
--- /dev/null
+++ b/product/rcar/src/CMSIS-FreeRTOS/Source/include/FreeRTOS.h
@@ -0,0 +1,1317 @@
+/*
+ * FreeRTOS Kernel V10.3.1
+ * Copyright (C) 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * http://www.FreeRTOS.org
+ * http://aws.amazon.com/freertos
+ *
+ * 1 tab == 4 spaces!
+ */
+
+#ifndef INC_FREERTOS_H
+#define INC_FREERTOS_H
+
+/*
+ * Include the generic headers required for the FreeRTOS port being used.
+ */
+#include <stddef.h>
+
+/*
+ * If stdint.h cannot be located then:
+ * + If using GCC ensure the -nostdint options is *not* being used.
+ * + Ensure the project's include path includes the directory in which your
+ * compiler stores stdint.h.
+ * + Set any compiler options necessary for it to support C99, as technically
+ * stdint.h is only mandatory with C99 (FreeRTOS does not require C99 in any
+ * other way).
+ * + The FreeRTOS download includes a simple stdint.h definition that can be
+ * used in cases where none is provided by the compiler. The files only
+ * contains the typedefs required to build FreeRTOS. Read the instructions
+ * in FreeRTOS/source/stdint.readme for more information.
+ */
+#include <stdint.h> /* READ COMMENT ABOVE. */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Application specific configuration options. */
+#include "FreeRTOSConfig.h"
+
+/* Basic FreeRTOS definitions. */
+#include "projdefs.h"
+
+/* Definitions specific to the port being used. */
+#include "portable.h"
+
+/* Must be defaulted before configUSE_NEWLIB_REENTRANT is used below. */
+#ifndef configUSE_NEWLIB_REENTRANT
+# define configUSE_NEWLIB_REENTRANT 0
+#endif
+
+/* Required if struct _reent is used. */
+#if (configUSE_NEWLIB_REENTRANT == 1)
+# include <reent.h>
+#endif
+/*
+ * Check all the required application specific macros have been defined.
+ * These macros are application specific and (as downloaded) are defined
+ * within FreeRTOSConfig.h.
+ */
+
+#ifndef configMINIMAL_STACK_SIZE
+#error Missing definition: configMINIMAL_STACK_SIZE must be defined in FreeRTOSConfig.h. configMINIMAL_STACK_SIZE defines the size (in words) of the stack allocated to the idle task. Refer to the demo project provided for your port for a suitable value.
+#endif
+
+#ifndef configMAX_PRIORITIES
+#error Missing definition: configMAX_PRIORITIES must be defined in FreeRTOSConfig.h. See the Configuration section of the FreeRTOS API documentation for details.
+#endif
+
+#if configMAX_PRIORITIES < 1
+# error configMAX_PRIORITIES must be defined to be greater than or equal to 1.
+#endif
+
+#ifndef configUSE_PREEMPTION
+#error Missing definition: configUSE_PREEMPTION must be defined in FreeRTOSConfig.h as either 1 or 0. See the Configuration section of the FreeRTOS API documentation for details.
+#endif
+
+#ifndef configUSE_IDLE_HOOK
+#error Missing definition: configUSE_IDLE_HOOK must be defined in FreeRTOSConfig.h as either 1 or 0. See the Configuration section of the FreeRTOS API documentation for details.
+#endif
+
+#ifndef configUSE_TICK_HOOK
+#error Missing definition: configUSE_TICK_HOOK must be defined in FreeRTOSConfig.h as either 1 or 0. See the Configuration section of the FreeRTOS API documentation for details.
+#endif
+
+#ifndef configUSE_16_BIT_TICKS
+#error Missing definition: configUSE_16_BIT_TICKS must be defined in FreeRTOSConfig.h as either 1 or 0. See the Configuration section of the FreeRTOS API documentation for details.
+#endif
+
+#ifndef configUSE_CO_ROUTINES
+# define configUSE_CO_ROUTINES 0
+#endif
+
+#ifndef INCLUDE_vTaskPrioritySet
+# define INCLUDE_vTaskPrioritySet 0
+#endif
+
+#ifndef INCLUDE_uxTaskPriorityGet
+# define INCLUDE_uxTaskPriorityGet 0
+#endif
+
+#ifndef INCLUDE_vTaskDelete
+# define INCLUDE_vTaskDelete 0
+#endif
+
+#ifndef INCLUDE_vTaskSuspend
+# define INCLUDE_vTaskSuspend 0
+#endif
+
+#ifndef INCLUDE_vTaskDelayUntil
+# define INCLUDE_vTaskDelayUntil 0
+#endif
+
+#ifndef INCLUDE_vTaskDelay
+# define INCLUDE_vTaskDelay 0
+#endif
+
+#ifndef INCLUDE_xTaskGetIdleTaskHandle
+# define INCLUDE_xTaskGetIdleTaskHandle 0
+#endif
+
+#ifndef INCLUDE_xTaskAbortDelay
+# define INCLUDE_xTaskAbortDelay 0
+#endif
+
+#ifndef INCLUDE_xQueueGetMutexHolder
+# define INCLUDE_xQueueGetMutexHolder 0
+#endif
+
+#ifndef INCLUDE_xSemaphoreGetMutexHolder
+# define INCLUDE_xSemaphoreGetMutexHolder INCLUDE_xQueueGetMutexHolder
+#endif
+
+#ifndef INCLUDE_xTaskGetHandle
+# define INCLUDE_xTaskGetHandle 0
+#endif
+
+#ifndef INCLUDE_uxTaskGetStackHighWaterMark
+# define INCLUDE_uxTaskGetStackHighWaterMark 0
+#endif
+
+#ifndef INCLUDE_uxTaskGetStackHighWaterMark2
+# define INCLUDE_uxTaskGetStackHighWaterMark2 0
+#endif
+
+#ifndef INCLUDE_eTaskGetState
+# define INCLUDE_eTaskGetState 0
+#endif
+
+#ifndef INCLUDE_xTaskResumeFromISR
+# define INCLUDE_xTaskResumeFromISR 1
+#endif
+
+#ifndef INCLUDE_xTimerPendFunctionCall
+# define INCLUDE_xTimerPendFunctionCall 0
+#endif
+
+#ifndef INCLUDE_xTaskGetSchedulerState
+# define INCLUDE_xTaskGetSchedulerState 0
+#endif
+
+#ifndef INCLUDE_xTaskGetCurrentTaskHandle
+# define INCLUDE_xTaskGetCurrentTaskHandle 0
+#endif
+
+#if configUSE_CO_ROUTINES != 0
+# ifndef configMAX_CO_ROUTINE_PRIORITIES
+# error configMAX_CO_ROUTINE_PRIORITIES must be greater than or equal to 1.
+# endif
+#endif
+
+#ifndef configUSE_DAEMON_TASK_STARTUP_HOOK
+# define configUSE_DAEMON_TASK_STARTUP_HOOK 0
+#endif
+
+#ifndef configUSE_APPLICATION_TASK_TAG
+# define configUSE_APPLICATION_TASK_TAG 0
+#endif
+
+#ifndef configNUM_THREAD_LOCAL_STORAGE_POINTERS
+# define configNUM_THREAD_LOCAL_STORAGE_POINTERS 0
+#endif
+
+#ifndef configUSE_RECURSIVE_MUTEXES
+# define configUSE_RECURSIVE_MUTEXES 0
+#endif
+
+#ifndef configUSE_MUTEXES
+# define configUSE_MUTEXES 0
+#endif
+
+#ifndef configUSE_TIMERS
+# define configUSE_TIMERS 0
+#endif
+
+#ifndef configUSE_COUNTING_SEMAPHORES
+# define configUSE_COUNTING_SEMAPHORES 0
+#endif
+
+#ifndef configUSE_ALTERNATIVE_API
+# define configUSE_ALTERNATIVE_API 0
+#endif
+
+#ifndef portCRITICAL_NESTING_IN_TCB
+# define portCRITICAL_NESTING_IN_TCB 0
+#endif
+
+#ifndef configMAX_TASK_NAME_LEN
+# define configMAX_TASK_NAME_LEN 16
+#endif
+
+#ifndef configIDLE_SHOULD_YIELD
+# define configIDLE_SHOULD_YIELD 1
+#endif
+
+#if configMAX_TASK_NAME_LEN < 1
+#error configMAX_TASK_NAME_LEN must be set to a minimum of 1 in FreeRTOSConfig.h
+#endif
+
+#ifndef configASSERT
+# define configASSERT(x)
+# define configASSERT_DEFINED 0
+#else
+# define configASSERT_DEFINED 1
+#endif
+
+/* configPRECONDITION should be defined as configASSERT.
+The CBMC proofs need a way to track assumptions and assertions.
+A configPRECONDITION statement should express an implicit invariant or
+assumption made. A configASSERT statement should express an invariant that must
+hold explicit before calling the code. */
+#ifndef configPRECONDITION
+# define configPRECONDITION(X) configASSERT(X)
+# define configPRECONDITION_DEFINED 0
+#else
+# define configPRECONDITION_DEFINED 1
+#endif
+
+#ifndef portMEMORY_BARRIER
+# define portMEMORY_BARRIER()
+#endif
+
+#ifndef portSOFTWARE_BARRIER
+# define portSOFTWARE_BARRIER()
+#endif
+
+/* The timers module relies on xTaskGetSchedulerState(). */
+#if configUSE_TIMERS == 1
+
+# ifndef configTIMER_TASK_PRIORITY
+#error If configUSE_TIMERS is set to 1 then configTIMER_TASK_PRIORITY must also be defined.
+# endif /* configTIMER_TASK_PRIORITY */
+
+# ifndef configTIMER_QUEUE_LENGTH
+#error If configUSE_TIMERS is set to 1 then configTIMER_QUEUE_LENGTH must also be defined.
+# endif /* configTIMER_QUEUE_LENGTH */
+
+# ifndef configTIMER_TASK_STACK_DEPTH
+#error If configUSE_TIMERS is set to 1 then configTIMER_TASK_STACK_DEPTH must also be defined.
+# endif /* configTIMER_TASK_STACK_DEPTH */
+
+#endif /* configUSE_TIMERS */
+
+#ifndef portSET_INTERRUPT_MASK_FROM_ISR
+# define portSET_INTERRUPT_MASK_FROM_ISR() 0
+#endif
+
+#ifndef portCLEAR_INTERRUPT_MASK_FROM_ISR
+# define portCLEAR_INTERRUPT_MASK_FROM_ISR(uxSavedStatusValue) \
+ (void)uxSavedStatusValue
+#endif
+
+#ifndef portCLEAN_UP_TCB
+# define portCLEAN_UP_TCB(pxTCB) (void)pxTCB
+#endif
+
+#ifndef portPRE_TASK_DELETE_HOOK
+# define portPRE_TASK_DELETE_HOOK(pvTaskToDelete, pxYieldPending)
+#endif
+
+#ifndef portSETUP_TCB
+# define portSETUP_TCB(pxTCB) (void)pxTCB
+#endif
+
+#ifndef configQUEUE_REGISTRY_SIZE
+# define configQUEUE_REGISTRY_SIZE 0U
+#endif
+
+#if (configQUEUE_REGISTRY_SIZE < 1)
+# define vQueueAddToRegistry(xQueue, pcName)
+# define vQueueUnregisterQueue(xQueue)
+# define pcQueueGetName(xQueue)
+#endif
+
+#ifndef portPOINTER_SIZE_TYPE
+# define portPOINTER_SIZE_TYPE uint32_t
+#endif
+
+/* Remove any unused trace macros. */
+#ifndef traceSTART
+/* Used to perform any necessary initialisation - for example, open a file
+into which trace is to be written. */
+# define traceSTART()
+#endif
+
+#ifndef traceEND
+/* Use to close a trace, for example close a file into which trace has been
+written. */
+# define traceEND()
+#endif
+
+#ifndef traceTASK_SWITCHED_IN
+/* Called after a task has been selected to run. pxCurrentTCB holds a pointer
+to the task control block of the selected task. */
+# define traceTASK_SWITCHED_IN()
+#endif
+
+#ifndef traceINCREASE_TICK_COUNT
+/* Called before stepping the tick count after waking from tickless idle
+sleep. */
+# define traceINCREASE_TICK_COUNT(x)
+#endif
+
+#ifndef traceLOW_POWER_IDLE_BEGIN
+/* Called immediately before entering tickless idle. */
+# define traceLOW_POWER_IDLE_BEGIN()
+#endif
+
+#ifndef traceLOW_POWER_IDLE_END
+/* Called when returning to the Idle task after a tickless idle. */
+# define traceLOW_POWER_IDLE_END()
+#endif
+
+#ifndef traceTASK_SWITCHED_OUT
+/* Called before a task has been selected to run. pxCurrentTCB holds a pointer
+to the task control block of the task being switched out. */
+# define traceTASK_SWITCHED_OUT()
+#endif
+
+#ifndef traceTASK_PRIORITY_INHERIT
+/* Called when a task attempts to take a mutex that is already held by a
+lower priority task. pxTCBOfMutexHolder is a pointer to the TCB of the task
+that holds the mutex. uxInheritedPriority is the priority the mutex holder
+will inherit (the priority of the task that is attempting to obtain the
+mutex). */
+# define traceTASK_PRIORITY_INHERIT(pxTCBOfMutexHolder, uxInheritedPriority)
+#endif
+
+#ifndef traceTASK_PRIORITY_DISINHERIT
+/* Called when a task releases a mutex, the holding of which had resulted in
+the task inheriting the priority of a higher priority task.
+pxTCBOfMutexHolder is a pointer to the TCB of the task that is releasing the
+mutex. uxOriginalPriority is the task's configured (base) priority. */
+# define traceTASK_PRIORITY_DISINHERIT( \
+ pxTCBOfMutexHolder, uxOriginalPriority)
+#endif
+
+#ifndef traceBLOCKING_ON_QUEUE_RECEIVE
+/* Task is about to block because it cannot read from a
+queue/mutex/semaphore. pxQueue is a pointer to the queue/mutex/semaphore
+upon which the read was attempted. pxCurrentTCB points to the TCB of the
+task that attempted the read. */
+# define traceBLOCKING_ON_QUEUE_RECEIVE(pxQueue)
+#endif
+
+#ifndef traceBLOCKING_ON_QUEUE_PEEK
+/* Task is about to block because it cannot read from a
+queue/mutex/semaphore. pxQueue is a pointer to the queue/mutex/semaphore
+upon which the read was attempted. pxCurrentTCB points to the TCB of the
+task that attempted the read. */
+# define traceBLOCKING_ON_QUEUE_PEEK(pxQueue)
+#endif
+
+#ifndef traceBLOCKING_ON_QUEUE_SEND
+/* Task is about to block because it cannot write to a
+queue/mutex/semaphore. pxQueue is a pointer to the queue/mutex/semaphore
+upon which the write was attempted. pxCurrentTCB points to the TCB of the
+task that attempted the write. */
+# define traceBLOCKING_ON_QUEUE_SEND(pxQueue)
+#endif
+
+#ifndef configCHECK_FOR_STACK_OVERFLOW
+# define configCHECK_FOR_STACK_OVERFLOW 0
+#endif
+
+#ifndef configRECORD_STACK_HIGH_ADDRESS
+# define configRECORD_STACK_HIGH_ADDRESS 0
+#endif
+
+#ifndef configINCLUDE_FREERTOS_TASK_C_ADDITIONS_H
+# define configINCLUDE_FREERTOS_TASK_C_ADDITIONS_H 0
+#endif
+
+/* The following event macros are embedded in the kernel API calls. */
+
+#ifndef traceMOVED_TASK_TO_READY_STATE
+# define traceMOVED_TASK_TO_READY_STATE(pxTCB)
+#endif
+
+#ifndef tracePOST_MOVED_TASK_TO_READY_STATE
+# define tracePOST_MOVED_TASK_TO_READY_STATE(pxTCB)
+#endif
+
+#ifndef traceQUEUE_CREATE
+# define traceQUEUE_CREATE(pxNewQueue)
+#endif
+
+#ifndef traceQUEUE_CREATE_FAILED
+# define traceQUEUE_CREATE_FAILED(ucQueueType)
+#endif
+
+#ifndef traceCREATE_MUTEX
+# define traceCREATE_MUTEX(pxNewQueue)
+#endif
+
+#ifndef traceCREATE_MUTEX_FAILED
+# define traceCREATE_MUTEX_FAILED()
+#endif
+
+#ifndef traceGIVE_MUTEX_RECURSIVE
+# define traceGIVE_MUTEX_RECURSIVE(pxMutex)
+#endif
+
+#ifndef traceGIVE_MUTEX_RECURSIVE_FAILED
+# define traceGIVE_MUTEX_RECURSIVE_FAILED(pxMutex)
+#endif
+
+#ifndef traceTAKE_MUTEX_RECURSIVE
+# define traceTAKE_MUTEX_RECURSIVE(pxMutex)
+#endif
+
+#ifndef traceTAKE_MUTEX_RECURSIVE_FAILED
+# define traceTAKE_MUTEX_RECURSIVE_FAILED(pxMutex)
+#endif
+
+#ifndef traceCREATE_COUNTING_SEMAPHORE
+# define traceCREATE_COUNTING_SEMAPHORE()
+#endif
+
+#ifndef traceCREATE_COUNTING_SEMAPHORE_FAILED
+# define traceCREATE_COUNTING_SEMAPHORE_FAILED()
+#endif
+
+#ifndef traceQUEUE_SEND
+# define traceQUEUE_SEND(pxQueue)
+#endif
+
+#ifndef traceQUEUE_SEND_FAILED
+# define traceQUEUE_SEND_FAILED(pxQueue)
+#endif
+
+#ifndef traceQUEUE_RECEIVE
+# define traceQUEUE_RECEIVE(pxQueue)
+#endif
+
+#ifndef traceQUEUE_PEEK
+# define traceQUEUE_PEEK(pxQueue)
+#endif
+
+#ifndef traceQUEUE_PEEK_FAILED
+# define traceQUEUE_PEEK_FAILED(pxQueue)
+#endif
+
+#ifndef traceQUEUE_PEEK_FROM_ISR
+# define traceQUEUE_PEEK_FROM_ISR(pxQueue)
+#endif
+
+#ifndef traceQUEUE_RECEIVE_FAILED
+# define traceQUEUE_RECEIVE_FAILED(pxQueue)
+#endif
+
+#ifndef traceQUEUE_SEND_FROM_ISR
+# define traceQUEUE_SEND_FROM_ISR(pxQueue)
+#endif
+
+#ifndef traceQUEUE_SEND_FROM_ISR_FAILED
+# define traceQUEUE_SEND_FROM_ISR_FAILED(pxQueue)
+#endif
+
+#ifndef traceQUEUE_RECEIVE_FROM_ISR
+# define traceQUEUE_RECEIVE_FROM_ISR(pxQueue)
+#endif
+
+#ifndef traceQUEUE_RECEIVE_FROM_ISR_FAILED
+# define traceQUEUE_RECEIVE_FROM_ISR_FAILED(pxQueue)
+#endif
+
+#ifndef traceQUEUE_PEEK_FROM_ISR_FAILED
+# define traceQUEUE_PEEK_FROM_ISR_FAILED(pxQueue)
+#endif
+
+#ifndef traceQUEUE_DELETE
+# define traceQUEUE_DELETE(pxQueue)
+#endif
+
+#ifndef traceTASK_CREATE
+# define traceTASK_CREATE(pxNewTCB)
+#endif
+
+#ifndef traceTASK_CREATE_FAILED
+# define traceTASK_CREATE_FAILED()
+#endif
+
+#ifndef traceTASK_DELETE
+# define traceTASK_DELETE(pxTaskToDelete)
+#endif
+
+#ifndef traceTASK_DELAY_UNTIL
+# define traceTASK_DELAY_UNTIL(x)
+#endif
+
+#ifndef traceTASK_DELAY
+# define traceTASK_DELAY()
+#endif
+
+#ifndef traceTASK_PRIORITY_SET
+# define traceTASK_PRIORITY_SET(pxTask, uxNewPriority)
+#endif
+
+#ifndef traceTASK_SUSPEND
+# define traceTASK_SUSPEND(pxTaskToSuspend)
+#endif
+
+#ifndef traceTASK_RESUME
+# define traceTASK_RESUME(pxTaskToResume)
+#endif
+
+#ifndef traceTASK_RESUME_FROM_ISR
+# define traceTASK_RESUME_FROM_ISR(pxTaskToResume)
+#endif
+
+#ifndef traceTASK_INCREMENT_TICK
+# define traceTASK_INCREMENT_TICK(xTickCount)
+#endif
+
+#ifndef traceTIMER_CREATE
+# define traceTIMER_CREATE(pxNewTimer)
+#endif
+
+#ifndef traceTIMER_CREATE_FAILED
+# define traceTIMER_CREATE_FAILED()
+#endif
+
+#ifndef traceTIMER_COMMAND_SEND
+# define traceTIMER_COMMAND_SEND( \
+ xTimer, xMessageID, xMessageValueValue, xReturn)
+#endif
+
+#ifndef traceTIMER_EXPIRED
+# define traceTIMER_EXPIRED(pxTimer)
+#endif
+
+#ifndef traceTIMER_COMMAND_RECEIVED
+# define traceTIMER_COMMAND_RECEIVED(pxTimer, xMessageID, xMessageValue)
+#endif
+
+#ifndef traceMALLOC
+# define traceMALLOC(pvAddress, uiSize)
+#endif
+
+#ifndef traceFREE
+# define traceFREE(pvAddress, uiSize)
+#endif
+
+#ifndef traceEVENT_GROUP_CREATE
+# define traceEVENT_GROUP_CREATE(xEventGroup)
+#endif
+
+#ifndef traceEVENT_GROUP_CREATE_FAILED
+# define traceEVENT_GROUP_CREATE_FAILED()
+#endif
+
+#ifndef traceEVENT_GROUP_SYNC_BLOCK
+# define traceEVENT_GROUP_SYNC_BLOCK( \
+ xEventGroup, uxBitsToSet, uxBitsToWaitFor)
+#endif
+
+#ifndef traceEVENT_GROUP_SYNC_END
+# define traceEVENT_GROUP_SYNC_END( \
+ xEventGroup, uxBitsToSet, uxBitsToWaitFor, xTimeoutOccurred) \
+ (void)xTimeoutOccurred
+#endif
+
+#ifndef traceEVENT_GROUP_WAIT_BITS_BLOCK
+# define traceEVENT_GROUP_WAIT_BITS_BLOCK(xEventGroup, uxBitsToWaitFor)
+#endif
+
+#ifndef traceEVENT_GROUP_WAIT_BITS_END
+# define traceEVENT_GROUP_WAIT_BITS_END( \
+ xEventGroup, uxBitsToWaitFor, xTimeoutOccurred) \
+ (void)xTimeoutOccurred
+#endif
+
+#ifndef traceEVENT_GROUP_CLEAR_BITS
+# define traceEVENT_GROUP_CLEAR_BITS(xEventGroup, uxBitsToClear)
+#endif
+
+#ifndef traceEVENT_GROUP_CLEAR_BITS_FROM_ISR
+# define traceEVENT_GROUP_CLEAR_BITS_FROM_ISR(xEventGroup, uxBitsToClear)
+#endif
+
+#ifndef traceEVENT_GROUP_SET_BITS
+# define traceEVENT_GROUP_SET_BITS(xEventGroup, uxBitsToSet)
+#endif
+
+#ifndef traceEVENT_GROUP_SET_BITS_FROM_ISR
+# define traceEVENT_GROUP_SET_BITS_FROM_ISR(xEventGroup, uxBitsToSet)
+#endif
+
+#ifndef traceEVENT_GROUP_DELETE
+# define traceEVENT_GROUP_DELETE(xEventGroup)
+#endif
+
+#ifndef tracePEND_FUNC_CALL
+# define tracePEND_FUNC_CALL( \
+ xFunctionToPend, pvParameter1, ulParameter2, ret)
+#endif
+
+#ifndef tracePEND_FUNC_CALL_FROM_ISR
+# define tracePEND_FUNC_CALL_FROM_ISR( \
+ xFunctionToPend, pvParameter1, ulParameter2, ret)
+#endif
+
+#ifndef traceQUEUE_REGISTRY_ADD
+# define traceQUEUE_REGISTRY_ADD(xQueue, pcQueueName)
+#endif
+
+#ifndef traceTASK_NOTIFY_TAKE_BLOCK
+# define traceTASK_NOTIFY_TAKE_BLOCK()
+#endif
+
+#ifndef traceTASK_NOTIFY_TAKE
+# define traceTASK_NOTIFY_TAKE()
+#endif
+
+#ifndef traceTASK_NOTIFY_WAIT_BLOCK
+# define traceTASK_NOTIFY_WAIT_BLOCK()
+#endif
+
+#ifndef traceTASK_NOTIFY_WAIT
+# define traceTASK_NOTIFY_WAIT()
+#endif
+
+#ifndef traceTASK_NOTIFY
+# define traceTASK_NOTIFY()
+#endif
+
+#ifndef traceTASK_NOTIFY_FROM_ISR
+# define traceTASK_NOTIFY_FROM_ISR()
+#endif
+
+#ifndef traceTASK_NOTIFY_GIVE_FROM_ISR
+# define traceTASK_NOTIFY_GIVE_FROM_ISR()
+#endif
+
+#ifndef traceSTREAM_BUFFER_CREATE_FAILED
+# define traceSTREAM_BUFFER_CREATE_FAILED(xIsMessageBuffer)
+#endif
+
+#ifndef traceSTREAM_BUFFER_CREATE_STATIC_FAILED
+# define traceSTREAM_BUFFER_CREATE_STATIC_FAILED(xReturn, xIsMessageBuffer)
+#endif
+
+#ifndef traceSTREAM_BUFFER_CREATE
+# define traceSTREAM_BUFFER_CREATE(pxStreamBuffer, xIsMessageBuffer)
+#endif
+
+#ifndef traceSTREAM_BUFFER_DELETE
+# define traceSTREAM_BUFFER_DELETE(xStreamBuffer)
+#endif
+
+#ifndef traceSTREAM_BUFFER_RESET
+# define traceSTREAM_BUFFER_RESET(xStreamBuffer)
+#endif
+
+#ifndef traceBLOCKING_ON_STREAM_BUFFER_SEND
+# define traceBLOCKING_ON_STREAM_BUFFER_SEND(xStreamBuffer)
+#endif
+
+#ifndef traceSTREAM_BUFFER_SEND
+# define traceSTREAM_BUFFER_SEND(xStreamBuffer, xBytesSent)
+#endif
+
+#ifndef traceSTREAM_BUFFER_SEND_FAILED
+# define traceSTREAM_BUFFER_SEND_FAILED(xStreamBuffer)
+#endif
+
+#ifndef traceSTREAM_BUFFER_SEND_FROM_ISR
+# define traceSTREAM_BUFFER_SEND_FROM_ISR(xStreamBuffer, xBytesSent)
+#endif
+
+#ifndef traceBLOCKING_ON_STREAM_BUFFER_RECEIVE
+# define traceBLOCKING_ON_STREAM_BUFFER_RECEIVE(xStreamBuffer)
+#endif
+
+#ifndef traceSTREAM_BUFFER_RECEIVE
+# define traceSTREAM_BUFFER_RECEIVE(xStreamBuffer, xReceivedLength)
+#endif
+
+#ifndef traceSTREAM_BUFFER_RECEIVE_FAILED
+# define traceSTREAM_BUFFER_RECEIVE_FAILED(xStreamBuffer)
+#endif
+
+#ifndef traceSTREAM_BUFFER_RECEIVE_FROM_ISR
+# define traceSTREAM_BUFFER_RECEIVE_FROM_ISR(xStreamBuffer, xReceivedLength)
+#endif
+
+#ifndef configGENERATE_RUN_TIME_STATS
+# define configGENERATE_RUN_TIME_STATS 0
+#endif
+
+#if (configGENERATE_RUN_TIME_STATS == 1)
+
+# ifndef portCONFIGURE_TIMER_FOR_RUN_TIME_STATS
+#error If configGENERATE_RUN_TIME_STATS is defined then portCONFIGURE_TIMER_FOR_RUN_TIME_STATS must also be defined. portCONFIGURE_TIMER_FOR_RUN_TIME_STATS should call a port layer function to setup a peripheral timer/counter that can then be used as the run time counter time base.
+# endif /* portCONFIGURE_TIMER_FOR_RUN_TIME_STATS */
+
+# ifndef portGET_RUN_TIME_COUNTER_VALUE
+# ifndef portALT_GET_RUN_TIME_COUNTER_VALUE
+#error If configGENERATE_RUN_TIME_STATS is defined then either portGET_RUN_TIME_COUNTER_VALUE or portALT_GET_RUN_TIME_COUNTER_VALUE must also be defined. See the examples provided and the FreeRTOS web site for more information.
+# endif /* portALT_GET_RUN_TIME_COUNTER_VALUE */
+# endif /* portGET_RUN_TIME_COUNTER_VALUE */
+
+#endif /* configGENERATE_RUN_TIME_STATS */
+
+#ifndef portCONFIGURE_TIMER_FOR_RUN_TIME_STATS
+# define portCONFIGURE_TIMER_FOR_RUN_TIME_STATS()
+#endif
+
+#ifndef configUSE_MALLOC_FAILED_HOOK
+# define configUSE_MALLOC_FAILED_HOOK 0
+#endif
+
+#ifndef portPRIVILEGE_BIT
+# define portPRIVILEGE_BIT ((UBaseType_t)0x00)
+#endif
+
+#ifndef portYIELD_WITHIN_API
+# define portYIELD_WITHIN_API portYIELD
+#endif
+
+#ifndef portSUPPRESS_TICKS_AND_SLEEP
+# define portSUPPRESS_TICKS_AND_SLEEP(xExpectedIdleTime)
+#endif
+
+#ifndef configEXPECTED_IDLE_TIME_BEFORE_SLEEP
+# define configEXPECTED_IDLE_TIME_BEFORE_SLEEP 2
+#endif
+
+#if configEXPECTED_IDLE_TIME_BEFORE_SLEEP < 2
+# error configEXPECTED_IDLE_TIME_BEFORE_SLEEP must not be less than 2
+#endif
+
+#ifndef configUSE_TICKLESS_IDLE
+# define configUSE_TICKLESS_IDLE 0
+#endif
+
+#ifndef configPRE_SUPPRESS_TICKS_AND_SLEEP_PROCESSING
+# define configPRE_SUPPRESS_TICKS_AND_SLEEP_PROCESSING(x)
+#endif
+
+#ifndef configPRE_SLEEP_PROCESSING
+# define configPRE_SLEEP_PROCESSING(x)
+#endif
+
+#ifndef configPOST_SLEEP_PROCESSING
+# define configPOST_SLEEP_PROCESSING(x)
+#endif
+
+#ifndef configUSE_QUEUE_SETS
+# define configUSE_QUEUE_SETS 0
+#endif
+
+#ifndef portTASK_USES_FLOATING_POINT
+# define portTASK_USES_FLOATING_POINT()
+#endif
+
+#ifndef portALLOCATE_SECURE_CONTEXT
+# define portALLOCATE_SECURE_CONTEXT(ulSecureStackSize)
+#endif
+
+#ifndef portDONT_DISCARD
+# define portDONT_DISCARD
+#endif
+
+#ifndef configUSE_TIME_SLICING
+# define configUSE_TIME_SLICING 1
+#endif
+
+#ifndef configINCLUDE_APPLICATION_DEFINED_PRIVILEGED_FUNCTIONS
+# define configINCLUDE_APPLICATION_DEFINED_PRIVILEGED_FUNCTIONS 0
+#endif
+
+#ifndef configUSE_STATS_FORMATTING_FUNCTIONS
+# define configUSE_STATS_FORMATTING_FUNCTIONS 0
+#endif
+
+#ifndef portASSERT_IF_INTERRUPT_PRIORITY_INVALID
+# define portASSERT_IF_INTERRUPT_PRIORITY_INVALID()
+#endif
+
+#ifndef configUSE_TRACE_FACILITY
+# define configUSE_TRACE_FACILITY 0
+#endif
+
+#ifndef mtCOVERAGE_TEST_MARKER
+# define mtCOVERAGE_TEST_MARKER()
+#endif
+
+#ifndef mtCOVERAGE_TEST_DELAY
+# define mtCOVERAGE_TEST_DELAY()
+#endif
+
+#ifndef portASSERT_IF_IN_ISR
+# define portASSERT_IF_IN_ISR()
+#endif
+
+#ifndef configUSE_PORT_OPTIMISED_TASK_SELECTION
+# define configUSE_PORT_OPTIMISED_TASK_SELECTION 0
+#endif
+
+#ifndef configAPPLICATION_ALLOCATED_HEAP
+# define configAPPLICATION_ALLOCATED_HEAP 0
+#endif
+
+#ifndef configUSE_TASK_NOTIFICATIONS
+# define configUSE_TASK_NOTIFICATIONS 1
+#endif
+
+#ifndef configUSE_POSIX_ERRNO
+# define configUSE_POSIX_ERRNO 0
+#endif
+
+#ifndef portTICK_TYPE_IS_ATOMIC
+# define portTICK_TYPE_IS_ATOMIC 0
+#endif
+
+#ifndef configSUPPORT_STATIC_ALLOCATION
+/* Defaults to 0 for backward compatibility. */
+# define configSUPPORT_STATIC_ALLOCATION 0
+#endif
+
+#ifndef configSUPPORT_DYNAMIC_ALLOCATION
+/* Defaults to 1 for backward compatibility. */
+# define configSUPPORT_DYNAMIC_ALLOCATION 1
+#endif
+
+#ifndef configSTACK_DEPTH_TYPE
+/* Defaults to uint16_t for backward compatibility, but can be overridden
+in FreeRTOSConfig.h if uint16_t is too restrictive. */
+# define configSTACK_DEPTH_TYPE uint16_t
+#endif
+
+#ifndef configMESSAGE_BUFFER_LENGTH_TYPE
+/* Defaults to size_t for backward compatibility, but can be overridden
+in FreeRTOSConfig.h if lengths will always be less than the number of bytes
+in a size_t. */
+# define configMESSAGE_BUFFER_LENGTH_TYPE size_t
+#endif
+
+/* Sanity check the configuration. */
+#if (configUSE_TICKLESS_IDLE != 0)
+# if (INCLUDE_vTaskSuspend != 1)
+# error INCLUDE_vTaskSuspend must be set to 1 if configUSE_TICKLESS_IDLE is not set to 0
+# endif /* INCLUDE_vTaskSuspend */
+#endif /* configUSE_TICKLESS_IDLE */
+
+#if ( \
+ (configSUPPORT_STATIC_ALLOCATION == 0) && \
+ (configSUPPORT_DYNAMIC_ALLOCATION == 0))
+# error configSUPPORT_STATIC_ALLOCATION and configSUPPORT_DYNAMIC_ALLOCATION cannot both be 0, but can both be 1.
+#endif
+
+#if ((configUSE_RECURSIVE_MUTEXES == 1) && (configUSE_MUTEXES != 1))
+# error configUSE_MUTEXES must be set to 1 to use recursive mutexes
+#endif
+
+#ifndef configINITIAL_TICK_COUNT
+# define configINITIAL_TICK_COUNT 0
+#endif
+
+#if (portTICK_TYPE_IS_ATOMIC == 0)
+/* Either variables of tick type cannot be read atomically, or
+portTICK_TYPE_IS_ATOMIC was not set - map the critical sections used when
+the tick count is returned to the standard critical section macros. */
+# define portTICK_TYPE_ENTER_CRITICAL() portENTER_CRITICAL()
+# define portTICK_TYPE_EXIT_CRITICAL() portEXIT_CRITICAL()
+# define portTICK_TYPE_SET_INTERRUPT_MASK_FROM_ISR() \
+ portSET_INTERRUPT_MASK_FROM_ISR()
+# define portTICK_TYPE_CLEAR_INTERRUPT_MASK_FROM_ISR(x) \
+ portCLEAR_INTERRUPT_MASK_FROM_ISR((x))
+#else
+/* The tick type can be read atomically, so critical sections used when the
+tick count is returned can be defined away. */
+# define portTICK_TYPE_ENTER_CRITICAL()
+# define portTICK_TYPE_EXIT_CRITICAL()
+# define portTICK_TYPE_SET_INTERRUPT_MASK_FROM_ISR() 0
+# define portTICK_TYPE_CLEAR_INTERRUPT_MASK_FROM_ISR(x) (void)x
+#endif
+
+/* Definitions to allow backward compatibility with FreeRTOS versions prior to
+V8 if desired. */
+#ifndef configENABLE_BACKWARD_COMPATIBILITY
+# define configENABLE_BACKWARD_COMPATIBILITY 1
+#endif
+
+#ifndef configPRINTF
+/* configPRINTF() was not defined, so define it away to nothing. To use
+configPRINTF() then define it as follows (where MyPrintFunction() is
+provided by the application writer):
+
+void MyPrintFunction(const char *pcFormat, ... );
+#define configPRINTF( X ) MyPrintFunction X
+
+Then call like a standard printf() function, but placing brackets around
+all parameters so they are passed as a single parameter. For example:
+configPRINTF( ("Value = %d", MyVariable) ); */
+# define configPRINTF(X)
+#endif
+
+#ifndef configMAX
+/* The application writer has not provided their own MAX macro, so define
+the following generic implementation. */
+# define configMAX(a, b) (((a) > (b)) ? (a) : (b))
+#endif
+
+#ifndef configMIN
+/* The application writer has not provided their own MIN macro, so define
+the following generic implementation. */
+# define configMIN(a, b) (((a) < (b)) ? (a) : (b))
+#endif
+
+#if configENABLE_BACKWARD_COMPATIBILITY == 1
+# define eTaskStateGet eTaskGetState
+# define portTickType TickType_t
+# define xTaskHandle TaskHandle_t
+# define xQueueHandle QueueHandle_t
+# define xSemaphoreHandle SemaphoreHandle_t
+# define xQueueSetHandle QueueSetHandle_t
+# define xQueueSetMemberHandle QueueSetMemberHandle_t
+# define xTimeOutType TimeOut_t
+# define xMemoryRegion MemoryRegion_t
+# define xTaskParameters TaskParameters_t
+# define xTaskStatusType TaskStatus_t
+# define xTimerHandle TimerHandle_t
+# define xCoRoutineHandle CoRoutineHandle_t
+# define pdTASK_HOOK_CODE TaskHookFunction_t
+# define portTICK_RATE_MS portTICK_PERIOD_MS
+# define pcTaskGetTaskName pcTaskGetName
+# define pcTimerGetTimerName pcTimerGetName
+# define pcQueueGetQueueName pcQueueGetName
+# define vTaskGetTaskInfo vTaskGetInfo
+# define xTaskGetIdleRunTimeCounter ulTaskGetIdleRunTimeCounter
+
+/* Backward compatibility within the scheduler code only - these definitions
+are not really required but are included for completeness. */
+# define tmrTIMER_CALLBACK TimerCallbackFunction_t
+# define pdTASK_CODE TaskFunction_t
+# define xListItem ListItem_t
+# define xList List_t
+
+/* For libraries that break the list data hiding, and access list structure
+members directly (which is not supposed to be done). */
+# define pxContainer pvContainer
+#endif /* configENABLE_BACKWARD_COMPATIBILITY */
+
+#if (configUSE_ALTERNATIVE_API != 0)
+# error The alternative API was deprecated some time ago, and was removed in FreeRTOS V9.0 0
+#endif
+
+/* Set configUSE_TASK_FPU_SUPPORT to 0 to omit floating point support even
+if floating point hardware is otherwise supported by the FreeRTOS port in use.
+This constant is not supported by all FreeRTOS ports that include floating
+point support. */
+#ifndef configUSE_TASK_FPU_SUPPORT
+# define configUSE_TASK_FPU_SUPPORT 1
+#endif
+
+/* Set configENABLE_MPU to 1 to enable MPU support and 0 to disable it. This is
+currently used in ARMv8M ports. */
+#ifndef configENABLE_MPU
+# define configENABLE_MPU 0
+#endif
+
+/* Set configENABLE_FPU to 1 to enable FPU support and 0 to disable it. This is
+currently used in ARMv8M ports. */
+#ifndef configENABLE_FPU
+# define configENABLE_FPU 1
+#endif
+
+/* Set configENABLE_TRUSTZONE to 1 enable TrustZone support and 0 to disable it.
+This is currently used in ARMv8M ports. */
+#ifndef configENABLE_TRUSTZONE
+# define configENABLE_TRUSTZONE 1
+#endif
+
+/* Set configRUN_FREERTOS_SECURE_ONLY to 1 to run the FreeRTOS ARMv8M port on
+the Secure Side only. */
+#ifndef configRUN_FREERTOS_SECURE_ONLY
+# define configRUN_FREERTOS_SECURE_ONLY 0
+#endif
+
+/* Sometimes the FreeRTOSConfig.h settings only allow a task to be created using
+ * dynamically allocated RAM, in which case when any task is deleted it is known
+ * that both the task's stack and TCB need to be freed. Sometimes the
+ * FreeRTOSConfig.h settings only allow a task to be created using statically
+ * allocated RAM, in which case when any task is deleted it is known that
+ * neither the task's stack or TCB should be freed. Sometimes the
+ * FreeRTOSConfig.h settings allow a task to be created using either statically
+ * or dynamically allocated RAM, in which case a member of the TCB is used to
+ * record whether the stack and/or TCB were allocated statically or dynamically,
+ * so when a task is deleted the RAM that was allocated dynamically is freed
+ * again and no attempt is made to free the RAM that was allocated statically.
+ * tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE is only true if it is possible for
+ * a task to be created using either statically or dynamically allocated RAM.
+ * Note that if portUSING_MPU_WRAPPERS is 1 then a protected task can be created
+ * with a statically allocated stack and a dynamically allocated TCB.
+ *
+ * The following table lists various combinations of portUSING_MPU_WRAPPERS,
+ * configSUPPORT_DYNAMIC_ALLOCATION and configSUPPORT_STATIC_ALLOCATION and
+ * when it is possible to have both static and dynamic allocation:
+ * +-----+---------+--------+-----------------------------+-----------------------------------+------------------+-----------+
+ * | MPU | Dynamic | Static | Available Functions         | Possible Allocations              | Both Dynamic and | Need Free |
+ * |     |         |        |                             |                                   | Static Possible  |           |
+ * +-----+---------+--------+-----------------------------+-----------------------------------+------------------+-----------+
+ * | 0   | 0       | 1      | xTaskCreateStatic           | TCB - Static, Stack - Static      | No               | No        |
+ * +-----|---------|--------|-----------------------------|-----------------------------------|------------------|-----------|
+ * | 0   | 1       | 0      | xTaskCreate                 | TCB - Dynamic, Stack - Dynamic    | No               | Yes       |
+ * +-----|---------|--------|-----------------------------|-----------------------------------|------------------|-----------|
+ * | 0   | 1       | 1      | xTaskCreate,                | 1. TCB - Dynamic, Stack - Dynamic | Yes              | Yes       |
+ * |     |         |        | xTaskCreateStatic           | 2. TCB - Static, Stack - Static   |                  |           |
+ * +-----|---------|--------|-----------------------------|-----------------------------------|------------------|-----------|
+ * | 1   | 0       | 1      | xTaskCreateStatic,          | TCB - Static, Stack - Static      | No               | No        |
+ * |     |         |        | xTaskCreateRestrictedStatic |                                   |                  |           |
+ * +-----|---------|--------|-----------------------------|-----------------------------------|------------------|-----------|
+ * | 1   | 1       | 0      | xTaskCreate,                | 1. TCB - Dynamic, Stack - Dynamic | Yes              | Yes       |
+ * |     |         |        | xTaskCreateRestricted       | 2. TCB - Dynamic, Stack - Static  |                  |           |
+ * +-----|---------|--------|-----------------------------|-----------------------------------|------------------|-----------|
+ * | 1   | 1       | 1      | xTaskCreate,                | 1. TCB - Dynamic, Stack - Dynamic | Yes              | Yes       |
+ * |     |         |        | xTaskCreateStatic,          | 2. TCB - Dynamic, Stack - Static  |                  |           |
+ * |     |         |        | xTaskCreateRestricted,      | 3. TCB - Static, Stack - Static   |                  |           |
+ * |     |         |        | xTaskCreateRestrictedStatic |                                   |                  |           |
+ * +-----+---------+--------+-----------------------------+-----------------------------------+------------------+-----------+
+ */
+#define tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE \
+ (((portUSING_MPU_WRAPPERS == 0) && \
+ (configSUPPORT_DYNAMIC_ALLOCATION == 1) && \
+ (configSUPPORT_STATIC_ALLOCATION == 1)) || \
+ ((portUSING_MPU_WRAPPERS == 1) && \
+ (configSUPPORT_DYNAMIC_ALLOCATION == 1)))
+
+/*
+ * In line with software engineering best practice, FreeRTOS implements a strict
+ * data hiding policy, so the real structures used by FreeRTOS to maintain the
+ * state of tasks, queues, semaphores, etc. are not accessible to the
+ * application code. However, if the application writer wants to statically
+ * allocate such an object then the size of the object needs to be known. Dummy
+ * structures that are guaranteed to have the same size and alignment
+ * requirements of the real objects are used for this purpose. The dummy list
+ * and list item structures below are used for inclusion in such a dummy
+ * structure.
+ */
+/* Storage-only stand-in for the kernel's private list item type: guaranteed
+to match its size and alignment (see comment above). Members are deliberately
+obfuscated and must never be accessed by application code. */
+struct xSTATIC_LIST_ITEM {
+#if (configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES == 1)
+ TickType_t xDummy1; /* present only when list integrity checking is enabled */
+#endif
+ TickType_t xDummy2;
+ void *pvDummy3[4];
+#if (configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES == 1)
+ TickType_t xDummy4; /* second integrity-check word, same condition as xDummy1 */
+#endif
+};
+typedef struct xSTATIC_LIST_ITEM StaticListItem_t;
+
+/* See the comments above the struct xSTATIC_LIST_ITEM definition. */
+struct xSTATIC_MINI_LIST_ITEM {
+#if (configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES == 1)
+ TickType_t xDummy1; /* present only when list integrity checking is enabled */
+#endif
+ TickType_t xDummy2;
+ void *pvDummy3[2]; /* two pointers here vs four in StaticListItem_t */
+};
+typedef struct xSTATIC_MINI_LIST_ITEM StaticMiniListItem_t;
+
+/* See the comments above the struct xSTATIC_LIST_ITEM definition. */
+/* Storage-only stand-in for the kernel's private list type; sized to match
+it under every FreeRTOSConfig.h combination. Do not access members. */
+typedef struct xSTATIC_LIST {
+#if (configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES == 1)
+ TickType_t xDummy1; /* present only when list integrity checking is enabled */
+#endif
+ UBaseType_t uxDummy2;
+ void *pvDummy3;
+ StaticMiniListItem_t xDummy4; /* lists embed one mini item (end marker) */
+#if (configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES == 1)
+ TickType_t xDummy5;
+#endif
+} StaticList_t;
+
+/*
+ * In line with software engineering best practice, especially when supplying a
+ * library that is likely to change in future versions, FreeRTOS implements a
+ * strict data hiding policy. This means the Task structure used internally by
+ * FreeRTOS is not accessible to application code. However, if the application
+ * writer wants to statically allocate the memory required to create a task then
+ * the size of the task object needs to be known. The StaticTask_t structure
+ * below is provided for this purpose. Its sizes and alignment requirements are
+ * guaranteed to match those of the genuine structure, no matter which
+ * architecture is being used, and no matter how the values in FreeRTOSConfig.h
+ * are set. Its contents are somewhat obfuscated in the hope users will
+ * recognise that it would be unwise to make direct use of the structure
+ * members.
+ */
+/* Opaque storage for a task control block, for use with xTaskCreateStatic().
+Each conditional member below is compiled in under exactly the same
+configuration condition as the corresponding member of the real TCB, which is
+what keeps the two structures the same size whatever FreeRTOSConfig.h says.
+Members must never be accessed by application code. */
+typedef struct xSTATIC_TCB {
+ void *pxDummy1;
+#if (portUSING_MPU_WRAPPERS == 1)
+ xMPU_SETTINGS xDummy2; /* MPU ports carry per-task MPU settings */
+#endif
+ StaticListItem_t xDummy3[2];
+ UBaseType_t uxDummy5;
+ void *pxDummy6;
+ uint8_t ucDummy7[configMAX_TASK_NAME_LEN]; /* sized by the task-name config */
+#if ((portSTACK_GROWTH > 0) || (configRECORD_STACK_HIGH_ADDRESS == 1))
+ void *pxDummy8;
+#endif
+#if (portCRITICAL_NESTING_IN_TCB == 1)
+ UBaseType_t uxDummy9;
+#endif
+#if (configUSE_TRACE_FACILITY == 1)
+ UBaseType_t uxDummy10[2];
+#endif
+#if (configUSE_MUTEXES == 1)
+ UBaseType_t uxDummy12[2];
+#endif
+#if (configUSE_APPLICATION_TASK_TAG == 1)
+ void *pxDummy14;
+#endif
+#if (configNUM_THREAD_LOCAL_STORAGE_POINTERS > 0)
+ void *pvDummy15[configNUM_THREAD_LOCAL_STORAGE_POINTERS];
+#endif
+#if (configGENERATE_RUN_TIME_STATS == 1)
+ uint32_t ulDummy16;
+#endif
+#if (configUSE_NEWLIB_REENTRANT == 1)
+ struct _reent xDummy17; /* newlib reentrancy context, one per task */
+#endif
+#if (configUSE_TASK_NOTIFICATIONS == 1)
+ uint32_t ulDummy18;
+ uint8_t ucDummy19;
+#endif
+#if (tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0)
+ uint8_t uxDummy20; /* records how the TCB/stack were allocated; see table above */
+#endif
+
+#if (INCLUDE_xTaskAbortDelay == 1)
+ uint8_t ucDummy21;
+#endif
+#if (configUSE_POSIX_ERRNO == 1)
+ int iDummy22;
+#endif
+} StaticTask_t;
+
+/*
+ * In line with software engineering best practice, especially when supplying a
+ * library that is likely to change in future versions, FreeRTOS implements a
+ * strict data hiding policy. This means the Queue structure used internally by
+ * FreeRTOS is not accessible to application code. However, if the application
+ * writer wants to statically allocate the memory required to create a queue
+ * then the size of the queue object needs to be known. The StaticQueue_t
+ * structure below is provided for this purpose. Its sizes and alignment
+ * requirements are guaranteed to match those of the genuine structure, no
+ * matter which architecture is being used, and no matter how the values in
+ * FreeRTOSConfig.h are set. Its contents are somewhat obfuscated in the hope
+ * users will recognise that it would be unwise to make direct use of the
+ * structure members.
+ */
+/* Opaque storage for a queue, for use with xQueueCreateStatic(). Conditional
+members track the same configuration options as the real queue structure so
+the sizes stay identical. Members must never be accessed directly. */
+typedef struct xSTATIC_QUEUE {
+ void *pvDummy1[3];
+
+ union {
+ void *pvDummy2;
+ UBaseType_t uxDummy2;
+ } u;
+
+ StaticList_t xDummy3[2];
+ UBaseType_t uxDummy4[3];
+ uint8_t ucDummy5[2];
+
+#if ( \
+ (configSUPPORT_STATIC_ALLOCATION == 1) && \
+ (configSUPPORT_DYNAMIC_ALLOCATION == 1))
+ uint8_t ucDummy6; /* only when both allocation schemes are available */
+#endif
+
+#if (configUSE_QUEUE_SETS == 1)
+ void *pvDummy7;
+#endif
+
+#if (configUSE_TRACE_FACILITY == 1)
+ UBaseType_t uxDummy8;
+ uint8_t ucDummy9;
+#endif
+
+} StaticQueue_t;
+/* A semaphore occupies exactly the same storage as a queue. */
+typedef StaticQueue_t StaticSemaphore_t;
+
+/*
+ * In line with software engineering best practice, especially when supplying a
+ * library that is likely to change in future versions, FreeRTOS implements a
+ * strict data hiding policy. This means the event group structure used
+ * internally by FreeRTOS is not accessible to application code. However, if
+ * the application writer wants to statically allocate the memory required to
+ * create an event group then the size of the event group object needs to be
+ * known. The StaticEventGroup_t structure below is provided for this purpose.
+ * Its sizes and alignment requirements are guaranteed to match those of the
+ * genuine structure, no matter which architecture is being used, and no matter
+ * how the values in FreeRTOSConfig.h are set. Its contents are somewhat
+ * obfuscated in the hope users will recognise that it would be unwise to make
+ * direct use of the structure members.
+ */
+/* Opaque storage for an event group, for use with xEventGroupCreateStatic().
+Members must never be accessed by application code. */
+typedef struct xSTATIC_EVENT_GROUP {
+ TickType_t xDummy1;
+ StaticList_t xDummy2;
+
+#if (configUSE_TRACE_FACILITY == 1)
+ UBaseType_t uxDummy3;
+#endif
+
+#if ( \
+ (configSUPPORT_STATIC_ALLOCATION == 1) && \
+ (configSUPPORT_DYNAMIC_ALLOCATION == 1))
+ uint8_t ucDummy4; /* only when both allocation schemes are available */
+#endif
+
+} StaticEventGroup_t;
+
+/*
+ * In line with software engineering best practice, especially when supplying a
+ * library that is likely to change in future versions, FreeRTOS implements a
+ * strict data hiding policy. This means the software timer structure used
+ * internally by FreeRTOS is not accessible to application code. However, if
+ * the application writer wants to statically allocate the memory required to
+ * create a software timer then the size of the timer object needs to be known.
+ * The StaticTimer_t structure below is provided for this purpose. Its sizes
+ * and alignment requirements are guaranteed to match those of the genuine
+ * structure, no matter which architecture is being used, and no matter how the
+ * values in FreeRTOSConfig.h are set. Its contents are somewhat obfuscated in
+ * the hope users will recognise that it would be unwise to make direct use of
+ * the structure members.
+ */
+/* Opaque storage for a software timer, for use with xTimerCreateStatic().
+Members must never be accessed by application code. */
+typedef struct xSTATIC_TIMER {
+ void *pvDummy1;
+ StaticListItem_t xDummy2;
+ TickType_t xDummy3;
+ void *pvDummy5;
+ TaskFunction_t pvDummy6;
+#if (configUSE_TRACE_FACILITY == 1)
+ UBaseType_t uxDummy7;
+#endif
+ uint8_t ucDummy8;
+
+} StaticTimer_t;
+
+/*
+ * In line with software engineering best practice, especially when supplying a
+ * library that is likely to change in future versions, FreeRTOS implements a
+ * strict data hiding policy. This means the stream buffer structure used
+ * internally by FreeRTOS is not accessible to application code. However, if
+ * the application writer wants to statically allocate the memory required to
+ * create a stream buffer then the size of the stream buffer object needs to be
+ * known. The StaticStreamBuffer_t structure below is provided for this purpose.
+ * Its size and alignment requirements are guaranteed to match those of the
+ * genuine structure, no matter which architecture is being used, and no matter
+ * how the values in FreeRTOSConfig.h are set. Its contents are somewhat
+ * obfuscated in the hope users will recognise that it would be unwise to make
+ * direct use of the structure members.
+ */
+/* Opaque storage for a stream buffer, for use with
+xStreamBufferCreateStatic(). Members must never be accessed directly. */
+typedef struct xSTATIC_STREAM_BUFFER {
+ size_t uxDummy1[4];
+ void *pvDummy2[3];
+ uint8_t ucDummy3;
+#if (configUSE_TRACE_FACILITY == 1)
+ UBaseType_t uxDummy4;
+#endif
+} StaticStreamBuffer_t;
+
+/* Message buffers are built on stream buffers. */
+typedef StaticStreamBuffer_t StaticMessageBuffer_t;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* INC_FREERTOS_H */
diff --git a/product/rcar/src/CMSIS-FreeRTOS/Source/include/deprecated_definitions.h b/product/rcar/src/CMSIS-FreeRTOS/Source/include/deprecated_definitions.h
new file mode 100644
index 00000000..82bc047a
--- /dev/null
+++ b/product/rcar/src/CMSIS-FreeRTOS/Source/include/deprecated_definitions.h
@@ -0,0 +1,277 @@
+/*
+ * FreeRTOS Kernel V10.3.1
+ * Copyright (C) 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * http://www.FreeRTOS.org
+ * http://aws.amazon.com/freertos
+ *
+ * 1 tab == 4 spaces!
+ */
+
+#ifndef DEPRECATED_DEFINITIONS_H
+#define DEPRECATED_DEFINITIONS_H
+
+/* Each FreeRTOS port has a unique portmacro.h header file. Originally a
+pre-processor definition was used to ensure the pre-processor found the correct
+portmacro.h file for the port being used. That scheme was deprecated in favour
+of setting the compiler's include path such that it found the correct
+portmacro.h file - removing the need for the constant and allowing the
+portmacro.h file to be located anywhere in relation to the port being used. The
+definitions below remain in the code for backward compatibility only. New
+projects should not use them. */
+
+#ifdef OPEN_WATCOM_INDUSTRIAL_PC_PORT
+# include "..\..\Source\portable\owatcom\16bitdos\pc\portmacro.h"
+typedef void(__interrupt __far *pxISR)();
+#endif
+
+#ifdef OPEN_WATCOM_FLASH_LITE_186_PORT
+# include "..\..\Source\portable\owatcom\16bitdos\flsh186\portmacro.h"
+typedef void(__interrupt __far *pxISR)();
+#endif
+
+#ifdef GCC_MEGA_AVR
+# include "../portable/GCC/ATMega323/portmacro.h"
+#endif
+
+#ifdef IAR_MEGA_AVR
+# include "../portable/IAR/ATMega323/portmacro.h"
+#endif
+
+#ifdef MPLAB_PIC24_PORT
+# include "../../Source/portable/MPLAB/PIC24_dsPIC/portmacro.h"
+#endif
+
+#ifdef MPLAB_DSPIC_PORT
+# include "../../Source/portable/MPLAB/PIC24_dsPIC/portmacro.h"
+#endif
+
+#ifdef MPLAB_PIC18F_PORT
+# include "../../Source/portable/MPLAB/PIC18F/portmacro.h"
+#endif
+
+#ifdef MPLAB_PIC32MX_PORT
+# include "../../Source/portable/MPLAB/PIC32MX/portmacro.h"
+#endif
+
+#ifdef _FEDPICC
+# include "libFreeRTOS/Include/portmacro.h"
+#endif
+
+#ifdef SDCC_CYGNAL
+# include "../../Source/portable/SDCC/Cygnal/portmacro.h"
+#endif
+
+#ifdef GCC_ARM7
+# include "../../Source/portable/GCC/ARM7_LPC2000/portmacro.h"
+#endif
+
+#ifdef GCC_ARM7_ECLIPSE
+# include "portmacro.h"
+#endif
+
+#ifdef ROWLEY_LPC23xx
+# include "../../Source/portable/GCC/ARM7_LPC23xx/portmacro.h"
+#endif
+
+#ifdef IAR_MSP430
+# include "..\..\Source\portable\IAR\MSP430\portmacro.h"
+#endif
+
+#ifdef GCC_MSP430
+# include "../../Source/portable/GCC/MSP430F449/portmacro.h"
+#endif
+
+#ifdef ROWLEY_MSP430
+# include "../../Source/portable/Rowley/MSP430F449/portmacro.h"
+#endif
+
+#ifdef ARM7_LPC21xx_KEIL_RVDS
+# include "..\..\Source\portable\RVDS\ARM7_LPC21xx\portmacro.h"
+#endif
+
+#ifdef SAM7_GCC
+# include "../../Source/portable/GCC/ARM7_AT91SAM7S/portmacro.h"
+#endif
+
+#ifdef SAM7_IAR
+# include "..\..\Source\portable\IAR\AtmelSAM7S64\portmacro.h"
+#endif
+
+#ifdef SAM9XE_IAR
+# include "..\..\Source\portable\IAR\AtmelSAM9XE\portmacro.h"
+#endif
+
+#ifdef LPC2000_IAR
+# include "..\..\Source\portable\IAR\LPC2000\portmacro.h"
+#endif
+
+#ifdef STR71X_IAR
+# include "..\..\Source\portable\IAR\STR71x\portmacro.h"
+#endif
+
+#ifdef STR75X_IAR
+# include "..\..\Source\portable\IAR\STR75x\portmacro.h"
+#endif
+
+#ifdef STR75X_GCC
+# include "..\..\Source\portable\GCC\STR75x\portmacro.h"
+#endif
+
+#ifdef STR91X_IAR
+# include "..\..\Source\portable\IAR\STR91x\portmacro.h"
+#endif
+
+#ifdef GCC_H8S
+# include "../../Source/portable/GCC/H8S2329/portmacro.h"
+#endif
+
+#ifdef GCC_AT91FR40008
+# include "../../Source/portable/GCC/ARM7_AT91FR40008/portmacro.h"
+#endif
+
+#ifdef RVDS_ARMCM3_LM3S102
+# include "../../Source/portable/RVDS/ARM_CM3/portmacro.h"
+#endif
+
+#ifdef GCC_ARMCM3_LM3S102
+# include "../../Source/portable/GCC/ARM_CM3/portmacro.h"
+#endif
+
+#ifdef GCC_ARMCM3
+# include "../../Source/portable/GCC/ARM_CM3/portmacro.h"
+#endif
+
+#ifdef IAR_ARM_CM3
+# include "../../Source/portable/IAR/ARM_CM3/portmacro.h"
+#endif
+
+#ifdef IAR_ARMCM3_LM
+# include "../../Source/portable/IAR/ARM_CM3/portmacro.h"
+#endif
+
+#ifdef HCS12_CODE_WARRIOR
+# include "../../Source/portable/CodeWarrior/HCS12/portmacro.h"
+#endif
+
+#ifdef MICROBLAZE_GCC
+# include "../../Source/portable/GCC/MicroBlaze/portmacro.h"
+#endif
+
+#ifdef TERN_EE
+# include "..\..\Source\portable\Paradigm\Tern_EE\small\portmacro.h"
+#endif
+
+#ifdef GCC_HCS12
+# include "../../Source/portable/GCC/HCS12/portmacro.h"
+#endif
+
+#ifdef GCC_MCF5235
+# include "../../Source/portable/GCC/MCF5235/portmacro.h"
+#endif
+
+#ifdef COLDFIRE_V2_GCC
+# include "../../../Source/portable/GCC/ColdFire_V2/portmacro.h"
+#endif
+
+#ifdef COLDFIRE_V2_CODEWARRIOR
+# include "../../Source/portable/CodeWarrior/ColdFire_V2/portmacro.h"
+#endif
+
+#ifdef GCC_PPC405
+# include "../../Source/portable/GCC/PPC405_Xilinx/portmacro.h"
+#endif
+
+#ifdef GCC_PPC440
+# include "../../Source/portable/GCC/PPC440_Xilinx/portmacro.h"
+#endif
+
+#ifdef _16FX_SOFTUNE
+# include "..\..\Source\portable\Softune\MB96340\portmacro.h"
+#endif
+
+#ifdef BCC_INDUSTRIAL_PC_PORT
+/* A short file name has to be used in place of the normal
+FreeRTOSConfig.h when using the Borland compiler. */
+# include "..\portable\BCC\16BitDOS\PC\prtmacro.h"
+# include "frconfig.h"
+typedef void(__interrupt __far *pxISR)();
+#endif
+
+#ifdef BCC_FLASH_LITE_186_PORT
+/* A short file name has to be used in place of the normal
+FreeRTOSConfig.h when using the Borland compiler. */
+# include "..\portable\BCC\16BitDOS\flsh186\prtmacro.h"
+# include "frconfig.h"
+typedef void(__interrupt __far *pxISR)();
+#endif
+
+#ifdef __GNUC__
+# ifdef __AVR32_AVR32A__
+# include "portmacro.h"
+# endif
+#endif
+
+#ifdef __ICCAVR32__
+# ifdef __CORE__
+# if __CORE__ == __AVR32A__
+# include "portmacro.h"
+# endif
+# endif
+#endif
+
+#ifdef __91467D
+# include "portmacro.h"
+#endif
+
+#ifdef __96340
+# include "portmacro.h"
+#endif
+
+#ifdef __IAR_V850ES_Fx3__
+# include "../../Source/portable/IAR/V850ES/portmacro.h"
+#endif
+
+#ifdef __IAR_V850ES_Jx3__
+# include "../../Source/portable/IAR/V850ES/portmacro.h"
+#endif
+
+#ifdef __IAR_V850ES_Jx3_L__
+# include "../../Source/portable/IAR/V850ES/portmacro.h"
+#endif
+
+#ifdef __IAR_V850ES_Jx2__
+# include "../../Source/portable/IAR/V850ES/portmacro.h"
+#endif
+
+#ifdef __IAR_V850ES_Hx2__
+# include "../../Source/portable/IAR/V850ES/portmacro.h"
+#endif
+
+#ifdef __IAR_78K0R_Kx3__
+# include "../../Source/portable/IAR/78K0R/portmacro.h"
+#endif
+
+#ifdef __IAR_78K0R_Kx3L__
+# include "../../Source/portable/IAR/78K0R/portmacro.h"
+#endif
+
+#endif /* DEPRECATED_DEFINITIONS_H */
diff --git a/product/rcar/src/CMSIS-FreeRTOS/Source/include/list.h b/product/rcar/src/CMSIS-FreeRTOS/Source/include/list.h
new file mode 100644
index 00000000..5f80895f
--- /dev/null
+++ b/product/rcar/src/CMSIS-FreeRTOS/Source/include/list.h
@@ -0,0 +1,465 @@
+/*
+ * FreeRTOS Kernel V10.3.1
+ * Copyright (C) 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * http://www.FreeRTOS.org
+ * http://aws.amazon.com/freertos
+ *
+ * 1 tab == 4 spaces!
+ */
+
+/*
+ * This is the list implementation used by the scheduler. While it is tailored
+ * heavily for the schedulers needs, it is also available for use by
+ * application code.
+ *
+ * list_ts can only store pointers to list_item_ts. Each ListItem_t contains a
+ * numeric value (xItemValue). Most of the time the lists are sorted in
+ * descending item value order.
+ *
+ * Lists are created already containing one list item. The value of this
+ * item is the maximum possible that can be stored, it is therefore always at
+ * the end of the list and acts as a marker. The list member pxHead always
+ * points to this marker - even though it is at the tail of the list. This
+ * is because the tail contains a wrap back pointer to the true head of
+ * the list.
+ *
+ * In addition to its value, each list item contains a pointer to the next
+ * item in the list (pxNext), a pointer to the list it is in (pxContainer)
+ * and a pointer back to the object that contains it. These latter two
+ * pointers are included for efficiency of list manipulation. There is
+ * effectively a two way link between the object containing the list item and
+ * the list item itself.
+ *
+ *
+ * \page ListIntroduction List Implementation
+ * \ingroup FreeRTOSIntro
+ */
+
+#ifndef INC_FREERTOS_H
+# error FreeRTOS.h must be included before list.h
+#endif
+
+#ifndef LIST_H
+# define LIST_H
+
+/*
+ * The list structure members are modified from within interrupts, and therefore
+ * by rights should be declared volatile. However, they are only modified in a
+ * functionally atomic way (within critical sections or with the scheduler
+ * suspended) and are either passed by reference into a function or indexed via
+ * a volatile variable. Therefore, in all use cases tested so far, the volatile
+ * qualifier can be omitted in order to provide a moderate performance
+ * improvement without adversely affecting functional behaviour. The assembly
+ * instructions generated by the IAR, ARM and GCC compilers when the respective
+ * compiler's options were set for maximum optimisation has been inspected and
+ * deemed to be as intended. That said, as compiler technology advances, and
+ * especially if aggressive cross module optimisation is used (a use case that
+ * has not been exercised to any great extent) then it is feasible that the
+ * volatile qualifier will be needed for correct optimisation. It is expected
+ * that a compiler removing essential code because, without the volatile
+ * qualifier on the list structure members and with aggressive cross module
+ * optimisation, the compiler deemed the code unnecessary will result in
+ * complete and obvious failure of the scheduler. If this is ever experienced
+ * then the volatile qualifier can be inserted in the relevant places within the
+ * list structures by simply defining configLIST_VOLATILE to volatile in
+ * FreeRTOSConfig.h (as per the example at the bottom of this comment block).
+ * If configLIST_VOLATILE is not defined then the preprocessor directives below
+ * will simply #define configLIST_VOLATILE away completely.
+ *
+ * To use volatile list structure members then add the following line to
+ * FreeRTOSConfig.h (without the quotes):
+ * "#define configLIST_VOLATILE volatile"
+ */
+# ifndef configLIST_VOLATILE
+# define configLIST_VOLATILE
+# endif /* configSUPPORT_CROSS_MODULE_OPTIMISATION */
+
+# ifdef __cplusplus
+extern "C" {
+# endif
+
+/* Macros that can be used to place known values within the list structures,
+then check that the known values do not get corrupted during the execution of
+the application. These may catch the list data structures being overwritten in
+memory. They will not catch data errors caused by incorrect configuration or
+use of FreeRTOS.*/
+# if (configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES == 0)
+/* Define the macros to do nothing. */
+# define listFIRST_LIST_ITEM_INTEGRITY_CHECK_VALUE
+# define listSECOND_LIST_ITEM_INTEGRITY_CHECK_VALUE
+# define listFIRST_LIST_INTEGRITY_CHECK_VALUE
+# define listSECOND_LIST_INTEGRITY_CHECK_VALUE
+# define listSET_FIRST_LIST_ITEM_INTEGRITY_CHECK_VALUE(pxItem)
+# define listSET_SECOND_LIST_ITEM_INTEGRITY_CHECK_VALUE(pxItem)
+# define listSET_LIST_INTEGRITY_CHECK_1_VALUE(pxList)
+# define listSET_LIST_INTEGRITY_CHECK_2_VALUE(pxList)
+# define listTEST_LIST_ITEM_INTEGRITY(pxItem)
+# define listTEST_LIST_INTEGRITY(pxList)
+# else
+/* Define macros that add new members into the list structures. */
+# define listFIRST_LIST_ITEM_INTEGRITY_CHECK_VALUE \
+ TickType_t xListItemIntegrityValue1;
+# define listSECOND_LIST_ITEM_INTEGRITY_CHECK_VALUE \
+ TickType_t xListItemIntegrityValue2;
+# define listFIRST_LIST_INTEGRITY_CHECK_VALUE \
+ TickType_t xListIntegrityValue1;
+# define listSECOND_LIST_INTEGRITY_CHECK_VALUE \
+ TickType_t xListIntegrityValue2;
+
+/* Define macros that set the new structure members to known values. */
+# define listSET_FIRST_LIST_ITEM_INTEGRITY_CHECK_VALUE(pxItem) \
+ (pxItem)->xListItemIntegrityValue1 = pdINTEGRITY_CHECK_VALUE
+# define listSET_SECOND_LIST_ITEM_INTEGRITY_CHECK_VALUE(pxItem) \
+ (pxItem)->xListItemIntegrityValue2 = pdINTEGRITY_CHECK_VALUE
+# define listSET_LIST_INTEGRITY_CHECK_1_VALUE(pxList) \
+ (pxList)->xListIntegrityValue1 = pdINTEGRITY_CHECK_VALUE
+# define listSET_LIST_INTEGRITY_CHECK_2_VALUE(pxList) \
+ (pxList)->xListIntegrityValue2 = pdINTEGRITY_CHECK_VALUE
+
+/* Define macros that will assert if one of the structure members does not
+contain its expected value. */
+# define listTEST_LIST_ITEM_INTEGRITY(pxItem) \
+ configASSERT( \
+ ((pxItem)->xListItemIntegrityValue1 == \
+ pdINTEGRITY_CHECK_VALUE) && \
+ ((pxItem)->xListItemIntegrityValue2 == \
+ pdINTEGRITY_CHECK_VALUE))
+# define listTEST_LIST_INTEGRITY(pxList) \
+ configASSERT( \
+ ((pxList)->xListIntegrityValue1 == pdINTEGRITY_CHECK_VALUE) && \
+ ((pxList)->xListIntegrityValue2 == pdINTEGRITY_CHECK_VALUE))
+# endif /* configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES */
+
+/*
+ * Definition of the only type of object that a list can contain.
+ */
+struct xLIST;
+struct xLIST_ITEM {
+ listFIRST_LIST_ITEM_INTEGRITY_CHECK_VALUE /*< Set to a known value if
+ configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES
+ is set to 1. */
+ configLIST_VOLATILE TickType_t xItemValue; /*< The value
+ being
+ listed. In
+ most cases
+ this is
+ used to
+ sort the
+ list in
+ descending
+ order. */
+ struct xLIST_ITEM *configLIST_VOLATILE
+ pxNext; /*< Pointer to the next ListItem_t in the list. */
+ struct xLIST_ITEM *configLIST_VOLATILE
+ pxPrevious; /*< Pointer to the previous ListItem_t in the list. */
+ void *
+ pvOwner; /*< Pointer to the object (normally a TCB) that contains the
+ list item. There is therefore a two way link between the
+ object containing the list item and the list item itself. */
+ struct xLIST *configLIST_VOLATILE
+ pxContainer; /*< Pointer to the list in which this list item is placed
+ (if any). */
+ listSECOND_LIST_ITEM_INTEGRITY_CHECK_VALUE /*< Set to a known value if
+ configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES
+ is set to 1. */
+};
+typedef struct xLIST_ITEM ListItem_t; /* For some reason lint wants this as two
+ separate definitions. */
+
+struct xMINI_LIST_ITEM {
+ listFIRST_LIST_ITEM_INTEGRITY_CHECK_VALUE /*< Set to a known value if
+ configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES
+ is set to 1. */
+ configLIST_VOLATILE TickType_t xItemValue;
+ struct xLIST_ITEM *configLIST_VOLATILE pxNext;
+ struct xLIST_ITEM *configLIST_VOLATILE pxPrevious;
+};
+typedef struct xMINI_LIST_ITEM MiniListItem_t;
+
+/*
+ * Definition of the type of queue used by the scheduler.
+ */
+typedef struct xLIST {
+ listFIRST_LIST_INTEGRITY_CHECK_VALUE /*< Set to a known value if
+ configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES
+ is set to 1. */
+ volatile UBaseType_t uxNumberOfItems;
+ ListItem_t *configLIST_VOLATILE
+ pxIndex; /*< Used to walk through the list. Points to the last item
+ returned by a call to listGET_OWNER_OF_NEXT_ENTRY (). */
+ MiniListItem_t xListEnd; /*< List item that contains the maximum possible
+ item value meaning it is always at the end of
+ the list and is therefore used as a marker. */
+ listSECOND_LIST_INTEGRITY_CHECK_VALUE /*< Set to a known value if
+ configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES
+ is set to 1. */
+} List_t;
+
+/*
+ * Access macro to set the owner of a list item. The owner of a list item
+ * is the object (usually a TCB) that contains the list item.
+ *
+ * \page listSET_LIST_ITEM_OWNER listSET_LIST_ITEM_OWNER
+ * \ingroup LinkedList
+ */
+# define listSET_LIST_ITEM_OWNER(pxListItem, pxOwner) \
+ ((pxListItem)->pvOwner = (void *)(pxOwner))
+
+/*
+ * Access macro to get the owner of a list item. The owner of a list item
+ * is the object (usually a TCB) that contains the list item.
+ *
+ * \page listGET_LIST_ITEM_OWNER listGET_LIST_ITEM_OWNER
+ * \ingroup LinkedList
+ */
+# define listGET_LIST_ITEM_OWNER(pxListItem) ((pxListItem)->pvOwner)
+
+/*
+ * Access macro to set the value of the list item. In most cases the value is
+ * used to sort the list in descending order.
+ *
+ * \page listSET_LIST_ITEM_VALUE listSET_LIST_ITEM_VALUE
+ * \ingroup LinkedList
+ */
+# define listSET_LIST_ITEM_VALUE(pxListItem, xValue) \
+ ((pxListItem)->xItemValue = (xValue))
+
+/*
+ * Access macro to retrieve the value of the list item. The value can
+ * represent anything - for example the priority of a task, or the time at
+ * which a task should be unblocked.
+ *
+ * \page listGET_LIST_ITEM_VALUE listGET_LIST_ITEM_VALUE
+ * \ingroup LinkedList
+ */
+# define listGET_LIST_ITEM_VALUE(pxListItem) ((pxListItem)->xItemValue)
+
+/*
+ * Access macro to retrieve the value of the list item at the head of a given
+ * list.
+ *
+ * \page listGET_ITEM_VALUE_OF_HEAD_ENTRY listGET_ITEM_VALUE_OF_HEAD_ENTRY
+ * \ingroup LinkedList
+ */
+# define listGET_ITEM_VALUE_OF_HEAD_ENTRY(pxList) \
+ (((pxList)->xListEnd).pxNext->xItemValue)
+
+/*
+ * Return the list item at the head of the list.
+ *
+ * \page listGET_HEAD_ENTRY listGET_HEAD_ENTRY
+ * \ingroup LinkedList
+ */
+# define listGET_HEAD_ENTRY(pxList) (((pxList)->xListEnd).pxNext)
+
+/*
+ * Return the next list item.
+ *
+ * \page listGET_NEXT listGET_NEXT
+ * \ingroup LinkedList
+ */
+# define listGET_NEXT(pxListItem) ((pxListItem)->pxNext)
+
+/*
+ * Return the list item that marks the end of the list
+ *
+ * \page listGET_END_MARKER listGET_END_MARKER
+ * \ingroup LinkedList
+ */
+# define listGET_END_MARKER(pxList) \
+ ((ListItem_t const *)(&((pxList)->xListEnd)))
+
+/*
+ * Access macro to determine if a list contains any items. The macro will
+ * only have the value true if the list is empty.
+ *
+ * \page listLIST_IS_EMPTY listLIST_IS_EMPTY
+ * \ingroup LinkedList
+ */
+# define listLIST_IS_EMPTY(pxList) \
+ (((pxList)->uxNumberOfItems == (UBaseType_t)0) ? pdTRUE : pdFALSE)
+
+/*
+ * Access macro to return the number of items in the list.
+ */
+# define listCURRENT_LIST_LENGTH(pxList) ((pxList)->uxNumberOfItems)
+
+/*
+ * Access function to obtain the owner of the next entry in a list.
+ *
+ * The list member pxIndex is used to walk through a list. Calling
+ * listGET_OWNER_OF_NEXT_ENTRY increments pxIndex to the next item in the list
+ * and returns that entry's pxOwner parameter. Using multiple calls to this
+ * function it is therefore possible to move through every item contained in
+ * a list.
+ *
+ * The pxOwner parameter of a list item is a pointer to the object that owns
+ * the list item. In the scheduler this is normally a task control block.
+ * The pxOwner parameter effectively creates a two way link between the list
+ * item and its owner.
+ *
+ * @param pxTCB pxTCB is set to the address of the owner of the next list item.
+ * @param pxList The list from which the next item owner is to be returned.
+ *
+ * \page listGET_OWNER_OF_NEXT_ENTRY listGET_OWNER_OF_NEXT_ENTRY
+ * \ingroup LinkedList
+ */
+# define listGET_OWNER_OF_NEXT_ENTRY(pxTCB, pxList) \
+ { \
+ List_t *const pxConstList = (pxList); \
+ /* Increment the index to the next item and return the item, \
+ * ensuring */ \
+ /* we don't return the marker used at the end of the list. */ \
+ (pxConstList)->pxIndex = (pxConstList)->pxIndex->pxNext; \
+ if ((void *)(pxConstList)->pxIndex == \
+ (void *)&((pxConstList)->xListEnd)) { \
+ (pxConstList)->pxIndex = (pxConstList)->pxIndex->pxNext; \
+ } \
+ (pxTCB) = (pxConstList)->pxIndex->pvOwner; \
+ }
+
+/*
+ * Access function to obtain the owner of the first entry in a list. Lists
+ * are normally sorted in ascending item value order.
+ *
+ * This function returns the pxOwner member of the first item in the list.
+ * The pxOwner parameter of a list item is a pointer to the object that owns
+ * the list item. In the scheduler this is normally a task control block.
+ * The pxOwner parameter effectively creates a two way link between the list
+ * item and its owner.
+ *
+ * @param pxList The list from which the owner of the head item is to be
+ * returned.
+ *
+ * \page listGET_OWNER_OF_HEAD_ENTRY listGET_OWNER_OF_HEAD_ENTRY
+ * \ingroup LinkedList
+ */
+# define listGET_OWNER_OF_HEAD_ENTRY(pxList) \
+ ((&((pxList)->xListEnd))->pxNext->pvOwner)
+
+/*
+ * Check to see if a list item is within a list. The list item maintains a
+ * "container" pointer that points to the list it is in. All this macro does
+ * is check to see if the container and the list match.
+ *
+ * @param pxList The list we want to know if the list item is within.
+ * @param pxListItem The list item we want to know if is in the list.
+ * @return pdTRUE if the list item is in the list, otherwise pdFALSE.
+ */
+# define listIS_CONTAINED_WITHIN(pxList, pxListItem) \
+ (((pxListItem)->pxContainer == (pxList)) ? (pdTRUE) : (pdFALSE))
+
+/*
+ * Return the list a list item is contained within (referenced from).
+ *
+ * @param pxListItem The list item being queried.
+ * @return A pointer to the List_t object that references the pxListItem
+ */
+# define listLIST_ITEM_CONTAINER(pxListItem) ((pxListItem)->pxContainer)
+
+/*
+ * This provides a crude means of knowing if a list has been initialised, as
+ * pxList->xListEnd.xItemValue is set to portMAX_DELAY by the vListInitialise()
+ * function.
+ */
+# define listLIST_IS_INITIALISED(pxList) \
+ ((pxList)->xListEnd.xItemValue == portMAX_DELAY)
+
+/*
+ * Must be called before a list is used! This initialises all the members
+ * of the list structure and inserts the xListEnd item into the list as a
+ * marker to the back of the list.
+ *
+ * @param pxList Pointer to the list being initialised.
+ *
+ * \page vListInitialise vListInitialise
+ * \ingroup LinkedList
+ */
+void vListInitialise(List_t *const pxList) PRIVILEGED_FUNCTION;
+
+/*
+ * Must be called before a list item is used. This sets the list container to
+ * null so the item does not think that it is already contained in a list.
+ *
+ * @param pxItem Pointer to the list item being initialised.
+ *
+ * \page vListInitialiseItem vListInitialiseItem
+ * \ingroup LinkedList
+ */
+void vListInitialiseItem(ListItem_t *const pxItem) PRIVILEGED_FUNCTION;
+
+/*
+ * Insert a list item into a list. The item will be inserted into the list in
+ * a position determined by its item value (descending item value order).
+ *
+ * @param pxList The list into which the item is to be inserted.
+ *
+ * @param pxNewListItem The item that is to be placed in the list.
+ *
+ * \page vListInsert vListInsert
+ * \ingroup LinkedList
+ */
+void vListInsert(List_t *const pxList, ListItem_t *const pxNewListItem)
+ PRIVILEGED_FUNCTION;
+
+/*
+ * Insert a list item into a list. The item will be inserted in a position
+ * such that it will be the last item within the list returned by multiple
+ * calls to listGET_OWNER_OF_NEXT_ENTRY.
+ *
+ * The list member pxIndex is used to walk through a list. Calling
+ * listGET_OWNER_OF_NEXT_ENTRY increments pxIndex to the next item in the list.
+ * Placing an item in a list using vListInsertEnd effectively places the item
+ * in the list position pointed to by pxIndex. This means that every other
+ * item within the list will be returned by listGET_OWNER_OF_NEXT_ENTRY before
+ * the pxIndex parameter again points to the item being inserted.
+ *
+ * @param pxList The list into which the item is to be inserted.
+ *
+ * @param pxNewListItem The list item to be inserted into the list.
+ *
+ * \page vListInsertEnd vListInsertEnd
+ * \ingroup LinkedList
+ */
+void vListInsertEnd(List_t *const pxList, ListItem_t *const pxNewListItem)
+ PRIVILEGED_FUNCTION;
+
+/*
+ * Remove an item from a list. The list item has a pointer to the list that
+ * it is in, so only the list item need be passed into the function.
+ *
+ * @param uxListRemove The item to be removed. The item will remove itself from
+ * the list pointed to by its pxContainer parameter.
+ *
+ * @return The number of items that remain in the list after the list item has
+ * been removed.
+ *
+ * \page uxListRemove uxListRemove
+ * \ingroup LinkedList
+ */
+UBaseType_t uxListRemove(ListItem_t *const pxItemToRemove) PRIVILEGED_FUNCTION;
+
+# ifdef __cplusplus
+}
+# endif
+
+#endif
diff --git a/product/rcar/src/CMSIS-FreeRTOS/Source/include/mpu_wrappers.h b/product/rcar/src/CMSIS-FreeRTOS/Source/include/mpu_wrappers.h
new file mode 100644
index 00000000..e59c57ad
--- /dev/null
+++ b/product/rcar/src/CMSIS-FreeRTOS/Source/include/mpu_wrappers.h
@@ -0,0 +1,195 @@
+/*
+ * FreeRTOS Kernel V10.3.1
+ * Copyright (C) 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * http://www.FreeRTOS.org
+ * http://aws.amazon.com/freertos
+ *
+ * 1 tab == 4 spaces!
+ */
+
+#ifndef MPU_WRAPPERS_H
+#define MPU_WRAPPERS_H
+
+/* This file redefines API functions to be called through a wrapper macro, but
+only for ports that are using the MPU. */
+#ifdef portUSING_MPU_WRAPPERS
+
+/* MPU_WRAPPERS_INCLUDED_FROM_API_FILE will be defined when this file is
+included from queue.c or task.c to prevent it from having an effect within
+those files. */
+# ifndef MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+
+/*
+ * Map standard (non MPU) API functions to equivalents that start
+ * "MPU_". This will cause the application code to call the MPU_
+ * version, which wraps the non-MPU version with privilege promoting
+ * then demoting code, so the kernel code always runs with full
+ * privileges.
+ */
+
+/* Map standard tasks.h API functions to the MPU equivalents. */
+# define xTaskCreate MPU_xTaskCreate
+# define xTaskCreateStatic MPU_xTaskCreateStatic
+# define xTaskCreateRestricted MPU_xTaskCreateRestricted
+# define vTaskAllocateMPURegions MPU_vTaskAllocateMPURegions
+# define vTaskDelete MPU_vTaskDelete
+# define vTaskDelay MPU_vTaskDelay
+# define vTaskDelayUntil MPU_vTaskDelayUntil
+# define xTaskAbortDelay MPU_xTaskAbortDelay
+# define uxTaskPriorityGet MPU_uxTaskPriorityGet
+# define eTaskGetState MPU_eTaskGetState
+# define vTaskGetInfo MPU_vTaskGetInfo
+# define vTaskPrioritySet MPU_vTaskPrioritySet
+# define vTaskSuspend MPU_vTaskSuspend
+# define vTaskResume MPU_vTaskResume
+# define vTaskSuspendAll MPU_vTaskSuspendAll
+# define xTaskResumeAll MPU_xTaskResumeAll
+# define xTaskGetTickCount MPU_xTaskGetTickCount
+# define uxTaskGetNumberOfTasks MPU_uxTaskGetNumberOfTasks
+# define pcTaskGetName MPU_pcTaskGetName
+# define xTaskGetHandle MPU_xTaskGetHandle
+# define uxTaskGetStackHighWaterMark MPU_uxTaskGetStackHighWaterMark
+# define uxTaskGetStackHighWaterMark2 MPU_uxTaskGetStackHighWaterMark2
+# define vTaskSetApplicationTaskTag MPU_vTaskSetApplicationTaskTag
+# define xTaskGetApplicationTaskTag MPU_xTaskGetApplicationTaskTag
+# define vTaskSetThreadLocalStoragePointer \
+ MPU_vTaskSetThreadLocalStoragePointer
+# define pvTaskGetThreadLocalStoragePointer \
+ MPU_pvTaskGetThreadLocalStoragePointer
+# define xTaskCallApplicationTaskHook MPU_xTaskCallApplicationTaskHook
+# define xTaskGetIdleTaskHandle MPU_xTaskGetIdleTaskHandle
+# define uxTaskGetSystemState MPU_uxTaskGetSystemState
+# define vTaskList MPU_vTaskList
+# define vTaskGetRunTimeStats MPU_vTaskGetRunTimeStats
+# define ulTaskGetIdleRunTimeCounter MPU_ulTaskGetIdleRunTimeCounter
+# define xTaskGenericNotify MPU_xTaskGenericNotify
+# define xTaskNotifyWait MPU_xTaskNotifyWait
+# define ulTaskNotifyTake MPU_ulTaskNotifyTake
+# define xTaskNotifyStateClear MPU_xTaskNotifyStateClear
+# define ulTaskNotifyValueClear MPU_ulTaskNotifyValueClear
+# define xTaskCatchUpTicks MPU_xTaskCatchUpTicks
+
+# define xTaskGetCurrentTaskHandle MPU_xTaskGetCurrentTaskHandle
+# define vTaskSetTimeOutState MPU_vTaskSetTimeOutState
+# define xTaskCheckForTimeOut MPU_xTaskCheckForTimeOut
+# define xTaskGetSchedulerState MPU_xTaskGetSchedulerState
+
+/* Map standard queue.h API functions to the MPU equivalents. */
+# define xQueueGenericSend MPU_xQueueGenericSend
+# define xQueueReceive MPU_xQueueReceive
+# define xQueuePeek MPU_xQueuePeek
+# define xQueueSemaphoreTake MPU_xQueueSemaphoreTake
+# define uxQueueMessagesWaiting MPU_uxQueueMessagesWaiting
+# define uxQueueSpacesAvailable MPU_uxQueueSpacesAvailable
+# define vQueueDelete MPU_vQueueDelete
+# define xQueueCreateMutex MPU_xQueueCreateMutex
+# define xQueueCreateMutexStatic MPU_xQueueCreateMutexStatic
+# define xQueueCreateCountingSemaphore MPU_xQueueCreateCountingSemaphore
+# define xQueueCreateCountingSemaphoreStatic \
+ MPU_xQueueCreateCountingSemaphoreStatic
+# define xQueueGetMutexHolder MPU_xQueueGetMutexHolder
+# define xQueueTakeMutexRecursive MPU_xQueueTakeMutexRecursive
+# define xQueueGiveMutexRecursive MPU_xQueueGiveMutexRecursive
+# define xQueueGenericCreate MPU_xQueueGenericCreate
+# define xQueueGenericCreateStatic MPU_xQueueGenericCreateStatic
+# define xQueueCreateSet MPU_xQueueCreateSet
+# define xQueueAddToSet MPU_xQueueAddToSet
+# define xQueueRemoveFromSet MPU_xQueueRemoveFromSet
+# define xQueueSelectFromSet MPU_xQueueSelectFromSet
+# define xQueueGenericReset MPU_xQueueGenericReset
+
+# if (configQUEUE_REGISTRY_SIZE > 0)
+# define vQueueAddToRegistry MPU_vQueueAddToRegistry
+# define vQueueUnregisterQueue MPU_vQueueUnregisterQueue
+# define pcQueueGetName MPU_pcQueueGetName
+# endif
+
+/* Map standard timer.h API functions to the MPU equivalents. */
+# define xTimerCreate MPU_xTimerCreate
+# define xTimerCreateStatic MPU_xTimerCreateStatic
+# define pvTimerGetTimerID MPU_pvTimerGetTimerID
+# define vTimerSetTimerID MPU_vTimerSetTimerID
+# define xTimerIsTimerActive MPU_xTimerIsTimerActive
+# define xTimerGetTimerDaemonTaskHandle \
+ MPU_xTimerGetTimerDaemonTaskHandle
+# define xTimerPendFunctionCall MPU_xTimerPendFunctionCall
+# define pcTimerGetName MPU_pcTimerGetName
+# define vTimerSetReloadMode MPU_vTimerSetReloadMode
+# define uxTimerGetReloadMode MPU_uxTimerGetReloadMode
+# define xTimerGetPeriod MPU_xTimerGetPeriod
+# define xTimerGetExpiryTime MPU_xTimerGetExpiryTime
+# define xTimerGenericCommand MPU_xTimerGenericCommand
+
+/* Map standard event_group.h API functions to the MPU equivalents. */
+# define xEventGroupCreate MPU_xEventGroupCreate
+# define xEventGroupCreateStatic MPU_xEventGroupCreateStatic
+# define xEventGroupWaitBits MPU_xEventGroupWaitBits
+# define xEventGroupClearBits MPU_xEventGroupClearBits
+# define xEventGroupSetBits MPU_xEventGroupSetBits
+# define xEventGroupSync MPU_xEventGroupSync
+# define vEventGroupDelete MPU_vEventGroupDelete
+
+/* Map standard message/stream_buffer.h API functions to the MPU
+equivalents. */
+# define xStreamBufferSend MPU_xStreamBufferSend
+# define xStreamBufferReceive MPU_xStreamBufferReceive
+# define xStreamBufferNextMessageLengthBytes \
+ MPU_xStreamBufferNextMessageLengthBytes
+# define vStreamBufferDelete MPU_vStreamBufferDelete
+# define xStreamBufferIsFull MPU_xStreamBufferIsFull
+# define xStreamBufferIsEmpty MPU_xStreamBufferIsEmpty
+# define xStreamBufferReset MPU_xStreamBufferReset
+# define xStreamBufferSpacesAvailable MPU_xStreamBufferSpacesAvailable
+# define xStreamBufferBytesAvailable MPU_xStreamBufferBytesAvailable
+# define xStreamBufferSetTriggerLevel MPU_xStreamBufferSetTriggerLevel
+# define xStreamBufferGenericCreate MPU_xStreamBufferGenericCreate
+# define xStreamBufferGenericCreateStatic \
+ MPU_xStreamBufferGenericCreateStatic
+
+/* Remove the privileged function macro, but keep the PRIVILEGED_DATA
+macro so applications can place data in privileged access sections
+(useful when using statically allocated objects). */
+# define PRIVILEGED_FUNCTION
+# define PRIVILEGED_DATA __attribute__((section("privileged_data")))
+# define FREERTOS_SYSTEM_CALL
+
+# else /* MPU_WRAPPERS_INCLUDED_FROM_API_FILE */
+
+/* Ensure API functions go in the privileged execution section. */
+# define PRIVILEGED_FUNCTION \
+ __attribute__((section("privileged_functions")))
+# define PRIVILEGED_DATA __attribute__((section("privileged_data")))
+# define FREERTOS_SYSTEM_CALL \
+ __attribute__((section("freertos_system_calls")))
+
+# endif /* MPU_WRAPPERS_INCLUDED_FROM_API_FILE */
+
+#else /* portUSING_MPU_WRAPPERS */
+
+# define PRIVILEGED_FUNCTION
+# define PRIVILEGED_DATA
+# define FREERTOS_SYSTEM_CALL
+# define portUSING_MPU_WRAPPERS 0
+
+#endif /* portUSING_MPU_WRAPPERS */
+
+#endif /* MPU_WRAPPERS_H */
diff --git a/product/rcar/src/CMSIS-FreeRTOS/Source/include/portable.h b/product/rcar/src/CMSIS-FreeRTOS/Source/include/portable.h
new file mode 100644
index 00000000..3661a892
--- /dev/null
+++ b/product/rcar/src/CMSIS-FreeRTOS/Source/include/portable.h
@@ -0,0 +1,236 @@
+/*
+ * FreeRTOS Kernel V10.3.1
+ * Copyright (C) 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * http://www.FreeRTOS.org
+ * http://aws.amazon.com/freertos
+ *
+ * 1 tab == 4 spaces!
+ */
+
+/*-----------------------------------------------------------
+ * Portable layer API. Each function must be defined for each port.
+ *----------------------------------------------------------*/
+
+#ifndef PORTABLE_H
+#define PORTABLE_H
+
+/* Each FreeRTOS port has a unique portmacro.h header file. Originally a
+pre-processor definition was used to ensure the pre-processor found the correct
+portmacro.h file for the port being used. That scheme was deprecated in favour
+of setting the compiler's include path such that it found the correct
+portmacro.h file - removing the need for the constant and allowing the
+portmacro.h file to be located anywhere in relation to the port being used.
+Purely for reasons of backward compatibility the old method is still valid, but
+to make it clear that new projects should not use it, support for the port
+specific constants has been moved into the deprecated_definitions.h header
+file. */
+#include "deprecated_definitions.h"
+
+/* If portENTER_CRITICAL is not defined then including deprecated_definitions.h
+did not result in a portmacro.h header file being included - and it should be
+included here. In this case the path to the correct portmacro.h header file
+must be set in the compiler's include path. */
+#ifndef portENTER_CRITICAL
+# include "portmacro.h"
+#endif
+
+#if portBYTE_ALIGNMENT == 32
+# define portBYTE_ALIGNMENT_MASK (0x001f)
+#endif
+
+#if portBYTE_ALIGNMENT == 16
+# define portBYTE_ALIGNMENT_MASK (0x000f)
+#endif
+
+#if portBYTE_ALIGNMENT == 8
+# define portBYTE_ALIGNMENT_MASK (0x0007)
+#endif
+
+#if portBYTE_ALIGNMENT == 4
+# define portBYTE_ALIGNMENT_MASK (0x0003)
+#endif
+
+#if portBYTE_ALIGNMENT == 2
+# define portBYTE_ALIGNMENT_MASK (0x0001)
+#endif
+
+#if portBYTE_ALIGNMENT == 1
+# define portBYTE_ALIGNMENT_MASK (0x0000)
+#endif
+
+#ifndef portBYTE_ALIGNMENT_MASK
+# error "Invalid portBYTE_ALIGNMENT definition"
+#endif
+
+#ifndef portNUM_CONFIGURABLE_REGIONS
+# define portNUM_CONFIGURABLE_REGIONS 1
+#endif
+
+#ifndef portHAS_STACK_OVERFLOW_CHECKING
+# define portHAS_STACK_OVERFLOW_CHECKING 0
+#endif
+
+#ifndef portARCH_NAME
+# define portARCH_NAME NULL
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "mpu_wrappers.h"
+
+/*
+ * Setup the stack of a new task so it is ready to be placed under the
+ * scheduler control. The registers have to be placed on the stack in
+ * the order that the port expects to find them.
+ *
+ */
+#if (portUSING_MPU_WRAPPERS == 1)
+# if (portHAS_STACK_OVERFLOW_CHECKING == 1)
+StackType_t *pxPortInitialiseStack(
+ StackType_t *pxTopOfStack,
+ StackType_t *pxEndOfStack,
+ TaskFunction_t pxCode,
+ void *pvParameters,
+ BaseType_t xRunPrivileged) PRIVILEGED_FUNCTION;
+# else
+StackType_t *pxPortInitialiseStack(
+ StackType_t *pxTopOfStack,
+ TaskFunction_t pxCode,
+ void *pvParameters,
+ BaseType_t xRunPrivileged) PRIVILEGED_FUNCTION;
+# endif
+#else
+# if (portHAS_STACK_OVERFLOW_CHECKING == 1)
+StackType_t *pxPortInitialiseStack(
+ StackType_t *pxTopOfStack,
+ StackType_t *pxEndOfStack,
+ TaskFunction_t pxCode,
+ void *pvParameters) PRIVILEGED_FUNCTION;
+# else
+StackType_t *pxPortInitialiseStack(
+ StackType_t *pxTopOfStack,
+ TaskFunction_t pxCode,
+ void *pvParameters) PRIVILEGED_FUNCTION;
+# endif
+#endif
+
+/* Used by heap_5.c to define the start address and size of each memory region
+that together comprise the total FreeRTOS heap space. */
+typedef struct HeapRegion {
+ uint8_t *pucStartAddress;
+ size_t xSizeInBytes;
+} HeapRegion_t;
+
+/* Used to pass information about the heap out of vPortGetHeapStats(). */
+typedef struct xHeapStats {
+ size_t xAvailableHeapSpaceInBytes; /* The total heap size currently
+ available - this is the sum of all the
+ free blocks, not the largest block
+ that can be allocated. */
+ size_t xSizeOfLargestFreeBlockInBytes; /* The maximum size, in bytes, of all
+ the free blocks within the heap at
+ the time vPortGetHeapStats() is
+ called. */
+ size_t xSizeOfSmallestFreeBlockInBytes; /* The minimum size, in bytes, of
+ all the free blocks within the
+ heap at the time
+ vPortGetHeapStats() is called. */
+ size_t
+ xNumberOfFreeBlocks; /* The number of free memory blocks within the heap
+ at the time vPortGetHeapStats() is called. */
+ size_t xMinimumEverFreeBytesRemaining; /* The minimum amount of total free
+ memory (sum of all free blocks)
+ there has been in the heap since
+ the system booted. */
+ size_t xNumberOfSuccessfulAllocations; /* The number of calls to
+ pvPortMalloc() that have returned
+ a valid memory block. */
+ size_t
+ xNumberOfSuccessfulFrees; /* The number of calls to vPortFree() that has
+ successfully freed a block of memory. */
+} HeapStats_t;
+
+/*
+ * Used to define multiple heap regions for use by heap_5.c. This function
+ * must be called before any calls to pvPortMalloc() - note that creating a
+ * task, queue, semaphore, mutex, software timer, event group, etc. will
+ * result in pvPortMalloc being called.
+ *
+ * pxHeapRegions passes in an array of HeapRegion_t structures - each of which
+ * defines a region of memory that can be used as the heap. The array is
+ * terminated by a HeapRegions_t structure that has a size of 0. The region
+ * with the lowest start address must appear first in the array.
+ */
+void vPortDefineHeapRegions(const HeapRegion_t *const pxHeapRegions)
+ PRIVILEGED_FUNCTION;
+
+/*
+ * Returns a HeapStats_t structure filled with information about the current
+ * heap state.
+ */
+void vPortGetHeapStats(HeapStats_t *pxHeapStats);
+
+/*
+ * Map to the memory management routines required for the port.
+ */
+void *pvPortMalloc(size_t xSize) PRIVILEGED_FUNCTION;
+void vPortFree(void *pv) PRIVILEGED_FUNCTION;
+void vPortInitialiseBlocks(void) PRIVILEGED_FUNCTION;
+size_t xPortGetFreeHeapSize(void) PRIVILEGED_FUNCTION;
+size_t xPortGetMinimumEverFreeHeapSize(void) PRIVILEGED_FUNCTION;
+
+/*
+ * Setup the hardware ready for the scheduler to take control. This generally
+ * sets up a tick interrupt and sets timers for the correct tick frequency.
+ */
+BaseType_t xPortStartScheduler(void) PRIVILEGED_FUNCTION;
+
+/*
+ * Undo any hardware/ISR setup that was performed by xPortStartScheduler() so
+ * the hardware is left in its original condition after the scheduler stops
+ * executing.
+ */
+void vPortEndScheduler(void) PRIVILEGED_FUNCTION;
+
+/*
+ * The structures and methods of manipulating the MPU are contained within the
+ * port layer.
+ *
+ * Fills the xMPUSettings structure with the memory region information
+ * contained in xRegions.
+ */
+#if (portUSING_MPU_WRAPPERS == 1)
+struct xMEMORY_REGION;
+void vPortStoreTaskMPUSettings(
+ xMPU_SETTINGS *xMPUSettings,
+ const struct xMEMORY_REGION *const xRegions,
+ StackType_t *pxBottomOfStack,
+ uint32_t ulStackDepth) PRIVILEGED_FUNCTION;
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* PORTABLE_H */
diff --git a/product/rcar/src/CMSIS-FreeRTOS/Source/include/projdefs.h b/product/rcar/src/CMSIS-FreeRTOS/Source/include/projdefs.h
new file mode 100644
index 00000000..09f99345
--- /dev/null
+++ b/product/rcar/src/CMSIS-FreeRTOS/Source/include/projdefs.h
@@ -0,0 +1,126 @@
+/*
+ * FreeRTOS Kernel V10.3.1
+ * Copyright (C) 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * http://www.FreeRTOS.org
+ * http://aws.amazon.com/freertos
+ *
+ * 1 tab == 4 spaces!
+ */
+
+#ifndef PROJDEFS_H
+#define PROJDEFS_H
+
+/*
+ * Defines the prototype to which task functions must conform. Defined in this
+ * file to ensure the type is known before portable.h is included.
+ */
+typedef void (*TaskFunction_t)(void *);
+
+/* Converts a time in milliseconds to a time in ticks. This macro can be
+overridden by a macro of the same name defined in FreeRTOSConfig.h in case the
+definition here is not suitable for your application. */
+#ifndef pdMS_TO_TICKS
+# define pdMS_TO_TICKS(xTimeInMs) \
+ ((TickType_t)( \
+ ((TickType_t)(xTimeInMs) * (TickType_t)configTICK_RATE_HZ) / \
+ (TickType_t)1000))
+#endif
+
+#define pdFALSE ((BaseType_t)0)
+#define pdTRUE ((BaseType_t)1)
+
+#define pdPASS (pdTRUE)
+#define pdFAIL (pdFALSE)
+#define errQUEUE_EMPTY ((BaseType_t)0)
+#define errQUEUE_FULL ((BaseType_t)0)
+
+/* FreeRTOS error definitions. */
+#define errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY (-1)
+#define errQUEUE_BLOCKED (-4)
+#define errQUEUE_YIELD (-5)
+
+/* Macros used for basic data corruption checks. */
+#ifndef configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES
+# define configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES 0
+#endif
+
+#if (configUSE_16_BIT_TICKS == 1)
+# define pdINTEGRITY_CHECK_VALUE 0x5a5a
+#else
+# define pdINTEGRITY_CHECK_VALUE 0x5a5a5a5aUL
+#endif
+
+/* The following errno values are used by FreeRTOS+ components, not FreeRTOS
+itself. */
+#define pdFREERTOS_ERRNO_NONE 0 /* No errors */
+#define pdFREERTOS_ERRNO_ENOENT 2 /* No such file or directory */
+#define pdFREERTOS_ERRNO_EINTR 4 /* Interrupted system call */
+#define pdFREERTOS_ERRNO_EIO 5 /* I/O error */
+#define pdFREERTOS_ERRNO_ENXIO 6 /* No such device or address */
+#define pdFREERTOS_ERRNO_EBADF 9 /* Bad file number */
+#define pdFREERTOS_ERRNO_EAGAIN 11 /* No more processes */
+#define pdFREERTOS_ERRNO_EWOULDBLOCK 11 /* Operation would block */
+#define pdFREERTOS_ERRNO_ENOMEM 12 /* Not enough memory */
+#define pdFREERTOS_ERRNO_EACCES 13 /* Permission denied */
+#define pdFREERTOS_ERRNO_EFAULT 14 /* Bad address */
+#define pdFREERTOS_ERRNO_EBUSY 16 /* Mount device busy */
+#define pdFREERTOS_ERRNO_EEXIST 17 /* File exists */
+#define pdFREERTOS_ERRNO_EXDEV 18 /* Cross-device link */
+#define pdFREERTOS_ERRNO_ENODEV 19 /* No such device */
+#define pdFREERTOS_ERRNO_ENOTDIR 20 /* Not a directory */
+#define pdFREERTOS_ERRNO_EISDIR 21 /* Is a directory */
+#define pdFREERTOS_ERRNO_EINVAL 22 /* Invalid argument */
+#define pdFREERTOS_ERRNO_ENOSPC 28 /* No space left on device */
+#define pdFREERTOS_ERRNO_ESPIPE 29 /* Illegal seek */
+#define pdFREERTOS_ERRNO_EROFS 30 /* Read only file system */
+#define pdFREERTOS_ERRNO_EUNATCH 42 /* Protocol driver not attached */
+#define pdFREERTOS_ERRNO_EBADE 50 /* Invalid exchange */
+#define pdFREERTOS_ERRNO_EFTYPE 79 /* Inappropriate file type or format */
+#define pdFREERTOS_ERRNO_ENMFILE 89 /* No more files */
+#define pdFREERTOS_ERRNO_ENOTEMPTY 90 /* Directory not empty */
+#define pdFREERTOS_ERRNO_ENAMETOOLONG 91 /* File or path name too long */
+#define pdFREERTOS_ERRNO_EOPNOTSUPP \
+ 95 /* Operation not supported on transport endpoint */
+#define pdFREERTOS_ERRNO_ENOBUFS 105 /* No buffer space available */
+#define pdFREERTOS_ERRNO_ENOPROTOOPT 109 /* Protocol not available */
+#define pdFREERTOS_ERRNO_EADDRINUSE 112 /* Address already in use */
+#define pdFREERTOS_ERRNO_ETIMEDOUT 116 /* Connection timed out */
+#define pdFREERTOS_ERRNO_EINPROGRESS 119 /* Connection already in progress */
+#define pdFREERTOS_ERRNO_EALREADY 120 /* Socket already connected */
+#define pdFREERTOS_ERRNO_EADDRNOTAVAIL 125 /* Address not available */
+#define pdFREERTOS_ERRNO_EISCONN 127 /* Socket is already connected */
+#define pdFREERTOS_ERRNO_ENOTCONN 128 /* Socket is not connected */
+#define pdFREERTOS_ERRNO_ENOMEDIUM 135 /* No medium inserted */
+#define pdFREERTOS_ERRNO_EILSEQ \
+ 138 /* An invalid UTF-16 sequence was encountered. */
+#define pdFREERTOS_ERRNO_ECANCELED 140 /* Operation canceled. */
+
+/* The following endian values are used by FreeRTOS+ components, not FreeRTOS
+itself. */
+#define pdFREERTOS_LITTLE_ENDIAN 0
+#define pdFREERTOS_BIG_ENDIAN 1
+
+/* Re-defining endian values for generic naming. */
+#define pdLITTLE_ENDIAN pdFREERTOS_LITTLE_ENDIAN
+#define pdBIG_ENDIAN pdFREERTOS_BIG_ENDIAN
+
+#endif /* PROJDEFS_H */
diff --git a/product/rcar/src/CMSIS-FreeRTOS/Source/include/queue.h b/product/rcar/src/CMSIS-FreeRTOS/Source/include/queue.h
new file mode 100644
index 00000000..50f464df
--- /dev/null
+++ b/product/rcar/src/CMSIS-FreeRTOS/Source/include/queue.h
@@ -0,0 +1,1783 @@
+/*
+ * FreeRTOS Kernel V10.3.1
+ * Copyright (C) 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * http://www.FreeRTOS.org
+ * http://aws.amazon.com/freertos
+ *
+ * 1 tab == 4 spaces!
+ */
+
+#ifndef QUEUE_H
+#define QUEUE_H
+
+#ifndef INC_FREERTOS_H
+# error "include FreeRTOS.h" must appear in source files before "include queue.h"
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "task.h"
+
+/**
+ * Type by which queues are referenced. For example, a call to xQueueCreate()
+ * returns a QueueHandle_t variable that can then be used as a parameter to
+ * xQueueSend(), xQueueReceive(), etc.
+ */
+struct QueueDefinition; /* Using old naming convention so as not to break kernel
+ aware debuggers. */
+typedef struct QueueDefinition *QueueHandle_t;
+
+/**
+ * Type by which queue sets are referenced. For example, a call to
+ * xQueueCreateSet() returns an xQueueSet variable that can then be used as a
+ * parameter to xQueueSelectFromSet(), xQueueAddToSet(), etc.
+ */
+typedef struct QueueDefinition *QueueSetHandle_t;
+
+/**
+ * Queue sets can contain both queues and semaphores, so the
+ * QueueSetMemberHandle_t is defined as a type to be used where a parameter or
+ * return value can be either a QueueHandle_t or a SemaphoreHandle_t.
+ */
+typedef struct QueueDefinition *QueueSetMemberHandle_t;
+
+/* For internal use only. */
+#define queueSEND_TO_BACK ((BaseType_t)0)
+#define queueSEND_TO_FRONT ((BaseType_t)1)
+#define queueOVERWRITE ((BaseType_t)2)
+
+/* For internal use only. These definitions *must* match those in queue.c. */
+#define queueQUEUE_TYPE_BASE ((uint8_t)0U)
+#define queueQUEUE_TYPE_SET ((uint8_t)0U)
+#define queueQUEUE_TYPE_MUTEX ((uint8_t)1U)
+#define queueQUEUE_TYPE_COUNTING_SEMAPHORE ((uint8_t)2U)
+#define queueQUEUE_TYPE_BINARY_SEMAPHORE ((uint8_t)3U)
+#define queueQUEUE_TYPE_RECURSIVE_MUTEX ((uint8_t)4U)
+
+/**
+ * queue. h
+ * <pre>
+ QueueHandle_t xQueueCreate(
+ UBaseType_t uxQueueLength,
+ UBaseType_t uxItemSize
+ );
+ * </pre>
+ *
+ * Creates a new queue instance, and returns a handle by which the new queue
+ * can be referenced.
+ *
+ * Internally, within the FreeRTOS implementation, queues use two blocks of
+ * memory. The first block is used to hold the queue's data structures. The
+ * second block is used to hold items placed into the queue. If a queue is
+ * created using xQueueCreate() then both blocks of memory are automatically
+ * dynamically allocated inside the xQueueCreate() function. (see
+ * http://www.freertos.org/a00111.html). If a queue is created using
+ * xQueueCreateStatic() then the application writer must provide the memory that
+ * will get used by the queue. xQueueCreateStatic() therefore allows a queue to
+ * be created without using any dynamic memory allocation.
+ *
+ * http://www.FreeRTOS.org/Embedded-RTOS-Queues.html
+ *
+ * @param uxQueueLength The maximum number of items that the queue can contain.
+ *
+ * @param uxItemSize The number of bytes each item in the queue will require.
+ * Items are queued by copy, not by reference, so this is the number of bytes
+ * that will be copied for each posted item. Each item on the queue must be
+ * the same size.
+ *
+ * @return If the queue is successfully created then a handle to the newly
+ * created queue is returned. If the queue cannot be created then 0 is
+ * returned.
+ *
+ * Example usage:
+ <pre>
+ struct AMessage
+ {
+ char ucMessageID;
+ char ucData[ 20 ];
+ };
+
+ void vATask( void *pvParameters )
+ {
+ QueueHandle_t xQueue1, xQueue2;
+
+ // Create a queue capable of containing 10 uint32_t values.
+ xQueue1 = xQueueCreate( 10, sizeof( uint32_t ) );
+ if( xQueue1 == 0 )
+ {
+ // Queue was not created and must not be used.
+ }
+
+ // Create a queue capable of containing 10 pointers to AMessage structures.
+ // These should be passed by pointer as they contain a lot of data.
+ xQueue2 = xQueueCreate( 10, sizeof( struct AMessage * ) );
+ if( xQueue2 == 0 )
+ {
+ // Queue was not created and must not be used.
+ }
+
+ // ... Rest of task code.
+ }
+ </pre>
+ * \defgroup xQueueCreate xQueueCreate
+ * \ingroup QueueManagement
+ */
+#if (configSUPPORT_DYNAMIC_ALLOCATION == 1)
+# define xQueueCreate(uxQueueLength, uxItemSize) \
+ xQueueGenericCreate( \
+ (uxQueueLength), (uxItemSize), (queueQUEUE_TYPE_BASE))
+#endif
+
+/**
+ * queue. h
+ * <pre>
+ QueueHandle_t xQueueCreateStatic(
+ UBaseType_t uxQueueLength,
+ UBaseType_t uxItemSize,
+ uint8_t *pucQueueStorageBuffer,
+ StaticQueue_t *pxQueueBuffer
+ );
+ * </pre>
+ *
+ * Creates a new queue instance, and returns a handle by which the new queue
+ * can be referenced.
+ *
+ * Internally, within the FreeRTOS implementation, queues use two blocks of
+ * memory. The first block is used to hold the queue's data structures. The
+ * second block is used to hold items placed into the queue. If a queue is
+ * created using xQueueCreate() then both blocks of memory are automatically
+ * dynamically allocated inside the xQueueCreate() function. (see
+ * http://www.freertos.org/a00111.html). If a queue is created using
+ * xQueueCreateStatic() then the application writer must provide the memory that
+ * will get used by the queue. xQueueCreateStatic() therefore allows a queue to
+ * be created without using any dynamic memory allocation.
+ *
+ * http://www.FreeRTOS.org/Embedded-RTOS-Queues.html
+ *
+ * @param uxQueueLength The maximum number of items that the queue can contain.
+ *
+ * @param uxItemSize The number of bytes each item in the queue will require.
+ * Items are queued by copy, not by reference, so this is the number of bytes
+ * that will be copied for each posted item. Each item on the queue must be
+ * the same size.
+ *
+ * @param pucQueueStorageBuffer If uxItemSize is not zero then
+ * pucQueueStorageBuffer must point to a uint8_t array that is at least large
+ * enough to hold the maximum number of items that can be in the queue at any
+ * one time - which is ( uxQueueLength * uxItemSize ) bytes. If uxItemSize is
+ * zero then pucQueueStorageBuffer can be NULL.
+ *
+ * @param pxQueueBuffer Must point to a variable of type StaticQueue_t, which
+ * will be used to hold the queue's data structure.
+ *
+ * @return If the queue is created then a handle to the created queue is
+ * returned. If pxQueueBuffer is NULL then NULL is returned.
+ *
+ * Example usage:
+ <pre>
+ struct AMessage
+ {
+ char ucMessageID;
+ char ucData[ 20 ];
+ };
+
+ #define QUEUE_LENGTH 10
+ #define ITEM_SIZE sizeof( uint32_t )
+
+ // xQueueBuffer will hold the queue structure.
+ StaticQueue_t xQueueBuffer;
+
+ // ucQueueStorage will hold the items posted to the queue. Must be at least
+ // [(queue length) * ( queue item size)] bytes long.
+ uint8_t ucQueueStorage[ QUEUE_LENGTH * ITEM_SIZE ];
+
+ void vATask( void *pvParameters )
+ {
+ QueueHandle_t xQueue1;
+
+ // Create a queue capable of containing 10 uint32_t values.
+ xQueue1 = xQueueCreateStatic( QUEUE_LENGTH, // Items the queue can hold.
+ ITEM_SIZE, // The size of each item in the queue.
+ &( ucQueueStorage[ 0 ] ), // Buffer that will hold the items in the queue.
+ &xQueueBuffer ); // Buffer that will hold the
+ // queue structure.
+
+ // The queue is guaranteed to be created successfully as no dynamic memory
+ // allocation is used. Therefore xQueue1 is now a handle to a valid queue.
+
+ // ... Rest of task code.
+ }
+ </pre>
+ * \defgroup xQueueCreateStatic xQueueCreateStatic
+ * \ingroup QueueManagement
+ */
+#if (configSUPPORT_STATIC_ALLOCATION == 1)
+# define xQueueCreateStatic( \
+ uxQueueLength, uxItemSize, pucQueueStorage, pxQueueBuffer) \
+ xQueueGenericCreateStatic( \
+ (uxQueueLength), \
+ (uxItemSize), \
+ (pucQueueStorage), \
+ (pxQueueBuffer), \
+ (queueQUEUE_TYPE_BASE))
+#endif /* configSUPPORT_STATIC_ALLOCATION */
+
+/**
+ * queue. h
+ * <pre>
+ BaseType_t xQueueSendToFront(
+ QueueHandle_t xQueue,
+ const void *pvItemToQueue,
+ TickType_t xTicksToWait
+ );
+ * </pre>
+ *
+ * Post an item to the front of a queue. The item is queued by copy, not by
+ * reference. This function must not be called from an interrupt service
+ * routine. See xQueueSendFromISR () for an alternative which may be used
+ * in an ISR.
+ *
+ * @param xQueue The handle to the queue on which the item is to be posted.
+ *
+ * @param pvItemToQueue A pointer to the item that is to be placed on the
+ * queue. The size of the items the queue will hold was defined when the
+ * queue was created, so this many bytes will be copied from pvItemToQueue
+ * into the queue storage area.
+ *
+ * @param xTicksToWait The maximum amount of time the task should block
+ * waiting for space to become available on the queue, should it already
+ * be full. The call will return immediately if this is set to 0 and the
+ * queue is full. The time is defined in tick periods so the constant
+ * portTICK_PERIOD_MS should be used to convert to real time if this is
+ required.
+ *
+ * @return pdTRUE if the item was successfully posted, otherwise errQUEUE_FULL.
+ *
+ * Example usage:
+ <pre>
+ struct AMessage
+ {
+ char ucMessageID;
+ char ucData[ 20 ];
+ } xMessage;
+
+ uint32_t ulVar = 10UL;
+
+ void vATask( void *pvParameters )
+ {
+ QueueHandle_t xQueue1, xQueue2;
+ struct AMessage *pxMessage;
+
+ // Create a queue capable of containing 10 uint32_t values.
+ xQueue1 = xQueueCreate( 10, sizeof( uint32_t ) );
+
+ // Create a queue capable of containing 10 pointers to AMessage structures.
+ // These should be passed by pointer as they contain a lot of data.
+ xQueue2 = xQueueCreate( 10, sizeof( struct AMessage * ) );
+
+ // ...
+
+ if( xQueue1 != 0 )
+ {
+ // Send an uint32_t. Wait for 10 ticks for space to become
+ // available if necessary.
+ if( xQueueSendToFront( xQueue1, ( void * ) &ulVar, ( TickType_t ) 10 )
+ != pdPASS )
+ {
+ // Failed to post the message, even after 10 ticks.
+ }
+ }
+
+ if( xQueue2 != 0 )
+ {
+ // Send a pointer to a struct AMessage object. Don't block if the
+ // queue is already full.
+ pxMessage = & xMessage;
+ xQueueSendToFront( xQueue2, ( void * ) &pxMessage, ( TickType_t ) 0 );
+ }
+
+ // ... Rest of task code.
+ }
+ </pre>
+ * \defgroup xQueueSend xQueueSend
+ * \ingroup QueueManagement
+ */
+#define xQueueSendToFront(xQueue, pvItemToQueue, xTicksToWait) \
+ xQueueGenericSend( \
+ (xQueue), (pvItemToQueue), (xTicksToWait), queueSEND_TO_FRONT)
+
+/**
+ * queue. h
+ * <pre>
+ BaseType_t xQueueSendToBack(
+ QueueHandle_t xQueue,
+ const void *pvItemToQueue,
+ TickType_t xTicksToWait
+ );
+ * </pre>
+ *
+ * This is a macro that calls xQueueGenericSend().
+ *
+ * Post an item to the back of a queue. The item is queued by copy, not by
+ * reference. This function must not be called from an interrupt service
+ * routine. See xQueueSendFromISR () for an alternative which may be used
+ * in an ISR.
+ *
+ * @param xQueue The handle to the queue on which the item is to be posted.
+ *
+ * @param pvItemToQueue A pointer to the item that is to be placed on the
+ * queue. The size of the items the queue will hold was defined when the
+ * queue was created, so this many bytes will be copied from pvItemToQueue
+ * into the queue storage area.
+ *
+ * @param xTicksToWait The maximum amount of time the task should block
+ * waiting for space to become available on the queue, should it already
+ * be full. The call will return immediately if this is set to 0 and the queue
+ * is full. The time is defined in tick periods so the constant
+ * portTICK_PERIOD_MS should be used to convert to real time if this is
+ required.
+ *
+ * @return pdTRUE if the item was successfully posted, otherwise errQUEUE_FULL.
+ *
+ * Example usage:
+ <pre>
+ struct AMessage
+ {
+ char ucMessageID;
+ char ucData[ 20 ];
+ } xMessage;
+
+ uint32_t ulVar = 10UL;
+
+ void vATask( void *pvParameters )
+ {
+ QueueHandle_t xQueue1, xQueue2;
+ struct AMessage *pxMessage;
+
+ // Create a queue capable of containing 10 uint32_t values.
+ xQueue1 = xQueueCreate( 10, sizeof( uint32_t ) );
+
+ // Create a queue capable of containing 10 pointers to AMessage structures.
+ // These should be passed by pointer as they contain a lot of data.
+ xQueue2 = xQueueCreate( 10, sizeof( struct AMessage * ) );
+
+ // ...
+
+ if( xQueue1 != 0 )
+ {
+ // Send an uint32_t. Wait for 10 ticks for space to become
+ // available if necessary.
+ if( xQueueSendToBack( xQueue1, ( void * ) &ulVar, ( TickType_t ) 10 ) !=
+ pdPASS )
+ {
+ // Failed to post the message, even after 10 ticks.
+ }
+ }
+
+ if( xQueue2 != 0 )
+ {
+ // Send a pointer to a struct AMessage object. Don't block if the
+ // queue is already full.
+ pxMessage = & xMessage;
+ xQueueSendToBack( xQueue2, ( void * ) &pxMessage, ( TickType_t ) 0 );
+ }
+
+ // ... Rest of task code.
+ }
+ </pre>
+ * \defgroup xQueueSend xQueueSend
+ * \ingroup QueueManagement
+ */
+#define xQueueSendToBack(xQueue, pvItemToQueue, xTicksToWait) \
+ xQueueGenericSend( \
+ (xQueue), (pvItemToQueue), (xTicksToWait), queueSEND_TO_BACK)
+
+/**
+ * queue. h
+ * <pre>
+ BaseType_t xQueueSend(
+ QueueHandle_t xQueue,
+ const void * pvItemToQueue,
+ TickType_t xTicksToWait
+ );
+ * </pre>
+ *
+ * This is a macro that calls xQueueGenericSend(). It is included for
+ * backward compatibility with versions of FreeRTOS.org that did not
+ * include the xQueueSendToFront() and xQueueSendToBack() macros. It is
+ * equivalent to xQueueSendToBack().
+ *
+ * Post an item on a queue. The item is queued by copy, not by reference.
+ * This function must not be called from an interrupt service routine.
+ * See xQueueSendFromISR () for an alternative which may be used in an ISR.
+ *
+ * @param xQueue The handle to the queue on which the item is to be posted.
+ *
+ * @param pvItemToQueue A pointer to the item that is to be placed on the
+ * queue. The size of the items the queue will hold was defined when the
+ * queue was created, so this many bytes will be copied from pvItemToQueue
+ * into the queue storage area.
+ *
+ * @param xTicksToWait The maximum amount of time the task should block
+ * waiting for space to become available on the queue, should it already
+ * be full. The call will return immediately if this is set to 0 and the
+ * queue is full. The time is defined in tick periods so the constant
+ * portTICK_PERIOD_MS should be used to convert to real time if this is
+ required.
+ *
+ * @return pdTRUE if the item was successfully posted, otherwise errQUEUE_FULL.
+ *
+ * Example usage:
+ <pre>
+ struct AMessage
+ {
+ char ucMessageID;
+ char ucData[ 20 ];
+ } xMessage;
+
+ uint32_t ulVar = 10UL;
+
+ void vATask( void *pvParameters )
+ {
+ QueueHandle_t xQueue1, xQueue2;
+ struct AMessage *pxMessage;
+
+ // Create a queue capable of containing 10 uint32_t values.
+ xQueue1 = xQueueCreate( 10, sizeof( uint32_t ) );
+
+ // Create a queue capable of containing 10 pointers to AMessage structures.
+ // These should be passed by pointer as they contain a lot of data.
+ xQueue2 = xQueueCreate( 10, sizeof( struct AMessage * ) );
+
+ // ...
+
+ if( xQueue1 != 0 )
+ {
+ // Send an uint32_t. Wait for 10 ticks for space to become
+ // available if necessary.
+ if( xQueueSend( xQueue1, ( void * ) &ulVar, ( TickType_t ) 10 ) !=
+ pdPASS )
+ {
+ // Failed to post the message, even after 10 ticks.
+ }
+ }
+
+ if( xQueue2 != 0 )
+ {
+ // Send a pointer to a struct AMessage object. Don't block if the
+ // queue is already full.
+ pxMessage = & xMessage;
+ xQueueSend( xQueue2, ( void * ) &pxMessage, ( TickType_t ) 0 );
+ }
+
+ // ... Rest of task code.
+ }
+ </pre>
+ * \defgroup xQueueSend xQueueSend
+ * \ingroup QueueManagement
+ */
+#define xQueueSend(xQueue, pvItemToQueue, xTicksToWait) \
+ xQueueGenericSend( \
+ (xQueue), (pvItemToQueue), (xTicksToWait), queueSEND_TO_BACK)
+
+/**
+ * queue. h
+ * <pre>
+ BaseType_t xQueueOverwrite(
+ QueueHandle_t xQueue,
+ const void * pvItemToQueue
+ );
+ * </pre>
+ *
+ * Only for use with queues that have a length of one - so the queue is either
+ * empty or full.
+ *
+ * Post an item on a queue. If the queue is already full then overwrite the
+ * value held in the queue. The item is queued by copy, not by reference.
+ *
+ * This function must not be called from an interrupt service routine.
+ * See xQueueOverwriteFromISR () for an alternative which may be used in an ISR.
+ *
+ * @param xQueue The handle of the queue to which the data is being sent.
+ *
+ * @param pvItemToQueue A pointer to the item that is to be placed on the
+ * queue. The size of the items the queue will hold was defined when the
+ * queue was created, so this many bytes will be copied from pvItemToQueue
+ * into the queue storage area.
+ *
+ * @return xQueueOverwrite() is a macro that calls xQueueGenericSend(), and
+ * therefore has the same return values as xQueueSendToFront(). However, pdPASS
+ * is the only value that can be returned because xQueueOverwrite() will write
+ * to the queue even when the queue is already full.
+ *
+ * Example usage:
+ <pre>
+
+ void vFunction( void *pvParameters )
+ {
+ QueueHandle_t xQueue;
+ uint32_t ulVarToSend, ulValReceived;
+
+ // Create a queue to hold one uint32_t value. It is strongly
+ // recommended *not* to use xQueueOverwrite() on queues that can
+ // contain more than one value, and doing so will trigger an assertion
+ // if configASSERT() is defined.
+ xQueue = xQueueCreate( 1, sizeof( uint32_t ) );
+
+ // Write the value 10 to the queue using xQueueOverwrite().
+ ulVarToSend = 10;
+ xQueueOverwrite( xQueue, &ulVarToSend );
+
+ // Peeking the queue should now return 10, but leave the value 10 in
+ // the queue. A block time of zero is used as it is known that the
+ // queue holds a value.
+ ulValReceived = 0;
+ xQueuePeek( xQueue, &ulValReceived, 0 );
+
+ if( ulValReceived != 10 )
+ {
+ // Error unless the item was removed by a different task.
+ }
+
+ // The queue is still full. Use xQueueOverwrite() to overwrite the
+ // value held in the queue with 100.
+ ulVarToSend = 100;
+ xQueueOverwrite( xQueue, &ulVarToSend );
+
+ // This time read from the queue, leaving the queue empty once more.
+ // A block time of 0 is used again.
+ xQueueReceive( xQueue, &ulValReceived, 0 );
+
+ // The value read should be the last value written, even though the
+ // queue was already full when the value was written.
+ if( ulValReceived != 100 )
+ {
+ // Error!
+ }
+
+ // ...
+}
+ </pre>
+ * \defgroup xQueueOverwrite xQueueOverwrite
+ * \ingroup QueueManagement
+ */
+#define xQueueOverwrite(xQueue, pvItemToQueue) \
+ xQueueGenericSend((xQueue), (pvItemToQueue), 0, queueOVERWRITE)
+
+/**
+ * queue. h
+ * <pre>
+ BaseType_t xQueueGenericSend(
+ QueueHandle_t xQueue,
+ const void * pvItemToQueue,
+ TickType_t xTicksToWait
+ BaseType_t xCopyPosition
+ );
+ * </pre>
+ *
+ * It is preferred that the macros xQueueSend(), xQueueSendToFront() and
+ * xQueueSendToBack() are used in place of calling this function directly.
+ *
+ * Post an item on a queue. The item is queued by copy, not by reference.
+ * This function must not be called from an interrupt service routine.
+ * See xQueueSendFromISR () for an alternative which may be used in an ISR.
+ *
+ * @param xQueue The handle to the queue on which the item is to be posted.
+ *
+ * @param pvItemToQueue A pointer to the item that is to be placed on the
+ * queue. The size of the items the queue will hold was defined when the
+ * queue was created, so this many bytes will be copied from pvItemToQueue
+ * into the queue storage area.
+ *
+ * @param xTicksToWait The maximum amount of time the task should block
+ * waiting for space to become available on the queue, should it already
+ * be full. The call will return immediately if this is set to 0 and the
+ * queue is full. The time is defined in tick periods so the constant
+ * portTICK_PERIOD_MS should be used to convert to real time if this is
+ required.
+ *
+ * @param xCopyPosition Can take the value queueSEND_TO_BACK to place the
+ * item at the back of the queue, or queueSEND_TO_FRONT to place the item
+ * at the front of the queue (for high priority messages).
+ *
+ * @return pdTRUE if the item was successfully posted, otherwise errQUEUE_FULL.
+ *
+ * Example usage:
+ <pre>
+ struct AMessage
+ {
+ char ucMessageID;
+ char ucData[ 20 ];
+ } xMessage;
+
+ uint32_t ulVar = 10UL;
+
+ void vATask( void *pvParameters )
+ {
+ QueueHandle_t xQueue1, xQueue2;
+ struct AMessage *pxMessage;
+
+ // Create a queue capable of containing 10 uint32_t values.
+ xQueue1 = xQueueCreate( 10, sizeof( uint32_t ) );
+
+ // Create a queue capable of containing 10 pointers to AMessage structures.
+ // These should be passed by pointer as they contain a lot of data.
+ xQueue2 = xQueueCreate( 10, sizeof( struct AMessage * ) );
+
+ // ...
+
+ if( xQueue1 != 0 )
+ {
+ // Send an uint32_t. Wait for 10 ticks for space to become
+ // available if necessary.
+ if( xQueueGenericSend( xQueue1, ( void * ) &ulVar, ( TickType_t ) 10,
+ queueSEND_TO_BACK ) != pdPASS )
+ {
+ // Failed to post the message, even after 10 ticks.
+ }
+ }
+
+ if( xQueue2 != 0 )
+ {
+ // Send a pointer to a struct AMessage object. Don't block if the
+ // queue is already full.
+ pxMessage = & xMessage;
+ xQueueGenericSend( xQueue2, ( void * ) &pxMessage, ( TickType_t ) 0,
+ queueSEND_TO_BACK );
+ }
+
+ // ... Rest of task code.
+ }
+ </pre>
+ * \defgroup xQueueSend xQueueSend
+ * \ingroup QueueManagement
+ */
+BaseType_t xQueueGenericSend(
+ QueueHandle_t xQueue,
+ const void *const pvItemToQueue,
+ TickType_t xTicksToWait,
+ const BaseType_t xCopyPosition) PRIVILEGED_FUNCTION;
+
+/**
+ * queue. h
+ * <pre>
+ BaseType_t xQueuePeek(
+ QueueHandle_t xQueue,
+ void * const pvBuffer,
+ TickType_t xTicksToWait
+ );</pre>
+ *
+ * Receive an item from a queue without removing the item from the queue.
+ * The item is received by copy so a buffer of adequate size must be
+ * provided. The number of bytes copied into the buffer was defined when
+ * the queue was created.
+ *
+ * Successfully received items remain on the queue so will be returned again
+ * by the next call, or a call to xQueueReceive().
+ *
+ * This macro must not be used in an interrupt service routine. See
+ * xQueuePeekFromISR() for an alternative that can be called from an interrupt
+ * service routine.
+ *
+ * @param xQueue The handle to the queue from which the item is to be
+ * received.
+ *
+ * @param pvBuffer Pointer to the buffer into which the received item will
+ * be copied.
+ *
+ * @param xTicksToWait The maximum amount of time the task should block
+ * waiting for an item to receive should the queue be empty at the time
+ * of the call. The time is defined in tick periods so the constant
+ * portTICK_PERIOD_MS should be used to convert to real time if this is
+ * required.
+ * xQueuePeek() will return immediately if xTicksToWait is 0 and the queue
+ * is empty.
+ *
+ * @return pdTRUE if an item was successfully received from the queue,
+ * otherwise pdFALSE.
+ *
+ * Example usage:
+ <pre>
+ struct AMessage
+ {
+ char ucMessageID;
+ char ucData[ 20 ];
+ } xMessage;
+
+ QueueHandle_t xQueue;
+
+ // Task to create a queue and post a value.
+ void vATask( void *pvParameters )
+ {
+ struct AMessage *pxMessage;
+
+ // Create a queue capable of containing 10 pointers to AMessage structures.
+ // These should be passed by pointer as they contain a lot of data.
+ xQueue = xQueueCreate( 10, sizeof( struct AMessage * ) );
+ if( xQueue == 0 )
+ {
+ // Failed to create the queue.
+ }
+
+ // ...
+
+ // Send a pointer to a struct AMessage object. Don't block if the
+ // queue is already full.
+ pxMessage = & xMessage;
+ xQueueSend( xQueue, ( void * ) &pxMessage, ( TickType_t ) 0 );
+
+ // ... Rest of task code.
+ }
+
+ // Task to peek the data from the queue.
+ void vADifferentTask( void *pvParameters )
+ {
+ struct AMessage *pxRxedMessage;
+
+ if( xQueue != 0 )
+ {
+ // Peek a message on the created queue. Block for 10 ticks if a
+ // message is not immediately available.
+ if( xQueuePeek( xQueue, &( pxRxedMessage ), ( TickType_t ) 10 ) )
+ {
+ // pxRxedMessage now points to the struct AMessage variable posted
+ // by vATask, but the item still remains on the queue.
+ }
+ }
+
+ // ... Rest of task code.
+ }
+ </pre>
+ * \defgroup xQueuePeek xQueuePeek
+ * \ingroup QueueManagement
+ */
+BaseType_t xQueuePeek(
+ QueueHandle_t xQueue,
+ void *const pvBuffer,
+ TickType_t xTicksToWait) PRIVILEGED_FUNCTION;
+
+/**
+ * queue. h
+ * <pre>
+ BaseType_t xQueuePeekFromISR(
+ QueueHandle_t xQueue,
+ void *pvBuffer,
+ );</pre>
+ *
+ * A version of xQueuePeek() that can be called from an interrupt service
+ * routine (ISR).
+ *
+ * Receive an item from a queue without removing the item from the queue.
+ * The item is received by copy so a buffer of adequate size must be
+ * provided. The number of bytes copied into the buffer was defined when
+ * the queue was created.
+ *
+ * Successfully received items remain on the queue so will be returned again
+ * by the next call, or a call to xQueueReceive().
+ *
+ * @param xQueue The handle to the queue from which the item is to be
+ * received.
+ *
+ * @param pvBuffer Pointer to the buffer into which the received item will
+ * be copied.
+ *
+ * @return pdTRUE if an item was successfully received from the queue,
+ * otherwise pdFALSE.
+ *
+ * \defgroup xQueuePeekFromISR xQueuePeekFromISR
+ * \ingroup QueueManagement
+ */
+BaseType_t xQueuePeekFromISR(QueueHandle_t xQueue, void *const pvBuffer)
+ PRIVILEGED_FUNCTION;
+
+/**
+ * queue. h
+ * <pre>
+ BaseType_t xQueueReceive(
+ QueueHandle_t xQueue,
+ void *pvBuffer,
+ TickType_t xTicksToWait
+ );</pre>
+ *
+ * Receive an item from a queue. The item is received by copy so a buffer of
+ * adequate size must be provided. The number of bytes copied into the buffer
+ * was defined when the queue was created.
+ *
+ * Successfully received items are removed from the queue.
+ *
+ * This function must not be used in an interrupt service routine. See
+ * xQueueReceiveFromISR for an alternative that can.
+ *
+ * @param xQueue The handle to the queue from which the item is to be
+ * received.
+ *
+ * @param pvBuffer Pointer to the buffer into which the received item will
+ * be copied.
+ *
+ * @param xTicksToWait The maximum amount of time the task should block
+ * waiting for an item to receive should the queue be empty at the time
+ * of the call. xQueueReceive() will return immediately if xTicksToWait
+ * is zero and the queue is empty. The time is defined in tick periods so the
+ * constant portTICK_PERIOD_MS should be used to convert to real time if this is
+ * required.
+ *
+ * @return pdTRUE if an item was successfully received from the queue,
+ * otherwise pdFALSE.
+ *
+ * Example usage:
+ <pre>
+ struct AMessage
+ {
+ char ucMessageID;
+ char ucData[ 20 ];
+ } xMessage;
+
+ QueueHandle_t xQueue;
+
+ // Task to create a queue and post a value.
+ void vATask( void *pvParameters )
+ {
+ struct AMessage *pxMessage;
+
+ // Create a queue capable of containing 10 pointers to AMessage structures.
+ // These should be passed by pointer as they contain a lot of data.
+ xQueue = xQueueCreate( 10, sizeof( struct AMessage * ) );
+ if( xQueue == 0 )
+ {
+ // Failed to create the queue.
+ }
+
+ // ...
+
+ // Send a pointer to a struct AMessage object. Don't block if the
+ // queue is already full.
+ pxMessage = & xMessage;
+ xQueueSend( xQueue, ( void * ) &pxMessage, ( TickType_t ) 0 );
+
+ // ... Rest of task code.
+ }
+
+ // Task to receive from the queue.
+ void vADifferentTask( void *pvParameters )
+ {
+ struct AMessage *pxRxedMessage;
+
+ if( xQueue != 0 )
+ {
+ // Receive a message on the created queue. Block for 10 ticks if a
+ // message is not immediately available.
+ if( xQueueReceive( xQueue, &( pxRxedMessage ), ( TickType_t ) 10 ) )
+ {
+ // pxRxedMessage now points to the struct AMessage variable posted
+ // by vATask.
+ }
+ }
+
+ // ... Rest of task code.
+ }
+ </pre>
+ * \defgroup xQueueReceive xQueueReceive
+ * \ingroup QueueManagement
+ */
+BaseType_t xQueueReceive(
+ QueueHandle_t xQueue,
+ void *const pvBuffer,
+ TickType_t xTicksToWait) PRIVILEGED_FUNCTION;
+
+/**
+ * queue. h
+ * <pre>UBaseType_t uxQueueMessagesWaiting( const QueueHandle_t xQueue );</pre>
+ *
+ * Return the number of messages stored in a queue.
+ *
+ * @param xQueue A handle to the queue being queried.
+ *
+ * @return The number of messages available in the queue.
+ *
+ * \defgroup uxQueueMessagesWaiting uxQueueMessagesWaiting
+ * \ingroup QueueManagement
+ */
+UBaseType_t uxQueueMessagesWaiting(const QueueHandle_t xQueue)
+ PRIVILEGED_FUNCTION;
+
+/**
+ * queue. h
+ * <pre>UBaseType_t uxQueueSpacesAvailable( const QueueHandle_t xQueue );</pre>
+ *
+ * Return the number of free spaces available in a queue. This is equal to the
+ * number of items that can be sent to the queue before the queue becomes full
+ * if no items are removed.
+ *
+ * @param xQueue A handle to the queue being queried.
+ *
+ * @return The number of spaces available in the queue.
+ *
+ * \defgroup uxQueueSpacesAvailable uxQueueSpacesAvailable
+ * \ingroup QueueManagement
+ */
+UBaseType_t uxQueueSpacesAvailable(const QueueHandle_t xQueue)
+ PRIVILEGED_FUNCTION;
+
+/**
+ * queue. h
+ * <pre>void vQueueDelete( QueueHandle_t xQueue );</pre>
+ *
+ * Delete a queue - freeing all the memory allocated for storing of items
+ * placed on the queue.
+ *
+ * @param xQueue A handle to the queue to be deleted.
+ *
+ * \defgroup vQueueDelete vQueueDelete
+ * \ingroup QueueManagement
+ */
+void vQueueDelete(QueueHandle_t xQueue) PRIVILEGED_FUNCTION;
+
+/**
+ * queue. h
+ * <pre>
+ BaseType_t xQueueSendToFrontFromISR(
+ QueueHandle_t xQueue,
+ const void *pvItemToQueue,
+ BaseType_t *pxHigherPriorityTaskWoken
+ );
+ </pre>
+ *
+ * This is a macro that calls xQueueGenericSendFromISR().
+ *
+ * Post an item to the front of a queue. It is safe to use this macro from
+ * within an interrupt service routine.
+ *
+ * Items are queued by copy not reference so it is preferable to only
+ * queue small items, especially when called from an ISR. In most cases
+ * it would be preferable to store a pointer to the item being queued.
+ *
+ * @param xQueue The handle to the queue on which the item is to be posted.
+ *
+ * @param pvItemToQueue A pointer to the item that is to be placed on the
+ * queue. The size of the items the queue will hold was defined when the
+ * queue was created, so this many bytes will be copied from pvItemToQueue
+ * into the queue storage area.
+ *
+ * @param pxHigherPriorityTaskWoken xQueueSendToFrontFromISR() will set
+ * *pxHigherPriorityTaskWoken to pdTRUE if sending to the queue caused a task
+ * to unblock, and the unblocked task has a priority higher than the currently
+ * running task. If xQueueSendToFrontFromISR() sets this value to pdTRUE then
+ * a context switch should be requested before the interrupt is exited.
+ *
+ * @return pdTRUE if the data was successfully sent to the queue, otherwise
+ * errQUEUE_FULL.
+ *
+ * Example usage for buffered IO (where the ISR can obtain more than one value
+ * per call):
+ <pre>
+ void vBufferISR( void )
+ {
+ char cIn;
+ BaseType_t xHigherPriorityTaskWoken;
+
+ // We have not woken a task at the start of the ISR.
+ xHigherPriorityTaskWoken = pdFALSE;
+
+ // Loop until the buffer is empty.
+ do
+ {
+ // Obtain a byte from the buffer.
+ cIn = portINPUT_BYTE( RX_REGISTER_ADDRESS );
+
+ // Post the byte.
+ xQueueSendToFrontFromISR( xRxQueue, &cIn, &xHigherPriorityTaskWoken );
+
+ } while( portINPUT_BYTE( BUFFER_COUNT ) );
+
+ // Now the buffer is empty we can switch context if necessary.
+ if( xHigherPriorityTaskWoken )
+ {
+ taskYIELD ();
+ }
+ }
+ </pre>
+ *
+ * \defgroup xQueueSendFromISR xQueueSendFromISR
+ * \ingroup QueueManagement
+ */
+/* Thin wrapper: route through the generic ISR send, pinned to the front. */
+#define xQueueSendToFrontFromISR(xQueue, pvItemToQueue, pxHigherPriorityTaskWoken) \
+    xQueueGenericSendFromISR( \
+        (xQueue), (pvItemToQueue), (pxHigherPriorityTaskWoken), queueSEND_TO_FRONT)
+
+/**
+ * queue. h
+ * <pre>
+ BaseType_t xQueueSendToBackFromISR(
+ QueueHandle_t xQueue,
+ const void *pvItemToQueue,
+ BaseType_t *pxHigherPriorityTaskWoken
+ );
+ </pre>
+ *
+ * This is a macro that calls xQueueGenericSendFromISR().
+ *
+ * Post an item to the back of a queue. It is safe to use this macro from
+ * within an interrupt service routine.
+ *
+ * Items are queued by copy not reference so it is preferable to only
+ * queue small items, especially when called from an ISR. In most cases
+ * it would be preferable to store a pointer to the item being queued.
+ *
+ * @param xQueue The handle to the queue on which the item is to be posted.
+ *
+ * @param pvItemToQueue A pointer to the item that is to be placed on the
+ * queue. The size of the items the queue will hold was defined when the
+ * queue was created, so this many bytes will be copied from pvItemToQueue
+ * into the queue storage area.
+ *
+ * @param pxHigherPriorityTaskWoken xQueueSendToBackFromISR() will set
+ * *pxHigherPriorityTaskWoken to pdTRUE if sending to the queue caused a task
+ * to unblock, and the unblocked task has a priority higher than the currently
+ * running task. If xQueueSendToBackFromISR() sets this value to pdTRUE then
+ * a context switch should be requested before the interrupt is exited.
+ *
+ * @return pdTRUE if the data was successfully sent to the queue, otherwise
+ * errQUEUE_FULL.
+ *
+ * Example usage for buffered IO (where the ISR can obtain more than one value
+ * per call):
+ <pre>
+ void vBufferISR( void )
+ {
+ char cIn;
+ BaseType_t xHigherPriorityTaskWoken;
+
+ // We have not woken a task at the start of the ISR.
+ xHigherPriorityTaskWoken = pdFALSE;
+
+ // Loop until the buffer is empty.
+ do
+ {
+ // Obtain a byte from the buffer.
+ cIn = portINPUT_BYTE( RX_REGISTER_ADDRESS );
+
+ // Post the byte.
+ xQueueSendToBackFromISR( xRxQueue, &cIn, &xHigherPriorityTaskWoken );
+
+ } while( portINPUT_BYTE( BUFFER_COUNT ) );
+
+ // Now the buffer is empty we can switch context if necessary.
+ if( xHigherPriorityTaskWoken )
+ {
+ taskYIELD ();
+ }
+ }
+ </pre>
+ *
+ * \defgroup xQueueSendFromISR xQueueSendFromISR
+ * \ingroup QueueManagement
+ */
+/* Thin wrapper: route through the generic ISR send, pinned to the back. */
+#define xQueueSendToBackFromISR(xQueue, pvItemToQueue, pxHigherPriorityTaskWoken) \
+    xQueueGenericSendFromISR( \
+        (xQueue), (pvItemToQueue), (pxHigherPriorityTaskWoken), queueSEND_TO_BACK)
+
+/**
+ * queue. h
+ * <pre>
+ BaseType_t xQueueOverwriteFromISR(
+ QueueHandle_t xQueue,
+ const void * pvItemToQueue,
+ BaseType_t *pxHigherPriorityTaskWoken
+ );
+ * </pre>
+ *
+ * A version of xQueueOverwrite() that can be used in an interrupt service
+ * routine (ISR).
+ *
+ * Only for use with queues that can hold a single item - so the queue is either
+ * empty or full.
+ *
+ * Post an item on a queue. If the queue is already full then overwrite the
+ * value held in the queue. The item is queued by copy, not by reference.
+ *
+ * @param xQueue The handle to the queue on which the item is to be posted.
+ *
+ * @param pvItemToQueue A pointer to the item that is to be placed on the
+ * queue. The size of the items the queue will hold was defined when the
+ * queue was created, so this many bytes will be copied from pvItemToQueue
+ * into the queue storage area.
+ *
+ * @param pxHigherPriorityTaskWoken xQueueOverwriteFromISR() will set
+ * *pxHigherPriorityTaskWoken to pdTRUE if sending to the queue caused a task
+ * to unblock, and the unblocked task has a priority higher than the currently
+ * running task. If xQueueOverwriteFromISR() sets this value to pdTRUE then
+ * a context switch should be requested before the interrupt is exited.
+ *
+ * @return xQueueOverwriteFromISR() is a macro that calls
+ * xQueueGenericSendFromISR(), and therefore has the same return values as
+ * xQueueSendToFrontFromISR(). However, pdPASS is the only value that can be
+ * returned because xQueueOverwriteFromISR() will write to the queue even when
+ * the queue is already full.
+ *
+ * Example usage:
+ <pre>
+
+ QueueHandle_t xQueue;
+
+ void vFunction( void *pvParameters )
+ {
+ // Create a queue to hold one uint32_t value. It is strongly
+ // recommended *not* to use xQueueOverwriteFromISR() on queues that can
+ // contain more than one value, and doing so will trigger an assertion
+ // if configASSERT() is defined.
+ xQueue = xQueueCreate( 1, sizeof( uint32_t ) );
+}
+
+void vAnInterruptHandler( void )
+{
+// xHigherPriorityTaskWoken must be set to pdFALSE before it is used.
+BaseType_t xHigherPriorityTaskWoken = pdFALSE;
+uint32_t ulVarToSend, ulValReceived;
+
+ // Write the value 10 to the queue using xQueueOverwriteFromISR().
+ ulVarToSend = 10;
+ xQueueOverwriteFromISR( xQueue, &ulVarToSend, &xHigherPriorityTaskWoken );
+
+ // The queue is full, but calling xQueueOverwriteFromISR() again will still
+ // pass because the value held in the queue will be overwritten with the
+ // new value.
+ ulVarToSend = 100;
+ xQueueOverwriteFromISR( xQueue, &ulVarToSend, &xHigherPriorityTaskWoken );
+
+ // Reading from the queue will now return 100.
+
+ // ...
+
+ if( xHigherPriorityTaskWoken == pdTRUE )
+ {
+ // Writing to the queue caused a task to unblock and the unblocked task
+ // has a priority higher than or equal to the priority of the currently
+ // executing task (the task this interrupt interrupted). Perform a context
+ // switch so this interrupt returns directly to the unblocked task.
+ portYIELD_FROM_ISR(); // or portEND_SWITCHING_ISR() depending on the port.
+ }
+}
+ </pre>
+ * \defgroup xQueueOverwriteFromISR xQueueOverwriteFromISR
+ * \ingroup QueueManagement
+ */
+/* Thin wrapper: generic ISR send in overwrite mode (single-item queues). */
+#define xQueueOverwriteFromISR(xQueue, pvItemToQueue, pxHigherPriorityTaskWoken) \
+    xQueueGenericSendFromISR( \
+        (xQueue), (pvItemToQueue), (pxHigherPriorityTaskWoken), queueOVERWRITE)
+
+/**
+ * queue. h
+ * <pre>
+ BaseType_t xQueueSendFromISR(
+ QueueHandle_t xQueue,
+ const void *pvItemToQueue,
+ BaseType_t *pxHigherPriorityTaskWoken
+ );
+ </pre>
+ *
+ * This is a macro that calls xQueueGenericSendFromISR(). It is included
+ * for backward compatibility with versions of FreeRTOS.org that did not
+ * include the xQueueSendToBackFromISR() and xQueueSendToFrontFromISR()
+ * macros.
+ *
+ * Post an item to the back of a queue. It is safe to use this function from
+ * within an interrupt service routine.
+ *
+ * Items are queued by copy not reference so it is preferable to only
+ * queue small items, especially when called from an ISR. In most cases
+ * it would be preferable to store a pointer to the item being queued.
+ *
+ * @param xQueue The handle to the queue on which the item is to be posted.
+ *
+ * @param pvItemToQueue A pointer to the item that is to be placed on the
+ * queue. The size of the items the queue will hold was defined when the
+ * queue was created, so this many bytes will be copied from pvItemToQueue
+ * into the queue storage area.
+ *
+ * @param pxHigherPriorityTaskWoken xQueueSendFromISR() will set
+ * *pxHigherPriorityTaskWoken to pdTRUE if sending to the queue caused a task
+ * to unblock, and the unblocked task has a priority higher than the currently
+ * running task. If xQueueSendFromISR() sets this value to pdTRUE then
+ * a context switch should be requested before the interrupt is exited.
+ *
+ * @return pdTRUE if the data was successfully sent to the queue, otherwise
+ * errQUEUE_FULL.
+ *
+ * Example usage for buffered IO (where the ISR can obtain more than one value
+ * per call):
+ <pre>
+ void vBufferISR( void )
+ {
+ char cIn;
+ BaseType_t xHigherPriorityTaskWoken;
+
+ // We have not woken a task at the start of the ISR.
+ xHigherPriorityTaskWoken = pdFALSE;
+
+ // Loop until the buffer is empty.
+ do
+ {
+ // Obtain a byte from the buffer.
+ cIn = portINPUT_BYTE( RX_REGISTER_ADDRESS );
+
+ // Post the byte.
+ xQueueSendFromISR( xRxQueue, &cIn, &xHigherPriorityTaskWoken );
+
+ } while( portINPUT_BYTE( BUFFER_COUNT ) );
+
+ // Now the buffer is empty we can switch context if necessary.
+ if( xHigherPriorityTaskWoken )
+ {
+ // Actual macro used here is port specific.
+ portYIELD_FROM_ISR ();
+ }
+ }
+ </pre>
+ *
+ * \defgroup xQueueSendFromISR xQueueSendFromISR
+ * \ingroup QueueManagement
+ */
+/* Backward-compatible alias: ISR send defaults to the back of the queue. */
+#define xQueueSendFromISR(xQueue, pvItemToQueue, pxHigherPriorityTaskWoken) \
+    xQueueGenericSendFromISR( \
+        (xQueue), (pvItemToQueue), (pxHigherPriorityTaskWoken), queueSEND_TO_BACK)
+
+/**
+ * queue. h
+ * <pre>
+ BaseType_t xQueueGenericSendFromISR(
+ QueueHandle_t xQueue,
+ const void *pvItemToQueue,
+ BaseType_t
+ *pxHigherPriorityTaskWoken, BaseType_t xCopyPosition
+ );
+ </pre>
+ *
+ * It is preferred that the macros xQueueSendFromISR(),
+ * xQueueSendToFrontFromISR() and xQueueSendToBackFromISR() be used in place
+ * of calling this function directly. xQueueGiveFromISR() is an
+ * equivalent for use by semaphores that don't actually copy any data.
+ *
+ * Post an item on a queue. It is safe to use this function from within an
+ * interrupt service routine.
+ *
+ * Items are queued by copy not reference so it is preferable to only
+ * queue small items, especially when called from an ISR. In most cases
+ * it would be preferable to store a pointer to the item being queued.
+ *
+ * @param xQueue The handle to the queue on which the item is to be posted.
+ *
+ * @param pvItemToQueue A pointer to the item that is to be placed on the
+ * queue. The size of the items the queue will hold was defined when the
+ * queue was created, so this many bytes will be copied from pvItemToQueue
+ * into the queue storage area.
+ *
+ * @param pxHigherPriorityTaskWoken xQueueGenericSendFromISR() will set
+ * *pxHigherPriorityTaskWoken to pdTRUE if sending to the queue caused a task
+ * to unblock, and the unblocked task has a priority higher than the currently
+ * running task. If xQueueGenericSendFromISR() sets this value to pdTRUE then
+ * a context switch should be requested before the interrupt is exited.
+ *
+ * @param xCopyPosition Can take the value queueSEND_TO_BACK to place the
+ * item at the back of the queue, or queueSEND_TO_FRONT to place the item
+ * at the front of the queue (for high priority messages).
+ *
+ * @return pdTRUE if the data was successfully sent to the queue, otherwise
+ * errQUEUE_FULL.
+ *
+ * Example usage for buffered IO (where the ISR can obtain more than one value
+ * per call):
+ <pre>
+ void vBufferISR( void )
+ {
+ char cIn;
+ BaseType_t xHigherPriorityTaskWokenByPost;
+
+ // We have not woken a task at the start of the ISR.
+ xHigherPriorityTaskWokenByPost = pdFALSE;
+
+ // Loop until the buffer is empty.
+ do
+ {
+ // Obtain a byte from the buffer.
+ cIn = portINPUT_BYTE( RX_REGISTER_ADDRESS );
+
+ // Post each byte.
+ xQueueGenericSendFromISR( xRxQueue, &cIn,
+ &xHigherPriorityTaskWokenByPost, queueSEND_TO_BACK );
+
+ } while( portINPUT_BYTE( BUFFER_COUNT ) );
+
+ // Now the buffer is empty we can switch context if necessary. Note
+ // that the name of the yield function required is port specific.
+ if( xHigherPriorityTaskWokenByPost )
+ {
+ portYIELD_FROM_ISR();
+ }
+ }
+ </pre>
+ *
+ * \defgroup xQueueSendFromISR xQueueSendFromISR
+ * \ingroup QueueManagement
+ */
+BaseType_t xQueueGenericSendFromISR(
+ QueueHandle_t xQueue,
+ const void *const pvItemToQueue,
+ BaseType_t *const pxHigherPriorityTaskWoken,
+ const BaseType_t xCopyPosition) PRIVILEGED_FUNCTION;
+BaseType_t xQueueGiveFromISR(
+ QueueHandle_t xQueue,
+ BaseType_t *const pxHigherPriorityTaskWoken) PRIVILEGED_FUNCTION;
+
+/**
+ * queue. h
+ * <pre>
+ BaseType_t xQueueReceiveFromISR(
+ QueueHandle_t xQueue,
+ void *pvBuffer,
+ BaseType_t *pxTaskWoken
+ );
+ * </pre>
+ *
+ * Receive an item from a queue. It is safe to use this function from within an
+ * interrupt service routine.
+ *
+ * @param xQueue The handle to the queue from which the item is to be
+ * received.
+ *
+ * @param pvBuffer Pointer to the buffer into which the received item will
+ * be copied.
+ *
+ * @param pxTaskWoken A task may be blocked waiting for space to become
+ * available on the queue. If xQueueReceiveFromISR causes such a task to
+ * unblock *pxTaskWoken will get set to pdTRUE, otherwise *pxTaskWoken will
+ * remain unchanged.
+ *
+ * @return pdTRUE if an item was successfully received from the queue,
+ * otherwise pdFALSE.
+ *
+ * Example usage:
+ <pre>
+
+ QueueHandle_t xQueue;
+
+ // Function to create a queue and post some values.
+ void vAFunction( void *pvParameters )
+ {
+ char cValueToPost;
+ const TickType_t xTicksToWait = ( TickType_t )0xff;
+
+ // Create a queue capable of containing 10 characters.
+ xQueue = xQueueCreate( 10, sizeof( char ) );
+ if( xQueue == 0 )
+ {
+ // Failed to create the queue.
+ }
+
+ // ...
+
+ // Post some characters that will be used within an ISR. If the queue
+ // is full then this task will block for xTicksToWait ticks.
+ cValueToPost = 'a';
+ xQueueSend( xQueue, ( void * ) &cValueToPost, xTicksToWait );
+ cValueToPost = 'b';
+ xQueueSend( xQueue, ( void * ) &cValueToPost, xTicksToWait );
+
+ // ... keep posting characters ... this task may block when the queue
+ // becomes full.
+
+ cValueToPost = 'c';
+ xQueueSend( xQueue, ( void * ) &cValueToPost, xTicksToWait );
+ }
+
+ // ISR that outputs all the characters received on the queue.
+ void vISR_Routine( void )
+ {
+ BaseType_t xTaskWokenByReceive = pdFALSE;
+ char cRxedChar;
+
+ while( xQueueReceiveFromISR( xQueue, ( void * ) &cRxedChar,
+ &xTaskWokenByReceive) )
+ {
+ // A character was received. Output the character now.
+ vOutputCharacter( cRxedChar );
+
+ // If removing the character from the queue woke the task that was
+ // posting onto the queue xTaskWokenByReceive will have been set to
+ // pdTRUE. No matter how many times this loop iterates only one
+ // task will be woken.
+ }
+
+ if( xTaskWokenByReceive != pdFALSE )
+ {
+ taskYIELD ();
+ }
+ }
+ </pre>
+ * \defgroup xQueueReceiveFromISR xQueueReceiveFromISR
+ * \ingroup QueueManagement
+ */
+BaseType_t xQueueReceiveFromISR(
+ QueueHandle_t xQueue,
+ void *const pvBuffer,
+ BaseType_t *const pxHigherPriorityTaskWoken) PRIVILEGED_FUNCTION;
+
+/*
+ * Utilities to query queues that are safe to use from an ISR. These utilities
+ * should be used only from within an ISR, or within a critical section.
+ */
+BaseType_t xQueueIsQueueEmptyFromISR(const QueueHandle_t xQueue)
+ PRIVILEGED_FUNCTION;
+BaseType_t xQueueIsQueueFullFromISR(const QueueHandle_t xQueue)
+ PRIVILEGED_FUNCTION;
+UBaseType_t uxQueueMessagesWaitingFromISR(const QueueHandle_t xQueue)
+ PRIVILEGED_FUNCTION;
+
+/*
+ * The functions defined above are for passing data to and from tasks. The
+ * functions below are the equivalents for passing data to and from
+ * co-routines.
+ *
+ * These functions are called from the co-routine macro implementation and
+ * should not be called directly from application code. Instead use the macro
+ * wrappers defined within croutine.h.
+ */
+BaseType_t xQueueCRSendFromISR(
+ QueueHandle_t xQueue,
+ const void *pvItemToQueue,
+ BaseType_t xCoRoutinePreviouslyWoken);
+BaseType_t xQueueCRReceiveFromISR(
+ QueueHandle_t xQueue,
+ void *pvBuffer,
+ BaseType_t *pxTaskWoken);
+BaseType_t xQueueCRSend(
+ QueueHandle_t xQueue,
+ const void *pvItemToQueue,
+ TickType_t xTicksToWait);
+BaseType_t xQueueCRReceive(
+ QueueHandle_t xQueue,
+ void *pvBuffer,
+ TickType_t xTicksToWait);
+
+/*
+ * For internal use only. Use xSemaphoreCreateMutex(),
+ * xSemaphoreCreateCounting() or xSemaphoreGetMutexHolder() instead of calling
+ * these functions directly.
+ */
+QueueHandle_t xQueueCreateMutex(const uint8_t ucQueueType) PRIVILEGED_FUNCTION;
+QueueHandle_t xQueueCreateMutexStatic(
+ const uint8_t ucQueueType,
+ StaticQueue_t *pxStaticQueue) PRIVILEGED_FUNCTION;
+QueueHandle_t xQueueCreateCountingSemaphore(
+ const UBaseType_t uxMaxCount,
+ const UBaseType_t uxInitialCount) PRIVILEGED_FUNCTION;
+QueueHandle_t xQueueCreateCountingSemaphoreStatic(
+ const UBaseType_t uxMaxCount,
+ const UBaseType_t uxInitialCount,
+ StaticQueue_t *pxStaticQueue) PRIVILEGED_FUNCTION;
+BaseType_t xQueueSemaphoreTake(QueueHandle_t xQueue, TickType_t xTicksToWait)
+ PRIVILEGED_FUNCTION;
+TaskHandle_t xQueueGetMutexHolder(QueueHandle_t xSemaphore) PRIVILEGED_FUNCTION;
+TaskHandle_t xQueueGetMutexHolderFromISR(QueueHandle_t xSemaphore)
+ PRIVILEGED_FUNCTION;
+
+/*
+ * For internal use only. Use xSemaphoreTakeMutexRecursive() or
+ * xSemaphoreGiveMutexRecursive() instead of calling these functions directly.
+ */
+BaseType_t xQueueTakeMutexRecursive(
+ QueueHandle_t xMutex,
+ TickType_t xTicksToWait) PRIVILEGED_FUNCTION;
+BaseType_t xQueueGiveMutexRecursive(QueueHandle_t xMutex) PRIVILEGED_FUNCTION;
+
+/*
+ * Reset a queue back to its original empty state. The return value is now
+ * obsolete and is always set to pdPASS.
+ */
+#define xQueueReset(xQueue) xQueueGenericReset((xQueue), pdFALSE)
+
+/*
+ * The registry is provided as a means for kernel aware debuggers to
+ * locate queues, semaphores and mutexes. Call vQueueAddToRegistry() add
+ * a queue, semaphore or mutex handle to the registry if you want the handle
+ * to be available to a kernel aware debugger. If you are not using a kernel
+ * aware debugger then this function can be ignored.
+ *
+ * configQUEUE_REGISTRY_SIZE defines the maximum number of handles the
+ * registry can hold. configQUEUE_REGISTRY_SIZE must be greater than 0
+ * within FreeRTOSConfig.h for the registry to be available. Its value
+ * does not affect the number of queues, semaphores and mutexes that can be
+ * created - just the number that the registry can hold.
+ *
+ * @param xQueue The handle of the queue being added to the registry. This
+ * is the handle returned by a call to xQueueCreate(). Semaphore and mutex
+ * handles can also be passed in here.
+ *
+ * @param pcName The name to be associated with the handle. This is the
+ * name that the kernel aware debugger will display. The queue registry only
+ * stores a pointer to the string - so the string must be persistent (global or
+ * preferably in ROM/Flash), not on the stack.
+ */
+#if (configQUEUE_REGISTRY_SIZE > 0)
+void vQueueAddToRegistry(QueueHandle_t xQueue, const char *pcQueueName)
+ PRIVILEGED_FUNCTION; /*lint !e971 Unqualified char types are allowed for
+ strings and single characters only. */
+#endif
+
+/*
+ * The registry is provided as a means for kernel aware debuggers to
+ * locate queues, semaphores and mutexes. Call vQueueAddToRegistry() add
+ * a queue, semaphore or mutex handle to the registry if you want the handle
+ * to be available to a kernel aware debugger, and vQueueUnregisterQueue() to
+ * remove the queue, semaphore or mutex from the register. If you are not using
+ * a kernel aware debugger then this function can be ignored.
+ *
+ * @param xQueue The handle of the queue being removed from the registry.
+ */
+#if (configQUEUE_REGISTRY_SIZE > 0)
+void vQueueUnregisterQueue(QueueHandle_t xQueue) PRIVILEGED_FUNCTION;
+#endif
+
+/*
+ * The queue registry is provided as a means for kernel aware debuggers to
+ * locate queues, semaphores and mutexes. Call pcQueueGetName() to look
+ * up and return the name of a queue in the queue registry from the queue's
+ * handle.
+ *
+ * @param xQueue The handle of the queue the name of which will be returned.
+ * @return If the queue is in the registry then a pointer to the name of the
+ * queue is returned. If the queue is not in the registry then NULL is
+ * returned.
+ */
+#if (configQUEUE_REGISTRY_SIZE > 0)
+const char *pcQueueGetName(QueueHandle_t xQueue)
+ PRIVILEGED_FUNCTION; /*lint !e971 Unqualified char types are allowed for
+ strings and single characters only. */
+#endif
+
+/*
+ * Generic version of the function used to create a queue using dynamic memory
+ * allocation. This is called by other functions and macros that create other
+ * RTOS objects that use the queue structure as their base.
+ */
+#if (configSUPPORT_DYNAMIC_ALLOCATION == 1)
+QueueHandle_t xQueueGenericCreate(
+ const UBaseType_t uxQueueLength,
+ const UBaseType_t uxItemSize,
+ const uint8_t ucQueueType) PRIVILEGED_FUNCTION;
+#endif
+
+/*
+ * Generic version of the function used to create a queue using static memory
+ * allocation. This is called by other functions and macros that create other
+ * RTOS objects that use the queue structure as their base.
+ */
+#if (configSUPPORT_STATIC_ALLOCATION == 1)
+QueueHandle_t xQueueGenericCreateStatic(
+ const UBaseType_t uxQueueLength,
+ const UBaseType_t uxItemSize,
+ uint8_t *pucQueueStorage,
+ StaticQueue_t *pxStaticQueue,
+ const uint8_t ucQueueType) PRIVILEGED_FUNCTION;
+#endif
+
+/*
+ * Queue sets provide a mechanism to allow a task to block (pend) on a read
+ * operation from multiple queues or semaphores simultaneously.
+ *
+ * See FreeRTOS/Source/Demo/Common/Minimal/QueueSet.c for an example using this
+ * function.
+ *
+ * A queue set must be explicitly created using a call to xQueueCreateSet()
+ * before it can be used. Once created, standard FreeRTOS queues and semaphores
+ * can be added to the set using calls to xQueueAddToSet().
+ * xQueueSelectFromSet() is then used to determine which, if any, of the queues
+ * or semaphores contained in the set is in a state where a queue read or
+ * semaphore take operation would be successful.
+ *
+ * Note 1: See the documentation on http://www.FreeRTOS.org/RTOS-queue-sets.html
+ * for reasons why queue sets are very rarely needed in practice as there are
+ * simpler methods of blocking on multiple objects.
+ *
+ * Note 2: Blocking on a queue set that contains a mutex will not cause the
+ * mutex holder to inherit the priority of the blocked task.
+ *
+ * Note 3: An additional 4 bytes of RAM is required for each space in every
+ * queue added to a queue set. Therefore counting semaphores that have a high
+ * maximum count value should not be added to a queue set.
+ *
+ * Note 4: A receive (in the case of a queue) or take (in the case of a
+ * semaphore) operation must not be performed on a member of a queue set unless
+ * a call to xQueueSelectFromSet() has first returned a handle to that set
+ * member.
+ *
+ * @param uxEventQueueLength Queue sets store events that occur on
+ * the queues and semaphores contained in the set. uxEventQueueLength specifies
+ * the maximum number of events that can be queued at once. To be absolutely
+ * certain that events are not lost uxEventQueueLength should be set to the
+ * total sum of the length of the queues added to the set, where binary
+ * semaphores and mutexes have a length of 1, and counting semaphores have a
+ * length set by their maximum count value. Examples:
+ * + If a queue set is to hold a queue of length 5, another queue of length 12,
+ * and a binary semaphore, then uxEventQueueLength should be set to
+ * (5 + 12 + 1), or 18.
+ * + If a queue set is to hold three binary semaphores then uxEventQueueLength
+ * should be set to (1 + 1 + 1 ), or 3.
+ * + If a queue set is to hold a counting semaphore that has a maximum count of
+ * 5, and a counting semaphore that has a maximum count of 3, then
+ * uxEventQueueLength should be set to (5 + 3), or 8.
+ *
+ * @return If the queue set is created successfully then a handle to the created
+ * queue set is returned. Otherwise NULL is returned.
+ */
+QueueSetHandle_t xQueueCreateSet(const UBaseType_t uxEventQueueLength)
+ PRIVILEGED_FUNCTION;
+
+/*
+ * Adds a queue or semaphore to a queue set that was previously created by a
+ * call to xQueueCreateSet().
+ *
+ * See FreeRTOS/Source/Demo/Common/Minimal/QueueSet.c for an example using this
+ * function.
+ *
+ * Note 1: A receive (in the case of a queue) or take (in the case of a
+ * semaphore) operation must not be performed on a member of a queue set unless
+ * a call to xQueueSelectFromSet() has first returned a handle to that set
+ * member.
+ *
+ * @param xQueueOrSemaphore The handle of the queue or semaphore being added to
+ * the queue set (cast to an QueueSetMemberHandle_t type).
+ *
+ * @param xQueueSet The handle of the queue set to which the queue or semaphore
+ * is being added.
+ *
+ * @return If the queue or semaphore was successfully added to the queue set
+ * then pdPASS is returned. If the queue could not be successfully added to the
+ * queue set because it is already a member of a different queue set then pdFAIL
+ * is returned.
+ */
+BaseType_t xQueueAddToSet(
+ QueueSetMemberHandle_t xQueueOrSemaphore,
+ QueueSetHandle_t xQueueSet) PRIVILEGED_FUNCTION;
+
+/*
+ * Removes a queue or semaphore from a queue set. A queue or semaphore can only
+ * be removed from a set if the queue or semaphore is empty.
+ *
+ * See FreeRTOS/Source/Demo/Common/Minimal/QueueSet.c for an example using this
+ * function.
+ *
+ * @param xQueueOrSemaphore The handle of the queue or semaphore being removed
+ * from the queue set (cast to an QueueSetMemberHandle_t type).
+ *
+ * @param xQueueSet The handle of the queue set in which the queue or semaphore
+ * is included.
+ *
+ * @return If the queue or semaphore was successfully removed from the queue set
+ * then pdPASS is returned. If the queue was not in the queue set, or the
+ * queue (or semaphore) was not empty, then pdFAIL is returned.
+ */
+BaseType_t xQueueRemoveFromSet(
+ QueueSetMemberHandle_t xQueueOrSemaphore,
+ QueueSetHandle_t xQueueSet) PRIVILEGED_FUNCTION;
+
+/*
+ * xQueueSelectFromSet() selects from the members of a queue set a queue or
+ * semaphore that either contains data (in the case of a queue) or is available
+ * to take (in the case of a semaphore). xQueueSelectFromSet() effectively
+ * allows a task to block (pend) on a read operation on all the queues and
+ * semaphores in a queue set simultaneously.
+ *
+ * See FreeRTOS/Source/Demo/Common/Minimal/QueueSet.c for an example using this
+ * function.
+ *
+ * Note 1: See the documentation on http://www.FreeRTOS.org/RTOS-queue-sets.html
+ * for reasons why queue sets are very rarely needed in practice as there are
+ * simpler methods of blocking on multiple objects.
+ *
+ * Note 2: Blocking on a queue set that contains a mutex will not cause the
+ * mutex holder to inherit the priority of the blocked task.
+ *
+ * Note 3: A receive (in the case of a queue) or take (in the case of a
+ * semaphore) operation must not be performed on a member of a queue set unless
+ * a call to xQueueSelectFromSet() has first returned a handle to that set
+ * member.
+ *
+ * @param xQueueSet The queue set on which the task will (potentially) block.
+ *
+ * @param xTicksToWait The maximum time, in ticks, that the calling task will
+ * remain in the Blocked state (with other tasks executing) to wait for a member
+ * of the queue set to be ready for a successful queue read or semaphore take
+ * operation.
+ *
+ * @return xQueueSelectFromSet() will return the handle of a queue (cast to
+ * a QueueSetMemberHandle_t type) contained in the queue set that contains data,
+ * or the handle of a semaphore (cast to a QueueSetMemberHandle_t type)
+ * contained in the queue set that is available, or NULL if no such queue or
+ * semaphore exists before the specified block time expires.
+ */
+QueueSetMemberHandle_t xQueueSelectFromSet(
+ QueueSetHandle_t xQueueSet,
+ const TickType_t xTicksToWait) PRIVILEGED_FUNCTION;
+
+/*
+ * A version of xQueueSelectFromSet() that can be used from an ISR.
+ */
+QueueSetMemberHandle_t xQueueSelectFromSetFromISR(QueueSetHandle_t xQueueSet)
+ PRIVILEGED_FUNCTION;
+
+/* Not public API functions. */
+void vQueueWaitForMessageRestricted(
+ QueueHandle_t xQueue,
+ TickType_t xTicksToWait,
+ const BaseType_t xWaitIndefinitely) PRIVILEGED_FUNCTION;
+BaseType_t xQueueGenericReset(QueueHandle_t xQueue, BaseType_t xNewQueue)
+ PRIVILEGED_FUNCTION;
+void vQueueSetQueueNumber(QueueHandle_t xQueue, UBaseType_t uxQueueNumber)
+ PRIVILEGED_FUNCTION;
+UBaseType_t uxQueueGetQueueNumber(QueueHandle_t xQueue) PRIVILEGED_FUNCTION;
+uint8_t ucQueueGetQueueType(QueueHandle_t xQueue) PRIVILEGED_FUNCTION;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* QUEUE_H */
diff --git a/product/rcar/src/CMSIS-FreeRTOS/Source/include/stack_macros.h b/product/rcar/src/CMSIS-FreeRTOS/Source/include/stack_macros.h
new file mode 100644
index 00000000..30334846
--- /dev/null
+++ b/product/rcar/src/CMSIS-FreeRTOS/Source/include/stack_macros.h
@@ -0,0 +1,133 @@
+/*
+ * FreeRTOS Kernel V10.3.1
+ * Copyright (C) 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * http://www.FreeRTOS.org
+ * http://aws.amazon.com/freertos
+ *
+ * 1 tab == 4 spaces!
+ */
+
+#ifndef STACK_MACROS_H
+#define STACK_MACROS_H
+
+/*
+ * Call the stack overflow hook function if the stack of the task being swapped
+ * out is currently overflowed, or looks like it might have overflowed in the
+ * past.
+ *
+ * Setting configCHECK_FOR_STACK_OVERFLOW to 1 will cause the macro to check
+ * the current stack state only - comparing the current top of stack value to
+ * the stack limit. Setting configCHECK_FOR_STACK_OVERFLOW to greater than 1
+ * will also cause the last few stack bytes to be checked to ensure the value
+ * to which the bytes were set when the task was created have not been
+ * overwritten. Note this second test does not guarantee that an overflowed
+ * stack will always be recognised.
+ */
+
+/*-----------------------------------------------------------*/
+
+#if ((configCHECK_FOR_STACK_OVERFLOW == 1) && (portSTACK_GROWTH < 0))
+
+/* Only the current stack state is to be checked. */
+# define taskCHECK_FOR_STACK_OVERFLOW() \
+ { \
+ /* Is the currently saved stack pointer within the stack limit? */ \
+ if (pxCurrentTCB->pxTopOfStack <= pxCurrentTCB->pxStack) { \
+ vApplicationStackOverflowHook( \
+ (TaskHandle_t)pxCurrentTCB, pxCurrentTCB->pcTaskName); \
+ } \
+ }
+
+#endif /* configCHECK_FOR_STACK_OVERFLOW == 1 */
+/*-----------------------------------------------------------*/
+
+#if ((configCHECK_FOR_STACK_OVERFLOW == 1) && (portSTACK_GROWTH > 0))
+
+/* Only the current stack state is to be checked. */
+# define taskCHECK_FOR_STACK_OVERFLOW() \
+ { \
+ /* Is the currently saved stack pointer within the stack limit? */ \
+ if (pxCurrentTCB->pxTopOfStack >= pxCurrentTCB->pxEndOfStack) { \
+ vApplicationStackOverflowHook( \
+ (TaskHandle_t)pxCurrentTCB, pxCurrentTCB->pcTaskName); \
+ } \
+ }
+
+#endif /* configCHECK_FOR_STACK_OVERFLOW == 1 */
+/*-----------------------------------------------------------*/
+
+#if ((configCHECK_FOR_STACK_OVERFLOW > 1) && (portSTACK_GROWTH < 0))
+
+# define taskCHECK_FOR_STACK_OVERFLOW() \
+ { \
+ const uint32_t *const pulStack = \
+ (uint32_t *)pxCurrentTCB->pxStack; \
+ const uint32_t ulCheckValue = (uint32_t)0xa5a5a5a5; \
+\
+ if ((pulStack[0] != ulCheckValue) || \
+ (pulStack[1] != ulCheckValue) || \
+ (pulStack[2] != ulCheckValue) || \
+ (pulStack[3] != ulCheckValue)) { \
+ vApplicationStackOverflowHook( \
+ (TaskHandle_t)pxCurrentTCB, pxCurrentTCB->pcTaskName); \
+ } \
+ }
+
+#endif /* #if( configCHECK_FOR_STACK_OVERFLOW > 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ((configCHECK_FOR_STACK_OVERFLOW > 1) && (portSTACK_GROWTH > 0))
+
+# define taskCHECK_FOR_STACK_OVERFLOW() \
+ { \
+ int8_t *pcEndOfStack = (int8_t *)pxCurrentTCB->pxEndOfStack; \
+ static const uint8_t ucExpectedStackBytes[] = { \
+ tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, \
+ tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, \
+ tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, \
+ tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, \
+ tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, \
+ tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, \
+ tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE \
+ }; \
+\
+ pcEndOfStack -= sizeof(ucExpectedStackBytes); \
+\
+ /* Has the extremity of the task stack ever been written over? */ \
+ if (memcmp( \
+ (void *)pcEndOfStack, \
+ (void *)ucExpectedStackBytes, \
+ sizeof(ucExpectedStackBytes)) != 0) { \
+ vApplicationStackOverflowHook( \
+ (TaskHandle_t)pxCurrentTCB, pxCurrentTCB->pcTaskName); \
+ } \
+ }
+
+#endif /* #if( configCHECK_FOR_STACK_OVERFLOW > 1 ) */
+/*-----------------------------------------------------------*/
+
+/* Remove stack overflow macro if not being used. */
+#ifndef taskCHECK_FOR_STACK_OVERFLOW
+# define taskCHECK_FOR_STACK_OVERFLOW()
+#endif
+
+#endif /* STACK_MACROS_H */
diff --git a/product/rcar/src/CMSIS-FreeRTOS/Source/include/task.h b/product/rcar/src/CMSIS-FreeRTOS/Source/include/task.h
new file mode 100644
index 00000000..96d912b7
--- /dev/null
+++ b/product/rcar/src/CMSIS-FreeRTOS/Source/include/task.h
@@ -0,0 +1,2729 @@
+/*
+ * FreeRTOS Kernel V10.3.1
+ * Copyright (C) 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * http://www.FreeRTOS.org
+ * http://aws.amazon.com/freertos
+ *
+ * 1 tab == 4 spaces!
+ */
+
+#ifndef INC_TASK_H
+#define INC_TASK_H
+
+#ifndef INC_FREERTOS_H
+# error \
+ "include FreeRTOS.h must appear in source files before include task.h"
+#endif
+
+#include "list.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*-----------------------------------------------------------
+ * MACROS AND DEFINITIONS
+ *----------------------------------------------------------*/
+
+#define tskKERNEL_VERSION_NUMBER "V10.3.1"
+#define tskKERNEL_VERSION_MAJOR 10
+#define tskKERNEL_VERSION_MINOR 3
+#define tskKERNEL_VERSION_BUILD 1
+
+/* MPU region parameters passed in ulParameters
+ * of MemoryRegion_t struct. */
+#define tskMPU_REGION_READ_ONLY (1UL << 0UL)
+#define tskMPU_REGION_READ_WRITE (1UL << 1UL)
+#define tskMPU_REGION_EXECUTE_NEVER (1UL << 2UL)
+#define tskMPU_REGION_NORMAL_MEMORY (1UL << 3UL)
+#define tskMPU_REGION_DEVICE_MEMORY (1UL << 4UL)
+
+/**
+ * task. h
+ *
+ * Type by which tasks are referenced. For example, a call to xTaskCreate
+ * returns (via a pointer parameter) an TaskHandle_t variable that can then
+ * be used as a parameter to vTaskDelete to delete the task.
+ *
+ * \defgroup TaskHandle_t TaskHandle_t
+ * \ingroup Tasks
+ */
+struct tskTaskControlBlock; /* The old naming convention is used to prevent
+ breaking kernel aware debuggers. */
+typedef struct tskTaskControlBlock *TaskHandle_t;
+
+/*
+ * Defines the prototype to which the application task hook function must
+ * conform.
+ */
+typedef BaseType_t (*TaskHookFunction_t)(void *);
+
+/* Task states returned by eTaskGetState. */
+typedef enum {
+ eRunning =
+ 0, /* A task is querying the state of itself, so must be running. */
+    eReady, /* The task being queried is in a ready or pending ready list. */
+ eBlocked, /* The task being queried is in the Blocked state. */
+ eSuspended, /* The task being queried is in the Suspended state, or is in
+ the Blocked state with an infinite time out. */
+ eDeleted, /* The task being queried has been deleted, but its TCB has not
+ yet been freed. */
+ eInvalid /* Used as an 'invalid state' value. */
+} eTaskState;
+
+/* Actions that can be performed when vTaskNotify() is called. */
+typedef enum {
+ eNoAction = 0, /* Notify the task without updating its notify value. */
+ eSetBits, /* Set bits in the task's notification value. */
+ eIncrement, /* Increment the task's notification value. */
+ eSetValueWithOverwrite, /* Set the task's notification value to a specific
+ value even if the previous value has not yet been
+ read by the task. */
+ eSetValueWithoutOverwrite /* Set the task's notification value if the
+ previous value has been read by the task. */
+} eNotifyAction;
+
+/*
+ * Used internally only.
+ */
+typedef struct xTIME_OUT {
+ BaseType_t xOverflowCount;
+ TickType_t xTimeOnEntering;
+} TimeOut_t;
+
+/*
+ * Defines the memory ranges allocated to the task when an MPU is used.
+ */
+typedef struct xMEMORY_REGION {
+ void *pvBaseAddress;
+ uint32_t ulLengthInBytes;
+ uint32_t ulParameters;
+} MemoryRegion_t;
+
+/*
+ * Parameters required to create an MPU protected task.
+ */
+typedef struct xTASK_PARAMETERS {
+ TaskFunction_t pvTaskCode;
+ const char *const pcName; /*lint !e971 Unqualified char types are allowed
+ for strings and single characters only. */
+ configSTACK_DEPTH_TYPE usStackDepth;
+ void *pvParameters;
+ UBaseType_t uxPriority;
+ StackType_t *puxStackBuffer;
+ MemoryRegion_t xRegions[portNUM_CONFIGURABLE_REGIONS];
+#if ((portUSING_MPU_WRAPPERS == 1) && (configSUPPORT_STATIC_ALLOCATION == 1))
+ StaticTask_t *const pxTaskBuffer;
+#endif
+} TaskParameters_t;
+
+/* Used with the uxTaskGetSystemState() function to return the state of each
+task in the system. */
+typedef struct xTASK_STATUS {
+ TaskHandle_t xHandle; /* The handle of the task to which the rest of the
+ information in the structure relates. */
+ const char *pcTaskName; /* A pointer to the task's name. This value will be invalid if the task was deleted since the structure was populated! */ /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
+ UBaseType_t xTaskNumber; /* A number unique to the task. */
+ eTaskState eCurrentState; /* The state in which the task existed when the
+ structure was populated. */
+ UBaseType_t
+ uxCurrentPriority; /* The priority at which the task was running (may be
+ inherited) when the structure was populated. */
+ UBaseType_t
+ uxBasePriority; /* The priority to which the task will return if the
+ task's current priority has been inherited to avoid
+ unbounded priority inversion when obtaining a mutex.
+ Only valid if configUSE_MUTEXES is defined as 1 in
+ FreeRTOSConfig.h. */
+ uint32_t
+ ulRunTimeCounter; /* The total run time allocated to the task so far, as
+ defined by the run time stats clock. See
+ http://www.freertos.org/rtos-run-time-stats.html.
+ Only valid when configGENERATE_RUN_TIME_STATS is
+ defined as 1 in FreeRTOSConfig.h. */
+ StackType_t *pxStackBase; /* Points to the lowest address of the task's
+ stack area. */
+ configSTACK_DEPTH_TYPE
+ usStackHighWaterMark; /* The minimum amount of stack space that has
+ remained for the task since the task was
+ created. The closer this value is to zero the
+ closer the task has come to overflowing its
+ stack. */
+} TaskStatus_t;
+
+/* Possible return values for eTaskConfirmSleepModeStatus(). */
+typedef enum {
+ eAbortSleep = 0, /* A task has been made ready or a context switch pended
+                        since portSUPPRESS_TICKS_AND_SLEEP() was called - abort
+ entering a sleep mode. */
+ eStandardSleep, /* Enter a sleep mode that will not last any longer than the
+ expected idle time. */
+ eNoTasksWaitingTimeout /* No tasks are waiting for a timeout so it is safe
+ to enter a sleep mode that can only be exited by
+ an external interrupt. */
+} eSleepModeStatus;
+
+/**
+ * Defines the priority used by the idle task. This must not be modified.
+ *
+ * \ingroup TaskUtils
+ */
+#define tskIDLE_PRIORITY ((UBaseType_t)0U)
+
+/**
+ * task. h
+ *
+ * Macro for forcing a context switch.
+ *
+ * \defgroup taskYIELD taskYIELD
+ * \ingroup SchedulerControl
+ */
+#define taskYIELD() portYIELD()
+
+/**
+ * task. h
+ *
+ * Macro to mark the start of a critical code region. Preemptive context
+ * switches cannot occur when in a critical region.
+ *
+ * NOTE: This may alter the stack (depending on the portable implementation)
+ * so must be used with care!
+ *
+ * \defgroup taskENTER_CRITICAL taskENTER_CRITICAL
+ * \ingroup SchedulerControl
+ */
+#define taskENTER_CRITICAL() portENTER_CRITICAL()
+#define taskENTER_CRITICAL_FROM_ISR() portSET_INTERRUPT_MASK_FROM_ISR()
+
+/**
+ * task. h
+ *
+ * Macro to mark the end of a critical code region. Preemptive context
+ * switches cannot occur when in a critical region.
+ *
+ * NOTE: This may alter the stack (depending on the portable implementation)
+ * so must be used with care!
+ *
+ * \defgroup taskEXIT_CRITICAL taskEXIT_CRITICAL
+ * \ingroup SchedulerControl
+ */
+#define taskEXIT_CRITICAL() portEXIT_CRITICAL()
+#define taskEXIT_CRITICAL_FROM_ISR(x) portCLEAR_INTERRUPT_MASK_FROM_ISR(x)
+/**
+ * task. h
+ *
+ * Macro to disable all maskable interrupts.
+ *
+ * \defgroup taskDISABLE_INTERRUPTS taskDISABLE_INTERRUPTS
+ * \ingroup SchedulerControl
+ */
+#define taskDISABLE_INTERRUPTS() portDISABLE_INTERRUPTS()
+
+/**
+ * task. h
+ *
+ * Macro to enable microcontroller interrupts.
+ *
+ * \defgroup taskENABLE_INTERRUPTS taskENABLE_INTERRUPTS
+ * \ingroup SchedulerControl
+ */
+#define taskENABLE_INTERRUPTS() portENABLE_INTERRUPTS()
+
+/* Definitions returned by xTaskGetSchedulerState(). taskSCHEDULER_SUSPENDED is
+0 to generate more optimal code when configASSERT() is defined as the constant
+is used in assert() statements. */
+#define taskSCHEDULER_SUSPENDED ((BaseType_t)0)
+#define taskSCHEDULER_NOT_STARTED ((BaseType_t)1)
+#define taskSCHEDULER_RUNNING ((BaseType_t)2)
+
+/*-----------------------------------------------------------
+ * TASK CREATION API
+ *----------------------------------------------------------*/
+
+/**
+ * task. h
+ *<pre>
+ BaseType_t xTaskCreate(
+ TaskFunction_t pvTaskCode,
+ const char * const pcName,
+ configSTACK_DEPTH_TYPE usStackDepth,
+ void *pvParameters,
+ UBaseType_t uxPriority,
+ TaskHandle_t *pvCreatedTask
+ );</pre>
+ *
+ * Create a new task and add it to the list of tasks that are ready to run.
+ *
+ * Internally, within the FreeRTOS implementation, tasks use two blocks of
+ * memory. The first block is used to hold the task's data structures. The
+ * second block is used by the task as its stack. If a task is created using
+ * xTaskCreate() then both blocks of memory are automatically dynamically
+ * allocated inside the xTaskCreate() function. (see
+ * http://www.freertos.org/a00111.html). If a task is created using
+ * xTaskCreateStatic() then the application writer must provide the required
+ * memory. xTaskCreateStatic() therefore allows a task to be created without
+ * using any dynamic memory allocation.
+ *
+ * See xTaskCreateStatic() for a version that does not use any dynamic memory
+ * allocation.
+ *
+ * xTaskCreate() can only be used to create a task that has unrestricted
+ * access to the entire microcontroller memory map. Systems that include MPU
+ * support can alternatively create an MPU constrained task using
+ * xTaskCreateRestricted().
+ *
+ * @param pvTaskCode Pointer to the task entry function. Tasks
+ * must be implemented to never return (i.e. continuous loop).
+ *
+ * @param pcName A descriptive name for the task. This is mainly used to
+ * facilitate debugging. Max length defined by configMAX_TASK_NAME_LEN -
+ default
+ * is 16.
+ *
+ * @param usStackDepth The size of the task stack specified as the number of
+ * variables the stack can hold - not the number of bytes. For example, if
+ * the stack is 16 bits wide and usStackDepth is defined as 100, 200 bytes
+ * will be allocated for stack storage.
+ *
+ * @param pvParameters Pointer that will be used as the parameter for the task
+ * being created.
+ *
+ * @param uxPriority The priority at which the task should run. Systems that
+ * include MPU support can optionally create tasks in a privileged (system)
+ * mode by setting bit portPRIVILEGE_BIT of the priority parameter. For
+ * example, to create a privileged task at priority 2 the uxPriority parameter
+ * should be set to ( 2 | portPRIVILEGE_BIT ).
+ *
+ * @param pvCreatedTask Used to pass back a handle by which the created task
+ * can be referenced.
+ *
+ * @return pdPASS if the task was successfully created and added to a ready
+ * list, otherwise an error code defined in the file projdefs.h
+ *
+ * Example usage:
+ <pre>
+ // Task to be created.
+ void vTaskCode( void * pvParameters )
+ {
+ for( ;; )
+ {
+ // Task code goes here.
+ }
+ }
+
+ // Function that creates a task.
+ void vOtherFunction( void )
+ {
+ static uint8_t ucParameterToPass;
+ TaskHandle_t xHandle = NULL;
+
+ // Create the task, storing the handle. Note that the passed parameter
+ ucParameterToPass
+ // must exist for the lifetime of the task, so in this case is declared
+ static. If it was just an
+ // an automatic stack variable it might no longer exist, or at least have
+ been corrupted, by the time
+ // the new task attempts to access it.
+ xTaskCreate( vTaskCode, "NAME", STACK_SIZE, &ucParameterToPass,
+ tskIDLE_PRIORITY, &xHandle ); configASSERT( xHandle );
+
+ // Use the handle to delete the task.
+ if( xHandle != NULL )
+ {
+ vTaskDelete( xHandle );
+ }
+ }
+ </pre>
+ * \defgroup xTaskCreate xTaskCreate
+ * \ingroup Tasks
+ */
+#if (configSUPPORT_DYNAMIC_ALLOCATION == 1)
+BaseType_t xTaskCreate(
+ TaskFunction_t pxTaskCode,
+ const char *const pcName, /*lint !e971 Unqualified char types are allowed
+ for strings and single characters only. */
+ const configSTACK_DEPTH_TYPE usStackDepth,
+ void *const pvParameters,
+ UBaseType_t uxPriority,
+ TaskHandle_t *const pxCreatedTask) PRIVILEGED_FUNCTION;
+#endif
+
+/**
+ * task. h
+ *<pre>
+ TaskHandle_t xTaskCreateStatic( TaskFunction_t pvTaskCode,
+ const char * const pcName,
+ uint32_t ulStackDepth,
+ void *pvParameters,
+ UBaseType_t uxPriority,
+ StackType_t *pxStackBuffer,
+ StaticTask_t *pxTaskBuffer );</pre>
+ *
+ * Create a new task and add it to the list of tasks that are ready to run.
+ *
+ * Internally, within the FreeRTOS implementation, tasks use two blocks of
+ * memory. The first block is used to hold the task's data structures. The
+ * second block is used by the task as its stack. If a task is created using
+ * xTaskCreate() then both blocks of memory are automatically dynamically
+ * allocated inside the xTaskCreate() function. (see
+ * http://www.freertos.org/a00111.html). If a task is created using
+ * xTaskCreateStatic() then the application writer must provide the required
+ * memory. xTaskCreateStatic() therefore allows a task to be created without
+ * using any dynamic memory allocation.
+ *
+ * @param pvTaskCode Pointer to the task entry function. Tasks
+ * must be implemented to never return (i.e. continuous loop).
+ *
+ * @param pcName A descriptive name for the task. This is mainly used to
+ * facilitate debugging. The maximum length of the string is defined by
+ * configMAX_TASK_NAME_LEN in FreeRTOSConfig.h.
+ *
+ * @param ulStackDepth The size of the task stack specified as the number of
+ * variables the stack can hold - not the number of bytes. For example, if
+ * the stack is 32-bits wide and ulStackDepth is defined as 100 then 400 bytes
+ * will be allocated for stack storage.
+ *
+ * @param pvParameters Pointer that will be used as the parameter for the task
+ * being created.
+ *
+ * @param uxPriority The priority at which the task will run.
+ *
+ * @param pxStackBuffer Must point to a StackType_t array that has at least
+ * ulStackDepth indexes - the array will then be used as the task's stack,
+ * removing the need for the stack to be allocated dynamically.
+ *
+ * @param pxTaskBuffer Must point to a variable of type StaticTask_t, which will
+ * then be used to hold the task's data structures, removing the need for the
+ * memory to be allocated dynamically.
+ *
+ * @return If neither pxStackBuffer or pxTaskBuffer are NULL, then the task will
+ * be created and a handle to the created task is returned. If either
+ * pxStackBuffer or pxTaskBuffer are NULL then the task will not be created and
+ * NULL is returned.
+ *
+ * Example usage:
+ <pre>
+
+ // Dimensions the buffer that the task being created will use as its stack.
+ // NOTE: This is the number of words the stack will hold, not the number of
+ // bytes. For example, if each stack item is 32-bits, and this is set to
+ 100,
+ // then 400 bytes (100 * 32-bits) will be allocated.
+ #define STACK_SIZE 200
+
+ // Structure that will hold the TCB of the task being created.
+ StaticTask_t xTaskBuffer;
+
+ // Buffer that the task being created will use as its stack. Note this is
+ // an array of StackType_t variables. The size of StackType_t is dependent
+ on
+ // the RTOS port.
+ StackType_t xStack[ STACK_SIZE ];
+
+ // Function that implements the task being created.
+ void vTaskCode( void * pvParameters )
+ {
+ // The parameter value is expected to be 1 as 1 is passed in the
+ // pvParameters value in the call to xTaskCreateStatic().
+ configASSERT( ( uint32_t ) pvParameters == 1UL );
+
+ for( ;; )
+ {
+ // Task code goes here.
+ }
+ }
+
+ // Function that creates a task.
+ void vOtherFunction( void )
+ {
+ TaskHandle_t xHandle = NULL;
+
+ // Create the task without using any dynamic memory allocation.
+ xHandle = xTaskCreateStatic(
+ vTaskCode, // Function that implements the task.
+ "NAME", // Text name for the task.
+ STACK_SIZE, // Stack size in words, not bytes.
+ ( void * ) 1, // Parameter passed into the task.
+ tskIDLE_PRIORITY,// Priority at which the task is created.
+ xStack, // Array to use as the task's stack.
+ &xTaskBuffer ); // Variable to hold the task's data
+ structure.
+
+ // puxStackBuffer and pxTaskBuffer were not NULL, so the task will have
+ // been created, and xHandle will be the task's handle. Use the handle
+ // to suspend the task.
+ vTaskSuspend( xHandle );
+ }
+ </pre>
+ * \defgroup xTaskCreateStatic xTaskCreateStatic
+ * \ingroup Tasks
+ */
+#if (configSUPPORT_STATIC_ALLOCATION == 1)
+TaskHandle_t xTaskCreateStatic(
+ TaskFunction_t pxTaskCode,
+ const char *const pcName, /*lint !e971 Unqualified char types are allowed
+ for strings and single characters only. */
+ const uint32_t ulStackDepth,
+ void *const pvParameters,
+ UBaseType_t uxPriority,
+ StackType_t *const puxStackBuffer,
+ StaticTask_t *const pxTaskBuffer) PRIVILEGED_FUNCTION;
+#endif /* configSUPPORT_STATIC_ALLOCATION */
+
+/**
+ * task. h
+ *<pre>
+ BaseType_t xTaskCreateRestricted( TaskParameters_t *pxTaskDefinition,
+TaskHandle_t *pxCreatedTask );</pre>
+ *
+ * Only available when configSUPPORT_DYNAMIC_ALLOCATION is set to 1.
+ *
+ * xTaskCreateRestricted() should only be used in systems that include an MPU
+ * implementation.
+ *
+ * Create a new task and add it to the list of tasks that are ready to run.
+ * The function parameters define the memory regions and associated access
+ * permissions allocated to the task.
+ *
+ * See xTaskCreateRestrictedStatic() for a version that does not use any
+ * dynamic memory allocation.
+ *
+ * @param pxTaskDefinition Pointer to a structure that contains a member
+ * for each of the normal xTaskCreate() parameters (see the xTaskCreate() API
+ * documentation) plus an optional stack buffer and the memory region
+ * definitions.
+ *
+ * @param pxCreatedTask Used to pass back a handle by which the created task
+ * can be referenced.
+ *
+ * @return pdPASS if the task was successfully created and added to a ready
+ * list, otherwise an error code defined in the file projdefs.h
+ *
+ * Example usage:
+ <pre>
+// Create an TaskParameters_t structure that defines the task to be created.
+static const TaskParameters_t xCheckTaskParameters =
+{
+ vATask, // pvTaskCode - the function that implements the task.
+ "ATask", // pcName - just a text name for the task to assist debugging.
+ 100, // usStackDepth - the stack size DEFINED IN WORDS.
+    NULL,    // pvParameters - passed into the task function as the function
+             // parameters.
+    ( 1UL | portPRIVILEGE_BIT ),// uxPriority - task priority, set the
+                                // portPRIVILEGE_BIT if the task should run in
+                                // a privileged state.
+    cStackBuffer,// puxStackBuffer - the buffer to be used as the task stack.
+
+ // xRegions - Allocate up to three separate memory regions for access by
+ // the task, with appropriate access permissions. Different processors have
+    // different memory alignment requirements - refer to the FreeRTOS
+    // documentation for full information.
+ {
+ // Base address Length Parameters
+ { cReadWriteArray, 32, portMPU_REGION_READ_WRITE },
+ { cReadOnlyArray, 32, portMPU_REGION_READ_ONLY },
+        { cPrivilegedOnlyAccessArray, 128,
+          portMPU_REGION_PRIVILEGED_READ_WRITE }
+ }
+};
+
+int main( void )
+{
+TaskHandle_t xHandle;
+
+ // Create a task from the const structure defined above. The task handle
+ // is requested (the second parameter is not NULL) but in this case just for
+ // demonstration purposes as its not actually used.
+ xTaskCreateRestricted( &xRegTest1Parameters, &xHandle );
+
+ // Start the scheduler.
+ vTaskStartScheduler();
+
+ // Will only get here if there was insufficient memory to create the idle
+ // and/or timer task.
+ for( ;; );
+}
+ </pre>
+ * \defgroup xTaskCreateRestricted xTaskCreateRestricted
+ * \ingroup Tasks
+ */
+#if (portUSING_MPU_WRAPPERS == 1)
+BaseType_t xTaskCreateRestricted(
+ const TaskParameters_t *const pxTaskDefinition,
+ TaskHandle_t *pxCreatedTask) PRIVILEGED_FUNCTION;
+#endif
+
+/**
+ * task. h
+ *<pre>
+ BaseType_t xTaskCreateRestrictedStatic( TaskParameters_t *pxTaskDefinition,
+TaskHandle_t *pxCreatedTask );</pre>
+ *
+ * Only available when configSUPPORT_STATIC_ALLOCATION is set to 1.
+ *
+ * xTaskCreateRestrictedStatic() should only be used in systems that include an
+ * MPU implementation.
+ *
+ * Internally, within the FreeRTOS implementation, tasks use two blocks of
+ * memory. The first block is used to hold the task's data structures. The
+ * second block is used by the task as its stack. If a task is created using
+ * xTaskCreateRestricted() then the stack is provided by the application writer,
+ * and the memory used to hold the task's data structure is automatically
+ * dynamically allocated inside the xTaskCreateRestricted() function. If a task
+ * is created using xTaskCreateRestrictedStatic() then the application writer
+ * must provide the memory used to hold the task's data structures too.
+ * xTaskCreateRestrictedStatic() therefore allows a memory protected task to be
+ * created without using any dynamic memory allocation.
+ *
+ * @param pxTaskDefinition Pointer to a structure that contains a member
+ * for each of the normal xTaskCreate() parameters (see the xTaskCreate() API
+ * documentation) plus an optional stack buffer and the memory region
+ * definitions. If configSUPPORT_STATIC_ALLOCATION is set to 1 the structure
+ * contains an additional member, which is used to point to a variable of type
+ * StaticTask_t - which is then used to hold the task's data structure.
+ *
+ * @param pxCreatedTask Used to pass back a handle by which the created task
+ * can be referenced.
+ *
+ * @return pdPASS if the task was successfully created and added to a ready
+ * list, otherwise an error code defined in the file projdefs.h
+ *
+ * Example usage:
+ <pre>
+// Create a TaskParameters_t structure that defines the task to be created.
+// The StaticTask_t variable is only included in the structure when
+// configSUPPORT_STATIC_ALLOCATION is set to 1. The PRIVILEGED_DATA macro can
+// be used to force the variable into the RTOS kernel's privileged data area.
+static PRIVILEGED_DATA StaticTask_t xTaskBuffer;
+static const TaskParameters_t xCheckTaskParameters =
+{
+ vATask, // pvTaskCode - the function that implements the task.
+ "ATask", // pcName - just a text name for the task to assist debugging.
+ 100, // usStackDepth - the stack size DEFINED IN WORDS.
+    NULL,    // pvParameters - passed into the task function as the function
+             // parameters.
+    ( 1UL | portPRIVILEGE_BIT ),// uxPriority - task priority, set the
+                                // portPRIVILEGE_BIT if the task should run in
+                                // a privileged state.
+    cStackBuffer,// puxStackBuffer - the buffer to be used as the task stack.
+
+ // xRegions - Allocate up to three separate memory regions for access by
+ // the task, with appropriate access permissions. Different processors have
+    // different memory alignment requirements - refer to the FreeRTOS
+    // documentation for full information.
+ {
+ // Base address Length Parameters
+ { cReadWriteArray, 32, portMPU_REGION_READ_WRITE },
+ { cReadOnlyArray, 32, portMPU_REGION_READ_ONLY },
+        { cPrivilegedOnlyAccessArray, 128,
+          portMPU_REGION_PRIVILEGED_READ_WRITE }
+    }, // Note the comma - xRegions is one member of the initialiser list.
+
+    &xTaskBuffer // Holds the task's data structure.
+};
+
+int main( void )
+{
+TaskHandle_t xHandle;
+
+ // Create a task from the const structure defined above. The task handle
+ // is requested (the second parameter is not NULL) but in this case just for
+ // demonstration purposes as its not actually used.
+    xTaskCreateRestrictedStatic( &xRegTest1Parameters, &xHandle );
+
+ // Start the scheduler.
+ vTaskStartScheduler();
+
+ // Will only get here if there was insufficient memory to create the idle
+ // and/or timer task.
+ for( ;; );
+}
+ </pre>
+ * \defgroup xTaskCreateRestrictedStatic xTaskCreateRestrictedStatic
+ * \ingroup Tasks
+ */
+#if ((portUSING_MPU_WRAPPERS == 1) && (configSUPPORT_STATIC_ALLOCATION == 1))
+BaseType_t xTaskCreateRestrictedStatic(
+ const TaskParameters_t *const pxTaskDefinition,
+ TaskHandle_t *pxCreatedTask) PRIVILEGED_FUNCTION;
+#endif
+
+/**
+ * task. h
+ *<pre>
+ void vTaskAllocateMPURegions( TaskHandle_t xTask, const MemoryRegion_t * const
+pxRegions );</pre>
+ *
+ * Memory regions are assigned to a restricted task when the task is created by
+ * a call to xTaskCreateRestricted(). These regions can be redefined using
+ * vTaskAllocateMPURegions().
+ *
+ * @param xTask The handle of the task being updated.
+ *
+ * @param pxRegions A pointer to a MemoryRegion_t structure that contains the
+ * new memory region definitions.
+ *
+ * Example usage:
+ <pre>
+// Define an array of MemoryRegion_t structures that configures an MPU region
+// allowing read/write access for 1024 bytes starting at the beginning of the
+// ucOneKByte array. The other two of the maximum 3 definable regions are
+// unused so set to zero.
+static const MemoryRegion_t xAltRegions[ portNUM_CONFIGURABLE_REGIONS ] =
+{
+ // Base address Length Parameters
+ { ucOneKByte, 1024, portMPU_REGION_READ_WRITE },
+ { 0, 0, 0 },
+ { 0, 0, 0 }
+};
+
+void vATask( void *pvParameters )
+{
+ // This task was created such that it has access to certain regions of
+ // memory as defined by the MPU configuration. At some point it is
+ // desired that these MPU regions are replaced with that defined in the
+ // xAltRegions const struct above. Use a call to vTaskAllocateMPURegions()
+ // for this purpose. NULL is used as the task handle to indicate that this
+ // function should modify the MPU regions of the calling task.
+ vTaskAllocateMPURegions( NULL, xAltRegions );
+
+ // Now the task can continue its function, but from this point on can only
+ // access its stack and the ucOneKByte array (unless any other statically
+ // defined or shared regions have been declared elsewhere).
+}
+ </pre>
+ * \defgroup vTaskAllocateMPURegions vTaskAllocateMPURegions
+ * \ingroup Tasks
+ */
+void vTaskAllocateMPURegions(
+ TaskHandle_t xTask,
+ const MemoryRegion_t *const pxRegions) PRIVILEGED_FUNCTION;
+
+/**
+ * task. h
+ * <pre>void vTaskDelete( TaskHandle_t xTask );</pre>
+ *
+ * INCLUDE_vTaskDelete must be defined as 1 for this function to be available.
+ * See the configuration section for more information.
+ *
+ * Remove a task from the RTOS real time kernel's management. The task being
+ * deleted will be removed from all ready, blocked, suspended and event lists.
+ *
+ * NOTE: The idle task is responsible for freeing the kernel allocated
+ * memory from tasks that have been deleted. It is therefore important that
+ * the idle task is not starved of microcontroller processing time if your
+ * application makes any calls to vTaskDelete (). Memory allocated by the
+ * task code is not automatically freed, and should be freed before the task
+ * is deleted.
+ *
+ * See the demo application file death.c for sample code that utilises
+ * vTaskDelete ().
+ *
+ * @param xTask The handle of the task to be deleted. Passing NULL will
+ * cause the calling task to be deleted.
+ *
+ * Example usage:
+ <pre>
+ void vOtherFunction( void )
+ {
+ TaskHandle_t xHandle;
+
+ // Create the task, storing the handle.
+ xTaskCreate( vTaskCode, "NAME", STACK_SIZE, NULL, tskIDLE_PRIORITY,
+ &xHandle );
+
+ // Use the handle to delete the task.
+ vTaskDelete( xHandle );
+ }
+ </pre>
+ * \defgroup vTaskDelete vTaskDelete
+ * \ingroup Tasks
+ */
+void vTaskDelete(TaskHandle_t xTaskToDelete) PRIVILEGED_FUNCTION;
+
+/*-----------------------------------------------------------
+ * TASK CONTROL API
+ *----------------------------------------------------------*/
+
+/**
+ * task. h
+ * <pre>void vTaskDelay( const TickType_t xTicksToDelay );</pre>
+ *
+ * Delay a task for a given number of ticks. The actual time that the
+ * task remains blocked depends on the tick rate. The constant
+ * portTICK_PERIOD_MS can be used to calculate real time from the tick
+ * rate - with the resolution of one tick period.
+ *
+ * INCLUDE_vTaskDelay must be defined as 1 for this function to be available.
+ * See the configuration section for more information.
+ *
+ *
+ * vTaskDelay() specifies a time at which the task wishes to unblock relative to
+ * the time at which vTaskDelay() is called. For example, specifying a block
+ * period of 100 ticks will cause the task to unblock 100 ticks after
+ * vTaskDelay() is called. vTaskDelay() does not therefore provide a good
+ * method of controlling the frequency of a periodic task as the path taken
+ * through the code, as well as other task and interrupt activity, will affect
+ * the frequency at which vTaskDelay() gets called and therefore the time at
+ * which the task next executes. See vTaskDelayUntil() for an alternative API
+ * function designed to facilitate fixed frequency execution. It does this by
+ * specifying an absolute time (rather than a relative time) at which the
+ * calling task should unblock.
+ *
+ * @param xTicksToDelay The amount of time, in tick periods, that
+ * the calling task should block.
+ *
+ * Example usage:
+
+ void vTaskFunction( void * pvParameters )
+ {
+ // Block for 500ms.
+ const TickType_t xDelay = 500 / portTICK_PERIOD_MS;
+
+ for( ;; )
+ {
+ // Simply toggle the LED every 500ms, blocking between each toggle.
+ vToggleLED();
+ vTaskDelay( xDelay );
+ }
+ }
+
+ * \defgroup vTaskDelay vTaskDelay
+ * \ingroup TaskCtrl
+ */
+void vTaskDelay(const TickType_t xTicksToDelay) PRIVILEGED_FUNCTION;
+
+/**
+ * task. h
+ * <pre>void vTaskDelayUntil( TickType_t *pxPreviousWakeTime, const TickType_t
+ xTimeIncrement );</pre>
+ *
+ * INCLUDE_vTaskDelayUntil must be defined as 1 for this function to be
+ * available. See the configuration section for more information.
+ *
+ * Delay a task until a specified time. This function can be used by periodic
+ * tasks to ensure a constant execution frequency.
+ *
+ * This function differs from vTaskDelay () in one important aspect:
+ * vTaskDelay () will cause a task to block for the specified number of ticks
+ * from the time vTaskDelay () is called. It is therefore difficult to use
+ * vTaskDelay () by itself to generate a fixed execution frequency as the time
+ * between a task starting to execute and that task calling vTaskDelay () may
+ * not be fixed [the task may take a different path through the code between
+ * calls, or may get interrupted or preempted a different number of times each
+ * time it executes].
+ *
+ * Whereas vTaskDelay () specifies a wake time relative to the time at which
+ * the function is called, vTaskDelayUntil () specifies the absolute (exact)
+ * time at which it wishes to unblock.
+ *
+ * The constant portTICK_PERIOD_MS can be used to calculate real time from the
+ * tick rate - with the resolution of one tick period.
+ *
+ * @param pxPreviousWakeTime Pointer to a variable that holds the time at which
+ * the task was last unblocked. The variable must be initialised with the
+ * current time prior to its first use (see the example below). Following this
+ * the variable is automatically updated within vTaskDelayUntil ().
+ *
+ * @param xTimeIncrement The cycle time period. The task will be unblocked at
+ * time *pxPreviousWakeTime + xTimeIncrement. Calling vTaskDelayUntil with the
+ * same xTimeIncrement parameter value will cause the task to execute with
+ * a fixed interval period.
+ *
+ * Example usage:
+ <pre>
+ // Perform an action every 10 ticks.
+ void vTaskFunction( void * pvParameters )
+ {
+ TickType_t xLastWakeTime;
+ const TickType_t xFrequency = 10;
+
+ // Initialise the xLastWakeTime variable with the current time.
+ xLastWakeTime = xTaskGetTickCount ();
+ for( ;; )
+ {
+ // Wait for the next cycle.
+ vTaskDelayUntil( &xLastWakeTime, xFrequency );
+
+ // Perform action here.
+ }
+ }
+ </pre>
+ * \defgroup vTaskDelayUntil vTaskDelayUntil
+ * \ingroup TaskCtrl
+ */
+void vTaskDelayUntil(
+ TickType_t *const pxPreviousWakeTime,
+ const TickType_t xTimeIncrement) PRIVILEGED_FUNCTION;
+
+/**
+ * task. h
+ * <pre>BaseType_t xTaskAbortDelay( TaskHandle_t xTask );</pre>
+ *
+ * INCLUDE_xTaskAbortDelay must be defined as 1 in FreeRTOSConfig.h for this
+ * function to be available.
+ *
+ * A task will enter the Blocked state when it is waiting for an event. The
+ * event it is waiting for can be a temporal event (waiting for a time), such
+ * as when vTaskDelay() is called, or an event on an object, such as when
+ * xQueueReceive() or ulTaskNotifyTake() is called. If the handle of a task
+ * that is in the Blocked state is used in a call to xTaskAbortDelay() then the
+ * task will leave the Blocked state, and return from whichever function call
+ * placed the task into the Blocked state.
+ *
+ * There is no 'FromISR' version of this function as an interrupt would need to
+ * know which object a task was blocked on in order to know which actions to
+ * take. For example, if the task was blocked on a queue the interrupt handler
+ * would then need to know if the queue was locked.
+ *
+ * @param xTask The handle of the task to remove from the Blocked state.
+ *
+ * @return If the task referenced by xTask was not in the Blocked state then
+ * pdFAIL is returned. Otherwise pdPASS is returned.
+ *
+ * \defgroup xTaskAbortDelay xTaskAbortDelay
+ * \ingroup TaskCtrl
+ */
+BaseType_t xTaskAbortDelay(TaskHandle_t xTask) PRIVILEGED_FUNCTION;
+
+/**
+ * task. h
+ * <pre>UBaseType_t uxTaskPriorityGet( const TaskHandle_t xTask );</pre>
+ *
+ * INCLUDE_uxTaskPriorityGet must be defined as 1 for this function to be
+ * available.
+ * See the configuration section for more information.
+ *
+ * Obtain the priority of any task.
+ *
+ * @param xTask Handle of the task to be queried. Passing a NULL
+ * handle results in the priority of the calling task being returned.
+ *
+ * @return The priority of xTask.
+ *
+ * Example usage:
+ <pre>
+ void vAFunction( void )
+ {
+ TaskHandle_t xHandle;
+
+ // Create a task, storing the handle.
+ xTaskCreate( vTaskCode, "NAME", STACK_SIZE, NULL, tskIDLE_PRIORITY,
+ &xHandle );
+
+ // ...
+
+ // Use the handle to obtain the priority of the created task.
+ // It was created with tskIDLE_PRIORITY, but may have changed
+ // it itself.
+ if( uxTaskPriorityGet( xHandle ) != tskIDLE_PRIORITY )
+ {
+        // The task has changed its priority.
+ }
+
+ // ...
+
+ // Is our priority higher than the created task?
+ if( uxTaskPriorityGet( xHandle ) < uxTaskPriorityGet( NULL ) )
+ {
+ // Our priority (obtained using NULL handle) is higher.
+ }
+ }
+ </pre>
+ * \defgroup uxTaskPriorityGet uxTaskPriorityGet
+ * \ingroup TaskCtrl
+ */
+UBaseType_t uxTaskPriorityGet(const TaskHandle_t xTask) PRIVILEGED_FUNCTION;
+
+/**
+ * task. h
+ * <pre>UBaseType_t uxTaskPriorityGetFromISR( const TaskHandle_t xTask );</pre>
+ *
+ * A version of uxTaskPriorityGet() that can be used from an ISR.
+ */
+UBaseType_t uxTaskPriorityGetFromISR(const TaskHandle_t xTask)
+ PRIVILEGED_FUNCTION;
+
+/**
+ * task. h
+ * <pre>eTaskState eTaskGetState( TaskHandle_t xTask );</pre>
+ *
+ * INCLUDE_eTaskGetState must be defined as 1 for this function to be available.
+ * See the configuration section for more information.
+ *
+ * Obtain the state of any task. States are encoded by the eTaskState
+ * enumerated type.
+ *
+ * @param xTask Handle of the task to be queried.
+ *
+ * @return The state of xTask at the time the function was called. Note the
+ * state of the task might change between the function being called, and the
+ * functions return value being tested by the calling task.
+ */
+eTaskState eTaskGetState(TaskHandle_t xTask) PRIVILEGED_FUNCTION;
+
+/**
+ * task. h
+ * <pre>void vTaskGetInfo( TaskHandle_t xTask, TaskStatus_t *pxTaskStatus,
+ BaseType_t xGetFreeStackSpace, eTaskState eState );</pre>
+ *
+ * configUSE_TRACE_FACILITY must be defined as 1 for this function to be
+ * available. See the configuration section for more information.
+ *
+ * Populates a TaskStatus_t structure with information about a task.
+ *
+ * @param xTask Handle of the task being queried. If xTask is NULL then
+ * information will be returned about the calling task.
+ *
+ * @param pxTaskStatus A pointer to the TaskStatus_t structure that will be
+ * filled with information about the task referenced by the handle passed using
+ * the xTask parameter.
+ *
+ * @param xGetFreeStackSpace The TaskStatus_t structure contains a member to
+ * report
+ * the stack high water mark of the task being queried. Calculating the stack
+ * high water mark takes a relatively long time, and can make the system
+ * temporarily unresponsive - so the xGetFreeStackSpace parameter is provided to
+ * allow the high water mark checking to be skipped. The high watermark value
+ * will only be written to the TaskStatus_t structure if xGetFreeStackSpace is
+ * not set to pdFALSE;
+ *
+ * @param eState The TaskStatus_t structure contains a member to report the
+ * state of the task being queried. Obtaining the task state is not as fast as
+ * a simple assignment - so the eState parameter is provided to allow the state
+ * information to be omitted from the TaskStatus_t structure. To obtain state
+ * information then set eState to eInvalid - otherwise the value passed in
+ * eState will be reported as the task state in the TaskStatus_t structure.
+ *
+ * Example usage:
+ <pre>
+ void vAFunction( void )
+ {
+ TaskHandle_t xHandle;
+ TaskStatus_t xTaskDetails;
+
+ // Obtain the handle of a task from its name.
+ xHandle = xTaskGetHandle( "Task_Name" );
+
+ // Check the handle is not NULL.
+ configASSERT( xHandle );
+
+ // Use the handle to obtain further information about the task.
+ vTaskGetInfo( xHandle,
+ &xTaskDetails,
+ pdTRUE, // Include the high water mark in xTaskDetails.
+ eInvalid ); // Include the task state in xTaskDetails.
+ }
+ </pre>
+ * \defgroup vTaskGetInfo vTaskGetInfo
+ * \ingroup TaskCtrl
+ */
+void vTaskGetInfo(
+ TaskHandle_t xTask,
+ TaskStatus_t *pxTaskStatus,
+ BaseType_t xGetFreeStackSpace,
+ eTaskState eState) PRIVILEGED_FUNCTION;
+
+/**
+ * task. h
+ * <pre>void vTaskPrioritySet( TaskHandle_t xTask, UBaseType_t uxNewPriority
+ );</pre>
+ *
+ * INCLUDE_vTaskPrioritySet must be defined as 1 for this function to be
+ * available.
+ * See the configuration section for more information.
+ *
+ * Set the priority of any task.
+ *
+ * A context switch will occur before the function returns if the priority
+ * being set is higher than the currently executing task.
+ *
+ * @param xTask Handle to the task for which the priority is being set.
+ * Passing a NULL handle results in the priority of the calling task being set.
+ *
+ * @param uxNewPriority The priority to which the task will be set.
+ *
+ * Example usage:
+ <pre>
+ void vAFunction( void )
+ {
+ TaskHandle_t xHandle;
+
+ // Create a task, storing the handle.
+ xTaskCreate( vTaskCode, "NAME", STACK_SIZE, NULL, tskIDLE_PRIORITY,
+ &xHandle );
+
+ // ...
+
+ // Use the handle to raise the priority of the created task.
+ vTaskPrioritySet( xHandle, tskIDLE_PRIORITY + 1 );
+
+ // ...
+
+ // Use a NULL handle to raise our priority to the same value.
+ vTaskPrioritySet( NULL, tskIDLE_PRIORITY + 1 );
+ }
+ </pre>
+ * \defgroup vTaskPrioritySet vTaskPrioritySet
+ * \ingroup TaskCtrl
+ */
+void vTaskPrioritySet(TaskHandle_t xTask, UBaseType_t uxNewPriority)
+ PRIVILEGED_FUNCTION;
+
+/**
+ * task. h
+ * <pre>void vTaskSuspend( TaskHandle_t xTaskToSuspend );</pre>
+ *
+ * INCLUDE_vTaskSuspend must be defined as 1 for this function to be available.
+ * See the configuration section for more information.
+ *
+ * Suspend any task. When suspended a task will never get any microcontroller
+ * processing time, no matter what its priority.
+ *
+ * Calls to vTaskSuspend are not accumulative -
+ * i.e. calling vTaskSuspend () twice on the same task still only requires one
+ * call to vTaskResume () to ready the suspended task.
+ *
+ * @param xTaskToSuspend Handle to the task being suspended. Passing a NULL
+ * handle will cause the calling task to be suspended.
+ *
+ * Example usage:
+ <pre>
+ void vAFunction( void )
+ {
+ TaskHandle_t xHandle;
+
+ // Create a task, storing the handle.
+ xTaskCreate( vTaskCode, "NAME", STACK_SIZE, NULL, tskIDLE_PRIORITY,
+ &xHandle );
+
+ // ...
+
+ // Use the handle to suspend the created task.
+ vTaskSuspend( xHandle );
+
+ // ...
+
+ // The created task will not run during this period, unless
+ // another task calls vTaskResume( xHandle ).
+
+ //...
+
+
+ // Suspend ourselves.
+ vTaskSuspend( NULL );
+
+ // We cannot get here unless another task calls vTaskResume
+ // with our handle as the parameter.
+ }
+ </pre>
+ * \defgroup vTaskSuspend vTaskSuspend
+ * \ingroup TaskCtrl
+ */
+void vTaskSuspend(TaskHandle_t xTaskToSuspend) PRIVILEGED_FUNCTION;
+
+/**
+ * task. h
+ * <pre>void vTaskResume( TaskHandle_t xTaskToResume );</pre>
+ *
+ * INCLUDE_vTaskSuspend must be defined as 1 for this function to be available.
+ * See the configuration section for more information.
+ *
+ * Resumes a suspended task.
+ *
+ * A task that has been suspended by one or more calls to vTaskSuspend ()
+ * will be made available for running again by a single call to
+ * vTaskResume ().
+ *
+ * @param xTaskToResume Handle to the task being readied.
+ *
+ * Example usage:
+ <pre>
+ void vAFunction( void )
+ {
+ TaskHandle_t xHandle;
+
+ // Create a task, storing the handle.
+ xTaskCreate( vTaskCode, "NAME", STACK_SIZE, NULL, tskIDLE_PRIORITY,
+ &xHandle );
+
+ // ...
+
+ // Use the handle to suspend the created task.
+ vTaskSuspend( xHandle );
+
+ // ...
+
+ // The created task will not run during this period, unless
+ // another task calls vTaskResume( xHandle ).
+
+ //...
+
+
+ // Resume the suspended task ourselves.
+ vTaskResume( xHandle );
+
+ // The created task will once again get microcontroller processing
+ // time in accordance with its priority within the system.
+ }
+ </pre>
+ * \defgroup vTaskResume vTaskResume
+ * \ingroup TaskCtrl
+ */
+void vTaskResume(TaskHandle_t xTaskToResume) PRIVILEGED_FUNCTION;
+
+/**
+ * task. h
+ * <pre>BaseType_t xTaskResumeFromISR( TaskHandle_t xTaskToResume );</pre>
+ *
+ * INCLUDE_xTaskResumeFromISR must be defined as 1 for this function to be
+ * available. See the configuration section for more information.
+ *
+ * An implementation of vTaskResume() that can be called from within an ISR.
+ *
+ * A task that has been suspended by one or more calls to vTaskSuspend ()
+ * will be made available for running again by a single call to
+ * xTaskResumeFromISR ().
+ *
+ * xTaskResumeFromISR() should not be used to synchronise a task with an
+ * interrupt if there is a chance that the interrupt could arrive prior to the
+ * task being suspended - as this can lead to interrupts being missed. Use of a
+ * semaphore as a synchronisation mechanism would avoid this eventuality.
+ *
+ * @param xTaskToResume Handle to the task being readied.
+ *
+ * @return pdTRUE if resuming the task should result in a context switch,
+ * otherwise pdFALSE. This is used by the ISR to determine if a context switch
+ * may be required following the ISR.
+ *
+ * \defgroup xTaskResumeFromISR xTaskResumeFromISR
+ * \ingroup TaskCtrl
+ */
+BaseType_t xTaskResumeFromISR(TaskHandle_t xTaskToResume) PRIVILEGED_FUNCTION;
+
+/*-----------------------------------------------------------
+ * SCHEDULER CONTROL
+ *----------------------------------------------------------*/
+
+/**
+ * task. h
+ * <pre>void vTaskStartScheduler( void );</pre>
+ *
+ * Starts the real time kernel tick processing. After calling the kernel
+ * has control over which tasks are executed and when.
+ *
+ * See the demo application file main.c for an example of creating
+ * tasks and starting the kernel.
+ *
+ * Example usage:
+ <pre>
+ void vAFunction( void )
+ {
+ // Create at least one task before starting the kernel.
+ xTaskCreate( vTaskCode, "NAME", STACK_SIZE, NULL, tskIDLE_PRIORITY, NULL );
+
+ // Start the real time kernel with preemption.
+ vTaskStartScheduler ();
+
+ // Will not get here unless a task calls vTaskEndScheduler ()
+ }
+ </pre>
+ *
+ * \defgroup vTaskStartScheduler vTaskStartScheduler
+ * \ingroup SchedulerControl
+ */
+void vTaskStartScheduler(void) PRIVILEGED_FUNCTION;
+
+/**
+ * task. h
+ * <pre>void vTaskEndScheduler( void );</pre>
+ *
+ * NOTE: At the time of writing only the x86 real mode port, which runs on a PC
+ * in place of DOS, implements this function.
+ *
+ * Stops the real time kernel tick. All created tasks will be automatically
+ * deleted and multitasking (either preemptive or cooperative) will
+ * stop. Execution then resumes from the point where vTaskStartScheduler ()
+ * was called, as if vTaskStartScheduler () had just returned.
+ *
+ * See the demo application file main. c in the demo/PC directory for an
+ * example that uses vTaskEndScheduler ().
+ *
+ * vTaskEndScheduler () requires an exit function to be defined within the
+ * portable layer (see vPortEndScheduler () in port. c for the PC port). This
+ * performs hardware specific operations such as stopping the kernel tick.
+ *
+ * vTaskEndScheduler () will cause all of the resources allocated by the
+ * kernel to be freed - but will not free resources allocated by application
+ * tasks.
+ *
+ * Example usage:
+ <pre>
+ void vTaskCode( void * pvParameters )
+ {
+ for( ;; )
+ {
+ // Task code goes here.
+
+ // At some point we want to end the real time kernel processing
+ // so call ...
+ vTaskEndScheduler ();
+ }
+ }
+
+ void vAFunction( void )
+ {
+ // Create at least one task before starting the kernel.
+ xTaskCreate( vTaskCode, "NAME", STACK_SIZE, NULL, tskIDLE_PRIORITY, NULL );
+
+ // Start the real time kernel with preemption.
+ vTaskStartScheduler ();
+
+ // Will only get here when the vTaskCode () task has called
+ // vTaskEndScheduler (). When we get here we are back to single task
+ // execution.
+ }
+ </pre>
+ *
+ * \defgroup vTaskEndScheduler vTaskEndScheduler
+ * \ingroup SchedulerControl
+ */
+void vTaskEndScheduler(void) PRIVILEGED_FUNCTION;
+
+/**
+ * task. h
+ * <pre>void vTaskSuspendAll( void );</pre>
+ *
+ * Suspends the scheduler without disabling interrupts. Context switches will
+ * not occur while the scheduler is suspended.
+ *
+ * After calling vTaskSuspendAll () the calling task will continue to execute
+ * without risk of being swapped out until a call to xTaskResumeAll () has been
+ * made.
+ *
+ * API functions that have the potential to cause a context switch (for example,
+ * vTaskDelayUntil(), xQueueSend(), etc.) must not be called while the scheduler
+ * is suspended.
+ *
+ * Example usage:
+ <pre>
+ void vTask1( void * pvParameters )
+ {
+ for( ;; )
+ {
+ // Task code goes here.
+
+ // ...
+
+ // At some point the task wants to perform a long operation during
+ // which it does not want to get swapped out. It cannot use
+ // taskENTER_CRITICAL ()/taskEXIT_CRITICAL () as the length of the
+ // operation may cause interrupts to be missed - including the
+ // ticks.
+
+ // Prevent the real time kernel swapping out the task.
+ vTaskSuspendAll ();
+
+ // Perform the operation here. There is no need to use critical
+ // sections as we have all the microcontroller processing time.
+ // During this time interrupts will still operate and the kernel
+ // tick count will be maintained.
+
+ // ...
+
+ // The operation is complete. Restart the kernel.
+ xTaskResumeAll ();
+ }
+ }
+ </pre>
+ * \defgroup vTaskSuspendAll vTaskSuspendAll
+ * \ingroup SchedulerControl
+ */
+void vTaskSuspendAll(void) PRIVILEGED_FUNCTION;
+
+/**
+ * task. h
+ * <pre>BaseType_t xTaskResumeAll( void );</pre>
+ *
+ * Resumes scheduler activity after it was suspended by a call to
+ * vTaskSuspendAll().
+ *
+ * xTaskResumeAll() only resumes the scheduler. It does not unsuspend tasks
+ * that were previously suspended by a call to vTaskSuspend().
+ *
+ * @return If resuming the scheduler caused a context switch then pdTRUE is
+ * returned, otherwise pdFALSE is returned.
+ *
+ * Example usage:
+ <pre>
+ void vTask1( void * pvParameters )
+ {
+ for( ;; )
+ {
+ // Task code goes here.
+
+ // ...
+
+ // At some point the task wants to perform a long operation during
+ // which it does not want to get swapped out. It cannot use
+ // taskENTER_CRITICAL ()/taskEXIT_CRITICAL () as the length of the
+ // operation may cause interrupts to be missed - including the
+ // ticks.
+
+ // Prevent the real time kernel swapping out the task.
+ vTaskSuspendAll ();
+
+ // Perform the operation here. There is no need to use critical
+ // sections as we have all the microcontroller processing time.
+ // During this time interrupts will still operate and the real
+ // time kernel tick count will be maintained.
+
+ // ...
+
+ // The operation is complete. Restart the kernel. We want to force
+ // a context switch - but there is no point if resuming the scheduler
+ // caused a context switch already.
+ if( !xTaskResumeAll () )
+ {
+ taskYIELD ();
+ }
+ }
+ }
+ </pre>
+ * \defgroup xTaskResumeAll xTaskResumeAll
+ * \ingroup SchedulerControl
+ */
+BaseType_t xTaskResumeAll(void) PRIVILEGED_FUNCTION;
+
+/*-----------------------------------------------------------
+ * TASK UTILITIES
+ *----------------------------------------------------------*/
+
+/**
+ * task. h
+ * <PRE>TickType_t xTaskGetTickCount( void );</PRE>
+ *
+ * @return The count of ticks since vTaskStartScheduler was called.
+ *
+ * \defgroup xTaskGetTickCount xTaskGetTickCount
+ * \ingroup TaskUtils
+ */
+TickType_t xTaskGetTickCount(void) PRIVILEGED_FUNCTION;
+
+/**
+ * task. h
+ * <PRE>TickType_t xTaskGetTickCountFromISR( void );</PRE>
+ *
+ * @return The count of ticks since vTaskStartScheduler was called.
+ *
+ * This is a version of xTaskGetTickCount() that is safe to be called from an
+ * ISR - provided that TickType_t is the natural word size of the
+ * microcontroller being used or interrupt nesting is either not supported or
+ * not being used.
+ *
+ * \defgroup xTaskGetTickCountFromISR xTaskGetTickCountFromISR
+ * \ingroup TaskUtils
+ */
+TickType_t xTaskGetTickCountFromISR(void) PRIVILEGED_FUNCTION;
+
+/**
+ * task. h
+ * <PRE>UBaseType_t uxTaskGetNumberOfTasks( void );</PRE>
+ *
+ * @return The number of tasks that the real time kernel is currently managing.
+ * This includes all ready, blocked and suspended tasks. A task that
+ * has been deleted but not yet freed by the idle task will also be
+ * included in the count.
+ *
+ * \defgroup uxTaskGetNumberOfTasks uxTaskGetNumberOfTasks
+ * \ingroup TaskUtils
+ */
+UBaseType_t uxTaskGetNumberOfTasks(void) PRIVILEGED_FUNCTION;
+
+/**
+ * task. h
+ * <PRE>char *pcTaskGetName( TaskHandle_t xTaskToQuery );</PRE>
+ *
+ * @return The text (human readable) name of the task referenced by the handle
+ * xTaskToQuery. A task can query its own name by either passing in its own
+ * handle, or by setting xTaskToQuery to NULL.
+ *
+ * \defgroup pcTaskGetName pcTaskGetName
+ * \ingroup TaskUtils
+ */
+char *pcTaskGetName(TaskHandle_t xTaskToQuery)
+ PRIVILEGED_FUNCTION; /*lint !e971 Unqualified char types are allowed for
+ strings and single characters only. */
+
+/**
+ * task. h
+ * <PRE>TaskHandle_t xTaskGetHandle( const char *pcNameToQuery );</PRE>
+ *
+ * NOTE: This function takes a relatively long time to complete and should be
+ * used sparingly.
+ *
+ * @return The handle of the task that has the human readable name
+ * pcNameToQuery. NULL is returned if no matching name is found.
+ * INCLUDE_xTaskGetHandle must be set to 1 in FreeRTOSConfig.h for
+ * xTaskGetHandle() to be available.
+ *
+ * \defgroup xTaskGetHandle xTaskGetHandle
+ * \ingroup TaskUtils
+ */
+TaskHandle_t xTaskGetHandle(const char *pcNameToQuery)
+ PRIVILEGED_FUNCTION; /*lint !e971 Unqualified char types are allowed for
+ strings and single characters only. */
+
+/**
+ * task.h
+ * <PRE>UBaseType_t uxTaskGetStackHighWaterMark( TaskHandle_t xTask );</PRE>
+ *
+ * INCLUDE_uxTaskGetStackHighWaterMark must be set to 1 in FreeRTOSConfig.h for
+ * this function to be available.
+ *
+ * Returns the high water mark of the stack associated with xTask. That is,
+ * the minimum free stack space there has been (in words, so on a 32 bit machine
+ * a value of 1 means 4 bytes) since the task started. The smaller the returned
+ * number the closer the task has come to overflowing its stack.
+ *
+ * uxTaskGetStackHighWaterMark() and uxTaskGetStackHighWaterMark2() are the
+ * same except for their return type. Using configSTACK_DEPTH_TYPE allows the
+ * user to determine the return type. It gets around the problem of the value
+ * overflowing on 8-bit types without breaking backward compatibility for
+ * applications that expect an 8-bit return type.
+ *
+ * @param xTask Handle of the task associated with the stack to be checked.
+ * Set xTask to NULL to check the stack of the calling task.
+ *
+ * @return The smallest amount of free stack space there has been (in words, so
+ * actual spaces on the stack rather than bytes) since the task referenced by
+ * xTask was created.
+ */
+UBaseType_t uxTaskGetStackHighWaterMark(TaskHandle_t xTask) PRIVILEGED_FUNCTION;
+
+/**
+ * task.h
+ * <PRE>configSTACK_DEPTH_TYPE uxTaskGetStackHighWaterMark2( TaskHandle_t xTask
+ * );</PRE>
+ *
+ * INCLUDE_uxTaskGetStackHighWaterMark2 must be set to 1 in FreeRTOSConfig.h for
+ * this function to be available.
+ *
+ * Returns the high water mark of the stack associated with xTask. That is,
+ * the minimum free stack space there has been (in words, so on a 32 bit machine
+ * a value of 1 means 4 bytes) since the task started. The smaller the returned
+ * number the closer the task has come to overflowing its stack.
+ *
+ * uxTaskGetStackHighWaterMark() and uxTaskGetStackHighWaterMark2() are the
+ * same except for their return type. Using configSTACK_DEPTH_TYPE allows the
+ * user to determine the return type. It gets around the problem of the value
+ * overflowing on 8-bit types without breaking backward compatibility for
+ * applications that expect an 8-bit return type.
+ *
+ * @param xTask Handle of the task associated with the stack to be checked.
+ * Set xTask to NULL to check the stack of the calling task.
+ *
+ * @return The smallest amount of free stack space there has been (in words, so
+ * actual spaces on the stack rather than bytes) since the task referenced by
+ * xTask was created.
+ */
+configSTACK_DEPTH_TYPE uxTaskGetStackHighWaterMark2(TaskHandle_t xTask)
+ PRIVILEGED_FUNCTION;
+
+/* When using trace macros it is sometimes necessary to include task.h before
+FreeRTOS.h. When this is done TaskHookFunction_t will not yet have been
+defined, so the following two prototypes will cause a compilation error. This
+can be fixed by simply guarding against the inclusion of these two prototypes
+unless they are explicitly required by the configUSE_APPLICATION_TASK_TAG
+configuration constant. */
+#ifdef configUSE_APPLICATION_TASK_TAG
+# if configUSE_APPLICATION_TASK_TAG == 1
+/**
+ * task.h
+ * <pre>void vTaskSetApplicationTaskTag( TaskHandle_t xTask, TaskHookFunction_t
+ * pxHookFunction );</pre>
+ *
+ * Sets pxHookFunction to be the task hook function used by the task xTask.
+ * Passing xTask as NULL has the effect of setting the calling tasks hook
+ * function.
+ */
+void vTaskSetApplicationTaskTag(
+ TaskHandle_t xTask,
+ TaskHookFunction_t pxHookFunction) PRIVILEGED_FUNCTION;
+
+/**
+ * task.h
+ * <pre>void xTaskGetApplicationTaskTag( TaskHandle_t xTask );</pre>
+ *
+ * Returns the pxHookFunction value assigned to the task xTask. Do not
+ * call from an interrupt service routine - call
+ * xTaskGetApplicationTaskTagFromISR() instead.
+ */
+TaskHookFunction_t xTaskGetApplicationTaskTag(TaskHandle_t xTask)
+ PRIVILEGED_FUNCTION;
+
+/**
+ * task.h
+ * <pre>void xTaskGetApplicationTaskTagFromISR( TaskHandle_t xTask );</pre>
+ *
+ * Returns the pxHookFunction value assigned to the task xTask. Can
+ * be called from an interrupt service routine.
+ */
+TaskHookFunction_t xTaskGetApplicationTaskTagFromISR(TaskHandle_t xTask)
+ PRIVILEGED_FUNCTION;
+# endif /* configUSE_APPLICATION_TASK_TAG == 1 */
+#endif /* ifdef configUSE_APPLICATION_TASK_TAG */
+
+#if (configNUM_THREAD_LOCAL_STORAGE_POINTERS > 0)
+
+/* Each task contains an array of pointers that is dimensioned by the
+configNUM_THREAD_LOCAL_STORAGE_POINTERS setting in FreeRTOSConfig.h. The
+kernel does not use the pointers itself, so the application writer can use
+the pointers for any purpose they wish. The following two functions are
+used to set and query a pointer respectively. */
+void vTaskSetThreadLocalStoragePointer(
+ TaskHandle_t xTaskToSet,
+ BaseType_t xIndex,
+ void *pvValue) PRIVILEGED_FUNCTION;
+void *pvTaskGetThreadLocalStoragePointer(
+ TaskHandle_t xTaskToQuery,
+ BaseType_t xIndex) PRIVILEGED_FUNCTION;
+
+#endif
+
+/**
+ * task.h
+ * <pre>BaseType_t xTaskCallApplicationTaskHook( TaskHandle_t xTask, void
+ * *pvParameter );</pre>
+ *
+ * Calls the hook function associated with xTask. Passing xTask as NULL has
+ * the effect of calling the Running tasks (the calling task) hook function.
+ *
+ * pvParameter is passed to the hook function for the task to interpret as it
+ * wants. The return value is the value returned by the task hook function
+ * registered by the user.
+ */
+BaseType_t xTaskCallApplicationTaskHook(TaskHandle_t xTask, void *pvParameter)
+ PRIVILEGED_FUNCTION;
+
+/**
+ * xTaskGetIdleTaskHandle() is only available if
+ * INCLUDE_xTaskGetIdleTaskHandle is set to 1 in FreeRTOSConfig.h.
+ *
+ * Simply returns the handle of the idle task. It is not valid to call
+ * xTaskGetIdleTaskHandle() before the scheduler has been started.
+ */
+TaskHandle_t xTaskGetIdleTaskHandle(void) PRIVILEGED_FUNCTION;
+
+/**
+ * configUSE_TRACE_FACILITY must be defined as 1 in FreeRTOSConfig.h for
+ * uxTaskGetSystemState() to be available.
+ *
+ * uxTaskGetSystemState() populates an TaskStatus_t structure for each task in
+ * the system. TaskStatus_t structures contain, among other things, members
+ * for the task handle, task name, task priority, task state, and total amount
+ * of run time consumed by the task. See the TaskStatus_t structure
+ * definition in this file for the full member list.
+ *
+ * NOTE: This function is intended for debugging use only as its use results in
+ * the scheduler remaining suspended for an extended period.
+ *
+ * @param pxTaskStatusArray A pointer to an array of TaskStatus_t structures.
+ * The array must contain at least one TaskStatus_t structure for each task
+ * that is under the control of the RTOS. The number of tasks under the control
+ * of the RTOS can be determined using the uxTaskGetNumberOfTasks() API
+ function.
+ *
+ * @param uxArraySize The size of the array pointed to by the pxTaskStatusArray
+ * parameter. The size is specified as the number of indexes in the array, or
+ * the number of TaskStatus_t structures contained in the array, not by the
+ * number of bytes in the array.
+ *
+ * @param pulTotalRunTime If configGENERATE_RUN_TIME_STATS is set to 1 in
+ * FreeRTOSConfig.h then *pulTotalRunTime is set by uxTaskGetSystemState() to
+ the
+ * total run time (as defined by the run time stats clock, see
+ * http://www.freertos.org/rtos-run-time-stats.html) since the target booted.
+ * pulTotalRunTime can be set to NULL to omit the total run time information.
+ *
+ * @return The number of TaskStatus_t structures that were populated by
+ * uxTaskGetSystemState(). This should equal the number returned by the
+ * uxTaskGetNumberOfTasks() API function, but will be zero if the value passed
+ * in the uxArraySize parameter was too small.
+ *
+ * Example usage:
+ <pre>
+ // This example demonstrates how a human readable table of run time stats
+ // information is generated from raw data provided by
+ uxTaskGetSystemState().
+ // The human readable table is written to pcWriteBuffer
+ void vTaskGetRunTimeStats( char *pcWriteBuffer )
+ {
+ TaskStatus_t *pxTaskStatusArray;
+ volatile UBaseType_t uxArraySize, x;
+ uint32_t ulTotalRunTime, ulStatsAsPercentage;
+
+ // Make sure the write buffer does not contain a string.
+ *pcWriteBuffer = 0x00;
+
+ // Take a snapshot of the number of tasks in case it changes while this
+ // function is executing.
+ uxArraySize = uxTaskGetNumberOfTasks();
+
+ // Allocate a TaskStatus_t structure for each task. An array could be
+ // allocated statically at compile time.
+ pxTaskStatusArray = pvPortMalloc( uxArraySize * sizeof( TaskStatus_t )
+ );
+
+ if( pxTaskStatusArray != NULL )
+ {
+ // Generate raw status information about each task.
+ uxArraySize = uxTaskGetSystemState( pxTaskStatusArray, uxArraySize,
+ &ulTotalRunTime );
+
+ // For percentage calculations.
+ ulTotalRunTime /= 100UL;
+
+ // Avoid divide by zero errors.
+ if( ulTotalRunTime > 0 )
+ {
+ // For each populated position in the pxTaskStatusArray array,
+ // format the raw data as human readable ASCII data
+ for( x = 0; x < uxArraySize; x++ )
+ {
+ // What percentage of the total run time has the task used?
+ // This will always be rounded down to the nearest integer.
+ // ulTotalRunTimeDiv100 has already been divided by 100.
+ ulStatsAsPercentage = pxTaskStatusArray[ x
+ ].ulRunTimeCounter / ulTotalRunTime;
+
+ if( ulStatsAsPercentage > 0UL )
+ {
+ sprintf( pcWriteBuffer, "%s\t\t%lu\t\t%lu%%\r\n",
+ pxTaskStatusArray[ x ].pcTaskName, pxTaskStatusArray[ x ].ulRunTimeCounter,
+ ulStatsAsPercentage );
+ }
+ else
+ {
+ // If the percentage is zero here then the task has
+ // consumed less than 1% of the total run time.
+ sprintf( pcWriteBuffer, "%s\t\t%lu\t\t<1%%\r\n",
+ pxTaskStatusArray[ x ].pcTaskName, pxTaskStatusArray[ x ].ulRunTimeCounter );
+ }
+
+ pcWriteBuffer += strlen( ( char * ) pcWriteBuffer );
+ }
+ }
+
+ // The array is no longer needed, free the memory it consumes.
+ vPortFree( pxTaskStatusArray );
+ }
+ }
+ </pre>
+ */
+UBaseType_t uxTaskGetSystemState(
+ TaskStatus_t *const pxTaskStatusArray,
+ const UBaseType_t uxArraySize,
+ uint32_t *const pulTotalRunTime) PRIVILEGED_FUNCTION;
+
+/**
+ * task. h
+ * <PRE>void vTaskList( char *pcWriteBuffer );</PRE>
+ *
+ * configUSE_TRACE_FACILITY and configUSE_STATS_FORMATTING_FUNCTIONS must
+ * both be defined as 1 for this function to be available. See the
+ * configuration section of the FreeRTOS.org website for more information.
+ *
+ * NOTE 1: This function will disable interrupts for its duration. It is
+ * not intended for normal application runtime use but as a debug aid.
+ *
+ * Lists all the current tasks, along with their current state and stack
+ * usage high water mark.
+ *
+ * Tasks are reported as blocked ('B'), ready ('R'), deleted ('D') or
+ * suspended ('S').
+ *
+ * PLEASE NOTE:
+ *
+ * This function is provided for convenience only, and is used by many of the
+ * demo applications. Do not consider it to be part of the scheduler.
+ *
+ * vTaskList() calls uxTaskGetSystemState(), then formats part of the
+ * uxTaskGetSystemState() output into a human readable table that displays task
+ * names, states and stack usage.
+ *
+ * vTaskList() has a dependency on the sprintf() C library function that might
+ * bloat the code size, use a lot of stack, and provide different results on
+ * different platforms. An alternative, tiny, third party, and limited
+ * functionality implementation of sprintf() is provided in many of the
+ * FreeRTOS/Demo sub-directories in a file called printf-stdarg.c (note
+ * printf-stdarg.c does not provide a full snprintf() implementation!).
+ *
+ * It is recommended that production systems call uxTaskGetSystemState()
+ * directly to get access to raw stats data, rather than indirectly through a
+ * call to vTaskList().
+ *
+ * @param pcWriteBuffer A buffer into which the above mentioned details
+ * will be written, in ASCII form. This buffer is assumed to be large
+ * enough to contain the generated report. Approximately 40 bytes per
+ * task should be sufficient.
+ *
+ * \defgroup vTaskList vTaskList
+ * \ingroup TaskUtils
+ */
+void vTaskList(char *pcWriteBuffer)
+ PRIVILEGED_FUNCTION; /*lint !e971 Unqualified char types are allowed for
+ strings and single characters only. */
+
+/**
+ * task. h
+ * <PRE>void vTaskGetRunTimeStats( char *pcWriteBuffer );</PRE>
+ *
+ * configGENERATE_RUN_TIME_STATS and configUSE_STATS_FORMATTING_FUNCTIONS
+ * must both be defined as 1 for this function to be available. The application
+ * must also then provide definitions for
+ * portCONFIGURE_TIMER_FOR_RUN_TIME_STATS() and portGET_RUN_TIME_COUNTER_VALUE()
+ * to configure a peripheral timer/counter and return the timers current count
+ * value respectively. The counter should be at least 10 times the frequency of
+ * the tick count.
+ *
+ * NOTE 1: This function will disable interrupts for its duration. It is
+ * not intended for normal application runtime use but as a debug aid.
+ *
+ * Setting configGENERATE_RUN_TIME_STATS to 1 will result in a total
+ * accumulated execution time being stored for each task. The resolution
+ * of the accumulated time value depends on the frequency of the timer
+ * configured by the portCONFIGURE_TIMER_FOR_RUN_TIME_STATS() macro.
+ * Calling vTaskGetRunTimeStats() writes the total execution time of each
+ * task into a buffer, both as an absolute count value and as a percentage
+ * of the total system execution time.
+ *
+ * NOTE 2:
+ *
+ * This function is provided for convenience only, and is used by many of the
+ * demo applications. Do not consider it to be part of the scheduler.
+ *
+ * vTaskGetRunTimeStats() calls uxTaskGetSystemState(), then formats part of the
+ * uxTaskGetSystemState() output into a human readable table that displays the
+ * amount of time each task has spent in the Running state in both absolute and
+ * percentage terms.
+ *
+ * vTaskGetRunTimeStats() has a dependency on the sprintf() C library function
+ * that might bloat the code size, use a lot of stack, and provide different
+ * results on different platforms. An alternative, tiny, third party, and
+ * limited functionality implementation of sprintf() is provided in many of the
+ * FreeRTOS/Demo sub-directories in a file called printf-stdarg.c (note
+ * printf-stdarg.c does not provide a full snprintf() implementation!).
+ *
+ * It is recommended that production systems call uxTaskGetSystemState()
+ * directly to get access to raw stats data, rather than indirectly through a
+ * call to vTaskGetRunTimeStats().
+ *
+ * @param pcWriteBuffer A buffer into which the execution times will be
+ * written, in ASCII form. This buffer is assumed to be large enough to
+ * contain the generated report. Approximately 40 bytes per task should
+ * be sufficient.
+ *
+ * \defgroup vTaskGetRunTimeStats vTaskGetRunTimeStats
+ * \ingroup TaskUtils
+ */
+void vTaskGetRunTimeStats(char *pcWriteBuffer)
+ PRIVILEGED_FUNCTION; /*lint !e971 Unqualified char types are allowed for
+ strings and single characters only. */
+
+/**
+ * task. h
+ * <PRE>uint32_t ulTaskGetIdleRunTimeCounter( void );</PRE>
+ *
+ * configGENERATE_RUN_TIME_STATS and configUSE_STATS_FORMATTING_FUNCTIONS
+ * must both be defined as 1 for this function to be available. The application
+ * must also then provide definitions for
+ * portCONFIGURE_TIMER_FOR_RUN_TIME_STATS() and portGET_RUN_TIME_COUNTER_VALUE()
+ * to configure a peripheral timer/counter and return the timers current count
+ * value respectively. The counter should be at least 10 times the frequency of
+ * the tick count.
+ *
+ * Setting configGENERATE_RUN_TIME_STATS to 1 will result in a total
+ * accumulated execution time being stored for each task. The resolution
+ * of the accumulated time value depends on the frequency of the timer
+ * configured by the portCONFIGURE_TIMER_FOR_RUN_TIME_STATS() macro.
+ * While uxTaskGetSystemState() and vTaskGetRunTimeStats() writes the total
+ * execution time of each task into a buffer, ulTaskGetIdleRunTimeCounter()
+ * returns the total execution time of just the idle task.
+ *
+ * @return The total run time of the idle task. This is the amount of time the
+ * idle task has actually been executing. The unit of time is dependent on the
+ * frequency configured using the portCONFIGURE_TIMER_FOR_RUN_TIME_STATS() and
+ * portGET_RUN_TIME_COUNTER_VALUE() macros.
+ *
+ * \defgroup ulTaskGetIdleRunTimeCounter ulTaskGetIdleRunTimeCounter
+ * \ingroup TaskUtils
+ */
+uint32_t ulTaskGetIdleRunTimeCounter(void) PRIVILEGED_FUNCTION;
+
+/**
+ * task. h
+ * <PRE>BaseType_t xTaskNotify( TaskHandle_t xTaskToNotify, uint32_t ulValue,
+ * eNotifyAction eAction );</PRE>
+ *
+ * configUSE_TASK_NOTIFICATIONS must be undefined or defined as 1 for this
+ * function to be available.
+ *
+ * When configUSE_TASK_NOTIFICATIONS is set to one each task has its own private
+ * "notification value", which is a 32-bit unsigned integer (uint32_t).
+ *
+ * Events can be sent to a task using an intermediary object. Examples of such
+ * objects are queues, semaphores, mutexes and event groups. Task notifications
+ * are a method of sending an event directly to a task without the need for such
+ * an intermediary object.
+ *
+ * A notification sent to a task can optionally perform an action, such as
+ * update, overwrite or increment the task's notification value. In that way
+ * task notifications can be used to send data to a task, or be used as light
+ * weight and fast binary or counting semaphores.
+ *
+ * A notification sent to a task will remain pending until it is cleared by the
+ * task calling xTaskNotifyWait() or ulTaskNotifyTake(). If the task was
+ * already in the Blocked state to wait for a notification when the notification
+ * arrives then the task will automatically be removed from the Blocked state
+ * (unblocked) and the notification cleared.
+ *
+ * A task can use xTaskNotifyWait() to [optionally] block to wait for a
+ * notification to be pending, or ulTaskNotifyTake() to [optionally] block
+ * to wait for its notification value to have a non-zero value. The task does
+ * not consume any CPU time while it is in the Blocked state.
+ *
+ * See http://www.FreeRTOS.org/RTOS-task-notifications.html for details.
+ *
+ * @param xTaskToNotify The handle of the task being notified. The handle to a
+ * task can be returned from the xTaskCreate() API function used to create the
+ * task, and the handle of the currently running task can be obtained by calling
+ * xTaskGetCurrentTaskHandle().
+ *
+ * @param ulValue Data that can be sent with the notification. How the data is
+ * used depends on the value of the eAction parameter.
+ *
+ * @param eAction Specifies how the notification updates the task's notification
+ * value, if at all. Valid values for eAction are as follows:
+ *
+ * eSetBits -
+ * The task's notification value is bitwise ORed with ulValue. xTaskNotify()
+ * always returns pdPASS in this case.
+ *
+ * eIncrement -
+ * The task's notification value is incremented. ulValue is not used and
+ * xTaskNotify() always returns pdPASS in this case.
+ *
+ * eSetValueWithOverwrite -
+ * The task's notification value is set to the value of ulValue, even if the
+ * task being notified had not yet processed the previous notification (the
+ * task already had a notification pending). xTaskNotify() always returns
+ * pdPASS in this case.
+ *
+ * eSetValueWithoutOverwrite -
+ * If the task being notified did not already have a notification pending then
+ * the task's notification value is set to ulValue and xTaskNotify() will
+ * return pdPASS. If the task being notified already had a notification
+ * pending then no action is performed and pdFAIL is returned.
+ *
+ * eNoAction -
+ * The task receives a notification without its notification value being
+ * updated. ulValue is not used and xTaskNotify() always returns pdPASS in
+ * this case.
+ *
+ * pulPreviousNotificationValue -
+ * Can be used to pass out the subject task's notification value before any
+ * bits are modified by the notify function.
+ *
+ * @return Dependent on the value of eAction. See the description of the
+ * eAction parameter.
+ *
+ * \defgroup xTaskNotify xTaskNotify
+ * \ingroup TaskNotifications
+ */
+BaseType_t xTaskGenericNotify(
+ TaskHandle_t xTaskToNotify,
+ uint32_t ulValue,
+ eNotifyAction eAction,
+ uint32_t *pulPreviousNotificationValue) PRIVILEGED_FUNCTION;
+#define xTaskNotify(xTaskToNotify, ulValue, eAction) \
+ xTaskGenericNotify((xTaskToNotify), (ulValue), (eAction), NULL)
+#define xTaskNotifyAndQuery( \
+ xTaskToNotify, ulValue, eAction, pulPreviousNotifyValue) \
+ xTaskGenericNotify( \
+ (xTaskToNotify), (ulValue), (eAction), (pulPreviousNotifyValue))
+
+/**
+ * task. h
+ * <PRE>BaseType_t xTaskNotifyFromISR( TaskHandle_t xTaskToNotify, uint32_t
+ * ulValue, eNotifyAction eAction, BaseType_t *pxHigherPriorityTaskWoken
+ * );</PRE>
+ *
+ * configUSE_TASK_NOTIFICATIONS must be undefined or defined as 1 for this
+ * function to be available.
+ *
+ * When configUSE_TASK_NOTIFICATIONS is set to one each task has its own private
+ * "notification value", which is a 32-bit unsigned integer (uint32_t).
+ *
+ * A version of xTaskNotify() that can be used from an interrupt service routine
+ * (ISR).
+ *
+ * Events can be sent to a task using an intermediary object. Examples of such
+ * objects are queues, semaphores, mutexes and event groups. Task notifications
+ * are a method of sending an event directly to a task without the need for such
+ * an intermediary object.
+ *
+ * A notification sent to a task can optionally perform an action, such as
+ * update, overwrite or increment the task's notification value. In that way
+ * task notifications can be used to send data to a task, or be used as light
+ * weight and fast binary or counting semaphores.
+ *
+ * A notification sent to a task will remain pending until it is cleared by the
+ * task calling xTaskNotifyWait() or ulTaskNotifyTake(). If the task was
+ * already in the Blocked state to wait for a notification when the notification
+ * arrives then the task will automatically be removed from the Blocked state
+ * (unblocked) and the notification cleared.
+ *
+ * A task can use xTaskNotifyWait() to [optionally] block to wait for a
+ * notification to be pending, or ulTaskNotifyTake() to [optionally] block
+ * to wait for its notification value to have a non-zero value. The task does
+ * not consume any CPU time while it is in the Blocked state.
+ *
+ * See http://www.FreeRTOS.org/RTOS-task-notifications.html for details.
+ *
+ * @param xTaskToNotify The handle of the task being notified. The handle to a
+ * task can be returned from the xTaskCreate() API function used to create the
+ * task, and the handle of the currently running task can be obtained by calling
+ * xTaskGetCurrentTaskHandle().
+ *
+ * @param ulValue Data that can be sent with the notification. How the data is
+ * used depends on the value of the eAction parameter.
+ *
+ * @param eAction Specifies how the notification updates the task's notification
+ * value, if at all. Valid values for eAction are as follows:
+ *
+ * eSetBits -
+ * The task's notification value is bitwise ORed with ulValue. xTaskNotify()
+ * always returns pdPASS in this case.
+ *
+ * eIncrement -
+ * The task's notification value is incremented. ulValue is not used and
+ * xTaskNotify() always returns pdPASS in this case.
+ *
+ * eSetValueWithOverwrite -
+ * The task's notification value is set to the value of ulValue, even if the
+ * task being notified had not yet processed the previous notification (the
+ * task already had a notification pending). xTaskNotify() always returns
+ * pdPASS in this case.
+ *
+ * eSetValueWithoutOverwrite -
+ * If the task being notified did not already have a notification pending then
+ * the task's notification value is set to ulValue and xTaskNotify() will
+ * return pdPASS. If the task being notified already had a notification
+ * pending then no action is performed and pdFAIL is returned.
+ *
+ * eNoAction -
+ * The task receives a notification without its notification value being
+ * updated. ulValue is not used and xTaskNotify() always returns pdPASS in
+ * this case.
+ *
+ * @param pxHigherPriorityTaskWoken xTaskNotifyFromISR() will set
+ * *pxHigherPriorityTaskWoken to pdTRUE if sending the notification caused the
+ * task to which the notification was sent to leave the Blocked state, and the
+ * unblocked task has a priority higher than the currently running task. If
+ * xTaskNotifyFromISR() sets this value to pdTRUE then a context switch should
+ * be requested before the interrupt is exited. How a context switch is
+ * requested from an ISR is dependent on the port - see the documentation page
+ * for the port in use.
+ *
+ * @return Dependent on the value of eAction. See the description of the
+ * eAction parameter.
+ *
+ * \defgroup xTaskNotify xTaskNotify
+ * \ingroup TaskNotifications
+ */
+BaseType_t xTaskGenericNotifyFromISR(
+ TaskHandle_t xTaskToNotify,
+ uint32_t ulValue,
+ eNotifyAction eAction,
+ uint32_t *pulPreviousNotificationValue,
+ BaseType_t *pxHigherPriorityTaskWoken) PRIVILEGED_FUNCTION;
+#define xTaskNotifyFromISR( \
+ xTaskToNotify, ulValue, eAction, pxHigherPriorityTaskWoken) \
+ xTaskGenericNotifyFromISR( \
+ (xTaskToNotify), \
+ (ulValue), \
+ (eAction), \
+ NULL, \
+ (pxHigherPriorityTaskWoken))
+#define xTaskNotifyAndQueryFromISR( \
+ xTaskToNotify, \
+ ulValue, \
+ eAction, \
+ pulPreviousNotificationValue, \
+ pxHigherPriorityTaskWoken) \
+ xTaskGenericNotifyFromISR( \
+ (xTaskToNotify), \
+ (ulValue), \
+ (eAction), \
+ (pulPreviousNotificationValue), \
+ (pxHigherPriorityTaskWoken))
+
+/**
+ * task. h
+ * <PRE>BaseType_t xTaskNotifyWait( uint32_t ulBitsToClearOnEntry, uint32_t
+ * ulBitsToClearOnExit, uint32_t *pulNotificationValue, TickType_t xTicksToWait
+ * );</pre>
+ *
+ * configUSE_TASK_NOTIFICATIONS must be undefined or defined as 1 for this
+ * function to be available.
+ *
+ * When configUSE_TASK_NOTIFICATIONS is set to one each task has its own private
+ * "notification value", which is a 32-bit unsigned integer (uint32_t).
+ *
+ * Events can be sent to a task using an intermediary object. Examples of such
+ * objects are queues, semaphores, mutexes and event groups. Task notifications
+ * are a method of sending an event directly to a task without the need for such
+ * an intermediary object.
+ *
+ * A notification sent to a task can optionally perform an action, such as
+ * update, overwrite or increment the task's notification value. In that way
+ * task notifications can be used to send data to a task, or be used as light
+ * weight and fast binary or counting semaphores.
+ *
+ * A notification sent to a task will remain pending until it is cleared by the
+ * task calling xTaskNotifyWait() or ulTaskNotifyTake(). If the task was
+ * already in the Blocked state to wait for a notification when the notification
+ * arrives then the task will automatically be removed from the Blocked state
+ * (unblocked) and the notification cleared.
+ *
+ * A task can use xTaskNotifyWait() to [optionally] block to wait for a
+ * notification to be pending, or ulTaskNotifyTake() to [optionally] block
+ * to wait for its notification value to have a non-zero value. The task does
+ * not consume any CPU time while it is in the Blocked state.
+ *
+ * See http://www.FreeRTOS.org/RTOS-task-notifications.html for details.
+ *
+ * @param ulBitsToClearOnEntry Bits that are set in ulBitsToClearOnEntry value
+ * will be cleared in the calling task's notification value before the task
+ * checks to see if any notifications are pending, and optionally blocks if no
+ * notifications are pending. Setting ulBitsToClearOnEntry to ULONG_MAX (if
+ * limits.h is included) or 0xffffffffUL (if limits.h is not included) will have
+ * the effect of resetting the task's notification value to 0. Setting
+ * ulBitsToClearOnEntry to 0 will leave the task's notification value unchanged.
+ *
+ * @param ulBitsToClearOnExit If a notification is pending or received before
+ * the calling task exits the xTaskNotifyWait() function then the task's
+ * notification value (see the xTaskNotify() API function) is passed out using
+ * the pulNotificationValue parameter. Then any bits that are set in
+ * ulBitsToClearOnExit will be cleared in the task's notification value (note
+ * *pulNotificationValue is set before any bits are cleared). Setting
+ * ulBitsToClearOnExit to ULONG_MAX (if limits.h is included) or 0xffffffffUL
+ * (if limits.h is not included) will have the effect of resetting the task's
+ * notification value to 0 before the function exits. Setting
+ * ulBitsToClearOnExit to 0 will leave the task's notification value unchanged
+ * when the function exits (in which case the value passed out in
+ * pulNotificationValue will match the task's notification value).
+ *
+ * @param pulNotificationValue Used to pass the task's notification value out
+ * of the function. Note the value passed out will not be affected by the
+ * clearing of any bits caused by ulBitsToClearOnExit being non-zero.
+ *
+ * @param xTicksToWait The maximum amount of time that the task should wait in
+ * the Blocked state for a notification to be received, should a notification
+ * not already be pending when xTaskNotifyWait() was called. The task
+ * will not consume any processing time while it is in the Blocked state. This
+ * is specified in kernel ticks, the macro pdMS_TO_TICKS( value_in_ms ) can be
+ * used to convert a time specified in milliseconds to a time specified in
+ * ticks.
+ *
+ * @return If a notification was received (including notifications that were
+ * already pending when xTaskNotifyWait was called) then pdPASS is
+ * returned. Otherwise pdFAIL is returned.
+ *
+ * \defgroup xTaskNotifyWait xTaskNotifyWait
+ * \ingroup TaskNotifications
+ */
+BaseType_t xTaskNotifyWait(
+ uint32_t ulBitsToClearOnEntry,
+ uint32_t ulBitsToClearOnExit,
+ uint32_t *pulNotificationValue,
+ TickType_t xTicksToWait) PRIVILEGED_FUNCTION;
+
+/**
+ * task. h
+ * <PRE>BaseType_t xTaskNotifyGive( TaskHandle_t xTaskToNotify );</PRE>
+ *
+ * configUSE_TASK_NOTIFICATIONS must be undefined or defined as 1 for this macro
+ * to be available.
+ *
+ * When configUSE_TASK_NOTIFICATIONS is set to one each task has its own private
+ * "notification value", which is a 32-bit unsigned integer (uint32_t).
+ *
+ * Events can be sent to a task using an intermediary object. Examples of such
+ * objects are queues, semaphores, mutexes and event groups. Task notifications
+ * are a method of sending an event directly to a task without the need for such
+ * an intermediary object.
+ *
+ * A notification sent to a task can optionally perform an action, such as
+ * update, overwrite or increment the task's notification value. In that way
+ * task notifications can be used to send data to a task, or be used as light
+ * weight and fast binary or counting semaphores.
+ *
+ * xTaskNotifyGive() is a helper macro intended for use when task notifications
+ * are used as light weight and faster binary or counting semaphore equivalents.
+ * Actual FreeRTOS semaphores are given using the xSemaphoreGive() API function,
+ * the equivalent action that instead uses a task notification is
+ * xTaskNotifyGive().
+ *
+ * When task notifications are being used as a binary or counting semaphore
+ * equivalent then the task being notified should wait for the notification
+ * using the ulTaskNotifyTake() API function rather than the
+ * xTaskNotifyWait() API function.
+ *
+ * See http://www.FreeRTOS.org/RTOS-task-notifications.html for more details.
+ *
+ * @param xTaskToNotify The handle of the task being notified. The handle to a
+ * task can be returned from the xTaskCreate() API function used to create the
+ * task, and the handle of the currently running task can be obtained by calling
+ * xTaskGetCurrentTaskHandle().
+ *
+ * @return xTaskNotifyGive() is a macro that calls xTaskNotify() with the
+ * eAction parameter set to eIncrement - so pdPASS is always returned.
+ *
+ * \defgroup xTaskNotifyGive xTaskNotifyGive
+ * \ingroup TaskNotifications
+ */
+#define xTaskNotifyGive(xTaskToNotify) \
+ xTaskGenericNotify((xTaskToNotify), (0), eIncrement, NULL)
+
+/**
+ * task. h
+ * <PRE>void vTaskNotifyGiveFromISR( TaskHandle_t xTaskHandle, BaseType_t
+ * *pxHigherPriorityTaskWoken );</PRE>
+ *
+ * configUSE_TASK_NOTIFICATIONS must be undefined or defined as 1 for this macro
+ * to be available.
+ *
+ * When configUSE_TASK_NOTIFICATIONS is set to one each task has its own private
+ * "notification value", which is a 32-bit unsigned integer (uint32_t).
+ *
+ * A version of xTaskNotifyGive() that can be called from an interrupt service
+ * routine (ISR).
+ *
+ * Events can be sent to a task using an intermediary object. Examples of such
+ * objects are queues, semaphores, mutexes and event groups. Task notifications
+ * are a method of sending an event directly to a task without the need for such
+ * an intermediary object.
+ *
+ * A notification sent to a task can optionally perform an action, such as
+ * update, overwrite or increment the task's notification value. In that way
+ * task notifications can be used to send data to a task, or be used as light
+ * weight and fast binary or counting semaphores.
+ *
+ * vTaskNotifyGiveFromISR() is intended for use when task notifications are
+ * used as light weight and faster binary or counting semaphore equivalents.
+ * Actual FreeRTOS semaphores are given from an ISR using the
+ * xSemaphoreGiveFromISR() API function, the equivalent action that instead uses
+ * a task notification is vTaskNotifyGiveFromISR().
+ *
+ * When task notifications are being used as a binary or counting semaphore
+ * equivalent then the task being notified should wait for the notification
+ * using the ulTaskNotifyTake() API function rather than the
+ * xTaskNotifyWait() API function.
+ *
+ * See http://www.FreeRTOS.org/RTOS-task-notifications.html for more details.
+ *
+ * @param xTaskToNotify The handle of the task being notified. The handle to a
+ * task can be returned from the xTaskCreate() API function used to create the
+ * task, and the handle of the currently running task can be obtained by calling
+ * xTaskGetCurrentTaskHandle().
+ *
+ * @param pxHigherPriorityTaskWoken vTaskNotifyGiveFromISR() will set
+ * *pxHigherPriorityTaskWoken to pdTRUE if sending the notification caused the
+ * task to which the notification was sent to leave the Blocked state, and the
+ * unblocked task has a priority higher than the currently running task. If
+ * vTaskNotifyGiveFromISR() sets this value to pdTRUE then a context switch
+ * should be requested before the interrupt is exited. How a context switch is
+ * requested from an ISR is dependent on the port - see the documentation page
+ * for the port in use.
+ *
+ * \defgroup vTaskNotifyGiveFromISR vTaskNotifyGiveFromISR
+ * \ingroup TaskNotifications
+ */
+void vTaskNotifyGiveFromISR(
+ TaskHandle_t xTaskToNotify,
+ BaseType_t *pxHigherPriorityTaskWoken) PRIVILEGED_FUNCTION;
+
+/**
+ * task. h
+ * <PRE>uint32_t ulTaskNotifyTake( BaseType_t xClearCountOnExit, TickType_t
+ * xTicksToWait );</pre>
+ *
+ * configUSE_TASK_NOTIFICATIONS must be undefined or defined as 1 for this
+ * function to be available.
+ *
+ * When configUSE_TASK_NOTIFICATIONS is set to one each task has its own private
+ * "notification value", which is a 32-bit unsigned integer (uint32_t).
+ *
+ * Events can be sent to a task using an intermediary object. Examples of such
+ * objects are queues, semaphores, mutexes and event groups. Task notifications
+ * are a method of sending an event directly to a task without the need for such
+ * an intermediary object.
+ *
+ * A notification sent to a task can optionally perform an action, such as
+ * update, overwrite or increment the task's notification value. In that way
+ * task notifications can be used to send data to a task, or be used as light
+ * weight and fast binary or counting semaphores.
+ *
+ * ulTaskNotifyTake() is intended for use when a task notification is used as a
+ * faster and lighter weight binary or counting semaphore alternative. Actual
+ * FreeRTOS semaphores are taken using the xSemaphoreTake() API function, the
+ * equivalent action that instead uses a task notification is
+ * ulTaskNotifyTake().
+ *
+ * When a task is using its notification value as a binary or counting semaphore
+ * other tasks should send notifications to it using the xTaskNotifyGive()
+ * macro, or xTaskNotify() function with the eAction parameter set to
+ * eIncrement.
+ *
+ * ulTaskNotifyTake() can either clear the task's notification value to
+ * zero on exit, in which case the notification value acts like a binary
+ * semaphore, or decrement the task's notification value on exit, in which case
+ * the notification value acts like a counting semaphore.
+ *
+ * A task can use ulTaskNotifyTake() to [optionally] block to wait for
+ * the task's notification value to be non-zero. The task does not consume any
+ * CPU time while it is in the Blocked state.
+ *
+ * Whereas xTaskNotifyWait() will return when a notification is pending,
+ * ulTaskNotifyTake() will return when the task's notification value is
+ * not zero.
+ *
+ * See http://www.FreeRTOS.org/RTOS-task-notifications.html for details.
+ *
+ * @param xClearCountOnExit if xClearCountOnExit is pdFALSE then the task's
+ * notification value is decremented when the function exits. In this way the
+ * notification value acts like a counting semaphore. If xClearCountOnExit is
+ * not pdFALSE then the task's notification value is cleared to zero when the
+ * function exits. In this way the notification value acts like a binary
+ * semaphore.
+ *
+ * @param xTicksToWait The maximum amount of time that the task should wait in
+ * the Blocked state for the task's notification value to be greater than zero,
+ * should the count not already be greater than zero when
+ * ulTaskNotifyTake() was called. The task will not consume any processing
+ * time while it is in the Blocked state. This is specified in kernel ticks,
+ * the macro pdMS_TO_TICKS( value_in_ms ) can be used to convert a time
+ * specified in milliseconds to a time specified in ticks.
+ *
+ * @return The task's notification count before it is either cleared to zero or
+ * decremented (see the xClearCountOnExit parameter).
+ *
+ * \defgroup ulTaskNotifyTake ulTaskNotifyTake
+ * \ingroup TaskNotifications
+ */
+uint32_t ulTaskNotifyTake(BaseType_t xClearCountOnExit, TickType_t xTicksToWait)
+ PRIVILEGED_FUNCTION;
+
+/**
+ * task. h
+ * <PRE>BaseType_t xTaskNotifyStateClear( TaskHandle_t xTask );</pre>
+ *
+ * If the notification state of the task referenced by the handle xTask is
+ * eNotified, then set the task's notification state to eNotWaitingNotification.
+ * The task's notification value is not altered. Set xTask to NULL to clear the
+ * notification state of the calling task.
+ *
+ * @return pdTRUE if the task's notification state was set to
+ * eNotWaitingNotification, otherwise pdFALSE.
+ * \defgroup xTaskNotifyStateClear xTaskNotifyStateClear
+ * \ingroup TaskNotifications
+ */
+BaseType_t xTaskNotifyStateClear(TaskHandle_t xTask);
+
+/**
+ * task. h
+ * <PRE>uint32_t ulTaskNotifyValueClear( TaskHandle_t xTask, uint32_t
+ * ulBitsToClear );</pre>
+ *
+ * Clears the bits specified by the ulBitsToClear bit mask in the notification
+ * value of the task referenced by xTask.
+ *
+ * Set ulBitsToClear to 0xffffffff (UINT_MAX on 32-bit architectures) to clear
+ * the notification value to 0. Set ulBitsToClear to 0 to query the task's
+ * notification value without clearing any bits.
+ *
+ * @return The value of the target task's notification value before the bits
+ * specified by ulBitsToClear were cleared.
+ * \defgroup ulTaskNotifyValueClear ulTaskNotifyValueClear
+ * \ingroup TaskNotifications
+ */
+uint32_t ulTaskNotifyValueClear(TaskHandle_t xTask, uint32_t ulBitsToClear)
+ PRIVILEGED_FUNCTION;
+
+/**
+ * task.h
+ * <pre>void vTaskSetTimeOutState( TimeOut_t * const pxTimeOut )</pre>
+ *
+ * Capture the current time for future use with xTaskCheckForTimeOut().
+ *
+ * @param pxTimeOut Pointer to a timeout object into which the current time
+ * is to be captured. The captured time includes the tick count and the number
+ * of times the tick count has overflowed since the system first booted.
+ * \defgroup vTaskSetTimeOutState vTaskSetTimeOutState
+ * \ingroup TaskCtrl
+ */
+void vTaskSetTimeOutState(TimeOut_t *const pxTimeOut) PRIVILEGED_FUNCTION;
+
+/**
+ * task.h
+ * <pre>BaseType_t xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, TickType_t
+ * const pxTicksToWait );</pre>
+ *
+ * Determines if pxTicksToWait ticks has passed since a time was captured
+ * using a call to vTaskSetTimeOutState(). The captured time includes the tick
+ * count and the number of times the tick count has overflowed.
+ *
+ * @param pxTimeOut The time status as captured previously using
+ * vTaskSetTimeOutState. If the timeout has not yet occurred, it is updated
+ * to reflect the current time status.
+ * @param pxTicksToWait The number of ticks to check for timeout i.e. if
+ * pxTicksToWait ticks have passed since pxTimeOut was last updated (either by
+ * vTaskSetTimeOutState() or xTaskCheckForTimeOut()), the timeout has occurred.
+ * If the timeout has not occurred, pxTicksToWait is updated to reflect the
+ * number of remaining ticks.
+ *
+ * @return If timeout has occurred, pdTRUE is returned. Otherwise pdFALSE is
+ * returned and pxTicksToWait is updated to reflect the number of remaining
+ * ticks.
+ *
+ * @see https://www.freertos.org/xTaskCheckForTimeOut.html
+ *
+ * Example Usage:
+ * <pre>
+ // Driver library function used to receive uxWantedBytes from an Rx buffer
+ // that is filled by a UART interrupt. If there are not enough bytes in the
+ // Rx buffer then the task enters the Blocked state until it is notified
+ that
+ // more data has been placed into the buffer. If there is still not enough
+ // data then the task re-enters the Blocked state, and
+ xTaskCheckForTimeOut()
+ // is used to re-calculate the Block time to ensure the total amount of time
+ // spent in the Blocked state does not exceed MAX_TIME_TO_WAIT. This
+ // continues until either the buffer contains at least uxWantedBytes bytes,
+ // or the total amount of time spent in the Blocked state reaches
+ // MAX_TIME_TO_WAIT – at which point the task reads however many bytes are
+ // available up to a maximum of uxWantedBytes.
+
+ size_t xUART_Receive( uint8_t *pucBuffer, size_t uxWantedBytes )
+ {
+ size_t uxReceived = 0;
+ TickType_t xTicksToWait = MAX_TIME_TO_WAIT;
+ TimeOut_t xTimeOut;
+
+ // Initialize xTimeOut. This records the time at which this function
+ // was entered.
+ vTaskSetTimeOutState( &xTimeOut );
+
+ // Loop until the buffer contains the wanted number of bytes, or a
+ // timeout occurs.
+ while( UART_bytes_in_rx_buffer( pxUARTInstance ) < uxWantedBytes )
+ {
+ // The buffer didn't contain enough data so this task is going to
+ // enter the Blocked state. Adjusting xTicksToWait to account for
+ // any time that has been spent in the Blocked state within this
+ // function so far to ensure the total amount of time spent in the
+ // Blocked state does not exceed MAX_TIME_TO_WAIT.
+ if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) != pdFALSE )
+ {
+ //Timed out before the wanted number of bytes were available,
+ // exit the loop.
+ break;
+ }
+
+ // Wait for a maximum of xTicksToWait ticks to be notified that the
+ // receive interrupt has placed more data into the buffer.
+ ulTaskNotifyTake( pdTRUE, xTicksToWait );
+ }
+
+ // Attempt to read uxWantedBytes from the receive buffer into pucBuffer.
+ // The actual number of bytes read (which might be less than
+ // uxWantedBytes) is returned.
+ uxReceived = UART_read_from_receive_buffer( pxUARTInstance,
+ pucBuffer,
+ uxWantedBytes );
+
+ return uxReceived;
+ }
+ </pre>
+ * \defgroup xTaskCheckForTimeOut xTaskCheckForTimeOut
+ * \ingroup TaskCtrl
+ */
+BaseType_t xTaskCheckForTimeOut(
+ TimeOut_t *const pxTimeOut,
+ TickType_t *const pxTicksToWait) PRIVILEGED_FUNCTION;
+
+/*-----------------------------------------------------------
+ * SCHEDULER INTERNALS AVAILABLE FOR PORTING PURPOSES
+ *----------------------------------------------------------*/
+
+/*
+ * THIS FUNCTION MUST NOT BE USED FROM APPLICATION CODE. IT IS ONLY
+ * INTENDED FOR USE WHEN IMPLEMENTING A PORT OF THE SCHEDULER AND IS
+ * AN INTERFACE WHICH IS FOR THE EXCLUSIVE USE OF THE SCHEDULER.
+ *
+ * Called from the real time kernel tick (either preemptive or cooperative),
+ * this increments the tick count and checks if any tasks that are blocked
+ * for a finite period required removing from a blocked list and placing on
+ * a ready list. If a non-zero value is returned then a context switch is
+ * required because either:
+ * + A task was removed from a blocked list because its timeout had expired,
+ * or
+ * + Time slicing is in use and there is a task of equal priority to the
+ * currently running task.
+ */
+BaseType_t xTaskIncrementTick(void) PRIVILEGED_FUNCTION;
+
+/*
+ * THIS FUNCTION MUST NOT BE USED FROM APPLICATION CODE. IT IS AN
+ * INTERFACE WHICH IS FOR THE EXCLUSIVE USE OF THE SCHEDULER.
+ *
+ * THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED.
+ *
+ * Removes the calling task from the ready list and places it both
+ * on the list of tasks waiting for a particular event, and the
+ * list of delayed tasks. The task will be removed from both lists
+ * and replaced on the ready list should either the event occur (and
+ * there be no higher priority tasks waiting on the same event) or
+ * the delay period expires.
+ *
+ * The 'unordered' version replaces the event list item value with the
+ * xItemValue value, and inserts the list item at the end of the list.
+ *
+ * The 'ordered' version uses the existing event list item value (which is the
+ * owning task's priority) to insert the list item into the event list in task
+ * priority order.
+ *
+ * @param pxEventList The list containing tasks that are blocked waiting
+ * for the event to occur.
+ *
+ * @param xItemValue The item value to use for the event list item when the
+ * event list is not ordered by task priority.
+ *
+ * @param xTicksToWait The maximum amount of time that the task should wait
+ * for the event to occur. This is specified in kernel ticks,the constant
+ * portTICK_PERIOD_MS can be used to convert kernel ticks into a real time
+ * period.
+ */
+void vTaskPlaceOnEventList(
+ List_t *const pxEventList,
+ const TickType_t xTicksToWait) PRIVILEGED_FUNCTION;
+void vTaskPlaceOnUnorderedEventList(
+ List_t *pxEventList,
+ const TickType_t xItemValue,
+ const TickType_t xTicksToWait) PRIVILEGED_FUNCTION;
+
+/*
+ * THIS FUNCTION MUST NOT BE USED FROM APPLICATION CODE. IT IS AN
+ * INTERFACE WHICH IS FOR THE EXCLUSIVE USE OF THE SCHEDULER.
+ *
+ * THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED.
+ *
+ * This function performs nearly the same function as vTaskPlaceOnEventList().
+ * The difference being that this function does not permit tasks to block
+ * indefinitely, whereas vTaskPlaceOnEventList() does.
+ *
+ */
+void vTaskPlaceOnEventListRestricted(
+ List_t *const pxEventList,
+ TickType_t xTicksToWait,
+ const BaseType_t xWaitIndefinitely) PRIVILEGED_FUNCTION;
+
+/*
+ * THIS FUNCTION MUST NOT BE USED FROM APPLICATION CODE. IT IS AN
+ * INTERFACE WHICH IS FOR THE EXCLUSIVE USE OF THE SCHEDULER.
+ *
+ * THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED.
+ *
+ * Removes a task from both the specified event list and the list of blocked
+ * tasks, and places it on a ready queue.
+ *
+ * xTaskRemoveFromEventList()/vTaskRemoveFromUnorderedEventList() will be called
+ * if either an event occurs to unblock a task, or the block timeout period
+ * expires.
+ *
+ * xTaskRemoveFromEventList() is used when the event list is in task priority
+ * order. It removes the list item from the head of the event list as that will
+ * have the highest priority owning task of all the tasks on the event list.
+ * vTaskRemoveFromUnorderedEventList() is used when the event list is not
+ * ordered and the event list items hold something other than the owning tasks
+ * priority. In this case the event list item value is updated to the value
+ * passed in the xItemValue parameter.
+ *
+ * @return pdTRUE if the task being removed has a higher priority than the task
+ * making the call, otherwise pdFALSE.
+ */
+BaseType_t xTaskRemoveFromEventList(const List_t *const pxEventList)
+ PRIVILEGED_FUNCTION;
+void vTaskRemoveFromUnorderedEventList(
+ ListItem_t *pxEventListItem,
+ const TickType_t xItemValue) PRIVILEGED_FUNCTION;
+
+/*
+ * THIS FUNCTION MUST NOT BE USED FROM APPLICATION CODE. IT IS ONLY
+ * INTENDED FOR USE WHEN IMPLEMENTING A PORT OF THE SCHEDULER AND IS
+ * AN INTERFACE WHICH IS FOR THE EXCLUSIVE USE OF THE SCHEDULER.
+ *
+ * Sets the pointer to the current TCB to the TCB of the highest priority task
+ * that is ready to run.
+ */
+portDONT_DISCARD void vTaskSwitchContext(void) PRIVILEGED_FUNCTION;
+
+/*
+ * THESE FUNCTIONS MUST NOT BE USED FROM APPLICATION CODE. THEY ARE USED BY
+ * THE EVENT BITS MODULE.
+ */
+TickType_t uxTaskResetEventItemValue(void) PRIVILEGED_FUNCTION;
+
+/*
+ * Return the handle of the calling task.
+ */
+TaskHandle_t xTaskGetCurrentTaskHandle(void) PRIVILEGED_FUNCTION;
+
+/*
+ * Shortcut used by the queue implementation to prevent unnecessary call to
+ * taskYIELD();
+ */
+void vTaskMissedYield(void) PRIVILEGED_FUNCTION;
+
+/*
+ * Returns the scheduler state as taskSCHEDULER_RUNNING,
+ * taskSCHEDULER_NOT_STARTED or taskSCHEDULER_SUSPENDED.
+ */
+BaseType_t xTaskGetSchedulerState(void) PRIVILEGED_FUNCTION;
+
+/*
+ * Raises the priority of the mutex holder to that of the calling task should
+ * the mutex holder have a priority less than the calling task.
+ */
+BaseType_t xTaskPriorityInherit(TaskHandle_t const pxMutexHolder)
+ PRIVILEGED_FUNCTION;
+
+/*
+ * Set the priority of a task back to its proper priority in the case that it
+ * inherited a higher priority while it was holding a semaphore.
+ */
+BaseType_t xTaskPriorityDisinherit(TaskHandle_t const pxMutexHolder)
+ PRIVILEGED_FUNCTION;
+
+/*
+ * If a higher priority task attempting to obtain a mutex caused a lower
+ * priority task to inherit the higher priority task's priority - but the higher
+ * priority task then timed out without obtaining the mutex, then the lower
+ * priority task will disinherit the priority again - but only down as far as
+ * the highest priority task that is still waiting for the mutex (if there were
+ * more than one task waiting for the mutex).
+ */
+void vTaskPriorityDisinheritAfterTimeout(
+ TaskHandle_t const pxMutexHolder,
+ UBaseType_t uxHighestPriorityWaitingTask) PRIVILEGED_FUNCTION;
+
+/*
+ * Get the uxTCBNumber assigned to the task referenced by the xTask parameter.
+ */
+UBaseType_t uxTaskGetTaskNumber(TaskHandle_t xTask) PRIVILEGED_FUNCTION;
+
+/*
+ * Set the uxTaskNumber of the task referenced by the xTask parameter to
+ * uxHandle.
+ */
+void vTaskSetTaskNumber(TaskHandle_t xTask, const UBaseType_t uxHandle)
+ PRIVILEGED_FUNCTION;
+
+/*
+ * Only available when configUSE_TICKLESS_IDLE is set to 1.
+ * If tickless mode is being used, or a low power mode is implemented, then
+ * the tick interrupt will not execute during idle periods. When this is the
+ * case, the tick count value maintained by the scheduler needs to be kept up
+ * to date with the actual execution time by being skipped forward by a time
+ * equal to the idle period.
+ */
+void vTaskStepTick(const TickType_t xTicksToJump) PRIVILEGED_FUNCTION;
+
+/* Correct the tick count value after the application code has held
+interrupts disabled for an extended period. xTicksToCatchUp is the number
+of tick interrupts that have been missed due to interrupts being disabled.
+Its value is not computed automatically, so must be computed by the
+application writer.
+
+This function is similar to vTaskStepTick(), however, unlike
+vTaskStepTick(), xTaskCatchUpTicks() may move the tick count forward past a
+time at which a task should be removed from the blocked state. That means
+tasks may have to be removed from the blocked state as the tick count is
+moved. */
+BaseType_t xTaskCatchUpTicks(TickType_t xTicksToCatchUp) PRIVILEGED_FUNCTION;
+
+/*
+ * Only available when configUSE_TICKLESS_IDLE is set to 1.
+ * Provided for use within portSUPPRESS_TICKS_AND_SLEEP() to allow the port
+ * specific sleep function to determine if it is ok to proceed with the sleep,
+ * and if it is ok to proceed, if it is ok to sleep indefinitely.
+ *
+ * This function is necessary because portSUPPRESS_TICKS_AND_SLEEP() is only
+ * called with the scheduler suspended, not from within a critical section. It
+ * is therefore possible for an interrupt to request a context switch between
+ * portSUPPRESS_TICKS_AND_SLEEP() and the low power mode actually being
+ * entered. eTaskConfirmSleepModeStatus() should be called from a short
+ * critical section between the timer being stopped and the sleep mode being
+ * entered to ensure it is ok to proceed into the sleep mode.
+ */
+eSleepModeStatus eTaskConfirmSleepModeStatus(void) PRIVILEGED_FUNCTION;
+
+/*
+ * For internal use only. Increment the mutex held count when a mutex is
+ * taken and return the handle of the task that has taken the mutex.
+ */
+TaskHandle_t pvTaskIncrementMutexHeldCount(void) PRIVILEGED_FUNCTION;
+
+/*
+ * For internal use only. Same as vTaskSetTimeOutState(), but without a critical
+ * section.
+ */
+void vTaskInternalSetTimeOutState(TimeOut_t *const pxTimeOut)
+ PRIVILEGED_FUNCTION;
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* INC_TASK_H */
diff --git a/product/rcar/src/CMSIS-FreeRTOS/Source/include/timers.h b/product/rcar/src/CMSIS-FreeRTOS/Source/include/timers.h
new file mode 100644
index 00000000..028657cd
--- /dev/null
+++ b/product/rcar/src/CMSIS-FreeRTOS/Source/include/timers.h
@@ -0,0 +1,1401 @@
+/*
+ * FreeRTOS Kernel V10.3.1
+ * Copyright (C) 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * http://www.FreeRTOS.org
+ * http://aws.amazon.com/freertos
+ *
+ * 1 tab == 4 spaces!
+ */
+
+#ifndef TIMERS_H
+#define TIMERS_H
+
+#ifndef INC_FREERTOS_H
+# error \
+ "include FreeRTOS.h must appear in source files before include timers.h"
+#endif
+
+/*lint -save -e537 These headers are only multiply included if the application
+code happens to also be including task.h. */
+#include "task.h"
+/*lint -restore */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*-----------------------------------------------------------
+ * MACROS AND DEFINITIONS
+ *----------------------------------------------------------*/
+
+/* IDs for commands that can be sent/received on the timer queue. These are to
+be used solely through the macros that make up the public software timer API,
+as defined below. The commands that are sent from interrupts must use the
+highest numbers as tmrFIRST_FROM_ISR_COMMAND is used to determine if the task
+or interrupt version of the queue send function should be used. */
+#define tmrCOMMAND_EXECUTE_CALLBACK_FROM_ISR ((BaseType_t)-2)
+#define tmrCOMMAND_EXECUTE_CALLBACK ((BaseType_t)-1)
+#define tmrCOMMAND_START_DONT_TRACE ((BaseType_t)0)
+#define tmrCOMMAND_START ((BaseType_t)1)
+#define tmrCOMMAND_RESET ((BaseType_t)2)
+#define tmrCOMMAND_STOP ((BaseType_t)3)
+#define tmrCOMMAND_CHANGE_PERIOD ((BaseType_t)4)
+#define tmrCOMMAND_DELETE ((BaseType_t)5)
+
+#define tmrFIRST_FROM_ISR_COMMAND ((BaseType_t)6)
+#define tmrCOMMAND_START_FROM_ISR ((BaseType_t)6)
+#define tmrCOMMAND_RESET_FROM_ISR ((BaseType_t)7)
+#define tmrCOMMAND_STOP_FROM_ISR ((BaseType_t)8)
+#define tmrCOMMAND_CHANGE_PERIOD_FROM_ISR ((BaseType_t)9)
+
+/**
+ * Type by which software timers are referenced. For example, a call to
+ * xTimerCreate() returns a TimerHandle_t variable that can then be used to
+ * reference the subject timer in calls to other software timer API functions
+ * (for example, xTimerStart(), xTimerReset(), etc.).
+ */
+struct tmrTimerControl; /* The old naming convention is used to prevent breaking
+ kernel aware debuggers. */
+typedef struct tmrTimerControl *TimerHandle_t;
+
+/*
+ * Defines the prototype to which timer callback functions must conform.
+ */
+typedef void (*TimerCallbackFunction_t)(TimerHandle_t xTimer);
+
+/*
+ * Defines the prototype to which functions used with the
+ * xTimerPendFunctionCallFromISR() function must conform.
+ */
+typedef void (*PendedFunction_t)(void *, uint32_t);
+
+/**
+ * TimerHandle_t xTimerCreate( const char * const pcTimerName,
+ * TickType_t xTimerPeriodInTicks,
+ * UBaseType_t uxAutoReload,
+ * void * pvTimerID,
+ * TimerCallbackFunction_t pxCallbackFunction );
+ *
+ * Creates a new software timer instance, and returns a handle by which the
+ * created software timer can be referenced.
+ *
+ * Internally, within the FreeRTOS implementation, software timers use a block
+ * of memory, in which the timer data structure is stored. If a software timer
+ * is created using xTimerCreate() then the required memory is automatically
+ * dynamically allocated inside the xTimerCreate() function. (see
+ * http://www.freertos.org/a00111.html). If a software timer is created using
+ * xTimerCreateStatic() then the application writer must provide the memory that
+ * will get used by the software timer. xTimerCreateStatic() therefore allows a
+ * software timer to be created without using any dynamic memory allocation.
+ *
+ * Timers are created in the dormant state. The xTimerStart(), xTimerReset(),
+ * xTimerStartFromISR(), xTimerResetFromISR(), xTimerChangePeriod() and
+ * xTimerChangePeriodFromISR() API functions can all be used to transition a
+ * timer into the active state.
+ *
+ * @param pcTimerName A text name that is assigned to the timer. This is done
+ * purely to assist debugging. The kernel itself only ever references a timer
+ * by its handle, and never by its name.
+ *
+ * @param xTimerPeriodInTicks The timer period. The time is defined in tick
+ * periods so the constant portTICK_PERIOD_MS can be used to convert a time that
+ * has been specified in milliseconds. For example, if the timer must expire
+ * after 100 ticks, then xTimerPeriodInTicks should be set to 100.
+ * Alternatively, if the timer must expire after 500ms, then xPeriod can be set
+ * to ( 500 / portTICK_PERIOD_MS ) provided configTICK_RATE_HZ is less than or
+ * equal to 1000.
+ *
+ * @param uxAutoReload If uxAutoReload is set to pdTRUE then the timer will
+ * expire repeatedly with a frequency set by the xTimerPeriodInTicks parameter.
+ * If uxAutoReload is set to pdFALSE then the timer will be a one-shot timer and
+ * enter the dormant state after it expires.
+ *
+ * @param pvTimerID An identifier that is assigned to the timer being created.
+ * Typically this would be used in the timer callback function to identify which
+ * timer expired when the same callback function is assigned to more than one
+ * timer.
+ *
+ * @param pxCallbackFunction The function to call when the timer expires.
+ * Callback functions must have the prototype defined by
+ * TimerCallbackFunction_t, which is "void vCallbackFunction( TimerHandle_t
+ * xTimer );".
+ *
+ * @return If the timer is successfully created then a handle to the newly
+ * created timer is returned. If the timer cannot be created (because either
+ * there is insufficient FreeRTOS heap remaining to allocate the timer
+ * structures, or the timer period was set to 0) then NULL is returned.
+ *
+ * Example usage:
+ * @verbatim
+ * #define NUM_TIMERS 5
+ *
+ * // An array to hold handles to the created timers.
+ * TimerHandle_t xTimers[ NUM_TIMERS ];
+ *
+ * // An array to hold a count of the number of times each timer expires.
+ * int32_t lExpireCounters[ NUM_TIMERS ] = { 0 };
+ *
+ * // Define a callback function that will be used by multiple timer instances.
+ * // The callback function does nothing but count the number of times the
+ * // associated timer expires, and stop the timer once the timer has expired
+ * // 10 times.
+ * void vTimerCallback( TimerHandle_t pxTimer )
+ * {
+ * int32_t lArrayIndex;
+ * const int32_t xMaxExpiryCountBeforeStopping = 10;
+ *
+ * // Optionally do something if the pxTimer parameter is NULL.
+ * configASSERT( pxTimer );
+ *
+ * // Which timer expired?
+ * lArrayIndex = ( int32_t ) pvTimerGetTimerID( pxTimer );
+ *
+ * // Increment the number of times that pxTimer has expired.
+ * lExpireCounters[ lArrayIndex ] += 1;
+ *
+ * // If the timer has expired 10 times then stop it from running.
+ * if( lExpireCounters[ lArrayIndex ] == xMaxExpiryCountBeforeStopping )
+ * {
+ * // Do not use a block time if calling a timer API function from a
+ * // timer callback function, as doing so could cause a deadlock!
+ * xTimerStop( pxTimer, 0 );
+ * }
+ * }
+ *
+ * void main( void )
+ * {
+ * int32_t x;
+ *
+ * // Create then start some timers. Starting the timers before the
+ * scheduler
+ * // has been started means the timers will start running immediately that
+ * // the scheduler starts.
+ * for( x = 0; x < NUM_TIMERS; x++ )
+ * {
+ * xTimers[ x ] = xTimerCreate( "Timer", // Just a text name,
+ * not used by the kernel. ( 100 * x ), // The timer period in ticks. pdTRUE,
+ * // The timers will auto-reload themselves when they expire. ( void * ) x, //
+ * Assign each timer a unique id equal to its array index. vTimerCallback //
+ * Each timer calls the same callback when it expires.
+ * );
+ *
+ * if( xTimers[ x ] == NULL )
+ * {
+ * // The timer was not created.
+ * }
+ * else
+ * {
+ * // Start the timer. No block time is specified, and even if one
+ * was
+ * // it would be ignored because the scheduler has not yet been
+ * // started.
+ * if( xTimerStart( xTimers[ x ], 0 ) != pdPASS )
+ * {
+ * // The timer could not be set into the Active state.
+ * }
+ * }
+ * }
+ *
+ * // ...
+ * // Create tasks here.
+ * // ...
+ *
+ * // Starting the scheduler will start the timers running as they have
+ * already
+ * // been set into the active state.
+ * vTaskStartScheduler();
+ *
+ * // Should not reach here.
+ * for( ;; );
+ * }
+ * @endverbatim
+ */
+/* Dynamically allocates the timer's storage internally; compiled in only
+ * when configSUPPORT_DYNAMIC_ALLOCATION is 1. Full usage notes are in the
+ * reference comment above. */
+#if (configSUPPORT_DYNAMIC_ALLOCATION == 1)
+TimerHandle_t xTimerCreate(
+    const char
+        *const pcTimerName, /*lint !e971 Unqualified char types are allowed for
+                               strings and single characters only. */
+    const TickType_t xTimerPeriodInTicks,
+    const UBaseType_t uxAutoReload,
+    void *const pvTimerID,
+    TimerCallbackFunction_t pxCallbackFunction) PRIVILEGED_FUNCTION;
+#endif
+
+/**
+ * TimerHandle_t xTimerCreateStatic(const char * const pcTimerName,
+ * TickType_t xTimerPeriodInTicks,
+ * UBaseType_t uxAutoReload,
+ * void * pvTimerID,
+ * TimerCallbackFunction_t pxCallbackFunction,
+ * StaticTimer_t *pxTimerBuffer );
+ *
+ * Creates a new software timer instance, and returns a handle by which the
+ * created software timer can be referenced.
+ *
+ * Internally, within the FreeRTOS implementation, software timers use a block
+ * of memory, in which the timer data structure is stored. If a software timer
+ * is created using xTimerCreate() then the required memory is automatically
+ * dynamically allocated inside the xTimerCreate() function. (see
+ * http://www.freertos.org/a00111.html). If a software timer is created using
+ * xTimerCreateStatic() then the application writer must provide the memory that
+ * will get used by the software timer. xTimerCreateStatic() therefore allows a
+ * software timer to be created without using any dynamic memory allocation.
+ *
+ * Timers are created in the dormant state. The xTimerStart(), xTimerReset(),
+ * xTimerStartFromISR(), xTimerResetFromISR(), xTimerChangePeriod() and
+ * xTimerChangePeriodFromISR() API functions can all be used to transition a
+ * timer into the active state.
+ *
+ * @param pcTimerName A text name that is assigned to the timer. This is done
+ * purely to assist debugging. The kernel itself only ever references a timer
+ * by its handle, and never by its name.
+ *
+ * @param xTimerPeriodInTicks The timer period. The time is defined in tick
+ * periods so the constant portTICK_PERIOD_MS can be used to convert a time that
+ * has been specified in milliseconds. For example, if the timer must expire
+ * after 100 ticks, then xTimerPeriodInTicks should be set to 100.
+ * Alternatively, if the timer must expire after 500ms, then xPeriod can be set
+ * to ( 500 / portTICK_PERIOD_MS ) provided configTICK_RATE_HZ is less than or
+ * equal to 1000.
+ *
+ * @param uxAutoReload If uxAutoReload is set to pdTRUE then the timer will
+ * expire repeatedly with a frequency set by the xTimerPeriodInTicks parameter.
+ * If uxAutoReload is set to pdFALSE then the timer will be a one-shot timer and
+ * enter the dormant state after it expires.
+ *
+ * @param pvTimerID An identifier that is assigned to the timer being created.
+ * Typically this would be used in the timer callback function to identify which
+ * timer expired when the same callback function is assigned to more than one
+ * timer.
+ *
+ * @param pxCallbackFunction The function to call when the timer expires.
+ * Callback functions must have the prototype defined by
+ *TimerCallbackFunction_t, which is "void vCallbackFunction( TimerHandle_t
+ *xTimer );".
+ *
+ * @param pxTimerBuffer Must point to a variable of type StaticTimer_t, which
+ * will be then be used to hold the software timer's data structures, removing
+ * the need for the memory to be allocated dynamically.
+ *
+ * @return If the timer is created then a handle to the created timer is
+ * returned. If pxTimerBuffer was NULL then NULL is returned.
+ *
+ * Example usage:
+ * @verbatim
+ *
+ * // The buffer used to hold the software timer's data structure.
+ * static StaticTimer_t xTimerBuffer;
+ *
+ * // A variable that will be incremented by the software timer's callback
+ * // function.
+ * UBaseType_t uxVariableToIncrement = 0;
+ *
+ * // A software timer callback function that increments a variable passed to
+ * // it when the software timer was created. After the 5th increment the
+ * // callback function stops the software timer.
+ * static void prvTimerCallback( TimerHandle_t xExpiredTimer )
+ * {
+ * UBaseType_t *puxVariableToIncrement;
+ * BaseType_t xReturned;
+ *
+ * // Obtain the address of the variable to increment from the timer ID.
+ * puxVariableToIncrement = ( UBaseType_t * ) pvTimerGetTimerID(
+ *xExpiredTimer );
+ *
+ * // Increment the variable to show the timer callback has executed.
+ * ( *puxVariableToIncrement )++;
+ *
+ * // If this callback has executed the required number of times, stop the
+ * // timer.
+ * if( *puxVariableToIncrement == 5 )
+ * {
+ * // This is called from a timer callback so must not block.
+ * xTimerStop( xExpiredTimer, staticDONT_BLOCK );
+ * }
+ * }
+ *
+ *
+ * void main( void )
+ * {
+ * // Create the software timer. xTimerCreateStatic() has an extra parameter
+ * // than the normal xTimerCreate() API function. The parameter is a
+ *pointer
+ * // to the StaticTimer_t structure that will hold the software timer
+ * // structure. If the parameter is passed as NULL then the structure will
+ *be
+ * // allocated dynamically, just as if xTimerCreate() had been called.
+ * xTimer = xTimerCreateStatic( "T1", // Text name for the task.
+ *Helps debugging only. Not used by FreeRTOS. xTimerPeriod, // The period
+ *of the timer in ticks. pdTRUE, // This is an auto-reload timer. (
+ *void * ) &uxVariableToIncrement, // A variable incremented by the software
+ *timer's callback function prvTimerCallback, // The function to execute when
+ *the timer expires. &xTimerBuffer ); // The buffer that will hold the software
+ *timer structure.
+ *
+ * // The scheduler has not started yet so a block time is not used.
+ * xReturned = xTimerStart( xTimer, 0 );
+ *
+ * // ...
+ * // Create tasks here.
+ * // ...
+ *
+ * // Starting the scheduler will start the timers running as they have
+ *already
+ * // been set into the active state.
+ * vTaskStartScheduler();
+ *
+ * // Should not reach here.
+ * for( ;; );
+ * }
+ * @endverbatim
+ */
+/* Caller supplies the timer's storage via pxTimerBuffer (StaticTimer_t);
+ * compiled in only when configSUPPORT_STATIC_ALLOCATION is 1. Full usage
+ * notes are in the reference comment above. */
+#if (configSUPPORT_STATIC_ALLOCATION == 1)
+TimerHandle_t xTimerCreateStatic(
+    const char
+        *const pcTimerName, /*lint !e971 Unqualified char types are allowed for
+                               strings and single characters only. */
+    const TickType_t xTimerPeriodInTicks,
+    const UBaseType_t uxAutoReload,
+    void *const pvTimerID,
+    TimerCallbackFunction_t pxCallbackFunction,
+    StaticTimer_t *pxTimerBuffer) PRIVILEGED_FUNCTION;
+#endif /* configSUPPORT_STATIC_ALLOCATION */
+
+/**
+ * void *pvTimerGetTimerID( TimerHandle_t xTimer );
+ *
+ * Returns the ID assigned to the timer.
+ *
+ * IDs are assigned to timers using the pvTimerID parameter of the call to
+ * xTimerCreate() that was used to create the timer, and by calling the
+ * vTimerSetTimerID() API function.
+ *
+ * If the same callback function is assigned to multiple timers then the timer
+ * ID can be used as time specific (timer local) storage.
+ *
+ * @param xTimer The timer being queried.
+ *
+ * @return The ID assigned to the timer being queried.
+ *
+ * Example usage:
+ *
+ * See the xTimerCreate() API function example usage scenario.
+ */
+/* Returns the ID assigned at creation time (or by vTimerSetTimerID());
+ * see the reference comment above. */
+void *pvTimerGetTimerID(const TimerHandle_t xTimer) PRIVILEGED_FUNCTION;
+
+/**
+ * void vTimerSetTimerID( TimerHandle_t xTimer, void *pvNewID );
+ *
+ * Sets the ID assigned to the timer.
+ *
+ * IDs are assigned to timers using the pvTimerID parameter of the call to
+ * xTimerCreate() that was used to create the timer.
+ *
+ * If the same callback function is assigned to multiple timers then the timer
+ * ID can be used as time specific (timer local) storage.
+ *
+ * @param xTimer The timer being updated.
+ *
+ * @param pvNewID The ID to assign to the timer.
+ *
+ * Example usage:
+ *
+ * See the xTimerCreate() API function example usage scenario.
+ */
+/* Overwrites the timer's ID with pvNewID; see the reference comment above. */
+void vTimerSetTimerID(TimerHandle_t xTimer, void *pvNewID) PRIVILEGED_FUNCTION;
+
+/**
+ * BaseType_t xTimerIsTimerActive( TimerHandle_t xTimer );
+ *
+ * Queries a timer to see if it is active or dormant.
+ *
+ * A timer will be dormant if:
+ * 1) It has been created but not started, or
+ * 2) It is an expired one-shot timer that has not been restarted.
+ *
+ * Timers are created in the dormant state. The xTimerStart(), xTimerReset(),
+ * xTimerStartFromISR(), xTimerResetFromISR(), xTimerChangePeriod() and
+ * xTimerChangePeriodFromISR() API functions can all be used to transition a
+ * timer into the active state.
+ *
+ * @param xTimer The timer being queried.
+ *
+ * @return pdFALSE will be returned if the timer is dormant. A value other than
+ * pdFALSE will be returned if the timer is active.
+ *
+ * Example usage:
+ * @verbatim
+ * // This function assumes xTimer has already been created.
+ * void vAFunction( TimerHandle_t xTimer )
+ * {
+ * if( xTimerIsTimerActive( xTimer ) != pdFALSE ) // or more simply and
+ * equivalently "if( xTimerIsTimerActive( xTimer ) )"
+ * {
+ * // xTimer is active, do something.
+ * }
+ * else
+ * {
+ * // xTimer is not active, do something else.
+ * }
+ * }
+ * @endverbatim
+ */
+/* Returns pdFALSE when the timer is dormant, any other value when active;
+ * see the reference comment above. */
+BaseType_t xTimerIsTimerActive(TimerHandle_t xTimer) PRIVILEGED_FUNCTION;
+
+/**
+ * TaskHandle_t xTimerGetTimerDaemonTaskHandle( void );
+ *
+ * Simply returns the handle of the timer service/daemon task. It is not valid
+ * to call xTimerGetTimerDaemonTaskHandle() before the scheduler has been
+ * started.
+ */
+/* Only valid to call after the scheduler has been started (see note above). */
+TaskHandle_t xTimerGetTimerDaemonTaskHandle(void) PRIVILEGED_FUNCTION;
+
+/**
+ * BaseType_t xTimerStart( TimerHandle_t xTimer, TickType_t xTicksToWait );
+ *
+ * Timer functionality is provided by a timer service/daemon task. Many of the
+ * public FreeRTOS timer API functions send commands to the timer service task
+ * through a queue called the timer command queue. The timer command queue is
+ * private to the kernel itself and is not directly accessible to application
+ * code. The length of the timer command queue is set by the
+ * configTIMER_QUEUE_LENGTH configuration constant.
+ *
+ * xTimerStart() starts a timer that was previously created using the
+ * xTimerCreate() API function. If the timer had already been started and was
+ * already in the active state, then xTimerStart() has equivalent functionality
+ * to the xTimerReset() API function.
+ *
+ * Starting a timer ensures the timer is in the active state. If the timer
+ * is not stopped, deleted, or reset in the mean time, the callback function
+ * associated with the timer will get called 'n' ticks after xTimerStart() was
+ * called, where 'n' is the timers defined period.
+ *
+ * It is valid to call xTimerStart() before the scheduler has been started, but
+ * when this is done the timer will not actually start until the scheduler is
+ * started, and the timers expiry time will be relative to when the scheduler is
+ * started, not relative to when xTimerStart() was called.
+ *
+ * The configUSE_TIMERS configuration constant must be set to 1 for
+ * xTimerStart() to be available.
+ *
+ * @param xTimer The handle of the timer being started/restarted.
+ *
+ * @param xTicksToWait Specifies the time, in ticks, that the calling task
+ * should be held in the Blocked state to wait for the start command to be
+ * successfully sent to the timer command queue, should the queue already be
+ * full when xTimerStart() was called. xTicksToWait is ignored if xTimerStart()
+ * is called before the scheduler is started.
+ *
+ * @return pdFAIL will be returned if the start command could not be sent to
+ * the timer command queue even after xTicksToWait ticks had passed. pdPASS
+ * will be returned if the command was successfully sent to the timer command
+ * queue. When the command is actually processed will depend on the priority of
+ * the timer service/daemon task relative to other tasks in the system, although
+ * the timers expiry time is relative to when xTimerStart() is actually called.
+ * The timer service/daemon task priority is set by the
+ * configTIMER_TASK_PRIORITY configuration constant.
+ *
+ * Example usage:
+ *
+ * See the xTimerCreate() API function example usage scenario.
+ *
+ */
+/* Queues a start command to the timer daemon task. The current tick count is
+ * sent as the command time, so expiry is measured from when this macro is
+ * invoked. */
+#define xTimerStart(xTimer, xTicksToWait) \
+    xTimerGenericCommand( \
+        (xTimer), \
+        tmrCOMMAND_START, \
+        (xTaskGetTickCount()), \
+        NULL, \
+        (xTicksToWait))
+
+/**
+ * BaseType_t xTimerStop( TimerHandle_t xTimer, TickType_t xTicksToWait );
+ *
+ * Timer functionality is provided by a timer service/daemon task. Many of the
+ * public FreeRTOS timer API functions send commands to the timer service task
+ * through a queue called the timer command queue. The timer command queue is
+ * private to the kernel itself and is not directly accessible to application
+ * code. The length of the timer command queue is set by the
+ * configTIMER_QUEUE_LENGTH configuration constant.
+ *
+ * xTimerStop() stops a timer that was previously started using either of the
+ * The xTimerStart(), xTimerReset(), xTimerStartFromISR(), xTimerResetFromISR(),
+ * xTimerChangePeriod() or xTimerChangePeriodFromISR() API functions.
+ *
+ * Stopping a timer ensures the timer is not in the active state.
+ *
+ * The configUSE_TIMERS configuration constant must be set to 1 for xTimerStop()
+ * to be available.
+ *
+ * @param xTimer The handle of the timer being stopped.
+ *
+ * @param xTicksToWait Specifies the time, in ticks, that the calling task
+ * should be held in the Blocked state to wait for the stop command to be
+ * successfully sent to the timer command queue, should the queue already be
+ * full when xTimerStop() was called. xTicksToWait is ignored if xTimerStop()
+ * is called before the scheduler is started.
+ *
+ * @return pdFAIL will be returned if the stop command could not be sent to
+ * the timer command queue even after xTicksToWait ticks had passed. pdPASS
+ * will be returned if the command was successfully sent to the timer command
+ * queue. When the command is actually processed will depend on the priority of
+ * the timer service/daemon task relative to other tasks in the system. The
+ * timer service/daemon task priority is set by the configTIMER_TASK_PRIORITY
+ * configuration constant.
+ *
+ * Example usage:
+ *
+ * See the xTimerCreate() API function example usage scenario.
+ *
+ */
+/* Queues a stop command to the timer daemon task; no command time is needed,
+ * so 0U is passed. */
+#define xTimerStop(xTimer, xTicksToWait) \
+    xTimerGenericCommand((xTimer), tmrCOMMAND_STOP, 0U, NULL, (xTicksToWait))
+
+/**
+ * BaseType_t xTimerChangePeriod( TimerHandle_t xTimer,
+ * TickType_t xNewPeriod,
+ * TickType_t xTicksToWait );
+ *
+ * Timer functionality is provided by a timer service/daemon task. Many of the
+ * public FreeRTOS timer API functions send commands to the timer service task
+ * through a queue called the timer command queue. The timer command queue is
+ * private to the kernel itself and is not directly accessible to application
+ * code. The length of the timer command queue is set by the
+ * configTIMER_QUEUE_LENGTH configuration constant.
+ *
+ * xTimerChangePeriod() changes the period of a timer that was previously
+ * created using the xTimerCreate() API function.
+ *
+ * xTimerChangePeriod() can be called to change the period of an active or
+ * dormant state timer.
+ *
+ * The configUSE_TIMERS configuration constant must be set to 1 for
+ * xTimerChangePeriod() to be available.
+ *
+ * @param xTimer The handle of the timer that is having its period changed.
+ *
+ * @param xNewPeriod The new period for xTimer. Timer periods are specified in
+ * tick periods, so the constant portTICK_PERIOD_MS can be used to convert a
+ *time that has been specified in milliseconds. For example, if the timer must
+ * expire after 100 ticks, then xNewPeriod should be set to 100. Alternatively,
+ * if the timer must expire after 500ms, then xNewPeriod can be set to
+ * ( 500 / portTICK_PERIOD_MS ) provided configTICK_RATE_HZ is less than
+ * or equal to 1000.
+ *
+ * @param xTicksToWait Specifies the time, in ticks, that the calling task
+ *should be held in the Blocked state to wait for the change period command to
+ *be successfully sent to the timer command queue, should the queue already be
+ * full when xTimerChangePeriod() was called. xTicksToWait is ignored if
+ * xTimerChangePeriod() is called before the scheduler is started.
+ *
+ * @return pdFAIL will be returned if the change period command could not be
+ * sent to the timer command queue even after xTicksToWait ticks had passed.
+ * pdPASS will be returned if the command was successfully sent to the timer
+ * command queue. When the command is actually processed will depend on the
+ * priority of the timer service/daemon task relative to other tasks in the
+ * system. The timer service/daemon task priority is set by the
+ * configTIMER_TASK_PRIORITY configuration constant.
+ *
+ * Example usage:
+ * @verbatim
+ * // This function assumes xTimer has already been created. If the timer
+ * // referenced by xTimer is already active when it is called, then the timer
+ * // is deleted. If the timer referenced by xTimer is not active when it is
+ * // called, then the period of the timer is set to 500ms and the timer is
+ * // started.
+ * void vAFunction( TimerHandle_t xTimer )
+ * {
+ * if( xTimerIsTimerActive( xTimer ) != pdFALSE ) // or more simply and
+ *equivalently "if( xTimerIsTimerActive( xTimer ) )"
+ * {
+ * // xTimer is already active - delete it.
+ * xTimerDelete( xTimer );
+ * }
+ * else
+ * {
+ * // xTimer is not active, change its period to 500ms. This will also
+ * // cause the timer to start. Block for a maximum of 100 ticks if the
+ * // change period command cannot immediately be sent to the timer
+ * // command queue.
+ * if( xTimerChangePeriod( xTimer, 500 / portTICK_PERIOD_MS, 100 ) ==
+ *pdPASS )
+ * {
+ * // The command was successfully sent.
+ * }
+ * else
+ * {
+ * // The command could not be sent, even after waiting for 100
+ *ticks
+ * // to pass. Take appropriate action here.
+ * }
+ * }
+ * }
+ * @endverbatim
+ */
+/* Queues a change-period command; xNewPeriod travels in the command's value
+ * field. */
+#define xTimerChangePeriod(xTimer, xNewPeriod, xTicksToWait) \
+    xTimerGenericCommand( \
+        (xTimer), \
+        tmrCOMMAND_CHANGE_PERIOD, \
+        (xNewPeriod), \
+        NULL, \
+        (xTicksToWait))
+
+/**
+ * BaseType_t xTimerDelete( TimerHandle_t xTimer, TickType_t xTicksToWait );
+ *
+ * Timer functionality is provided by a timer service/daemon task. Many of the
+ * public FreeRTOS timer API functions send commands to the timer service task
+ * through a queue called the timer command queue. The timer command queue is
+ * private to the kernel itself and is not directly accessible to application
+ * code. The length of the timer command queue is set by the
+ * configTIMER_QUEUE_LENGTH configuration constant.
+ *
+ * xTimerDelete() deletes a timer that was previously created using the
+ * xTimerCreate() API function.
+ *
+ * The configUSE_TIMERS configuration constant must be set to 1 for
+ * xTimerDelete() to be available.
+ *
+ * @param xTimer The handle of the timer being deleted.
+ *
+ * @param xTicksToWait Specifies the time, in ticks, that the calling task
+ * should be held in the Blocked state to wait for the delete command to be
+ * successfully sent to the timer command queue, should the queue already be
+ * full when xTimerDelete() was called. xTicksToWait is ignored if
+ * xTimerDelete() is called before the scheduler is started.
+ *
+ * @return pdFAIL will be returned if the delete command could not be sent to
+ * the timer command queue even after xTicksToWait ticks had passed. pdPASS
+ * will be returned if the command was successfully sent to the timer command
+ * queue. When the command is actually processed will depend on the priority of
+ * the timer service/daemon task relative to other tasks in the system. The
+ * timer service/daemon task priority is set by the configTIMER_TASK_PRIORITY
+ * configuration constant.
+ *
+ * Example usage:
+ *
+ * See the xTimerChangePeriod() API function example usage scenario.
+ */
+/* Queues a delete command to the timer daemon task; no command time is
+ * needed, so 0U is passed. */
+#define xTimerDelete(xTimer, xTicksToWait) \
+    xTimerGenericCommand((xTimer), tmrCOMMAND_DELETE, 0U, NULL, (xTicksToWait))
+
+/**
+ * BaseType_t xTimerReset( TimerHandle_t xTimer, TickType_t xTicksToWait );
+ *
+ * Timer functionality is provided by a timer service/daemon task. Many of the
+ * public FreeRTOS timer API functions send commands to the timer service task
+ * through a queue called the timer command queue. The timer command queue is
+ * private to the kernel itself and is not directly accessible to application
+ * code. The length of the timer command queue is set by the
+ * configTIMER_QUEUE_LENGTH configuration constant.
+ *
+ * xTimerReset() re-starts a timer that was previously created using the
+ * xTimerCreate() API function. If the timer had already been started and was
+ * already in the active state, then xTimerReset() will cause the timer to
+ * re-evaluate its expiry time so that it is relative to when xTimerReset() was
+ * called. If the timer was in the dormant state then xTimerReset() has
+ * equivalent functionality to the xTimerStart() API function.
+ *
+ * Resetting a timer ensures the timer is in the active state. If the timer
+ * is not stopped, deleted, or reset in the mean time, the callback function
+ * associated with the timer will get called 'n' ticks after xTimerReset() was
+ * called, where 'n' is the timers defined period.
+ *
+ * It is valid to call xTimerReset() before the scheduler has been started, but
+ * when this is done the timer will not actually start until the scheduler is
+ * started, and the timers expiry time will be relative to when the scheduler is
+ * started, not relative to when xTimerReset() was called.
+ *
+ * The configUSE_TIMERS configuration constant must be set to 1 for
+ * xTimerReset() to be available.
+ *
+ * @param xTimer The handle of the timer being reset/started/restarted.
+ *
+ * @param xTicksToWait Specifies the time, in ticks, that the calling task
+ * should be held in the Blocked state to wait for the reset command to be
+ * successfully sent to the timer command queue, should the queue already be
+ * full when xTimerReset() was called. xTicksToWait is ignored if xTimerReset()
+ * is called before the scheduler is started.
+ *
+ * @return pdFAIL will be returned if the reset command could not be sent to
+ * the timer command queue even after xTicksToWait ticks had passed. pdPASS
+ * will be returned if the command was successfully sent to the timer command
+ * queue. When the command is actually processed will depend on the priority of
+ * the timer service/daemon task relative to other tasks in the system, although
+ * the timers expiry time is relative to when xTimerStart() is actually called.
+ * The timer service/daemon task priority is set by the
+ * configTIMER_TASK_PRIORITY configuration constant.
+ *
+ * Example usage:
+ * @verbatim
+ * // When a key is pressed, an LCD back-light is switched on. If 5 seconds
+ * pass
+ * // without a key being pressed, then the LCD back-light is switched off. In
+ * // this case, the timer is a one-shot timer.
+ *
+ * TimerHandle_t xBacklightTimer = NULL;
+ *
+ * // The callback function assigned to the one-shot timer. In this case the
+ * // parameter is not used.
+ * void vBacklightTimerCallback( TimerHandle_t pxTimer )
+ * {
+ * // The timer expired, therefore 5 seconds must have passed since a key
+ * // was pressed. Switch off the LCD back-light.
+ * vSetBacklightState( BACKLIGHT_OFF );
+ * }
+ *
+ * // The key press event handler.
+ * void vKeyPressEventHandler( char cKey )
+ * {
+ * // Ensure the LCD back-light is on, then reset the timer that is
+ * // responsible for turning the back-light off after 5 seconds of
+ * // key inactivity. Wait 100 ticks for the command to be successfully sent
+ * // if it cannot be sent immediately.
+ * vSetBacklightState( BACKLIGHT_ON );
+ * if( xTimerReset( xBacklightTimer, 100 ) != pdPASS )
+ * {
+ * // The reset command was not executed successfully. Take appropriate
+ * // action here.
+ * }
+ *
+ * // Perform the rest of the key processing here.
+ * }
+ *
+ * void main( void )
+ * {
+ * int32_t x;
+ *
+ * // Create then start the one-shot timer that is responsible for turning
+ * // the back-light off if no keys are pressed within a 5 second period.
+ * xBacklightTimer = xTimerCreate( "BacklightTimer", // Just a
+ * text name, not used by the kernel. ( 5000 / portTICK_PERIOD_MS), // The timer
+ * period in ticks. pdFALSE, // The timer is a one-shot
+ * timer. 0, // The id is not used by the callback so
+ * can take any value. vBacklightTimerCallback // The callback function that
+ * switches the LCD back-light off.
+ * );
+ *
+ * if( xBacklightTimer == NULL )
+ * {
+ * // The timer was not created.
+ * }
+ * else
+ * {
+ * // Start the timer. No block time is specified, and even if one was
+ * // it would be ignored because the scheduler has not yet been
+ * // started.
+ * if( xTimerStart( xBacklightTimer, 0 ) != pdPASS )
+ * {
+ * // The timer could not be set into the Active state.
+ * }
+ * }
+ *
+ * // ...
+ * // Create tasks here.
+ * // ...
+ *
+ * // Starting the scheduler will start the timer running as it has already
+ * // been set into the active state.
+ * vTaskStartScheduler();
+ *
+ * // Should not reach here.
+ * for( ;; );
+ * }
+ * @endverbatim
+ */
+/* Queues a reset command. Like xTimerStart(), the current tick count is sent
+ * as the command time so the expiry is re-evaluated relative to now. */
+#define xTimerReset(xTimer, xTicksToWait) \
+    xTimerGenericCommand( \
+        (xTimer), \
+        tmrCOMMAND_RESET, \
+        (xTaskGetTickCount()), \
+        NULL, \
+        (xTicksToWait))
+
+/**
+ * BaseType_t xTimerStartFromISR( TimerHandle_t xTimer,
+ * BaseType_t *pxHigherPriorityTaskWoken );
+ *
+ * A version of xTimerStart() that can be called from an interrupt service
+ * routine.
+ *
+ * @param xTimer The handle of the timer being started/restarted.
+ *
+ * @param pxHigherPriorityTaskWoken The timer service/daemon task spends most
+ * of its time in the Blocked state, waiting for messages to arrive on the timer
+ * command queue. Calling xTimerStartFromISR() writes a message to the timer
+ * command queue, so has the potential to transition the timer service/daemon
+ * task out of the Blocked state. If calling xTimerStartFromISR() causes the
+ * timer service/daemon task to leave the Blocked state, and the timer service/
+ * daemon task has a priority equal to or greater than the currently executing
+ * task (the task that was interrupted), then *pxHigherPriorityTaskWoken will
+ * get set to pdTRUE internally within the xTimerStartFromISR() function. If
+ * xTimerStartFromISR() sets this value to pdTRUE then a context switch should
+ * be performed before the interrupt exits.
+ *
+ * @return pdFAIL will be returned if the start command could not be sent to
+ * the timer command queue. pdPASS will be returned if the command was
+ * successfully sent to the timer command queue. When the command is actually
+ * processed will depend on the priority of the timer service/daemon task
+ * relative to other tasks in the system, although the timers expiry time is
+ * relative to when xTimerStartFromISR() is actually called. The timer
+ * service/daemon task priority is set by the configTIMER_TASK_PRIORITY
+ * configuration constant.
+ *
+ * Example usage:
+ * @verbatim
+ * // This scenario assumes xBacklightTimer has already been created. When a
+ * // key is pressed, an LCD back-light is switched on. If 5 seconds pass
+ * // without a key being pressed, then the LCD back-light is switched off. In
+ * // this case, the timer is a one-shot timer, and unlike the example given for
+ * // the xTimerReset() function, the key press event handler is an interrupt
+ * // service routine.
+ *
+ * // The callback function assigned to the one-shot timer. In this case the
+ * // parameter is not used.
+ * void vBacklightTimerCallback( TimerHandle_t pxTimer )
+ * {
+ * // The timer expired, therefore 5 seconds must have passed since a key
+ * // was pressed. Switch off the LCD back-light.
+ * vSetBacklightState( BACKLIGHT_OFF );
+ * }
+ *
+ * // The key press interrupt service routine.
+ * void vKeyPressEventInterruptHandler( void )
+ * {
+ * BaseType_t xHigherPriorityTaskWoken = pdFALSE;
+ *
+ * // Ensure the LCD back-light is on, then restart the timer that is
+ * // responsible for turning the back-light off after 5 seconds of
+ * // key inactivity. This is an interrupt service routine so can only
+ * // call FreeRTOS API functions that end in "FromISR".
+ * vSetBacklightState( BACKLIGHT_ON );
+ *
+ * // xTimerStartFromISR() or xTimerResetFromISR() could be called here
+ * // as both cause the timer to re-calculate its expiry time.
+ * // xHigherPriorityTaskWoken was initialised to pdFALSE when it was
+ * // declared (in this function).
+ * if( xTimerStartFromISR( xBacklightTimer, &xHigherPriorityTaskWoken ) !=
+ *pdPASS )
+ * {
+ * // The start command was not executed successfully. Take appropriate
+ * // action here.
+ * }
+ *
+ * // Perform the rest of the key processing here.
+ *
+ * // If xHigherPriorityTaskWoken equals pdTRUE, then a context switch
+ * // should be performed. The syntax required to perform a context switch
+ * // from inside an ISR varies from port to port, and from compiler to
+ * // compiler. Inspect the demos for the port you are using to find the
+ * // actual syntax required.
+ * if( xHigherPriorityTaskWoken != pdFALSE )
+ * {
+ * // Call the interrupt safe yield function here (actual function
+ * // depends on the FreeRTOS port being used).
+ * }
+ * }
+ * @endverbatim
+ */
+/* ISR-safe start: uses xTaskGetTickCountFromISR() for the command time and a
+ * block time of 0U (an ISR must not block on the command queue). */
+#define xTimerStartFromISR(xTimer, pxHigherPriorityTaskWoken) \
+    xTimerGenericCommand( \
+        (xTimer), \
+        tmrCOMMAND_START_FROM_ISR, \
+        (xTaskGetTickCountFromISR()), \
+        (pxHigherPriorityTaskWoken), \
+        0U)
+
+/**
+ * BaseType_t xTimerStopFromISR( TimerHandle_t xTimer,
+ * BaseType_t *pxHigherPriorityTaskWoken );
+ *
+ * A version of xTimerStop() that can be called from an interrupt service
+ * routine.
+ *
+ * @param xTimer The handle of the timer being stopped.
+ *
+ * @param pxHigherPriorityTaskWoken The timer service/daemon task spends most
+ * of its time in the Blocked state, waiting for messages to arrive on the timer
+ * command queue. Calling xTimerStopFromISR() writes a message to the timer
+ * command queue, so has the potential to transition the timer service/daemon
+ * task out of the Blocked state. If calling xTimerStopFromISR() causes the
+ * timer service/daemon task to leave the Blocked state, and the timer service/
+ * daemon task has a priority equal to or greater than the currently executing
+ * task (the task that was interrupted), then *pxHigherPriorityTaskWoken will
+ * get set to pdTRUE internally within the xTimerStopFromISR() function. If
+ * xTimerStopFromISR() sets this value to pdTRUE then a context switch should
+ * be performed before the interrupt exits.
+ *
+ * @return pdFAIL will be returned if the stop command could not be sent to
+ * the timer command queue. pdPASS will be returned if the command was
+ * successfully sent to the timer command queue. When the command is actually
+ * processed will depend on the priority of the timer service/daemon task
+ * relative to other tasks in the system. The timer service/daemon task
+ * priority is set by the configTIMER_TASK_PRIORITY configuration constant.
+ *
+ * Example usage:
+ * @verbatim
+ * // This scenario assumes xTimer has already been created and started. When
+ * // an interrupt occurs, the timer should be simply stopped.
+ *
+ * // The interrupt service routine that stops the timer.
+ * void vAnExampleInterruptServiceRoutine( void )
+ * {
+ * BaseType_t xHigherPriorityTaskWoken = pdFALSE;
+ *
+ * // The interrupt has occurred - simply stop the timer.
+ * // xHigherPriorityTaskWoken was set to pdFALSE where it was defined
+ * // (within this function). As this is an interrupt service routine, only
+ * // FreeRTOS API functions that end in "FromISR" can be used.
+ * if( xTimerStopFromISR( xTimer, &xHigherPriorityTaskWoken ) != pdPASS )
+ * {
+ * // The stop command was not executed successfully. Take appropriate
+ * // action here.
+ * }
+ *
+ * // If xHigherPriorityTaskWoken equals pdTRUE, then a context switch
+ * // should be performed. The syntax required to perform a context switch
+ * // from inside an ISR varies from port to port, and from compiler to
+ * // compiler. Inspect the demos for the port you are using to find the
+ * // actual syntax required.
+ * if( xHigherPriorityTaskWoken != pdFALSE )
+ * {
+ * // Call the interrupt safe yield function here (actual function
+ * // depends on the FreeRTOS port being used).
+ * }
+ * }
+ * @endverbatim
+ */
+#define xTimerStopFromISR(xTimer, pxHigherPriorityTaskWoken) \
+ xTimerGenericCommand( \
+ (xTimer), \
+ tmrCOMMAND_STOP_FROM_ISR, \
+ 0, \
+ (pxHigherPriorityTaskWoken), \
+ 0U)
+
+/**
+ * BaseType_t xTimerChangePeriodFromISR( TimerHandle_t xTimer,
+ * TickType_t xNewPeriod,
+ * BaseType_t *pxHigherPriorityTaskWoken
+ *);
+ *
+ * A version of xTimerChangePeriod() that can be called from an interrupt
+ * service routine.
+ *
+ * @param xTimer The handle of the timer that is having its period changed.
+ *
+ * @param xNewPeriod The new period for xTimer. Timer periods are specified in
+ * tick periods, so the constant portTICK_PERIOD_MS can be used to convert a
+ *time that has been specified in milliseconds. For example, if the timer must
+ * expire after 100 ticks, then xNewPeriod should be set to 100. Alternatively,
+ * if the timer must expire after 500ms, then xNewPeriod can be set to
+ * ( 500 / portTICK_PERIOD_MS ) provided configTICK_RATE_HZ is less than
+ * or equal to 1000.
+ *
+ * @param pxHigherPriorityTaskWoken The timer service/daemon task spends most
+ * of its time in the Blocked state, waiting for messages to arrive on the timer
+ * command queue. Calling xTimerChangePeriodFromISR() writes a message to the
+ * timer command queue, so has the potential to transition the timer service/
+ * daemon task out of the Blocked state. If calling xTimerChangePeriodFromISR()
+ * causes the timer service/daemon task to leave the Blocked state, and the
+ * timer service/daemon task has a priority equal to or greater than the
+ * currently executing task (the task that was interrupted), then
+ * *pxHigherPriorityTaskWoken will get set to pdTRUE internally within the
+ * xTimerChangePeriodFromISR() function. If xTimerChangePeriodFromISR() sets
+ * this value to pdTRUE then a context switch should be performed before the
+ * interrupt exits.
+ *
+ * @return pdFAIL will be returned if the command to change the timer's period
+ * could not be sent to the timer command queue. pdPASS will be returned if the
+ * command was successfully sent to the timer command queue. When the command
+ * is actually processed will depend on the priority of the timer service/daemon
+ * task relative to other tasks in the system. The timer service/daemon task
+ * priority is set by the configTIMER_TASK_PRIORITY configuration constant.
+ *
+ * Example usage:
+ * @verbatim
+ * // This scenario assumes xTimer has already been created and started. When
+ * // an interrupt occurs, the period of xTimer should be changed to 500ms.
+ *
+ * // The interrupt service routine that changes the period of xTimer.
+ * void vAnExampleInterruptServiceRoutine( void )
+ * {
+ * BaseType_t xHigherPriorityTaskWoken = pdFALSE;
+ *
+ * // The interrupt has occurred - change the period of xTimer to 500ms.
+ * // xHigherPriorityTaskWoken was set to pdFALSE where it was defined
+ * // (within this function). As this is an interrupt service routine, only
+ * // FreeRTOS API functions that end in "FromISR" can be used.
+ * if( xTimerChangePeriodFromISR( xTimer, ( 500 / portTICK_PERIOD_MS ),
+ * &xHigherPriorityTaskWoken ) != pdPASS )
+ * {
+ * // The command to change the timer's period was not executed
+ * // successfully. Take appropriate action here.
+ * }
+ *
+ * // If xHigherPriorityTaskWoken equals pdTRUE, then a context switch
+ * // should be performed. The syntax required to perform a context switch
+ * // from inside an ISR varies from port to port, and from compiler to
+ * // compiler. Inspect the demos for the port you are using to find the
+ * // actual syntax required.
+ * if( xHigherPriorityTaskWoken != pdFALSE )
+ * {
+ * // Call the interrupt safe yield function here (actual function
+ * // depends on the FreeRTOS port being used).
+ * }
+ * }
+ * @endverbatim
+ */
+#define xTimerChangePeriodFromISR( \
+ xTimer, xNewPeriod, pxHigherPriorityTaskWoken) \
+ xTimerGenericCommand( \
+ (xTimer), \
+ tmrCOMMAND_CHANGE_PERIOD_FROM_ISR, \
+ (xNewPeriod), \
+ (pxHigherPriorityTaskWoken), \
+ 0U)
+
+/**
+ * BaseType_t xTimerResetFromISR( TimerHandle_t xTimer,
+ * BaseType_t *pxHigherPriorityTaskWoken );
+ *
+ * A version of xTimerReset() that can be called from an interrupt service
+ * routine.
+ *
+ * @param xTimer The handle of the timer that is to be started, reset, or
+ * restarted.
+ *
+ * @param pxHigherPriorityTaskWoken The timer service/daemon task spends most
+ * of its time in the Blocked state, waiting for messages to arrive on the timer
+ * command queue. Calling xTimerResetFromISR() writes a message to the timer
+ * command queue, so has the potential to transition the timer service/daemon
+ * task out of the Blocked state. If calling xTimerResetFromISR() causes the
+ * timer service/daemon task to leave the Blocked state, and the timer service/
+ * daemon task has a priority equal to or greater than the currently executing
+ * task (the task that was interrupted), then *pxHigherPriorityTaskWoken will
+ * get set to pdTRUE internally within the xTimerResetFromISR() function. If
+ * xTimerResetFromISR() sets this value to pdTRUE then a context switch should
+ * be performed before the interrupt exits.
+ *
+ * @return pdFAIL will be returned if the reset command could not be sent to
+ * the timer command queue. pdPASS will be returned if the command was
+ * successfully sent to the timer command queue. When the command is actually
+ * processed will depend on the priority of the timer service/daemon task
+ * relative to other tasks in the system, although the timer's expiry time is
+ * relative to when xTimerResetFromISR() is actually called. The timer
+ *service/daemon task priority is set by the configTIMER_TASK_PRIORITY
+ *configuration constant.
+ *
+ * Example usage:
+ * @verbatim
+ * // This scenario assumes xBacklightTimer has already been created. When a
+ * // key is pressed, an LCD back-light is switched on. If 5 seconds pass
+ * // without a key being pressed, then the LCD back-light is switched off. In
+ * // this case, the timer is a one-shot timer, and unlike the example given for
+ * // the xTimerReset() function, the key press event handler is an interrupt
+ * // service routine.
+ *
+ * // The callback function assigned to the one-shot timer. In this case the
+ * // parameter is not used.
+ * void vBacklightTimerCallback( TimerHandle_t pxTimer )
+ * {
+ * // The timer expired, therefore 5 seconds must have passed since a key
+ * // was pressed. Switch off the LCD back-light.
+ * vSetBacklightState( BACKLIGHT_OFF );
+ * }
+ *
+ * // The key press interrupt service routine.
+ * void vKeyPressEventInterruptHandler( void )
+ * {
+ * BaseType_t xHigherPriorityTaskWoken = pdFALSE;
+ *
+ * // Ensure the LCD back-light is on, then reset the timer that is
+ * // responsible for turning the back-light off after 5 seconds of
+ * // key inactivity. This is an interrupt service routine so can only
+ * // call FreeRTOS API functions that end in "FromISR".
+ * vSetBacklightState( BACKLIGHT_ON );
+ *
+ * // xTimerStartFromISR() or xTimerResetFromISR() could be called here
+ * // as both cause the timer to re-calculate its expiry time.
+ * // xHigherPriorityTaskWoken was initialised to pdFALSE when it was
+ * // declared (in this function).
+ * if( xTimerResetFromISR( xBacklightTimer, &xHigherPriorityTaskWoken ) !=
+ *pdPASS )
+ * {
+ * // The reset command was not executed successfully. Take appropriate
+ * // action here.
+ * }
+ *
+ * // Perform the rest of the key processing here.
+ *
+ * // If xHigherPriorityTaskWoken equals pdTRUE, then a context switch
+ * // should be performed. The syntax required to perform a context switch
+ * // from inside an ISR varies from port to port, and from compiler to
+ * // compiler. Inspect the demos for the port you are using to find the
+ * // actual syntax required.
+ * if( xHigherPriorityTaskWoken != pdFALSE )
+ * {
+ * // Call the interrupt safe yield function here (actual function
+ * // depends on the FreeRTOS port being used).
+ * }
+ * }
+ * @endverbatim
+ */
+#define xTimerResetFromISR(xTimer, pxHigherPriorityTaskWoken) \
+ xTimerGenericCommand( \
+ (xTimer), \
+ tmrCOMMAND_RESET_FROM_ISR, \
+ (xTaskGetTickCountFromISR()), \
+ (pxHigherPriorityTaskWoken), \
+ 0U)
+
+/**
+ * BaseType_t xTimerPendFunctionCallFromISR( PendedFunction_t xFunctionToPend,
+ * void *pvParameter1,
+ * uint32_t ulParameter2,
+ * BaseType_t
+ **pxHigherPriorityTaskWoken );
+ *
+ *
+ * Used from application interrupt service routines to defer the execution of a
+ * function to the RTOS daemon task (the timer service task, hence this function
+ * is implemented in timers.c and is prefixed with 'Timer').
+ *
+ * Ideally an interrupt service routine (ISR) is kept as short as possible, but
+ * sometimes an ISR either has a lot of processing to do, or needs to perform
+ * processing that is not deterministic. In these cases
+ * xTimerPendFunctionCallFromISR() can be used to defer processing of a function
+ * to the RTOS daemon task.
+ *
+ * A mechanism is provided that allows the interrupt to return directly to the
+ * task that will subsequently execute the pended callback function. This
+ * allows the callback function to execute contiguously in time with the
+ * interrupt - just as if the callback had executed in the interrupt itself.
+ *
+ * @param xFunctionToPend The function to execute from the timer service/
+ * daemon task. The function must conform to the PendedFunction_t
+ * prototype.
+ *
+ * @param pvParameter1 The value of the callback function's first parameter.
+ * The parameter has a void * type to allow it to be used to pass any type.
+ * For example, unsigned longs can be cast to a void *, or the void * can be
+ * used to point to a structure.
+ *
+ * @param ulParameter2 The value of the callback function's second parameter.
+ *
+ * @param pxHigherPriorityTaskWoken As mentioned above, calling this function
+ * will result in a message being sent to the timer daemon task. If the
+ * priority of the timer daemon task (which is set using
+ * configTIMER_TASK_PRIORITY in FreeRTOSConfig.h) is higher than the priority of
+ * the currently running task (the task the interrupt interrupted) then
+ * *pxHigherPriorityTaskWoken will be set to pdTRUE within
+ * xTimerPendFunctionCallFromISR(), indicating that a context switch should be
+ * requested before the interrupt exits. For that reason
+ * *pxHigherPriorityTaskWoken must be initialised to pdFALSE. See the
+ * example code below.
+ *
+ * @return pdPASS is returned if the message was successfully sent to the
+ * timer daemon task, otherwise pdFALSE is returned.
+ *
+ * Example usage:
+ * @verbatim
+ *
+ * // The callback function that will execute in the context of the daemon
+ *task.
+ * // Note callback functions must all use this same prototype.
+ * void vProcessInterface( void *pvParameter1, uint32_t ulParameter2 )
+ * {
+ * BaseType_t xInterfaceToService;
+ *
+ * // The interface that requires servicing is passed in the second
+ * // parameter. The first parameter is not used in this case.
+ * xInterfaceToService = ( BaseType_t ) ulParameter2;
+ *
+ * // ...Perform the processing here...
+ * }
+ *
+ * // An ISR that receives data packets from multiple interfaces
+ * void vAnISR( void )
+ * {
+ * BaseType_t xInterfaceToService, xHigherPriorityTaskWoken;
+ *
+ * // Query the hardware to determine which interface needs processing.
+ * xInterfaceToService = prvCheckInterfaces();
+ *
+ * // The actual processing is to be deferred to a task. Request the
+ * // vProcessInterface() callback function is executed, passing in the
+ * // number of the interface that needs processing. The interface to
+ * // service is passed in the second parameter. The first parameter is
+ * // not used in this case.
+ * xHigherPriorityTaskWoken = pdFALSE;
+ * xTimerPendFunctionCallFromISR( vProcessInterface, NULL, ( uint32_t )
+ *xInterfaceToService, &xHigherPriorityTaskWoken );
+ *
+ * // If xHigherPriorityTaskWoken is now set to pdTRUE then a context
+ * // switch should be requested. The macro used is port specific and will
+ * // be either portYIELD_FROM_ISR() or portEND_SWITCHING_ISR() - refer to
+ * // the documentation page for the port being used.
+ * portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
+ *
+ * }
+ * @endverbatim
+ */
+BaseType_t xTimerPendFunctionCallFromISR(
+ PendedFunction_t xFunctionToPend,
+ void *pvParameter1,
+ uint32_t ulParameter2,
+ BaseType_t *pxHigherPriorityTaskWoken) PRIVILEGED_FUNCTION;
+
+/**
+ * BaseType_t xTimerPendFunctionCall( PendedFunction_t xFunctionToPend,
+ * void *pvParameter1,
+ * uint32_t ulParameter2,
+ * TickType_t xTicksToWait );
+ *
+ *
+ * Used to defer the execution of a function to the RTOS daemon task (the timer
+ * service task, hence this function is implemented in timers.c and is prefixed
+ * with 'Timer').
+ *
+ * @param xFunctionToPend The function to execute from the timer service/
+ * daemon task. The function must conform to the PendedFunction_t
+ * prototype.
+ *
+ * @param pvParameter1 The value of the callback function's first parameter.
+ * The parameter has a void * type to allow it to be used to pass any type.
+ * For example, unsigned longs can be cast to a void *, or the void * can be
+ * used to point to a structure.
+ *
+ * @param ulParameter2 The value of the callback function's second parameter.
+ *
+ * @param xTicksToWait Calling this function will result in a message being
+ * sent to the timer daemon task on a queue. xTicksToWait is the amount of
+ * time the calling task should remain in the Blocked state (so not using any
+ * processing time) for space to become available on the timer queue if the
+ * queue is found to be full.
+ *
+ * @return pdPASS is returned if the message was successfully sent to the
+ * timer daemon task, otherwise pdFALSE is returned.
+ *
+ */
+BaseType_t xTimerPendFunctionCall(
+ PendedFunction_t xFunctionToPend,
+ void *pvParameter1,
+ uint32_t ulParameter2,
+ TickType_t xTicksToWait) PRIVILEGED_FUNCTION;
+
+/**
+ * const char * const pcTimerGetName( TimerHandle_t xTimer );
+ *
+ * Returns the name that was assigned to a timer when the timer was created.
+ *
+ * @param xTimer The handle of the timer being queried.
+ *
+ * @return The name assigned to the timer specified by the xTimer parameter.
+ */
+const char *pcTimerGetName(TimerHandle_t xTimer)
+ PRIVILEGED_FUNCTION; /*lint !e971 Unqualified char types are allowed for
+ strings and single characters only. */
+
+/**
+ * void vTimerSetReloadMode( TimerHandle_t xTimer, const UBaseType_t
+ * uxAutoReload );
+ *
+ * Updates a timer to be either an auto-reload timer, in which case the timer
+ * automatically resets itself each time it expires, or a one-shot timer, in
+ * which case the timer will only expire once unless it is manually restarted.
+ *
+ * @param xTimer The handle of the timer being updated.
+ *
+ * @param uxAutoReload If uxAutoReload is set to pdTRUE then the timer will
+ * expire repeatedly with a frequency set by the timer's period (see the
+ * xTimerPeriodInTicks parameter of the xTimerCreate() API function). If
+ * uxAutoReload is set to pdFALSE then the timer will be a one-shot timer and
+ * enter the dormant state after it expires.
+ */
+void vTimerSetReloadMode(TimerHandle_t xTimer, const UBaseType_t uxAutoReload)
+ PRIVILEGED_FUNCTION;
+
+/**
+ * UBaseType_t uxTimerGetReloadMode( TimerHandle_t xTimer );
+ *
+ * Queries a timer to determine if it is an auto-reload timer, in which case the
+ * timer automatically resets itself each time it expires, or a one-shot timer,
+ * in which case the timer will only expire once unless it is manually
+ * restarted.
+ *
+ * @param xTimer The handle of the timer being queried.
+ *
+ * @return If the timer is an auto-reload timer then pdTRUE is returned,
+ * otherwise pdFALSE is returned.
+ */
+UBaseType_t uxTimerGetReloadMode(TimerHandle_t xTimer) PRIVILEGED_FUNCTION;
+
+/**
+ * TickType_t xTimerGetPeriod( TimerHandle_t xTimer );
+ *
+ * Returns the period of a timer.
+ *
+ * @param xTimer The handle of the timer being queried.
+ *
+ * @return The period of the timer in ticks.
+ */
+TickType_t xTimerGetPeriod(TimerHandle_t xTimer) PRIVILEGED_FUNCTION;
+
+/**
+ * TickType_t xTimerGetExpiryTime( TimerHandle_t xTimer );
+ *
+ * Returns the time in ticks at which the timer will expire. If this is less
+ * than the current tick count then the expiry time has overflowed from the
+ * current time.
+ *
+ * @param xTimer The handle of the timer being queried.
+ *
+ * @return If the timer is running then the time in ticks at which the timer
+ * will next expire is returned. If the timer is not running then the return
+ * value is undefined.
+ */
+TickType_t xTimerGetExpiryTime(TimerHandle_t xTimer) PRIVILEGED_FUNCTION;
+
+/*
+ * Functions beyond this part are not part of the public API and are intended
+ * for use by the kernel only.
+ */
+BaseType_t xTimerCreateTimerTask(void) PRIVILEGED_FUNCTION;
+BaseType_t xTimerGenericCommand(
+ TimerHandle_t xTimer,
+ const BaseType_t xCommandID,
+ const TickType_t xOptionalValue,
+ BaseType_t *const pxHigherPriorityTaskWoken,
+ const TickType_t xTicksToWait) PRIVILEGED_FUNCTION;
+
+#if (configUSE_TRACE_FACILITY == 1)
+void vTimerSetTimerNumber(TimerHandle_t xTimer, UBaseType_t uxTimerNumber)
+ PRIVILEGED_FUNCTION;
+UBaseType_t uxTimerGetTimerNumber(TimerHandle_t xTimer) PRIVILEGED_FUNCTION;
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* TIMERS_H */
diff --git a/product/rcar/src/CMSIS-FreeRTOS/Source/list.c b/product/rcar/src/CMSIS-FreeRTOS/Source/list.c
new file mode 100644
index 00000000..61d4fcad
--- /dev/null
+++ b/product/rcar/src/CMSIS-FreeRTOS/Source/list.c
@@ -0,0 +1,205 @@
+/*
+ * FreeRTOS Kernel V10.3.1
+ * Copyright (C) 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * http://www.FreeRTOS.org
+ * http://aws.amazon.com/freertos
+ *
+ * 1 tab == 4 spaces!
+ */
+
+#include "FreeRTOS.h"
+#include "list.h"
+
+#include <stdlib.h>
+
+/*-----------------------------------------------------------
+ * PUBLIC LIST API documented in list.h
+ *----------------------------------------------------------*/
+
+void vListInitialise(List_t *const pxList)
+{
+ /* The list structure contains a list item which is used to mark the
+ end of the list. To initialise the list the list end is inserted
+ as the only list entry. */
+ pxList->pxIndex = (ListItem_t *)&(
+ pxList->xListEnd); /*lint !e826 !e740 !e9087 The mini list structure is
+ used as the list end to save RAM. This is checked
+ and valid. */
+
+ /* The list end value is the highest possible value in the list to
+ ensure it remains at the end of the list. */
+ pxList->xListEnd.xItemValue = portMAX_DELAY;
+
+ /* The list end next and previous pointers point to itself so we know
+ when the list is empty. */
+ pxList->xListEnd.pxNext = (ListItem_t *)&(
+ pxList->xListEnd); /*lint !e826 !e740 !e9087 The mini list structure is
+ used as the list end to save RAM. This is checked
+ and valid. */
+ pxList->xListEnd.pxPrevious = (ListItem_t *)&(
+ pxList->xListEnd); /*lint !e826 !e740 !e9087 The mini list structure is
+ used as the list end to save RAM. This is checked
+ and valid. */
+
+ pxList->uxNumberOfItems = (UBaseType_t)0U;
+
+ /* Write known values into the list if
+ configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES is set to 1. */
+ listSET_LIST_INTEGRITY_CHECK_1_VALUE(pxList);
+ listSET_LIST_INTEGRITY_CHECK_2_VALUE(pxList);
+}
+/*-----------------------------------------------------------*/
+
+void vListInitialiseItem(ListItem_t *const pxItem)
+{
+ /* Make sure the list item is not recorded as being on a list. */
+ pxItem->pxContainer = NULL;
+
+ /* Write known values into the list item if
+ configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES is set to 1. */
+ listSET_FIRST_LIST_ITEM_INTEGRITY_CHECK_VALUE(pxItem);
+ listSET_SECOND_LIST_ITEM_INTEGRITY_CHECK_VALUE(pxItem);
+}
+/*-----------------------------------------------------------*/
+
+void vListInsertEnd(List_t *const pxList, ListItem_t *const pxNewListItem)
+{
+ ListItem_t *const pxIndex = pxList->pxIndex;
+
+ /* Only effective when configASSERT() is also defined, these tests may catch
+ the list data structures being overwritten in memory. They will not catch
+ data errors caused by incorrect configuration or use of FreeRTOS. */
+ listTEST_LIST_INTEGRITY(pxList);
+ listTEST_LIST_ITEM_INTEGRITY(pxNewListItem);
+
+ /* Insert a new list item into pxList, but rather than sort the list,
+ makes the new list item the last item to be removed by a call to
+ listGET_OWNER_OF_NEXT_ENTRY(). */
+ pxNewListItem->pxNext = pxIndex;
+ pxNewListItem->pxPrevious = pxIndex->pxPrevious;
+
+ /* Only used during decision coverage testing. */
+ mtCOVERAGE_TEST_DELAY();
+
+ pxIndex->pxPrevious->pxNext = pxNewListItem;
+ pxIndex->pxPrevious = pxNewListItem;
+
+ /* Remember which list the item is in. */
+ pxNewListItem->pxContainer = pxList;
+
+ (pxList->uxNumberOfItems)++;
+}
+/*-----------------------------------------------------------*/
+
+void vListInsert(List_t *const pxList, ListItem_t *const pxNewListItem)
+{
+ ListItem_t *pxIterator;
+ const TickType_t xValueOfInsertion = pxNewListItem->xItemValue;
+
+ /* Only effective when configASSERT() is also defined, these tests may catch
+ the list data structures being overwritten in memory. They will not catch
+ data errors caused by incorrect configuration or use of FreeRTOS. */
+ listTEST_LIST_INTEGRITY(pxList);
+ listTEST_LIST_ITEM_INTEGRITY(pxNewListItem);
+
+ /* Insert the new list item into the list, sorted in xItemValue order.
+
+ If the list already contains a list item with the same item value then the
+ new list item should be placed after it. This ensures that TCBs which are
+ stored in ready lists (all of which have the same xItemValue value) get a
+ share of the CPU. However, if the xItemValue is the same as the back marker
+ the iteration loop below will not end. Therefore the value is checked
+ first, and the algorithm slightly modified if necessary. */
+ if (xValueOfInsertion == portMAX_DELAY) {
+ pxIterator = pxList->xListEnd.pxPrevious;
+ } else {
+ /* *** NOTE ***********************************************************
+ If you find your application is crashing here then likely causes are
+ listed below. In addition see https://www.freertos.org/FAQHelp.html for
+ more tips, and ensure configASSERT() is defined!
+ https://www.freertos.org/a00110.html#configASSERT
+
+ 1) Stack overflow -
+ see
+ https://www.freertos.org/Stacks-and-stack-overflow-checking.html 2)
+ Incorrect interrupt priority assignment, especially on Cortex-M parts
+ where numerically high priority values denote low actual interrupt
+ priorities, which can seem counter intuitive. See
+ https://www.freertos.org/RTOS-Cortex-M3-M4.html and the
+ definition of configMAX_SYSCALL_INTERRUPT_PRIORITY on
+ https://www.freertos.org/a00110.html
+ 3) Calling an API function from within a critical section or when
+ the scheduler is suspended, or calling an API function that does
+ not end in "FromISR" from an interrupt.
+ 4) Using a queue or semaphore before it has been initialised or
+ before the scheduler has been started (are interrupts firing
+ before vTaskStartScheduler() has been called?).
+ **********************************************************************/
+
+ for (
+ pxIterator = (ListItem_t *)&(pxList->xListEnd); pxIterator->pxNext->xItemValue <= xValueOfInsertion; pxIterator =
+ pxIterator
+ ->pxNext) /*lint !e826 !e740 !e9087 The mini list structure is used as the list end to save RAM. This is checked and valid. */ /*lint !e440 The iterator moves to a different value, not xValueOfInsertion. */
+ {
+ /* There is nothing to do here, just iterating to the wanted
+ insertion position. */
+ }
+ }
+
+ pxNewListItem->pxNext = pxIterator->pxNext;
+ pxNewListItem->pxNext->pxPrevious = pxNewListItem;
+ pxNewListItem->pxPrevious = pxIterator;
+ pxIterator->pxNext = pxNewListItem;
+
+ /* Remember which list the item is in. This allows fast removal of the
+ item later. */
+ pxNewListItem->pxContainer = pxList;
+
+ (pxList->uxNumberOfItems)++;
+}
+/*-----------------------------------------------------------*/
+
+UBaseType_t uxListRemove(ListItem_t *const pxItemToRemove)
+{
+ /* The list item knows which list it is in. Obtain the list from the list
+ item. */
+ List_t *const pxList = pxItemToRemove->pxContainer;
+
+ pxItemToRemove->pxNext->pxPrevious = pxItemToRemove->pxPrevious;
+ pxItemToRemove->pxPrevious->pxNext = pxItemToRemove->pxNext;
+
+ /* Only used during decision coverage testing. */
+ mtCOVERAGE_TEST_DELAY();
+
+ /* Make sure the index is left pointing to a valid item. */
+ if (pxList->pxIndex == pxItemToRemove) {
+ pxList->pxIndex = pxItemToRemove->pxPrevious;
+ } else {
+ mtCOVERAGE_TEST_MARKER();
+ }
+
+ pxItemToRemove->pxContainer = NULL;
+ (pxList->uxNumberOfItems)--;
+
+ return pxList->uxNumberOfItems;
+}
+/*-----------------------------------------------------------*/
diff --git a/product/rcar/src/CMSIS-FreeRTOS/Source/portable/GCC/ARM_CA53_64_Rcar/port.c b/product/rcar/src/CMSIS-FreeRTOS/Source/portable/GCC/ARM_CA53_64_Rcar/port.c
new file mode 100644
index 00000000..663b758e
--- /dev/null
+++ b/product/rcar/src/CMSIS-FreeRTOS/Source/portable/GCC/ARM_CA53_64_Rcar/port.c
@@ -0,0 +1,299 @@
+/*
+ * FreeRTOS Kernel V10.2.0
+ * Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * http://www.FreeRTOS.org
+ * http://aws.amazon.com/freertos
+ *
+ * 1 tab == 4 spaces!
+ */
+
+/* Standard includes. */
+#include <stdlib.h>
+
+/* Scheduler includes. */
+#include "FreeRTOS.h"
+#include "task.h"
+
+#ifndef configSETUP_TICK_INTERRUPT
+# error configSETUP_TICK_INTERRUPT() must be defined. See http://www.freertos.org/Using-FreeRTOS-on-Cortex-A-Embedded-Processors.html
+#endif /* configSETUP_TICK_INTERRUPT */
+
+/* Some vendor specific files default configCLEAR_TICK_INTERRUPT() in
+portmacro.h. */
+#ifndef configCLEAR_TICK_INTERRUPT
+# define configCLEAR_TICK_INTERRUPT()
+#endif
+
+/* A critical section is exited when the critical section nesting count reaches
+this value. */
+#define portNO_CRITICAL_NESTING ((size_t)0)
+
+/* Tasks are not created with a floating point context, but can be given a
+floating point context after they have been created. A variable is stored as
+part of the tasks context that holds portNO_FLOATING_POINT_CONTEXT if the task
+does not have an FPU context, or any other value if the task does have an FPU
+context. */
+#define portNO_FLOATING_POINT_CONTEXT ((StackType_t)0)
+
+/* Constants required to setup the initial task context.  The low two bits of
+the saved PSTATE select the stack pointer (SPx vs SP_EL0); bits [3:2] encode
+the exception level. */
+#define portSP_ELx ((StackType_t)0x01)
+#define portSP_EL0 ((StackType_t)0x00)
+
+#define portEL1 ((StackType_t)0x04)
+#define portEL3 ((StackType_t)0x0C)
+/* The EL1 variant is kept (disabled) for reference; this port runs tasks at
+EL3 using SP_EL0, matching the xPortStartScheduler() EL3 assert below. */
+#if 0
+# define portINITIAL_PSTATE (portEL1 | portSP_EL0)
+#else
+# define portINITIAL_PSTATE (portEL3 | portSP_EL0)
+#endif
+
+/* Masks all bits in the APSR other than the mode bits. */
+#define portAPSR_MODE_BITS_MASK (0x0C)
+
+/* Used in the ASM code. */
+/* NOTE(review): referenced by the (currently #if 0'd) interrupt-source read
+in portASM.S; presumably the core 0 interrupt source register address on
+this SoC - confirm against the hardware manual. */
+__attribute__((used)) const uint64_t ulCORE0_INT_SRC = 0x40000060;
+
+/*-----------------------------------------------------------*/
+
+/*
+ * Starts the first task executing. This function is necessarily written in
+ * assembly code so is implemented in portASM.s.
+ */
+extern void vPortRestoreTaskContext(void);
+
+/*-----------------------------------------------------------*/
+
+/* A variable is used to keep track of the critical section nesting. This
+variable has to be stored as part of the task context and must be initialised to
+a non zero value to ensure interrupts don't inadvertently become unmasked before
+the scheduler starts. As it is stored as part of the task context it will
+automatically be set to 0 when the first task is started. */
+volatile uint64_t ullCriticalNesting = 9999ULL;
+
+/* Saved as part of the task context. If ullPortTaskHasFPUContext is non-zero
+then floating point context must be saved and restored for the task. */
+uint64_t ullPortTaskHasFPUContext = pdFALSE;
+
+/* Set to 1 to pend a context switch from an ISR. */
+uint64_t ullPortYieldRequired = pdFALSE;
+
+/* Counts the interrupt nesting depth. A context switch is only performed
+if the nesting depth is 0. */
+uint64_t ullPortInterruptNesting = 0;
+
+/*-----------------------------------------------------------*/
+/*
+ * See header file for description.
+ */
+/* Build the initial (fake) exception frame for a new task so that the very
+first portRESTORE_CONTEXT on it behaves exactly like a return from an
+interrupt into pxCode.  Returns the new top-of-stack to store in the TCB.
+NOTE: within each 16-byte pair the even-numbered register occupies the lower
+address, matching the LDP order used by portRESTORE_CONTEXT in portASM.S. */
+StackType_t *pxPortInitialiseStack(
+    StackType_t *pxTopOfStack,
+    TaskFunction_t pxCode,
+    void *pvParameters)
+{
+    /* Setup the initial stack of the task. The stack is set exactly as
+    expected by the portRESTORE_CONTEXT() macro. */
+
+    /* First all the general purpose registers.  The values are recognisable
+    patterns to aid debugging; only R0 (the task parameter) matters. */
+    pxTopOfStack--;
+    *pxTopOfStack = 0x0101010101010101ULL; /* R1 */
+    pxTopOfStack--;
+    *pxTopOfStack = (StackType_t)pvParameters; /* R0 */
+    pxTopOfStack--;
+    *pxTopOfStack = 0x0303030303030303ULL; /* R3 */
+    pxTopOfStack--;
+    *pxTopOfStack = 0x0202020202020202ULL; /* R2 */
+    pxTopOfStack--;
+    *pxTopOfStack = 0x0505050505050505ULL; /* R5 */
+    pxTopOfStack--;
+    *pxTopOfStack = 0x0404040404040404ULL; /* R4 */
+    pxTopOfStack--;
+    *pxTopOfStack = 0x0707070707070707ULL; /* R7 */
+    pxTopOfStack--;
+    *pxTopOfStack = 0x0606060606060606ULL; /* R6 */
+    pxTopOfStack--;
+    *pxTopOfStack = 0x0909090909090909ULL; /* R9 */
+    pxTopOfStack--;
+    *pxTopOfStack = 0x0808080808080808ULL; /* R8 */
+    pxTopOfStack--;
+    *pxTopOfStack = 0x1111111111111111ULL; /* R11 */
+    pxTopOfStack--;
+    *pxTopOfStack = 0x1010101010101010ULL; /* R10 */
+    pxTopOfStack--;
+    *pxTopOfStack = 0x1313131313131313ULL; /* R13 */
+    pxTopOfStack--;
+    *pxTopOfStack = 0x1212121212121212ULL; /* R12 */
+    pxTopOfStack--;
+    *pxTopOfStack = 0x1515151515151515ULL; /* R15 */
+    pxTopOfStack--;
+    *pxTopOfStack = 0x1414141414141414ULL; /* R14 */
+    pxTopOfStack--;
+    *pxTopOfStack = 0x1717171717171717ULL; /* R17 */
+    pxTopOfStack--;
+    *pxTopOfStack = 0x1616161616161616ULL; /* R16 */
+    pxTopOfStack--;
+    *pxTopOfStack = 0x1919191919191919ULL; /* R19 */
+    pxTopOfStack--;
+    *pxTopOfStack = 0x1818181818181818ULL; /* R18 */
+    pxTopOfStack--;
+    *pxTopOfStack = 0x2121212121212121ULL; /* R21 */
+    pxTopOfStack--;
+    *pxTopOfStack = 0x2020202020202020ULL; /* R20 */
+    pxTopOfStack--;
+    *pxTopOfStack = 0x2323232323232323ULL; /* R23 */
+    pxTopOfStack--;
+    *pxTopOfStack = 0x2222222222222222ULL; /* R22 */
+    pxTopOfStack--;
+    *pxTopOfStack = 0x2525252525252525ULL; /* R25 */
+    pxTopOfStack--;
+    *pxTopOfStack = 0x2424242424242424ULL; /* R24 */
+    pxTopOfStack--;
+    *pxTopOfStack = 0x2727272727272727ULL; /* R27 */
+    pxTopOfStack--;
+    *pxTopOfStack = 0x2626262626262626ULL; /* R26 */
+    pxTopOfStack--;
+    *pxTopOfStack = 0x2929292929292929ULL; /* R29 */
+    pxTopOfStack--;
+    *pxTopOfStack = 0x2828282828282828ULL; /* R28 */
+    pxTopOfStack--;
+    *pxTopOfStack = (StackType_t)0x00; /* XZR */
+    /* - has no effect, used so there are an even number of registers. */
+    pxTopOfStack--;
+    *pxTopOfStack = (StackType_t)0x00; /* R30 */
+    /* - procedure call link register. */
+    pxTopOfStack--;
+
+    /* Saved SPSR: EL3 with SP_EL0 selected, interrupts unmasked. */
+    *pxTopOfStack = portINITIAL_PSTATE;
+    pxTopOfStack--;
+
+    *pxTopOfStack = (StackType_t)pxCode; /* Exception return address. */
+    pxTopOfStack--;
+
+    /* The task will start with a critical nesting count of 0 as interrupts are
+    enabled. */
+    *pxTopOfStack = portNO_CRITICAL_NESTING;
+    pxTopOfStack--;
+
+    /* The task will start without a floating point context. A task that uses
+    the floating point hardware must call vPortTaskUsesFPU() before executing
+    any floating point instructions. */
+    *pxTopOfStack = portNO_FLOATING_POINT_CONTEXT;
+
+    return pxTopOfStack;
+}
+/*-----------------------------------------------------------*/
+
+BaseType_t xPortStartScheduler(void)
+{
+    uint32_t ulCurrentEL;
+
+    /* This port only supports execution at EL3: read CurrentEL and isolate
+    the exception level bits to verify that before starting. */
+    __asm volatile("MRS %0, CurrentEL" : "=r"(ulCurrentEL));
+    ulCurrentEL &= portAPSR_MODE_BITS_MASK;
+
+    configASSERT(ulCurrentEL == portEL3);
+    if (ulCurrentEL == portEL3) {
+        /* Mask interrupts so a tick cannot fire while the scheduler is
+        being started.  They are re-enabled automatically when the first
+        task's context is restored. */
+        portDISABLE_INTERRUPTS();
+
+        /* Start the timer that generates the tick ISR. */
+        configSETUP_TICK_INTERRUPT();
+
+        /* Start the first task executing; does not return on success. */
+        vPortRestoreTaskContext();
+    }
+
+    return 0;
+}
+/*-----------------------------------------------------------*/
+
+void vPortEndScheduler(void)
+{
+    /* There is nothing for the scheduler to return to in this port, so
+    ending it is not supported; deliberately trip an assert by testing a
+    value ullCriticalNesting can never legitimately hold here. */
+    configASSERT(ullCriticalNesting == 1000ULL);
+}
+/*-----------------------------------------------------------*/
+
+void vPortEnterCritical(void)
+{
+    portDISABLE_INTERRUPTS();
+
+    /* With interrupts now masked the nesting counter can be updated without
+    a race.  Each call must be balanced by a vPortExitCritical() call. */
+    ullCriticalNesting++;
+
+    /* This function must not be used from interrupt context - only APIs
+    ending in "FromISR" may be.  The check is made on the outermost entry
+    only, so an assert handler that itself takes a critical section cannot
+    recurse into the assert. */
+    if (ullCriticalNesting == 1ULL) {
+        configASSERT(ullPortInterruptNesting == 0);
+    }
+}
+/*-----------------------------------------------------------*/
+
+void vPortExitCritical(void)
+{
+    /* Tolerate unbalanced calls: only act while actually inside a critical
+    section. */
+    if (ullCriticalNesting > portNO_CRITICAL_NESTING) {
+        ullCriticalNesting--;
+
+        /* Leaving the outermost critical section - unmask interrupts
+        again. */
+        if (ullCriticalNesting == portNO_CRITICAL_NESTING) {
+            portENABLE_INTERRUPTS();
+        }
+    }
+}
+/*-----------------------------------------------------------*/
+
+/* Tick interrupt handler.  Must be installed (via configSETUP_TICK_INTERRUPT)
+as the handler for whichever peripheral generates the RTOS tick.  Sets
+ullPortYieldRequired so the IRQ exit path in portASM.S performs a context
+switch if the tick woke a higher priority task. */
+void FreeRTOS_Tick_Handler(void)
+{
+/* Interrupts should not be enabled before this point. */
+#if (configASSERT_DEFINED == 1)
+    {
+        uint32_t ulMaskBits;
+
+        /* Read the DAIF mask bits and assert the interrupt mask is set. */
+        __asm volatile("mrs %0, daif" : "=r"(ulMaskBits)::"memory");
+        configASSERT((ulMaskBits & portDAIF_I) != 0);
+    }
+#endif /* configASSERT_DEFINED */
+
+    /* Ok to enable interrupts after the interrupt source has been cleared. */
+    configCLEAR_TICK_INTERRUPT();
+    portENABLE_INTERRUPTS();
+
+    /* Increment the RTOS tick; a pdTRUE return means a context switch is
+    required, which is deferred to the interrupt exit code. */
+    if (xTaskIncrementTick() != pdFALSE) {
+        ullPortYieldRequired = pdTRUE;
+    }
+}
diff --git a/product/rcar/src/CMSIS-FreeRTOS/Source/portable/GCC/ARM_CA53_64_Rcar/portASM.S b/product/rcar/src/CMSIS-FreeRTOS/Source/portable/GCC/ARM_CA53_64_Rcar/portASM.S
new file mode 100644
index 00000000..602e8c28
--- /dev/null
+++ b/product/rcar/src/CMSIS-FreeRTOS/Source/portable/GCC/ARM_CA53_64_Rcar/portASM.S
@@ -0,0 +1,352 @@
+/*
+ * FreeRTOS Kernel V10.2.0
+ * Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * http://www.FreeRTOS.org
+ * http://aws.amazon.com/freertos
+ *
+ * 1 tab == 4 spaces!
+ */
+
+ .text
+
+ /* Variables and functions. */
+ .extern ullMaxAPIPriorityMask
+ .extern pxCurrentTCB
+ .extern _freertos_vector_table
+
+ .globl FreeRTOS_SWI_Handler
+ .globl FreeRTOS_IRQ_Handler
+ .globl vPortRestoreTaskContext
+
+; /**********************************************************************/
+
+/* Save the full CPU context of the current task onto the task's own (EL0)
+stack and record the resulting stack pointer in the task's TCB.  Expanded on
+exception entry; the layout must exactly mirror portRESTORE_CONTEXT below
+and the frame built by pxPortInitialiseStack() in port.c. */
+.macro portSAVE_CONTEXT
+
+    /* Switch to use the EL0 stack pointer. */
+    MSR SPSEL, #0
+
+    /* Save the entire context. */
+    STP X0, X1, [SP, #-0x10]!
+    STP X2, X3, [SP, #-0x10]!
+    STP X4, X5, [SP, #-0x10]!
+    STP X6, X7, [SP, #-0x10]!
+    STP X8, X9, [SP, #-0x10]!
+    STP X10, X11, [SP, #-0x10]!
+    STP X12, X13, [SP, #-0x10]!
+    STP X14, X15, [SP, #-0x10]!
+    STP X16, X17, [SP, #-0x10]!
+    STP X18, X19, [SP, #-0x10]!
+    STP X20, X21, [SP, #-0x10]!
+    STP X22, X23, [SP, #-0x10]!
+    STP X24, X25, [SP, #-0x10]!
+    STP X26, X27, [SP, #-0x10]!
+    STP X28, X29, [SP, #-0x10]!
+    STP X30, XZR, [SP, #-0x10]!
+
+    /* Save the SPSR. */
+    MRS X3, SPSR_EL3
+    MRS X2, ELR_EL3
+
+    STP X2, X3, [SP, #-0x10]!
+
+    /* Save the critical section nesting depth.  X3 carries it across the
+    optional FPU save below. */
+    LDR X0, ullCriticalNestingConst
+    LDR X3, [X0]
+
+    /* Save the FPU context indicator.  X2 carries it across the FPU save. */
+    LDR X0, ullPortTaskHasFPUContextConst
+    LDR X2, [X0]
+
+    /* Save the FPU context, if any (32 128-bit registers). */
+    CMP X2, #0
+    B.EQ 1f
+    STP Q0, Q1, [SP,#-0x20]!
+    STP Q2, Q3, [SP,#-0x20]!
+    STP Q4, Q5, [SP,#-0x20]!
+    STP Q6, Q7, [SP,#-0x20]!
+    STP Q8, Q9, [SP,#-0x20]!
+    STP Q10, Q11, [SP,#-0x20]!
+    STP Q12, Q13, [SP,#-0x20]!
+    STP Q14, Q15, [SP,#-0x20]!
+    STP Q16, Q17, [SP,#-0x20]!
+    STP Q18, Q19, [SP,#-0x20]!
+    STP Q20, Q21, [SP,#-0x20]!
+    STP Q22, Q23, [SP,#-0x20]!
+    STP Q24, Q25, [SP,#-0x20]!
+    STP Q26, Q27, [SP,#-0x20]!
+    STP Q28, Q29, [SP,#-0x20]!
+    STP Q30, Q31, [SP,#-0x20]!
+
+1:
+    /* Store the critical nesting count and FPU context indicator. */
+    STP X2, X3, [SP, #-0x10]!
+
+    /* Record the final stack pointer in the first member of the TCB. */
+    LDR X0, pxCurrentTCBConst
+    LDR X1, [X0]
+    MOV X0, SP /* Move SP into X0 for saving. */
+    STR X0, [X1]
+
+    /* Switch to use the ELx stack pointer. */
+    MSR SPSEL, #1
+
+    .endm
+
+
+/* Restore the full CPU context of the task pointed to by pxCurrentTCB and
+return to it with ERET.  Exact mirror of portSAVE_CONTEXT above. */
+.macro portRESTORE_CONTEXT
+
+    /* Switch to use the EL0 stack pointer. */
+    MSR SPSEL, #0
+
+    /* Set the SP to point to the stack of the task being restored. */
+    LDR X0, pxCurrentTCBConst
+    LDR X1, [X0]
+    LDR X0, [X1]
+    MOV SP, X0
+
+    LDP X2, X3, [SP], #0x10 /* Critical nesting and FPU context. */
+
+    /* X0 holds the address of ullCriticalNesting. */
+    LDR X0, ullCriticalNestingConst
+    /* Restore the task's critical nesting count. */
+    STR X3, [X0]
+
+    /* Restore the FPU context indicator. */
+    LDR X0, ullPortTaskHasFPUContextConst
+    STR X2, [X0]
+
+    /* Restore the FPU context, if any. */
+    CMP X2, #0
+    B.EQ 1f
+    LDP Q30, Q31, [SP], #0x20
+    LDP Q28, Q29, [SP], #0x20
+    LDP Q26, Q27, [SP], #0x20
+    LDP Q24, Q25, [SP], #0x20
+    LDP Q22, Q23, [SP], #0x20
+    LDP Q20, Q21, [SP], #0x20
+    LDP Q18, Q19, [SP], #0x20
+    LDP Q16, Q17, [SP], #0x20
+    LDP Q14, Q15, [SP], #0x20
+    LDP Q12, Q13, [SP], #0x20
+    LDP Q10, Q11, [SP], #0x20
+    LDP Q8, Q9, [SP], #0x20
+    LDP Q6, Q7, [SP], #0x20
+    LDP Q4, Q5, [SP], #0x20
+    LDP Q2, Q3, [SP], #0x20
+    LDP Q0, Q1, [SP], #0x20
+1:
+    LDP X2, X3, [SP], #0x10 /* SPSR and ELR. */
+
+    /* Restore the SPSR. */
+    MSR SPSR_EL3, X3
+    /* Restore the ELR. */
+    MSR ELR_EL3, X2
+
+    LDP X30, XZR, [SP], #0x10
+    LDP X28, X29, [SP], #0x10
+    LDP X26, X27, [SP], #0x10
+    LDP X24, X25, [SP], #0x10
+    LDP X22, X23, [SP], #0x10
+    LDP X20, X21, [SP], #0x10
+    LDP X18, X19, [SP], #0x10
+    LDP X16, X17, [SP], #0x10
+    LDP X14, X15, [SP], #0x10
+    LDP X12, X13, [SP], #0x10
+    LDP X10, X11, [SP], #0x10
+    LDP X8, X9, [SP], #0x10
+    LDP X6, X7, [SP], #0x10
+    LDP X4, X5, [SP], #0x10
+    LDP X2, X3, [SP], #0x10
+    LDP X0, X1, [SP], #0x10
+
+    /* Switch to use the ELx stack pointer. _RB_ Might not be required. */
+    MSR SPSEL, #1
+
+    ERET
+
+    .endm
+
+/******************************************************************************
+ * FreeRTOS_SWI_Handler handler is used to perform a context switch.
+ *****************************************************************************/
+.align 8
+.type FreeRTOS_SWI_Handler, %function
+FreeRTOS_SWI_Handler:
+    /* Save the context of the current task and select a new task to run. */
+    portSAVE_CONTEXT
+    MRS X0, ESR_EL3
+
+    /* Extract the exception class field (ESR_ELx bits [31:26]). */
+    LSR X1, X0, #26
+
+    CMP X1, #0x15 /* 0x15 = SVC instruction. */
+
+    /* Any synchronous exception other than SVC is unexpected - park. */
+    B.NE FreeRTOS_Abort
+    BL vTaskSwitchContext
+
+    portRESTORE_CONTEXT
+
+FreeRTOS_Abort:
+    /* Full ESR is in X0, exception class code is in X1. */
+    B .
+
+/******************************************************************************
+ * vPortRestoreTaskContext is used to start the scheduler.
+ *****************************************************************************/
+.align 8
+.type vPortRestoreTaskContext, %function
+vPortRestoreTaskContext:
+.set freertos_vector_base, _freertos_vector_table
+
+    /* Install the FreeRTOS interrupt handlers by pointing VBAR_EL3 at the
+    FreeRTOS vector table; barriers ensure the write takes effect before
+    the first task runs. */
+    LDR X1, =freertos_vector_base
+    MSR VBAR_EL3, X1
+    DSB SY
+    ISB SY
+
+    /* Start the first task.  Does not return. */
+    portRESTORE_CONTEXT
+
+/******************************************************************************
+ * FreeRTOS_IRQ_Handler handles IRQ entry and exit.
+ *****************************************************************************/
+/* Interrupt entry/exit.  Saves only the caller-saved (volatile) registers
+around the C handler; a full portSAVE_CONTEXT is done only when an outermost
+interrupt needs to perform a context switch (ullPortYieldRequired set). */
+.align 8
+.type FreeRTOS_IRQ_Handler, %function
+FreeRTOS_IRQ_Handler:
+    /* Save volatile registers. */
+    STP X0, X1, [SP, #-0x10]!
+    STP X2, X3, [SP, #-0x10]!
+    STP X4, X5, [SP, #-0x10]!
+    STP X6, X7, [SP, #-0x10]!
+    STP X8, X9, [SP, #-0x10]!
+    STP X10, X11, [SP, #-0x10]!
+    STP X12, X13, [SP, #-0x10]!
+    STP X14, X15, [SP, #-0x10]!
+    STP X16, X17, [SP, #-0x10]!
+    STP X18, X19, [SP, #-0x10]!
+    STP X29, X30, [SP, #-0x10]!
+
+    /* Save the SPSR and ELR. */
+    MRS X3, SPSR_EL3
+    MRS X2, ELR_EL3
+    STP X2, X3, [SP, #-0x10]!
+
+    /* Increment the interrupt nesting counter. */
+    LDR X5, ullPortInterruptNestingConst
+    LDR X1, [X5] /* Old nesting count in X1. */
+    ADD X6, X1, #1
+    STR X6, [X5] /* Address of nesting count variable in X5. */
+
+    /* Maintain the interrupt nesting information across the function call. */
+    STP X1, X5, [SP, #-0x10]!
+
+#if 0
+    /* Read Core0 interrupt source */
+    ldr x2, ulCORE0_INT_SRCConst
+    ldr x3, [x2]
+    ldr w0, [x3] /* set parameter for handler */
+#endif
+
+    /* Call the C handler. */
+    BL vApplicationIRQHandler
+
+    /* Disable interrupts. */
+    MSR DAIFSET, #1 /* IRQ -> FIQ */
+    DSB SY
+    ISB SY
+
+    /* Restore the critical nesting count. */
+    LDP X1, X5, [SP], #0x10
+    STR X1, [X5]
+
+    /* Has interrupt nesting unwound?  A switch only happens at the
+    outermost nesting level. */
+    CMP X1, #0
+    B.NE Exit_IRQ_No_Context_Switch
+
+    /* Is a context switch required? */
+    LDR X0, ullPortYieldRequiredConst
+    LDR X1, [X0]
+    CMP X1, #0
+    B.EQ Exit_IRQ_No_Context_Switch
+
+    /* Reset ullPortYieldRequired to 0. */
+    MOV X2, #0
+    STR X2, [X0]
+
+    /* Restore volatile registers so portSAVE_CONTEXT below captures the
+    interrupted task's true state. */
+    LDP X4, X5, [SP], #0x10 /* SPSR and ELR. */
+    MSR SPSR_EL3, X5
+    MSR ELR_EL3, X4
+    DSB SY
+    ISB SY
+
+    LDP X29, X30, [SP], #0x10
+    LDP X18, X19, [SP], #0x10
+    LDP X16, X17, [SP], #0x10
+    LDP X14, X15, [SP], #0x10
+    LDP X12, X13, [SP], #0x10
+    LDP X10, X11, [SP], #0x10
+    LDP X8, X9, [SP], #0x10
+    LDP X6, X7, [SP], #0x10
+    LDP X4, X5, [SP], #0x10
+    LDP X2, X3, [SP], #0x10
+    LDP X0, X1, [SP], #0x10
+
+    /* Save the context of the current task and select a new task to run. */
+    portSAVE_CONTEXT
+    BL vTaskSwitchContext
+    portRESTORE_CONTEXT
+
+Exit_IRQ_No_Context_Switch:
+    /* Restore volatile registers. */
+    LDP X4, X5, [SP], #0x10 /* SPSR and ELR. */
+    MSR SPSR_EL3, X5
+    MSR ELR_EL3, X4
+    DSB SY
+    ISB SY
+
+    LDP X29, X30, [SP], #0x10
+    LDP X18, X19, [SP], #0x10
+    LDP X16, X17, [SP], #0x10
+    LDP X14, X15, [SP], #0x10
+    LDP X12, X13, [SP], #0x10
+    LDP X10, X11, [SP], #0x10
+    LDP X8, X9, [SP], #0x10
+    LDP X6, X7, [SP], #0x10
+    LDP X4, X5, [SP], #0x10
+    LDP X2, X3, [SP], #0x10
+    LDP X0, X1, [SP], #0x10
+
+    ERET
+
+
+/* Literal pool: addresses of the C port variables (defined in port.c) and
+the IRQ handler, loaded PC-relative by the code above. */
+.align 8
+pxCurrentTCBConst: .dword pxCurrentTCB
+ullCriticalNestingConst: .dword ullCriticalNesting
+ullPortTaskHasFPUContextConst: .dword ullPortTaskHasFPUContext
+
+vApplicationIRQHandlerConst: .word vApplicationIRQHandler
+    .word 0
+ullPortInterruptNestingConst: .dword ullPortInterruptNesting
+ullPortYieldRequiredConst: .dword ullPortYieldRequired
+
+ulCORE0_INT_SRCConst: .dword ulCORE0_INT_SRC
+.end
diff --git a/product/rcar/src/CMSIS-FreeRTOS/Source/portable/GCC/ARM_CA53_64_Rcar/portmacro.h b/product/rcar/src/CMSIS-FreeRTOS/Source/portable/GCC/ARM_CA53_64_Rcar/portmacro.h
new file mode 100644
index 00000000..120ccb3f
--- /dev/null
+++ b/product/rcar/src/CMSIS-FreeRTOS/Source/portable/GCC/ARM_CA53_64_Rcar/portmacro.h
@@ -0,0 +1,116 @@
+/*
+ * FreeRTOS Kernel V10.2.0
+ * Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * http://www.FreeRTOS.org
+ * http://aws.amazon.com/freertos
+ *
+ * 1 tab == 4 spaces!
+ */
+
+#ifndef PORTMACRO_H
+#define PORTMACRO_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*-----------------------------------------------------------
+ * Port specific definitions.
+ *
+ * The settings in this file configure FreeRTOS correctly for the given hardware
+ * and compiler.
+ *
+ * These settings should not be altered.
+ *-----------------------------------------------------------
+ */
+
+/* Type definitions. */
+#define portCHAR char
+#define portFLOAT float
+#define portDOUBLE double
+#define portLONG long
+#define portSHORT short
+#define portSTACK_TYPE size_t
+#define portBASE_TYPE long
+
+typedef portSTACK_TYPE StackType_t;
+typedef portBASE_TYPE BaseType_t;
+typedef uint64_t UBaseType_t;
+
+/* 64-bit tick counter, so tick wrap-around is not a practical concern. */
+typedef uint64_t TickType_t;
+#define portMAX_DELAY ((TickType_t)0xffffffffffffffff)
+
+/*-----------------------------------------------------------*/
+
+/* Task utilities. */
+
+/* Yield by raising an SVC, handled by FreeRTOS_SWI_Handler in portASM.S. */
+#define portYIELD() __asm volatile("SVC 0" ::: "memory")
+
+/*-----------------------------------------------------------
+ * Critical section control
+ *----------------------------------------------------------*/
+
+extern void vPortEnterCritical(void);
+extern void vPortExitCritical(void);
+
+/* NOTE(review): "MSR DAIFSET, #1" sets the F (FIQ mask) bit of DAIF - this
+platform appears to take interrupts as FIQs (portASM.S uses the same mask);
+confirm against the interrupt controller configuration. */
+#define portDISABLE_INTERRUPTS() \
+    __asm volatile("MSR DAIFSET, #1" ::: "memory"); \
+    __asm volatile("DSB SY"); \
+    __asm volatile("ISB SY");
+
+#define portENABLE_INTERRUPTS() \
+    __asm volatile("MSR DAIFCLR, #1" ::: "memory"); \
+    __asm volatile("DSB SY"); \
+    __asm volatile("ISB SY");
+
+/* Unlike the GIC-based Cortex-A ports, these critical section macros DO
+globally mask interrupt delivery (via the DAIF register); no
+configMAX_API_CALL_INTERRUPT_PRIORITY style priority masking is performed
+in this port. */
+#define portENTER_CRITICAL() vPortEnterCritical();
+#define portEXIT_CRITICAL() vPortExitCritical();
+
+/*-----------------------------------------------------------*/
+
+/* Task function macros as described on the FreeRTOS.org WEB site. These are
+not required for this port but included in case common demo code that uses these
+macros is used. */
+#define portTASK_FUNCTION_PROTO(vFunction, pvParameters) \
+    void vFunction(void *pvParameters)
+#define portTASK_FUNCTION(vFunction, pvParameters) \
+    void vFunction(void *pvParameters)
+
+/* Prototype of the FreeRTOS tick handler. This must be installed as the
+handler for whichever peripheral is used to generate the RTOS tick. */
+void FreeRTOS_Tick_Handler(void);
+
+/*-----------------------------------------------------------*/
+
+/* Hardware specifics. */
+#define portSTACK_GROWTH (-1)
+#define portTICK_PERIOD_MS ((TickType_t)1000 / configTICK_RATE_HZ)
+#define portBYTE_ALIGNMENT 16
+#define portPOINTER_SIZE_TYPE uint64_t
+
+#ifdef __cplusplus
+} /* extern C */
+#endif
+
+#endif /* PORTMACRO_H */
diff --git a/product/rcar/src/CMSIS-FreeRTOS/Source/portable/MemMang/heap_1.c b/product/rcar/src/CMSIS-FreeRTOS/Source/portable/MemMang/heap_1.c
new file mode 100644
index 00000000..ae03b9f9
--- /dev/null
+++ b/product/rcar/src/CMSIS-FreeRTOS/Source/portable/MemMang/heap_1.c
@@ -0,0 +1,144 @@
+/*
+ * FreeRTOS Kernel V10.3.1
+ * Copyright (C) 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * http://www.FreeRTOS.org
+ * http://aws.amazon.com/freertos
+ *
+ * 1 tab == 4 spaces!
+ */
+
+/*
+ * The simplest possible implementation of pvPortMalloc(). Note that this
+ * implementation does NOT allow allocated memory to be freed again.
+ *
+ * See heap_2.c, heap_3.c and heap_4.c for alternative implementations, and the
+ * memory management pages of http://www.FreeRTOS.org for more information.
+ */
+#include <stdlib.h>
+
+/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
+all the API functions to use the MPU wrappers. That should only be done when
+task.h is included from an application file. */
+#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+
+#include "FreeRTOS.h"
+#include "task.h"
+
+#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+
+#if (configSUPPORT_DYNAMIC_ALLOCATION == 0)
+# error This file must not be used if configSUPPORT_DYNAMIC_ALLOCATION is 0
+#endif
+
+/* A few bytes might be lost to byte aligning the heap start address. */
+#define configADJUSTED_HEAP_SIZE (configTOTAL_HEAP_SIZE - portBYTE_ALIGNMENT)
+
+/* Allocate the memory for the heap. */
+#if (configAPPLICATION_ALLOCATED_HEAP == 1)
+/* The application writer has already defined the array used for the RTOS
+heap - probably so it can be placed in a special segment or address. */
+extern uint8_t ucHeap[configTOTAL_HEAP_SIZE];
+#else
+static uint8_t ucHeap[configTOTAL_HEAP_SIZE];
+#endif /* configAPPLICATION_ALLOCATED_HEAP */
+
+/* Index into the ucHeap array.  Offset from the aligned heap base; only
+ever grows, because heap_1 never frees memory. */
+static size_t xNextFreeByte = (size_t)0;
+
+/*-----------------------------------------------------------*/
+
+/* Allocate-only malloc: hands out successive slices of the static ucHeap
+array.  Returns NULL when the (alignment-adjusted) heap is exhausted.
+Thread safety is provided by suspending the scheduler, not by a critical
+section, so this must not be called from an ISR. */
+void *pvPortMalloc(size_t xWantedSize)
+{
+    void *pvReturn = NULL;
+    static uint8_t *pucAlignedHeap = NULL;
+
+/* Ensure that blocks are always aligned to the required number of bytes. */
+#if (portBYTE_ALIGNMENT != 1)
+    {
+        if (xWantedSize & portBYTE_ALIGNMENT_MASK) {
+            /* Byte alignment required.  NOTE(review): for sizes within
+            portBYTE_ALIGNMENT of SIZE_MAX this addition wraps; such requests
+            are unrealistic here but would not be rejected by this check. */
+            xWantedSize +=
+                (portBYTE_ALIGNMENT - (xWantedSize & portBYTE_ALIGNMENT_MASK));
+        }
+    }
+#endif
+
+    vTaskSuspendAll();
+    {
+        if (pucAlignedHeap == NULL) {
+            /* Ensure the heap starts on a correctly aligned boundary.
+            Computed lazily on the first allocation. */
+            pucAlignedHeap =
+                (uint8_t
+                     *)(((portPOINTER_SIZE_TYPE)&ucHeap[portBYTE_ALIGNMENT]) & (~((portPOINTER_SIZE_TYPE)portBYTE_ALIGNMENT_MASK)));
+        }
+
+        /* Check there is enough room left for the allocation. */
+        if (((xNextFreeByte + xWantedSize) < configADJUSTED_HEAP_SIZE) &&
+            ((xNextFreeByte + xWantedSize) >
+             xNextFreeByte)) /* Check for overflow. */
+        {
+            /* Return the next free byte then increment the index past this
+            block. */
+            pvReturn = pucAlignedHeap + xNextFreeByte;
+            xNextFreeByte += xWantedSize;
+        }
+
+        traceMALLOC(pvReturn, xWantedSize);
+    }
+    (void)xTaskResumeAll();
+
+#if (configUSE_MALLOC_FAILED_HOOK == 1)
+    {
+        if (pvReturn == NULL) {
+            extern void vApplicationMallocFailedHook(void);
+            vApplicationMallocFailedHook();
+        }
+    }
+#endif
+
+    return pvReturn;
+}
+/*-----------------------------------------------------------*/
+
+void vPortFree(void *pv)
+{
+    /* heap_1 never releases memory - see heap_2.c, heap_3.c and heap_4.c
+    for schemes that do, and the memory management pages of
+    http://www.FreeRTOS.org for more information. */
+    (void)pv;
+
+    /* Calling this with a real block is a bug; force an assert. */
+    configASSERT(pv == NULL);
+}
+/*-----------------------------------------------------------*/
+
+void vPortInitialiseBlocks(void)
+{
+    /* Rewind the allocation index; only required when static memory is not
+    cleared on startup. */
+    xNextFreeByte = 0;
+}
+/*-----------------------------------------------------------*/
+
+size_t xPortGetFreeHeapSize(void)
+{
+    /* Bytes remaining in the (alignment-adjusted) heap array. */
+    return configADJUSTED_HEAP_SIZE - xNextFreeByte;
+}
diff --git a/product/rcar/src/CMSIS-FreeRTOS/Source/queue.c b/product/rcar/src/CMSIS-FreeRTOS/Source/queue.c
new file mode 100644
index 00000000..398e7e20
--- /dev/null
+++ b/product/rcar/src/CMSIS-FreeRTOS/Source/queue.c
@@ -0,0 +1,2826 @@
+/*
+ * FreeRTOS Kernel V10.3.1
+ * Copyright (C) 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * http://www.FreeRTOS.org
+ * http://aws.amazon.com/freertos
+ *
+ * 1 tab == 4 spaces!
+ */
+
+#include <stdlib.h>
+#include <string.h>
+
+/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
+all the API functions to use the MPU wrappers. That should only be done when
+task.h is included from an application file. */
+#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+
+#include "FreeRTOS.h"
+#include "queue.h"
+#include "task.h"
+
+#if (configUSE_CO_ROUTINES == 1)
+# include "croutine.h"
+#endif
+
+/* Lint e9021, e961 and e750 are suppressed as a MISRA exception justified
+because the MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined
+for the header files above, but not in this file, in order to generate the
+correct privileged Vs unprivileged linkage and placement. */
+#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e961 !e750 !e9021. */
+
+/* Constants used with the cRxLock and cTxLock structure members. */
+#define queueUNLOCKED ((int8_t)-1)
+#define queueLOCKED_UNMODIFIED ((int8_t)0)
+
+/* When the Queue_t structure is used to represent a base queue its pcHead and
+pcTail members are used as pointers into the queue storage area. When the
+Queue_t structure is used to represent a mutex pcHead and pcTail pointers are
+not necessary, and the pcHead pointer is set to NULL to indicate that the
+structure instead holds a pointer to the mutex holder (if any). Map alternative
+names to the pcHead and structure member to ensure the readability of the code
+is maintained. The QueuePointers_t and SemaphoreData_t types are used to form
+a union as their usage is mutually exclusive dependent on what the queue is
+being used for. */
+#define uxQueueType pcHead
+#define queueQUEUE_IS_MUTEX NULL
+
+/* Pointers that are live only while the Queue_t structure serves as a data
+queue (see the union inside Queue_t). */
+typedef struct QueuePointers {
+    int8_t *pcTail; /*< Points to the byte at the end of the queue storage area.
+                       One more byte is allocated than necessary to store the
+                       queue items, this is used as a marker. */
+    int8_t *pcReadFrom; /*< Points to the last place that a queued item was read
+                           from when the structure is used as a queue. */
+} QueuePointers_t;
+
+/* Data that is live only while the Queue_t structure serves as a (possibly
+recursive) mutex (see the union inside Queue_t). */
+typedef struct SemaphoreData {
+    TaskHandle_t
+        xMutexHolder; /*< The handle of the task that holds the mutex. */
+    UBaseType_t
+        uxRecursiveCallCount; /*< Maintains a count of the number of times a
+                                 recursive mutex has been recursively 'taken'
+                                 when the structure is used as a mutex. */
+} SemaphoreData_t;
+
+/* Semaphores do not actually store or copy data, so have an item size of
+zero. */
+#define queueSEMAPHORE_QUEUE_ITEM_LENGTH ((UBaseType_t)0)
+#define queueMUTEX_GIVE_BLOCK_TIME ((TickType_t)0U)
+
+#if (configUSE_PREEMPTION == 0)
+/* If the cooperative scheduler is being used then a yield should not be
+performed just because a higher priority task has been woken. */
+# define queueYIELD_IF_USING_PREEMPTION()
+#else
+# define queueYIELD_IF_USING_PREEMPTION() portYIELD_WITHIN_API()
+#endif
+
+/*
+ * Definition of the queue used by the scheduler.
+ * Items are queued by copy, not reference. See the following link for the
+ * rationale: https://www.freertos.org/Embedded-RTOS-Queues.html
+ *
+ * Which member of the union below is live is determined by pcHead: when
+ * pcHead is NULL (queueQUEUE_IS_MUTEX, accessed via the uxQueueType alias)
+ * the structure is a mutex and u.xSemaphore applies; otherwise it is a data
+ * queue and u.xQueue applies.
+ */
+typedef struct QueueDefinition /* The old naming convention is used to prevent
+                                  breaking kernel aware debuggers. */
+{
+    int8_t *pcHead; /*< Points to the beginning of the queue storage area. */
+    int8_t *pcWriteTo; /*< Points to the free next place in the storage area. */
+
+    union {
+        QueuePointers_t xQueue; /*< Data required exclusively when this
+                                   structure is used as a queue. */
+        SemaphoreData_t xSemaphore; /*< Data required exclusively when this
+                                       structure is used as a semaphore. */
+    } u;
+
+    List_t
+        xTasksWaitingToSend; /*< List of tasks that are blocked waiting to post
+                                onto this queue. Stored in priority order. */
+    List_t xTasksWaitingToReceive; /*< List of tasks that are blocked waiting to
+                                      read from this queue. Stored in priority
+                                      order. */
+
+    volatile UBaseType_t
+        uxMessagesWaiting; /*< The number of items currently in the queue. */
+    UBaseType_t uxLength; /*< The length of the queue defined as the number of
+                             items it will hold, not the number of bytes. */
+    UBaseType_t
+        uxItemSize; /*< The size of each items that the queue will hold. */
+
+    volatile int8_t
+        cRxLock; /*< Stores the number of items received from the queue (removed
+                    from the queue) while the queue was locked. Set to
+                    queueUNLOCKED when the queue is not locked. */
+    volatile int8_t
+        cTxLock; /*< Stores the number of items transmitted to the queue (added
+                    to the queue) while the queue was locked. Set to
+                    queueUNLOCKED when the queue is not locked. */
+
+#if ( \
+    (configSUPPORT_STATIC_ALLOCATION == 1) && \
+    (configSUPPORT_DYNAMIC_ALLOCATION == 1))
+    uint8_t ucStaticallyAllocated; /*< Set to pdTRUE if the memory used by the
+                                      queue was statically allocated to ensure
+                                      no attempt is made to free the memory. */
+#endif
+
+#if (configUSE_QUEUE_SETS == 1)
+    struct QueueDefinition *pxQueueSetContainer;
+#endif
+
+#if (configUSE_TRACE_FACILITY == 1)
+    UBaseType_t uxQueueNumber;
+    uint8_t ucQueueType;
+#endif
+
+} xQUEUE;
+
+/* The old xQUEUE name is maintained above then typedefed to the new Queue_t
+name below to enable the use of older kernel aware debuggers. */
+typedef xQUEUE Queue_t;
+
+/*-----------------------------------------------------------*/
+
+/*
+ * The queue registry is just a means for kernel aware debuggers to locate
+ * queue structures. It has no other purpose so is an optional component.
+ */
+#if (configQUEUE_REGISTRY_SIZE > 0)
+
+/* The type stored within the queue registry array. This allows a name
+to be assigned to each queue making kernel aware debugging a little
+more user friendly. */
+typedef struct QUEUE_REGISTRY_ITEM {
+ const char *pcQueueName; /*lint !e971 Unqualified char types are allowed for
+ strings and single characters only. */
+ QueueHandle_t xHandle;
+} xQueueRegistryItem;
+
+/* The old xQueueRegistryItem name is maintained above then typedefed to the
+new xQueueRegistryItem name below to enable the use of older kernel aware
+debuggers. */
+typedef xQueueRegistryItem QueueRegistryItem_t;
+
+/* The queue registry is simply an array of QueueRegistryItem_t structures.
+The pcQueueName member of a structure being NULL is indicative of the
+array position being vacant. */
+PRIVILEGED_DATA QueueRegistryItem_t xQueueRegistry[configQUEUE_REGISTRY_SIZE];
+
+#endif /* configQUEUE_REGISTRY_SIZE */
+
+/*
+ * Unlocks a queue locked by a call to prvLockQueue. Locking a queue does not
+ * prevent an ISR from adding or removing items to the queue, but does prevent
+ * an ISR from removing tasks from the queue event lists. If an ISR finds a
+ * queue is locked it will instead increment the appropriate queue lock count
+ * to indicate that a task may require unblocking. When the queue in unlocked
+ * these lock counts are inspected, and the appropriate action taken.
+ */
+static void prvUnlockQueue(Queue_t *const pxQueue) PRIVILEGED_FUNCTION;
+
+/*
+ * Uses a critical section to determine if there is any data in a queue.
+ *
+ * @return pdTRUE if the queue contains no items, otherwise pdFALSE.
+ */
+static BaseType_t prvIsQueueEmpty(const Queue_t *pxQueue) PRIVILEGED_FUNCTION;
+
+/*
+ * Uses a critical section to determine if there is any space in a queue.
+ *
+ * @return pdTRUE if there is no space, otherwise pdFALSE;
+ */
+static BaseType_t prvIsQueueFull(const Queue_t *pxQueue) PRIVILEGED_FUNCTION;
+
+/*
+ * Copies an item into the queue, either at the front of the queue or the
+ * back of the queue.
+ */
+static BaseType_t prvCopyDataToQueue(
+ Queue_t *const pxQueue,
+ const void *pvItemToQueue,
+ const BaseType_t xPosition) PRIVILEGED_FUNCTION;
+
+/*
+ * Copies an item out of a queue.
+ */
+static void prvCopyDataFromQueue(Queue_t *const pxQueue, void *const pvBuffer)
+ PRIVILEGED_FUNCTION;
+
+#if (configUSE_QUEUE_SETS == 1)
+/*
+ * Checks to see if a queue is a member of a queue set, and if so, notifies
+ * the queue set that the queue contains data.
+ */
+static BaseType_t prvNotifyQueueSetContainer(const Queue_t *const pxQueue)
+ PRIVILEGED_FUNCTION;
+#endif
+
+/*
+ * Called after a Queue_t structure has been allocated either statically or
+ * dynamically to fill in the structure's members.
+ */
+static void prvInitialiseNewQueue(
+ const UBaseType_t uxQueueLength,
+ const UBaseType_t uxItemSize,
+ uint8_t *pucQueueStorage,
+ const uint8_t ucQueueType,
+ Queue_t *pxNewQueue) PRIVILEGED_FUNCTION;
+
+/*
+ * Mutexes are a special type of queue. When a mutex is created, first the
+ * queue is created, then prvInitialiseMutex() is called to configure the queue
+ * as a mutex.
+ */
+#if (configUSE_MUTEXES == 1)
+static void prvInitialiseMutex(Queue_t *pxNewQueue) PRIVILEGED_FUNCTION;
+#endif
+
+#if (configUSE_MUTEXES == 1)
+/*
+ * If a task waiting for a mutex causes the mutex holder to inherit a
+ * priority, but the waiting task times out, then the holder should
+ * disinherit the priority - but only down to the highest priority of any
+ * other tasks that are waiting for the same mutex. This function returns
+ * that priority.
+ */
+static UBaseType_t prvGetDisinheritPriorityAfterTimeout(
+ const Queue_t *const pxQueue) PRIVILEGED_FUNCTION;
+#endif
+/*-----------------------------------------------------------*/
+
+/*
+ * Macro to mark a queue as locked.  Locking a queue prevents an ISR from
+ * accessing the queue event lists.  The whole operation runs inside a
+ * critical section; lock fields already holding a count (i.e. not
+ * queueUNLOCKED) are left untouched so pending-item counts are preserved.
+ * NOTE: the macro evaluates pxQueue several times, so the argument must be
+ * free of side effects.
+ */
+#define prvLockQueue(pxQueue) \
+    taskENTER_CRITICAL(); \
+    { \
+        if ((pxQueue)->cRxLock == queueUNLOCKED) { \
+            (pxQueue)->cRxLock = queueLOCKED_UNMODIFIED; \
+        } \
+        if ((pxQueue)->cTxLock == queueUNLOCKED) { \
+            (pxQueue)->cTxLock = queueLOCKED_UNMODIFIED; \
+        } \
+    } \
+    taskEXIT_CRITICAL()
+/*-----------------------------------------------------------*/
+
+/*
+ * Reset a queue to its initial, empty state.
+ *
+ * xNewQueue is pdTRUE when called from queue creation, in which case the
+ * event lists are initialised for the first time.  It is pdFALSE when an
+ * existing queue is being reset, in which case one task blocked waiting to
+ * send is unblocked (the queue has just become writable again) while tasks
+ * blocked waiting to receive stay blocked (the queue is still empty).
+ *
+ * Always returns pdPASS.
+ */
+BaseType_t xQueueGenericReset(QueueHandle_t xQueue, BaseType_t xNewQueue)
+{
+    Queue_t *const pxQueue = xQueue;
+
+    configASSERT(pxQueue);
+
+    taskENTER_CRITICAL();
+    {
+        /* pcTail marks one byte past the end of the storage area; pcReadFrom
+        is parked on the LAST item slot - NOTE(review): this presumes the read
+        path advances pcReadFrom before copying, so the first read wraps to the
+        first item; confirm against prvCopyDataFromQueue(). */
+        pxQueue->u.xQueue.pcTail = pxQueue->pcHead +
+            (pxQueue->uxLength *
+             pxQueue->uxItemSize); /*lint !e9016 Pointer arithmetic allowed on
+                                      char types, especially when it assists
+                                      conveying intent. */
+        pxQueue->uxMessagesWaiting = (UBaseType_t)0U;
+        pxQueue->pcWriteTo = pxQueue->pcHead;
+        pxQueue->u.xQueue.pcReadFrom = pxQueue->pcHead +
+            ((pxQueue->uxLength - 1U) *
+             pxQueue->uxItemSize); /*lint !e9016 Pointer arithmetic allowed on
+                                      char types, especially when it assists
+                                      conveying intent. */
+        pxQueue->cRxLock = queueUNLOCKED;
+        pxQueue->cTxLock = queueUNLOCKED;
+
+        if (xNewQueue == pdFALSE) {
+            /* If there are tasks blocked waiting to read from the queue, then
+            the tasks will remain blocked as after this function exits the queue
+            will still be empty. If there are tasks blocked waiting to write to
+            the queue, then one should be unblocked as after this function exits
+            it will be possible to write to it. */
+            if (listLIST_IS_EMPTY(&(pxQueue->xTasksWaitingToSend)) == pdFALSE) {
+                if (xTaskRemoveFromEventList(&(pxQueue->xTasksWaitingToSend)) !=
+                    pdFALSE) {
+                    queueYIELD_IF_USING_PREEMPTION();
+                } else {
+                    mtCOVERAGE_TEST_MARKER();
+                }
+            } else {
+                mtCOVERAGE_TEST_MARKER();
+            }
+        } else {
+            /* Ensure the event queues start in the correct state. */
+            vListInitialise(&(pxQueue->xTasksWaitingToSend));
+            vListInitialise(&(pxQueue->xTasksWaitingToReceive));
+        }
+    }
+    taskEXIT_CRITICAL();
+
+    /* A value is returned for calling semantic consistency with previous
+    versions. */
+    return pdPASS;
+}
+/*-----------------------------------------------------------*/
+
+#if (configSUPPORT_STATIC_ALLOCATION == 1)
+
+/*
+ * Create a queue using caller-supplied (static) memory: pxStaticQueue holds
+ * the queue control structure and pucQueueStorage the item storage area
+ * (pucQueueStorage must be NULL exactly when uxItemSize is zero, e.g. for
+ * semaphores).  Returns the queue handle, or NULL on failure.
+ */
+QueueHandle_t xQueueGenericCreateStatic(
+    const UBaseType_t uxQueueLength,
+    const UBaseType_t uxItemSize,
+    uint8_t *pucQueueStorage,
+    StaticQueue_t *pxStaticQueue,
+    const uint8_t ucQueueType)
+{
+    Queue_t *pxNewQueue;
+
+    configASSERT(uxQueueLength > (UBaseType_t)0);
+
+    /* The StaticQueue_t structure and the queue storage area must be
+    supplied. */
+    configASSERT(pxStaticQueue != NULL);
+
+    /* A queue storage area should be provided if the item size is not 0, and
+    should not be provided if the item size is 0. */
+    configASSERT(!((pucQueueStorage != NULL) && (uxItemSize == 0)));
+    configASSERT(!((pucQueueStorage == NULL) && (uxItemSize != 0)));
+
+# if (configASSERT_DEFINED == 1)
+    {
+        /* Sanity check that the size of the structure used to declare a
+        variable of type StaticQueue_t or StaticSemaphore_t equals the size of
+        the real queue and semaphore structures. */
+        volatile size_t xSize = sizeof(StaticQueue_t);
+        configASSERT(xSize == sizeof(Queue_t));
+        (void)xSize; /* Keeps lint quiet when configASSERT() is not defined. */
+    }
+# endif /* configASSERT_DEFINED */
+
+    /* The address of a statically allocated queue was passed in, use it.
+    The address of a statically allocated storage area was also passed in
+    but is already set. */
+    pxNewQueue = (Queue_t *)
+        pxStaticQueue; /*lint !e740 !e9087 Unusual cast is ok as the structures
+                          are designed to have the same alignment, and the size
+                          is checked by an assert. */
+
+    if (pxNewQueue != NULL) {
+# if (configSUPPORT_DYNAMIC_ALLOCATION == 1)
+        {
+            /* Queues can be allocated either statically or dynamically, so
+            note this queue was allocated statically in case the queue is
+            later deleted. */
+            pxNewQueue->ucStaticallyAllocated = pdTRUE;
+        }
+# endif /* configSUPPORT_DYNAMIC_ALLOCATION */
+
+        prvInitialiseNewQueue(
+            uxQueueLength,
+            uxItemSize,
+            pucQueueStorage,
+            ucQueueType,
+            pxNewQueue);
+    } else {
+        traceQUEUE_CREATE_FAILED(ucQueueType);
+        mtCOVERAGE_TEST_MARKER();
+    }
+
+    return pxNewQueue;
+}
+
+#endif /* configSUPPORT_STATIC_ALLOCATION */
+/*-----------------------------------------------------------*/
+
+#if (configSUPPORT_DYNAMIC_ALLOCATION == 1)
+
+/*
+ * Create a queue on the FreeRTOS heap.  The control structure and the item
+ * storage area are obtained with a single pvPortMalloc() call, the storage
+ * area being placed immediately after the Queue_t structure.  Returns the
+ * queue handle, or NULL if the allocation fails.
+ */
+QueueHandle_t xQueueGenericCreate(
+    const UBaseType_t uxQueueLength,
+    const UBaseType_t uxItemSize,
+    const uint8_t ucQueueType)
+{
+    Queue_t *pxNewQueue;
+    size_t xQueueSizeInBytes;
+    uint8_t *pucQueueStorage;
+
+    configASSERT(uxQueueLength > (UBaseType_t)0);
+
+    /* Allocate enough space to hold the maximum number of items that
+    can be in the queue at any time. It is valid for uxItemSize to be
+    zero in the case the queue is used as a semaphore.
+    NOTE(review): uxQueueLength * uxItemSize is not checked for overflow
+    here; callers must supply sane sizes. */
+    xQueueSizeInBytes = (size_t)(
+        uxQueueLength * uxItemSize); /*lint !e961 MISRA exception as the casts
+                                        are only redundant for some ports. */
+
+    /* Allocate the queue and storage area. Justification for MISRA
+    deviation as follows: pvPortMalloc() always ensures returned memory
+    blocks are aligned per the requirements of the MCU stack. In this case
+    pvPortMalloc() must return a pointer that is guaranteed to meet the
+    alignment requirements of the Queue_t structure - which in this case
+    is an int8_t *. Therefore, whenever the stack alignment requirements
+    are greater than or equal to the pointer to char requirements the cast
+    is safe. In other cases alignment requirements are not strict (one or
+    two bytes). */
+    pxNewQueue = (Queue_t *)pvPortMalloc(
+        sizeof(Queue_t) +
+        xQueueSizeInBytes); /*lint !e9087 !e9079 see comment above. */
+
+    if (pxNewQueue != NULL) {
+        /* Jump past the queue structure to find the location of the queue
+        storage area. */
+        pucQueueStorage = (uint8_t *)pxNewQueue;
+        pucQueueStorage += sizeof(
+            Queue_t); /*lint !e9016 Pointer arithmetic allowed on char types,
+                         especially when it assists conveying intent. */
+
+# if (configSUPPORT_STATIC_ALLOCATION == 1)
+        {
+            /* Queues can be created either statically or dynamically, so
+            note this queue was created dynamically in case it is later
+            deleted. */
+            pxNewQueue->ucStaticallyAllocated = pdFALSE;
+        }
+# endif /* configSUPPORT_STATIC_ALLOCATION */
+
+        prvInitialiseNewQueue(
+            uxQueueLength,
+            uxItemSize,
+            pucQueueStorage,
+            ucQueueType,
+            pxNewQueue);
+    } else {
+        traceQUEUE_CREATE_FAILED(ucQueueType);
+        mtCOVERAGE_TEST_MARKER();
+    }
+
+    return pxNewQueue;
+}
+
+#endif /* configSUPPORT_DYNAMIC_ALLOCATION */
+/*-----------------------------------------------------------*/
+
+/*
+ * Common initialisation shared by the static and dynamic create paths:
+ * fills in the members of an already-allocated Queue_t and resets it to the
+ * empty state via xQueueGenericReset().
+ */
+static void prvInitialiseNewQueue(
+    const UBaseType_t uxQueueLength,
+    const UBaseType_t uxItemSize,
+    uint8_t *pucQueueStorage,
+    const uint8_t ucQueueType,
+    Queue_t *pxNewQueue)
+{
+    /* Remove compiler warnings about unused parameters should
+    configUSE_TRACE_FACILITY not be set to 1. */
+    (void)ucQueueType;
+
+    if (uxItemSize == (UBaseType_t)0) {
+        /* No RAM was allocated for the queue storage area, but PC head cannot
+        be set to NULL because NULL is used as a key to say the queue is used as
+        a mutex. Therefore just set pcHead to point to the queue as a benign
+        value that is known to be within the memory map. */
+        pxNewQueue->pcHead = (int8_t *)pxNewQueue;
+    } else {
+        /* Set the head to the start of the queue storage area. */
+        pxNewQueue->pcHead = (int8_t *)pucQueueStorage;
+    }
+
+    /* Initialise the queue members as described where the queue type is
+    defined. */
+    pxNewQueue->uxLength = uxQueueLength;
+    pxNewQueue->uxItemSize = uxItemSize;
+    /* pdTRUE: a brand new queue, so the event lists are initialised too. */
+    (void)xQueueGenericReset(pxNewQueue, pdTRUE);
+
+#if (configUSE_TRACE_FACILITY == 1)
+    {
+        pxNewQueue->ucQueueType = ucQueueType;
+    }
+#endif /* configUSE_TRACE_FACILITY */
+
+#if (configUSE_QUEUE_SETS == 1)
+    {
+        pxNewQueue->pxQueueSetContainer = NULL;
+    }
+#endif /* configUSE_QUEUE_SETS */
+
+    traceQUEUE_CREATE(pxNewQueue);
+}
+/*-----------------------------------------------------------*/
+
+#if (configUSE_MUTEXES == 1)
+
+/*
+ * Convert a freshly created zero-item queue of length one into a mutex.
+ * Tolerates a NULL argument so callers can pass the (unchecked) result of a
+ * queue-create call straight through.
+ */
+static void prvInitialiseMutex(Queue_t *pxNewQueue)
+{
+    if (pxNewQueue != NULL) {
+        /* The queue create function will set all the queue structure members
+        correctly for a generic queue, but this function is creating a
+        mutex. Overwrite those members that need to be set differently -
+        in particular the information required for priority inheritance. */
+        pxNewQueue->u.xSemaphore.xMutexHolder = NULL;
+        pxNewQueue->uxQueueType = queueQUEUE_IS_MUTEX;
+
+        /* In case this is a recursive mutex. */
+        pxNewQueue->u.xSemaphore.uxRecursiveCallCount = 0;
+
+        traceCREATE_MUTEX(pxNewQueue);
+
+        /* Start with the semaphore in the expected state: the initial 'give'
+        leaves the mutex available to the first taker. */
+        (void)xQueueGenericSend(
+            pxNewQueue, NULL, (TickType_t)0U, queueSEND_TO_BACK);
+    } else {
+        traceCREATE_MUTEX_FAILED();
+    }
+}
+
+#endif /* configUSE_MUTEXES */
+/*-----------------------------------------------------------*/
+
+#if ((configUSE_MUTEXES == 1) && (configSUPPORT_DYNAMIC_ALLOCATION == 1))
+
+/*
+ * Create a mutex as a dynamically allocated queue of length one with an
+ * item size of zero, then configure the queue structure as a mutex.
+ * Returns NULL if the underlying queue allocation fails.
+ */
+QueueHandle_t xQueueCreateMutex(const uint8_t ucQueueType)
+{
+    QueueHandle_t xNewQueue;
+
+    /* A mutex is just a queue that can hold a single zero-byte item. */
+    xNewQueue = xQueueGenericCreate(
+        (UBaseType_t)1, (UBaseType_t)0, ucQueueType);
+
+    /* prvInitialiseMutex() copes with a NULL handle, so no check is needed
+    here. */
+    prvInitialiseMutex((Queue_t *)xNewQueue);
+
+    return xNewQueue;
+}
+
+#endif /* configUSE_MUTEXES */
+/*-----------------------------------------------------------*/
+
+#if ((configUSE_MUTEXES == 1) && (configSUPPORT_STATIC_ALLOCATION == 1))
+
+/*
+ * Create a mutex inside caller-supplied storage as a statically allocated
+ * queue of length one with an item size of zero, then configure it as a
+ * mutex.  Returns NULL if the underlying queue creation fails.
+ */
+QueueHandle_t xQueueCreateMutexStatic(
+    const uint8_t ucQueueType,
+    StaticQueue_t *pxStaticQueue)
+{
+    QueueHandle_t xNewQueue;
+
+    /* ucQueueType is only consumed when the trace facility is enabled;
+    reference it so other configurations do not warn. */
+    (void)ucQueueType;
+
+    /* No storage area is required: a mutex never copies item data. */
+    xNewQueue = xQueueGenericCreateStatic(
+        (UBaseType_t)1, (UBaseType_t)0, NULL, pxStaticQueue, ucQueueType);
+    prvInitialiseMutex((Queue_t *)xNewQueue);
+
+    return xNewQueue;
+}
+
+#endif /* configUSE_MUTEXES */
+/*-----------------------------------------------------------*/
+
+#if ((configUSE_MUTEXES == 1) && (INCLUDE_xSemaphoreGetMutexHolder == 1))
+
+/* Return the task currently holding the mutex, or NULL when the handle is
+not a mutex or the mutex is not held.  See the caveat in the comment below
+about the result's validity. */
+TaskHandle_t xQueueGetMutexHolder(QueueHandle_t xSemaphore)
+{
+    TaskHandle_t pxReturn;
+    Queue_t *const pxSemaphore = (Queue_t *)xSemaphore;
+
+    /* This function is called by xSemaphoreGetMutexHolder(), and should not
+    be called directly. Note: This is a good way of determining if the
+    calling task is the mutex holder, but not a good way of determining the
+    identity of the mutex holder, as the holder may change between the
+    following critical section exiting and the function returning. */
+    taskENTER_CRITICAL();
+    {
+        if (pxSemaphore->uxQueueType == queueQUEUE_IS_MUTEX) {
+            pxReturn = pxSemaphore->u.xSemaphore.xMutexHolder;
+        } else {
+            pxReturn = NULL;
+        }
+    }
+    taskEXIT_CRITICAL();
+
+    return pxReturn;
+} /*lint !e818 xSemaphore cannot be a pointer to const because it is a typedef.
+   */
+
+#endif
+/*-----------------------------------------------------------*/
+
+#if ((configUSE_MUTEXES == 1) && (INCLUDE_xSemaphoreGetMutexHolder == 1))
+
+/*
+ * ISR-safe variant of xQueueGetMutexHolder().  Mutexes cannot be used from
+ * interrupt service routines, so the holder cannot change underneath an
+ * ISR and no critical section is required.
+ */
+TaskHandle_t xQueueGetMutexHolderFromISR(QueueHandle_t xSemaphore)
+{
+    Queue_t *const pxSemaphore = (Queue_t *)xSemaphore;
+    TaskHandle_t xReturn = NULL;
+
+    configASSERT(xSemaphore);
+
+    /* Only queues configured as mutexes track a holding task. */
+    if (pxSemaphore->uxQueueType == queueQUEUE_IS_MUTEX) {
+        xReturn = pxSemaphore->u.xSemaphore.xMutexHolder;
+    }
+
+    return xReturn;
+} /*lint !e818 xSemaphore cannot be a pointer to const because it is a typedef.
+   */
+
+#endif
+/*-----------------------------------------------------------*/
+
+#if (configUSE_RECURSIVE_MUTEXES == 1)
+
+/*
+ * Release one level of a recursive mutex.  Only the holding task may give
+ * it back; the underlying queue 'send' that actually frees the mutex only
+ * happens when the recursion count unwinds to zero.  Returns pdPASS on
+ * success, pdFAIL when the caller is not the holder.
+ */
+BaseType_t xQueueGiveMutexRecursive(QueueHandle_t xMutex)
+{
+    BaseType_t xReturn;
+    Queue_t *const pxMutex = (Queue_t *)xMutex;
+
+    configASSERT(pxMutex);
+
+    /* If this is the task that holds the mutex then xMutexHolder will not
+    change outside of this task. If this task does not hold the mutex then
+    pxMutexHolder can never coincidentally equal the tasks handle, and as
+    this is the only condition we are interested in it does not matter if
+    pxMutexHolder is accessed simultaneously by another task. Therefore no
+    mutual exclusion is required to test the pxMutexHolder variable. */
+    if (pxMutex->u.xSemaphore.xMutexHolder == xTaskGetCurrentTaskHandle()) {
+        traceGIVE_MUTEX_RECURSIVE(pxMutex);
+
+        /* uxRecursiveCallCount cannot be zero if xMutexHolder is equal to
+        the task handle, therefore no underflow check is required. Also,
+        uxRecursiveCallCount is only modified by the mutex holder, and as
+        there can only be one, no mutual exclusion is required to modify the
+        uxRecursiveCallCount member. */
+        (pxMutex->u.xSemaphore.uxRecursiveCallCount)--;
+
+        /* Has the recursive call count unwound to 0? */
+        if (pxMutex->u.xSemaphore.uxRecursiveCallCount == (UBaseType_t)0) {
+            /* Return the mutex. This will automatically unblock any other
+            task that might be waiting to access the mutex. */
+            (void)xQueueGenericSend(
+                pxMutex, NULL, queueMUTEX_GIVE_BLOCK_TIME, queueSEND_TO_BACK);
+        } else {
+            mtCOVERAGE_TEST_MARKER();
+        }
+
+        xReturn = pdPASS;
+    } else {
+        /* The mutex cannot be given because the calling task is not the
+        holder. */
+        xReturn = pdFAIL;
+
+        traceGIVE_MUTEX_RECURSIVE_FAILED(pxMutex);
+    }
+
+    return xReturn;
+}
+
+#endif /* configUSE_RECURSIVE_MUTEXES */
+/*-----------------------------------------------------------*/
+
+#if (configUSE_RECURSIVE_MUTEXES == 1)
+
+/*
+ * Obtain a recursive mutex.  If the calling task already holds it, only the
+ * recursion count is incremented; otherwise this falls through to the
+ * normal semaphore take, which may block for up to xTicksToWait ticks.
+ * Returns pdPASS when the mutex is held on exit, pdFAIL on timeout.
+ */
+BaseType_t xQueueTakeMutexRecursive(
+    QueueHandle_t xMutex,
+    TickType_t xTicksToWait)
+{
+    BaseType_t xReturn;
+    Queue_t *const pxMutex = (Queue_t *)xMutex;
+
+    configASSERT(pxMutex);
+
+    /* Comments regarding mutual exclusion as per those within
+    xQueueGiveMutexRecursive(). */
+
+    traceTAKE_MUTEX_RECURSIVE(pxMutex);
+
+    if (pxMutex->u.xSemaphore.xMutexHolder == xTaskGetCurrentTaskHandle()) {
+        (pxMutex->u.xSemaphore.uxRecursiveCallCount)++;
+        xReturn = pdPASS;
+    } else {
+        xReturn = xQueueSemaphoreTake(pxMutex, xTicksToWait);
+
+        /* pdPASS will only be returned if the mutex was successfully
+        obtained. The calling task may have entered the Blocked state
+        before reaching here. */
+        if (xReturn != pdFAIL) {
+            (pxMutex->u.xSemaphore.uxRecursiveCallCount)++;
+        } else {
+            traceTAKE_MUTEX_RECURSIVE_FAILED(pxMutex);
+        }
+    }
+
+    return xReturn;
+}
+
+#endif /* configUSE_RECURSIVE_MUTEXES */
+/*-----------------------------------------------------------*/
+
+#if ( \
+    (configUSE_COUNTING_SEMAPHORES == 1) && \
+    (configSUPPORT_STATIC_ALLOCATION == 1))
+
+/*
+ * Create a counting semaphore in caller-supplied storage as a queue of
+ * uxMaxCount zero-byte items, pre-loaded with uxInitialCount 'messages' so
+ * the count starts at the requested value.
+ */
+QueueHandle_t xQueueCreateCountingSemaphoreStatic(
+    const UBaseType_t uxMaxCount,
+    const UBaseType_t uxInitialCount,
+    StaticQueue_t *pxStaticQueue)
+{
+    QueueHandle_t xHandle;
+
+    /* A zero maximum count, or an initial count above the maximum, is a
+    configuration error. */
+    configASSERT(uxMaxCount != 0);
+    configASSERT(uxInitialCount <= uxMaxCount);
+
+    xHandle = xQueueGenericCreateStatic(
+        uxMaxCount,
+        queueSEMAPHORE_QUEUE_ITEM_LENGTH,
+        NULL,
+        pxStaticQueue,
+        queueQUEUE_TYPE_COUNTING_SEMAPHORE);
+
+    if (xHandle != NULL) {
+        /* Writing uxMessagesWaiting directly is benign here: the semaphore
+        is not yet visible to any other task or interrupt. */
+        ((Queue_t *)xHandle)->uxMessagesWaiting = uxInitialCount;
+
+        traceCREATE_COUNTING_SEMAPHORE();
+    } else {
+        traceCREATE_COUNTING_SEMAPHORE_FAILED();
+    }
+
+    return xHandle;
+}
+
+#endif /* ( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( \
+    configSUPPORT_STATIC_ALLOCATION == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ( \
+    (configUSE_COUNTING_SEMAPHORES == 1) && \
+    (configSUPPORT_DYNAMIC_ALLOCATION == 1))
+
+/*
+ * Create a counting semaphore as a dynamically allocated queue of
+ * uxMaxCount zero-byte items, then seed it with uxInitialCount 'messages'
+ * so the count starts at the requested value.  Returns NULL if the
+ * underlying queue allocation fails.
+ */
+QueueHandle_t xQueueCreateCountingSemaphore(
+    const UBaseType_t uxMaxCount,
+    const UBaseType_t uxInitialCount)
+{
+    QueueHandle_t xHandle;
+
+    /* A zero maximum count, or an initial count above the maximum, is a
+    configuration error. */
+    configASSERT(uxMaxCount != 0);
+    configASSERT(uxInitialCount <= uxMaxCount);
+
+    xHandle = xQueueGenericCreate(
+        uxMaxCount,
+        queueSEMAPHORE_QUEUE_ITEM_LENGTH,
+        queueQUEUE_TYPE_COUNTING_SEMAPHORE);
+
+    if (xHandle == NULL) {
+        traceCREATE_COUNTING_SEMAPHORE_FAILED();
+    } else {
+        /* Writing uxMessagesWaiting directly is benign here: the semaphore
+        is not yet visible to any other task or interrupt. */
+        ((Queue_t *)xHandle)->uxMessagesWaiting = uxInitialCount;
+
+        traceCREATE_COUNTING_SEMAPHORE();
+    }
+
+    return xHandle;
+}
+
+#endif /* ( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( \
+    configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+/*
+ * Post an item to a queue (task context only; use the FromISR variant from
+ * interrupts).  xCopyPosition selects back, front, or overwrite semantics.
+ * If the queue is full the calling task blocks for up to xTicksToWait
+ * ticks; the outer for(;;) re-runs the attempt each time the task wakes
+ * until the send succeeds (pdPASS) or the timeout expires (errQUEUE_FULL).
+ */
+BaseType_t xQueueGenericSend(
+    QueueHandle_t xQueue,
+    const void *const pvItemToQueue,
+    TickType_t xTicksToWait,
+    const BaseType_t xCopyPosition)
+{
+    BaseType_t xEntryTimeSet = pdFALSE, xYieldRequired;
+    TimeOut_t xTimeOut;
+    Queue_t *const pxQueue = xQueue;
+
+    configASSERT(pxQueue);
+    configASSERT(
+        !((pvItemToQueue == NULL) && (pxQueue->uxItemSize != (UBaseType_t)0U)));
+    configASSERT(
+        !((xCopyPosition == queueOVERWRITE) && (pxQueue->uxLength != 1)));
+#if ((INCLUDE_xTaskGetSchedulerState == 1) || (configUSE_TIMERS == 1))
+    {
+        /* Blocking with the scheduler suspended would deadlock. */
+        configASSERT(
+            !((xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED) &&
+              (xTicksToWait != 0)));
+    }
+#endif
+
+    /*lint -save -e904 This function relaxes the coding standard somewhat to
+    allow return statements within the function itself. This is done in the
+    interest of execution time efficiency. */
+    for (;;) {
+        taskENTER_CRITICAL();
+        {
+            /* Is there room on the queue now? The running task must be the
+            highest priority task wanting to access the queue. If the head item
+            in the queue is to be overwritten then it does not matter if the
+            queue is full. */
+            if ((pxQueue->uxMessagesWaiting < pxQueue->uxLength) ||
+                (xCopyPosition == queueOVERWRITE)) {
+                traceQUEUE_SEND(pxQueue);
+
+#if (configUSE_QUEUE_SETS == 1)
+                {
+                    const UBaseType_t uxPreviousMessagesWaiting =
+                        pxQueue->uxMessagesWaiting;
+
+                    xYieldRequired = prvCopyDataToQueue(
+                        pxQueue, pvItemToQueue, xCopyPosition);
+
+                    if (pxQueue->pxQueueSetContainer != NULL) {
+                        if ((xCopyPosition == queueOVERWRITE) &&
+                            (uxPreviousMessagesWaiting != (UBaseType_t)0)) {
+                            /* Do not notify the queue set as an existing item
+                            was overwritten in the queue so the number of items
+                            in the queue has not changed. */
+                            mtCOVERAGE_TEST_MARKER();
+                        } else if (
+                            prvNotifyQueueSetContainer(pxQueue) != pdFALSE) {
+                            /* The queue is a member of a queue set, and posting
+                            to the queue set caused a higher priority task to
+                            unblock. A context switch is required. */
+                            queueYIELD_IF_USING_PREEMPTION();
+                        } else {
+                            mtCOVERAGE_TEST_MARKER();
+                        }
+                    } else {
+                        /* If there was a task waiting for data to arrive on the
+                        queue then unblock it now. */
+                        if (listLIST_IS_EMPTY(&(
+                                pxQueue->xTasksWaitingToReceive)) == pdFALSE) {
+                            if (xTaskRemoveFromEventList(
+                                    &(pxQueue->xTasksWaitingToReceive)) !=
+                                pdFALSE) {
+                                /* The unblocked task has a priority higher than
+                                our own so yield immediately. Yes it is ok to
+                                do this from within the critical section - the
+                                kernel takes care of that. */
+                                queueYIELD_IF_USING_PREEMPTION();
+                            } else {
+                                mtCOVERAGE_TEST_MARKER();
+                            }
+                        } else if (xYieldRequired != pdFALSE) {
+                            /* This path is a special case that will only get
+                            executed if the task was holding multiple mutexes
+                            and the mutexes were given back in an order that is
+                            different to that in which they were taken. */
+                            queueYIELD_IF_USING_PREEMPTION();
+                        } else {
+                            mtCOVERAGE_TEST_MARKER();
+                        }
+                    }
+                }
+#else /* configUSE_QUEUE_SETS */
+                {
+                    xYieldRequired = prvCopyDataToQueue(
+                        pxQueue, pvItemToQueue, xCopyPosition);
+
+                    /* If there was a task waiting for data to arrive on the
+                    queue then unblock it now. */
+                    if (listLIST_IS_EMPTY(&(pxQueue->xTasksWaitingToReceive)) ==
+                        pdFALSE) {
+                        if (xTaskRemoveFromEventList(&(
+                                pxQueue->xTasksWaitingToReceive)) != pdFALSE) {
+                            /* The unblocked task has a priority higher than
+                            our own so yield immediately. Yes it is ok to do
+                            this from within the critical section - the kernel
+                            takes care of that. */
+                            queueYIELD_IF_USING_PREEMPTION();
+                        } else {
+                            mtCOVERAGE_TEST_MARKER();
+                        }
+                    } else if (xYieldRequired != pdFALSE) {
+                        /* This path is a special case that will only get
+                        executed if the task was holding multiple mutexes and
+                        the mutexes were given back in an order that is
+                        different to that in which they were taken. */
+                        queueYIELD_IF_USING_PREEMPTION();
+                    } else {
+                        mtCOVERAGE_TEST_MARKER();
+                    }
+                }
+#endif /* configUSE_QUEUE_SETS */
+
+                taskEXIT_CRITICAL();
+                return pdPASS;
+            } else {
+                if (xTicksToWait == (TickType_t)0) {
+                    /* The queue was full and no block time is specified (or
+                    the block time has expired) so leave now. */
+                    taskEXIT_CRITICAL();
+
+                    /* Return to the original privilege level before exiting
+                    the function. */
+                    traceQUEUE_SEND_FAILED(pxQueue);
+                    return errQUEUE_FULL;
+                } else if (xEntryTimeSet == pdFALSE) {
+                    /* The queue was full and a block time was specified so
+                    configure the timeout structure. */
+                    vTaskInternalSetTimeOutState(&xTimeOut);
+                    xEntryTimeSet = pdTRUE;
+                } else {
+                    /* Entry time was already set. */
+                    mtCOVERAGE_TEST_MARKER();
+                }
+            }
+        }
+        taskEXIT_CRITICAL();
+
+        /* Interrupts and other tasks can send to and receive from the queue
+        now the critical section has been exited. */
+
+        vTaskSuspendAll();
+        prvLockQueue(pxQueue);
+
+        /* Update the timeout state to see if it has expired yet. */
+        if (xTaskCheckForTimeOut(&xTimeOut, &xTicksToWait) == pdFALSE) {
+            if (prvIsQueueFull(pxQueue) != pdFALSE) {
+                traceBLOCKING_ON_QUEUE_SEND(pxQueue);
+                vTaskPlaceOnEventList(
+                    &(pxQueue->xTasksWaitingToSend), xTicksToWait);
+
+                /* Unlocking the queue means queue events can effect the
+                event list. It is possible that interrupts occurring now
+                remove this task from the event list again - but as the
+                scheduler is suspended the task will go onto the pending
+                ready last instead of the actual ready list. */
+                prvUnlockQueue(pxQueue);
+
+                /* Resuming the scheduler will move tasks from the pending
+                ready list into the ready list - so it is feasible that this
+                task is already in a ready list before it yields - in which
+                case the yield will not cause a context switch unless there
+                is also a higher priority task in the pending ready list. */
+                if (xTaskResumeAll() == pdFALSE) {
+                    portYIELD_WITHIN_API();
+                }
+            } else {
+                /* Try again. */
+                prvUnlockQueue(pxQueue);
+                (void)xTaskResumeAll();
+            }
+        } else {
+            /* The timeout has expired. */
+            prvUnlockQueue(pxQueue);
+            (void)xTaskResumeAll();
+
+            traceQUEUE_SEND_FAILED(pxQueue);
+            return errQUEUE_FULL;
+        }
+    } /*lint -restore */
+}
+/*-----------------------------------------------------------*/
+
/*
 * Post an item to a queue from an interrupt service routine.
 *
 * Unlike xQueueGenericSend() this version never blocks: if the queue is full
 * (and xCopyPosition is not queueOVERWRITE) it fails immediately with
 * errQUEUE_FULL.  It also never yields itself; instead, when posting
 * unblocked a task of higher priority than the interrupted task,
 * *pxHigherPriorityTaskWoken (if non-NULL) is set to pdTRUE so the ISR can
 * request a context switch before it exits.
 *
 * xQueue                    Target queue, must not be NULL.
 * pvItemToQueue             Item to copy in; may be NULL only when the
 *                           queue's item size is zero.
 * pxHigherPriorityTaskWoken Optional out-flag, see above.
 * xCopyPosition             queueSEND_TO_BACK, queueSEND_TO_FRONT or
 *                           queueOVERWRITE (the latter only for queues of
 *                           length 1, enforced by configASSERT below).
 *
 * Returns pdPASS on success, errQUEUE_FULL otherwise.
 */
BaseType_t xQueueGenericSendFromISR(
    QueueHandle_t xQueue,
    const void *const pvItemToQueue,
    BaseType_t *const pxHigherPriorityTaskWoken,
    const BaseType_t xCopyPosition)
{
    BaseType_t xReturn;
    UBaseType_t uxSavedInterruptStatus;
    Queue_t *const pxQueue = xQueue;

    configASSERT(pxQueue);
    configASSERT(
        !((pvItemToQueue == NULL) && (pxQueue->uxItemSize != (UBaseType_t)0U)));
    configASSERT(
        !((xCopyPosition == queueOVERWRITE) && (pxQueue->uxLength != 1)));

    /* RTOS ports that support interrupt nesting have the concept of a maximum
    system call (or maximum API call) interrupt priority.  Interrupts that are
    above the maximum system call priority are kept permanently enabled, even
    when the RTOS kernel is in a critical section, but cannot make any calls to
    FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h
    then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
    failure if a FreeRTOS API function is called from an interrupt that has been
    assigned a priority above the configured maximum system call priority.
    Only FreeRTOS functions that end in FromISR can be called from interrupts
    that have been assigned a priority at or (logically) below the maximum
    system call interrupt priority.  FreeRTOS maintains a separate interrupt
    safe API to ensure interrupt entry is as fast and as simple as possible.
    More information (albeit Cortex-M specific) is provided on the following
    link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
    portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

    /* Similar to xQueueGenericSend, except without blocking if there is no room
    in the queue.  Also don't directly wake a task that was blocked on a queue
    read, instead return a flag to say whether a context switch is required or
    not (i.e. has a task with a higher priority than us been woken by this
    post). */
    uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
    {
        if ((pxQueue->uxMessagesWaiting < pxQueue->uxLength) ||
            (xCopyPosition == queueOVERWRITE)) {
            const int8_t cTxLock = pxQueue->cTxLock;
            const UBaseType_t uxPreviousMessagesWaiting =
                pxQueue->uxMessagesWaiting;

            traceQUEUE_SEND_FROM_ISR(pxQueue);

            /* Semaphores use xQueueGiveFromISR(), so pxQueue will not be a
            semaphore or mutex.  That means prvCopyDataToQueue() cannot result
            in a task disinheriting a priority and prvCopyDataToQueue() can be
            called here even though the disinherit function does not check if
            the scheduler is suspended before accessing the ready lists. */
            (void)prvCopyDataToQueue(pxQueue, pvItemToQueue, xCopyPosition);

            /* The event list is not altered if the queue is locked.  This will
            be done when the queue is unlocked later. */
            if (cTxLock == queueUNLOCKED) {
#if (configUSE_QUEUE_SETS == 1)
                {
                    if (pxQueue->pxQueueSetContainer != NULL) {
                        if ((xCopyPosition == queueOVERWRITE) &&
                            (uxPreviousMessagesWaiting != (UBaseType_t)0)) {
                            /* Do not notify the queue set as an existing item
                            was overwritten in the queue so the number of items
                            in the queue has not changed. */
                            mtCOVERAGE_TEST_MARKER();
                        } else if (
                            prvNotifyQueueSetContainer(pxQueue) != pdFALSE) {
                            /* The queue is a member of a queue set, and posting
                            to the queue set caused a higher priority task to
                            unblock.  A context switch is required. */
                            if (pxHigherPriorityTaskWoken != NULL) {
                                *pxHigherPriorityTaskWoken = pdTRUE;
                            } else {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        } else {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    } else {
                        if (listLIST_IS_EMPTY(&(
                                pxQueue->xTasksWaitingToReceive)) == pdFALSE) {
                            if (xTaskRemoveFromEventList(
                                    &(pxQueue->xTasksWaitingToReceive)) !=
                                pdFALSE) {
                                /* The task waiting has a higher priority so
                                record that a context switch is required. */
                                if (pxHigherPriorityTaskWoken != NULL) {
                                    *pxHigherPriorityTaskWoken = pdTRUE;
                                } else {
                                    mtCOVERAGE_TEST_MARKER();
                                }
                            } else {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        } else {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                }
#else /* configUSE_QUEUE_SETS */
                {
                    if (listLIST_IS_EMPTY(&(pxQueue->xTasksWaitingToReceive)) ==
                        pdFALSE) {
                        if (xTaskRemoveFromEventList(&(
                                pxQueue->xTasksWaitingToReceive)) != pdFALSE) {
                            /* The task waiting has a higher priority so record
                            that a context switch is required. */
                            if (pxHigherPriorityTaskWoken != NULL) {
                                *pxHigherPriorityTaskWoken = pdTRUE;
                            } else {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        } else {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    } else {
                        mtCOVERAGE_TEST_MARKER();
                    }

                    /* Not used in this path. */
                    (void)uxPreviousMessagesWaiting;
                }
#endif /* configUSE_QUEUE_SETS */
            } else {
                /* Increment the lock count so the task that unlocks the queue
                knows that data was posted while it was locked. */
                pxQueue->cTxLock = (int8_t)(cTxLock + 1);
            }

            xReturn = pdPASS;
        } else {
            traceQUEUE_SEND_FROM_ISR_FAILED(pxQueue);
            xReturn = errQUEUE_FULL;
        }
    }
    portCLEAR_INTERRUPT_MASK_FROM_ISR(uxSavedInterruptStatus);

    return xReturn;
}
+/*-----------------------------------------------------------*/
+
/*
 * "Give" a semaphore from an interrupt service routine.
 *
 * A semaphore is a queue whose item size is zero and whose message count is
 * the semaphore count, so giving it simply increments uxMessagesWaiting —
 * no data is copied.  Never blocks.  When the give unblocks a task of
 * higher priority than the interrupted task, *pxHigherPriorityTaskWoken
 * (if non-NULL) is set to pdTRUE so the ISR can request a context switch
 * before it exits.
 *
 * Must not be used on mutexes that currently have a holder (asserted
 * below), because priority inheritance is meaningless for interrupts.
 *
 * Returns pdPASS on success, or errQUEUE_FULL when the count is already at
 * its maximum (the queue length).
 */
BaseType_t xQueueGiveFromISR(
    QueueHandle_t xQueue,
    BaseType_t *const pxHigherPriorityTaskWoken)
{
    BaseType_t xReturn;
    UBaseType_t uxSavedInterruptStatus;
    Queue_t *const pxQueue = xQueue;

    /* Similar to xQueueGenericSendFromISR() but used with semaphores where the
    item size is 0.  Don't directly wake a task that was blocked on a queue
    read, instead return a flag to say whether a context switch is required or
    not (i.e. has a task with a higher priority than us been woken by this
    post). */

    configASSERT(pxQueue);

    /* xQueueGenericSendFromISR() should be used instead of xQueueGiveFromISR()
    if the item size is not 0. */
    configASSERT(pxQueue->uxItemSize == 0);

    /* Normally a mutex would not be given from an interrupt, especially if
    there is a mutex holder, as priority inheritance makes no sense for an
    interrupts, only tasks. */
    configASSERT(
        !((pxQueue->uxQueueType == queueQUEUE_IS_MUTEX) &&
          (pxQueue->u.xSemaphore.xMutexHolder != NULL)));

    /* RTOS ports that support interrupt nesting have the concept of a maximum
    system call (or maximum API call) interrupt priority.  Interrupts that are
    above the maximum system call priority are kept permanently enabled, even
    when the RTOS kernel is in a critical section, but cannot make any calls to
    FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h
    then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
    failure if a FreeRTOS API function is called from an interrupt that has been
    assigned a priority above the configured maximum system call priority.
    Only FreeRTOS functions that end in FromISR can be called from interrupts
    that have been assigned a priority at or (logically) below the maximum
    system call interrupt priority.  FreeRTOS maintains a separate interrupt
    safe API to ensure interrupt entry is as fast and as simple as possible.
    More information (albeit Cortex-M specific) is provided on the following
    link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
    portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

    uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
    {
        const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;

        /* When the queue is used to implement a semaphore no data is ever
        moved through the queue but it is still valid to see if the queue 'has
        space'. */
        if (uxMessagesWaiting < pxQueue->uxLength) {
            const int8_t cTxLock = pxQueue->cTxLock;

            traceQUEUE_SEND_FROM_ISR(pxQueue);

            /* A task can only have an inherited priority if it is a mutex
            holder - and if there is a mutex holder then the mutex cannot be
            given from an ISR.  As this is the ISR version of the function it
            can be assumed there is no mutex holder and no need to determine if
            priority disinheritance is needed.  Simply increase the count of
            messages (semaphores) available. */
            pxQueue->uxMessagesWaiting = uxMessagesWaiting + (UBaseType_t)1;

            /* The event list is not altered if the queue is locked.  This will
            be done when the queue is unlocked later. */
            if (cTxLock == queueUNLOCKED) {
#if (configUSE_QUEUE_SETS == 1)
                {
                    if (pxQueue->pxQueueSetContainer != NULL) {
                        if (prvNotifyQueueSetContainer(pxQueue) != pdFALSE) {
                            /* The semaphore is a member of a queue set, and
                            posting to the queue set caused a higher priority
                            task to unblock.  A context switch is required. */
                            if (pxHigherPriorityTaskWoken != NULL) {
                                *pxHigherPriorityTaskWoken = pdTRUE;
                            } else {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        } else {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    } else {
                        if (listLIST_IS_EMPTY(&(
                                pxQueue->xTasksWaitingToReceive)) == pdFALSE) {
                            if (xTaskRemoveFromEventList(
                                    &(pxQueue->xTasksWaitingToReceive)) !=
                                pdFALSE) {
                                /* The task waiting has a higher priority so
                                record that a context switch is required. */
                                if (pxHigherPriorityTaskWoken != NULL) {
                                    *pxHigherPriorityTaskWoken = pdTRUE;
                                } else {
                                    mtCOVERAGE_TEST_MARKER();
                                }
                            } else {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        } else {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                }
#else /* configUSE_QUEUE_SETS */
                {
                    if (listLIST_IS_EMPTY(&(pxQueue->xTasksWaitingToReceive)) ==
                        pdFALSE) {
                        if (xTaskRemoveFromEventList(&(
                                pxQueue->xTasksWaitingToReceive)) != pdFALSE) {
                            /* The task waiting has a higher priority so record
                            that a context switch is required. */
                            if (pxHigherPriorityTaskWoken != NULL) {
                                *pxHigherPriorityTaskWoken = pdTRUE;
                            } else {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        } else {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    } else {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
#endif /* configUSE_QUEUE_SETS */
            } else {
                /* Increment the lock count so the task that unlocks the queue
                knows that data was posted while it was locked. */
                pxQueue->cTxLock = (int8_t)(cTxLock + 1);
            }

            xReturn = pdPASS;
        } else {
            traceQUEUE_SEND_FROM_ISR_FAILED(pxQueue);
            xReturn = errQUEUE_FULL;
        }
    }
    portCLEAR_INTERRUPT_MASK_FROM_ISR(uxSavedInterruptStatus);

    return xReturn;
}
+/*-----------------------------------------------------------*/
+
/*
 * Receive (and remove) one item from a queue, blocking for up to
 * xTicksToWait ticks while the queue is empty.
 *
 * The function loops: inside a critical section it either takes an item
 * (waking the highest priority sender, if any, and yielding to it when
 * required) or arms a timeout.  If it must wait it suspends the scheduler,
 * locks the queue, places the task on the queue's "waiting to receive"
 * event list and yields.  The lock/suspend dance ensures events that occur
 * between exiting the critical section and blocking are not lost.
 *
 * xQueue       Queue to read from, must not be NULL.
 * pvBuffer     Destination for the item; may be NULL only when the item
 *              size is zero.
 * xTicksToWait Maximum time to block; 0 means do not block.
 *
 * Returns pdPASS when an item was copied into pvBuffer, errQUEUE_EMPTY on
 * timeout (or immediately when the queue is empty and xTicksToWait is 0).
 */
BaseType_t xQueueReceive(
    QueueHandle_t xQueue,
    void *const pvBuffer,
    TickType_t xTicksToWait)
{
    BaseType_t xEntryTimeSet = pdFALSE;
    TimeOut_t xTimeOut;
    Queue_t *const pxQueue = xQueue;

    /* Check the pointer is not NULL. */
    configASSERT((pxQueue));

    /* The buffer into which data is received can only be NULL if the data size
    is zero (so no data is copied into the buffer. */
    configASSERT(
        !(((pvBuffer) == NULL) && ((pxQueue)->uxItemSize != (UBaseType_t)0U)));

/* Cannot block if the scheduler is suspended. */
#if ((INCLUDE_xTaskGetSchedulerState == 1) || (configUSE_TIMERS == 1))
    {
        configASSERT(
            !((xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED) &&
              (xTicksToWait != 0)));
    }
#endif

    /*lint -save -e904 This function relaxes the coding standard somewhat to
    allow return statements within the function itself.  This is done in the
    interest of execution time efficiency. */
    for (;;) {
        taskENTER_CRITICAL();
        {
            const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;

            /* Is there data in the queue now?  To be running the calling task
            must be the highest priority task wanting to access the queue. */
            if (uxMessagesWaiting > (UBaseType_t)0) {
                /* Data available, remove one item. */
                prvCopyDataFromQueue(pxQueue, pvBuffer);
                traceQUEUE_RECEIVE(pxQueue);
                pxQueue->uxMessagesWaiting = uxMessagesWaiting - (UBaseType_t)1;

                /* There is now space in the queue, were any tasks waiting to
                post to the queue?  If so, unblock the highest priority waiting
                task. */
                if (listLIST_IS_EMPTY(&(pxQueue->xTasksWaitingToSend)) ==
                    pdFALSE) {
                    if (xTaskRemoveFromEventList(
                            &(pxQueue->xTasksWaitingToSend)) != pdFALSE) {
                        queueYIELD_IF_USING_PREEMPTION();
                    } else {
                        mtCOVERAGE_TEST_MARKER();
                    }
                } else {
                    mtCOVERAGE_TEST_MARKER();
                }

                taskEXIT_CRITICAL();
                return pdPASS;
            } else {
                if (xTicksToWait == (TickType_t)0) {
                    /* The queue was empty and no block time is specified (or
                    the block time has expired) so leave now. */
                    taskEXIT_CRITICAL();
                    traceQUEUE_RECEIVE_FAILED(pxQueue);
                    return errQUEUE_EMPTY;
                } else if (xEntryTimeSet == pdFALSE) {
                    /* The queue was empty and a block time was specified so
                    configure the timeout structure. */
                    vTaskInternalSetTimeOutState(&xTimeOut);
                    xEntryTimeSet = pdTRUE;
                } else {
                    /* Entry time was already set. */
                    mtCOVERAGE_TEST_MARKER();
                }
            }
        }
        taskEXIT_CRITICAL();

        /* Interrupts and other tasks can send to and receive from the queue
        now the critical section has been exited. */

        vTaskSuspendAll();
        prvLockQueue(pxQueue);

        /* Update the timeout state to see if it has expired yet. */
        if (xTaskCheckForTimeOut(&xTimeOut, &xTicksToWait) == pdFALSE) {
            /* The timeout has not expired.  If the queue is still empty place
            the task on the list of tasks waiting to receive from the queue. */
            if (prvIsQueueEmpty(pxQueue) != pdFALSE) {
                traceBLOCKING_ON_QUEUE_RECEIVE(pxQueue);
                vTaskPlaceOnEventList(
                    &(pxQueue->xTasksWaitingToReceive), xTicksToWait);
                prvUnlockQueue(pxQueue);
                if (xTaskResumeAll() == pdFALSE) {
                    portYIELD_WITHIN_API();
                } else {
                    mtCOVERAGE_TEST_MARKER();
                }
            } else {
                /* The queue contains data again.  Loop back to try and read
                the data. */
                prvUnlockQueue(pxQueue);
                (void)xTaskResumeAll();
            }
        } else {
            /* Timed out.  If there is no data in the queue exit, otherwise
            loop back and attempt to read the data. */
            prvUnlockQueue(pxQueue);
            (void)xTaskResumeAll();

            if (prvIsQueueEmpty(pxQueue) != pdFALSE) {
                traceQUEUE_RECEIVE_FAILED(pxQueue);
                return errQUEUE_EMPTY;
            } else {
                mtCOVERAGE_TEST_MARKER();
            }
        }
    } /*lint -restore */
}
+/*-----------------------------------------------------------*/
+
/*
 * Take a semaphore, blocking for up to xTicksToWait ticks while the count
 * is zero.
 *
 * A semaphore is a queue with an item size of zero whose message count is
 * the semaphore count, so taking it just decrements uxMessagesWaiting.
 * For mutex-type semaphores two extra pieces of bookkeeping happen here:
 *  - on a successful take the calling task is recorded as the holder
 *    (pvTaskIncrementMutexHeldCount());
 *  - before blocking, the holder inherits this task's priority
 *    (xTaskPriorityInherit()), and if the wait then times out the
 *    inherited priority is wound back to the highest priority still
 *    waiting on the mutex (vTaskPriorityDisinheritAfterTimeout()).
 *
 * Returns pdPASS on success, errQUEUE_EMPTY on timeout (or immediately
 * when the count is zero and xTicksToWait is 0).
 */
BaseType_t xQueueSemaphoreTake(QueueHandle_t xQueue, TickType_t xTicksToWait)
{
    BaseType_t xEntryTimeSet = pdFALSE;
    TimeOut_t xTimeOut;
    Queue_t *const pxQueue = xQueue;

#if (configUSE_MUTEXES == 1)
    BaseType_t xInheritanceOccurred = pdFALSE;
#endif

    /* Check the queue pointer is not NULL. */
    configASSERT((pxQueue));

    /* Check this really is a semaphore, in which case the item size will be
    0. */
    configASSERT(pxQueue->uxItemSize == 0);

/* Cannot block if the scheduler is suspended. */
#if ((INCLUDE_xTaskGetSchedulerState == 1) || (configUSE_TIMERS == 1))
    {
        configASSERT(
            !((xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED) &&
              (xTicksToWait != 0)));
    }
#endif

    /*lint -save -e904 This function relaxes the coding standard somewhat to
    allow return statements within the function itself.  This is done in the
    interest of execution time efficiency. */
    for (;;) {
        taskENTER_CRITICAL();
        {
            /* Semaphores are queues with an item size of 0, and where the
            number of messages in the queue is the semaphore's count value. */
            const UBaseType_t uxSemaphoreCount = pxQueue->uxMessagesWaiting;

            /* Is there data in the queue now?  To be running the calling task
            must be the highest priority task wanting to access the queue. */
            if (uxSemaphoreCount > (UBaseType_t)0) {
                traceQUEUE_RECEIVE(pxQueue);

                /* Semaphores are queues with a data size of zero and where the
                messages waiting is the semaphore's count.  Reduce the count. */
                pxQueue->uxMessagesWaiting = uxSemaphoreCount - (UBaseType_t)1;

#if (configUSE_MUTEXES == 1)
                {
                    if (pxQueue->uxQueueType == queueQUEUE_IS_MUTEX) {
                        /* Record the information required to implement
                        priority inheritance should it become necessary. */
                        pxQueue->u.xSemaphore.xMutexHolder =
                            pvTaskIncrementMutexHeldCount();
                    } else {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
#endif /* configUSE_MUTEXES */

                /* Check to see if other tasks are blocked waiting to give the
                semaphore, and if so, unblock the highest priority such task. */
                if (listLIST_IS_EMPTY(&(pxQueue->xTasksWaitingToSend)) ==
                    pdFALSE) {
                    if (xTaskRemoveFromEventList(
                            &(pxQueue->xTasksWaitingToSend)) != pdFALSE) {
                        queueYIELD_IF_USING_PREEMPTION();
                    } else {
                        mtCOVERAGE_TEST_MARKER();
                    }
                } else {
                    mtCOVERAGE_TEST_MARKER();
                }

                taskEXIT_CRITICAL();
                return pdPASS;
            } else {
                if (xTicksToWait == (TickType_t)0) {
/* For inheritance to have occurred there must have been an
initial timeout, and an adjusted timeout cannot become 0, as
if it were 0 the function would have exited. */
#if (configUSE_MUTEXES == 1)
                    {
                        configASSERT(xInheritanceOccurred == pdFALSE);
                    }
#endif /* configUSE_MUTEXES */

                    /* The semaphore count was 0 and no block time is specified
                    (or the block time has expired) so exit now. */
                    taskEXIT_CRITICAL();
                    traceQUEUE_RECEIVE_FAILED(pxQueue);
                    return errQUEUE_EMPTY;
                } else if (xEntryTimeSet == pdFALSE) {
                    /* The semaphore count was 0 and a block time was specified
                    so configure the timeout structure ready to block. */
                    vTaskInternalSetTimeOutState(&xTimeOut);
                    xEntryTimeSet = pdTRUE;
                } else {
                    /* Entry time was already set. */
                    mtCOVERAGE_TEST_MARKER();
                }
            }
        }
        taskEXIT_CRITICAL();

        /* Interrupts and other tasks can give to and take from the semaphore
        now the critical section has been exited. */

        vTaskSuspendAll();
        prvLockQueue(pxQueue);

        /* Update the timeout state to see if it has expired yet. */
        if (xTaskCheckForTimeOut(&xTimeOut, &xTicksToWait) == pdFALSE) {
            /* A block time is specified and not expired.  If the semaphore
            count is 0 then enter the Blocked state to wait for a semaphore to
            become available.  As semaphores are implemented with queues the
            queue being empty is equivalent to the semaphore count being 0. */
            if (prvIsQueueEmpty(pxQueue) != pdFALSE) {
                traceBLOCKING_ON_QUEUE_RECEIVE(pxQueue);

#if (configUSE_MUTEXES == 1)
                {
                    if (pxQueue->uxQueueType == queueQUEUE_IS_MUTEX) {
                        taskENTER_CRITICAL();
                        {
                            xInheritanceOccurred = xTaskPriorityInherit(
                                pxQueue->u.xSemaphore.xMutexHolder);
                        }
                        taskEXIT_CRITICAL();
                    } else {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
#endif

                vTaskPlaceOnEventList(
                    &(pxQueue->xTasksWaitingToReceive), xTicksToWait);
                prvUnlockQueue(pxQueue);
                if (xTaskResumeAll() == pdFALSE) {
                    portYIELD_WITHIN_API();
                } else {
                    mtCOVERAGE_TEST_MARKER();
                }
            } else {
                /* There was no timeout and the semaphore count was not 0, so
                attempt to take the semaphore again. */
                prvUnlockQueue(pxQueue);
                (void)xTaskResumeAll();
            }
        } else {
            /* Timed out. */
            prvUnlockQueue(pxQueue);
            (void)xTaskResumeAll();

            /* If the semaphore count is 0 exit now as the timeout has
            expired.  Otherwise return to attempt to take the semaphore that is
            known to be available.  As semaphores are implemented by queues the
            queue being empty is equivalent to the semaphore count being 0. */
            if (prvIsQueueEmpty(pxQueue) != pdFALSE) {
#if (configUSE_MUTEXES == 1)
                {
                    /* xInheritanceOccurred could only have be set if
                    pxQueue->uxQueueType == queueQUEUE_IS_MUTEX so no need to
                    test the mutex type again to check it is actually a mutex.
                    */
                    if (xInheritanceOccurred != pdFALSE) {
                        taskENTER_CRITICAL();
                        {
                            UBaseType_t uxHighestWaitingPriority;

                            /* This task blocking on the mutex caused another
                            task to inherit this task's priority.  Now this task
                            has timed out the priority should be disinherited
                            again, but only as low as the next highest priority
                            task that is waiting for the same mutex. */
                            uxHighestWaitingPriority =
                                prvGetDisinheritPriorityAfterTimeout(pxQueue);
                            vTaskPriorityDisinheritAfterTimeout(
                                pxQueue->u.xSemaphore.xMutexHolder,
                                uxHighestWaitingPriority);
                        }
                        taskEXIT_CRITICAL();
                    }
                }
#endif /* configUSE_MUTEXES */

                traceQUEUE_RECEIVE_FAILED(pxQueue);
                return errQUEUE_EMPTY;
            } else {
                mtCOVERAGE_TEST_MARKER();
            }
        }
    } /*lint -restore */
}
+/*-----------------------------------------------------------*/
+
/*
 * Copy the front item of a queue into pvBuffer WITHOUT removing it,
 * blocking for up to xTicksToWait ticks while the queue is empty.
 *
 * The read pointer is saved before the copy and restored afterwards so the
 * item stays in the queue; because the item remains available, other tasks
 * blocked on the "waiting to receive" list are also woken.  The blocking
 * protocol (critical section, then suspend-scheduler + lock-queue before
 * placing the task on the event list) matches xQueueReceive().
 *
 * Returns pdPASS when data was copied, errQUEUE_EMPTY on timeout (or
 * immediately when the queue is empty and xTicksToWait is 0).
 */
BaseType_t xQueuePeek(
    QueueHandle_t xQueue,
    void *const pvBuffer,
    TickType_t xTicksToWait)
{
    BaseType_t xEntryTimeSet = pdFALSE;
    TimeOut_t xTimeOut;
    int8_t *pcOriginalReadPosition;
    Queue_t *const pxQueue = xQueue;

    /* Check the pointer is not NULL. */
    configASSERT((pxQueue));

    /* The buffer into which data is received can only be NULL if the data size
    is zero (so no data is copied into the buffer. */
    configASSERT(
        !(((pvBuffer) == NULL) && ((pxQueue)->uxItemSize != (UBaseType_t)0U)));

/* Cannot block if the scheduler is suspended. */
#if ((INCLUDE_xTaskGetSchedulerState == 1) || (configUSE_TIMERS == 1))
    {
        configASSERT(
            !((xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED) &&
              (xTicksToWait != 0)));
    }
#endif

    /*lint -save -e904 This function relaxes the coding standard somewhat to
    allow return statements within the function itself.  This is done in the
    interest of execution time efficiency. */
    for (;;) {
        taskENTER_CRITICAL();
        {
            const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;

            /* Is there data in the queue now?  To be running the calling task
            must be the highest priority task wanting to access the queue. */
            if (uxMessagesWaiting > (UBaseType_t)0) {
                /* Remember the read position so it can be reset after the data
                is read from the queue as this function is only peeking the
                data, not removing it. */
                pcOriginalReadPosition = pxQueue->u.xQueue.pcReadFrom;

                prvCopyDataFromQueue(pxQueue, pvBuffer);
                traceQUEUE_PEEK(pxQueue);

                /* The data is not being removed, so reset the read pointer. */
                pxQueue->u.xQueue.pcReadFrom = pcOriginalReadPosition;

                /* The data is being left in the queue, so see if there are
                any other tasks waiting for the data. */
                if (listLIST_IS_EMPTY(&(pxQueue->xTasksWaitingToReceive)) ==
                    pdFALSE) {
                    if (xTaskRemoveFromEventList(
                            &(pxQueue->xTasksWaitingToReceive)) != pdFALSE) {
                        /* The task waiting has a higher priority than this
                         * task. */
                        queueYIELD_IF_USING_PREEMPTION();
                    } else {
                        mtCOVERAGE_TEST_MARKER();
                    }
                } else {
                    mtCOVERAGE_TEST_MARKER();
                }

                taskEXIT_CRITICAL();
                return pdPASS;
            } else {
                if (xTicksToWait == (TickType_t)0) {
                    /* The queue was empty and no block time is specified (or
                    the block time has expired) so leave now. */
                    taskEXIT_CRITICAL();
                    traceQUEUE_PEEK_FAILED(pxQueue);
                    return errQUEUE_EMPTY;
                } else if (xEntryTimeSet == pdFALSE) {
                    /* The queue was empty and a block time was specified so
                    configure the timeout structure ready to enter the blocked
                    state. */
                    vTaskInternalSetTimeOutState(&xTimeOut);
                    xEntryTimeSet = pdTRUE;
                } else {
                    /* Entry time was already set. */
                    mtCOVERAGE_TEST_MARKER();
                }
            }
        }
        taskEXIT_CRITICAL();

        /* Interrupts and other tasks can send to and receive from the queue
        now the critical section has been exited. */

        vTaskSuspendAll();
        prvLockQueue(pxQueue);

        /* Update the timeout state to see if it has expired yet. */
        if (xTaskCheckForTimeOut(&xTimeOut, &xTicksToWait) == pdFALSE) {
            /* Timeout has not expired yet, check to see if there is data in
            the queue now, and if not enter the Blocked state to wait for
            data. */
            if (prvIsQueueEmpty(pxQueue) != pdFALSE) {
                traceBLOCKING_ON_QUEUE_PEEK(pxQueue);
                vTaskPlaceOnEventList(
                    &(pxQueue->xTasksWaitingToReceive), xTicksToWait);
                prvUnlockQueue(pxQueue);
                if (xTaskResumeAll() == pdFALSE) {
                    portYIELD_WITHIN_API();
                } else {
                    mtCOVERAGE_TEST_MARKER();
                }
            } else {
                /* There is data in the queue now, so don't enter the blocked
                state, instead return to try and obtain the data. */
                prvUnlockQueue(pxQueue);
                (void)xTaskResumeAll();
            }
        } else {
            /* The timeout has expired.  If there is still no data in the queue
            exit, otherwise go back and try to read the data again. */
            prvUnlockQueue(pxQueue);
            (void)xTaskResumeAll();

            if (prvIsQueueEmpty(pxQueue) != pdFALSE) {
                traceQUEUE_PEEK_FAILED(pxQueue);
                return errQUEUE_EMPTY;
            } else {
                mtCOVERAGE_TEST_MARKER();
            }
        }
    } /*lint -restore */
}
+/*-----------------------------------------------------------*/
+
/*
 * Receive (and remove) one item from a queue from an interrupt service
 * routine.  Never blocks: returns pdFAIL immediately when the queue is
 * empty.  When removing the item unblocks a higher priority task that was
 * waiting to send, *pxHigherPriorityTaskWoken (if non-NULL) is set to
 * pdTRUE so the ISR can request a context switch before it exits.
 * If the queue is locked (cRxLock != queueUNLOCKED) the event lists are
 * not touched; instead cRxLock is incremented so the task that later
 * unlocks the queue knows data was removed in the meantime.
 *
 * Returns pdPASS when an item was copied into pvBuffer, pdFAIL otherwise.
 */
BaseType_t xQueueReceiveFromISR(
    QueueHandle_t xQueue,
    void *const pvBuffer,
    BaseType_t *const pxHigherPriorityTaskWoken)
{
    BaseType_t xReturn;
    UBaseType_t uxSavedInterruptStatus;
    Queue_t *const pxQueue = xQueue;

    configASSERT(pxQueue);
    configASSERT(
        !((pvBuffer == NULL) && (pxQueue->uxItemSize != (UBaseType_t)0U)));

    /* RTOS ports that support interrupt nesting have the concept of a maximum
    system call (or maximum API call) interrupt priority.  Interrupts that are
    above the maximum system call priority are kept permanently enabled, even
    when the RTOS kernel is in a critical section, but cannot make any calls to
    FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h
    then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
    failure if a FreeRTOS API function is called from an interrupt that has been
    assigned a priority above the configured maximum system call priority.
    Only FreeRTOS functions that end in FromISR can be called from interrupts
    that have been assigned a priority at or (logically) below the maximum
    system call interrupt priority.  FreeRTOS maintains a separate interrupt
    safe API to ensure interrupt entry is as fast and as simple as possible.
    More information (albeit Cortex-M specific) is provided on the following
    link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
    portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

    uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
    {
        const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;

        /* Cannot block in an ISR, so check there is data available. */
        if (uxMessagesWaiting > (UBaseType_t)0) {
            const int8_t cRxLock = pxQueue->cRxLock;

            traceQUEUE_RECEIVE_FROM_ISR(pxQueue);

            prvCopyDataFromQueue(pxQueue, pvBuffer);
            pxQueue->uxMessagesWaiting = uxMessagesWaiting - (UBaseType_t)1;

            /* If the queue is locked the event list will not be modified.
            Instead update the lock count so the task that unlocks the queue
            will know that an ISR has removed data while the queue was
            locked. */
            if (cRxLock == queueUNLOCKED) {
                if (listLIST_IS_EMPTY(&(pxQueue->xTasksWaitingToSend)) ==
                    pdFALSE) {
                    if (xTaskRemoveFromEventList(
                            &(pxQueue->xTasksWaitingToSend)) != pdFALSE) {
                        /* The task waiting has a higher priority than us so
                        force a context switch. */
                        if (pxHigherPriorityTaskWoken != NULL) {
                            *pxHigherPriorityTaskWoken = pdTRUE;
                        } else {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    } else {
                        mtCOVERAGE_TEST_MARKER();
                    }
                } else {
                    mtCOVERAGE_TEST_MARKER();
                }
            } else {
                /* Increment the lock count so the task that unlocks the queue
                knows that data was removed while it was locked. */
                pxQueue->cRxLock = (int8_t)(cRxLock + 1);
            }

            xReturn = pdPASS;
        } else {
            xReturn = pdFAIL;
            traceQUEUE_RECEIVE_FROM_ISR_FAILED(pxQueue);
        }
    }
    portCLEAR_INTERRUPT_MASK_FROM_ISR(uxSavedInterruptStatus);

    return xReturn;
}
+/*-----------------------------------------------------------*/
+
+BaseType_t xQueuePeekFromISR(QueueHandle_t xQueue, void *const pvBuffer)
+{
+ BaseType_t xReturn;
+ UBaseType_t uxSavedInterruptStatus;
+ int8_t *pcOriginalReadPosition;
+ Queue_t *const pxQueue = xQueue;
+
+ configASSERT(pxQueue);
+ configASSERT(
+ !((pvBuffer == NULL) && (pxQueue->uxItemSize != (UBaseType_t)0U)));
+ configASSERT(pxQueue->uxItemSize != 0); /* Can't peek a semaphore. */
+
+ /* RTOS ports that support interrupt nesting have the concept of a maximum
+ system call (or maximum API call) interrupt priority. Interrupts that are
+ above the maximum system call priority are kept permanently enabled, even
+ when the RTOS kernel is in a critical section, but cannot make any calls to
+ FreeRTOS API functions. If configASSERT() is defined in FreeRTOSConfig.h
+ then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
+ failure if a FreeRTOS API function is called from an interrupt that has been
+ assigned a priority above the configured maximum system call priority.
+ Only FreeRTOS functions that end in FromISR can be called from interrupts
+ that have been assigned a priority at or (logically) below the maximum
+ system call interrupt priority. FreeRTOS maintains a separate interrupt
+ safe API to ensure interrupt entry is as fast and as simple as possible.
+ More information (albeit Cortex-M specific) is provided on the following
+ link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
+ portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
+
+ uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
+ {
+ /* Cannot block in an ISR, so check there is data available. */
+ if (pxQueue->uxMessagesWaiting > (UBaseType_t)0) {
+ traceQUEUE_PEEK_FROM_ISR(pxQueue);
+
+ /* Remember the read position so it can be reset as nothing is
+ actually being removed from the queue. */
+ pcOriginalReadPosition = pxQueue->u.xQueue.pcReadFrom;
+ prvCopyDataFromQueue(pxQueue, pvBuffer);
+ pxQueue->u.xQueue.pcReadFrom = pcOriginalReadPosition;
+
+ xReturn = pdPASS;
+ } else {
+ xReturn = pdFAIL;
+ traceQUEUE_PEEK_FROM_ISR_FAILED(pxQueue);
+ }
+ }
+ portCLEAR_INTERRUPT_MASK_FROM_ISR(uxSavedInterruptStatus);
+
+ return xReturn;
+}
+/*-----------------------------------------------------------*/
+
+UBaseType_t uxQueueMessagesWaiting(const QueueHandle_t xQueue)
+{
+ UBaseType_t uxReturn;
+
+ configASSERT(xQueue);
+
+ taskENTER_CRITICAL();
+ {
+ uxReturn = ((Queue_t *)xQueue)->uxMessagesWaiting;
+ }
+ taskEXIT_CRITICAL();
+
+ return uxReturn;
+} /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not
+ pointer. */
+/*-----------------------------------------------------------*/
+
+UBaseType_t uxQueueSpacesAvailable(const QueueHandle_t xQueue)
+{
+ UBaseType_t uxReturn;
+ Queue_t *const pxQueue = xQueue;
+
+ configASSERT(pxQueue);
+
+ taskENTER_CRITICAL();
+ {
+ uxReturn = pxQueue->uxLength - pxQueue->uxMessagesWaiting;
+ }
+ taskEXIT_CRITICAL();
+
+ return uxReturn;
+} /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not
+ pointer. */
+/*-----------------------------------------------------------*/
+
+UBaseType_t uxQueueMessagesWaitingFromISR(const QueueHandle_t xQueue)
+{
+ UBaseType_t uxReturn;
+ Queue_t *const pxQueue = xQueue;
+
+ configASSERT(pxQueue);
+ uxReturn = pxQueue->uxMessagesWaiting;
+
+ return uxReturn;
+} /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not
+ pointer. */
+/*-----------------------------------------------------------*/
+
/*
 * Delete a queue: unregister it from the queue registry (when the registry
 * is enabled) and release its storage.  Memory is only freed when the
 * build configuration allows the queue to have been dynamically allocated;
 * when both allocation schemes are enabled, the queue's own
 * ucStaticallyAllocated flag decides.  Statically allocated queues are
 * left untouched.
 */
void vQueueDelete(QueueHandle_t xQueue)
{
    Queue_t *const pxQueue = xQueue;

    configASSERT(pxQueue);
    traceQUEUE_DELETE(pxQueue);

#if (configQUEUE_REGISTRY_SIZE > 0)
    {
        vQueueUnregisterQueue(pxQueue);
    }
#endif

#if ( \
    (configSUPPORT_DYNAMIC_ALLOCATION == 1) && \
    (configSUPPORT_STATIC_ALLOCATION == 0))
    {
        /* The queue can only have been allocated dynamically - free it
        again. */
        vPortFree(pxQueue);
    }
#elif ( \
    (configSUPPORT_DYNAMIC_ALLOCATION == 1) && \
    (configSUPPORT_STATIC_ALLOCATION == 1))
    {
        /* The queue could have been allocated statically or dynamically, so
        check before attempting to free the memory. */
        if (pxQueue->ucStaticallyAllocated == (uint8_t)pdFALSE) {
            vPortFree(pxQueue);
        } else {
            mtCOVERAGE_TEST_MARKER();
        }
    }
#else
    {
        /* The queue must have been statically allocated, so is not going to be
        deleted.  Avoid compiler warnings about the unused parameter. */
        (void)pxQueue;
    }
#endif /* configSUPPORT_DYNAMIC_ALLOCATION */
}
+/*-----------------------------------------------------------*/
+
#if (configUSE_TRACE_FACILITY == 1)

/* Trace-facility accessor: return the identifier previously assigned to the
queue with vQueueSetQueueNumber(). */
UBaseType_t uxQueueGetQueueNumber(QueueHandle_t xQueue)
{
    Queue_t *const pxQueue = xQueue;

    return pxQueue->uxQueueNumber;
}

#endif /* configUSE_TRACE_FACILITY */
+/*-----------------------------------------------------------*/
+
#if (configUSE_TRACE_FACILITY == 1)

/* Trace-facility mutator: tag the queue with an identifier that trace tools
can later read back with uxQueueGetQueueNumber(). */
void vQueueSetQueueNumber(QueueHandle_t xQueue, UBaseType_t uxQueueNumber)
{
    Queue_t *const pxQueue = xQueue;

    pxQueue->uxQueueNumber = uxQueueNumber;
}

#endif /* configUSE_TRACE_FACILITY */
+/*-----------------------------------------------------------*/
+
#if (configUSE_TRACE_FACILITY == 1)

/* Trace-facility accessor: report the queue's type byte (queue, mutex,
semaphore, set, ...). */
uint8_t ucQueueGetQueueType(QueueHandle_t xQueue)
{
    Queue_t *const pxQueue = xQueue;

    return pxQueue->ucQueueType;
}

#endif /* configUSE_TRACE_FACILITY */
+/*-----------------------------------------------------------*/
+
#if (configUSE_MUTEXES == 1)

/* When a task that caused a mutex holder to inherit a priority times out,
the holder must disinherit - but only down to the highest priority of any
task still waiting on the same mutex.  Return that priority, or
tskIDLE_PRIORITY when nobody is waiting. */
static UBaseType_t prvGetDisinheritPriorityAfterTimeout(
    const Queue_t *const pxQueue)
{
    if (listCURRENT_LIST_LENGTH(&(pxQueue->xTasksWaitingToReceive)) == 0U) {
        return tskIDLE_PRIORITY;
    }

    /* The event list is priority ordered with the item value stored as
    configMAX_PRIORITIES minus the task priority, so invert the head
    entry's value to recover the waiting task's priority. */
    return (UBaseType_t)configMAX_PRIORITIES -
        (UBaseType_t)listGET_ITEM_VALUE_OF_HEAD_ENTRY(
               &(pxQueue->xTasksWaitingToReceive));
}

#endif /* configUSE_MUTEXES */
+/*-----------------------------------------------------------*/
+
/* Copy one item into the queue.  Must be called from within a critical
section.  When uxItemSize is 0 no data is copied; for a mutex this instead
releases the holder (potentially disinheriting priority).  xPosition selects
the destination: queueSEND_TO_BACK writes at pcWriteTo, otherwise
(front/overwrite) the item is written at u.xQueue.pcReadFrom, and
queueOVERWRITE additionally keeps the message count unchanged.  Returns the
value of xTaskPriorityDisinherit() when a mutex was released, pdFALSE
otherwise. */
static BaseType_t prvCopyDataToQueue(
    Queue_t *const pxQueue,
    const void *pvItemToQueue,
    const BaseType_t xPosition)
{
    BaseType_t xReturn = pdFALSE;
    UBaseType_t uxMessagesWaiting;

    /* This function is called from a critical section. */

    uxMessagesWaiting = pxQueue->uxMessagesWaiting;

    if (pxQueue->uxItemSize == (UBaseType_t)0) {
#if (configUSE_MUTEXES == 1)
        {
            if (pxQueue->uxQueueType == queueQUEUE_IS_MUTEX) {
                /* The mutex is no longer being held. */
                xReturn =
                    xTaskPriorityDisinherit(pxQueue->u.xSemaphore.xMutexHolder);
                pxQueue->u.xSemaphore.xMutexHolder = NULL;
            } else {
                mtCOVERAGE_TEST_MARKER();
            }
        }
#endif /* configUSE_MUTEXES */
    } else if (xPosition == queueSEND_TO_BACK) {
        (void)memcpy(
            (void *)pxQueue->pcWriteTo,
            pvItemToQueue,
            (size_t)pxQueue
                ->uxItemSize); /*lint !e961 !e418 !e9087 MISRA exception as the
                                  casts are only redundant for some ports, plus
                                  previous logic ensures a null pointer can only
                                  be passed to memcpy() if the copy size is 0.
                                  Cast to void required by function signature
                                  and safe as no alignment requirement and copy
                                  length specified in bytes. */
        /* Advance the write pointer, wrapping back to the start of the
        storage area when it reaches the end. */
        pxQueue->pcWriteTo +=
            pxQueue->uxItemSize; /*lint !e9016 Pointer arithmetic on char types
                                    ok, especially in this use case where it is
                                    the clearest way of conveying intent. */
        if (pxQueue->pcWriteTo >=
            pxQueue->u.xQueue
                .pcTail) /*lint !e946 MISRA exception justified as comparison of
                            pointers is the cleanest solution. */
        {
            pxQueue->pcWriteTo = pxQueue->pcHead;
        } else {
            mtCOVERAGE_TEST_MARKER();
        }
    } else {
        (void)memcpy(
            (void *)pxQueue->u.xQueue.pcReadFrom,
            pvItemToQueue,
            (size_t)pxQueue
                ->uxItemSize); /*lint !e961 !e9087 !e418 MISRA exception as the
                                  casts are only redundant for some ports. Cast
                                  to void required by function signature and
                                  safe as no alignment requirement and copy
                                  length specified in bytes. Assert checks null
                                  pointer only used when length is 0. */
        /* Move the read pointer back one slot (wrapping to the last slot
        when it passes the start) so the new item becomes the front. */
        pxQueue->u.xQueue.pcReadFrom -= pxQueue->uxItemSize;
        if (pxQueue->u.xQueue.pcReadFrom <
            pxQueue
                ->pcHead) /*lint !e946 MISRA exception justified as comparison
                             of pointers is the cleanest solution. */
        {
            pxQueue->u.xQueue.pcReadFrom =
                (pxQueue->u.xQueue.pcTail - pxQueue->uxItemSize);
        } else {
            mtCOVERAGE_TEST_MARKER();
        }

        if (xPosition == queueOVERWRITE) {
            if (uxMessagesWaiting > (UBaseType_t)0) {
                /* An item is not being added but overwritten, so subtract
                one from the recorded number of items in the queue so when
                one is added again below the number of recorded items remains
                correct. */
                --uxMessagesWaiting;
            } else {
                mtCOVERAGE_TEST_MARKER();
            }
        } else {
            mtCOVERAGE_TEST_MARKER();
        }
    }

    pxQueue->uxMessagesWaiting = uxMessagesWaiting + (UBaseType_t)1;

    return xReturn;
}
+/*-----------------------------------------------------------*/
+
/* Copy the item at the front of the queue into pvBuffer.  Called from
within a critical section.  pcReadFrom trails the next unread item, so it is
advanced (with wrap-around) BEFORE the copy is performed.  When uxItemSize
is 0 (semaphore/mutex) there is nothing to copy. */
static void prvCopyDataFromQueue(Queue_t *const pxQueue, void *const pvBuffer)
{
    if (pxQueue->uxItemSize != (UBaseType_t)0) {
        pxQueue->u.xQueue.pcReadFrom +=
            pxQueue->uxItemSize; /*lint !e9016 Pointer arithmetic on char types
                                    ok, especially in this use case where it is
                                    the clearest way of conveying intent. */
        if (pxQueue->u.xQueue.pcReadFrom >=
            pxQueue->u.xQueue
                .pcTail) /*lint !e946 MISRA exception justified as use of the
                            relational operator is the cleanest solutions. */
        {
            pxQueue->u.xQueue.pcReadFrom = pxQueue->pcHead;
        } else {
            mtCOVERAGE_TEST_MARKER();
        }
        (void)memcpy(
            (void *)pvBuffer,
            (void *)pxQueue->u.xQueue.pcReadFrom,
            (size_t)pxQueue
                ->uxItemSize); /*lint !e961 !e418 !e9087 MISRA exception as the
                                  casts are only redundant for some ports. Also
                                  previous logic ensures a null pointer can only
                                  be passed to memcpy() when the count is 0.
                                  Cast to void required by function signature
                                  and safe as no alignment requirement and copy
                                  length specified in bytes. */
    }
}
+/*-----------------------------------------------------------*/
+
/* Unlock a previously locked queue.  cTxLock/cRxLock record how many items
were added/removed while the queue was locked (event lists could not be
updated during that window).  For each recorded operation a task waiting on
the opposite event list is unblocked (or the owning queue set notified), and
a deferred yield is requested via vTaskMissedYield() because the scheduler
is suspended. */
static void prvUnlockQueue(Queue_t *const pxQueue)
{
    /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. */

    /* The lock counts contains the number of extra data items placed or
    removed from the queue while the queue was locked.  When a queue is
    locked items can be added or removed, but the event lists cannot be
    updated. */
    taskENTER_CRITICAL();
    {
        int8_t cTxLock = pxQueue->cTxLock;

        /* See if data was added to the queue while it was locked. */
        while (cTxLock > queueLOCKED_UNMODIFIED) {
            /* Data was posted while the queue was locked.  Are any tasks
            blocked waiting for data to become available? */
#if (configUSE_QUEUE_SETS == 1)
            {
                if (pxQueue->pxQueueSetContainer != NULL) {
                    if (prvNotifyQueueSetContainer(pxQueue) != pdFALSE) {
                        /* The queue is a member of a queue set, and posting to
                        the queue set caused a higher priority task to unblock.
                        A context switch is required. */
                        vTaskMissedYield();
                    } else {
                        mtCOVERAGE_TEST_MARKER();
                    }
                } else {
                    /* Tasks that are removed from the event list will get
                    added to the pending ready list as the scheduler is still
                    suspended. */
                    if (listLIST_IS_EMPTY(&(pxQueue->xTasksWaitingToReceive)) ==
                        pdFALSE) {
                        if (xTaskRemoveFromEventList(&(
                                pxQueue->xTasksWaitingToReceive)) != pdFALSE) {
                            /* The task waiting has a higher priority so record
                            that a context switch is required. */
                            vTaskMissedYield();
                        } else {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    } else {
                        break;
                    }
                }
            }
#else /* configUSE_QUEUE_SETS */
            {
                /* Tasks that are removed from the event list will get added to
                the pending ready list as the scheduler is still suspended. */
                if (listLIST_IS_EMPTY(&(pxQueue->xTasksWaitingToReceive)) ==
                    pdFALSE) {
                    if (xTaskRemoveFromEventList(
                            &(pxQueue->xTasksWaitingToReceive)) != pdFALSE) {
                        /* The task waiting has a higher priority so record that
                        a context switch is required. */
                        vTaskMissedYield();
                    } else {
                        mtCOVERAGE_TEST_MARKER();
                    }
                } else {
                    break;
                }
            }
#endif /* configUSE_QUEUE_SETS */

            --cTxLock;
        }

        pxQueue->cTxLock = queueUNLOCKED;
    }
    taskEXIT_CRITICAL();

    /* Do the same for the Rx lock. */
    taskENTER_CRITICAL();
    {
        int8_t cRxLock = pxQueue->cRxLock;

        while (cRxLock > queueLOCKED_UNMODIFIED) {
            if (listLIST_IS_EMPTY(&(pxQueue->xTasksWaitingToSend)) == pdFALSE) {
                if (xTaskRemoveFromEventList(&(pxQueue->xTasksWaitingToSend)) !=
                    pdFALSE) {
                    vTaskMissedYield();
                } else {
                    mtCOVERAGE_TEST_MARKER();
                }

                --cRxLock;
            } else {
                break;
            }
        }

        pxQueue->cRxLock = queueUNLOCKED;
    }
    taskEXIT_CRITICAL();
}
+/*-----------------------------------------------------------*/
+
+static BaseType_t prvIsQueueEmpty(const Queue_t *pxQueue)
+{
+ BaseType_t xReturn;
+
+ taskENTER_CRITICAL();
+ {
+ if (pxQueue->uxMessagesWaiting == (UBaseType_t)0) {
+ xReturn = pdTRUE;
+ } else {
+ xReturn = pdFALSE;
+ }
+ }
+ taskEXIT_CRITICAL();
+
+ return xReturn;
+}
+/*-----------------------------------------------------------*/
+
+BaseType_t xQueueIsQueueEmptyFromISR(const QueueHandle_t xQueue)
+{
+ BaseType_t xReturn;
+ Queue_t *const pxQueue = xQueue;
+
+ configASSERT(pxQueue);
+ if (pxQueue->uxMessagesWaiting == (UBaseType_t)0) {
+ xReturn = pdTRUE;
+ } else {
+ xReturn = pdFALSE;
+ }
+
+ return xReturn;
+} /*lint !e818 xQueue could not be pointer to const because it is a typedef. */
+/*-----------------------------------------------------------*/
+
+static BaseType_t prvIsQueueFull(const Queue_t *pxQueue)
+{
+ BaseType_t xReturn;
+
+ taskENTER_CRITICAL();
+ {
+ if (pxQueue->uxMessagesWaiting == pxQueue->uxLength) {
+ xReturn = pdTRUE;
+ } else {
+ xReturn = pdFALSE;
+ }
+ }
+ taskEXIT_CRITICAL();
+
+ return xReturn;
+}
+/*-----------------------------------------------------------*/
+
+BaseType_t xQueueIsQueueFullFromISR(const QueueHandle_t xQueue)
+{
+ BaseType_t xReturn;
+ Queue_t *const pxQueue = xQueue;
+
+ configASSERT(pxQueue);
+ if (pxQueue->uxMessagesWaiting == pxQueue->uxLength) {
+ xReturn = pdTRUE;
+ } else {
+ xReturn = pdFALSE;
+ }
+
+ return xReturn;
+} /*lint !e818 xQueue could not be pointer to const because it is a typedef. */
+/*-----------------------------------------------------------*/
+
#if (configUSE_CO_ROUTINES == 1)

/* Co-routine send.  A co-routine cannot block inside a called function, so
when the queue is full and xTicksToWait > 0 this places the co-routine on
the delayed list and returns errQUEUE_BLOCKED for the scheduler to act on.
Returns pdPASS when the item was copied, errQUEUE_YIELD when the copy also
woke a higher priority co-routine, or errQUEUE_FULL when there was no space
and no wait was requested.  Interrupts are disabled (not a critical-section
nest) around each step, with a window between the two sections. */
BaseType_t xQueueCRSend(
    QueueHandle_t xQueue,
    const void *pvItemToQueue,
    TickType_t xTicksToWait)
{
    BaseType_t xReturn;
    Queue_t *const pxQueue = xQueue;

    /* If the queue is already full we may have to block.  A critical section
    is required to prevent an interrupt removing something from the queue
    between the check to see if the queue is full and blocking on the queue. */
    portDISABLE_INTERRUPTS();
    {
        if (prvIsQueueFull(pxQueue) != pdFALSE) {
            /* The queue is full - do we want to block or just leave without
            posting? */
            if (xTicksToWait > (TickType_t)0) {
                /* As this is called from a coroutine we cannot block directly,
                but return indicating that we need to block. */
                vCoRoutineAddToDelayedList(
                    xTicksToWait, &(pxQueue->xTasksWaitingToSend));
                portENABLE_INTERRUPTS();
                return errQUEUE_BLOCKED;
            } else {
                portENABLE_INTERRUPTS();
                return errQUEUE_FULL;
            }
        }
    }
    portENABLE_INTERRUPTS();

    portDISABLE_INTERRUPTS();
    {
        if (pxQueue->uxMessagesWaiting < pxQueue->uxLength) {
            /* There is room in the queue, copy the data into the queue. */
            prvCopyDataToQueue(pxQueue, pvItemToQueue, queueSEND_TO_BACK);
            xReturn = pdPASS;

            /* Were any co-routines waiting for data to become available? */
            if (listLIST_IS_EMPTY(&(pxQueue->xTasksWaitingToReceive)) ==
                pdFALSE) {
                /* In this instance the co-routine could be placed directly
                into the ready list as we are within a critical section.
                Instead the same pending ready list mechanism is used as if
                the event were caused from within an interrupt. */
                if (xCoRoutineRemoveFromEventList(
                        &(pxQueue->xTasksWaitingToReceive)) != pdFALSE) {
                    /* The co-routine waiting has a higher priority so record
                    that a yield might be appropriate. */
                    xReturn = errQUEUE_YIELD;
                } else {
                    mtCOVERAGE_TEST_MARKER();
                }
            } else {
                mtCOVERAGE_TEST_MARKER();
            }
        } else {
            xReturn = errQUEUE_FULL;
        }
    }
    portENABLE_INTERRUPTS();

    return xReturn;
}

#endif /* configUSE_CO_ROUTINES */
+/*-----------------------------------------------------------*/
+
#if (configUSE_CO_ROUTINES == 1)

/* Co-routine receive.  Mirrors xQueueCRSend(): when the queue is empty and
xTicksToWait > 0 the co-routine is placed on the delayed list and
errQUEUE_BLOCKED is returned; otherwise the front item is copied into
pvBuffer (pdPASS, or errQUEUE_YIELD when a higher priority co-routine
waiting for space was woken), or pdFAIL when nothing was available.
NOTE(review): the no-wait empty case returns errQUEUE_FULL rather than an
"empty" error code - confirm against upstream before changing. */
BaseType_t xQueueCRReceive(
    QueueHandle_t xQueue,
    void *pvBuffer,
    TickType_t xTicksToWait)
{
    BaseType_t xReturn;
    Queue_t *const pxQueue = xQueue;

    /* If the queue is already empty we may have to block.  A critical section
    is required to prevent an interrupt adding something to the queue
    between the check to see if the queue is empty and blocking on the queue. */
    portDISABLE_INTERRUPTS();
    {
        if (pxQueue->uxMessagesWaiting == (UBaseType_t)0) {
            /* There are no messages in the queue, do we want to block or just
            leave with nothing? */
            if (xTicksToWait > (TickType_t)0) {
                /* As this is a co-routine we cannot block directly, but return
                indicating that we need to block. */
                vCoRoutineAddToDelayedList(
                    xTicksToWait, &(pxQueue->xTasksWaitingToReceive));
                portENABLE_INTERRUPTS();
                return errQUEUE_BLOCKED;
            } else {
                portENABLE_INTERRUPTS();
                return errQUEUE_FULL;
            }
        } else {
            mtCOVERAGE_TEST_MARKER();
        }
    }
    portENABLE_INTERRUPTS();

    portDISABLE_INTERRUPTS();
    {
        if (pxQueue->uxMessagesWaiting > (UBaseType_t)0) {
            /* Data is available from the queue.  pcReadFrom trails the next
            unread item, so advance (and wrap) it before copying. */
            pxQueue->u.xQueue.pcReadFrom += pxQueue->uxItemSize;
            if (pxQueue->u.xQueue.pcReadFrom >= pxQueue->u.xQueue.pcTail) {
                pxQueue->u.xQueue.pcReadFrom = pxQueue->pcHead;
            } else {
                mtCOVERAGE_TEST_MARKER();
            }
            --(pxQueue->uxMessagesWaiting);
            (void)memcpy(
                (void *)pvBuffer,
                (void *)pxQueue->u.xQueue.pcReadFrom,
                (unsigned)pxQueue->uxItemSize);

            xReturn = pdPASS;

            /* Were any co-routines waiting for space to become available? */
            if (listLIST_IS_EMPTY(&(pxQueue->xTasksWaitingToSend)) == pdFALSE) {
                /* In this instance the co-routine could be placed directly
                into the ready list as we are within a critical section.
                Instead the same pending ready list mechanism is used as if
                the event were caused from within an interrupt. */
                if (xCoRoutineRemoveFromEventList(
                        &(pxQueue->xTasksWaitingToSend)) != pdFALSE) {
                    xReturn = errQUEUE_YIELD;
                } else {
                    mtCOVERAGE_TEST_MARKER();
                }
            } else {
                mtCOVERAGE_TEST_MARKER();
            }
        } else {
            xReturn = pdFAIL;
        }
    }
    portENABLE_INTERRUPTS();

    return xReturn;
}

#endif /* configUSE_CO_ROUTINES */
+/*-----------------------------------------------------------*/
+
#if (configUSE_CO_ROUTINES == 1)

/* ISR-side co-routine send.  Copies the item when there is space (silently
does nothing otherwise - an ISR cannot block).  At most one co-routine is
woken per ISR: xCoRoutinePreviouslyWoken carries the "already woken" state
across calls, and the returned value is the updated state (pdTRUE once a
co-routine has been removed from the event list). */
BaseType_t xQueueCRSendFromISR(
    QueueHandle_t xQueue,
    const void *pvItemToQueue,
    BaseType_t xCoRoutinePreviouslyWoken)
{
    Queue_t *const pxQueue = xQueue;

    /* Cannot block within an ISR so if there is no space on the queue then
    exit without doing anything. */
    if (pxQueue->uxMessagesWaiting < pxQueue->uxLength) {
        prvCopyDataToQueue(pxQueue, pvItemToQueue, queueSEND_TO_BACK);

        /* We only want to wake one co-routine per ISR, so check that a
        co-routine has not already been woken. */
        if (xCoRoutinePreviouslyWoken == pdFALSE) {
            if (listLIST_IS_EMPTY(&(pxQueue->xTasksWaitingToReceive)) ==
                pdFALSE) {
                if (xCoRoutineRemoveFromEventList(
                        &(pxQueue->xTasksWaitingToReceive)) != pdFALSE) {
                    return pdTRUE;
                } else {
                    mtCOVERAGE_TEST_MARKER();
                }
            } else {
                mtCOVERAGE_TEST_MARKER();
            }
        } else {
            mtCOVERAGE_TEST_MARKER();
        }
    } else {
        mtCOVERAGE_TEST_MARKER();
    }

    return xCoRoutinePreviouslyWoken;
}

#endif /* configUSE_CO_ROUTINES */
+/*-----------------------------------------------------------*/
+
#if (configUSE_CO_ROUTINES == 1)

/* ISR-side co-routine receive.  Copies the front item into pvBuffer when
one is available and returns pdPASS, otherwise pdFAIL.  *pxCoRoutineWoken
limits wake-ups to one co-routine per ISR: it is set to pdTRUE the first
time a co-routine waiting for space is unblocked. */
BaseType_t xQueueCRReceiveFromISR(
    QueueHandle_t xQueue,
    void *pvBuffer,
    BaseType_t *pxCoRoutineWoken)
{
    BaseType_t xReturn;
    Queue_t *const pxQueue = xQueue;

    /* We cannot block from an ISR, so check there is data available. If
    not then just leave without doing anything. */
    if (pxQueue->uxMessagesWaiting > (UBaseType_t)0) {
        /* Copy the data from the queue.  pcReadFrom trails the next unread
        item, so advance (and wrap) it before copying. */
        pxQueue->u.xQueue.pcReadFrom += pxQueue->uxItemSize;
        if (pxQueue->u.xQueue.pcReadFrom >= pxQueue->u.xQueue.pcTail) {
            pxQueue->u.xQueue.pcReadFrom = pxQueue->pcHead;
        } else {
            mtCOVERAGE_TEST_MARKER();
        }
        --(pxQueue->uxMessagesWaiting);
        (void)memcpy(
            (void *)pvBuffer,
            (void *)pxQueue->u.xQueue.pcReadFrom,
            (unsigned)pxQueue->uxItemSize);

        if ((*pxCoRoutineWoken) == pdFALSE) {
            if (listLIST_IS_EMPTY(&(pxQueue->xTasksWaitingToSend)) == pdFALSE) {
                if (xCoRoutineRemoveFromEventList(
                        &(pxQueue->xTasksWaitingToSend)) != pdFALSE) {
                    *pxCoRoutineWoken = pdTRUE;
                } else {
                    mtCOVERAGE_TEST_MARKER();
                }
            } else {
                mtCOVERAGE_TEST_MARKER();
            }
        } else {
            mtCOVERAGE_TEST_MARKER();
        }

        xReturn = pdPASS;
    } else {
        xReturn = pdFAIL;
    }

    return xReturn;
}

#endif /* configUSE_CO_ROUTINES */
+/*-----------------------------------------------------------*/
+
#if (configQUEUE_REGISTRY_SIZE > 0)

/* Record the queue in the first free registry slot so debuggers can show
the queue by name.  A NULL pcQueueName marks a free slot.  When the
registry is full the queue is silently left unregistered, matching the
original behaviour. */
void vQueueAddToRegistry(
    QueueHandle_t xQueue,
    const char *pcQueueName) /*lint !e971 Unqualified char types are allowed for
                                strings and single characters only. */
{
    UBaseType_t uxSlot;

    for (uxSlot = (UBaseType_t)0U;
         uxSlot < (UBaseType_t)configQUEUE_REGISTRY_SIZE;
         uxSlot++) {
        if (xQueueRegistry[uxSlot].pcQueueName != NULL) {
            /* Slot in use - keep looking. */
            mtCOVERAGE_TEST_MARKER();
            continue;
        }

        /* Store the information on this queue. */
        xQueueRegistry[uxSlot].pcQueueName = pcQueueName;
        xQueueRegistry[uxSlot].xHandle = xQueue;

        traceQUEUE_REGISTRY_ADD(xQueue, pcQueueName);
        break;
    }
}

#endif /* configQUEUE_REGISTRY_SIZE */
+/*-----------------------------------------------------------*/
+
#if (configQUEUE_REGISTRY_SIZE > 0)

/* Look up the name a queue was registered under, or NULL when the handle is
not in the registry.  The search is not protected against concurrent
registry updates, matching the original behaviour. */
const char *pcQueueGetName(
    QueueHandle_t xQueue) /*lint !e971 Unqualified char types are allowed for
                             strings and single characters only. */
{
    UBaseType_t uxSlot;
    const char *pcName = NULL; /*lint !e971 Unqualified char types are allowed
                                  for strings and single characters only. */

    for (uxSlot = (UBaseType_t)0U;
         uxSlot < (UBaseType_t)configQUEUE_REGISTRY_SIZE;
         uxSlot++) {
        if (xQueueRegistry[uxSlot].xHandle != xQueue) {
            mtCOVERAGE_TEST_MARKER();
            continue;
        }

        pcName = xQueueRegistry[uxSlot].pcQueueName;
        break;
    }

    return pcName;
} /*lint !e818 xQueue cannot be a pointer to const because it is a typedef. */

#endif /* configQUEUE_REGISTRY_SIZE */
+/*-----------------------------------------------------------*/
+
#if (configQUEUE_REGISTRY_SIZE > 0)

/* Remove a queue from the registry, if present.  Both the name and the
handle are cleared so the slot reads as free and a re-added queue cannot
appear in the registry twice. */
void vQueueUnregisterQueue(QueueHandle_t xQueue)
{
    UBaseType_t uxSlot;

    for (uxSlot = (UBaseType_t)0U;
         uxSlot < (UBaseType_t)configQUEUE_REGISTRY_SIZE;
         uxSlot++) {
        if (xQueueRegistry[uxSlot].xHandle != xQueue) {
            mtCOVERAGE_TEST_MARKER();
            continue;
        }

        /* A NULL name marks the slot free again. */
        xQueueRegistry[uxSlot].pcQueueName = NULL;

        /* Clear the handle too so a stale match cannot occur if the queue
        is registered, unregistered, then registered again. */
        xQueueRegistry[uxSlot].xHandle = (QueueHandle_t)0;
        break;
    }

} /*lint !e818 xQueue could not be pointer to const because it is a typedef. */

#endif /* configQUEUE_REGISTRY_SIZE */
+/*-----------------------------------------------------------*/
+
#if (configUSE_TIMERS == 1)

/* Kernel-internal helper (used by the timer task): if the queue is empty,
place the calling task on the queue's receive event list for xTicksToWait
ticks (or indefinitely).  The queue is locked around the check so a
concurrent post unblocks the caller as soon as the queue is unlocked. */
void vQueueWaitForMessageRestricted(
    QueueHandle_t xQueue,
    TickType_t xTicksToWait,
    const BaseType_t xWaitIndefinitely)
{
    Queue_t *const pxQueue = xQueue;

    /* This function should not be called by application code hence the
    'Restricted' in its name.  It is not part of the public API.  It is
    designed for use by kernel code, and has special calling requirements.
    It can result in vListInsert() being called on a list that can only
    possibly ever have one item in it, so the list will be fast, but even
    so it should be called with the scheduler locked and not from a critical
    section. */

    /* Only do anything if there are no messages in the queue.  This function
    will not actually cause the task to block, just place it on a blocked
    list.  It will not block until the scheduler is unlocked - at which
    time a yield will be performed.  If an item is added to the queue while
    the queue is locked, and the calling task blocks on the queue, then the
    calling task will be immediately unblocked when the queue is unlocked. */
    prvLockQueue(pxQueue);
    if (pxQueue->uxMessagesWaiting == (UBaseType_t)0U) {
        /* There is nothing in the queue, block for the specified period. */
        vTaskPlaceOnEventListRestricted(
            &(pxQueue->xTasksWaitingToReceive),
            xTicksToWait,
            xWaitIndefinitely);
    } else {
        mtCOVERAGE_TEST_MARKER();
    }
    prvUnlockQueue(pxQueue);
}

#endif /* configUSE_TIMERS */
+/*-----------------------------------------------------------*/
+
#if ((configUSE_QUEUE_SETS == 1) && (configSUPPORT_DYNAMIC_ALLOCATION == 1))

/* Create a queue set.  A set is simply a queue whose items are the handles
of member queues/semaphores that contain data, so the generic queue
constructor is reused with a Queue_t pointer as the item type.  Returns
NULL on allocation failure, as per xQueueGenericCreate(). */
QueueSetHandle_t xQueueCreateSet(const UBaseType_t uxEventQueueLength)
{
    return xQueueGenericCreate(
        uxEventQueueLength,
        (UBaseType_t)sizeof(Queue_t *),
        queueQUEUE_TYPE_SET);
}

#endif /* configUSE_QUEUE_SETS */
+/*-----------------------------------------------------------*/
+
#if (configUSE_QUEUE_SETS == 1)

/* Add a queue or semaphore to a queue set.  Fails (pdFAIL) when the member
already belongs to a set, or when it is not empty - existing items would
never be reflected in the set.  Returns pdPASS on success. */
BaseType_t xQueueAddToSet(
    QueueSetMemberHandle_t xQueueOrSemaphore,
    QueueSetHandle_t xQueueSet)
{
    Queue_t *const pxMember = (Queue_t *)xQueueOrSemaphore;
    BaseType_t xReturn;

    taskENTER_CRITICAL();
    {
        if (pxMember->pxQueueSetContainer != NULL) {
            /* Cannot add a queue/semaphore to more than one queue set. */
            xReturn = pdFAIL;
        } else if (pxMember->uxMessagesWaiting != (UBaseType_t)0) {
            /* Cannot add a queue/semaphore to a queue set if there are
            already items in the queue/semaphore. */
            xReturn = pdFAIL;
        } else {
            pxMember->pxQueueSetContainer = xQueueSet;
            xReturn = pdPASS;
        }
    }
    taskEXIT_CRITICAL();

    return xReturn;
}

#endif /* configUSE_QUEUE_SETS */
+/*-----------------------------------------------------------*/
+
#if (configUSE_QUEUE_SETS == 1)

/* Remove a queue or semaphore from the given queue set.  Fails (pdFAIL)
when the member does not belong to that set, or when it still holds items -
the set would retain stale pending events for it.  Returns pdPASS on
success. */
BaseType_t xQueueRemoveFromSet(
    QueueSetMemberHandle_t xQueueOrSemaphore,
    QueueSetHandle_t xQueueSet)
{
    Queue_t *const pxMember = (Queue_t *)xQueueOrSemaphore;
    BaseType_t xReturn;

    if (pxMember->pxQueueSetContainer != xQueueSet) {
        /* The queue was not a member of the set. */
        xReturn = pdFAIL;
    } else if (pxMember->uxMessagesWaiting != (UBaseType_t)0) {
        /* It is dangerous to remove a queue from a set when the queue is
        not empty because the queue set will still hold pending events for
        the queue. */
        xReturn = pdFAIL;
    } else {
        taskENTER_CRITICAL();
        {
            /* The queue is no longer contained in the set. */
            pxMember->pxQueueSetContainer = NULL;
        }
        taskEXIT_CRITICAL();
        xReturn = pdPASS;
    }

    return xReturn;
} /*lint !e818 xQueueSet could not be declared as pointing to const as it is a
     typedef. */

#endif /* configUSE_QUEUE_SETS */
+/*-----------------------------------------------------------*/
+
#if (configUSE_QUEUE_SETS == 1)

/* Block on the set for up to xTicksToWait and return the handle of a member
queue/semaphore that contains data, or NULL on timeout.  Implemented as a
plain receive from the set's own queue of member handles. */
QueueSetMemberHandle_t xQueueSelectFromSet(
    QueueSetHandle_t xQueueSet,
    TickType_t const xTicksToWait)
{
    QueueSetMemberHandle_t xMember = NULL;

    (void)xQueueReceive(
        (QueueHandle_t)xQueueSet,
        &xMember,
        xTicksToWait); /*lint !e961 Casting from one typedef to another is not
                          redundant. */
    return xMember;
}

#endif /* configUSE_QUEUE_SETS */
+/*-----------------------------------------------------------*/
+
#if (configUSE_QUEUE_SETS == 1)

/* ISR-safe variant of xQueueSelectFromSet(): return the handle of a member
that contains data, or NULL when the set is empty.  Never blocks. */
QueueSetMemberHandle_t xQueueSelectFromSetFromISR(QueueSetHandle_t xQueueSet)
{
    QueueSetMemberHandle_t xMember = NULL;

    (void)xQueueReceiveFromISR(
        (QueueHandle_t)xQueueSet,
        &xMember,
        NULL); /*lint !e961 Casting from one typedef to another is not
                  redundant. */
    return xMember;
}

#endif /* configUSE_QUEUE_SETS */
+/*-----------------------------------------------------------*/
+
#if (configUSE_QUEUE_SETS == 1)

/* Post pxQueue's handle into its containing queue set, signalling that the
member queue holds data.  Must be called from a critical section.  When the
set is unlocked a task waiting on it may be unblocked (returning pdTRUE to
request a context switch); when the set is locked the event is deferred by
bumping the set's cTxLock, to be drained by prvUnlockQueue(). */
static BaseType_t prvNotifyQueueSetContainer(const Queue_t *const pxQueue)
{
    Queue_t *pxQueueSetContainer = pxQueue->pxQueueSetContainer;
    BaseType_t xReturn = pdFALSE;

    /* This function must be called form a critical section. */

    configASSERT(pxQueueSetContainer);
    configASSERT(
        pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength);

    if (pxQueueSetContainer->uxMessagesWaiting <
        pxQueueSetContainer->uxLength) {
        const int8_t cTxLock = pxQueueSetContainer->cTxLock;

        traceQUEUE_SEND(pxQueueSetContainer);

        /* The data copied is the handle of the queue that contains data. */
        xReturn = prvCopyDataToQueue(
            pxQueueSetContainer, &pxQueue, queueSEND_TO_BACK);

        if (cTxLock == queueUNLOCKED) {
            if (listLIST_IS_EMPTY(&(
                    pxQueueSetContainer->xTasksWaitingToReceive)) == pdFALSE) {
                if (xTaskRemoveFromEventList(
                        &(pxQueueSetContainer->xTasksWaitingToReceive)) !=
                    pdFALSE) {
                    /* The task waiting has a higher priority. */
                    xReturn = pdTRUE;
                } else {
                    mtCOVERAGE_TEST_MARKER();
                }
            } else {
                mtCOVERAGE_TEST_MARKER();
            }
        } else {
            /* The set is locked: record the pending event so the unlock
            path can wake a waiting task later. */
            pxQueueSetContainer->cTxLock = (int8_t)(cTxLock + 1);
        }
    } else {
        mtCOVERAGE_TEST_MARKER();
    }

    return xReturn;
}

#endif /* configUSE_QUEUE_SETS */
diff --git a/product/rcar/src/CMSIS-FreeRTOS/Source/tasks.c b/product/rcar/src/CMSIS-FreeRTOS/Source/tasks.c
new file mode 100644
index 00000000..ea565032
--- /dev/null
+++ b/product/rcar/src/CMSIS-FreeRTOS/Source/tasks.c
@@ -0,0 +1,5356 @@
+/*
+ * FreeRTOS Kernel V10.3.1
+ * Copyright (C) 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * http://www.FreeRTOS.org
+ * http://aws.amazon.com/freertos
+ *
+ * 1 tab == 4 spaces!
+ */
+
+/* Standard includes. */
+#include <stdlib.h>
+#include <string.h>
+
+/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
+all the API functions to use the MPU wrappers. That should only be done when
+task.h is included from an application file. */
+#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+
+/* FreeRTOS includes. */
+#include "FreeRTOS.h"
+#include "stack_macros.h"
+#include "task.h"
+#include "timers.h"
+
+/* Lint e9021, e961 and e750 are suppressed as a MISRA exception justified
+because the MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined
+for the header files above, but not in this file, in order to generate the
+correct privileged Vs unprivileged linkage and placement. */
+#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e961 !e750 !e9021. */
+
+/* Set configUSE_STATS_FORMATTING_FUNCTIONS to 2 to include the stats formatting
+functions but without including stdio.h here. */
+#if (configUSE_STATS_FORMATTING_FUNCTIONS == 1)
+/* At the bottom of this file are two optional functions that can be used
+to generate human readable text from the raw data generated by the
+uxTaskGetSystemState() function. Note the formatting functions are provided
+for convenience only, and are NOT considered part of the kernel. */
+# include <stdio.h>
+#endif /* configUSE_STATS_FORMATTING_FUNCTIONS == 1 ) */
+
+#if (configUSE_PREEMPTION == 0)
+/* If the cooperative scheduler is being used then a yield should not be
+performed just because a higher priority task has been woken. */
+# define taskYIELD_IF_USING_PREEMPTION()
+#else
+# define taskYIELD_IF_USING_PREEMPTION() portYIELD_WITHIN_API()
+#endif
+
+/* Values that can be assigned to the ucNotifyState member of the TCB. */
+#define taskNOT_WAITING_NOTIFICATION ((uint8_t)0)
+#define taskWAITING_NOTIFICATION ((uint8_t)1)
+#define taskNOTIFICATION_RECEIVED ((uint8_t)2)
+
+/*
+ * The value used to fill the stack of a task when the task is created. This
+ * is used purely for checking the high water mark for tasks.
+ */
+#define tskSTACK_FILL_BYTE (0xa5U)
+
+/* Bits used to record how a task's stack and TCB were allocated. */
+#define tskDYNAMICALLY_ALLOCATED_STACK_AND_TCB ((uint8_t)0)
+#define tskSTATICALLY_ALLOCATED_STACK_ONLY ((uint8_t)1)
+#define tskSTATICALLY_ALLOCATED_STACK_AND_TCB ((uint8_t)2)
+
+/* If any of the following are set then task stacks are filled with a known
+value so the high water mark can be determined. If none of the following are
+set then don't fill the stack so there is no unnecessary dependency on memset.
+*/
+#if ( \
+ (configCHECK_FOR_STACK_OVERFLOW > 1) || (configUSE_TRACE_FACILITY == 1) || \
+ (INCLUDE_uxTaskGetStackHighWaterMark == 1) || \
+ (INCLUDE_uxTaskGetStackHighWaterMark2 == 1))
+# define tskSET_NEW_STACKS_TO_KNOWN_VALUE 1
+#else
+# define tskSET_NEW_STACKS_TO_KNOWN_VALUE 0
+#endif
+
+/*
+ * Macros used by vTaskList to indicate which state a task is in.
+ */
+#define tskRUNNING_CHAR ('X')
+#define tskBLOCKED_CHAR ('B')
+#define tskREADY_CHAR ('R')
+#define tskDELETED_CHAR ('D')
+#define tskSUSPENDED_CHAR ('S')
+
+/*
+ * Some kernel aware debuggers require the data the debugger needs access to be
+ * global, rather than file scope.
+ */
+#ifdef portREMOVE_STATIC_QUALIFIER
+# define static
+#endif
+
+/* The name allocated to the Idle task. This can be overridden by defining
+configIDLE_TASK_NAME in FreeRTOSConfig.h. */
+#ifndef configIDLE_TASK_NAME
+# define configIDLE_TASK_NAME "IDLE"
+#endif
+
+#if (configUSE_PORT_OPTIMISED_TASK_SELECTION == 0)
+
+/* If configUSE_PORT_OPTIMISED_TASK_SELECTION is 0 then task selection is
+performed in a generic way that is not optimised to any particular
+microcontroller architecture. */
+
+/* uxTopReadyPriority holds the priority of the highest priority ready
+state task. */
+# define taskRECORD_READY_PRIORITY(uxPriority) \
+ { \
+ if ((uxPriority) > uxTopReadyPriority) { \
+ uxTopReadyPriority = (uxPriority); \
+ } \
+ } /* taskRECORD_READY_PRIORITY */
+
+/*-----------------------------------------------------------*/
+
+# define taskSELECT_HIGHEST_PRIORITY_TASK() \
+ { \
+ UBaseType_t uxTopPriority = uxTopReadyPriority; \
+\
+ /* Find the highest priority queue that contains ready tasks. */ \
+ while (listLIST_IS_EMPTY(&(pxReadyTasksLists[uxTopPriority]))) { \
+ configASSERT(uxTopPriority); \
+ --uxTopPriority; \
+ } \
+\
+ /* listGET_OWNER_OF_NEXT_ENTRY indexes through the list, so the \
+ tasks of \
+ the same priority get an equal share of the processor time. */ \
+ listGET_OWNER_OF_NEXT_ENTRY( \
+ pxCurrentTCB, &(pxReadyTasksLists[uxTopPriority])); \
+ uxTopReadyPriority = uxTopPriority; \
+ } /* taskSELECT_HIGHEST_PRIORITY_TASK */
+
+/*-----------------------------------------------------------*/
+
+/* Define away taskRESET_READY_PRIORITY() and portRESET_READY_PRIORITY() as
+they are only required when a port optimised method of task selection is
+being used. */
+# define taskRESET_READY_PRIORITY(uxPriority)
+# define portRESET_READY_PRIORITY(uxPriority, uxTopReadyPriority)
+
+#else /* configUSE_PORT_OPTIMISED_TASK_SELECTION */
+
+/* If configUSE_PORT_OPTIMISED_TASK_SELECTION is 1 then task selection is
+performed in a way that is tailored to the particular microcontroller
+architecture being used. */
+
+/* A port optimised version is provided. Call the port defined macros. */
+# define taskRECORD_READY_PRIORITY(uxPriority) \
+ portRECORD_READY_PRIORITY(uxPriority, uxTopReadyPriority)
+
+/*-----------------------------------------------------------*/
+
+# define taskSELECT_HIGHEST_PRIORITY_TASK() \
+ { \
+ UBaseType_t uxTopPriority; \
+\
+ /* Find the highest priority list that contains ready tasks. */ \
+ portGET_HIGHEST_PRIORITY(uxTopPriority, uxTopReadyPriority); \
+ configASSERT( \
+ listCURRENT_LIST_LENGTH(&(pxReadyTasksLists[uxTopPriority])) > \
+ 0); \
+ listGET_OWNER_OF_NEXT_ENTRY( \
+ pxCurrentTCB, &(pxReadyTasksLists[uxTopPriority])); \
+ } /* taskSELECT_HIGHEST_PRIORITY_TASK() */
+
+/*-----------------------------------------------------------*/
+
+/* A port optimised version is provided, call it only if the TCB being reset
+is being referenced from a ready list. If it is referenced from a delayed
+or suspended list then it won't be in a ready list. */
+# define taskRESET_READY_PRIORITY(uxPriority) \
+ { \
+ if (listCURRENT_LIST_LENGTH(&(pxReadyTasksLists[(uxPriority)])) == \
+ (UBaseType_t)0) { \
+ portRESET_READY_PRIORITY((uxPriority), (uxTopReadyPriority)); \
+ } \
+ }
+
+#endif /* configUSE_PORT_OPTIMISED_TASK_SELECTION */
+
+/*-----------------------------------------------------------*/
+
+/* pxDelayedTaskList and pxOverflowDelayedTaskList are switched when the tick
+count overflows. */
+#define taskSWITCH_DELAYED_LISTS() \
+ { \
+ List_t *pxTemp; \
+\
+ /* The delayed tasks list should be empty when the lists are switched. \
+ */ \
+ configASSERT((listLIST_IS_EMPTY(pxDelayedTaskList))); \
+\
+ pxTemp = pxDelayedTaskList; \
+ pxDelayedTaskList = pxOverflowDelayedTaskList; \
+ pxOverflowDelayedTaskList = pxTemp; \
+ xNumOfOverflows++; \
+ prvResetNextTaskUnblockTime(); \
+ }
+
+/*-----------------------------------------------------------*/
+
+/*
+ * Place the task represented by pxTCB into the appropriate ready list for
+ * the task. It is inserted at the end of the list.
+ */
+#define prvAddTaskToReadyList(pxTCB) \
+ traceMOVED_TASK_TO_READY_STATE(pxTCB); \
+ taskRECORD_READY_PRIORITY((pxTCB)->uxPriority); \
+ vListInsertEnd( \
+ &(pxReadyTasksLists[(pxTCB)->uxPriority]), \
+ &((pxTCB)->xStateListItem)); \
+ tracePOST_MOVED_TASK_TO_READY_STATE(pxTCB)
+/*-----------------------------------------------------------*/
+
+/*
+ * Several functions take a TaskHandle_t parameter that can optionally be NULL,
+ * where NULL is used to indicate that the handle of the currently executing
+ * task should be used in place of the parameter. This macro simply checks to
+ * see if the parameter is NULL and returns a pointer to the appropriate TCB.
+ */
+#define prvGetTCBFromHandle(pxHandle) \
+ (((pxHandle) == NULL) ? pxCurrentTCB : (pxHandle))
+
+/* The item value of the event list item is normally used to hold the priority
+of the task to which it belongs (coded to allow it to be held in reverse
+priority order). However, it is occasionally borrowed for other purposes. It
+is important its value is not updated due to a task priority change while it is
+being used for another purpose. The following bit definition is used to inform
+the scheduler that the value should not be changed - in which case it is the
+responsibility of whichever module is using the value to ensure it gets set back
+to its original value when it is released. */
+#if (configUSE_16_BIT_TICKS == 1)
+# define taskEVENT_LIST_ITEM_VALUE_IN_USE 0x8000U
+#else
+# define taskEVENT_LIST_ITEM_VALUE_IN_USE 0x80000000UL
+#endif
+
+/*
+ * Task control block. A task control block (TCB) is allocated for each task,
+ * and stores task state information, including a pointer to the task's context
+ * (the task's run time environment, including register values)
+ */
+typedef struct tskTaskControlBlock /* The old naming convention is used to
+                                      prevent breaking kernel aware debuggers.
+                                    */
+{
+    volatile StackType_t
+        *pxTopOfStack; /*< Points to the location of the last item placed on the
+                          tasks stack. THIS MUST BE THE FIRST MEMBER OF THE TCB
+                          STRUCT. */
+
+#if (portUSING_MPU_WRAPPERS == 1)
+    xMPU_SETTINGS xMPUSettings; /*< The MPU settings are defined as part of the
+                                   port layer. THIS MUST BE THE SECOND MEMBER
+                                   OF THE TCB STRUCT. */
+#endif
+
+    ListItem_t xStateListItem; /*< The list that the state list item of a task
+                                  is referenced from denotes the state of that
+                                  task (Ready, Blocked, Suspended ). */
+    ListItem_t
+        xEventListItem; /*< Used to reference a task from an event list. */
+    UBaseType_t
+        uxPriority; /*< The priority of the task. 0 is the lowest priority. */
+    StackType_t *pxStack; /*< Points to the start of the stack. */
+    char pcTaskName[ configMAX_TASK_NAME_LEN ];/*< Descriptive name given to the task when created. Facilitates debugging only. */ /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
+
+#if ((portSTACK_GROWTH > 0) || (configRECORD_STACK_HIGH_ADDRESS == 1))
+    StackType_t
+        *pxEndOfStack; /*< Points to the highest valid address for the stack. */
+#endif
+
+#if (portCRITICAL_NESTING_IN_TCB == 1)
+    UBaseType_t uxCriticalNesting; /*< Holds the critical section nesting depth
+                                      for ports that do not maintain their own
+                                      count in the port layer. */
+#endif
+
+#if (configUSE_TRACE_FACILITY == 1)
+    UBaseType_t
+        uxTCBNumber; /*< Stores a number that increments each time a TCB is
+                        created. It allows debuggers to determine when a task
+                        has been deleted and then recreated. */
+    UBaseType_t uxTaskNumber; /*< Stores a number specifically for use by third
+                                 party trace code. */
+#endif
+
+#if (configUSE_MUTEXES == 1)
+    UBaseType_t
+        uxBasePriority; /*< The priority last assigned to the task - used by the
+                           priority inheritance mechanism. */
+    UBaseType_t uxMutexesHeld; /*< Number of mutexes currently held by the
+                                  task. */
+#endif
+
+#if (configUSE_APPLICATION_TASK_TAG == 1)
+    TaskHookFunction_t pxTaskTag; /*< Application-defined tag/hook attached to
+                                     the task. */
+#endif
+
+#if (configNUM_THREAD_LOCAL_STORAGE_POINTERS > 0)
+    void *pvThreadLocalStoragePointers[configNUM_THREAD_LOCAL_STORAGE_POINTERS];
+#endif
+
+#if (configGENERATE_RUN_TIME_STATS == 1)
+    uint32_t ulRunTimeCounter; /*< Stores the amount of time the task has spent
+                                  in the Running state. */
+#endif
+
+#if (configUSE_NEWLIB_REENTRANT == 1)
+    /* Allocate a Newlib reent structure that is specific to this task.
+    Note Newlib support has been included by popular demand, but is not
+    used by the FreeRTOS maintainers themselves. FreeRTOS is not
+    responsible for resulting newlib operation. User must be familiar with
+    newlib and must provide system-wide implementations of the necessary
+    stubs. Be warned that (at the time of writing) the current newlib design
+    implements a system-wide malloc() that must be provided with locks.
+
+    See the third party link
+    http://www.nadler.com/embedded/newlibAndFreeRTOS.html for additional
+    information. */
+    struct _reent xNewLib_reent;
+#endif
+
+#if (configUSE_TASK_NOTIFICATIONS == 1)
+    volatile uint32_t ulNotifiedValue; /*< The task's notification value. */
+    volatile uint8_t ucNotifyState; /*< One of the taskNOT_WAITING_NOTIFICATION
+                                       / taskWAITING_NOTIFICATION /
+                                       taskNOTIFICATION_RECEIVED values. */
+#endif
+
+/* See the comments in FreeRTOS.h with the definition of
+tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE. */
+#if (tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0) /*lint !e731 !e9029 Macro \
+                                                        has been consolidated \
+                                                        for readability \
+                                                        reasons. */
+    uint8_t ucStaticallyAllocated; /*< Set to pdTRUE if the task was statically
+                                      allocated to ensure no attempt is made to
+                                      free the memory. */
+#endif
+
+#if (INCLUDE_xTaskAbortDelay == 1)
+    uint8_t ucDelayAborted; /*< Set when a blocked state is ended by
+                               xTaskAbortDelay() rather than a timeout. */
+#endif
+
+#if (configUSE_POSIX_ERRNO == 1)
+    int iTaskErrno; /*< Per-task errno, swapped into FreeRTOS_errno on context
+                       switch. */
+#endif
+
+} tskTCB;
+
+/* The old tskTCB name is maintained above then typedefed to the new TCB_t name
+below to enable the use of older kernel aware debuggers. */
+typedef tskTCB TCB_t;
+
+/*lint -save -e956 A manual analysis and inspection has been used to determine
+which static variables must be declared volatile. */
+PRIVILEGED_DATA TCB_t *volatile pxCurrentTCB = NULL;
+
+/* Lists for ready and blocked tasks. --------------------
+xDelayedTaskList1 and xDelayedTaskList2 could be moved to function scope but
+doing so breaks some kernel aware debuggers and debuggers that rely on removing
+the static qualifier. */
+PRIVILEGED_DATA static List_t
+ pxReadyTasksLists[configMAX_PRIORITIES]; /*< Prioritised ready tasks. */
+PRIVILEGED_DATA static List_t xDelayedTaskList1; /*< Delayed tasks. */
+PRIVILEGED_DATA static List_t
+ xDelayedTaskList2; /*< Delayed tasks (two lists are used - one for delays
+ that have overflowed the current tick count. */
+PRIVILEGED_DATA static List_t
+ *volatile pxDelayedTaskList; /*< Points to the delayed task list currently
+ being used. */
+PRIVILEGED_DATA static List_t
+ *volatile pxOverflowDelayedTaskList; /*< Points to the delayed task list
+ currently being used to hold tasks
+ that have overflowed the current
+ tick count. */
+PRIVILEGED_DATA static List_t
+ xPendingReadyList; /*< Tasks that have been readied while the scheduler was
+ suspended. They will be moved to the ready list when
+ the scheduler is resumed. */
+
+#if (INCLUDE_vTaskDelete == 1)
+
+PRIVILEGED_DATA static List_t
+ xTasksWaitingTermination; /*< Tasks that have been deleted - but their
+ memory not yet freed. */
+PRIVILEGED_DATA static volatile UBaseType_t uxDeletedTasksWaitingCleanUp =
+ (UBaseType_t)0U;
+
+#endif
+
+#if (INCLUDE_vTaskSuspend == 1)
+
+PRIVILEGED_DATA static List_t
+ xSuspendedTaskList; /*< Tasks that are currently suspended. */
+
+#endif
+
+/* Global POSIX errno. Its value is changed upon context switching to match
+the errno of the currently running task. */
+#if (configUSE_POSIX_ERRNO == 1)
+int FreeRTOS_errno = 0;
+#endif
+
+/* Other file private variables. --------------------------------*/
+PRIVILEGED_DATA static volatile UBaseType_t uxCurrentNumberOfTasks =
+ (UBaseType_t)0U;
+PRIVILEGED_DATA static volatile TickType_t xTickCount =
+ (TickType_t)configINITIAL_TICK_COUNT;
+PRIVILEGED_DATA static volatile UBaseType_t uxTopReadyPriority =
+ tskIDLE_PRIORITY;
+PRIVILEGED_DATA static volatile BaseType_t xSchedulerRunning = pdFALSE;
+PRIVILEGED_DATA static volatile TickType_t xPendedTicks = (TickType_t)0U;
+PRIVILEGED_DATA static volatile BaseType_t xYieldPending = pdFALSE;
+PRIVILEGED_DATA static volatile BaseType_t xNumOfOverflows = (BaseType_t)0;
+PRIVILEGED_DATA static UBaseType_t uxTaskNumber = (UBaseType_t)0U;
+PRIVILEGED_DATA static volatile TickType_t xNextTaskUnblockTime =
+ (TickType_t)0U; /* Initialised to portMAX_DELAY before the scheduler starts.
+ */
+PRIVILEGED_DATA static TaskHandle_t xIdleTaskHandle =
+ NULL; /*< Holds the handle of the idle task. The idle task is created
+ automatically when the scheduler is started. */
+
+/* Context switches are held pending while the scheduler is suspended. Also,
+interrupts must not manipulate the xStateListItem of a TCB, or any of the
+lists the xStateListItem can be referenced from, if the scheduler is suspended.
+If an interrupt needs to unblock a task while the scheduler is suspended then it
+moves the task's event list item into the xPendingReadyList, ready for the
+kernel to move the task from the pending ready list into the real ready list
+when the scheduler is unsuspended. The pending ready list itself can only be
+accessed from a critical section. */
+PRIVILEGED_DATA static volatile UBaseType_t uxSchedulerSuspended =
+ (UBaseType_t)pdFALSE;
+
+#if (configGENERATE_RUN_TIME_STATS == 1)
+
+/* Do not move these variables to function scope as doing so prevents the
+code working with debuggers that need to remove the static qualifier. */
+PRIVILEGED_DATA static uint32_t ulTaskSwitchedInTime =
+ 0UL; /*< Holds the value of a timer/counter the last time a task was
+ switched in. */
+PRIVILEGED_DATA static uint32_t ulTotalRunTime =
+ 0UL; /*< Holds the total amount of execution time as defined by the run time
+ counter clock. */
+
+#endif
+
+/*lint -restore */
+
+/*-----------------------------------------------------------*/
+
+/* Callback function prototypes. --------------------------*/
+#if (configCHECK_FOR_STACK_OVERFLOW > 0)
+
+extern void vApplicationStackOverflowHook(TaskHandle_t xTask, char *pcTaskName);
+
+#endif
+
+#if (configUSE_TICK_HOOK > 0)
+
+extern void vApplicationTickHook(
+ void); /*lint !e526 Symbol not defined as it is an application callback. */
+
+#endif
+
+#if (configSUPPORT_STATIC_ALLOCATION == 1)
+
+extern void vApplicationGetIdleTaskMemory(
+ StaticTask_t **ppxIdleTaskTCBBuffer,
+ StackType_t **ppxIdleTaskStackBuffer,
+ uint32_t *pulIdleTaskStackSize); /*lint !e526 Symbol not defined as it is an
+ application callback. */
+
+#endif
+
+/* File private functions. --------------------------------*/
+
+/**
+ * Utility task that simply returns pdTRUE if the task referenced by xTask is
+ * currently in the Suspended state, or pdFALSE if the task referenced by xTask
+ * is in any other state.
+ */
+#if (INCLUDE_vTaskSuspend == 1)
+
+static BaseType_t prvTaskIsTaskSuspended(const TaskHandle_t xTask)
+ PRIVILEGED_FUNCTION;
+
+#endif /* INCLUDE_vTaskSuspend */
+
+/*
+ * Utility to ready all the lists used by the scheduler. This is called
+ * automatically upon the creation of the first task.
+ */
+static void prvInitialiseTaskLists(void) PRIVILEGED_FUNCTION;
+
+/*
+ * The idle task, which as all tasks is implemented as a never ending loop.
+ * The idle task is automatically created and added to the ready lists upon
+ * creation of the first user task.
+ *
+ * The portTASK_FUNCTION_PROTO() macro is used to allow port/compiler specific
+ * language extensions. The equivalent prototype for this function is:
+ *
+ * void prvIdleTask( void *pvParameters );
+ *
+ */
+static portTASK_FUNCTION_PROTO(prvIdleTask, pvParameters);
+
+/*
+ * Utility to free all memory allocated by the scheduler to hold a TCB,
+ * including the stack pointed to by the TCB.
+ *
+ * This does not free memory allocated by the task itself (i.e. memory
+ * allocated by calls to pvPortMalloc from within the tasks application code).
+ */
+#if (INCLUDE_vTaskDelete == 1)
+
+static void prvDeleteTCB(TCB_t *pxTCB) PRIVILEGED_FUNCTION;
+
+#endif
+
+/*
+ * Used only by the idle task. This checks to see if anything has been placed
+ * in the list of tasks waiting to be deleted. If so the task is cleaned up
+ * and its TCB deleted.
+ */
+static void prvCheckTasksWaitingTermination(void) PRIVILEGED_FUNCTION;
+
+/*
+ * The currently executing task is entering the Blocked state. Add the task to
+ * either the current or the overflow delayed task list.
+ */
+static void prvAddCurrentTaskToDelayedList(
+ TickType_t xTicksToWait,
+ const BaseType_t xCanBlockIndefinitely) PRIVILEGED_FUNCTION;
+
+/*
+ * Fills a TaskStatus_t structure with information on each task that is
+ * referenced from the pxList list (which may be a ready list, a delayed list,
+ * a suspended list, etc.).
+ *
+ * THIS FUNCTION IS INTENDED FOR DEBUGGING ONLY, AND SHOULD NOT BE CALLED FROM
+ * NORMAL APPLICATION CODE.
+ */
+#if (configUSE_TRACE_FACILITY == 1)
+
+static UBaseType_t prvListTasksWithinSingleList(
+ TaskStatus_t *pxTaskStatusArray,
+ List_t *pxList,
+ eTaskState eState) PRIVILEGED_FUNCTION;
+
+#endif
+
+/*
+ * Searches pxList for a task with name pcNameToQuery - returning a handle to
+ * the task if it is found, or NULL if the task is not found.
+ */
+#if (INCLUDE_xTaskGetHandle == 1)
+
+static TCB_t *prvSearchForNameWithinSingleList(
+ List_t *pxList,
+ const char pcNameToQuery[]) PRIVILEGED_FUNCTION;
+
+#endif
+
+/*
+ * When a task is created, the stack of the task is filled with a known value.
+ * This function determines the 'high water mark' of the task stack by
+ * determining how much of the stack remains at the original preset value.
+ */
+#if ( \
+ (configUSE_TRACE_FACILITY == 1) || \
+ (INCLUDE_uxTaskGetStackHighWaterMark == 1) || \
+ (INCLUDE_uxTaskGetStackHighWaterMark2 == 1))
+
+static configSTACK_DEPTH_TYPE prvTaskCheckFreeStackSpace(
+ const uint8_t *pucStackByte) PRIVILEGED_FUNCTION;
+
+#endif
+
+/*
+ * Return the amount of time, in ticks, that will pass before the kernel will
+ * next move a task from the Blocked state to the Running state.
+ *
+ * This conditional compilation should use inequality to 0, not equality to 1.
+ * This is to ensure portSUPPRESS_TICKS_AND_SLEEP() can be called when user
+ * defined low power mode implementations require configUSE_TICKLESS_IDLE to be
+ * set to a value other than 1.
+ */
+#if (configUSE_TICKLESS_IDLE != 0)
+
+static TickType_t prvGetExpectedIdleTime(void) PRIVILEGED_FUNCTION;
+
+#endif
+
+/*
+ * Set xNextTaskUnblockTime to the time at which the next Blocked state task
+ * will exit the Blocked state.
+ */
+static void prvResetNextTaskUnblockTime(void);
+
+#if ( \
+ (configUSE_TRACE_FACILITY == 1) && \
+ (configUSE_STATS_FORMATTING_FUNCTIONS > 0))
+
+/*
+ * Helper function used to pad task names with spaces when printing out
+ * human readable tables of task information.
+ */
+static char *prvWriteNameToBuffer(char *pcBuffer, const char *pcTaskName)
+ PRIVILEGED_FUNCTION;
+
+#endif
+
+/*
+ * Called after a Task_t structure has been allocated either statically or
+ * dynamically to fill in the structure's members.
+ */
+static void prvInitialiseNewTask(
+ TaskFunction_t pxTaskCode,
+ const char *const pcName, /*lint !e971 Unqualified char types are allowed
+ for strings and single characters only. */
+ const uint32_t ulStackDepth,
+ void *const pvParameters,
+ UBaseType_t uxPriority,
+ TaskHandle_t *const pxCreatedTask,
+ TCB_t *pxNewTCB,
+ const MemoryRegion_t *const xRegions) PRIVILEGED_FUNCTION;
+
+/*
+ * Called after a new task has been created and initialised to place the task
+ * under the control of the scheduler.
+ */
+static void prvAddNewTaskToReadyList(TCB_t *pxNewTCB) PRIVILEGED_FUNCTION;
+
+/*
+ * freertos_tasks_c_additions_init() should only be called if the user definable
+ * macro FREERTOS_TASKS_C_ADDITIONS_INIT() is defined, as that is the only macro
+ * called by the function.
+ */
+#ifdef FREERTOS_TASKS_C_ADDITIONS_INIT
+
+static void freertos_tasks_c_additions_init(void) PRIVILEGED_FUNCTION;
+
+#endif
+
+/*-----------------------------------------------------------*/
+
+#if (configSUPPORT_STATIC_ALLOCATION == 1)
+
+/*
+ * Create a task using caller-supplied memory: pxTaskBuffer holds the TCB and
+ * puxStackBuffer the stack, so no heap allocation is performed here.
+ * Returns the handle of the newly created task, or NULL if either buffer is
+ * NULL.
+ */
+TaskHandle_t xTaskCreateStatic(
+    TaskFunction_t pxTaskCode,
+    const char *const pcName, /*lint !e971 Unqualified char types are allowed
+                                 for strings and single characters only. */
+    const uint32_t ulStackDepth,
+    void *const pvParameters,
+    UBaseType_t uxPriority,
+    StackType_t *const puxStackBuffer,
+    StaticTask_t *const pxTaskBuffer)
+{
+    TCB_t *pxNewTCB;
+    TaskHandle_t xReturn;
+
+    configASSERT(puxStackBuffer != NULL);
+    configASSERT(pxTaskBuffer != NULL);
+
+# if (configASSERT_DEFINED == 1)
+    {
+        /* Sanity check that the size of the structure used to declare a
+        variable of type StaticTask_t equals the size of the real task
+        structure. */
+        volatile size_t xSize = sizeof(StaticTask_t);
+        configASSERT(xSize == sizeof(TCB_t));
+        (void)xSize; /* Prevent lint warning when configASSERT() is not used. */
+    }
+# endif /* configASSERT_DEFINED */
+
+    if ((pxTaskBuffer != NULL) && (puxStackBuffer != NULL)) {
+        /* The memory used for the task's TCB and stack are passed into this
+        function - use them. */
+        pxNewTCB = (TCB_t *)
+            pxTaskBuffer; /*lint !e740 !e9087 Unusual cast is ok as the
+                             structures are designed to have the same alignment,
+                             and the size is checked by an assert. */
+        pxNewTCB->pxStack = (StackType_t *)puxStackBuffer;
+
+# if (tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0) /*lint !e731 !e9029 \
+                                                         Macro has been \
+                                                         consolidated for \
+                                                         readability \
+                                                         reasons. */
+        {
+            /* Tasks can be created statically or dynamically, so note this
+            task was created statically in case the task is later deleted. */
+            pxNewTCB->ucStaticallyAllocated =
+                tskSTATICALLY_ALLOCATED_STACK_AND_TCB;
+        }
+# endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */
+
+        prvInitialiseNewTask(
+            pxTaskCode,
+            pcName,
+            ulStackDepth,
+            pvParameters,
+            uxPriority,
+            &xReturn,
+            pxNewTCB,
+            NULL);
+        prvAddNewTaskToReadyList(pxNewTCB);
+    } else {
+        xReturn = NULL;
+    }
+
+    return xReturn;
+}
+
+#endif /* SUPPORT_STATIC_ALLOCATION */
+/*-----------------------------------------------------------*/
+
+#if ((portUSING_MPU_WRAPPERS == 1) && (configSUPPORT_STATIC_ALLOCATION == 1))
+
+/*
+ * MPU variant of xTaskCreateStatic(): create a restricted task whose TCB,
+ * stack and memory regions are all supplied by the caller through
+ * pxTaskDefinition, so no heap allocation is performed.  Returns pdPASS on
+ * success, otherwise errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY.
+ */
+BaseType_t xTaskCreateRestrictedStatic(
+    const TaskParameters_t *const pxTaskDefinition,
+    TaskHandle_t *pxCreatedTask)
+{
+    TCB_t *pxNewTCB;
+    BaseType_t xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY;
+
+    configASSERT(pxTaskDefinition->puxStackBuffer != NULL);
+    configASSERT(pxTaskDefinition->pxTaskBuffer != NULL);
+
+    if ((pxTaskDefinition->puxStackBuffer != NULL) &&
+        (pxTaskDefinition->pxTaskBuffer != NULL)) {
+        /* Allocate space for the TCB. Where the memory comes from depends
+        on the implementation of the port malloc function and whether or
+        not static allocation is being used. */
+        pxNewTCB = (TCB_t *)pxTaskDefinition->pxTaskBuffer;
+
+        /* Store the stack location in the TCB. */
+        pxNewTCB->pxStack = pxTaskDefinition->puxStackBuffer;
+
+# if (tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0)
+        {
+            /* Tasks can be created statically or dynamically, so note this
+            task was created statically in case the task is later deleted. */
+            pxNewTCB->ucStaticallyAllocated =
+                tskSTATICALLY_ALLOCATED_STACK_AND_TCB;
+        }
+# endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */
+
+        prvInitialiseNewTask(
+            pxTaskDefinition->pvTaskCode,
+            pxTaskDefinition->pcName,
+            (uint32_t)pxTaskDefinition->usStackDepth,
+            pxTaskDefinition->pvParameters,
+            pxTaskDefinition->uxPriority,
+            pxCreatedTask,
+            pxNewTCB,
+            pxTaskDefinition->xRegions);
+
+        prvAddNewTaskToReadyList(pxNewTCB);
+        xReturn = pdPASS;
+    }
+
+    return xReturn;
+}
+
+#endif /* ( portUSING_MPU_WRAPPERS == 1 ) && ( configSUPPORT_STATIC_ALLOCATION \
+ == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ((portUSING_MPU_WRAPPERS == 1) && (configSUPPORT_DYNAMIC_ALLOCATION == 1))
+
+/*
+ * Create an MPU restricted task.  The stack and memory regions are supplied
+ * by the caller in pxTaskDefinition, while the TCB is allocated dynamically
+ * here.  Returns pdPASS on success, or
+ * errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY if the TCB could not be allocated.
+ */
+BaseType_t xTaskCreateRestricted(
+    const TaskParameters_t *const pxTaskDefinition,
+    TaskHandle_t *pxCreatedTask)
+{
+    TCB_t *pxNewTCB;
+    BaseType_t xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY;
+
+    configASSERT(pxTaskDefinition->puxStackBuffer);
+
+    if (pxTaskDefinition->puxStackBuffer != NULL) {
+        /* Allocate space for the TCB. Where the memory comes from depends
+        on the implementation of the port malloc function and whether or
+        not static allocation is being used. */
+        pxNewTCB = (TCB_t *)pvPortMalloc(sizeof(TCB_t));
+
+        if (pxNewTCB != NULL) {
+            /* Store the stack location in the TCB. */
+            pxNewTCB->pxStack = pxTaskDefinition->puxStackBuffer;
+
+# if (tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0)
+            {
+                /* Tasks can be created statically or dynamically, so note
+                this task had a statically allocated stack in case it is
+                later deleted. The TCB was allocated dynamically. */
+                pxNewTCB->ucStaticallyAllocated =
+                    tskSTATICALLY_ALLOCATED_STACK_ONLY;
+            }
+# endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */
+
+            prvInitialiseNewTask(
+                pxTaskDefinition->pvTaskCode,
+                pxTaskDefinition->pcName,
+                (uint32_t)pxTaskDefinition->usStackDepth,
+                pxTaskDefinition->pvParameters,
+                pxTaskDefinition->uxPriority,
+                pxCreatedTask,
+                pxNewTCB,
+                pxTaskDefinition->xRegions);
+
+            prvAddNewTaskToReadyList(pxNewTCB);
+            xReturn = pdPASS;
+        }
+    }
+
+    return xReturn;
+}
+
+#endif /* portUSING_MPU_WRAPPERS */
+/*-----------------------------------------------------------*/
+
+#if (configSUPPORT_DYNAMIC_ALLOCATION == 1)
+
+/*
+ * Create a new task with dynamically allocated TCB and stack.  The order of
+ * the two allocations depends on the direction of stack growth so that the
+ * stack cannot grow into the TCB.  On success the task is added to the
+ * ready list and pdPASS is returned; on allocation failure anything already
+ * allocated is freed and errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY is returned.
+ */
+BaseType_t xTaskCreate(
+    TaskFunction_t pxTaskCode,
+    const char *const pcName, /*lint !e971 Unqualified char types are allowed
+                                 for strings and single characters only. */
+    const configSTACK_DEPTH_TYPE usStackDepth,
+    void *const pvParameters,
+    UBaseType_t uxPriority,
+    TaskHandle_t *const pxCreatedTask)
+{
+    TCB_t *pxNewTCB;
+    BaseType_t xReturn;
+
+/* If the stack grows down then allocate the stack then the TCB so the stack
+does not grow into the TCB. Likewise if the stack grows up then allocate
+the TCB then the stack. */
+# if (portSTACK_GROWTH > 0)
+    {
+        /* Allocate space for the TCB. Where the memory comes from depends on
+        the implementation of the port malloc function and whether or not static
+        allocation is being used. */
+        pxNewTCB = (TCB_t *)pvPortMalloc(sizeof(TCB_t));
+
+        if (pxNewTCB != NULL) {
+            /* Allocate space for the stack used by the task being created.
+            The base of the stack memory stored in the TCB so the task can
+            be deleted later if required. */
+            pxNewTCB->pxStack = (StackType_t *)pvPortMalloc((
+                ((size_t)usStackDepth) *
+                sizeof(StackType_t))); /*lint !e961 MISRA exception as the casts
+                                          are only redundant for some ports. */
+
+            if (pxNewTCB->pxStack == NULL) {
+                /* Could not allocate the stack. Delete the allocated TCB. */
+                vPortFree(pxNewTCB);
+                pxNewTCB = NULL;
+            }
+        }
+    }
+# else /* portSTACK_GROWTH */
+    {
+        StackType_t *pxStack;
+
+        /* Allocate space for the stack used by the task being created. */
+        pxStack = pvPortMalloc(
+            (((size_t)usStackDepth) *
+             sizeof(StackType_t))); /*lint !e9079 All values returned by
+                                       pvPortMalloc() have at least the
+                                       alignment required by the MCU's stack and
+                                       this allocation is the stack. */
+
+        if (pxStack != NULL) {
+            /* Allocate space for the TCB. */
+            pxNewTCB = (TCB_t *)pvPortMalloc(sizeof(
+                TCB_t)); /*lint !e9087 !e9079 All values returned by
+                            pvPortMalloc() have at least the alignment required
+                            by the MCU's stack, and the first member of TCB_t is
+                            always a pointer to the task's stack. */
+
+            if (pxNewTCB != NULL) {
+                /* Store the stack location in the TCB. */
+                pxNewTCB->pxStack = pxStack;
+            } else {
+                /* The stack cannot be used as the TCB was not created. Free
+                it again. */
+                vPortFree(pxStack);
+            }
+        } else {
+            pxNewTCB = NULL;
+        }
+    }
+# endif /* portSTACK_GROWTH */
+
+    if (pxNewTCB != NULL) {
+# if (tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0) /*lint !e9029 !e731 \
+                                                         Macro has been \
+                                                         consolidated for \
+                                                         readability \
+                                                         reasons. */
+        {
+            /* Tasks can be created statically or dynamically, so note this
+            task was created dynamically in case it is later deleted. */
+            pxNewTCB->ucStaticallyAllocated =
+                tskDYNAMICALLY_ALLOCATED_STACK_AND_TCB;
+        }
+# endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */
+
+        prvInitialiseNewTask(
+            pxTaskCode,
+            pcName,
+            (uint32_t)usStackDepth,
+            pvParameters,
+            uxPriority,
+            pxCreatedTask,
+            pxNewTCB,
+            NULL);
+        prvAddNewTaskToReadyList(pxNewTCB);
+        xReturn = pdPASS;
+    } else {
+        xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY;
+    }
+
+    return xReturn;
+}
+
+#endif /* configSUPPORT_DYNAMIC_ALLOCATION */
+/*-----------------------------------------------------------*/
+
+/* Common task initialisation used by all the xTaskCreate*() variants:
+optionally fill the stack with a known byte for debugging, compute and
+alignment-check the top-of-stack pointer (direction depends on
+portSTACK_GROWTH), copy and NUL-terminate the task name, clamp the priority
+to configMAX_PRIORITIES - 1, initialise the state/event list items and any
+optional per-task fields selected by the configuration, seed the stack frame
+via pxPortInitialiseStack() so the task appears to have been interrupted by
+the scheduler, and finally pass the handle out through pxCreatedTask (if it
+is not NULL). */
+static void prvInitialiseNewTask(
+    TaskFunction_t pxTaskCode,
+    const char *const pcName, /*lint !e971 Unqualified char types are allowed
+                                for strings and single characters only. */
+    const uint32_t ulStackDepth,
+    void *const pvParameters,
+    UBaseType_t uxPriority,
+    TaskHandle_t *const pxCreatedTask,
+    TCB_t *pxNewTCB,
+    const MemoryRegion_t *const xRegions)
+{
+    StackType_t *pxTopOfStack;
+    UBaseType_t x;
+
+#if (portUSING_MPU_WRAPPERS == 1)
+    /* Should the task be created in privileged mode? */
+    BaseType_t xRunPrivileged;
+    if ((uxPriority & portPRIVILEGE_BIT) != 0U) {
+        xRunPrivileged = pdTRUE;
+    } else {
+        xRunPrivileged = pdFALSE;
+    }
+    uxPriority &= ~portPRIVILEGE_BIT;
+#endif /* portUSING_MPU_WRAPPERS == 1 */
+
+/* Avoid dependency on memset() if it is not required. */
+#if (tskSET_NEW_STACKS_TO_KNOWN_VALUE == 1)
+    {
+        /* Fill the stack with a known value to assist debugging. */
+        (void)memset(
+            pxNewTCB->pxStack,
+            (int)tskSTACK_FILL_BYTE,
+            (size_t)ulStackDepth * sizeof(StackType_t));
+    }
+#endif /* tskSET_NEW_STACKS_TO_KNOWN_VALUE */
+
+/* Calculate the top of stack address.  This depends on whether the stack
+grows from high memory to low (as per the 80x86) or vice versa.
+portSTACK_GROWTH is used to make the result positive or negative as required
+by the port. */
+#if (portSTACK_GROWTH < 0)
+    {
+        pxTopOfStack = &(pxNewTCB->pxStack[ulStackDepth - (uint32_t)1]);
+        pxTopOfStack =
+            (StackType_t
+                 *)(((portPOINTER_SIZE_TYPE)pxTopOfStack) & (~((portPOINTER_SIZE_TYPE)portBYTE_ALIGNMENT_MASK))); /*lint !e923 !e9033 !e9078 MISRA exception.  Avoiding casts between pointers and integers is not practical.  Size differences accounted for using portPOINTER_SIZE_TYPE type.  Checked by assert(). */
+
+        /* Check the alignment of the calculated top of stack is correct. */
+        configASSERT(
+            (((portPOINTER_SIZE_TYPE)pxTopOfStack &
+              (portPOINTER_SIZE_TYPE)portBYTE_ALIGNMENT_MASK) == 0UL));
+
+# if (configRECORD_STACK_HIGH_ADDRESS == 1)
+        {
+            /* Also record the stack's high address, which may assist
+            debugging. */
+            pxNewTCB->pxEndOfStack = pxTopOfStack;
+        }
+# endif /* configRECORD_STACK_HIGH_ADDRESS */
+    }
+#else /* portSTACK_GROWTH */
+    {
+        pxTopOfStack = pxNewTCB->pxStack;
+
+        /* Check the alignment of the stack buffer is correct. */
+        configASSERT(
+            (((portPOINTER_SIZE_TYPE)pxNewTCB->pxStack &
+              (portPOINTER_SIZE_TYPE)portBYTE_ALIGNMENT_MASK) == 0UL));
+
+        /* The other extreme of the stack space is required if stack checking is
+        performed. */
+        pxNewTCB->pxEndOfStack =
+            pxNewTCB->pxStack + (ulStackDepth - (uint32_t)1);
+    }
+#endif /* portSTACK_GROWTH */
+
+    /* Store the task name in the TCB. */
+    if (pcName != NULL) {
+        for (x = (UBaseType_t)0; x < (UBaseType_t)configMAX_TASK_NAME_LEN;
+             x++) {
+            pxNewTCB->pcTaskName[x] = pcName[x];
+
+            /* Don't copy all configMAX_TASK_NAME_LEN if the string is shorter
+            than configMAX_TASK_NAME_LEN characters just in case the memory
+            after the string is not accessible (extremely unlikely). */
+            if (pcName[x] == (char)0x00) {
+                break;
+            } else {
+                mtCOVERAGE_TEST_MARKER();
+            }
+        }
+
+        /* Ensure the name string is terminated in the case that the string
+        length was greater or equal to configMAX_TASK_NAME_LEN. */
+        pxNewTCB->pcTaskName[configMAX_TASK_NAME_LEN - 1] = '\0';
+    } else {
+        /* The task has not been given a name, so just ensure there is a NULL
+        terminator when it is read out. */
+        pxNewTCB->pcTaskName[0] = 0x00;
+    }
+
+    /* This is used as an array index so must ensure it's not too large.  First
+    remove the privilege bit if one is present. */
+    if (uxPriority >= (UBaseType_t)configMAX_PRIORITIES) {
+        uxPriority = (UBaseType_t)configMAX_PRIORITIES - (UBaseType_t)1U;
+    } else {
+        mtCOVERAGE_TEST_MARKER();
+    }
+
+    pxNewTCB->uxPriority = uxPriority;
+#if (configUSE_MUTEXES == 1)
+    {
+        pxNewTCB->uxBasePriority = uxPriority;
+        pxNewTCB->uxMutexesHeld = 0;
+    }
+#endif /* configUSE_MUTEXES */
+
+    vListInitialiseItem(&(pxNewTCB->xStateListItem));
+    vListInitialiseItem(&(pxNewTCB->xEventListItem));
+
+    /* Set the pxNewTCB as a link back from the ListItem_t.  This is so we can
+    get back to the containing TCB from a generic item in a list. */
+    listSET_LIST_ITEM_OWNER(&(pxNewTCB->xStateListItem), pxNewTCB);
+
+    /* Event lists are always in priority order. */
+    listSET_LIST_ITEM_VALUE(
+        &(pxNewTCB->xEventListItem),
+        (TickType_t)configMAX_PRIORITIES -
+            (TickType_t)uxPriority); /*lint !e961 MISRA exception as the casts
+                                        are only redundant for some ports. */
+    listSET_LIST_ITEM_OWNER(&(pxNewTCB->xEventListItem), pxNewTCB);
+
+#if (portCRITICAL_NESTING_IN_TCB == 1)
+    {
+        pxNewTCB->uxCriticalNesting = (UBaseType_t)0U;
+    }
+#endif /* portCRITICAL_NESTING_IN_TCB */
+
+#if (configUSE_APPLICATION_TASK_TAG == 1)
+    {
+        pxNewTCB->pxTaskTag = NULL;
+    }
+#endif /* configUSE_APPLICATION_TASK_TAG */
+
+#if (configGENERATE_RUN_TIME_STATS == 1)
+    {
+        pxNewTCB->ulRunTimeCounter = 0UL;
+    }
+#endif /* configGENERATE_RUN_TIME_STATS */
+
+#if (portUSING_MPU_WRAPPERS == 1)
+    {
+        vPortStoreTaskMPUSettings(
+            &(pxNewTCB->xMPUSettings),
+            xRegions,
+            pxNewTCB->pxStack,
+            ulStackDepth);
+    }
+#else
+    {
+        /* Avoid compiler warning about unreferenced parameter. */
+        (void)xRegions;
+    }
+#endif
+
+#if (configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0)
+    {
+        for (x = 0; x < (UBaseType_t)configNUM_THREAD_LOCAL_STORAGE_POINTERS;
+             x++) {
+            pxNewTCB->pvThreadLocalStoragePointers[x] = NULL;
+        }
+    }
+#endif
+
+#if (configUSE_TASK_NOTIFICATIONS == 1)
+    {
+        pxNewTCB->ulNotifiedValue = 0;
+        pxNewTCB->ucNotifyState = taskNOT_WAITING_NOTIFICATION;
+    }
+#endif
+
+#if (configUSE_NEWLIB_REENTRANT == 1)
+    {
+        /* Initialise this task's Newlib reent structure.
+        See the third party link
+        http://www.nadler.com/embedded/newlibAndFreeRTOS.html for additional
+        information. */
+        _REENT_INIT_PTR((&(pxNewTCB->xNewLib_reent)));
+    }
+#endif
+
+#if (INCLUDE_xTaskAbortDelay == 1)
+    {
+        pxNewTCB->ucDelayAborted = pdFALSE;
+    }
+#endif
+
+/* Initialize the TCB stack to look as if the task was already running,
+but had been interrupted by the scheduler.  The return address is set
+to the start of the task function. Once the stack has been initialised
+the top of stack variable is updated. */
+#if (portUSING_MPU_WRAPPERS == 1)
+    {
+/* If the port has capability to detect stack overflow,
+pass the stack end address to the stack initialization
+function as well. */
+# if (portHAS_STACK_OVERFLOW_CHECKING == 1)
+        {
+# if (portSTACK_GROWTH < 0)
+            {
+                pxNewTCB->pxTopOfStack = pxPortInitialiseStack(
+                    pxTopOfStack,
+                    pxNewTCB->pxStack,
+                    pxTaskCode,
+                    pvParameters,
+                    xRunPrivileged);
+            }
+# else /* portSTACK_GROWTH */
+            {
+                pxNewTCB->pxTopOfStack = pxPortInitialiseStack(
+                    pxTopOfStack,
+                    pxNewTCB->pxEndOfStack,
+                    pxTaskCode,
+                    pvParameters,
+                    xRunPrivileged);
+            }
+# endif /* portSTACK_GROWTH */
+        }
+# else /* portHAS_STACK_OVERFLOW_CHECKING */
+        {
+            pxNewTCB->pxTopOfStack = pxPortInitialiseStack(
+                pxTopOfStack, pxTaskCode, pvParameters, xRunPrivileged);
+        }
+# endif /* portHAS_STACK_OVERFLOW_CHECKING */
+    }
+#else /* portUSING_MPU_WRAPPERS */
+    {
+/* If the port has capability to detect stack overflow,
+pass the stack end address to the stack initialization
+function as well. */
+# if (portHAS_STACK_OVERFLOW_CHECKING == 1)
+        {
+# if (portSTACK_GROWTH < 0)
+            {
+                pxNewTCB->pxTopOfStack = pxPortInitialiseStack(
+                    pxTopOfStack, pxNewTCB->pxStack, pxTaskCode, pvParameters);
+            }
+# else /* portSTACK_GROWTH */
+            {
+                pxNewTCB->pxTopOfStack = pxPortInitialiseStack(
+                    pxTopOfStack,
+                    pxNewTCB->pxEndOfStack,
+                    pxTaskCode,
+                    pvParameters);
+            }
+# endif /* portSTACK_GROWTH */
+        }
+# else /* portHAS_STACK_OVERFLOW_CHECKING */
+        {
+            pxNewTCB->pxTopOfStack =
+                pxPortInitialiseStack(pxTopOfStack, pxTaskCode, pvParameters);
+        }
+# endif /* portHAS_STACK_OVERFLOW_CHECKING */
+    }
+#endif /* portUSING_MPU_WRAPPERS */
+
+    if (pxCreatedTask != NULL) {
+        /* Pass the handle out in an anonymous way.  The handle can be used to
+        change the created task's priority, delete the created task, etc.*/
+        *pxCreatedTask = (TaskHandle_t)pxNewTCB;
+    } else {
+        mtCOVERAGE_TEST_MARKER();
+    }
+}
+/*-----------------------------------------------------------*/
+
+/* Place a freshly initialised task on the ready list, inside a critical
+section.  The very first task also triggers prvInitialiseTaskLists().  While
+the scheduler is stopped pxCurrentTCB tracks the highest-priority task
+created so far; once the scheduler is running, a yield is requested if the
+new task has a higher priority than the currently running task. */
+static void prvAddNewTaskToReadyList(TCB_t *pxNewTCB)
+{
+    /* Ensure interrupts don't access the task lists while the lists are being
+    updated. */
+    taskENTER_CRITICAL();
+    {
+        uxCurrentNumberOfTasks++;
+        if (pxCurrentTCB == NULL) {
+            /* There are no other tasks, or all the other tasks are in
+            the suspended state - make this the current task. */
+            pxCurrentTCB = pxNewTCB;
+
+            if (uxCurrentNumberOfTasks == (UBaseType_t)1) {
+                /* This is the first task to be created so do the preliminary
+                initialisation required.  We will not recover if this call
+                fails, but we will report the failure. */
+                prvInitialiseTaskLists();
+            } else {
+                mtCOVERAGE_TEST_MARKER();
+            }
+        } else {
+            /* If the scheduler is not already running, make this task the
+            current task if it is the highest priority task to be created
+            so far. */
+            if (xSchedulerRunning == pdFALSE) {
+                if (pxCurrentTCB->uxPriority <= pxNewTCB->uxPriority) {
+                    pxCurrentTCB = pxNewTCB;
+                } else {
+                    mtCOVERAGE_TEST_MARKER();
+                }
+            } else {
+                mtCOVERAGE_TEST_MARKER();
+            }
+        }
+
+        uxTaskNumber++;
+
+#if (configUSE_TRACE_FACILITY == 1)
+        {
+            /* Add a counter into the TCB for tracing only. */
+            pxNewTCB->uxTCBNumber = uxTaskNumber;
+        }
+#endif /* configUSE_TRACE_FACILITY */
+        traceTASK_CREATE(pxNewTCB);
+
+        prvAddTaskToReadyList(pxNewTCB);
+
+        portSETUP_TCB(pxNewTCB);
+    }
+    taskEXIT_CRITICAL();
+
+    if (xSchedulerRunning != pdFALSE) {
+        /* If the created task is of a higher priority than the current task
+        then it should run now. */
+        if (pxCurrentTCB->uxPriority < pxNewTCB->uxPriority) {
+            taskYIELD_IF_USING_PREEMPTION();
+        } else {
+            mtCOVERAGE_TEST_MARKER();
+        }
+    } else {
+        mtCOVERAGE_TEST_MARKER();
+    }
+}
+/*-----------------------------------------------------------*/
+
+#if (INCLUDE_vTaskDelete == 1)
+
+/* Delete a task (NULL deletes the calling task).  The task is removed from
+its state list and any event list; a self-deleting task cannot free its own
+TCB/stack, so it is parked on xTasksWaitingTermination for the idle task to
+clean up, while deleting another task frees its memory immediately via
+prvDeleteTCB().  A yield is forced if the running task deleted itself. */
+void vTaskDelete(TaskHandle_t xTaskToDelete)
+{
+    TCB_t *pxTCB;
+
+    taskENTER_CRITICAL();
+    {
+        /* If null is passed in here then it is the calling task that is
+        being deleted. */
+        pxTCB = prvGetTCBFromHandle(xTaskToDelete);
+
+        /* Remove task from the ready/delayed list. */
+        if (uxListRemove(&(pxTCB->xStateListItem)) == (UBaseType_t)0) {
+            taskRESET_READY_PRIORITY(pxTCB->uxPriority);
+        } else {
+            mtCOVERAGE_TEST_MARKER();
+        }
+
+        /* Is the task waiting on an event also? */
+        if (listLIST_ITEM_CONTAINER(&(pxTCB->xEventListItem)) != NULL) {
+            (void)uxListRemove(&(pxTCB->xEventListItem));
+        } else {
+            mtCOVERAGE_TEST_MARKER();
+        }
+
+        /* Increment the uxTaskNumber also so kernel aware debuggers can
+        detect that the task lists need re-generating.  This is done before
+        portPRE_TASK_DELETE_HOOK() as in the Windows port that macro will
+        not return. */
+        uxTaskNumber++;
+
+        if (pxTCB == pxCurrentTCB) {
+            /* A task is deleting itself.  This cannot complete within the
+            task itself, as a context switch to another task is required.
+            Place the task in the termination list.  The idle task will
+            check the termination list and free up any memory allocated by
+            the scheduler for the TCB and stack of the deleted task. */
+            vListInsertEnd(&xTasksWaitingTermination, &(pxTCB->xStateListItem));
+
+            /* Increment the ucTasksDeleted variable so the idle task knows
+            there is a task that has been deleted and that it should therefore
+            check the xTasksWaitingTermination list. */
+            ++uxDeletedTasksWaitingCleanUp;
+
+            /* Call the delete hook before portPRE_TASK_DELETE_HOOK() as
+            portPRE_TASK_DELETE_HOOK() does not return in the Win32 port. */
+            traceTASK_DELETE(pxTCB);
+
+            /* The pre-delete hook is primarily for the Windows simulator,
+            in which Windows specific clean up operations are performed,
+            after which it is not possible to yield away from this task -
+            hence xYieldPending is used to latch that a context switch is
+            required. */
+            portPRE_TASK_DELETE_HOOK(pxTCB, &xYieldPending);
+        } else {
+            --uxCurrentNumberOfTasks;
+            traceTASK_DELETE(pxTCB);
+            prvDeleteTCB(pxTCB);
+
+            /* Reset the next expected unblock time in case it referred to
+            the task that has just been deleted. */
+            prvResetNextTaskUnblockTime();
+        }
+    }
+    taskEXIT_CRITICAL();
+
+    /* Force a reschedule if it is the currently running task that has just
+    been deleted. */
+    if (xSchedulerRunning != pdFALSE) {
+        if (pxTCB == pxCurrentTCB) {
+            configASSERT(uxSchedulerSuspended == 0);
+            portYIELD_WITHIN_API();
+        } else {
+            mtCOVERAGE_TEST_MARKER();
+        }
+    }
+}
+
+#endif /* INCLUDE_vTaskDelete */
+/*-----------------------------------------------------------*/
+
+#if (INCLUDE_vTaskDelayUntil == 1)
+
+/* Block the calling task until the absolute tick time
+*pxPreviousWakeTime + xTimeIncrement, giving a fixed-frequency periodic
+wake-up that does not drift.  Tick-counter wrap-around is handled explicitly:
+the task only delays when the computed wake time is genuinely in the future.
+*pxPreviousWakeTime is updated to the new wake time before returning. */
+void vTaskDelayUntil(
+    TickType_t *const pxPreviousWakeTime,
+    const TickType_t xTimeIncrement)
+{
+    TickType_t xTimeToWake;
+    BaseType_t xAlreadyYielded, xShouldDelay = pdFALSE;
+
+    configASSERT(pxPreviousWakeTime);
+    configASSERT((xTimeIncrement > 0U));
+    configASSERT(uxSchedulerSuspended == 0);
+
+    vTaskSuspendAll();
+    {
+        /* Minor optimisation.  The tick count cannot change in this
+        block. */
+        const TickType_t xConstTickCount = xTickCount;
+
+        /* Generate the tick time at which the task wants to wake. */
+        xTimeToWake = *pxPreviousWakeTime + xTimeIncrement;
+
+        if (xConstTickCount < *pxPreviousWakeTime) {
+            /* The tick count has overflowed since this function was
+            lasted called.  In this case the only time we should ever
+            actually delay is if the wake time has also overflowed,
+            and the wake time is greater than the tick time.  When this
+            is the case it is as if neither time had overflowed. */
+            if ((xTimeToWake < *pxPreviousWakeTime) &&
+                (xTimeToWake > xConstTickCount)) {
+                xShouldDelay = pdTRUE;
+            } else {
+                mtCOVERAGE_TEST_MARKER();
+            }
+        } else {
+            /* The tick time has not overflowed.  In this case we will
+            delay if either the wake time has overflowed, and/or the
+            tick time is less than the wake time. */
+            if ((xTimeToWake < *pxPreviousWakeTime) ||
+                (xTimeToWake > xConstTickCount)) {
+                xShouldDelay = pdTRUE;
+            } else {
+                mtCOVERAGE_TEST_MARKER();
+            }
+        }
+
+        /* Update the wake time ready for the next call. */
+        *pxPreviousWakeTime = xTimeToWake;
+
+        if (xShouldDelay != pdFALSE) {
+            traceTASK_DELAY_UNTIL(xTimeToWake);
+
+            /* prvAddCurrentTaskToDelayedList() needs the block time, not
+            the time to wake, so subtract the current tick count. */
+            prvAddCurrentTaskToDelayedList(
+                xTimeToWake - xConstTickCount, pdFALSE);
+        } else {
+            mtCOVERAGE_TEST_MARKER();
+        }
+    }
+    xAlreadyYielded = xTaskResumeAll();
+
+    /* Force a reschedule if xTaskResumeAll has not already done so, we may
+    have put ourselves to sleep. */
+    if (xAlreadyYielded == pdFALSE) {
+        portYIELD_WITHIN_API();
+    } else {
+        mtCOVERAGE_TEST_MARKER();
+    }
+}
+
+#endif /* INCLUDE_vTaskDelayUntil */
+/*-----------------------------------------------------------*/
+
+#if (INCLUDE_vTaskDelay == 1)
+
+/* Block the calling task for xTicksToDelay ticks relative to now (contrast
+with vTaskDelayUntil(), which delays to an absolute tick time).  A delay of
+zero just forces a reschedule.  A yield is issued afterwards if
+xTaskResumeAll() did not already perform one. */
+void vTaskDelay(const TickType_t xTicksToDelay)
+{
+    BaseType_t xAlreadyYielded = pdFALSE;
+
+    /* A delay time of zero just forces a reschedule. */
+    if (xTicksToDelay > (TickType_t)0U) {
+        configASSERT(uxSchedulerSuspended == 0);
+        vTaskSuspendAll();
+        {
+            traceTASK_DELAY();
+
+            /* A task that is removed from the event list while the
+            scheduler is suspended will not get placed in the ready
+            list or removed from the blocked list until the scheduler
+            is resumed.
+
+            This task cannot be in an event list as it is the currently
+            executing task. */
+            prvAddCurrentTaskToDelayedList(xTicksToDelay, pdFALSE);
+        }
+        xAlreadyYielded = xTaskResumeAll();
+    } else {
+        mtCOVERAGE_TEST_MARKER();
+    }
+
+    /* Force a reschedule if xTaskResumeAll has not already done so, we may
+    have put ourselves to sleep. */
+    if (xAlreadyYielded == pdFALSE) {
+        portYIELD_WITHIN_API();
+    } else {
+        mtCOVERAGE_TEST_MARKER();
+    }
+}
+
+#endif /* INCLUDE_vTaskDelay */
+/*-----------------------------------------------------------*/
+
+#if ( \
+    (INCLUDE_eTaskGetState == 1) || (configUSE_TRACE_FACILITY == 1) || \
+    (INCLUDE_xTaskAbortDelay == 1))
+
+/* Return the scheduler state of xTask (eRunning/eBlocked/eSuspended/
+eDeleted/eReady) by inspecting which list its state list item is held in.
+The list pointers are snapshotted inside a critical section; the subsequent
+comparisons run outside it, so the returned state is a point-in-time view. */
+eTaskState eTaskGetState(TaskHandle_t xTask)
+{
+    eTaskState eReturn;
+    List_t const *pxStateList, *pxDelayedList, *pxOverflowedDelayedList;
+    const TCB_t *const pxTCB = xTask;
+
+    configASSERT(pxTCB);
+
+    if (pxTCB == pxCurrentTCB) {
+        /* The task calling this function is querying its own state. */
+        eReturn = eRunning;
+    } else {
+        taskENTER_CRITICAL();
+        {
+            pxStateList = listLIST_ITEM_CONTAINER(&(pxTCB->xStateListItem));
+            pxDelayedList = pxDelayedTaskList;
+            pxOverflowedDelayedList = pxOverflowDelayedTaskList;
+        }
+        taskEXIT_CRITICAL();
+
+        if ((pxStateList == pxDelayedList) ||
+            (pxStateList == pxOverflowedDelayedList)) {
+            /* The task being queried is referenced from one of the Blocked
+            lists. */
+            eReturn = eBlocked;
+        }
+# if (INCLUDE_vTaskSuspend == 1)
+        else if (pxStateList == &xSuspendedTaskList) {
+            /* The task being queried is referenced from the suspended
+            list.  Is it genuinely suspended or is it blocked
+            indefinitely? */
+            if (listLIST_ITEM_CONTAINER(&(pxTCB->xEventListItem)) == NULL) {
+# if (configUSE_TASK_NOTIFICATIONS == 1)
+                {
+                    /* The task does not appear on the event list item of
+                    and of the RTOS objects, but could still be in the
+                    blocked state if it is waiting on its notification
+                    rather than waiting on an object. */
+                    if (pxTCB->ucNotifyState == taskWAITING_NOTIFICATION) {
+                        eReturn = eBlocked;
+                    } else {
+                        eReturn = eSuspended;
+                    }
+                }
+# else
+                {
+                    eReturn = eSuspended;
+                }
+# endif
+            } else {
+                eReturn = eBlocked;
+            }
+        }
+# endif
+
+# if (INCLUDE_vTaskDelete == 1)
+        else if (
+            (pxStateList == &xTasksWaitingTermination) ||
+            (pxStateList == NULL)) {
+            /* The task being queried is referenced from the deleted
+            tasks list, or it is not referenced from any lists at
+            all. */
+            eReturn = eDeleted;
+        }
+# endif
+
+        else /*lint !e525 Negative indentation is intended to make use of
+                pre-processor clearer. */
+        {
+            /* If the task is not in any other state, it must be in the
+            Ready (including pending ready) state. */
+            eReturn = eReady;
+        }
+    }
+
+    return eReturn;
+} /*lint !e818 xTask cannot be a pointer to const because it is a typedef. */
+
+#endif /* INCLUDE_eTaskGetState */
+/*-----------------------------------------------------------*/
+
+#if (INCLUDE_uxTaskPriorityGet == 1)
+
+/* Return the current (possibly inherited) priority of xTask, or of the
+calling task if xTask is NULL.  Read inside a critical section so the value
+is consistent; task-level counterpart of uxTaskPriorityGetFromISR(). */
+UBaseType_t uxTaskPriorityGet(const TaskHandle_t xTask)
+{
+    TCB_t const *pxTCB;
+    UBaseType_t uxReturn;
+
+    taskENTER_CRITICAL();
+    {
+        /* If null is passed in here then it is the priority of the task
+        that called uxTaskPriorityGet() that is being queried. */
+        pxTCB = prvGetTCBFromHandle(xTask);
+        uxReturn = pxTCB->uxPriority;
+    }
+    taskEXIT_CRITICAL();
+
+    return uxReturn;
+}
+
+#endif /* INCLUDE_uxTaskPriorityGet */
+/*-----------------------------------------------------------*/
+
+#if (INCLUDE_uxTaskPriorityGet == 1)
+
+/* ISR-safe variant of uxTaskPriorityGet(): returns the priority of xTask
+(or the interrupted task if xTask is NULL) under an interrupt mask rather
+than a full critical section.  May only be called from interrupts at or
+below the maximum system call interrupt priority - see the assertion. */
+UBaseType_t uxTaskPriorityGetFromISR(const TaskHandle_t xTask)
+{
+    TCB_t const *pxTCB;
+    UBaseType_t uxReturn, uxSavedInterruptState;
+
+    /* RTOS ports that support interrupt nesting have the concept of a
+    maximum system call (or maximum API call) interrupt priority.
+    Interrupts that are above the maximum system call priority are keep
+    permanently enabled, even when the RTOS kernel is in a critical section,
+    but cannot make any calls to FreeRTOS API functions.  If configASSERT()
+    is defined in FreeRTOSConfig.h then
+    portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
+    failure if a FreeRTOS API function is called from an interrupt that has
+    been assigned a priority above the configured maximum system call
+    priority.  Only FreeRTOS functions that end in FromISR can be called
+    from interrupts that have been assigned a priority at or (logically)
+    below the maximum system call interrupt priority.  FreeRTOS maintains a
+    separate interrupt safe API to ensure interrupt entry is as fast and as
+    simple as possible.  More information (albeit Cortex-M specific) is
+    provided on the following link:
+    https://www.freertos.org/RTOS-Cortex-M3-M4.html */
+    portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
+
+    uxSavedInterruptState = portSET_INTERRUPT_MASK_FROM_ISR();
+    {
+        /* If null is passed in here then it is the priority of the calling
+        task that is being queried. */
+        pxTCB = prvGetTCBFromHandle(xTask);
+        uxReturn = pxTCB->uxPriority;
+    }
+    portCLEAR_INTERRUPT_MASK_FROM_ISR(uxSavedInterruptState);
+
+    return uxReturn;
+}
+
+#endif /* INCLUDE_uxTaskPriorityGet */
+/*-----------------------------------------------------------*/
+
+#if (INCLUDE_vTaskPrioritySet == 1)
+
+/* Change the priority of xTask (NULL means the calling task) to
+uxNewPriority, which is clamped to configMAX_PRIORITIES - 1.  With mutexes
+enabled the base priority is always updated but the effective priority is
+left alone while an inherited priority is in force.  If the task is on a
+ready list it is moved to the list for its new priority, and a yield is
+requested whenever the change could make a different task the highest
+priority ready task. */
+void vTaskPrioritySet(TaskHandle_t xTask, UBaseType_t uxNewPriority)
+{
+    TCB_t *pxTCB;
+    UBaseType_t uxCurrentBasePriority, uxPriorityUsedOnEntry;
+    BaseType_t xYieldRequired = pdFALSE;
+
+    configASSERT((uxNewPriority < configMAX_PRIORITIES));
+
+    /* Ensure the new priority is valid. */
+    if (uxNewPriority >= (UBaseType_t)configMAX_PRIORITIES) {
+        uxNewPriority = (UBaseType_t)configMAX_PRIORITIES - (UBaseType_t)1U;
+    } else {
+        mtCOVERAGE_TEST_MARKER();
+    }
+
+    taskENTER_CRITICAL();
+    {
+        /* If null is passed in here then it is the priority of the calling
+        task that is being changed. */
+        pxTCB = prvGetTCBFromHandle(xTask);
+
+        traceTASK_PRIORITY_SET(pxTCB, uxNewPriority);
+
+# if (configUSE_MUTEXES == 1)
+        {
+            uxCurrentBasePriority = pxTCB->uxBasePriority;
+        }
+# else
+        {
+            uxCurrentBasePriority = pxTCB->uxPriority;
+        }
+# endif
+
+        if (uxCurrentBasePriority != uxNewPriority) {
+            /* The priority change may have readied a task of higher
+            priority than the calling task. */
+            if (uxNewPriority > uxCurrentBasePriority) {
+                if (pxTCB != pxCurrentTCB) {
+                    /* The priority of a task other than the currently
+                    running task is being raised.  Is the priority being
+                    raised above that of the running task? */
+                    if (uxNewPriority >= pxCurrentTCB->uxPriority) {
+                        xYieldRequired = pdTRUE;
+                    } else {
+                        mtCOVERAGE_TEST_MARKER();
+                    }
+                } else {
+                    /* The priority of the running task is being raised,
+                    but the running task must already be the highest
+                    priority task able to run so no yield is required. */
+                }
+            } else if (pxTCB == pxCurrentTCB) {
+                /* Setting the priority of the running task down means
+                there may now be another task of higher priority that
+                is ready to execute. */
+                xYieldRequired = pdTRUE;
+            } else {
+                /* Setting the priority of any other task down does not
+                require a yield as the running task must be above the
+                new priority of the task being modified. */
+            }
+
+            /* Remember the ready list the task might be referenced from
+            before its uxPriority member is changed so the
+            taskRESET_READY_PRIORITY() macro can function correctly. */
+            uxPriorityUsedOnEntry = pxTCB->uxPriority;
+
+# if (configUSE_MUTEXES == 1)
+            {
+                /* Only change the priority being used if the task is not
+                currently using an inherited priority. */
+                if (pxTCB->uxBasePriority == pxTCB->uxPriority) {
+                    pxTCB->uxPriority = uxNewPriority;
+                } else {
+                    mtCOVERAGE_TEST_MARKER();
+                }
+
+                /* The base priority gets set whatever. */
+                pxTCB->uxBasePriority = uxNewPriority;
+            }
+# else
+            {
+                pxTCB->uxPriority = uxNewPriority;
+            }
+# endif
+
+            /* Only reset the event list item value if the value is not
+            being used for anything else. */
+            if ((listGET_LIST_ITEM_VALUE(&(pxTCB->xEventListItem)) &
+                 taskEVENT_LIST_ITEM_VALUE_IN_USE) == 0UL) {
+                listSET_LIST_ITEM_VALUE(
+                    &(pxTCB->xEventListItem),
+                    ((TickType_t)configMAX_PRIORITIES -
+                     (TickType_t)
+                         uxNewPriority)); /*lint !e961 MISRA exception as the
+                                             casts are only redundant for some
+                                             ports. */
+            } else {
+                mtCOVERAGE_TEST_MARKER();
+            }
+
+            /* If the task is in the blocked or suspended list we need do
+            nothing more than change its priority variable. However, if
+            the task is in a ready list it needs to be removed and placed
+            in the list appropriate to its new priority. */
+            if (listIS_CONTAINED_WITHIN(
+                    &(pxReadyTasksLists[uxPriorityUsedOnEntry]),
+                    &(pxTCB->xStateListItem)) != pdFALSE) {
+                /* The task is currently in its ready list - remove before
+                adding it to it's new ready list.  As we are in a critical
+                section we can do this even if the scheduler is suspended. */
+                if (uxListRemove(&(pxTCB->xStateListItem)) == (UBaseType_t)0) {
+                    /* It is known that the task is in its ready list so
+                    there is no need to check again and the port level
+                    reset macro can be called directly. */
+                    portRESET_READY_PRIORITY(
+                        uxPriorityUsedOnEntry, uxTopReadyPriority);
+                } else {
+                    mtCOVERAGE_TEST_MARKER();
+                }
+                prvAddTaskToReadyList(pxTCB);
+            } else {
+                mtCOVERAGE_TEST_MARKER();
+            }
+
+            if (xYieldRequired != pdFALSE) {
+                taskYIELD_IF_USING_PREEMPTION();
+            } else {
+                mtCOVERAGE_TEST_MARKER();
+            }
+
+            /* Remove compiler warning about unused variables when the port
+            optimised task selection is not being used. */
+            (void)uxPriorityUsedOnEntry;
+        }
+    }
+    taskEXIT_CRITICAL();
+}
+
+#endif /* INCLUDE_vTaskPrioritySet */
+/*-----------------------------------------------------------*/
+
+#if (INCLUDE_vTaskSuspend == 1)
+
+/* Suspend xTaskToSuspend (NULL suspends the calling task): remove it from
+its ready/delayed list and any event list and append it to
+xSuspendedTaskList.  A pending task notification wait is cancelled.  If the
+running task suspended itself a yield is forced; if the scheduler is not yet
+running, pxCurrentTCB is re-pointed (or set to NULL when every task is now
+suspended). */
+void vTaskSuspend(TaskHandle_t xTaskToSuspend)
+{
+    TCB_t *pxTCB;
+
+    taskENTER_CRITICAL();
+    {
+        /* If null is passed in here then it is the running task that is
+        being suspended. */
+        pxTCB = prvGetTCBFromHandle(xTaskToSuspend);
+
+        traceTASK_SUSPEND(pxTCB);
+
+        /* Remove task from the ready/delayed list and place in the
+        suspended list. */
+        if (uxListRemove(&(pxTCB->xStateListItem)) == (UBaseType_t)0) {
+            taskRESET_READY_PRIORITY(pxTCB->uxPriority);
+        } else {
+            mtCOVERAGE_TEST_MARKER();
+        }
+
+        /* Is the task waiting on an event also? */
+        if (listLIST_ITEM_CONTAINER(&(pxTCB->xEventListItem)) != NULL) {
+            (void)uxListRemove(&(pxTCB->xEventListItem));
+        } else {
+            mtCOVERAGE_TEST_MARKER();
+        }
+
+        vListInsertEnd(&xSuspendedTaskList, &(pxTCB->xStateListItem));
+
+# if (configUSE_TASK_NOTIFICATIONS == 1)
+        {
+            if (pxTCB->ucNotifyState == taskWAITING_NOTIFICATION) {
+                /* The task was blocked to wait for a notification, but is
+                now suspended, so no notification was received. */
+                pxTCB->ucNotifyState = taskNOT_WAITING_NOTIFICATION;
+            }
+        }
+# endif
+    }
+    taskEXIT_CRITICAL();
+
+    if (xSchedulerRunning != pdFALSE) {
+        /* Reset the next expected unblock time in case it referred to the
+        task that is now in the Suspended state. */
+        taskENTER_CRITICAL();
+        {
+            prvResetNextTaskUnblockTime();
+        }
+        taskEXIT_CRITICAL();
+    } else {
+        mtCOVERAGE_TEST_MARKER();
+    }
+
+    if (pxTCB == pxCurrentTCB) {
+        if (xSchedulerRunning != pdFALSE) {
+            /* The current task has just been suspended. */
+            configASSERT(uxSchedulerSuspended == 0);
+            portYIELD_WITHIN_API();
+        } else {
+            /* The scheduler is not running, but the task that was pointed
+            to by pxCurrentTCB has just been suspended and pxCurrentTCB
+            must be adjusted to point to a different task. */
+            if (listCURRENT_LIST_LENGTH(&xSuspendedTaskList) ==
+                uxCurrentNumberOfTasks) /*lint !e931 Right has no side effect,
+                                           just volatile. */
+            {
+                /* No other tasks are ready, so set pxCurrentTCB back to
+                NULL so when the next task is created pxCurrentTCB will
+                be set to point to it no matter what its relative priority
+                is. */
+                pxCurrentTCB = NULL;
+            } else {
+                vTaskSwitchContext();
+            }
+        }
+    } else {
+        mtCOVERAGE_TEST_MARKER();
+    }
+}
+
+#endif /* INCLUDE_vTaskSuspend */
+/*-----------------------------------------------------------*/
+
+#if (INCLUDE_vTaskSuspend == 1)
+
+/* Return pdTRUE only if xTask is genuinely suspended: on the suspended
+list, not already moved to the pending-ready list by an ISR, and not merely
+blocked indefinitely on an event.  Must be called from within a critical
+section (or with interrupts masked) as it reads xPendingReadyList. */
+static BaseType_t prvTaskIsTaskSuspended(const TaskHandle_t xTask)
+{
+    BaseType_t xReturn = pdFALSE;
+    const TCB_t *const pxTCB = xTask;
+
+    /* Accesses xPendingReadyList so must be called from a critical
+    section. */
+
+    /* It does not make sense to check if the calling task is suspended. */
+    configASSERT(xTask);
+
+    /* Is the task being resumed actually in the suspended list? */
+    if (listIS_CONTAINED_WITHIN(
+            &xSuspendedTaskList, &(pxTCB->xStateListItem)) != pdFALSE) {
+        /* Has the task already been resumed from within an ISR? */
+        if (listIS_CONTAINED_WITHIN(
+                &xPendingReadyList, &(pxTCB->xEventListItem)) == pdFALSE) {
+            /* Is it in the suspended list because it is in the Suspended
+            state, or because is is blocked with no timeout? */
+            if (listIS_CONTAINED_WITHIN(NULL, &(pxTCB->xEventListItem)) !=
+                pdFALSE) /*lint !e961.  The cast is only redundant when NULL is
+                            used. */
+            {
+                xReturn = pdTRUE;
+            } else {
+                mtCOVERAGE_TEST_MARKER();
+            }
+        } else {
+            mtCOVERAGE_TEST_MARKER();
+        }
+    } else {
+        mtCOVERAGE_TEST_MARKER();
+    }
+
+    return xReturn;
+} /*lint !e818 xTask cannot be a pointer to const because it is a typedef. */
+
+#endif /* INCLUDE_vTaskSuspend */
+/*-----------------------------------------------------------*/
+
+#if (INCLUDE_vTaskSuspend == 1)
+
+/* Resume a previously suspended task: if prvTaskIsTaskSuspended() confirms
+it really is suspended, move it back to the ready list and yield when the
+resumed task has a priority at or above the caller's.  Resuming NULL or the
+running task is a no-op; use xTaskResumeFromISR() from interrupt context. */
+void vTaskResume(TaskHandle_t xTaskToResume)
+{
+    TCB_t *const pxTCB = xTaskToResume;
+
+    /* It does not make sense to resume the calling task. */
+    configASSERT(xTaskToResume);
+
+    /* The parameter cannot be NULL as it is impossible to resume the
+    currently executing task. */
+    if ((pxTCB != pxCurrentTCB) && (pxTCB != NULL)) {
+        taskENTER_CRITICAL();
+        {
+            if (prvTaskIsTaskSuspended(pxTCB) != pdFALSE) {
+                traceTASK_RESUME(pxTCB);
+
+                /* The ready list can be accessed even if the scheduler is
+                suspended because this is inside a critical section. */
+                (void)uxListRemove(&(pxTCB->xStateListItem));
+                prvAddTaskToReadyList(pxTCB);
+
+                /* A higher priority task may have just been resumed. */
+                if (pxTCB->uxPriority >= pxCurrentTCB->uxPriority) {
+                    /* This yield may not cause the task just resumed to run,
+                    but will leave the lists in the correct state for the
+                    next yield. */
+                    taskYIELD_IF_USING_PREEMPTION();
+                } else {
+                    mtCOVERAGE_TEST_MARKER();
+                }
+            } else {
+                mtCOVERAGE_TEST_MARKER();
+            }
+        }
+        taskEXIT_CRITICAL();
+    } else {
+        mtCOVERAGE_TEST_MARKER();
+    }
+}
+
+#endif /* INCLUDE_vTaskSuspend */
+
+/*-----------------------------------------------------------*/
+
+#if ((INCLUDE_xTaskResumeFromISR == 1) && (INCLUDE_vTaskSuspend == 1))
+
+/* ISR-safe counterpart of vTaskResume().  If the scheduler is not suspended
+the task is moved straight to the ready list; otherwise it is parked on
+xPendingReadyList until xTaskResumeAll() runs.  Returns pdTRUE when the
+resumed task outranks the interrupted one, i.e. when the ISR should request
+a context switch on exit.  May only be called from interrupts at or below
+the maximum system call interrupt priority - see the assertion below. */
+BaseType_t xTaskResumeFromISR(TaskHandle_t xTaskToResume)
+{
+    BaseType_t xYieldRequired = pdFALSE;
+    TCB_t *const pxTCB = xTaskToResume;
+    UBaseType_t uxSavedInterruptStatus;
+
+    configASSERT(xTaskToResume);
+
+    /* RTOS ports that support interrupt nesting have the concept of a
+    maximum system call (or maximum API call) interrupt priority.
+    Interrupts that are above the maximum system call priority are keep
+    permanently enabled, even when the RTOS kernel is in a critical section,
+    but cannot make any calls to FreeRTOS API functions.  If configASSERT()
+    is defined in FreeRTOSConfig.h then
+    portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
+    failure if a FreeRTOS API function is called from an interrupt that has
+    been assigned a priority above the configured maximum system call
+    priority.  Only FreeRTOS functions that end in FromISR can be called
+    from interrupts that have been assigned a priority at or (logically)
+    below the maximum system call interrupt priority.  FreeRTOS maintains a
+    separate interrupt safe API to ensure interrupt entry is as fast and as
+    simple as possible.  More information (albeit Cortex-M specific) is
+    provided on the following link:
+    https://www.freertos.org/RTOS-Cortex-M3-M4.html */
+    portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
+
+    uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
+    {
+        if (prvTaskIsTaskSuspended(pxTCB) != pdFALSE) {
+            traceTASK_RESUME_FROM_ISR(pxTCB);
+
+            /* Check the ready lists can be accessed. */
+            if (uxSchedulerSuspended == (UBaseType_t)pdFALSE) {
+                /* Ready lists can be accessed so move the task from the
+                suspended list to the ready list directly. */
+                if (pxTCB->uxPriority >= pxCurrentTCB->uxPriority) {
+                    xYieldRequired = pdTRUE;
+                } else {
+                    mtCOVERAGE_TEST_MARKER();
+                }
+
+                (void)uxListRemove(&(pxTCB->xStateListItem));
+                prvAddTaskToReadyList(pxTCB);
+            } else {
+                /* The delayed or ready lists cannot be accessed so the task
+                is held in the pending ready list until the scheduler is
+                unsuspended. */
+                vListInsertEnd(&(xPendingReadyList), &(pxTCB->xEventListItem));
+            }
+        } else {
+            mtCOVERAGE_TEST_MARKER();
+        }
+    }
+    portCLEAR_INTERRUPT_MASK_FROM_ISR(uxSavedInterruptStatus);
+
+    return xYieldRequired;
+}
+
+#endif /* ( ( INCLUDE_xTaskResumeFromISR == 1 ) && ( INCLUDE_vTaskSuspend == 1 \
+          ) ) */
+/*-----------------------------------------------------------*/
+
+/* Create the idle task (statically or dynamically, depending on
+configSUPPORT_STATIC_ALLOCATION) and, when configUSE_TIMERS is 1, the timer
+service task, then hand control to the port layer via xPortStartScheduler().
+On success this call does not return - the first task runs instead. It only
+returns if the tasks could not be created or xTaskEndScheduler() is
+called. */
+void vTaskStartScheduler(void)
+{
+    BaseType_t xReturn;
+
+/* Add the idle task at the lowest priority. */
+#if (configSUPPORT_STATIC_ALLOCATION == 1)
+    {
+        StaticTask_t *pxIdleTaskTCBBuffer = NULL;
+        StackType_t *pxIdleTaskStackBuffer = NULL;
+        uint32_t ulIdleTaskStackSize;
+
+        /* The Idle task is created using user provided RAM - obtain the
+        address of the RAM then create the idle task. */
+        vApplicationGetIdleTaskMemory(
+            &pxIdleTaskTCBBuffer, &pxIdleTaskStackBuffer, &ulIdleTaskStackSize);
+        xIdleTaskHandle = xTaskCreateStatic(
+            prvIdleTask,
+            configIDLE_TASK_NAME,
+            ulIdleTaskStackSize,
+            (void *)NULL, /*lint !e961. The cast is not redundant for all
+                             compilers. */
+            portPRIVILEGE_BIT, /* In effect ( tskIDLE_PRIORITY |
+                                  portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is
+                                  zero. */
+            pxIdleTaskStackBuffer,
+            pxIdleTaskTCBBuffer); /*lint !e961 MISRA exception, justified as it
+                                     is not a redundant explicit cast to all
+                                     supported compilers. */
+
+        if (xIdleTaskHandle != NULL) {
+            xReturn = pdPASS;
+        } else {
+            xReturn = pdFAIL;
+        }
+    }
+#else
+    {
+        /* The Idle task is being created using dynamically allocated RAM. */
+        xReturn = xTaskCreate(
+            prvIdleTask,
+            configIDLE_TASK_NAME,
+            configMINIMAL_STACK_SIZE,
+            (void *)NULL,
+            portPRIVILEGE_BIT, /* In effect ( tskIDLE_PRIORITY |
+                                  portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is
+                                  zero. */
+            &xIdleTaskHandle); /*lint !e961 MISRA exception, justified as it is
+                                  not a redundant explicit cast to all supported
+                                  compilers. */
+    }
+#endif /* configSUPPORT_STATIC_ALLOCATION */
+
+#if (configUSE_TIMERS == 1)
+    {
+        if (xReturn == pdPASS) {
+            xReturn = xTimerCreateTimerTask();
+        } else {
+            mtCOVERAGE_TEST_MARKER();
+        }
+    }
+#endif /* configUSE_TIMERS */
+
+    if (xReturn == pdPASS) {
+/* freertos_tasks_c_additions_init() should only be called if the user
+definable macro FREERTOS_TASKS_C_ADDITIONS_INIT() is defined, as that is
+the only macro called by the function. */
+#ifdef FREERTOS_TASKS_C_ADDITIONS_INIT
+        {
+            freertos_tasks_c_additions_init();
+        }
+#endif
+
+        /* Interrupts are turned off here, to ensure a tick does not occur
+        before or during the call to xPortStartScheduler(). The stacks of
+        the created tasks contain a status word with interrupts switched on
+        so interrupts will automatically get re-enabled when the first task
+        starts to run. */
+        portDISABLE_INTERRUPTS();
+
+#if (configUSE_NEWLIB_REENTRANT == 1)
+        {
+            /* Switch Newlib's _impure_ptr variable to point to the _reent
+            structure specific to the task that will run first.
+            See the third party link
+            http://www.nadler.com/embedded/newlibAndFreeRTOS.html
+            for additional information. */
+            _impure_ptr = &(pxCurrentTCB->xNewLib_reent);
+        }
+#endif /* configUSE_NEWLIB_REENTRANT */
+
+        xNextTaskUnblockTime = portMAX_DELAY;
+        xSchedulerRunning = pdTRUE;
+        xTickCount = (TickType_t)configINITIAL_TICK_COUNT;
+
+        /* If configGENERATE_RUN_TIME_STATS is defined then the following
+        macro must be defined to configure the timer/counter used to generate
+        the run time counter time base. NOTE: If
+        configGENERATE_RUN_TIME_STATS is set to 0 and the following line fails
+        to build then ensure you do not have
+        portCONFIGURE_TIMER_FOR_RUN_TIME_STATS() defined in your
+        FreeRTOSConfig.h file. */
+        portCONFIGURE_TIMER_FOR_RUN_TIME_STATS();
+
+        traceTASK_SWITCHED_IN();
+
+        /* Setting up the timer tick is hardware specific and thus in the
+        portable interface. */
+        if (xPortStartScheduler() != pdFALSE) {
+            /* Should not reach here as if the scheduler is running the
+            function will not return. */
+        } else {
+            /* Should only reach here if a task calls xTaskEndScheduler(). */
+        }
+    } else {
+        /* This line will only be reached if the kernel could not be started,
+        because there was not enough FreeRTOS heap to create the idle task
+        or the timer task. */
+        configASSERT(xReturn != errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY);
+    }
+
+    /* Prevent compiler warnings if INCLUDE_xTaskGetIdleTaskHandle is set to 0,
+    meaning xIdleTaskHandle is not used anywhere else. */
+    (void)xIdleTaskHandle;
+}
+/*-----------------------------------------------------------*/
+
+/* Stop the kernel: disable interrupts, mark the scheduler as not running,
+and let the port layer restore the pre-scheduler state. */
+void vTaskEndScheduler(void)
+{
+    /* Stop the scheduler interrupts and call the portable scheduler end
+    routine so the original ISRs can be restored if necessary. The port
+    layer must ensure interrupts enable bit is left in the correct state. */
+    portDISABLE_INTERRUPTS();
+    xSchedulerRunning = pdFALSE;
+    vPortEndScheduler();
+}
+/*----------------------------------------------------------*/
+
+/* Suspend the scheduler (task switching), but not interrupts. Calls nest;
+each call must be balanced by a call to xTaskResumeAll(). */
+void vTaskSuspendAll(void)
+{
+    /* A critical section is not required as the variable is of type
+    BaseType_t. Please read Richard Barry's reply in the following link to a
+    post in the FreeRTOS support forum before reporting this as a bug! -
+    http://goo.gl/wu4acr */
+
+    /* portSOFTWARE_BARRIER() is only implemented for emulated/simulated ports
+    that do not otherwise exhibit real time behaviour. */
+    portSOFTWARE_BARRIER();
+
+    /* The scheduler is suspended if uxSchedulerSuspended is non-zero. An
+    increment is used to allow calls to vTaskSuspendAll() to nest. */
+    ++uxSchedulerSuspended;
+
+    /* Enforces ordering for ports and optimised compilers that may otherwise
+    place the above increment elsewhere. */
+    portMEMORY_BARRIER();
+}
+/*----------------------------------------------------------*/
+
+#if (configUSE_TICKLESS_IDLE != 0)
+
+/* Tickless-idle helper: return the number of ticks until the next task is
+due to unblock (xNextTaskUnblockTime - xTickCount), or 0 when the idle task
+should not enter a low power state because another task is ready to run or
+other tasks share the idle priority. */
+static TickType_t prvGetExpectedIdleTime(void)
+{
+    TickType_t xReturn;
+    UBaseType_t uxHigherPriorityReadyTasks = pdFALSE;
+
+/* uxHigherPriorityReadyTasks takes care of the case where
+configUSE_PREEMPTION is 0, so there may be tasks above the idle priority
+task that are in the Ready state, even though the idle task is
+running. */
+# if (configUSE_PORT_OPTIMISED_TASK_SELECTION == 0)
+    {
+        if (uxTopReadyPriority > tskIDLE_PRIORITY) {
+            uxHigherPriorityReadyTasks = pdTRUE;
+        }
+    }
+# else
+    {
+        const UBaseType_t uxLeastSignificantBit = (UBaseType_t)0x01;
+
+        /* When port optimised task selection is used the uxTopReadyPriority
+        variable is used as a bit map. If bits other than the least
+        significant bit are set then there are tasks that have a priority
+        above the idle priority that are in the Ready state. This takes
+        care of the case where the co-operative scheduler is in use. */
+        if (uxTopReadyPriority > uxLeastSignificantBit) {
+            uxHigherPriorityReadyTasks = pdTRUE;
+        }
+    }
+# endif
+
+    if (pxCurrentTCB->uxPriority > tskIDLE_PRIORITY) {
+        xReturn = 0;
+    } else if (
+        listCURRENT_LIST_LENGTH(&(pxReadyTasksLists[tskIDLE_PRIORITY])) > 1) {
+        /* There are other idle priority tasks in the ready state. If
+        time slicing is used then the very next tick interrupt must be
+        processed. */
+        xReturn = 0;
+    } else if (uxHigherPriorityReadyTasks != pdFALSE) {
+        /* There are tasks in the Ready state that have a priority above the
+        idle priority. This path can only be reached if
+        configUSE_PREEMPTION is 0. */
+        xReturn = 0;
+    } else {
+        xReturn = xNextTaskUnblockTime - xTickCount;
+    }
+
+    return xReturn;
+}
+
+#endif /* configUSE_TICKLESS_IDLE */
+/*----------------------------------------------------------*/
+
+/* Undo one call to vTaskSuspendAll(). When the suspension nesting count
+reaches zero: move tasks readied while the scheduler was suspended from
+xPendingReadyList onto the ready lists, replay any ticks pended in
+xPendedTicks, and yield if one became pending. Returns pdTRUE when a yield
+was performed inside this function (preemption enabled), otherwise
+pdFALSE. */
+BaseType_t xTaskResumeAll(void)
+{
+    TCB_t *pxTCB = NULL;
+    BaseType_t xAlreadyYielded = pdFALSE;
+
+    /* If uxSchedulerSuspended is zero then this function does not match a
+    previous call to vTaskSuspendAll(). */
+    configASSERT(uxSchedulerSuspended);
+
+    /* It is possible that an ISR caused a task to be removed from an event
+    list while the scheduler was suspended. If this was the case then the
+    removed task will have been added to the xPendingReadyList. Once the
+    scheduler has been resumed it is safe to move all the pending ready
+    tasks from this list into their appropriate ready list. */
+    taskENTER_CRITICAL();
+    {
+        --uxSchedulerSuspended;
+
+        if (uxSchedulerSuspended == (UBaseType_t)pdFALSE) {
+            if (uxCurrentNumberOfTasks > (UBaseType_t)0U) {
+                /* Move any readied tasks from the pending list into the
+                appropriate ready list. */
+                while (listLIST_IS_EMPTY(&xPendingReadyList) == pdFALSE) {
+                    pxTCB = listGET_OWNER_OF_HEAD_ENTRY(
+                        (&xPendingReadyList)); /*lint !e9079 void * is used as
+                                                  this macro is used with timers
+                                                  and co-routines too. Alignment
+                                                  is known to be fine as the
+                                                  type of the pointer stored and
+                                                  retrieved is the same. */
+                    (void)uxListRemove(&(pxTCB->xEventListItem));
+                    (void)uxListRemove(&(pxTCB->xStateListItem));
+                    prvAddTaskToReadyList(pxTCB);
+
+                    /* If the moved task has a priority higher than the current
+                    task then a yield must be performed. */
+                    if (pxTCB->uxPriority >= pxCurrentTCB->uxPriority) {
+                        xYieldPending = pdTRUE;
+                    } else {
+                        mtCOVERAGE_TEST_MARKER();
+                    }
+                }
+
+                if (pxTCB != NULL) {
+                    /* A task was unblocked while the scheduler was suspended,
+                    which may have prevented the next unblock time from being
+                    re-calculated, in which case re-calculate it now. Mainly
+                    important for low power tickless implementations, where
+                    this can prevent an unnecessary exit from low power
+                    state. */
+                    prvResetNextTaskUnblockTime();
+                }
+
+                /* If any ticks occurred while the scheduler was suspended then
+                they should be processed now. This ensures the tick count does
+                not slip, and that any delayed tasks are resumed at the correct
+                time. */
+                {
+                    TickType_t xPendedCounts =
+                        xPendedTicks; /* Non-volatile copy. */
+
+                    if (xPendedCounts > (TickType_t)0U) {
+                        do {
+                            if (xTaskIncrementTick() != pdFALSE) {
+                                xYieldPending = pdTRUE;
+                            } else {
+                                mtCOVERAGE_TEST_MARKER();
+                            }
+                            --xPendedCounts;
+                        } while (xPendedCounts > (TickType_t)0U);
+
+                        xPendedTicks = 0;
+                    } else {
+                        mtCOVERAGE_TEST_MARKER();
+                    }
+                }
+
+                if (xYieldPending != pdFALSE) {
+#if (configUSE_PREEMPTION != 0)
+                    {
+                        xAlreadyYielded = pdTRUE;
+                    }
+#endif
+                    taskYIELD_IF_USING_PREEMPTION();
+                } else {
+                    mtCOVERAGE_TEST_MARKER();
+                }
+            }
+        } else {
+            mtCOVERAGE_TEST_MARKER();
+        }
+    }
+    taskEXIT_CRITICAL();
+
+    return xAlreadyYielded;
+}
+/*-----------------------------------------------------------*/
+
+/* Return a snapshot of the current tick count. */
+TickType_t xTaskGetTickCount(void)
+{
+    TickType_t xTickCountSnapshot;
+
+    /* On 16 bit processors the tick count cannot be read atomically, so
+    wrap the read in the tick-type critical section. */
+    portTICK_TYPE_ENTER_CRITICAL();
+    xTickCountSnapshot = xTickCount;
+    portTICK_TYPE_EXIT_CRITICAL();
+
+    return xTickCountSnapshot;
+}
+/*-----------------------------------------------------------*/
+
+/* Interrupt-safe version of xTaskGetTickCount(): snapshot the tick count
+from within an ISR, using the tick-type interrupt mask rather than a full
+critical section. */
+TickType_t xTaskGetTickCountFromISR(void)
+{
+    TickType_t xReturn;
+    UBaseType_t uxSavedInterruptStatus;
+
+    /* RTOS ports that support interrupt nesting have the concept of a maximum
+    system call (or maximum API call) interrupt priority. Interrupts that are
+    above the maximum system call priority are kept permanently enabled, even
+    when the RTOS kernel is in a critical section, but cannot make any calls to
+    FreeRTOS API functions. If configASSERT() is defined in FreeRTOSConfig.h
+    then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
+    failure if a FreeRTOS API function is called from an interrupt that has been
+    assigned a priority above the configured maximum system call priority.
+    Only FreeRTOS functions that end in FromISR can be called from interrupts
+    that have been assigned a priority at or (logically) below the maximum
+    system call interrupt priority. FreeRTOS maintains a separate interrupt
+    safe API to ensure interrupt entry is as fast and as simple as possible.
+    More information (albeit Cortex-M specific) is provided on the following
+    link: https://www.freertos.org/RTOS-Cortex-M3-M4.html */
+    portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
+
+    uxSavedInterruptStatus = portTICK_TYPE_SET_INTERRUPT_MASK_FROM_ISR();
+    {
+        xReturn = xTickCount;
+    }
+    portTICK_TYPE_CLEAR_INTERRUPT_MASK_FROM_ISR(uxSavedInterruptStatus);
+
+    return xReturn;
+}
+/*-----------------------------------------------------------*/
+
+/* Return the number of tasks the kernel is currently managing, in any
+state. */
+UBaseType_t uxTaskGetNumberOfTasks(void)
+{
+    /* A critical section is not required because the variable is of type
+    UBaseType_t. */
+    return uxCurrentNumberOfTasks;
+}
+/*-----------------------------------------------------------*/
+
+/* Return a pointer to the text name stored in a task's TCB. */
+char *pcTaskGetName(
+    TaskHandle_t xTaskToQuery) /*lint !e971 Unqualified char types are allowed
+                                  for strings and single characters only. */
+{
+    /* A NULL handle denotes the calling task; prvGetTCBFromHandle() resolves
+    that to the current TCB. */
+    TCB_t *const pxTCB = prvGetTCBFromHandle(xTaskToQuery);
+
+    configASSERT(pxTCB);
+
+    return &(pxTCB->pcTaskName[0]);
+}
+/*-----------------------------------------------------------*/
+
+#if (INCLUDE_xTaskGetHandle == 1)
+
+/* Helper for xTaskGetHandle(): walk one task list comparing each task's
+name, character by character, against pcNameToQuery. Returns the matching
+TCB, or NULL if no task in the list has that exact name. */
+static TCB_t *prvSearchForNameWithinSingleList(
+    List_t *pxList,
+    const char pcNameToQuery[])
+{
+    TCB_t *pxNextTCB, *pxFirstTCB, *pxReturn = NULL;
+    UBaseType_t x;
+    char cNextChar;
+    BaseType_t xBreakLoop;
+
+    /* This function is called with the scheduler suspended. */
+
+    if (listCURRENT_LIST_LENGTH(pxList) > (UBaseType_t)0) {
+        listGET_OWNER_OF_NEXT_ENTRY(
+            pxFirstTCB,
+            pxList); /*lint !e9079 void * is used as this macro is used with
+                        timers and co-routines too. Alignment is known to be
+                        fine as the type of the pointer stored and retrieved is
+                        the same. */
+
+        do {
+            listGET_OWNER_OF_NEXT_ENTRY(
+                pxNextTCB,
+                pxList); /*lint !e9079 void * is used as this macro is used with
+                            timers and co-routines too. Alignment is known to
+                            be fine as the type of the pointer stored and
+                            retrieved is the same. */
+
+            /* Check each character in the name looking for a match or
+            mismatch. */
+            xBreakLoop = pdFALSE;
+            for (x = (UBaseType_t)0; x < (UBaseType_t)configMAX_TASK_NAME_LEN;
+                 x++) {
+                cNextChar = pxNextTCB->pcTaskName[x];
+
+                if (cNextChar != pcNameToQuery[x]) {
+                    /* Characters didn't match. */
+                    xBreakLoop = pdTRUE;
+                } else if (cNextChar == (char)0x00) {
+                    /* Both strings terminated, a match must have been
+                    found. */
+                    pxReturn = pxNextTCB;
+                    xBreakLoop = pdTRUE;
+                } else {
+                    mtCOVERAGE_TEST_MARKER();
+                }
+
+                if (xBreakLoop != pdFALSE) {
+                    break;
+                }
+            }
+
+            if (pxReturn != NULL) {
+                /* The handle has been found. */
+                break;
+            }
+
+        } while (pxNextTCB != pxFirstTCB);
+    } else {
+        mtCOVERAGE_TEST_MARKER();
+    }
+
+    return pxReturn;
+}
+
+#endif /* INCLUDE_xTaskGetHandle */
+/*-----------------------------------------------------------*/
+
+#if (INCLUDE_xTaskGetHandle == 1)
+
+/* Look up a task by its text name. Searches, with the scheduler suspended,
+the ready lists (highest priority first), both delayed lists, and - when the
+respective options are enabled - the suspended and waiting-termination
+lists. Returns the task's handle, or NULL if no task has that name. */
+TaskHandle_t xTaskGetHandle(
+    const char *pcNameToQuery) /*lint !e971 Unqualified char types are allowed
+                                  for strings and single characters only. */
+{
+    UBaseType_t uxQueue = configMAX_PRIORITIES;
+    TCB_t *pxTCB;
+
+    /* Task names will be truncated to configMAX_TASK_NAME_LEN - 1 bytes. */
+    configASSERT(strlen(pcNameToQuery) < configMAX_TASK_NAME_LEN);
+
+    vTaskSuspendAll();
+    {
+        /* Search the ready lists. */
+        do {
+            uxQueue--;
+            pxTCB = prvSearchForNameWithinSingleList(
+                (List_t *)&(pxReadyTasksLists[uxQueue]), pcNameToQuery);
+
+            if (pxTCB != NULL) {
+                /* Found the handle. */
+                break;
+            }
+
+        } while (
+            uxQueue >
+            (UBaseType_t)
+                tskIDLE_PRIORITY); /*lint !e961 MISRA exception as the casts are
+                                      only redundant for some ports. */
+
+        /* Search the delayed lists. */
+        if (pxTCB == NULL) {
+            pxTCB = prvSearchForNameWithinSingleList(
+                (List_t *)pxDelayedTaskList, pcNameToQuery);
+        }
+
+        if (pxTCB == NULL) {
+            pxTCB = prvSearchForNameWithinSingleList(
+                (List_t *)pxOverflowDelayedTaskList, pcNameToQuery);
+        }
+
+# if (INCLUDE_vTaskSuspend == 1)
+        {
+            if (pxTCB == NULL) {
+                /* Search the suspended list. */
+                pxTCB = prvSearchForNameWithinSingleList(
+                    &xSuspendedTaskList, pcNameToQuery);
+            }
+        }
+# endif
+
+# if (INCLUDE_vTaskDelete == 1)
+        {
+            if (pxTCB == NULL) {
+                /* Search the deleted list. */
+                pxTCB = prvSearchForNameWithinSingleList(
+                    &xTasksWaitingTermination, pcNameToQuery);
+            }
+        }
+# endif
+    }
+    (void)xTaskResumeAll();
+
+    return pxTCB;
+}
+
+#endif /* INCLUDE_xTaskGetHandle */
+/*-----------------------------------------------------------*/
+
+#if (configUSE_TRACE_FACILITY == 1)
+
+/* Fill pxTaskStatusArray with a TaskStatus_t for every task in the system
+(ready, blocked, and - when enabled - deleted/suspended), provided
+uxArraySize is at least uxCurrentNumberOfTasks. When pulTotalRunTime is
+non-NULL it receives the total run time counter when run time stats are
+enabled, or 0 otherwise. Returns the number of entries populated (0 if the
+array was too small). Runs with the scheduler suspended. */
+UBaseType_t uxTaskGetSystemState(
+    TaskStatus_t *const pxTaskStatusArray,
+    const UBaseType_t uxArraySize,
+    uint32_t *const pulTotalRunTime)
+{
+    UBaseType_t uxTask = 0, uxQueue = configMAX_PRIORITIES;
+
+    vTaskSuspendAll();
+    {
+        /* Is there a space in the array for each task in the system? */
+        if (uxArraySize >= uxCurrentNumberOfTasks) {
+            /* Fill in an TaskStatus_t structure with information on each
+            task in the Ready state. */
+            do {
+                uxQueue--;
+                uxTask += prvListTasksWithinSingleList(
+                    &(pxTaskStatusArray[uxTask]),
+                    &(pxReadyTasksLists[uxQueue]),
+                    eReady);
+
+            } while (
+                uxQueue >
+                (UBaseType_t)
+                    tskIDLE_PRIORITY); /*lint !e961 MISRA exception as the casts
+                                          are only redundant for some ports. */
+
+            /* Fill in an TaskStatus_t structure with information on each
+            task in the Blocked state. */
+            uxTask += prvListTasksWithinSingleList(
+                &(pxTaskStatusArray[uxTask]),
+                (List_t *)pxDelayedTaskList,
+                eBlocked);
+            uxTask += prvListTasksWithinSingleList(
+                &(pxTaskStatusArray[uxTask]),
+                (List_t *)pxOverflowDelayedTaskList,
+                eBlocked);
+
+# if (INCLUDE_vTaskDelete == 1)
+            {
+                /* Fill in an TaskStatus_t structure with information on
+                each task that has been deleted but not yet cleaned up. */
+                uxTask += prvListTasksWithinSingleList(
+                    &(pxTaskStatusArray[uxTask]),
+                    &xTasksWaitingTermination,
+                    eDeleted);
+            }
+# endif
+
+# if (INCLUDE_vTaskSuspend == 1)
+            {
+                /* Fill in an TaskStatus_t structure with information on
+                each task in the Suspended state. */
+                uxTask += prvListTasksWithinSingleList(
+                    &(pxTaskStatusArray[uxTask]),
+                    &xSuspendedTaskList,
+                    eSuspended);
+            }
+# endif
+
+# if (configGENERATE_RUN_TIME_STATS == 1)
+            {
+                if (pulTotalRunTime != NULL) {
+# ifdef portALT_GET_RUN_TIME_COUNTER_VALUE
+                    portALT_GET_RUN_TIME_COUNTER_VALUE((*pulTotalRunTime));
+# else
+                    *pulTotalRunTime = portGET_RUN_TIME_COUNTER_VALUE();
+# endif
+                }
+            }
+# else
+            {
+                if (pulTotalRunTime != NULL) {
+                    *pulTotalRunTime = 0;
+                }
+            }
+# endif
+        } else {
+            mtCOVERAGE_TEST_MARKER();
+        }
+    }
+    (void)xTaskResumeAll();
+
+    return uxTask;
+}
+
+#endif /* configUSE_TRACE_FACILITY */
+/*----------------------------------------------------------*/
+
+#if (INCLUDE_xTaskGetIdleTaskHandle == 1)
+
+/* Return the handle of the idle task, which is recorded when the idle task
+is created in vTaskStartScheduler(). */
+TaskHandle_t xTaskGetIdleTaskHandle(void)
+{
+    /* If xTaskGetIdleTaskHandle() is called before the scheduler has been
+    started, then xIdleTaskHandle will be NULL. */
+    configASSERT((xIdleTaskHandle != NULL));
+    return xIdleTaskHandle;
+}
+
+#endif /* INCLUDE_xTaskGetIdleTaskHandle */
+/*----------------------------------------------------------*/
+
+/* This conditional compilation should use inequality to 0, not equality to 1.
+This is to ensure vTaskStepTick() is available when user defined low power mode
+implementations require configUSE_TICKLESS_IDLE to be set to a value other than
+1. */
+#if (configUSE_TICKLESS_IDLE != 0)
+
+/* Advance the tick count by xTicksToJump in one step, after a tickless-idle
+period during which the tick interrupt was suppressed. */
+void vTaskStepTick(const TickType_t xTicksToJump)
+{
+    /* Correct the tick count value after a period during which the tick
+    was suppressed. Note this does *not* call the tick hook function for
+    each stepped tick. The jump must not step past the time at which the
+    next task is due to unblock, hence the assert. */
+    configASSERT((xTickCount + xTicksToJump) <= xNextTaskUnblockTime);
+    xTickCount += xTicksToJump;
+    traceINCREASE_TICK_COUNT(xTicksToJump);
+}
+
+#endif /* configUSE_TICKLESS_IDLE */
+/*----------------------------------------------------------*/
+
+/* Make the kernel behave as though xTicksToCatchUp tick interrupts occurred
+now. Returns the value of xTaskResumeAll(), i.e. whether a yield was
+performed while the pended ticks were processed. */
+BaseType_t xTaskCatchUpTicks(TickType_t xTicksToCatchUp)
+{
+    /* The implementation relies on xTaskResumeAll() winding xPendedTicks
+    down to zero, so the scheduler must not already be suspended on entry. */
+    configASSERT(uxSchedulerSuspended == 0);
+
+    /* Pend the requested number of ticks while the scheduler is suspended;
+    xTaskResumeAll() then replays them and reports whether a yield was
+    required. */
+    vTaskSuspendAll();
+    xPendedTicks += xTicksToCatchUp;
+
+    return xTaskResumeAll();
+}
+/*----------------------------------------------------------*/
+
+#if (INCLUDE_xTaskAbortDelay == 1)
+
+/* Force a task out of the Blocked state before its timeout expires. Returns
+pdPASS when the task was blocked and has been made ready (its
+ucDelayAborted flag is set so the task knows not to re-block), or pdFAIL
+when the task was not in the Blocked state. Runs with the scheduler
+suspended. */
+BaseType_t xTaskAbortDelay(TaskHandle_t xTask)
+{
+    TCB_t *pxTCB = xTask;
+    BaseType_t xReturn;
+
+    configASSERT(pxTCB);
+
+    vTaskSuspendAll();
+    {
+        /* A task can only be prematurely removed from the Blocked state if
+        it is actually in the Blocked state. */
+        if (eTaskGetState(xTask) == eBlocked) {
+            xReturn = pdPASS;
+
+            /* Remove the reference to the task from the blocked list. An
+            interrupt won't touch the xStateListItem because the
+            scheduler is suspended. */
+            (void)uxListRemove(&(pxTCB->xStateListItem));
+
+            /* Is the task waiting on an event also? If so remove it from
+            the event list too. Interrupts can touch the event list item,
+            even though the scheduler is suspended, so a critical section
+            is used. */
+            taskENTER_CRITICAL();
+            {
+                if (listLIST_ITEM_CONTAINER(&(pxTCB->xEventListItem)) != NULL) {
+                    (void)uxListRemove(&(pxTCB->xEventListItem));
+
+                    /* This lets the task know it was forcibly removed from the
+                    blocked state so it should not re-evaluate its block time
+                    and then block again. */
+                    pxTCB->ucDelayAborted = pdTRUE;
+                } else {
+                    mtCOVERAGE_TEST_MARKER();
+                }
+            }
+            taskEXIT_CRITICAL();
+
+            /* Place the unblocked task into the appropriate ready list. */
+            prvAddTaskToReadyList(pxTCB);
+
+/* A task being unblocked cannot cause an immediate context
+switch if preemption is turned off. */
+# if (configUSE_PREEMPTION == 1)
+            {
+                /* Preemption is on, but a context switch should only be
+                performed if the unblocked task has a priority that is
+                equal to or higher than the currently executing task. */
+                if (pxTCB->uxPriority > pxCurrentTCB->uxPriority) {
+                    /* Pend the yield to be performed when the scheduler
+                    is unsuspended. */
+                    xYieldPending = pdTRUE;
+                } else {
+                    mtCOVERAGE_TEST_MARKER();
+                }
+            }
+# endif /* configUSE_PREEMPTION */
+        } else {
+            xReturn = pdFAIL;
+        }
+    }
+    (void)xTaskResumeAll();
+
+    return xReturn;
+}
+
+#endif /* INCLUDE_xTaskAbortDelay */
+/*----------------------------------------------------------*/
+
+/* Advance the kernel tick by one. Called by the port's tick interrupt (and
+by the pended-tick unwind in xTaskResumeAll()). Handles tick count
+overflow by switching the delayed lists, unblocks tasks whose timeout has
+expired, applies time slicing, runs the tick hook, and returns pdTRUE when
+a context switch should be performed. While the scheduler is suspended the
+tick is pended in xPendedTicks instead. */
+BaseType_t xTaskIncrementTick(void)
+{
+    TCB_t *pxTCB;
+    TickType_t xItemValue;
+    BaseType_t xSwitchRequired = pdFALSE;
+
+    /* Called by the portable layer each time a tick interrupt occurs.
+    Increments the tick then checks to see if the new tick value will cause any
+    tasks to be unblocked. */
+    traceTASK_INCREMENT_TICK(xTickCount);
+    if (uxSchedulerSuspended == (UBaseType_t)pdFALSE) {
+        /* Minor optimisation. The tick count cannot change in this
+        block. */
+        const TickType_t xConstTickCount = xTickCount + (TickType_t)1;
+
+        /* Increment the RTOS tick, switching the delayed and overflowed
+        delayed lists if it wraps to 0. */
+        xTickCount = xConstTickCount;
+
+        if (xConstTickCount ==
+            (TickType_t)0U) /*lint !e774 'if' does not always evaluate to false
+                               as it is looking for an overflow. */
+        {
+            taskSWITCH_DELAYED_LISTS();
+        } else {
+            mtCOVERAGE_TEST_MARKER();
+        }
+
+        /* See if this tick has made a timeout expire. Tasks are stored in
+        the queue in the order of their wake time - meaning once one task
+        has been found whose block time has not expired there is no need to
+        look any further down the list. */
+        if (xConstTickCount >= xNextTaskUnblockTime) {
+            for (;;) {
+                if (listLIST_IS_EMPTY(pxDelayedTaskList) != pdFALSE) {
+                    /* The delayed list is empty. Set xNextTaskUnblockTime
+                    to the maximum possible value so it is extremely
+                    unlikely that the
+                    if( xTickCount >= xNextTaskUnblockTime ) test will pass
+                    next time through. */
+                    xNextTaskUnblockTime =
+                        portMAX_DELAY; /*lint !e961 MISRA exception as the casts
+                                          are only redundant for some ports. */
+                    break;
+                } else {
+                    /* The delayed list is not empty, get the value of the
+                    item at the head of the delayed list. This is the time
+                    at which the task at the head of the delayed list must
+                    be removed from the Blocked state. */
+                    pxTCB = listGET_OWNER_OF_HEAD_ENTRY(
+                        pxDelayedTaskList); /*lint !e9079 void * is used as this
+                                               macro is used with timers and
+                                               co-routines too. Alignment is
+                                               known to be fine as the type of
+                                               the pointer stored and retrieved
+                                               is the same. */
+                    xItemValue =
+                        listGET_LIST_ITEM_VALUE(&(pxTCB->xStateListItem));
+
+                    if (xConstTickCount < xItemValue) {
+                        /* It is not time to unblock this item yet, but the
+                        item value is the time at which the task at the head
+                        of the blocked list must be removed from the Blocked
+                        state - so record the item value in
+                        xNextTaskUnblockTime. */
+                        xNextTaskUnblockTime = xItemValue;
+                        break; /*lint !e9011 Code structure here is deemed
+                                  easier to understand with multiple breaks. */
+                    } else {
+                        mtCOVERAGE_TEST_MARKER();
+                    }
+
+                    /* It is time to remove the item from the Blocked state. */
+                    (void)uxListRemove(&(pxTCB->xStateListItem));
+
+                    /* Is the task waiting on an event also? If so remove
+                    it from the event list. */
+                    if (listLIST_ITEM_CONTAINER(&(pxTCB->xEventListItem)) !=
+                        NULL) {
+                        (void)uxListRemove(&(pxTCB->xEventListItem));
+                    } else {
+                        mtCOVERAGE_TEST_MARKER();
+                    }
+
+                    /* Place the unblocked task into the appropriate ready
+                    list. */
+                    prvAddTaskToReadyList(pxTCB);
+
+/* A task being unblocked cannot cause an immediate
+context switch if preemption is turned off. */
+#if (configUSE_PREEMPTION == 1)
+                    {
+                        /* Preemption is on, but a context switch should
+                        only be performed if the unblocked task has a
+                        priority that is equal to or higher than the
+                        currently executing task. */
+                        if (pxTCB->uxPriority >= pxCurrentTCB->uxPriority) {
+                            xSwitchRequired = pdTRUE;
+                        } else {
+                            mtCOVERAGE_TEST_MARKER();
+                        }
+                    }
+#endif /* configUSE_PREEMPTION */
+                }
+            }
+        }
+
+/* Tasks of equal priority to the currently running task will share
+processing time (time slice) if preemption is on, and the application
+writer has not explicitly turned time slicing off. */
+#if ((configUSE_PREEMPTION == 1) && (configUSE_TIME_SLICING == 1))
+        {
+            if (listCURRENT_LIST_LENGTH(
+                    &(pxReadyTasksLists[pxCurrentTCB->uxPriority])) >
+                (UBaseType_t)1) {
+                xSwitchRequired = pdTRUE;
+            } else {
+                mtCOVERAGE_TEST_MARKER();
+            }
+        }
+#endif /* ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) ) \
+        */
+
+#if (configUSE_TICK_HOOK == 1)
+        {
+            /* Guard against the tick hook being called when the pended tick
+            count is being unwound (when the scheduler is being unlocked). */
+            if (xPendedTicks == (TickType_t)0) {
+                vApplicationTickHook();
+            } else {
+                mtCOVERAGE_TEST_MARKER();
+            }
+        }
+#endif /* configUSE_TICK_HOOK */
+
+#if (configUSE_PREEMPTION == 1)
+        {
+            if (xYieldPending != pdFALSE) {
+                xSwitchRequired = pdTRUE;
+            } else {
+                mtCOVERAGE_TEST_MARKER();
+            }
+        }
+#endif /* configUSE_PREEMPTION */
+    } else {
+        ++xPendedTicks;
+
+/* The tick hook gets called at regular intervals, even if the
+scheduler is locked. */
+#if (configUSE_TICK_HOOK == 1)
+        {
+            vApplicationTickHook();
+        }
+#endif
+    }
+
+    return xSwitchRequired;
+}
+/*-----------------------------------------------------------*/
+
+#if (configUSE_APPLICATION_TASK_TAG == 1)
+
+/* Associate an application supplied hook (tag) function with a task - the
+calling task when xTask is NULL. The tag can later be invoked via
+xTaskCallApplicationTaskHook(). */
+void vTaskSetApplicationTaskTag(
+    TaskHandle_t xTask,
+    TaskHookFunction_t pxHookFunction)
+{
+    TCB_t *xTCB;
+
+    /* If xTask is NULL then it is the task hook of the calling task that is
+    getting set. */
+    if (xTask == NULL) {
+        xTCB = (TCB_t *)pxCurrentTCB;
+    } else {
+        xTCB = xTask;
+    }
+
+    /* Save the hook function in the TCB. A critical section is required as
+    the value can be accessed from an interrupt. */
+    taskENTER_CRITICAL();
+    {
+        xTCB->pxTaskTag = pxHookFunction;
+    }
+    taskEXIT_CRITICAL();
+}
+
+#endif /* configUSE_APPLICATION_TASK_TAG */
+/*-----------------------------------------------------------*/
+
+#if (configUSE_APPLICATION_TASK_TAG == 1)
+
+/* Return the hook (tag) function associated with a task - the calling task
+when xTask is NULL. */
+TaskHookFunction_t xTaskGetApplicationTaskTag(TaskHandle_t xTask)
+{
+    TCB_t *pxTCB;
+    TaskHookFunction_t xReturn;
+
+    /* If xTask is NULL then the tag of the calling task is returned. */
+    pxTCB = prvGetTCBFromHandle(xTask);
+
+    /* Read the hook function from the TCB. A critical section is required
+    as the value can be accessed from an interrupt. */
+    taskENTER_CRITICAL();
+    {
+        xReturn = pxTCB->pxTaskTag;
+    }
+    taskEXIT_CRITICAL();
+
+    return xReturn;
+}
+
+#endif /* configUSE_APPLICATION_TASK_TAG */
+/*-----------------------------------------------------------*/
+
+#if (configUSE_APPLICATION_TASK_TAG == 1)
+
+/* Interrupt-safe version of xTaskGetApplicationTaskTag(): return the hook
+(tag) function associated with a task - the calling task when xTask is
+NULL - using the ISR interrupt mask instead of a critical section. */
+TaskHookFunction_t xTaskGetApplicationTaskTagFromISR(TaskHandle_t xTask)
+{
+    TCB_t *pxTCB;
+    TaskHookFunction_t xReturn;
+    UBaseType_t uxSavedInterruptStatus;
+
+    /* If xTask is NULL then the tag of the calling task is returned. */
+    pxTCB = prvGetTCBFromHandle(xTask);
+
+    /* Read the hook function from the TCB. The interrupt mask is required
+    as the value can be accessed from an interrupt. */
+    uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
+    {
+        xReturn = pxTCB->pxTaskTag;
+    }
+    portCLEAR_INTERRUPT_MASK_FROM_ISR(uxSavedInterruptStatus);
+
+    return xReturn;
+}
+
+#endif /* configUSE_APPLICATION_TASK_TAG */
+/*-----------------------------------------------------------*/
+
+#if (configUSE_APPLICATION_TASK_TAG == 1)
+
+/* Invoke the hook (tag) function associated with a task - the calling task
+when xTask is NULL - passing it pvParameter. Returns the hook's result, or
+pdFAIL if no hook has been set. */
+BaseType_t xTaskCallApplicationTaskHook(TaskHandle_t xTask, void *pvParameter)
+{
+    TCB_t *pxTaskToCall;
+    BaseType_t xHookResult;
+
+    /* A NULL handle means the hook of the currently running task. */
+    pxTaskToCall = (xTask == NULL) ? (TCB_t *)pxCurrentTCB : (TCB_t *)xTask;
+
+    if (pxTaskToCall->pxTaskTag != NULL) {
+        xHookResult = pxTaskToCall->pxTaskTag(pvParameter);
+    } else {
+        xHookResult = pdFAIL;
+    }
+
+    return xHookResult;
+}
+
+#endif /* configUSE_APPLICATION_TASK_TAG */
+/*-----------------------------------------------------------*/
+
+/* Select the next task to run. If the scheduler is suspended the switch is
+pended (xYieldPending) instead of performed. Otherwise this accumulates run
+time statistics (when enabled), checks for stack overflow, selects the
+highest priority ready task, and maintains the POSIX errno and Newlib
+reentrancy state around the switch when those options are enabled. */
+void vTaskSwitchContext(void)
+{
+    if (uxSchedulerSuspended != (UBaseType_t)pdFALSE) {
+        /* The scheduler is currently suspended - do not allow a context
+        switch. */
+        xYieldPending = pdTRUE;
+    } else {
+        xYieldPending = pdFALSE;
+        traceTASK_SWITCHED_OUT();
+
+#if (configGENERATE_RUN_TIME_STATS == 1)
+        {
+# ifdef portALT_GET_RUN_TIME_COUNTER_VALUE
+            portALT_GET_RUN_TIME_COUNTER_VALUE(ulTotalRunTime);
+# else
+            ulTotalRunTime = portGET_RUN_TIME_COUNTER_VALUE();
+# endif
+
+            /* Add the amount of time the task has been running to the
+            accumulated time so far. The time the task started running was
+            stored in ulTaskSwitchedInTime. Note that there is no overflow
+            protection here so count values are only valid until the timer
+            overflows. The guard against negative values is to protect
+            against suspect run time stat counter implementations - which
+            are provided by the application, not the kernel. */
+            if (ulTotalRunTime > ulTaskSwitchedInTime) {
+                pxCurrentTCB->ulRunTimeCounter +=
+                    (ulTotalRunTime - ulTaskSwitchedInTime);
+            } else {
+                mtCOVERAGE_TEST_MARKER();
+            }
+            ulTaskSwitchedInTime = ulTotalRunTime;
+        }
+#endif /* configGENERATE_RUN_TIME_STATS */
+
+        /* Check for stack overflow, if configured. */
+        taskCHECK_FOR_STACK_OVERFLOW();
+
+/* Before the currently running task is switched out, save its errno. */
+#if (configUSE_POSIX_ERRNO == 1)
+        {
+            pxCurrentTCB->iTaskErrno = FreeRTOS_errno;
+        }
+#endif
+
+        /* Select a new task to run using either the generic C or port
+        optimised asm code. */
+        taskSELECT_HIGHEST_PRIORITY_TASK(); /*lint !e9079 void * is used as this
+                                               macro is used with timers and
+                                               co-routines too. Alignment is
+                                               known to be fine as the type of
+                                               the pointer stored and retrieved
+                                               is the same. */
+        traceTASK_SWITCHED_IN();
+
+/* After the new task is switched in, update the global errno. */
+#if (configUSE_POSIX_ERRNO == 1)
+        {
+            FreeRTOS_errno = pxCurrentTCB->iTaskErrno;
+        }
+#endif
+
+#if (configUSE_NEWLIB_REENTRANT == 1)
+        {
+            /* Switch Newlib's _impure_ptr variable to point to the _reent
+            structure specific to this task.
+            See the third party link
+            http://www.nadler.com/embedded/newlibAndFreeRTOS.html
+            for additional information. */
+            _impure_ptr = &(pxCurrentTCB->xNewLib_reent);
+        }
+#endif /* configUSE_NEWLIB_REENTRANT */
+    }
+}
+/*-----------------------------------------------------------*/
+
+/* Block the calling task on pxEventList for at most xTicksToWait ticks.
+   The task's event list item is inserted in priority order so the highest
+   priority waiter is woken first, then the task is moved to a delayed list. */
+void vTaskPlaceOnEventList(
+    List_t *const pxEventList,
+    const TickType_t xTicksToWait)
+{
+    configASSERT(pxEventList);
+
+    /* THIS FUNCTION MUST BE CALLED WITH EITHER INTERRUPTS DISABLED OR THE
+    SCHEDULER SUSPENDED AND THE QUEUE BEING ACCESSED LOCKED. */
+
+    /* Place the event list item of the TCB in the appropriate event list.
+    This is placed in the list in priority order so the highest priority task
+    is the first to be woken by the event. The queue that contains the event
+    list is locked, preventing simultaneous access from interrupts. */
+    vListInsert(pxEventList, &(pxCurrentTCB->xEventListItem));
+
+    prvAddCurrentTaskToDelayedList(xTicksToWait, pdTRUE);
+}
+/*-----------------------------------------------------------*/
+
+/* Block the calling task on an unordered event list (used by event groups).
+   xItemValue is stored in the event list item (with the IN_USE marker bit
+   set) and the item is appended rather than priority-ordered. */
+void vTaskPlaceOnUnorderedEventList(
+    List_t *pxEventList,
+    const TickType_t xItemValue,
+    const TickType_t xTicksToWait)
+{
+    configASSERT(pxEventList);
+
+    /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. It is used by
+    the event groups implementation. */
+    configASSERT(uxSchedulerSuspended != 0);
+
+    /* Store the item value in the event list item. It is safe to access the
+    event list item here as interrupts won't access the event list item of a
+    task that is not in the Blocked state. */
+    listSET_LIST_ITEM_VALUE(
+        &(pxCurrentTCB->xEventListItem),
+        xItemValue | taskEVENT_LIST_ITEM_VALUE_IN_USE);
+
+    /* Place the event list item of the TCB at the end of the appropriate event
+    list. It is safe to access the event list here because it is part of an
+    event group implementation - and interrupts don't access event groups
+    directly (instead they access them indirectly by pending function calls to
+    the task level). */
+    vListInsertEnd(pxEventList, &(pxCurrentTCB->xEventListItem));
+
+    prvAddCurrentTaskToDelayedList(xTicksToWait, pdTRUE);
+}
+/*-----------------------------------------------------------*/
+
+#if (configUSE_TIMERS == 1)
+
+/* Kernel-internal variant of vTaskPlaceOnEventList() used by the timer
+   implementation. Assumes the calling task is the only waiter so the faster
+   vListInsertEnd() can be used; xWaitIndefinitely != pdFALSE forces a
+   portMAX_DELAY block time. Must be called with the scheduler suspended. */
+void vTaskPlaceOnEventListRestricted(
+    List_t *const pxEventList,
+    TickType_t xTicksToWait,
+    const BaseType_t xWaitIndefinitely)
+{
+    configASSERT(pxEventList);
+
+    /* This function should not be called by application code hence the
+    'Restricted' in its name. It is not part of the public API. It is
+    designed for use by kernel code, and has special calling requirements -
+    it should be called with the scheduler suspended. */
+
+    /* Place the event list item of the TCB in the appropriate event list.
+    In this case it is assume that this is the only task that is going to
+    be waiting on this event list, so the faster vListInsertEnd() function
+    can be used in place of vListInsert. */
+    vListInsertEnd(pxEventList, &(pxCurrentTCB->xEventListItem));
+
+    /* If the task should block indefinitely then set the block time to a
+    value that will be recognised as an indefinite delay inside the
+    prvAddCurrentTaskToDelayedList() function. */
+    if (xWaitIndefinitely != pdFALSE) {
+        xTicksToWait = portMAX_DELAY;
+    }
+
+    traceTASK_DELAY_UNTIL((xTickCount + xTicksToWait));
+    prvAddCurrentTaskToDelayedList(xTicksToWait, xWaitIndefinitely);
+}
+
+#endif /* configUSE_TIMERS */
+/*-----------------------------------------------------------*/
+
+/* Unblock the highest-priority task waiting on pxEventList. If the
+   scheduler is running the task is moved to the ready list (and, with
+   tickless idle, xNextTaskUnblockTime is recalculated); if the scheduler is
+   suspended the task is parked on xPendingReadyList instead. Returns pdTRUE
+   when the unblocked task has a higher priority than the caller, meaning the
+   caller should request a context switch. */
+BaseType_t xTaskRemoveFromEventList(const List_t *const pxEventList)
+{
+    TCB_t *pxUnblockedTCB;
+    BaseType_t xReturn;
+
+    /* THIS FUNCTION MUST BE CALLED FROM A CRITICAL SECTION. It can also be
+    called from a critical section within an ISR. */
+
+    /* The event list is sorted in priority order, so the first in the list can
+    be removed as it is known to be the highest priority. Remove the TCB from
+    the delayed list, and add it to the ready list.
+
+    If an event is for a queue that is locked then this function will never
+    get called - the lock count on the queue will get modified instead. This
+    means exclusive access to the event list is guaranteed here.
+
+    This function assumes that a check has already been made to ensure that
+    pxEventList is not empty. */
+    pxUnblockedTCB = listGET_OWNER_OF_HEAD_ENTRY(
+        pxEventList); /*lint !e9079 void * is used as this macro is used with
+                         timers and co-routines too. Alignment is known to be
+                         fine as the type of the pointer stored and retrieved is
+                         the same. */
+    configASSERT(pxUnblockedTCB);
+    (void)uxListRemove(&(pxUnblockedTCB->xEventListItem));
+
+    if (uxSchedulerSuspended == (UBaseType_t)pdFALSE) {
+        (void)uxListRemove(&(pxUnblockedTCB->xStateListItem));
+        prvAddTaskToReadyList(pxUnblockedTCB);
+
+#if (configUSE_TICKLESS_IDLE != 0)
+        {
+            /* If a task is blocked on a kernel object then xNextTaskUnblockTime
+            might be set to the blocked task's time out time. If the task is
+            unblocked for a reason other than a timeout xNextTaskUnblockTime is
+            normally left unchanged, because it is automatically reset to a new
+            value when the tick count equals xNextTaskUnblockTime. However if
+            tickless idling is used it might be more important to enter sleep
+            mode at the earliest possible time - so reset xNextTaskUnblockTime
+            here to ensure it is updated at the earliest possible time. */
+            prvResetNextTaskUnblockTime();
+        }
+#endif
+    } else {
+        /* The delayed and ready lists cannot be accessed, so hold this task
+        pending until the scheduler is resumed. */
+        vListInsertEnd(&(xPendingReadyList), &(pxUnblockedTCB->xEventListItem));
+    }
+
+    if (pxUnblockedTCB->uxPriority > pxCurrentTCB->uxPriority) {
+        /* Return true if the task removed from the event list has a higher
+        priority than the calling task. This allows the calling task to know if
+        it should force a context switch now. */
+        xReturn = pdTRUE;
+
+        /* Mark that a yield is pending in case the user is not using the
+        "xHigherPriorityTaskWoken" parameter to an ISR safe FreeRTOS function.
+        */
+        xYieldPending = pdTRUE;
+    } else {
+        xReturn = pdFALSE;
+    }
+
+    return xReturn;
+}
+/*-----------------------------------------------------------*/
+
+/* Unblock the task owning pxEventListItem (event-group wait). The new
+   xItemValue is written back into the list item (with the IN_USE marker) so
+   the waiter can read the event bits, then the task is moved to the ready
+   list. xYieldPending is set if the woken task outranks the caller; the
+   actual switch happens when the scheduler is resumed. */
+void vTaskRemoveFromUnorderedEventList(
+    ListItem_t *pxEventListItem,
+    const TickType_t xItemValue)
+{
+    TCB_t *pxUnblockedTCB;
+
+    /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. It is used by
+    the event flags implementation. */
+    configASSERT(uxSchedulerSuspended != pdFALSE);
+
+    /* Store the new item value in the event list. */
+    listSET_LIST_ITEM_VALUE(
+        pxEventListItem, xItemValue | taskEVENT_LIST_ITEM_VALUE_IN_USE);
+
+    /* Remove the event list form the event flag. Interrupts do not access
+    event flags. */
+    pxUnblockedTCB = listGET_LIST_ITEM_OWNER(
+        pxEventListItem); /*lint !e9079 void * is used as this macro is used
+                             with timers and co-routines too. Alignment is
+                             known to be fine as the type of the pointer stored
+                             and retrieved is the same. */
+    configASSERT(pxUnblockedTCB);
+    (void)uxListRemove(pxEventListItem);
+
+#if (configUSE_TICKLESS_IDLE != 0)
+    {
+        /* If a task is blocked on a kernel object then xNextTaskUnblockTime
+        might be set to the blocked task's time out time. If the task is
+        unblocked for a reason other than a timeout xNextTaskUnblockTime is
+        normally left unchanged, because it is automatically reset to a new
+        value when the tick count equals xNextTaskUnblockTime. However if
+        tickless idling is used it might be more important to enter sleep mode
+        at the earliest possible time - so reset xNextTaskUnblockTime here to
+        ensure it is updated at the earliest possible time. */
+        prvResetNextTaskUnblockTime();
+    }
+#endif
+
+    /* Remove the task from the delayed list and add it to the ready list. The
+    scheduler is suspended so interrupts will not be accessing the ready
+    lists. */
+    (void)uxListRemove(&(pxUnblockedTCB->xStateListItem));
+    prvAddTaskToReadyList(pxUnblockedTCB);
+
+    if (pxUnblockedTCB->uxPriority > pxCurrentTCB->uxPriority) {
+        /* The unblocked task has a priority above that of the calling task, so
+        a context switch is required. This function is called with the
+        scheduler suspended so xYieldPending is set so the context switch
+        occurs immediately that the scheduler is resumed (unsuspended). */
+        xYieldPending = pdTRUE;
+    }
+}
+/*-----------------------------------------------------------*/
+
+/* Public API: capture the current tick count and overflow count into
+   pxTimeOut, for later use with xTaskCheckForTimeOut(). Uses a critical
+   section so both fields are sampled atomically. */
+void vTaskSetTimeOutState(TimeOut_t *const pxTimeOut)
+{
+    configASSERT(pxTimeOut);
+    taskENTER_CRITICAL();
+    {
+        pxTimeOut->xOverflowCount = xNumOfOverflows;
+        pxTimeOut->xTimeOnEntering = xTickCount;
+    }
+    taskEXIT_CRITICAL();
+}
+/*-----------------------------------------------------------*/
+
+/* Same as vTaskSetTimeOutState() but without the critical section - the
+   caller must already guarantee atomicity. */
+void vTaskInternalSetTimeOutState(TimeOut_t *const pxTimeOut)
+{
+    /* For internal use only as it does not use a critical section. */
+    pxTimeOut->xOverflowCount = xNumOfOverflows;
+    pxTimeOut->xTimeOnEntering = xTickCount;
+}
+/*-----------------------------------------------------------*/
+
+/* Determine whether a timeout captured by vTaskSetTimeOutState() has
+   expired. Returns pdTRUE on timeout (or aborted delay); otherwise returns
+   pdFALSE after decrementing *pxTicksToWait by the elapsed time and
+   restarting the timeout state, so the caller can continue blocking for the
+   remaining ticks. portMAX_DELAY never times out when INCLUDE_vTaskSuspend
+   is enabled. Tick-count overflow is handled via the saved overflow count. */
+BaseType_t xTaskCheckForTimeOut(
+    TimeOut_t *const pxTimeOut,
+    TickType_t *const pxTicksToWait)
+{
+    BaseType_t xReturn;
+
+    configASSERT(pxTimeOut);
+    configASSERT(pxTicksToWait);
+
+    taskENTER_CRITICAL();
+    {
+        /* Minor optimisation. The tick count cannot change in this block. */
+        const TickType_t xConstTickCount = xTickCount;
+        const TickType_t xElapsedTime =
+            xConstTickCount - pxTimeOut->xTimeOnEntering;
+
+#if (INCLUDE_xTaskAbortDelay == 1)
+        if (pxCurrentTCB->ucDelayAborted != (uint8_t)pdFALSE) {
+            /* The delay was aborted, which is not the same as a time out,
+            but has the same result. */
+            pxCurrentTCB->ucDelayAborted = pdFALSE;
+            xReturn = pdTRUE;
+        } else
+#endif
+
+#if (INCLUDE_vTaskSuspend == 1)
+            if (*pxTicksToWait == portMAX_DELAY) {
+            /* If INCLUDE_vTaskSuspend is set to 1 and the block time
+            specified is the maximum block time then the task should block
+            indefinitely, and therefore never time out. */
+            xReturn = pdFALSE;
+        } else
+#endif
+
+            if ((xNumOfOverflows != pxTimeOut->xOverflowCount) &&
+                (xConstTickCount >=
+                 pxTimeOut
+                     ->xTimeOnEntering)) /*lint !e525 Indentation preferred as
+                                            is to make code within pre-processor
+                                            directives clearer. */
+        {
+            /* The tick count is greater than the time at which
+            vTaskSetTimeout() was called, but has also overflowed since
+            vTaskSetTimeOut() was called. It must have wrapped all the way
+            around and gone past again. This passed since vTaskSetTimeout()
+            was called. */
+            xReturn = pdTRUE;
+        } else if (xElapsedTime < *pxTicksToWait) /*lint !e961 Explicit casting
+                                                     is only redundant with some
+                                                     compilers, whereas others
+                                                     require it to prevent
+                                                     integer conversion errors.
+                                                   */
+        {
+            /* Not a genuine timeout. Adjust parameters for time remaining. */
+            *pxTicksToWait -= xElapsedTime;
+            vTaskInternalSetTimeOutState(pxTimeOut);
+            xReturn = pdFALSE;
+        } else {
+            *pxTicksToWait = 0;
+            xReturn = pdTRUE;
+        }
+    }
+    taskEXIT_CRITICAL();
+
+    return xReturn;
+}
+/*-----------------------------------------------------------*/
+
+/* Record that a yield was required but could not be performed (e.g. the
+   queue was locked); the pending yield is acted upon later. */
+void vTaskMissedYield(void)
+{
+    xYieldPending = pdTRUE;
+}
+/*-----------------------------------------------------------*/
+
+#if (configUSE_TRACE_FACILITY == 1)
+
+/* Trace facility: return the application-assigned task number of xTask,
+   or 0 when xTask is NULL. */
+UBaseType_t uxTaskGetTaskNumber(TaskHandle_t xTask)
+{
+    UBaseType_t uxReturn;
+    TCB_t const *pxTCB;
+
+    if (xTask != NULL) {
+        pxTCB = xTask;
+        uxReturn = pxTCB->uxTaskNumber;
+    } else {
+        uxReturn = 0U;
+    }
+
+    return uxReturn;
+}
+
+#endif /* configUSE_TRACE_FACILITY */
+/*-----------------------------------------------------------*/
+
+#if (configUSE_TRACE_FACILITY == 1)
+
+/* Trace facility: assign uxHandle as the task number of xTask. A NULL
+   handle is silently ignored. */
+void vTaskSetTaskNumber(TaskHandle_t xTask, const UBaseType_t uxHandle)
+{
+    TCB_t *pxTCB;
+
+    if (xTask != NULL) {
+        pxTCB = xTask;
+        pxTCB->uxTaskNumber = uxHandle;
+    }
+}
+
+#endif /* configUSE_TRACE_FACILITY */
+
+/*
+ * -----------------------------------------------------------
+ * The Idle task.
+ * ----------------------------------------------------------
+ *
+ * The portTASK_FUNCTION() macro is used to allow port/compiler specific
+ * language extensions. The equivalent prototype for this function is:
+ *
+ * void prvIdleTask( void *pvParameters );
+ *
+ */
+/* Idle task body. Reclaims deleted tasks' memory, optionally yields to
+   equal-priority tasks, runs the application idle hook, and enters tickless
+   low-power sleep when configured. It must never block. */
+static portTASK_FUNCTION(prvIdleTask, pvParameters)
+{
+    /* Stop warnings. */
+    (void)pvParameters;
+
+    /** THIS IS THE RTOS IDLE TASK - WHICH IS CREATED AUTOMATICALLY WHEN THE
+    SCHEDULER IS STARTED. **/
+
+    /* In case a task that has a secure context deletes itself, in which case
+    the idle task is responsible for deleting the task's secure context, if
+    any. */
+    portALLOCATE_SECURE_CONTEXT(configMINIMAL_SECURE_STACK_SIZE);
+
+    for (;;) {
+        /* See if any tasks have deleted themselves - if so then the idle task
+        is responsible for freeing the deleted task's TCB and stack. */
+        prvCheckTasksWaitingTermination();
+
+#if (configUSE_PREEMPTION == 0)
+        {
+            /* If we are not using preemption we keep forcing a task switch to
+            see if any other task has become available. If we are using
+            preemption we don't need to do this as any task becoming available
+            will automatically get the processor anyway. */
+            taskYIELD();
+        }
+#endif /* configUSE_PREEMPTION */
+
+#if ((configUSE_PREEMPTION == 1) && (configIDLE_SHOULD_YIELD == 1))
+        {
+            /* When using preemption tasks of equal priority will be
+            timesliced. If a task that is sharing the idle priority is ready
+            to run then the idle task should yield before the end of the
+            timeslice.
+
+            A critical region is not required here as we are just reading from
+            the list, and an occasional incorrect value will not matter. If
+            the ready list at the idle priority contains more than one task
+            then a task other than the idle task is ready to execute. */
+            if (listCURRENT_LIST_LENGTH(
+                    &(pxReadyTasksLists[tskIDLE_PRIORITY])) > (UBaseType_t)1) {
+                taskYIELD();
+            } else {
+                mtCOVERAGE_TEST_MARKER();
+            }
+        }
+#endif /* ( ( configUSE_PREEMPTION == 1 ) && ( configIDLE_SHOULD_YIELD == 1 ) \
+        ) */
+
+#if (configUSE_IDLE_HOOK == 1)
+        {
+            extern void vApplicationIdleHook(void);
+
+            /* Call the user defined function from within the idle task. This
+            allows the application designer to add background functionality
+            without the overhead of a separate task.
+            NOTE: vApplicationIdleHook() MUST NOT, UNDER ANY CIRCUMSTANCES,
+            CALL A FUNCTION THAT MIGHT BLOCK. */
+            vApplicationIdleHook();
+        }
+#endif /* configUSE_IDLE_HOOK */
+
+/* This conditional compilation should use inequality to 0, not equality
+to 1. This is to ensure portSUPPRESS_TICKS_AND_SLEEP() is called when
+user defined low power mode implementations require
+configUSE_TICKLESS_IDLE to be set to a value other than 1. */
+#if (configUSE_TICKLESS_IDLE != 0)
+        {
+            TickType_t xExpectedIdleTime;
+
+            /* It is not desirable to suspend then resume the scheduler on
+            each iteration of the idle task. Therefore, a preliminary
+            test of the expected idle time is performed without the
+            scheduler suspended. The result here is not necessarily
+            valid. */
+            xExpectedIdleTime = prvGetExpectedIdleTime();
+
+            if (xExpectedIdleTime >= configEXPECTED_IDLE_TIME_BEFORE_SLEEP) {
+                vTaskSuspendAll();
+                {
+                    /* Now the scheduler is suspended, the expected idle
+                    time can be sampled again, and this time its value can
+                    be used. */
+                    configASSERT(xNextTaskUnblockTime >= xTickCount);
+                    xExpectedIdleTime = prvGetExpectedIdleTime();
+
+                    /* Define the following macro to set xExpectedIdleTime to 0
+                    if the application does not want
+                    portSUPPRESS_TICKS_AND_SLEEP() to be called. */
+                    configPRE_SUPPRESS_TICKS_AND_SLEEP_PROCESSING(
+                        xExpectedIdleTime);
+
+                    if (xExpectedIdleTime >=
+                        configEXPECTED_IDLE_TIME_BEFORE_SLEEP) {
+                        traceLOW_POWER_IDLE_BEGIN();
+                        portSUPPRESS_TICKS_AND_SLEEP(xExpectedIdleTime);
+                        traceLOW_POWER_IDLE_END();
+                    } else {
+                        mtCOVERAGE_TEST_MARKER();
+                    }
+                }
+                (void)xTaskResumeAll();
+            } else {
+                mtCOVERAGE_TEST_MARKER();
+            }
+        }
+#endif /* configUSE_TICKLESS_IDLE */
+    }
+}
+/*-----------------------------------------------------------*/
+
+#if (configUSE_TICKLESS_IDLE != 0)
+
+/* Re-check, from within a critical section, whether it is still safe to
+   enter low-power sleep: returns eAbortSleep if a task became ready or a
+   yield is pending, eNoTasksWaitingTimeout if every application task is
+   suspended/blocked indefinitely, otherwise eStandardSleep. */
+eSleepModeStatus eTaskConfirmSleepModeStatus(void)
+{
+    /* The idle task exists in addition to the application tasks. */
+    const UBaseType_t uxNonApplicationTasks = 1;
+    eSleepModeStatus eReturn = eStandardSleep;
+
+    /* This function must be called from a critical section. */
+
+    if (listCURRENT_LIST_LENGTH(&xPendingReadyList) != 0) {
+        /* A task was made ready while the scheduler was suspended. */
+        eReturn = eAbortSleep;
+    } else if (xYieldPending != pdFALSE) {
+        /* A yield was pended while the scheduler was suspended. */
+        eReturn = eAbortSleep;
+    } else {
+        /* If all the tasks are in the suspended list (which might mean they
+        have an infinite block time rather than actually being suspended)
+        then it is safe to turn all clocks off and just wait for external
+        interrupts. */
+        if (listCURRENT_LIST_LENGTH(&xSuspendedTaskList) ==
+            (uxCurrentNumberOfTasks - uxNonApplicationTasks)) {
+            eReturn = eNoTasksWaitingTimeout;
+        } else {
+            mtCOVERAGE_TEST_MARKER();
+        }
+    }
+
+    return eReturn;
+}
+
+#endif /* configUSE_TICKLESS_IDLE */
+/*-----------------------------------------------------------*/
+
+#if (configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0)
+
+/* Store pvValue in slot xIndex of the task's thread-local storage array.
+   A NULL xTaskToSet targets the calling task (via prvGetTCBFromHandle).
+   Out-of-range indices are silently ignored. */
+void vTaskSetThreadLocalStoragePointer(
+    TaskHandle_t xTaskToSet,
+    BaseType_t xIndex,
+    void *pvValue)
+{
+    TCB_t *pxTCB;
+
+    if (xIndex < configNUM_THREAD_LOCAL_STORAGE_POINTERS) {
+        pxTCB = prvGetTCBFromHandle(xTaskToSet);
+        configASSERT(pxTCB != NULL);
+        pxTCB->pvThreadLocalStoragePointers[xIndex] = pvValue;
+    }
+}
+
+#endif /* configNUM_THREAD_LOCAL_STORAGE_POINTERS */
+/*-----------------------------------------------------------*/
+
+#if (configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0)
+
+/* Return the pointer stored in slot xIndex of the task's thread-local
+   storage array, or NULL for an out-of-range index. A NULL xTaskToQuery
+   targets the calling task. */
+void *pvTaskGetThreadLocalStoragePointer(
+    TaskHandle_t xTaskToQuery,
+    BaseType_t xIndex)
+{
+    void *pvReturn = NULL;
+    TCB_t *pxTCB;
+
+    if (xIndex < configNUM_THREAD_LOCAL_STORAGE_POINTERS) {
+        pxTCB = prvGetTCBFromHandle(xTaskToQuery);
+        pvReturn = pxTCB->pvThreadLocalStoragePointers[xIndex];
+    } else {
+        pvReturn = NULL;
+    }
+
+    return pvReturn;
+}
+
+#endif /* configNUM_THREAD_LOCAL_STORAGE_POINTERS */
+/*-----------------------------------------------------------*/
+
+#if (portUSING_MPU_WRAPPERS == 1)
+
+/* Replace the MPU region settings of xTaskToModify (or of the calling task
+   when NULL) with xRegions, delegating to the port layer. */
+void vTaskAllocateMPURegions(
+    TaskHandle_t xTaskToModify,
+    const MemoryRegion_t *const xRegions)
+{
+    TCB_t *pxTCB;
+
+    /* If null is passed in here then we are modifying the MPU settings of
+    the calling task. */
+    pxTCB = prvGetTCBFromHandle(xTaskToModify);
+
+    vPortStoreTaskMPUSettings(&(pxTCB->xMPUSettings), xRegions, NULL, 0);
+}
+
+#endif /* portUSING_MPU_WRAPPERS */
+/*-----------------------------------------------------------*/
+
+/* One-time initialisation of all scheduler lists: one ready list per
+   priority, the two delayed lists (current and overflow), the pending-ready
+   list, and the optional termination/suspended lists. */
+static void prvInitialiseTaskLists(void)
+{
+    UBaseType_t uxPriority;
+
+    for (uxPriority = (UBaseType_t)0U;
+         uxPriority < (UBaseType_t)configMAX_PRIORITIES;
+         uxPriority++) {
+        vListInitialise(&(pxReadyTasksLists[uxPriority]));
+    }
+
+    vListInitialise(&xDelayedTaskList1);
+    vListInitialise(&xDelayedTaskList2);
+    vListInitialise(&xPendingReadyList);
+
+#if (INCLUDE_vTaskDelete == 1)
+    {
+        vListInitialise(&xTasksWaitingTermination);
+    }
+#endif /* INCLUDE_vTaskDelete */
+
+#if (INCLUDE_vTaskSuspend == 1)
+    {
+        vListInitialise(&xSuspendedTaskList);
+    }
+#endif /* INCLUDE_vTaskSuspend */
+
+    /* Start with pxDelayedTaskList using list1 and the
+    pxOverflowDelayedTaskList using list2. */
+    pxDelayedTaskList = &xDelayedTaskList1;
+    pxOverflowDelayedTaskList = &xDelayedTaskList2;
+}
+/*-----------------------------------------------------------*/
+
+/* Reclaim the TCB and stack of every task that deleted itself. Each TCB is
+   detached from the termination list inside a critical section, but freed
+   outside it to keep interrupt latency low. */
+static void prvCheckTasksWaitingTermination(void)
+{
+    /** THIS FUNCTION IS CALLED FROM THE RTOS IDLE TASK **/
+
+#if (INCLUDE_vTaskDelete == 1)
+    {
+        TCB_t *pxTCB;
+
+        /* uxDeletedTasksWaitingCleanUp is used to prevent taskENTER_CRITICAL()
+        being called too often in the idle task. */
+        while (uxDeletedTasksWaitingCleanUp > (UBaseType_t)0U) {
+            taskENTER_CRITICAL();
+            {
+                pxTCB = listGET_OWNER_OF_HEAD_ENTRY(
+                    (&xTasksWaitingTermination)); /*lint !e9079 void * is used
+                                                     as this macro is used with
+                                                     timers and co-routines too.
+                                                     Alignment is known to be
+                                                     fine as the type of the
+                                                     pointer stored and
+                                                     retrieved is the same. */
+                (void)uxListRemove(&(pxTCB->xStateListItem));
+                --uxCurrentNumberOfTasks;
+                --uxDeletedTasksWaitingCleanUp;
+            }
+            taskEXIT_CRITICAL();
+
+            prvDeleteTCB(pxTCB);
+        }
+    }
+#endif /* INCLUDE_vTaskDelete */
+}
+/*-----------------------------------------------------------*/
+
+#if (configUSE_TRACE_FACILITY == 1)
+
+/* Fill *pxTaskStatus with information about xTask (NULL = calling task).
+   eState is used as the reported state unless it is eInvalid, in which case
+   the state is queried via eTaskGetState(); an eSuspended task that is
+   actually blocked indefinitely is reported as eBlocked. Stack high-water
+   mark computation is skipped when xGetFreeStackSpace is pdFALSE as it is
+   relatively slow. */
+void vTaskGetInfo(
+    TaskHandle_t xTask,
+    TaskStatus_t *pxTaskStatus,
+    BaseType_t xGetFreeStackSpace,
+    eTaskState eState)
+{
+    TCB_t *pxTCB;
+
+    /* xTask is NULL then get the state of the calling task. */
+    pxTCB = prvGetTCBFromHandle(xTask);
+
+    pxTaskStatus->xHandle = (TaskHandle_t)pxTCB;
+    pxTaskStatus->pcTaskName = (const char *)&(pxTCB->pcTaskName[0]);
+    pxTaskStatus->uxCurrentPriority = pxTCB->uxPriority;
+    pxTaskStatus->pxStackBase = pxTCB->pxStack;
+    pxTaskStatus->xTaskNumber = pxTCB->uxTCBNumber;
+
+# if (configUSE_MUTEXES == 1)
+    {
+        pxTaskStatus->uxBasePriority = pxTCB->uxBasePriority;
+    }
+# else
+    {
+        pxTaskStatus->uxBasePriority = 0;
+    }
+# endif
+
+# if (configGENERATE_RUN_TIME_STATS == 1)
+    {
+        pxTaskStatus->ulRunTimeCounter = pxTCB->ulRunTimeCounter;
+    }
+# else
+    {
+        pxTaskStatus->ulRunTimeCounter = 0;
+    }
+# endif
+
+    /* Obtaining the task state is a little fiddly, so is only done if the
+    value of eState passed into this function is eInvalid - otherwise the
+    state is just set to whatever is passed in. */
+    if (eState != eInvalid) {
+        if (pxTCB == pxCurrentTCB) {
+            pxTaskStatus->eCurrentState = eRunning;
+        } else {
+            pxTaskStatus->eCurrentState = eState;
+
+# if (INCLUDE_vTaskSuspend == 1)
+            {
+                /* If the task is in the suspended list then there is a
+                chance it is actually just blocked indefinitely - so really
+                it should be reported as being in the Blocked state. */
+                if (eState == eSuspended) {
+                    vTaskSuspendAll();
+                    {
+                        if (listLIST_ITEM_CONTAINER(&(pxTCB->xEventListItem)) !=
+                            NULL) {
+                            pxTaskStatus->eCurrentState = eBlocked;
+                        }
+                    }
+                    (void)xTaskResumeAll();
+                }
+            }
+# endif /* INCLUDE_vTaskSuspend */
+        }
+    } else {
+        pxTaskStatus->eCurrentState = eTaskGetState(pxTCB);
+    }
+
+    /* Obtaining the stack space takes some time, so the xGetFreeStackSpace
+    parameter is provided to allow it to be skipped. */
+    if (xGetFreeStackSpace != pdFALSE) {
+# if (portSTACK_GROWTH > 0)
+        {
+            pxTaskStatus->usStackHighWaterMark =
+                prvTaskCheckFreeStackSpace((uint8_t *)pxTCB->pxEndOfStack);
+        }
+# else
+        {
+            pxTaskStatus->usStackHighWaterMark =
+                prvTaskCheckFreeStackSpace((uint8_t *)pxTCB->pxStack);
+        }
+# endif
+    } else {
+        pxTaskStatus->usStackHighWaterMark = 0;
+    }
+}
+
+#endif /* configUSE_TRACE_FACILITY */
+/*-----------------------------------------------------------*/
+
+#if (configUSE_TRACE_FACILITY == 1)
+
+/* Populate pxTaskStatusArray with a TaskStatus_t entry (state forced to
+   eState) for every task in pxList, walking the list once via
+   listGET_OWNER_OF_NEXT_ENTRY until it wraps back to the first task.
+   Returns the number of entries written; the caller must size the array. */
+static UBaseType_t prvListTasksWithinSingleList(
+    TaskStatus_t *pxTaskStatusArray,
+    List_t *pxList,
+    eTaskState eState)
+{
+    configLIST_VOLATILE TCB_t *pxNextTCB, *pxFirstTCB;
+    UBaseType_t uxTask = 0;
+
+    if (listCURRENT_LIST_LENGTH(pxList) > (UBaseType_t)0) {
+        listGET_OWNER_OF_NEXT_ENTRY(
+            pxFirstTCB,
+            pxList); /*lint !e9079 void * is used as this macro is used with
+                        timers and co-routines too. Alignment is known to be
+                        fine as the type of the pointer stored and retrieved is
+                        the same. */
+
+        /* Populate an TaskStatus_t structure within the
+        pxTaskStatusArray array for each task that is referenced from
+        pxList. See the definition of TaskStatus_t in task.h for the
+        meaning of each TaskStatus_t structure member. */
+        do {
+            listGET_OWNER_OF_NEXT_ENTRY(
+                pxNextTCB,
+                pxList); /*lint !e9079 void * is used as this macro is used with
+                            timers and co-routines too. Alignment is known to
+                            be fine as the type of the pointer stored and
+                            retrieved is the same. */
+            vTaskGetInfo(
+                (TaskHandle_t)pxNextTCB,
+                &(pxTaskStatusArray[uxTask]),
+                pdTRUE,
+                eState);
+            uxTask++;
+        } while (pxNextTCB != pxFirstTCB);
+    } else {
+        mtCOVERAGE_TEST_MARKER();
+    }
+
+    return uxTask;
+}
+
+#endif /* configUSE_TRACE_FACILITY */
+/*-----------------------------------------------------------*/
+
+#if ( \
+    (configUSE_TRACE_FACILITY == 1) || \
+    (INCLUDE_uxTaskGetStackHighWaterMark == 1) || \
+    (INCLUDE_uxTaskGetStackHighWaterMark2 == 1))
+
+/* Count how many StackType_t words at the far end of a task's stack still
+   hold the tskSTACK_FILL_BYTE pattern written at task creation, i.e. the
+   minimum free stack ever. pucStackByte must point at the stack end
+   appropriate for portSTACK_GROWTH; the walk direction follows its sign. */
+static configSTACK_DEPTH_TYPE prvTaskCheckFreeStackSpace(
+    const uint8_t *pucStackByte)
+{
+    uint32_t ulCount = 0U;
+
+    while (*pucStackByte == (uint8_t)tskSTACK_FILL_BYTE) {
+        pucStackByte -= portSTACK_GROWTH;
+        ulCount++;
+    }
+
+    /* Convert the byte count into a count of StackType_t-sized words. */
+    ulCount /=
+        (uint32_t)sizeof(StackType_t); /*lint !e961 Casting is not redundant on
+                                          smaller architectures. */
+
+    return (configSTACK_DEPTH_TYPE)ulCount;
+}
+
+#endif /* ( ( configUSE_TRACE_FACILITY == 1 ) || ( \
+        INCLUDE_uxTaskGetStackHighWaterMark == 1 ) || ( \
+        INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if (INCLUDE_uxTaskGetStackHighWaterMark2 == 1)
+
+/* uxTaskGetStackHighWaterMark() and uxTaskGetStackHighWaterMark2() are the
+same except for their return type. Using configSTACK_DEPTH_TYPE allows the
+user to determine the return type. It gets around the problem of the value
+overflowing on 8-bit types without breaking backward compatibility for
+applications that expect an 8-bit return type. */
+/* Return the minimum free stack (in words) xTask has ever had; NULL means
+   the calling task. */
+configSTACK_DEPTH_TYPE uxTaskGetStackHighWaterMark2(TaskHandle_t xTask)
+{
+    TCB_t *pxTCB;
+    uint8_t *pucEndOfStack;
+    configSTACK_DEPTH_TYPE uxReturn;
+
+    /* uxTaskGetStackHighWaterMark() and uxTaskGetStackHighWaterMark2() are
+    the same except for their return type. Using configSTACK_DEPTH_TYPE
+    allows the user to determine the return type. It gets around the
+    problem of the value overflowing on 8-bit types without breaking
+    backward compatibility for applications that expect an 8-bit return
+    type. */
+
+    pxTCB = prvGetTCBFromHandle(xTask);
+
+# if portSTACK_GROWTH < 0
+    {
+        pucEndOfStack = (uint8_t *)pxTCB->pxStack;
+    }
+# else
+    {
+        pucEndOfStack = (uint8_t *)pxTCB->pxEndOfStack;
+    }
+# endif
+
+    uxReturn = prvTaskCheckFreeStackSpace(pucEndOfStack);
+
+    return uxReturn;
+}
+
+#endif /* INCLUDE_uxTaskGetStackHighWaterMark2 */
+/*-----------------------------------------------------------*/
+
+#if (INCLUDE_uxTaskGetStackHighWaterMark == 1)
+
+/* Legacy variant of uxTaskGetStackHighWaterMark2() returning UBaseType_t:
+   minimum free stack (in words) xTask has ever had; NULL means the calling
+   task. */
+UBaseType_t uxTaskGetStackHighWaterMark(TaskHandle_t xTask)
+{
+    TCB_t *pxTCB;
+    uint8_t *pucEndOfStack;
+    UBaseType_t uxReturn;
+
+    pxTCB = prvGetTCBFromHandle(xTask);
+
+    /* Probe from the unused end of the stack, which depends on the
+    direction the port's stack grows in. */
+# if portSTACK_GROWTH < 0
+    {
+        pucEndOfStack = (uint8_t *)pxTCB->pxStack;
+    }
+# else
+    {
+        pucEndOfStack = (uint8_t *)pxTCB->pxEndOfStack;
+    }
+# endif
+
+    uxReturn = (UBaseType_t)prvTaskCheckFreeStackSpace(pucEndOfStack);
+
+    return uxReturn;
+}
+
+#endif /* INCLUDE_uxTaskGetStackHighWaterMark */
+/*-----------------------------------------------------------*/
+
+#if (INCLUDE_vTaskDelete == 1)
+
+/* Release the resources owned by a deleted task: port-level cleanup,
+   optional Newlib reent reclamation, then the stack and/or TCB - freeing
+   only what was dynamically allocated, as recorded in
+   ucStaticallyAllocated. */
+static void prvDeleteTCB(TCB_t *pxTCB)
+{
+    /* This call is required specifically for the TriCore port. It must be
+    above the vPortFree() calls. The call is also used by ports/demos that
+    want to allocate and clean RAM statically. */
+    portCLEAN_UP_TCB(pxTCB);
+
+/* Free up the memory allocated by the scheduler for the task. It is up
+to the task to free any memory allocated at the application level.
+See the third party link http://www.nadler.com/embedded/newlibAndFreeRTOS.html
+for additional information. */
+# if (configUSE_NEWLIB_REENTRANT == 1)
+    {
+        _reclaim_reent(&(pxTCB->xNewLib_reent));
+    }
+# endif /* configUSE_NEWLIB_REENTRANT */
+
+# if ( \
+    (configSUPPORT_DYNAMIC_ALLOCATION == 1) && \
+    (configSUPPORT_STATIC_ALLOCATION == 0) && \
+    (portUSING_MPU_WRAPPERS == 0))
+    {
+        /* The task can only have been allocated dynamically - free both
+        the stack and TCB. */
+        vPortFree(pxTCB->pxStack);
+        vPortFree(pxTCB);
+    }
+# elif ( \
+    tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != \
+    0) /*lint !e731 !e9029 Macro has been consolidated for readability \
+          reasons. */
+    {
+        /* The task could have been allocated statically or dynamically, so
+        check what was statically allocated before trying to free the
+        memory. */
+        if (pxTCB->ucStaticallyAllocated ==
+            tskDYNAMICALLY_ALLOCATED_STACK_AND_TCB) {
+            /* Both the stack and TCB were allocated dynamically, so both
+            must be freed. */
+            vPortFree(pxTCB->pxStack);
+            vPortFree(pxTCB);
+        } else if (
+            pxTCB->ucStaticallyAllocated ==
+            tskSTATICALLY_ALLOCATED_STACK_ONLY) {
+            /* Only the stack was statically allocated, so the TCB is the
+            only memory that must be freed. */
+            vPortFree(pxTCB);
+        } else {
+            /* Neither the stack nor the TCB were allocated dynamically, so
+            nothing needs to be freed. */
+            configASSERT(
+                pxTCB->ucStaticallyAllocated ==
+                tskSTATICALLY_ALLOCATED_STACK_AND_TCB);
+            mtCOVERAGE_TEST_MARKER();
+        }
+    }
+# endif /* configSUPPORT_DYNAMIC_ALLOCATION */
+}
+
+#endif /* INCLUDE_vTaskDelete */
+/*-----------------------------------------------------------*/
+
+/* Recompute xNextTaskUnblockTime from the head of the current delayed
+   list: portMAX_DELAY when no task is delayed, otherwise the wake time of
+   the soonest-to-unblock task. */
+static void prvResetNextTaskUnblockTime(void)
+{
+    TCB_t *pxTCB;
+
+    if (listLIST_IS_EMPTY(pxDelayedTaskList) != pdFALSE) {
+        /* The new current delayed list is empty. Set xNextTaskUnblockTime to
+        the maximum possible value so it is extremely unlikely that the
+        if( xTickCount >= xNextTaskUnblockTime ) test will pass until
+        there is an item in the delayed list. */
+        xNextTaskUnblockTime = portMAX_DELAY;
+    } else {
+        /* The new current delayed list is not empty, get the value of
+        the item at the head of the delayed list. This is the time at
+        which the task at the head of the delayed list should be removed
+        from the Blocked state. */
+        (pxTCB) = listGET_OWNER_OF_HEAD_ENTRY(
+            pxDelayedTaskList); /*lint !e9079 void * is used as this macro is
+                                   used with timers and co-routines too.
+                                   Alignment is known to be fine as the type of
+                                   the pointer stored and retrieved is the same.
+                                 */
+        xNextTaskUnblockTime =
+            listGET_LIST_ITEM_VALUE(&((pxTCB)->xStateListItem));
+    }
+}
+/*-----------------------------------------------------------*/
+
+#if ((INCLUDE_xTaskGetCurrentTaskHandle == 1) || (configUSE_MUTEXES == 1))
+
+/* Return the handle of the task that is currently running. */
+TaskHandle_t xTaskGetCurrentTaskHandle(void)
+{
+    TaskHandle_t xReturn;
+
+    /* A critical section is not required as this is not called from
+    an interrupt and the current TCB will always be the same for any
+    individual execution thread. */
+    xReturn = pxCurrentTCB;
+
+    return xReturn;
+}
+
+#endif /* ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES \
+        == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+#if ((INCLUDE_xTaskGetSchedulerState == 1) || (configUSE_TIMERS == 1))
+
+/* Report the scheduler state: taskSCHEDULER_NOT_STARTED before
+   vTaskStartScheduler() runs, taskSCHEDULER_SUSPENDED while suspended via
+   vTaskSuspendAll(), otherwise taskSCHEDULER_RUNNING. */
+BaseType_t xTaskGetSchedulerState(void)
+{
+    BaseType_t xReturn;
+
+    if (xSchedulerRunning == pdFALSE) {
+        xReturn = taskSCHEDULER_NOT_STARTED;
+    } else {
+        if (uxSchedulerSuspended == (UBaseType_t)pdFALSE) {
+            xReturn = taskSCHEDULER_RUNNING;
+        } else {
+            xReturn = taskSCHEDULER_SUSPENDED;
+        }
+    }
+
+    return xReturn;
+}
+
+#endif /* ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 \
+        ) ) */
+/*-----------------------------------------------------------*/
+
+#if (configUSE_MUTEXES == 1)
+
+/* Raise the priority of the mutex holder to that of the calling task if
+the holder's priority is lower. Returns pdTRUE if inheritance occurred (or
+had already occurred), otherwise pdFALSE. Called with the mutex's queue
+locked or from within a critical section. */
+BaseType_t xTaskPriorityInherit(TaskHandle_t const pxMutexHolder)
+{
+ TCB_t *const pxMutexHolderTCB = pxMutexHolder;
+ BaseType_t xReturn = pdFALSE;
+
+ /* If the mutex was given back by an interrupt while the queue was
+ locked then the mutex holder might now be NULL. _RB_ Is this still
+ needed as interrupts can no longer use mutexes? */
+ if (pxMutexHolder != NULL) {
+ /* If the holder of the mutex has a priority below the priority of
+ the task attempting to obtain the mutex then it will temporarily
+ inherit the priority of the task attempting to obtain the mutex. */
+ if (pxMutexHolderTCB->uxPriority < pxCurrentTCB->uxPriority) {
+ /* Adjust the mutex holder state to account for its new
+ priority. Only reset the event list item value if the value is
+ not being used for anything else. */
+ if ((listGET_LIST_ITEM_VALUE(&(pxMutexHolderTCB->xEventListItem)) &
+ taskEVENT_LIST_ITEM_VALUE_IN_USE) == 0UL) {
+ listSET_LIST_ITEM_VALUE(
+ &(pxMutexHolderTCB->xEventListItem),
+ (TickType_t)configMAX_PRIORITIES -
+ (TickType_t)pxCurrentTCB
+ ->uxPriority); /*lint !e961 MISRA exception as the
+ casts are only redundant for some
+ ports. */
+ } else {
+ mtCOVERAGE_TEST_MARKER();
+ }
+
+ /* If the task being modified is in the ready state it will need
+ to be moved into a new list. */
+ if (listIS_CONTAINED_WITHIN(
+ &(pxReadyTasksLists[pxMutexHolderTCB->uxPriority]),
+ &(pxMutexHolderTCB->xStateListItem)) != pdFALSE) {
+ if (uxListRemove(&(pxMutexHolderTCB->xStateListItem)) ==
+ (UBaseType_t)0) {
+ /* It is known that the task is in its ready list so
+ there is no need to check again and the port level
+ reset macro can be called directly. */
+ portRESET_READY_PRIORITY(
+ pxMutexHolderTCB->uxPriority, uxTopReadyPriority);
+ } else {
+ mtCOVERAGE_TEST_MARKER();
+ }
+
+ /* Inherit the priority before being moved into the new list. */
+ pxMutexHolderTCB->uxPriority = pxCurrentTCB->uxPriority;
+ prvAddTaskToReadyList(pxMutexHolderTCB);
+ } else {
+ /* Just inherit the priority. */
+ pxMutexHolderTCB->uxPriority = pxCurrentTCB->uxPriority;
+ }
+
+ traceTASK_PRIORITY_INHERIT(
+ pxMutexHolderTCB, pxCurrentTCB->uxPriority);
+
+ /* Inheritance occurred. */
+ xReturn = pdTRUE;
+ } else {
+ if (pxMutexHolderTCB->uxBasePriority < pxCurrentTCB->uxPriority) {
+ /* The base priority of the mutex holder is lower than the
+ priority of the task attempting to take the mutex, but the
+ current priority of the mutex holder is not lower than the
+ priority of the task attempting to take the mutex.
+ Therefore the mutex holder must have already inherited a
+ priority, but inheritance would have occurred if that had
+ not been the case. */
+ xReturn = pdTRUE;
+ } else {
+ mtCOVERAGE_TEST_MARKER();
+ }
+ }
+ } else {
+ mtCOVERAGE_TEST_MARKER();
+ }
+
+ return xReturn;
+}
+
+#endif /* configUSE_MUTEXES */
+/*-----------------------------------------------------------*/
+
+#if (configUSE_MUTEXES == 1)
+
+/* Undo priority inheritance when the (running) mutex holder gives the
+mutex back. Returns pdTRUE if the caller should perform a context switch
+because the holder's priority was restored to its base priority. */
+BaseType_t xTaskPriorityDisinherit(TaskHandle_t const pxMutexHolder)
+{
+ TCB_t *const pxTCB = pxMutexHolder;
+ BaseType_t xReturn = pdFALSE;
+
+ if (pxMutexHolder != NULL) {
+ /* A task can only have an inherited priority if it holds the mutex.
+ If the mutex is held by a task then it cannot be given from an
+ interrupt, and if a mutex is given by the holding task then it must
+ be the running state task. */
+ configASSERT(pxTCB == pxCurrentTCB);
+ configASSERT(pxTCB->uxMutexesHeld);
+ (pxTCB->uxMutexesHeld)--;
+
+ /* Has the holder of the mutex inherited the priority of another
+ task? */
+ if (pxTCB->uxPriority != pxTCB->uxBasePriority) {
+ /* Only disinherit if no other mutexes are held. */
+ if (pxTCB->uxMutexesHeld == (UBaseType_t)0) {
+ /* A task can only have an inherited priority if it holds
+ the mutex. If the mutex is held by a task then it cannot be
+ given from an interrupt, and if a mutex is given by the
+ holding task then it must be the running state task. Remove
+ the holding task from the ready/delayed list. */
+ if (uxListRemove(&(pxTCB->xStateListItem)) == (UBaseType_t)0) {
+ taskRESET_READY_PRIORITY(pxTCB->uxPriority);
+ } else {
+ mtCOVERAGE_TEST_MARKER();
+ }
+
+ /* Disinherit the priority before adding the task into the
+ new ready list. */
+ traceTASK_PRIORITY_DISINHERIT(pxTCB, pxTCB->uxBasePriority);
+ pxTCB->uxPriority = pxTCB->uxBasePriority;
+
+ /* Reset the event list item value. It cannot be in use for
+ any other purpose if this task is running, and it must be
+ running to give back the mutex. */
+ listSET_LIST_ITEM_VALUE(
+ &(pxTCB->xEventListItem),
+ (TickType_t)configMAX_PRIORITIES -
+ (TickType_t)
+ pxTCB->uxPriority); /*lint !e961 MISRA exception as
+ the casts are only redundant
+ for some ports. */
+ prvAddTaskToReadyList(pxTCB);
+
+ /* Return true to indicate that a context switch is required.
+ This is only actually required in the corner case whereby
+ multiple mutexes were held and the mutexes were given back
+ in an order different to that in which they were taken.
+ If a context switch did not occur when the first mutex was
+ returned, even if a task was waiting on it, then a context
+ switch should occur when the last mutex is returned whether
+ a task is waiting on it or not. */
+ xReturn = pdTRUE;
+ } else {
+ mtCOVERAGE_TEST_MARKER();
+ }
+ } else {
+ mtCOVERAGE_TEST_MARKER();
+ }
+ } else {
+ mtCOVERAGE_TEST_MARKER();
+ }
+
+ return xReturn;
+}
+
+#endif /* configUSE_MUTEXES */
+/*-----------------------------------------------------------*/
+
+#if (configUSE_MUTEXES == 1)
+
+/* Adjust the mutex holder's priority after a task waiting for the mutex
+timed out: drop the holder to the greater of its base priority and the
+priority of the highest-priority task still waiting for the mutex. */
+void vTaskPriorityDisinheritAfterTimeout(
+ TaskHandle_t const pxMutexHolder,
+ UBaseType_t uxHighestPriorityWaitingTask)
+{
+ TCB_t *const pxTCB = pxMutexHolder;
+ UBaseType_t uxPriorityUsedOnEntry, uxPriorityToUse;
+ const UBaseType_t uxOnlyOneMutexHeld = (UBaseType_t)1;
+
+ if (pxMutexHolder != NULL) {
+ /* If pxMutexHolder is not NULL then the holder must hold at least
+ one mutex. */
+ configASSERT(pxTCB->uxMutexesHeld);
+
+ /* Determine the priority to which the priority of the task that
+ holds the mutex should be set. This will be the greater of the
+ holding task's base priority and the priority of the highest
+ priority task that is waiting to obtain the mutex. */
+ if (pxTCB->uxBasePriority < uxHighestPriorityWaitingTask) {
+ uxPriorityToUse = uxHighestPriorityWaitingTask;
+ } else {
+ uxPriorityToUse = pxTCB->uxBasePriority;
+ }
+
+ /* Does the priority need to change? */
+ if (pxTCB->uxPriority != uxPriorityToUse) {
+ /* Only disinherit if no other mutexes are held. This is a
+ simplification in the priority inheritance implementation. If
+ the task that holds the mutex is also holding other mutexes then
+ the other mutexes may have caused the priority inheritance. */
+ if (pxTCB->uxMutexesHeld == uxOnlyOneMutexHeld) {
+ /* If a task has timed out because it already holds the
+ mutex it was trying to obtain then it cannot have inherited
+ its own priority. */
+ configASSERT(pxTCB != pxCurrentTCB);
+
+ /* Disinherit the priority, remembering the previous
+ priority to facilitate determining the subject task's
+ state. */
+ traceTASK_PRIORITY_DISINHERIT(pxTCB, pxTCB->uxBasePriority);
+ uxPriorityUsedOnEntry = pxTCB->uxPriority;
+ pxTCB->uxPriority = uxPriorityToUse;
+
+ /* Only reset the event list item value if the value is not
+ being used for anything else. */
+ if ((listGET_LIST_ITEM_VALUE(&(pxTCB->xEventListItem)) &
+ taskEVENT_LIST_ITEM_VALUE_IN_USE) == 0UL) {
+ listSET_LIST_ITEM_VALUE(
+ &(pxTCB->xEventListItem),
+ (TickType_t)configMAX_PRIORITIES -
+ (TickType_t)
+ uxPriorityToUse); /*lint !e961 MISRA exception
+ as the casts are only
+ redundant for some ports.
+ */
+ } else {
+ mtCOVERAGE_TEST_MARKER();
+ }
+
+ /* If the running task is not the task that holds the mutex
+ then the task that holds the mutex could be in either the
+ Ready, Blocked or Suspended states. Only remove the task
+ from its current state list if it is in the Ready state as
+ the task's priority is going to change and there is one
+ Ready list per priority. */
+ if (listIS_CONTAINED_WITHIN(
+ &(pxReadyTasksLists[uxPriorityUsedOnEntry]),
+ &(pxTCB->xStateListItem)) != pdFALSE) {
+ if (uxListRemove(&(pxTCB->xStateListItem)) ==
+ (UBaseType_t)0) {
+ /* It is known that the task is in its ready list so
+ there is no need to check again and the port level
+ reset macro can be called directly. */
+ portRESET_READY_PRIORITY(
+ pxTCB->uxPriority, uxTopReadyPriority);
+ } else {
+ mtCOVERAGE_TEST_MARKER();
+ }
+
+ prvAddTaskToReadyList(pxTCB);
+ } else {
+ mtCOVERAGE_TEST_MARKER();
+ }
+ } else {
+ mtCOVERAGE_TEST_MARKER();
+ }
+ } else {
+ mtCOVERAGE_TEST_MARKER();
+ }
+ } else {
+ mtCOVERAGE_TEST_MARKER();
+ }
+}
+
+#endif /* configUSE_MUTEXES */
+/*-----------------------------------------------------------*/
+
+#if (portCRITICAL_NESTING_IN_TCB == 1)
+
+/* Enter a critical section, tracking the nesting depth in the current
+task's TCB. Interrupts are disabled before the count is incremented so the
+count itself is updated atomically with respect to this core. */
+void vTaskEnterCritical(void)
+{
+ portDISABLE_INTERRUPTS();
+
+ if (xSchedulerRunning != pdFALSE) {
+ (pxCurrentTCB->uxCriticalNesting)++;
+
+ /* This is not the interrupt safe version of the enter critical
+ function so assert() if it is being called from an interrupt
+ context. Only API functions that end in "FromISR" can be used in an
+ interrupt. Only assert if the critical nesting count is 1 to
+ protect against recursive calls if the assert function also uses a
+ critical section. */
+ if (pxCurrentTCB->uxCriticalNesting == 1) {
+ portASSERT_IF_IN_ISR();
+ }
+ } else {
+ mtCOVERAGE_TEST_MARKER();
+ }
+}
+
+#endif /* portCRITICAL_NESTING_IN_TCB */
+/*-----------------------------------------------------------*/
+
+#if (portCRITICAL_NESTING_IN_TCB == 1)
+
+/* Exit a critical section. Interrupts are only re-enabled when the
+per-task nesting count in the TCB returns to zero, so nested critical
+sections unwind correctly. */
+void vTaskExitCritical(void)
+{
+ if (xSchedulerRunning != pdFALSE) {
+ if (pxCurrentTCB->uxCriticalNesting > 0U) {
+ (pxCurrentTCB->uxCriticalNesting)--;
+
+ if (pxCurrentTCB->uxCriticalNesting == 0U) {
+ portENABLE_INTERRUPTS();
+ } else {
+ mtCOVERAGE_TEST_MARKER();
+ }
+ } else {
+ mtCOVERAGE_TEST_MARKER();
+ }
+ } else {
+ mtCOVERAGE_TEST_MARKER();
+ }
+}
+
+#endif /* portCRITICAL_NESTING_IN_TCB */
+/*-----------------------------------------------------------*/
+
+#if ( \
+ (configUSE_TRACE_FACILITY == 1) && \
+ (configUSE_STATS_FORMATTING_FUNCTIONS > 0))
+
+/* Copy pcTaskName into pcBuffer, space-padded to a fixed column width of
+configMAX_TASK_NAME_LEN - 1 characters, then NUL-terminate. Returns a
+pointer to the terminating NUL so the caller can append to it. NOTE: the
+caller must guarantee pcBuffer has room for configMAX_TASK_NAME_LEN
+characters. */
+static char *prvWriteNameToBuffer(char *pcBuffer, const char *pcTaskName)
+{
+ size_t x;
+
+ /* Start by copying the entire string. */
+ strcpy(pcBuffer, pcTaskName);
+
+ /* Pad the end of the string with spaces to ensure columns line up when
+ printed out. */
+ for (x = strlen(pcBuffer); x < (size_t)(configMAX_TASK_NAME_LEN - 1); x++) {
+ pcBuffer[x] = ' ';
+ }
+
+ /* Terminate. */
+ pcBuffer[x] = (char)0x00;
+
+ /* Return the new end of string. */
+ return &(pcBuffer[x]);
+}
+
+#endif /* ( configUSE_TRACE_FACILITY == 1 ) && ( \
+ configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) */
+/*-----------------------------------------------------------*/
+
+#if ( \
+ (configUSE_TRACE_FACILITY == 1) && \
+ (configUSE_STATS_FORMATTING_FUNCTIONS > 0) && \
+ (configSUPPORT_DYNAMIC_ALLOCATION == 1))
+
+/* Format a human-readable table of task name, state, priority, stack
+high-water mark and task number into pcWriteBuffer. NOTE(review): the
+writes via sprintf() are unbounded - the caller must supply a buffer large
+enough for all tasks; verify sizing at the call site. */
+void vTaskList(char *pcWriteBuffer)
+{
+ TaskStatus_t *pxTaskStatusArray;
+ UBaseType_t uxArraySize, x;
+ char cStatus;
+
+ /*
+ * PLEASE NOTE:
+ *
+ * This function is provided for convenience only, and is used by many
+ * of the demo applications. Do not consider it to be part of the
+ * scheduler.
+ *
+ * vTaskList() calls uxTaskGetSystemState(), then formats part of the
+ * uxTaskGetSystemState() output into a human readable table that
+ * displays task names, states and stack usage.
+ *
+ * vTaskList() has a dependency on the sprintf() C library function that
+ * might bloat the code size, use a lot of stack, and provide different
+ * results on different platforms. An alternative, tiny, third party,
+ * and limited functionality implementation of sprintf() is provided in
+ * many of the FreeRTOS/Demo sub-directories in a file called
+ * printf-stdarg.c (note printf-stdarg.c does not provide a full
+ * snprintf() implementation!).
+ *
+ * It is recommended that production systems call uxTaskGetSystemState()
+ * directly to get access to raw stats data, rather than indirectly
+ * through a call to vTaskList().
+ */
+
+ /* Make sure the write buffer does not contain a string. */
+ *pcWriteBuffer = (char)0x00;
+
+ /* Take a snapshot of the number of tasks in case it changes while this
+ function is executing. */
+ uxArraySize = uxCurrentNumberOfTasks;
+
+ /* Allocate an array index for each task. NOTE! if
+ configSUPPORT_DYNAMIC_ALLOCATION is set to 0 then pvPortMalloc() will
+ equate to NULL. */
+ pxTaskStatusArray = pvPortMalloc(
+ uxCurrentNumberOfTasks *
+ sizeof(
+ TaskStatus_t)); /*lint !e9079 All values returned by pvPortMalloc()
+ have at least the alignment required by the MCU's
+ stack and this allocation allocates a struct that
+ has the alignment requirements of a pointer. */
+
+ if (pxTaskStatusArray != NULL) {
+ /* Generate the (binary) data. */
+ uxArraySize =
+ uxTaskGetSystemState(pxTaskStatusArray, uxArraySize, NULL);
+
+ /* Create a human readable table from the binary data. */
+ for (x = 0; x < uxArraySize; x++) {
+ switch (pxTaskStatusArray[x].eCurrentState) {
+ case eRunning:
+ cStatus = tskRUNNING_CHAR;
+ break;
+
+ case eReady:
+ cStatus = tskREADY_CHAR;
+ break;
+
+ case eBlocked:
+ cStatus = tskBLOCKED_CHAR;
+ break;
+
+ case eSuspended:
+ cStatus = tskSUSPENDED_CHAR;
+ break;
+
+ case eDeleted:
+ cStatus = tskDELETED_CHAR;
+ break;
+
+ case eInvalid: /* Fall through. */
+ default: /* Should not get here, but it is included
+ to prevent static checking errors. */
+ cStatus = (char)0x00;
+ break;
+ }
+
+ /* Write the task name to the string, padding with spaces so it
+ can be printed in tabular form more easily. */
+ pcWriteBuffer = prvWriteNameToBuffer(
+ pcWriteBuffer, pxTaskStatusArray[x].pcTaskName);
+
+ /* Write the rest of the string. */
+ sprintf(
+ pcWriteBuffer,
+ "\t%c\t%u\t%u\t%u\r\n",
+ cStatus,
+ (unsigned int)pxTaskStatusArray[x].uxCurrentPriority,
+ (unsigned int)pxTaskStatusArray[x].usStackHighWaterMark,
+ (unsigned int)pxTaskStatusArray[x]
+ .xTaskNumber); /*lint !e586 sprintf() allowed as this is
+ compiled with many compilers and this is a
+ utility function only - not part of the
+ core kernel implementation. */
+ pcWriteBuffer += strlen(
+ pcWriteBuffer); /*lint !e9016 Pointer arithmetic ok on char
+ pointers especially as in this case where it
+ best denotes the intent of the code. */
+ }
+
+ /* Free the array again. NOTE! If configSUPPORT_DYNAMIC_ALLOCATION
+ is 0 then vPortFree() will be #defined to nothing. */
+ vPortFree(pxTaskStatusArray);
+ } else {
+ mtCOVERAGE_TEST_MARKER();
+ }
+}
+
+#endif /* ( ( configUSE_TRACE_FACILITY == 1 ) && ( \
+ configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) && ( \
+ configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) */
+/*----------------------------------------------------------*/
+
+#if ( \
+ (configGENERATE_RUN_TIME_STATS == 1) && \
+ (configUSE_STATS_FORMATTING_FUNCTIONS > 0) && \
+ (configSUPPORT_DYNAMIC_ALLOCATION == 1))
+
+/* Format a human-readable table of each task's absolute run time counter
+and its percentage of total run time into pcWriteBuffer. NOTE(review):
+like vTaskList(), the sprintf() writes are unbounded - the caller must
+provide a sufficiently large buffer. */
+void vTaskGetRunTimeStats(char *pcWriteBuffer)
+{
+ TaskStatus_t *pxTaskStatusArray;
+ UBaseType_t uxArraySize, x;
+ uint32_t ulTotalTime, ulStatsAsPercentage;
+
+# if (configUSE_TRACE_FACILITY != 1)
+ {
+#error configUSE_TRACE_FACILITY must also be set to 1 in FreeRTOSConfig.h to use vTaskGetRunTimeStats().
+ }
+# endif
+
+ /*
+ * PLEASE NOTE:
+ *
+ * This function is provided for convenience only, and is used by many
+ * of the demo applications. Do not consider it to be part of the
+ * scheduler.
+ *
+ * vTaskGetRunTimeStats() calls uxTaskGetSystemState(), then formats part
+ * of the uxTaskGetSystemState() output into a human readable table that
+ * displays the amount of time each task has spent in the Running state
+ * in both absolute and percentage terms.
+ *
+ * vTaskGetRunTimeStats() has a dependency on the sprintf() C library
+ * function that might bloat the code size, use a lot of stack, and
+ * provide different results on different platforms. An alternative,
+ * tiny, third party, and limited functionality implementation of
+ * sprintf() is provided in many of the FreeRTOS/Demo sub-directories in
+ * a file called printf-stdarg.c (note printf-stdarg.c does not provide
+ * a full snprintf() implementation!).
+ *
+ * It is recommended that production systems call uxTaskGetSystemState()
+ * directly to get access to raw stats data, rather than indirectly
+ * through a call to vTaskGetRunTimeStats().
+ */
+
+ /* Make sure the write buffer does not contain a string. */
+ *pcWriteBuffer = (char)0x00;
+
+ /* Take a snapshot of the number of tasks in case it changes while this
+ function is executing. */
+ uxArraySize = uxCurrentNumberOfTasks;
+
+ /* Allocate an array index for each task. NOTE! If
+ configSUPPORT_DYNAMIC_ALLOCATION is set to 0 then pvPortMalloc() will
+ equate to NULL. */
+ pxTaskStatusArray = pvPortMalloc(
+ uxCurrentNumberOfTasks *
+ sizeof(
+ TaskStatus_t)); /*lint !e9079 All values returned by pvPortMalloc()
+ have at least the alignment required by the MCU's
+ stack and this allocation allocates a struct that
+ has the alignment requirements of a pointer. */
+
+ if (pxTaskStatusArray != NULL) {
+ /* Generate the (binary) data. */
+ uxArraySize =
+ uxTaskGetSystemState(pxTaskStatusArray, uxArraySize, &ulTotalTime);
+
+ /* For percentage calculations. */
+ ulTotalTime /= 100UL;
+
+ /* Avoid divide by zero errors. */
+ if (ulTotalTime > 0UL) {
+ /* Create a human readable table from the binary data. */
+ for (x = 0; x < uxArraySize; x++) {
+ /* What percentage of the total run time has the task used?
+ This will always be rounded down to the nearest integer.
+ ulTotalRunTimeDiv100 has already been divided by 100. */
+ ulStatsAsPercentage =
+ pxTaskStatusArray[x].ulRunTimeCounter / ulTotalTime;
+
+ /* Write the task name to the string, padding with
+ spaces so it can be printed in tabular form more
+ easily. */
+ pcWriteBuffer = prvWriteNameToBuffer(
+ pcWriteBuffer, pxTaskStatusArray[x].pcTaskName);
+
+ if (ulStatsAsPercentage > 0UL) {
+# ifdef portLU_PRINTF_SPECIFIER_REQUIRED
+ {
+ sprintf(
+ pcWriteBuffer,
+ "\t%lu\t\t%lu%%\r\n",
+ pxTaskStatusArray[x].ulRunTimeCounter,
+ ulStatsAsPercentage);
+ }
+# else
+ {
+ /* sizeof( int ) == sizeof( long ) so a smaller
+ printf() library can be used. */
+ sprintf(
+ pcWriteBuffer,
+ "\t%u\t\t%u%%\r\n",
+ (unsigned int)pxTaskStatusArray[x].ulRunTimeCounter,
+ (unsigned int)
+ ulStatsAsPercentage); /*lint !e586 sprintf()
+ allowed as this is
+ compiled with many
+ compilers and this is a
+ utility function only -
+ not part of the core
+ kernel implementation.
+ */
+ }
+# endif
+ } else {
+/* If the percentage is zero here then the task has
+consumed less than 1% of the total run time. */
+# ifdef portLU_PRINTF_SPECIFIER_REQUIRED
+ {
+ sprintf(
+ pcWriteBuffer,
+ "\t%lu\t\t<1%%\r\n",
+ pxTaskStatusArray[x].ulRunTimeCounter);
+ }
+# else
+ {
+ /* sizeof( int ) == sizeof( long ) so a smaller
+ printf() library can be used. */
+ sprintf(
+ pcWriteBuffer,
+ "\t%u\t\t<1%%\r\n",
+ (unsigned int)pxTaskStatusArray[x]
+ .ulRunTimeCounter); /*lint !e586 sprintf()
+ allowed as this is
+ compiled with many
+ compilers and this is a
+ utility function only -
+ not part of the core
+ kernel implementation. */
+ }
+# endif
+ }
+
+ pcWriteBuffer +=
+ strlen(pcWriteBuffer); /*lint !e9016 Pointer arithmetic ok
+ on char pointers especially as in
+ this case where it best denotes
+ the intent of the code. */
+ }
+ } else {
+ mtCOVERAGE_TEST_MARKER();
+ }
+
+ /* Free the array again. NOTE! If configSUPPORT_DYNAMIC_ALLOCATION
+ is 0 then vPortFree() will be #defined to nothing. */
+ vPortFree(pxTaskStatusArray);
+ } else {
+ mtCOVERAGE_TEST_MARKER();
+ }
+}
+
+#endif /* ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( \
+ configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) && ( \
+ configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+/* Return the current task's event list item value and reset it to its
+normal (priority-encoded) value so the item can be used with queues and
+semaphores again. Note the return type is TickType_t despite the "ux"
+prefix - this matches the upstream FreeRTOS declaration. */
+TickType_t uxTaskResetEventItemValue(void)
+{
+ TickType_t uxReturn;
+
+ uxReturn = listGET_LIST_ITEM_VALUE(&(pxCurrentTCB->xEventListItem));
+
+ /* Reset the event list item to its normal value - so it can be used with
+ queues and semaphores. */
+ listSET_LIST_ITEM_VALUE(
+ &(pxCurrentTCB->xEventListItem),
+ ((TickType_t)configMAX_PRIORITIES -
+ (TickType_t)pxCurrentTCB
+ ->uxPriority)); /*lint !e961 MISRA exception as the casts are only
+ redundant for some ports. */
+
+ return uxReturn;
+}
+/*-----------------------------------------------------------*/
+
+#if (configUSE_MUTEXES == 1)
+
+/* Increment the held-mutex count of the current task and return its
+handle (which becomes the mutex holder). Returns NULL if called before any
+task has been created. */
+TaskHandle_t pvTaskIncrementMutexHeldCount(void)
+{
+ /* If xSemaphoreCreateMutex() is called before any tasks have been created
+ then pxCurrentTCB will be NULL. */
+ if (pxCurrentTCB != NULL) {
+ (pxCurrentTCB->uxMutexesHeld)++;
+ }
+
+ return pxCurrentTCB;
+}
+
+#endif /* configUSE_MUTEXES */
+/*-----------------------------------------------------------*/
+
+#if (configUSE_TASK_NOTIFICATIONS == 1)
+
+/* Block (for up to xTicksToWait) until the calling task's notification
+value is non-zero, then return the value. The stored value is either
+cleared to zero or decremented on exit, depending on xClearCountOnExit -
+making the notification usable as a lightweight binary or counting
+semaphore. */
+uint32_t ulTaskNotifyTake(BaseType_t xClearCountOnExit, TickType_t xTicksToWait)
+{
+ uint32_t ulReturn;
+
+ taskENTER_CRITICAL();
+ {
+ /* Only block if the notification count is not already non-zero. */
+ if (pxCurrentTCB->ulNotifiedValue == 0UL) {
+ /* Mark this task as waiting for a notification. */
+ pxCurrentTCB->ucNotifyState = taskWAITING_NOTIFICATION;
+
+ if (xTicksToWait > (TickType_t)0) {
+ prvAddCurrentTaskToDelayedList(xTicksToWait, pdTRUE);
+ traceTASK_NOTIFY_TAKE_BLOCK();
+
+ /* All ports are written to allow a yield in a critical
+ section (some will yield immediately, others wait until the
+ critical section exits) - but it is not something that
+ application code should ever do. */
+ portYIELD_WITHIN_API();
+ } else {
+ mtCOVERAGE_TEST_MARKER();
+ }
+ } else {
+ mtCOVERAGE_TEST_MARKER();
+ }
+ }
+ taskEXIT_CRITICAL();
+
+ taskENTER_CRITICAL();
+ {
+ traceTASK_NOTIFY_TAKE();
+ ulReturn = pxCurrentTCB->ulNotifiedValue;
+
+ if (ulReturn != 0UL) {
+ if (xClearCountOnExit != pdFALSE) {
+ pxCurrentTCB->ulNotifiedValue = 0UL;
+ } else {
+ pxCurrentTCB->ulNotifiedValue = ulReturn - (uint32_t)1;
+ }
+ } else {
+ mtCOVERAGE_TEST_MARKER();
+ }
+
+ pxCurrentTCB->ucNotifyState = taskNOT_WAITING_NOTIFICATION;
+ }
+ taskEXIT_CRITICAL();
+
+ return ulReturn;
+}
+
+#endif /* configUSE_TASK_NOTIFICATIONS */
+/*-----------------------------------------------------------*/
+
+#if (configUSE_TASK_NOTIFICATIONS == 1)
+
+/* Block (for up to xTicksToWait) until the calling task receives a
+notification. ulBitsToClearOnEntry/ulBitsToClearOnExit are cleared from
+the task's notification value on entry and on (successful) exit
+respectively; the value is optionally reported via pulNotificationValue.
+Returns pdTRUE if a notification was received, pdFALSE on timeout. */
+BaseType_t xTaskNotifyWait(
+ uint32_t ulBitsToClearOnEntry,
+ uint32_t ulBitsToClearOnExit,
+ uint32_t *pulNotificationValue,
+ TickType_t xTicksToWait)
+{
+ BaseType_t xReturn;
+
+ taskENTER_CRITICAL();
+ {
+ /* Only block if a notification is not already pending. */
+ if (pxCurrentTCB->ucNotifyState != taskNOTIFICATION_RECEIVED) {
+ /* Clear bits in the task's notification value as bits may get
+ set by the notifying task or interrupt. This can be used to
+ clear the value to zero. */
+ pxCurrentTCB->ulNotifiedValue &= ~ulBitsToClearOnEntry;
+
+ /* Mark this task as waiting for a notification. */
+ pxCurrentTCB->ucNotifyState = taskWAITING_NOTIFICATION;
+
+ if (xTicksToWait > (TickType_t)0) {
+ prvAddCurrentTaskToDelayedList(xTicksToWait, pdTRUE);
+ traceTASK_NOTIFY_WAIT_BLOCK();
+
+ /* All ports are written to allow a yield in a critical
+ section (some will yield immediately, others wait until the
+ critical section exits) - but it is not something that
+ application code should ever do. */
+ portYIELD_WITHIN_API();
+ } else {
+ mtCOVERAGE_TEST_MARKER();
+ }
+ } else {
+ mtCOVERAGE_TEST_MARKER();
+ }
+ }
+ taskEXIT_CRITICAL();
+
+ taskENTER_CRITICAL();
+ {
+ traceTASK_NOTIFY_WAIT();
+
+ if (pulNotificationValue != NULL) {
+ /* Output the current notification value, which may or may not
+ have changed. */
+ *pulNotificationValue = pxCurrentTCB->ulNotifiedValue;
+ }
+
+ /* If ucNotifyValue is set then either the task never entered the
+ blocked state (because a notification was already pending) or the
+ task unblocked because of a notification. Otherwise the task
+ unblocked because of a timeout. */
+ if (pxCurrentTCB->ucNotifyState != taskNOTIFICATION_RECEIVED) {
+ /* A notification was not received. */
+ xReturn = pdFALSE;
+ } else {
+ /* A notification was already pending or a notification was
+ received while the task was waiting. */
+ pxCurrentTCB->ulNotifiedValue &= ~ulBitsToClearOnExit;
+ xReturn = pdTRUE;
+ }
+
+ pxCurrentTCB->ucNotifyState = taskNOT_WAITING_NOTIFICATION;
+ }
+ taskEXIT_CRITICAL();
+
+ return xReturn;
+}
+
+#endif /* configUSE_TASK_NOTIFICATIONS */
+/*-----------------------------------------------------------*/
+
+#if (configUSE_TASK_NOTIFICATIONS == 1)
+
+/* Send a notification to xTaskToNotify, updating its notification value
+according to eAction (set bits, increment, overwrite, conditional write or
+no action), optionally returning the previous value, and unblocking the
+task if it was waiting for a notification. Returns pdFAIL only for
+eSetValueWithoutOverwrite when a notification was already pending. */
+BaseType_t xTaskGenericNotify(
+ TaskHandle_t xTaskToNotify,
+ uint32_t ulValue,
+ eNotifyAction eAction,
+ uint32_t *pulPreviousNotificationValue)
+{
+ TCB_t *pxTCB;
+ BaseType_t xReturn = pdPASS;
+ uint8_t ucOriginalNotifyState;
+
+ configASSERT(xTaskToNotify);
+ pxTCB = xTaskToNotify;
+
+ taskENTER_CRITICAL();
+ {
+ if (pulPreviousNotificationValue != NULL) {
+ *pulPreviousNotificationValue = pxTCB->ulNotifiedValue;
+ }
+
+ ucOriginalNotifyState = pxTCB->ucNotifyState;
+
+ pxTCB->ucNotifyState = taskNOTIFICATION_RECEIVED;
+
+ switch (eAction) {
+ case eSetBits:
+ pxTCB->ulNotifiedValue |= ulValue;
+ break;
+
+ case eIncrement:
+ (pxTCB->ulNotifiedValue)++;
+ break;
+
+ case eSetValueWithOverwrite:
+ pxTCB->ulNotifiedValue = ulValue;
+ break;
+
+ case eSetValueWithoutOverwrite:
+ if (ucOriginalNotifyState != taskNOTIFICATION_RECEIVED) {
+ pxTCB->ulNotifiedValue = ulValue;
+ } else {
+ /* The value could not be written to the task. */
+ xReturn = pdFAIL;
+ }
+ break;
+
+ case eNoAction:
+ /* The task is being notified without its notify value being
+ updated. */
+ break;
+
+ default:
+ /* Should not get here if all enums are handled.
+ Artificially force an assert by testing a value the
+ compiler can't assume is const. */
+ configASSERT(pxTCB->ulNotifiedValue == ~0UL);
+
+ break;
+ }
+
+ traceTASK_NOTIFY();
+
+ /* If the task is in the blocked state specifically to wait for a
+ notification then unblock it now. */
+ if (ucOriginalNotifyState == taskWAITING_NOTIFICATION) {
+ (void)uxListRemove(&(pxTCB->xStateListItem));
+ prvAddTaskToReadyList(pxTCB);
+
+ /* The task should not have been on an event list. */
+ configASSERT(
+ listLIST_ITEM_CONTAINER(&(pxTCB->xEventListItem)) == NULL);
+
+# if (configUSE_TICKLESS_IDLE != 0)
+ {
+ /* If a task is blocked waiting for a notification then
+ xNextTaskUnblockTime might be set to the blocked task's time
+ out time. If the task is unblocked for a reason other than
+ a timeout xNextTaskUnblockTime is normally left unchanged,
+ because it will automatically get reset to a new value when
+ the tick count equals xNextTaskUnblockTime. However if
+ tickless idling is used it might be more important to enter
+ sleep mode at the earliest possible time - so reset
+ xNextTaskUnblockTime here to ensure it is updated at the
+ earliest possible time. */
+ prvResetNextTaskUnblockTime();
+ }
+# endif
+
+ if (pxTCB->uxPriority > pxCurrentTCB->uxPriority) {
+ /* The notified task has a priority above the currently
+ executing task so a yield is required. */
+ taskYIELD_IF_USING_PREEMPTION();
+ } else {
+ mtCOVERAGE_TEST_MARKER();
+ }
+ } else {
+ mtCOVERAGE_TEST_MARKER();
+ }
+ }
+ taskEXIT_CRITICAL();
+
+ return xReturn;
+}
+
+#endif /* configUSE_TASK_NOTIFICATIONS */
+/*-----------------------------------------------------------*/
+
+#if (configUSE_TASK_NOTIFICATIONS == 1)
+
+/* Interrupt-safe version of xTaskGenericNotify(). Performs the same
+eAction update on the target task's notification value from an ISR; if the
+woken task has a higher priority than the running task this is reported
+via pxHigherPriorityTaskWoken and by setting xYieldPending. */
+BaseType_t xTaskGenericNotifyFromISR(
+ TaskHandle_t xTaskToNotify,
+ uint32_t ulValue,
+ eNotifyAction eAction,
+ uint32_t *pulPreviousNotificationValue,
+ BaseType_t *pxHigherPriorityTaskWoken)
+{
+ TCB_t *pxTCB;
+ uint8_t ucOriginalNotifyState;
+ BaseType_t xReturn = pdPASS;
+ UBaseType_t uxSavedInterruptStatus;
+
+ configASSERT(xTaskToNotify);
+
+ /* RTOS ports that support interrupt nesting have the concept of a
+ maximum system call (or maximum API call) interrupt priority.
+ Interrupts that are above the maximum system call priority are kept
+ permanently enabled, even when the RTOS kernel is in a critical section,
+ but cannot make any calls to FreeRTOS API functions. If configASSERT()
+ is defined in FreeRTOSConfig.h then
+ portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
+ failure if a FreeRTOS API function is called from an interrupt that has
+ been assigned a priority above the configured maximum system call
+ priority. Only FreeRTOS functions that end in FromISR can be called
+ from interrupts that have been assigned a priority at or (logically)
+ below the maximum system call interrupt priority. FreeRTOS maintains a
+ separate interrupt safe API to ensure interrupt entry is as fast and as
+ simple as possible. More information (albeit Cortex-M specific) is
+ provided on the following link:
+ http://www.freertos.org/RTOS-Cortex-M3-M4.html */
+ portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
+
+ pxTCB = xTaskToNotify;
+
+ uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
+ {
+ if (pulPreviousNotificationValue != NULL) {
+ *pulPreviousNotificationValue = pxTCB->ulNotifiedValue;
+ }
+
+ ucOriginalNotifyState = pxTCB->ucNotifyState;
+ pxTCB->ucNotifyState = taskNOTIFICATION_RECEIVED;
+
+ switch (eAction) {
+ case eSetBits:
+ pxTCB->ulNotifiedValue |= ulValue;
+ break;
+
+ case eIncrement:
+ (pxTCB->ulNotifiedValue)++;
+ break;
+
+ case eSetValueWithOverwrite:
+ pxTCB->ulNotifiedValue = ulValue;
+ break;
+
+ case eSetValueWithoutOverwrite:
+ if (ucOriginalNotifyState != taskNOTIFICATION_RECEIVED) {
+ pxTCB->ulNotifiedValue = ulValue;
+ } else {
+ /* The value could not be written to the task. */
+ xReturn = pdFAIL;
+ }
+ break;
+
+ case eNoAction:
+ /* The task is being notified without its notify value being
+ updated. */
+ break;
+
+ default:
+ /* Should not get here if all enums are handled.
+ Artificially force an assert by testing a value the
+ compiler can't assume is const. */
+ configASSERT(pxTCB->ulNotifiedValue == ~0UL);
+ break;
+ }
+
+ traceTASK_NOTIFY_FROM_ISR();
+
+ /* If the task is in the blocked state specifically to wait for a
+ notification then unblock it now. */
+ if (ucOriginalNotifyState == taskWAITING_NOTIFICATION) {
+ /* The task should not have been on an event list. */
+ configASSERT(
+ listLIST_ITEM_CONTAINER(&(pxTCB->xEventListItem)) == NULL);
+
+ if (uxSchedulerSuspended == (UBaseType_t)pdFALSE) {
+ (void)uxListRemove(&(pxTCB->xStateListItem));
+ prvAddTaskToReadyList(pxTCB);
+ } else {
+ /* The delayed and ready lists cannot be accessed, so hold
+ this task pending until the scheduler is resumed. */
+ vListInsertEnd(&(xPendingReadyList), &(pxTCB->xEventListItem));
+ }
+
+ if (pxTCB->uxPriority > pxCurrentTCB->uxPriority) {
+ /* The notified task has a priority above the currently
+ executing task so a yield is required. */
+ if (pxHigherPriorityTaskWoken != NULL) {
+ *pxHigherPriorityTaskWoken = pdTRUE;
+ }
+
+ /* Mark that a yield is pending in case the user is not
+ using the "xHigherPriorityTaskWoken" parameter to an ISR
+ safe FreeRTOS function. */
+ xYieldPending = pdTRUE;
+ } else {
+ mtCOVERAGE_TEST_MARKER();
+ }
+ }
+ }
+ portCLEAR_INTERRUPT_MASK_FROM_ISR(uxSavedInterruptStatus);
+
+ return xReturn;
+}
+
+#endif /* configUSE_TASK_NOTIFICATIONS */
+/*-----------------------------------------------------------*/
+
+#if (configUSE_TASK_NOTIFICATIONS == 1)
+
+/* Interrupt-safe "give": increment the target task's notification value
+(counting-semaphore style) and unblock it if it was waiting for a
+notification; a required yield is reported via pxHigherPriorityTaskWoken
+and xYieldPending. */
+void vTaskNotifyGiveFromISR(
+ TaskHandle_t xTaskToNotify,
+ BaseType_t *pxHigherPriorityTaskWoken)
+{
+ TCB_t *pxTCB;
+ uint8_t ucOriginalNotifyState;
+ UBaseType_t uxSavedInterruptStatus;
+
+ configASSERT(xTaskToNotify);
+
+ /* RTOS ports that support interrupt nesting have the concept of a
+ maximum system call (or maximum API call) interrupt priority.
+ Interrupts that are above the maximum system call priority are kept
+ permanently enabled, even when the RTOS kernel is in a critical section,
+ but cannot make any calls to FreeRTOS API functions. If configASSERT()
+ is defined in FreeRTOSConfig.h then
+ portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
+ failure if a FreeRTOS API function is called from an interrupt that has
+ been assigned a priority above the configured maximum system call
+ priority. Only FreeRTOS functions that end in FromISR can be called
+ from interrupts that have been assigned a priority at or (logically)
+ below the maximum system call interrupt priority. FreeRTOS maintains a
+ separate interrupt safe API to ensure interrupt entry is as fast and as
+ simple as possible. More information (albeit Cortex-M specific) is
+ provided on the following link:
+ http://www.freertos.org/RTOS-Cortex-M3-M4.html */
+ portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
+
+ pxTCB = xTaskToNotify;
+
+ uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
+ {
+ ucOriginalNotifyState = pxTCB->ucNotifyState;
+ pxTCB->ucNotifyState = taskNOTIFICATION_RECEIVED;
+
+ /* 'Giving' is equivalent to incrementing a count in a counting
+ semaphore. */
+ (pxTCB->ulNotifiedValue)++;
+
+ traceTASK_NOTIFY_GIVE_FROM_ISR();
+
+ /* If the task is in the blocked state specifically to wait for a
+ notification then unblock it now. */
+ if (ucOriginalNotifyState == taskWAITING_NOTIFICATION) {
+ /* The task should not have been on an event list. */
+ configASSERT(
+ listLIST_ITEM_CONTAINER(&(pxTCB->xEventListItem)) == NULL);
+
+ if (uxSchedulerSuspended == (UBaseType_t)pdFALSE) {
+ (void)uxListRemove(&(pxTCB->xStateListItem));
+ prvAddTaskToReadyList(pxTCB);
+ } else {
+ /* The delayed and ready lists cannot be accessed, so hold
+ this task pending until the scheduler is resumed. */
+ vListInsertEnd(&(xPendingReadyList), &(pxTCB->xEventListItem));
+ }
+
+ if (pxTCB->uxPriority > pxCurrentTCB->uxPriority) {
+ /* The notified task has a priority above the currently
+ executing task so a yield is required. */
+ if (pxHigherPriorityTaskWoken != NULL) {
+ *pxHigherPriorityTaskWoken = pdTRUE;
+ }
+
+ /* Mark that a yield is pending in case the user is not
+ using the "xHigherPriorityTaskWoken" parameter in an ISR
+ safe FreeRTOS function. */
+ xYieldPending = pdTRUE;
+ } else {
+ mtCOVERAGE_TEST_MARKER();
+ }
+ }
+ }
+ portCLEAR_INTERRUPT_MASK_FROM_ISR(uxSavedInterruptStatus);
+}
+
+#endif /* configUSE_TASK_NOTIFICATIONS */
+/*-----------------------------------------------------------*/
+
+#if (configUSE_TASK_NOTIFICATIONS == 1)
+
+/* Clear the pending-notification state of the task referenced by xTask
+(or the calling task when xTask is NULL).  Returns pdPASS if a
+notification was pending, otherwise pdFAIL.  The notification value
+itself is left unchanged. */
+BaseType_t xTaskNotifyStateClear(TaskHandle_t xTask)
+{
+    TCB_t *pxTCB;
+    BaseType_t xReturn;
+
+    /* If null is passed in here then it is the calling task that is having
+    its notification state cleared. */
+    pxTCB = prvGetTCBFromHandle(xTask);
+
+    taskENTER_CRITICAL();
+    {
+        if (pxTCB->ucNotifyState == taskNOTIFICATION_RECEIVED) {
+            pxTCB->ucNotifyState = taskNOT_WAITING_NOTIFICATION;
+            xReturn = pdPASS;
+        } else {
+            xReturn = pdFAIL;
+        }
+    }
+    taskEXIT_CRITICAL();
+
+    return xReturn;
+}
+
+#endif /* configUSE_TASK_NOTIFICATIONS */
+/*-----------------------------------------------------------*/
+
+#if (configUSE_TASK_NOTIFICATIONS == 1)
+
+/* Clear the bits of ulBitsToClear in the notification value of the task
+referenced by xTask (or the calling task when xTask is NULL), and return
+the notification value as it was before the bits were cleared. */
+uint32_t ulTaskNotifyValueClear(TaskHandle_t xTask, uint32_t ulBitsToClear)
+{
+    TCB_t *pxTCB;
+    uint32_t ulReturn;
+
+    /* If null is passed in here then it is the calling task that is having
+    its notification value cleared. */
+    pxTCB = prvGetTCBFromHandle(xTask);
+
+    taskENTER_CRITICAL();
+    {
+        /* Return the notification as it was before the bits were cleared,
+        then clear the bit mask.  Note: read from pxTCB (the target task),
+        not pxCurrentTCB, so the correct prior value is returned when xTask
+        is not the calling task. */
+        ulReturn = pxTCB->ulNotifiedValue;
+        pxTCB->ulNotifiedValue &= ~ulBitsToClear;
+    }
+    taskEXIT_CRITICAL();
+
+    return ulReturn;
+}
+
+#endif /* configUSE_TASK_NOTIFICATIONS */
+/*-----------------------------------------------------------*/
+
+#if ( \
+ (configGENERATE_RUN_TIME_STATS == 1) && \
+ (INCLUDE_xTaskGetIdleTaskHandle == 1))
+
+/* Return the idle task's accumulated ulRunTimeCounter value.  Only built
+when both configGENERATE_RUN_TIME_STATS and INCLUDE_xTaskGetIdleTaskHandle
+are 1 (see the enclosing #if). */
+uint32_t ulTaskGetIdleRunTimeCounter(void)
+{
+    return xIdleTaskHandle->ulRunTimeCounter;
+}
+
+#endif
+/*-----------------------------------------------------------*/
+
+/* Move the calling task off the ready list and onto the appropriate
+blocked list: the suspended list when blocking indefinitely
+(INCLUDE_vTaskSuspend is 1 and xTicksToWait == portMAX_DELAY), the
+overflow delayed list when the computed wake time wraps the tick count,
+or the current delayed list otherwise.  xNextTaskUnblockTime is updated
+when the new wake time becomes the earliest pending wake time. */
+static void prvAddCurrentTaskToDelayedList(
+    TickType_t xTicksToWait,
+    const BaseType_t xCanBlockIndefinitely)
+{
+    TickType_t xTimeToWake;
+    const TickType_t xConstTickCount = xTickCount;
+
+#if (INCLUDE_xTaskAbortDelay == 1)
+    {
+        /* About to enter a delayed list, so ensure the ucDelayAborted flag is
+        reset to pdFALSE so it can be detected as having been set to pdTRUE
+        when the task leaves the Blocked state. */
+        pxCurrentTCB->ucDelayAborted = pdFALSE;
+    }
+#endif
+
+    /* Remove the task from the ready list before adding it to the blocked list
+    as the same list item is used for both lists. */
+    if (uxListRemove(&(pxCurrentTCB->xStateListItem)) == (UBaseType_t)0) {
+        /* The current task must be in a ready list, so there is no need to
+        check, and the port reset macro can be called directly. */
+        portRESET_READY_PRIORITY(
+            pxCurrentTCB->uxPriority,
+            uxTopReadyPriority); /*lint !e931 pxCurrentTCB cannot change as it
+                                    is the calling task.
+                                    pxCurrentTCB->uxPriority and
+                                    uxTopReadyPriority cannot change as called
+                                    with scheduler suspended or in a critical
+                                    section. */
+    } else {
+        mtCOVERAGE_TEST_MARKER();
+    }
+
+#if (INCLUDE_vTaskSuspend == 1)
+    {
+        if ((xTicksToWait == portMAX_DELAY) &&
+            (xCanBlockIndefinitely != pdFALSE)) {
+            /* Add the task to the suspended task list instead of a delayed task
+            list to ensure it is not woken by a timing event. It will block
+            indefinitely. */
+            vListInsertEnd(
+                &xSuspendedTaskList, &(pxCurrentTCB->xStateListItem));
+        } else {
+            /* Calculate the time at which the task should be woken if the event
+            does not occur. This may overflow but this doesn't matter, the
+            kernel will manage it correctly. */
+            xTimeToWake = xConstTickCount + xTicksToWait;
+
+            /* The list item will be inserted in wake time order. */
+            listSET_LIST_ITEM_VALUE(
+                &(pxCurrentTCB->xStateListItem), xTimeToWake);
+
+            if (xTimeToWake < xConstTickCount) {
+                /* Wake time has overflowed. Place this item in the overflow
+                list. */
+                vListInsert(
+                    pxOverflowDelayedTaskList, &(pxCurrentTCB->xStateListItem));
+            } else {
+                /* The wake time has not overflowed, so the current block list
+                is used. */
+                vListInsert(pxDelayedTaskList, &(pxCurrentTCB->xStateListItem));
+
+                /* If the task entering the blocked state was placed at the
+                head of the list of blocked tasks then xNextTaskUnblockTime
+                needs to be updated too. */
+                if (xTimeToWake < xNextTaskUnblockTime) {
+                    xNextTaskUnblockTime = xTimeToWake;
+                } else {
+                    mtCOVERAGE_TEST_MARKER();
+                }
+            }
+        }
+    }
+#else /* INCLUDE_vTaskSuspend */
+    {
+        /* Calculate the time at which the task should be woken if the event
+        does not occur. This may overflow but this doesn't matter, the kernel
+        will manage it correctly. */
+        xTimeToWake = xConstTickCount + xTicksToWait;
+
+        /* The list item will be inserted in wake time order. */
+        listSET_LIST_ITEM_VALUE(&(pxCurrentTCB->xStateListItem), xTimeToWake);
+
+        if (xTimeToWake < xConstTickCount) {
+            /* Wake time has overflowed. Place this item in the overflow list.
+            */
+            vListInsert(
+                pxOverflowDelayedTaskList, &(pxCurrentTCB->xStateListItem));
+        } else {
+            /* The wake time has not overflowed, so the current block list is
+             * used. */
+            vListInsert(pxDelayedTaskList, &(pxCurrentTCB->xStateListItem));
+
+            /* If the task entering the blocked state was placed at the head of
+            the list of blocked tasks then xNextTaskUnblockTime needs to be
+            updated too. */
+            if (xTimeToWake < xNextTaskUnblockTime) {
+                xNextTaskUnblockTime = xTimeToWake;
+            } else {
+                mtCOVERAGE_TEST_MARKER();
+            }
+        }
+
+        /* Avoid compiler warning when INCLUDE_vTaskSuspend is not 1. */
+        (void)xCanBlockIndefinitely;
+    }
+#endif /* INCLUDE_vTaskSuspend */
+}
+
+/* Code below here allows additional code to be inserted into this source file,
+especially where access to file scope functions and data is needed (for example
+when performing module tests). */
+
+#ifdef FREERTOS_MODULE_TEST
+# include "tasks_test_access_functions.h"
+#endif
+
+#if (configINCLUDE_FREERTOS_TASK_C_ADDITIONS_H == 1)
+
+# include "freertos_tasks_c_additions.h"
+
+# ifdef FREERTOS_TASKS_C_ADDITIONS_INIT
+static void freertos_tasks_c_additions_init(void)
+{
+ FREERTOS_TASKS_C_ADDITIONS_INIT();
+}
+# endif
+
+#endif
diff --git a/product/rcar/src/CMSIS-FreeRTOS/Source/timers.c b/product/rcar/src/CMSIS-FreeRTOS/Source/timers.c
new file mode 100644
index 00000000..25302292
--- /dev/null
+++ b/product/rcar/src/CMSIS-FreeRTOS/Source/timers.c
@@ -0,0 +1,1200 @@
+/*
+ * FreeRTOS Kernel V10.3.1
+ * Copyright (C) 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * http://www.FreeRTOS.org
+ * http://aws.amazon.com/freertos
+ *
+ * 1 tab == 4 spaces!
+ */
+
+/* Standard includes. */
+#include <stdlib.h>
+
+/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
+all the API functions to use the MPU wrappers. That should only be done when
+task.h is included from an application file. */
+#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+
+#include "FreeRTOS.h"
+#include "queue.h"
+#include "task.h"
+#include "timers.h"
+
+#if (INCLUDE_xTimerPendFunctionCall == 1) && (configUSE_TIMERS == 0)
+#error configUSE_TIMERS must be set to 1 to make the xTimerPendFunctionCall() function available.
+#endif
+
+/* Lint e9021, e961 and e750 are suppressed as a MISRA exception justified
+because the MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined
+for the header files above, but not in this file, in order to generate the
+correct privileged Vs unprivileged linkage and placement. */
+#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e9021 !e961 !e750. */
+
+/* This entire source file will be skipped if the application is not configured
+to include software timer functionality. This #if is closed at the very bottom
+of this file. If you want to include software timer functionality then ensure
+configUSE_TIMERS is set to 1 in FreeRTOSConfig.h. */
+#if (configUSE_TIMERS == 1)
+
+/* Misc definitions. */
+# define tmrNO_DELAY (TickType_t)0U
+
+/* The name assigned to the timer service task. This can be overridden by
+defining configTIMER_SERVICE_TASK_NAME in FreeRTOSConfig.h. */
+# ifndef configTIMER_SERVICE_TASK_NAME
+# define configTIMER_SERVICE_TASK_NAME "Tmr Svc"
+# endif
+
+/* Bit definitions used in the ucStatus member of a timer structure. */
+# define tmrSTATUS_IS_ACTIVE ((uint8_t)0x01)
+# define tmrSTATUS_IS_STATICALLY_ALLOCATED ((uint8_t)0x02)
+# define tmrSTATUS_IS_AUTORELOAD ((uint8_t)0x04)
+
+/* The definition of the timers themselves. */
+typedef struct tmrTimerControl /* The old naming convention is used to prevent
+ breaking kernel aware debuggers. */
+{
+ const char *pcTimerName; /*<< Text name. This is not used by the kernel, it is included simply to make debugging easier. */ /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
+ ListItem_t xTimerListItem; /*<< Standard linked list item as used by all
+ kernel features for event management. */
+ TickType_t
+ xTimerPeriodInTicks; /*<< How quickly and often the timer expires. */
+ void *pvTimerID; /*<< An ID to identify the timer. This allows the timer to
+ be identified when the same callback is used for
+ multiple timers. */
+ TimerCallbackFunction_t
+ pxCallbackFunction; /*<< The function that will be called when the timer
+ expires. */
+# if (configUSE_TRACE_FACILITY == 1)
+ UBaseType_t uxTimerNumber; /*<< An ID assigned by trace tools such as
+ FreeRTOS+Trace */
+# endif
+ uint8_t ucStatus; /*<< Holds bits to say if the timer was statically
+ allocated or not, and if it is active or not. */
+} xTIMER;
+
+/* The old xTIMER name is maintained above then typedefed to the new Timer_t
+name below to enable the use of older kernel aware debuggers. */
+typedef xTIMER Timer_t;
+
+/* The definition of messages that can be sent and received on the timer queue.
+Two types of message can be queued - messages that manipulate a software timer,
+and messages that request the execution of a non-timer related callback. The
+two message types are defined in two separate structures, xTimerParametersType
+and xCallbackParametersType respectively. */
+typedef struct tmrTimerParameters {
+ TickType_t
+ xMessageValue; /*<< An optional value used by a subset of commands, for
+ example, when changing the period of a timer. */
+ Timer_t *pxTimer; /*<< The timer to which the command will be applied. */
+} TimerParameter_t;
+
+typedef struct tmrCallbackParameters {
+ PendedFunction_t
+ pxCallbackFunction; /* << The callback function to execute. */
+ void *pvParameter1; /* << The value that will be used as the callback
+ functions first parameter. */
+ uint32_t ulParameter2; /* << The value that will be used as the callback
+ functions second parameter. */
+} CallbackParameters_t;
+
+/* The structure that contains the two message types, along with an identifier
+that is used to determine which message type is valid. */
+typedef struct tmrTimerQueueMessage {
+ BaseType_t
+ xMessageID; /*<< The command being sent to the timer service task. */
+ union {
+ TimerParameter_t xTimerParameters;
+
+/* Don't include xCallbackParameters if it is not going to be used as
+it makes the structure (and therefore the timer queue) larger. */
+# if (INCLUDE_xTimerPendFunctionCall == 1)
+ CallbackParameters_t xCallbackParameters;
+# endif /* INCLUDE_xTimerPendFunctionCall */
+ } u;
+} DaemonTaskMessage_t;
+
+/*lint -save -e956 A manual analysis and inspection has been used to determine
+which static variables must be declared volatile. */
+
+/* The list in which active timers are stored. Timers are referenced in expire
+time order, with the nearest expiry time at the front of the list. Only the
+timer service task is allowed to access these lists.
+xActiveTimerList1 and xActiveTimerList2 could be at function scope but that
+breaks some kernel aware debuggers, and debuggers that rely on removing the
+static qualifier. */
+PRIVILEGED_DATA static List_t xActiveTimerList1;
+PRIVILEGED_DATA static List_t xActiveTimerList2;
+PRIVILEGED_DATA static List_t *pxCurrentTimerList;
+PRIVILEGED_DATA static List_t *pxOverflowTimerList;
+
+/* A queue that is used to send commands to the timer service task. */
+PRIVILEGED_DATA static QueueHandle_t xTimerQueue = NULL;
+PRIVILEGED_DATA static TaskHandle_t xTimerTaskHandle = NULL;
+
+/*lint -restore */
+
+/*-----------------------------------------------------------*/
+
+# if (configSUPPORT_STATIC_ALLOCATION == 1)
+
+/* If static allocation is supported then the application must provide the
+following callback function - which enables the application to optionally
+provide the memory that will be used by the timer task as the task's stack
+and TCB. */
+extern void vApplicationGetTimerTaskMemory(
+ StaticTask_t **ppxTimerTaskTCBBuffer,
+ StackType_t **ppxTimerTaskStackBuffer,
+ uint32_t *pulTimerTaskStackSize);
+
+# endif
+
+/*
+ * Initialise the infrastructure used by the timer service task if it has not
+ * been initialised already.
+ */
+static void prvCheckForValidListAndQueue(void) PRIVILEGED_FUNCTION;
+
+/*
+ * The timer service task (daemon). Timer functionality is controlled by this
+ * task. Other tasks communicate with the timer service task using the
+ * xTimerQueue queue.
+ */
+static portTASK_FUNCTION_PROTO(prvTimerTask, pvParameters) PRIVILEGED_FUNCTION;
+
+/*
+ * Called by the timer service task to interpret and process a command it
+ * received on the timer queue.
+ */
+static void prvProcessReceivedCommands(void) PRIVILEGED_FUNCTION;
+
+/*
+ * Insert the timer into either xActiveTimerList1, or xActiveTimerList2,
+ * depending on if the expire time causes a timer counter overflow.
+ */
+static BaseType_t prvInsertTimerInActiveList(
+ Timer_t *const pxTimer,
+ const TickType_t xNextExpiryTime,
+ const TickType_t xTimeNow,
+ const TickType_t xCommandTime) PRIVILEGED_FUNCTION;
+
+/*
+ * An active timer has reached its expire time. Reload the timer if it is an
+ * auto-reload timer, then call its callback.
+ */
+static void prvProcessExpiredTimer(
+ const TickType_t xNextExpireTime,
+ const TickType_t xTimeNow) PRIVILEGED_FUNCTION;
+
+/*
+ * The tick count has overflowed. Switch the timer lists after ensuring the
+ * current timer list does not still reference some timers.
+ */
+static void prvSwitchTimerLists(void) PRIVILEGED_FUNCTION;
+
+/*
+ * Obtain the current tick count, setting *pxTimerListsWereSwitched to pdTRUE
+ * if a tick count overflow occurred since prvSampleTimeNow() was last called.
+ */
+static TickType_t prvSampleTimeNow(BaseType_t *const pxTimerListsWereSwitched)
+ PRIVILEGED_FUNCTION;
+
+/*
+ * If the timer list contains any active timers then return the expire time of
+ * the timer that will expire first and set *pxListWasEmpty to false. If the
+ * timer list does not contain any timers then return 0 and set *pxListWasEmpty
+ * to pdTRUE.
+ */
+static TickType_t prvGetNextExpireTime(BaseType_t *const pxListWasEmpty)
+ PRIVILEGED_FUNCTION;
+
+/*
+ * If a timer has expired, process it. Otherwise, block the timer service task
+ * until either a timer does expire or a command is received.
+ */
+static void prvProcessTimerOrBlockTask(
+ const TickType_t xNextExpireTime,
+ BaseType_t xListWasEmpty) PRIVILEGED_FUNCTION;
+
+/*
+ * Called after a Timer_t structure has been allocated either statically or
+ * dynamically to fill in the structure's members.
+ */
+static void prvInitialiseNewTimer(
+ const char
+ *const pcTimerName, /*lint !e971 Unqualified char types are allowed for
+ strings and single characters only. */
+ const TickType_t xTimerPeriodInTicks,
+ const UBaseType_t uxAutoReload,
+ void *const pvTimerID,
+ TimerCallbackFunction_t pxCallbackFunction,
+ Timer_t *pxNewTimer) PRIVILEGED_FUNCTION;
+/*-----------------------------------------------------------*/
+
+/* Create the timer service/daemon task.  Called when the scheduler starts
+if configUSE_TIMERS is 1.  The timer lists and command queue are checked
+first because they may already exist if a timer was created before the
+scheduler was started.  Returns pdPASS on success (asserted). */
+BaseType_t xTimerCreateTimerTask(void)
+{
+    BaseType_t xReturn = pdFAIL;
+
+    /* This function is called when the scheduler is started if
+    configUSE_TIMERS is set to 1. Check that the infrastructure used by the
+    timer service task has been created/initialised. If timers have already
+    been created then the initialisation will already have been performed. */
+    prvCheckForValidListAndQueue();
+
+    if (xTimerQueue != NULL) {
+# if (configSUPPORT_STATIC_ALLOCATION == 1)
+        {
+            StaticTask_t *pxTimerTaskTCBBuffer = NULL;
+            StackType_t *pxTimerTaskStackBuffer = NULL;
+            uint32_t ulTimerTaskStackSize;
+
+            vApplicationGetTimerTaskMemory(
+                &pxTimerTaskTCBBuffer,
+                &pxTimerTaskStackBuffer,
+                &ulTimerTaskStackSize);
+            xTimerTaskHandle = xTaskCreateStatic(
+                prvTimerTask,
+                configTIMER_SERVICE_TASK_NAME,
+                ulTimerTaskStackSize,
+                NULL,
+                ((UBaseType_t)configTIMER_TASK_PRIORITY) | portPRIVILEGE_BIT,
+                pxTimerTaskStackBuffer,
+                pxTimerTaskTCBBuffer);
+
+            if (xTimerTaskHandle != NULL) {
+                xReturn = pdPASS;
+            }
+        }
+# else
+        {
+            xReturn = xTaskCreate(
+                prvTimerTask,
+                configTIMER_SERVICE_TASK_NAME,
+                configTIMER_TASK_STACK_DEPTH,
+                NULL,
+                ((UBaseType_t)configTIMER_TASK_PRIORITY) | portPRIVILEGE_BIT,
+                &xTimerTaskHandle);
+        }
+# endif /* configSUPPORT_STATIC_ALLOCATION */
+    } else {
+        mtCOVERAGE_TEST_MARKER();
+    }
+
+    configASSERT(xReturn);
+    return xReturn;
+}
+/*-----------------------------------------------------------*/
+
+# if (configSUPPORT_DYNAMIC_ALLOCATION == 1)
+
+/* Allocate a timer from the heap and initialise it via
+prvInitialiseNewTimer().  Returns NULL when the allocation fails,
+otherwise the handle of the new (not yet started) timer. */
+TimerHandle_t xTimerCreate(
+    const char
+        *const pcTimerName, /*lint !e971 Unqualified char types are allowed for
+                               strings and single characters only. */
+    const TickType_t xTimerPeriodInTicks,
+    const UBaseType_t uxAutoReload,
+    void *const pvTimerID,
+    TimerCallbackFunction_t pxCallbackFunction)
+{
+    Timer_t *pxNewTimer;
+
+    pxNewTimer = (Timer_t *)pvPortMalloc(
+        sizeof(Timer_t)); /*lint !e9087 !e9079 All values returned by
+                             pvPortMalloc() have at least the alignment required
+                             by the MCU's stack, and the first member of Timer_t
+                             is always a pointer to the timer's name. */
+
+    if (pxNewTimer != NULL) {
+        /* Status is thus far zero as the timer is not created statically
+        and has not been started. The auto-reload bit may get set in
+        prvInitialiseNewTimer. */
+        pxNewTimer->ucStatus = 0x00;
+        prvInitialiseNewTimer(
+            pcTimerName,
+            xTimerPeriodInTicks,
+            uxAutoReload,
+            pvTimerID,
+            pxCallbackFunction,
+            pxNewTimer);
+    }
+
+    return pxNewTimer;
+}
+
+# endif /* configSUPPORT_DYNAMIC_ALLOCATION */
+/*-----------------------------------------------------------*/
+
+# if (configSUPPORT_STATIC_ALLOCATION == 1)
+
+/* Initialise a timer using caller-provided storage; pxTimerBuffer must
+point to a StaticTimer_t that remains valid for the timer's lifetime.
+No heap allocation is performed. */
+TimerHandle_t xTimerCreateStatic(
+    const char
+        *const pcTimerName, /*lint !e971 Unqualified char types are allowed for
+                               strings and single characters only. */
+    const TickType_t xTimerPeriodInTicks,
+    const UBaseType_t uxAutoReload,
+    void *const pvTimerID,
+    TimerCallbackFunction_t pxCallbackFunction,
+    StaticTimer_t *pxTimerBuffer)
+{
+    Timer_t *pxNewTimer;
+
+# if (configASSERT_DEFINED == 1)
+    {
+        /* Sanity check that the size of the structure used to declare a
+        variable of type StaticTimer_t equals the size of the real timer
+        structure. */
+        volatile size_t xSize = sizeof(StaticTimer_t);
+        configASSERT(xSize == sizeof(Timer_t));
+        (void)xSize; /* Keeps lint quiet when configASSERT() is not defined. */
+    }
+# endif /* configASSERT_DEFINED */
+
+    /* A pointer to a StaticTimer_t structure MUST be provided, use it. */
+    configASSERT(pxTimerBuffer);
+    pxNewTimer =
+        (Timer_t *)pxTimerBuffer; /*lint !e740 !e9087 StaticTimer_t is a pointer
+                                     to a Timer_t, so guaranteed to be aligned
+                                     and sized correctly (checked by an
+                                     assert()), so this is safe. */
+
+    if (pxNewTimer != NULL) {
+        /* Timers can be created statically or dynamically so note this
+        timer was created statically in case it is later deleted. The
+        auto-reload bit may get set in prvInitialiseNewTimer(). */
+        pxNewTimer->ucStatus = tmrSTATUS_IS_STATICALLY_ALLOCATED;
+
+        prvInitialiseNewTimer(
+            pcTimerName,
+            xTimerPeriodInTicks,
+            uxAutoReload,
+            pvTimerID,
+            pxCallbackFunction,
+            pxNewTimer);
+    }
+
+    return pxNewTimer;
+}
+
+# endif /* configSUPPORT_STATIC_ALLOCATION */
+/*-----------------------------------------------------------*/
+
+/* Populate the members of a newly obtained Timer_t (dynamic or static)
+from the creation parameters and ensure the timer service lists/queue
+exist.  A period of zero is rejected by assertion. */
+static void prvInitialiseNewTimer(
+    const char
+        *const pcTimerName, /*lint !e971 Unqualified char types are allowed for
+                               strings and single characters only. */
+    const TickType_t xTimerPeriodInTicks,
+    const UBaseType_t uxAutoReload,
+    void *const pvTimerID,
+    TimerCallbackFunction_t pxCallbackFunction,
+    Timer_t *pxNewTimer)
+{
+    /* 0 is not a valid value for xTimerPeriodInTicks. */
+    configASSERT((xTimerPeriodInTicks > 0));
+
+    if (pxNewTimer != NULL) {
+        /* Ensure the infrastructure used by the timer service task has been
+        created/initialised. */
+        prvCheckForValidListAndQueue();
+
+        /* Initialise the timer structure members using the function
+        parameters. */
+        pxNewTimer->pcTimerName = pcTimerName;
+        pxNewTimer->xTimerPeriodInTicks = xTimerPeriodInTicks;
+        pxNewTimer->pvTimerID = pvTimerID;
+        pxNewTimer->pxCallbackFunction = pxCallbackFunction;
+        vListInitialiseItem(&(pxNewTimer->xTimerListItem));
+        if (uxAutoReload != pdFALSE) {
+            pxNewTimer->ucStatus |= tmrSTATUS_IS_AUTORELOAD;
+        }
+        traceTIMER_CREATE(pxNewTimer);
+    }
+}
+/*-----------------------------------------------------------*/
+
+/* Queue a command (xCommandID) for the timer service task to apply to
+xTimer.  Commands numbered tmrFIRST_FROM_ISR_COMMAND and above use the
+ISR-safe queue send; task-level commands may block for up to xTicksToWait,
+but only when the scheduler is running. */
+BaseType_t xTimerGenericCommand(
+    TimerHandle_t xTimer,
+    const BaseType_t xCommandID,
+    const TickType_t xOptionalValue,
+    BaseType_t *const pxHigherPriorityTaskWoken,
+    const TickType_t xTicksToWait)
+{
+    BaseType_t xReturn = pdFAIL;
+    DaemonTaskMessage_t xMessage;
+
+    configASSERT(xTimer);
+
+    /* Send a message to the timer service task to perform a particular action
+    on a particular timer definition. */
+    if (xTimerQueue != NULL) {
+        /* Send a command to the timer service task to start the xTimer timer.
+        */
+        xMessage.xMessageID = xCommandID;
+        xMessage.u.xTimerParameters.xMessageValue = xOptionalValue;
+        xMessage.u.xTimerParameters.pxTimer = xTimer;
+
+        if (xCommandID < tmrFIRST_FROM_ISR_COMMAND) {
+            if (xTaskGetSchedulerState() == taskSCHEDULER_RUNNING) {
+                xReturn =
+                    xQueueSendToBack(xTimerQueue, &xMessage, xTicksToWait);
+            } else {
+                xReturn = xQueueSendToBack(xTimerQueue, &xMessage, tmrNO_DELAY);
+            }
+        } else {
+            xReturn = xQueueSendToBackFromISR(
+                xTimerQueue, &xMessage, pxHigherPriorityTaskWoken);
+        }
+
+        traceTIMER_COMMAND_SEND(xTimer, xCommandID, xOptionalValue, xReturn);
+    } else {
+        mtCOVERAGE_TEST_MARKER();
+    }
+
+    return xReturn;
+}
+/*-----------------------------------------------------------*/
+
+/* Return the handle of the timer service/daemon task.  The handle is
+asserted non-NULL, so this must not be called before the scheduler has
+been started. */
+TaskHandle_t xTimerGetTimerDaemonTaskHandle(void)
+{
+    /* If xTimerGetTimerDaemonTaskHandle() is called before the scheduler has
+    been started, then xTimerTaskHandle will be NULL. */
+    configASSERT((xTimerTaskHandle != NULL));
+    return xTimerTaskHandle;
+}
+/*-----------------------------------------------------------*/
+
+/* Return the period, in ticks, of the given timer. */
+TickType_t xTimerGetPeriod(TimerHandle_t xTimer)
+{
+    Timer_t *pxTimer = xTimer;
+
+    configASSERT(xTimer);
+    return pxTimer->xTimerPeriodInTicks;
+}
+/*-----------------------------------------------------------*/
+
+/* Set (uxAutoReload != pdFALSE) or clear the timer's auto-reload status
+bit.  Done inside a critical section as ucStatus is read-modify-written. */
+void vTimerSetReloadMode(TimerHandle_t xTimer, const UBaseType_t uxAutoReload)
+{
+    Timer_t *pxTimer = xTimer;
+
+    configASSERT(xTimer);
+    taskENTER_CRITICAL();
+    {
+        if (uxAutoReload != pdFALSE) {
+            pxTimer->ucStatus |= tmrSTATUS_IS_AUTORELOAD;
+        } else {
+            pxTimer->ucStatus &= ~tmrSTATUS_IS_AUTORELOAD;
+        }
+    }
+    taskEXIT_CRITICAL();
+}
+/*-----------------------------------------------------------*/
+
+/* Query the timer's auto-reload status bit: returns pdTRUE for an
+auto-reload timer, pdFALSE for a one-shot timer. */
+UBaseType_t uxTimerGetReloadMode(TimerHandle_t xTimer)
+{
+    Timer_t *pxTimer = xTimer;
+    UBaseType_t uxReturn;
+
+    configASSERT(xTimer);
+    taskENTER_CRITICAL();
+    {
+        if ((pxTimer->ucStatus & tmrSTATUS_IS_AUTORELOAD) == 0) {
+            /* Not an auto-reload timer. */
+            uxReturn = (UBaseType_t)pdFALSE;
+        } else {
+            /* Is an auto-reload timer. */
+            uxReturn = (UBaseType_t)pdTRUE;
+        }
+    }
+    taskEXIT_CRITICAL();
+
+    return uxReturn;
+}
+/*-----------------------------------------------------------*/
+
+/* Return the tick time at which the timer will next expire, read from
+the value field of its active-list item. */
+TickType_t xTimerGetExpiryTime(TimerHandle_t xTimer)
+{
+    Timer_t *pxTimer = xTimer;
+    TickType_t xReturn;
+
+    configASSERT(xTimer);
+    xReturn = listGET_LIST_ITEM_VALUE(&(pxTimer->xTimerListItem));
+    return xReturn;
+}
+/*-----------------------------------------------------------*/
+
+/* Return the text name assigned to the timer when it was created. */
+const char *pcTimerGetName(
+    TimerHandle_t xTimer) /*lint !e971 Unqualified char types are allowed for
+                             strings and single characters only. */
+{
+    Timer_t *pxTimer = xTimer;
+
+    configASSERT(xTimer);
+    return pxTimer->pcTimerName;
+}
+/*-----------------------------------------------------------*/
+
+/* The timer at the head of the current list has reached its expiry time:
+remove it from the list, re-insert it (or command a restart if the new
+expiry has already passed) when it is an auto-reload timer, then invoke
+its callback function. */
+static void prvProcessExpiredTimer(
+    const TickType_t xNextExpireTime,
+    const TickType_t xTimeNow)
+{
+    BaseType_t xResult;
+    Timer_t *const pxTimer = (Timer_t *)listGET_OWNER_OF_HEAD_ENTRY(
+        pxCurrentTimerList); /*lint !e9087 !e9079 void * is used as this macro
+                                is used with tasks and co-routines too.
+                                Alignment is known to be fine as the type of the
+                                pointer stored and retrieved is the same. */
+
+    /* Remove the timer from the list of active timers. A check has already
+    been performed to ensure the list is not empty. */
+    (void)uxListRemove(&(pxTimer->xTimerListItem));
+    traceTIMER_EXPIRED(pxTimer);
+
+    /* If the timer is an auto-reload timer then calculate the next
+    expiry time and re-insert the timer in the list of active timers. */
+    if ((pxTimer->ucStatus & tmrSTATUS_IS_AUTORELOAD) != 0) {
+        /* The timer is inserted into a list using a time relative to anything
+        other than the current time. It will therefore be inserted into the
+        correct list relative to the time this task thinks it is now. */
+        if (prvInsertTimerInActiveList(
+                pxTimer,
+                (xNextExpireTime + pxTimer->xTimerPeriodInTicks),
+                xTimeNow,
+                xNextExpireTime) != pdFALSE) {
+            /* The timer expired before it was added to the active timer
+            list. Reload it now. */
+            xResult = xTimerGenericCommand(
+                pxTimer,
+                tmrCOMMAND_START_DONT_TRACE,
+                xNextExpireTime,
+                NULL,
+                tmrNO_DELAY);
+            configASSERT(xResult);
+            (void)xResult;
+        } else {
+            mtCOVERAGE_TEST_MARKER();
+        }
+    } else {
+        pxTimer->ucStatus &= ~tmrSTATUS_IS_ACTIVE;
+        mtCOVERAGE_TEST_MARKER();
+    }
+
+    /* Call the timer callback. */
+    pxTimer->pxCallbackFunction((TimerHandle_t)pxTimer);
+}
+/*-----------------------------------------------------------*/
+
+/* The timer service (daemon) task: loops forever, finding the next timer
+expiry time, processing an expired timer or blocking until that time (or
+a command arrives), then draining the command queue. */
+static portTASK_FUNCTION(prvTimerTask, pvParameters)
+{
+    TickType_t xNextExpireTime;
+    BaseType_t xListWasEmpty;
+
+    /* Just to avoid compiler warnings. */
+    (void)pvParameters;
+
+# if (configUSE_DAEMON_TASK_STARTUP_HOOK == 1)
+    {
+        extern void vApplicationDaemonTaskStartupHook(void);
+
+        /* Allow the application writer to execute some code in the context of
+        this task at the point the task starts executing. This is useful if the
+        application includes initialisation code that would benefit from
+        executing after the scheduler has been started. */
+        vApplicationDaemonTaskStartupHook();
+    }
+# endif /* configUSE_DAEMON_TASK_STARTUP_HOOK */
+
+    for (;;) {
+        /* Query the timers list to see if it contains any timers, and if so,
+        obtain the time at which the next timer will expire. */
+        xNextExpireTime = prvGetNextExpireTime(&xListWasEmpty);
+
+        /* If a timer has expired, process it. Otherwise, block this task
+        until either a timer does expire, or a command is received. */
+        prvProcessTimerOrBlockTask(xNextExpireTime, xListWasEmpty);
+
+        /* Empty the command queue. */
+        prvProcessReceivedCommands();
+    }
+}
+/*-----------------------------------------------------------*/
+
+/* If the head timer has expired, process it; otherwise block on the
+timer command queue until either the next expiry time is reached or a
+command arrives.  A tick-count overflow detected by prvSampleTimeNow()
+means the lists were already switched and processed, so nothing more is
+done this iteration. */
+static void prvProcessTimerOrBlockTask(
+    const TickType_t xNextExpireTime,
+    BaseType_t xListWasEmpty)
+{
+    TickType_t xTimeNow;
+    BaseType_t xTimerListsWereSwitched;
+
+    vTaskSuspendAll();
+    {
+        /* Obtain the time now to make an assessment as to whether the timer
+        has expired or not. If obtaining the time causes the lists to switch
+        then don't process this timer as any timers that remained in the list
+        when the lists were switched will have been processed within the
+        prvSampleTimeNow() function. */
+        xTimeNow = prvSampleTimeNow(&xTimerListsWereSwitched);
+        if (xTimerListsWereSwitched == pdFALSE) {
+            /* The tick count has not overflowed, has the timer expired? */
+            if ((xListWasEmpty == pdFALSE) && (xNextExpireTime <= xTimeNow)) {
+                (void)xTaskResumeAll();
+                prvProcessExpiredTimer(xNextExpireTime, xTimeNow);
+            } else {
+                /* The tick count has not overflowed, and the next expire
+                time has not been reached yet. This task should therefore
+                block to wait for the next expire time or a command to be
+                received - whichever comes first. The following line cannot
+                be reached unless xNextExpireTime > xTimeNow, except in the
+                case when the current timer list is empty. */
+                if (xListWasEmpty != pdFALSE) {
+                    /* The current timer list is empty - is the overflow list
+                    also empty? */
+                    xListWasEmpty = listLIST_IS_EMPTY(pxOverflowTimerList);
+                }
+
+                vQueueWaitForMessageRestricted(
+                    xTimerQueue, (xNextExpireTime - xTimeNow), xListWasEmpty);
+
+                if (xTaskResumeAll() == pdFALSE) {
+                    /* Yield to wait for either a command to arrive, or the
+                    block time to expire. If a command arrived between the
+                    critical section being exited and this yield then the yield
+                    will not cause the task to block. */
+                    portYIELD_WITHIN_API();
+                } else {
+                    mtCOVERAGE_TEST_MARKER();
+                }
+            }
+        } else {
+            (void)xTaskResumeAll();
+        }
+    }
+}
+/*-----------------------------------------------------------*/
+
+/* Return the expiry time of the timer at the head of the current timer
+list, or 0 when the list is empty (so the daemon task unblocks when the
+tick count rolls over); *pxListWasEmpty reports whether the list was
+empty. */
+static TickType_t prvGetNextExpireTime(BaseType_t *const pxListWasEmpty)
+{
+    TickType_t xNextExpireTime;
+
+    /* Timers are listed in expiry time order, with the head of the list
+    referencing the task that will expire first. Obtain the time at which
+    the timer with the nearest expiry time will expire. If there are no
+    active timers then just set the next expire time to 0. That will cause
+    this task to unblock when the tick count overflows, at which point the
+    timer lists will be switched and the next expiry time can be
+    re-assessed. */
+    *pxListWasEmpty = listLIST_IS_EMPTY(pxCurrentTimerList);
+    if (*pxListWasEmpty == pdFALSE) {
+        xNextExpireTime = listGET_ITEM_VALUE_OF_HEAD_ENTRY(pxCurrentTimerList);
+    } else {
+        /* Ensure the task unblocks when the tick count rolls over. */
+        xNextExpireTime = (TickType_t)0U;
+    }
+
+    return xNextExpireTime;
+}
+/*-----------------------------------------------------------*/
+
+/* Return the current tick count. A value lower than the one sampled on the
+previous call means the tick count overflowed since then; in that case the
+active/overflow timer lists are switched here and *pxTimerListsWereSwitched
+is set to pdTRUE, otherwise pdFALSE. */
+static TickType_t prvSampleTimeNow(BaseType_t *const pxTimerListsWereSwitched)
+{
+ TickType_t xTimeNow;
+ PRIVILEGED_DATA static TickType_t xLastTime =
+ (TickType_t)0U; /*lint !e956 Variable is only accessible to one task. */
+
+ xTimeNow = xTaskGetTickCount();
+
+ /* A smaller reading than last time can only mean the counter wrapped. */
+ if (xTimeNow < xLastTime) {
+ prvSwitchTimerLists();
+ *pxTimerListsWereSwitched = pdTRUE;
+ } else {
+ *pxTimerListsWereSwitched = pdFALSE;
+ }
+
+ xLastTime = xTimeNow;
+
+ return xTimeNow;
+}
+/*-----------------------------------------------------------*/
+
+/* Record xNextExpiryTime in the timer's list item and insert the timer into
+the current list, or into the overflow list when the expiry time has wrapped
+past the current tick count. Returns pdTRUE - and performs no insertion -
+when the timer has in fact already expired, in which case the caller must
+process it immediately. */
+static BaseType_t prvInsertTimerInActiveList(
+ Timer_t *const pxTimer,
+ const TickType_t xNextExpiryTime,
+ const TickType_t xTimeNow,
+ const TickType_t xCommandTime)
+{
+ BaseType_t xProcessTimerNow = pdFALSE;
+
+ listSET_LIST_ITEM_VALUE(&(pxTimer->xTimerListItem), xNextExpiryTime);
+ listSET_LIST_ITEM_OWNER(&(pxTimer->xTimerListItem), pxTimer);
+
+ if (xNextExpiryTime <= xTimeNow) {
+ /* Has the expiry time elapsed between the command to start/reset a
+ timer was issued, and the time the command was processed? */
+ if (((TickType_t)(xTimeNow - xCommandTime)) >=
+ pxTimer
+ ->xTimerPeriodInTicks) /*lint !e961 MISRA exception as the casts
+ are only redundant for some ports. */
+ {
+ /* The time between a command being issued and the command being
+ processed actually exceeds the timers period. */
+ xProcessTimerNow = pdTRUE;
+ } else {
+ /* Expiry tick already wrapped: it belongs on the overflow list. */
+ vListInsert(pxOverflowTimerList, &(pxTimer->xTimerListItem));
+ }
+ } else {
+ if ((xTimeNow < xCommandTime) && (xNextExpiryTime >= xCommandTime)) {
+ /* If, since the command was issued, the tick count has overflowed
+ but the expiry time has not, then the timer must have already passed
+ its expiry time and should be processed immediately. */
+ xProcessTimerNow = pdTRUE;
+ } else {
+ vListInsert(pxCurrentTimerList, &(pxTimer->xTimerListItem));
+ }
+ }
+
+ return xProcessTimerNow;
+}
+/*-----------------------------------------------------------*/
+
+/* Drain every message currently queued on xTimerQueue without blocking.
+Negative message IDs carry a pended function call that is executed inline;
+IDs >= 0 are timer commands (start/reset, stop, change-period, delete)
+applied to the timer named in the message. */
+static void prvProcessReceivedCommands(void)
+{
+ DaemonTaskMessage_t xMessage;
+ Timer_t *pxTimer;
+ BaseType_t xTimerListsWereSwitched, xResult;
+ TickType_t xTimeNow;
+
+ while (xQueueReceive(xTimerQueue, &xMessage, tmrNO_DELAY) !=
+ pdFAIL) /*lint !e603 xMessage does not have to be initialised as it
+ is passed out, not in, and it is not used unless
+ xQueueReceive() returns pdTRUE. */
+ {
+# if (INCLUDE_xTimerPendFunctionCall == 1)
+ {
+ /* Negative commands are pended function calls rather than timer
+ commands. */
+ if (xMessage.xMessageID < (BaseType_t)0) {
+ const CallbackParameters_t *const pxCallback =
+ &(xMessage.u.xCallbackParameters);
+
+ /* The timer uses the xCallbackParameters member to request a
+ callback be executed. Check the callback is not NULL. */
+ configASSERT(pxCallback);
+
+ /* Call the function. */
+ pxCallback->pxCallbackFunction(
+ pxCallback->pvParameter1, pxCallback->ulParameter2);
+ } else {
+ mtCOVERAGE_TEST_MARKER();
+ }
+ }
+# endif /* INCLUDE_xTimerPendFunctionCall */
+
+ /* Commands that are positive are timer commands rather than pended
+ function calls. */
+ if (xMessage.xMessageID >= (BaseType_t)0) {
+ /* The messages uses the xTimerParameters member to work on a
+ software timer. */
+ pxTimer = xMessage.u.xTimerParameters.pxTimer;
+
+ if (listIS_CONTAINED_WITHIN(NULL, &(pxTimer->xTimerListItem)) ==
+ pdFALSE) /*lint !e961. The cast is only redundant when NULL is
+ passed into the macro. */
+ {
+ /* The timer is in a list, remove it. */
+ (void)uxListRemove(&(pxTimer->xTimerListItem));
+ } else {
+ mtCOVERAGE_TEST_MARKER();
+ }
+
+ traceTIMER_COMMAND_RECEIVED(
+ pxTimer,
+ xMessage.xMessageID,
+ xMessage.u.xTimerParameters.xMessageValue);
+
+ /* In this case the xTimerListsWereSwitched parameter is not used,
+ but it must be present in the function call. prvSampleTimeNow()
+ must be called after the message is received from xTimerQueue so
+ there is no possibility of a higher priority task adding a message
+ to the message queue with a time that is ahead of the timer daemon
+ task (because it
+ pre-empted the timer daemon task after the xTimeNow value was set).
+ */
+ xTimeNow = prvSampleTimeNow(&xTimerListsWereSwitched);
+
+ switch (xMessage.xMessageID) {
+ case tmrCOMMAND_START:
+ case tmrCOMMAND_START_FROM_ISR:
+ case tmrCOMMAND_RESET:
+ case tmrCOMMAND_RESET_FROM_ISR:
+ case tmrCOMMAND_START_DONT_TRACE:
+ /* Start or restart a timer. */
+ pxTimer->ucStatus |= tmrSTATUS_IS_ACTIVE;
+ if (prvInsertTimerInActiveList(
+ pxTimer,
+ xMessage.u.xTimerParameters.xMessageValue +
+ pxTimer->xTimerPeriodInTicks,
+ xTimeNow,
+ xMessage.u.xTimerParameters.xMessageValue) != pdFALSE) {
+ /* The timer expired before it was added to the active
+ timer list. Process it now. */
+ pxTimer->pxCallbackFunction((TimerHandle_t)pxTimer);
+ traceTIMER_EXPIRED(pxTimer);
+
+ if ((pxTimer->ucStatus & tmrSTATUS_IS_AUTORELOAD) != 0) {
+ xResult = xTimerGenericCommand(
+ pxTimer,
+ tmrCOMMAND_START_DONT_TRACE,
+ xMessage.u.xTimerParameters.xMessageValue +
+ pxTimer->xTimerPeriodInTicks,
+ NULL,
+ tmrNO_DELAY);
+ configASSERT(xResult);
+ (void)xResult;
+ } else {
+ mtCOVERAGE_TEST_MARKER();
+ }
+ } else {
+ mtCOVERAGE_TEST_MARKER();
+ }
+ break;
+
+ case tmrCOMMAND_STOP:
+ case tmrCOMMAND_STOP_FROM_ISR:
+ /* The timer has already been removed from the active list. */
+ pxTimer->ucStatus &= ~tmrSTATUS_IS_ACTIVE;
+ break;
+
+ case tmrCOMMAND_CHANGE_PERIOD:
+ case tmrCOMMAND_CHANGE_PERIOD_FROM_ISR:
+ pxTimer->ucStatus |= tmrSTATUS_IS_ACTIVE;
+ pxTimer->xTimerPeriodInTicks =
+ xMessage.u.xTimerParameters.xMessageValue;
+ configASSERT((pxTimer->xTimerPeriodInTicks > 0));
+
+ /* The new period does not really have a reference, and can
+ be longer or shorter than the old one. The command time is
+ therefore set to the current time, and as the period cannot
+ be zero the next expiry time can only be in the future,
+ meaning (unlike for the xTimerStart() case above) there is
+ no fail case that needs to be handled here. */
+ (void)prvInsertTimerInActiveList(
+ pxTimer,
+ (xTimeNow + pxTimer->xTimerPeriodInTicks),
+ xTimeNow,
+ xTimeNow);
+ break;
+
+ case tmrCOMMAND_DELETE:
+# if (configSUPPORT_DYNAMIC_ALLOCATION == 1)
+ {
+ /* The timer has already been removed from the active list,
+ just free up the memory if the memory was dynamically
+ allocated. */
+ if ((pxTimer->ucStatus & tmrSTATUS_IS_STATICALLY_ALLOCATED) ==
+ (uint8_t)0) {
+ vPortFree(pxTimer);
+ } else {
+ pxTimer->ucStatus &= ~tmrSTATUS_IS_ACTIVE;
+ }
+ }
+# else
+ {
+ /* If dynamic allocation is not enabled, the memory
+ could not have been dynamically allocated. So there is
+ no need to free the memory - just mark the timer as
+ "not active". */
+ pxTimer->ucStatus &= ~tmrSTATUS_IS_ACTIVE;
+ }
+# endif /* configSUPPORT_DYNAMIC_ALLOCATION */
+ break;
+
+ default:
+ /* Don't expect to get here. */
+ break;
+ }
+ }
+ }
+}
+/*-----------------------------------------------------------*/
+
+/* Called when the tick count has overflowed: every timer still on the
+current list must already have expired, so process each one (run its
+callback and re-queue auto-reload timers), then swap the current and
+overflow list pointers. */
+static void prvSwitchTimerLists(void)
+{
+ TickType_t xNextExpireTime, xReloadTime;
+ List_t *pxTemp;
+ Timer_t *pxTimer;
+ BaseType_t xResult;
+
+ /* The tick count has overflowed. The timer lists must be switched.
+ If there are any timers still referenced from the current timer list
+ then they must have expired and should be processed before the lists
+ are switched. */
+ while (listLIST_IS_EMPTY(pxCurrentTimerList) == pdFALSE) {
+ xNextExpireTime = listGET_ITEM_VALUE_OF_HEAD_ENTRY(pxCurrentTimerList);
+
+ /* Remove the timer from the list. */
+ pxTimer = (Timer_t *)listGET_OWNER_OF_HEAD_ENTRY(
+ pxCurrentTimerList); /*lint !e9087 !e9079 void * is used as this
+ macro is used with tasks and co-routines
+ too. Alignment is known to be fine as the
+ type of the pointer stored and retrieved is
+ the same. */
+ (void)uxListRemove(&(pxTimer->xTimerListItem));
+ traceTIMER_EXPIRED(pxTimer);
+
+ /* Execute its callback, then send a command to restart the timer if
+ it is an auto-reload timer. It cannot be restarted here as the lists
+ have not yet been switched. */
+ pxTimer->pxCallbackFunction((TimerHandle_t)pxTimer);
+
+ if ((pxTimer->ucStatus & tmrSTATUS_IS_AUTORELOAD) != 0) {
+ /* Calculate the reload value, and if the reload value results in
+ the timer going into the same timer list then it has already expired
+ and the timer should be re-inserted into the current list so it is
+ processed again within this loop. Otherwise a command should be
+ sent to restart the timer to ensure it is only inserted into a list
+ after the lists have been swapped. */
+ xReloadTime = (xNextExpireTime + pxTimer->xTimerPeriodInTicks);
+ if (xReloadTime > xNextExpireTime) {
+ listSET_LIST_ITEM_VALUE(
+ &(pxTimer->xTimerListItem), xReloadTime);
+ listSET_LIST_ITEM_OWNER(&(pxTimer->xTimerListItem), pxTimer);
+ vListInsert(pxCurrentTimerList, &(pxTimer->xTimerListItem));
+ } else {
+ xResult = xTimerGenericCommand(
+ pxTimer,
+ tmrCOMMAND_START_DONT_TRACE,
+ xNextExpireTime,
+ NULL,
+ tmrNO_DELAY);
+ configASSERT(xResult);
+ (void)xResult;
+ }
+ } else {
+ mtCOVERAGE_TEST_MARKER();
+ }
+ }
+
+ /* All expired timers handled - now swap the two list pointers. */
+ pxTemp = pxCurrentTimerList;
+ pxCurrentTimerList = pxOverflowTimerList;
+ pxOverflowTimerList = pxTemp;
+}
+/*-----------------------------------------------------------*/
+
+/* One-time lazy initialisation, performed inside a critical section: set up
+both active-timer lists and create the timer command queue (statically or
+dynamically depending on configSUPPORT_STATIC_ALLOCATION). A non-NULL
+xTimerQueue marks initialisation as already done. */
+static void prvCheckForValidListAndQueue(void)
+{
+ /* Check that the list from which active timers are referenced, and the
+ queue used to communicate with the timer service, have been
+ initialised. */
+ taskENTER_CRITICAL();
+ {
+ if (xTimerQueue == NULL) {
+ vListInitialise(&xActiveTimerList1);
+ vListInitialise(&xActiveTimerList2);
+ pxCurrentTimerList = &xActiveTimerList1;
+ pxOverflowTimerList = &xActiveTimerList2;
+
+# if (configSUPPORT_STATIC_ALLOCATION == 1)
+ {
+ /* The timer queue is allocated statically in case
+ configSUPPORT_DYNAMIC_ALLOCATION is 0. */
+ static StaticQueue_t
+ xStaticTimerQueue; /*lint !e956 Ok to declare in this manner
+ to prevent additional conditional
+ compilation guards in other locations.
+ */
+ static uint8_t ucStaticTimerQueueStorage
+ [(size_t)configTIMER_QUEUE_LENGTH *
+ sizeof(DaemonTaskMessage_t)]; /*lint !e956 Ok to declare in
+ this manner to prevent
+ additional conditional
+ compilation guards in
+ other locations. */
+
+ xTimerQueue = xQueueCreateStatic(
+ (UBaseType_t)configTIMER_QUEUE_LENGTH,
+ (UBaseType_t)sizeof(DaemonTaskMessage_t),
+ &(ucStaticTimerQueueStorage[0]),
+ &xStaticTimerQueue);
+ }
+# else
+ {
+ xTimerQueue = xQueueCreate(
+ (UBaseType_t)configTIMER_QUEUE_LENGTH,
+ sizeof(DaemonTaskMessage_t));
+ }
+# endif
+
+# if (configQUEUE_REGISTRY_SIZE > 0)
+ {
+ /* Register the queue under a debugger-visible name. */
+ if (xTimerQueue != NULL) {
+ vQueueAddToRegistry(xTimerQueue, "TmrQ");
+ } else {
+ mtCOVERAGE_TEST_MARKER();
+ }
+ }
+# endif /* configQUEUE_REGISTRY_SIZE */
+ } else {
+ mtCOVERAGE_TEST_MARKER();
+ }
+ }
+ taskEXIT_CRITICAL();
+}
+/*-----------------------------------------------------------*/
+
+/* Report whether xTimer is active: pdTRUE when its status byte has the
+tmrSTATUS_IS_ACTIVE bit set, pdFALSE otherwise. The status is read inside
+a critical section. */
+BaseType_t xTimerIsTimerActive(TimerHandle_t xTimer)
+{
+ BaseType_t xReturn;
+ Timer_t *pxTimer = xTimer;
+
+ configASSERT(xTimer);
+
+ /* Is the timer in the list of active timers? */
+ taskENTER_CRITICAL();
+ {
+ if ((pxTimer->ucStatus & tmrSTATUS_IS_ACTIVE) == 0) {
+ xReturn = pdFALSE;
+ } else {
+ xReturn = pdTRUE;
+ }
+ }
+ taskEXIT_CRITICAL();
+
+ return xReturn;
+} /*lint !e818 Can't be pointer to const due to the typedef. */
+/*-----------------------------------------------------------*/
+
+/* Return the caller-supplied identifier stored in the timer. Read inside a
+critical section so it is consistent with vTimerSetTimerID(). */
+void *pvTimerGetTimerID(const TimerHandle_t xTimer)
+{
+ Timer_t *const pxTimer = xTimer;
+ void *pvReturn;
+
+ configASSERT(xTimer);
+
+ taskENTER_CRITICAL();
+ {
+ pvReturn = pxTimer->pvTimerID;
+ }
+ taskEXIT_CRITICAL();
+
+ return pvReturn;
+}
+/*-----------------------------------------------------------*/
+
+/* Store a new caller-supplied identifier in the timer, inside a critical
+section to pair with pvTimerGetTimerID(). */
+void vTimerSetTimerID(TimerHandle_t xTimer, void *pvNewID)
+{
+ Timer_t *const pxTimer = xTimer;
+
+ configASSERT(xTimer);
+
+ taskENTER_CRITICAL();
+ {
+ pxTimer->pvTimerID = pvNewID;
+ }
+ taskEXIT_CRITICAL();
+}
+/*-----------------------------------------------------------*/
+
+# if (INCLUDE_xTimerPendFunctionCall == 1)
+
+/* ISR-safe variant: post xFunctionToPend (with its two parameters) to the
+timer command queue so the daemon task executes it in task context. Returns
+the result of xQueueSendFromISR(); pxHigherPriorityTaskWoken is passed
+straight through to that call. */
+BaseType_t xTimerPendFunctionCallFromISR(
+ PendedFunction_t xFunctionToPend,
+ void *pvParameter1,
+ uint32_t ulParameter2,
+ BaseType_t *pxHigherPriorityTaskWoken)
+{
+ DaemonTaskMessage_t xMessage;
+ BaseType_t xReturn;
+
+ /* Complete the message with the function parameters and post it to the
+ daemon task. */
+ xMessage.xMessageID = tmrCOMMAND_EXECUTE_CALLBACK_FROM_ISR;
+ xMessage.u.xCallbackParameters.pxCallbackFunction = xFunctionToPend;
+ xMessage.u.xCallbackParameters.pvParameter1 = pvParameter1;
+ xMessage.u.xCallbackParameters.ulParameter2 = ulParameter2;
+
+ xReturn =
+ xQueueSendFromISR(xTimerQueue, &xMessage, pxHigherPriorityTaskWoken);
+
+ tracePEND_FUNC_CALL_FROM_ISR(
+ xFunctionToPend, pvParameter1, ulParameter2, xReturn);
+
+ return xReturn;
+}
+
+# endif /* INCLUDE_xTimerPendFunctionCall */
+/*-----------------------------------------------------------*/
+
+# if (INCLUDE_xTimerPendFunctionCall == 1)
+
+/* Task-level variant: queue xFunctionToPend for execution in the daemon
+task's context, blocking for up to xTicksToWait ticks for queue space.
+Returns the result of xQueueSendToBack(). */
+BaseType_t xTimerPendFunctionCall(
+ PendedFunction_t xFunctionToPend,
+ void *pvParameter1,
+ uint32_t ulParameter2,
+ TickType_t xTicksToWait)
+{
+ DaemonTaskMessage_t xMessage;
+ BaseType_t xReturn;
+
+ /* This function can only be called after a timer has been created or
+ after the scheduler has been started because, until then, the timer
+ queue does not exist. */
+ configASSERT(xTimerQueue);
+
+ /* Complete the message with the function parameters and post it to the
+ daemon task. */
+ xMessage.xMessageID = tmrCOMMAND_EXECUTE_CALLBACK;
+ xMessage.u.xCallbackParameters.pxCallbackFunction = xFunctionToPend;
+ xMessage.u.xCallbackParameters.pvParameter1 = pvParameter1;
+ xMessage.u.xCallbackParameters.ulParameter2 = ulParameter2;
+
+ xReturn = xQueueSendToBack(xTimerQueue, &xMessage, xTicksToWait);
+
+ tracePEND_FUNC_CALL(xFunctionToPend, pvParameter1, ulParameter2, xReturn);
+
+ return xReturn;
+}
+
+# endif /* INCLUDE_xTimerPendFunctionCall */
+/*-----------------------------------------------------------*/
+
+# if (configUSE_TRACE_FACILITY == 1)
+
+/* Trace-facility accessor: return the number previously assigned to the
+timer with vTimerSetTimerNumber(). */
+UBaseType_t uxTimerGetTimerNumber(TimerHandle_t xTimer)
+{
+ return ((Timer_t *)xTimer)->uxTimerNumber;
+}
+
+# endif /* configUSE_TRACE_FACILITY */
+/*-----------------------------------------------------------*/
+
+# if (configUSE_TRACE_FACILITY == 1)
+
+/* Trace-facility accessor: assign a number to the timer for use by trace
+and debug tools. */
+void vTimerSetTimerNumber(TimerHandle_t xTimer, UBaseType_t uxTimerNumber)
+{
+ ((Timer_t *)xTimer)->uxTimerNumber = uxTimerNumber;
+}
+
+# endif /* configUSE_TRACE_FACILITY */
+/*-----------------------------------------------------------*/
+
+/* This entire source file will be skipped if the application is not configured
+to include software timer functionality. If you want to include software timer
+functionality then ensure configUSE_TIMERS is set to 1 in FreeRTOSConfig.h. */
+#endif /* configUSE_TIMERS == 1 */
diff --git a/product/rcar/src/rcar_core.c b/product/rcar/src/rcar_core.c
new file mode 100644
index 00000000..6f9373de
--- /dev/null
+++ b/product/rcar/src/rcar_core.c
@@ -0,0 +1,23 @@
+/*
+ * Renesas SCP/MCP Software
+ * Copyright (c) 2020, Renesas Electronics Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <mmio.h>
+#include <rcar_core.h>
+
+/*
+ * Product Register (PRR). The CA57EN/CA53EN fields describe the Cortex-A57
+ * and Cortex-A53 cores; the XOR below treats a set bit as "core absent or
+ * disabled" -- NOTE(review): confirm the bit polarity against the R-Car
+ * Gen3 hardware manual.
+ */
+#define PRR (0xFFF00044U)
+#define PRR_57EN_OFS (27)
+#define PRR_57EN_MASK (0x0fU)
+#define PRR_53EN_OFS (22)
+#define PRR_53EN_MASK (0x0fU)
+/* Combine the two (disjoint) fields with a bitwise OR rather than the
+ * arithmetic '+' originally used: clearer intent, and correct regardless
+ * of whether the fields ever overlap. */
+#define PRR_CAEN_MASK \
+ ((PRR_57EN_MASK << PRR_57EN_OFS) | (PRR_53EN_MASK << PRR_53EN_OFS))
+
+/*
+ * Return the number of usable application cores on this R-Car device:
+ * read PRR, invert the CAEN field bits, and count the bits that are set.
+ */
+unsigned int rcar_core_get_count(void)
+{
+ return (unsigned int)__builtin_popcount(
+ (mmio_read_32(PRR) & PRR_CAEN_MASK) ^ PRR_CAEN_MASK);
+}
diff --git a/tools/build_system/firmware.mk b/tools/build_system/firmware.mk
index 8729a220..427c1b39 100644
--- a/tools/build_system/firmware.mk
+++ b/tools/build_system/firmware.mk
@@ -186,9 +186,15 @@ ifeq ($(BS_FIRMWARE_HAS_MULTITHREADING),yes)
BUILD_SUFFIX := $(MULTHREADING_SUFFIX)
BUILD_HAS_MULTITHREADING := yes
- INCLUDES += $(OS_DIR)/RTX/Source
- INCLUDES += $(OS_DIR)/RTX/Include
- INCLUDES += $(OS_DIR)/../Core/Include
+ ifneq ($(findstring $(BS_FIRMWARE_CPU),$(ARMV8A_CPUS)),)
+ INCLUDES += $(OS_DIR)/Include
+ INCLUDES += $(FREERTOS_DIR)/../../Source/include
+ INCLUDES += $(FREERTOS_DIR)/../../Source/portable/GCC/ARM_CA53_64_Rcar
+ else
+ INCLUDES += $(OS_DIR)/RTX/Source
+ INCLUDES += $(OS_DIR)/RTX/Include
+ INCLUDES += $(OS_DIR)/../Core/Include
+ endif
else
BUILD_HAS_MULTITHREADING := no
endif
diff --git a/tools/cppcheck_suppress_list.txt b/tools/cppcheck_suppress_list.txt
index b03db7d3..6b80a540 100644
--- a/tools/cppcheck_suppress_list.txt
+++ b/tools/cppcheck_suppress_list.txt
@@ -29,9 +29,6 @@ unusedStructMember
// using it.
unusedVariable:framework/test/test_fwk_list_init.c
-// No checks on Rcar
-*:product/rcar/*
-
// Cppcheck can not properly understand fwk_expect() thus can not ascertain the
// way we check for pointer values.
nullPointerRedundantCheck:product/juno/*