diff --git a/components/drivers/smp/SConscript b/components/drivers/smp/SConscript
new file mode 100644
index 00000000000..2f4fe0b8ec4
--- /dev/null
+++ b/components/drivers/smp/SConscript
@@ -0,0 +1,10 @@
+from building import *
+
+cwd = GetCurrentDir()
+src = []
+if GetDepend("RT_USING_SMP"):
+    src += Glob('*.c')
+CPPPATH = [cwd]
+group = DefineGroup('smp', src, depend = [''], CPPPATH = CPPPATH)
+
+Return('group')
diff --git a/components/drivers/smp/smp.c b/components/drivers/smp/smp.c
new file mode 100644
index 00000000000..aa0071c576a
--- /dev/null
+++ b/components/drivers/smp/smp.c
@@ -0,0 +1,147 @@
+/*
+ * Copyright (c) 2006-2024 RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2024/9/12      zhujiale     the first version
+ */
+
+#include "smp.h"
+
+#define DBG_TAG "SMP"
+#define DBG_LVL DBG_INFO
+#include <rtdbg.h>
+
+static struct rt_smp_call rt_smp_work[RT_CPUS_NR];
+static rt_atomic_t rt_smp_wait;
+
+static rt_err_t smp_call_handler(struct rt_smp_event *event)
+{
+    switch (event->event_id)
+    {
+    case SMP_CALL_EVENT_FUNC:
+        event->func(event->data);
+        rt_atomic_add(&rt_smp_wait, 1);
+        break;
+    default:
+        LOG_E("error event id\n");
+        return -RT_ERROR;
+    }
+    return RT_EOK;
+}
+void rt_smp_call_ipi_handler(int vector, void *param)
+{
+    int err;
+    int cur_cpu = rt_hw_cpu_id();
+    rt_spin_lock(&rt_smp_work[cur_cpu].lock);
+
+    if (rt_smp_work[cur_cpu].event.event_id)
+    {
+        err = smp_call_handler(&rt_smp_work[cur_cpu].event);
+        if (err)
+        {
+            LOG_E("Have no event\n");
+            rt_memset(&rt_smp_work[cur_cpu].event, 0, sizeof(struct rt_smp_event));
+            rt_spin_unlock(&rt_smp_work[cur_cpu].lock);
+        }
+        rt_memset(&rt_smp_work[cur_cpu].event, 0, sizeof(struct rt_smp_event));
+    }
+    rt_spin_unlock(&rt_smp_work[cur_cpu].lock);
+}
+
+/**
+ * @brief Call a function on the specified CPUs.
+ *
+ * @param cpu_mask cpu mask of the CPUs to call
+ * @param func     the function pointer
+ * @param data     the data pointer
+ * @param flag     call flag; if SMP_CALL_WAIT_ALL is set, wait until every target
+ *                 CPU has finished the call before returning, otherwise dispatch
+ *                 the call to the specified CPUs and return immediately
+ * @param cond     optional condition function; a CPU is called only if cond returns true for it
+ */
+void rt_smp_call_func_cond(int cpu_mask, rt_smp_call_func_back func, void *data, rt_uint8_t flag, rt_smp_cond cond)
+{
+    RT_DEBUG_NOT_IN_INTERRUPT;
+    struct rt_smp_event event;
+    rt_bool_t need_call = RT_TRUE, need_wait = RT_FALSE;
+    int cur_cpu = rt_hw_cpu_id();
+    int cpuid = 1 << cur_cpu;
+    int tmp_id = 0, cpu_nr = 0;
+    int tmp_mask;
+    int irq_flag;
+
+    if (flag == SMP_CALL_WAIT_ALL)
+    {
+        need_wait = RT_TRUE;
+        rt_atomic_store(&rt_smp_wait, 0);
+    }
+
+    if (cpuid & cpu_mask)
+    {
+        func(data);
+        cpu_mask = cpu_mask & (~cpuid);
+    }
+
+    if (!cpu_mask)
+        need_call = RT_FALSE;
+
+    tmp_mask = cpu_mask;
+    if (need_call)
+    {
+        while (tmp_mask)
+        {
+            if ((tmp_mask & 1) && (tmp_id < RT_CPUS_NR))
+            {
+                if (cond && !cond(tmp_id, data))
+                    continue;
+                cpu_nr++;
+                event.event_id = SMP_CALL_EVENT_FUNC;
+                event.func = func;
+                event.data = data;
+                event.cpu_mask = cpu_mask;
+                irq_flag = rt_spin_lock_irqsave(&rt_smp_work[tmp_id].lock);
+                rt_smp_work[tmp_id].event = event;
+                rt_spin_unlock_irqrestore(&rt_smp_work[tmp_id].lock,irq_flag);
+            }
+            tmp_id++;
+            tmp_mask = tmp_mask >> 1;
+        }
+        rt_hw_ipi_send(RT_FUNC_IPI, cpu_mask);
+    }
+
+    if (need_wait)
+    {
+        while (rt_atomic_load(&rt_smp_wait) != cpu_nr);
+    }
+}
+
+void rt_smp_call_each_cpu(rt_smp_call_func_back func, void *data, rt_uint8_t flag)
+{
+    rt_smp_call_func_cond(RT_ALL_CPU, func, data, flag, RT_NULL);
+}
+
+void rt_smp_call_each_cpu_cond(rt_smp_call_func_back func, void *data, rt_uint8_t flag, rt_smp_cond cond_func)
+{
+    rt_smp_call_func_cond(RT_ALL_CPU, func, data, flag, cond_func);
+}
+void rt_smp_call_any_cpu(int cpu_mask, rt_smp_call_func_back func, void *data, rt_uint8_t flag)
+{
+    rt_smp_call_func_cond(cpu_mask, func, data, flag, RT_NULL);
+}
+
+void rt_smp_call_any_cpu_cond(int cpu_mask, rt_smp_call_func_back func, void *data, rt_uint8_t flag, rt_smp_cond cond_func)
+{
+    rt_smp_call_func_cond(cpu_mask, func, data, flag, cond_func);
+}
+
+void rt_smp_init(void)
+{
+    for (int i = 0; i < RT_CPUS_NR; i++)
+    {
+        rt_memset(&rt_smp_work[i], 0, sizeof(struct rt_smp_call));
+        rt_spin_lock_init(&rt_smp_work[i].lock);
+    }
+}
diff --git a/components/drivers/smp/smp.h b/components/drivers/smp/smp.h
new file mode 100644
index 00000000000..6932fd1a7f8
--- /dev/null
+++ b/components/drivers/smp/smp.h
@@ -0,0 +1,34 @@
+#ifndef __SMP_IPI_H__
+#define __SMP_IPI_H__
+#include <rtthread.h>
+typedef void (*rt_smp_call_func_back)(void *data);
+typedef rt_bool_t (*rt_smp_cond)(int cpu, void *info);
+
+#define SMP_CALL_EVENT_FUNC 0x1
+
+#define SMP_CALL_WAIT_ALL (1 << 0)
+#define SMP_CALL_NO_WAIT (1 << 1)
+
+#define RT_ALL_CPU ((1 << RT_CPUS_NR) - 1)
+struct rt_smp_event
+{
+    int cpu_mask;
+    int event_id;
+    void *data;
+    rt_smp_call_func_back func;
+};
+struct rt_smp_call
+{
+    struct rt_spinlock lock;
+    struct rt_smp_event event;
+};
+
+
+void rt_smp_call_ipi_handler(int vector, void *param);
+void rt_smp_call_each_cpu(rt_smp_call_func_back func, void *data, rt_uint8_t flag);
+void rt_smp_call_each_cpu_cond(rt_smp_call_func_back func, void *data, rt_uint8_t flag, rt_smp_cond cond_func);
+void rt_smp_call_any_cpu(int cpu_mask, rt_smp_call_func_back func, void *data, rt_uint8_t flag);
+void rt_smp_call_any_cpu_cond(int cpu_mask, rt_smp_call_func_back func, void *data, rt_uint8_t flag, rt_smp_cond cond_func);
+void rt_smp_init(void);
+
+#endif
diff --git a/examples/utest/testcases/Kconfig b/examples/utest/testcases/Kconfig
index cc13e4e70ee..890c2b234bc 100644
--- a/examples/utest/testcases/Kconfig
+++ b/examples/utest/testcases/Kconfig
@@ -15,6 +15,7 @@ rsource "drivers/ipc/Kconfig"
 rsource "posix/Kconfig"
 rsource "mm/Kconfig"
 rsource "tmpfs/Kconfig"
+rsource "smp_ipi/Kconfig"
 
 endif
 endmenu
diff --git a/examples/utest/testcases/smp_ipi/Kconfig b/examples/utest/testcases/smp_ipi/Kconfig
new file mode 100644
index 00000000000..2e144c5b60a
--- /dev/null
+++ b/examples/utest/testcases/smp_ipi/Kconfig
@@ -0,0 +1,6 @@
+menu "SMP Testcase"
+
+config UTEST_SMP_CALL_FUNC
+    bool "Call random cpu to run func"
+    default n
+endmenu
diff --git a/examples/utest/testcases/smp_ipi/SConscript b/examples/utest/testcases/smp_ipi/SConscript
new file mode 100644
index 00000000000..b3570fd0404
--- /dev/null
+++ b/examples/utest/testcases/smp_ipi/SConscript
@@ -0,0 +1,13 @@
+Import('rtconfig')
+from building import *
+
+cwd = GetCurrentDir()
+src = []
+CPPPATH = [cwd]
+
+if GetDepend(['RT_USING_SMP','UTEST_SMP_CALL_FUNC']):
+    src += ['smp.c']
+
+group = DefineGroup('utestcases', src, depend = ['RT_USING_UTESTCASES'], CPPPATH = CPPPATH)
+
+Return('group')
diff --git a/examples/utest/testcases/smp_ipi/smp.c b/examples/utest/testcases/smp_ipi/smp.c
new file mode 100644
index 00000000000..e7bbcbe199a
--- /dev/null
+++ b/examples/utest/testcases/smp_ipi/smp.c
@@ -0,0 +1,75 @@
+#include <rtthread.h>
+#include "utest.h"
+#include "utest_assert.h"
+#include "smp.h"
+static int pass_count = 0;
+static int pass = 1000;
+static struct rt_spinlock lock;
+
+static void test_call(void *data)
+{
+    rt_spin_lock(&lock);
+    int *i = (int *)data;
+    int id = rt_hw_cpu_id();
+    *i &= ~(1 << id);
+    if (*i == 0)
+        pass_count++;
+    rt_spin_unlock(&lock);
+}
+
+
+static void test1()
+{
+    int cpu_mask = 0xf;
+    for (int i = 0; i < 1000; i++)
+    {
+        cpu_mask = rand() % 0xf;
+        if (cpu_mask == 0)
+            pass--;
+        rt_smp_call_any_cpu(cpu_mask,test_call, &cpu_mask, SMP_CALL_WAIT_ALL);
+        if (i % 20 == 0)
+            rt_kprintf("#");
+    }
+    rt_kprintf("\n");
+    uassert_true(pass_count == pass);
+}
+
+static void test_call2(void *data)
+{
+    rt_spin_lock(&lock);
+    int a = 100000;
+    while (a--);
+    int *i = (int *)data;
+    (*i)++;
+    rt_spin_unlock(&lock);
+}
+static void test2(void)
+{
+    int data = 0;
+    rt_smp_call_each_cpu(test_call2, &data, SMP_CALL_WAIT_ALL);
+    uassert_true(data == RT_CPUS_NR);
+    rt_thread_mdelay(10);
+    data = 0;
+    rt_smp_call_each_cpu(test_call2, &data, SMP_CALL_NO_WAIT);
+    uassert_true(data != RT_CPUS_NR);
+}
+
+static rt_err_t utest_tc_init(void)
+{
+    pass_count = 0;
+    pass = 1000;
+    rt_spin_lock_init(&lock);
+    return RT_EOK;
+}
+
+static rt_err_t utest_tc_cleanup(void)
+{
+    return RT_EOK;
+}
+static void testcase(void)
+{
+    UTEST_UNIT_RUN(test1);
+    UTEST_UNIT_RUN(test2);
+}
+
+UTEST_TC_EXPORT(testcase, "testcase.smp.smp", utest_tc_init, utest_tc_cleanup, 10);
diff --git a/include/rtdef.h b/include/rtdef.h
index 6985bb8f51a..3fba367c07c 100644
--- a/include/rtdef.h
+++ b/include/rtdef.h
@@ -725,6 +725,12 @@ typedef struct rt_cpu_usage_stats *rt_cpu_usage_stats_t;
 #define RT_STOP_IPI 1
 #endif /* RT_STOP_IPI */
 
+#ifndef RT_FUNC_IPI
+#define RT_FUNC_IPI 2
+#endif
+
+#define RT_MAX_IPI 3
+
 #define _SCHEDULER_CONTEXT(fileds) fileds
 
 /**
diff --git a/libcpu/aarch64/common/setup.c b/libcpu/aarch64/common/setup.c
index 304e8565fbc..6c16801a107 100644
--- a/libcpu/aarch64/common/setup.c
+++ b/libcpu/aarch64/common/setup.c
@@ -13,7 +13,7 @@
 #define DBG_TAG "cpu.aa64"
 #define DBG_LVL DBG_INFO
 #include <rtdbg.h>
-
+#include <smp.h>
 #include
 #include
 #include
@@ -302,11 +302,14 @@ void rt_hw_common_setup(void)
     rt_thread_idle_sethook(rt_hw_idle_wfi);
 
 #ifdef RT_USING_SMP
+    rt_smp_init();
     /* Install the IPI handle */
     rt_hw_ipi_handler_install(RT_SCHEDULE_IPI, rt_scheduler_ipi_handler);
    rt_hw_ipi_handler_install(RT_STOP_IPI, rt_scheduler_ipi_handler);
+    rt_hw_ipi_handler_install(RT_FUNC_IPI, rt_smp_call_ipi_handler);
     rt_hw_interrupt_umask(RT_SCHEDULE_IPI);
     rt_hw_interrupt_umask(RT_STOP_IPI);
+    rt_hw_interrupt_umask(RT_FUNC_IPI);
 #endif
 }
 
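Not part of the patch: a minimal usage sketch of the API introduced above, for reviewers who want to see the call flow end to end. It assumes only what the diff itself provides (smp.h, SMP_CALL_WAIT_ALL, SMP_CALL_NO_WAIT, rt_smp_call_each_cpu, rt_smp_call_any_cpu) plus existing RT-Thread primitives that the patch already uses (rt_atomic_*, rt_hw_cpu_id, rt_kprintf); the demo_* names are purely illustrative.

#include <rtthread.h>
#include "smp.h"

/* Illustrative counter; rt_atomic_t and the rt_atomic_* helpers are the same
 * primitives the patch itself uses for rt_smp_wait. */
static rt_atomic_t demo_counter;

/* Callback executed on every target CPU; data is the pointer passed by the caller. */
static void demo_call(void *data)
{
    (void)data;
    rt_atomic_add(&demo_counter, 1);
    rt_kprintf("smp call ran on cpu %d\n", rt_hw_cpu_id());
}

static void demo_smp_call(void)
{
    rt_atomic_store(&demo_counter, 0);

    /* Broadcast to every CPU (the calling CPU runs the function inline) and
     * spin until all targets have acknowledged. */
    rt_smp_call_each_cpu(demo_call, RT_NULL, SMP_CALL_WAIT_ALL);
    rt_kprintf("%d CPUs answered\n", (int)rt_atomic_load(&demo_counter));

    /* Fire-and-forget to CPUs 0 and 2 only (bit n of the mask selects CPU n,
     * assuming RT_CPUS_NR > 2); returns without waiting for the remote handlers. */
    rt_smp_call_any_cpu((1 << 0) | (1 << 2), demo_call, RT_NULL, SMP_CALL_NO_WAIT);
}

As implemented in the patch, rt_smp_call_func_cond asserts RT_DEBUG_NOT_IN_INTERRUPT and SMP_CALL_WAIT_ALL busy-waits on the rt_smp_wait counter incremented by smp_call_handler, so callers should invoke it from thread context and keep the callbacks short.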