diff --git a/components/drivers/include/ipc/workqueue.h b/components/drivers/include/ipc/workqueue.h index 8d0c986b4f0..982ed60ca35 100644 --- a/components/drivers/include/ipc/workqueue.h +++ b/components/drivers/include/ipc/workqueue.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2006-2023, RT-Thread Development Team + * Copyright (c) 2006-2026, RT-Thread Development Team * * SPDX-License-Identifier: Apache-2.0 * @@ -7,6 +7,7 @@ * Date Author Notes * 2021-08-01 Meco Man remove rt_delayed_work_init() and rt_delayed_work structure * 2021-08-14 Jackistang add comments for rt_work_init() + * 2026-03-21 RyanCW(Codex) refine sync/status workqueue APIs */ #ifndef WORKQUEUE_H__ #define WORKQUEUE_H__ @@ -23,6 +24,21 @@ enum { RT_WORK_STATE_PENDING = 0x0001, /* Work item pending state */ RT_WORK_STATE_SUBMITTING = 0x0002, /* Work item submitting state */ + RT_WORK_STATE_CANCELING = 0x0004, /* Work item canceling state */ + RT_WORK_STATE_DONE = 0x0020, /* Work item finished execution */ + RT_WORK_STATE_CANCELED = 0x0040, /* Work item canceled before execution */ +}; + +enum +{ + RT_WORK_STATUS_IDLE = 0x0000, + RT_WORK_STATUS_PENDING = RT_WORK_STATE_PENDING, + RT_WORK_STATUS_SUBMITTING = RT_WORK_STATE_SUBMITTING, + RT_WORK_STATUS_CANCELING = RT_WORK_STATE_CANCELING, + RT_WORK_STATUS_DONE = RT_WORK_STATE_DONE, + RT_WORK_STATUS_CANCELED = RT_WORK_STATE_CANCELED, + RT_WORK_STATUS_RUNNING = 0x0008, + RT_WORK_STATUS_OTHER_QUEUE = 0x0010, }; /** @@ -33,17 +49,23 @@ enum RT_WORK_TYPE_DELAYED = 0x0001, }; +enum +{ + RT_WORKQUEUE_FLAG_DESTROYING = 0x01, + RT_WORKQUEUE_FLAG_SYNC_WAITING = 0x02, +}; + /* workqueue implementation */ struct rt_workqueue { rt_list_t work_list; rt_list_t delayed_list; struct rt_work *work_current; /* current work */ - - struct rt_semaphore sem; rt_thread_t work_thread; struct rt_spinlock spinlock; struct rt_completion wakeup_completion; + struct rt_completion sync_completion; + rt_uint8_t flags; }; struct rt_work @@ -59,15 +81,14 @@ struct rt_work }; #ifdef 
RT_USING_HEAP -/** - * WorkQueue for DeviceDriver - */ void rt_work_init(struct rt_work *work, void (*work_func)(struct rt_work *work, void *work_data), void *work_data); struct rt_workqueue *rt_workqueue_create(const char *name, rt_uint16_t stack_size, rt_uint8_t priority); rt_err_t rt_workqueue_destroy(struct rt_workqueue *queue); +rt_err_t rt_workqueue_destroy_sync(struct rt_workqueue *queue); rt_err_t rt_workqueue_dowork(struct rt_workqueue *queue, struct rt_work *work); rt_err_t rt_workqueue_submit_work(struct rt_workqueue *queue, struct rt_work *work, rt_tick_t ticks); rt_err_t rt_workqueue_cancel_work(struct rt_workqueue *queue, struct rt_work *work); +rt_uint16_t rt_workqueue_get_work_status(struct rt_workqueue *queue, struct rt_work *work); rt_err_t rt_workqueue_cancel_work_sync(struct rt_workqueue *queue, struct rt_work *work); rt_err_t rt_workqueue_cancel_all_work(struct rt_workqueue *queue); rt_err_t rt_workqueue_urgent_work(struct rt_workqueue *queue, struct rt_work *work); @@ -76,6 +97,8 @@ rt_err_t rt_workqueue_urgent_work(struct rt_workqueue *queue, struct rt_work *wo rt_err_t rt_work_submit(struct rt_work *work, rt_tick_t ticks); rt_err_t rt_work_urgent(struct rt_work *work); rt_err_t rt_work_cancel(struct rt_work *work); +rt_err_t rt_work_cancel_sync(struct rt_work *work); +rt_uint16_t rt_work_get_status(struct rt_work *work); #endif /* RT_USING_SYSTEM_WORKQUEUE */ #ifdef __cplusplus diff --git a/components/drivers/ipc/utest/SConscript b/components/drivers/ipc/utest/SConscript index 12a67b6fd60..e867b720bbc 100644 --- a/components/drivers/ipc/utest/SConscript +++ b/components/drivers/ipc/utest/SConscript @@ -9,7 +9,7 @@ if GetDepend(['RT_UTEST_COMPLETION']): src += ['completion_tc.c', 'completion_timeout_tc.c'] if GetDepend(['RT_UTEST_WORKQUEUE']): - src += ['workqueue_tc.c'] + src += ['workqueue_tc_basic.c', 'workqueue_tc_cancel.c', 'workqueue_tc_destroy.c'] group = DefineGroup('utestcases', src, depend = ['RT_USING_UTESTCASES'], CPPPATH = 
CPPPATH) diff --git a/components/drivers/ipc/utest/workqueue_tc.c b/components/drivers/ipc/utest/workqueue_tc.c deleted file mode 100644 index df8c93b9625..00000000000 --- a/components/drivers/ipc/utest/workqueue_tc.c +++ /dev/null @@ -1,639 +0,0 @@ -/* - * Copyright (c) 2006-2024, RT-Thread Development Team - * - * SPDX-License-Identifier: Apache-2.0 - * - * Change Logs: - * Date Author Notes - * 2021-02-06 tyx first commit - * 2024-12-31 rbb666 Adding Test Cases - * 2025-11-16 ChuanN-sudo add standardized utest documentation block - */ - -/** - * Test Case Name: IPC Workqueue Test - * - * Test Objectives: - * - Validate rt_workqueue creation, task submission, and execution mechanisms. - * - Verify ordered execution of work items under concurrent submission scenarios. - * - Ensure proper handling of task dependencies and resource cleanup during workqueue termination. - * - Test core APIs: rt_workqueue_create(), rt_workqueue_submit(), rt_workqueue_cancel() - * - * Test Scenarios: - * - Multiple threads submit periodic work items with varying delays to simulate real-world workloads. - * - Workqueue processes tasks in FIFO order while handling dynamic task cancellations. - * - Test injects random scheduling delays and priority inversions to stress-test queue stability. - * - Concurrent submission of high-priority and normal-priority work items to verify scheduling fairness. - * - System triggers asynchronous workqueue destruction during active task processing. - * - * Verification Metrics: - * - Submitted work items execute exactly once with correct parameter context. - * - Task execution order preserves submission sequence under normal scheduling conditions. - * - Cancelled tasks are safely removed from queue without execution or memory leaks. - * - Workqueue resource cleanup completes successfully even with pending operations. - * - Asynchronous destruction of workqueue handles active tasks gracefully without corruption. 
- * - * Dependencies: - * - Hardware requirements: QEMU emulator or any hardware platform that supports RT-Thread. - * - Software configuration: - * - RT_USING_UTEST must be enabled (select "RT-Thread Utestcases" in menuconfig). - * - RT_UTEST_WORKQUEUE must be enabled (enable via: RT-Thread Utestcases -> Kernel Components -> Drivers -> IPC Test -> IPC Workqueue Test). - * - Environmental Assumptions: System scheduler working normally. - * - * Expected Results: - * - Final output: "[ PASSED ] [ result ] testcase (components.drivers.ipc.workqueue_tc)" - * - No memory leaks or race condition detections in logs. - * - No assertions triggered during test execution. - */ -#include "rtthread.h" -#include "rtdevice.h" -#include "utest.h" - -#ifdef RT_USING_DEVICE_IPC - -static rt_uint8_t get_test_thread_priority(rt_int8_t pos) -{ - rt_int16_t priority; - - priority = RT_SCHED_PRIV(rt_thread_self()).init_priority; - if (pos == 0) - { - return priority; - } - else - { - priority += pos; - } - if (priority < 0) - { - return 0; - } - else if (priority >= RT_THREAD_PRIORITY_MAX) - { - return RT_THREAD_PRIORITY_MAX - 1; - } - else - { - return (rt_uint8_t)priority; - } -} - -static void do_work_test_fun(struct rt_work *work, void *work_data) -{ - *((int *)work_data) = 1; -} - -static void do_work_test(void) -{ - struct rt_workqueue *queue; - rt_uint8_t curr_priority; - struct rt_work work; - volatile int work_flag = 0; - rt_err_t err; - - /* 1 higher priority than the current test thread */ - curr_priority = get_test_thread_priority(-1); - queue = rt_workqueue_create("test", 2048, curr_priority); - if (queue == RT_NULL) - { - LOG_E("queue create failed, L:%d", __LINE__); - return; - } - rt_work_init(&work, do_work_test_fun, (void *)&work_flag); - err = rt_workqueue_submit_work(queue, &work, 0); - uassert_int_equal(err, RT_EOK); - - /* Delay 5 ticks to ensure that the task has been executed */ - rt_thread_delay(5); - uassert_int_equal(work_flag, 1); - - rt_thread_delay(100); - 
rt_workqueue_destroy(queue); -} - -static void do_delay_work_test_fun(struct rt_work *work, void *work_data) -{ - *((rt_tick_t *)work_data) = rt_tick_get(); -} - -static void do_delay_work_test(void) -{ - struct rt_workqueue *queue; - rt_uint8_t curr_priority; - struct rt_work work; - volatile rt_tick_t work_start = 0; - volatile rt_tick_t work_end = 0; - rt_err_t err; - - /* 1 higher priority than the current test thread */ - curr_priority = get_test_thread_priority(-1); - queue = rt_workqueue_create("test", 2048, curr_priority); - if (queue == RT_NULL) - { - LOG_E("queue create failed, L:%d", __LINE__); - return; - } - rt_work_init(&work, do_delay_work_test_fun, (void *)&work_end); - work_start = rt_tick_get(); - /* Normal delayed work submission test */ - err = rt_workqueue_submit_work(queue, &work, 10); - uassert_int_equal(err, RT_EOK); - - /* Ensure that the delayed work has been executed */ - rt_thread_delay(15); - /* Check if the delayed task is executed after 10 ticks */ - if (work_end < work_start || work_end - work_start < 10) - { - uassert_false(1); - } - rt_thread_delay(100); - rt_workqueue_destroy(queue); -} - -static void cancle_work_test01_fun(struct rt_work *work, void *work_data) -{ - *((int *)work_data) = 1; -} - -static void cancle_work_test01(void) -{ - struct rt_workqueue *queue; - rt_uint8_t curr_priority; - struct rt_work work; - volatile int work_flag = 0; - rt_err_t err; - - /* 1 lower priority than the current test thread */ - curr_priority = get_test_thread_priority(1); - queue = rt_workqueue_create("test", 2048, curr_priority); - if (queue == RT_NULL) - { - LOG_E("queue create failed, L:%d", __LINE__); - return; - } - work_flag = 0; - rt_work_init(&work, cancle_work_test01_fun, (void *)&work_flag); - /* Cancel the work before it is executed */ - err = rt_workqueue_submit_work(queue, &work, 0); - uassert_int_equal(err, RT_EOK); - - /* Cancel Now */ - err = rt_workqueue_cancel_work(queue, &work); - uassert_int_equal(err, RT_EOK); - - 
rt_thread_delay(5); - uassert_int_equal(work_flag, 0); - - rt_thread_delay(100); - rt_workqueue_destroy(queue); -} - -static void cancle_work_test02_fun(struct rt_work *work, void *work_data) -{ - rt_thread_delay(10); -} - -static void cancle_work_test02(void) -{ - struct rt_workqueue *queue; - rt_uint8_t curr_priority; - struct rt_work work; - rt_err_t err; - - /* 1 higher priority than the current test thread */ - curr_priority = get_test_thread_priority(-1); - queue = rt_workqueue_create("test", 2048, curr_priority); - if (queue == RT_NULL) - { - LOG_E("queue create failed, L:%d", __LINE__); - return; - } - rt_work_init(&work, cancle_work_test02_fun, RT_NULL); - /* Cancel the work while it is in progress */ - err = rt_workqueue_submit_work(queue, &work, 0); - uassert_int_equal(err, RT_EOK); - - rt_thread_delay(5); - err = rt_workqueue_cancel_work(queue, &work); - uassert_int_equal(err, -RT_EBUSY); - - rt_thread_delay(100); - rt_workqueue_destroy(queue); -} - -static void cancle_work_test03_fun(struct rt_work *work, void *work_data) -{ - rt_thread_delay(5); -} - -static void cancle_work_test03(void) -{ - struct rt_workqueue *queue; - rt_uint8_t curr_priority; - struct rt_work work; - rt_err_t err; - - /* 1 lower priority than the current test thread */ - curr_priority = get_test_thread_priority(1); - queue = rt_workqueue_create("test", 2048, curr_priority); - if (queue == RT_NULL) - { - LOG_E("queue create failed, L:%d", __LINE__); - return; - } - rt_work_init(&work, cancle_work_test03_fun, RT_NULL); - /* Canceling a work after it has been executed */ - err = rt_workqueue_submit_work(queue, &work, 0); - uassert_int_equal(err, RT_EOK); - - rt_thread_delay(10); - err = rt_workqueue_cancel_work(queue, &work); - uassert_int_equal(err, RT_EOK); - - rt_thread_delay(100); - rt_workqueue_destroy(queue); -} - -static void cancle_work_test04_fun(struct rt_work *work, void *work_data) -{ - rt_thread_delay(10); - *((int *)work_data) = 1; -} - -static void 
cancle_work_test04(void) -{ - struct rt_workqueue *queue; - rt_uint8_t curr_priority; - struct rt_work work; - volatile int work_flag = 0; - rt_err_t err; - - /* 1 lower priority than the current test thread */ - curr_priority = get_test_thread_priority(1); - queue = rt_workqueue_create("test", 2048, curr_priority); - if (queue == RT_NULL) - { - LOG_E("queue create failed, L:%d", __LINE__); - return; - } - rt_work_init(&work, cancle_work_test04_fun, (void *)&work_flag); - err = rt_workqueue_submit_work(queue, &work, 0); - uassert_int_equal(err, RT_EOK); - - rt_thread_delay(5); - /* Synchronized cancellation work */ - err = rt_workqueue_cancel_work_sync(queue, &work); - uassert_int_equal(err, RT_EOK); - - uassert_int_equal(work_flag, 1); - - rt_thread_delay(100); - rt_workqueue_destroy(queue); -} - -static void cancle_delay_work_test01_fun(struct rt_work *work, void *work_data) -{ - *((int *)work_data) = 1; -} - -static void cancle_delay_work_test01(void) -{ - struct rt_workqueue *queue; - rt_uint8_t curr_priority; - struct rt_work work; - volatile int work_flag = 0; - rt_err_t err; - - /* 1 lower priority than the current test thread */ - curr_priority = get_test_thread_priority(1); - queue = rt_workqueue_create("test", 2048, curr_priority); - if (queue == RT_NULL) - { - LOG_E("queue create failed, L:%d", __LINE__); - return; - } - work_flag = 0; - rt_work_init(&work, cancle_delay_work_test01_fun, (void *)&work_flag); - err = rt_workqueue_submit_work(queue, &work, 20); - uassert_int_equal(err, RT_EOK); - - rt_thread_delay(10); - /* Cancel work */ - err = rt_workqueue_cancel_work(queue, &work); - uassert_int_equal(err, RT_EOK); - - rt_thread_delay(15); - uassert_int_equal(work_flag, 0); - - rt_thread_delay(100); - rt_workqueue_destroy(queue); -} - -static void repeat_work_test01_fun(struct rt_work *work, void *work_data) -{ - *((int *)work_data) += 1; -} - -static void repeat_work_test01(void) -{ - struct rt_workqueue *queue; - rt_uint8_t curr_priority; - struct 
rt_work work; - volatile int work_flag = 0; - rt_err_t err; - - /* 1 lower priority than the current test thread */ - curr_priority = get_test_thread_priority(1); - queue = rt_workqueue_create("test01", 2048, curr_priority); - if (queue == RT_NULL) - { - LOG_E("queue create failed, L:%d", __LINE__); - return; - } - work_flag = 0; - rt_work_init(&work, repeat_work_test01_fun, (void *)&work_flag); - /* Multiple submissions of the same work */ - err = rt_workqueue_submit_work(queue, &work, 0); - uassert_int_equal(err, RT_EOK); - - /* The same work, before it is executed, can be submitted repeatedly and executed only once */ - err = rt_workqueue_submit_work(queue, &work, 0); - if (err != RT_EOK) - { - LOG_E("L:%d err. %d", __LINE__, err); - } - rt_thread_delay(10); - /* Check if it was executed only once */ - uassert_int_equal(work_flag, 1); - - rt_thread_delay(100); - rt_workqueue_destroy(queue); -} - -static void repeat_work_test02_fun(struct rt_work *work, void *work_data) -{ - rt_thread_delay(10); - *((int *)work_data) += 1; -} - -static void repeat_work_test02(void) -{ - struct rt_workqueue *queue; - rt_uint8_t curr_priority; - struct rt_work work; - volatile int work_flag = 0; - rt_err_t err; - - /* 1 priority higher than current test thread */ - curr_priority = get_test_thread_priority(-1); - queue = rt_workqueue_create("test02", 2048, curr_priority); - if (queue == RT_NULL) - { - LOG_E("queue create failed, L:%d", __LINE__); - return; - } - rt_work_init(&work, repeat_work_test02_fun, (void *)&work_flag); - /* Submit work with high queue priority that will be executed immediately */ - err = rt_workqueue_submit_work(queue, &work, 0); - uassert_int_equal(err, RT_EOK); - - rt_thread_delay(5); - /* Re-submission of work in progress */ - err = rt_workqueue_submit_work(queue, &work, 0); - if (err != RT_EOK) - { - LOG_E("L:%d err. 
%d", __LINE__, err); - } - rt_thread_delay(10); - uassert_int_equal(work_flag, 1); - - rt_thread_delay(10); - uassert_int_equal(work_flag, 2); - - rt_workqueue_destroy(queue); -} - -static struct rt_workqueue *queue_3; - -static void repeat_work_test03_fun(struct rt_work *work, void *work_data) -{ - int *work_flag = (int *)work_data; - (*work_flag) += 1; - rt_kprintf("work_flag:%d\n", *work_flag); - if (*work_flag < 20) - { - rt_workqueue_submit_work(queue_3, work, 0); - } -} - -static void repeat_work_test03(void) -{ - rt_uint8_t curr_priority; - struct rt_work work; - volatile int work_flag = 0; - rt_err_t err; - - /* 1 priority higher than current test thread */ - curr_priority = get_test_thread_priority(-1); - queue_3 = rt_workqueue_create("test03", 2048, curr_priority); - if (queue_3 == RT_NULL) - { - LOG_E("queue create failed, L:%d", __LINE__); - return; - } - rt_work_init(&work, repeat_work_test03_fun, (void *)&work_flag); - /* Submit work with high queue priority that will be executed immediately */ - err = rt_workqueue_submit_work(queue_3, &work, 0); - uassert_int_equal(err, RT_EOK); - - /* Wait for the work to be executed 20 times with a timeout */ - err = rt_workqueue_cancel_work_sync(queue_3, &work); - uassert_int_equal(err, RT_EOK); - - /* Check if the work was executed 20 times */ - uassert_int_equal(work_flag, 20); - - rt_workqueue_destroy(queue_3); -} - -static void repeat_delay_work_test01_fun(struct rt_work *work, void *work_data) -{ - *((int *)work_data) += 1; -} - -static void repeat_delay_work_test01(void) -{ - struct rt_workqueue *queue; - rt_uint8_t curr_priority; - struct rt_work work; - volatile int work_flag = 0; - rt_err_t err; - - /* 1 lower priority than the current test thread */ - curr_priority = get_test_thread_priority(1); - queue = rt_workqueue_create("test", 2048, curr_priority); - if (queue == RT_NULL) - { - LOG_E("queue create failed, L:%d", __LINE__); - return; - } - work_flag = 0; - rt_work_init(&work, 
repeat_delay_work_test01_fun, (void *)&work_flag); - - err = rt_workqueue_submit_work(queue, &work, 20); - uassert_int_equal(err, RT_EOK); - - /* At this point the delayed work has not been executed */ - rt_thread_delay(10); - /* Re-submission of time-delayed work */ - err = rt_workqueue_submit_work(queue, &work, 20); - uassert_int_equal(err, RT_EOK); - - rt_thread_delay(15); - uassert_int_equal(work_flag, 0); - - /* Waiting for delayed task execution */ - rt_thread_delay(15); - uassert_int_equal(work_flag, 1); - - rt_thread_delay(100); - rt_workqueue_destroy(queue); -} - -static void repeat_delay_work_test02_fun(struct rt_work *work, void *work_data) -{ - rt_thread_delay(10); - *((int *)work_data) += 1; -} - -static void repeat_delay_work_test02(void) -{ - struct rt_workqueue *queue; - rt_uint8_t curr_priority; - struct rt_work work; - volatile int work_flag = 0; - rt_err_t err; - - /* 1 lower priority than the current test thread */ - curr_priority = get_test_thread_priority(1); - queue = rt_workqueue_create("test", 2048, curr_priority); - if (queue == RT_NULL) - { - LOG_E("queue create failed, L:%d", __LINE__); - return; - } - work_flag = 0; - rt_work_init(&work, repeat_delay_work_test02_fun, (void *)&work_flag); - - err = rt_workqueue_submit_work(queue, &work, 20); - uassert_int_equal(err, RT_EOK); - - /* Waiting for delayed work execution */ - rt_thread_delay(25); - err = rt_workqueue_submit_work(queue, &work, 20); - uassert_int_equal(err, RT_EOK); - - /* Check if the delayed work has been run only once */ - rt_thread_delay(10); - uassert_int_equal(work_flag, 1); - - rt_thread_delay(25); - /* Check if the delayed work is executed twice */ - uassert_int_equal(work_flag, 2); - - rt_thread_delay(100); - rt_workqueue_destroy(queue); -} - -static void cancel_all_work_test_fun(struct rt_work *work, void *work_data) -{ - *((int *)work_data) += 1; -} - -static void cancel_all_work_test(void) -{ - struct rt_workqueue *queue; - rt_uint8_t curr_priority; - struct rt_work 
work1; - struct rt_work work2; - struct rt_work work3; - struct rt_work work4; - volatile int work_flag = 0; - rt_err_t err; - - curr_priority = get_test_thread_priority(1); - queue = rt_workqueue_create("test", 2048, curr_priority); - if (queue == RT_NULL) - { - LOG_E("queue create failed, L:%d", __LINE__); - return; - } - work_flag = 0; - rt_work_init(&work1, cancel_all_work_test_fun, (void *)&work_flag); - rt_work_init(&work2, cancel_all_work_test_fun, (void *)&work_flag); - rt_work_init(&work3, cancel_all_work_test_fun, (void *)&work_flag); - rt_work_init(&work4, cancel_all_work_test_fun, (void *)&work_flag); - - err = rt_workqueue_submit_work(queue, &work1, 0); - uassert_int_equal(err, RT_EOK); - - err = rt_workqueue_submit_work(queue, &work2, 0); - uassert_int_equal(err, RT_EOK); - - err = rt_workqueue_submit_work(queue, &work3, 10); - uassert_int_equal(err, RT_EOK); - - err = rt_workqueue_submit_work(queue, &work4, 10); - uassert_int_equal(err, RT_EOK); - - err = rt_workqueue_cancel_all_work(queue); - uassert_int_equal(err, RT_EOK); - - rt_thread_delay(20); - uassert_int_equal(work_flag, 0); - - rt_thread_delay(100); - rt_workqueue_destroy(queue); -} - -static rt_err_t utest_tc_init(void) -{ - return RT_EOK; -} - -static rt_err_t utest_tc_cleanup(void) -{ - return RT_EOK; -} - -static void testcase(void) -{ - /* General work queue test */ - UTEST_UNIT_RUN(do_work_test); - /* Delayed work queue test */ - UTEST_UNIT_RUN(do_delay_work_test); - /* Cancellation of work prior to implementation */ - UTEST_UNIT_RUN(cancle_work_test01); - /* Cancellation of work during execution */ - UTEST_UNIT_RUN(cancle_work_test02); - /* Cancellation of work after implementation */ - UTEST_UNIT_RUN(cancle_work_test03); - /* Synchronized cancellation of work during execution */ - UTEST_UNIT_RUN(cancle_work_test04); - /* Cancel delayed work before execution */ - UTEST_UNIT_RUN(cancle_delay_work_test01); - /* Multiple submissions of the same work prior to implementation */ - 
UTEST_UNIT_RUN(repeat_work_test01); - /* Multiple submissions of the same work during execution */ - UTEST_UNIT_RUN(repeat_work_test02); - /* Submitting the same task multiple times in a mission */ - UTEST_UNIT_RUN(repeat_work_test03); - /* Multiple submissions of the same delayed task before execution */ - UTEST_UNIT_RUN(repeat_delay_work_test01); - /* Multiple submissions of the same delayed task during execution */ - UTEST_UNIT_RUN(repeat_delay_work_test02); - /* Cancel all works */ - UTEST_UNIT_RUN(cancel_all_work_test); -} -UTEST_TC_EXPORT(testcase, "components.drivers.ipc.workqueue_tc", utest_tc_init, utest_tc_cleanup, 300); -#endif diff --git a/components/drivers/ipc/utest/workqueue_tc_basic.c b/components/drivers/ipc/utest/workqueue_tc_basic.c new file mode 100644 index 00000000000..ec1c397e358 --- /dev/null +++ b/components/drivers/ipc/utest/workqueue_tc_basic.c @@ -0,0 +1,1184 @@ +/* + * Copyright (c) 2006-2026, RT-Thread Development Team + * + * SPDX-License-Identifier: Apache-2.0 + * + * Change Logs: + * Date Author Notes + * 2021-02-06 tyx first commit + * 2024-12-31 rbb666 Adding Test Cases + * 2025-11-16 ChuanN-sudo add standardized utest documentation block + * 2026-03-20 RyanCW(Codex) split workqueue utest and cover basic/status APIs + */ + +/** + * Test Case Name: IPC Workqueue Test + * + * Test Objectives: + * - Validate workqueue creation, submission, and execution mechanisms. + * - Verify FIFO, urgent insertion, and delayed ordering behaviors. + * - Verify reschedule and repeat submission semantics. + * - Test core APIs: rt_workqueue_create(), rt_workqueue_submit_work(), + * rt_workqueue_dowork(), rt_workqueue_urgent_work(). + * - Test system workqueue APIs: rt_work_submit(), rt_work_urgent(), + * rt_work_cancel(), rt_work_cancel_sync(), rt_work_get_status(). + * + * Test Scenarios: + * - Multiple work items with varying delays validate ordering and scheduling. + * - Urgent insertion runs ahead of pending items. 
+ * - Reschedule path (delayed <-> immediate) executes once as expected. + * - Repeat submissions for pending/running/delayed items behave correctly. + * - System workqueue APIs are validated when enabled. + * + * Verification Metrics: + * - Submitted work items execute with expected order and parameters. + * - Same-timeout delayed items keep FIFO order. + * - Rescheduled work executes once per cycle. + * - Repeat submissions follow defined semantics (pending once, running twice). + * - System workqueue APIs return expected results when enabled. + * - System workqueue cancel_sync waits for running work and status reflects DONE/CANCELED. + * + * Dependencies: + * - Hardware requirements: QEMU emulator or any hardware platform that supports RT-Thread. + * - Software configuration: + * - RT_USING_UTEST must be enabled (select "RT-Thread Utestcases" in menuconfig). + * - RT_UTEST_WORKQUEUE must be enabled + * (enable via: RT-Thread Utestcases -> Kernel Components -> Drivers -> IPC Test -> IPC Workqueue Test). + * - Environmental Assumptions: System scheduler working normally. + * + * Expected Results: + * - Final output: "[ PASSED ] [ result ] testcase (components.drivers.ipc.workqueue_tc)" + * - No memory leaks or race condition detections in logs. + * - No assertions triggered during test execution. + */ +/* Workqueue basic tests: submit/order/urgent/reschedule/repeat/system APIs. */ +#include "rtthread.h" +#include "rtdevice.h" +#include +#include "utest.h" + +#ifdef RT_USING_DEVICE_IPC + +/* Other workqueue test groups. 
*/ +extern void workqueue_cancel_testcase(void); +extern void workqueue_destroy_testcase(void); + +rt_uint8_t wq_get_test_thread_priority(rt_int8_t pos) +{ + rt_int16_t priority; + + priority = RT_SCHED_PRIV(rt_thread_self()).init_priority; + if (pos == 0) + { + return priority; + } + else + { + priority += pos; + } + if (priority < 0) + { + return 0; + } + else if (priority >= RT_THREAD_PRIORITY_MAX) + { + return RT_THREAD_PRIORITY_MAX - 1; + } + else + { + return (rt_uint8_t)priority; + } +} + +struct wq_flag_ctx +{ + struct rt_semaphore done; + volatile int flag; +}; + +struct wq_order_ctx +{ + struct rt_semaphore done; + struct rt_semaphore start; + struct rt_semaphore finish; + volatile int seq; + volatile int order_first; + volatile int order_second; +}; + +struct wq_order_item +{ + struct wq_order_ctx *ctx; + volatile int *target; + volatile rt_tick_t *stamp; +}; + +struct wq_repeat_ctx +{ + struct rt_workqueue *queue; + struct rt_semaphore done; + volatile int count; +}; + +struct wq_sys_block_ctx +{ + struct rt_semaphore started; + struct rt_semaphore allow_finish; +}; + +struct wq_sys_release_ctx +{ + struct rt_semaphore *sem; + rt_tick_t delay; +}; + +static rt_bool_t wq_timeout_shifted_later(rt_tick_t new_tick, + rt_tick_t old_tick, + rt_tick_t min_delta) +{ + rt_tick_t delta = new_tick - old_tick; + + return (delta < RT_TICK_MAX / 2) && (delta >= min_delta); +} + +static void wq_flag_work_fun(struct rt_work *work, void *work_data) +{ + struct wq_flag_ctx *ctx = (struct wq_flag_ctx *)work_data; + + ctx->flag = 1; + rt_sem_release(&ctx->done); +} + +static void wq_order_work_fun(struct rt_work *work, void *work_data) +{ + struct wq_order_item *item = (struct wq_order_item *)work_data; + + *item->target = ++item->ctx->seq; + if (item->stamp != RT_NULL) + { + *item->stamp = rt_tick_get(); + } + rt_sem_release(&item->ctx->done); +} + +static void wq_urgent_block_fun(struct rt_work *work, void *work_data) +{ + struct wq_order_ctx *ctx = (struct wq_order_ctx 
*)work_data; + + rt_sem_release(&ctx->start); + rt_sem_take(&ctx->finish, RT_WAITING_FOREVER); +} + +static void wq_inc_work_fun(struct rt_work *work, void *work_data) +{ + *((int *)work_data) += 1; +} + +static void wq_inc_delay_work_fun(struct rt_work *work, void *work_data) +{ + rt_thread_delay(rt_tick_from_millisecond(10)); + *((int *)work_data) += 1; +} + +static void wq_sys_block_fun(struct rt_work *work, void *work_data) +{ + struct wq_sys_block_ctx *ctx = (struct wq_sys_block_ctx *)work_data; + + rt_sem_release(&ctx->started); + rt_sem_take(&ctx->allow_finish, RT_WAITING_FOREVER); +} + +static void wq_sys_release_entry(void *parameter) +{ + struct wq_sys_release_ctx *ctx = (struct wq_sys_release_ctx *)parameter; + + rt_thread_delay(ctx->delay); + rt_sem_release(ctx->sem); +} + +static void wq_repeat_requeue_fun(struct rt_work *work, void *work_data) +{ + struct wq_repeat_ctx *ctx = (struct wq_repeat_ctx *)work_data; + + /* Requeue itself to build a deterministic count. */ + ctx->count += 1; + if (ctx->count < 20) + { + rt_workqueue_submit_work(ctx->queue, work, 0); + } + else + { + rt_sem_release(&ctx->done); + } +} + +static void test_workqueue_submit_immediate(void) +{ + struct rt_workqueue *queue; + rt_uint8_t curr_priority; + struct rt_work work; + struct wq_flag_ctx ctx; + rt_err_t err; + + /* 1 higher priority than the current test thread */ + curr_priority = wq_get_test_thread_priority(-1); + queue = rt_workqueue_create("test", 2048, curr_priority); + uassert_not_null(queue); + if (queue == RT_NULL) + { + return; + } + + err = rt_sem_init(&ctx.done, "wqdw", 0, RT_IPC_FLAG_FIFO); + uassert_int_equal(err, RT_EOK); + ctx.flag = 0; + + rt_work_init(&work, wq_flag_work_fun, (void *)&ctx); + err = rt_workqueue_submit_work(queue, &work, 0); + uassert_int_equal(err, RT_EOK); + + err = rt_sem_take(&ctx.done, rt_tick_from_millisecond(100)); + uassert_int_equal(err, RT_EOK); + uassert_int_equal(ctx.flag, 1); + + /* Dowork API should behave the same as immediate 
submit. */ + ctx.flag = 0; + rt_work_init(&work, wq_flag_work_fun, (void *)&ctx); + err = rt_workqueue_dowork(queue, &work); + uassert_int_equal(err, RT_EOK); + + err = rt_sem_take(&ctx.done, rt_tick_from_millisecond(100)); + uassert_int_equal(err, RT_EOK); + uassert_int_equal(ctx.flag, 1); + + rt_sem_detach(&ctx.done); + rt_workqueue_destroy(queue); +} + +static void test_workqueue_submit_invalid_tick(void) +{ + struct rt_workqueue *queue; + rt_uint8_t curr_priority; + struct rt_work work; + volatile int work_flag = 0; + rt_err_t err; + rt_tick_t invalid_tick; + + curr_priority = wq_get_test_thread_priority(1); + queue = rt_workqueue_create("test", 2048, curr_priority); + uassert_not_null(queue); + if (queue == RT_NULL) + { + return; + } + + invalid_tick = (rt_tick_t)(RT_TICK_MAX / 2); + rt_work_init(&work, wq_inc_work_fun, (void *)&work_flag); + err = rt_workqueue_submit_work(queue, &work, invalid_tick); + uassert_int_equal(err, -RT_ERROR); + uassert_int_equal(work_flag, 0); + + rt_workqueue_destroy(queue); +} + +static void test_workqueue_ordering(void) +{ + struct rt_workqueue *queue; + rt_uint8_t curr_priority; + struct rt_work work1; + struct rt_work work2; + struct rt_work work_block; + struct rt_work work_urgent; + struct rt_work work_normal; + struct wq_order_ctx ctx; + struct wq_order_item item1; + struct wq_order_item item2; + struct wq_order_item item_urgent; + struct wq_order_item item_normal; + rt_err_t err; + + curr_priority = wq_get_test_thread_priority(-1); + queue = rt_workqueue_create("test", 2048, curr_priority); + uassert_not_null(queue); + if (queue == RT_NULL) + { + return; + } + + err = rt_sem_init(&ctx.done, "wqfo", 0, RT_IPC_FLAG_FIFO); + uassert_int_equal(err, RT_EOK); + ctx.seq = 0; + ctx.order_first = 0; + ctx.order_second = 0; + + /* Phase: FIFO order. 
*/ + item1.ctx = &ctx; + item1.target = &ctx.order_first; + item2.ctx = &ctx; + item2.target = &ctx.order_second; + item1.stamp = RT_NULL; + item2.stamp = RT_NULL; + + rt_work_init(&work1, wq_order_work_fun, (void *)&item1); + rt_work_init(&work2, wq_order_work_fun, (void *)&item2); + + err = rt_workqueue_submit_work(queue, &work1, 0); + uassert_int_equal(err, RT_EOK); + err = rt_workqueue_submit_work(queue, &work2, 0); + uassert_int_equal(err, RT_EOK); + + err = rt_sem_take(&ctx.done, rt_tick_from_millisecond(100)); + uassert_int_equal(err, RT_EOK); + err = rt_sem_take(&ctx.done, rt_tick_from_millisecond(100)); + uassert_int_equal(err, RT_EOK); + + uassert_int_equal(ctx.order_first, 1); + uassert_int_equal(ctx.order_second, 2); + + rt_sem_detach(&ctx.done); + /* Phase: urgent order. */ + err = rt_sem_init(&ctx.start, "wqus", 0, RT_IPC_FLAG_FIFO); + uassert_int_equal(err, RT_EOK); + err = rt_sem_init(&ctx.finish, "wquf", 0, RT_IPC_FLAG_FIFO); + uassert_int_equal(err, RT_EOK); + err = rt_sem_init(&ctx.done, "wqud", 0, RT_IPC_FLAG_FIFO); + uassert_int_equal(err, RT_EOK); + ctx.seq = 0; + ctx.order_first = 0; + ctx.order_second = 0; + + item_urgent.ctx = &ctx; + item_urgent.target = &ctx.order_first; + item_normal.ctx = &ctx; + item_normal.target = &ctx.order_second; + item_urgent.stamp = RT_NULL; + item_normal.stamp = RT_NULL; + + rt_work_init(&work_block, wq_urgent_block_fun, (void *)&ctx); + rt_work_init(&work_urgent, wq_order_work_fun, (void *)&item_urgent); + rt_work_init(&work_normal, wq_order_work_fun, (void *)&item_normal); + + err = rt_workqueue_submit_work(queue, &work_block, 0); + uassert_int_equal(err, RT_EOK); + err = rt_sem_take(&ctx.start, rt_tick_from_millisecond(100)); + uassert_int_equal(err, RT_EOK); + + err = rt_workqueue_submit_work(queue, &work_normal, 0); + uassert_int_equal(err, RT_EOK); + err = rt_workqueue_urgent_work(queue, &work_urgent); + uassert_int_equal(err, RT_EOK); + + rt_sem_release(&ctx.finish); + + err = rt_sem_take(&ctx.done, 
rt_tick_from_millisecond(200)); + uassert_int_equal(err, RT_EOK); + err = rt_sem_take(&ctx.done, rt_tick_from_millisecond(200)); + uassert_int_equal(err, RT_EOK); + + uassert_int_equal(ctx.order_first, 1); + uassert_int_equal(ctx.order_second, 2); + + rt_sem_detach(&ctx.done); + rt_sem_detach(&ctx.finish); + rt_sem_detach(&ctx.start); + + /* Phase: urgent on already queued work (move to head). */ + err = rt_sem_init(&ctx.start, "wqup", 0, RT_IPC_FLAG_FIFO); + uassert_int_equal(err, RT_EOK); + err = rt_sem_init(&ctx.finish, "wquq", 0, RT_IPC_FLAG_FIFO); + uassert_int_equal(err, RT_EOK); + err = rt_sem_init(&ctx.done, "wqux", 0, RT_IPC_FLAG_FIFO); + uassert_int_equal(err, RT_EOK); + ctx.seq = 0; + ctx.order_first = 0; + ctx.order_second = 0; + + item_urgent.ctx = &ctx; + item_urgent.target = &ctx.order_first; + item_normal.ctx = &ctx; + item_normal.target = &ctx.order_second; + item_urgent.stamp = RT_NULL; + item_normal.stamp = RT_NULL; + + rt_work_init(&work_block, wq_urgent_block_fun, (void *)&ctx); + rt_work_init(&work_urgent, wq_order_work_fun, (void *)&item_urgent); + rt_work_init(&work_normal, wq_order_work_fun, (void *)&item_normal); + + err = rt_workqueue_submit_work(queue, &work_block, 0); + uassert_int_equal(err, RT_EOK); + err = rt_sem_take(&ctx.start, rt_tick_from_millisecond(100)); + uassert_int_equal(err, RT_EOK); + + err = rt_workqueue_submit_work(queue, &work_normal, 0); + uassert_int_equal(err, RT_EOK); + err = rt_workqueue_submit_work(queue, &work_urgent, 0); + uassert_int_equal(err, RT_EOK); + /* Move an already queued work to the head. 
*/ + err = rt_workqueue_urgent_work(queue, &work_urgent); + uassert_int_equal(err, RT_EOK); + + rt_sem_release(&ctx.finish); + + err = rt_sem_take(&ctx.done, rt_tick_from_millisecond(200)); + uassert_int_equal(err, RT_EOK); + err = rt_sem_take(&ctx.done, rt_tick_from_millisecond(200)); + uassert_int_equal(err, RT_EOK); + + uassert_int_equal(ctx.order_first, 1); + uassert_int_equal(ctx.order_second, 2); + + rt_sem_detach(&ctx.done); + rt_sem_detach(&ctx.finish); + rt_sem_detach(&ctx.start); + rt_workqueue_destroy(queue); +} + +static void test_workqueue_delayed_order(void) +{ + struct rt_workqueue *queue; + rt_uint8_t curr_priority; + struct rt_work work_long; + struct rt_work work_mid; + struct rt_work work_short; + struct wq_order_ctx ctx; + struct wq_order_item item_long; + struct wq_order_item item_mid; + struct wq_order_item item_short; + volatile int order_long = 0; + volatile rt_tick_t long_at = 0; + volatile rt_tick_t mid_at = 0; + volatile rt_tick_t short_at = 0; + rt_tick_t short_submit_at; + rt_err_t err; + + curr_priority = wq_get_test_thread_priority(-1); + queue = rt_workqueue_create("test", 2048, curr_priority); + uassert_not_null(queue); + if (queue == RT_NULL) + { + return; + } + + err = rt_sem_init(&ctx.done, "wqdo", 0, RT_IPC_FLAG_FIFO); + uassert_int_equal(err, RT_EOK); + ctx.seq = 0; + ctx.order_first = 0; + ctx.order_second = 0; + + item_long.ctx = &ctx; + item_long.target = &order_long; + item_long.stamp = &long_at; + item_mid.ctx = &ctx; + item_mid.target = &ctx.order_second; + item_mid.stamp = &mid_at; + item_short.ctx = &ctx; + item_short.target = &ctx.order_first; + item_short.stamp = &short_at; + + rt_work_init(&work_long, wq_order_work_fun, (void *)&item_long); + rt_work_init(&work_mid, wq_order_work_fun, (void *)&item_mid); + rt_work_init(&work_short, wq_order_work_fun, (void *)&item_short); + + /* Phase: delayed order + reschedule. */ + /* Submit a long delay first so the queue can sleep on delayed_list. 
*/ + err = rt_workqueue_submit_work(queue, &work_long, rt_tick_from_millisecond(80)); + uassert_int_equal(err, RT_EOK); + rt_thread_delay(rt_tick_from_millisecond(5)); + short_submit_at = rt_tick_get(); + /* Submit a shorter delay; it should wake and run earlier. */ + err = rt_workqueue_submit_work(queue, &work_short, rt_tick_from_millisecond(20)); + uassert_int_equal(err, RT_EOK); + /* Insert a medium delay to exercise middle insertion. */ + err = rt_workqueue_submit_work(queue, &work_mid, rt_tick_from_millisecond(50)); + uassert_int_equal(err, RT_EOK); + + err = rt_sem_take(&ctx.done, rt_tick_from_millisecond(200)); + uassert_int_equal(err, RT_EOK); + err = rt_sem_take(&ctx.done, rt_tick_from_millisecond(200)); + uassert_int_equal(err, RT_EOK); + err = rt_sem_take(&ctx.done, rt_tick_from_millisecond(200)); + uassert_int_equal(err, RT_EOK); + + uassert_int_equal(ctx.order_first, 1); + uassert_int_equal(ctx.order_second, 2); + uassert_int_equal(order_long, 3); + uassert_true(short_at != 0); + uassert_true(mid_at != 0); + uassert_true(long_at != 0); + if (short_at != 0) + { + uassert_true((short_at - short_submit_at) >= rt_tick_from_millisecond(20)); + } + if (short_at != 0 && mid_at != 0) + { + /* Mid delay should finish after short delay (wrap-safe compare). */ + uassert_true((mid_at - short_at) < RT_TICK_MAX / 2); + } + if (mid_at != 0 && long_at != 0) + { + /* Long delay should finish after mid delay (wrap-safe compare). 
*/ + uassert_true((long_at - mid_at) < RT_TICK_MAX / 2); + } + + rt_sem_detach(&ctx.done); + rt_workqueue_destroy(queue); +} + +static void test_workqueue_delayed_same_timeout_fifo(void) +{ + struct rt_workqueue *queue; + rt_uint8_t curr_priority; + struct rt_work work_block; + struct rt_work work1; + struct rt_work work2; + struct rt_work work3; + struct wq_order_ctx ctx; + struct wq_order_item item1; + struct wq_order_item item2; + struct wq_order_item item3; + volatile int order1 = 0; + volatile int order2 = 0; + volatile int order3 = 0; + rt_tick_t ticks; + rt_base_t level; + rt_err_t err; + rt_err_t err1; + rt_err_t err2; + rt_err_t err3; + + curr_priority = wq_get_test_thread_priority(-1); + queue = rt_workqueue_create("test", 2048, curr_priority); + uassert_not_null(queue); + if (queue == RT_NULL) + { + return; + } + + err = rt_sem_init(&ctx.done, "wqef", 0, RT_IPC_FLAG_FIFO); + uassert_int_equal(err, RT_EOK); + err = rt_sem_init(&ctx.start, "wqes", 0, RT_IPC_FLAG_FIFO); + uassert_int_equal(err, RT_EOK); + err = rt_sem_init(&ctx.finish, "wqee", 0, RT_IPC_FLAG_FIFO); + uassert_int_equal(err, RT_EOK); + ctx.seq = 0; + ctx.order_first = 0; + ctx.order_second = 0; + + item1.ctx = &ctx; + item1.target = &order1; + item1.stamp = RT_NULL; + item2.ctx = &ctx; + item2.target = &order2; + item2.stamp = RT_NULL; + item3.ctx = &ctx; + item3.target = &order3; + item3.stamp = RT_NULL; + + rt_work_init(&work1, wq_order_work_fun, (void *)&item1); + rt_work_init(&work2, wq_order_work_fun, (void *)&item2); + rt_work_init(&work3, wq_order_work_fun, (void *)&item3); + + /* Phase: same-timeout FIFO order for delayed list. 
*/ + rt_work_init(&work_block, wq_urgent_block_fun, (void *)&ctx); + err = rt_workqueue_submit_work(queue, &work_block, 0); + uassert_int_equal(err, RT_EOK); + + err = rt_sem_take(&ctx.start, rt_tick_from_millisecond(100)); + uassert_int_equal(err, RT_EOK); + + ticks = rt_tick_from_millisecond(100); + uassert_true(ticks > 0 && ticks < RT_TICK_MAX / 2); + /* Disable interrupts to keep the same tick for FIFO ordering. */ + level = rt_hw_interrupt_disable(); + err1 = rt_workqueue_submit_work(queue, &work1, ticks); + err2 = rt_workqueue_submit_work(queue, &work2, ticks); + err3 = rt_workqueue_submit_work(queue, &work3, ticks); + rt_hw_interrupt_enable(level); + + uassert_int_equal(err1, RT_EOK); + uassert_int_equal(err2, RT_EOK); + uassert_int_equal(err3, RT_EOK); + + rt_sem_release(&ctx.finish); + + err = rt_sem_take(&ctx.done, rt_tick_from_millisecond(300)); + uassert_int_equal(err, RT_EOK); + err = rt_sem_take(&ctx.done, rt_tick_from_millisecond(300)); + uassert_int_equal(err, RT_EOK); + err = rt_sem_take(&ctx.done, rt_tick_from_millisecond(300)); + uassert_int_equal(err, RT_EOK); + + uassert_int_equal(order1, 1); + uassert_int_equal(order2, 2); + uassert_int_equal(order3, 3); + + rt_sem_detach(&ctx.finish); + rt_sem_detach(&ctx.start); + rt_sem_detach(&ctx.done); + rt_workqueue_destroy(queue); +} + +static void test_workqueue_urgent_delayed(void) +{ + struct rt_workqueue *queue; + rt_uint8_t curr_priority; + struct rt_work work; + struct wq_order_ctx ctx; + struct wq_order_item item; + rt_err_t err; + + curr_priority = wq_get_test_thread_priority(-1); + queue = rt_workqueue_create("test", 2048, curr_priority); + uassert_not_null(queue); + if (queue == RT_NULL) + { + return; + } + + err = rt_sem_init(&ctx.done, "wqud", 0, RT_IPC_FLAG_FIFO); + uassert_int_equal(err, RT_EOK); + ctx.seq = 0; + ctx.order_first = 0; + ctx.order_second = 0; + + item.ctx = &ctx; + item.target = &ctx.order_first; + item.stamp = RT_NULL; + + rt_work_init(&work, wq_order_work_fun, (void 
*)&item); + err = rt_workqueue_submit_work(queue, &work, rt_tick_from_millisecond(100)); + uassert_int_equal(err, RT_EOK); + rt_thread_delay(rt_tick_from_millisecond(5)); + + /* Phase: urgent moves delayed work to head and wakes sleeper. */ + err = rt_workqueue_urgent_work(queue, &work); + uassert_int_equal(err, RT_EOK); + + err = rt_sem_take(&ctx.done, rt_tick_from_millisecond(60)); + uassert_int_equal(err, RT_EOK); + uassert_int_equal(ctx.order_first, 1); + + rt_sem_detach(&ctx.done); + rt_workqueue_destroy(queue); +} + +static void test_workqueue_reschedule_switch(void) +{ + struct rt_workqueue *queue; + rt_uint8_t curr_priority; + struct rt_work work; + struct wq_order_ctx ctx; + struct wq_order_item item; + rt_err_t err; + + curr_priority = wq_get_test_thread_priority(1); + queue = rt_workqueue_create("test", 2048, curr_priority); + uassert_not_null(queue); + if (queue == RT_NULL) + { + return; + } + + err = rt_sem_init(&ctx.done, "wqrs", 0, RT_IPC_FLAG_FIFO); + uassert_int_equal(err, RT_EOK); + ctx.seq = 0; + ctx.order_first = 0; + ctx.order_second = 0; + + item.ctx = &ctx; + item.target = &ctx.order_first; + item.stamp = RT_NULL; + + rt_work_init(&work, wq_order_work_fun, (void *)&item); + + /* Phase: delayed -> immediate reschedule (execute once). 
*/ + err = rt_workqueue_submit_work(queue, &work, rt_tick_from_millisecond(40)); + uassert_int_equal(err, RT_EOK); + rt_thread_delay(rt_tick_from_millisecond(5)); + err = rt_workqueue_submit_work(queue, &work, 0); + uassert_int_equal(err, RT_EOK); + + err = rt_sem_take(&ctx.done, rt_tick_from_millisecond(200)); + uassert_int_equal(err, RT_EOK); + uassert_int_equal(ctx.order_first, 1); + + rt_thread_delay(rt_tick_from_millisecond(60)); + uassert_int_equal(ctx.order_first, 1); + + rt_sem_detach(&ctx.done); + err = rt_sem_init(&ctx.done, "wqrs", 0, RT_IPC_FLAG_FIFO); + uassert_int_equal(err, RT_EOK); + + ctx.seq = 0; + ctx.order_first = 0; + + rt_work_init(&work, wq_order_work_fun, (void *)&item); + + /* Phase: immediate -> delayed reschedule (execute after delay). */ + err = rt_workqueue_submit_work(queue, &work, 0); + uassert_int_equal(err, RT_EOK); + err = rt_workqueue_submit_work(queue, &work, rt_tick_from_millisecond(30)); + uassert_int_equal(err, RT_EOK); + + rt_thread_delay(rt_tick_from_millisecond(10)); + uassert_int_equal(ctx.order_first, 0); + + err = rt_sem_take(&ctx.done, rt_tick_from_millisecond(200)); + uassert_int_equal(err, RT_EOK); + uassert_int_equal(ctx.order_first, 1); + + rt_sem_detach(&ctx.done); + rt_workqueue_destroy(queue); +} + +static void test_workqueue_reschedule_delayed_head_refresh(void) +{ + struct rt_workqueue *queue; + rt_uint8_t curr_priority; + struct rt_work work; + volatile int work_flag = 0; + rt_tick_t old_timeout_tick = 0; + rt_tick_t new_timeout_tick = 0; + rt_err_t err; + int tries; + rt_bool_t refreshed = RT_FALSE; + + curr_priority = wq_get_test_thread_priority(-1); + queue = rt_workqueue_create("test", 2048, curr_priority); + uassert_not_null(queue); + if (queue == RT_NULL) + { + return; + } + + rt_work_init(&work, wq_inc_work_fun, (void *)&work_flag); + err = rt_workqueue_submit_work(queue, &work, rt_tick_from_millisecond(60)); + uassert_int_equal(err, RT_EOK); + + /* White-box regression: worker timeout should refresh 
after delaying the head further. */ + for (tries = 0; tries < 10; tries++) + { + old_timeout_tick = queue->work_thread->thread_timer.timeout_tick; + if ((old_timeout_tick - rt_tick_get()) < RT_TICK_MAX / 2 && + old_timeout_tick != 0) + { + break; + } + rt_thread_delay(rt_tick_from_millisecond(5)); + } + uassert_true(old_timeout_tick != 0); + + err = rt_workqueue_submit_work(queue, &work, rt_tick_from_millisecond(140)); + uassert_int_equal(err, RT_EOK); + + for (tries = 0; tries < 10; tries++) + { + new_timeout_tick = queue->work_thread->thread_timer.timeout_tick; + if (wq_timeout_shifted_later(new_timeout_tick, + old_timeout_tick, + rt_tick_from_millisecond(40))) + { + refreshed = RT_TRUE; + break; + } + rt_thread_delay(rt_tick_from_millisecond(5)); + } + uassert_true(refreshed); + uassert_int_equal(work_flag, 0); + + err = rt_workqueue_destroy_sync(queue); + uassert_int_equal(err, RT_EOK); +} + +static void test_workqueue_repeat_submit(void) +{ + struct rt_workqueue *queue; + rt_uint8_t curr_priority; + struct rt_work work; + volatile int work_flag = 0; + rt_err_t err; + + /* 1 lower priority than the current test thread */ + curr_priority = wq_get_test_thread_priority(1); + queue = rt_workqueue_create("test01", 2048, curr_priority); + uassert_not_null(queue); + if (queue == RT_NULL) + { + return; + } + + work_flag = 0; + rt_work_init(&work, wq_inc_work_fun, (void *)&work_flag); + /* Multiple submissions of the same work */ + err = rt_workqueue_submit_work(queue, &work, 0); + uassert_int_equal(err, RT_EOK); + + /* Phase: repeat submit while pending (execute once). 
*/ + /* The same work, before it is executed, can be submitted repeatedly and executed only once */ + err = rt_workqueue_submit_work(queue, &work, 0); + uassert_int_equal(err, RT_EOK); + rt_thread_delay(rt_tick_from_millisecond(10)); + /* Check if it was executed only once */ + uassert_int_equal(work_flag, 1); + + rt_thread_delay(rt_tick_from_millisecond(100)); + rt_workqueue_destroy(queue); + /* Phase: repeat submit while running (execute twice). */ + curr_priority = wq_get_test_thread_priority(-1); + queue = rt_workqueue_create("test02", 2048, curr_priority); + uassert_not_null(queue); + if (queue == RT_NULL) + { + return; + } + + work_flag = 0; + rt_work_init(&work, wq_inc_delay_work_fun, (void *)&work_flag); + /* Submit work with high queue priority that will be executed immediately */ + err = rt_workqueue_submit_work(queue, &work, 0); + uassert_int_equal(err, RT_EOK); + + rt_thread_delay(rt_tick_from_millisecond(5)); + /* Re-submission of work in progress */ + err = rt_workqueue_submit_work(queue, &work, 0); + uassert_int_equal(err, RT_EOK); + rt_thread_delay(rt_tick_from_millisecond(10)); + uassert_int_equal(work_flag, 1); + + rt_thread_delay(rt_tick_from_millisecond(10)); + uassert_int_equal(work_flag, 2); + + rt_workqueue_destroy(queue); +} + +static void test_workqueue_repeat_requeue_limit(void) +{ + rt_uint8_t curr_priority; + struct rt_work work; + struct wq_repeat_ctx ctx; + rt_err_t err; + + /* 1 priority higher than current test thread */ + curr_priority = wq_get_test_thread_priority(-1); + ctx.queue = rt_workqueue_create("test03", 2048, curr_priority); + uassert_not_null(ctx.queue); + if (ctx.queue == RT_NULL) + { + return; + } + + err = rt_sem_init(&ctx.done, "wqrr", 0, RT_IPC_FLAG_FIFO); + uassert_int_equal(err, RT_EOK); + ctx.count = 0; + + rt_work_init(&work, wq_repeat_requeue_fun, (void *)&ctx); + /* Submit work with high queue priority that will be executed immediately */ + err = rt_workqueue_submit_work(ctx.queue, &work, 0); + 
uassert_int_equal(err, RT_EOK); + + /* Wait for the work to be executed 20 times with a timeout */ + err = rt_sem_take(&ctx.done, rt_tick_from_millisecond(500)); + uassert_int_equal(err, RT_EOK); + + /* Check if the work was executed 20 times */ + uassert_int_equal(ctx.count, 20); + + rt_sem_detach(&ctx.done); + rt_workqueue_destroy(ctx.queue); +} + +static void test_workqueue_repeat_delay(void) +{ + struct rt_workqueue *queue; + rt_uint8_t curr_priority; + struct rt_work work; + volatile int work_flag = 0; + rt_err_t err; + + /* 1 lower priority than the current test thread */ + curr_priority = wq_get_test_thread_priority(1); + queue = rt_workqueue_create("test", 2048, curr_priority); + uassert_not_null(queue); + if (queue == RT_NULL) + { + return; + } + + work_flag = 0; + rt_work_init(&work, wq_inc_work_fun, (void *)&work_flag); + + err = rt_workqueue_submit_work(queue, &work, rt_tick_from_millisecond(20)); + uassert_int_equal(err, RT_EOK); + + /* Phase: delayed repeat before run (execute once). */ + /* At this point the delayed work has not been executed */ + rt_thread_delay(rt_tick_from_millisecond(10)); + /* Re-submission of time-delayed work */ + err = rt_workqueue_submit_work(queue, &work, rt_tick_from_millisecond(20)); + uassert_int_equal(err, RT_EOK); + + rt_thread_delay(rt_tick_from_millisecond(15)); + uassert_int_equal(work_flag, 0); + + /* Waiting for delayed task execution */ + rt_thread_delay(rt_tick_from_millisecond(15)); + uassert_int_equal(work_flag, 1); + + /* Phase: delayed repeat after run (execute twice). 
*/ + work_flag = 0; + rt_work_init(&work, wq_inc_delay_work_fun, (void *)&work_flag); + + err = rt_workqueue_submit_work(queue, &work, rt_tick_from_millisecond(20)); + uassert_int_equal(err, RT_EOK); + + /* Waiting for delayed work execution */ + rt_thread_delay(rt_tick_from_millisecond(25)); + err = rt_workqueue_submit_work(queue, &work, rt_tick_from_millisecond(20)); + uassert_int_equal(err, RT_EOK); + + /* Check if the delayed work has been run only once */ + rt_thread_delay(rt_tick_from_millisecond(10)); + uassert_int_equal(work_flag, 1); + + rt_thread_delay(rt_tick_from_millisecond(25)); + /* Check if the delayed work is executed twice */ + uassert_int_equal(work_flag, 2); + + rt_thread_delay(rt_tick_from_millisecond(100)); + rt_workqueue_destroy(queue); +} + +static void test_workqueue_state_query(void) +{ + struct rt_workqueue *queue_low; + struct rt_workqueue *queue_other; + struct rt_workqueue *queue_high; + struct rt_work work; + struct wq_sys_block_ctx block_ctx; + rt_uint16_t status; + rt_err_t err; + volatile int work_flag = 0; + + /* Phase: idle/pending/other-queue states. 
*/ + queue_low = rt_workqueue_create("test", 2048, wq_get_test_thread_priority(1)); + uassert_not_null(queue_low); + if (queue_low == RT_NULL) + { + return; + } + + rt_work_init(&work, wq_inc_work_fun, (void *)&work_flag); + status = rt_workqueue_get_work_status(queue_low, &work); + uassert_int_equal(status, RT_WORK_STATUS_IDLE); + + err = rt_workqueue_submit_work(queue_low, &work, 0); + uassert_int_equal(err, RT_EOK); + status = rt_workqueue_get_work_status(queue_low, &work); + uassert_true(status & RT_WORK_STATUS_PENDING); + + queue_other = rt_workqueue_create("testo", 2048, wq_get_test_thread_priority(1)); + uassert_not_null(queue_other); + if (queue_other != RT_NULL) + { + status = rt_workqueue_get_work_status(queue_other, &work); + uassert_int_equal(status, RT_WORK_STATUS_OTHER_QUEUE); + rt_workqueue_destroy(queue_other); + } + + err = rt_workqueue_cancel_work(queue_low, &work); + uassert_int_equal(err, RT_EOK); + rt_workqueue_destroy(queue_low); + + /* Phase: running state. */ + queue_high = rt_workqueue_create("testr", 2048, wq_get_test_thread_priority(-1)); + uassert_not_null(queue_high); + if (queue_high == RT_NULL) + { + return; + } + + err = rt_sem_init(&block_ctx.started, "wqst", 0, RT_IPC_FLAG_FIFO); + uassert_int_equal(err, RT_EOK); + err = rt_sem_init(&block_ctx.allow_finish, "wqsf", 0, RT_IPC_FLAG_FIFO); + uassert_int_equal(err, RT_EOK); + + rt_work_init(&work, wq_sys_block_fun, (void *)&block_ctx); + err = rt_workqueue_submit_work(queue_high, &work, 0); + uassert_int_equal(err, RT_EOK); + + err = rt_sem_take(&block_ctx.started, rt_tick_from_millisecond(200)); + uassert_int_equal(err, RT_EOK); + status = rt_workqueue_get_work_status(queue_high, &work); + uassert_true(status & RT_WORK_STATUS_RUNNING); + + rt_sem_release(&block_ctx.allow_finish); + rt_thread_delay(rt_tick_from_millisecond(5)); + + rt_sem_detach(&block_ctx.allow_finish); + rt_sem_detach(&block_ctx.started); + rt_workqueue_destroy(queue_high); +} + +#ifdef RT_USING_SYSTEM_WORKQUEUE 
+static void test_workqueue_sys_apis(void) +{ + struct rt_work block_work; + struct rt_work work; + struct wq_flag_ctx flag_ctx; + struct wq_sys_block_ctx block_ctx; + struct wq_sys_release_ctx release_ctx; + rt_thread_t release_thread; + volatile int work_flag; + rt_uint16_t status; + rt_err_t err; + + /* Phase: system submit. */ + err = rt_sem_init(&flag_ctx.done, "wqss", 0, RT_IPC_FLAG_FIFO); + uassert_int_equal(err, RT_EOK); + flag_ctx.flag = 0; + + rt_work_init(&work, wq_flag_work_fun, (void *)&flag_ctx); + err = rt_work_submit(&work, 0); + uassert_int_equal(err, RT_EOK); + + err = rt_sem_take(&flag_ctx.done, rt_tick_from_millisecond(200)); + uassert_int_equal(err, RT_EOK); + uassert_int_equal(flag_ctx.flag, 1); + + rt_sem_detach(&flag_ctx.done); + + /* Phase: system urgent. */ + err = rt_sem_init(&flag_ctx.done, "wqsu", 0, RT_IPC_FLAG_FIFO); + uassert_int_equal(err, RT_EOK); + flag_ctx.flag = 0; + + rt_work_init(&work, wq_flag_work_fun, (void *)&flag_ctx); + err = rt_work_urgent(&work); + uassert_int_equal(err, RT_EOK); + + err = rt_sem_take(&flag_ctx.done, rt_tick_from_millisecond(200)); + uassert_int_equal(err, RT_EOK); + uassert_int_equal(flag_ctx.flag, 1); + + rt_sem_detach(&flag_ctx.done); + + /* Phase: system cancel + status query. 
*/ + err = rt_sem_init(&block_ctx.started, "wqcs", 0, RT_IPC_FLAG_FIFO); + uassert_int_equal(err, RT_EOK); + err = rt_sem_init(&block_ctx.allow_finish, "wqcf", 0, RT_IPC_FLAG_FIFO); + uassert_int_equal(err, RT_EOK); + + rt_work_init(&block_work, wq_sys_block_fun, (void *)&block_ctx); + err = rt_work_submit(&block_work, 0); + uassert_int_equal(err, RT_EOK); + + err = rt_sem_take(&block_ctx.started, rt_tick_from_millisecond(100)); + uassert_int_equal(err, RT_EOK); + + status = rt_work_get_status(&block_work); + uassert_true(status & RT_WORK_STATUS_RUNNING); + + err = rt_work_cancel(&block_work); + uassert_int_equal(err, -RT_EBUSY); + + release_ctx.sem = &block_ctx.allow_finish; + release_ctx.delay = rt_tick_from_millisecond(40); + release_thread = rt_thread_create("wqsr", wq_sys_release_entry, &release_ctx, + 1024, wq_get_test_thread_priority(1), 10); + uassert_not_null(release_thread); + if (release_thread == RT_NULL) + { + rt_sem_release(&block_ctx.allow_finish); + rt_thread_delay(rt_tick_from_millisecond(5)); + rt_sem_detach(&block_ctx.allow_finish); + rt_sem_detach(&block_ctx.started); + return; + } + rt_thread_startup(release_thread); + + err = rt_work_cancel_sync(&block_work); + uassert_int_equal(err, RT_EOK); + status = rt_work_get_status(&block_work); + uassert_true(status & RT_WORK_STATUS_DONE); + + work_flag = 0; + rt_work_init(&work, wq_inc_work_fun, (void *)&work_flag); + err = rt_work_submit(&work, 0); + uassert_int_equal(err, RT_EOK); + + err = rt_work_cancel(&work); + uassert_int_equal(err, RT_EOK); + + rt_thread_delay(rt_tick_from_millisecond(20)); + uassert_int_equal(work_flag, 0); + + /* Phase: system cancel sync on delayed work should report canceled. 
 */ + work_flag = 0; + rt_work_init(&work, wq_inc_work_fun, (void *)&work_flag); + err = rt_work_submit(&work, rt_tick_from_millisecond(30)); + uassert_int_equal(err, RT_EOK); + err = rt_work_cancel_sync(&work); + uassert_int_equal(err, RT_EOK); + status = rt_work_get_status(&work); + uassert_true(status & RT_WORK_STATUS_CANCELED); + + rt_thread_delay(rt_tick_from_millisecond(40)); + uassert_int_equal(work_flag, 0); + + rt_sem_detach(&block_ctx.allow_finish); + rt_sem_detach(&block_ctx.started); +} +#endif /* RT_USING_SYSTEM_WORKQUEUE */ + +void workqueue_basic_testcase(void) +{ + /* General work queue test */ + UTEST_UNIT_RUN(test_workqueue_submit_immediate); + /* Invalid tick submit should return error (non-assert builds) */ + UTEST_UNIT_RUN(test_workqueue_submit_invalid_tick); + /* Workqueue ordering (FIFO + urgent) */ + UTEST_UNIT_RUN(test_workqueue_ordering); + /* Delayed work order + reschedule test */ + UTEST_UNIT_RUN(test_workqueue_delayed_order); + /* Same timeout delayed work keeps FIFO order */ + UTEST_UNIT_RUN(test_workqueue_delayed_same_timeout_fifo); + /* Urgent on delayed work should run immediately */ + UTEST_UNIT_RUN(test_workqueue_urgent_delayed); + /* Cross-type reschedule (delay <-> immediate) */ + UTEST_UNIT_RUN(test_workqueue_reschedule_switch); + /* Delayed-head reschedule should refresh worker timeout */ + UTEST_UNIT_RUN(test_workqueue_reschedule_delayed_head_refresh); +#ifdef RT_USING_SYSTEM_WORKQUEUE + /* System workqueue APIs test */ + UTEST_UNIT_RUN(test_workqueue_sys_apis); +#endif /* RT_USING_SYSTEM_WORKQUEUE */ + /* Multiple submissions of the same work prior to execution */ + UTEST_UNIT_RUN(test_workqueue_repeat_submit); + /* Repeated requeue of the same work from within its own handler */ + UTEST_UNIT_RUN(test_workqueue_repeat_requeue_limit); + /* Multiple submissions of the same delayed task before execution */ + UTEST_UNIT_RUN(test_workqueue_repeat_delay); + /* Work status query API */ + UTEST_UNIT_RUN(test_workqueue_state_query); +} + 
+static rt_err_t utest_tc_init(void) +{ + return RT_EOK; +} + +static rt_err_t utest_tc_cleanup(void) +{ + return RT_EOK; +} + +static void testcase(void) +{ + workqueue_basic_testcase(); + workqueue_cancel_testcase(); + workqueue_destroy_testcase(); +} +/* Workqueue testcase entry. */ +UTEST_TC_EXPORT(testcase, "components.drivers.ipc.workqueue_tc", utest_tc_init, utest_tc_cleanup, 300); + +#endif /* RT_USING_DEVICE_IPC */ diff --git a/components/drivers/ipc/utest/workqueue_tc_cancel.c b/components/drivers/ipc/utest/workqueue_tc_cancel.c new file mode 100644 index 00000000000..3c3edbf25f2 --- /dev/null +++ b/components/drivers/ipc/utest/workqueue_tc_cancel.c @@ -0,0 +1,1506 @@ +/* + * Copyright (c) 2006-2026, RT-Thread Development Team + * + * SPDX-License-Identifier: Apache-2.0 + * + * Change Logs: + * Date Author Notes + * 2021-02-06 tyx first commit + * 2024-12-31 rbb666 Adding Test Cases + * 2025-11-16 ChuanN-sudo add standardized utest documentation block + * 2026-03-20 RyanCW(Codex) split workqueue utest and cover cancel/sync paths + */ + +/** + * Test Case Name: IPC Workqueue Test + * + * Test Objectives: + * - Validate cancel, cancel sync, and cancel-all behaviors with expected return codes. + * - Verify cancel sync waits for running work and blocks requeue attempts. + * - Verify cross-queue operations are rejected. + * - Test core APIs: rt_workqueue_cancel_work(), rt_workqueue_cancel_work_sync(), + * rt_workqueue_cancel_all_work(). + * + * Test Scenarios: + * - Cancel pending and delayed work items. + * - Cancel sync waits for running work and rejects requeue attempts. + * - Cross-queue operations are rejected for safety. + * - Cancel-all keeps running work and drops queued items. + * + * Verification Metrics: + * - Pending/delayed cancels prevent execution. + * - Cancel sync waits for running work and returns busy on invalid waiters. + * - Requeue attempts during cancel sync are rejected. + * - Cancel-all keeps running work and removes queued work. 
+ * - Cross-queue operations return busy. + * + * Dependencies: + * - Hardware requirements: QEMU emulator or any hardware platform that supports RT-Thread. + * - Software configuration: + * - RT_USING_UTEST must be enabled (select "RT-Thread Utestcases" in menuconfig). + * - RT_UTEST_WORKQUEUE must be enabled + * (enable via: RT-Thread Utestcases -> Kernel Components -> Drivers -> IPC Test -> IPC Workqueue Test). + * - Environmental Assumptions: System scheduler working normally. + * + * Expected Results: + * - Final output: "[ PASSED ] [ result ] testcase (components.drivers.ipc.workqueue_tc)" + * - No memory leaks or race condition detections in logs. + * - No assertions triggered during test execution. + */ +/* Workqueue cancel/sync tests: cancel, cancel_sync, cancel_all, cross-queue. */ +#include "rtthread.h" +#include "rtdevice.h" +#include "utest.h" +#include "rtatomic.h" + +#ifdef RT_USING_DEVICE_IPC + +/* Testcase entry is defined in workqueue_tc_basic.c. */ +extern rt_uint8_t wq_get_test_thread_priority(rt_int8_t pos); + +struct wq_sync_ctx +{ + struct rt_workqueue *queue; + struct rt_semaphore started; + struct rt_semaphore allow_finish; + volatile int phase; + volatile int count; + volatile int released; + volatile int submit_ret; + volatile int run_count; + volatile int done; +}; + +struct wq_done_ctx +{ + struct rt_semaphore done; + volatile int flag; +}; + +struct wq_requeue_early_ctx +{ + struct rt_workqueue *queue; + struct rt_semaphore started; + struct rt_semaphore allow_finish; + volatile int run_count; + volatile rt_err_t submit_ret; +}; + +struct wq_release_ctx +{ + struct rt_semaphore *sem; + rt_tick_t delay; +}; + +struct wq_cancel_wait_ctx +{ + struct rt_workqueue *queue; + struct rt_work *work; + struct rt_semaphore started; + struct rt_semaphore done; + volatile rt_err_t err; +}; + +struct wq_cancel_all_ctx +{ + struct rt_workqueue *queue; + struct rt_semaphore started; + struct rt_semaphore allow_finish; + struct rt_semaphore done; + 
    /* Results of the two requeue attempts made by wq_cancel_all_requeue_fun
     * (the enclosing context struct opens above this chunk). */
    volatile rt_err_t submit_ret1;
    volatile rt_err_t submit_ret2;
};

/* Context for a work item that calls cancel_work_sync on itself. */
struct wq_self_cancel_ctx
{
    struct rt_workqueue *queue;
    struct rt_semaphore done;
    volatile rt_err_t err;        /* return value of the self-cancel */
};

struct wq_stress_item;

/* Shared state for the submit/cancel stress test: per-slot counters. */
struct wq_stress_group
{
    struct rt_workqueue *queue;
    rt_atomic_t submit_cnt[8];    /* successful submits per work item */
    rt_atomic_t exec_cnt[8];      /* executions per work item */
    struct wq_stress_item *items;
};

/* One stressed work item; index selects its counter slot in the group. */
struct wq_stress_item
{
    struct rt_work work;
    struct wq_stress_group *group;
    rt_uint8_t index;
};

/* Per-thread context for the stress submitter threads. */
struct wq_stress_thread_ctx
{
    struct wq_stress_group *group;
    struct rt_semaphore done;     /* signaled when the thread finishes its loop */
    rt_uint32_t seed;             /* LCG state; updated in place */
};

/* Return RT_TRUE when new_tick moved at least min_delta ticks later than
 * old_tick, using wrap-safe unsigned tick subtraction. */
static rt_bool_t wq_timeout_shifted_later(rt_tick_t new_tick,
                                          rt_tick_t old_tick,
                                          rt_tick_t min_delta)
{
    rt_tick_t delta = new_tick - old_tick;

    return (delta < RT_TICK_MAX / 2) && (delta >= min_delta);
}

/* Work callback driven by wq_sync_ctx: in phase 1 it parks on allow_finish
 * (simulating a long-running work) before counting; phase 0 counts at once. */
static void wq_sync_work_fun(struct rt_work *work, void *work_data)
{
    struct wq_sync_ctx *ctx = (struct wq_sync_ctx *)work_data;

    if (ctx->phase == 1)
    {
        /* Block here to simulate a running work. */
        rt_sem_release(&ctx->started);
        rt_sem_take(&ctx->allow_finish, RT_WAITING_FOREVER);
    }

    ctx->count += 1;
    ctx->done = 1;
}

/* Helper thread: after a short delay, unblock the parked sync work. */
static void wq_release_entry(void *parameter)
{
    struct wq_sync_ctx *ctx = (struct wq_sync_ctx *)parameter;

    rt_thread_delay(rt_tick_from_millisecond(20));
    /* Release blocked work. */
    ctx->released = 1;
    rt_sem_release(&ctx->allow_finish);
}

/* Work callback that, once released, tries to resubmit itself; the submit
 * result is recorded so the test can check it was rejected mid-cancel. */
static void wq_requeue_work_fun(struct rt_work *work, void *work_data)
{
    struct wq_sync_ctx *ctx = (struct wq_sync_ctx *)work_data;

    ctx->run_count += 1;
    rt_sem_release(&ctx->started);
    rt_sem_take(&ctx->allow_finish, RT_WAITING_FOREVER);
    /* Requeue in callback, should be rejected during cancel_sync. */
    ctx->submit_ret = rt_workqueue_submit_work(ctx->queue, work, 0);
}

/* Trivial work callback: increment the int pointed to by work_data. */
static void wq_inc_work_fun(struct rt_work *work, void *work_data)
{
    *((int *)work_data) += 1;
}

/* Work callback that sleeps ~10 ms before setting its flag, so a caller can
 * reliably observe the "running" state. */
static void wq_delay_flag_work_fun(struct rt_work *work, void *work_data)
{
    rt_thread_delay(rt_tick_from_millisecond(10));
    *((int *)work_data) = 1;
}

/* Work callback that marks completion and signals the done semaphore. */
static void wq_done_work_fun(struct rt_work *work, void *work_data)
{
    struct wq_done_ctx *ctx = (struct wq_done_ctx *)work_data;

    ctx->flag = 1;
    rt_sem_release(&ctx->done);
}

/* Helper thread: release an arbitrary semaphore after a configured delay. */
static void wq_release_sem_entry(void *parameter)
{
    struct wq_release_ctx *ctx = (struct wq_release_ctx *)parameter;

    rt_thread_delay(ctx->delay);
    rt_sem_release(ctx->sem);
}

/* Helper thread: run cancel_work_sync and publish its result; 'started' is
 * released just before the (potentially blocking) cancel call. */
static void wq_cancel_wait_entry(void *parameter)
{
    struct wq_cancel_wait_ctx *ctx = (struct wq_cancel_wait_ctx *)parameter;

    rt_sem_release(&ctx->started);
    ctx->err = rt_workqueue_cancel_work_sync(ctx->queue, ctx->work);
    rt_sem_release(&ctx->done);
}

/* Work callback that cancel_syncs ITSELF from inside the workqueue thread;
 * the implementation is expected to return -RT_EBUSY instead of deadlocking. */
static void wq_cancel_sync_self_fun(struct rt_work *work, void *work_data)
{
    struct wq_self_cancel_ctx *ctx = (struct wq_self_cancel_ctx *)work_data;

    ctx->err = rt_workqueue_cancel_work_sync(ctx->queue, work);
    rt_sem_release(&ctx->done);
}

/* Work callback used by the cancel_all test: requeues itself once before the
 * canceling flag is set (should succeed) and once after (should be refused). */
static void wq_cancel_all_requeue_fun(struct rt_work *work, void *work_data)
{
    struct wq_cancel_all_ctx *ctx = (struct wq_cancel_all_ctx *)work_data;

    /* Requeue early before cancel_sync sets canceling. */
    ctx->submit_ret1 = rt_workqueue_submit_work(ctx->queue, work, 0);
    rt_sem_release(&ctx->started);
    rt_sem_take(&ctx->allow_finish, RT_WAITING_FOREVER);
    /* Requeue after cancel_all while cancel_sync is waiting. */
    ctx->submit_ret2 = rt_workqueue_submit_work(ctx->queue, work, 0);
    rt_sem_release(&ctx->done);
}

/* Deterministic linear congruential generator (glibc-style constants). */
static rt_uint32_t wq_lcg_next(rt_uint32_t *seed)
{
    *seed = (*seed * 1103515245u) + 12345u;
    return *seed;
}

/* Stress work callback: bump the execution counter of this item's slot. */
static void wq_stress_work_fun(struct rt_work *work, void *work_data)
{
    struct wq_stress_item *item = (struct wq_stress_item *)work_data;

    rt_atomic_add(&item->group->exec_cnt[item->index], 1);
}

/* Stress thread: 80 pseudo-random operations (immediate submit, delayed
 * submit, or cancel) across 8 shared work items, with periodic yields. */
static void wq_stress_thread_entry(void *parameter)
{
    struct wq_stress_thread_ctx *ctx = (struct wq_stress_thread_ctx *)parameter;
    struct wq_stress_group *group = ctx->group;
    rt_uint32_t seed = ctx->seed;
    rt_int32_t i;

    for (i = 0; i < 80; i++)
    {
        rt_uint32_t rand = wq_lcg_next(&seed);
        rt_uint32_t idx = rand % 8;
        rt_uint32_t op = (rand >> 8) % 3;
        rt_err_t err;

        if (op == 0)
        {
            /* Immediate submit; count only accepted submissions. */
            err = rt_workqueue_submit_work(group->queue, &group->items[idx].work, 0);
            if (err == RT_EOK)
            {
                rt_atomic_add(&group->submit_cnt[idx], 1);
            }
        }
        else if (op == 1)
        {
            /* Delayed submit with a 5/10/15 ms pseudo-random delay. */
            rt_tick_t ticks = rt_tick_from_millisecond(5 * (1 + (wq_lcg_next(&seed) % 3)));

            err = rt_workqueue_submit_work(group->queue, &group->items[idx].work, ticks);
            if (err == RT_EOK)
            {
                rt_atomic_add(&group->submit_cnt[idx], 1);
            }
        }
        else
        {
            /* Cancel; result intentionally ignored (may race with execution). */
            (void)rt_workqueue_cancel_work(group->queue, &group->items[idx].work);
        }

        if ((i & 0x7) == 0)
        {
            rt_thread_delay(rt_tick_from_millisecond(5));
        }
    }

    ctx->seed = seed;
    rt_sem_release(&ctx->done);
}

/* Work callback that requeues itself BEFORE parking, i.e. before a concurrent
 * cancel_sync can mark it canceling — that early submit should succeed. */
static void wq_requeue_early_work_fun(struct rt_work *work, void *work_data)
{
    struct wq_requeue_early_ctx *ctx = (struct wq_requeue_early_ctx *)work_data;

    ctx->run_count += 1;
    ctx->submit_ret = rt_workqueue_submit_work(ctx->queue, work, 0);
    rt_sem_release(&ctx->started);
    rt_sem_take(&ctx->allow_finish, RT_WAITING_FOREVER);
}

/* Verify cancel_work_sync removes pending (immediate and delayed) work before
 * it ever runs. */
static void test_workqueue_cancel_sync_pending(void)
{
    struct rt_workqueue *queue;
    rt_uint8_t curr_priority;
    struct rt_work work;
    struct wq_sync_ctx ctx;
    rt_err_t err;

    /* Worker runs at lower priority so submitted work stays pending. */
    curr_priority = wq_get_test_thread_priority(1);
    queue = rt_workqueue_create("test", 2048, curr_priority);
    uassert_not_null(queue);
    if (queue == RT_NULL)
    {
        return;
    }

    ctx.count = 0;
    ctx.phase = 0;   /* phase 0: callback does not block on semaphores */
    ctx.done = 0;

    /* Phase: cancel sync on pending work. */
    rt_work_init(&work, wq_sync_work_fun, (void *)&ctx);
    err = rt_workqueue_submit_work(queue, &work, 0);
    uassert_int_equal(err, RT_EOK);

    err = rt_workqueue_cancel_work_sync(queue, &work);
    uassert_int_equal(err, RT_EOK);

    /* Give the worker a chance to (incorrectly) run the canceled work. */
    rt_thread_delay(rt_tick_from_millisecond(10));
    uassert_int_equal(ctx.count, 0);

    /* Phase: cancel sync on delayed pending work. */
    ctx.count = 0;
    ctx.phase = 0;
    ctx.done = 0;
    rt_work_init(&work, wq_sync_work_fun, (void *)&ctx);
    err = rt_workqueue_submit_work(queue, &work, rt_tick_from_millisecond(20));
    uassert_int_equal(err, RT_EOK);

    err = rt_workqueue_cancel_work_sync(queue, &work);
    uassert_int_equal(err, RT_EOK);

    /* Wait past the original 20 ms delay to prove the timer was removed. */
    rt_thread_delay(rt_tick_from_millisecond(25));
    uassert_int_equal(ctx.count, 0);

    rt_workqueue_destroy(queue);
}

/* Verify cancel_work_sync on a merely-pending work succeeds even while the
 * scheduler is locked (the pending path must not need to block). */
static void test_workqueue_cancel_sync_pending_scheduler_unavailable_ok(void)
{
    struct rt_workqueue *queue;
    rt_uint8_t curr_priority;
    struct rt_work work;
    rt_base_t critical_level;
    volatile int work_flag = 0;
    rt_err_t err;

    /* White-box regression: pending cancel_sync should keep the no-wait fast path. */
    curr_priority = wq_get_test_thread_priority(1);
    queue = rt_workqueue_create("test", 2048, curr_priority);
    uassert_not_null(queue);
    if (queue == RT_NULL)
    {
        return;
    }

    rt_work_init(&work, wq_inc_work_fun, (void *)&work_flag);
    err = rt_workqueue_submit_work(queue, &work, 0);
    uassert_int_equal(err, RT_EOK);

    /* Cancel with the scheduler locked: must still succeed without blocking. */
    critical_level = rt_enter_critical();
    err = rt_workqueue_cancel_work_sync(queue, &work);
    rt_exit_critical_safe(critical_level);
    uassert_int_equal(err, RT_EOK);

    rt_thread_delay(rt_tick_from_millisecond(10));
    uassert_int_equal(work_flag, 0);

    err = rt_workqueue_destroy(queue);
    uassert_int_equal(err, RT_EOK);
}

/* Verify cancel_work_sync on a RUNNING work refuses to wait (returns
 * -RT_EBUSY) when the scheduler is unavailable, and leaves no stale flags. */
static void test_workqueue_cancel_sync_scheduler_unavailable_busy(void)
{
    struct rt_workqueue *queue;
    rt_uint8_t curr_priority;
    struct rt_work work;
    rt_base_t level;
    rt_base_t critical_level;
    rt_uint8_t queue_flags;
    rt_uint16_t work_flags;
    volatile int work_flag = 0;
    rt_err_t err;

    /* White-box regression: running cancel_sync must reject unavailable scheduler. */
    curr_priority = wq_get_test_thread_priority(1);
    queue = rt_workqueue_create("test", 2048, curr_priority);
    uassert_int_equal(err, RT_EOK);
    uassert_not_null(queue);
    if (queue == RT_NULL)
    {
        return;
    }

    rt_work_init(&work, wq_inc_work_fun, (void *)&work_flag);

    /* Forge a "running" state directly under the queue lock (white-box). */
    level = rt_spin_lock_irqsave(&(queue->spinlock));
    queue->work_current = &work;
    work.workqueue = queue;
    rt_spin_unlock_irqrestore(&(queue->spinlock), level);

    critical_level = rt_enter_critical();
    err = rt_workqueue_cancel_work_sync(queue, &work);
    rt_exit_critical_safe(critical_level);
    uassert_int_equal(err, -RT_EBUSY);

    /* Snapshot and restore state; the failed cancel must leave no residue. */
    level = rt_spin_lock_irqsave(&(queue->spinlock));
    queue_flags = queue->flags;
    work_flags = work.flags;
    queue->work_current = RT_NULL;
    work.workqueue = RT_NULL;
    rt_spin_unlock_irqrestore(&(queue->spinlock), level);

    uassert_int_equal(queue_flags, 0);
    uassert_true((work_flags & RT_WORK_STATE_CANCELING) == 0);
    uassert_int_equal(work_flag, 0);

    err = rt_workqueue_destroy(queue);
    uassert_int_equal(err, RT_EOK);
}

/* Verify the per-queue sync completion can be reused: run a work once to
 * completion, then cancel_sync it while it is running a second time. */
static void test_workqueue_cancel_sync_reuse_completion(void)
{
    struct rt_workqueue *queue;
    rt_uint8_t curr_priority;
    struct rt_work work;
    struct wq_sync_ctx ctx;
    rt_thread_t release_thread;
    rt_err_t err;

    /* Worker runs at higher priority so the work starts immediately. */
    curr_priority = wq_get_test_thread_priority(-1);
    queue = rt_workqueue_create("test", 2048, curr_priority);
    uassert_not_null(queue);
    if (queue == RT_NULL)
    {
        return;
    }

    err = rt_sem_init(&ctx.started, "wqrs", 0, RT_IPC_FLAG_FIFO);
    uassert_int_equal(err, RT_EOK);
    err = rt_sem_init(&ctx.allow_finish, "wqrf", 0, RT_IPC_FLAG_FIFO);
    uassert_int_equal(err, RT_EOK);

    ctx.phase = 0;   /* first run completes without blocking */
    ctx.count = 0;
    ctx.released = 0;
    ctx.done = 0;

    rt_work_init(&work, wq_sync_work_fun, (void *)&ctx);
    err = rt_workqueue_submit_work(queue, &work, 0);
    uassert_int_equal(err, RT_EOK);

    /* First run (phase 0) should have completed. */
    rt_thread_delay(rt_tick_from_millisecond(10));
    uassert_int_equal(ctx.count, 1);

    /* Second run (phase 1) parks inside the callback. */
    ctx.phase = 1;
    err = rt_workqueue_submit_work(queue, &work, 0);
    uassert_int_equal(err, RT_EOK);

    err = rt_sem_take(&ctx.started, rt_tick_from_millisecond(100));
    uassert_int_equal(err, RT_EOK);

    release_thread = rt_thread_create("wqrel", wq_release_entry, &ctx,
                                      1024, curr_priority, 10);
    uassert_not_null(release_thread);
    if (release_thread == RT_NULL)
    {
        rt_sem_detach(&ctx.allow_finish);
        rt_sem_detach(&ctx.started);
        rt_workqueue_destroy(queue);
        return;
    }
    rt_thread_startup(release_thread);

    /* Phase: cancel sync reuse completion. */
    err = rt_workqueue_cancel_work_sync(queue, &work);
    uassert_int_equal(err, RT_EOK);

    /* cancel_sync must only return after the release thread unblocked the
     * work and it finished its second run. */
    uassert_int_equal(ctx.released, 1);
    uassert_int_equal(ctx.count, 2);

    rt_sem_detach(&ctx.allow_finish);
    rt_sem_detach(&ctx.started);
    rt_workqueue_destroy(queue);
}

/* Verify a work that resubmits itself AFTER cancel_sync marked it canceling
 * gets its resubmission rejected with -RT_EBUSY and runs exactly once. */
static void test_workqueue_cancel_sync_requeue(void)
{
    struct rt_workqueue *queue;
    rt_uint8_t curr_priority;
    struct rt_work work;
    struct wq_sync_ctx ctx;
    rt_thread_t release_thread;
    rt_err_t err;

    curr_priority = wq_get_test_thread_priority(-1);
    queue = rt_workqueue_create("test", 2048, curr_priority);
    uassert_not_null(queue);
    if (queue == RT_NULL)
    {
        return;
    }

    err = rt_sem_init(&ctx.started, "wqrc", 0, RT_IPC_FLAG_FIFO);
    uassert_int_equal(err, RT_EOK);
    err = rt_sem_init(&ctx.allow_finish, "wqrf", 0, RT_IPC_FLAG_FIFO);
    uassert_int_equal(err, RT_EOK);
    ctx.queue = queue;
    ctx.submit_ret = 0;
    ctx.run_count = 0;

    /* Phase: requeue in callback should be rejected during cancel_sync. */
    rt_work_init(&work, wq_requeue_work_fun, (void *)&ctx);
    err = rt_workqueue_submit_work(queue, &work, 0);
    uassert_int_equal(err, RT_EOK);

    err = rt_sem_take(&ctx.started, rt_tick_from_millisecond(100));
    uassert_int_equal(err, RT_EOK);

    release_thread = rt_thread_create("wqrrel", wq_release_entry, &ctx,
                                      1024, curr_priority, 10);
    uassert_not_null(release_thread);
    if (release_thread == RT_NULL)
    {
        rt_sem_detach(&ctx.allow_finish);
        rt_sem_detach(&ctx.started);
        rt_workqueue_destroy(queue);
        return;
    }
    rt_thread_startup(release_thread);

    /* Blocks until the work (released ~20 ms later) finishes. */
    err = rt_workqueue_cancel_work_sync(queue, &work);
    uassert_int_equal(err, RT_EOK);

    uassert_int_equal(ctx.run_count, 1);
    uassert_int_equal(ctx.submit_ret, -RT_EBUSY);

    rt_sem_detach(&ctx.allow_finish);
    rt_sem_detach(&ctx.started);
    rt_workqueue_destroy(queue);
}

/* Verify a self-resubmission made BEFORE cancel_sync intervened is accepted,
 * and that cancel_sync then removes the requeued instance so it never runs
 * a second time. */
static void test_workqueue_cancel_sync_requeue_early(void)
{
    struct rt_workqueue *queue;
    rt_uint8_t curr_priority;
    struct rt_work work;
    struct wq_requeue_early_ctx ctx;
    struct wq_release_ctx release_ctx;
    rt_thread_t release_thread;
    rt_err_t err;

    curr_priority = wq_get_test_thread_priority(-1);
    queue = rt_workqueue_create("test", 2048, curr_priority);
    uassert_not_null(queue);
    if (queue == RT_NULL)
    {
        return;
    }

    err = rt_sem_init(&ctx.started, "wqes", 0, RT_IPC_FLAG_FIFO);
    uassert_int_equal(err, RT_EOK);
    err = rt_sem_init(&ctx.allow_finish, "wqef", 0, RT_IPC_FLAG_FIFO);
    uassert_int_equal(err, RT_EOK);
    ctx.queue = queue;
    ctx.run_count = 0;
    ctx.submit_ret = -RT_ERROR;

    /* Phase: requeue early before cancel_sync blocks, should be removed later. */
    rt_work_init(&work, wq_requeue_early_work_fun, (void *)&ctx);
    err = rt_workqueue_submit_work(queue, &work, 0);
    uassert_int_equal(err, RT_EOK);

    err = rt_sem_take(&ctx.started, rt_tick_from_millisecond(100));
    uassert_int_equal(err, RT_EOK);

    release_ctx.sem = &ctx.allow_finish;
    release_ctx.delay = rt_tick_from_millisecond(20);
    release_thread = rt_thread_create("wqerl", wq_release_sem_entry, &release_ctx,
                                      1024, curr_priority, 10);
    uassert_not_null(release_thread);
    if (release_thread == RT_NULL)
    {
        rt_sem_detach(&ctx.allow_finish);
        rt_sem_detach(&ctx.started);
        rt_workqueue_destroy(queue);
        return;
    }
    rt_thread_startup(release_thread);

    err = rt_workqueue_cancel_work_sync(queue, &work);
    uassert_int_equal(err, RT_EOK);
    /* The early self-submit happened before canceling was set: accepted. */
    uassert_int_equal(ctx.submit_ret, RT_EOK);

    /* The accepted requeue must have been removed: still exactly one run. */
    rt_thread_delay(rt_tick_from_millisecond(10));
    uassert_int_equal(ctx.run_count, 1);

    rt_sem_detach(&ctx.allow_finish);
    rt_sem_detach(&ctx.started);
    rt_workqueue_destroy(queue);
}

/* Verify only ONE thread may wait in cancel_work_sync at a time: a second
 * concurrent waiter gets -RT_EBUSY while the first completes normally. */
static void test_workqueue_cancel_sync_double_waiter_busy(void)
{
    struct rt_workqueue *queue;
    rt_uint8_t curr_priority;
    struct rt_work work;
    struct wq_sync_ctx run_ctx;
    struct wq_cancel_wait_ctx wait_ctx1;
    struct wq_cancel_wait_ctx wait_ctx2;
    rt_thread_t thread1;
    rt_thread_t thread2;
    rt_base_t level;
    rt_bool_t sync_waiting = RT_FALSE;
    rt_err_t err;
    int tries;

    curr_priority = wq_get_test_thread_priority(-1);
    queue = rt_workqueue_create("test", 2048, curr_priority);
    uassert_not_null(queue);
    if (queue == RT_NULL)
    {
        return;
    }

    err = rt_sem_init(&run_ctx.started, "wqcs", 0, RT_IPC_FLAG_FIFO);
    uassert_int_equal(err, RT_EOK);
    err = rt_sem_init(&run_ctx.allow_finish, "wqcf", 0, RT_IPC_FLAG_FIFO);
    uassert_int_equal(err, RT_EOK);
    run_ctx.phase = 1;   /* park the work inside its callback */
    run_ctx.count = 0;
    run_ctx.done = 0;

    rt_work_init(&work, wq_sync_work_fun, (void *)&run_ctx);
    err = rt_workqueue_submit_work(queue, &work, 0);
    uassert_int_equal(err, RT_EOK);

    /* Wait until the work is parked inside its callback (i.e. running). */
    err = rt_sem_take(&run_ctx.started, rt_tick_from_millisecond(100));
    uassert_int_equal(err, RT_EOK);

    err = rt_sem_init(&wait_ctx1.started, "wqc1s", 0, RT_IPC_FLAG_FIFO);
    uassert_int_equal(err, RT_EOK);
    err = rt_sem_init(&wait_ctx1.done, "wqc1d", 0, RT_IPC_FLAG_FIFO);
    uassert_int_equal(err, RT_EOK);
    wait_ctx1.queue = queue;
    wait_ctx1.work = &work;
    wait_ctx1.err = RT_EOK;

    err = rt_sem_init(&wait_ctx2.started, "wqc2s", 0, RT_IPC_FLAG_FIFO);
    uassert_int_equal(err, RT_EOK);
    err = rt_sem_init(&wait_ctx2.done, "wqc2d", 0, RT_IPC_FLAG_FIFO);
    uassert_int_equal(err, RT_EOK);
    wait_ctx2.queue = queue;
    wait_ctx2.work = &work;
    wait_ctx2.err = RT_EOK;

    thread1 = rt_thread_create("wqc1", wq_cancel_wait_entry, &wait_ctx1,
                               1024, curr_priority, 10);
    uassert_not_null(thread1);
    if (thread1 == RT_NULL)
    {
        rt_sem_detach(&wait_ctx2.done);
        rt_sem_detach(&wait_ctx2.started);
        rt_sem_detach(&wait_ctx1.done);
        rt_sem_detach(&wait_ctx1.started);
        rt_sem_detach(&run_ctx.allow_finish);
        rt_sem_detach(&run_ctx.started);
        rt_workqueue_destroy(queue);
        return;
    }
    rt_thread_startup(thread1);

    err = rt_sem_take(&wait_ctx1.started, rt_tick_from_millisecond(100));
    uassert_int_equal(err, RT_EOK);
    /* Poll (white-box) until the first waiter has actually registered the
     * SYNC_WAITING flag inside cancel_sync. */
    for (tries = 0; tries < 10; tries++)
    {
        level = rt_spin_lock_irqsave(&(queue->spinlock));
        sync_waiting = (queue->flags & RT_WORKQUEUE_FLAG_SYNC_WAITING) ?
                       RT_TRUE : RT_FALSE;
        rt_spin_unlock_irqrestore(&(queue->spinlock), level);
        if (sync_waiting)
        {
            break;
        }
        rt_thread_delay(rt_tick_from_millisecond(5));
    }
    uassert_true(sync_waiting);
    if (!sync_waiting)
    {
        /* Bail out without leaking: unblock the work and the first waiter. */
        rt_sem_release(&run_ctx.allow_finish);
        rt_sem_take(&wait_ctx1.done, rt_tick_from_millisecond(200));
        rt_sem_detach(&wait_ctx2.done);
        rt_sem_detach(&wait_ctx2.started);
        rt_sem_detach(&wait_ctx1.done);
        rt_sem_detach(&wait_ctx1.started);
        rt_sem_detach(&run_ctx.allow_finish);
        rt_sem_detach(&run_ctx.started);
        rt_workqueue_destroy(queue);
        return;
    }

    /* Phase: second waiter should return busy. */
    thread2 = rt_thread_create("wqc2", wq_cancel_wait_entry, &wait_ctx2,
                               1024, curr_priority, 10);
    uassert_not_null(thread2);
    if (thread2 == RT_NULL)
    {
        rt_sem_release(&run_ctx.allow_finish);
        rt_sem_take(&wait_ctx1.done, rt_tick_from_millisecond(200));
        rt_sem_detach(&wait_ctx2.done);
        rt_sem_detach(&wait_ctx2.started);
        rt_sem_detach(&wait_ctx1.done);
        rt_sem_detach(&wait_ctx1.started);
        rt_sem_detach(&run_ctx.allow_finish);
        rt_sem_detach(&run_ctx.started);
        rt_workqueue_destroy(queue);
        return;
    }
    rt_thread_startup(thread2);

    /* Second waiter must fail fast with -RT_EBUSY. */
    err = rt_sem_take(&wait_ctx2.done, rt_tick_from_millisecond(200));
    uassert_int_equal(err, RT_EOK);
    uassert_int_equal(wait_ctx2.err, -RT_EBUSY);

    /* Unblock the work; the first waiter then completes successfully. */
    rt_sem_release(&run_ctx.allow_finish);
    err = rt_sem_take(&wait_ctx1.done, rt_tick_from_millisecond(200));
    uassert_int_equal(err, RT_EOK);
    uassert_int_equal(wait_ctx1.err, RT_EOK);

    rt_sem_detach(&wait_ctx2.done);
    rt_sem_detach(&wait_ctx2.started);
    rt_sem_detach(&wait_ctx1.done);
    rt_sem_detach(&wait_ctx1.started);
    rt_sem_detach(&run_ctx.allow_finish);
    rt_sem_detach(&run_ctx.started);
    rt_workqueue_destroy(queue);
}

/* Verify cancel_work_sync called from inside the work's own callback returns
 * -RT_EBUSY instead of deadlocking the workqueue thread on itself. */
static void test_workqueue_cancel_sync_in_callback_busy(void)
{
    struct rt_workqueue *queue;
    rt_uint8_t curr_priority;
    struct rt_work work;
    struct wq_self_cancel_ctx ctx;
    rt_err_t err;

    curr_priority = wq_get_test_thread_priority(-1);
    queue = rt_workqueue_create("test", 2048, curr_priority);
    uassert_not_null(queue);
    if (queue == RT_NULL)
    {
        return;
    }

    err = rt_sem_init(&ctx.done, "wqcb", 0, RT_IPC_FLAG_FIFO);
    uassert_int_equal(err, RT_EOK);
    ctx.queue = queue;
    ctx.err = RT_EOK;

    /* Phase: cancel sync in callback should return busy (avoid deadlock). */
    rt_work_init(&work, wq_cancel_sync_self_fun, (void *)&ctx);
    err = rt_workqueue_submit_work(queue, &work, 0);
    uassert_int_equal(err, RT_EOK);

    err = rt_sem_take(&ctx.done, rt_tick_from_millisecond(100));
    uassert_int_equal(err, RT_EOK);
    uassert_int_equal(ctx.err, -RT_EBUSY);

    rt_sem_detach(&ctx.done);
    rt_workqueue_destroy(queue);
}

/* Verify rt_workqueue_get_work_status reports CANCELING|RUNNING while a
 * cancel_sync waiter is blocked on a running work, and DONE afterwards. */
static void test_workqueue_status_canceling(void)
{
    struct rt_workqueue *queue;
    rt_uint8_t curr_priority;
    struct rt_work work;
    struct wq_sync_ctx run_ctx;
    struct wq_cancel_wait_ctx wait_ctx;
    rt_thread_t cancel_thread;
    rt_base_t level;
    rt_bool_t canceling = RT_FALSE;
    rt_uint16_t status;
    rt_err_t err;
    int tries;

    curr_priority = wq_get_test_thread_priority(-1);
    queue = rt_workqueue_create("test", 2048, curr_priority);
    uassert_not_null(queue);
    if (queue == RT_NULL)
    {
        return;
    }

    err = rt_sem_init(&run_ctx.started, "wqss", 0, RT_IPC_FLAG_FIFO);
    uassert_int_equal(err, RT_EOK);
    err = rt_sem_init(&run_ctx.allow_finish, "wqsf", 0, RT_IPC_FLAG_FIFO);
    uassert_int_equal(err, RT_EOK);
    run_ctx.phase = 1;   /* park the work inside its callback */
    run_ctx.count = 0;
    run_ctx.done = 0;

    rt_work_init(&work, wq_sync_work_fun, (void *)&run_ctx);
    err = rt_workqueue_submit_work(queue, &work, 0);
    uassert_int_equal(err, RT_EOK);

    err = rt_sem_take(&run_ctx.started, rt_tick_from_millisecond(100));
    uassert_int_equal(err, RT_EOK);

    err = rt_sem_init(&wait_ctx.started, "wqcs", 0, RT_IPC_FLAG_FIFO);
    uassert_int_equal(err, RT_EOK);
    err = rt_sem_init(&wait_ctx.done, "wqcd", 0, RT_IPC_FLAG_FIFO);
    uassert_int_equal(err, RT_EOK);
    wait_ctx.queue = queue;
    wait_ctx.work = &work;
    wait_ctx.err = RT_EOK;

    cancel_thread = rt_thread_create("wqst", wq_cancel_wait_entry, &wait_ctx,
                                     1024, curr_priority, 10);
    uassert_not_null(cancel_thread);
    if (cancel_thread == RT_NULL)
    {
        rt_sem_detach(&wait_ctx.done);
        rt_sem_detach(&wait_ctx.started);
        rt_sem_detach(&run_ctx.allow_finish);
        rt_sem_detach(&run_ctx.started);
        rt_workqueue_destroy(queue);
        return;
    }
    rt_thread_startup(cancel_thread);

    err = rt_sem_take(&wait_ctx.started, rt_tick_from_millisecond(100));
    uassert_int_equal(err, RT_EOK);

    /* Poll (white-box) until cancel_sync has flagged the work CANCELING. */
    for (tries = 0; tries < 10; tries++)
    {
        level = rt_spin_lock_irqsave(&(queue->spinlock));
        canceling = (work.flags & RT_WORK_STATE_CANCELING) ? RT_TRUE : RT_FALSE;
        rt_spin_unlock_irqrestore(&(queue->spinlock), level);
        if (canceling)
        {
            break;
        }
        rt_thread_delay(rt_tick_from_millisecond(5));
    }
    uassert_true(canceling);

    /* While the work is parked and being canceled: CANCELING and RUNNING. */
    status = rt_workqueue_get_work_status(queue, &work);
    uassert_true(status & RT_WORK_STATUS_CANCELING);
    uassert_true(status & RT_WORK_STATUS_RUNNING);

    rt_sem_release(&run_ctx.allow_finish);
    err = rt_sem_take(&wait_ctx.done, rt_tick_from_millisecond(200));
    uassert_int_equal(err, RT_EOK);
    uassert_int_equal(wait_ctx.err, RT_EOK);

    /* After the cancel completed: CANCELING cleared, DONE reported. */
    status = rt_workqueue_get_work_status(queue, &work);
    uassert_true((status & RT_WORK_STATUS_CANCELING) == 0);
    uassert_true(status & RT_WORK_STATUS_DONE);

    rt_sem_detach(&wait_ctx.done);
    rt_sem_detach(&wait_ctx.started);
    rt_sem_detach(&run_ctx.allow_finish);
    rt_sem_detach(&run_ctx.started);
    rt_workqueue_destroy(queue);
}

/* Verify cancel_all_work does not clear a work's CANCELING state: a requeue
 * made before canceling is accepted, one made after cancel_all (while a
 * cancel_sync waiter is pending) is still refused with -RT_EBUSY. */
static void test_workqueue_cancel_all_keep_canceling(void)
{
    struct rt_workqueue *queue;
    rt_uint8_t curr_priority;
    struct rt_work work;
    struct wq_cancel_all_ctx ctx;
    struct wq_cancel_wait_ctx wait_ctx;
    rt_thread_t cancel_thread;
    rt_base_t level;
    rt_bool_t sync_waiting = RT_FALSE;
    rt_err_t err;
    int tries;

    curr_priority = wq_get_test_thread_priority(-1);
    queue = rt_workqueue_create("test", 2048, curr_priority);
    uassert_not_null(queue);
    if (queue == RT_NULL)
    {
        return;
    }

    err = rt_sem_init(&ctx.started, "wqcs", 0, RT_IPC_FLAG_FIFO);
    uassert_int_equal(err, RT_EOK);
    err = rt_sem_init(&ctx.allow_finish, "wqcf", 0, RT_IPC_FLAG_FIFO);
    uassert_int_equal(err, RT_EOK);
    err = rt_sem_init(&ctx.done, "wqcd", 0, RT_IPC_FLAG_FIFO);
    uassert_int_equal(err, RT_EOK);
    ctx.queue = queue;
    ctx.submit_ret1 = -RT_ERROR;
    ctx.submit_ret2 = -RT_ERROR;

    rt_work_init(&work, wq_cancel_all_requeue_fun, (void *)&ctx);
    err = rt_workqueue_submit_work(queue, &work, 0);
    uassert_int_equal(err, RT_EOK);

    /* The callback has done its early self-requeue and is now parked. */
    err = rt_sem_take(&ctx.started, rt_tick_from_millisecond(100));
    uassert_int_equal(err, RT_EOK);

    err = rt_sem_init(&wait_ctx.started, "wqws", 0, RT_IPC_FLAG_FIFO);
    uassert_int_equal(err, RT_EOK);
    err = rt_sem_init(&wait_ctx.done, "wqwd", 0, RT_IPC_FLAG_FIFO);
    uassert_int_equal(err, RT_EOK);
    wait_ctx.queue = queue;
    wait_ctx.work = &work;
    wait_ctx.err = RT_EOK;

    cancel_thread = rt_thread_create("wqwc", wq_cancel_wait_entry, &wait_ctx,
                                     1024, curr_priority, 10);
    uassert_not_null(cancel_thread);
    if (cancel_thread == RT_NULL)
    {
        rt_sem_detach(&wait_ctx.done);
        rt_sem_detach(&wait_ctx.started);
        rt_sem_detach(&ctx.done);
        rt_sem_detach(&ctx.allow_finish);
        rt_sem_detach(&ctx.started);
        rt_workqueue_destroy(queue);
        return;
    }
    rt_thread_startup(cancel_thread);

    err = rt_sem_take(&wait_ctx.started, rt_tick_from_millisecond(100));
    uassert_int_equal(err, RT_EOK);

    /* Poll (white-box) until the cancel_sync waiter is registered. */
    for (tries = 0; tries < 10; tries++)
    {
        level = rt_spin_lock_irqsave(&(queue->spinlock));
        sync_waiting = (queue->flags & RT_WORKQUEUE_FLAG_SYNC_WAITING) ? RT_TRUE : RT_FALSE;
        rt_spin_unlock_irqrestore(&(queue->spinlock), level);
        if (sync_waiting)
        {
            break;
        }
        rt_thread_delay(rt_tick_from_millisecond(5));
    }
    uassert_true(sync_waiting);

    /* Phase: cancel_all should not clear canceling state. */
    err = rt_workqueue_cancel_all_work(queue);
    uassert_int_equal(err, RT_EOK);

    rt_sem_release(&ctx.allow_finish);
    err = rt_sem_take(&ctx.done, rt_tick_from_millisecond(200));
    uassert_int_equal(err, RT_EOK);

    err = rt_sem_take(&wait_ctx.done, rt_tick_from_millisecond(200));
    uassert_int_equal(err, RT_EOK);
    uassert_int_equal(wait_ctx.err, RT_EOK);
    /* Early requeue accepted; post-cancel_all requeue still rejected. */
    uassert_int_equal(ctx.submit_ret1, RT_EOK);
    uassert_int_equal(ctx.submit_ret2, -RT_EBUSY);

    rt_sem_detach(&wait_ctx.done);
    rt_sem_detach(&wait_ctx.started);
    rt_sem_detach(&ctx.done);
    rt_sem_detach(&ctx.allow_finish);
    rt_sem_detach(&ctx.started);
    rt_workqueue_destroy(queue);
}

/* Randomized stress: two threads submit (immediate/delayed) and cancel eight
 * shared work items; executions must never exceed accepted submissions. */
static void test_workqueue_stress_submit_cancel(void)
{
    struct rt_workqueue *queue;
    rt_uint8_t curr_priority;
    struct wq_stress_group group;
    struct wq_stress_item items[8];
    struct wq_stress_thread_ctx ctx1;
    struct wq_stress_thread_ctx ctx2;
    rt_thread_t thread1;
    rt_thread_t thread2;
    rt_err_t err;
    rt_int32_t i;
    rt_int32_t total_submit = 0;
    rt_int32_t total_exec = 0;

    curr_priority = wq_get_test_thread_priority(-1);
    queue = rt_workqueue_create("test", 2048, curr_priority);
    uassert_not_null(queue);
    if (queue == RT_NULL)
    {
        return;
    }

    rt_memset(&group, 0, sizeof(group));
    group.queue = queue;
    group.items = items;

    for (i = 0; i < 8; i++)
    {
        rt_atomic_store(&group.submit_cnt[i], 0);
        rt_atomic_store(&group.exec_cnt[i], 0);
        items[i].group = &group;
        items[i].index = (rt_uint8_t)i;
        rt_work_init(&items[i].work, wq_stress_work_fun, (void *)&items[i]);
    }

    err = rt_sem_init(&ctx1.done, "wqs1", 0, RT_IPC_FLAG_FIFO);
    uassert_int_equal(err, RT_EOK);
    err = rt_sem_init(&ctx2.done, "wqs2", 0, RT_IPC_FLAG_FIFO);
    uassert_int_equal(err, RT_EOK);
    ctx1.group = &group;
    ctx2.group = &group;
    ctx1.seed = 0x12345678u;   /* distinct seeds -> distinct op sequences */
    ctx2.seed = 0x87654321u;

    thread1 = rt_thread_create("wqs1", wq_stress_thread_entry, &ctx1,
                               1024, wq_get_test_thread_priority(1), 10);
    uassert_not_null(thread1);
    if (thread1 == RT_NULL)
    {
        rt_sem_detach(&ctx2.done);
        rt_sem_detach(&ctx1.done);
        rt_workqueue_destroy(queue);
        return;
    }
    thread2 = rt_thread_create("wqs2", wq_stress_thread_entry, &ctx2,
                               1024, wq_get_test_thread_priority(1), 10);
    uassert_not_null(thread2);
    if (thread2 == RT_NULL)
    {
        /* thread1 was never started; delete it explicitly. */
        rt_thread_delete(thread1);
        rt_sem_detach(&ctx2.done);
        rt_sem_detach(&ctx1.done);
        rt_workqueue_destroy(queue);
        return;
    }

    rt_thread_startup(thread1);
    rt_thread_startup(thread2);

    err = rt_sem_take(&ctx1.done, rt_tick_from_millisecond(500));
    uassert_int_equal(err, RT_EOK);
    err = rt_sem_take(&ctx2.done, rt_tick_from_millisecond(500));
    uassert_int_equal(err, RT_EOK);

    /* Synchronous destroy drains/cancels everything still queued. */
    err = rt_workqueue_destroy_sync(queue);
    uassert_int_equal(err, RT_EOK);

    for (i = 0; i < 8; i++)
    {
        rt_atomic_t submits = rt_atomic_load(&group.submit_cnt[i]);
        rt_atomic_t execs = rt_atomic_load(&group.exec_cnt[i]);

        /* A work can be canceled after submit, so execs <= submits. */
        uassert_true(execs <= submits);
        total_submit += submits;
        total_exec += execs;
    }

    uassert_true(total_submit > 0);
    uassert_true(total_exec <= total_submit);

    rt_sem_detach(&ctx2.done);
    rt_sem_detach(&ctx1.done);
}

/* Verify all operations from a DIFFERENT queue on a work that is running or
 * pending on queue1 are rejected with -RT_EBUSY. */
static void test_workqueue_cross_queue_running_reject(void)
{
    struct rt_workqueue *queue1;
    struct rt_workqueue *queue2;
    rt_uint8_t curr_priority;
    struct rt_work work;
    struct wq_sync_ctx ctx;
    volatile int work_flag = 0;
    rt_err_t err;

    curr_priority = wq_get_test_thread_priority(-1);
    queue1 = rt_workqueue_create("test1", 2048, curr_priority);
    uassert_not_null(queue1);
    if (queue1 == RT_NULL)
    {
        return;
    }
    queue2 = rt_workqueue_create("test2", 2048, curr_priority);
    uassert_not_null(queue2);
    if (queue2 == RT_NULL)
    {
        rt_workqueue_destroy(queue1);
        return;
    }

    err = rt_sem_init(&ctx.started, "wqrs", 0, RT_IPC_FLAG_FIFO);
    uassert_int_equal(err, RT_EOK);
    err = rt_sem_init(&ctx.allow_finish, "wqrf", 0, RT_IPC_FLAG_FIFO);
    uassert_int_equal(err, RT_EOK);
    ctx.phase = 1;   /* park the work inside its callback on queue1 */
    ctx.count = 0;
    ctx.done = 0;

    rt_work_init(&work, wq_sync_work_fun, (void *)&ctx);
    err = rt_workqueue_submit_work(queue1, &work, 0);
    uassert_int_equal(err, RT_EOK);

    err = rt_sem_take(&ctx.started, rt_tick_from_millisecond(100));
    uassert_int_equal(err, RT_EOK);

    /* Phase: reject cross-queue ops while running. */
    err = rt_workqueue_submit_work(queue2, &work, 0);
    uassert_int_equal(err, -RT_EBUSY);
    err = rt_workqueue_urgent_work(queue2, &work);
    uassert_int_equal(err, -RT_EBUSY);
    err = rt_workqueue_cancel_work(queue2, &work);
    uassert_int_equal(err, -RT_EBUSY);
    err = rt_workqueue_cancel_work_sync(queue2, &work);
    uassert_int_equal(err, -RT_EBUSY);

    rt_sem_release(&ctx.allow_finish);
    rt_thread_delay(rt_tick_from_millisecond(10));
    uassert_int_equal(ctx.count, 1);

    /* Phase: reject cross-queue ops while pending/delayed. */
    work_flag = 0;
    rt_work_init(&work, wq_inc_work_fun, (void *)&work_flag);
    err = rt_workqueue_submit_work(queue1, &work, rt_tick_from_millisecond(50));
    uassert_int_equal(err, RT_EOK);

    err = rt_workqueue_submit_work(queue2, &work, 0);
    uassert_int_equal(err, -RT_EBUSY);
    err = rt_workqueue_urgent_work(queue2, &work);
    uassert_int_equal(err, -RT_EBUSY);
    err = rt_workqueue_cancel_work(queue2, &work);
    uassert_int_equal(err, -RT_EBUSY);
    err = rt_workqueue_cancel_work_sync(queue2, &work);
    uassert_int_equal(err, -RT_EBUSY);

    /* The owning queue may still cancel it normally. */
    err = rt_workqueue_cancel_work(queue1, &work);
    uassert_int_equal(err, RT_EOK);

    rt_sem_detach(&ctx.allow_finish);
    rt_sem_detach(&ctx.started);
    rt_workqueue_destroy_sync(queue1);
    rt_workqueue_destroy(queue2);
}

/* Verify cancel_work removes pending (immediate and delayed) work and that
 * the status afterwards reports CANCELED. */
static void test_workqueue_cancel_pending(void)
{
    struct rt_workqueue *queue;
    rt_uint8_t curr_priority;
    struct rt_work work;
    volatile int work_flag = 0;
    rt_uint16_t status;
    rt_err_t err;

    /* 1 lower priority than the current test thread */
    curr_priority = wq_get_test_thread_priority(1);
    queue = rt_workqueue_create("test", 2048, curr_priority);
    uassert_not_null(queue);
    if (queue == RT_NULL)
    {
        return;
    }
    work_flag = 0;
    rt_work_init(&work, wq_inc_work_fun, (void *)&work_flag);
    /* Cancel the work before it is executed */
    err = rt_workqueue_submit_work(queue, &work, 0);
    uassert_int_equal(err, RT_EOK);

    /* Phase: cancel pending work. */
    err = rt_workqueue_cancel_work(queue, &work);
    uassert_int_equal(err, RT_EOK);
    status = rt_workqueue_get_work_status(queue, &work);
    uassert_true(status & RT_WORK_STATUS_CANCELED);

    rt_thread_delay(rt_tick_from_millisecond(5));
    uassert_int_equal(work_flag, 0);

    /* Phase: cancel delayed pending work. */
    work_flag = 0;
    rt_work_init(&work, wq_inc_work_fun, (void *)&work_flag);
    err = rt_workqueue_submit_work(queue, &work, rt_tick_from_millisecond(20));
    uassert_int_equal(err, RT_EOK);

    /* Cancel mid-delay, before the 20 ms timer fires. */
    rt_thread_delay(rt_tick_from_millisecond(10));
    err = rt_workqueue_cancel_work(queue, &work);
    uassert_int_equal(err, RT_EOK);
    status = rt_workqueue_get_work_status(queue, &work);
    uassert_true(status & RT_WORK_STATUS_CANCELED);

    /* Wait past the original deadline to prove it never runs. */
    rt_thread_delay(rt_tick_from_millisecond(15));
    uassert_int_equal(work_flag, 0);

    rt_thread_delay(rt_tick_from_millisecond(100));
    rt_workqueue_destroy(queue);
}

/* White-box: canceling the HEAD of the delayed list must refresh the worker
 * thread's sleep timeout to the next (later) delayed work. */
static void test_workqueue_cancel_delayed_head_refresh(void)
{
    struct rt_workqueue *queue;
    rt_uint8_t curr_priority;
    struct rt_work work_head;
    struct rt_work work_next;
    volatile int work_flag1 = 0;
    volatile int work_flag2 = 0;
    rt_tick_t old_timeout_tick = 0;
    rt_tick_t new_timeout_tick = 0;
    rt_err_t err;
    int tries;
    rt_bool_t refreshed = RT_FALSE;

    curr_priority = wq_get_test_thread_priority(-1);
    queue = rt_workqueue_create("test", 2048, curr_priority);
    uassert_not_null(queue);
    if (queue == RT_NULL)
    {
        return;
    }

    rt_work_init(&work_head, wq_inc_work_fun, (void *)&work_flag1);
    rt_work_init(&work_next, wq_inc_work_fun, (void *)&work_flag2);

    err =
          rt_workqueue_submit_work(queue, &work_head, rt_tick_from_millisecond(60));
    uassert_int_equal(err, RT_EOK);
    err = rt_workqueue_submit_work(queue, &work_next, rt_tick_from_millisecond(140));
    uassert_int_equal(err, RT_EOK);

    /* White-box regression: canceling delayed head should refresh the worker timeout. */
    /* Poll until the worker thread's timer is armed for a future tick. */
    for (tries = 0; tries < 10; tries++)
    {
        old_timeout_tick = queue->work_thread->thread_timer.timeout_tick;
        if ((old_timeout_tick - rt_tick_get()) < RT_TICK_MAX / 2 &&
            old_timeout_tick != 0)
        {
            break;
        }
        rt_thread_delay(rt_tick_from_millisecond(5));
    }
    uassert_true(old_timeout_tick != 0);

    err = rt_workqueue_cancel_work(queue, &work_head);
    uassert_int_equal(err, RT_EOK);

    /* The timeout should shift at least ~40 ms later (60 ms -> 140 ms). */
    for (tries = 0; tries < 10; tries++)
    {
        new_timeout_tick = queue->work_thread->thread_timer.timeout_tick;
        if (wq_timeout_shifted_later(new_timeout_tick,
                                     old_timeout_tick,
                                     rt_tick_from_millisecond(40)))
        {
            refreshed = RT_TRUE;
            break;
        }
        rt_thread_delay(rt_tick_from_millisecond(5));
    }
    uassert_true(refreshed);
    uassert_int_equal(work_flag1, 0);
    uassert_int_equal(work_flag2, 0);

    err = rt_workqueue_destroy_sync(queue);
    uassert_int_equal(err, RT_EOK);
}

/* Verify canceling a work AFTER it completed returns RT_EOK for both
 * cancel_work and cancel_work_sync (no-ops on finished work). */
static void test_workqueue_cancel_after_done(void)
{
    struct rt_workqueue *queue;
    rt_uint8_t curr_priority;
    struct rt_work work;
    struct wq_done_ctx ctx;
    rt_err_t err;

    /* 1 higher priority than the current test thread */
    curr_priority = wq_get_test_thread_priority(-1);
    queue = rt_workqueue_create("test", 2048, curr_priority);
    uassert_not_null(queue);
    if (queue == RT_NULL)
    {
        return;
    }

    err = rt_sem_init(&ctx.done, "wqcd", 0, RT_IPC_FLAG_FIFO);
    uassert_int_equal(err, RT_EOK);
    ctx.flag = 0;

    rt_work_init(&work, wq_done_work_fun, (void *)&ctx);
    /* Phase: cancel after work done. */
    err = rt_workqueue_submit_work(queue, &work, 0);
    uassert_int_equal(err, RT_EOK);

    err = rt_sem_take(&ctx.done, rt_tick_from_millisecond(200));
    uassert_int_equal(err, RT_EOK);
    uassert_int_equal(ctx.flag, 1);
    /* Ensure the workqueue thread finishes cleanup before cancel. */
    rt_thread_delay(rt_tick_from_millisecond(5));

    err = rt_workqueue_cancel_work(queue, &work);
    uassert_int_equal(err, RT_EOK);

    /* Phase: cancel sync after completion should also return success. */
    err = rt_workqueue_cancel_work_sync(queue, &work);
    uassert_int_equal(err, RT_EOK);

    rt_sem_detach(&ctx.done);
    rt_thread_delay(rt_tick_from_millisecond(100));
    rt_workqueue_destroy(queue);
}

/* Verify cancel_work on a RUNNING work reports -RT_EBUSY, while
 * cancel_work_sync blocks until the work finishes and then returns RT_EOK. */
static void test_workqueue_cancel_sync_running_waits_done(void)
{
    struct rt_workqueue *queue;
    rt_uint8_t curr_priority;
    struct rt_work work;
    volatile int work_flag = 0;
    rt_err_t err;

    /* 1 lower priority than the current test thread */
    curr_priority = wq_get_test_thread_priority(1);
    queue = rt_workqueue_create("test", 2048, curr_priority);
    uassert_not_null(queue);
    if (queue == RT_NULL)
    {
        return;
    }
    rt_work_init(&work, wq_delay_flag_work_fun, (void *)&work_flag);
    err = rt_workqueue_submit_work(queue, &work, 0);
    uassert_int_equal(err, RT_EOK);

    /* The callback sleeps ~10 ms, so it is running after 5 ms. */
    rt_thread_delay(rt_tick_from_millisecond(5));
    /* Phase: cancel should report busy while running. */
    err = rt_workqueue_cancel_work(queue, &work);
    uassert_int_equal(err, -RT_EBUSY);
    /* Phase: cancel sync waits for running work. */
    err = rt_workqueue_cancel_work_sync(queue, &work);
    uassert_int_equal(err, RT_EOK);

    /* cancel_sync returned only after the work finished and set the flag. */
    uassert_int_equal(work_flag, 1);

    rt_thread_delay(rt_tick_from_millisecond(100));
    rt_workqueue_destroy(queue);
}

/* Verify canceling a work that was never submitted is a harmless no-op for
 * both cancel_work and cancel_work_sync. */
static void test_workqueue_cancel_not_queued(void)
{
    struct rt_workqueue *queue;
    rt_uint8_t curr_priority;
    struct rt_work work;
    volatile int work_flag = 0;
    rt_err_t err;

    curr_priority = wq_get_test_thread_priority(1);
    queue = rt_workqueue_create("test", 2048, curr_priority);
    uassert_not_null(queue);
    if (queue == RT_NULL)
    {
        return;
    }

    /* Phase: cancel when not queued. */
    rt_work_init(&work, wq_inc_work_fun, (void *)&work_flag);
    err = rt_workqueue_cancel_work(queue, &work);
    uassert_int_equal(err, RT_EOK);
    uassert_int_equal(work_flag, 0);

    err = rt_workqueue_cancel_work_sync(queue, &work);
    uassert_int_equal(err, RT_EOK);

    rt_workqueue_destroy(queue);
}

static void test_workqueue_cancel_all_keep_running(void)
{
    struct rt_workqueue *queue;
    rt_uint8_t curr_priority;
    struct rt_work work_running;
    struct rt_work work_pending;
    struct rt_work work_delayed;
    struct wq_sync_ctx ctx;
    volatile int work_flag = 0;
    rt_err_t err;

    /* 1 higher priority than the current test thread */
    curr_priority = wq_get_test_thread_priority(-1);
    queue = rt_workqueue_create("test", 2048, curr_priority);
    uassert_not_null(queue);
    if (queue == RT_NULL)
    {
        return;
    }

    err = rt_sem_init(&ctx.started, "wqca", 0, RT_IPC_FLAG_FIFO);
    uassert_int_equal(err, RT_EOK);
    err = rt_sem_init(&ctx.allow_finish, "wqcf", 0, RT_IPC_FLAG_FIFO);
    uassert_int_equal(err, RT_EOK);
    ctx.phase = 1;
    ctx.count = 0;
    ctx.done = 0;

    rt_work_init(&work_running, wq_sync_work_fun, (void *)&ctx);
    rt_work_init(&work_pending, wq_inc_work_fun, (void *)&work_flag);
    rt_work_init(&work_delayed, wq_inc_work_fun, (void *)&work_flag);

    err = rt_workqueue_submit_work(queue, &work_running, 0);
    uassert_int_equal(err, RT_EOK);
    err =
rt_sem_take(&ctx.started, rt_tick_from_millisecond(100)); + uassert_int_equal(err, RT_EOK); + + err = rt_workqueue_submit_work(queue, &work_pending, 0); + uassert_int_equal(err, RT_EOK); + err = rt_workqueue_submit_work(queue, &work_delayed, rt_tick_from_millisecond(20)); + uassert_int_equal(err, RT_EOK); + + /* Phase: cancel all keeps running work. */ + err = rt_workqueue_cancel_all_work(queue); + uassert_int_equal(err, RT_EOK); + + rt_sem_release(&ctx.allow_finish); + rt_thread_delay(rt_tick_from_millisecond(10)); + + uassert_int_equal(ctx.count, 1); + uassert_int_equal(work_flag, 0); + + rt_sem_detach(&ctx.allow_finish); + rt_sem_detach(&ctx.started); + rt_workqueue_destroy(queue); + + /* Phase: cancel all on delayed-only list. */ + queue = rt_workqueue_create("test", 2048, curr_priority); + uassert_not_null(queue); + if (queue == RT_NULL) + { + return; + } + + work_flag = 0; + rt_work_init(&work_delayed, wq_inc_work_fun, (void *)&work_flag); + err = rt_workqueue_submit_work(queue, &work_delayed, rt_tick_from_millisecond(30)); + uassert_int_equal(err, RT_EOK); + + err = rt_workqueue_cancel_all_work(queue); + uassert_int_equal(err, RT_EOK); + + rt_thread_delay(rt_tick_from_millisecond(40)); + uassert_int_equal(work_flag, 0); + + rt_workqueue_destroy(queue); +} + +void workqueue_cancel_testcase(void) +{ + /* Cancel sync on pending work (immediate + delayed) */ + UTEST_UNIT_RUN(test_workqueue_cancel_sync_pending); + /* Pending cancel_sync keeps the no-wait fast path even in critical section */ + UTEST_UNIT_RUN(test_workqueue_cancel_sync_pending_scheduler_unavailable_ok); + /* Running cancel_sync returns busy when scheduler is unavailable */ + UTEST_UNIT_RUN(test_workqueue_cancel_sync_scheduler_unavailable_busy); + /* Phase: cancel sync reuse completion. 
*/ + UTEST_UNIT_RUN(test_workqueue_cancel_sync_reuse_completion); + /* Cancel sync with requeue attempt */ + UTEST_UNIT_RUN(test_workqueue_cancel_sync_requeue); + /* Cancel sync with early requeue attempt */ + UTEST_UNIT_RUN(test_workqueue_cancel_sync_requeue_early); + /* Cancel sync double waiters, second returns busy */ + UTEST_UNIT_RUN(test_workqueue_cancel_sync_double_waiter_busy); + /* Cancel sync inside callback should return busy */ + UTEST_UNIT_RUN(test_workqueue_cancel_sync_in_callback_busy); + /* Status query should report canceling while cancel_sync waits */ + UTEST_UNIT_RUN(test_workqueue_status_canceling); + /* Cancel all should not clear canceling state */ + UTEST_UNIT_RUN(test_workqueue_cancel_all_keep_canceling); + /* Stress submit/cancel with fixed seed */ + UTEST_UNIT_RUN(test_workqueue_stress_submit_cancel); + /* Reject cross-queue operations (running + pending/delayed) */ + UTEST_UNIT_RUN(test_workqueue_cross_queue_running_reject); + /* Cancel pending/delayed work */ + UTEST_UNIT_RUN(test_workqueue_cancel_pending); + /* Canceling delayed head should refresh worker timeout */ + UTEST_UNIT_RUN(test_workqueue_cancel_delayed_head_refresh); + /* Cancel after work completion */ + UTEST_UNIT_RUN(test_workqueue_cancel_after_done); + /* Cancel running work (busy) + cancel sync waits done */ + UTEST_UNIT_RUN(test_workqueue_cancel_sync_running_waits_done); + /* Cancel work when not queued (cancel + cancel sync) */ + UTEST_UNIT_RUN(test_workqueue_cancel_not_queued); + /* Cancel all should keep running work */ + UTEST_UNIT_RUN(test_workqueue_cancel_all_keep_running); +} + +#endif /* RT_USING_DEVICE_IPC */ diff --git a/components/drivers/ipc/utest/workqueue_tc_destroy.c b/components/drivers/ipc/utest/workqueue_tc_destroy.c new file mode 100644 index 00000000000..8c5eae49b34 --- /dev/null +++ b/components/drivers/ipc/utest/workqueue_tc_destroy.c @@ -0,0 +1,901 @@ +/* + * Copyright (c) 2006-2026, RT-Thread Development Team + * + * SPDX-License-Identifier: 
Apache-2.0 + * + * Change Logs: + * Date Author Notes + * 2026-03-20 RyanCW(Codex) split workqueue utest, cover destroy APIs and add recreate stress + */ + +/** + * Test Case Name: IPC Workqueue Test + * + * Test Objectives: + * - Validate destroy and destroy-sync behaviors with expected return codes. + * - Verify destroy-sync waits for running work and drops queued work. + * - Verify destroying flag rejects new submissions and urgent work. + * - Stress repeated concurrent create/destroy_sync lifecycle paths. + * - Test core APIs: rt_workqueue_destroy(), rt_workqueue_destroy_sync(). + * + * Test Scenarios: + * - Destroy sync waits for running work to finish. + * - Destroy returns busy when sync waiter or destroying flag exists. + * - Destroying queue rejects new submit and urgent requests. + * - Destroy from workqueue thread returns busy. + * - Multiple workqueues run repeated create/destroy_sync pressure loops. + * + * Verification Metrics: + * - Destroy-sync waits for running work to finish. + * - Destroy returns busy when sync waiter or destroying flag exists. + * - Destroying queue rejects submit/urgent/cancel sync. + * - Destroy called in workqueue thread returns busy. + * - Pending and delayed works are dropped during destroy-sync. + * - Concurrent queue recreate/destroy_sync loops stay stable. + * + * Dependencies: + * - Hardware requirements: QEMU emulator or any hardware platform that supports RT-Thread. + * - Software configuration: + * - RT_USING_UTEST must be enabled (select "RT-Thread Utestcases" in menuconfig). + * - RT_UTEST_WORKQUEUE must be enabled + * (enable via: RT-Thread Utestcases -> Kernel Components -> Drivers -> IPC Test -> IPC Workqueue Test). + * - Environmental Assumptions: System scheduler working normally. + * + * Expected Results: + * - Final output: "[ PASSED ] [ result ] testcase (components.drivers.ipc.workqueue_tc)" + * - No memory leaks or race condition detections in logs. + * - No assertions triggered during test execution. 
+ */ +/* Workqueue destroy tests: destroy, destroy_sync, destroying rejects. */ +#include "rtthread.h" +#include "rtdevice.h" +#include "utest.h" + +#ifdef RT_USING_DEVICE_IPC + +/* Testcase entry is defined in workqueue_tc_basic.c. */ +extern rt_uint8_t wq_get_test_thread_priority(rt_int8_t pos); + +struct wq_sync_ctx +{ + struct rt_semaphore started; + struct rt_semaphore allow_finish; + volatile int phase; + volatile int count; + volatile int released; + volatile int done; +}; + +struct wq_destroy_ctx +{ + struct rt_workqueue *queue; + struct rt_semaphore done; + volatile rt_err_t err; +}; + +struct wq_destroy_wait_ctx +{ + struct rt_workqueue *queue; + struct rt_semaphore started; + struct rt_semaphore done; + volatile rt_err_t err; +}; + +struct wq_cancel_wait_ctx +{ + struct rt_workqueue *queue; + struct rt_work *work; + struct rt_semaphore started; + struct rt_semaphore done; + volatile rt_err_t err; +}; + +struct wq_destroy_status_ctx +{ + struct rt_workqueue *queue; + struct rt_semaphore started; + struct rt_semaphore allow_finish; + volatile rt_err_t submit_ret; +}; + +#define WQ_DESTROY_STRESS_THREADS 4 +#define WQ_DESTROY_STRESS_LOOPS 100 + +struct wq_destroy_stress_ctx +{ + struct rt_semaphore done; + char thread_name[8]; + char queue_name[8]; + volatile rt_bool_t started; + volatile rt_int32_t run_count; + volatile rt_int32_t delayed_count; + volatile rt_int32_t loops_done; + volatile rt_err_t err; +}; + +static void wq_sync_work_fun(struct rt_work *work, void *work_data) +{ + struct wq_sync_ctx *ctx = (struct wq_sync_ctx *)work_data; + + if (ctx->phase == 1) + { + /* Block here to simulate a running work. */ + rt_sem_release(&ctx->started); + rt_sem_take(&ctx->allow_finish, RT_WAITING_FOREVER); + } + + ctx->count += 1; + ctx->done = 1; +} + +static void wq_release_entry(void *parameter) +{ + struct wq_sync_ctx *ctx = (struct wq_sync_ctx *)parameter; + + rt_thread_delay(rt_tick_from_millisecond(20)); + /* Release blocked work. 
*/ + ctx->released = 1; + rt_sem_release(&ctx->allow_finish); +} + +static void wq_destroy_sync_work_fun(struct rt_work *work, void *work_data) +{ + struct wq_destroy_ctx *ctx = (struct wq_destroy_ctx *)work_data; + + ctx->err = rt_workqueue_destroy_sync(ctx->queue); + rt_sem_release(&ctx->done); +} + +static void wq_destroy_work_fun(struct rt_work *work, void *work_data) +{ + struct wq_destroy_ctx *ctx = (struct wq_destroy_ctx *)work_data; + + ctx->err = rt_workqueue_destroy(ctx->queue); + rt_sem_release(&ctx->done); +} + +static void wq_destroy_wait_entry(void *parameter) +{ + struct wq_destroy_wait_ctx *ctx = (struct wq_destroy_wait_ctx *)parameter; + + rt_sem_release(&ctx->started); + ctx->err = rt_workqueue_destroy_sync(ctx->queue); + rt_sem_release(&ctx->done); +} + +static void wq_cancel_wait_entry(void *parameter) +{ + struct wq_cancel_wait_ctx *ctx = (struct wq_cancel_wait_ctx *)parameter; + + rt_sem_release(&ctx->started); + ctx->err = rt_workqueue_cancel_work_sync(ctx->queue, ctx->work); + rt_sem_release(&ctx->done); +} + +static void wq_inc_work_fun(struct rt_work *work, void *work_data) +{ + *((int *)work_data) += 1; +} + +static void wq_destroy_status_work_fun(struct rt_work *work, void *work_data) +{ + struct wq_destroy_status_ctx *ctx = (struct wq_destroy_status_ctx *)work_data; + + /* Requeue before destroy_sync marks canceling to reproduce status transition. 
*/ + ctx->submit_ret = rt_workqueue_submit_work(ctx->queue, work, 0); + rt_sem_release(&ctx->started); + rt_sem_take(&ctx->allow_finish, RT_WAITING_FOREVER); +} + +static void wq_destroy_stress_run_fun(struct rt_work *work, void *work_data) +{ + struct wq_destroy_stress_ctx *ctx = (struct wq_destroy_stress_ctx *)work_data; + + ctx->started = RT_TRUE; + rt_thread_delay(rt_tick_from_millisecond(5)); + ctx->run_count += 1; +} + +static void wq_destroy_stress_delayed_fun(struct rt_work *work, void *work_data) +{ + struct wq_destroy_stress_ctx *ctx = (struct wq_destroy_stress_ctx *)work_data; + + ctx->delayed_count += 1; +} + +static void wq_destroy_stress_entry(void *parameter) +{ + struct wq_destroy_stress_ctx *ctx = (struct wq_destroy_stress_ctx *)parameter; + struct rt_workqueue *queue = RT_NULL; + struct rt_work work_run; + struct rt_work work_delayed; + rt_int32_t loop; + rt_int32_t tries; + rt_err_t err = RT_EOK; + + for (loop = 0; loop < WQ_DESTROY_STRESS_LOOPS; loop++) + { + ctx->started = RT_FALSE; + queue = rt_workqueue_create(ctx->queue_name, 2048, wq_get_test_thread_priority(-1)); + if (queue == RT_NULL) + { + err = -RT_ENOMEM; + break; + } + + rt_work_init(&work_run, wq_destroy_stress_run_fun, (void *)ctx); + rt_work_init(&work_delayed, wq_destroy_stress_delayed_fun, (void *)ctx); + + err = rt_workqueue_submit_work(queue, &work_delayed, rt_tick_from_millisecond(30)); + if (err != RT_EOK) + { + rt_workqueue_destroy_sync(queue); + break; + } + + err = rt_workqueue_submit_work(queue, &work_run, 0); + if (err != RT_EOK) + { + rt_workqueue_destroy_sync(queue); + break; + } + + for (tries = 0; tries < 20; tries++) + { + if (ctx->started) + { + break; + } + rt_thread_delay(rt_tick_from_millisecond(5)); + } + if (ctx->started == RT_FALSE) + { + err = -RT_ETIMEOUT; + rt_workqueue_destroy_sync(queue); + break; + } + + err = rt_workqueue_destroy_sync(queue); + if (err != RT_EOK) + { + break; + } + + if (ctx->run_count != loop + 1 || ctx->delayed_count != 0) + { + err 
= -RT_ERROR; + break; + } + + ctx->loops_done = loop + 1; + queue = RT_NULL; + } + + ctx->err = err; + rt_sem_release(&ctx->done); +} + +static void test_workqueue_destroy_sync_behaviors(void) +{ + struct rt_workqueue *queue; + rt_uint8_t curr_priority; + struct rt_work work; + struct wq_destroy_ctx destroy_ctx; + struct wq_sync_ctx sync_ctx; + rt_thread_t release_thread; + rt_err_t err; + + curr_priority = wq_get_test_thread_priority(-1); + + /* Phase: destroy sync called inside workqueue thread should return busy. */ + queue = rt_workqueue_create("test", 2048, curr_priority); + uassert_not_null(queue); + if (queue == RT_NULL) + { + return; + } + + err = rt_sem_init(&destroy_ctx.done, "wqdb", 0, RT_IPC_FLAG_FIFO); + uassert_int_equal(err, RT_EOK); + destroy_ctx.queue = queue; + destroy_ctx.err = RT_EOK; + + rt_work_init(&work, wq_destroy_sync_work_fun, (void *)&destroy_ctx); + err = rt_workqueue_submit_work(queue, &work, 0); + uassert_int_equal(err, RT_EOK); + + err = rt_sem_take(&destroy_ctx.done, rt_tick_from_millisecond(100)); + uassert_int_equal(err, RT_EOK); + uassert_int_equal(destroy_ctx.err, -RT_EBUSY); + + rt_sem_detach(&destroy_ctx.done); + rt_workqueue_destroy(queue); + + /* Phase: destroy sync should wait for running work to finish. 
*/ + queue = rt_workqueue_create("test", 2048, curr_priority); + uassert_not_null(queue); + if (queue == RT_NULL) + { + return; + } + + err = rt_sem_init(&sync_ctx.started, "wqds", 0, RT_IPC_FLAG_FIFO); + uassert_int_equal(err, RT_EOK); + err = rt_sem_init(&sync_ctx.allow_finish, "wqdf", 0, RT_IPC_FLAG_FIFO); + uassert_int_equal(err, RT_EOK); + sync_ctx.phase = 1; + sync_ctx.count = 0; + sync_ctx.released = 0; + sync_ctx.done = 0; + + rt_work_init(&work, wq_sync_work_fun, (void *)&sync_ctx); + err = rt_workqueue_submit_work(queue, &work, 0); + uassert_int_equal(err, RT_EOK); + + err = rt_sem_take(&sync_ctx.started, rt_tick_from_millisecond(100)); + uassert_int_equal(err, RT_EOK); + + release_thread = rt_thread_create("wqdrel", wq_release_entry, &sync_ctx, + 1024, curr_priority, 10); + uassert_not_null(release_thread); + if (release_thread == RT_NULL) + { + rt_sem_detach(&sync_ctx.allow_finish); + rt_sem_detach(&sync_ctx.started); + rt_workqueue_destroy(queue); + return; + } + rt_thread_startup(release_thread); + + /* Destroy sync should wait current work done. */ + err = rt_workqueue_destroy_sync(queue); + uassert_int_equal(err, RT_EOK); + uassert_int_equal(sync_ctx.done, 1); + + rt_sem_detach(&sync_ctx.allow_finish); + rt_sem_detach(&sync_ctx.started); + + /* Phase: destroy sync with pending and delayed work. 
*/ + { + struct rt_work work_pending; + struct rt_work work_delayed; + volatile int work_flag = 0; + volatile int delayed_flag = 0; + + curr_priority = wq_get_test_thread_priority(1); + queue = rt_workqueue_create("test", 2048, curr_priority); + uassert_not_null(queue); + if (queue == RT_NULL) + { + return; + } + + rt_work_init(&work_pending, wq_inc_work_fun, (void *)&work_flag); + rt_work_init(&work_delayed, wq_inc_work_fun, (void *)&delayed_flag); + + err = rt_workqueue_submit_work(queue, &work_pending, 0); + uassert_int_equal(err, RT_EOK); + err = rt_workqueue_submit_work(queue, &work_delayed, rt_tick_from_millisecond(40)); + uassert_int_equal(err, RT_EOK); + + err = rt_workqueue_destroy_sync(queue); + uassert_int_equal(err, RT_EOK); + + rt_thread_delay(rt_tick_from_millisecond(60)); + uassert_int_equal(work_flag, 0); + uassert_int_equal(delayed_flag, 0); + } +} + +static void test_workqueue_destroy_sync_scheduler_unavailable_busy(void) +{ + struct rt_workqueue *queue; + rt_uint8_t curr_priority; + struct rt_work work; + rt_base_t level; + rt_base_t critical_level; + rt_uint8_t queue_flags; + rt_uint16_t work_flags; + volatile int work_flag = 0; + rt_err_t err; + + /* White-box regression: running destroy_sync must reject unavailable scheduler. 
*/ + curr_priority = wq_get_test_thread_priority(1); + queue = rt_workqueue_create("test", 2048, curr_priority); + uassert_not_null(queue); + if (queue == RT_NULL) + { + return; + } + + rt_work_init(&work, wq_inc_work_fun, (void *)&work_flag); + + level = rt_spin_lock_irqsave(&(queue->spinlock)); + queue->work_current = &work; + work.workqueue = queue; + rt_spin_unlock_irqrestore(&(queue->spinlock), level); + + critical_level = rt_enter_critical(); + err = rt_workqueue_destroy_sync(queue); + rt_exit_critical_safe(critical_level); + uassert_int_equal(err, -RT_EBUSY); + + level = rt_spin_lock_irqsave(&(queue->spinlock)); + queue_flags = queue->flags; + work_flags = work.flags; + queue->work_current = RT_NULL; + work.workqueue = RT_NULL; + rt_spin_unlock_irqrestore(&(queue->spinlock), level); + + uassert_int_equal(queue_flags, 0); + uassert_true((work_flags & RT_WORK_STATE_CANCELING) == 0); + uassert_int_equal(work_flag, 0); + + err = rt_workqueue_destroy(queue); + uassert_int_equal(err, RT_EOK); +} + +static void test_workqueue_destroy_in_callback_busy(void) +{ + struct rt_workqueue *queue; + rt_uint8_t curr_priority; + struct rt_work work; + struct wq_destroy_ctx destroy_ctx; + rt_err_t err; + + curr_priority = wq_get_test_thread_priority(-1); + queue = rt_workqueue_create("test", 2048, curr_priority); + uassert_not_null(queue); + if (queue == RT_NULL) + { + return; + } + + err = rt_sem_init(&destroy_ctx.done, "wqdi", 0, RT_IPC_FLAG_FIFO); + uassert_int_equal(err, RT_EOK); + destroy_ctx.queue = queue; + destroy_ctx.err = RT_EOK; + + /* Phase: destroy called inside workqueue thread should return busy. 
*/ + rt_work_init(&work, wq_destroy_work_fun, (void *)&destroy_ctx); + err = rt_workqueue_submit_work(queue, &work, 0); + uassert_int_equal(err, RT_EOK); + + err = rt_sem_take(&destroy_ctx.done, rt_tick_from_millisecond(100)); + uassert_int_equal(err, RT_EOK); + uassert_int_equal(destroy_ctx.err, -RT_EBUSY); + + rt_sem_detach(&destroy_ctx.done); + rt_workqueue_destroy(queue); +} + +static void test_workqueue_destroy_busy_when_sync_waiting(void) +{ + struct rt_workqueue *queue; + rt_uint8_t curr_priority; + struct rt_work work; + struct wq_sync_ctx run_ctx; + struct wq_cancel_wait_ctx wait_ctx; + rt_thread_t cancel_thread; + rt_base_t level; + rt_bool_t sync_waiting = RT_FALSE; + rt_err_t err; + int tries; + + curr_priority = wq_get_test_thread_priority(-1); + queue = rt_workqueue_create("test", 2048, curr_priority); + uassert_not_null(queue); + if (queue == RT_NULL) + { + return; + } + + err = rt_sem_init(&run_ctx.started, "wqbs", 0, RT_IPC_FLAG_FIFO); + uassert_int_equal(err, RT_EOK); + err = rt_sem_init(&run_ctx.allow_finish, "wqbf", 0, RT_IPC_FLAG_FIFO); + uassert_int_equal(err, RT_EOK); + run_ctx.phase = 1; + run_ctx.count = 0; + run_ctx.done = 0; + + rt_work_init(&work, wq_sync_work_fun, (void *)&run_ctx); + err = rt_workqueue_submit_work(queue, &work, 0); + uassert_int_equal(err, RT_EOK); + + err = rt_sem_take(&run_ctx.started, rt_tick_from_millisecond(100)); + uassert_int_equal(err, RT_EOK); + + err = rt_sem_init(&wait_ctx.started, "wqws", 0, RT_IPC_FLAG_FIFO); + uassert_int_equal(err, RT_EOK); + err = rt_sem_init(&wait_ctx.done, "wqwd", 0, RT_IPC_FLAG_FIFO); + uassert_int_equal(err, RT_EOK); + wait_ctx.queue = queue; + wait_ctx.work = &work; + wait_ctx.err = RT_EOK; + + cancel_thread = rt_thread_create("wqwc", wq_cancel_wait_entry, &wait_ctx, + 1024, curr_priority, 10); + uassert_not_null(cancel_thread); + if (cancel_thread == RT_NULL) + { + rt_sem_detach(&wait_ctx.done); + rt_sem_detach(&wait_ctx.started); + rt_sem_detach(&run_ctx.allow_finish); + 
rt_sem_detach(&run_ctx.started); + rt_workqueue_destroy(queue); + return; + } + rt_thread_startup(cancel_thread); + + err = rt_sem_take(&wait_ctx.started, rt_tick_from_millisecond(100)); + uassert_int_equal(err, RT_EOK); + + for (tries = 0; tries < 10; tries++) + { + level = rt_spin_lock_irqsave(&(queue->spinlock)); + sync_waiting = (queue->flags & RT_WORKQUEUE_FLAG_SYNC_WAITING) ? RT_TRUE : RT_FALSE; + rt_spin_unlock_irqrestore(&(queue->spinlock), level); + if (sync_waiting) + { + break; + } + rt_thread_delay(rt_tick_from_millisecond(5)); + } + uassert_true(sync_waiting); + + err = rt_workqueue_destroy_sync(queue); + uassert_int_equal(err, -RT_EBUSY); + + /* Phase: destroy while sync waiter should return busy. */ + err = rt_workqueue_destroy(queue); + uassert_int_equal(err, -RT_EBUSY); + + rt_sem_release(&run_ctx.allow_finish); + err = rt_sem_take(&wait_ctx.done, rt_tick_from_millisecond(200)); + uassert_int_equal(err, RT_EOK); + uassert_int_equal(wait_ctx.err, RT_EOK); + + err = rt_workqueue_destroy_sync(queue); + uassert_int_equal(err, RT_EOK); + + rt_sem_detach(&wait_ctx.done); + rt_sem_detach(&wait_ctx.started); + rt_sem_detach(&run_ctx.allow_finish); + rt_sem_detach(&run_ctx.started); +} + +static void test_workqueue_destroy_busy_when_destroying(void) +{ + struct rt_workqueue *queue; + rt_uint8_t curr_priority; + rt_base_t level; + rt_err_t err; + + curr_priority = wq_get_test_thread_priority(1); + queue = rt_workqueue_create("test", 2048, curr_priority); + uassert_not_null(queue); + if (queue == RT_NULL) + { + return; + } + + /* Phase: simulate destroying flag and ensure destroy returns busy. */ + level = rt_spin_lock_irqsave(&(queue->spinlock)); + queue->flags |= RT_WORKQUEUE_FLAG_DESTROYING; + rt_spin_unlock_irqrestore(&(queue->spinlock), level); + + err = rt_workqueue_destroy(queue); + uassert_int_equal(err, -RT_EBUSY); + + /* Cleanup: clear flag and destroy again. 
*/ + level = rt_spin_lock_irqsave(&(queue->spinlock)); + queue->flags &= ~RT_WORKQUEUE_FLAG_DESTROYING; + rt_spin_unlock_irqrestore(&(queue->spinlock), level); + + err = rt_workqueue_destroy(queue); + uassert_int_equal(err, RT_EOK); +} + +static void test_workqueue_destroying_reject_submit_urgent(void) +{ + struct rt_workqueue *queue; + rt_uint8_t curr_priority; + struct rt_work work_running; + struct rt_work work_submit; + struct rt_work work_urgent; + struct wq_sync_ctx run_ctx; + struct wq_destroy_wait_ctx destroy_ctx; + rt_thread_t destroy_thread; + rt_base_t level; + rt_bool_t destroying = RT_FALSE; + volatile int work_flag = 0; + volatile int work_urgent_flag = 0; + rt_err_t err; + int tries; + + curr_priority = wq_get_test_thread_priority(-1); + queue = rt_workqueue_create("test", 2048, curr_priority); + uassert_not_null(queue); + if (queue == RT_NULL) + { + return; + } + + err = rt_sem_init(&run_ctx.started, "wqds", 0, RT_IPC_FLAG_FIFO); + uassert_int_equal(err, RT_EOK); + err = rt_sem_init(&run_ctx.allow_finish, "wqdf", 0, RT_IPC_FLAG_FIFO); + uassert_int_equal(err, RT_EOK); + run_ctx.phase = 1; + run_ctx.count = 0; + run_ctx.done = 0; + + rt_work_init(&work_running, wq_sync_work_fun, (void *)&run_ctx); + err = rt_workqueue_submit_work(queue, &work_running, 0); + uassert_int_equal(err, RT_EOK); + + err = rt_sem_take(&run_ctx.started, rt_tick_from_millisecond(100)); + uassert_int_equal(err, RT_EOK); + + err = rt_sem_init(&destroy_ctx.started, "wqdy", 0, RT_IPC_FLAG_FIFO); + uassert_int_equal(err, RT_EOK); + err = rt_sem_init(&destroy_ctx.done, "wqdd", 0, RT_IPC_FLAG_FIFO); + uassert_int_equal(err, RT_EOK); + destroy_ctx.queue = queue; + destroy_ctx.err = RT_EOK; + + destroy_thread = rt_thread_create("wqdsy", wq_destroy_wait_entry, &destroy_ctx, + 1024, curr_priority, 10); + uassert_not_null(destroy_thread); + if (destroy_thread == RT_NULL) + { + rt_sem_detach(&destroy_ctx.done); + rt_sem_detach(&destroy_ctx.started); + rt_sem_detach(&run_ctx.allow_finish); 
+ rt_sem_detach(&run_ctx.started); + rt_workqueue_destroy(queue); + return; + } + rt_thread_startup(destroy_thread); + + err = rt_sem_take(&destroy_ctx.started, rt_tick_from_millisecond(100)); + uassert_int_equal(err, RT_EOK); + + /* Wait until destroy_sync marks the queue as destroying. */ + for (tries = 0; tries < 10; tries++) + { + level = rt_spin_lock_irqsave(&(queue->spinlock)); + destroying = (queue->flags & RT_WORKQUEUE_FLAG_DESTROYING) ? RT_TRUE : RT_FALSE; + rt_spin_unlock_irqrestore(&(queue->spinlock), level); + if (destroying) + { + break; + } + rt_thread_delay(rt_tick_from_millisecond(5)); + } + uassert_true(destroying); + + rt_work_init(&work_submit, wq_inc_work_fun, (void *)&work_flag); + err = rt_workqueue_submit_work(queue, &work_submit, 0); + uassert_int_equal(err, -RT_EBUSY); + + rt_work_init(&work_urgent, wq_inc_work_fun, (void *)&work_urgent_flag); + err = rt_workqueue_urgent_work(queue, &work_urgent); + uassert_int_equal(err, -RT_EBUSY); + err = rt_workqueue_cancel_work_sync(queue, &work_running); + uassert_int_equal(err, -RT_EBUSY); + + rt_sem_release(&run_ctx.allow_finish); + err = rt_sem_take(&destroy_ctx.done, rt_tick_from_millisecond(200)); + uassert_int_equal(err, RT_EOK); + uassert_int_equal(destroy_ctx.err, RT_EOK); + uassert_int_equal(work_flag, 0); + uassert_int_equal(work_urgent_flag, 0); + + if (destroy_ctx.err != RT_EOK) + { + rt_workqueue_destroy(queue); + } + + rt_sem_detach(&destroy_ctx.done); + rt_sem_detach(&destroy_ctx.started); + rt_sem_detach(&run_ctx.allow_finish); + rt_sem_detach(&run_ctx.started); +} + +static void test_workqueue_destroy_sync_status_no_canceled_while_running(void) +{ + struct rt_workqueue *queue; + rt_uint8_t curr_priority; + struct rt_work work; + struct wq_destroy_status_ctx run_ctx; + struct wq_destroy_wait_ctx destroy_ctx; + rt_thread_t destroy_thread; + rt_uint16_t status = RT_WORK_STATUS_IDLE; + rt_err_t err; + int tries; + rt_bool_t canceling = RT_FALSE; + + curr_priority = 
wq_get_test_thread_priority(-1); + queue = rt_workqueue_create("test", 2048, curr_priority); + uassert_not_null(queue); + if (queue == RT_NULL) + { + return; + } + + err = rt_sem_init(&run_ctx.started, "wqzs", 0, RT_IPC_FLAG_FIFO); + uassert_int_equal(err, RT_EOK); + err = rt_sem_init(&run_ctx.allow_finish, "wqzf", 0, RT_IPC_FLAG_FIFO); + uassert_int_equal(err, RT_EOK); + run_ctx.queue = queue; + run_ctx.submit_ret = -RT_ERROR; + + rt_work_init(&work, wq_destroy_status_work_fun, (void *)&run_ctx); + err = rt_workqueue_submit_work(queue, &work, 0); + uassert_int_equal(err, RT_EOK); + + err = rt_sem_take(&run_ctx.started, rt_tick_from_millisecond(100)); + uassert_int_equal(err, RT_EOK); + uassert_int_equal(run_ctx.submit_ret, RT_EOK); + + err = rt_sem_init(&destroy_ctx.started, "wqzt", 0, RT_IPC_FLAG_FIFO); + uassert_int_equal(err, RT_EOK); + err = rt_sem_init(&destroy_ctx.done, "wqzd", 0, RT_IPC_FLAG_FIFO); + uassert_int_equal(err, RT_EOK); + destroy_ctx.queue = queue; + destroy_ctx.err = RT_EOK; + + destroy_thread = rt_thread_create("wqzdy", wq_destroy_wait_entry, &destroy_ctx, + 1024, curr_priority, 10); + uassert_not_null(destroy_thread); + if (destroy_thread == RT_NULL) + { + rt_sem_detach(&destroy_ctx.done); + rt_sem_detach(&destroy_ctx.started); + rt_sem_detach(&run_ctx.allow_finish); + rt_sem_detach(&run_ctx.started); + rt_workqueue_destroy(queue); + return; + } + rt_thread_startup(destroy_thread); + + err = rt_sem_take(&destroy_ctx.started, rt_tick_from_millisecond(100)); + uassert_int_equal(err, RT_EOK); + + for (tries = 0; tries < 10; tries++) + { + status = rt_workqueue_get_work_status(queue, &work); + canceling = (status & RT_WORK_STATUS_CANCELING) ? 
RT_TRUE : RT_FALSE; + if (canceling) + { + break; + } + rt_thread_delay(rt_tick_from_millisecond(5)); + } + uassert_true(canceling); + uassert_true(status & RT_WORK_STATUS_RUNNING); + uassert_true((status & RT_WORK_STATUS_CANCELED) == 0); + + rt_sem_release(&run_ctx.allow_finish); + err = rt_sem_take(&destroy_ctx.done, rt_tick_from_millisecond(200)); + uassert_int_equal(err, RT_EOK); + uassert_int_equal(destroy_ctx.err, RT_EOK); + + rt_sem_detach(&destroy_ctx.done); + rt_sem_detach(&destroy_ctx.started); + rt_sem_detach(&run_ctx.allow_finish); + rt_sem_detach(&run_ctx.started); +} + +static void test_workqueue_multi_queue_recreate_stress(void) +{ + struct wq_destroy_stress_ctx ctx[WQ_DESTROY_STRESS_THREADS]; + rt_thread_t threads[WQ_DESTROY_STRESS_THREADS]; + rt_uint8_t curr_priority; + rt_err_t err; + rt_int32_t created = 0; + rt_int32_t i; + rt_int32_t j; + + curr_priority = wq_get_test_thread_priority(0); + rt_memset(ctx, 0, sizeof(ctx)); + rt_memset(threads, 0, sizeof(threads)); + + for (i = 0; i < WQ_DESTROY_STRESS_THREADS; i++) + { + err = rt_sem_init(&ctx[i].done, "wqmd", 0, RT_IPC_FLAG_FIFO); + uassert_int_equal(err, RT_EOK); + if (err != RT_EOK) + { + while (--created >= 0) + { + rt_thread_delete(threads[created]); + rt_sem_detach(&ctx[created].done); + } + return; + } + + rt_snprintf(ctx[i].thread_name, sizeof(ctx[i].thread_name), "wqm%d", i); + rt_snprintf(ctx[i].queue_name, sizeof(ctx[i].queue_name), "wqq%d", i); + ctx[i].err = RT_EOK; + + threads[i] = rt_thread_create(ctx[i].thread_name, + wq_destroy_stress_entry, + &ctx[i], + 1536, + curr_priority, + 10); + uassert_not_null(threads[i]); + if (threads[i] == RT_NULL) + { + rt_sem_detach(&ctx[i].done); + while (--created >= 0) + { + rt_thread_delete(threads[created]); + rt_sem_detach(&ctx[created].done); + } + return; + } + + created += 1; + } + + for (i = 0; i < created; i++) + { + err = rt_thread_startup(threads[i]); + uassert_int_equal(err, RT_EOK); + if (err != RT_EOK) + { + for (j = 0; j < i; j++) + 
{ + rt_sem_take(&ctx[j].done, RT_WAITING_FOREVER); + } + for (j = i; j < created; j++) + { + rt_thread_delete(threads[j]); + } + for (j = 0; j < created; j++) + { + rt_sem_detach(&ctx[j].done); + } + return; + } + } + + for (i = 0; i < created; i++) + { + err = rt_sem_take(&ctx[i].done, rt_tick_from_millisecond(10000)); + uassert_int_equal(err, RT_EOK); + uassert_int_equal(ctx[i].err, RT_EOK); + uassert_int_equal(ctx[i].loops_done, WQ_DESTROY_STRESS_LOOPS); + uassert_int_equal(ctx[i].run_count, WQ_DESTROY_STRESS_LOOPS); + uassert_int_equal(ctx[i].delayed_count, 0); + } + + for (i = 0; i < created; i++) + { + rt_sem_detach(&ctx[i].done); + } +} + +void workqueue_destroy_testcase(void) +{ + /* Destroy sync behaviors (busy in thread + wait running) */ + UTEST_UNIT_RUN(test_workqueue_destroy_sync_behaviors); + /* Running destroy_sync returns busy when scheduler is unavailable */ + UTEST_UNIT_RUN(test_workqueue_destroy_sync_scheduler_unavailable_busy); + /* Destroy in callback should return busy */ + UTEST_UNIT_RUN(test_workqueue_destroy_in_callback_busy); + /* Destroy returns busy when sync waiter exists */ + UTEST_UNIT_RUN(test_workqueue_destroy_busy_when_sync_waiting); + /* Destroy returns busy when destroying flag already set */ + UTEST_UNIT_RUN(test_workqueue_destroy_busy_when_destroying); + /* Destroying queue rejects new submit and urgent */ + UTEST_UNIT_RUN(test_workqueue_destroying_reject_submit_urgent); + /* Destroy sync should not expose CANCELED on running work */ + UTEST_UNIT_RUN(test_workqueue_destroy_sync_status_no_canceled_while_running); + /* Multiple workqueues recreate and destroy_sync stress */ + UTEST_UNIT_RUN(test_workqueue_multi_queue_recreate_stress); +} + +#endif /* RT_USING_DEVICE_IPC */ diff --git a/components/drivers/ipc/workqueue.c b/components/drivers/ipc/workqueue.c index 30b779e4dfc..721af30e949 100644 --- a/components/drivers/ipc/workqueue.c +++ b/components/drivers/ipc/workqueue.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2006-2022, 
RT-Thread Development Team + * Copyright (c) 2006-2026, RT-Thread Development Team * * SPDX-License-Identifier: Apache-2.0 * @@ -11,6 +11,7 @@ * 2022-01-16 Meco Man add rt_work_urgent() * 2023-09-15 xqyjlj perf rt_hw_interrupt_disable/enable * 2024-12-21 yuqingli delete timer, using list + * 2026-03-21 RyanCW(Codex) refine sync semantics and hot paths */ #include @@ -18,44 +19,298 @@ #ifdef RT_USING_HEAP -rt_inline rt_err_t _workqueue_work_completion(struct rt_workqueue *queue) +/* Queue idle means no running work and no pending work. */ +rt_inline rt_bool_t _workqueue_is_idle_nolock(struct rt_workqueue *queue) { - rt_err_t result; + return (queue->work_current == RT_NULL && + rt_list_isempty(&(queue->work_list))); +} - while (1) +/* Move all nodes from src to dst. Caller keeps dst initialized. */ +static void _workqueue_move_list_nolock(rt_list_t *dst, rt_list_t *src) +{ + if (rt_list_isempty(src)) + { + return; + } + + dst->next = src->next; + dst->prev = src->prev; + dst->next->prev = dst; + dst->prev->next = dst; + rt_list_init(src); +} + +/* Detach all queued works with the spinlock held. */ +static void _workqueue_detach_queued_nolock(struct rt_workqueue *queue, + rt_list_t *pending_list, + rt_list_t *delayed_list) +{ + struct rt_work *current = queue->work_current; + + if (current != RT_NULL && + current->list.next != &(current->list)) { - /* try to take condition semaphore */ - result = rt_sem_trytake(&(queue->sem)); - if (result == -RT_ETIMEOUT) + rt_list_remove(&(current->list)); + current->flags = (current->flags & RT_WORK_STATE_CANCELING) ? RT_WORK_STATE_CANCELING : RT_WORK_STATE_CANCELED; + } + _workqueue_move_list_nolock(pending_list, &queue->work_list); + _workqueue_move_list_nolock(delayed_list, &queue->delayed_list); +} + +/* The caller must ensure the running work has already been detached. 
*/ +static void _workqueue_cancel_detached_list(rt_list_t *list) +{ + struct rt_work *work; + + while (rt_list_isempty(list) == RT_FALSE) + { + work = rt_list_first_entry(list, struct rt_work, list); + rt_list_remove(&(work->list)); + work->flags = RT_WORK_STATE_CANCELED; + work->workqueue = RT_NULL; + } +} + +static rt_err_t _workqueue_prepare_destroy(struct rt_workqueue *queue, + rt_bool_t sync, + rt_list_t *pending_list, + rt_list_t *delayed_list) +{ + rt_base_t level; + rt_bool_t need_wakeup; + + if (sync && !rt_scheduler_is_available()) + { + return -RT_EBUSY; + } + + rt_list_init(pending_list); + rt_list_init(delayed_list); + + level = rt_spin_lock_irqsave(&(queue->spinlock)); + if (queue->flags & (RT_WORKQUEUE_FLAG_SYNC_WAITING | RT_WORKQUEUE_FLAG_DESTROYING)) + { + rt_spin_unlock_irqrestore(&(queue->spinlock), level); + return -RT_EBUSY; + } + + need_wakeup = _workqueue_is_idle_nolock(queue); + if (sync) + { + queue->flags |= RT_WORKQUEUE_FLAG_SYNC_WAITING; + if (queue->work_current != RT_NULL) { - /* it's timeout, release this semaphore */ - rt_sem_release(&(queue->sem)); + queue->work_current->flags |= RT_WORK_STATE_CANCELING; } - else if (result == RT_EOK) + } + + queue->flags |= RT_WORKQUEUE_FLAG_DESTROYING; + _workqueue_detach_queued_nolock(queue, pending_list, delayed_list); + rt_spin_unlock_irqrestore(&(queue->spinlock), level); + + _workqueue_cancel_detached_list(pending_list); + _workqueue_cancel_detached_list(delayed_list); + if (need_wakeup) + { + rt_completion_done(&(queue->wakeup_completion)); + } + + return RT_EOK; +} + +static rt_err_t _workqueue_destroy(struct rt_workqueue *queue, rt_bool_t sync) +{ + rt_err_t err; + rt_list_t pending_list; + rt_list_t delayed_list; + + if (!rt_in_thread_context() || queue->work_thread == rt_thread_self()) + { + return -RT_EBUSY; + } + + err = _workqueue_prepare_destroy(queue, sync, &pending_list, &delayed_list); + if (err != RT_EOK) + { + return err; + } + + if (!sync) + { + return RT_EOK; + } + + 
rt_completion_wait(&(queue->sync_completion), RT_WAITING_FOREVER); + RT_KERNEL_FREE(queue); + + return RT_EOK; +} + +/* Promote due delayed works to work_list, and return next wait ticks. */ +rt_inline rt_int32_t _workqueue_promote_delayed_nolock(struct rt_workqueue *queue) +{ + rt_int32_t delay_tick = RT_WAITING_FOREVER; + rt_tick_t current_tick; + rt_list_t *delayed_head = &(queue->delayed_list); + rt_list_t *work_head = &(queue->work_list); + rt_list_t *first_due; + rt_list_t *first_non_due; + rt_list_t *last_due; + rt_list_t *delayed_tail; + rt_list_t *work_tail; + struct rt_work *work; + + if (rt_list_isempty(delayed_head)) + { + return delay_tick; + } + + /* delayed_list is sorted; only head may be due, promote consecutive due items. */ + current_tick = rt_tick_get(); + first_due = delayed_head->next; + first_non_due = first_due; + while (first_non_due != delayed_head) + { + work = rt_list_entry(first_non_due, struct rt_work, list); + /* delayed_list is sorted by timeout_tick (wrap-safe compare). */ + if ((current_tick - work->timeout_tick) < RT_TICK_MAX / 2) { - /* keep the sem value = 0 */ - result = RT_EOK; - break; + work->flags = RT_WORK_STATE_PENDING; + first_non_due = first_non_due->next; } else { - result = -RT_ERROR; + /* Next timeout not reached; record remaining delay. */ + delay_tick = work->timeout_tick - current_tick; break; } } - return result; + if (first_non_due == first_due) + { + return delay_tick; + } + + /* Move the whole due-prefix to work_list tail in one splice. 
*/ + last_due = first_non_due->prev; + delayed_tail = delayed_head->prev; + delayed_head->next = first_non_due; + first_non_due->prev = delayed_head; + + work_tail = work_head->prev; + work_tail->next = first_due; + first_due->prev = work_tail; + last_due->next = work_head; + work_head->prev = last_due; + + if (first_non_due == delayed_head) + { + delayed_head->prev = delayed_head; + } + else + { + delayed_head->prev = delayed_tail; + } + + return delay_tick; } +/* Insert delayed work into the sorted delayed list. */ +rt_inline void _workqueue_insert_delayed_nolock(struct rt_workqueue *queue, struct rt_work *work) +{ + rt_list_t *list = &(queue->delayed_list); + struct rt_work *first; + struct rt_work *last; + struct rt_work *iter; + + if (rt_list_isempty(list)) + { + rt_list_insert_before(list, &(work->list)); + return; + } + + /* Fast path: compare with head/tail to avoid full traversal. */ + first = rt_list_first_entry(list, struct rt_work, list); + last = rt_list_entry(list->prev, struct rt_work, list); + + /* Wrap-safe compare: smaller timeout should be placed earlier in list. */ + if ((first->timeout_tick - work->timeout_tick) < RT_TICK_MAX / 2 && + first->timeout_tick != work->timeout_tick) + { + /* Same-timeout should keep FIFO order. */ + rt_list_insert_before(&(first->list), &(work->list)); + return; + } + if ((work->timeout_tick - last->timeout_tick) < RT_TICK_MAX / 2) + { + rt_list_insert_before(list, &(work->list)); + return; + } + + /* Find the first element that should be after the new work. */ + rt_list_for_each_entry(iter, list, list) + { + if ((iter->timeout_tick - work->timeout_tick) == 0) + { + /* Keep FIFO order for equal timeout. */ + continue; + } + if ((iter->timeout_tick - work->timeout_tick) < RT_TICK_MAX / 2) + { + rt_list_insert_before(&(iter->list), &(work->list)); + return; + } + } + rt_list_insert_before(list, &(work->list)); +} + +/* The caller must ensure the work is queued on this queue. 
*/ +rt_inline rt_bool_t _workqueue_cancel_queued_work_nolock(struct rt_workqueue *queue, + struct rt_work *work) +{ + rt_tick_t old_head_timeout; + rt_bool_t queue_idle = _workqueue_is_idle_nolock(queue); + rt_bool_t had_head = RT_FALSE; + + if (queue_idle) + { + if (rt_list_isempty(&(queue->delayed_list)) == RT_FALSE) + { + old_head_timeout = rt_list_first_entry(&(queue->delayed_list), struct rt_work, list)->timeout_tick; + had_head = RT_TRUE; + } + } + + rt_list_remove(&(work->list)); + work->workqueue = RT_NULL; + work->flags = RT_WORK_STATE_CANCELED; + + if (!queue_idle) + { + return RT_FALSE; + } + + if (rt_list_isempty(&(queue->delayed_list))) + { + return had_head; + } + + return !had_head || + rt_list_first_entry(&(queue->delayed_list), struct rt_work, list)->timeout_tick != old_head_timeout; +} + +/* workqueue thread entry */ static void _workqueue_thread_entry(void *parameter) { - rt_base_t level; - struct rt_work *work; + rt_base_t level; + struct rt_work *work; struct rt_workqueue *queue; - rt_tick_t current_tick; - rt_int32_t delay_tick; + rt_int32_t delay_tick; void (*work_func)(struct rt_work *work, void *work_data); void *work_data; + rt_bool_t need_sync; + rt_bool_t requeued; queue = (struct rt_workqueue *)parameter; RT_ASSERT(queue != RT_NULL); @@ -64,129 +319,238 @@ static void _workqueue_thread_entry(void *parameter) { level = rt_spin_lock_irqsave(&(queue->spinlock)); - /* timer check */ - current_tick = rt_tick_get(); - delay_tick = RT_WAITING_FOREVER; - while (!rt_list_isempty(&(queue->delayed_list))) + /* Move due delayed work into pending list. 
*/ + delay_tick = _workqueue_promote_delayed_nolock(queue); + + if ((queue->flags & RT_WORKQUEUE_FLAG_DESTROYING) && + queue->work_current == RT_NULL && + rt_list_isempty(&(queue->work_list)) && + rt_list_isempty(&(queue->delayed_list))) { - work = rt_list_entry(queue->delayed_list.next, struct rt_work, list); - if ((current_tick - work->timeout_tick) < RT_TICK_MAX / 2) + need_sync = (queue->flags & RT_WORKQUEUE_FLAG_SYNC_WAITING) != 0; + rt_spin_unlock_irqrestore(&(queue->spinlock), level); + + if (need_sync) { - rt_list_remove(&(work->list)); - rt_list_insert_after(queue->work_list.prev, &(work->list)); - work->flags &= ~RT_WORK_STATE_SUBMITTING; - work->flags |= RT_WORK_STATE_PENDING; + rt_completion_done(&(queue->sync_completion)); } else { - delay_tick = work->timeout_tick - current_tick; - break; + RT_KERNEL_FREE(queue); } + return; } if (rt_list_isempty(&(queue->work_list))) { rt_spin_unlock_irqrestore(&(queue->spinlock), level); - /* wait for work completion */ + /* Wait for new work or delayed timeout refresh. */ rt_completion_wait(&(queue->wakeup_completion), delay_tick); continue; } - /* we have work to do with. */ - work = rt_list_entry(queue->work_list.next, struct rt_work, list); + /* Fetch next pending work. */ + work = rt_list_first_entry(&(queue->work_list), struct rt_work, list); rt_list_remove(&(work->list)); - queue->work_current = work; - work->flags &= ~RT_WORK_STATE_PENDING; - work->workqueue = RT_NULL; - work_func = work->work_func; - work_data = work->work_data; + queue->work_current = work; + work->flags = 0; + work_func = work->work_func; + work_data = work->work_data; rt_spin_unlock_irqrestore(&(queue->spinlock), level); - /* do work */ + /* Execute work without holding lock. */ work_func(work, work_data); - /* clean current work */ + need_sync = RT_FALSE; + level = rt_spin_lock_irqsave(&(queue->spinlock)); + /* Finalize current work and wake sync waiter if any. 
*/ queue->work_current = RT_NULL; + requeued = work->list.next != &(work->list); + /* Wake up cancel_work_sync waiter if present. */ + if ((queue->flags & RT_WORKQUEUE_FLAG_SYNC_WAITING) && + (queue->flags & RT_WORKQUEUE_FLAG_DESTROYING) == 0) + { + /* Remove possible requeue before cancel_work_sync resumes. */ + if (requeued) + { + rt_list_remove(&(work->list)); + requeued = RT_FALSE; + } + queue->flags &= ~RT_WORKQUEUE_FLAG_SYNC_WAITING; + need_sync = RT_TRUE; + } + if (!requeued) + { + /* Mark completion only when no requeue is pending. */ + work->flags = RT_WORK_STATE_DONE; + /* Clear only when truly idle. */ + work->workqueue = RT_NULL; + } + rt_spin_unlock_irqrestore(&(queue->spinlock), level); - /* ack work completion */ - _workqueue_work_completion(queue); + if (need_sync) + { + rt_completion_done(&(queue->sync_completion)); + } } } static rt_err_t _workqueue_submit_work(struct rt_workqueue *queue, - struct rt_work *work, rt_tick_t ticks) + struct rt_work *work, + rt_tick_t ticks, + rt_bool_t urgent) { - rt_base_t level; - rt_err_t err = RT_EOK; - struct rt_work *work_tmp; - rt_list_t *list_tmp; + rt_base_t level; + rt_tick_t timeout_tick; + rt_bool_t need_wakeup = RT_FALSE; + rt_bool_t queue_idle; + rt_tick_t old_head_timeout; + rt_bool_t had_head = RT_FALSE; + + /* Validate input ticks. */ + if (ticks >= RT_TICK_MAX / 2) + { + return -RT_ERROR; + } + + if (ticks != 0) + { + timeout_tick = rt_tick_get() + ticks; + } level = rt_spin_lock_irqsave(&(queue->spinlock)); - /* remove list */ - rt_list_remove(&(work->list)); + if ((queue->flags & RT_WORKQUEUE_FLAG_DESTROYING) || + (work->flags & RT_WORK_STATE_CANCELING) || + (work->workqueue != RT_NULL && work->workqueue != queue)) + { + rt_spin_unlock_irqrestore(&(queue->spinlock), level); + return -RT_EBUSY; + } + /* Snapshot idle state before mutation to avoid unnecessary wakeups. */ + queue_idle = _workqueue_is_idle_nolock(queue); + /* Remove list node to avoid duplicate queueing (safe on standalone node). 
*/ + if (work->list.next != &(work->list)) + { + rt_list_remove(&(work->list)); + } + /* Clear queued/terminal state when resubmitting. */ work->flags = 0; - + work->workqueue = queue; if (ticks == 0) { - rt_list_insert_after(queue->work_list.prev, &(work->list)); - work->flags |= RT_WORK_STATE_PENDING; - work->workqueue = queue; - - rt_completion_done(&(queue->wakeup_completion)); - err = RT_EOK; + /* Enqueue immediate work to head or tail. */ + if (urgent) + { + rt_list_insert_after(&(queue->work_list), &(work->list)); + } + else + { + rt_list_insert_before(&(queue->work_list), &(work->list)); + } + work->flags = RT_WORK_STATE_PENDING; + /* Wake up when queue was sleeping for delayed work or new work. */ + need_wakeup = queue_idle; } - else if (ticks < RT_TICK_MAX / 2) + else { - /* insert delay work list */ - work->flags |= RT_WORK_STATE_SUBMITTING; - work->workqueue = queue; - work->timeout_tick = rt_tick_get() + ticks; - - list_tmp = &(queue->delayed_list); - for (work_tmp = rt_list_entry(list_tmp->next, struct rt_work, list); - &work_tmp->list != list_tmp; - work_tmp = rt_list_entry(work_tmp->list.next, struct rt_work, list)) + /* Enqueue delayed work (sorted by timeout). */ + if (queue_idle) { - if ((work_tmp->timeout_tick - work->timeout_tick) < RT_TICK_MAX / 2) + if (rt_list_isempty(&(queue->delayed_list)) == RT_FALSE) { - list_tmp = &(work_tmp->list); - break; + old_head_timeout = rt_list_first_entry(&(queue->delayed_list), struct rt_work, list)->timeout_tick; + had_head = RT_TRUE; } } - rt_list_insert_before(list_tmp, &(work->list)); + work->flags = RT_WORK_STATE_SUBMITTING; + work->timeout_tick = timeout_tick; - rt_completion_done(&(queue->wakeup_completion)); - err = RT_EOK; + _workqueue_insert_delayed_nolock(queue, work); + /* Wake up only when queue is idle and timeout needs refresh. 
*/ + if (queue_idle) + { + need_wakeup = !had_head || + rt_list_first_entry(&(queue->delayed_list), struct rt_work, list)->timeout_tick != old_head_timeout; + } } - else + rt_spin_unlock_irqrestore(&(queue->spinlock), level); + if (need_wakeup) { - err = -RT_ERROR; + rt_completion_done(&(queue->wakeup_completion)); } - rt_spin_unlock_irqrestore(&(queue->spinlock), level); - return err; + return RT_EOK; } -static rt_err_t _workqueue_cancel_work(struct rt_workqueue *queue, struct rt_work *work) +static rt_err_t _workqueue_cancel_core(struct rt_workqueue *queue, + struct rt_work *work, + rt_bool_t sync, + rt_bool_t *need_wakeup, + rt_bool_t *need_wait) { rt_base_t level; - rt_err_t err; + rt_err_t err = RT_EOK; + rt_bool_t sched_available = RT_FALSE; + + if (sync) + { + sched_available = rt_scheduler_is_available(); + } level = rt_spin_lock_irqsave(&(queue->spinlock)); - rt_list_remove(&(work->list)); - work->flags = 0; - err = queue->work_current != work ? RT_EOK : -RT_EBUSY; - work->workqueue = RT_NULL; + if (work->workqueue != RT_NULL && work->workqueue != queue) + { + err = -RT_EBUSY; + goto __exit; + } + + if (sync) + { + if ((queue->flags & (RT_WORKQUEUE_FLAG_SYNC_WAITING | RT_WORKQUEUE_FLAG_DESTROYING)) || + (work->flags & RT_WORK_STATE_CANCELING)) + { + err = -RT_EBUSY; + goto __exit; + } + + if (queue->work_current == work) + { + /* Only reject when cancel_sync would really block. */ + if (!sched_available) + { + err = -RT_EBUSY; + goto __exit; + } + + /* Mark canceling to block requeue in work callback. 
*/ + work->flags |= RT_WORK_STATE_CANCELING; + queue->flags |= RT_WORKQUEUE_FLAG_SYNC_WAITING; + *need_wait = RT_TRUE; + goto __exit; + } + } + else if ((work->flags & RT_WORK_STATE_CANCELING) || + queue->work_current == work) + { + err = -RT_EBUSY; + goto __exit; + } + + if (work->list.next != &(work->list)) + { + *need_wakeup = _workqueue_cancel_queued_work_nolock(queue, work); + } + +__exit: rt_spin_unlock_irqrestore(&(queue->spinlock), level); + return err; } /** - * @brief Initialize a work item, binding with a callback function. + * @brief Initialize a work item. * - * @param work is a pointer to the work item object. - * - * @param work_func is a callback function that will be called when this work item is executed. - * - * @param work_data is a user data passed to the callback function as the second parameter. + * @param work Work item. + * @param work_func Work callback. + * @param work_data Callback argument. */ void rt_work_init(struct rt_work *work, void (*work_func)(struct rt_work *work, void *work_data), @@ -199,40 +563,36 @@ void rt_work_init(struct rt_work *work, work->work_func = work_func; work->work_data = work_data; work->workqueue = RT_NULL; - work->flags = 0; - work->type = 0; + work->flags = 0; + work->type = 0; } /** - * @brief Create a work queue with a thread inside. - * - * @param name is a name of the work queue thread. - * - * @param stack_size is stack size of the work queue thread. + * @brief Create a work queue. * - * @param priority is a priority of the work queue thread. + * @param name Worker thread name. + * @param stack_size Worker thread stack size. + * @param priority Worker thread priority. * - * @return Return a pointer to the workqueue object. It will return RT_NULL if failed. + * @return Work queue object, or RT_NULL on failure. 
*/ struct rt_workqueue *rt_workqueue_create(const char *name, rt_uint16_t stack_size, rt_uint8_t priority) { - struct rt_workqueue *queue = RT_NULL; + struct rt_workqueue *queue; queue = (struct rt_workqueue *)RT_KERNEL_MALLOC(sizeof(struct rt_workqueue)); if (queue != RT_NULL) { - /* initialize work list */ rt_list_init(&(queue->work_list)); rt_list_init(&(queue->delayed_list)); queue->work_current = RT_NULL; - rt_sem_init(&(queue->sem), "wqueue", 0, RT_IPC_FLAG_FIFO); + queue->flags = 0; rt_completion_init(&(queue->wakeup_completion)); + rt_completion_init(&(queue->sync_completion)); - /* create the work thread */ queue->work_thread = rt_thread_create(name, _workqueue_thread_entry, queue, stack_size, priority, 10); if (queue->work_thread == RT_NULL) { - rt_sem_detach(&(queue->sem)); RT_KERNEL_FREE(queue); return RT_NULL; } @@ -247,162 +607,247 @@ struct rt_workqueue *rt_workqueue_create(const char *name, rt_uint16_t stack_siz /** * @brief Destroy a work queue. * - * @param queue is a pointer to the workqueue object. + * @param queue Work queue. + * + * @return RT_EOK on success. + * -RT_EBUSY if the queue is destroying, has a sync waiter, or the + * call is made outside thread context or from the worker thread. * - * @return RT_EOK Success. + * @note This API does not wait for the running work to finish. Use + * rt_workqueue_destroy_sync() if the caller must wait. */ rt_err_t rt_workqueue_destroy(struct rt_workqueue *queue) { RT_ASSERT(queue != RT_NULL); - - rt_workqueue_cancel_all_work(queue); - rt_thread_delete(queue->work_thread); - rt_sem_detach(&(queue->sem)); - RT_KERNEL_FREE(queue); - - return RT_EOK; + return _workqueue_destroy(queue, RT_FALSE); } /** - * @brief Submit a work item to the work queue without delay. - * - * @param queue is a pointer to the workqueue object. + * @brief Destroy a work queue synchronously. * - * @param work is a pointer to the work item object. + * @param queue Work queue. * - * @return RT_EOK Success. 
+ * @return RT_EOK on success. + * -RT_EBUSY if the queue is destroying, has a sync waiter, the call + * is made outside thread context or from the worker thread, or the + * required wait cannot be performed while scheduler is unavailable. */ -rt_err_t rt_workqueue_dowork(struct rt_workqueue *queue, struct rt_work *work) +rt_err_t rt_workqueue_destroy_sync(struct rt_workqueue *queue) { RT_ASSERT(queue != RT_NULL); - RT_ASSERT(work != RT_NULL); - - return _workqueue_submit_work(queue, work, 0); + return _workqueue_destroy(queue, RT_TRUE); } /** - * @brief Submit a work item to the work queue with a delay. - * - * @param queue is a pointer to the workqueue object. + * @brief Submit a work item without delay. * - * @param work is a pointer to the work item object. + * @param queue Work queue. + * @param work Work item. * - * @param ticks is the delay ticks for the work item to be submitted to the work queue. + * @return RT_EOK on success. + */ +rt_err_t rt_workqueue_dowork(struct rt_workqueue *queue, struct rt_work *work) +{ + return rt_workqueue_submit_work(queue, work, 0); +} + +/** + * @brief Submit a work item with delay. * - * NOTE: The max timeout tick should be no more than (RT_TICK_MAX/2 - 1) + * @param queue Work queue. + * @param work Work item. + * @param ticks Delay ticks. The value must be less than RT_TICK_MAX / 2. * - * @return RT_EOK Success. - * -RT_ERROR The ticks parameter is invalid. + * @return RT_EOK on success. + * -RT_ERROR if ticks is invalid. + * -RT_EBUSY if the work belongs to another queue, is canceling, or + * the queue is destroying. */ rt_err_t rt_workqueue_submit_work(struct rt_workqueue *queue, struct rt_work *work, rt_tick_t ticks) { RT_ASSERT(queue != RT_NULL); RT_ASSERT(work != RT_NULL); - RT_ASSERT(ticks < RT_TICK_MAX / 2); - - return _workqueue_submit_work(queue, work, ticks); + return _workqueue_submit_work(queue, work, ticks, RT_FALSE); } /** - * @brief Submit a work item to the work queue without delay. 
This work item will be executed after the current work item. + * @brief Submit a work item urgently. * - * @param queue is a pointer to the workqueue object. + * @param queue Work queue. + * @param work Work item. * - * @param work is a pointer to the work item object. + * @return RT_EOK on success. + * -RT_EBUSY if the work belongs to another queue, is canceling, or + * the queue is destroying. * - * @return RT_EOK Success. + * @note The work runs before queued pending works, but after the current + * running work. */ rt_err_t rt_workqueue_urgent_work(struct rt_workqueue *queue, struct rt_work *work) { - rt_base_t level; - RT_ASSERT(queue != RT_NULL); RT_ASSERT(work != RT_NULL); + return _workqueue_submit_work(queue, work, 0, RT_TRUE); +} - level = rt_spin_lock_irqsave(&(queue->spinlock)); - /* NOTE: the work MUST be initialized firstly */ - rt_list_remove(&(work->list)); - rt_list_insert_after(&queue->work_list, &(work->list)); +/** + * @brief Cancel a work item. + * + * @param queue Work queue. + * @param work Work item. + * + * @return RT_EOK on success. + * -RT_EBUSY if the work is executing, canceling, or belongs to + * another queue. + */ +rt_err_t rt_workqueue_cancel_work(struct rt_workqueue *queue, struct rt_work *work) +{ + rt_err_t err; + rt_bool_t need_wakeup = RT_FALSE; - rt_completion_done(&(queue->wakeup_completion)); - rt_spin_unlock_irqrestore(&(queue->spinlock), level); + RT_ASSERT(work != RT_NULL); + RT_ASSERT(queue != RT_NULL); + err = _workqueue_cancel_core(queue, work, RT_FALSE, &need_wakeup, RT_NULL); + if (err == RT_EOK && need_wakeup) + { + rt_completion_done(&(queue->wakeup_completion)); + } - return RT_EOK; + return err; } /** - * @brief Cancel a work item in the work queue. + * @brief Get work status in a work queue. * - * @param queue is a pointer to the workqueue object. + * @param queue Work queue. + * @param work Work item. * - * @param work is a pointer to the work item object. + * @return Status bits defined by RT_WORK_STATUS_*. 
* - * @return RT_EOK Success. - * -RT_EBUSY This work item is executing. + * @note If the work is not bound to any queue, this API returns the last + * terminal state if present, or RT_WORK_STATUS_IDLE otherwise. */ -rt_err_t rt_workqueue_cancel_work(struct rt_workqueue *queue, struct rt_work *work) +rt_uint16_t rt_workqueue_get_work_status(struct rt_workqueue *queue, struct rt_work *work) { - RT_ASSERT(work != RT_NULL); + rt_base_t level; + rt_uint16_t status = RT_WORK_STATUS_IDLE; + RT_ASSERT(queue != RT_NULL); + RT_ASSERT(work != RT_NULL); + + level = rt_spin_lock_irqsave(&(queue->spinlock)); + if (work->workqueue != queue) + { + if (work->workqueue != RT_NULL) + { + status = RT_WORK_STATUS_OTHER_QUEUE; + } + else + { + /* Idle work reports its last terminal state if any. */ + status = work->flags & (RT_WORK_STATUS_DONE | RT_WORK_STATUS_CANCELED); + } + goto __exit; + } - return _workqueue_cancel_work(queue, work); + status = work->flags & (RT_WORK_STATUS_PENDING | + RT_WORK_STATUS_SUBMITTING | + RT_WORK_STATUS_CANCELING | + RT_WORK_STATUS_DONE | + RT_WORK_STATUS_CANCELED); + if (queue->work_current == work) + { + status |= RT_WORK_STATUS_RUNNING; + } + +__exit: + rt_spin_unlock_irqrestore(&(queue->spinlock), level); + + return status; } /** - * @brief Cancel a work item in the work queue. If the work item is executing, this function will block until it is done. + * @brief Cancel a work item synchronously. * - * @param queue is a pointer to the workqueue object. + * @param queue Work queue. + * @param work Work item. * - * @param work is a pointer to the work item object. + * @return RT_EOK on success. If the work is executing and the caller may + * block, this API waits for completion and still returns RT_EOK. 
+ * -RT_EBUSY if the work is canceling, belongs to another queue, the + * queue is destroying, another sync waiter exists, the call is made + * outside thread context or from the worker thread, or the required + * wait cannot be performed while scheduler is unavailable. * - * @return RT_EOK Success. + * @note This API must be called in thread context and not from the worker + * thread. */ rt_err_t rt_workqueue_cancel_work_sync(struct rt_workqueue *queue, struct rt_work *work) { + rt_bool_t need_wait = RT_FALSE; + rt_bool_t need_wakeup = RT_FALSE; + rt_err_t err; + RT_ASSERT(queue != RT_NULL); RT_ASSERT(work != RT_NULL); + if (!rt_in_thread_context() || queue->work_thread == rt_thread_self()) + { + return -RT_EBUSY; + } - if (queue->work_current == work) /* it's current work in the queue */ + err = _workqueue_cancel_core(queue, work, RT_TRUE, &need_wakeup, &need_wait); + if (err != RT_EOK) { - /* wait for work completion */ - rt_sem_take(&(queue->sem), RT_WAITING_FOREVER); - /* Note that because work items are automatically deleted after execution, they do not need to be deleted again */ + return err; } - else + + if (need_wakeup) { - _workqueue_cancel_work(queue, work); + rt_completion_done(&(queue->wakeup_completion)); } + if (!need_wait) + { + return RT_EOK; + } + + /* Wait for current work done. */ + rt_completion_wait(&(queue->sync_completion), RT_WAITING_FOREVER); return RT_EOK; } /** - * @brief This function will cancel all work items in work queue. + * @brief Cancel all queued work items. * - * @param queue is a pointer to the workqueue object. + * @param queue Work queue. * - * @return RT_EOK Success. + * @return RT_EOK on success. + * + * @note This API does not stop the running work. 
*/ rt_err_t rt_workqueue_cancel_all_work(struct rt_workqueue *queue) { - struct rt_work *work; + rt_base_t level; + rt_bool_t need_wakeup; + rt_list_t pending_list; + rt_list_t delayed_list; RT_ASSERT(queue != RT_NULL); + rt_list_init(&pending_list); + rt_list_init(&delayed_list); + /* Cancel queued work. */ + level = rt_spin_lock_irqsave(&(queue->spinlock)); + need_wakeup = _workqueue_is_idle_nolock(queue) && + rt_list_isempty(&(queue->delayed_list)) == RT_FALSE; + _workqueue_detach_queued_nolock(queue, &pending_list, &delayed_list); + rt_spin_unlock_irqrestore(&(queue->spinlock), level); - /* cancel work */ - rt_enter_critical(); - while (rt_list_isempty(&queue->work_list) == RT_FALSE) - { - work = rt_list_first_entry(&queue->work_list, struct rt_work, list); - _workqueue_cancel_work(queue, work); - } - /* cancel delay work */ - while (rt_list_isempty(&queue->delayed_list) == RT_FALSE) + _workqueue_cancel_detached_list(&pending_list); + _workqueue_cancel_detached_list(&delayed_list); + if (need_wakeup) { - work = rt_list_first_entry(&queue->delayed_list, struct rt_work, list); - _workqueue_cancel_work(queue, work); + rt_completion_done(&(queue->wakeup_completion)); } - rt_exit_critical(); return RT_EOK; } @@ -414,45 +859,81 @@ static struct rt_workqueue *sys_workq; /* system work queue */ /** * @brief Submit a work item to the system work queue with a delay. * - * @param work is a pointer to the work item object. - * - * @param ticks is the delay OS ticks for the work item to be submitted to the work queue. + * @param work Work item. + * @param ticks Delay ticks. The value must be less than RT_TICK_MAX / 2. * - * NOTE: The max timeout tick should be no more than (RT_TICK_MAX/2 - 1) - * - * @return RT_EOK Success. - * -RT_ERROR The ticks parameter is invalid. + * @return RT_EOK on success. + * -RT_ERROR if ticks is invalid. + * -RT_EBUSY if the work belongs to another queue, is canceling, or + * the queue is destroying. 
*/ rt_err_t rt_work_submit(struct rt_work *work, rt_tick_t ticks) { + RT_ASSERT(sys_workq != RT_NULL); return rt_workqueue_submit_work(sys_workq, work, ticks); } /** - * @brief Submit a work item to the system work queue without delay. This work item will be executed after the current work item. + * @brief Submit a work item urgently to the system work queue. * - * @param work is a pointer to the work item object. + * @param work Work item. * - * @return RT_EOK Success. + * @return RT_EOK on success. + * -RT_EBUSY if the work belongs to another queue, is canceling, or + * the queue is destroying. */ rt_err_t rt_work_urgent(struct rt_work *work) { + RT_ASSERT(sys_workq != RT_NULL); return rt_workqueue_urgent_work(sys_workq, work); } /** * @brief Cancel a work item in the system work queue. * - * @param work is a pointer to the work item object. + * @param work Work item. * - * @return RT_EOK Success. - * -RT_EBUSY This work item is executing. + * @return RT_EOK on success. + * -RT_EBUSY if the work is executing, canceling, or belongs to + * another queue. */ rt_err_t rt_work_cancel(struct rt_work *work) { + RT_ASSERT(sys_workq != RT_NULL); return rt_workqueue_cancel_work(sys_workq, work); } +/** + * @brief Cancel a work item in the system work queue synchronously. + * + * @param work Work item. + * + * @return RT_EOK on success. If the work is executing and the caller may + * block, this API waits for completion and still returns RT_EOK. + * -RT_EBUSY if the work is canceling, belongs to another queue, the + * queue is destroying, another sync waiter exists, the call is made + * outside thread context or from the system worker thread, or the + * required wait cannot be performed while scheduler is unavailable. + */ +rt_err_t rt_work_cancel_sync(struct rt_work *work) +{ + RT_ASSERT(sys_workq != RT_NULL); + return rt_workqueue_cancel_work_sync(sys_workq, work); +} + +/** + * @brief Get work status in the system work queue. + * + * @param work Work item. 
+ * + * @return Status bits defined by RT_WORK_STATUS_*. + */ +rt_uint16_t rt_work_get_status(struct rt_work *work) +{ + RT_ASSERT(sys_workq != RT_NULL); + return rt_workqueue_get_work_status(sys_workq, work); +} + static int rt_work_sys_workqueue_init(void) { if (sys_workq != RT_NULL)