/*
 * Copyright (c) 2006-2018, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2017-02-27     bernard      fix the re-work issue.
 */

#include <rthw.h>
#include <rtthread.h>
#include <rtdevice.h>

#ifdef RT_USING_HEAP
static void _delayed_work_timeout_handler(void *parameter);

19 20 21
rt_inline rt_err_t _workqueue_work_completion(struct rt_workqueue *queue)
{
    rt_err_t result;
22 23

    rt_enter_critical();
24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45
    while (1)
    {
        /* try to take condition semaphore */
        result = rt_sem_trytake(&(queue->sem));
        if (result == -RT_ETIMEOUT)
        {
            /* it's timeout, release this semaphore */
            rt_sem_release(&(queue->sem));
        }
        else if (result == RT_EOK)
        {
            /* keep the sem value = 0 */
            result = RT_EOK;
            break;
        }
        else
        {
            result = -RT_ERROR;
            break;
        }
    }
    rt_exit_critical();
46

47 48 49
    return result;
}

50
static void _workqueue_thread_entry(void *parameter)
51
{
52
    rt_base_t level;
53 54
    struct rt_work *work;
    struct rt_workqueue *queue;
55

56
    queue = (struct rt_workqueue *) parameter;
57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72
    RT_ASSERT(queue != RT_NULL);

    while (1)
    {
        if (rt_list_isempty(&(queue->work_list)))
        {
            /* no software timer exist, suspend self. */
            rt_thread_suspend(rt_thread_self());
            rt_schedule();
        }

        /* we have work to do with. */
        level = rt_hw_interrupt_disable();
        work = rt_list_entry(queue->work_list.next, struct rt_work, list);
        rt_list_remove(&(work->list));
        queue->work_current = work;
73
        work->flags &= ~RT_WORK_STATE_PENDING;
74
        work->workqueue = RT_NULL;
75 76 77 78 79 80 81 82
        rt_hw_interrupt_enable(level);

        /* do work */
        work->work_func(work, work->work_data);
        level = rt_hw_interrupt_disable();
        /* clean current work */
        queue->work_current = RT_NULL;
        rt_hw_interrupt_enable(level);
83 84 85

        /* ack work completion */
        _workqueue_work_completion(queue);
86
    }
87 88
}

89 90
static rt_err_t _workqueue_submit_work(struct rt_workqueue *queue,
        struct rt_work *work, rt_tick_t ticks)
91 92 93 94
{
    rt_base_t level;

    level = rt_hw_interrupt_disable();
95
    /* remove list */
96
    rt_list_remove(&(work->list));
97 98 99
    work->flags &= ~RT_WORK_STATE_PENDING;
    /*  */
    if (ticks == 0)
100
    {
101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121
        if (queue->work_current != work)
        {
            rt_list_insert_after(queue->work_list.prev, &(work->list));
            work->flags |= RT_WORK_STATE_PENDING;
            work->workqueue = queue;
        }

        /* whether the workqueue is doing work */
        if (queue->work_current == RT_NULL &&
            ((queue->work_thread->stat & RT_THREAD_STAT_MASK) == RT_THREAD_SUSPEND))
        {
            rt_hw_interrupt_enable(level);
            /* resume work thread */
            rt_thread_resume(queue->work_thread);
            rt_schedule();
        }
        else
        {
            rt_hw_interrupt_enable(level);
        }
        return RT_EOK;
122
    }
123
    else if (ticks < RT_TICK_MAX / 2)
124
    {
125 126 127 128 129 130 131 132 133 134 135 136 137
        /* Timer started */
        if (work->flags & RT_WORK_STATE_SUBMITTING)
        {
            rt_timer_stop(&work->timer);
            rt_timer_control(&work->timer, RT_TIMER_CTRL_SET_TIME, &ticks);
        }
        else
        {
            rt_timer_init(&(work->timer), "work", _delayed_work_timeout_handler,
                        work, ticks, RT_TIMER_FLAG_ONE_SHOT | RT_TIMER_FLAG_SOFT_TIMER);
            work->flags |= RT_WORK_STATE_SUBMITTING;
        }
        work->workqueue = queue;
138
        rt_hw_interrupt_enable(level);
139 140
        rt_timer_start(&(work->timer));
        return RT_EOK;
141
    }
142 143
    rt_hw_interrupt_enable(level);
    return -RT_ERROR;
144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162
}

/**
 * Remove a pending work item from a workqueue.
 *
 * @param queue the workqueue the item was submitted to
 * @param work  the work item to cancel
 *
 * @return RT_EOK on success, -RT_EBUSY if the item is currently executing
 */
static rt_err_t _workqueue_cancel_work(struct rt_workqueue *queue, struct rt_work *work)
{
    rt_base_t intr_lvl = rt_hw_interrupt_disable();

    /* the item being executed right now cannot be cancelled here */
    if (queue->work_current == work)
    {
        rt_hw_interrupt_enable(intr_lvl);
        return -RT_EBUSY;
    }

    rt_list_remove(&(work->list));
    work->flags &= ~RT_WORK_STATE_PENDING;
    rt_hw_interrupt_enable(intr_lvl);

    return RT_EOK;
}

163
static rt_err_t _workqueue_cancel_delayed_work(struct rt_work *work)
164 165 166 167 168 169 170 171 172 173
{
    rt_base_t level;
    int ret = RT_EOK;

    if (!work->workqueue)
    {
        ret = -EINVAL;
        goto __exit;
    }

174
    if (work->flags & RT_WORK_STATE_PENDING)
175 176
    {
        /* Remove from the queue if already submitted */
177
        ret = _workqueue_cancel_work(work->workqueue, work);
178 179 180 181 182 183 184
        if (ret)
        {
            goto __exit;
        }
    }
    else
    {
185
        if (work->flags & RT_WORK_STATE_SUBMITTING)
186 187 188 189
        {
            level = rt_hw_interrupt_disable();
            rt_timer_stop(&(work->timer));
            rt_timer_detach(&(work->timer));
190
            work->flags &= ~RT_WORK_STATE_SUBMITTING;
191 192
            rt_hw_interrupt_enable(level);
        }
193 194 195 196 197
    }

    level = rt_hw_interrupt_disable();
    /* Detach from workqueue */
    work->workqueue = RT_NULL;
198
    work->flags &= ~(RT_WORK_STATE_PENDING);
199 200 201 202 203 204 205 206
    rt_hw_interrupt_enable(level);

__exit:
    return ret;
}

static void _delayed_work_timeout_handler(void *parameter)
{
207
    struct rt_work *delayed_work;
208
    rt_base_t level;
209

210
    delayed_work = (struct rt_work *)parameter;
211
    level = rt_hw_interrupt_disable();
212
    rt_timer_stop(&(delayed_work->timer));
213
    rt_timer_detach(&(delayed_work->timer));
214 215
    delayed_work->flags &= ~RT_WORK_STATE_SUBMITTING;
    delayed_work->type &= ~RT_WORK_TYPE_DELAYED;
216
    rt_hw_interrupt_enable(level);
217
    _workqueue_submit_work(delayed_work->workqueue, delayed_work, 0);
218 219 220
}

/**
 * Create a workqueue with a dedicated worker thread.
 *
 * @param name       name of the worker thread
 * @param stack_size stack size of the worker thread in bytes
 * @param priority   priority of the worker thread
 *
 * @return the new workqueue, or RT_NULL on allocation/thread-creation failure
 */
struct rt_workqueue *rt_workqueue_create(const char *name, rt_uint16_t stack_size, rt_uint8_t priority)
{
    struct rt_workqueue *queue = RT_NULL;

    queue = (struct rt_workqueue *)RT_KERNEL_MALLOC(sizeof(struct rt_workqueue));
    if (queue != RT_NULL)
    {
        /* initialize work list */
        rt_list_init(&(queue->work_list));
        queue->work_current = RT_NULL;
        rt_sem_init(&(queue->sem), "wqueue", 0, RT_IPC_FLAG_FIFO);

        /* create the work thread */
        queue->work_thread = rt_thread_create(name, _workqueue_thread_entry, queue, stack_size, priority, 10);
        if (queue->work_thread == RT_NULL)
        {
            /* fix: the semaphore initialized above is registered in the
             * kernel object container; detach it before freeing the queue
             * to avoid leaking a dangling object entry */
            rt_sem_detach(&(queue->sem));
            RT_KERNEL_FREE(queue);
            return RT_NULL;
        }

        rt_thread_startup(queue->work_thread);
    }

    return queue;
}

246
rt_err_t rt_workqueue_destroy(struct rt_workqueue *queue)
247
{
248
    RT_ASSERT(queue != RT_NULL);
249

250 251
    rt_thread_delete(queue->work_thread);
    RT_KERNEL_FREE(queue);
252

253
    return RT_EOK;
254 255
}

256
rt_err_t rt_workqueue_dowork(struct rt_workqueue *queue, struct rt_work *work)
257
{
258 259 260
    RT_ASSERT(queue != RT_NULL);
    RT_ASSERT(work != RT_NULL);

261
    return _workqueue_submit_work(queue, work, 0);
262
}
263

264 265 266 267
rt_err_t rt_workqueue_submit_work(struct rt_workqueue *queue, struct rt_work *work, rt_tick_t time)
{
    RT_ASSERT(queue != RT_NULL);
    RT_ASSERT(work != RT_NULL);
268

269 270 271 272 273
    if (time > 0)
    {
        work->type |= RT_WORK_TYPE_DELAYED;
    }

274
    if (work->type & RT_WORK_TYPE_DELAYED)
275
    {
276
        return _workqueue_submit_work(queue, work, time);
277 278 279
    }
    else
    {
280
        return _workqueue_submit_work(queue, work, 0);
281
    }
282 283
}

284
rt_err_t rt_workqueue_critical_work(struct rt_workqueue *queue, struct rt_work *work)
285
{
286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310
    rt_base_t level;
    RT_ASSERT(queue != RT_NULL);
    RT_ASSERT(work != RT_NULL);

    level = rt_hw_interrupt_disable();
    if (queue->work_current == work)
    {
        rt_hw_interrupt_enable(level);
        return -RT_EBUSY;
    }

    /* NOTE: the work MUST be initialized firstly */
    rt_list_remove(&(work->list));

    rt_list_insert_after(queue->work_list.prev, &(work->list));
    if (queue->work_current == RT_NULL)
    {
        rt_hw_interrupt_enable(level);
        /* resume work thread */
        rt_thread_resume(queue->work_thread);
        rt_schedule();
    }
    else rt_hw_interrupt_enable(level);

    return RT_EOK;
311 312
}

313
rt_err_t rt_workqueue_cancel_work(struct rt_workqueue *queue, struct rt_work *work)
314
{
315 316
    RT_ASSERT(queue != RT_NULL);
    RT_ASSERT(work != RT_NULL);
317

318
    if (work->type & RT_WORK_TYPE_DELAYED)
319
    {
320
        return _workqueue_cancel_delayed_work(work);
321 322 323 324
    }
    else
    {
        return _workqueue_cancel_work(queue, work);
325
    }
326 327
}

328
rt_err_t rt_workqueue_cancel_work_sync(struct rt_workqueue *queue, struct rt_work *work)
329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344
{
    rt_base_t level;

    RT_ASSERT(queue != RT_NULL);
    RT_ASSERT(work != RT_NULL);

    level = rt_hw_interrupt_disable();
    if (queue->work_current == work) /* it's current work in the queue */
    {
        /* wait for work completion */
        rt_sem_take(&(queue->sem), RT_WAITING_FOREVER);
    }
    else
    {
        rt_list_remove(&(work->list));
    }
345
    work->flags &= ~RT_WORK_STATE_PENDING;
346 347 348 349 350
    rt_hw_interrupt_enable(level);

    return RT_EOK;
}

351
rt_err_t rt_workqueue_cancel_all_work(struct rt_workqueue *queue)
352
{
353 354 355 356 357 358 359 360 361 362 363 364
    struct rt_list_node *node, *next;
    RT_ASSERT(queue != RT_NULL);

    rt_enter_critical();
    for (node = queue->work_list.next; node != &(queue->work_list); node = next)
    {
        next = node->next;
        rt_list_remove(node);
    }
    rt_exit_critical();

    return RT_EOK;
365 366
}

367 368 369
void rt_delayed_work_init(struct rt_delayed_work *work, void (*work_func)(struct rt_work *work,
                          void *work_data), void *work_data)
{
370
    rt_work_init(&work->work, work_func, work_data);
371
}
372

#ifdef RT_USING_SYSTEM_WORKQUEUE
/* the shared system workqueue, created once at boot (INIT_PREV stage) */
static struct rt_workqueue *sys_workq;

/**
 * Submit a work item to the system workqueue.
 *
 * @param work the work item; must not be RT_NULL
 * @param time delay in OS ticks (0 = run immediately)
 *
 * @return result of rt_workqueue_submit_work()
 */
rt_err_t rt_work_submit(struct rt_work *work, rt_tick_t time)
{
    return rt_workqueue_submit_work(sys_workq, work, time);
}

/**
 * Cancel a work item previously submitted to the system workqueue.
 *
 * @param work the work item; must not be RT_NULL
 *
 * @return result of rt_workqueue_cancel_work()
 */
rt_err_t rt_work_cancel(struct rt_work *work)
{
    return rt_workqueue_cancel_work(sys_workq, work);
}

/**
 * Create the system workqueue if it does not exist yet.
 *
 * @return RT_EOK (or 0 if already initialized); -RT_ENOMEM when creation fails
 */
int rt_work_sys_workqueue_init(void)
{
    if (sys_workq != RT_NULL)
        return 0;

    sys_workq = rt_workqueue_create("sys_work", RT_SYSTEM_WORKQUEUE_STACKSIZE,
                                    RT_SYSTEM_WORKQUEUE_PRIORITY);

    /* fix: previously returned RT_EOK unconditionally, hiding a failed
     * rt_workqueue_create() and leaving sys_workq NULL for later callers */
    return (sys_workq != RT_NULL) ? RT_EOK : -RT_ENOMEM;
}

INIT_PREV_EXPORT(rt_work_sys_workqueue_init);
#endif
#endif