[smart] Replace irq disable/enable codes in futex (#7941)

Signed-off-by: Shell <smokewood@qq.com>
Shell 2023-10-14 13:07:45 +08:00 committed by GitHub
parent 5d16042765
commit 4158c8e88e
11 changed files with 508 additions and 169 deletions
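The heart of the change: futex critical sections that previously masked interrupts with rt_hw_interrupt_disable()/rt_hw_interrupt_enable() now serialize on the per-process lwp_lock mutex through the LWP_LOCK()/LWP_UNLOCK() macros added in lwp_internal.h. A minimal sketch of the before/after pattern, condensed from the diff; the helper names futex_detach_old/futex_detach_new are illustrative only, not upstream routines:

/* Sketch only: condensed from the diff, not the exact upstream code. */

/* Before: a global IRQ-off window protected the per-process futex tree. */
static void futex_detach_old(struct rt_lwp *lwp, struct rt_futex *futex)
{
    rt_base_t level = rt_hw_interrupt_disable();
    lwp_avl_remove(&futex->node, &lwp->address_search_head);
    rt_hw_interrupt_enable(level);
}

/* After: the per-process mutex is taken instead, so the section may sleep
 * and no longer blocks interrupts or other CPUs. */
static void futex_detach_new(struct rt_lwp *lwp, struct rt_futex *futex)
{
    LWP_LOCK(lwp);      /* lwp_critical_enter(lwp) */
    lwp_avl_remove(&futex->node, &lwp->address_search_head);
    LWP_UNLOCK(lwp);    /* lwp_critical_exit(lwp) */
}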


@@ -6,6 +6,10 @@ menuconfig RT_USING_LWP
The lwP is a light-weight process running in user mode.
if RT_USING_LWP
config LWP_DEBUG
bool "Enable debugging features of LwP"
default n
config RT_LWP_MAX_NR
int "The max number of light-weight process"
default 30


@@ -1090,7 +1090,7 @@ static void _lwp_thread_entry(void *parameter)
if (lwp->debug)
{
lwp->bak_first_ins = *(uint32_t *)lwp->text_entry;
lwp->bak_first_inst = *(uint32_t *)lwp->text_entry;
*(uint32_t *)lwp->text_entry = dbg_get_ins();
rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, lwp->text_entry, sizeof(uint32_t));
icache_invalid_all();


@@ -85,7 +85,7 @@ struct rt_lwp
#ifdef ARCH_MM_MPU
struct rt_mpu_info mpu_info;
#endif /* ARCH_MM_MPU */
#endif
#endif /* ARCH_MM_MMU */
#ifdef RT_USING_SMP
int bind_cpu;
@@ -109,7 +109,7 @@ struct rt_lwp
void *data_entry;
uint32_t data_size;
int ref;
rt_atomic_t ref;
void *args;
uint32_t args_length;
pid_t pid;
@@ -119,7 +119,7 @@ struct rt_lwp
rt_list_t t_grp;
rt_list_t timer; /* POSIX timer object binding to a process */
int leader; /*boolean value for session group_leader*/
int leader; /* boolean value for session group_leader*/
struct dfs_fdtable fdt;
char cmd[RT_NAME_MAX];
@@ -135,8 +135,11 @@ struct rt_lwp
struct lwp_avl_struct *address_search_head; /* for addressed object fast search */
char working_directory[DFS_PATH_MAX];
int debug;
uint32_t bak_first_ins;
rt_uint32_t bak_first_inst; /* backup of first instruction */
struct rt_mutex lwp_lock;
rt_slist_t signalfd_notify_head;
@@ -186,6 +189,9 @@ int lwp_setaffinity(pid_t pid, int cpu);
/* ctime lwp API */
int timer_list_free(rt_list_t *timer_list);
struct rt_futex;
rt_err_t lwp_futex(struct rt_lwp *lwp, struct rt_futex *futex, int *uaddr, int op, int val, const struct timespec *timeout);
#ifdef ARCH_MM_MMU
struct __pthread {
/* Part 1 -- these fields may be external or
@@ -230,16 +236,6 @@ struct __pthread {
};
#endif
/* for futex op */
#define FUTEX_WAIT 0
#define FUTEX_WAKE 1
/* for pmutex op */
#define PMUTEX_INIT 0
#define PMUTEX_LOCK 1
#define PMUTEX_UNLOCK 2
#define PMUTEX_DESTROY 3
#ifdef __cplusplus
}
#endif

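With the FUTEX_WAIT/FUTEX_WAKE defines removed from lwp.h and lwp_futex() exported, the kernel path (lwp_futex.c, next file) only accepts operations carrying the private flag. A hedged user-side sketch of the two supported operations; the futex() wrapper declaration and the header providing the FUTEX_* constants are assumptions for illustration, not part of this patch:

/* Hypothetical user-space usage sketch; FUTEX_* constants assumed to come
 * from the libc (musl-style) headers. */
#include <stdatomic.h>
#include <stddef.h>
#include <time.h>

extern int futex(int *uaddr, int op, int val,
                 const struct timespec *timeout, int *uaddr2, int val3);

static _Atomic int flag;

void waiter(void)
{
    /* Sleep only while flag is still 0; -EAGAIN means it already changed. */
    while (atomic_load(&flag) == 0)
        futex((int *)&flag, FUTEX_WAIT | FUTEX_PRIVATE, 0, NULL, NULL, 0);
}

void waker(void)
{
    atomic_store(&flag, 1);
    /* Wake at most one thread suspended on &flag. */
    futex((int *)&flag, FUTEX_WAKE | FUTEX_PRIVATE, 1, NULL, NULL, 0);
}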

@@ -1,12 +1,22 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021/01/02 bernard the first version
* 2023-07-25 Shell Remove usage of rt_hw_interrupt API in the lwp
* Coding style: remove multiple `return` in a routine
* 2023-08-08 Shell Fix return value of futex(wait); only ops with
* FUTEX_PRIVATE are supported currently
*/
#define DBG_TAG "lwp.futex"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
#include "lwp_internal.h"
#include "lwp_pid.h"
#include <rtthread.h>
#include <lwp.h>
@@ -23,27 +33,24 @@ struct rt_futex
struct rt_object *custom_obj;
};
static struct rt_mutex _futex_lock;
static int futex_system_init(void)
{
rt_mutex_init(&_futex_lock, "futexList", RT_IPC_FLAG_FIFO);
return 0;
}
INIT_PREV_EXPORT(futex_system_init);
rt_err_t futex_destory(void *data)
/* must have futex address_search_head taken */
static rt_err_t _futex_destroy_locked(void *data)
{
rt_err_t ret = -1;
rt_base_t level;
struct rt_futex *futex = (struct rt_futex *)data;
if (futex)
{
level = rt_hw_interrupt_disable();
/* remove futex from futext avl */
/**
* Brief: Delete the futex from lwp address_search_head
*
* Note: Critical Section
* - the lwp (READ; shared between threads)
* - the lwp address_search_head (RW; protected by the caller. For the
* destroy routine this is always safe, because the caller has already
* taken a write lock on the lwp.)
*/
lwp_avl_remove(&futex->node, (struct lwp_avl_struct **)futex->node.data);
rt_hw_interrupt_enable(level);
/* release object */
rt_free(futex);
@@ -52,43 +59,79 @@ rt_err_t futex_destory(void *data)
return ret;
}
struct rt_futex *futex_create(int *uaddr, struct rt_lwp *lwp)
/* must have futex address_search_head taken */
static struct rt_futex *_futex_create_locked(int *uaddr, struct rt_lwp *lwp)
{
struct rt_futex *futex = RT_NULL;
struct rt_object *obj = RT_NULL;
if (!lwp)
/**
* Brief: Create a futex under current lwp
*
* Note: Critical Section
* - lwp (READ; shared between threads)
*/
if (lwp)
{
return RT_NULL;
}
futex = (struct rt_futex *)rt_malloc(sizeof(struct rt_futex));
if (!futex)
{
return RT_NULL;
}
obj = rt_custom_object_create("futex", (void *)futex, futex_destory);
if (!obj)
{
rt_free(futex);
return RT_NULL;
}
futex = (struct rt_futex *)rt_malloc(sizeof(struct rt_futex));
if (futex)
{
obj = rt_custom_object_create("futex", (void *)futex, _futex_destroy_locked);
if (!obj)
{
rt_free(futex);
futex = RT_NULL;
}
else
{
/**
* Brief: Add futex to user object tree for resource recycling
*
* Note: Critical Section
* - lwp user object tree (RW; protected by API)
* - futex (if the adding is successful, others can find the
* unready futex. However, only the lwp_free will do this,
* and this is protected by the ref taken by the lwp thread
* that the lwp_free will never execute at the same time)
*/
if (lwp_user_object_add(lwp, obj))
{
rt_object_delete(obj);
rt_free(futex);
futex = RT_NULL;
}
else
{
futex->uaddr = uaddr;
futex->node.avl_key = (avl_key_t)uaddr;
futex->node.data = &lwp->address_search_head;
futex->custom_obj = obj;
rt_list_init(&(futex->waiting_thread));
futex->uaddr = uaddr;
futex->node.avl_key = (avl_key_t)uaddr;
futex->node.data = &lwp->address_search_head;
futex->custom_obj = obj;
rt_list_init(&(futex->waiting_thread));
/* insert into futex head */
lwp_avl_insert(&futex->node, &lwp->address_search_head);
/**
* Brief: Insert into futex head
*
* Note: Critical Section
* - lwp address_search_head (RW; protected by caller)
*/
lwp_avl_insert(&futex->node, &lwp->address_search_head);
}
}
}
}
return futex;
}
static struct rt_futex *futex_get(void *uaddr, struct rt_lwp *lwp)
/* must have futex address_search_head taken */
static struct rt_futex *_futex_get_locked(void *uaddr, struct rt_lwp *lwp)
{
struct rt_futex *futex = RT_NULL;
struct lwp_avl_struct *node = RT_NULL;
/**
* Note: Critical Section
* protect lwp address_search_head (READ)
*/
node = lwp_avl_find((avl_key_t)uaddr, lwp->address_search_head);
if (!node)
{
@@ -98,162 +141,213 @@ static struct rt_futex *futex_get(void *uaddr, struct rt_lwp *lwp)
return futex;
}
int futex_wait(struct rt_futex *futex, int value, const struct timespec *timeout)
static int _futex_wait(struct rt_futex *futex, struct rt_lwp *lwp, int value, const struct timespec *timeout)
{
rt_base_t level = 0;
rt_thread_t thread;
rt_err_t ret = -RT_EINTR;
/**
* Brief: Remove the current thread from the scheduler and append it to
* the waiting thread list of the futex. If a timeout is specified,
* a timer will be set up for the current thread
*
* Note: Critical Section
* - futex (RW; Protected by lwp_lock)
* - the local cpu
*/
LWP_LOCK(lwp);
if (*(futex->uaddr) == value)
{
rt_thread_t thread = rt_thread_self();
thread = rt_thread_self();
rt_enter_critical();
level = rt_hw_interrupt_disable();
ret = rt_thread_suspend_with_flag(thread, RT_INTERRUPTIBLE);
if (ret < 0)
if (ret == RT_EOK)
{
rt_mutex_release(&_futex_lock);
rt_hw_interrupt_enable(level);
rt_set_errno(EINTR);
return ret;
}
/**
* Brief: Add current thread into futex waiting thread list
*
* Note: Critical Section
* - the futex waiting_thread list (RW)
*/
rt_list_insert_before(&(futex->waiting_thread), &(thread->tlist));
/* add into waiting thread list */
rt_list_insert_before(&(futex->waiting_thread), &(thread->tlist));
/* with timeout */
if (timeout)
{
rt_int32_t time = timeout->tv_sec * RT_TICK_PER_SECOND + timeout->tv_nsec * RT_TICK_PER_SECOND / NANOSECOND_PER_SECOND;
if (time < 0)
if (timeout)
{
time = 0;
/* start the timer of thread */
rt_int32_t time = timeout->tv_sec * RT_TICK_PER_SECOND + timeout->tv_nsec * RT_TICK_PER_SECOND / NANOSECOND_PER_SECOND;
if (time < 0)
{
time = 0;
}
rt_timer_control(&(thread->thread_timer),
RT_TIMER_CTRL_SET_TIME,
&time);
rt_timer_start(&(thread->thread_timer));
}
/* start the timer of thread */
rt_timer_control(&(thread->thread_timer),
RT_TIMER_CTRL_SET_TIME,
&time);
rt_timer_start(&(thread->thread_timer));
}
rt_mutex_release(&_futex_lock);
rt_hw_interrupt_enable(level);
else
{
ret = EINTR;
}
/* do schedule */
rt_schedule();
LWP_UNLOCK(lwp);
rt_exit_critical();
ret = thread->error;
/* check errno */
if (ret == RT_EOK)
{
/* do schedule */
rt_schedule();
/* check errno */
ret = rt_get_errno();
}
ret = ret > 0 ? -ret : ret;
switch (ret)
{
case RT_EOK:
ret = 0;
break;
case -RT_EINTR:
ret = -EINTR;
break;
default:
ret = -EAGAIN;
break;
}
}
else
{
rt_mutex_release(&_futex_lock);
LWP_UNLOCK(lwp);
ret = -EAGAIN;
rt_set_errno(EAGAIN);
}
return ret;
}
void futex_wake(struct rt_futex *futex, int number)
static long _futex_wake(struct rt_futex *futex, struct rt_lwp *lwp, int number)
{
rt_base_t level = rt_hw_interrupt_disable();
while (!rt_list_isempty(&(futex->waiting_thread)) && number)
long woken_cnt = 0;
int is_empty = 0;
rt_thread_t thread;
/**
* Brief: Wake up suspended threads on the futex waiting thread list
*
* Note: Critical Section
* - the futex waiting_thread list (RW)
*/
while (number && !is_empty)
{
rt_thread_t thread;
LWP_LOCK(lwp);
is_empty = rt_list_isempty(&(futex->waiting_thread));
if (!is_empty)
{
thread = rt_list_entry(futex->waiting_thread.next, struct rt_thread, tlist);
/* remove from waiting list */
rt_list_remove(&(thread->tlist));
thread = rt_list_entry(futex->waiting_thread.next, struct rt_thread, tlist);
/* remove from waiting list */
rt_list_remove(&(thread->tlist));
thread->error = RT_EOK;
/* resume the suspended thread */
rt_thread_resume(thread);
thread->error = RT_EOK;
/* resume the suspended thread */
rt_thread_resume(thread);
number--;
number--;
woken_cnt++;
}
LWP_UNLOCK(lwp);
}
rt_mutex_release(&_futex_lock);
rt_hw_interrupt_enable(level);
/* do schedule */
rt_schedule();
return woken_cnt;
}
#include <syscall_generic.h>
sysret_t sys_futex(int *uaddr, int op, int val, const struct timespec *timeout,
int *uaddr2, int val3)
rt_inline rt_bool_t _timeout_ignored(int op)
{
struct rt_lwp *lwp = RT_NULL;
struct rt_futex *futex = RT_NULL;
int ret = 0;
rt_err_t lock_ret = 0;
if (!lwp_user_accessable(uaddr, sizeof(int)))
{
rt_set_errno(EINVAL);
return -RT_EINVAL;
}
/**
* If (op & (FUTEX_WAKE|FUTEX_FD|FUTEX_WAKE_BITSET|FUTEX_TRYLOCK_PI|FUTEX_UNLOCK_PI)) is true,
* `timeout` should be ignored by the implementation, according to the futex(2) manual.
* Since only FUTEX_WAKE of these is implemented in rt-smart, only FUTEX_WAKE is checked here currently
*/
if (timeout && !(op & (FUTEX_WAKE)))
return (op & (FUTEX_WAKE));
}
sysret_t sys_futex(int *uaddr, int op, int val, const struct timespec *timeout,
int *uaddr2, int val3)
{
struct rt_lwp *lwp = RT_NULL;
struct rt_futex *futex = RT_NULL;
sysret_t ret = 0;
if (!lwp_user_accessable(uaddr, sizeof(int)))
{
if (!lwp_user_accessable((void *)timeout, sizeof(struct timespec)))
{
rt_set_errno(EINVAL);
return -RT_EINVAL;
}
ret = -EINVAL;
}
lock_ret = rt_mutex_take_interruptible(&_futex_lock, RT_WAITING_FOREVER);
if (lock_ret != RT_EOK)
else if (timeout && !_timeout_ignored(op) && !lwp_user_accessable((void *)timeout, sizeof(struct timespec)))
{
rt_set_errno(EAGAIN);
return -RT_EINTR;
ret = -EINVAL;
}
lwp = lwp_self();
futex = futex_get(uaddr, lwp);
if (futex == RT_NULL)
else
{
/* create a futex according to this uaddr */
futex = futex_create(uaddr, lwp);
if (futex == RT_NULL)
{
rt_mutex_release(&_futex_lock);
rt_set_errno(ENOMEM);
return -RT_ENOMEM;
}
if (lwp_user_object_add(lwp, futex->custom_obj) != 0)
{
rt_custom_object_destroy(futex->custom_obj);
rt_mutex_release(&_futex_lock);
rt_set_errno(ENOMEM);
return -RT_ENOMEM;
}
}
switch (op)
{
case FUTEX_WAIT:
ret = futex_wait(futex, val, timeout);
/* _futex_lock is released by futex_wait */
break;
case FUTEX_WAKE:
futex_wake(futex, val);
/* _futex_lock is released by futex_wake */
break;
default:
rt_mutex_release(&_futex_lock);
rt_set_errno(ENOSYS);
ret = -ENOSYS;
break;
lwp = lwp_self();
ret = lwp_futex(lwp, futex, uaddr, op, val, timeout);
}
return ret;
}
rt_err_t lwp_futex(struct rt_lwp *lwp, struct rt_futex *futex, int *uaddr, int op, int val, const struct timespec *timeout)
{
rt_err_t rc = 0;
/**
* Brief: Check if the futex exist, otherwise create a new one
*
* Note: Critical Section
* - lwp address_search_head (READ)
*/
LWP_LOCK(lwp);
futex = _futex_get_locked(uaddr, lwp);
if (futex == RT_NULL)
{
/* create a futex according to this uaddr */
futex = _futex_create_locked(uaddr, lwp);
if (futex == RT_NULL)
{
rc = -ENOMEM;
}
}
LWP_UNLOCK(lwp);
if (!rc)
{
if (!(op & FUTEX_PRIVATE))
rc = -ENOSYS;
else
{
op &= ~FUTEX_PRIVATE;
switch (op)
{
case FUTEX_WAIT:
rc = _futex_wait(futex, lwp, val, timeout);
break;
case FUTEX_WAKE:
rc = _futex_wake(futex, lwp, val);
break;
default:
LOG_W("User require op=%d which is not implemented", op);
rc = -ENOSYS;
break;
}
}
}
return rc;
}

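For reference, the timeout passed to _futex_wait() above is converted from the caller's struct timespec into OS ticks before the thread timer is armed. A standalone sketch of that arithmetic (with RT_TICK_PER_SECOND = 1000, a timeout of 1 s + 500,000,000 ns comes out to 1500 ticks); NANOSECOND_PER_SECOND is assumed to be 1,000,000,000 as in the kernel headers:

#include <rtthread.h>   /* RT_TICK_PER_SECOND, rt_int32_t */
#include <time.h>       /* struct timespec */

#define NANOSECOND_PER_SECOND 1000000000L

/* Same conversion as in _futex_wait(): a negative result clamps to zero,
 * which makes the thread timer fire immediately. */
static rt_int32_t timespec_to_tick(const struct timespec *timeout)
{
    rt_int32_t tick = timeout->tv_sec * RT_TICK_PER_SECOND
                    + timeout->tv_nsec * RT_TICK_PER_SECOND / NANOSECOND_PER_SECOND;
    return (tick < 0) ? 0 : tick;
}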

@@ -0,0 +1,137 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-07-25 Shell first version
*/
#define DBG_TAG "lwp.internal"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
#include <stdlib.h>
#include "lwp_internal.h"
static rt_err_t _mutex_take_safe(rt_mutex_t mtx, rt_int32_t timeout, rt_bool_t interruptable)
{
DEF_RETURN_CODE(rc);
int retry;
rt_int32_t effect_timeout;
#ifdef LWP_DEBUG
rt_thread_t thread = rt_thread_self();
#endif
if (mtx)
{
effect_timeout = timeout;
#if DBG_LVL == DBG_LOG && defined(LWP_DEBUG)
int exception;
rt_list_t *node = RT_NULL;
struct rt_mutex *tak_obj = RT_NULL;
if (!rt_list_isempty(&(thread->taken_object_list)) && timeout == RT_WAITING_FOREVER)
{
exception = 1;
effect_timeout = 0;
}
else
{
exception = 0;
}
#endif /* DBG_LOG && defined(LWP_DEBUG) */
do {
retry = 0;
if (interruptable)
rc = rt_mutex_take_interruptible(mtx, effect_timeout);
else
rc = rt_mutex_take(mtx, effect_timeout);
#ifdef LWP_DEBUG
if (rc == RT_EOK)
{
if (rt_mutex_get_hold(mtx) > 1)
{
LOG_W("Already hold the lock");
}
}
else if (rc == -RT_ETIMEOUT)
{
#if DBG_LVL == DBG_LOG
if (exception)
{
rt_list_for_each(node, &(thread->taken_object_list))
{
tak_obj = rt_list_entry(node, struct rt_mutex, taken_list);
if (rt_mutex_get_owner(tak_obj)->stat & RT_THREAD_SUSPEND_MASK)
LOG_D("Potential dead lock - Taken: %s, Try take: %s",
tak_obj->parent.parent.name, mtx->parent.parent.name);
}
rt_backtrace();
retry = 1;
exception = 0;
}
#endif
}
else if (rc != -RT_EINTR)
{
char tname[RT_NAME_MAX];
rt_thread_get_name(thread, tname, sizeof(tname));
LOG_W("Possible kernel corruption detected on thread %s with errno %ld", tname, rc);
}
#endif /* LWP_DEBUG */
} while (retry);
}
else
{
LOG_W("%s: mtx should not be NULL", __func__);
RT_ASSERT(0);
}
RETURN(rc);
}
rt_err_t lwp_mutex_take_safe(rt_mutex_t mtx, rt_int32_t timeout, rt_bool_t interruptable)
{
DEF_RETURN_CODE(rc);
rc = _mutex_take_safe(mtx, timeout, interruptable);
RETURN(rc);
}
rt_err_t lwp_mutex_release_safe(rt_mutex_t mtx)
{
DEF_RETURN_CODE(rc);
rc = rt_mutex_release(mtx);
if (rc)
{
LOG_I("%s: release failed with code %ld", __func__, rc);
}
RETURN(rc);
}
rt_err_t lwp_critical_enter(struct rt_lwp *lwp)
{
rt_err_t rc;
rc = lwp_mutex_take_safe(&lwp->lwp_lock, RT_WAITING_FOREVER, 0);
/* if current process is force killed */
if (rc != RT_EOK)
{
if (rc == -RT_EINTR && lwp_self() != RT_NULL)
sys_exit(EXIT_SUCCESS);
else
LOG_I("%s: unexpected return code = %ld", __func__, rc);
}
return rc;
}
rt_err_t lwp_critical_exit(struct rt_lwp *lwp)
{
return lwp_mutex_release_safe(&lwp->lwp_lock);
}

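A consequence of lwp_critical_enter() above worth spelling out: when the owning process is force killed while a thread sleeps on lwp_lock, the take fails with -RT_EINTR and the thread exits inside the call instead of propagating an error. A short sketch of what that means for callers of LWP_LOCK(); the routine name is illustrative only:

/* Sketch: any lwp routine guarded by the per-process lock. */
static void lwp_touch_state(struct rt_lwp *lwp)
{
    /* May sleep. If the process is being force killed while waiting,
     * lwp_critical_enter() calls sys_exit() and control never returns
     * here; on success the lock is held. */
    LWP_LOCK(lwp);

    /* ... access lwp->address_search_head or other protected state ... */

    LWP_UNLOCK(lwp);
}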

@@ -0,0 +1,96 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-07-25 Shell first version
*/
#ifndef __LWP_INTERNAL_H__
#define __LWP_INTERNAL_H__
#include <rtthread.h>
#include "lwp.h"
#include "libc_musl.h"
struct rt_lwp;
rt_err_t lwp_mutex_take_safe(rt_mutex_t mtx, rt_int32_t timeout, rt_bool_t interruptable);
rt_err_t lwp_mutex_release_safe(rt_mutex_t mtx);
#ifdef RT_USING_SMP
#define LOCAL_IRQ_MASK() rt_hw_local_irq_disable()
#define LOCAL_IRQ_UNMASK(level) rt_hw_local_irq_enable(level)
#else
#define LOCAL_IRQ_MASK() rt_hw_interrupt_disable()
#define LOCAL_IRQ_UNMASK(level) rt_hw_interrupt_enable(level)
#endif
#ifndef LWP_USING_CPUS_LOCK
rt_err_t lwp_critical_enter(struct rt_lwp *lwp);
rt_err_t lwp_critical_exit(struct rt_lwp *lwp);
#define LWP_LOCK(lwp) \
do { \
RT_DEBUG_SCHEDULER_AVAILABLE(1); \
if (lwp_critical_enter(lwp) != RT_EOK) \
{ \
RT_ASSERT(0); \
} \
} while (0)
#define LWP_UNLOCK(lwp) \
do { \
if (lwp_critical_exit(lwp) != RT_EOK) \
{ \
RT_ASSERT(0); \
} \
} while (0)
#else
#define LWP_LOCK(lwp) rt_base_t level = rt_hw_interrupt_disable()
#define LWP_UNLOCK(lwp) rt_hw_interrupt_enable(level)
#endif /* LWP_USING_CPUS_LOCK */
/* cpus lock */
#ifdef LWP_OVERRIDE_CPUS_LOCK
#undef rt_hw_interrupt_disable
#undef rt_hw_interrupt_enable
#define rt_hw_interrupt_disable() ({ \
rt_base_t irq = rt_hw_interrupt_is_disabled(); \
if (irq) \
{ \
LOG_W("Nested interrupt disable"); \
rt_backtrace(); \
irq = 0xabadcafe; \
} else { \
irq = rt_cpus_lock(); \
} \
irq; \
})
#define rt_hw_interrupt_enable(level) do { \
if (level != 0xabadcafe) \
rt_cpus_unlock(level); \
} while (0)
#endif /* LWP_OVERRIDE_CPUS_LOCK */
/**
* @brief Return code with safety check
* Guards against paths where a return value is returned without being correctly initialized
*/
#ifndef LWP_DEBUG
#define DEF_RETURN_CODE(name) rt_err_t name
#define RETURN(name) return name
#else
#define UNINITIALIZED 0xbeefcafe
#define DEF_RETURN_CODE(name) rt_err_t name = UNINITIALIZED
#define RETURN(name) {RT_ASSERT(name != UNINITIALIZED);return name;}
#endif /* LWP_DEBUG */
#endif /* __LWP_INTERNAL_H__ */

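The DEF_RETURN_CODE()/RETURN() pair above is a debug-only guard: with LWP_DEBUG enabled, a routine that reaches RETURN() without ever assigning its return code trips the assertion rather than returning an uninitialized value. A minimal usage sketch with a hypothetical function, not taken from the patch:

#include "lwp_internal.h"

static rt_err_t example_take(rt_mutex_t mtx)
{
    DEF_RETURN_CODE(rc);    /* rc starts as UNINITIALIZED under LWP_DEBUG */

    if (mtx)
        rc = rt_mutex_take(mtx, RT_WAITING_FOREVER);
    else
        rc = -RT_EINVAL;    /* omitting this branch would assert in RETURN() */

    RETURN(rc);
}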

@@ -331,6 +331,7 @@ rt_lwp_t lwp_create(rt_base_t flags)
lwp_user_object_lock_init(new_lwp);
rt_wqueue_init(&new_lwp->wait_queue);
lwp_signal_init(&new_lwp->signal);
rt_mutex_init(&new_lwp->lwp_lock, "lwp_lock", RT_IPC_FLAG_PRIO);
/* lwp with pid */
if (flags & LWP_CREATE_FLAG_ALLOC_PID)
@@ -392,6 +393,8 @@ void lwp_free(struct rt_lwp* lwp)
lwp_user_object_clear(lwp);
lwp_user_object_lock_destroy(lwp);
RT_ASSERT(lwp->lwp_lock.owner == RT_NULL);
rt_mutex_detach(&lwp->lwp_lock);
/* free data section */
if (lwp->data_entry != RT_NULL)


@@ -9,12 +9,13 @@
* 2022/12/18 bernard fix the _m_lock to tid in user land.
*/
#include "lwp_internal.h"
#include <rtthread.h>
#include <lwp.h>
#ifdef ARCH_MM_MMU
#include <lwp_user_mm.h>
#endif
#include <sys/time.h>
#include <syscall_generic.h>
#define PMUTEX_NORMAL 0 /* Unable to recursion */
#define PMUTEX_RECURSIVE 1 /* Can be recursion */
@@ -423,8 +424,6 @@ static int _pthread_mutex_destroy(void *umutex)
return 0;
}
#include <syscall_generic.h>
sysret_t sys_pmutex(void *umutex, int op, void *arg)
{
int ret = -EINVAL;


@@ -1021,7 +1021,7 @@ rt_err_t lwp_thread_signal_timedwait(rt_thread_t thread, lwp_sigset_t *sigset,
ret = rt_thread_suspend_with_flag(thread, RT_INTERRUPTIBLE);
rt_timer_control(&(thread->thread_timer),
RT_TIMER_CTRL_SET_TIME,
&timeout);
&time);
rt_timer_start(&(thread->thread_timer));
}


@@ -30,8 +30,8 @@
#include "syscall_generic.h"
#include <lwp.h>
#include "lwp_signal.h"
#include "libc_musl.h"
#include "lwp_internal.h"
#ifdef ARCH_MM_MMU
#include <lwp_user_mm.h>
#include <lwp_arch.h>
@@ -343,7 +343,7 @@ sysret_t sys_exit_group(int value)
tid->clear_child_tid = RT_NULL;
lwp_put_to_user(clear_child_tid, &t, sizeof t);
sys_futex(clear_child_tid, FUTEX_WAKE, 1, RT_NULL, RT_NULL, 0);
sys_futex(clear_child_tid, FUTEX_WAKE | FUTEX_PRIVATE, 1, RT_NULL, RT_NULL, 0);
}
lwp_terminate(lwp);


@@ -384,6 +384,16 @@ rt_err_t rt_mutex_take_interruptible(rt_mutex_t mutex, rt_int32_t time);
rt_err_t rt_mutex_take_killable(rt_mutex_t mutex, rt_int32_t time);
rt_err_t rt_mutex_release(rt_mutex_t mutex);
rt_err_t rt_mutex_control(rt_mutex_t mutex, int cmd, void *arg);
rt_inline rt_thread_t rt_mutex_get_owner(rt_mutex_t mutex)
{
return mutex->owner;
}
rt_inline rt_ubase_t rt_mutex_get_hold(rt_mutex_t mutex)
{
return mutex->hold;
}
#endif /* RT_USING_MUTEX */
#ifdef RT_USING_EVENT