[smart] fixup of lwp recycling and mm varea (#8206)
Signed-off-by: shell <wangxiaoyao@rt-thread.com>
Signed-off-by: Shell <smokewood@qq.com>
Co-authored-by: xqyjlj <xqyjlj@126.com>
@@ -295,6 +295,10 @@ void fdt_fd_release(struct dfs_fdtable *fdt, int fd)
     if (file && file->ref_count == 1)
     {
         rt_mutex_detach(&file->pos_lock);
+        if (file->mmap_context)
+        {
+            rt_free(file->mmap_context);
+        }
         rt_free(file);
     }
     else
@@ -35,7 +35,6 @@
 #define PMUTEX_DESTROY 3
 
 /* for sys/mman.h */
-#define MAP_FAILED ((void *)-1)
 
 #define MAP_SHARED 0x01
 #define MAP_PRIVATE 0x02
@@ -1157,6 +1157,10 @@ rt_err_t lwp_children_register(struct rt_lwp *parent, struct rt_lwp *child)
     LWP_UNLOCK(parent);
 
     LOG_D("%s(parent=%p, child=%p)", __func__, parent, child);
+    /* parent holds reference to child */
+    lwp_ref_inc(parent);
+    /* child holds reference to parent */
+    lwp_ref_inc(child);
 
     return 0;
 }
@@ -1178,6 +1182,8 @@ rt_err_t lwp_children_unregister(struct rt_lwp *parent, struct rt_lwp *child)
     LWP_UNLOCK(parent);
 
     LOG_D("%s(parent=%p, child=%p)", __func__, parent, child);
+    lwp_ref_dec(child);
+    lwp_ref_dec(parent);
 
     return 0;
 }
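Note on the two hunks above: registration takes one reference on each side of the parent/child relation, and unregistration must drop exactly the same pair, or a process object can never be recycled. A minimal standalone sketch of that discipline (illustrative names only, not the RT-Thread API; it assumes the release helper returns the count held before the decrement, which matches the utest change near the end of this diff):

#include <assert.h>
#include <stdio.h>

/* toy process object; ref models the lwp reference count (illustrative) */
struct proc { int ref; };

static void proc_ref_inc(struct proc *p) { p->ref++; }
/* returns the count held *before* the decrement */
static int  proc_ref_dec(struct proc *p) { return p->ref--; }

static void children_register(struct proc *parent, struct proc *child)
{
    proc_ref_inc(parent); /* the child's entry pins the parent */
    proc_ref_inc(child);  /* the parent's list pins the child */
}

static void children_unregister(struct proc *parent, struct proc *child)
{
    /* must mirror register exactly, or objects leak or underflow */
    proc_ref_dec(child);
    proc_ref_dec(parent);
}

int main(void)
{
    struct proc parent = { 1 }, child = { 1 };
    children_register(&parent, &child);
    children_unregister(&parent, &child);
    assert(parent.ref == 1 && child.ref == 1);
    printf("refcounts balanced\n");
    return 0;
}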
@@ -1195,7 +1201,7 @@ pid_t lwp_execve(char *filename, int debug, int argc, char **argv, char **envp)
 
     if (filename == RT_NULL)
     {
-        return -RT_ERROR;
+        return -EINVAL;
     }
 
     if (access(filename, X_OK) != 0)
@@ -1208,7 +1214,7 @@ pid_t lwp_execve(char *filename, int debug, int argc, char **argv, char **envp)
     if (lwp == RT_NULL)
     {
         dbg_log(DBG_ERROR, "lwp struct out of memory!\n");
-        return -RT_ENOMEM;
+        return -ENOMEM;
     }
     LOG_D("lwp malloc : %p, size: %d!", lwp, sizeof(struct rt_lwp));
 
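Note: both hunks above replace kernel-internal error codes (-RT_ERROR, -RT_ENOMEM) with POSIX errno values (-EINVAL, -ENOMEM), so the negative return of lwp_execve can be handed to user space unchanged. A sketch of how a caller-side decode typically looks, assuming the Linux-style "-errno" return convention (hypothetical caller, not part of this diff):

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* a negative return is -errno, as in Linux-style syscalls */
static void report(const char *what, long rc)
{
    if (rc < 0)
        fprintf(stderr, "%s failed: %s\n", what, strerror((int)-rc));
    else
        printf("%s -> pid %ld\n", what, rc);
}

int main(void)
{
    report("execve(NULL)", -EINVAL); /* what the fixed path now returns */
    report("execve(huge)", -ENOMEM);
    return 0;
}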
@@ -171,7 +171,6 @@ char *lwp_getcwd(void);
 void lwp_request_thread_exit(rt_thread_t thread_to_exit);
 int lwp_check_exit_request(void);
 void lwp_terminate(struct rt_lwp *lwp);
-void lwp_wait_subthread_exit(void);
 
 int lwp_tid_init(void);
 int lwp_tid_get(void);
File diff suppressed because it is too large
@@ -85,6 +85,9 @@ rt_inline void lwp_from_pid_release_lock(struct rt_lwp *lwp)
     lwp_ref_dec(lwp);
 }
 
+void lwp_thread_exit(rt_thread_t thread, rt_base_t status);
+void lwp_exit(struct rt_lwp *lwp, rt_base_t status);
+
 #ifdef __cplusplus
 }
 #endif
@@ -298,12 +298,12 @@ static int _pthread_mutex_lock_timeout(void *umutex, struct timespec *timeout)
                 lwp_mutex_release_safe(&_pmutex_lock);
                 return -EDEADLK;
             }
+            lwp_mutex_release_safe(&_pmutex_lock);
             lock_ret = rt_mutex_take_interruptible(pmutex->lock.kmutex, time);
             if (lock_ret == RT_EOK)
             {
                 umutex_p->_m_lock = rt_thread_self()->tid;
             }
-            lwp_mutex_release_safe(&_pmutex_lock);
             break;
         default: /* unknown type */
             return -EINVAL;
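Note: the moved release above drops the global _pmutex_lock before blocking on the target kernel mutex instead of after, since holding a lookup lock across a potentially unbounded sleep would stall every other pmutex operation. A minimal user-space analogue of the pattern, using plain POSIX threads (illustrative, not RT-Thread code):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t registry = PTHREAD_MUTEX_INITIALIZER; /* guards lookup */
static pthread_mutex_t object   = PTHREAD_MUTEX_INITIALIZER; /* may block long */

static void lock_object_safely(void)
{
    pthread_mutex_lock(&registry);
    /* ... look up `object` in some shared table ... */
    pthread_mutex_unlock(&registry); /* release the registry BEFORE blocking */

    pthread_mutex_lock(&object);     /* may sleep; registry stays available */
    /* ... critical section on the object ... */
    pthread_mutex_unlock(&object);
}

int main(void)
{
    lock_object_safely();
    puts("done");
    return 0;
}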
@@ -764,7 +764,6 @@ rt_err_t lwp_signal_action(struct rt_lwp *lwp, int signo,
     rt_list_t *thread_list;
     rt_err_t ret = RT_EOK;
 
-
     if (lwp)
     {
         /** acquire READ access to lwp */
@@ -328,104 +328,35 @@ static void _crt_thread_entry(void *parameter)
 /* exit group */
 sysret_t sys_exit_group(int value)
 {
-    rt_thread_t tid, main_thread;
-    struct rt_lwp *lwp;
+    sysret_t rc = 0;
+    struct rt_lwp *lwp = lwp_self();
 
-    tid = rt_thread_self();
-    lwp = (struct rt_lwp *)tid->lwp;
+    LOG_D("process(%p) exit.", lwp);
 
-#ifdef ARCH_MM_MMU
-    if (tid->clear_child_tid)
+    if (lwp)
+        lwp_exit(lwp, value);
+    else
     {
-        int t = 0;
-        int *clear_child_tid = tid->clear_child_tid;
-
-        tid->clear_child_tid = RT_NULL;
-        lwp_put_to_user(clear_child_tid, &t, sizeof t);
-        sys_futex(clear_child_tid, FUTEX_WAKE | FUTEX_PRIVATE, 1, RT_NULL, RT_NULL, 0);
+        LOG_E("Can't find matching process of current thread");
+        rc = -EINVAL;
     }
-    lwp_terminate(lwp);
-
-    main_thread = rt_list_entry(lwp->t_grp.prev, struct rt_thread, sibling);
-    if (main_thread == tid)
-    {
-        lwp_wait_subthread_exit();
-        lwp->lwp_ret = LWP_CREATE_STAT(value);
-    }
-#else
-    main_thread = rt_list_entry(lwp->t_grp.prev, struct rt_thread, sibling);
-    if (main_thread == tid)
-    {
-        rt_thread_t sub_thread;
-        rt_list_t *list;
-
-        lwp_terminate(lwp);
-
-        /* delete all subthread */
-        while ((list = tid->sibling.prev) != &lwp->t_grp)
-        {
-            sub_thread = rt_list_entry(list, struct rt_thread, sibling);
-            rt_list_remove(&sub_thread->sibling);
-            rt_thread_delete(sub_thread);
-        }
-        lwp->lwp_ret = value;
-    }
-#endif /* ARCH_MM_MMU */
-
-    /**
-     * Note: the tid tree always hold a reference to thread, hence the tid must
-     * be release before cleanup of thread
-     */
-    lwp_tid_put(tid->tid);
-    tid->tid = 0;
-    rt_list_remove(&tid->sibling);
-    rt_thread_delete(tid);
-    rt_schedule();
 
-    /* never reach here */
-    RT_ASSERT(0);
-    return 0;
+    return rc;
 }
 
 /* thread exit */
-void sys_exit(int status)
+sysret_t sys_exit(int status)
 {
-    rt_thread_t tid, main_thread;
-    struct rt_lwp *lwp;
-
-    LOG_D("thread exit");
+    sysret_t rc = 0;
+    rt_thread_t tid;
 
     tid = rt_thread_self();
-    lwp = (struct rt_lwp *)tid->lwp;
-
-#ifdef ARCH_MM_MMU
-    if (tid->clear_child_tid)
+    if (tid && tid->lwp)
+        lwp_thread_exit(tid, status);
+    else
     {
-        int t = 0;
-        int *clear_child_tid = tid->clear_child_tid;
-
-        tid->clear_child_tid = RT_NULL;
-        lwp_put_to_user(clear_child_tid, &t, sizeof t);
-        sys_futex(clear_child_tid, FUTEX_WAKE, 1, RT_NULL, RT_NULL, 0);
+        LOG_E("Can't find matching process of current thread");
+        rc = -EINVAL;
     }
-
-    main_thread = rt_list_entry(lwp->t_grp.prev, struct rt_thread, sibling);
-    if (main_thread == tid && tid->sibling.prev == &lwp->t_grp)
-    {
-        lwp_terminate(lwp);
-        lwp_wait_subthread_exit();
-        lwp->lwp_ret = LWP_CREATE_STAT(status);
-    }
-#endif /* ARCH_MM_MMU */
-
-    lwp_tid_put(tid->tid);
-    tid->tid = 0;
-    rt_list_remove(&tid->sibling);
-    rt_thread_delete(tid);
-    rt_schedule();
 
-    return;
+    return rc;
 }
 
 /* syscall: "read" ret: "ssize_t" args: "int" "void *" "size_t" */
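Note: after this rewrite the syscall layer only picks the recycling entry point: sys_exit_group ends the whole process via lwp_exit, while sys_exit ends only the calling thread via lwp_thread_exit; the clear_child_tid futex wake and tid recycling move into those helpers. The distinction mirrors standard POSIX semantics, shown here as a user-space analogue (not RT-Thread code):

#include <pthread.h>
#include <stdio.h>

/* exit_group ends every thread in the process (like exit());
 * plain sys_exit ends only the calling thread (like pthread_exit()). */
static void *worker(void *arg)
{
    (void)arg;
    pthread_exit(NULL); /* thread-level exit: the process lives on */
}

int main(void)
{
    pthread_t t;
    pthread_create(&t, NULL, worker, NULL);
    pthread_join(t, NULL);
    puts("worker exited; process still alive");
    return 0;            /* process-level exit, i.e. exit_group */
}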
@@ -1174,18 +1105,28 @@ sysret_t sys_getpid(void)
 /* syscall: "getpriority" ret: "int" args: "int" "id_t" */
 sysret_t sys_getpriority(int which, id_t who)
 {
+    long prio = 0xff;
+
     if (which == PRIO_PROCESS)
     {
-        rt_thread_t tid;
+        struct rt_lwp *lwp = RT_NULL;
 
-        tid = rt_thread_self();
-        if (who == (id_t)(rt_size_t)tid || who == 0xff)
+        lwp_pid_lock_take();
+        if(who == 0)
+            lwp = lwp_self();
+        else
+            lwp = lwp_from_pid_locked(who);
+
+        if (lwp)
         {
-            return tid->current_priority;
+            rt_thread_t thread = rt_list_entry(lwp->t_grp.prev, struct rt_thread, sibling);
+            prio = thread->current_priority;
        }
 
+        lwp_pid_lock_release();
     }
 
-    return 0xff;
+    return prio;
 }
 
 /* syscall: "setpriority" ret: "int" args: "int" "id_t" "int" */
@@ -1193,14 +1134,30 @@ sysret_t sys_setpriority(int which, id_t who, int prio)
 {
     if (which == PRIO_PROCESS)
     {
-        rt_thread_t tid;
+        struct rt_lwp *lwp = RT_NULL;
 
-        tid = rt_thread_self();
-        if ((who == (id_t)(rt_size_t)tid || who == 0xff) && (prio >= 0 && prio < RT_THREAD_PRIORITY_MAX))
+        lwp_pid_lock_take();
+        if(who == 0)
+            lwp = lwp_self();
+        else
+            lwp = lwp_from_pid_locked(who);
+
+        if (lwp && prio >= 0 && prio < RT_THREAD_PRIORITY_MAX)
         {
-            rt_thread_control(tid, RT_THREAD_CTRL_CHANGE_PRIORITY, &prio);
+            rt_list_t *list;
+            rt_thread_t thread;
+            for (list = lwp->t_grp.next; list != &lwp->t_grp; list = list->next)
+            {
+                thread = rt_list_entry(list, struct rt_thread, sibling);
+                rt_thread_control(thread, RT_THREAD_CTRL_CHANGE_PRIORITY, &prio);
+            }
+            lwp_pid_lock_release();
             return 0;
         }
+        else
+        {
+            lwp_pid_lock_release();
+        }
     }
 
     return -1;
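Note: the rewritten handlers adopt the POSIX convention that who is a pid, with 0 meaning the calling process, and setpriority now affects every thread of the target process rather than just the calling thread. Standard usage on a POSIX system looks like this (plain libc sketch, independent of this kernel code):

#include <stdio.h>
#include <sys/resource.h>

int main(void)
{
    /* who == 0 targets the calling process, matching the new kernel path;
     * a strict caller clears errno first, since -1 is a valid priority */
    int prio = getpriority(PRIO_PROCESS, 0);
    printf("current priority: %d\n", prio);

    if (setpriority(PRIO_PROCESS, 0, prio) != 0)
        perror("setpriority");
    return 0;
}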
@@ -2790,6 +2747,7 @@ sysret_t sys_execve(const char *path, char *const argv[], char *const envp[])
      * Since no other threads can access the lwp field, it't uneccessary to
      * take a lock here
      */
+    RT_ASSERT(rt_list_entry(lwp->t_grp.prev, struct rt_thread, sibling) == thread);
 
     strncpy(thread->parent.name, run_name + last_backslash, RT_NAME_MAX);
     strncpy(lwp->cmd, new_lwp->cmd, RT_NAME_MAX);
@@ -5560,7 +5518,6 @@ sysret_t sys_sched_getscheduler(int tid, int *policy, void *param)
     struct sched_param *sched_param = RT_NULL;
     rt_thread_t thread = RT_NULL;
 
-
     if (!lwp_user_accessable(param, sizeof(struct sched_param)))
     {
         return -EFAULT;
@@ -48,7 +48,7 @@ typedef uint32_t id_t; /* may contain pid, uid or gid */
 const char *lwp_get_syscall_name(rt_uint32_t number);
 const void *lwp_get_sys_api(rt_uint32_t number);
 
-void sys_exit(int value);
+sysret_t sys_exit(int value);
 sysret_t sys_exit_group(int status);
 ssize_t sys_read(int fd, void *buf, size_t nbyte);
 ssize_t sys_write(int fd, const void *buf, size_t nbyte);
@@ -109,11 +109,67 @@ rt_err_t rt_aspace_anon_ref_dec(rt_mem_obj_t aobj)
     return rc;
 }
 
+void rt_varea_pgmgr_insert(rt_varea_t varea, void *page_addr)
+{
+    /* each mapping of page frame in the varea is binding with a reference */
+    rt_page_ref_inc(page_addr, 0);
+}
+
+/**
+ * Private unmapping of address space
+ */
+static void _pgmgr_pop_all(rt_varea_t varea)
+{
+    rt_aspace_t aspace = varea->aspace;
+    char *iter = varea->start;
+    char *end_addr = iter + varea->size;
+
+    RT_ASSERT(iter < end_addr);
+    RT_ASSERT(!((long)iter & ARCH_PAGE_MASK));
+    RT_ASSERT(!((long)end_addr & ARCH_PAGE_MASK));
+
+    for (; iter != end_addr; iter += ARCH_PAGE_SIZE)
+    {
+        void *page_pa = rt_hw_mmu_v2p(aspace, iter);
+        char *page_va = rt_kmem_p2v(page_pa);
+        if (page_pa != ARCH_MAP_FAILED && page_va)
+        {
+            rt_hw_mmu_unmap(aspace, iter, ARCH_PAGE_SIZE);
+            rt_pages_free(page_va, 0);
+        }
+    }
+}
+
+static void _pgmgr_pop_range(rt_varea_t varea, void *rm_start, void *rm_end)
+{
+    void *page_va;
+
+    RT_ASSERT(!((rt_ubase_t)rm_start & ARCH_PAGE_MASK));
+    RT_ASSERT(!((rt_ubase_t)rm_end & ARCH_PAGE_MASK));
+    while (rm_start != rm_end)
+    {
+        page_va = rt_hw_mmu_v2p(varea->aspace, rm_start);
+
+        if (page_va != ARCH_MAP_FAILED)
+        {
+            page_va -= PV_OFFSET;
+            LOG_D("%s: free page %p", __func__, page_va);
+            rt_varea_unmap_page(varea, rm_start);
+            rt_pages_free(page_va, 0);
+        }
+        rm_start += ARCH_PAGE_SIZE;
+    }
+}
+
 static const char *_anon_get_name(rt_varea_t varea)
 {
     return varea->aspace == _anon_obj_get_backup(varea->mem_obj) ? "anonymous" : "reference";
 }
 
+/**
+ * Migration handler on varea re-construction
+ */
 static void _anon_varea_open(struct rt_varea *varea)
 {
     rt_aspace_anon_ref_inc(varea->mem_obj);
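Note on rt_varea_pgmgr_insert and _pgmgr_pop_all above: every private mapping of a page frame pins it with one reference (rt_page_ref_inc), and tearing a varea down unmaps each page and drops that reference again (rt_pages_free at order 0), so a frame survives exactly until its last mapping is gone. A toy standalone model of that lifetime rule (illustrative names, not the kernel API):

#include <assert.h>
#include <stdio.h>

/* toy page frame: freed when the last mapping reference is dropped
 * (stand-in for rt_page_ref_inc / rt_pages_free) */
struct page { int ref; };

static void page_map(struct page *pg) { pg->ref++; } /* like rt_varea_pgmgr_insert */

static void page_unmap(struct page *pg)              /* like unmap + rt_pages_free */
{
    if (--pg->ref == 0)
        printf("page %p returned to allocator\n", (void *)pg);
}

int main(void)
{
    struct page pg = { 1 };  /* the allocation itself holds one reference */
    page_map(&pg);           /* a private mapping in some varea */
    page_unmap(&pg);         /* varea close: unmap and dereference */
    page_unmap(&pg);         /* last owner drops it: actually freed */
    assert(pg.ref == 0);
    return 0;
}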
@@ -127,7 +183,9 @@ static void _anon_varea_open(struct rt_varea *varea)
 static void _anon_varea_close(struct rt_varea *varea)
 {
     rt_aspace_anon_ref_dec(varea->mem_obj);
-    rt_mm_dummy_mapper.on_varea_close(varea);
+
+    /* unmap and dereference page frames in the varea region */
+    _pgmgr_pop_all(varea);
 }
 
 static rt_err_t _anon_varea_expand(struct rt_varea *varea, void *new_vaddr, rt_size_t size)
@@ -137,21 +195,45 @@ static rt_err_t _anon_varea_expand(struct rt_varea *varea, void *new_vaddr, rt_s
 
 static rt_err_t _anon_varea_shrink(rt_varea_t varea, void *new_start, rt_size_t size)
 {
-    return rt_mm_dummy_mapper.on_varea_shrink(varea, new_start, size);
+    char *varea_start = varea->start;
+    void *rm_start;
+    void *rm_end;
+
+    if (varea_start == (char *)new_start)
+    {
+        rm_start = varea_start + size;
+        rm_end = varea_start + varea->size;
+    }
+    else /* if (varea_start < (char *)new_start) */
+    {
+        RT_ASSERT(varea_start < (char *)new_start);
+        rm_start = varea_start;
+        rm_end = new_start;
+    }
+
+    _pgmgr_pop_range(varea, rm_start, rm_end);
+    return RT_EOK;
 }
 
 static rt_err_t _anon_varea_split(struct rt_varea *existed, void *unmap_start, rt_size_t unmap_len, struct rt_varea *subset)
 {
+    /* remove the resource in the unmap region, and do nothing for the subset */
+    _pgmgr_pop_range(existed, unmap_start, (char *)unmap_start + unmap_len);
+
     _anon_varea_open(subset);
-    return rt_mm_dummy_mapper.on_varea_split(existed, unmap_start, unmap_len, subset);
+    return RT_EOK;
 }
 
 static rt_err_t _anon_varea_merge(struct rt_varea *merge_to, struct rt_varea *merge_from)
 {
-    _anon_varea_close(merge_from);
-    return rt_mm_dummy_mapper.on_varea_merge(merge_to, merge_from);
+    /* do nothing for the varea merge */
+    return RT_EOK;
 }
 
+/**
+ * Private mapping of address space
+ */
+
 rt_inline void _map_page_in_varea(rt_aspace_t asapce, rt_varea_t varea,
                                   struct rt_aspace_fault_msg *msg, char *fault_addr)
 {
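Note: _anon_varea_shrink above keeps the range starting at new_start (size here is the size kept) and releases the complement, so the freed range is either the old tail, when the start is unchanged, or the old head. A standalone sketch of just that range computation (illustrative, mirrors the two branches):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE 4096u

/* Varea [start, start+size) shrinks to keep [new_start, new_start+keep);
 * compute the page range [rm_start, rm_end) to release. */
static void shrink_range(uintptr_t start, size_t size,
                         uintptr_t new_start, size_t keep,
                         uintptr_t *rm_start, uintptr_t *rm_end)
{
    if (start == new_start) /* the tail is cut off */
    {
        *rm_start = start + keep;
        *rm_end   = start + size;
    }
    else                    /* the head is cut off */
    {
        assert(start < new_start);
        *rm_start = start;
        *rm_end   = new_start;
    }
}

int main(void)
{
    uintptr_t s, e;
    shrink_range(0x1000, 4 * PAGE, 0x1000, 2 * PAGE, &s, &e);            /* keep head */
    printf("release [%#lx, %#lx)\n", (unsigned long)s, (unsigned long)e);
    shrink_range(0x1000, 4 * PAGE, 0x1000 + 2 * PAGE, 2 * PAGE, &s, &e); /* keep tail */
    printf("release [%#lx, %#lx)\n", (unsigned long)s, (unsigned long)e);
    return 0;
}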
@@ -168,6 +250,7 @@ rt_inline void _map_page_in_varea(rt_aspace_t asapce, rt_varea_t varea,
     }
 }
 
+/* page frame inquiry or allocation in backup address space */
 static void *_get_page_from_backup(rt_aspace_t backup, rt_base_t offset_in_mobj)
 {
     void *frame_pa;
@@ -12,7 +12,7 @@
 
 #define DBG_TAG "mm.object"
 #define DBG_LVL DBG_INFO
-#include "rtdbg.h"
+#include <rtdbg.h>
 
 #include <rtthread.h>
 
@@ -31,31 +31,6 @@ static const char *get_name(rt_varea_t varea)
     return "dummy-mapper";
 }
 
-void rt_varea_pgmgr_insert(rt_varea_t varea, void *page_addr)
-{
-    /* each mapping of page frame in the varea is binding with a reference */
-    rt_page_ref_inc(page_addr, 0);
-}
-
-/* resource recycling of page frames */
-void rt_varea_pgmgr_pop_all(rt_varea_t varea)
-{
-    rt_aspace_t aspace = varea->aspace;
-    char *end_addr = varea->start + varea->size;
-    RT_ASSERT(!((long)end_addr & ARCH_PAGE_MASK));
-
-    for (char *iter = varea->start; iter != end_addr; iter += ARCH_PAGE_SIZE)
-    {
-        void *page_pa = rt_hw_mmu_v2p(aspace, iter);
-        char *page_va = rt_kmem_p2v(page_pa);
-        if (page_pa != ARCH_MAP_FAILED && page_va)
-        {
-            rt_hw_mmu_unmap(aspace, iter, ARCH_PAGE_SIZE);
-            rt_pages_free(page_va, 0);
-        }
-    }
-}
-
 static void on_page_fault(struct rt_varea *varea, struct rt_aspace_fault_msg *msg)
 {
     void *page;
@@ -79,8 +54,6 @@ static void on_varea_open(struct rt_varea *varea)
 
 static void on_varea_close(struct rt_varea *varea)
 {
-    /* unmap and dereference page frames in the varea region */
-    rt_varea_pgmgr_pop_all(varea);
 }
 
 static rt_err_t on_varea_expand(struct rt_varea *varea, void *new_vaddr, rt_size_t size)
@@ -88,60 +61,18 @@ static rt_err_t on_varea_expand(struct rt_varea *varea, void *new_vaddr, rt_size
     return RT_EOK;
 }
 
-static void _remove_pages(rt_varea_t varea, void *rm_start, void *rm_end)
-{
-    void *page_va;
-
-    RT_ASSERT(!((rt_ubase_t)rm_start & ARCH_PAGE_MASK));
-    RT_ASSERT(!((rt_ubase_t)rm_end & ARCH_PAGE_MASK));
-    while (rm_start != rm_end)
-    {
-        page_va = rt_hw_mmu_v2p(varea->aspace, rm_start);
-
-        if (page_va != ARCH_MAP_FAILED)
-        {
-            page_va -= PV_OFFSET;
-            LOG_D("%s: free page %p", __func__, page_va);
-            rt_varea_unmap_page(varea, rm_start);
-            rt_pages_free(page_va, 0);
-        }
-        rm_start += ARCH_PAGE_SIZE;
-    }
-}
-
 static rt_err_t on_varea_shrink(rt_varea_t varea, void *new_start, rt_size_t size)
 {
-    char *varea_start = varea->start;
-    void *rm_start;
-    void *rm_end;
-
-    if (varea_start == (char *)new_start)
-    {
-        rm_start = varea_start + size;
-        rm_end = varea_start + varea->size;
-    }
-    else /* if (varea_start < (char *)new_start) */
-    {
-        RT_ASSERT(varea_start < (char *)new_start);
-        rm_start = varea_start;
-        rm_end = new_start;
-    }
-
-    _remove_pages(varea, rm_start, rm_end);
     return RT_EOK;
 }
 
 static rt_err_t on_varea_split(struct rt_varea *existed, void *unmap_start, rt_size_t unmap_len, struct rt_varea *subset)
 {
-    /* remove the resource in the unmap region, and do nothing for the subset */
-    _remove_pages(existed, unmap_start, (char *)unmap_start + unmap_len);
-
     return RT_EOK;
 }
 
 static rt_err_t on_varea_merge(struct rt_varea *merge_to, struct rt_varea *merge_from)
 {
     /* do nothing for the migration */
     return RT_EOK;
 }
@@ -863,8 +863,7 @@ int rt_page_install(rt_region_t region)
 {
     int err = -RT_EINVAL;
     if (region.end != region.start && !(region.start & ARCH_PAGE_MASK) &&
-        !(region.end & ARCH_PAGE_MASK) &&
-        !((region.end - region.start) & shadow_mask))
+        !(region.end & ARCH_PAGE_MASK))
     {
         void *head = addr_to_page(page_start, (void *)region.start);
         void *tail = addr_to_page(page_start, (void *)region.end);
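Note: the relaxed check above only requires a non-empty, page-aligned region; the extra requirement that the region length be a multiple of the shadow block size (the shadow_mask test) is dropped. A standalone sketch of the remaining validity test (the ARCH_PAGE_MASK value is assumed here for 4 KiB pages):

#include <stdbool.h>
#include <stdio.h>

#define ARCH_PAGE_MASK 0xFFFul /* illustrative: 4 KiB pages */

/* non-empty and page-aligned at both ends; no shadow-size multiple needed */
static bool region_ok(unsigned long start, unsigned long end)
{
    return end != start && !(start & ARCH_PAGE_MASK) && !(end & ARCH_PAGE_MASK);
}

int main(void)
{
    printf("%d\n", region_ok(0x100000, 0x103000)); /* 3 pages: now accepted */
    printf("%d\n", region_ok(0x100000, 0x100800)); /* misaligned end: rejected */
    return 0;
}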
@@ -105,8 +105,6 @@ void _aspace_bst_insert(struct rt_aspace *aspace, struct rt_varea *varea);
  */
 void _aspace_bst_remove(struct rt_aspace *aspace, struct rt_varea *varea);
 
-void rt_varea_pgmgr_pop_all(rt_varea_t varea);
-
 int rt_varea_fix_private_locked(rt_varea_t ex_varea, void *pa,
                                 struct rt_aspace_fault_msg *msg,
                                 rt_bool_t dont_copy);
@@ -67,7 +67,7 @@ static void test_user_map_varea(void)
     uassert_true(varea->start != 0);
     uassert_true(varea->start >= (void *)USER_VADDR_START && varea->start < (void *)USER_VADDR_TOP);
 
-    uassert_true(!lwp_ref_dec(lwp));
+    uassert_true(!(lwp_ref_dec(lwp) - 1));
 }
 
 static void test_user_map_varea_ext(void)
@@ -91,7 +91,7 @@ static void test_user_map_varea_ext(void)
     uassert_true(varea->start != 0);
     uassert_true(varea->start >= (void *)USER_VADDR_START && varea->start < (void *)USER_VADDR_TOP);
 
-    uassert_true(!lwp_ref_dec(lwp));
+    uassert_true(!(lwp_ref_dec(lwp) - 1));
 }
 
 static void user_map_varea_tc(void)
@@ -12,9 +12,9 @@ bsp_path = Dir('#').abspath
 
 if not os.path.exists(bsp_path + "/link.lds"):
     Env['LINKFLAGS'] = Env['LINKFLAGS'].replace('link.lds', cwd + "/link.lds")
-    # fix the linker with crtx.o
     Preprocessing("link.lds.S", ".lds", CPPPATH=[bsp_path])
 
+# fix the linker with crtx.o
 Env['LINKFLAGS'] += ' -nostartfiles'
 
 # add common code files
@@ -373,6 +373,8 @@ void rt_hw_trap_exception(struct rt_hw_exp_stack *regs)
 #endif
 
 #ifdef RT_USING_LWP
+    /* restore normal execution environment */
+    __asm__ volatile("msr daifclr, 0x3\ndmb ishst\nisb\n");
     _check_fault(regs, 0, "user fault");
 #endif
 
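Note: on AArch64, "msr daifclr, 0x3" clears the I and F bits of the DAIF mask, re-enabling IRQ and FIQ; "dmb ishst" then orders prior stores within the inner-shareable domain and "isb" flushes the pipeline, so the user fault is reported from a normal, interruptible execution environment rather than with exceptions still masked.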