Sync dfs lwp (#8123)

This commit is contained in:
geniusgogo
2023-10-17 13:07:59 +08:00
committed by GitHub
parent 7d64cdcf58
commit ecd29fda60
79 changed files with 7410 additions and 729 deletions

View File

@@ -193,6 +193,40 @@ if RT_USING_DFS_V1
endif
endif
if RT_USING_DFS_V2
config RT_USING_PAGECACHE
bool "Enable page cache"
default n
if RT_USING_PAGECACHE
menu "page cache config"
config RT_PAGECACHE_COUNT
int "page cache max total pages."
default 4096
config RT_PAGECACHE_ASPACE_COUNT
int "aspace max active pages."
default 1024
config RT_PAGECACHE_PRELOAD
int "max pre load pages."
default 4
config RT_PAGECACHE_HASH_NR
int "page cache hash size."
default 1024
config RT_PAGECACHE_GC_WORK_LEVEL
int "page cache gc work trigger min percentage, default 90%."
default 90
config RT_PAGECACHE_GC_STOP_LEVEL
int "page cache gc to min percentage, default 70%."
default 70
endmenu
endif
endif
endif
endmenu

View File

@@ -286,11 +286,10 @@ int dfs_device_fs_unlink(struct dfs_filesystem *fs, const char *path)
int dfs_device_fs_stat(struct dfs_filesystem *fs, const char *path, struct stat *st)
{
st->st_dev = (dev_t)((size_t)dfs_filesystem_lookup(fs->path));
/* stat root directory */
if ((path[0] == '/') && (path[1] == '\0'))
{
st->st_dev = 0;
st->st_mode = S_IFREG | S_IRUSR | S_IRGRP | S_IROTH |
S_IWUSR | S_IWGRP | S_IWOTH;
st->st_mode &= ~S_IFREG;
@@ -308,8 +307,6 @@ int dfs_device_fs_stat(struct dfs_filesystem *fs, const char *path, struct stat
dev_id = rt_device_find(&path[1]);
if (dev_id != RT_NULL)
{
st->st_dev = 0;
st->st_mode = S_IRUSR | S_IRGRP | S_IROTH |
S_IWUSR | S_IWGRP | S_IWOTH;

View File

@@ -198,9 +198,11 @@ int dfs_tmpfs_ioctl(struct dfs_file *file, int cmd, void *args)
{
return -RT_ENOMEM;
}
else if (mmap2->lwp == RT_NULL)
return -RT_EINVAL;
LOG_D("tmpfile mmap ptr:%x , size:%d\n", d_file->data, mmap2->length);
mmap2->ret = lwp_map_user_phy(lwp_self(), RT_NULL, d_file->data, mmap2->length, 0);
mmap2->ret = lwp_map_user_phy(mmap2->lwp, mmap2->addr, d_file->data, mmap2->length, 0);
}
return RT_EOK;
break;

View File

@@ -64,6 +64,7 @@ struct dfs_file
void *data; /* Specific fd data */
};
#ifdef RT_USING_SMART
struct dfs_mmap2_args
{
void *addr;
@@ -72,8 +73,10 @@ struct dfs_mmap2_args
int flags;
off_t pgoffset;
struct rt_lwp *lwp;
void *ret;
};
#endif
void dfs_vnode_mgr_init(void);
int dfs_vnode_init(struct dfs_vnode *vnode, int type, const struct dfs_file_ops *fops);
@@ -92,8 +95,9 @@ off_t dfs_file_lseek(struct dfs_file *fd, off_t offset);
int dfs_file_stat(const char *path, struct stat *buf);
int dfs_file_rename(const char *oldpath, const char *newpath);
int dfs_file_ftruncate(struct dfs_file *fd, off_t length);
#ifdef RT_USING_SMART
int dfs_file_mmap2(struct dfs_file *fd, struct dfs_mmap2_args *mmap2);
#endif
/* 0x5254 is just a magic number to make these relatively unique ("RT") */
#define RT_FIOFTRUNCATE 0x52540000U
#define RT_FIOGETADDR 0x52540001U

View File

@@ -713,6 +713,7 @@ int dfs_file_ftruncate(struct dfs_file *fd, off_t length)
return result;
}
#ifdef RT_USING_SMART
int dfs_file_mmap2(struct dfs_file *fd, struct dfs_mmap2_args *mmap2)
{
int ret = 0;
@@ -736,6 +737,7 @@ int dfs_file_mmap2(struct dfs_file *fd, struct dfs_mmap2_args *mmap2)
return ret;
}
#endif
#ifdef RT_USING_FINSH
#include <finsh.h>

View File

@@ -2,20 +2,12 @@ from building import *
import os
# The set of source files associated with this SConscript file.
src = Split('''
src/dfs.c
src/dfs_file.c
src/dfs_fs.c
src/dfs_dentry.c
src/dfs_vnode.c
src/dfs_mnt.c
src/dfs_posix.c
''')
src = Glob('src/*.c') + Glob('src/*.cpp')
cwd = GetCurrentDir()
CPPPATH = [cwd + "/include"]
if GetDepend('RT_USING_POSIX'):
src += ['src/poll.c', 'src/select.c']
if not GetDepend('RT_USING_SMART'):
SrcRemove(src, ['src/dfs_file_mmap.c'])
group = DefineGroup('Filesystem', src, depend = ['RT_USING_DFS', 'RT_USING_DFS_V2'], CPPPATH = CPPPATH)

View File

@@ -32,6 +32,24 @@
#include <dfs_file.h>
#include <dfs_mnt.h>
#ifdef RT_USING_PAGECACHE
#include "dfs_pcache.h"
#endif
static int dfs_elm_free_vnode(struct dfs_vnode *vnode);
#ifdef RT_USING_PAGECACHE
static ssize_t dfs_elm_page_read(struct dfs_file *file, struct dfs_page *page);
static ssize_t dfs_elm_page_write(struct dfs_page *page);
static struct dfs_aspace_ops dfs_elm_aspace_ops =
{
.read = dfs_elm_page_read,
.write = dfs_elm_page_write,
};
#endif
#undef SS
#if FF_MAX_SS == FF_MIN_SS
#define SS(fs) ((UINT)FF_MAX_SS) /* Fixed sector size */
@@ -361,7 +379,7 @@ int dfs_elm_open(struct dfs_file *file)
extern int elm_get_vol(FATFS * fat);
RT_ASSERT(file->vnode->ref_count > 0);
if (file->vnode->ref_count > 1)
if (file->vnode->data)
{
if (file->vnode->type == FT_DIRECTORY
&& !(file->flags & O_DIRECTORY))
@@ -425,6 +443,7 @@ int dfs_elm_open(struct dfs_file *file)
}
file->vnode->data = dir;
rt_mutex_init(&file->vnode->lock, file->dentry->pathname, RT_IPC_FLAG_PRIO);
return RT_EOK;
}
else
@@ -465,6 +484,7 @@ int dfs_elm_open(struct dfs_file *file)
file->vnode->size = f_size(fd);
file->vnode->type = FT_REGULAR;
file->vnode->data = fd;
rt_mutex_init(&file->vnode->lock, file->dentry->pathname, RT_IPC_FLAG_PRIO);
if (file->flags & O_APPEND)
{
@@ -516,6 +536,9 @@ int dfs_elm_close(struct dfs_file *file)
rt_free(fd);
}
file->vnode->data = RT_NULL;
rt_mutex_detach(&file->vnode->lock);
return elm_result_to_dfs(result);
}
@@ -558,7 +581,7 @@ int dfs_elm_ioctl(struct dfs_file *file, int cmd, void *args)
ssize_t dfs_elm_read(struct dfs_file *file, void *buf, size_t len, off_t *pos)
{
FIL *fd;
FRESULT result;
FRESULT result = FR_OK;
UINT byte_read;
if (file->vnode->type == FT_DIRECTORY)
@@ -566,14 +589,19 @@ ssize_t dfs_elm_read(struct dfs_file *file, void *buf, size_t len, off_t *pos)
return -EISDIR;
}
fd = (FIL *)(file->vnode->data);
RT_ASSERT(fd != RT_NULL);
result = f_read(fd, buf, len, &byte_read);
/* update position */
*pos = fd->fptr;
if (result == FR_OK)
return byte_read;
if (file->vnode->size > *pos)
{
fd = (FIL *)(file->vnode->data);
RT_ASSERT(fd != RT_NULL);
rt_mutex_take(&file->vnode->lock, RT_WAITING_FOREVER);
f_lseek(fd, *pos);
result = f_read(fd, buf, len, &byte_read);
/* update position */
*pos = fd->fptr;
rt_mutex_release(&file->vnode->lock);
if (result == FR_OK)
return byte_read;
}
return elm_result_to_dfs(result);
}
@@ -591,11 +619,13 @@ ssize_t dfs_elm_write(struct dfs_file *file, const void *buf, size_t len, off_t
fd = (FIL *)(file->vnode->data);
RT_ASSERT(fd != RT_NULL);
rt_mutex_take(&file->vnode->lock, RT_WAITING_FOREVER);
f_lseek(fd, *pos);
result = f_write(fd, buf, len, &byte_write);
/* update position and file size */
*pos = fd->fptr;
file->vnode->size = f_size(fd);
rt_mutex_release(&file->vnode->lock);
if (result == FR_OK)
return byte_write;
@@ -642,8 +672,9 @@ off_t dfs_elm_lseek(struct dfs_file *file, off_t offset, int wherece)
/* regular file type */
fd = (FIL *)(file->vnode->data);
RT_ASSERT(fd != RT_NULL);
rt_mutex_take(&file->vnode->lock, RT_WAITING_FOREVER);
result = f_lseek(fd, offset);
rt_mutex_release(&file->vnode->lock);
if (result == FR_OK)
{
/* return current position */
@@ -657,8 +688,9 @@ off_t dfs_elm_lseek(struct dfs_file *file, off_t offset, int wherece)
dir = (DIR *)(file->vnode->data);
RT_ASSERT(dir != RT_NULL);
rt_mutex_take(&file->vnode->lock, RT_WAITING_FOREVER);
result = f_seekdir(dir, offset / sizeof(struct dirent));
rt_mutex_release(&file->vnode->lock);
if (result == FR_OK)
{
/* update file position */
@@ -827,21 +859,35 @@ int dfs_elm_stat(struct dfs_dentry *dentry, struct stat *st)
st->st_dev = (dev_t)(size_t)(dentry->mnt->dev_id);
st->st_ino = (ino_t)dfs_dentry_full_path_crc32(dentry);
st->st_mode = S_IFREG | S_IRUSR | S_IRGRP | S_IROTH |
S_IWUSR | S_IWGRP | S_IWOTH;
if (file_info.fattrib & AM_DIR)
{
st->st_mode &= ~S_IFREG;
st->st_mode |= S_IFDIR | S_IXUSR | S_IXGRP | S_IXOTH;
st->st_mode = S_IFDIR | (S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH);
}
else
{
st->st_mode = S_IFREG | (S_IRWXU | S_IRWXG | S_IRWXO);
}
if (file_info.fattrib & AM_RDO)
st->st_mode &= ~(S_IWUSR | S_IWGRP | S_IWOTH);
st->st_size = file_info.fsize;
if (S_IFDIR & st->st_mode)
{
st->st_size = file_info.fsize;
}
else
{
#ifdef RT_USING_PAGECACHE
st->st_size = (dentry->vnode && dentry->vnode->aspace) ? dentry->vnode->size : file_info.fsize;
#else
st->st_size = file_info.fsize;
#endif
}
st->st_blksize = fat->csize * SS(fat);
if (file_info.fattrib & AM_ARC)
{
st->st_blocks = file_info.fsize ? ((file_info.fsize - 1) / SS(fat) / fat->csize + 1) : 0;
st->st_blocks = st->st_size ? ((st->st_size - 1) / SS(fat) / fat->csize + 1) : 0;
st->st_blocks *= (st->st_blksize / 512); // man say st_blocks is number of 512B blocks allocated
}
else
@@ -901,20 +947,23 @@ static struct dfs_vnode *dfs_elm_lookup(struct dfs_dentry *dentry)
vnode = dfs_vnode_create();
if (vnode)
{
vnode->mnt = dentry->mnt;
vnode->size = st.st_size;
vnode->data = NULL;
if (S_ISDIR(st.st_mode))
{
vnode->mode = S_IFDIR | (S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH);
vnode->mode = S_IFDIR | (S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH);
vnode->type = FT_DIRECTORY;
}
else
{
vnode->mode = S_IFREG | S_IXUSR | (S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH);
vnode->mode = S_IFREG | (S_IRWXU | S_IRWXG | S_IRWXO);
vnode->type = FT_REGULAR;
#ifdef RT_USING_PAGECACHE
vnode->aspace = dfs_aspace_create(dentry, vnode, &dfs_elm_aspace_ops);
#endif
}
vnode->mnt = dentry->mnt;
vnode->data = NULL;
vnode->size = 0;
}
return vnode;
@@ -934,13 +983,18 @@ static struct dfs_vnode *dfs_elm_create_vnode(struct dfs_dentry *dentry, int typ
{
if (type == FT_DIRECTORY)
{
vnode->mode = S_IFDIR | mode;
/* fat directory force mode 0555 */
vnode->mode = S_IFDIR | (S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH);
vnode->type = FT_DIRECTORY;
}
else
{
vnode->mode = S_IFREG | mode;
/* fat REGULAR file mode force mode 0777 */
vnode->mode = S_IFREG | (S_IRWXU | S_IRWXG | S_IRWXO);
vnode->type = FT_REGULAR;
#ifdef RT_USING_PAGECACHE
vnode->aspace = dfs_aspace_create(dentry, vnode, &dfs_elm_aspace_ops);
#endif
}
vnode->mnt = dentry->mnt;
@@ -962,6 +1016,46 @@ static int dfs_elm_free_vnode(struct dfs_vnode *vnode)
return 0;
}
#ifdef RT_USING_PAGECACHE
static ssize_t dfs_elm_page_read(struct dfs_file *file, struct dfs_page *page)
{
int ret = -EINVAL;
if (page->page)
{
off_t fpos = page->fpos;
ret = dfs_elm_read(file, page->page, page->size, &fpos);
}
return ret;
}
/* Page-cache write-back callback: flush one dirty cache page to the FAT
 * file owning it.
 *
 * Returns the number of bytes written on success, -EISDIR for directory
 * vnodes, or a dfs error code translated from the FatFs result.
 *
 * The vnode lock serializes against dfs_elm_read/dfs_elm_write, which use
 * the same seek-then-access sequence on the shared FIL handle. */
ssize_t dfs_elm_page_write(struct dfs_page *page)
{
    FIL *fd;
    FRESULT result;
    UINT byte_write = 0;

    if (page->aspace->vnode->type == FT_DIRECTORY)
    {
        return -EISDIR;
    }

    fd = (FIL *)(page->aspace->vnode->data);
    RT_ASSERT(fd != RT_NULL);

    rt_mutex_take(&page->aspace->vnode->lock, RT_WAITING_FOREVER);
    /* Check the seek result: writing after a failed f_lseek() would land at
     * whatever position the handle was left at and corrupt the file. */
    result = f_lseek(fd, page->fpos);
    if (result == FR_OK)
    {
        result = f_write(fd, page->page, page->len, &byte_write);
    }
    rt_mutex_release(&page->aspace->vnode->lock);

    if (result == FR_OK)
    {
        return byte_write;
    }

    return elm_result_to_dfs(result);
}
#endif
static const struct dfs_file_ops dfs_elm_fops =
{
.open = dfs_elm_open,

View File

@@ -149,7 +149,7 @@ static struct dfs_vnode *dfs_mqueue_create_vnode(struct dfs_dentry *dentry, int
dfs_mqueue_insert_after(&(mq_file->list));
}
vnode->mode = S_IFREG | mode;
vnode->mode = S_IFREG | (S_IRWXU | S_IRWXG | S_IRWXO);
vnode->type = FT_REGULAR;
rt_mq_t mq = rt_mq_create(dentry->pathname + 1, mq_file->msg_size, mq_file->max_msgs,
RT_IPC_FLAG_FIFO);
@@ -191,7 +191,7 @@ struct dfs_vnode *_dfs_mqueue_lookup(struct dfs_dentry *dentry) {
vnode = dfs_vnode_create();
if (mq_file && mq_file->data) {
vnode->mode = S_IFREG | S_IRUSR | S_IWUSR | S_IXUSR;
vnode->mode = S_IFREG | (S_IRWXU | S_IRWXG | S_IRWXO);
vnode->type = FT_REGULAR;
vnode->mnt = dentry->mnt;
vnode->data = mq_file;
@@ -202,7 +202,7 @@ struct dfs_vnode *_dfs_mqueue_lookup(struct dfs_dentry *dentry) {
vnode->fops = &_mqueue_fops;
vnode->mnt = dentry->mnt;
vnode->type = FT_DIRECTORY;
vnode->mode = S_IFDIR | S_IRUSR | S_IWUSR | S_IXUSR;
vnode->mode = S_IFDIR | (S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH);
}
return vnode;
}

View File

@@ -191,12 +191,12 @@ static struct dfs_vnode *dfs_romfs_lookup (struct dfs_dentry *dentry)
vnode->size = dirent->size;
if (dirent->type == ROMFS_DIRENT_DIR)
{
vnode->mode = romfs_modemap[ROMFS_DIRENT_DIR] | S_IRUSR;
vnode->mode = romfs_modemap[ROMFS_DIRENT_DIR] | (S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH);
vnode->type = FT_DIRECTORY;
}
else if (dirent->type == ROMFS_DIRENT_FILE)
{
vnode->mode = romfs_modemap[ROMFS_DIRENT_FILE] | S_IRUSR | S_IXUSR;
vnode->mode = romfs_modemap[ROMFS_DIRENT_FILE] | (S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH);
vnode->type = FT_REGULAR;
}

View File

@@ -28,6 +28,20 @@
#define DBG_TAG "tmpfs"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
#ifdef RT_USING_PAGECACHE
#include "dfs_pcache.h"
#endif
#ifdef RT_USING_PAGECACHE
static ssize_t dfs_tmp_page_read(struct dfs_file *file, struct dfs_page *page);
static ssize_t dfs_tmp_page_write(struct dfs_page *page);
static struct dfs_aspace_ops dfs_tmp_aspace_ops =
{
.read = dfs_tmp_page_read,
.write = dfs_tmp_page_write,
};
#endif
static int _path_separate(const char *path, char *parent_path, char *file_name)
{
@@ -285,6 +299,8 @@ static ssize_t dfs_tmpfs_read(struct dfs_file *file, void *buf, size_t count, of
d_file = (struct tmpfs_file *)file->vnode->data;
RT_ASSERT(d_file != NULL);
rt_mutex_take(&file->vnode->lock, RT_WAITING_FOREVER);
if (count < file->vnode->size - *pos)
length = count;
else
@@ -296,6 +312,8 @@ static ssize_t dfs_tmpfs_read(struct dfs_file *file, void *buf, size_t count, of
/* update file current position */
*pos += length;
rt_mutex_release(&file->vnode->lock);
return length;
}
@@ -310,12 +328,15 @@ static ssize_t dfs_tmpfs_write(struct dfs_file *file, const void *buf, size_t co
superblock = d_file->sb;
RT_ASSERT(superblock != NULL);
rt_mutex_take(&file->vnode->lock, RT_WAITING_FOREVER);
if (count + *pos > file->vnode->size)
{
rt_uint8_t *ptr;
ptr = rt_realloc(d_file->data, *pos + count);
if (ptr == NULL)
{
rt_mutex_release(&file->vnode->lock);
rt_set_errno(-ENOMEM);
return 0;
}
@@ -335,6 +356,7 @@ static ssize_t dfs_tmpfs_write(struct dfs_file *file, const void *buf, size_t co
/* update file current position */
*pos += count;
rt_mutex_release(&file->vnode->lock);
return count;
}
@@ -371,6 +393,7 @@ static int dfs_tmpfs_close(struct dfs_file *file)
struct tmpfs_file *d_file;
RT_ASSERT(file->vnode->ref_count > 0);
if (file->vnode->ref_count != 1)
return 0;
@@ -390,6 +413,8 @@ static int dfs_tmpfs_close(struct dfs_file *file)
rt_free(d_file);
}
rt_mutex_detach(&file->vnode->lock);
return RT_EOK;
}
@@ -424,6 +449,12 @@ static int dfs_tmpfs_open(struct dfs_file *file)
file->fpos = 0;
}
RT_ASSERT(file->vnode->ref_count > 0);
if(file->vnode->ref_count == 1)
{
rt_mutex_init(&file->vnode->lock, file->dentry->pathname, RT_IPC_FLAG_PRIO);
}
return 0;
}
@@ -442,12 +473,13 @@ static int dfs_tmpfs_stat(struct dfs_dentry *dentry, struct stat *st)
st->st_dev = (dev_t)(size_t)(dentry->mnt->dev_id);
st->st_ino = (ino_t)dfs_dentry_full_path_crc32(dentry);
st->st_mode = S_IFREG | S_IRUSR | S_IRGRP | S_IROTH |
S_IWUSR | S_IWGRP | S_IWOTH;
if (d_file->type == TMPFS_TYPE_DIR)
{
st->st_mode &= ~S_IFREG;
st->st_mode |= S_IFDIR | S_IXUSR | S_IXGRP | S_IXOTH;
st->st_mode = S_IFDIR | (S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH);
}
else
{
st->st_mode = S_IFREG | (S_IRWXU | S_IRWXG | S_IRWXO);
}
st->st_size = d_file->size;
@@ -468,13 +500,18 @@ static int dfs_tmpfs_getdents(struct dfs_file *file,
d_file = (struct tmpfs_file *)file->vnode->data;
rt_mutex_take(&file->vnode->lock, RT_WAITING_FOREVER);
superblock = d_file->sb;
RT_ASSERT(superblock != RT_NULL);
/* make integer count */
count = (count / sizeof(struct dirent));
if (count == 0)
{
rt_mutex_release(&file->vnode->lock);
return -EINVAL;
}
end = file->fpos + count;
index = 0;
@@ -507,6 +544,7 @@ static int dfs_tmpfs_getdents(struct dfs_file *file,
break;
}
}
rt_mutex_release(&file->vnode->lock);
return count * sizeof(struct dirent);
}
@@ -607,13 +645,16 @@ static struct dfs_vnode *_dfs_tmpfs_lookup(struct dfs_dentry *dentry)
{
if (d_file->type == TMPFS_TYPE_DIR)
{
vnode->mode = S_IFDIR | S_IRUSR | S_IWUSR | S_IXUSR;
vnode->mode = S_IFDIR | (S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH);
vnode->type = FT_DIRECTORY;
}
else
{
vnode->mode = S_IFREG | S_IRUSR | S_IWUSR | S_IXUSR;
vnode->mode = S_IFREG | (S_IRWXU | S_IRWXG | S_IRWXO);
vnode->type = FT_REGULAR;
#ifdef RT_USING_PAGECACHE
vnode->aspace = dfs_aspace_create(dentry, vnode, &dfs_tmp_aspace_ops);
#endif
}
vnode->mnt = dentry->mnt;
@@ -681,14 +722,17 @@ static struct dfs_vnode *dfs_tmpfs_create_vnode(struct dfs_dentry *dentry, int t
if (type == FT_DIRECTORY)
{
d_file->type = TMPFS_TYPE_DIR;
vnode->mode = S_IFDIR | mode;
vnode->mode = S_IFDIR | (S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH);
vnode->type = FT_DIRECTORY;
}
else
{
d_file->type = TMPFS_TYPE_FILE;
vnode->mode = S_IFREG | mode;
vnode->mode = S_IFREG | (S_IRWXU | S_IRWXG | S_IRWXO);
vnode->type = FT_REGULAR;
#ifdef RT_USING_PAGECACHE
vnode->aspace = dfs_aspace_create(dentry, vnode, &dfs_tmp_aspace_ops);
#endif
}
rt_spin_lock(&superblock->lock);
rt_list_insert_after(&(p_file->subdirs), &(d_file->sibling));
@@ -713,6 +757,41 @@ static int dfs_tmpfs_free_vnode(struct dfs_vnode *vnode)
return 0;
}
#ifdef RT_USING_PAGECACHE
static ssize_t dfs_tmp_page_read(struct dfs_file *file, struct dfs_page *page)
{
int ret = -EINVAL;
if (page->page)
{
off_t fpos = page->fpos;
ret = dfs_tmpfs_read(file, page->page, page->size, &fpos);
}
return ret;
}
ssize_t dfs_tmp_page_write(struct dfs_page *page)
{
struct tmpfs_file *d_file;
if (page->aspace->vnode->type == FT_DIRECTORY)
{
return -EISDIR;
}
d_file = (struct tmpfs_file *)(page->aspace->vnode->data);
RT_ASSERT(d_file != RT_NULL);
rt_mutex_take(&page->aspace->vnode->lock, RT_WAITING_FOREVER);
if (page->len > 0)
memcpy(d_file->data + page->fpos, page->page, page->len);
rt_mutex_release(&page->aspace->vnode->lock);
return F_OK;
}
#endif
static int dfs_tmpfs_truncate(struct dfs_file *file, off_t offset)
{
struct tmpfs_file *d_file = RT_NULL;

View File

@@ -33,6 +33,7 @@ struct rt_pollreq;
struct dirent;
struct lwp_avl_struct;
struct file_lock;
struct dfs_aspace;
struct dfs_file_ops
{
@@ -73,6 +74,9 @@ struct dfs_vnode
struct timespec mtime;
struct timespec ctime;
struct dfs_aspace *aspace;
struct rt_mutex lock;
void *data; /* private data of this file system */
};
@@ -93,6 +97,8 @@ struct dfs_file
struct dfs_dentry *dentry; /* dentry of this file */
struct dfs_vnode *vnode; /* vnode of this file */
void *mmap_context; /* used by mmap routine */
void *data;
};
@@ -122,7 +128,7 @@ struct dfs_vnode *dfs_vnode_ref(struct dfs_vnode *vnode);
void dfs_vnode_unref(struct dfs_vnode *vnode);
/*dfs_file.c*/
#ifdef RT_USING_SMART
struct dfs_mmap2_args
{
void *addr;
@@ -131,8 +137,10 @@ struct dfs_mmap2_args
int flags;
off_t pgoffset;
struct rt_lwp *lwp;
void *ret;
};
#endif
void dfs_file_init(struct dfs_file *file);
void dfs_file_deinit(struct dfs_file *file);
@@ -166,8 +174,13 @@ int dfs_file_isdir(const char *path);
int dfs_file_access(const char *path, mode_t mode);
int dfs_file_chdir(const char *path);
char *dfs_file_getcwd(char *buf, size_t size);
#ifdef RT_USING_SMART
int dfs_file_mmap2(struct dfs_file *file, struct dfs_mmap2_args *mmap2);
int dfs_file_mmap(struct dfs_file *file, struct dfs_mmap2_args *mmap2);
#endif
/* 0x5254 is just a magic number to make these relatively unique ("RT") */
#define RT_FIOFTRUNCATE 0x52540000U
#define RT_FIOGETADDR 0x52540001U

View File

@@ -53,6 +53,7 @@ int dfs_mnt_destroy(struct dfs_mnt* mnt);
int dfs_mnt_list(struct dfs_mnt* mnt);
int dfs_mnt_insert(struct dfs_mnt* mnt, struct dfs_mnt* child);
struct dfs_mnt *dfs_mnt_dev_lookup(rt_device_t dev_id);
struct dfs_mnt *dfs_mnt_lookup(const char *path);
const char *dfs_mnt_get_mounted_path(struct rt_device *device);

View File

@@ -0,0 +1,125 @@
/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2023-05-05     RTT          Page cache (pcache) interface for dfs v2.0
 */
#ifndef DFS_PAGE_CACHE_H__
#define DFS_PAGE_CACHE_H__

#include <rtthread.h>

#ifdef RT_USING_PAGECACHE

#include <dfs_file.h>
#include <avl.h>

#ifdef __cplusplus
extern "C"
{
#endif

struct dfs_vnode;
struct dfs_dentry;
struct dfs_aspace;

/* One user mapping of a cached file region; linked into dfs_page::mmap_head. */
struct dfs_mmap
{
    rt_list_t mmap_node;        /* node in the owning page's mmap_head list */
    struct rt_varea *varea;     /* the virtual-address region mapping this page */
};

/* A single cached page of file data. */
struct dfs_page
{
    rt_list_t space_node;           /* node in the aspace active/inactive list */
    rt_list_t dirty_node;           /* node in the aspace dirty list */
    struct util_avl_struct avl_node; /* node in the aspace AVL index (keyed lookup) */
    rt_list_t mmap_head;            /* list of dfs_mmap records referencing this page */
    rt_atomic_t ref_count;          /* reference count */
    void *page;                     /* backing buffer holding the cached data */
    off_t fpos;                     /* file offset this page caches */
    size_t size;                    /* capacity of the buffer */
    size_t len;                     /* valid data length within the buffer */
    int is_dirty;                   /* nonzero when the page needs write-back */
    rt_tick_t tick_ms;              /* timestamp tick — presumably for aging/GC; confirm */
    struct dfs_aspace *aspace;      /* owning address space */
};

/* Filesystem-supplied backends for filling and flushing cache pages. */
struct dfs_aspace_ops
{
    ssize_t (*read)(struct dfs_file *file, struct dfs_page *page);  /* fill page from storage */
    ssize_t (*write)(struct dfs_page *page);                        /* flush page to storage */
};

/* Per-file page-cache address space: all cached pages of one vnode. */
struct dfs_aspace
{
    rt_list_t hash_node, cache_node;    /* nodes in the global pcache hash/cache lists */
    char *fullpath, *pathname;          /* identity of the backing file */
    struct dfs_mnt *mnt;                /* mount the file belongs to */

    rt_list_t list_active, list_inactive; /* LRU-style page lists */
    rt_list_t list_dirty;               /* pages awaiting write-back */
    size_t pages_count;                 /* pages currently held by this aspace */

    struct util_avl_root avl_root;      /* AVL index over pages for fast lookup */
    struct dfs_page *avl_page;          /* cached hint — presumably last-hit page; confirm */

    rt_bool_t is_active;                /* whether this aspace is on the active list */
    struct rt_mutex lock;               /* protects the lists/index above */
    rt_atomic_t ref_count;              /* reference count */

    struct dfs_vnode *vnode;            /* vnode this cache belongs to */
    const struct dfs_aspace_ops *ops;   /* filesystem read/write backends */
};

#ifndef RT_PAGECACHE_HASH_NR
#define RT_PAGECACHE_HASH_NR 1024
#endif

/* Global page-cache state: hash of aspaces plus LRU lists and totals. */
struct dfs_pcache
{
    rt_list_t head[RT_PAGECACHE_HASH_NR];  /* aspace hash buckets */
    rt_list_t list_active, list_inactive;  /* aspace LRU lists */
    rt_atomic_t pages_count;               /* total cached pages */
    struct rt_mutex lock;                  /* protects the structures above */
    struct rt_messagequeue *mqueue;        /* work queue — presumably for async write-back; confirm */
    rt_tick_t last_time_wb;                /* tick of last write-back pass */
};

/* Create/destroy the per-file cache aspace. */
struct dfs_aspace *dfs_aspace_create(struct dfs_dentry *dentry, struct dfs_vnode *vnode, const struct dfs_aspace_ops *ops);
int dfs_aspace_destroy(struct dfs_aspace *aspace);

/* Cached file I/O entry points (used by dfs_file read/write paths). */
int dfs_aspace_read(struct dfs_file *file, void *buf, size_t count, off_t *pos);
int dfs_aspace_write(struct dfs_file *file, const void *buf, size_t count, off_t *pos);

/* flush: write dirty pages back; clean: drop cached pages. */
int dfs_aspace_flush(struct dfs_aspace *aspace);
int dfs_aspace_clean(struct dfs_aspace *aspace);

/* mmap support: map/unmap cache pages into a user varea. */
void *dfs_aspace_mmap(struct dfs_file *file, struct rt_varea *varea, void *vaddr);
int dfs_aspace_unmap(struct dfs_file *file, struct rt_varea *varea);
int dfs_aspace_page_unmap(struct dfs_file *file, struct rt_varea *varea, void *vaddr);
int dfs_aspace_page_dirty(struct dfs_file *file, struct rt_varea *varea, void *vaddr);

/* Translate between a varea virtual address and a file position. */
off_t dfs_aspace_fpos(struct rt_varea *varea, void *vaddr);
void *dfs_aspace_vaddr(struct rt_varea *varea, off_t fpos);

/* I/O against a mapped region (used by the mm fault/IO message paths). */
int dfs_aspace_mmap_read(struct dfs_file *file, struct rt_varea *varea, void *data);
int dfs_aspace_mmap_write(struct dfs_file *file, struct rt_varea *varea, void *data);

/* Global cache maintenance: release `count` pages / drop a whole mount. */
void dfs_pcache_release(size_t count);
void dfs_pcache_unmount(struct dfs_mnt *mnt);

#ifdef __cplusplus
}
#endif

#endif

#endif

View File

@@ -0,0 +1,69 @@
/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 */
#ifndef __DFS_SEQ_FILE_H__
#define __DFS_SEQ_FILE_H__

#include <dfs.h>
#include <dfs_fs.h>

struct dfs_seq_ops;

/* Sequential-file generator state: an iterator-driven buffer used to produce
 * file content on demand (records emitted via the ops callbacks below). */
struct dfs_seq_file
{
    char *buf;          /* output buffer */
    size_t size;        /* buffer capacity */
    size_t from;        /* offset of first unconsumed byte in buf */
    size_t count;       /* bytes currently held in buf */
    size_t pad_until;   /* pad target used by dfs_seq_pad/dfs_seq_setwidth */
    off_t index;        /* iterator position passed to the ops callbacks */
    off_t read_pos;     /* file position already delivered to the reader */
    struct rt_mutex lock;           /* serializes readers of this seq file */
    const struct dfs_seq_ops *ops;  /* record iterator implementation */
    const struct dfs_file *file;    /* the dfs file this seq state belongs to */
    void *data;         /* private data for the ops implementation */
};

/* Iterator callbacks supplied by the seq-file producer:
 * start/next walk records, stop ends a walk, show formats one record. */
struct dfs_seq_ops
{
    void *(*start)(struct dfs_seq_file *seq, off_t *index);
    void (*stop)(struct dfs_seq_file *seq, void *data);
    void *(*next)(struct dfs_seq_file *seq, void *data, off_t *index);
    int (*show)(struct dfs_seq_file *seq, void *data);
};

/**
 * check if the buffer is full
 */
static inline rt_bool_t dfs_seq_is_full(struct dfs_seq_file *seq)
{
    return seq->count == seq->size;
}

/**
 * set padding width size
 */
static inline void dfs_seq_setwidth(struct dfs_seq_file *seq, size_t size)
{
    seq->pad_until = seq->count + size;
}

/* File-operation entry points backed by the seq machinery. */
int dfs_seq_open(struct dfs_file *file, const struct dfs_seq_ops *ops);
ssize_t dfs_seq_read(struct dfs_file *file, void *buf, size_t size, off_t *pos);
ssize_t dfs_seq_lseek(struct dfs_file *file, off_t offset, int whence);
int dfs_seq_release(struct dfs_file *file);

/* Output helpers for use inside the show() callback. */
int dfs_seq_write(struct dfs_seq_file *seq, const void *data, size_t len);
void dfs_seq_vprintf(struct dfs_seq_file *seq, const char *fmt, va_list args);
void dfs_seq_printf(struct dfs_seq_file *seq, const char *fmt, ...);
void dfs_seq_putc(struct dfs_seq_file *seq, char c);
void dfs_seq_puts(struct dfs_seq_file *seq, const char *s);
void dfs_seq_pad(struct dfs_seq_file *seq, char c);

#endif

View File

@@ -197,7 +197,7 @@ struct dfs_dentry *dfs_dentry_lookup(struct dfs_mnt *mnt, const char *path, uint
path = "/";
}
}
dfs_file_lock();
dentry = _dentry_hash_lookup(mnt, path);
if (!dentry)
{
@@ -252,7 +252,7 @@ struct dfs_dentry *dfs_dentry_lookup(struct dfs_mnt *mnt, const char *path, uint
{
DLOG(note, "dentry", "found dentry");
}
dfs_file_unlock();
return dentry;
}

View File

@@ -19,6 +19,10 @@
#include "dfs_mnt.h"
#include "dfs_private.h"
#ifdef RT_USING_PAGECACHE
#include "dfs_pcache.h"
#endif
#define DBG_TAG "DFS.file"
#define DBG_LVL DBG_WARNING
#include <rtdbg.h>
@@ -114,7 +118,7 @@ static void dfs_file_unref(struct dfs_file *file)
{
if (file->vnode->ref_count > 1)
{
file->vnode->ref_count--;
rt_atomic_sub(&(file->vnode->ref_count), 1);
}
else if (file->vnode->ref_count == 1)
{
@@ -448,6 +452,7 @@ int dfs_file_open(struct dfs_file *file, const char *path, int oflags, mode_t mo
struct dfs_vnode *vnode = RT_NULL;
DLOG(msg, "dfs_file", "dentry", DLOG_MSG, "dfs_dentry_create(%s)", fullpath);
dfs_file_lock();
dentry = dfs_dentry_create(mnt, fullpath);
if (dentry)
{
@@ -472,6 +477,7 @@ int dfs_file_open(struct dfs_file *file, const char *path, int oflags, mode_t mo
dentry = RT_NULL;
}
}
dfs_file_unlock();
}
}
}
@@ -518,7 +524,9 @@ int dfs_file_open(struct dfs_file *file, const char *path, int oflags, mode_t mo
if (dfs_is_mounted(file->vnode->mnt) == 0)
{
dfs_file_lock();
ret = file->fops->open(file);
dfs_file_unlock();
}
else
{
@@ -572,6 +580,12 @@ int dfs_file_open(struct dfs_file *file, const char *path, int oflags, mode_t mo
if (dfs_is_mounted(file->vnode->mnt) == 0)
{
#ifdef RT_USING_PAGECACHE
if (file->vnode->aspace)
{
dfs_aspace_clean(file->vnode->aspace);
}
#endif
ret = file->fops->truncate(file, 0);
}
else
@@ -612,7 +626,12 @@ int dfs_file_close(struct dfs_file *file)
if (ref_count == 1 && file->fops && file->fops->close)
{
DLOG(msg, "dfs_file", file->dentry->mnt->fs_ops->name, DLOG_MSG, "fops->close(file)");
#ifdef RT_USING_PAGECACHE
if (file->vnode->aspace)
{
dfs_aspace_flush(file->vnode->aspace);
}
#endif
ret = file->fops->close(file);
if (ret == 0) /* close file sucessfully */
@@ -665,7 +684,16 @@ ssize_t dfs_file_read(struct dfs_file *file, void *buf, size_t len)
if (dfs_is_mounted(file->vnode->mnt) == 0)
{
ret = file->fops->read(file, buf, len, &pos);
#ifdef RT_USING_PAGECACHE
if (file->vnode->aspace)
{
ret = dfs_aspace_read(file, buf, len, &pos);
}
else
#endif
{
ret = file->fops->read(file, buf, len, &pos);
}
}
else
{
@@ -710,7 +738,16 @@ ssize_t dfs_file_write(struct dfs_file *file, const void *buf, size_t len)
if (dfs_is_mounted(file->vnode->mnt) == 0)
{
ret = file->fops->write(file, buf, len, &pos);
#ifdef RT_USING_PAGECACHE
if (file->vnode->aspace)
{
ret = dfs_aspace_write(file, buf, len, &pos);
}
else
#endif
{
ret = file->fops->write(file, buf, len, &pos);
}
}
else
{
@@ -1047,6 +1084,12 @@ int dfs_file_fsync(struct dfs_file *file)
{
if (dfs_is_mounted(file->vnode->mnt) == 0)
{
#ifdef RT_USING_PAGECACHE
if (file->vnode->aspace)
{
dfs_aspace_flush(file->vnode->aspace);
}
#endif
ret = file->fops->flush(file);
}
else
@@ -1089,6 +1132,14 @@ int dfs_file_unlink(const char *path)
rt_bool_t has_child = RT_FALSE;
has_child = dfs_mnt_has_child_mnt(mnt, fullpath);
#ifdef RT_USING_PAGECACHE
if (dentry->vnode->aspace)
{
dfs_aspace_clean(dentry->vnode->aspace);
}
#endif
dfs_file_lock();
if (has_child == RT_FALSE)
{
/* no child mnt point, unlink it */
@@ -1106,6 +1157,7 @@ int dfs_file_unlink(const char *path)
{
ret = -EBUSY;
}
dfs_file_unlock();
/* release this dentry */
dfs_dentry_unref(dentry);
@@ -1467,6 +1519,12 @@ int dfs_file_rename(const char *old_file, const char *new_file)
{
if (dfs_is_mounted(mnt) == 0)
{
#ifdef RT_USING_PAGECACHE
if (old_dentry->vnode->aspace)
{
dfs_aspace_clean(old_dentry->vnode->aspace);
}
#endif
ret = mnt->fs_ops->rename(old_dentry, new_dentry);
}
}
@@ -1499,6 +1557,12 @@ int dfs_file_ftruncate(struct dfs_file *file, off_t length)
{
if (dfs_is_mounted(file->vnode->mnt) == 0)
{
#ifdef RT_USING_PAGECACHE
if (file->vnode->aspace)
{
dfs_aspace_clean(file->vnode->aspace);
}
#endif
ret = file->fops->truncate(file, length);
}
else
@@ -1529,6 +1593,12 @@ int dfs_file_flush(struct dfs_file *file)
{
if (dfs_is_mounted(file->vnode->mnt) == 0)
{
#ifdef RT_USING_PAGECACHE
if (file->vnode->aspace)
{
dfs_aspace_flush(file->vnode->aspace);
}
#endif
ret = file->fops->flush(file);
}
else
@@ -1669,13 +1739,23 @@ int dfs_file_access(const char *path, mode_t mode)
return ret;
}
#ifdef RT_USING_SMART
int dfs_file_mmap2(struct dfs_file *file, struct dfs_mmap2_args *mmap2)
{
int ret = 0;
int ret = RT_EOK;
if (file && mmap2)
{
if (file->vnode->type != FT_DEVICE || !file->vnode->fops->ioctl)
if (file->vnode->type == FT_REGULAR)
{
ret = dfs_file_mmap(file, mmap2);
if (ret != 0)
{
ret = ret > 0 ? ret : -ret;
rt_set_errno(ret);
}
}
else if (file->vnode->type != FT_DEVICE || !file->vnode->fops->ioctl)
{
rt_set_errno(EINVAL);
}
@@ -1700,6 +1780,7 @@ int dfs_file_mmap2(struct dfs_file *file, struct dfs_mmap2_args *mmap2)
return ret;
}
#endif
#ifdef RT_USING_FINSH

View File

@@ -0,0 +1,443 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
*/
#include "dfs_file.h"
#include "dfs_dentry.h"
#include "dfs_mnt.h"
#define DBG_TAG "dfs.mmap"
#define DBG_LVL DBG_WARNING
#include <rtdbg.h>
#if defined(RT_USING_SMART) && defined(ARCH_MM_MMU) && defined(RT_USING_PAGECACHE)
#include "dfs_pcache.h"
#include <lwp.h>
#include <sys/mman.h>
#include <lwp_user_mm.h>
#include <mm_aspace.h>
#include <mm_fault.h>
#include <mm_flag.h>
#include <mm_page.h>
#include <mmu.h>
#include <page.h>
#include <tlb.h>
static rt_mem_obj_t dfs_get_mem_obj(struct dfs_file *file);
static void *dfs_mem_obj_get_file(rt_mem_obj_t mem_obj);
/* Map `map_size` bytes backed by the file in `data` into `lwp`'s address
 * space and return the resulting varea, or RT_NULL on failure. */
static rt_varea_t _dfs_map_user_varea_data(struct rt_lwp *lwp, void *map_vaddr, size_t map_size, size_t attr, mm_flag_t flags, off_t pgoffset, void *data)
{
    rt_varea_t varea = RT_NULL;
    void *vaddr = map_vaddr;
    rt_mem_obj_t mem_obj = dfs_get_mem_obj(data);
    int err = rt_aspace_map(lwp->aspace, &vaddr, map_size,
                            attr, flags, mem_obj, pgoffset);

    if (err == RT_EOK)
    {
        /* the map call may have chosen a different address — query by it */
        varea = rt_aspace_query(lwp->aspace, vaddr);
    }
    else
    {
        LOG_E("failed to map %lx with size %lx with errno %d", map_vaddr,
              map_size, err);
    }

    return varea;
}
static rt_varea_t dfs_map_user_varea_data(struct dfs_mmap2_args *mmap2, void *data)
{
rt_varea_t varea = RT_NULL;
size_t offset = 0;
void *map_vaddr = mmap2->addr;
size_t map_size = mmap2->length;
struct rt_lwp *lwp = mmap2->lwp;
rt_size_t k_attr;
rt_size_t k_flags;
if (map_size)
{
offset = (size_t)map_vaddr & ARCH_PAGE_MASK;
map_size += (offset + ARCH_PAGE_SIZE - 1);
map_size &= ~ARCH_PAGE_MASK;
map_vaddr = (void *)((size_t)map_vaddr & ~ARCH_PAGE_MASK);
k_flags = lwp_user_mm_flag_to_kernel(mmap2->flags);
k_attr = lwp_user_mm_attr_to_kernel(mmap2->prot);
varea = _dfs_map_user_varea_data(lwp, map_vaddr, map_size, k_attr, k_flags, mmap2->pgoffset, data);
}
return varea;
}
/* Intentionally empty: this mapper has no per-hint resources to release. */
static void hint_free(rt_mm_va_hint_t hint)
{
}
/* Page-fault handler for file-backed vareas: resolve the faulting address to
 * a page-cache page via dfs_aspace_mmap() and report it as already mapped.
 * On failure the response status is left untouched, so the mm layer treats
 * the fault as unhandled. */
static void on_page_fault(struct rt_varea *varea, struct rt_aspace_fault_msg *msg)
{
    void *page;
    struct dfs_file *file = dfs_mem_obj_get_file(varea->mem_obj);
    if (file)
    {
        LOG_I("%s varea: %p", __func__, varea);
        LOG_I("varea start: %p size: 0x%x offset: 0x%x attr: 0x%x flag: 0x%x",
              varea->start, varea->size, varea->offset, varea->attr, varea->flag);
        LOG_I("fault vaddr: %p", msg->fault_vaddr);
        if (file->dentry)
        {
            LOG_I("file: %s%s", file->dentry->mnt->fullpath, file->dentry->pathname);
        }
        /* ask the page cache for (or to create) the page covering the fault */
        page = dfs_aspace_mmap(file, varea, msg->fault_vaddr);
        if (page)
        {
            /* MAPPED: the cache already installed the translation */
            msg->response.status = MM_FAULT_STATUS_OK_MAPPED;
            msg->response.size = ARCH_PAGE_SIZE;
            msg->response.vaddr = page;
        }
        else
        {
            LOG_E("%s varea %p mmap failed at vaddr %p", __func__, varea, msg->fault_vaddr);
        }
    }
    else
    {
        LOG_E("%s varea %p not a file, vaddr %p", __func__, varea, varea->start);
    }
}
/* Pre-open hook: take a reference on the backing file so it outlives the
 * mapping. Guards against a non-file mem_obj like the other varea hooks
 * (on_page_fault / on_varea_close), which tolerate a NULL lookup instead of
 * dereferencing it. */
static void on_varea_open(struct rt_varea *varea)
{
    struct dfs_file *file = dfs_mem_obj_get_file(varea->mem_obj);

    varea->data = RT_NULL;
    if (file)
    {
        rt_atomic_add(&(file->ref_count), 1);
    }
    else
    {
        LOG_E("%s varea %p not a file, vaddr %p", __func__, varea, varea->start);
    }
}
/* do post-close business: unmap cached pages and drop the file
 * reference taken in on_varea_open, closing the file when this was the
 * last reference */
static void on_varea_close(struct rt_varea *varea)
{
    struct dfs_file *file = dfs_mem_obj_get_file(varea->mem_obj);
    if (file)
    {
        LOG_I("%s varea: %p", __func__, varea);
        LOG_I("varea start: %p size: 0x%x offset: 0x%x attr: 0x%x flag: 0x%x",
              varea->start, varea->size, varea->offset, varea->attr, varea->flag);
        if (file->dentry)
        {
            LOG_I("file: %s%s", file->dentry->mnt->fullpath, file->dentry->pathname);
        }
        /* remove this mapping's pages from the page cache first */
        dfs_aspace_unmap(file, varea);
        /* ref-count test and close must be atomic w.r.t. other closers */
        dfs_file_lock();
        if (rt_atomic_load(&(file->ref_count)) == 1)
        {
            dfs_file_close(file);
        }
        else
        {
            rt_atomic_sub(&(file->ref_count), 1);
        }
        dfs_file_unlock();
    }
    else
    {
        LOG_E("%s varea %p not a file, vaddr %p", __func__, varea, varea->start);
    }
}
/* mem_obj callback: human-readable name for debug listings */
static const char *get_name(rt_varea_t varea)
{
    struct dfs_file *backing = dfs_mem_obj_get_file(varea->mem_obj);

    if (backing && backing->dentry)
    {
        return backing->dentry->pathname;
    }
    return "file-mapper";
}
/**
 * mem_obj page_read callback: fill the page buffer in @msg from the
 * file's page cache; a short read zero-fills the rest of the page so no
 * stale data leaks to user space.
 */
void page_read(struct rt_varea *varea, struct rt_aspace_io_msg *msg)
{
    rt_ubase_t ret;
    struct dfs_file *file = dfs_mem_obj_get_file(varea->mem_obj);
    if (file)
    {
        LOG_I("%s varea: %p", __func__, varea);
        LOG_I("varea start: %p size: 0x%x offset: 0x%x attr: 0x%x flag: 0x%x",
              varea->start, varea->size, varea->offset, varea->attr, varea->flag);
        ret = dfs_aspace_mmap_read(file, varea, msg);
        if (ret > 0)
        {
            msg->response.status = MM_FAULT_STATUS_OK;
            if (ret < ARCH_PAGE_SIZE)
            {
                /* zero the tail of a short read */
                memset((char *)msg->buffer_vaddr + ret, 0, ARCH_PAGE_SIZE - ret);
            }
        }
        /* NOTE(review): ret == 0 leaves response.status untouched --
         * presumably the caller pre-initializes it to a failure value;
         * confirm. */
    }
    else
    {
        LOG_E("%s varea %p not a file, vaddr %p", __func__, varea, varea->start);
    }
}
/**
 * mem_obj page_write callback: flush the page buffer in @msg to the
 * file's page cache; a short write zero-fills the rest of the buffer.
 */
void page_write(struct rt_varea *varea, struct rt_aspace_io_msg *msg)
{
    rt_ubase_t ret;
    struct dfs_file *file = dfs_mem_obj_get_file(varea->mem_obj);
    if (file)
    {
        LOG_I("%s varea: %p", __func__, varea);
        LOG_I("varea start: %p size: 0x%x offset: 0x%x attr: 0x%x flag: 0x%x",
              varea->start, varea->size, varea->offset, varea->attr, varea->flag);
        ret = dfs_aspace_mmap_write(file, varea, msg);
        if (ret > 0)
        {
            msg->response.status = MM_FAULT_STATUS_OK;
            if (ret < ARCH_PAGE_SIZE)
            {
                /* zero the unwritten tail of the buffer */
                memset((char *)msg->buffer_vaddr + ret, 0, ARCH_PAGE_SIZE - ret);
            }
        }
        /* NOTE(review): as in page_read, ret == 0 leaves response.status
         * untouched -- confirm the caller treats that as failure. */
    }
    else
    {
        LOG_E("%s varea %p not a file, vaddr %p", __func__, varea, varea->start);
    }
}
/**
 * Unmap every page-cache page of @varea inside [rm_start, rm_end).
 * Both bounds must be page-aligned. Returns RT_EOK, or -RT_ERROR when
 * the varea is not file-backed.
 */
static rt_err_t unmap_pages(rt_varea_t varea, void *rm_start, void *rm_end)
{
    struct dfs_file *file = dfs_mem_obj_get_file(varea->mem_obj);
    if (file)
    {
        char *iter;

        LOG_I("%s varea: %p start: %p end: %p", __func__, varea, rm_start, rm_end);
        RT_ASSERT(!((rt_ubase_t)rm_start & ARCH_PAGE_MASK));
        RT_ASSERT(!((rt_ubase_t)rm_end & ARCH_PAGE_MASK));
        /* walk with char *: arithmetic on void * is a GNU extension and
         * not portable C */
        for (iter = rm_start; iter != (char *)rm_end; iter += ARCH_PAGE_SIZE)
        {
            dfs_aspace_page_unmap(file, varea, iter);
        }
        return RT_EOK;
    }
    else
    {
        LOG_E("%s varea %p not a file, vaddr %p", __func__, varea, varea->start);
    }
    return -RT_ERROR;
}
/**
 * mem_obj shrink callback: the varea is being reduced to
 * [new_vaddr, new_vaddr + size); unmap the part that is cut off.
 * Returns RT_EOK on success, -RT_ERROR on an invalid request or a
 * non-file varea.
 */
rt_err_t on_varea_shrink(struct rt_varea *varea, void *new_vaddr, rt_size_t size)
{
    char *varea_start = varea->start;
    void *rm_start;
    void *rm_end;

    LOG_I("%s varea: %p", __func__, varea);
    LOG_I("varea start: %p size: 0x%x offset: 0x%x attr: 0x%x flag: 0x%x",
          varea->start, varea->size, varea->offset, varea->attr, varea->flag);
    LOG_I("new_vaddr: %p size: %p", new_vaddr, size);

    if (varea_start == (char *)new_vaddr)
    {
        /* keep the head, unmap the tail */
        rm_start = varea_start + size;
        rm_end = varea_start + varea->size;
    }
    else if (varea_start < (char *)new_vaddr)
    {
        /* keep the tail, unmap the head */
        rm_start = varea_start;
        rm_end = new_vaddr;
    }
    else
    {
        /* BUG fix: previously rm_start/rm_end were used uninitialized on
         * this path (new_vaddr below the varea start) */
        LOG_E("%s varea %p invalid shrink to %p", __func__, varea, new_vaddr);
        return -RT_ERROR;
    }
    return unmap_pages(varea, rm_start, rm_end);
}
/* mem_obj expand callback: nothing to do eagerly -- pages in the grown
 * range are populated lazily by on_page_fault */
rt_err_t on_varea_expand(struct rt_varea *varea, void *new_vaddr, rt_size_t size)
{
    LOG_I("%s varea: %p", __func__, varea);
    LOG_I("varea start: %p size: 0x%x offset: 0x%x attr: 0x%x flag: 0x%x",
          varea->start, varea->size, varea->offset, varea->attr, varea->flag);
    LOG_I("new_vaddr: %p size: %p", new_vaddr, size);
    return RT_EOK;
}
/**
 * mem_obj split callback: @existed is split into two vareas with the
 * range [unmap_start, unmap_start + unmap_len) removed between them.
 * Unmaps the hole and gives @subset its own reference on the file.
 */
rt_err_t on_varea_split(struct rt_varea *existed, void *unmap_start, rt_size_t unmap_len, struct rt_varea *subset)
{
    struct dfs_file *file = dfs_mem_obj_get_file(existed->mem_obj);
    if (file)
    {
        LOG_I("%s varea: %p", __func__, existed);
        LOG_I("varea start: %p size: 0x%x offset: 0x%x attr: 0x%x flag: 0x%x",
              existed->start, existed->size, existed->offset, existed->attr, existed->flag);
        LOG_I("unmap_start: %p unmap_len: %p", unmap_start, unmap_len);
        if (file->dentry)
        {
            LOG_I("file: %s%s", file->dentry->mnt->fullpath, file->dentry->pathname);
        }
        /* drop the pages in the hole between the two halves */
        unmap_pages(existed, unmap_start, (char *)unmap_start + unmap_len);
        subset->data = existed->data;
        /* the new varea holds its own reference on the backing file */
        rt_atomic_add(&(file->ref_count), 1);
        return RT_EOK;
    }
    else
    {
        LOG_E("%s varea %p not a file, vaddr %p", __func__, existed, existed->start);
    }
    return -RT_ERROR;
}
/**
 * mem_obj merge callback: @merge_from is being absorbed into @merge_to.
 * Unmaps the absorbed varea's cached pages and releases the file
 * reference it held (taken in on_varea_open or on_varea_split).
 */
rt_err_t on_varea_merge(struct rt_varea *merge_to, struct rt_varea *merge_from)
{
    struct dfs_file *file = dfs_mem_obj_get_file(merge_from->mem_obj);
    if (file)
    {
        LOG_I("%s varea: %p", __func__, merge_from);
        LOG_I("varea start: %p size: 0x%x offset: 0x%x attr: 0x%x flag: 0x%x",
              merge_from->start, merge_from->size, merge_from->offset, merge_from->attr, merge_from->flag);
        if (file->dentry)
        {
            LOG_I("file: %s%s", file->dentry->mnt->fullpath, file->dentry->pathname);
        }
        dfs_aspace_unmap(file, merge_from);
        merge_from->data = RT_NULL;
        rt_atomic_sub(&(file->ref_count), 1);
        return RT_EOK;
    }
    else
    {
        LOG_E("%s varea %p not a file, vaddr %p", __func__, merge_from, merge_from->start);
    }
    return -RT_ERROR;
}
/* mem_obj operation table shared by every file-backed mapping; copied
 * into each per-file dfs_mem_obj by dfs_get_mem_obj() */
static struct rt_mem_obj _mem_obj =
{
    .hint_free = hint_free,
    .on_page_fault = on_page_fault,
    .on_varea_open = on_varea_open,
    .on_varea_close = on_varea_close,
    .get_name = get_name,
    .page_read = page_read,
    .page_write = page_write,
    .on_varea_shrink = on_varea_shrink,
    .on_varea_expand = on_varea_expand,
    .on_varea_split = on_varea_split,
    .on_varea_merge = on_varea_merge,
};

/* wraps the generic mem_obj with a back-pointer to its dfs_file; the
 * embedded mem_obj must stay the first-class member for container_of */
struct dfs_mem_obj {
    struct rt_mem_obj mem_obj;
    void *file;
};
/**
 * Return the per-file mem_obj, creating it lazily on first use.
 * The object lives until the file itself is destroyed.
 * Returns RT_NULL on allocation failure.
 */
static rt_mem_obj_t dfs_get_mem_obj(struct dfs_file *file)
{
    rt_mem_obj_t mobj = file->mmap_context;
    if (!mobj)
    {
        dfs_file_lock();
        /* BUG fix: re-check under the lock -- two racing callers could
         * both see NULL above and allocate twice, leaking one object */
        mobj = file->mmap_context;
        if (!mobj)
        {
            struct dfs_mem_obj *dfs_mobj = rt_malloc(sizeof(*dfs_mobj));
            if (dfs_mobj)
            {
                dfs_mobj->file = file;
                mobj = &dfs_mobj->mem_obj;
                memcpy(mobj, &_mem_obj, sizeof(*mobj));
                file->mmap_context = mobj;
            }
        }
        dfs_file_unlock();
    }
    return mobj;
}
/* recover the dfs_file backing a mem_obj created by dfs_get_mem_obj() */
static void *dfs_mem_obj_get_file(rt_mem_obj_t mem_obj)
{
    struct dfs_mem_obj *owner;

    owner = rt_container_of(mem_obj, struct dfs_mem_obj, mem_obj);
    return owner->file;
}
/**
 * Map @file into the user address space described by @mmap2.
 * On success mmap2->ret carries the user virtual address and RT_EOK is
 * returned; -EINVAL when the file cannot be mapped, -ENOMEM when the
 * varea cannot be created.
 */
int dfs_file_mmap(struct dfs_file *file, struct dfs_mmap2_args *mmap2)
{
    int ret = -EINVAL;
    LOG_I("mmap2 args addr: %p length: 0x%x prot: %d flags: 0x%x pgoffset: 0x%x",
          mmap2->addr, mmap2->length, mmap2->prot, mmap2->flags, mmap2->pgoffset);
    if (file && file->vnode && file->vnode->aspace)
    {
        /* create a va area in user space (lwp) */
        rt_varea_t varea = dfs_map_user_varea_data(mmap2, file);
        if (varea)
        {
            mmap2->ret = varea->start;
            LOG_I("%s varea: %p", __func__, varea);
            LOG_I("varea start: %p size: 0x%x offset: 0x%x attr: 0x%x flag: 0x%x",
                  varea->start, varea->size, varea->offset, varea->attr, varea->flag);
            LOG_I("file: %s%s", file->dentry->mnt->fullpath, file->dentry->pathname);
            ret = RT_EOK;
        }
        else
        {
            ret = -ENOMEM;
        }
    }
    else if (file && file->vnode)
    {
        /* BUG fix: the old else-if dereferenced file->vnode->aspace even
         * when file or file->vnode was NULL */
        LOG_E("File mapping is not supported, file: %s%s", file->dentry->mnt->fullpath, file->dentry->pathname);
    }
    return ret;
}
#else
/* stub used when file-mapping support is compiled out: log and refuse.
 * NOTE(review): file->dentry is dereferenced without NULL checks here --
 * confirm callers always pass an open, dentry-backed file. */
int dfs_file_mmap(struct dfs_file *file, struct dfs_mmap2_args *mmap2)
{
    LOG_E("File mapping support is not enabled, file: %s%s", file->dentry->mnt->fullpath, file->dentry->pathname);
    LOG_E("mmap2 args addr: %p length: 0x%x prot: %d flags: 0x%x pgoffset: 0x%x",
          mmap2->addr, mmap2->length, mmap2->prot, mmap2->flags, mmap2->pgoffset);
    return -EPERM;
}
#endif

View File

@@ -19,6 +19,10 @@
#include <dfs_mnt.h>
#include "dfs_private.h"
#ifdef RT_USING_PAGECACHE
#include "dfs_pcache.h"
#endif
#define DBG_TAG "DFS.fs"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
@@ -314,6 +318,9 @@ int dfs_umount(const char *specialfile, int flags)
if (!(mnt->flags & MNT_IS_LOCKED) && rt_list_isempty(&mnt->child) && (ref_count == 1 || (flags & MNT_FORCE)))
{
#ifdef RT_USING_PAGECACHE
dfs_pcache_unmount(mnt);
#endif
/* destroy this mount point */
DLOG(msg, "dfs", "mnt", DLOG_MSG, "dfs_mnt_destroy(mnt)");
ret = dfs_mnt_destroy(mnt);
@@ -396,6 +403,18 @@ int dfs_mkfs(const char *fs_name, const char *device_name)
if (type->fs_ops->mkfs)
{
ret = type->fs_ops->mkfs(dev_id, type->fs_ops->name);
#ifdef RT_USING_PAGECACHE
if (ret == RT_EOK)
{
struct dfs_mnt *mnt = RT_NULL;
mnt = dfs_mnt_dev_lookup(dev_id);
if (mnt)
{
dfs_pcache_unmount(mnt);
}
}
#endif
}
return ret;

View File

@@ -131,6 +131,53 @@ int dfs_mnt_remove(struct dfs_mnt* mnt)
return ret;
}
/* depth-first search of @mnt's subtree for the mount whose backing
 * device is @dev_id; returns RT_NULL when not found. Caller holds the
 * dfs lock. */
static struct dfs_mnt *_dfs_mnt_dev_lookup(struct dfs_mnt *mnt, rt_device_t dev_id)
{
    struct dfs_mnt *ret = RT_NULL, *iter = RT_NULL;
    rt_list_for_each_entry(iter, &mnt->child, sibling)
    {
        if (iter->dev_id == dev_id)
        {
            ret = iter;
            break;
        }
        else
        {
            /* recurse into this child's own subtree */
            ret = _dfs_mnt_dev_lookup(iter, dev_id);
            if (ret)
            {
                break;
            }
        }
    }
    return ret;
}
/**
 * Find the mounted filesystem whose backing device is @dev_id.
 * Checks the root mount first, then walks the whole mount tree under
 * the dfs lock. Returns RT_NULL when no mount uses the device.
 */
struct dfs_mnt *dfs_mnt_dev_lookup(rt_device_t dev_id)
{
    struct dfs_mnt *mnt = _root_mnt;
    struct dfs_mnt *ret = RT_NULL;
    if (mnt)
    {
        dfs_lock();
        if (mnt->dev_id == dev_id)
        {
            dfs_unlock();
            return mnt;
        }
        ret = _dfs_mnt_dev_lookup(mnt, dev_id);
        dfs_unlock();
    }
    return ret;
}
/**
* this function will return the file system mounted on specified path.
*
@@ -139,24 +186,26 @@ int dfs_mnt_remove(struct dfs_mnt* mnt)
* @return the found file system or NULL if no file system mounted on
* specified path
*/
struct dfs_mnt* dfs_mnt_lookup(const char* fullpath)
struct dfs_mnt *dfs_mnt_lookup(const char *fullpath)
{
struct dfs_mnt *mnt = _root_mnt;
struct dfs_mnt *iter = RT_NULL;
if (mnt)
{
int mnt_len = rt_strlen(mnt->fullpath);
dfs_lock();
if (strncmp(mnt->fullpath, fullpath, strlen(fullpath))!= 0)
if ((strncmp(mnt->fullpath, fullpath, mnt_len) == 0) &&
(mnt_len == 1 || (fullpath[mnt_len] == '\0') || (fullpath[mnt_len] == '/')))
{
while (!rt_list_isempty(&mnt->child))
{
rt_list_for_each_entry(iter, &mnt->child, sibling)
{
int mnt_len = rt_strlen(iter->fullpath);
mnt_len = rt_strlen(iter->fullpath);
if ((strncmp(iter->fullpath, fullpath, mnt_len) == 0) &&
((fullpath[mnt_len] == '\0') ||
(fullpath[mnt_len] == '/')))
((fullpath[mnt_len] == '\0') || (fullpath[mnt_len] == '/')))
{
mnt = iter;
break;
@@ -166,6 +215,10 @@ struct dfs_mnt* dfs_mnt_lookup(const char* fullpath)
if (mnt != iter) break;
}
}
else
{
mnt = RT_NULL;
}
dfs_unlock();
if (mnt)

File diff suppressed because it is too large Load Diff

View File

@@ -1216,53 +1216,46 @@ FINSH_FUNCTION_EXPORT_ALIAS(chdir, cd, change current working directory);
*/
int access(const char *path, int amode)
{
int fd, ret = -1, flags = 0;
struct stat sb;
struct stat st;
if (path == NULL)
{
rt_set_errno(-EBADF);
rt_set_errno(-EINVAL);
return -1;
}
if (stat(path, &st) < 0)
{
rt_set_errno(-ENOENT);
return -1;
}
if (amode == F_OK)
{
if (stat(path, &sb) < 0)
return -1; /* already sets errno */
else
return 0;
return 0;
}
/* ignore R_OK,W_OK,X_OK condition */
if (dfs_file_isdir(path) == 0)
if ((amode & R_OK) && !(st.st_mode & S_IRUSR))
{
flags |= O_DIRECTORY;
rt_set_errno(-EACCES);
return -1;
}
if (amode & R_OK)
if ((amode & W_OK) && !(st.st_mode & S_IWUSR))
{
flags |= O_RDONLY;
rt_set_errno(-EACCES);
return -1;
}
if (amode & W_OK)
if ((amode & X_OK) && !(st.st_mode & S_IXUSR))
{
flags |= O_WRONLY;
rt_set_errno(-EACCES);
return -1;
}
if (amode & X_OK)
{
flags |= O_EXEC;
}
fd = open(path, flags, 0);
if (fd >= 0)
{
ret = 0;
close(fd);
}
return ret;
return 0;
}
/**
* this function is a POSIX compliant version, which will set current
* working directory.

View File

@@ -0,0 +1,494 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
*/
#include <dfs_seq_file.h>
#include <dfs_dentry.h>
#define DBG_TAG "DFS.seq"
#define DBG_LVL DBG_WARNING
#include <rtdbg.h>
#ifndef PAGE_SIZE
#define PAGE_SIZE 4096
#endif
/* mark the output buffer as full; dfs_seq_is_full() then reports the
 * overflow so the caller can retry with a bigger buffer */
static void dfs_seq_overflow(struct dfs_seq_file *seq)
{
    seq->count = seq->size;
}
/* zero-initialized allocation for the seq output buffer */
static void *dfs_seq_alloc(unsigned long size)
{
    return rt_calloc(1, size);
}
/**
 * Attach a seq-file state object to @file using iterator @ops.
 * The output buffer itself is allocated lazily on first read/traverse.
 * Returns 0 on success, -EINVAL for missing ops, -ENOMEM on allocation
 * failure.
 */
int dfs_seq_open(struct dfs_file *file, const struct dfs_seq_ops *ops)
{
    struct dfs_seq_file *seq;
    if (!ops)
    {
        LOG_E("dfs_seq_open: ops = null, pathname: %s\n", file->dentry->pathname);
        return -EINVAL;
    }
    if (file->data)
    {
        /* NOTE(review): the old file->data is overwritten below without
         * being freed -- confirm no caller reaches here with live data */
        LOG_W("dfs_seq_open: file->data != null\n");
    }
    seq = rt_calloc(1, sizeof(struct dfs_seq_file));
    if (!seq)
        return -ENOMEM;
    file->data = seq;
    rt_mutex_init(&seq->lock, "dfs_seq", RT_IPC_FLAG_PRIO);
    seq->ops = ops;
    seq->file = file;
    return 0;
}
/**
 * Re-generate records from the beginning until stream position @offset
 * is reached, leaving seq->from/seq->count describing the buffered data
 * at that position. Returns 0 on success, -ENOMEM on allocation
 * failure, or -EAGAIN after growing the buffer (caller must retry).
 */
static int dfs_seq_traverse(struct dfs_seq_file *seq, off_t offset)
{
    off_t pos = 0;
    int error = 0;
    void *p;
    seq->index = 0;
    seq->count = seq->from = 0;
    if (!offset)
        return 0;
    /* allocate the output buffer lazily */
    if (!seq->buf)
    {
        seq->buf = dfs_seq_alloc(seq->size = PAGE_SIZE);
        if (!seq->buf)
            return -ENOMEM;
    }
    p = seq->ops->start(seq, &seq->index);
    while (p)
    {
        error = seq->ops->show(seq, p);
        if (error < 0)
            break;
        if (error)
        {
            /* positive return from show() means "skip this record" */
            error = 0;
            seq->count = 0;
        }
        if (dfs_seq_is_full(seq))
            goto Eoverflow;
        p = seq->ops->next(seq, p, &seq->index);
        if (pos + seq->count > offset)
        {
            /* target position falls inside this record */
            seq->from = offset - pos;
            seq->count -= seq->from;
            break;
        }
        pos += seq->count;
        seq->count = 0;
        if (pos == offset)
            break;
    }
    seq->ops->stop(seq, p);
    return error;
Eoverflow:
    /* record did not fit: double the buffer and ask the caller to retry */
    seq->ops->stop(seq, p);
    rt_free(seq->buf);
    seq->count = 0;
    seq->buf = dfs_seq_alloc(seq->size <<= 1);
    return !seq->buf ? -ENOMEM : -EAGAIN;
}
/**
 * Read up to @size bytes of formatted seq output into @buf, starting at
 * stream position *@pos. Records are (re)generated on demand via the
 * iterator in seq->ops and the internal buffer grows as needed.
 * Returns the number of bytes copied, 0 at EOF, or a negative errno.
 */
ssize_t dfs_seq_read(struct dfs_file *file, void *buf, size_t size, off_t *pos)
{
    struct dfs_seq_file *seq = file->data;
    size_t copied = 0;
    size_t n;
    void *p;
    int err = 0;
    if (!size)
        return 0;
    rt_mutex_take(&seq->lock, RT_WAITING_FOREVER);
    /*
     * if request is to read from zero offset, reset iterator to first
     * record as it might have been already advanced by previous requests
     */
    if (*pos == 0)
    {
        seq->index = 0;
        seq->count = 0;
    }
    /* Don't assume ki_pos is where we left it */
    if (*pos != seq->read_pos)
    {
        /* re-walk from the start; -EAGAIN means the buffer was grown */
        while ((err = dfs_seq_traverse(seq, *pos)) == -EAGAIN)
            ;
        if (err)
        {
            /* With prejudice... */
            seq->read_pos = 0;
            seq->index = 0;
            seq->count = 0;
            goto Done;
        }
        else
        {
            seq->read_pos = *pos;
        }
    }
    /* grab buffer if we didn't have one */
    if (!seq->buf)
    {
        seq->buf = dfs_seq_alloc(seq->size = PAGE_SIZE);
        if (!seq->buf)
            goto Enomem;
    }
    // something left in the buffer - copy it out first
    if (seq->count)
    {
        n = seq->count > size ? size : seq->count;
        rt_memcpy((char *)buf + copied, seq->buf + seq->from, n);
        size -= n;
        seq->count -= n;
        seq->from += n;
        copied += n;
        if (seq->count) // hadn't managed to copy everything
            goto Done;
    }
    // get a non-empty record in the buffer
    seq->from = 0;
    p = seq->ops->start(seq, &seq->index);
    while (p)
    {
        err = seq->ops->show(seq, p);
        if (err < 0) // hard error
            break;
        if (err) // ->show() says "skip it"
            seq->count = 0;
        if (!seq->count)
        { // empty record
            p = seq->ops->next(seq, p, &seq->index);
            continue;
        }
        if (!dfs_seq_is_full(seq)) // got it
            goto Fill;
        // need a bigger buffer
        seq->ops->stop(seq, p);
        rt_free(seq->buf);
        seq->count = 0;
        seq->buf = dfs_seq_alloc(seq->size <<= 1);
        if (!seq->buf)
            goto Enomem;
        p = seq->ops->start(seq, &seq->index);
    }
    // EOF or an error
    seq->ops->stop(seq, p);
    seq->count = 0;
    goto Done;
Fill:
    // one non-empty record is in the buffer; if they want more,
    // try to fit more in, but in any case we need to advance
    // the iterator once for every record shown.
    while (1)
    {
        size_t offs = seq->count;
        off_t pos = seq->index;
        p = seq->ops->next(seq, p, &seq->index);
        if (pos == seq->index)
        {
            /* defensive: a broken next() would loop forever otherwise */
            LOG_W(".next function %p did not update position index\n", seq->ops->next);
            seq->index++;
        }
        if (!p) // no next record for us
            break;
        if (seq->count >= size)
            break;
        err = seq->ops->show(seq, p);
        if (err > 0)
        { // ->show() says "skip it"
            seq->count = offs;
        }
        else if (err || dfs_seq_is_full(seq))
        {
            /* roll back the partial record; it will be re-shown next call */
            seq->count = offs;
            break;
        }
    }
    seq->ops->stop(seq, p);
    n = seq->count > size ? size : seq->count;
    rt_memcpy((char *)buf + copied, seq->buf, n);
    size -= n;
    copied += n;
    seq->count -= n;
    seq->from = n;
Done:
    if (!copied)
    {
        copied = seq->count ? -EFAULT : err;
    }
    else
    {
        *pos += copied;
        seq->read_pos += copied;
    }
    rt_mutex_release(&seq->lock);
    return copied;
Enomem:
    err = -ENOMEM;
    goto Done;
}
/**
 * Seek the seq stream: SEEK_SET and SEEK_CUR only. Seeking to a new
 * position re-traverses the records from the beginning; on traversal
 * failure the stream is reset and 0 is returned. Returns the new
 * position or -EINVAL for an unsupported whence / negative offset.
 */
ssize_t dfs_seq_lseek(struct dfs_file *file, off_t offset, int whence)
{
    struct dfs_seq_file *seq = file->data;
    off_t retval = -EINVAL;
    rt_mutex_take(&seq->lock, RT_WAITING_FOREVER);
    switch (whence)
    {
    case SEEK_CUR:
        offset += file->fpos;
        /* fallthrough: SEEK_CUR reduces to SEEK_SET on the new offset */
    case SEEK_SET:
        if (offset < 0)
            break;
        retval = offset;
        if (offset != seq->read_pos)
        {
            while ((retval = dfs_seq_traverse(seq, offset)) == -EAGAIN);
            if (retval)
            {
                /* with extreme prejudice... */
                retval = 0;
                seq->read_pos = 0;
                seq->index = 0;
                seq->count = 0;
            }
            else
            {
                seq->read_pos = offset;
                retval = offset;
            }
        }
    }
    rt_mutex_release(&seq->lock);
    return retval;
}
int dfs_seq_release(struct dfs_file *file)
{
struct dfs_seq_file *seq = file->data;
if (seq)
{
rt_mutex_detach(&seq->lock);
if (seq->buf)
{
rt_free(seq->buf);
}
rt_free(seq);
}
return 0;
}
/**
 * vsnprintf into the output buffer; marks the buffer overflowed when
 * the formatted text does not fit.
 */
void dfs_seq_vprintf(struct dfs_seq_file *seq, const char *f, va_list args)
{
    int len;
    if (seq->count < seq->size)
    {
        len = vsnprintf(seq->buf + seq->count, seq->size - seq->count, f, args);
        /* NOTE(review): vsnprintf may return a negative value on an
         * encoding error, which would shrink seq->count here -- confirm
         * this is acceptable for the target libc */
        if (seq->count + len < seq->size)
        {
            seq->count += len;
            return;
        }
    }
    dfs_seq_overflow(seq);
}
/* printf-style convenience wrapper around dfs_seq_vprintf() */
void dfs_seq_printf(struct dfs_seq_file *seq, const char *f, ...)
{
    va_list ap;

    va_start(ap, f);
    dfs_seq_vprintf(seq, f, ap);
    va_end(ap);
}
/**
 * append a single character to the output buffer; silently dropped
 * when the buffer is already full
 */
void dfs_seq_putc(struct dfs_seq_file *seq, char c)
{
    if (seq->count >= seq->size)
    {
        return;
    }
    seq->buf[seq->count] = c;
    seq->count += 1;
}
/**
 * append a NUL-terminated string to the output buffer; marks the
 * buffer overflowed when the string does not fit
 */
void dfs_seq_puts(struct dfs_seq_file *seq, const char *s)
{
    size_t len = strlen(s);

    if (seq->count + len < seq->size)
    {
        rt_memcpy(seq->buf + seq->count, s, len);
        seq->count += len;
    }
    else
    {
        dfs_seq_overflow(seq);
    }
}
/**
 * append @len arbitrary bytes to the output buffer;
 * returns 0 on success, -1 (and marks overflow) when they do not fit
 */
int dfs_seq_write(struct dfs_seq_file *seq, const void *data, size_t len)
{
    if (seq->count + len >= seq->size)
    {
        dfs_seq_overflow(seq);
        return -1;
    }
    rt_memcpy(seq->buf + seq->count, data, len);
    seq->count += len;
    return 0;
}
/**
 * pad the current record with spaces up to seq->pad_until, then append
 * @c when it is non-zero; marks overflow when padding would exceed the
 * buffer
 */
void dfs_seq_pad(struct dfs_seq_file *seq, char c)
{
    int pad = seq->pad_until - seq->count;

    if (pad > 0)
    {
        /* seq->count + pad == pad_until; refuse to pad past the buffer */
        if (seq->count + pad > seq->size)
        {
            dfs_seq_overflow(seq);
            return;
        }
        rt_memset(seq->buf + seq->count, ' ', pad);
        seq->count += pad;
    }
    if (c != '\0')
    {
        dfs_seq_putc(seq, c);
    }
}
#if 1
/* test demo */
/* fixture records iterated by the seq_test_* callbacks below */
static char *txt[4] = {
    "text1",
    "text2",
    "text3",
    "text4",
};
/* demo iterator: position *index (seq->index) selects the fixture record */
static void *seq_test_start(struct dfs_seq_file *seq, off_t *index)
{
    off_t pos = *index;

    return (pos >= 0 && pos < 4) ? txt[pos] : RT_NULL;
}
/* demo iterator: nothing to release per traversal */
static void seq_test_stop(struct dfs_seq_file *seq, void *data)
{
}
/* demo iterator: advance *index (seq->index) and return the next record */
static void *seq_test_next(struct dfs_seq_file *seq, void *data, off_t *index)
{
    off_t pos = *index + 1;

    *index = pos;
    return (pos >= 0 && pos < 4) ? txt[pos] : RT_NULL;
}
/* demo show(): exercises each seq output helper on one fixture record */
static int seq_test_show(struct dfs_seq_file *seq, void *data)
{
    const char *text = (const char *)data;
    dfs_seq_setwidth(seq, 20);
    dfs_seq_puts(seq, "puts ");
    dfs_seq_putc(seq, 'c');
    dfs_seq_write(seq, " write", 6);
    dfs_seq_printf(seq, " %s", text);
    /* pad the record out to the width set above, no trailing char */
    dfs_seq_pad(seq, 0);
    return 0;
}
/* iterator callbacks wired into the seq_test msh command */
static const struct dfs_seq_ops _test_ops = {
    .start = seq_test_start,
    .stop = seq_test_stop,
    .next = seq_test_next,
    .show = seq_test_show,
};
/**
 * msh command exercising the seq-file demo iterator.
 * usage: seq_test [pos] [read_len]
 */
static int dfs_seq_test(int argc, char **argv)
{
    struct dfs_file file = {0};
    int ret = dfs_seq_open(&file, &_test_ops);
    if (ret == 0)
    {
        char buf[256] = {0};
        off_t pos = (argc > 1) ? atoi(argv[1]) : 0;
        ssize_t len = (argc > 2) ? atoi(argv[2]) : 255;
        if (len < 0)
        {
            /* robustness: a negative request would become a huge size_t */
            len = 0;
        }
        if (len > 255)
        {
            len = 255;
            /* BUG fix: the old format string had one %d but was passed
             * two arguments (256, len) */
            rt_kprintf("buf len is %d, max read is %d\n", 256, (int)len);
        }
        len = dfs_seq_read(&file, buf, len, &pos);
        if (len >= 0)
        {
            /* BUG fix: a negative return used to index buf out of bounds */
            buf[len] = '\0';
            rt_kprintf("show: \"%s\" len: %d\n", buf, len);
        }
        dfs_seq_release(&file);
    }
    return 0;
}
MSH_CMD_EXPORT_ALIAS(dfs_seq_test, seq_test, seq_test[pos][read_len]);
#endif

View File

@@ -10,6 +10,9 @@
#include <dfs_file.h>
#include <dfs_mnt.h>
#ifdef RT_USING_PAGECACHE
#include "dfs_pcache.h"
#endif
#define DBG_TAG "DFS.vnode"
#define DBG_LVL DBG_WARNING
@@ -58,7 +61,12 @@ int dfs_vnode_destroy(struct dfs_vnode* vnode)
if (rt_atomic_load(&(vnode->ref_count)) == 1)
{
LOG_I("free a vnode: %p", vnode);
#ifdef RT_USING_PAGECACHE
if (vnode->aspace)
{
dfs_aspace_destroy(vnode->aspace);
}
#endif
if (vnode->mnt)
{
DLOG(msg, "vnode", vnode->mnt->fs_ops->name, DLOG_MSG, "fs_ops->free_vnode");
@@ -106,7 +114,12 @@ void dfs_vnode_unref(struct dfs_vnode *vnode)
{
rt_atomic_sub(&(vnode->ref_count), 1);
DLOG(note, "vnode", "vnode ref_count=%d", rt_atomic_load(&(vnode->ref_count)));
#ifdef RT_USING_PAGECACHE
if (vnode->aspace)
{
dfs_aspace_destroy(vnode->aspace);
}
#endif
if (rt_atomic_load(&(vnode->ref_count)) == 0)
{
LOG_I("free a vnode: %p", vnode);

View File

@@ -2008,12 +2008,22 @@ static int job_control(struct tty_struct *tty)
return __tty_check_change(tty, SIGTTIN);
}
static struct rt_wqueue *_wait_queue_current_get(struct tty_struct *tty)
{
struct rt_lwp *lwp;
lwp = lwp_self();
if (!lwp || !lwp->tty)
lwp = RT_NULL;
return wait_queue_get(lwp, tty);
}
static int n_tty_read(struct dfs_file *fd, void *buf, size_t count)
{
int level = 0;
char *b = (char *)buf;
struct tty_struct *tty = RT_NULL;
struct rt_lwp *lwp = RT_NULL;
struct rt_wqueue *wq = RT_NULL;
int wait_ret = 0;
int retval = 0;
@@ -2031,8 +2041,7 @@ static int n_tty_read(struct dfs_file *fd, void *buf, size_t count)
struct n_tty_data *ldata = tty->disc_data;
lwp = (struct rt_lwp *)(rt_thread_self()->lwp);
wq = wait_queue_get(lwp, tty);
wq = _wait_queue_current_get(tty);
while(count)
{
@@ -2171,15 +2180,13 @@ static int n_tty_poll(struct dfs_file *fd, struct rt_pollreq *req)
int mask = POLLOUT;
struct tty_struct *tty = RT_NULL;
struct rt_wqueue *wq = RT_NULL;
struct rt_lwp *lwp = RT_NULL;
tty = (struct tty_struct *)fd->vnode->data;
RT_ASSERT(tty != RT_NULL);
RT_ASSERT(tty->init_flag == TTY_INIT_FLAG_INITED);
lwp = (struct rt_lwp *)(rt_thread_self()->lwp);
wq = wait_queue_get(lwp, tty);
wq = _wait_queue_current_get(tty);
rt_poll_add(wq, req);
level = rt_hw_interrupt_disable();

View File

@@ -15,6 +15,7 @@
#include <rtthread.h>
#include <tty.h>
#include <tty_ldisc.h>
#include <shell.h>
#if defined(RT_USING_POSIX_DEVIO)
#include <termios.h>
@@ -272,9 +273,97 @@ static int tiocsctty(struct tty_struct *tty, int arg)
return 0;
}
static int tiocswinsz(struct tty_struct *tty, struct winsize *p_winsize)
{
rt_kprintf("\x1b[8;%d;%dt", p_winsize->ws_col, p_winsize->ws_row);
return 0;
}
static int tiocgwinsz(struct tty_struct *tty, struct winsize *p_winsize)
{
if(rt_thread_self() != rt_thread_find(FINSH_THREAD_NAME))
{
/* only can be used in tshell thread; otherwise, return default size */
p_winsize->ws_col = 80;
p_winsize->ws_row = 24;
}
else
{
#define _TIO_BUFLEN 20
char _tio_buf[_TIO_BUFLEN];
unsigned char cnt1, cnt2, cnt3, i;
char row_s[4], col_s[4];
char *p;
rt_memset(_tio_buf, 0, _TIO_BUFLEN);
/* send the command to terminal for getting the window size of the terminal */
rt_kprintf("\033[18t");
/* waiting for the response from the terminal */
i = 0;
while(i < _TIO_BUFLEN)
{
_tio_buf[i] = finsh_getchar();
if(_tio_buf[i] != 't')
{
i ++;
}
else
{
break;
}
}
if(i == _TIO_BUFLEN)
{
/* buffer overloaded, and return default size */
p_winsize->ws_col = 80;
p_winsize->ws_row = 24;
return 0;
}
/* interpreting data eg: "\033[8;1;15t" which means row is 1 and col is 15 (unit: size of ONE character) */
rt_memset(row_s,0,4);
rt_memset(col_s,0,4);
cnt1 = 0;
while(cnt1 < _TIO_BUFLEN && _tio_buf[cnt1] != ';')
{
cnt1++;
}
cnt2 = ++cnt1;
while(cnt2 < _TIO_BUFLEN && _tio_buf[cnt2] != ';')
{
cnt2++;
}
p = row_s;
while(cnt1 < cnt2)
{
*p++ = _tio_buf[cnt1++];
}
p = col_s;
cnt2++;
cnt3 = rt_strlen(_tio_buf) - 1;
while(cnt2 < cnt3)
{
*p++ = _tio_buf[cnt2++];
}
/* load the window size date */
p_winsize->ws_col = atoi(col_s);
p_winsize->ws_row = atoi(row_s);
#undef _TIO_BUFLEN
}
p_winsize->ws_xpixel = 0;/* unused */
p_winsize->ws_ypixel = 0;/* unused */
return 0;
}
static int tty_ioctl(struct dfs_file *fd, int cmd, void *args)
{
int ret = 0;
void *p = (void *)args;
struct tty_struct *tty = RT_NULL;
struct tty_struct *real_tty = RT_NULL;
struct tty_ldisc *ld = RT_NULL;
@@ -295,6 +384,10 @@ static int tty_ioctl(struct dfs_file *fd, int cmd, void *args)
{
case TIOCSCTTY:
return tiocsctty(real_tty, 1);
case TIOCGWINSZ:
return tiocgwinsz(real_tty, p);
case TIOCSWINSZ:
return tiocswinsz(real_tty, p);
}
ld = tty->ldisc;

View File

@@ -14,6 +14,7 @@
#include <stdint.h>
#include <unistd.h>
#include <dfs_file.h>
#include <dfs.h>
#include "poll.h"
#include "eventfd.h"

View File

@@ -68,4 +68,20 @@ if RT_USING_LWP
bool "The unix98 PTY debug output"
default n
endif
menuconfig RT_USING_LDSO
bool "LDSO: dynamic load shared objects"
default n
if RT_USING_LDSO
config ELF_DEBUG_ENABLE
bool "Enable ldso debug"
default n
config ELF_LOAD_RANDOMIZE
bool "Enable random load address"
default n
endif
endif

View File

@@ -31,24 +31,22 @@ int arch_user_space_init(struct rt_lwp *lwp)
{
size_t *mmu_table;
mmu_table = (size_t *)rt_pages_alloc_ext(0, PAGE_ANY_AVAILABLE);
if (!mmu_table)
mmu_table = rt_hw_mmu_pgtbl_create();
if (mmu_table)
{
lwp->end_heap = USER_HEAP_VADDR;
lwp->aspace = rt_aspace_create(
(void *)USER_VADDR_START, USER_VADDR_TOP - USER_VADDR_START, mmu_table);
if (!lwp->aspace)
{
return -RT_ERROR;
}
}
else
{
return -RT_ENOMEM;
}
lwp->end_heap = USER_HEAP_VADDR;
memset(mmu_table, 0, ARCH_PAGE_SIZE);
rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, mmu_table, ARCH_PAGE_SIZE);
lwp->aspace = rt_aspace_create(
(void *)USER_VADDR_START, USER_VADDR_TOP - USER_VADDR_START, mmu_table);
if (!lwp->aspace)
{
return -RT_ERROR;
}
return 0;
}

View File

@@ -17,10 +17,11 @@
#ifdef ARCH_MM_MMU
#define USER_VADDR_TOP 0x0001000000000000UL
#define USER_HEAP_VEND 0x0000ffffB0000000UL
#define USER_HEAP_VADDR 0x0000ffff80000000UL
#define USER_HEAP_VADDR (0x0000ffff40000000UL)
#define USER_HEAP_VEND USER_STACK_VSTART
#define USER_STACK_VSTART 0x0000ffff70000000UL
#define USER_STACK_VEND USER_HEAP_VADDR
#define USER_STACK_VEND (USER_STACK_VSTART + 0x10000000)
#define USER_ARG_VADDR USER_STACK_VEND
#define LDSO_LOAD_VADDR 0x60000000UL
#define USER_VADDR_START 0x00200000UL
#define USER_LOAD_VADDR USER_VADDR_START

View File

@@ -50,10 +50,12 @@
arch_start_umode:
mov sp, x3
mov x4, #(SPSR_Mode(0) | SPSR_A64)
mov x3, x2 ;/* user stack top */
msr daifset, #3
dsb sy
mrs x30, sp_el0
/* user stack top */
msr sp_el0, x2
mov x3, x2
msr spsr_el1, x4
msr elr_el1, x1
eret

View File

@@ -23,13 +23,11 @@
#include <lwp_arch.h>
#include <lwp_user_mm.h>
#define KPTE_START (KERNEL_VADDR_START >> ARCH_SECTION_SHIFT)
int arch_user_space_init(struct rt_lwp *lwp)
{
size_t *mmu_table;
mmu_table = (size_t *)rt_pages_alloc(2);
mmu_table = rt_hw_mmu_pgtbl_create();
if (!mmu_table)
{
return -RT_ENOMEM;
@@ -37,9 +35,6 @@ int arch_user_space_init(struct rt_lwp *lwp)
lwp->end_heap = USER_HEAP_VADDR;
rt_memcpy(mmu_table + KPTE_START, (size_t *)rt_kernel_space.page_table + KPTE_START, ARCH_PAGE_SIZE);
rt_memset(mmu_table, 0, 3 * ARCH_PAGE_SIZE);
rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, mmu_table, 4 * ARCH_PAGE_SIZE);
lwp->aspace = rt_aspace_create((void *)USER_VADDR_START, USER_VADDR_TOP - USER_VADDR_START, mmu_table);
if (!lwp->aspace)
@@ -87,7 +82,7 @@ void arch_user_space_free(struct rt_lwp *lwp)
rt_aspace_delete(lwp->aspace);
/* must be freed after aspace delete, pgtbl is required for unmap */
rt_pages_free(pgtbl, 2);
rt_hw_mmu_pgtbl_delete(pgtbl);
lwp->aspace = RT_NULL;
}
else

View File

@@ -21,7 +21,7 @@
#define USER_STACK_VSTART 0x70000000UL
#define USER_STACK_VEND USER_HEAP_VADDR
#define LDSO_LOAD_VADDR 0x60000000UL
#define USER_VADDR_START 0x00100000UL
#define USER_VADDR_START 0x00010000UL
#define USER_LOAD_VADDR USER_VADDR_START
#ifdef __cplusplus

View File

@@ -43,7 +43,12 @@ arch_start_umode:
msr spsr, r9
mov sp, r3
mov r3, r2 ;/* user stack top */
/* set user stack top */
cps #Mode_SYS
mov sp, r2
cps #Mode_SVC
mov r3, r2
/* set data address. */
movs pc, r1

View File

@@ -93,17 +93,13 @@ int arch_user_space_init(struct rt_lwp *lwp)
{
rt_ubase_t *mmu_table;
mmu_table = (rt_ubase_t *)rt_pages_alloc_ext(0, PAGE_ANY_AVAILABLE);
mmu_table = rt_hw_mmu_pgtbl_create();
if (!mmu_table)
{
return -RT_ENOMEM;
}
lwp->end_heap = USER_HEAP_VADDR;
rt_memcpy(mmu_table, rt_kernel_space.page_table, ARCH_PAGE_SIZE);
rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, mmu_table, ARCH_PAGE_SIZE);
lwp->aspace = rt_aspace_create(
(void *)USER_VADDR_START, USER_VADDR_TOP - USER_VADDR_START, mmu_table);
if (!lwp->aspace)
@@ -129,7 +125,7 @@ void arch_user_space_free(struct rt_lwp *lwp)
rt_aspace_delete(lwp->aspace);
/* must be freed after aspace delete, pgtbl is required for unmap */
rt_pages_free(pgtbl, 0);
rt_hw_mmu_pgtbl_delete(pgtbl);
lwp->aspace = RT_NULL;
}
else

View File

@@ -153,6 +153,9 @@ struct process_aux *lwp_argscopy(struct rt_lwp *lwp, int argc, char **argv, char
int len;
size_t *args_k;
struct process_aux *aux;
size_t prot = PROT_READ | PROT_WRITE;
size_t flags = MAP_FIXED | MAP_PRIVATE;
size_t zero = 0;
for (i = 0; i < argc; i++)
{
@@ -179,9 +182,8 @@ struct process_aux *lwp_argscopy(struct rt_lwp *lwp, int argc, char **argv, char
return RT_NULL;
}
/* args = (int *)lwp_map_user(lwp, 0, size); */
args = (int *)lwp_map_user(lwp, (void *)(USER_VADDR_TOP - ARCH_PAGE_SIZE), size, 0);
if (args == RT_NULL)
args = lwp_mmap2(lwp, (void *)(USER_STACK_VEND), size, prot, flags, -1, 0);
if (args == RT_NULL || lwp_data_put(lwp, args, &zero, sizeof(zero)) != sizeof(zero))
{
return RT_NULL;
}
@@ -1417,3 +1419,37 @@ void lwp_uthread_ctx_restore(void)
thread = rt_thread_self();
thread->user_ctx.ctx = RT_NULL;
}
/* per-tick accounting of user/system/idle time for the current thread.
 * NOTE(review): the branch polarity below looks inverted -- the
 * !IS_USER_MODE path charges user_time while the IS_USER_MODE path
 * charges system/idle (and the idle thread never runs in user mode, so
 * idle would never be counted); confirm the intended semantics of
 * IS_USER_MODE() before relying on these statistics. */
void rt_update_process_times(void)
{
    struct rt_thread *thread;
#ifdef RT_USING_SMP
    struct rt_cpu* pcpu;
    pcpu = rt_cpu_self();
#endif
    thread = rt_thread_self();
    if (!IS_USER_MODE(thread))
    {
        thread->user_time += 1;
#ifdef RT_USING_SMP
        pcpu->cpu_stat.user += 1;
#endif
    }
    else
    {
        thread->system_time += 1;
#ifdef RT_USING_SMP
        if (thread == pcpu->idle_thread)
        {
            pcpu->cpu_stat.idle += 1;
        }
        else
        {
            pcpu->cpu_stat.system += 1;
        }
#endif
    }
}

View File

@@ -240,7 +240,9 @@ struct __pthread {
}
#endif
#define AUX_ARRAY_ITEMS_NR 6
#ifndef AUX_ARRAY_ITEMS_NR
#define AUX_ARRAY_ITEMS_NR 32
#endif
/* aux key */
#define AT_NULL 0

811
components/lwp/lwp_elf.c Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -427,7 +427,6 @@ static int _ipc_msg_fd_new(void *file)
#endif
return fd;
}

View File

@@ -700,7 +700,7 @@ pid_t waitpid(pid_t pid, int *status, int options)
/* delete from sibling list of its parent */
struct rt_lwp **lwp_node;
*status = lwp->lwp_ret;
lwp_data_put(this_lwp, status, &lwp->lwp_ret, sizeof(*status));
lwp_node = &this_lwp->first_child;
while (*lwp_node != lwp)
{

View File

@@ -156,7 +156,6 @@ static int _lwp_shmget(size_t key, size_t size, int create)
p->mem_obj.on_varea_open = on_shm_varea_open;
p->mem_obj.on_varea_close = on_shm_varea_close;
p->mem_obj.hint_free = NULL;
p->mem_obj.on_page_offload = NULL;
/* then insert it into the balancing binary tree */
node_key = (struct lwp_avl_struct *)rt_malloc(sizeof(struct lwp_avl_struct) * 2);

View File

@@ -1305,12 +1305,41 @@ rt_base_t sys_brk(void *addr)
void *sys_mmap2(void *addr, size_t length, int prot,
int flags, int fd, size_t pgoffset)
{
return lwp_mmap2(addr, length, prot, flags, fd, pgoffset);
sysret_t rc = 0;
long offset = 0;
/* aligned for user addr */
if ((rt_base_t)addr & ARCH_PAGE_MASK)
{
if (flags & MAP_FIXED)
rc = -EINVAL;
else
{
offset = (char *)addr - (char *)RT_ALIGN_DOWN((rt_base_t)addr, ARCH_PAGE_SIZE);
length += offset;
addr = (void *)RT_ALIGN_DOWN((rt_base_t)addr, ARCH_PAGE_SIZE);
}
}
if (rc == 0)
{
/* fix parameter passing (both along have same effect) */
if (fd == -1 || flags & MAP_ANONYMOUS)
{
fd = -1;
/* MAP_SHARED has no effect and treated as nothing */
flags &= ~MAP_SHARED;
flags |= MAP_PRIVATE | MAP_ANONYMOUS;
}
rc = (sysret_t)lwp_mmap2(lwp_self(), addr, length, prot, flags, fd, pgoffset);
}
return (char *)rc + offset;
}
sysret_t sys_munmap(void *addr, size_t length)
{
return lwp_munmap(addr);
return lwp_munmap(lwp_self(), addr, length);
}
void *sys_mremap(void *old_address, size_t old_size,
@@ -1999,17 +2028,6 @@ rt_weak long sys_clone(void *arg[])
return _sys_clone(arg);
}
int lwp_dup_user(rt_varea_t varea, void *arg);
static int _copy_process(struct rt_lwp *dest_lwp, struct rt_lwp *src_lwp)
{
int err;
dest_lwp->lwp_obj->source = src_lwp->aspace;
err = rt_aspace_traversal(src_lwp->aspace, lwp_dup_user, dest_lwp);
dest_lwp->lwp_obj->source = NULL;
return err;
}
static void lwp_struct_copy(struct rt_lwp *dst, struct rt_lwp *src)
{
#ifdef ARCH_MM_MMU
@@ -2106,8 +2124,8 @@ sysret_t _sys_fork(void)
self_lwp = lwp_self();
/* copy process */
if (_copy_process(lwp, self_lwp) != 0)
/* copy address space of process from this proc to forked one */
if (lwp_fork_aspace(lwp, self_lwp) != 0)
{
SET_ERRNO(ENOMEM);
goto fail;
@@ -4222,13 +4240,27 @@ sysret_t sys_getaddrinfo(const char *nodename,
SET_ERRNO(EFAULT);
goto exit;
}
#endif
k_nodename = (char *)kmem_get(len + 1);
if (!k_nodename)
{
SET_ERRNO(ENOMEM);
goto exit;
}
if (lwp_get_from_user(k_nodename, (void *)nodename, len + 1) != len + 1)
{
SET_ERRNO(EFAULT);
goto exit;
}
#else
k_nodename = rt_strdup(nodename);
if (!k_nodename)
{
SET_ERRNO(ENOMEM);
goto exit;
}
#endif
}
if (servname)
{
@@ -4239,13 +4271,27 @@ sysret_t sys_getaddrinfo(const char *nodename,
SET_ERRNO(EFAULT);
goto exit;
}
#endif
k_servname = (char *)kmem_get(len + 1);
if (!k_servname)
{
SET_ERRNO(ENOMEM);
goto exit;
}
if (lwp_get_from_user(k_servname, (void *)servname, len + 1) < 0)
{
SET_ERRNO(EFAULT);
goto exit;
}
#else
k_servname = rt_strdup(servname);
if (!k_servname)
{
SET_ERRNO(ENOMEM);
goto exit;
}
#endif
}
if (hints)
@@ -4300,15 +4346,28 @@ exit:
{
ret = GET_ERRNO();
}
#ifdef ARCH_MM_MMU
if (k_nodename)
{
kmem_put(k_nodename);
}
#else
if (k_nodename)
{
rt_free(k_nodename);
}
#endif
#ifdef ARCH_MM_MMU
if (k_servname)
{
kmem_put(k_servname);
}
#else
if (k_servname)
{
rt_free(k_servname);
}
#endif
if (k_hints)
{
rt_free(k_hints);
@@ -4324,7 +4383,7 @@ sysret_t sys_gethostbyname2_r(const char *name, int af, struct hostent *ret,
{
int ret_val = -1;
int sal_ret = -1 , sal_err = -1;
struct hostent sal_he;
struct hostent sal_he, sal_tmp;
struct hostent *sal_result = NULL;
char *sal_buf = NULL;
char *k_name = NULL;
@@ -4354,22 +4413,35 @@ sysret_t sys_gethostbyname2_r(const char *name, int af, struct hostent *ret,
SET_ERRNO(EFAULT);
goto __exit;
}
#endif
*result = ret;
sal_buf = (char *)malloc(HOSTENT_BUFSZ);
if (sal_buf == NULL)
k_name = (char *)kmem_get(len + 1);
if (!k_name)
{
SET_ERRNO(ENOMEM);
goto __exit;
}
if (lwp_get_from_user(k_name, (void *)name, len + 1) < 0)
{
SET_ERRNO(EFAULT);
goto __exit;
}
#else
k_name = rt_strdup(name);
if (k_name == NULL)
{
SET_ERRNO(ENOMEM);
goto __exit;
}
#endif
*result = ret;
sal_buf = (char *)malloc(HOSTENT_BUFSZ);
if (sal_buf == NULL)
{
SET_ERRNO(ENOMEM);
goto __exit;
}
/* get host by name in SAL */
sal_ret = sal_gethostbyname_r(k_name, &sal_he, sal_buf, HOSTENT_BUFSZ, &sal_result, &sal_err);
@@ -4386,6 +4458,28 @@ sysret_t sys_gethostbyname2_r(const char *name, int af, struct hostent *ret,
}
cnt = index + 1;
#ifdef ARCH_MM_MMU
/* update user space hostent */
lwp_put_to_user(buf, k_name, buflen - (ptr - buf));
lwp_memcpy(&sal_tmp, &sal_he, sizeof(sal_he));
sal_tmp.h_name = ptr;
ptr += rt_strlen(k_name);
sal_tmp.h_addr_list = (char**)ptr;
ptr += cnt * sizeof(char *);
index = 0;
while (sal_he.h_addr_list[index] != NULL)
{
sal_tmp.h_addr_list[index] = ptr;
lwp_memcpy(ptr, sal_he.h_addr_list[index], sal_he.h_length);
ptr += sal_he.h_length;
index++;
}
sal_tmp.h_addr_list[index] = NULL;
lwp_put_to_user(ret, &sal_tmp, sizeof(sal_tmp));
#else
/* update user space hostent */
ret->h_addrtype = sal_he.h_addrtype;
ret->h_length = sal_he.h_length;
@@ -4407,10 +4501,10 @@ sysret_t sys_gethostbyname2_r(const char *name, int af, struct hostent *ret,
index++;
}
ret->h_addr_list[index] = NULL;
#endif
ret_val = 0;
}
ret_val = 0;
__exit:
if (ret_val < 0)
{
@@ -4422,10 +4516,17 @@ __exit:
{
free(sal_buf);
}
#ifdef ARCH_MM_MMU
if (k_name)
{
kmem_put(k_name);
}
#else
if (k_name)
{
free(k_name);
}
#endif
return ret_val;
}

File diff suppressed because it is too large Load Diff

View File

@@ -7,6 +7,7 @@
* Date Author Notes
* 2019-10-28 Jesven first version
* 2021-02-12 lizhirui add 64-bit support for lwp_brk
* 2023-09-19 Shell add lwp_user_memory_remap_to_kernel
*/
#ifndef __LWP_USER_MM_H__
#define __LWP_USER_MM_H__
@@ -27,6 +28,8 @@ extern "C" {
#define LWP_MAP_FLAG_NONE 0x0000
#define LWP_MAP_FLAG_NOCACHE 0x0001
#define LWP_MAP_FLAG_MAP_FIXED 0x00010000ul
#define LWP_MAP_FLAG_PREFETCH 0x00020000ul
/**
* @brief Map files or devices into memory
@@ -41,7 +44,7 @@ extern "C" {
* @param pgoffset offset to fd in 4096 bytes unit
* @return void* the address is successful, otherwise return MAP_FAILED
*/
void* lwp_mmap2(void *addr, size_t length, int prot, int flags, int fd, off_t pgoffset);
void* lwp_mmap2(struct rt_lwp *lwp, void *addr, size_t length, int prot, int flags, int fd, off_t pgoffset);
/**
* @brief Unmap memory region in user space
@@ -51,7 +54,7 @@ void* lwp_mmap2(void *addr, size_t length, int prot, int flags, int fd, off_t pg
* @param length length in bytes of unmapping
* @return int errno
*/
int lwp_munmap(void *addr);
int lwp_munmap(struct rt_lwp *lwp, void *addr, size_t length);
/**
* @brief Test if address from user is accessible address by user
@@ -145,8 +148,8 @@ void lwp_unmap_user_space(struct rt_lwp *lwp);
int lwp_unmap_user(struct rt_lwp *lwp, void *va);
void *lwp_map_user(struct rt_lwp *lwp, void *map_va, size_t map_size, rt_bool_t text);
size_t lwp_user_strlen(const char *s);
size_t lwp_user_strlen_ext(struct rt_lwp *lwp, const char *s);
void lwp_free_command_line_args(char** argv);
char** lwp_get_command_line_args(struct rt_lwp *lwp);
rt_varea_t lwp_map_user_varea(struct rt_lwp *lwp, void *map_va, size_t map_size);
@@ -158,6 +161,11 @@ int lwp_unmap_user_phy(struct rt_lwp *lwp, void *va);
rt_base_t lwp_brk(void *addr);
size_t lwp_user_strlen(const char *s);
size_t lwp_user_strlen_ext(struct rt_lwp *lwp, const char *s);
int lwp_fork_aspace(struct rt_lwp *dest_lwp, struct rt_lwp *src_lwp);
void lwp_data_cache_flush(struct rt_lwp *lwp, void *vaddr, size_t size);
static inline void *_lwp_v2p(struct rt_lwp *lwp, void *vaddr)
@@ -173,6 +181,49 @@ static inline void *lwp_v2p(struct rt_lwp *lwp, void *vaddr)
return paddr;
}
/**
* @brief Remapping user space memory region to kernel
*
* @warning the remapped region in kernel should be unmapped after usage
*
* @param lwp target process
* @param uaddr user space address where the data writes to
* @param length the bytes to redirect
* @return void * the redirection address in kernel space
*/
void *lwp_user_memory_remap_to_kernel(rt_lwp_t lwp, void *uaddr, size_t length);
rt_inline rt_size_t lwp_user_mm_flag_to_kernel(int flags)
{
rt_size_t k_flags = 0;
if (flags & MAP_FIXED)
k_flags |= MMF_MAP_FIXED;
if (flags & (MAP_PRIVATE | MAP_ANON | MAP_ANONYMOUS))
k_flags |= MMF_MAP_PRIVATE;
if (flags & MAP_SHARED)
k_flags |= MMF_MAP_SHARED;
return k_flags;
}
rt_inline rt_size_t lwp_user_mm_attr_to_kernel(int prot)
{
rt_size_t k_attr = 0;
#ifdef IMPL_MPROTECT
if ((prot & PROT_EXEC) || (prot & PROT_WRITE) ||
((prot & PROT_READ) && (prot & PROT_WRITE)))
k_attr = MMU_MAP_U_RWCB;
else if (prot == PROT_NONE)
k_attr = MMU_MAP_K_RWCB;
else
k_attr = MMU_MAP_U_ROCB;
#else
k_attr = MMU_MAP_U_RWCB;
#endif /* IMPL_MPROTECT */
return k_attr;
}
#ifdef __cplusplus
}
#endif

View File

@@ -13,6 +13,9 @@
#include <rtthread.h>
#include <errno.h>
#include <stdlib.h>
typedef long sysret_t;
struct rt_syscall_def
@@ -40,4 +43,36 @@ struct rt_syscall_def
#define _SYS_WRAP(func) ({int _ret = func; _ret < 0 ? GET_ERRNO() : _ret;})
rt_inline sysret_t lwp_errno_to_posix(rt_err_t error)
{
sysret_t posix_rc;
switch (labs(error))
{
case RT_EOK:
posix_rc = 0;
break;
case RT_ETIMEOUT:
posix_rc = -ETIMEDOUT;
break;
case RT_EINVAL:
posix_rc = -EINVAL;
break;
case RT_ENOENT:
posix_rc = -ENOENT;
break;
case RT_ENOSPC:
posix_rc = -ENOSPC;
break;
case RT_EPERM:
posix_rc = -EPERM;
break;
default:
posix_rc = -1;
break;
}
return posix_rc;
}
#endif /* __SYSCALL_DATA_H__ */

View File

@@ -1,14 +1,14 @@
import os
from building import *
objs = []
src = []
objs = []
if GetDepend('ARCH_ARM_CORTEX_A') or GetDepend('ARCH_ARMV8') or GetDepend('ARCH_RISCV64'):
cwd = GetCurrentDir()
src += ['avl_adpt.c', 'ioremap.c', 'mm_aspace.c', 'mm_fault.c', 'mm_kmem.c', 'mm_object.c', 'mm_page.c']
if GetDepend('RT_USING_MEMBLOCK'):
src += ['mm_memblock.c']
src = Glob('*.c') + Glob('*_gcc.S')
if not GetDepend('RT_USING_MEMBLOCK'):
SrcRemove(src, ['mm_memblock.c'])
CPPPATH = [cwd]
group = DefineGroup('mm', src, depend = ['ARCH_MM_MMU'], CPPPATH = CPPPATH)

620
components/mm/mm_anon.c Normal file

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -1,23 +1,26 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2022-11-14 WangXiaoyao the first version
* 2023-08-17 Shell Add unmap_range for MAP_PRIVATE
*/
#ifndef __MM_ASPACE_H__
#define __MM_ASPACE_H__
#include <rthw.h>
#include <rtthread.h>
#include <stddef.h>
#include "avl_adpt.h"
#include "mm_fault.h"
#include "mm_flag.h"
#include <stddef.h>
#include <string.h>
#define MM_PAGE_SHIFT 12
#define MM_PA_TO_OFF(pa) ((uintptr_t)(pa) >> MM_PAGE_SHIFT)
#define PV_OFFSET (rt_kmem_pvoff())
@@ -55,6 +58,7 @@ typedef struct rt_aspace
struct _aspace_tree tree;
struct rt_mutex bst_lock;
struct rt_mem_obj *private_object;
rt_uint64_t asid;
} *rt_aspace_t;
@@ -97,7 +101,25 @@ typedef struct rt_mem_obj
/* do post close bushiness like def a ref */
void (*on_varea_close)(struct rt_varea *varea);
void (*on_page_offload)(struct rt_varea *varea, void *vaddr, rt_size_t size);
/* do preparation for address space modification of varea */
rt_err_t (*on_varea_shrink)(struct rt_varea *varea, void *new_vaddr, rt_size_t size);
/* do preparation for address space modification of varea */
rt_err_t (*on_varea_expand)(struct rt_varea *varea, void *new_vaddr, rt_size_t size);
/**
* this is like an on_varea_open() to `subset`, and an on_varea_shrink() to `existed`
* while resource can migrate from `existed` to `subset` at the same time
*/
rt_err_t (*on_varea_split)(struct rt_varea *existed, void *unmap_start,
rt_size_t unmap_len, struct rt_varea *subset);
/**
* this is like a on_varea_expand() to `merge_to` and on_varea_close() to `merge_from`
* while resource can migrate from `merge_from` to `merge_to` at the same time
*/
rt_err_t (*on_varea_merge)(struct rt_varea *merge_to, struct rt_varea *merge_from);
/* dynamic mem_obj API */
void (*page_read)(struct rt_varea *varea, struct rt_aspace_io_msg *msg);
void (*page_write)(struct rt_varea *varea, struct rt_aspace_io_msg *msg);
const char *(*get_name)(rt_varea_t varea);
} *rt_mem_obj_t;
@@ -110,6 +132,8 @@ enum rt_mmu_cntl
MMU_CNTL_CACHE,
MMU_CNTL_READONLY,
MMU_CNTL_READWRITE,
MMU_CNTL_OFFLOAD,
MMU_CNTL_INSTALL,
MMU_CNTL_DUMMY_END,
};
@@ -122,8 +146,11 @@ enum rt_mmu_cntl
#define WR_UNLOCK(aspace) \
rt_thread_self() ? rt_mutex_release(&(aspace)->bst_lock) : 0
/* FIXME: fix rd_lock */
#define RD_LOCK(aspace) WR_LOCK(aspace)
#define RD_UNLOCK(aspace) WR_UNLOCK(aspace)
#define RDWR_LOCK(aspace) ((void)aspace)
#define RDWR_UNLOCK(aspace) ((void)aspace)
rt_aspace_t rt_aspace_create(void *start, rt_size_t length, void *pgtbl);
@@ -176,14 +203,50 @@ int rt_aspace_map_phy_static(rt_aspace_t aspace, rt_varea_t varea,
rt_mm_va_hint_t hint, rt_size_t attr, rt_size_t pa_off,
void **ret_va);
/** map a private memory region to aspace */
int rt_aspace_map_private(rt_aspace_t aspace, void **addr, rt_size_t length,
rt_size_t attr, mm_flag_t flags);
/**
* @brief Remove any mappings overlap the range [addr, addr + bytes)
* @brief Remove mappings containing address specified by addr
*
* @param aspace
* @param aspace target virtual address space
* @param addr addresses that mapping to be removed contains
* @return int rt errno
*/
int rt_aspace_unmap(rt_aspace_t aspace, void *addr);
/**
* @brief Remove pages of existed mappings in the range [addr, addr+length)
* Length is automatically rounded up to the next multiple of the page size.
*
* @param aspace target virtual address space
* @param addr the beginning of the range of pages to be unmapped
* @param length length of range in bytes
* @return int rt errno
*/
int rt_aspace_unmap_range(rt_aspace_t aspace, void *addr, size_t length);
/**
* @brief Remove pages of existed mappings in the range [addr, addr+length)
* Length is automatically rounded up to the next multiple of the page size.
*
* @param aspace target virtual address space
* @param addr the beginning of the range of pages to be unmapped
* @param length length of range in bytes
* @return int rt errno
*/
int rt_aspace_unmap_range(rt_aspace_t aspace, void *addr, size_t length);
/**
* @brief Remove pages of existed mappings in the range [addr, addr+length)
* Length is automatically rounded up to the next multiple of the page size.
*
* @param aspace target virtual address space
* @param addr
* @return int
*/
int rt_aspace_unmap(rt_aspace_t aspace, void *addr);
int rt_aspace_unmap_range(rt_aspace_t aspace, void *addr, size_t length);
int rt_aspace_control(rt_aspace_t aspace, void *addr, enum rt_mmu_cntl cmd);
@@ -191,14 +254,28 @@ int rt_aspace_load_page(rt_aspace_t aspace, void *addr, rt_size_t npage);
int rt_aspace_offload_page(rt_aspace_t aspace, void *addr, rt_size_t npage);
rt_err_t rt_aspace_page_put(rt_aspace_t aspace, void *page_va, void *buffer);
rt_err_t rt_aspace_page_get(rt_aspace_t aspace, void *page_va, void *buffer);
int rt_aspace_traversal(rt_aspace_t aspace,
int (*fn)(rt_varea_t varea, void *arg), void *arg);
void rt_aspace_print_all(rt_aspace_t aspace);
rt_base_t rt_aspace_count_vsz(rt_aspace_t aspace);
rt_varea_t rt_aspace_query(rt_aspace_t aspace, void *vaddr);
rt_err_t rt_aspace_duplicate_locked(rt_aspace_t src, rt_aspace_t dst);
rt_err_t rt_aspace_fork(rt_aspace_t *psrc, rt_aspace_t *pdst);
rt_err_t rt_aspace_compare(rt_aspace_t src, rt_aspace_t dst);
/**
* @brief Map one page to varea
*
* @note caller should take the read/write lock
*
* @param varea target varea
* @param addr user address
* @param page the page frame to be mapped
@@ -209,6 +286,8 @@ int rt_varea_map_page(rt_varea_t varea, void *vaddr, void *page);
/**
* @brief Unmap one page in varea
*
* @note caller should take the read/write lock
*
* @param varea target varea
* @param addr user address
* @param page the page frame to be mapped
@@ -252,7 +331,16 @@ int rt_varea_unmap_range(rt_varea_t varea, void *vaddr, rt_size_t length);
*/
void rt_varea_pgmgr_insert(rt_varea_t varea, void *page_addr);
rt_ubase_t rt_kmem_pvoff(void);
rt_inline rt_mem_obj_t rt_mem_obj_create(rt_mem_obj_t source)
{
rt_mem_obj_t target;
target = rt_malloc(sizeof(*target));
if (target)
memcpy(target, source, sizeof(*target));
return target;
}
const rt_ubase_t rt_kmem_pvoff(void);
void rt_kmem_pvoff_set(rt_ubase_t pvoff);
@@ -260,6 +348,8 @@ int rt_kmem_map_phy(void *va, void *pa, rt_size_t length, rt_size_t attr);
void *rt_kmem_v2p(void *vaddr);
void *rt_kmem_p2v(void *paddr);
void rt_kmem_list(void);
#endif /* __MM_ASPACE_H__ */

View File

@@ -6,6 +6,7 @@
* Change Logs:
* Date Author Notes
* 2022-12-06 WangXiaoyao the first version
* 2023-08-19 Shell Support PRIVATE mapping and COW
*/
#include <rtthread.h>
@@ -23,27 +24,21 @@
#include <mmu.h>
#include <tlb.h>
#define UNRECOVERABLE 0
#define RECOVERABLE 1
static int _fetch_page(rt_varea_t varea, struct rt_aspace_fault_msg *msg)
{
int err = UNRECOVERABLE;
msg->response.status = MM_FAULT_STATUS_UNRECOVERABLE;
msg->response.vaddr = 0;
msg->response.size = 0;
int err = MM_FAULT_FIXABLE_FALSE;
if (varea->mem_obj && varea->mem_obj->on_page_fault)
{
varea->mem_obj->on_page_fault(varea, msg);
err = _varea_map_with_msg(varea, msg);
err = (err == RT_EOK ? RECOVERABLE : UNRECOVERABLE);
err = rt_varea_map_with_msg(varea, msg);
err = (err == RT_EOK ? MM_FAULT_FIXABLE_TRUE : MM_FAULT_FIXABLE_FALSE);
}
return err;
}
static int _read_fault(rt_varea_t varea, void *pa, struct rt_aspace_fault_msg *msg)
{
int err = UNRECOVERABLE;
int err = MM_FAULT_FIXABLE_FALSE;
if (msg->fault_type == MM_FAULT_TYPE_PAGE_FAULT)
{
RT_ASSERT(pa == ARCH_MAP_FAILED);
@@ -59,19 +54,37 @@ static int _read_fault(rt_varea_t varea, void *pa, struct rt_aspace_fault_msg *m
static int _write_fault(rt_varea_t varea, void *pa, struct rt_aspace_fault_msg *msg)
{
int err = UNRECOVERABLE;
if (msg->fault_type == MM_FAULT_TYPE_PAGE_FAULT)
rt_aspace_t aspace = varea->aspace;
int err = MM_FAULT_FIXABLE_FALSE;
if (rt_varea_is_private_locked(varea))
{
if (VAREA_IS_WRITABLE(varea) && (
msg->fault_type == MM_FAULT_TYPE_ACCESS_FAULT ||
msg->fault_type == MM_FAULT_TYPE_PAGE_FAULT))
{
RDWR_LOCK(aspace);
err = rt_varea_fix_private_locked(varea, pa, msg, RT_FALSE);
RDWR_UNLOCK(aspace);
if (err == MM_FAULT_FIXABLE_FALSE)
LOG_I("%s: fix private failure", __func__);
}
else
{
LOG_I("%s: No permission on %s(attr=0x%lx)", __func__, VAREA_NAME(varea), varea->attr);
}
}
else if (msg->fault_type == MM_FAULT_TYPE_PAGE_FAULT)
{
RT_ASSERT(pa == ARCH_MAP_FAILED);
RT_ASSERT(!(varea->flag & MMF_PREFETCH));
err = _fetch_page(varea, msg);
}
else if (msg->fault_type == MM_FAULT_TYPE_ACCESS_FAULT &&
varea->flag & MMF_COW)
{
if (err == MM_FAULT_FIXABLE_FALSE)
LOG_I("%s: page fault failure", __func__);
}
else
{
LOG_D("%s: can not fix", __func__);
/* signal a fault to user? */
}
return err;
@@ -79,7 +92,7 @@ static int _write_fault(rt_varea_t varea, void *pa, struct rt_aspace_fault_msg *
static int _exec_fault(rt_varea_t varea, void *pa, struct rt_aspace_fault_msg *msg)
{
int err = UNRECOVERABLE;
int err = MM_FAULT_FIXABLE_FALSE;
if (msg->fault_type == MM_FAULT_TYPE_PAGE_FAULT)
{
RT_ASSERT(pa == ARCH_MAP_FAILED);
@@ -91,10 +104,13 @@ static int _exec_fault(rt_varea_t varea, void *pa, struct rt_aspace_fault_msg *m
int rt_aspace_fault_try_fix(rt_aspace_t aspace, struct rt_aspace_fault_msg *msg)
{
int err = UNRECOVERABLE;
int err = MM_FAULT_FIXABLE_FALSE;
uintptr_t va = (uintptr_t)msg->fault_vaddr;
va &= ~ARCH_PAGE_MASK;
msg->fault_vaddr = (void *)va;
rt_mm_fault_res_init(&msg->response);
RT_DEBUG_SCHEDULER_AVAILABLE(1);
if (aspace)
{
@@ -105,21 +121,34 @@ int rt_aspace_fault_try_fix(rt_aspace_t aspace, struct rt_aspace_fault_msg *msg)
if (varea)
{
void *pa = rt_hw_mmu_v2p(aspace, msg->fault_vaddr);
msg->off = ((char *)msg->fault_vaddr - (char *)varea->start) >> ARCH_PAGE_SHIFT;
/* permission checked by fault op */
switch (msg->fault_op)
if (pa != ARCH_MAP_FAILED && msg->fault_type == MM_FAULT_TYPE_PAGE_FAULT)
{
case MM_FAULT_OP_READ:
err = _read_fault(varea, pa, msg);
break;
case MM_FAULT_OP_WRITE:
err = _write_fault(varea, pa, msg);
break;
case MM_FAULT_OP_EXECUTE:
err = _exec_fault(varea, pa, msg);
break;
LOG_D("%s(fault=%p) has already fixed", __func__, msg->fault_vaddr);
err = MM_FAULT_FIXABLE_TRUE;
}
else
{
LOG_D("%s(varea=%s,fault=%p,fault_op=%d,phy=%p)", __func__, VAREA_NAME(varea), msg->fault_vaddr, msg->fault_op, pa);
msg->off = varea->offset + ((long)msg->fault_vaddr - (long)varea->start) / ARCH_PAGE_SIZE;
/* permission checked by fault op */
switch (msg->fault_op)
{
case MM_FAULT_OP_READ:
err = _read_fault(varea, pa, msg);
break;
case MM_FAULT_OP_WRITE:
err = _write_fault(varea, pa, msg);
break;
case MM_FAULT_OP_EXECUTE:
err = _exec_fault(varea, pa, msg);
break;
}
}
}
else
{
LOG_I("%s: varea not found at 0x%lx", __func__, msg->fault_vaddr);
}
RD_UNLOCK(aspace);
}

View File

@@ -20,12 +20,8 @@
#define MM_FAULT_STATUS_OK_MAPPED 1
#define MM_FAULT_STATUS_UNRECOVERABLE 4
struct rt_mm_fault_res
{
void *vaddr;
rt_size_t size;
int status;
};
#define MM_FAULT_FIXABLE_FALSE 0
#define MM_FAULT_FIXABLE_TRUE 1
enum rt_mm_fault_op
{
@@ -36,10 +32,41 @@ enum rt_mm_fault_op
enum rt_mm_fault_type
{
/**
* Occurs when an instruction attempts to access a memory address that it
* does not have permission to access
*/
MM_FAULT_TYPE_ACCESS_FAULT,
/**
* Occurs when a load or store instruction accesses a virtual memory
* address that is not currently mapped to a physical memory page
*/
MM_FAULT_TYPE_PAGE_FAULT,
/**
* Occurs like a SIGBUS
*/
MM_FAULT_TYPE_BUS_ERROR,
MM_FAULT_TYPE_GENERIC,
__PRIVATE_PAGE_INSERT,
};
enum rt_mm_hint_prefetch
{
MM_FAULT_HINT_PREFETCH_NONE,
MM_FAULT_HINT_PREFETCH_READY,
};
struct rt_mm_fault_res
{
void *vaddr;
rt_size_t size;
int status;
/* hint for prefetch strategy */
enum rt_mm_hint_prefetch hint;
};
struct rt_aspace_fault_msg
@@ -52,8 +79,36 @@ struct rt_aspace_fault_msg
struct rt_mm_fault_res response;
};
struct rt_aspace_io_msg
{
/* offset in varea */
rt_size_t off;
/* fault address in target address space */
void *fault_vaddr;
/* read/write buffer in kernel space */
void *buffer_vaddr;
struct rt_mm_fault_res response;
};
rt_inline void rt_mm_fault_res_init(struct rt_mm_fault_res *res)
{
res->vaddr = RT_NULL;
res->size = 0;
res->hint = MM_FAULT_HINT_PREFETCH_NONE;
res->status = MM_FAULT_STATUS_UNRECOVERABLE;
}
rt_inline void rt_mm_io_msg_init(struct rt_aspace_io_msg *io, rt_size_t off, void *fault_vaddr, void *buffer_vaddr)
{
io->off = off;
io->fault_vaddr = fault_vaddr;
io->buffer_vaddr = buffer_vaddr;
rt_mm_fault_res_init(&io->response);
}
struct rt_aspace;
/* MMU base page fault handler, return 1 is fixable */
/* MMU base page fault handler, MM_FAULT_FIXABLE_TRUE/MM_FAULT_FIXABLE_FALSE will be returned */
int rt_aspace_fault_try_fix(struct rt_aspace *aspace, struct rt_aspace_fault_msg *msg);
#endif /* __MM_FAULT_H__ */

Some files were not shown because too many files have changed in this diff Show More