Commit eddbc895 authored by Yifan Wu

Implement sys_read && allocate pid and kernel stack dynamically.

Parent 1bc53c0b
......@@ -47,6 +47,9 @@ impl MemorySet {
areas: Vec::new(),
}
}
pub fn dealloc_all_frames(&mut self) {
*self = Self::new_bare();
}
pub fn token(&self) -> usize {
self.page_table.token()
}
......@@ -59,6 +62,14 @@ impl MemorySet {
permission,
), None);
}
pub fn remove_area_with_start_vpn(&mut self, start_vpn: VirtPageNum) {
if let Some((idx, area)) = self.areas.iter_mut().enumerate()
.find(|(_, area)| area.vpn_range.get_start() == start_vpn) {
area.unmap(&mut self.page_table);
self.areas.remove(idx);
} else {
panic!("Area not found!");
}
}
fn push(&mut self, mut map_area: MapArea, data: Option<&[u8]>) {
map_area.map(&mut self.page_table);
if let Some(data) = data {
......@@ -228,7 +239,6 @@ impl MapArea {
let pte_flags = PTEFlags::from_bits(self.map_perm.bits).unwrap();
page_table.map(vpn, ppn, pte_flags);
}
#[allow(unused)]
pub fn unmap_one(&mut self, page_table: &mut PageTable, vpn: VirtPageNum) {
match self.map_type {
MapType::Framed => {
......@@ -243,7 +253,6 @@ impl MapArea {
self.map_one(page_table, vpn);
}
}
#[allow(unused)]
pub fn unmap(&mut self, page_table: &mut PageTable) {
for vpn in self.vpn_range {
self.unmap_one(page_table, vpn);
......
......@@ -131,7 +131,7 @@ impl PageTable {
}
}
pub fn translated_byte_buffer(token: usize, ptr: *const u8, len: usize) -> Vec<&'static [u8]> {
pub fn translated_byte_buffer(token: usize, ptr: *const u8, len: usize) -> Vec<&'static mut [u8]> {
let page_table = PageTable::from_token(token);
let mut start = ptr as usize;
let end = start + len;
......@@ -146,7 +146,7 @@ pub fn translated_byte_buffer(token: usize, ptr: *const u8, len: usize) -> Vec<&
vpn.step();
let mut end_va: VirtAddr = vpn.into();
end_va = end_va.min(VirtAddr::from(end));
v.push(&ppn.get_bytes_array()[start_va.page_offset()..end_va.page_offset()]);
v.push(&mut ppn.get_bytes_array()[start_va.page_offset()..end_va.page_offset()]);
start = end_va.into();
}
v
......
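Returning mutable slices here is what lets a syscall write into a user buffer that may straddle page boundaries. A hedged sketch of that use, assuming a valid token and user pointer; copy_to_user is a hypothetical helper for illustration, not part of this commit:

// Hypothetical helper: copy kernel data into a possibly page-spanning user buffer.
fn copy_to_user(token: usize, user_buf: *const u8, data: &[u8]) {
    let buffers = translated_byte_buffer(token, user_buf, data.len());
    let mut offset = 0;
    for segment in buffers {
        // Each segment is a mutable view of one page's worth of the user buffer.
        let n = segment.len();
        segment.copy_from_slice(&data[offset..offset + n]);
        offset += n;
    }
}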
use crate::mm::translated_byte_buffer;
use crate::task::current_user_token;
use crate::task::{current_user_token, suspend_current_and_run_next};
use crate::sbi::console_getchar;
const FD_STDIN: usize = 0;
const FD_STDOUT: usize = 1;
pub fn sys_write(fd: usize, buf: *const u8, len: usize) -> isize {
......@@ -16,4 +18,29 @@ pub fn sys_write(fd: usize, buf: *const u8, len: usize) -> isize {
panic!("Unsupported fd in sys_write!");
}
}
}
pub fn sys_read(fd: usize, buf: *const u8, len: usize) -> isize {
match fd {
FD_STDIN => {
assert_eq!(len, 1, "Only support len = 1 in sys_read!");
let mut c: usize;
loop {
c = console_getchar();
if c == 0 {
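// Nothing to read yet: yield the CPU and retry after being rescheduled.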
suspend_current_and_run_next();
continue;
} else {
break;
}
}
let ch = c as u8;
let mut buffers = translated_byte_buffer(current_user_token(), buf, len);
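// Write the byte into the user buffer through its kernel-accessible mapping.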
unsafe { buffers[0].as_mut_ptr().write_volatile(ch); }
1
}
_ => {
panic!("Unsupported fd in sys_read!");
}
}
}
\ No newline at end of file
const SYSCALL_READ: usize = 63;
const SYSCALL_WRITE: usize = 64;
const SYSCALL_EXIT: usize = 93;
const SYSCALL_YIELD: usize = 124;
......@@ -11,6 +12,7 @@ use process::*;
pub fn syscall(syscall_id: usize, args: [usize; 3]) -> isize {
match syscall_id {
SYSCALL_READ => sys_read(args[0], args[1] as *const u8, args[2]),
SYSCALL_WRITE => sys_write(args[0], args[1] as *const u8, args[2]),
SYSCALL_EXIT => sys_exit(args[0] as i32),
SYSCALL_YIELD => sys_yield(),
......
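With the dispatcher wired up, user programs reach sys_read through the usual ecall path. A minimal user-side sketch, assuming a RISC-V target and the rCore-style calling convention (syscall id in x17, arguments in x10..x12); the wrapper names below are illustrative and not part of this commit:

const SYSCALL_READ: usize = 63;

// Generic syscall helper: pass the id in x17 and up to three arguments in x10..x12.
fn syscall(id: usize, args: [usize; 3]) -> isize {
    let ret: isize;
    unsafe {
        core::arch::asm!(
            "ecall",
            inlateout("x10") args[0] => ret,
            in("x11") args[1],
            in("x12") args[2],
            in("x17") id,
        );
    }
    ret
}

pub fn read(fd: usize, buffer: &mut [u8]) -> isize {
    syscall(SYSCALL_READ, [fd, buffer.as_mut_ptr() as usize, buffer.len()])
}

pub fn getchar() -> u8 {
    // The kernel currently asserts len == 1 for FD_STDIN, so read one byte at a time.
    let mut c = [0u8; 1];
    read(0, &mut c);
    c[0]
}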
......@@ -3,6 +3,7 @@ mod switch;
mod task;
mod manager;
mod processor;
mod pid;
use crate::loader::{get_num_app, get_app_data};
use crate::trap::TrapContext;
......@@ -14,6 +15,7 @@ use alloc::vec::Vec;
use alloc::sync::Arc;
use spin::Mutex;
use manager::fetch_task;
use pid::{PidHandle, pid_alloc, KernelStack};
pub use context::TaskContext;
pub use processor::{
......
use alloc::vec::Vec;
use lazy_static::*;
use spin::Mutex;
use crate::mm::{KERNEL_SPACE, MapPermission, VirtAddr};
use crate::config::{
PAGE_SIZE,
TRAMPOLINE,
KERNEL_STACK_SIZE,
};
struct PidAllocator {
current: usize,
recycled: Vec<usize>,
}
impl PidAllocator {
pub fn new() -> Self {
PidAllocator {
current: 0,
recycled: Vec::new(),
}
}
pub fn alloc(&mut self) -> PidHandle {
if let Some(pid) = self.recycled.pop() {
PidHandle(pid)
} else {
self.current += 1;
PidHandle(self.current - 1)
}
}
pub fn dealloc(&mut self, pid: usize) {
assert!(pid < self.current);
assert!(
self.recycled.iter().find(|ppid| **ppid == pid).is_none(),
"pid {} has been deallocated!", pid
);
self.recycled.push(pid);
}
}
lazy_static! {
static ref PID_ALLOCATOR : Mutex<PidAllocator> = Mutex::new(PidAllocator::new());
}
pub struct PidHandle(pub usize);
impl Drop for PidHandle {
fn drop(&mut self) {
PID_ALLOCATOR.lock().dealloc(self.0);
}
}
pub fn pid_alloc() -> PidHandle {
PID_ALLOCATOR.lock().alloc()
}
/// Return (bottom, top) of a kernel stack in kernel space.
pub fn kernel_stack_position(app_id: usize) -> (usize, usize) {
let top = TRAMPOLINE - app_id * (KERNEL_STACK_SIZE + PAGE_SIZE);
let bottom = top - KERNEL_STACK_SIZE;
(bottom, top)
}
pub struct KernelStack {
pid: usize,
}
impl KernelStack {
pub fn new(pid_handle: &PidHandle) -> Self {
let pid = pid_handle.0;
let (kernel_stack_bottom, kernel_stack_top) = kernel_stack_position(pid);
KERNEL_SPACE
.lock()
.insert_framed_area(
kernel_stack_bottom.into(),
kernel_stack_top.into(),
MapPermission::R | MapPermission::W,
);
KernelStack {
pid: pid_handle.0,
}
}
pub fn push_on_top<T>(&self, value: T) -> *mut T where
T: Sized, {
let (_, kernel_stack_top) = kernel_stack_position(self.pid);
let ptr_mut = (kernel_stack_top - core::mem::size_of::<T>()) as *mut T;
unsafe { *ptr_mut = value; }
ptr_mut
}
pub fn get_top(&self) -> usize {
let (_, kernel_stack_top) = kernel_stack_position(self.pid);
kernel_stack_top
}
}
impl Drop for KernelStack {
fn drop(&mut self) {
let (kernel_stack_bottom, _) = kernel_stack_position(self.pid);
let kernel_stack_bottom_va: VirtAddr = kernel_stack_bottom.into();
KERNEL_SPACE
.lock()
.remove_area_with_start_vpn(kernel_stack_bottom_va.into());
}
}
\ No newline at end of file
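PidHandle and KernelStack are both RAII handles: when the task control block that owns them is dropped, the pid goes back onto the allocator's recycled list and the kernel stack is unmapped from KERNEL_SPACE. A small sketch of the recycling behavior, assuming the definitions above (illustration only, not part of the commit):

// Illustrative only: pids grow monotonically and are reused after their handles are dropped.
fn pid_recycling_demo() {
    let a = pid_alloc();        // first allocation, e.g. PidHandle(0)
    let b = pid_alloc();        // next allocation, e.g. PidHandle(1)
    let freed = a.0;
    drop(a);                    // Drop pushes the pid onto the recycled list
    let c = pid_alloc();        // pops the recycled pid instead of advancing `current`
    assert_eq!(c.0, freed);
    drop(b);
    drop(c);
}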
......@@ -2,12 +2,14 @@ use crate::mm::{MemorySet, MapPermission, PhysPageNum, KERNEL_SPACE, VirtAddr};
use crate::trap::{TrapContext, trap_handler};
use crate::config::{TRAP_CONTEXT, kernel_stack_position};
use super::TaskContext;
use super::{PidHandle, pid_alloc, KernelStack};
pub struct TaskControlBlock {
// immutable
pub trap_cx_ppn: PhysPageNum,
pub base_size: usize,
//pub pid: usize,
pub pid: PidHandle,
pub kernel_stack: KernelStack,
// mutable
pub task_cx_ptr: usize,
pub task_status: TaskStatus,
......@@ -32,23 +34,20 @@ impl TaskControlBlock {
.unwrap()
.ppn();
let task_status = TaskStatus::Ready;
// map a kernel-stack in kernel space
let (kernel_stack_bottom, kernel_stack_top) = kernel_stack_position(app_id);
KERNEL_SPACE
.lock()
.insert_framed_area(
kernel_stack_bottom.into(),
kernel_stack_top.into(),
MapPermission::R | MapPermission::W,
);
let task_cx_ptr = (kernel_stack_top - core::mem::size_of::<TaskContext>()) as *mut TaskContext;
unsafe { *task_cx_ptr = TaskContext::goto_trap_return(); }
// alloc a pid and a kernel stack in kernel space
let pid_handle = pid_alloc();
let kernel_stack = KernelStack::new(&pid_handle);
let kernel_stack_top = kernel_stack.get_top();
// push a task context which goes to trap_return to the top of kernel stack
let task_cx_ptr = kernel_stack.push_on_top(TaskContext::goto_trap_return());
let task_control_block = Self {
trap_cx_ppn,
base_size: user_sp,
pid: pid_handle,
kernel_stack,
task_cx_ptr: task_cx_ptr as usize,
task_status,
memory_set,
trap_cx_ppn,
base_size: user_sp,
};
// prepare TrapContext in user space
let trap_cx = task_control_block.get_trap_cx();
......