Compare commits

...

9 Commits

SHA1        Message                                                                    CI (drone/push)  Date
563d97f372  add cleaner exit, freeing thread's stack                                   passing          2022-12-31 13:50:24 +01:00
685dea4f6a  basic scheduler                                                            passing          2022-12-31 13:21:42 +01:00
af4ab10505  Minor thread and scheduler refactoring                                     passing          2022-12-31 12:23:27 +01:00
3a8167b6ad  Basic thread run routine                                                   passing          2022-12-31 12:05:20 +01:00
2522ece23f  double faulting in thread start while pushing thread's general registers   passing          2022-12-31 03:49:04 +01:00
5a4f8a561e  working on thread, assembly is trolling me somehow                         failing          2022-12-31 03:02:46 +01:00
bc0c885052  add memory allocation for thread stack                                     passing          2022-12-31 00:05:17 +01:00
268b36188e  start thread new                                                           (no CI run)      2022-12-30 23:51:03 +01:00
9199c60948  create thread and scheduler base files                                     passing          2022-12-29 18:58:48 +01:00

All commits are signed off by Julien CLEMENT <julien.clement@epita.fr>.
6 changed files with 309 additions and 5 deletions

@@ -65,11 +65,14 @@ impl FileSystem for VirtualFS {
         loop {
             if let Some(fs) = map.find_exact(&path_split) {
                 // TODO, remove path prefix of the mount point
-                return fs.borrow_mut().open(mnt_relative_path.as_str(), flags).await;
-            }
-            else {
+                return fs
+                    .borrow_mut()
+                    .open(mnt_relative_path.as_str(), flags)
+                    .await;
+            } else {
                 let component = path_split.remove(path_split.len() - 1);
-                mnt_relative_path = String::from("/") + component.as_str() + mnt_relative_path.as_str();
+                mnt_relative_path =
+                    String::from("/") + component.as_str() + mnt_relative_path.as_str();
             }
         }
     } else {

@@ -8,6 +8,7 @@ mod fd;
 mod fs;
 mod interrupts;
 mod memory;
+mod proc;
 mod syscalls;
 mod task;
 mod utils;
@@ -22,6 +23,9 @@ use drivers::vga::{self, Color, ColorCode};
 use multiboot2::BootInformation;
 use task::{executor::Executor, keyboard, Task};
+
+use alloc::sync::Arc;
+use core::cell::RefCell;

 #[alloc_error_handler]
 fn alloc_error_handler(layout: alloc::alloc::Layout) -> ! {
     panic!("Allocation error: {:?}", layout)
@@ -61,6 +65,7 @@ pub extern "C" fn julios_main(multiboot_info_addr: usize) -> ! {
     executor.spawn(Task::new(drivers::atapi::init()));
     executor.spawn(Task::new(keyboard::print_keypresses()));
     executor.spawn(Task::new(get_file()));
+    executor.spawn(Task::new(proc::scheduler::scheduler_run()));
     executor.run();
 }
@@ -83,4 +88,12 @@ async fn get_file() {
     serial_println!("{}", alloc::str::from_utf8(&buf).unwrap());
     fd.borrow_mut().close().await;
+
+    let thread = Arc::new(RefCell::new(proc::thread::Thread::new(
+        proc::thread::routine as u64,
+    )));
+    proc::scheduler::SCHEDULER
+        .lock()
+        .await
+        .register(thread.clone());
 }

src/proc/mod.rs (new file, +2)

@@ -0,0 +1,2 @@
pub mod scheduler;
pub mod thread;

src/proc/scheduler/mod.rs (new file, +124)

@@ -0,0 +1,124 @@
use crate::utils::mutex::AsyncMutex;

use super::thread::{Thread, ThreadId};

use alloc::{collections::BTreeMap, sync::Arc};
use core::cell::RefCell;
use core::{
    pin::Pin,
    task::{Context, Poll},
};
use crossbeam_queue::ArrayQueue;
use futures_util::stream::{Stream, StreamExt};
use futures_util::task::AtomicWaker;
use lazy_static::lazy_static;

lazy_static! {
    pub static ref SCHEDULER: AsyncMutex<Scheduler> = AsyncMutex::new(Scheduler::new());
}

pub type Threadt = Arc<RefCell<Thread>>;

struct ThreadStream {
    ids: ArrayQueue<ThreadId>,
    waker: AtomicWaker,
}

impl ThreadStream {
    pub fn new() -> Self {
        ThreadStream {
            ids: ArrayQueue::new(100),
            waker: AtomicWaker::new(),
        }
    }

    pub fn register(&mut self, id: ThreadId) {
        self.ids.push(id).expect("Thread queue full");
        self.waker.wake();
    }
}

impl Stream for ThreadStream {
    type Item = ThreadId;

    fn poll_next(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
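        // Fast path: a thread ID is already queued.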
        if let Ok(id) = self.ids.pop() {
            return Poll::Ready(Some(id));
        }
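        // Slow path: register the waker, then pop again so a wakeup that
        // raced with the first pop is not lost.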
        self.waker.register(&cx.waker());
        match self.ids.pop() {
            Ok(id) => {
                self.waker.take();
                Poll::Ready(Some(id))
            }
            Err(crossbeam_queue::PopError) => Poll::Pending,
        }
    }
}

pub struct Scheduler {
    pub threads: BTreeMap<ThreadId, Threadt>,
    thread_queue: ThreadStream,
}

impl Scheduler {
    pub fn new() -> Self {
        let mut res = Scheduler {
            threads: BTreeMap::new(),
            thread_queue: ThreadStream::new(),
        };
        let k_thread: Thread = Thread {
            id: ThreadId(0),
            entry_point: 0,
            started: true,
            rsp: 0,
            base_stack: 0,
        };
        res.register(Arc::new(RefCell::new(k_thread)));
        res
    }

    pub async fn run(&mut self) {
        while let Some(id) = self.thread_queue.next().await {
            if let Some(thread) = self.get_thread(id) {
                // Thread still exists
                unsafe {
                    (&mut *thread.as_ptr()).run();
                }
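                // Re-queue the id so the thread gets another turn (round-robin).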
                self.thread_queue.register(id);
            }
        }
    }

    pub fn register(&mut self, thread: Threadt) {
        let thread_id = thread.borrow().id;
        if self.threads.insert(thread_id, thread).is_some() {
            panic!("Duplicate thread ID")
        }
        if thread_id != ThreadId(0) {
            self.thread_queue.register(thread_id);
        }
    }

    pub fn exit(&mut self, id: ThreadId) {
        self.threads.remove(&id).unwrap().borrow().exit();
    }

    pub fn get_thread(&mut self, id: ThreadId) -> Option<Threadt> {
        if let Some(thread) = self.threads.get_mut(&id) {
            Some(thread.clone())
        } else {
            None
        }
    }
}

pub async fn scheduler_run() {
    let mut scheduler = SCHEDULER.lock().await;
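    // Deliberately clear the lock flag while keeping the guard alive: run()
    // below never returns, and Thread::run() / exit() still need to
    // try_lock() this same mutex later.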
    SCHEDULER.force_unlock();
    scheduler.run().await;
}

src/proc/thread/mod.rs (new file, +158)

@@ -0,0 +1,158 @@
use crate::println;
use crate::utils::mutex::AsyncMutex;

use super::scheduler::SCHEDULER;

use core::arch::asm;
use core::sync::atomic::{AtomicU64, Ordering};

use alloc::alloc::{alloc, dealloc, Layout};
use lazy_static::lazy_static;

const STACK_SIZE: usize = 4096 * 20;

lazy_static! {
    pub static ref RUNNING_THREAD: AsyncMutex<ThreadId> = AsyncMutex::new(ThreadId(0));
}

#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub struct ThreadId(pub u64);

impl ThreadId {
    fn new() -> Self {
        static NEXT_ID: AtomicU64 = AtomicU64::new(1);
        ThreadId(NEXT_ID.fetch_add(1, Ordering::Relaxed))
    }
}

pub fn exit() {
    println!("Exiting thread");
    let k_thread: *mut Thread;
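    // Keep a raw pointer to the kernel thread so it can still be run after
    // the scheduler guard below is dropped.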
    {
        let mut scheduler = SCHEDULER.try_lock().unwrap();
        k_thread = scheduler.get_thread(ThreadId(0)).unwrap().as_ptr();
        scheduler.exit(*RUNNING_THREAD.try_lock().unwrap());
    } // Drop scheduler mutex guard
    unsafe {
        (&mut *k_thread).run();
    }
}

pub fn routine() {
    println!("Routine executed");
    exit();
}

pub struct Thread {
    pub id: ThreadId,
    pub entry_point: u64,
    pub started: bool,
    pub rsp: u64,
    pub base_stack: u64,
}

impl Thread {
    pub fn new(entry_point: u64) -> Self {
        unsafe {
            let stack_bottom = alloc(Layout::new::<[u8; STACK_SIZE]>()) as u64;
            Thread {
                id: ThreadId::new(),
                entry_point,
                started: false,
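                // x86-64 stacks grow downward: rsp starts at the top of the allocation.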
                rsp: stack_bottom + STACK_SIZE as u64,
                base_stack: stack_bottom,
            }
        }
    }

    pub fn exit(&self) {
        unsafe {
            dealloc(self.base_stack as *mut u8, Layout::new::<[u8; STACK_SIZE]>());
        }
    }

    pub fn run(&mut self) {
        println!("Running thread {:?}", self.id);
        unsafe {
            let mut current_thread_guard = RUNNING_THREAD.try_lock().unwrap();
            let mut scheduler = SCHEDULER.try_lock().unwrap();
            if let Some(current_thread) = scheduler.get_thread(*current_thread_guard) {
                let current_rsp: u64;
                asm!(
                    "push rsp", // Recover current rsp
                    "pop {out}",
                    "sub {out}, 56", // Offset to the saved registers: the 7 pushes (8 bytes each) done below
                    out = out(reg) current_rsp, // Save thread rsp
                );
                current_thread.borrow_mut().rsp = current_rsp;
            } else {
                // The current thread does not exist anymore: switch to the
                // new thread without saving any registers.
                *current_thread_guard = self.id; // Change running thread
                asm!(
                    "push {rsp}", // Set stack pointer to the new thread
                    "pop rsp",
                    "pop rdi", // Restore new thread regs
                    "pop rsi",
                    "pop rbp",
                    "pop rdx",
                    "pop rcx",
                    "pop rbx",
                    "pop rax",
                    rsp = in(reg) self.rsp,
                );
                return;
            }
            *current_thread_guard = self.id; // Change running thread
        } // The scheduler and running-thread guards are dropped here
        unsafe {
            if self.started {
                asm!(
                    "push rax", // Save current thread regs
                    "push rbx",
                    "push rcx",
                    "push rdx",
                    "push rbp",
                    "push rsi",
                    "push rdi",
                    "push {rsp}", // Set stack pointer to the new thread
                    "pop rsp",
                    "pop rdi", // Restore new thread regs
                    "pop rsi",
                    "pop rbp",
                    "pop rdx",
                    "pop rcx",
                    "pop rbx",
                    "pop rax",
                    rsp = in(reg) self.rsp,
                );
            } else {
                self.started = true;
                asm!(
                    "push rax", // Save current thread regs
                    "push rbx",
                    "push rcx",
                    "push rdx",
                    "push rbp",
                    "push rsi",
                    "push rdi",
                    "push {rsp}", // Set stack pointer to the new thread
                    "pop rsp",
                    "jmp {rip}", // Jump to the thread routine
                    rsp = in(reg) self.rsp,
                    rip = in(reg) self.entry_point,
                );
            }
        }
    }
}
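
Note that run() either jumps to entry_point (first run) or switches stacks; nothing ever pushes a return address, so a thread body must end by calling exit() itself, exactly as routine() does above. A minimal sketch of another thread body under the same convention (the name worker is hypothetical, not part of this diff):

```rust
// Hypothetical thread body following the same convention as routine():
// it must never return normally, so it finishes with exit(), which removes
// the thread from the scheduler, frees its stack, and switches back to the
// kernel thread (ThreadId(0)).
pub fn worker() {
    println!("Worker thread executed");
    exit(); // does not return
}
```

Such a routine would be registered the same way routine is in get_file(), via Thread::new(worker as u64).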

@@ -73,13 +73,17 @@ impl<T> AsyncMutex<T> {
     }

     pub fn try_lock(&self) -> Option<AsyncMutexGuard<T>> {
-        if self.lock.try_lock() {
+        if !self.lock.try_lock() {
             Some(AsyncMutexGuard { mutex: self })
         } else {
             None
         }
     }

+    pub fn force_unlock(&self) {
+        self.lock.drop();
+    }
+
     pub async fn lock(&self) -> AsyncMutexGuard<'_, T> {
         self.lock.clone().await;
         AsyncMutexGuard { mutex: self }
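
force_unlock clears the underlying lock flag while any existing AsyncMutexGuard stays alive, so a later try_lock() hands out a second guard aliasing the same data; scheduler_run() above depends on exactly that, and it is only tenable because the executor is single-threaded. For intuition, a minimal standalone model of this shape (a hypothetical Flag type, not the kernel's actual lock internals, assuming a try_lock that returns true on success):

```rust
use core::sync::atomic::{AtomicBool, Ordering};

// Minimal model of a force-unlockable lock flag (hypothetical, for illustration).
struct Flag(AtomicBool);

impl Flag {
    const fn new() -> Self {
        Flag(AtomicBool::new(false))
    }

    // Returns true if the lock was acquired (the flag was previously clear).
    fn try_lock(&self) -> bool {
        !self.0.swap(true, Ordering::Acquire)
    }

    // Clears the flag unconditionally, even while a "guard" is still live.
    fn force_unlock(&self) {
        self.0.store(false, Ordering::Release);
    }
}

fn main() {
    let flag = Flag::new();
    assert!(flag.try_lock());  // first acquisition succeeds
    assert!(!flag.try_lock()); // a second one is normally refused...
    flag.force_unlock();       // ...until the flag is force-cleared,
    assert!(flag.try_lock());  // after which it aliases the first acquisition
}
```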