From a5ba50f0aaaef010b7ef02e7a1cd0f59937876e6 Mon Sep 17 00:00:00 2001
From: Julien CLEMENT
Date: Wed, 8 Dec 2021 01:28:18 +0100
Subject: [PATCH] feat(paging): add WIP kernel remap

Signed-off-by: Julien CLEMENT
---
 src/memory/frame_allocator.rs       | 34 ++++++++++++
 src/memory/paging/mod.rs            | 86 +++++++++++++++++++++++++++--
 src/memory/paging/temporary_page.rs | 34 ++++++++++++
 3 files changed, 150 insertions(+), 4 deletions(-)
 create mode 100644 src/memory/paging/temporary_page.rs

diff --git a/src/memory/frame_allocator.rs b/src/memory/frame_allocator.rs
index 33d5f50..cbc3428 100644
--- a/src/memory/frame_allocator.rs
+++ b/src/memory/frame_allocator.rs
@@ -92,3 +92,37 @@ impl FrameDeallocator<Size4KiB> for AreaFrameAllocator {
         unimplemented!()
     }
 }
+
+/// Holds exactly three frames: enough to back the P3, P2 and P1 tables
+/// needed to map a single temporary page.
+pub struct TinyAllocator([Option<Frame>; 3]);
+
+impl TinyAllocator {
+    pub fn new<A>(allocator: &mut A) -> TinyAllocator
+        where A: FrameAllocator<Size4KiB>
+    {
+        let mut f = || allocator.allocate_frame();
+        let frames = [f(), f(), f()];
+        TinyAllocator(frames)
+    }
+}
+
+unsafe impl FrameAllocator<Size4KiB> for TinyAllocator {
+    fn allocate_frame(&mut self) -> Option<Frame> {
+        for frame_option in &mut self.0 {
+            if frame_option.is_some() {
+                return frame_option.take();
+            }
+        }
+        None
+    }
+}
+impl FrameDeallocator<Size4KiB> for TinyAllocator {
+    unsafe fn deallocate_frame(&mut self, frame: Frame) {
+        for frame_option in &mut self.0 {
+            if frame_option.is_none() {
+                *frame_option = Some(frame);
+                return;
+            }
+        }
+        panic!("Tiny allocator can hold only 3 frames.");
+    }
+}
diff --git a/src/memory/paging/mod.rs b/src/memory/paging/mod.rs
index b124aab..c5825ce 100644
--- a/src/memory/paging/mod.rs
+++ b/src/memory/paging/mod.rs
@@ -1,19 +1,97 @@
 use multiboot2::BootInformation;
-use x86_64::structures::paging::{FrameAllocator, Size4KiB, PageTable, RecursivePageTable, Page, PageTableFlags as Flags, Mapper};
+pub use x86_64::structures::paging::{FrameAllocator, Size4KiB, PageTable, RecursivePageTable, Page, PageTableFlags as Flags, Mapper, PhysFrame as Frame};
 use crate::println;
-use x86_64::VirtAddr;
+use x86_64::{VirtAddr, PhysAddr};
+use x86_64::registers::control;
+use temporary_page::TemporaryPage;
+use super::PAGE_SIZE;
+
+mod temporary_page;
 
 pub const P4: *mut PageTable = 0o177777_777_777_777_777_0000 as *mut _;
 
-pub fn kernel_remap<A>(_allocator: &mut A, _boot_info: &BootInformation)
+pub fn kernel_remap<A>(allocator: &mut A, boot_info: &BootInformation)
     where A: FrameAllocator<Size4KiB>
 {
+    let mut temporary_page = TemporaryPage::new(Page::containing_address(VirtAddr::new(0xcafebabe)), allocator);
+    let mut active_table = get_active_page_table();
+    let mut new_table = {
+        let frame = allocator.allocate_frame().expect("No more frames");
+        InactivePageTable::new(frame, &mut active_table, &mut temporary_page)
+    };
+    new_table.under(&mut active_table, &mut temporary_page, |mapper| {
+        let elf_sections_tag = boot_info.elf_sections_tag().expect("Elf sections tag required");
+
+        for section in elf_sections_tag.sections() {
+            if !section.is_allocated() {
+                // section is not loaded to memory
+                continue;
+            }
+            assert!(section.start_address() % PAGE_SIZE == 0,
+                    "sections need to be page aligned");
+
+            println!("mapping section at addr: {:#x}, size: {:#x}",
+                     section.start_address(), section.size());
+
+            let flags = Flags::PRESENT | Flags::WRITABLE;
+
+            let start_frame = Frame::<Size4KiB>::containing_address(PhysAddr::new(section.start_address() as u64));
+            let end_frame = Frame::containing_address(PhysAddr::new(section.end_address() as u64 - 1));
+            for frame in Frame::range_inclusive(start_frame, end_frame) {
+                unsafe {
+                    mapper.identity_map(frame, flags,
+                        allocator).expect("Failed to identity map kernel").flush();
+                }
+            }
+        }
+
+    });
 }
+struct InactivePageTable {
+    p4_frame: Frame,
+}
+
+impl InactivePageTable {
+    pub fn new(frame: Frame, active_table: &mut RecursivePageTable,
+               temporary_page: &mut TemporaryPage) -> InactivePageTable {
+        let table = temporary_page.map_table_frame(frame, active_table);
+        table.zero();
+        // set up a recursive mapping of the new table to itself
+        table[511].set_frame(frame.clone(), Flags::PRESENT | Flags::WRITABLE);
+        temporary_page.unmap(active_table);
+        InactivePageTable { p4_frame: frame }
+    }
+
+    pub fn under<F>(&mut self, active_table: &mut RecursivePageTable,
+                    temporary_page: &mut TemporaryPage, f: F)
+        where F: FnOnce(&mut RecursivePageTable)
+    {
+        // keep the currently active P4 reachable through the temporary page
+        let backup = control::Cr3::read().0;
+        let p4_table = temporary_page.map_table_frame(backup, active_table);
+        // overwrite the recursive mapping so P4 accesses go to the new table
+        unsafe {
+            (*P4)[511].set_frame(self.p4_frame, Flags::PRESENT | Flags::WRITABLE);
+        }
+        x86_64::instructions::tlb::flush_all();
+
+        f(active_table);
+
+        // restore the recursive mapping to the original (active) P4 table
+        p4_table[511].set_frame(backup, Flags::PRESENT | Flags::WRITABLE);
+        x86_64::instructions::tlb::flush_all();
+
+        temporary_page.unmap(active_table);
+    }
+}
+
+pub fn get_active_page_table() -> RecursivePageTable<'static> {
+    unsafe {
+        RecursivePageTable::new(&mut *P4).expect("Could not create Page Table")
+    }
+}
+
 
 pub fn test_paging<A>(allocator: &mut A)
     where A: FrameAllocator<Size4KiB>
 {
-    let mut page_table = unsafe { RecursivePageTable::new(&mut *P4).expect("Could not create Page Table") };
+    let mut page_table = get_active_page_table();
 
     let addr = 42 * 512 * 512 * 4096; // 42th P3 entry
     let page = Page::containing_address(VirtAddr::new(addr));
diff --git a/src/memory/paging/temporary_page.rs b/src/memory/paging/temporary_page.rs
new file mode 100644
index 0000000..4b3cadf
--- /dev/null
+++ b/src/memory/paging/temporary_page.rs
@@ -0,0 +1,34 @@
+use super::{Page, Flags, VirtAddr, RecursivePageTable, Mapper, FrameAllocator, Size4KiB, Frame, PageTable};
+use super::super::frame_allocator::TinyAllocator;
+
+pub struct TemporaryPage {
+    page: Page,
+    allocator: TinyAllocator,
+}
+
+impl TemporaryPage {
+    pub fn new<A>(page: Page, allocator: &mut A) -> TemporaryPage
+        where A: FrameAllocator<Size4KiB>
+    {
+        TemporaryPage {
+            page,
+            allocator: TinyAllocator::new(allocator),
+        }
+    }
+
+    pub fn map(&mut self, frame: Frame, active_table: &mut RecursivePageTable) -> VirtAddr {
+        unsafe {
+            active_table.map_to(self.page, frame, Flags::PRESENT | Flags::WRITABLE, &mut self.allocator).expect("Failed to map temporary page").flush();
+        }
+        self.page.start_address()
+    }
+
+    pub fn unmap(&mut self, active_table: &mut RecursivePageTable) {
+        active_table.unmap(self.page).expect("Failed to unmap").1.flush()
+    }
+
+    pub fn map_table_frame(&mut self, frame: Frame,
+                           active_table: &mut RecursivePageTable) -> &mut PageTable {
+        unsafe { &mut *(self.map(frame, active_table).as_u64() as *mut PageTable) }
+    }
+}