feat(paging): add stack guard page

Signed-off-by: Julien CLEMENT <julien.clement@epita.fr>
Julien CLEMENT 2021-12-08 16:54:01 +01:00
parent f844883192
commit 2433d99bc7
4 changed files with 113 additions and 69 deletions
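Context for the diff below, with the caveat that the stack layout itself comes from the boot code and is not visible in these hunks: kernel_remap now builds a fresh P4 table, switches CR3 to it, and then unmaps the page holding the old boot P4 frame. The commit title and the new "Stack guard page at {:#x}" print indicate that this unmapped page sits below the kernel stack, so a stack that grows past its bottom eventually touches the guard page and traps instead of silently overwriting memory. A hypothetical way to exercise it (not part of this commit):

#[allow(unconditional_recursion)]
fn stack_overflow() {
    stack_overflow();
    // A volatile read after the recursive call keeps this from being compiled
    // as a tail call, so every invocation really pushes a new stack frame.
    unsafe { core::ptr::read_volatile(&0u8 as *const u8); }
}

Calling something like this from julios_main would drive the stack pointer into the guard page; the offending write faults (and, since the CPU cannot push an exception frame onto an unmapped stack, escalates past an ordinary page fault) rather than corrupting whatever lies below the stack.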

@@ -11,9 +11,9 @@ mod vga;
extern crate multiboot2;
use core::panic::PanicInfo;
use vga::{Color, ColorCode};
use memory::paging::{Size4KiB, FrameAllocator};
use memory::paging::{FrameAllocator, Size4KiB};
use multiboot2::BootInformation;
use vga::{Color, ColorCode};
#[panic_handler]
fn panic_handler(info: &PanicInfo) -> ! {
@@ -29,7 +29,8 @@ pub fn hlt_loop() -> ! {
}
pub fn init<A>(frame_allocator: &mut A, boot_info: &BootInformation)
where A: FrameAllocator<Size4KiB>
where
A: FrameAllocator<Size4KiB>,
{
vga::change_color(ColorCode::new(Color::LightCyan, Color::Black));
println!("Starting init");
@@ -47,7 +48,6 @@ fn enable_nxe_bit() {
unsafe { Efer::update(|efer| *efer |= EferFlags::NO_EXECUTE_ENABLE) }
}
fn enable_write_protect_bit() {
println!("Enabling write protection bit");
use x86_64::registers::control::{Cr0, Cr0Flags};
@@ -73,8 +73,13 @@ fn get_frame_allocator(multiboot_info_addr: usize) -> memory::AreaFrameAllocator
let multiboot_start: u64 = multiboot_info_addr as u64;
let multiboot_end: u64 = multiboot_start + (boot_info.total_size as u64);
memory::AreaFrameAllocator::new( kernel_start, kernel_end, multiboot_start,
multiboot_end, memory_map_tag.memory_areas())
memory::AreaFrameAllocator::new(
kernel_start,
kernel_end,
multiboot_start,
multiboot_end,
memory_map_tag.memory_areas(),
)
}
#[no_mangle]
@@ -83,10 +88,8 @@ pub extern "C" fn julios_main(multiboot_info_addr: usize) -> ! {
let mut frame_allocator = get_frame_allocator(multiboot_info_addr);
init(&mut frame_allocator, &boot_info);
println!("***JuliOS V0.1.0***");
serial_println!("Hello serial");
memory::paging::test_paging(&mut frame_allocator);
panic!("Kernel end of flow");
}

@@ -16,7 +16,13 @@ pub struct AreaFrameAllocator {
}
impl AreaFrameAllocator {
pub fn new(kernel_start: u64, kernel_end: u64, multiboot_start: u64, multiboot_end: u64, memory_areas: MemoryAreaIter) -> AreaFrameAllocator {
pub fn new(
kernel_start: u64,
kernel_end: u64,
multiboot_start: u64,
multiboot_end: u64,
memory_areas: MemoryAreaIter,
) -> AreaFrameAllocator {
let mut allocator = AreaFrameAllocator {
next_free_frame: Frame::containing_address(PhysAddr::new(0)),
current_area: None,
@@ -38,7 +44,7 @@ impl AreaFrameAllocator {
let address = area.base_addr + area.length - 1;
Frame::containing_address(PhysAddr::new(address)) >= self.next_free_frame
})
.min_by_key(|area| area.base_addr);
.min_by_key(|area| area.base_addr);
if let Some(area) = self.current_area {
let start_frame = Frame::containing_address(PhysAddr::new(area.base_addr));
@@ -97,7 +103,8 @@ pub struct TinyAllocator([Option<Frame>; 3]);
impl TinyAllocator {
pub fn new<A>(allocator: &mut A) -> TinyAllocator
where A: FrameAllocator<Size4KiB>
where
A: FrameAllocator<Size4KiB>,
{
let mut f = || allocator.allocate_frame();
let frames = [f(), f(), f()];
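An aside on the recurring A: FrameAllocator<Size4KiB> bound (not part of the diff): it is the frame-allocation trait from the x86_64 crate, which both AreaFrameAllocator above and TinyAllocator implement. TinyAllocator holds exactly three Option<Frame> slots because mapping a single 4 KiB page needs at most three freshly allocated tables (a P3, a P2 and a P1; the P4 already exists). A minimal, hypothetical implementor, to show what the generic code expects:

use x86_64::PhysAddr;
use x86_64::structures::paging::{FrameAllocator, PhysFrame, Size4KiB};

/// Toy bump allocator, illustration only: hands out consecutive 4 KiB frames
/// starting from an arbitrary physical address, without tracking reserved areas.
struct BumpFrameAllocator { next: u64 }

unsafe impl FrameAllocator<Size4KiB> for BumpFrameAllocator {
    fn allocate_frame(&mut self) -> Option<PhysFrame<Size4KiB>> {
        // from_start_address rejects addresses that are not 4 KiB aligned.
        let frame = PhysFrame::from_start_address(PhysAddr::new(self.next)).ok()?;
        self.next += 4096;
        Some(frame)
    }
}

Anything satisfying this bound can be handed to init, kernel_remap, TinyAllocator::new or TemporaryPage::new in the code above and below.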

@@ -1,18 +1,20 @@
use multiboot2::{BootInformation, ElfSection};
pub use x86_64::structures::paging::{FrameAllocator, Size4KiB, PageTable, RecursivePageTable, Page, PageTableFlags as Flags, Mapper, PhysFrame as Frame};
use crate::println;
use x86_64::{VirtAddr, PhysAddr};
use x86_64::registers::control;
use temporary_page::TemporaryPage;
use super::PAGE_SIZE;
use crate::println;
use multiboot2::{BootInformation, ElfSection};
use temporary_page::TemporaryPage;
use x86_64::registers::control;
pub use x86_64::structures::paging::{
FrameAllocator, Mapper, Page, PageTable, PageTableFlags as Flags, PhysFrame as Frame,
RecursivePageTable, Size4KiB,
};
use x86_64::{PhysAddr, VirtAddr};
mod temporary_page;
pub const P4: *mut PageTable = 0o177777_777_777_777_777_0000 as *mut _;
fn get_flags_from_elf_section(section: &ElfSection) -> Flags {
use multiboot2::{ELF_SECTION_ALLOCATED, ELF_SECTION_WRITABLE,
ELF_SECTION_EXECUTABLE};
use multiboot2::{ELF_SECTION_ALLOCATED, ELF_SECTION_EXECUTABLE, ELF_SECTION_WRITABLE};
let mut flags = Flags::empty();
@@ -30,53 +32,83 @@ fn get_flags_from_elf_section(section: &ElfSection) -> Flags {
}
pub fn kernel_remap<A>(allocator: &mut A, boot_info: &BootInformation)
where A: FrameAllocator<Size4KiB>
where
A: FrameAllocator<Size4KiB>,
{
println!("Remapping kernel");
let mut temporary_page = TemporaryPage::new(Page::containing_address(VirtAddr::new(0xcafebabe)), allocator);
let mut temporary_page = TemporaryPage::new(
Page::containing_address(VirtAddr::new(0xcafebabe)),
allocator,
);
let mut active_table = get_active_page_table();
let mut new_table = {
let frame = allocator.allocate_frame().expect("No more frames");
InactivePageTable::new(frame, &mut active_table, &mut temporary_page)
};
new_table.under(&mut active_table, &mut temporary_page, |mapper| {
let elf_sections_tag = boot_info.elf_sections_tag().expect("Elf sections tag required");
let elf_sections_tag = boot_info
.elf_sections_tag()
.expect("Elf sections tag required");
for section in elf_sections_tag.sections() {
if !section.is_allocated() {
// section is not loaded to memory
continue;
}
assert!(section.start_address() % PAGE_SIZE == 0,
"sections need to be page aligned");
assert!(
section.start_address() % PAGE_SIZE == 0,
"sections need to be page aligned"
);
let flags = get_flags_from_elf_section(section);
let start_frame = Frame::<Size4KiB>::containing_address(PhysAddr::new(section.start_address() as u64));
let end_frame = Frame::containing_address(PhysAddr::new(section.end_address() as u64 - 1));
let start_frame = Frame::<Size4KiB>::containing_address(PhysAddr::new(
section.start_address() as u64,
));
let end_frame =
Frame::containing_address(PhysAddr::new(section.end_address() as u64 - 1));
for frame in Frame::range_inclusive(start_frame, end_frame) {
unsafe {
mapper.identity_map(frame, flags, allocator).expect("Failed to identity map kernel").flush();
mapper
.identity_map(frame, flags, allocator)
.expect("Failed to identity map kernel")
.flush();
}
}
}
let vga_buffer_frame = Frame::<Size4KiB>::containing_address(PhysAddr::new(0xb8000));
unsafe {
mapper.identity_map(vga_buffer_frame, Flags::PRESENT | Flags::WRITABLE, allocator).expect("Failed to identity map VGA buffer").flush();
mapper
.identity_map(
vga_buffer_frame,
Flags::PRESENT | Flags::WRITABLE,
allocator,
)
.expect("Failed to identity map VGA buffer")
.flush();
}
let multiboot_start = Frame::<Size4KiB>::containing_address(PhysAddr::new(boot_info.start_address() as u64));
let multiboot_end = Frame::containing_address(PhysAddr::new(boot_info.end_address() as u64 - 1));
let multiboot_start =
Frame::<Size4KiB>::containing_address(PhysAddr::new(boot_info.start_address() as u64));
let multiboot_end =
Frame::containing_address(PhysAddr::new(boot_info.end_address() as u64 - 1));
for frame in Frame::range_inclusive(multiboot_start, multiboot_end) {
unsafe {
mapper.identity_map(frame, Flags::PRESENT, allocator).expect("Failed to identity map multiboot info struct").flush();
mapper
.identity_map(frame, Flags::PRESENT, allocator)
.expect("Failed to identity map multiboot info struct")
.flush();
}
}
});
new_table.activate();
let old_table = new_table.activate();
println!("Loaded new page table!");
let old_p4_page = Page::<Size4KiB>::containing_address(VirtAddr::new(
old_table.p4_frame.start_address().as_u64(),
));
active_table.unmap(old_p4_page).expect("Failed to unmap old P4").1.flush();
println!("Stack guard page at {:#x}", old_p4_page.start_address());
}
struct InactivePageTable {
@@ -84,8 +116,11 @@ struct InactivePageTable {
}
impl InactivePageTable {
pub fn new(frame: Frame, active_table: & mut RecursivePageTable,
temporary_page: &mut TemporaryPage) -> InactivePageTable {
pub fn new(
frame: Frame,
active_table: &mut RecursivePageTable,
temporary_page: &mut TemporaryPage,
) -> InactivePageTable {
let table = temporary_page.map_table_frame(frame, active_table);
table.zero();
table[511].set_frame(frame.clone(), Flags::PRESENT | Flags::WRITABLE);
@@ -93,9 +128,13 @@ impl InactivePageTable {
InactivePageTable { p4_frame: frame }
}
pub fn under<F>(&mut self, active_table: &mut RecursivePageTable,
temporary_page: &mut TemporaryPage, f: F)
where F: FnOnce(&mut RecursivePageTable)
pub fn under<F>(
&mut self,
active_table: &mut RecursivePageTable,
temporary_page: &mut TemporaryPage,
f: F,
) where
F: FnOnce(&mut RecursivePageTable),
{
let backup = control::Cr3::read().0;
let p4_table = temporary_page.map_table_frame(backup, active_table);
@@ -114,9 +153,7 @@ impl InactivePageTable {
pub fn activate(&mut self) -> InactivePageTable {
let old_table = InactivePageTable {
p4_frame: Frame::containing_address(
control::Cr3::read().0.start_address()
),
p4_frame: Frame::containing_address(control::Cr3::read().0.start_address()),
};
unsafe {
control::Cr3::write(self.p4_frame, control::Cr3Flags::empty());
@@ -126,26 +163,5 @@ impl InactivePageTable {
}
pub fn get_active_page_table() -> RecursivePageTable<'static> {
unsafe {
RecursivePageTable::new(&mut *P4).expect("Could not create Page Table")
}
}
pub fn test_paging<A>(allocator: &mut A)
where A: FrameAllocator<Size4KiB>
{
let mut page_table = get_active_page_table();
let addr = 42 * 512 * 512 * 4096; // 42th P3 entry
let page = Page::containing_address(VirtAddr::new(addr));
let frame = allocator.allocate_frame().expect("no more frames");
println!("None = , map to {:?}", frame);
unsafe { page_table.map_to(page, frame, Flags::PRESENT, allocator).expect("Could not map").flush() };
println!("next free frame: {:?}", allocator.allocate_frame());
let page_ptr: *mut u8 = page.start_address().as_mut_ptr();
let frame_ptr: *mut u8 = frame.start_address().as_u64() as *mut u8;
println!("Page: {:#?}, Frame: {:#?}", page_ptr, frame_ptr);
unsafe { RecursivePageTable::new(&mut *P4).expect("Could not create Page Table") }
}
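An aside on the P4 constant and the table[511].set_frame(...) line above (not part of the diff): entry 511 of each new P4 points back at the P4 frame itself, and every 9-bit index inside the octal constant is 0o777, i.e. 511, so translating that virtual address follows the recursive entry at all four levels and lands on the P4 frame. A small hosted-Rust check of the decomposition, assuming the standard x86_64 4-level, 4 KiB layout:

fn main() {
    const P4_ADDR: u64 = 0o177777_777_777_777_777_0000;
    assert_eq!(P4_ADDR >> 48, 0xffff);        // sign extension: canonical higher-half address
    assert_eq!((P4_ADDR >> 39) & 0o777, 511); // P4 index -> recursive entry
    assert_eq!((P4_ADDR >> 30) & 0o777, 511); // P3 index -> recursive entry again
    assert_eq!((P4_ADDR >> 21) & 0o777, 511); // P2 index
    assert_eq!((P4_ADDR >> 12) & 0o777, 511); // P1 index
    assert_eq!(P4_ADDR & 0xfff, 0);           // page offset
    // The walk keeps following entry 511, so the 4 KiB "page" at this address
    // is the active P4 table itself, which is what get_active_page_table's
    // RecursivePageTable::new(&mut *P4) relies on.
}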

@@ -1,5 +1,7 @@
use super::{Page, Flags, VirtAddr, RecursivePageTable, Mapper, FrameAllocator, Size4KiB, Frame, PageTable};
use super::super::frame_allocator::TinyAllocator;
use super::{
Flags, Frame, FrameAllocator, Mapper, Page, PageTable, RecursivePageTable, Size4KiB, VirtAddr,
};
pub struct TemporaryPage {
page: Page,
@@ -8,7 +10,8 @@ pub struct TemporaryPage {
impl TemporaryPage {
pub fn new<A>(page: Page, allocator: &mut A) -> TemporaryPage
where A: FrameAllocator<Size4KiB>
where
A: FrameAllocator<Size4KiB>,
{
TemporaryPage {
page,
@@ -18,17 +21,32 @@ impl TemporaryPage {
pub fn map(&mut self, frame: Frame, active_table: &mut RecursivePageTable) -> VirtAddr {
unsafe {
active_table.map_to(self.page, frame, Flags::PRESENT | Flags::WRITABLE, &mut self.allocator).expect("Failed to map temporary page").flush();
active_table
.map_to(
self.page,
frame,
Flags::PRESENT | Flags::WRITABLE,
&mut self.allocator,
)
.expect("Failed to map temporary page")
.flush();
}
self.page.start_address()
}
pub fn unmap(&mut self, active_table: &mut RecursivePageTable) {
active_table.unmap(self.page).expect("Failed to unmap").1.flush()
active_table
.unmap(self.page)
.expect("Failed to unmap")
.1
.flush()
}
pub fn map_table_frame(&mut self, frame: Frame,
active_table: &mut RecursivePageTable) -> &mut PageTable {
pub fn map_table_frame(
&mut self,
frame: Frame,
active_table: &mut RecursivePageTable,
) -> &mut PageTable {
unsafe { &mut *(self.map(frame, active_table).as_u64() as *mut PageTable) }
}
}