From 212c86c42a506ad76e6f4a2e7a3eb6014c220c34 Mon Sep 17 00:00:00 2001
From: MemoryShore <1353318529@qq.com>
Date: Fri, 12 Apr 2024 01:40:44 +0800
Subject: [PATCH] Refactor page fault handling: move generic logic into
 mm/fault.rs
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 kernel/src/arch/x86_64/interrupt/trap.rs |   6 +-
 kernel/src/arch/x86_64/mm/fault.rs       | 215 +---------------------
 kernel/src/mm/fault.rs                   | 224 +++++++++++++++++++++++
 kernel/src/mm/mod.rs                     |   1 +
 4 files changed, 232 insertions(+), 214 deletions(-)
 create mode 100644 kernel/src/mm/fault.rs

diff --git a/kernel/src/arch/x86_64/interrupt/trap.rs b/kernel/src/arch/x86_64/interrupt/trap.rs
index 803115a9a..84e3aee80 100644
--- a/kernel/src/arch/x86_64/interrupt/trap.rs
+++ b/kernel/src/arch/x86_64/interrupt/trap.rs
@@ -1,7 +1,7 @@
 use system_error::SystemError;
 
 use crate::{
-    arch::{mm::fault::PageFaultHandler, CurrentIrqArch},
+    arch::{mm::fault::X86_64PageFault, CurrentIrqArch},
     exception::InterruptArch,
     kerror, kwarn,
     mm::VirtAddr,
@@ -411,9 +411,9 @@ unsafe extern "C" fn do_page_fault(regs: &'static TrapFrame, error_code: u64) {
     let address = VirtAddr::new(address);
     let error_code = X86PfErrorCode::from_bits_truncate(error_code as u32);
     if address.check_user() {
-        PageFaultHandler::do_user_addr_fault(regs, error_code, address);
+        X86_64PageFault::do_user_addr_fault(regs, error_code, address);
     } else {
-        PageFaultHandler::do_kern_addr_fault(regs, error_code, address);
+        X86_64PageFault::do_kern_addr_fault(regs, error_code, address);
     }
     CurrentIrqArch::interrupt_enable();
 }
diff --git a/kernel/src/arch/x86_64/mm/fault.rs b/kernel/src/arch/x86_64/mm/fault.rs
index 7ead82bd1..ec4d299b7 100644
--- a/kernel/src/arch/x86_64/mm/fault.rs
+++ b/kernel/src/arch/x86_64/mm/fault.rs
@@ -1,5 +1,4 @@
 use core::{
-    alloc::Layout,
     intrinsics::{likely, unlikely},
     panic,
 };
@@ -16,11 +15,10 @@ use crate::{
     exception::InterruptArch,
     kerror,
     mm::{
-        page::{page_manager_lock_irqsave, PageFlags},
+        fault::{FaultFlags, PageFaultHandler},
         ucontext::{AddressSpace, LockedVMA},
         VirtAddr, VmFaultReason, VmFlags,
     },
-    process::{ProcessManager, ProcessState},
 };
 
 use super::LockedFrameAllocator;
@@ -28,24 +26,6 @@ use super::LockedFrameAllocator;
 
 pub type PageMapper = crate::mm::page::PageMapper<MMArch, LockedFrameAllocator>;
 
-bitflags! {
-    pub struct FaultFlags: u64{
-        const FAULT_FLAG_WRITE = 1 << 0;
-        const FAULT_FLAG_MKWRITE = 1 << 1;
-        const FAULT_FLAG_ALLOW_RETRY = 1 << 2;
-        const FAULT_FLAG_RETRY_NOWAIT = 1 << 3;
-        const FAULT_FLAG_KILLABLE = 1 << 4;
-        const FAULT_FLAG_TRIED = 1 << 5;
-        const FAULT_FLAG_USER = 1 << 6;
-        const FAULT_FLAG_REMOTE = 1 << 7;
-        const FAULT_FLAG_INSTRUCTION = 1 << 8;
-        const FAULT_FLAG_INTERRUPTIBLE =1 << 9;
-        const FAULT_FLAG_UNSHARE = 1 << 10;
-        const FAULT_FLAG_ORIG_PTE_VALID = 1 << 11;
-        const FAULT_FLAG_VMA_LOCK = 1 << 12;
-    }
-}
-
 impl LockedVMA {
     pub fn access_error(&self, error_code: X86PfErrorCode) -> bool {
         let vm_flags = *self.lock().vm_flags();
@@ -94,9 +74,9 @@ impl LockedVMA {
     }
 }
 
-pub struct PageFaultHandler;
-
-impl PageFaultHandler {
+pub struct X86_64PageFault;
+
+impl X86_64PageFault {
     pub fn show_fault_oops(
         regs: &'static TrapFrame,
         error_code: X86PfErrorCode,
@@ -255,7 +235,7 @@ impl PageFaultHandler {
         }
 
         let mapper = &mut space_guard.user_mapper.utable;
-        fault = Self::handle_mm_fault(vma.clone(), mapper, address, flags, regs);
+        fault = PageFaultHandler::handle_mm_fault(vma.clone(), mapper, address, flags, regs);
 
         if !fault.contains(VmFaultReason::VM_FAULT_COMPLETED) {
             return;
@@ -283,191 +263,4 @@ impl PageFaultHandler {
             panic!("page fault out of memory");
         }
     }
-
-    pub unsafe fn handle_mm_fault(
-        vma: Arc<LockedVMA>,
-        mapper: &mut PageMapper,
-        address: VirtAddr,
-        flags: FaultFlags,
-        _regs: &'static TrapFrame,
-    ) -> VmFaultReason {
-        let current_pcb = ProcessManager::current_pcb();
-        let mut guard = current_pcb.sched_info().inner_lock_write_irqsave();
-        guard.set_state(ProcessState::Runnable);
-
-        if !vma.access_permitted(
-            flags.contains(FaultFlags::FAULT_FLAG_WRITE),
-            flags.contains(FaultFlags::FAULT_FLAG_INSTRUCTION),
-            flags.contains(FaultFlags::FAULT_FLAG_REMOTE),
-        ) {
-            return VmFaultReason::VM_FAULT_SIGSEGV;
-        }
-
-        let guard = vma.lock();
-        let vm_flags = *guard.vm_flags();
-        drop(guard);
-        if unlikely(vm_flags.contains(VmFlags::VM_HUGETLB)) {
-            //TODO: 添加handle_hugetlb_fault
-        } else {
-            Self::handle_normal_fault(vma.clone(), mapper, address, flags);
-        }
-
-        VmFaultReason::VM_FAULT_COMPLETED
-    }
-
-    pub unsafe fn handle_normal_fault(
-        vma: Arc<LockedVMA>,
-        mapper: &mut PageMapper,
-        address: VirtAddr,
-        flags: FaultFlags,
-    ) -> VmFaultReason {
-        if mapper.get_entry(address, 3).is_none() {
-            mapper
-                .allocate_table(address, 2)
-                .expect("failed to allocate PUD table");
-        }
-        let page_flags = vma.lock().flags();
-
-        for level in 2..=3 {
-            let level = MMArch::PAGE_LEVELS - level;
-            if mapper.get_entry(address, level).is_none() {
-                if vma.is_hugepage() {
-                    if vma.is_anonymous() {
-                        mapper.map_huge_page(address, page_flags);
-                    }
-                } else if mapper.allocate_table(address, level - 1).is_none() {
-                    return VmFaultReason::VM_FAULT_OOM;
-                }
-            }
-        }
-
-        Self::handle_pte_fault(vma, mapper, address, flags)
-    }
-
-    pub unsafe fn handle_pte_fault(
-        vma: Arc<LockedVMA>,
-        mapper: &mut PageMapper,
-        address: VirtAddr,
-        flags: FaultFlags,
-    ) -> VmFaultReason {
-        if let Some(mut entry) = mapper.get_entry(address, 0) {
-            if !entry.present() {
-                return Self::do_swap_page(vma, mapper, address, flags);
-            }
-            if entry.protnone() && vma.is_accessible() {
-                return Self::do_numa_page(vma, mapper, address, flags);
-            }
-            if flags.intersects(FaultFlags::FAULT_FLAG_WRITE | FaultFlags::FAULT_FLAG_UNSHARE) {
-                if !entry.write() {
-                    return Self::do_wp_page(vma, mapper, address);
-                } else {
-                    entry.set_flags(PageFlags::from_data(MMArch::ENTRY_FLAG_DIRTY));
-                }
-            }
-            let pte_table = mapper.get_table(address, 0).unwrap();
-            let i = pte_table.index_of(address).unwrap();
-            entry.set_flags(entry.flags().set_access(true));
-            pte_table.set_entry(i, entry);
-        } else if vma.is_anonymous() {
-            return Self::do_anonymous_page(vma, mapper, address);
-        } else {
-            return Self::do_fault(vma, mapper, address, flags);
-        }
-
-        VmFaultReason::VM_FAULT_COMPLETED
-    }
-
-    pub unsafe fn do_anonymous_page(
-        vma: Arc<LockedVMA>,
-        mapper: &mut PageMapper,
-        address: VirtAddr,
-    ) -> VmFaultReason {
-        if let Some(flush) = mapper.map(address, vma.lock().flags()) {
-            flush.flush();
-            crate::debug::klog::mm::mm_debug_log(
-                klog_types::AllocatorLogType::LazyAlloc(klog_types::AllocLogItem::new(
-                    Layout::from_size_align(MMArch::PAGE_SIZE, MMArch::PAGE_SIZE).unwrap(),
-                    Some(address.data()),
-                    Some(mapper.translate(address).unwrap().0.data()),
-                )),
-                klog_types::LogSource::Buddy,
-            );
-            let paddr = mapper.translate(address).unwrap().0;
-            let mut anon_vma_guard = page_manager_lock_irqsave();
-            let page = anon_vma_guard.get_mut(&paddr);
-            page.insert_vma(vma.clone());
-            VmFaultReason::VM_FAULT_COMPLETED
-        } else {
-            VmFaultReason::VM_FAULT_OOM
-        }
-    }
-
-    pub unsafe fn do_fault(
-        _vma: Arc<LockedVMA>,
-        _mapper: &mut PageMapper,
-        _address: VirtAddr,
-        _flags: FaultFlags,
-    ) -> VmFaultReason {
-        panic!("do_fault has not yet been implemented");
-    }
-
-    pub unsafe fn do_swap_page(
-        _vma: Arc<LockedVMA>,
-        _mapper: &mut PageMapper,
-        _address: VirtAddr,
-        _flags: FaultFlags,
-    ) -> VmFaultReason {
-        panic!("do_swap_page has not yet been implemented");
-    }
-
-    pub unsafe fn do_numa_page(
-        _vma: Arc<LockedVMA>,
-        _mapper: &mut PageMapper,
-        _address: VirtAddr,
-        _flags: FaultFlags,
-    ) -> VmFaultReason {
-        panic!("do_numa_page has not yet been implemented");
-    }
-
-    pub unsafe fn do_wp_page(
-        vma: Arc<LockedVMA>,
-        mapper: &mut PageMapper,
-        address: VirtAddr,
-    ) -> VmFaultReason {
-        let old_paddr = mapper.translate(address).unwrap().0;
-        let mut page_manager = page_manager_lock_irqsave();
-        let map_count = page_manager.get_mut(&old_paddr).map_count;
-        drop(page_manager);
-
-        let mut entry = mapper.get_entry(address, 0).unwrap();
-        let new_flags = entry.flags().set_write(true);
-
-        if map_count == 1 {
-            let table = mapper.get_table(address, 0).unwrap();
-            let i = table.index_of(address).unwrap();
-            entry.set_flags(new_flags);
-            table.set_entry(i, entry);
-            VmFaultReason::VM_FAULT_COMPLETED
-        } else if let Some(flush) = mapper.map(address, new_flags) {
-            let mut page_manager = page_manager_lock_irqsave();
-            let old_page = page_manager.get_mut(&old_paddr);
-            old_page.remove_vma(&vma);
-            drop(page_manager);
-
-            flush.flush();
-            let paddr = mapper.translate(address).unwrap().0;
-            let mut anon_vma_guard = page_manager_lock_irqsave();
-            let page = anon_vma_guard.get_mut(&paddr);
-            page.insert_vma(vma.clone());
-
-            (MMArch::phys_2_virt(paddr).unwrap().data() as *mut u8).copy_from_nonoverlapping(
-                MMArch::phys_2_virt(old_paddr).unwrap().data() as *mut u8,
-                MMArch::PAGE_SIZE,
-            );
-
-            VmFaultReason::VM_FAULT_COMPLETED
-        } else {
-            VmFaultReason::VM_FAULT_OOM
-        }
-    }
 }
diff --git a/kernel/src/mm/fault.rs b/kernel/src/mm/fault.rs
new file mode 100644
index 000000000..24c38a28b
--- /dev/null
+++ b/kernel/src/mm/fault.rs
@@ -0,0 +1,224 @@
+use core::{alloc::Layout, intrinsics::unlikely, panic};
+
+use alloc::sync::Arc;
+
+use crate::{
+    arch::{interrupt::TrapFrame, mm::PageMapper, MMArch},
+    mm::{
+        page::{page_manager_lock_irqsave, PageFlags},
+        ucontext::LockedVMA,
+        VirtAddr, VmFaultReason, VmFlags,
+    },
+    process::{ProcessManager, ProcessState},
+};
+
+use crate::mm::MemoryManagementArch;
+
+bitflags! {
+    pub struct FaultFlags: u64 {
+        const FAULT_FLAG_WRITE = 1 << 0;
+        const FAULT_FLAG_MKWRITE = 1 << 1;
+        const FAULT_FLAG_ALLOW_RETRY = 1 << 2;
+        const FAULT_FLAG_RETRY_NOWAIT = 1 << 3;
+        const FAULT_FLAG_KILLABLE = 1 << 4;
+        const FAULT_FLAG_TRIED = 1 << 5;
+        const FAULT_FLAG_USER = 1 << 6;
+        const FAULT_FLAG_REMOTE = 1 << 7;
+        const FAULT_FLAG_INSTRUCTION = 1 << 8;
+        const FAULT_FLAG_INTERRUPTIBLE = 1 << 9;
+        const FAULT_FLAG_UNSHARE = 1 << 10;
+        const FAULT_FLAG_ORIG_PTE_VALID = 1 << 11;
+        const FAULT_FLAG_VMA_LOCK = 1 << 12;
+    }
+}
+
+pub struct PageFaultHandler;
+
+impl PageFaultHandler {
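+    /// Entry point for a fault inside a VMA: checks that the access is
+    /// permitted by the VMA's flags, then dispatches either to the hugetlb
+    /// path (still a TODO) or to `handle_normal_fault`.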
+    pub unsafe fn handle_mm_fault(
+        vma: Arc<LockedVMA>,
+        mapper: &mut PageMapper,
+        address: VirtAddr,
+        flags: FaultFlags,
+        _regs: &'static TrapFrame,
+    ) -> VmFaultReason {
+        let current_pcb = ProcessManager::current_pcb();
+        let mut guard = current_pcb.sched_info().inner_lock_write_irqsave();
+        guard.set_state(ProcessState::Runnable);
+
+        if !vma.access_permitted(
+            flags.contains(FaultFlags::FAULT_FLAG_WRITE),
+            flags.contains(FaultFlags::FAULT_FLAG_INSTRUCTION),
+            flags.contains(FaultFlags::FAULT_FLAG_REMOTE),
+        ) {
+            return VmFaultReason::VM_FAULT_SIGSEGV;
+        }
+
+        let guard = vma.lock();
+        let vm_flags = *guard.vm_flags();
+        drop(guard);
+        if unlikely(vm_flags.contains(VmFlags::VM_HUGETLB)) {
+            // TODO: implement handle_hugetlb_fault
+        } else {
+            Self::handle_normal_fault(vma.clone(), mapper, address, flags);
+        }
+
+        VmFaultReason::VM_FAULT_COMPLETED
+    }
+
+    pub unsafe fn handle_normal_fault(
+        vma: Arc<LockedVMA>,
+        mapper: &mut PageMapper,
+        address: VirtAddr,
+        flags: FaultFlags,
+    ) -> VmFaultReason {
+        if mapper.get_entry(address, 3).is_none() {
+            mapper
+                .allocate_table(address, 2)
+                .expect("failed to allocate PUD table");
+        }
+        let page_flags = vma.lock().flags();
+
+        for level in 2..=3 {
+            let level = MMArch::PAGE_LEVELS - level;
+            if mapper.get_entry(address, level).is_none() {
+                if vma.is_hugepage() {
+                    if vma.is_anonymous() {
+                        mapper.map_huge_page(address, page_flags);
+                    }
+                } else if mapper.allocate_table(address, level - 1).is_none() {
+                    return VmFaultReason::VM_FAULT_OOM;
+                }
+            }
+        }
+
+        Self::handle_pte_fault(vma, mapper, address, flags)
+    }
+
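+    /// Dispatches on the state of the level-0 (PTE) entry: a missing entry
+    /// becomes an anonymous or file-backed fault, a non-present entry a swap
+    /// fault, a PROT_NONE entry a NUMA fault, and a write through a
+    /// read-only entry a write-protection (copy-on-write) fault; otherwise
+    /// the entry's accessed/dirty bits are updated in place.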
+    pub unsafe fn handle_pte_fault(
+        vma: Arc<LockedVMA>,
+        mapper: &mut PageMapper,
+        address: VirtAddr,
+        flags: FaultFlags,
+    ) -> VmFaultReason {
+        if let Some(mut entry) = mapper.get_entry(address, 0) {
+            if !entry.present() {
+                return Self::do_swap_page(vma, mapper, address, flags);
+            }
+            if entry.protnone() && vma.is_accessible() {
+                return Self::do_numa_page(vma, mapper, address, flags);
+            }
+            if flags.intersects(FaultFlags::FAULT_FLAG_WRITE | FaultFlags::FAULT_FLAG_UNSHARE) {
+                if !entry.write() {
+                    return Self::do_wp_page(vma, mapper, address);
+                } else {
+                    entry.set_flags(PageFlags::from_data(MMArch::ENTRY_FLAG_DIRTY));
+                }
+            }
+            let pte_table = mapper.get_table(address, 0).unwrap();
+            let i = pte_table.index_of(address).unwrap();
+            entry.set_flags(entry.flags().set_access(true));
+            pte_table.set_entry(i, entry);
+        } else if vma.is_anonymous() {
+            return Self::do_anonymous_page(vma, mapper, address);
+        } else {
+            return Self::do_fault(vma, mapper, address, flags);
+        }
+
+        VmFaultReason::VM_FAULT_COMPLETED
+    }
+
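+    /// Maps a fresh page for an anonymous VMA, logs the lazy allocation, and
+    /// records the VMA in the new page's reverse mapping.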
+    pub unsafe fn do_anonymous_page(
+        vma: Arc<LockedVMA>,
+        mapper: &mut PageMapper,
+        address: VirtAddr,
+    ) -> VmFaultReason {
+        if let Some(flush) = mapper.map(address, vma.lock().flags()) {
+            flush.flush();
+            crate::debug::klog::mm::mm_debug_log(
+                klog_types::AllocatorLogType::LazyAlloc(klog_types::AllocLogItem::new(
+                    Layout::from_size_align(MMArch::PAGE_SIZE, MMArch::PAGE_SIZE).unwrap(),
+                    Some(address.data()),
+                    Some(mapper.translate(address).unwrap().0.data()),
+                )),
+                klog_types::LogSource::Buddy,
+            );
+            let paddr = mapper.translate(address).unwrap().0;
+            let mut anon_vma_guard = page_manager_lock_irqsave();
+            let page = anon_vma_guard.get_mut(&paddr);
+            page.insert_vma(vma.clone());
+            VmFaultReason::VM_FAULT_COMPLETED
+        } else {
+            VmFaultReason::VM_FAULT_OOM
+        }
+    }
+
+    pub unsafe fn do_fault(
+        _vma: Arc<LockedVMA>,
+        _mapper: &mut PageMapper,
+        _address: VirtAddr,
+        _flags: FaultFlags,
+    ) -> VmFaultReason {
+        panic!("do_fault has not yet been implemented");
+    }
+
+    pub unsafe fn do_swap_page(
+        _vma: Arc<LockedVMA>,
+        _mapper: &mut PageMapper,
+        _address: VirtAddr,
+        _flags: FaultFlags,
+    ) -> VmFaultReason {
+        panic!("do_swap_page has not yet been implemented");
+    }
+
+    pub unsafe fn do_numa_page(
+        _vma: Arc<LockedVMA>,
+        _mapper: &mut PageMapper,
+        _address: VirtAddr,
+        _flags: FaultFlags,
+    ) -> VmFaultReason {
+        panic!("do_numa_page has not yet been implemented");
+    }
+
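+    /// Write-protection (copy-on-write) fault: if the old page is mapped
+    /// exactly once it is made writable in place; otherwise a new page is
+    /// mapped at the faulting address, the old contents are copied into it,
+    /// and the reverse mappings of both pages are updated.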
+    pub unsafe fn do_wp_page(
+        vma: Arc<LockedVMA>,
+        mapper: &mut PageMapper,
+        address: VirtAddr,
+    ) -> VmFaultReason {
+        let old_paddr = mapper.translate(address).unwrap().0;
+        let mut page_manager = page_manager_lock_irqsave();
+        let map_count = page_manager.get_mut(&old_paddr).map_count;
+        drop(page_manager);
+
+        let mut entry = mapper.get_entry(address, 0).unwrap();
+        let new_flags = entry.flags().set_write(true);
+
+        if map_count == 1 {
+            let table = mapper.get_table(address, 0).unwrap();
+            let i = table.index_of(address).unwrap();
+            entry.set_flags(new_flags);
+            table.set_entry(i, entry);
+            VmFaultReason::VM_FAULT_COMPLETED
+        } else if let Some(flush) = mapper.map(address, new_flags) {
+            let mut page_manager = page_manager_lock_irqsave();
+            let old_page = page_manager.get_mut(&old_paddr);
+            old_page.remove_vma(&vma);
+            drop(page_manager);
+
+            flush.flush();
+            let paddr = mapper.translate(address).unwrap().0;
+            let mut anon_vma_guard = page_manager_lock_irqsave();
+            let page = anon_vma_guard.get_mut(&paddr);
+            page.insert_vma(vma.clone());
+
+            (MMArch::phys_2_virt(paddr).unwrap().data() as *mut u8).copy_from_nonoverlapping(
+                MMArch::phys_2_virt(old_paddr).unwrap().data() as *mut u8,
+                MMArch::PAGE_SIZE,
+            );
+
+            VmFaultReason::VM_FAULT_COMPLETED
+        } else {
+            VmFaultReason::VM_FAULT_OOM
+        }
+    }
+}
diff --git a/kernel/src/mm/mod.rs b/kernel/src/mm/mod.rs
index dcb3fda12..37028d921 100644
--- a/kernel/src/mm/mod.rs
+++ b/kernel/src/mm/mod.rs
@@ -22,6 +22,7 @@ use self::{
 pub mod allocator;
 pub mod c_adapter;
 pub mod early_ioremap;
+pub mod fault;
 pub mod init;
 pub mod kernel_mapper;
 pub mod madvise;