fix: correct the panic caused by the process pcb's on_cpu field not being set #1057

Merged
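This PR fixes a kernel panic triggered by reading the pcb's on_cpu field with unwrap() before the field had ever been set: unwrap() on a None Option panics. The fix has two halves: the lookup in kernel/src/process/mod.rs now falls back to the current CPU when on_cpu is None, and the run queue records its own CPU id into a task when it enqueues it, so the field is populated from then on. The rest of the diff switches bare usize CPU indices to the ProcessorId newtype. Below is a minimal, self-contained sketch of the fallback half; the struct and the current_cpu() helper are stand-ins for illustration, not the real sched_info() API.

// Sketch of the fallback pattern used by the fix (stand-in types, not DragonOS's).
struct SchedInfoSketch {
    // Mirrors the pcb's on_cpu: None until the task has been placed on a run queue.
    on_cpu: Option<u32>,
}

// Stand-in for smp_get_processor_id(): the CPU this code is currently running on.
fn current_cpu() -> u32 {
    0
}

fn rq_cpu_for(info: &SchedInfoSketch) -> u32 {
    // Before the fix: info.on_cpu.unwrap() panics for a task that was never scheduled.
    // After the fix: fall back to the current CPU instead of panicking.
    info.on_cpu.unwrap_or_else(current_cpu)
}

fn main() {
    let fresh_task = SchedInfoSketch { on_cpu: None };
    assert_eq!(rq_cpu_for(&fresh_task), 0); // no panic
    let placed_task = SchedInfoSketch { on_cpu: Some(2) };
    assert_eq!(rq_cpu_for(&placed_task), 2);
}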
Changes from all commits
7 changes: 6 additions & 1 deletion kernel/src/process/mod.rs
@@ -275,7 +275,12 @@ impl ProcessManager {
 // avoid deadlock
 drop(writer);
 
-let rq = cpu_rq(pcb.sched_info().on_cpu().unwrap().data() as usize);
+let rq = cpu_rq(
+    pcb.sched_info()
+        .on_cpu()
+        .unwrap_or(smp_get_processor_id())
+        .data() as usize,
+);
 
 let (rq, _guard) = rq.self_lock();
 rq.update_rq_clock();
4 changes: 2 additions & 2 deletions kernel/src/sched/clock.rs
@@ -1,12 +1,12 @@
 //! This file implements the clocks used in scheduling
 //!
-use crate::{arch::CurrentTimeArch, time::TimeArch};
+use crate::{arch::CurrentTimeArch, smp::cpu::ProcessorId, time::TimeArch};
 
 pub struct SchedClock;
 
 impl SchedClock {
     #[inline]
-    pub fn sched_clock_cpu(_cpu: usize) -> u64 {
+    pub fn sched_clock_cpu(_cpu: ProcessorId) -> u64 {
         #[cfg(target_arch = "x86_64")]
         {
             if crate::arch::driver::tsc::TSCManager::cpu_khz() == 0 {
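The signature change above is part of the usize-to-ProcessorId switch. The real type lives in smp::cpu; the sketch below is only inferred from the ProcessorId::new(..) and .data() calls visible elsewhere in this diff, so the actual definition may differ.

// Assumed shape of the ProcessorId newtype, inferred from its usage in this PR.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub struct ProcessorId(u32);

impl ProcessorId {
    pub const fn new(id: u32) -> Self {
        Self(id)
    }

    // Raw value, used when a per-CPU array has to be indexed.
    pub const fn data(&self) -> u32 {
        self.0
    }
}

fn main() {
    let cpu = ProcessorId::new(3);
    assert_eq!(cpu.data() as usize, 3);
}

Carrying the newtype instead of a bare usize turns accidental mix-ups between CPU ids and ordinary indices into type errors rather than silent bugs.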
15 changes: 9 additions & 6 deletions kernel/src/sched/cputime.rs
@@ -1,14 +1,17 @@
 use core::sync::atomic::{compiler_fence, AtomicUsize, Ordering};
 
 use crate::{
-    arch::CurrentIrqArch, exception::InterruptArch, process::ProcessControlBlock,
-    smp::core::smp_get_processor_id, time::jiffies::TICK_NESC,
+    arch::CurrentIrqArch,
+    exception::InterruptArch,
+    process::ProcessControlBlock,
+    smp::{core::smp_get_processor_id, cpu::ProcessorId},
+    time::jiffies::TICK_NESC,
 };
 use alloc::sync::Arc;
 
 use super::{clock::SchedClock, cpu_irq_time};
 
-pub fn irq_time_read(cpu: usize) -> u64 {
+pub fn irq_time_read(cpu: ProcessorId) -> u64 {
     compiler_fence(Ordering::SeqCst);
     let irqtime = cpu_irq_time(cpu);
 
@@ -49,7 +52,7 @@ impl IrqTime {
 }
 
 pub fn irqtime_start() {
-    let cpu = smp_get_processor_id().data() as usize;
+    let cpu = smp_get_processor_id();
     let irq_time = cpu_irq_time(cpu);
     compiler_fence(Ordering::SeqCst);
     irq_time.irq_start_time = SchedClock::sched_clock_cpu(cpu) as u64;
@@ -58,7 +61,7 @@

 pub fn irqtime_account_irq(_pcb: Arc<ProcessControlBlock>) {
     compiler_fence(Ordering::SeqCst);
-    let cpu = smp_get_processor_id().data() as usize;
+    let cpu = smp_get_processor_id();
     let irq_time = cpu_irq_time(cpu);
     compiler_fence(Ordering::SeqCst);
     let delta = SchedClock::sched_clock_cpu(cpu) as u64 - irq_time.irq_start_time;
@@ -93,7 +96,7 @@ impl CpuTimeFunc {
 let mut accounted = Self::steal_account_process_time(max);
 
 if accounted < max {
-    let irqtime = cpu_irq_time(smp_get_processor_id().data() as usize);
+    let irqtime = cpu_irq_time(smp_get_processor_id());
     accounted += irqtime.irqtime_tick_accounted(max - accounted);
 }
 
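The cputime.rs edits are type plumbing only, but for context: irqtime_start snapshots the per-CPU scheduler clock when IRQ handling begins, and irqtime_account_irq later takes the delta against that snapshot. A toy version of that start/delta pairing follows, with the clock passed in explicitly; the accumulation field and names are illustrative, not the kernel's exact struct.

// Toy per-CPU IRQ time accounting, mirroring the start/account pair in cputime.rs.
struct IrqTimeSketch {
    irq_start_time: u64, // snapshot taken when the IRQ starts
    total_irq_time: u64, // accumulated time spent in IRQ context (illustrative field)
}

impl IrqTimeSketch {
    fn irqtime_start(&mut self, now: u64) {
        self.irq_start_time = now;
    }

    fn irqtime_account_irq(&mut self, now: u64) {
        let delta = now - self.irq_start_time;
        self.total_irq_time += delta;
    }
}

fn main() {
    let mut t = IrqTimeSketch { irq_start_time: 0, total_irq_time: 0 };
    t.irqtime_start(100); // IRQ entry at clock = 100
    t.irqtime_account_irq(130); // IRQ exit at clock = 130
    assert_eq!(t.total_irq_time, 30);
}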
15 changes: 8 additions & 7 deletions kernel/src/sched/mod.rs
@@ -63,8 +63,8 @@ pub const SCHED_CAPACITY_SHIFT: u64 = SCHED_FIXEDPOINT_SHIFT;
 pub const SCHED_CAPACITY_SCALE: u64 = 1 << SCHED_CAPACITY_SHIFT;
 
 #[inline]
-pub fn cpu_irq_time(cpu: usize) -> &'static mut IrqTime {
-    unsafe { CPU_IRQ_TIME.as_mut().unwrap()[cpu] }
+pub fn cpu_irq_time(cpu: ProcessorId) -> &'static mut IrqTime {
+    unsafe { CPU_IRQ_TIME.as_mut().unwrap()[cpu.data() as usize] }
 }
 
 #[inline]
@@ -289,7 +289,7 @@ pub struct CpuRunQueue {
 lock: SpinLock<()>,
 lock_on_who: AtomicUsize,
 
-cpu: usize,
+cpu: ProcessorId,
 clock_task: u64,
 clock: u64,
 prev_irq_time: u64,
@@ -329,7 +329,7 @@ pub struct CpuRunQueue {
 }
 
 impl CpuRunQueue {
-    pub fn new(cpu: usize) -> Self {
+    pub fn new(cpu: ProcessorId) -> Self {
         Self {
             lock: SpinLock::new(()),
             lock_on_who: AtomicUsize::new(usize::MAX),
@@ -460,6 +460,7 @@ impl CpuRunQueue {
     self.enqueue_task(pcb.clone(), flags);
 
     *pcb.sched_info().on_rq.lock_irqsave() = OnRq::Queued;
+    pcb.sched_info().set_on_cpu(Some(self.cpu));
 }
 
 /// Check whether the given task can preempt the currently running task
@@ -638,7 +639,7 @@

 let cpu = self.cpu;
 
-if cpu == smp_get_processor_id().data() as usize {
+if cpu == smp_get_processor_id() {
     // assert!(
     //     Arc::ptr_eq(&current, &ProcessManager::current_pcb()),
     //     "rq current name {} process current {}",
@@ -653,7 +654,7 @@
     }
 
     // Send a reschedule IPI to the target cpu
-    send_resched_ipi(ProcessorId::new(cpu as u32));
+    send_resched_ipi(cpu);
 }
 
 /// Select the next task
@@ -986,7 +987,7 @@ pub fn sched_init() {

 let mut cpu_runqueue = Vec::with_capacity(PerCpu::MAX_CPU_NUM as usize);
 for cpu in 0..PerCpu::MAX_CPU_NUM as usize {
-    let rq = Arc::new(CpuRunQueue::new(cpu));
+    let rq = Arc::new(CpuRunQueue::new(ProcessorId::new(cpu as u32)));
     rq.cfs.force_mut().set_rq(Arc::downgrade(&rq));
     cpu_runqueue.push(rq);
 }
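Taken together, the run-queue half of the fix: each CpuRunQueue now carries its ProcessorId, sched_init builds one per CPU, and the enqueue path stamps that id into the task's on_cpu, which is what the lookup in process/mod.rs reads later. A compressed sketch of that flow with simplified stand-in types, not the actual DragonOS structs:

// Compressed sketch of the enqueue path: the run queue stamps its own CPU id
// into every task it accepts, so a later read of on_cpu has a value.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct ProcessorId(u32);

struct TaskSketch {
    on_cpu: Option<ProcessorId>,
}

struct RunQueueSketch {
    cpu: ProcessorId,
    queue: Vec<usize>, // indices into a task table, enough for the sketch
}

impl RunQueueSketch {
    fn enqueue(&mut self, idx: usize, tasks: &mut [TaskSketch]) {
        self.queue.push(idx);
        // Mirrors pcb.sched_info().set_on_cpu(Some(self.cpu)) from this PR.
        tasks[idx].on_cpu = Some(self.cpu);
    }
}

fn main() {
    // One run queue per CPU, as in sched_init.
    let mut rqs: Vec<RunQueueSketch> = (0u32..2)
        .map(|c| RunQueueSketch { cpu: ProcessorId(c), queue: Vec::new() })
        .collect();
    let mut tasks = vec![TaskSketch { on_cpu: None }];
    rqs[1].enqueue(0, &mut tasks);
    assert_eq!(tasks[0].on_cpu, Some(ProcessorId(1)));
}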