Commit 64f084a

ch8
1 parent 267a266 commit 64f084a

File tree

11 files changed: +570, -59 lines


os/src/fs/inode.rs

Lines changed: 12 additions & 1 deletion

@@ -59,9 +59,20 @@ lazy_static! {
 /// List all apps in the root directory
 pub fn list_apps() {
     println!("/**** APPS ****");
-    for app in ROOT_INODE.ls() {
+
+    // 1. Get the names of all applications from the root directory.
+    //    Note: `apps` must be declared `mut` because the sort below modifies it in place.
+    let mut apps = ROOT_INODE.ls();
+
+    // 2. Sort the list in ascending lexicographic order.
+    //    `.sort()` is a standard `Vec` method that sorts the vector in place.
+    apps.sort();
+
+    // 3. Iterate over the sorted list and print each name.
+    for app in apps {
         println!("{}", app);
     }
+
     println!("**************/");
 }
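
For reference, `Vec::<String>::sort` orders entries in ascending lexicographic (byte-wise) order, which is all the listing above relies on. A tiny host-side sketch with made-up app names (placeholders, not names from this commit):

    fn main() {
        // Placeholder names; in the kernel they come from ROOT_INODE.ls().
        let mut apps = vec![
            String::from("ch8b_test"),
            String::from("ch2b_hello_world"),
            String::from("ch6b_usertest"),
        ];
        // Same call as in list_apps(): in-place, ascending lexicographic order.
        apps.sort();
        assert_eq!(apps[0], "ch2b_hello_world");
        for app in &apps {
            println!("{}", app);
        }
    }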

os/src/mm/page_table.rs

Lines changed: 4 additions & 2 deletions

@@ -87,7 +87,8 @@ impl PageTable {
             frames: Vec::new(),
         }
     }
-    fn find_pte_create(&mut self, vpn: VirtPageNum) -> Option<&mut PageTableEntry> {
+    /// Find the PTE for `vpn`, creating intermediate page-table nodes on the way down.
+    pub fn find_pte_create(&mut self, vpn: VirtPageNum) -> Option<&mut PageTableEntry> {
         let idxs = vpn.indexes();
         let mut ppn = self.root_ppn;
         let mut result: Option<&mut PageTableEntry> = None;
@@ -106,7 +107,8 @@ impl PageTable {
         }
         result
     }
-    fn find_pte(&self, vpn: VirtPageNum) -> Option<&mut PageTableEntry> {
+    /// Find the PTE for `vpn` without allocating; returns `None` if the path is unmapped.
+    pub fn find_pte(&self, vpn: VirtPageNum) -> Option<&mut PageTableEntry> {
         let idxs = vpn.indexes();
         let mut ppn = self.root_ppn;
         let mut result: Option<&mut PageTableEntry> = None;
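
Making `find_pte` (and `find_pte_create`) public is what lets the syscall layer translate user virtual addresses by hand, as the sys_get_time change below does. A minimal sketch of that pattern, assuming the usual rCore types (`PageTable`, `VirtAddr`, `PhysAddr`); the helper name `user_va_to_pa` is hypothetical and not part of this commit:

    /// Translate a user virtual address under `token` into a physical address,
    /// or None if the containing page is not mapped.
    fn user_va_to_pa(token: usize, va: usize) -> Option<usize> {
        let page_table = PageTable::from_token(token);
        let va: VirtAddr = va.into();
        let pte = page_table.find_pte(va.floor())?;
        if !pte.is_valid() {
            return None;
        }
        let base: PhysAddr = pte.ppn().into();
        Some(base.0 + va.page_offset())
    }

Note that the result is only valid for the page containing `va`; a buffer that crosses a page boundary has to be translated piecewise.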

os/src/syscall/process.rs

Lines changed: 19 additions & 8 deletions

@@ -1,10 +1,10 @@
 use crate::{
     fs::{open_file, OpenFlags},
-    mm::{translated_ref, translated_refmut, translated_str},
+    mm::{translated_ref, translated_refmut, translated_str, PageTable, PhysAddr, VirtAddr},
     task::{
         current_process, current_task, current_user_token, exit_current_and_run_next, pid2process,
         suspend_current_and_run_next, SignalFlags,
-    },
+    }, timer::get_time_us,
 };
 use alloc::{string::String, sync::Arc, vec::Vec};

@@ -151,12 +151,23 @@ pub fn sys_kill(pid: usize, signal: u32) -> isize {
 /// YOUR JOB: get time with second and microsecond
 /// HINT: You might reimplement it with virtual memory management.
 /// HINT: What if [`TimeVal`] is splitted by two pages ?
-pub fn sys_get_time(_ts: *mut TimeVal, _tz: usize) -> isize {
-    trace!(
-        "kernel:pid[{}] sys_get_time NOT IMPLEMENTED",
-        current_task().unwrap().process.upgrade().unwrap().getpid()
-    );
-    -1
+pub fn sys_get_time(ts: *mut TimeVal, _tz: usize) -> isize {
+    trace!("kernel:pid[{}] sys_get_time", current_task().unwrap().process.upgrade().unwrap().getpid());
+
+    let us = get_time_us();
+    let token = current_user_token();
+    let page_table = PageTable::from_token(token);
+    let va: VirtAddr = (ts as usize).into();
+    let base_paddr =
+        PhysAddr::from(page_table.find_pte(va.floor()).unwrap().ppn());
+    let ts_paddr = (base_paddr.0 + va.page_offset()) as *mut TimeVal;
+    unsafe {
+        *ts_paddr = TimeVal {
+            sec: us / 1_000_000,
+            usec: us % 1_000_000,
+        }
+    }
+    0
 }

 /// mmap syscall
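
The hint above asks what happens if `TimeVal` straddles a page boundary: the committed version translates only the page that contains the start of `ts`, so it assumes the whole struct lives in one page. A more defensive sketch copies the value byte by byte through `translated_byte_buffer` from the mm module (it would also have to be added to the `use crate::mm::...` list; treat the exact signature as an assumption):

    pub fn sys_get_time(ts: *mut TimeVal, _tz: usize) -> isize {
        let us = get_time_us();
        let time_val = TimeVal {
            sec: us / 1_000_000,
            usec: us % 1_000_000,
        };
        // View the value as raw bytes ...
        let src = unsafe {
            core::slice::from_raw_parts(
                &time_val as *const TimeVal as *const u8,
                core::mem::size_of::<TimeVal>(),
            )
        };
        // ... and copy them into the user buffer page by page, so a TimeVal
        // that spans two pages is still written correctly.
        let buffers = translated_byte_buffer(
            current_user_token(),
            ts as *const u8,
            core::mem::size_of::<TimeVal>(),
        );
        let mut offset = 0;
        for dst in buffers {
            let len = dst.len();
            dst.copy_from_slice(&src[offset..offset + len]);
            offset += len;
        }
        0
    }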

os/src/syscall/sync.rs

Lines changed: 151 additions & 41 deletions

@@ -2,6 +2,36 @@ use crate::sync::{Condvar, Mutex, MutexBlocking, MutexSpin, Semaphore};
 use crate::task::{block_current_and_run_next, current_process, current_task};
 use crate::timer::{add_timer, get_time_ms};
 use alloc::sync::Arc;
+use alloc::vec;
+use alloc::vec::Vec;
+
+fn detect_deadlock(
+    mut available: Vec<isize>,
+    alloc: &Vec<Vec<isize>>,
+    need: &Vec<Vec<isize>>,
+) -> bool {
+    let mut finish = vec![false; alloc.len()];
+    let mut changed = true;
+    while changed {
+        changed = false;
+        for (index, (task_need, task_alloc)) in need.iter().zip(alloc.iter()).enumerate() {
+            if finish[index] {
+                continue;
+            }
+            if available.iter().zip(task_need).all(|(a, b)| a >= b) {
+                available = available
+                    .iter()
+                    .zip(task_alloc)
+                    .map(|(a, b)| a + b)
+                    .collect();
+                finish[index] = true;
+                changed = true;
+            }
+        }
+    }
+    !finish.iter().all(|&x| x)
+}
+
 /// sleep syscall
 pub fn sys_sleep(ms: usize) -> isize {
     trace!(
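
`detect_deadlock` is the detection half of the banker's algorithm: it repeatedly looks for a task whose remaining `need` fits inside `available`, pretends that task runs to completion and returns its allocation, and reports a deadlock when some task can never be marked finished. The callers below use it to refuse a request with `-0xDEAD` instead of blocking forever. A quick illustration on concrete inputs, written as a test-style sketch (the kernel is `no_std`, so these tests would not actually run without a host-side harness; they only show the expected behavior):

    #[cfg(test)]
    mod banker_tests {
        use super::detect_deadlock;
        use alloc::vec;

        #[test]
        fn circular_wait_is_reported() {
            // Task 0 holds r0 and needs r1; task 1 holds r1 and needs r0; nothing is free.
            let available = vec![0, 0];
            let alloc = vec![vec![1, 0], vec![0, 1]];
            let need = vec![vec![0, 1], vec![1, 0]];
            assert!(detect_deadlock(available, &alloc, &need));
        }

        #[test]
        fn safe_state_is_not_reported() {
            // One unit of r0 is free, so task 1 (which needs r0) can finish
            // and then release r1 for task 0.
            let available = vec![1, 0];
            let alloc = vec![vec![1, 0], vec![0, 1]];
            let need = vec![vec![0, 1], vec![1, 0]];
            assert!(!detect_deadlock(available, &alloc, &need));
        }
    }
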
@@ -49,49 +79,82 @@ pub fn sys_mutex_create(blocking: bool) -> isize {
         .map(|(id, _)| id)
     {
         process_inner.mutex_list[id] = mutex;
+        process_inner.resource_available[0][id] = 1;
+        process_inner.resource_alloc[0]
+            .iter_mut()
+            .for_each(|resource_alloc| resource_alloc[id] = 0);
+        process_inner.resource_need[0]
+            .iter_mut()
+            .for_each(|resource_need| resource_need[id] = 0);
         id as isize
     } else {
         process_inner.mutex_list.push(mutex);
+        process_inner.resource_available[0].push(1);
+        process_inner.resource_alloc[0]
+            .iter_mut()
+            .for_each(|resource_alloc| resource_alloc.push(0));
+        process_inner.resource_need[0]
+            .iter_mut()
+            .for_each(|resource_need| resource_need.push(0));
         process_inner.mutex_list.len() as isize - 1
     }
 }
 /// mutex lock syscall
 pub fn sys_mutex_lock(mutex_id: usize) -> isize {
+    let tid = current_task()
+        .unwrap()
+        .inner_exclusive_access()
+        .res
+        .as_ref()
+        .unwrap()
+        .tid;
     trace!(
         "kernel:pid[{}] tid[{}] sys_mutex_lock",
         current_task().unwrap().process.upgrade().unwrap().getpid(),
-        current_task()
-            .unwrap()
-            .inner_exclusive_access()
-            .res
-            .as_ref()
-            .unwrap()
-            .tid
+        tid
     );
     let process = current_process();
-    let process_inner = process.inner_exclusive_access();
+    let mut process_inner = process.inner_exclusive_access();
+    process_inner.resource_need[0][tid][mutex_id] += 1;
     let mutex = Arc::clone(process_inner.mutex_list[mutex_id].as_ref().unwrap());
-    drop(process_inner);
-    drop(process);
-    mutex.lock();
-    0
+    if detect_deadlock(
+        process_inner.resource_available[0].clone(),
+        &process_inner.resource_alloc[0],
+        &process_inner.resource_need[0],
+    ) {
+        process_inner.resource_need[0][tid][mutex_id] -= 1;
+        -0xDEAD
+    } else {
+        drop(process_inner);
+        drop(process);
+        mutex.lock();
+        let process = current_process();
+        let mut process_inner = process.inner_exclusive_access();
+        process_inner.resource_available[0][mutex_id] -= 1;
+        process_inner.resource_alloc[0][tid][mutex_id] += 1;
+        process_inner.resource_need[0][tid][mutex_id] -= 1;
+        0
+    }
 }
 /// mutex unlock syscall
 pub fn sys_mutex_unlock(mutex_id: usize) -> isize {
+    let tid = current_task()
+        .unwrap()
+        .inner_exclusive_access()
+        .res
+        .as_ref()
+        .unwrap()
+        .tid;
     trace!(
         "kernel:pid[{}] tid[{}] sys_mutex_unlock",
         current_task().unwrap().process.upgrade().unwrap().getpid(),
-        current_task()
-            .unwrap()
-            .inner_exclusive_access()
-            .res
-            .as_ref()
-            .unwrap()
-            .tid
+        tid
     );
     let process = current_process();
-    let process_inner = process.inner_exclusive_access();
+    let mut process_inner = process.inner_exclusive_access();
     let mutex = Arc::clone(process_inner.mutex_list[mutex_id].as_ref().unwrap());
+    process_inner.resource_available[0][mutex_id] += 1;
+    process_inner.resource_alloc[0][tid][mutex_id] -= 1;
     drop(process_inner);
     drop(process);
     mutex.unlock();
@@ -120,54 +183,89 @@ pub fn sys_semaphore_create(res_count: usize) -> isize {
         .map(|(id, _)| id)
     {
         process_inner.semaphore_list[id] = Some(Arc::new(Semaphore::new(res_count)));
+        process_inner.resource_available[1][id] = res_count as isize;
+        process_inner.resource_alloc[1]
+            .iter_mut()
+            .for_each(|resource_alloc| resource_alloc[id] = 0);
+        process_inner.resource_need[1]
+            .iter_mut()
+            .for_each(|resource_need| resource_need[id] = 0);
         id
     } else {
         process_inner
             .semaphore_list
             .push(Some(Arc::new(Semaphore::new(res_count))));
+        process_inner.resource_available[1].push(res_count as isize);
+        process_inner.resource_alloc[1]
+            .iter_mut()
+            .for_each(|resource_alloc| resource_alloc.push(0));
+        process_inner.resource_need[1]
+            .iter_mut()
+            .for_each(|resource_need| resource_need.push(0));
         process_inner.semaphore_list.len() - 1
     };
     id as isize
 }
 /// semaphore up syscall
 pub fn sys_semaphore_up(sem_id: usize) -> isize {
+    let tid = current_task()
+        .unwrap()
+        .inner_exclusive_access()
+        .res
+        .as_ref()
+        .unwrap()
+        .tid;
     trace!(
         "kernel:pid[{}] tid[{}] sys_semaphore_up",
         current_task().unwrap().process.upgrade().unwrap().getpid(),
-        current_task()
-            .unwrap()
-            .inner_exclusive_access()
-            .res
-            .as_ref()
-            .unwrap()
-            .tid
+        tid
    );
     let process = current_process();
-    let process_inner = process.inner_exclusive_access();
+    let mut process_inner = process.inner_exclusive_access();
+    process_inner.resource_alloc[1][tid][sem_id] -= 1;
+    process_inner.resource_available[1][sem_id] += 1;
     let sem = Arc::clone(process_inner.semaphore_list[sem_id].as_ref().unwrap());
     drop(process_inner);
     sem.up();
     0
 }
 /// semaphore down syscall
 pub fn sys_semaphore_down(sem_id: usize) -> isize {
+    let tid = current_task()
+        .unwrap()
+        .inner_exclusive_access()
+        .res
+        .as_ref()
+        .unwrap()
+        .tid;
     trace!(
         "kernel:pid[{}] tid[{}] sys_semaphore_down",
         current_task().unwrap().process.upgrade().unwrap().getpid(),
-        current_task()
-            .unwrap()
-            .inner_exclusive_access()
-            .res
-            .as_ref()
-            .unwrap()
-            .tid
+        tid
     );
     let process = current_process();
-    let process_inner = process.inner_exclusive_access();
+    let mut process_inner = process.inner_exclusive_access();
+    process_inner.resource_need[1][tid][sem_id] += 1;
     let sem = Arc::clone(process_inner.semaphore_list[sem_id].as_ref().unwrap());
-    drop(process_inner);
-    sem.down();
-    0
+    if process_inner.detect_deadlock
+        && detect_deadlock(
+            process_inner.resource_available[1].clone(),
+            &process_inner.resource_alloc[1],
+            &process_inner.resource_need[1],
+        )
+    {
+        process_inner.resource_need[1][tid][sem_id] -= 1;
+        -0xDEAD
+    } else {
+        drop(process_inner);
+        sem.down();
+        let process = current_process();
+        let mut process_inner = process.inner_exclusive_access();
+        process_inner.resource_available[1][sem_id] -= 1;
+        process_inner.resource_alloc[1][tid][sem_id] += 1;
+        process_inner.resource_need[1][tid][sem_id] -= 1;
+        0
+    }
 }
 /// condvar create syscall
 pub fn sys_condvar_create() -> isize {
@@ -247,5 +345,17 @@ pub fn sys_condvar_wait(condvar_id: usize, mutex_id: usize) -> isize {
 /// YOUR JOB: Implement deadlock detection, but might not all in this syscall
 pub fn sys_enable_deadlock_detect(_enabled: usize) -> isize {
     trace!("kernel: sys_enable_deadlock_detect NOT IMPLEMENTED");
-    -1
-}
+    let process = current_process();
+    let mut process_inner = process.inner_exclusive_access();
+    match _enabled {
+        0 => {
+            process_inner.detect_deadlock = false;
+            0
+        }
+        1 => {
+            process_inner.detect_deadlock = true;
+            0
+        }
+        _ => -1,
+    }
+}
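
From user space the refusal surfaces as the `-0xDEAD` return value. A rough usage sketch in the style of the lab's test programs; the wrapper names and return types (`enable_deadlock_detect`, `mutex_create`, `mutex_lock`, `mutex_unlock`) are assumptions modeled on the lab's `user_lib`, not something this commit defines. Note also that in this commit the mutex path runs the banker's check unconditionally, while the semaphore path honors the `detect_deadlock` flag toggled by `sys_enable_deadlock_detect`.

    #![no_std]
    #![no_main]

    extern crate user_lib;
    use user_lib::{enable_deadlock_detect, mutex_create, mutex_lock, mutex_unlock};

    #[no_mangle]
    pub fn main() -> i32 {
        // Ask the kernel to run deadlock detection for this process.
        enable_deadlock_detect(1);
        let mid = mutex_create() as usize;
        mutex_lock(mid);
        // A second lock on the same mutex from the same (only) thread can never
        // be granted, so the detector refuses it instead of blocking forever.
        assert_eq!(mutex_lock(mid), -0xDEAD);
        mutex_unlock(mid);
        0
    }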

os/src/syscall/thread.rs

Lines changed: 15 additions & 6 deletions

@@ -3,7 +3,7 @@ use crate::{
     task::{add_task, current_task, TaskControlBlock},
     trap::{trap_handler, TrapContext},
 };
-use alloc::sync::Arc;
+use alloc::{sync::Arc, vec};
 /// thread create syscall
 pub fn sys_thread_create(entry: usize, arg: usize) -> isize {
     trace!(
@@ -36,11 +36,20 @@ pub fn sys_thread_create(entry: usize, arg: usize) -> isize {
     let new_task_tid = new_task_res.tid;
     let mut process_inner = process.inner_exclusive_access();
     // add new thread to current process
-    let tasks = &mut process_inner.tasks;
-    while tasks.len() < new_task_tid + 1 {
-        tasks.push(None);
+    let mutex_resource_count = process_inner.resource_available[0].len();
+    let sem_resource_count = process_inner.resource_available[1].len();
+    while process_inner.tasks.len() < new_task_tid + 1 {
+        process_inner.tasks.push(None);
+        process_inner.resource_alloc[0].push(vec![0; mutex_resource_count]);
+        process_inner.resource_need[0].push(vec![0; mutex_resource_count]);
+        process_inner.resource_alloc[1].push(vec![0; sem_resource_count]);
+        process_inner.resource_need[1].push(vec![0; sem_resource_count]);
     }
-    tasks[new_task_tid] = Some(Arc::clone(&new_task));
+    process_inner.tasks[new_task_tid] = Some(Arc::clone(&new_task));
+    process_inner.resource_alloc[0][new_task_tid].iter_mut().for_each(|x| *x = 0);
+    process_inner.resource_alloc[1][new_task_tid].iter_mut().for_each(|x| *x = 0);
+    process_inner.resource_need[0][new_task_tid].iter_mut().for_each(|x| *x = 0);
+    process_inner.resource_need[1][new_task_tid].iter_mut().for_each(|x| *x = 0);
     let new_task_trap_cx = new_task_inner.get_trap_cx();
     *new_task_trap_cx = TrapContext::app_init_context(
         entry,
@@ -117,4 +126,4 @@ pub fn sys_waittid(tid: usize) -> i32 {
     // waited thread has not exited
     -2
 }
-}
+}
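
The bookkeeping used throughout sync.rs and thread.rs relies on fields that are not shown in this excerpt and are presumably added to the process control block elsewhere in the commit (os/src/task/process.rs in the rCore layout). A sketch of the assumed shape, with index 0 covering mutexes and index 1 covering semaphores:

    use alloc::vec::Vec;

    pub struct ProcessControlBlockInner {
        // ... existing fields (tasks, mutex_list, semaphore_list, ...) elided ...

        /// Toggled by sys_enable_deadlock_detect; consulted by the semaphore path.
        pub detect_deadlock: bool,
        /// resource_available[kind][res_id]: free units per resource
        /// (kind 0 = mutexes, kind 1 = semaphores).
        pub resource_available: [Vec<isize>; 2],
        /// resource_alloc[kind][tid][res_id]: units currently held by each thread.
        pub resource_alloc: [Vec<Vec<isize>>; 2],
        /// resource_need[kind][tid][res_id]: units each thread is still waiting for.
        pub resource_need: [Vec<Vec<isize>>; 2],
    }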
