Skip to content

Commit cb265ae

Browse files
committed
Scale frequency to suppress RCU CPU stall warning
Since the emulator currently operates using sequential emulation, the execution time for the boot process is relatively long, which can result in the generation of RCU CPU stall warnings. To address this issue, there are several potential solutions: 1. Scale the frequency to slow down emulator time during the boot process, thereby eliminating RCU CPU stall warnings. 2. During the boot process, avoid using 'clock_gettime' to update ticks and instead manage the tick increment relationship manually. 3. Implement multi-threaded emulation to accelerate the emulator's execution speed. For the third point, while implementing multi-threaded emulation can significantly accelerate the emulator's execution speed, it cannot guarantee that this issue will not reappear as the number of cores increases in the future. Therefore, a better approach is to use methods 1 and 2 to allow the emulator to set an expected time for completing the boot process. The advantages and disadvantages of the scale method are as follows: Advantages: - Simple implementation - Effectively sets the expected boot process completion time - Results have strong interpretability - Emulator time can be easily mapped back to real time Disadvantages: - Slower execution speed The advantages and disadvantages of the increment ticks method are as follows: Advantages: - Faster execution speed - Effectively sets the expected boot process completion time Disadvantages: - More complex implementation - Some results are difficult to interpret - Emulator time is difficult to map back to real time Based on practical tests, the second method provides limited acceleration but introduces some significant drawbacks, such as difficulty in interpreting results and the complexity of managing the increment relationship. Therefore, this commit opts for the scale frequency method to address this issue. This commit divides time into emulator time and real time. 
During the boot process, the timer uses scale frequency to slow down the growth of emulator time, eliminating RCU CPU stall warnings. After the boot process is complete, the growth of emulator time aligns with real time. To configure the scale frequency parameter, three pieces of information are required: 1. The expected completion time of the boot process 2. A reference point for estimating the boot process completion time 3. The relationship between the reference point and the number of SMPs According to the Linux kernel documentation: https://docs.kernel.org/RCU/stallwarn.html#config-rcu-cpu-stall-timeout The grace period for RCU CPU stalls is typically set to 21 seconds. By dividing this value by two as the expected completion time, we can provide a sufficient buffer to reduce the impact of errors and avoid RCU CPU stall warnings. Using 'gprof' for basic statistical analysis, it was found that 'semu_timer_clocksource' accounts for approximately 10% of the boot process execution time. Since the logic within 'semu_timer_clocksource' is relatively simple, its execution time can be assumed to be nearly equal to 'clock_gettime'. Furthermore, by adding a counter to 'semu_timer_clocksource', it was observed that each time the number of SMPs increases by 1, the execution count of 'semu_timer_clocksource' increases by approximately '2 * 10^8'. With this information, we can estimate the boot process completion time as 'sec_per_call * SMPs * 2 * 10^8 * (100% / 10%)' seconds, and thereby calculate the scale frequency parameter. For instance, if the estimated time is 200 seconds and the target time is 10 seconds, the scaling factor would be '10 / 200'.
1 parent 36fc1b2 commit cb265ae

File tree

4 files changed

+175
-17
lines changed

4 files changed

+175
-17
lines changed

Makefile

+2
Original file line numberDiff line numberDiff line change
@@ -78,6 +78,8 @@ E :=
7878
S := $E $E
7979

8080
SMP ?= 1
81+
CFLAGS += -D SEMU_SMP=$(SMP)
82+
CFLAGS += -D SEMU_BOOT_TARGET_TIME=10
8183
.PHONY: riscv-harts.dtsi
8284
riscv-harts.dtsi:
8385
$(Q)python3 scripts/gen-hart-dts.py $@ $(SMP) $(CLOCK_FREQ)

riscv.c

+8
Original file line numberDiff line numberDiff line change
@@ -382,6 +382,14 @@ static void op_sret(hart_t *vm)
382382
vm->s_mode = vm->sstatus_spp;
383383
vm->sstatus_sie = vm->sstatus_spie;
384384

385+
/* After the booting process is complete, initrd will be loaded. At this
386+
* point, the system will switch to U mode for the first time. Therefore,
387+
* by checking whether the switch to U mode has already occurred, we can
388+
* determine if the boot process has been completed.
389+
*/
390+
if (!boot_complete && !vm->s_mode)
391+
boot_complete = true;
392+
385393
/* Reset stack */
386394
vm->sstatus_spp = false;
387395
vm->sstatus_spie = true;

utils.c

+162-17
Original file line numberDiff line numberDiff line change
@@ -19,6 +19,9 @@
1919
#endif
2020
#endif
2121

22+
bool boot_complete = false;
23+
static double scale_factor;
24+
2225
/* Calculate "x * n / d" without unnecessary overflow or loss of precision.
2326
*
2427
* Reference:
@@ -32,35 +35,177 @@ static inline uint64_t mult_frac(uint64_t x, uint64_t n, uint64_t d)
3235
return q * n + r * n / d;
3336
}
3437

35-
void semu_timer_init(semu_timer_t *timer, uint64_t freq)
38+
/* On POSIX => use clock_gettime().
39+
* On macOS => use mach_absolute_time().
40+
* Else => fallback to time(0) in seconds, convert to ns.
41+
*
42+
* Now, the POSIX/macOS logic can be clearly reused. Meanwhile, the fallback
43+
* path might just do a coarser approach with time(0).
44+
*/
45+
static inline uint64_t host_time_ns()
3646
{
37-
timer->freq = freq;
38-
semu_timer_rebase(timer, 0);
47+
#if defined(HAVE_POSIX_TIMER)
48+
struct timespec ts;
49+
clock_gettime(CLOCKID, &ts);
50+
return (uint64_t) ts.tv_sec * 1e9 + (uint64_t) ts.tv_nsec;
51+
52+
#elif defined(HAVE_MACH_TIMER)
53+
static mach_timebase_info_data_t ts = {0};
54+
if (ts.denom == 0)
55+
(void) mach_timebase_info(&ts);
56+
57+
uint64_t now = mach_absolute_time();
58+
/* convert to nanoseconds: (now * t.numer / t.denom) */
59+
return mult_frac(now, ts.numer, (uint64_t) ts.denom);
60+
61+
#else
62+
/* Minimal fallback: time(0) in seconds => convert to ns. */
63+
time_t now_sec = time(0);
64+
return (uint64_t) now_sec * 1e9;
65+
#endif
3966
}
4067

41-
static uint64_t semu_timer_clocksource(uint64_t freq)
68+
/* Measure the overhead of a high-resolution timer call, typically
69+
* 'clock_gettime()' on POSIX or 'mach_absolute_time()' on macOS.
70+
*
71+
* 1) Times how long it takes to call 'host_time_ns()' repeatedly (iterations).
72+
* 2) Derives an average overhead per call => ns_per_call.
73+
* 3) Because semu_timer_clocksource is ~10% of boot overhead, and called ~2e8
74+
* times * SMP, we get predict_sec = ns_per_call * SMP * 2. Then set
75+
* 'scale_factor' so the entire boot completes in SEMU_BOOT_TARGET_TIME
76+
* seconds.
77+
*/
78+
static void measure_bogomips_ns(uint64_t iterations)
4279
{
43-
#if defined(HAVE_POSIX_TIMER)
44-
struct timespec t;
45-
clock_gettime(CLOCKID, &t);
46-
return t.tv_sec * freq + mult_frac(t.tv_nsec, freq, 1e9);
80+
/* Perform 'iterations' times calling the host HRT.
81+
*
82+
*
83+
* Assuming the cost of loop overhead is 'e' and the cost of 'host_time_ns'
84+
* is 't', we perform a two-stage measurement to eliminate the loop
85+
* overhead. In the first loop, 'host_time_ns' is called only once per
86+
* iteration, while in the second loop, it is called twice per iteration.
87+
*
88+
* In this way, the cost of the first loop is 'e + t', and the cost of the
89+
* second loop is 'e + 2t'. By subtracting the two, we can effectively
90+
* eliminate the loop overhead.
91+
*
92+
* Reference:
93+
* https://ates.dev/posts/2025-01-12-accurate-benchmarking/
94+
*/
95+
const uint64_t start_ns_1 = host_time_ns();
96+
for (uint64_t loops = 0; loops < iterations; loops++)
97+
(void) host_time_ns();
98+
99+
const uint64_t end_ns_1 = host_time_ns();
100+
const uint64_t elapsed_ns_1 = end_ns_1 - start_ns_1;
101+
102+
/* Second measurement */
103+
const uint64_t start_ns_2 = host_time_ns();
104+
for (uint64_t loops = 0; loops < iterations; loops++) {
105+
(void) host_time_ns();
106+
(void) host_time_ns();
107+
}
108+
109+
const uint64_t end_ns_2 = host_time_ns();
110+
const uint64_t elapsed_ns_2 = end_ns_2 - start_ns_2;
111+
112+
/* Calculate average overhead per call */
113+
const double ns_per_call =
114+
(double) (elapsed_ns_2 - elapsed_ns_1) / (double) iterations;
115+
116+
/* 'semu_timer_clocksource' is called ~2e8 times per SMP. Each call's
117+
* overhead ~ ns_per_call. The total overhead is ~ ns_per_call * SMP * 2e8.
118+
* That overhead is about 10% of the entire boot, so effectively:
119+
* predict_sec = ns_per_call * SMP * 2e8 * (100%/10%) / 1e9
120+
* = ns_per_call * SMP * 2.0
121+
* Then scale_factor = (desired_time) / (predict_sec).
122+
*/
123+
const double predict_sec = ns_per_call * SEMU_SMP * 2.0;
124+
scale_factor = SEMU_BOOT_TARGET_TIME / predict_sec;
125+
}
126+
127+
/* The main function that returns the "emulated time" in ticks.
128+
*
129+
* Before the boot completes, we scale time by 'scale_factor' for a "fake
130+
* increments" approach. After boot completes, we switch to real time
131+
* with an offset bridging so that there's no big jump.
132+
*/
133+
static uint64_t semu_timer_clocksource(semu_timer_t *timer)
134+
{
135+
/* After boot process complete, the timer will switch to real time. Thus,
136+
* there is an offset between the real time and the emulator time.
137+
*
138+
* After switching to real time, the correct way to update time is to
139+
* calculate the increment of time. Then add it to the emulator time.
140+
*/
141+
static int64_t offset = 0;
142+
static bool first_switch = true;
143+
144+
#if defined(HAVE_POSIX_TIMER) || defined(HAVE_MACH_TIMER)
145+
uint64_t now_ns = host_time_ns();
146+
147+
/* real_ticks = (now_ns * freq) / 1e9 */
148+
uint64_t real_ticks = mult_frac(now_ns, timer->freq, 1e9);
149+
150+
/* scaled_ticks = (now_ns * (freq*scale_factor)) / 1e9
151+
* = ((now_ns * freq) / 1e9) * scale_factor
152+
*/
153+
uint64_t scaled_ticks = real_ticks * scale_factor;
154+
155+
if (!boot_complete)
156+
return scaled_ticks; /* Return scaled ticks in the boot phase. */
157+
158+
/* The boot is done => switch to real freq with an offset bridging. */
159+
if (first_switch) {
160+
first_switch = false;
161+
offset = (int64_t) (real_ticks - scaled_ticks);
162+
}
163+
return (uint64_t) ((int64_t) real_ticks - offset);
164+
47165
#elif defined(HAVE_MACH_TIMER)
48-
static mach_timebase_info_data_t t;
49-
if (t.denom == 0)
50-
(void) mach_timebase_info(&t);
51-
return mult_frac(mult_frac(mach_absolute_time(), t.numer, t.denom), freq,
52-
1e9);
53-
#else
54-
return time(0) * freq;
166+
/* Because we don't rely on sub-second calls to 'host_time_ns()' here,
167+
* we directly use time(0). This means the time resolution is coarse (1
168+
* second), but the logic is the same: we do a scaled approach pre-boot,
169+
* then real freq with an offset post-boot.
170+
*/
171+
time_t now_sec = time(0);
172+
173+
/* Before boot done, scale time. */
174+
if (!boot_complete)
175+
return (uint64_t) now_sec * (uint64_t) (timer->freq * scale_factor);
176+
177+
if (first_switch) {
178+
first_switch = false;
179+
uint64_t real_val = (uint64_t) now_sec * (uint64_t) timer->freq;
180+
uint64_t scaled_val =
181+
(uint64_t) now_sec * (uint64_t) (timer->freq * scale_factor);
182+
offset = (int64_t) (real_val - scaled_val);
183+
}
184+
185+
/* Return real freq minus offset. */
186+
uint64_t real_freq_val = (uint64_t) now_sec * (uint64_t) timer->freq;
187+
return real_freq_val - offset;
55188
#endif
56189
}
57190

191+
void semu_timer_init(semu_timer_t *timer, uint64_t freq)
192+
{
193+
/* Measure how long each call to 'host_time_ns()' roughly takes,
194+
* then use that to pick 'scale_factor'. For example, pass freq
195+
* as the loop count or some large number to get a stable measure.
196+
*/
197+
measure_bogomips_ns(freq);
198+
199+
timer->freq = freq;
200+
semu_timer_rebase(timer, 0);
201+
}
202+
58203
uint64_t semu_timer_get(semu_timer_t *timer)
59204
{
60-
return semu_timer_clocksource(timer->freq) - timer->begin;
205+
return semu_timer_clocksource(timer) - timer->begin;
61206
}
62207

63208
void semu_timer_rebase(semu_timer_t *timer, uint64_t time)
64209
{
65-
timer->begin = semu_timer_clocksource(timer->freq) - time;
210+
timer->begin = semu_timer_clocksource(timer) - time;
66211
}

utils.h

+3
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,6 @@
11
#pragma once
22

3+
#include <stdbool.h>
34
#include <stdint.h>
45

56
/* TIMER */
@@ -8,6 +9,8 @@ typedef struct {
89
uint64_t freq;
910
} semu_timer_t;
1011

12+
extern bool boot_complete; /* Time to reach the first user process. */
13+
1114
void semu_timer_init(semu_timer_t *timer, uint64_t freq);
1215
uint64_t semu_timer_get(semu_timer_t *timer);
1316
void semu_timer_rebase(semu_timer_t *timer, uint64_t time);

0 commit comments

Comments
 (0)