 #include <unistd.h>

 #include <tas.h>
-#include <utils_timeout.h>

 extern int kernel_notifyfd;

-static void notify_core(int cfd, uint32_t *last_ts, uint32_t ts_us,
-    uint32_t delta)
+static void notify_core(int cfd, uint64_t *last_ts, uint64_t tsc,
+    uint64_t delta)
 {
   uint64_t val;

-  if(ts_us - *last_ts > delta) {
+  if(tsc - *last_ts > delta) {
     val = 1;
     if (write(cfd, &val, sizeof(uint64_t)) != sizeof(uint64_t)) {
       perror("notify_core: write failed");
       abort();
     }
   }

-  *last_ts = ts_us;
+  *last_ts = tsc;
 }

 void notify_fastpath_core(unsigned core)
 {
   notify_core(fp_state->kctx[core].evfd, &fp_state->kctx[core].last_ts,
-      util_timeout_time_us(), tas_info->poll_cycle_tas);
+      util_rdtsc(), tas_info->poll_cycle_tas);
 }

-void notify_app_core(int appfd, uint32_t *last_ts)
+void notify_app_core(int appfd, uint64_t *last_ts)
 {
-  notify_core(appfd, last_ts, util_timeout_time_us(), tas_info->poll_cycle_app);
+  notify_core(appfd, last_ts, util_rdtsc(), tas_info->poll_cycle_app);
 }

-void notify_appctx(struct flextcp_pl_appctx *ctx, uint32_t ts_us)
+void notify_appctx(struct flextcp_pl_appctx *ctx, uint64_t tsc)
 {
-  notify_core(ctx->evfd, &ctx->last_ts, ts_us, tas_info->poll_cycle_app);
+  notify_core(ctx->evfd, &ctx->last_ts, tsc, tas_info->poll_cycle_app);
 }

 void notify_slowpath_core(void)
 {
-  static uint32_t __thread last_ts = 0;
-  notify_core(kernel_notifyfd, &last_ts, util_timeout_time_us(),
+  static uint64_t __thread last_ts = 0;
+  notify_core(kernel_notifyfd, &last_ts, util_rdtsc(),
       tas_info->poll_cycle_tas);
 }

-int notify_canblock(struct notify_blockstate *nbs, int had_data, uint32_t ts)
+int notify_canblock(struct notify_blockstate *nbs, int had_data, uint64_t tsc)
 {
   if (had_data) {
     /* not idle this round, reset everything */
     nbs->can_block = nbs->second_bar = 0;
-    nbs->last_active_ts = ts;
+    nbs->last_active_ts = tsc;
   } else if (nbs->second_bar) {
     /* we can block now, reset afterwards */
     nbs->can_block = nbs->second_bar = 0;
-    nbs->last_active_ts = ts;
+    nbs->last_active_ts = tsc;
     return 1;
   } else if (nbs->can_block &&
-      ts - nbs->last_active_ts > tas_info->poll_cycle_tas)
+      tsc - nbs->last_active_ts > tas_info->poll_cycle_tas)
   {
     /* we've reached the poll cycle interval, so just poll once more */
     nbs->second_bar = 1;
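The diff switches the notification timestamps from 32-bit microsecond values (util_timeout_time_us()) to 64-bit timestamp-counter readings (util_rdtsc()), so the poll_cycle_tas / poll_cycle_app thresholds are compared directly in CPU cycles rather than converted to microseconds on every call. Below is a minimal self-contained sketch of the same throttled eventfd-notification pattern, assuming an x86-64 target with GCC/Clang inline assembly; rdtsc_cycles(), notify_throttled(), and main() are illustrative stand-ins, not the TAS API:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/eventfd.h>

/* Read the CPU timestamp counter (x86-64); stand-in for util_rdtsc(). */
static inline uint64_t rdtsc_cycles(void)
{
  uint32_t lo, hi;
  __asm__ volatile("rdtsc" : "=a" (lo), "=d" (hi));
  return ((uint64_t) hi << 32) | lo;
}

/* Mirror of notify_core() above: signal the eventfd only if more than
 * delta cycles have elapsed since the previous call. Unsigned
 * subtraction keeps the comparison well-defined across wraparound. */
static void notify_throttled(int efd, uint64_t *last_tsc, uint64_t delta)
{
  uint64_t now = rdtsc_cycles();
  uint64_t val = 1;

  if (now - *last_tsc > delta) {
    if (write(efd, &val, sizeof(val)) != sizeof(val)) {
      perror("notify_throttled: write failed");
      abort();
    }
  }
  *last_tsc = now;  /* updated unconditionally, as in notify_core() */
}

int main(void)
{
  int efd = eventfd(0, EFD_NONBLOCK);
  uint64_t last = 0;

  if (efd < 0) {
    perror("eventfd");
    return 1;
  }
  notify_throttled(efd, &last, 1000000); /* illustrative ~1M-cycle window */
  close(efd);
  return 0;
}

Because the last-seen timestamp is updated on every call, back-to-back calls stay under the threshold and write nothing; only a call arriving after more than delta idle cycles signals the eventfd, which keeps wakeups rare while the receiving side is still actively polling.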