#!/usr/bin/env bash
# Attempts to fully reset NVIDIA GPUs by clearing processes, invoking NVML resets,
# reloading kernel modules, and issuing PCIe function-level resets as needed.
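#
# Usage: ./reset-gpu.sh             (re-execs itself under sudo when needed)
#        DEBUG=1 ./reset-gpu.sh     (enables shell tracing via set -x)

# Strict mode: exit on errors (-e) and unset variables (-u), fail pipelines on
# any stage (-o pipefail), and keep ERR traps active inside functions (-E).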
set -Eeuo pipefail
if [[ "${DEBUG:-0}" -ne 0 ]]; then
set -x
fi
SCRIPT_BASENAME=$(basename "$0")
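# Logging helpers, tagged with the script name; warnings go to stderr.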
log() {
  printf '[%s] %s\n' "$SCRIPT_BASENAME" "$*"
}
warn() {
  printf '[%s][WARN] %s\n' "$SCRIPT_BASENAME" "$*" >&2
}
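# Re-exec under sudo when not already root, preserving DEBUG so tracing
# survives the privilege hop.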
ensure_root() {
  if [[ "${EUID:-$(id -u)}" -ne 0 ]]; then
    if command -v sudo >/dev/null 2>&1; then
      log "Elevating privileges with sudo"
      exec sudo --preserve-env=DEBUG bash "$0" "$@"
    else
      warn "Root privileges are required to reset the GPU"
      exit 1
    fi
  fi
}
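# Abort early when a required binary is missing from PATH.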
require_cmd() {
  local cmd="$1"
  if ! command -v "$cmd" >/dev/null 2>&1; then
    warn "Missing required command: $cmd"
    exit 1
  fi
}
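# Global state captured up front so cleanup() can restore whatever was
# stopped or changed along the way.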
declare -a GPU_IDS=()
declare -A GPU_PERSISTENCE_STATE=()
PERSISTENCED_WAS_ACTIVE=0
declare -a DISPLAY_MANAGER_CANDIDATES=(display-manager gdm gdm3 lightdm sddm lxdm)
declare -a STOPPED_DISPLAY_SERVICES=()
declare -a GPU_HELPER_SERVICES=(nvidia-dcgm nvidia-powerd nvidia-fabricmanager nvidia-vgpu-mgr nvidia-vgpud nvidia-gridd nvidia-vgpu-dma)
declare -a STOPPED_GPU_HELPERS=()
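# Enumerate GPU indices and record each GPU's current persistence mode so it
# can be restored after the reset.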
discover_gpus() {
  local idx state
  mapfile -t GPU_IDS < <(
    nvidia-smi --query-gpu=index --format=csv,noheader 2>/dev/null |
      awk '{gsub(/^[[:space:]]+|[[:space:]]+$/, "", $0); if (length) print}'
  )
  if (( ${#GPU_IDS[@]} == 0 )); then
    warn "No NVIDIA GPUs detected"
    exit 1
  fi
  while IFS=',' read -r idx state; do
    idx=$(awk '{$1=$1};1' <<<"$idx")
    state=$(awk '{$1=$1};1' <<<"$state")
    [[ -n "$idx" ]] && GPU_PERSISTENCE_STATE["$idx"]="$state"
  done < <(nvidia-smi --query-gpu=index,persistence_mode --format=csv,noheader 2>/dev/null || true)
}
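# nvidia-persistenced holds the device nodes open, which blocks driver
# unload; stop it and remember whether it was running so it can be restarted.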
stop_persistenced() {
  if command -v systemctl >/dev/null 2>&1; then
    if systemctl is-active --quiet nvidia-persistenced 2>/dev/null; then
      PERSISTENCED_WAS_ACTIVE=1
      if systemctl stop nvidia-persistenced >/dev/null 2>&1; then
        log "Stopped nvidia-persistenced service"
      else
        warn "Failed to stop nvidia-persistenced service"
      fi
    fi
  fi
}
start_persistenced_if_needed() {
  if (( PERSISTENCED_WAS_ACTIVE )); then
    if systemctl start nvidia-persistenced >/dev/null 2>&1; then
      log "Restarted nvidia-persistenced service"
    else
      warn "Failed to restart nvidia-persistenced service"
    fi
  fi
}
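# NVIDIA helper daemons (DCGM, fabric manager, vGPU services, ...) can also
# keep the driver busy; stop any that are active and record them for restart.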
stop_gpu_helper_services() {
  local svc svc_unit
  if ! command -v systemctl >/dev/null 2>&1; then
    return
  fi
  for svc in "${GPU_HELPER_SERVICES[@]}"; do
    svc_unit=""
    if systemctl is-active --quiet "$svc" 2>/dev/null; then
      svc_unit="$svc"
    elif systemctl is-active --quiet "${svc}.service" 2>/dev/null; then
      svc_unit="${svc}.service"
    else
      continue
    fi
    if systemctl stop "$svc_unit" >/dev/null 2>&1; then
      STOPPED_GPU_HELPERS+=("$svc_unit")
      log "Stopped NVIDIA helper service $svc_unit"
    else
      warn "Failed to stop NVIDIA helper service $svc_unit"
    fi
  done
}
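# Restart only the helper services this script stopped.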
restart_gpu_helper_services() {
  local svc
  if ! command -v systemctl >/dev/null 2>&1; then
    return
  fi
  for svc in "${STOPPED_GPU_HELPERS[@]}"; do
    if systemctl start "$svc" >/dev/null 2>&1; then
      log "Restarted NVIDIA helper service $svc"
    else
      warn "Failed to restart NVIDIA helper service $svc"
    fi
  done
}
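# Persistence mode keeps the driver initialized even with no clients; turn it
# off so teardown and reset are unobstructed.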
disable_persistence_mode() {
  local id
  for id in "${GPU_IDS[@]}"; do
    if nvidia-smi -i "$id" -pm 0 >/dev/null 2>&1; then
      log "Disabled persistence mode on GPU $id"
    else
      warn "Unable to disable persistence mode on GPU $id"
    fi
  done
}
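# Put persistence mode back the way discover_gpus() found it, defaulting to
# enabled when the original state is unknown.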
restore_persistence_mode() {
  local id original target
  for id in "${GPU_IDS[@]}"; do
    original="${GPU_PERSISTENCE_STATE[$id]:-Enabled}"
    target=1
    if [[ "$original" =~ [Dd]isabled ]]; then
      target=0
    fi
    if nvidia-smi -i "$id" -pm "$target" >/dev/null 2>&1; then
      log "Restored persistence mode ($original) on GPU $id"
    else
      warn "Failed to restore persistence mode on GPU $id"
    fi
  done
}
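# Display servers and compositors can keep /dev/nvidia* open even after their
# service stops; ask nicely with SIGTERM, then SIGKILL whatever remains.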
terminate_residual_display_processes() {
  local -a targets=(Xorg X Xwayland gnome-shell kwin_x11 kwin_wayland sway weston)
  local -a pids=()
  local proc pid
  if ! command -v pgrep >/dev/null 2>&1; then
    return
  fi
  for proc in "${targets[@]}"; do
    mapfile -t pids < <(pgrep -x "$proc" 2>/dev/null || true)
    if (( ${#pids[@]} == 0 )); then
      continue
    fi
    log "Terminating lingering display process $proc (PIDs: ${pids[*]})"
    for pid in "${pids[@]}"; do
      kill -TERM "$pid" >/dev/null 2>&1 || true
    done
  done
  sleep 1
  for proc in "${targets[@]}"; do
    mapfile -t pids < <(pgrep -x "$proc" 2>/dev/null || true)
    if (( ${#pids[@]} == 0 )); then
      continue
    fi
    warn "Force killing stubborn display process $proc (PIDs: ${pids[*]})"
    for pid in "${pids[@]}"; do
      kill -KILL "$pid" >/dev/null 2>&1 || true
    done
  done
}
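# Stop whichever display manager is active, then sweep up any display
# processes its shutdown left behind.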
stop_display_stack() {
  local svc svc_unit
  if ! command -v systemctl >/dev/null 2>&1; then
    return
  fi
  for svc in "${DISPLAY_MANAGER_CANDIDATES[@]}"; do
    svc_unit=""
    if systemctl is-active --quiet "$svc" 2>/dev/null; then
      svc_unit="$svc"
    elif systemctl is-active --quiet "${svc}.service" 2>/dev/null; then
      svc_unit="${svc}.service"
    else
      continue
    fi
    if systemctl stop "$svc_unit" >/dev/null 2>&1; then
      STOPPED_DISPLAY_SERVICES+=("$svc_unit")
      log "Stopped display manager service $svc_unit"
    else
      warn "Failed to stop display manager service $svc_unit"
    fi
  done
  terminate_residual_display_processes
}
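# Restart only the display services this script stopped.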
restart_display_stack() {
  local svc
  if ! command -v systemctl >/dev/null 2>&1; then
    return
  fi
  for svc in "${STOPPED_DISPLAY_SERVICES[@]}"; do
    if systemctl start "$svc" >/dev/null 2>&1; then
      log "Restarted display manager service $svc"
    else
      warn "Failed to restart display manager service $svc"
    fi
  done
}
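# Print the PIDs of compute processes currently holding any GPU, one per line.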
collect_gpu_pids() {
  nvidia-smi --query-compute-apps=pid --format=csv,noheader 2>/dev/null |
    awk '{gsub(/^[[:space:]]+|[[:space:]]+$/, "", $0); if ($0 ~ /^[0-9]+$/) print $0}'
}
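# Escalating drain of GPU clients: two rounds of SIGTERM, then SIGKILL on the
# final attempt; returns non-zero if processes survive even that.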
drain_gpu_processes() {
  local max_attempts=3
  local attempt signal pid
  local -a pids=()
  for ((attempt = 1; attempt <= max_attempts; attempt++)); do
    mapfile -t pids < <(collect_gpu_pids || true)
    if (( ${#pids[@]} == 0 )); then
      log "No compute processes are holding the GPU"
      return 0
    fi
    signal="TERM"
    if (( attempt == max_attempts )); then
      signal="KILL"
    fi
    log "Attempt $attempt: sending SIG${signal} to GPU processes ${pids[*]}"
    for pid in "${pids[@]}"; do
      kill "-${signal}" "$pid" >/dev/null 2>&1 || warn "Failed to signal PID $pid"
    done
    sleep 2
  done
  mapfile -t pids < <(collect_gpu_pids || true)
  if (( ${#pids[@]} > 0 )); then
    warn "GPU is still busy with PIDs: ${pids[*]}"
    return 1
  fi
}
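# Ask the driver for a reset via nvidia-smi; this generally succeeds only when
# nothing holds the device and the GPU supports it. Success on any GPU counts
# as overall success.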
gpu_reset_via_nvml() {
  local id result=1
  for id in "${GPU_IDS[@]}"; do
    if nvidia-smi --gpu-reset -i "$id" >/dev/null 2>&1; then
      log "nvidia-smi GPU reset succeeded for GPU $id"
      result=0
    else
      warn "nvidia-smi GPU reset is unsupported or failed for GPU $id"
    fi
  done
  return "$result"
}
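# Check /proc/modules for an exactly matching module name.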
module_loaded() {
  local module="$1"
  grep -q "^${module} " /proc/modules 2>/dev/null
}
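# Unload the NVIDIA module stack in dependency order (nvidia_drm ->
# nvidia_modeset -> nvidia_uvm -> nvidia), then load it back.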
reload_nvidia_modules() {
  local modules_to_remove=(nvidia_drm nvidia_modeset nvidia_uvm nvidia)
  local module removed_any=0
  for module in "${modules_to_remove[@]}"; do
    if module_loaded "$module"; then
      removed_any=1
      if modprobe -r "$module" >/dev/null 2>&1; then
        log "Removed kernel module $module"
      else
        warn "Failed to remove kernel module $module"
      fi
    fi
  done
  if (( removed_any == 0 )); then
    log "No NVIDIA kernel modules were loaded prior to the reload step"
  fi
  local modules_to_add=(nvidia nvidia_modeset nvidia_uvm nvidia_drm)
  for module in "${modules_to_add[@]}"; do
    if modprobe "$module" >/dev/null 2>&1; then
      log "Loaded kernel module $module"
    else
      warn "Kernel module $module could not be loaded (continuing)"
    fi
  done
}
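# nvidia-smi reports bus ids with an eight-digit PCI domain, e.g.
# "00000000:01:00.0", while sysfs expects the four-digit lowercase form
# "0000:01:00.0"; keep the last four domain digits and lowercase the result.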
normalize_bus_id() {
  local bdf="$1"
  if [[ "$bdf" != *:*:*.* ]]; then
    return 1
  fi
  local domain="${bdf%%:*}"
  local rest="${bdf#*:}"
  domain="${domain: -4}"
  local normalized="${domain}:${rest}"
  printf '%s\n' "${normalized,,}"
}
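# Last resort: unbind each GPU from its driver, write 1 to its sysfs reset
# node to trigger a function-level reset, then rebind it.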
pci_function_level_reset() {
  local -a bus_ids=()
  local raw_id normalized device_path driver_name
  mapfile -t bus_ids < <(
    nvidia-smi --query-gpu=pci.bus_id --format=csv,noheader 2>/dev/null |
      awk '{gsub(/^[[:space:]]+|[[:space:]]+$/, "", $0); if (length) print}'
  )
  for raw_id in "${bus_ids[@]}"; do
    if ! normalized=$(normalize_bus_id "$raw_id"); then
      warn "Unable to normalize PCI bus id: $raw_id"
      continue
    fi
    device_path="/sys/bus/pci/devices/$normalized"
    if [[ ! -d "$device_path" ]]; then
      warn "PCI device path not found: $device_path"
      continue
    fi
    driver_name=""
    if [[ -L "$device_path/driver" ]]; then
      driver_name=$(basename "$(readlink "$device_path/driver")")
    fi
    if [[ -n "$driver_name" && -w "/sys/bus/pci/drivers/$driver_name/unbind" ]]; then
      if printf '%s\n' "$normalized" >"/sys/bus/pci/drivers/$driver_name/unbind"; then
        log "Unbound $normalized from driver $driver_name"
      else
        warn "Failed to unbind $normalized from driver $driver_name"
      fi
    fi
    if [[ -w "$device_path/reset" ]]; then
      if printf '1\n' >"$device_path/reset"; then
        log "Issued PCIe function-level reset for $normalized"
      else
        warn "Failed to trigger PCIe reset for $normalized"
      fi
    else
      warn "PCI reset interface not available for $normalized"
    fi
    if [[ -n "$driver_name" && -w "/sys/bus/pci/drivers/$driver_name/bind" ]]; then
      if printf '%s\n' "$normalized" >"/sys/bus/pci/drivers/$driver_name/bind"; then
        log "Rebound $normalized to driver $driver_name"
      else
        warn "Failed to rebind $normalized to driver $driver_name"
      fi
    fi
  done
}
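# Runs on every exit path via the EXIT trap set in main(); best-effort
# restoration of everything stopped or changed earlier.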
cleanup() {
  local rc="$1"
  if (( ${#GPU_IDS[@]} )); then
    restore_persistence_mode || true
  fi
  restart_gpu_helper_services || true
  restart_display_stack || true
  start_persistenced_if_needed || true
  exit "$rc"
}
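# Orchestrates the full sequence: elevate, discover GPUs, quiesce everything
# holding the driver, then escalate from NVML reset to module reload to PCIe
# function-level reset.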
main() {
  ensure_root "$@"
  require_cmd nvidia-smi
  discover_gpus
  trap 'rc=$?; trap - EXIT; cleanup "$rc"' EXIT
  stop_persistenced
  stop_gpu_helper_services
  stop_display_stack
  disable_persistence_mode
  drain_gpu_processes || warn "Some processes could not be stopped; GPU reset may fail"
  gpu_reset_via_nvml || warn "NVML reset did not complete; attempting kernel module reload"
  reload_nvidia_modules
  pci_function_level_reset
  log "GPU reset sequence completed"
}
main "$@"