reproduce_v2.py (forked from vllm-project/vllm)
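"""Standalone reproducer for GPU memory behavior during liquid shard movement.

Loads facebook/opt-6.7b through this fork's liquid-enabled LLM, copies the
weights of shard 1 to CPU, deletes the GPU-side copies, moves the weights
back, and re-attaches them, printing allocated/reserved/free GPU memory
after each step.
"""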
from vllm import LLM
# from vllm import EngineArgs, LLMEngine

import os
import torch

# model = "meta-llama/Meta-Llama-3-8B"
model = "facebook/opt-6.7b"
# model_path = os.path.join("./models", model)

def report_gpu_memory(stage: str) -> None:
    # Clear cached blocks so the numbers reflect live allocations, then
    # report allocated/reserved/free memory on GPU 0.
    torch.cuda.empty_cache()
    free_mem, _ = torch.cuda.mem_get_info()
    print(
        f"{stage}, allocated space on GPU 0: {torch.cuda.memory_allocated() / (1024**3):.2f} GB, "
        f"reserved space on GPU 0: {torch.cuda.memory_reserved() / (1024**3):.2f} GB, "
        f"free space: {free_mem / (1024**3):.2f} GB"
    )


def main():
    llm = LLM(
        model,
        enforce_eager=True,
        # load_format="auto",
        # tensor_parallel_size=2,
        # The liquid_* arguments below are specific to this fork's liquid sharding:
        liquid_gpu_range=[0, 1],
        liquid_gpu_space=32,
        liquid_driver_gpu_id=0,
        liquid_total_num_shards=2,
        # gpu_memory_utilization=0.7,
    )
    report_gpu_memory("After initializing model")
    # Copy shard 1's weights to CPU; a dict comprehension avoids leaving a
    # stray loop variable holding a reference to the last GPU tensor, which
    # would keep it alive and skew the memory measurement.
    shards_weights = llm.llm_engine.model_executor.get_shards_weights(shard_ids=[1])
    cpu_shards_weights = {name: tensor.to("cpu") for name, tensor in shards_weights.items()}
    del shards_weights
    report_gpu_memory("After sending weights to cpu and deleting original weights")
    # Drop the executor's own GPU copies of shard 1, then reload the weights
    # from CPU back onto the GPU.
    llm.llm_engine.model_executor.delete_shards_weights(shard_ids=[1])
    received_shards_weights = {name: tensor.to("cuda") for name, tensor in cpu_shards_weights.items()}
    report_gpu_memory("After receiving weights from cpu")
    # Re-attach the reloaded weights to the executor as shard 1.
    llm.llm_engine.model_executor.append_shards_weights(shard_ids=[1], shards_weights=received_shards_weights)
    report_gpu_memory("After appending")

if __name__ == '__main__':
    main()
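
# Example invocation (a guess based on liquid_gpu_range=[0, 1], which implies
# two visible GPUs):
#
#   CUDA_VISIBLE_DEVICES=0,1 python reproduce_v2.py
#
# Each report_gpu_memory() call prints one line of allocated/reserved/free
# GPU-0 memory, so the four output lines show how much memory each step
# frees or re-consumes.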