# utils.py (forked from sjtu-medialab/Pioneer)

import numpy as np
import torch


class ReplayBuffer(object):
    """FIFO replay buffer for off-policy RL, with helpers for sequence
    batches and offline (D4RL-style) datasets."""

    def __init__(self, state_dim, action_dim, max_size=int(1e6)):
        self.max_size = max_size
        self.ptr = 0   # next write position
        self.size = 0  # number of valid transitions currently stored

        self.state = np.zeros((max_size, state_dim))
        self.action = np.zeros((max_size, action_dim))
        self.next_state = np.zeros((max_size, state_dim))
        self.reward = np.zeros((max_size, 1))
        self.not_done = np.zeros((max_size, 1))

        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    def add(self, state, action, next_state, reward, done):
        # Write one transition at the pointer, then advance it, wrapping
        # around so the oldest entries are overwritten once the buffer fills.
        self.state[self.ptr] = state
        self.action[self.ptr] = action
        self.next_state[self.ptr] = next_state
        self.reward[self.ptr] = reward
        self.not_done[self.ptr] = 1. - done

        self.ptr = (self.ptr + 1) % self.max_size
        self.size = min(self.size + 1, self.max_size)
    def sample(self, batch_data):
        # Unlike the classic `sample(batch_size)` API, this variant rebuilds
        # the training batch from a pre-collected sequence batch: observation
        # has shape (batch, seq_len, state_dim) and capacities holds the actions.
        observation, capacities, video_quality, audio_quality = batch_data
        observation = torch.as_tensor(observation, dtype=torch.float32)

        self.state = observation
        self.action = capacities

        batch_size, seq_length, state_size = observation.shape

        # next_state[t] = observation[t + 1]; the last step has no successor
        # and stays zero (it is masked out through not_done below).
        self.next_state = torch.zeros(batch_size, seq_length, state_size)
        self.next_state[:, :-1, :] = observation[:, 1:, :]

        # Each feature occupies a block of 10 history entries, so feature k
        # starts at index k * 10 - 10. These pick the most recent receiving
        # rate (feature 1), loss rate (feature 11), and delay (feature 4).
        last_recv_rate = observation[:, :, 1 * 10 - 10]
        last_loss = observation[:, :, 11 * 10 - 10]
        last_delay = observation[:, :, 4 * 10 - 10]

        # Reward: throughput (receiving rate scaled by 1 / 8e6) plus video
        # and audio quality (rescaled from a 0-5 range), penalized by loss
        # rate and by delay (normalized to 500 ms).
        self.reward = (0.5 * last_recv_rate / (8 * 1e6)
                       + video_quality / 5.0 + audio_quality / 5.0
                       - 0.5 * last_loss - 0.3 * (last_delay / 500.0))

        # Every step continues except the final one in each sequence.
        self.not_done = torch.ones(batch_size, seq_length, 1)
        self.not_done[:, -1, :] = 0.

        return (
            torch.FloatTensor(self.state).to(self.device),
            torch.FloatTensor(self.action).to(self.device),
            torch.FloatTensor(self.next_state).to(self.device),
            torch.FloatTensor(self.reward).to(self.device),
            torch.FloatTensor(self.not_done).to(self.device),
        )
    def convert_D4RL(self, dataset):
        # Load an entire offline dataset (D4RL field naming) into the buffer.
        self.state = dataset['observations']
        self.action = dataset['actions']
        self.next_state = dataset['next_observations']
        self.reward = dataset['rewards'].reshape(-1, 1)
        self.not_done = 1. - dataset['terminals'].reshape(-1, 1)
        self.size = self.state.shape[0]
    def normalize_states(self, eps=1e-3):
        # Standardize states in place and return (mean, std) so the same
        # statistics can be reused at evaluation time.
        mean = self.state.mean(0, keepdims=True)
        std = self.state.std(0, keepdims=True) + eps
        self.state = (self.state - mean) / std
        self.next_state = (self.next_state - mean) / std
        return mean, std
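

# Usage sketch: a minimal, hypothetical smoke test of the buffer. The shapes
# are assumptions for illustration only: a 150-dim observation (15 features x
# 10 history entries, consistent with the index arithmetic in `sample`) and a
# 1-dim action, with random data standing in for real sequence batches.
if __name__ == "__main__":
    buffer = ReplayBuffer(state_dim=150, action_dim=1)

    # Online-style insertion: transitions enter one at a time (FIFO overwrite
    # once max_size is reached).
    for _ in range(5):
        buffer.add(
            state=np.random.randn(150),
            action=np.random.randn(1),
            next_state=np.random.randn(150),
            reward=0.0,
            done=False,
        )
    print("stored transitions:", buffer.size)

    # Sequence-batch sampling: batch_data mirrors what a training loop is
    # expected to supply (observations, capacities, video/audio quality).
    batch, seq = 4, 8
    batch_data = (
        torch.randn(batch, seq, 150),   # observations
        torch.randn(batch, seq, 1),     # capacities (actions)
        torch.rand(batch, seq) * 5,     # video quality, assumed in [0, 5]
        torch.rand(batch, seq) * 5,     # audio quality, assumed in [0, 5]
    )
    state, action, next_state, reward, not_done = buffer.sample(batch_data)
    print("reward shape:", tuple(reward.shape))                    # (4, 8)
    print("not_done at sequence end:", not_done[0, -1, 0].item())  # 0.0

    # Offline-style loading and normalization (D4RL field names), with a tiny
    # random dataset as a stand-in.
    dataset = {
        'observations': np.random.randn(16, 150),
        'actions': np.random.randn(16, 1),
        'next_observations': np.random.randn(16, 150),
        'rewards': np.random.randn(16),
        'terminals': np.zeros(16),
    }
    buffer.convert_D4RL(dataset)
    mean, std = buffer.normalize_states()
    print("post-normalization state mean:", float(buffer.state.mean()))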