Skip to content

Commit 5400c22

Browse files
committed
Update readme
1 parent: 17d1465 · commit: 5400c22

File tree

1 file changed

+3
-2
lines changed

1 file changed

+3
-2
lines changed

Diff for: README.md

+3-2
Original file line number | Diff line number | Diff line change
@@ -128,7 +128,7 @@ import d3rlpy
128128
dataset, env = d3rlpy.datasets.get_d4rl('hopper-medium-v0')
129129

130130
# prepare algorithm
131-
cql = d3rlpy.algos.CQLConfig().create(device='cuda:0')
131+
cql = d3rlpy.algos.CQLConfig(compile_graph=True).create(device='cuda:0')
132132

133133
# train
134134
cql.fit(
@@ -157,6 +157,7 @@ dataset, env = d3rlpy.datasets.get_atari_transitions(
157157
cql = d3rlpy.algos.DiscreteCQLConfig(
158158
observation_scaler=d3rlpy.preprocessing.PixelObservationScaler(),
159159
reward_scaler=d3rlpy.preprocessing.ClipRewardScaler(-1.0, 1.0),
160+
compile_graph=True,
160161
).create(device='cuda:0')
161162

162163
# start training
@@ -180,7 +181,7 @@ env = gym.make('Hopper-v3')
180181
eval_env = gym.make('Hopper-v3')
181182

182183
# prepare algorithm
183-
sac = d3rlpy.algos.SACConfig().create(device='cuda:0')
184+
sac = d3rlpy.algos.SACConfig(compile_graph=True).create(device='cuda:0')
184185

185186
# prepare replay buffer
186187
buffer = d3rlpy.dataset.create_fifo_replay_buffer(limit=1000000, env=env)

0 commit comments

Comments (0)