Commit eb1f6f5

Enable GPU support; move model and data to GPU if available
1 parent 9dc8613 commit eb1f6f5

File tree

1 file changed: +7 -2 lines changed


beginner_source/basics/optimization_tutorial.py

Lines changed: 7 additions & 2 deletions
@@ -64,7 +64,10 @@ def forward(self, x):
         logits = self.linear_relu_stack(x)
         return logits
 
-model = NeuralNetwork()
+device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+print(f"Using device: {device}")
+
+model = NeuralNetwork().to(device)
 
 
 ##############################################
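
In isolation, the device-selection idiom this hunk introduces can be exercised as below. The nn.Sequential model is a stand-in for the tutorial's NeuralNetwork class, used only so the sketch is self-contained; treat the model definition as an assumption, not the tutorial's code:

import torch
from torch import nn

# Prefer the GPU when PyTorch can see one; otherwise fall back to the CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(f"Using device: {device}")

# Stand-in for the tutorial's NeuralNetwork class (illustrative assumption).
model = nn.Sequential(
    nn.Flatten(),
    nn.Linear(28 * 28, 512),
    nn.ReLU(),
    nn.Linear(512, 10),
).to(device)  # Module.to() moves parameters and buffers in place

# Inputs must live on the same device as the model's parameters.
x = torch.rand(1, 28, 28, device=device)
print(model(x).shape)  # torch.Size([1, 10])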
@@ -153,6 +156,7 @@ def train_loop(dataloader, model, loss_fn, optimizer):
     # Unnecessary in this situation but added for best practices
     model.train()
     for batch, (X, y) in enumerate(dataloader):
+        X, y = X.to(device), y.to(device)
         # Compute prediction and loss
         pred = model(X)
         loss = loss_fn(pred, y)
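
One detail behind this hunk: for tensors, .to(device) is not in place, so the loop must rebind X and y. A minimal sketch, runnable with or without a GPU:

import torch

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

X = torch.rand(4, 28, 28)  # a CPU batch, as the DataLoader would yield it
moved = X.to(device)       # returns a tensor on `device`; X itself is never moved
print(X.device, moved.device)

This is why the added line reads X, y = X.to(device), y.to(device) rather than calling .to() for its side effect, which only works for a Module.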
@@ -163,7 +167,7 @@ def train_loop(dataloader, model, loss_fn, optimizer):
         optimizer.zero_grad()
 
         if batch % 100 == 0:
-            loss, current = loss.item(), batch * batch_size + len(X)
+            loss, current = loss.item(), batch * len(X)
             print(f"loss: {loss:>7f} [{current:>5d}/{size:>5d}]")
@@ -179,6 +183,7 @@ def test_loop(dataloader, model, loss_fn):
     # also serves to reduce unnecessary gradient computations and memory usage for tensors with requires_grad=True
     with torch.no_grad():
         for X, y in dataloader:
+            X, y = X.to(device), y.to(device)
             pred = model(X)
             test_loss += loss_fn(pred, y).item()
             correct += (pred.argmax(1) == y).type(torch.float).sum().item()
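
Assembled from this hunk plus the tutorial's surrounding code, test_loop after the commit would read roughly as below; the lines outside the hunk (size, num_batches, the final averaging and print) are reconstructed from the tutorial and should be treated as assumptions:

def test_loop(dataloader, model, loss_fn):
    # Set the model to evaluation mode - important for batch normalization and dropout layers
    model.eval()
    size = len(dataloader.dataset)
    num_batches = len(dataloader)
    test_loss, correct = 0, 0

    # torch.no_grad() ensures no gradients are computed during test mode;
    # also serves to reduce unnecessary gradient computations and memory usage for tensors with requires_grad=True
    with torch.no_grad():
        for X, y in dataloader:
            # Move the batch to the same device as the model before the forward pass.
            X, y = X.to(device), y.to(device)
            pred = model(X)
            test_loss += loss_fn(pred, y).item()
            # argmax(1) yields the predicted class per sample; y is on the same device, so == is valid.
            correct += (pred.argmax(1) == y).type(torch.float).sum().item()

    test_loss /= num_batches
    correct /= size
    print(f"Test Error: \n Accuracy: {(100*correct):>0.1f}%, Avg loss: {test_loss:>8f} \n")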
