Skip to content

Commit dfca1e4

Browse files
xuhdev authored and facebook-github-bot committed
Replace all AT_ASSERTM under c10/ (except Exception.h) (pytorch#50843)
Summary: Pull Request resolved: pytorch#50843 AT_ASSERTM is deprecated and should be replaced by either TORCH_CHECK or TORCH_INTERNAL_ASSERT, depending on the situation. Test Plan: Imported from OSS Reviewed By: ailzhang Differential Revision: D26074365 Pulled By: ezyang fbshipit-source-id: 46e13588fad4e24828f3cc99635e9cb2223a6c2c
1 parent c41ca4a commit dfca1e4

File tree

4 files changed

+7
-7
lines changed

4 files changed

+7
-7
lines changed

c10/core/Allocator.cpp

+1-1
Original file line numberDiff line numberDiff line change
@@ -30,7 +30,7 @@ void SetAllocator(at::DeviceType t, at::Allocator* alloc, uint8_t priority) {
3030

3131
at::Allocator* GetAllocator(const at::DeviceType& t) {
3232
auto* alloc = allocator_array[static_cast<int>(t)];
33-
AT_ASSERTM(alloc, "Allocator for ", t, " is not set.");
33+
TORCH_INTERNAL_ASSERT(alloc, "Allocator for ", t, " is not set.");
3434
return alloc;
3535
}
3636

c10/core/StorageImpl.h

+1-1
Original file line numberDiff line numberDiff line change
@@ -23,7 +23,7 @@ struct C10_API StorageImpl final : public c10::intrusive_ptr_target {
2323
received_cuda_(false),
2424
allocator_(allocator) {
2525
if (resizable) {
26-
AT_ASSERTM(
26+
TORCH_INTERNAL_ASSERT(
2727
allocator_, "For resizable storage, allocator must be provided");
2828
}
2929
}

c10/core/TensorOptions.h

+1-1
Original file line numberDiff line numberDiff line change
@@ -708,7 +708,7 @@ inline DeviceType computeDeviceType(DispatchKey tid) {
708708
} else if (tid == DispatchKey::QuantizedXPU) {
709709
return DeviceType::XPU;
710710
} else {
711-
AT_ASSERTM(false, "Unknown DispatchKey: ", tid);
711+
TORCH_INTERNAL_ASSERT(false, "Unknown DispatchKey: ", tid);
712712
}
713713
}
714714

c10/cuda/CUDAStream.cpp

+4-4
Original file line numberDiff line numberDiff line change
@@ -175,7 +175,7 @@ static StreamId CUDAStream_getStreamId(const LeakyStreamInternals* ptr) {
175175
StreamIdType::HIGH, ptr - high_priority_streams[device_index].data());
176176
}
177177

178-
AT_ASSERTM(
178+
TORCH_INTERNAL_ASSERT(
179179
0,
180180
"Could not compute stream ID for ",
181181
ptr,
@@ -197,7 +197,7 @@ static void initGlobalStreamState() {
197197
num_gpus = device_count();
198198
// Check if the number of GPUs matches the expected compile-time max number
199199
// of GPUs.
200-
AT_ASSERTM(
200+
TORCH_CHECK(
201201
num_gpus <= C10_COMPILE_TIME_MAX_GPUS,
202202
"Number of CUDA devices on the machine is larger than the compiled "
203203
"max number of gpus expected (",
@@ -269,7 +269,7 @@ LeakyStreamInternals* CUDAStream_internals(CUDAStream s) {
269269
size_t si = streamIdIndex(s.unwrap().id());
270270
switch (st) {
271271
case StreamIdType::DEFAULT:
272-
AT_ASSERTM(
272+
TORCH_INTERNAL_ASSERT(
273273
si == 0,
274274
"Unrecognized stream ",
275275
s.unwrap(),
@@ -284,7 +284,7 @@ LeakyStreamInternals* CUDAStream_internals(CUDAStream s) {
284284
case StreamIdType::HIGH:
285285
return &high_priority_streams[device_index][si];
286286
default:
287-
AT_ASSERTM(
287+
TORCH_INTERNAL_ASSERT(
288288
0,
289289
"Unrecognized stream ",
290290
s.unwrap(),

0 commit comments

Comments (0)