Commit f6895ff

committed: update

1 parent c7b2bd7 commit f6895ff

File tree

1 file changed: +87 −0 lines changed


tests/cpp/test_tensor.cc

Lines changed: 87 additions & 0 deletions
@@ -17,7 +17,11 @@
  * under the License.
  */
 #include <gtest/gtest.h>
+#include <tvm/ffi/c_api.h>
 #include <tvm/ffi/container/tensor.h>
+#include <tvm/ffi/extra/c_env_api.h>
+
+#include <cstddef>
 
 namespace {
 
@@ -188,4 +192,87 @@ TEST(Tensor, TensorView) {
   EXPECT_EQ(tensor_view2.dtype().lanes, 1);
 }
 
+TEST(Tensor, DLPackExchangeAPI) {
+  // Get the DLPackExchangeAPI struct pointer
+  const DLPackExchangeAPI* api = TVMFFIGetDLPackExchangeAPI();
+
+  // step 1: Verify API struct is valid
+  ASSERT_NE(api, nullptr);
+
+  // step 2: Verify version information
+  EXPECT_EQ(api->version.major, DLPACK_MAJOR_VERSION);
+  EXPECT_EQ(api->version.minor, DLPACK_MINOR_VERSION);
+
+  // step 3: Verify prev_version_api
+  EXPECT_EQ(api->prev_version_api, nullptr);
+
+  // step 4: Verify all function pointers are set
+  EXPECT_NE(api->managed_tensor_allocator, nullptr);
+  EXPECT_NE(api->managed_tensor_from_py_object_no_sync, nullptr);
+  EXPECT_NE(api->managed_tensor_to_py_object_no_sync, nullptr);
+  EXPECT_NE(api->dltensor_from_py_object_no_sync, nullptr);
+  EXPECT_NE(api->current_work_stream, nullptr);
+
+  // step 5: Test managed_tensor_allocator function
+  {
+    DLTensor prototype;
+    int64_t shape_data[] = {2, 3, 4};
+    prototype.shape = shape_data;
+    prototype.ndim = 3;
+    prototype.dtype = DLDataType({kDLFloat, 32, 1});
+    prototype.device = DLDevice({kDLCPU, 0});
+    prototype.strides = nullptr;
+    prototype.byte_offset = 0;
+    prototype.data = nullptr;
+
+    DLManagedTensorVersioned* allocated = nullptr;
+    struct ErrorCtx {
+      std::string kind;
+      std::string message;
+      static void SetError(void* ctx, const char* kind, const char* msg) {
+        auto* error_ctx = static_cast<ErrorCtx*>(ctx);
+        error_ctx->kind = kind;
+        error_ctx->message = msg;
+      }
+    } error_ctx;
+
+    int ret = api->managed_tensor_allocator(&prototype, &allocated, &error_ctx, ErrorCtx::SetError);
+
+    EXPECT_EQ(ret, -1);  // Should fail because no allocator is set
+    EXPECT_EQ(error_ctx.kind, "NoAllocatorError");
+
+    // Now test with allocator set
+    TVMFFIEnvSetTensorAllocator(TestDLPackTensorAllocator, 0, nullptr);
+    ret = api->managed_tensor_allocator(&prototype, &allocated, &error_ctx, ErrorCtx::SetError);
+
+    EXPECT_EQ(ret, 0);
+    ASSERT_NE(allocated, nullptr);
+    EXPECT_EQ(allocated->dl_tensor.ndim, 3);
+    EXPECT_EQ(allocated->dl_tensor.shape[0], 2);
+    EXPECT_EQ(allocated->dl_tensor.shape[1], 3);
+    EXPECT_EQ(allocated->dl_tensor.shape[2], 4);
+    EXPECT_EQ(allocated->dl_tensor.dtype.code, kDLFloat);
+    EXPECT_EQ(allocated->dl_tensor.dtype.bits, 32);
+    EXPECT_EQ(allocated->dl_tensor.device.device_type, kDLCPU);
+    EXPECT_NE(allocated->dl_tensor.data, nullptr);
+
+    // Clean up
+    if (allocated->deleter) {
+      allocated->deleter(allocated);
+    }
+
+    // Reset allocator
+    TVMFFIEnvSetTensorAllocator(nullptr, 0, nullptr);
+  }
+
+  // step 6: Test current_work_stream function
+  {
+    void* stream = nullptr;
+    int ret = api->current_work_stream(kDLCPU, 0, &stream);
+
+    EXPECT_EQ(ret, 0);
+    // For CPU, stream can be NULL or any value - just verify no crash
+  }
+}
+
 }  // namespace
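
For context, the `TestDLPackTensorAllocator` used in step 5 is a helper defined elsewhere in tests/cpp/test_tensor.cc and is not part of this diff. Below is a minimal sketch of what such an environment allocator could look like, assuming it shares the signature of `managed_tensor_allocator` exercised above (an assumption, not confirmed by this commit); the names `SketchDLPackTensorAllocator` and `SketchDeleter` are hypothetical.

// Minimal sketch of a CPU tensor allocator callback (assumed signature; not the
// helper actually used by this commit).
#include <dlpack/dlpack.h>

#include <cstdint>
#include <cstdlib>
#include <cstring>

// Frees the data buffer, the shape copy, and the managed struct itself.
static void SketchDeleter(DLManagedTensorVersioned* self) {
  std::free(self->dl_tensor.data);
  std::free(self->dl_tensor.shape);
  std::free(self);
}

// Allocates a dense CPU tensor matching the prototype's shape and dtype.
static int SketchDLPackTensorAllocator(DLTensor* prototype, DLManagedTensorVersioned** out,
                                       void* error_ctx,
                                       void (*SetError)(void* error_ctx, const char* kind,
                                                        const char* message)) {
  int64_t numel = 1;
  for (int i = 0; i < prototype->ndim; ++i) numel *= prototype->shape[i];
  size_t elem_bytes = (prototype->dtype.bits * prototype->dtype.lanes + 7) / 8;

  auto* managed =
      static_cast<DLManagedTensorVersioned*>(std::malloc(sizeof(DLManagedTensorVersioned)));
  if (managed == nullptr) {
    SetError(error_ctx, "RuntimeError", "out of memory");
    return -1;
  }
  std::memset(managed, 0, sizeof(*managed));
  managed->version.major = DLPACK_MAJOR_VERSION;
  managed->version.minor = DLPACK_MINOR_VERSION;
  managed->deleter = SketchDeleter;

  // Copy the shape so the result does not alias the caller's prototype storage.
  auto* shape = static_cast<int64_t*>(std::malloc(sizeof(int64_t) * prototype->ndim));
  std::memcpy(shape, prototype->shape, sizeof(int64_t) * prototype->ndim);

  // Compact (strides == nullptr) tensor on the prototype's device with fresh storage.
  managed->dl_tensor = *prototype;
  managed->dl_tensor.shape = shape;
  managed->dl_tensor.strides = nullptr;
  managed->dl_tensor.byte_offset = 0;
  managed->dl_tensor.data = std::malloc(static_cast<size_t>(numel) * elem_bytes);

  *out = managed;
  return 0;
}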
