@@ -32,6 +32,7 @@ tvm::relay::Var TVMCompiler::convertToRelay(Value* val, TVMContext ctx) {
   auto optional_ivalue = toIValue(val);
   if (optional_ivalue.has_value()) {
     if (optional_ivalue.value().isTensor()) {
+      auto t = optional_ivalue.value().toTensor();
       val->inferTypeFrom(optional_ivalue.value().toTensor());
     } else {
       auto expr = convertToRelay(optional_ivalue.value(), ctx)
@@ -45,15 +46,23 @@ tvm::relay::Var TVMCompiler::convertToRelay(Value* val, TVMContext ctx) {
   if (val->isCompleteTensor()) {
     // Ensure that if the complete tensor has a device type, it is CPU;
     // otherwise it is assumed to be CPU.
-    auto pt_t = val->type()->cast<CompleteTensorType>();
-    auto device_type = pt_t->device();
+    auto pt_t = val->type()->cast<ProfiledTensorType>();
+    TORCH_INTERNAL_ASSERT(pt_t);
+    auto optional_device_type = pt_t->device();
+    TORCH_INTERNAL_ASSERT(optional_device_type);
+    auto device_type = optional_device_type.value();
     AT_CHECK(device_type == at::DeviceType::CPU,
         " Expected CPU device type but got:", device_type);
     tvm::Array<tvm::relay::IndexExpr> sizes;
-    for (const auto& size : pt_t->sizes()) {
-      sizes.push_back(tvm::relay::IndexExpr(static_cast<int32_t>(size)));
+    const auto& varying_sizes = pt_t->sizes();
+    for (const auto& optional_size : varying_sizes.sizes()) {
+      TORCH_INTERNAL_ASSERT(optional_size);
+      sizes.push_back(tvm::relay::IndexExpr(
+          static_cast<int32_t>(optional_size.value())));
     }
-    at::ScalarType pt_type = pt_t->scalarType();
+    auto optional_dtype = pt_t->scalarType();
+    TORCH_INTERNAL_ASSERT(optional_dtype);
+    at::ScalarType pt_type = optional_dtype.value();
     auto t = tvm::relay::TensorTypeNode::make(sizes, scalarTypeToTVMType(pt_type));
     auto v = tvm::relay::VarNode::make(
         val->debugName() +
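For context, the change above swaps CompleteTensorType, whose device(), sizes(), and scalarType() accessors return concrete values, for ProfiledTensorType, whose accessors return optionals that may be empty when profiling has not observed the property; hence the new TORCH_INTERNAL_ASSERT before each unwrap. Below is a minimal, self-contained sketch of that unwrap-with-assert pattern. It uses std::optional as a stand-in for c10::optional, and ProfiledTensorInfo, checked(), and describe() are hypothetical names invented for this illustration, not torch APIs.

// Pattern sketch: unwrap-with-assert over optional tensor properties.
// Everything here is a simplified stand-in for the real torch types.
#include <cstdint>
#include <iostream>
#include <optional>
#include <stdexcept>
#include <vector>

// Stand-in for the profiled type: every property may be unknown.
struct ProfiledTensorInfo {
  std::optional<int> device;                  // 0 == CPU in this sketch
  std::vector<std::optional<int64_t>> sizes;  // per-dimension sizes
  std::optional<int> scalar_type;             // dtype tag
};

// Poor man's TORCH_INTERNAL_ASSERT: fail loudly on a missing profile
// entry instead of silently reading an empty optional.
template <typename T>
const T& checked(const std::optional<T>& opt, const char* what) {
  if (!opt.has_value()) throw std::logic_error(what);
  return opt.value();
}

std::vector<int32_t> describe(const ProfiledTensorInfo& pt_t) {
  // Mirrors the diff: the device must be known and must be CPU.
  if (checked(pt_t.device, "no device recorded") != 0)
    throw std::logic_error("expected CPU device");
  // Mirrors the diff's loop: each dimension needs a concrete size.
  std::vector<int32_t> sizes;
  for (const auto& optional_size : pt_t.sizes)
    sizes.push_back(
        static_cast<int32_t>(checked(optional_size, "unknown dim")));
  // The dtype must likewise be known before use.
  (void)checked(pt_t.scalar_type, "no dtype recorded");
  return sizes;
}

int main() {
  ProfiledTensorInfo info{0, {2, 3}, 6};  // CPU, shape [2, 3], dtype tag 6
  for (int32_t s : describe(info)) std::cout << s << '\n';  // prints 2, 3
}

The design choice the diff makes is the same one sketched here: asserting each optional at conversion time turns missing profile data into an immediate internal error, rather than letting an unknown device, size, or dtype flow into the constructed Relay type.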