2 changes: 1 addition & 1 deletion docs/RunTorchModel.md
@@ -22,7 +22,7 @@ cd build-light
 cmake -DCMAKE_CXX_COMPILER=/usr/bin/c++ \
       -DONNX_MLIR_ENABLE_PYRUNTIME_LIGHT=ON \
       ..
-make OMCreatONNXMLIRTOrchPackage
+make OMCreateONNXMLIRTorchPackage
 pip3 install -e src/Runtime/python/onnxmlirtorch
 ```
 ## Install from pip repository
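After the editable install, a quick import check confirms the package is visible from Python (a minimal sketch; it assumes nothing beyond the package name used throughout this PR):

```python
# Smoke test for the freshly installed package (sketch): the import
# should succeed and resolve to the editable install's source tree.
import onnxmlirtorch

print(onnxmlirtorch.__file__)
```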
3 changes: 2 additions & 1 deletion src/Runtime/python/CMakeLists.txt
@@ -119,9 +119,10 @@ add_custom_target(OMCreatePyRuntimePackage
 )
 
 # Target to prepare onnxmlirtorch package
+file(MAKE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/onnxmlirtorch/src/onnxmlirtorch/libs)
 add_custom_target(OMCreateONNXMLIRTorchPackage
   COMMAND cp -r ${CMAKE_CURRENT_SOURCE_DIR}/onnxmlirtorch ${CMAKE_CURRENT_BINARY_DIR}
-  COMMAND cp ${ONNX_MLIR_BIN_ROOT}/${CMAKE_BUILD_TYPE}/lib/PyRuntimeC.*.so ${CMAKE_CURRENT_BINARY_DIR}/onnxmlirtorch/src/onnxmlirtorch/
+  COMMAND cp ${ONNX_MLIR_BIN_ROOT}/${CMAKE_BUILD_TYPE}/lib/PyRuntimeC.*.so ${CMAKE_CURRENT_BINARY_DIR}/onnxmlirtorch/src/onnxmlirtorch/libs
[Review comment (author)]: The libs folder was missing, so OMExecutionSession was not found.
   COMMAND cp ${CMAKE_CURRENT_SOURCE_DIR}/onnxmlirdocker.py ${CMAKE_CURRENT_BINARY_DIR}/onnxmlirtorch/src/onnxmlirtorch
   COMMAND cp ${CMAKE_CURRENT_SOURCE_DIR}/PyRuntime.py ${CMAKE_CURRENT_BINARY_DIR}/onnxmlirtorch/src/onnxmlirtorch
   DEPENDS PyRuntimeC
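To check that the copy commands produced the layout PyRuntime.py expects, one can resolve the package's libs directory by hand (a sketch; assumes the onnxmlirtorch package is importable):

```python
# Sketch: verify that PyRuntimeC.*.so landed in <package>/libs, the
# directory PyRuntime.py appends to sys.path at import time.
import importlib.util
import os

spec = importlib.util.find_spec("onnxmlirtorch")
libs = os.path.join(os.path.dirname(spec.origin), "libs")
print(libs, "->", os.listdir(libs))  # expect a PyRuntimeC.<abi>.so entry
```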
4 changes: 2 additions & 2 deletions src/Runtime/python/PyRuntime.py
@@ -15,9 +15,9 @@
 import pkgutil
 
 if __package__ == "onnxmlir" or __package__ == "onnxmlirtorch":
-    loader = pkgutil.get_loader("onnxmlir")
+    loader = pkgutil.get_loader(__package__)
     PyRuntimeC_module = os.path.join(
-        os.path.dirname(loader.get_filename("onnxmlir")), "libs"
+        os.path.dirname(loader.get_filename(__package__)), "libs"
     )
     sys.path.append(PyRuntimeC_module)
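As an aside, pkgutil.get_loader is deprecated since Python 3.12; an importlib-based equivalent of the same lookup could look like this (a sketch, not part of this PR):

```python
# Sketch: resolve <package>/libs without the deprecated pkgutil.get_loader.
import importlib.util
import os
import sys

if __package__ in ("onnxmlir", "onnxmlirtorch"):
    spec = importlib.util.find_spec(__package__)
    libs_dir = os.path.join(os.path.dirname(spec.origin), "libs")
    sys.path.append(libs_dir)
```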
5 changes: 3 additions & 2 deletions src/Runtime/python/onnxmlirdocker.py
@@ -58,6 +58,7 @@ class InferenceSession:
     def __init__(self, model_path, **kwargs):
         self.debug = False
         self.session = None
+        self.output_dir = tempfile.TemporaryDirectory()

[Review comment (author)]: We want to keep this temp folder, so it moves into a class member. Otherwise, the temp folder is cleaned up automatically.

         self.handleParameters(model_path, **kwargs)
         if self.session is not None:
             return
@@ -117,7 +118,7 @@ def handleParameters(self, model_path, **kwargs):
             self.compiled_model += ".so"
             self.output_dirname = os.path.dirname(self.compiled_model)
         else:
-            self.output_dirname = tempfile.TemporaryDirectory().name
+            self.output_dirname = self.output_dir.name
             self.compiled_model = os.path.join(
                 self.output_dirname, self.model_basename.removesuffix(self.model_suffix)
             )
@@ -292,7 +293,7 @@ def Compile(self):
 
     def getSession(self):
         # When the script is used in package onnxmlir, the files to be imported
-        # are within the package. Path in the pakcage should be used.
+        # are within the package. Path in the package should be used.
         # Otherwise, env variable ONNX_MLIR_HOME is used for the import path.
         if __package__ == "onnxmlir" or __package__ == "onnxmlirtorch":
             try:
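The lifetime issue behind these two hunks is easy to reproduce: tempfile.TemporaryDirectory deletes its directory as soon as the object is garbage-collected, so taking only .name from a throwaway instance yields a path that no longer exists. A standalone sketch:

```python
import os
import tempfile

# The throwaway object is collected right after this expression, and
# its cleanup hook removes the directory immediately (in CPython).
stale = tempfile.TemporaryDirectory().name
print(os.path.exists(stale))      # False: the path is already gone

# Holding a reference, as self.output_dir now does, keeps it alive.
keep = tempfile.TemporaryDirectory()
print(os.path.exists(keep.name))  # True for as long as `keep` lives
```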
@@ -123,13 +123,16 @@ class config:
     cache_size = 3
 
 
+glocalSessionCache = SessionCache(config.cache_size)
[Review comment (author)]: Make the cache global. Otherwise, the cache is local to each PyTorch call into the backend.
+
+
 class ONNXMLIRTorch:
     def __init__(self, torch_model, **kwargs):
         self.torch_model = torch_model
         # Temporary directory
         self.workdir = tempfile.TemporaryDirectory()
         self.default_model_name = "model"
-        self.sessionCache = SessionCache(config.cache_size)
+        self.sessionCache = glocalSessionCache
         if "compile_tag" in kwargs.keys():
             self.tag = kwargs["compile_tag"]
         else:

@@ -195,5 +198,5 @@ def forward(self, *args, **kwargs):
         _, sess = cached_session
 
         # Run the inference
-        outputs = sess.run(None, np_args)
+        outputs = sess.run(np_args)
         return [torch.from_numpy(output) for output in outputs]
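Two things change here: the session cache moves to module scope, and sess.run drops the onnxruntime-style output-names argument to match the single-argument run(inputs) of the PyRuntime execution session. The cache move matters because a fresh backend wrapper may be constructed per compiled graph, so a per-instance cache starts empty every time. A minimal illustration (a sketch with simplified names, not the PR's code):

```python
# Sketch: a cache created per wrapper instance is lost whenever a new
# wrapper is built; a module-level cache persists across all of them.
GLOBAL_CACHE: dict = {}  # shared by every wrapper, like glocalSessionCache

class PerCallWrapper:
    def __init__(self):
        self.cache = {}  # empty again on every construction

class SharedCacheWrapper:
    def __init__(self):
        self.cache = GLOBAL_CACHE  # same dict, so entries survive

a, b = SharedCacheWrapper(), SharedCacheWrapper()
a.cache[("model", (2,))] = "compiled session"
print(b.cache[("model", (2,))])  # reused: "compiled session"
```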
4 changes: 2 additions & 2 deletions src/Runtime/python/onnxmlirtorch/tests/torch_compile_add.py
@@ -45,13 +45,13 @@ def forward(self, x, y):
 output = opt_mod(input, input)
 print("output: ", output)
 
-
-# Second inference
+# Second inference: different input shapes, so recompile the model.
 input1 = torch.randn(3)
 input2 = torch.randn(3)
 output1 = opt_mod(input1, input2)
 print("output: ", output1)
 
+# Third inference: reuse the compiled .so in the cache: no recompilation.
 input3 = torch.randn(2)
 output2 = opt_mod(input3, input3)
 print("output: ", output2)