
Commit cb76886
fix merge conflicts
xadupre committed Jun 7, 2024
1 parent 0258037 commit cb76886
Showing 2 changed files with 0 additions and 10 deletions.
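
For context: when a merge conflicts, Git leaves both versions in the file between marker lines, roughly like this generic sketch (illustrative only, not taken from this repository):

<<<<<<< HEAD
// the current branch's version
=======
// the incoming branch's version
>>>>>>> <incoming-commit-hash>

Resolving a conflict means keeping the wanted content and deleting the three marker lines. The markers shown below were committed by accident during an earlier merge; this commit removes them, which is why it contains deletions only.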
3 changes: 0 additions & 3 deletions operators/cuda/cuda_ops.cc
@@ -7,11 +7,8 @@
 #include "cuda/add_mul.h"
 #include "cuda/fast_gelu.h"
 #include "cuda/negxplus1.h"
-<<<<<<< HEAD
 #include "cuda/scatter_nd_of_shape.h"
-=======
 #include "cuda/transpose_cast.h"
->>>>>>> 79f3b048d4d195b6684f2d1b6ca5bfe1ab9ea8d6
 #endif

 FxLoadCustomOpFactory LoadCustomOpClasses_Contrib = []() -> CustomOpArray& {
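
The effect of this hunk: only the three marker lines are deleted and both sides' includes are kept, so the resolved block reads as follows (reconstructed from the diff above, not copied from the full file):

#include "cuda/add_mul.h"
#include "cuda/fast_gelu.h"
#include "cuda/negxplus1.h"
#include "cuda/scatter_nd_of_shape.h"
#include "cuda/transpose_cast.h"
#endif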
7 changes: 0 additions & 7 deletions test/cuda/test_cudaops.py
@@ -169,12 +169,6 @@ def _negxplus1_cuda(self, itype):
         got = sess.run(None, feeds1)[0]
         assert_almost_equal(expected, got, decimal=5)

-<<<<<<< HEAD
-    @unittest.skipIf(not has_cuda(), reason="cuda not available")
-    def test_cuda_negxplus1(self):
-        self._negxplus1_cuda(TensorProto.FLOAT)
-        self._negxplus1_cuda(TensorProto.FLOAT16)
-=======
     @unittest.skipIf(not has_cuda(), reason="CUDA is missing")
     def test_cuda_negxplus1(self):
         self._negxplus1_cuda(TensorProto.FLOAT)
@@ -289,7 +283,6 @@ def test_add_shared_input_cuda_broadcast2(self):
             shapeb=(3, 2, 3),
             shapec=(3, 2, 3),
         )
->>>>>>> 04029e13f7998574670e2bc7eb500825800654fa

     def _scatternd_of_shape_optimize_cuda(self, optimize, dim3, itype):
         indices_shape = ["i", "j", 1] if dim3 else ["j", 1]
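
Here the resolution keeps the incoming version of the test and drops the duplicated copy from HEAD, again deleting only the marker and duplicate lines. The visible portion of the surviving test is (reconstructed from the diff; lines past the hunk boundary are not shown on this page):

    @unittest.skipIf(not has_cuda(), reason="CUDA is missing")
    def test_cuda_negxplus1(self):
        self._negxplus1_cuda(TensorProto.FLOAT)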
