Skip to content

Commit

Permalink
Fixed layernorm and logsoftmax unit test errors
Browse files Browse the repository at this point in the history
Type: Bug Fix
Signed-off-by: Feiyue Chen <[email protected]>
  • Loading branch information
chenfeiyue-cfy committed Jul 22, 2024
1 parent 720fe03 commit 3e95e96
Show file tree
Hide file tree
Showing 2 changed files with 9 additions and 11 deletions.
14 changes: 5 additions & 9 deletions src/tim/vx/ops/layernormalization_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,7 @@ TEST(LayerNorm, axis_0_shape_3_6_1_float) {
float tolerance = ctx->hasSP() ? 0.01 : 1e-5f;

tim::vx::ShapeType io_shape({3, 6, 1});
tim::vx::ShapeType param_shape({6});
tim::vx::ShapeType param_shape({3});
tim::vx::TensorSpec input_spec(tim::vx::DataType::FLOAT32,
io_shape, tim::vx::TensorAttribute::INPUT);
tim::vx::TensorSpec param_spec(tim::vx::DataType::FLOAT32,
Expand All @@ -54,11 +54,9 @@ TEST(LayerNorm, axis_0_shape_3_6_1_float) {
-6, 0, 6,
-7, 0, 7 };
std::vector<float> gamma = {
1.0f, 1.0f, 1.0f,
1.0f, 1.0f, 1.0f
};
std::vector<float> beta = {
.0f, .0f, .0f,
.0f, .0f, .0f
};
std::vector<float> golden = {
Expand Down Expand Up @@ -91,7 +89,7 @@ TEST(LayerNorm, axis_0_shape_2_3_6_1_float) {
float tolerance = ctx->hasSP() ? 0.01 : 1e-5f;

tim::vx::ShapeType io_shape({2, 3, 6, 1});
tim::vx::ShapeType param_shape({6});
tim::vx::ShapeType param_shape({2});
tim::vx::TensorSpec input_spec(tim::vx::DataType::FLOAT32,
io_shape, tim::vx::TensorAttribute::INPUT);
tim::vx::TensorSpec param_spec(tim::vx::DataType::FLOAT32,
Expand All @@ -113,12 +111,10 @@ TEST(LayerNorm, axis_0_shape_2_3_6_1_float) {
-7, 7, -7, 7, -7, 7
};
std::vector<float> gamma = {
1.0f, 1.0f, 1.0f,
1.0f, 1.0f, 1.0f
1.0f, 1.0f
};
std::vector<float> beta = {
.0f, .0f, .0f,
.0f, .0f, .0f
.0f, .0f
};
std::vector<float> golden = {
-1.f, 1.f, -1.f, 1.f, -1.f, 1.f,
Expand Down Expand Up @@ -150,7 +146,7 @@ TEST(LayerNorm, axis_2_shape_4_2_3_1_float) {
float tolerance = ctx->hasSP() ? 0.01 : 1e-5f;

tim::vx::ShapeType io_shape({4, 2, 3, 1});
tim::vx::ShapeType param_shape({1,1,3,1});
tim::vx::ShapeType param_shape({1, 1, 3, 1});
tim::vx::TensorSpec input_spec(tim::vx::DataType::FLOAT32,
io_shape, tim::vx::TensorAttribute::INPUT);
tim::vx::TensorSpec param_spec(tim::vx::DataType::FLOAT32,
Expand Down
6 changes: 4 additions & 2 deletions src/tim/vx/ops/logsoftmax_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -30,6 +30,7 @@
TEST(LogSoftmax, shape_6_1_float_axis_0) {
auto ctx = tim::vx::Context::Create();
auto graph = ctx->CreateGraph();
float tolerance = ctx->hasSP() ? 0.01 : 1e-5f;

tim::vx::ShapeType io_shape({6, 1});
tim::vx::TensorSpec input_spec(tim::vx::DataType::FLOAT32,
Expand Down Expand Up @@ -57,12 +58,13 @@ TEST(LogSoftmax, shape_6_1_float_axis_0) {

std::vector<float> output(golden.size() * sizeof(float));
EXPECT_TRUE(output_tensor->CopyDataFromTensor(output.data()));
EXPECT_TRUE(ArraysMatch(golden, output, 1e-5f));
EXPECT_TRUE(ArraysMatch(golden, output, tolerance));
}

TEST(LogSoftmax, shape_3_6_1_float_axis_1) {
auto ctx = tim::vx::Context::Create();
auto graph = ctx->CreateGraph();
float tolerance = ctx->hasSP() ? 0.01 : 1e-5f;

tim::vx::ShapeType io_shape({3, 6, 1});
tim::vx::TensorSpec input_spec(tim::vx::DataType::FLOAT32,
Expand Down Expand Up @@ -100,7 +102,7 @@ TEST(LogSoftmax, shape_3_6_1_float_axis_1) {

std::vector<float> output(golden.size() * sizeof(float));
EXPECT_TRUE(output_tensor->CopyDataFromTensor(output.data()));
EXPECT_TRUE(ArraysMatch(golden, output, 1e-5f));
EXPECT_TRUE(ArraysMatch(golden, output, tolerance));
}

TEST(LogSoftmax, shape_3_6_1_uint8_axis_1) {
Expand Down

0 comments on commit 3e95e96

Please sign in to comment.