Skip to content

Commit

Permalink
got stuck on CMake
Browse files Browse the repository at this point in the history
  • Loading branch information
crasm committed Jan 20, 2024
1 parent 6c459d6 commit d8b8ec6
Show file tree
Hide file tree
Showing 7 changed files with 39 additions and 26 deletions.
7 changes: 5 additions & 2 deletions Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,7 @@ TEST_TARGETS = \
tests/test-llama-grammar tests/test-grammar-parser tests/test-double-float tests/test-grad0 tests/test-opt \
tests/test-quantize-fns tests/test-quantize-perf tests/test-sampling tests/test-tokenizer-0-llama \
tests/test-tokenizer-0-falcon tests/test-tokenizer-1-llama tests/test-tokenizer-1-bpe tests/test-rope \
tests/test-backend-ops tests/test-autorelease
tests/test-backend-ops tests/test-model-load-cancel tests/test-autorelease

# Code coverage output files
COV_TARGETS = *.gcno tests/*.gcno *.gcda tests/*.gcda *.gcov tests/*.gcov lcov-report gcovr-report
Expand Down Expand Up @@ -748,5 +748,8 @@ tests/test-c.o: tests/test-c.c llama.h
tests/test-backend-ops: tests/test-backend-ops.cpp ggml.o $(OBJS)
$(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)

tests/test-autorelease: tests/test-autorelease.cpp ggml.o llama.o $(COMMON_DEPS) $(OBJS)
tests/test-model-load-cancel: tests/test-model-load-cancel.cpp ggml.o llama.o tests/get_model.cpp $(COMMON_DEPS) $(OBJS)
$(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)

tests/test-autorelease: tests/test-autorelease.cpp ggml.o llama.o tests/get_model.cpp $(COMMON_DEPS) $(OBJS)
$(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)
2 changes: 0 additions & 2 deletions ci/run.sh
Original file line number Diff line number Diff line change
Expand Up @@ -238,8 +238,6 @@ function gg_run_open_llama_3b_v2 {

wiki_test_60="${path_wiki}/wiki.test-60.raw"

./bin/test-autorelease ${model_f16}

./bin/quantize ${model_f16} ${model_q8_0} q8_0
./bin/quantize ${model_f16} ${model_q4_0} q4_0
./bin/quantize ${model_f16} ${model_q4_1} q4_1
Expand Down
2 changes: 1 addition & 1 deletion tests/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -55,11 +55,11 @@ llama_build_and_test_executable(test-llama-grammar.cpp)
llama_build_and_test_executable(test-grad0.cpp)
# llama_build_and_test_executable(test-opt.cpp) # SLOW
llama_build_and_test_executable(test-backend-ops.cpp)
llama_build_and_test_executable(test-autorelease.cpp)

llama_build_and_test_executable(test-rope.cpp)

llama_build_and_test_executable_with_label(test-model-load-cancel.cpp "model")
llama_build_and_test_executable_with_label(test-autorelease.cpp "model")

# dummy executable - not installed
get_filename_component(TEST_TARGET test-c.c NAME_WE)
Expand Down
26 changes: 26 additions & 0 deletions tests/get_model.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,26 @@
#include <cstdio>
#include <cstdlib>
#include <cstring>

#include "get_model.h"

// Resolve the model file path for a model-dependent test, or terminate.
//
// If the process is running under GNU Make (MAKELEVEL > 0), the test is
// skipped by exiting with success — these tests are only meant to run via
// ctest / CI where a model file is available. Otherwise the path is taken
// from argv[1], or, failing that, from the GG_RUN_CTEST_MODELFILE
// environment variable. If neither yields a non-empty path, an error is
// printed and the process exits with failure.
char * get_model_or_exit(int argc, char *argv[]) {
    const char * level = getenv("MAKELEVEL");
    if (level && atoi(level) > 0) {
        fprintf(stderr, "Detected being run in Make. Skipping this test.\n");
        exit(EXIT_SUCCESS);
    }

    // Explicit command-line argument wins over the environment.
    if (argc > 1) {
        return argv[1];
    }

    char * from_env = getenv("GG_RUN_CTEST_MODELFILE");
    if (from_env == nullptr || strlen(from_env) == 0) {
        fprintf(stderr, "error: no model file provided\n");
        exit(EXIT_FAILURE);
    }
    return from_env;
}
1 change: 1 addition & 0 deletions tests/get_model.h
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
// Returns the model file path for a test binary, taken from argv[1] or the
// GG_RUN_CTEST_MODELFILE environment variable. Exits the process instead of
// returning when run under Make (skip, success) or when no non-empty path is
// available (failure).
char * get_model_or_exit(int, char*[]);
12 changes: 4 additions & 8 deletions tests/test-autorelease.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -5,19 +5,15 @@
#include <thread>

#include "llama.h"
#include "get_model.h"

// This creates a new context inside a pthread and then tries to exit cleanly.
int main(int argc, char ** argv) {
if (argc < 2) {
printf("Usage: %s model.gguf\n", argv[0]);
return 0; // intentionally return success
}
auto * model_path = get_model_or_exit(argc, argv);

const std::string fname = argv[1];

std::thread([&fname]() {
std::thread([&model_path]() {
llama_backend_init(false);
auto * model = llama_load_model_from_file(fname.c_str(), llama_model_default_params());
auto * model = llama_load_model_from_file(model_path, llama_model_default_params());
auto * ctx = llama_new_context_with_model(model, llama_context_default_params());
llama_free(ctx);
llama_free_model(model);
Expand Down
15 changes: 2 additions & 13 deletions tests/test-model-load-cancel.cpp
Original file line number Diff line number Diff line change
@@ -1,21 +1,10 @@
#include "llama.h"
#include "get_model.h"

#include <cstdio>
#include <cstdlib>
#include <cstring>

int main(int argc, char *argv[] ) {
char * model_path;
if (argc > 1) {
model_path = argv[1];
} else {
model_path = getenv("GG_RUN_CTEST_MODELFILE");
if (!model_path || strlen(model_path) == 0) {
fprintf(stderr, "error: no model file provided\n");
exit(1);
}
}

auto * model_path = get_model_or_exit(argc, argv);
auto * file = fopen(model_path, "r");
if (file == nullptr) {
fprintf(stderr, "no model at '%s' found\n", model_path);
Expand Down

0 comments on commit d8b8ec6

Please sign in to comment.