File tree 2 files changed +58
-1
lines changed
2 files changed +58
-1
lines changed Original file line number Diff line number Diff line change @@ -15,6 +15,14 @@ set(CMAKE_CXX_STANDARD 17)
15
15
set (CMAKE_CXX_STANDARD_REQUIRED ON )
16
16
set (CMAKE_WINDOWS_EXPORT_ALL_SYMBOLS ON )
17
17
18
+ # Detect if the architecture is ARM (AArch64)
19
+ if (CMAKE_SYSTEM_PROCESSOR MATCHES "^(aarch64|ARM64|arm64|armv8|armv7)" )
20
+ message (STATUS "Building on ARM: Enabling -fPIC" )
21
+ set (CMAKE_POSITION_INDEPENDENT_CODE ON )
22
+ else ()
23
+ message (STATUS "Building on x86: No -fPIC required" )
24
+ endif ()
25
+
18
26
ucm_print_flags()
19
27
20
28
# Source files
@@ -170,4 +178,4 @@ target_link_libraries(test_inference PRIVATE ${TARGET_NAME})
170
178
# Put the test in the bin directory
171
179
set_target_properties (test_inference PROPERTIES
172
180
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR} /bin"
173
- )
181
+ )
Original file line number Diff line number Diff line change
1
+ ## 🚀 Setup
2
+
3
+ To ensure compatibility, this project uses **llama.cpp version b4514**.
4
+
5
+ ### 1️⃣ Clone the Repository
6
+
7
+ ``` sh
8
+ git clone https://github.com/genta-technology/inference-personal
9
+ cd inference-personal
10
+ ```
11
+
12
+ ### 2️⃣ Setup `llama.cpp`
13
+
14
+ ``` sh
15
+ cd external
16
+ git clone https://github.com/ggml-org/llama.cpp.git
17
+ cd llama.cpp
18
+ git checkout b4514
19
+ git switch -c b4514
20
+ git branch # Confirm the current branch is `b4514`
21
+ git submodule update --init --recursive
22
+ git lfs install
23
+ git lfs pull
24
+ ```
25
+
26
+ ## 🛠️ Build Instructions
27
+
28
+ ### 1️⃣ Create Build Directory
29
+
30
+ ``` sh
31
+ cd ../..
32
+ mkdir build && cd build
33
+ ```
34
+
35
+ ### 2️⃣ Compile the Project
36
+
37
+ ``` sh
38
+ cmake ..
39
+ make
40
+ ```
41
+
42
+ ## ✅ Running Tests
43
+
44
+ After a successful build, run the test binary:
45
+
46
+ ``` sh
47
+ ./bin/test_inference
48
+ ```
49
+
You can’t perform that action at this time.
0 commit comments