Skip to content

Commit

Permalink
Release llamafile v0.8.10
Browse files — browse the repository at this point in the history
  • Branch information (placeholder captured while still loading)
jart committed Jul 23, 2024
1 parent 115315c commit f7c6ef4
Show file tree
Hide file tree
Showing 3 changed files with 5 additions and 5 deletions.
6 changes: 3 additions & 3 deletions build/config.mk
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
#── vi: set noet ft=make ts=8 sw=8 fenc=utf-8 :vi ────────────────────┘

PREFIX = /usr/local
COSMOCC = .cosmocc/3.5.9
COSMOCC = .cosmocc/3.6.0
TOOLCHAIN = $(COSMOCC)/bin/cosmo

AR = $(TOOLCHAIN)ar
Expand Down Expand Up @@ -52,5 +52,5 @@ clean:; rm -rf o
.PHONY: distclean
distclean:; rm -rf o .cosmocc

.cosmocc/3.5.9:
build/download-cosmocc.sh $@ 3.5.9 1f66831de4bf2d82e138d8993e9ee84a7559afc47aeeb2e2de51872401790a0a
.cosmocc/3.6.0:
build/download-cosmocc.sh $@ 3.6.0 4918c45ac3e0972ff260e2a249e25716881e39fb679d5e714ae216a2ef6c3f7e
2 changes: 1 addition & 1 deletion llama.cpp/ggml-cuda.cu
Original file line number Diff line number Diff line change
Expand Up @@ -9709,6 +9709,7 @@ static __device__ void rope_yarn(
}
*cos_theta = cosf(theta) * mscale;
*sin_theta = sinf(theta) * mscale;
(void)ggml_cuda_hmax;
}

// rope == RoPE == rotary positional embedding
Expand Down Expand Up @@ -13856,7 +13857,6 @@ GGML_API GGML_CALL int ggml_backend_cuda_reg_devices() {
(void)exit_;
(void)fast_fp16_available;
(void)fp16_mma_available;
(void)ggml_cuda_hmax;
(void)set_ggml_graph_node_properties;
(void)ggml_graph_node_has_matching_properties;
}
2 changes: 1 addition & 1 deletion llamafile/version.h
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@

#define LLAMAFILE_MAJOR 0
#define LLAMAFILE_MINOR 8
#define LLAMAFILE_PATCH 9
#define LLAMAFILE_PATCH 10
#define LLAMAFILE_VERSION \
(100000000 * LLAMAFILE_MAJOR + 1000000 * LLAMAFILE_MINOR + LLAMAFILE_PATCH)

Expand Down

0 comments on commit f7c6ef4

Please sign in to comment.