Skip to content

Commit

Permalink
Add use_mmap flag to server
Browse files · Browse the repository at this point in the history
  • Loading branch information
abetlen committed Apr 19, 2023
1 parent 207ebbc commit e4647c7
Showing 1 changed file with 3 additions and 1 deletion.
4 changes: 3 additions & 1 deletion llama_cpp/server/__main__.py
Original file line number Diff line number Diff line change
Expand Up @@ -29,9 +29,10 @@ class Settings(BaseSettings):
model: str
n_ctx: int = 2048
n_batch: int = 8
n_threads: int = ((os.cpu_count() or 2) // 2) or 1
n_threads: int = max((os.cpu_count() or 2) // 2, 1)
f16_kv: bool = True
use_mlock: bool = False # This causes a silent failure on platforms that don't support mlock (e.g. Windows) took forever to figure out...
use_mmap: bool = True
embedding: bool = True
last_n_tokens_size: int = 64
logits_all: bool = False
Expand All @@ -54,6 +55,7 @@ class Settings(BaseSettings):
settings.model,
f16_kv=settings.f16_kv,
use_mlock=settings.use_mlock,
use_mmap=settings.use_mmap,
embedding=settings.embedding,
logits_all=settings.logits_all,
n_threads=settings.n_threads,
Expand Down

0 comments on commit e4647c7

Please sign in to comment.