diff --git a/inference.py b/inference.py
index 13b1658..9a2fa7b 100644
--- a/inference.py
+++ b/inference.py
@@ -1,3 +1,4 @@
+import os
 from ctransformers import AutoModelForCausalLM, AutoConfig
@@ -31,7 +32,7 @@ def generate(llm, system_prompt, user_prompt):
         seed=42,
         reset=True,  # reset history (cache)
         stream=True,  # streaming per word/token
-        threads=24,  # adjust for your CPU
+        threads=max(1, (os.cpu_count() or 2) // 2),  # int thread count; os.cpu_count() may return None
         stop=["<|im_end|>", "|<"],
     )