CHORE: Specify xoscar version (#457)
aresnow1 authored Sep 16, 2023
1 parent f8df104 commit fd64f71
Showing 2 changed files with 9 additions and 1 deletion.
2 changes: 1 addition & 1 deletion setup.cfg
@@ -24,7 +24,7 @@ zip_safe = False
 include_package_data = True
 packages = find:
 install_requires =
-    xoscar
+    xoscar>=0.1.2
     xorbits
     gradio>=3.39.0
     click
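The `xoscar>=0.1.2` specifier only constrains what pip resolves at install time. As a minimal sketch (not part of this commit), the same floor could also be verified at runtime against the installed distribution:

    # Sketch only: check that the installed xoscar satisfies the new floor.
    # The "0.1.2" value mirrors the specifier added above; this helper is illustrative.
    from importlib.metadata import version

    from packaging.version import Version

    if Version(version("xoscar")) < Version("0.1.2"):
        raise RuntimeError(f"xoscar>=0.1.2 is required, found {version('xoscar')}")
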
8 changes: 8 additions & 0 deletions xinference/model/llm/ggml/llamacpp.py
@@ -184,7 +184,15 @@ def _convert_ggml_to_gguf(self, model_path: str) -> str:
 
     def load(self):
         try:
+            import llama_cpp
             from llama_cpp import Llama
+
+            if llama_cpp.__version__ < "0.2.0":
+                raise ValueError(
+                    "The llama_cpp version must be greater than 0.2.0. "
+                    "Please upgrade your version via `pip install -U llama_cpp` or refer to "
+                    "https://github.com/abetlen/llama-cpp-python#installation-with-openblas--cublas--clblast--metal."
+                )
         except ImportError:
             error_message = "Failed to import module 'llama_cpp'"
             installation_guide = [
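One caveat with the added check: comparing `llama_cpp.__version__` against "0.2.0" as strings is lexicographic, so a hypothetical "0.10.0" would wrongly compare as older than "0.2.0". A minimal sketch of a numeric comparison (an alternative, not what this commit ships) using `packaging.version`:

    # Sketch only: gate on the parsed release number rather than the raw string.
    import llama_cpp
    from packaging.version import Version

    MIN_VERSION = Version("0.2.0")  # floor taken from the check above

    if Version(llama_cpp.__version__) < MIN_VERSION:
        raise ValueError(
            f"llama-cpp-python>={MIN_VERSION} is required, "
            f"found {llama_cpp.__version__}; upgrade with "
            "`pip install -U llama-cpp-python`."
        )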
