Update model registry handles used in tests.
rmitsch committed Jan 26, 2024
1 parent cd082ed · commit b83aa0a
Showing 23 changed files with 82 additions and 55 deletions.
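At a glance, the tests drop the dedicated per-model handles (e.g. "spacy.Dolly.v1", "spacy.GPT-3-5.v2") in favor of the consolidated "spacy.HuggingFace.v1" and "spacy.OpenAI.v1" registry entries, which pick the concrete model via name. A minimal before/after sketch of the model block, using names that appear in the diffs below:

# Before: one registry handle per model family.
"model": {"@llm_models": "spacy.Dolly.v1", "name": "dolly-v2-3b"}

# After: one handle per backend, model selected by name.
"model": {"@llm_models": "spacy.HuggingFace.v1", "name": "dolly-v2-3b"}
"model": {"@llm_models": "spacy.OpenAI.v1", "name": "gpt-3.5-turbo"}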
2 changes: 2 additions & 0 deletions spacy_llm/models/hf/__init__.py
@@ -4,12 +4,14 @@
from .llama2 import llama2_hf
from .mistral import mistral_hf
from .openllama import openllama_hf
from .registry import huggingface_v1
from .stablelm import stablelm_hf

__all__ = [
"HuggingFace",
"dolly_hf",
"falcon_hf",
"huggingface_v1",
"llama2_hf",
"mistral_hf",
"openllama_hf",
6 changes: 4 additions & 2 deletions spacy_llm/models/hf/registry.py
@@ -34,7 +34,8 @@ def huggingface_v1(
}

for model_cls, context_length in model_context_lengths.items():
if name in getattr(model_cls, "MODEL_NAMES", {}):
model_names = getattr(model_cls, "MODEL_NAMES")
if model_names and name in model_names.__args__:
return model_cls(
name=name,
config_init=config_init,
@@ -43,5 +44,6 @@ def huggingface_v1(
)

raise ValueError(
f"Name {name} could not be associated with any of the supported models. Please check https://spacy.io/api/large-language-models#models-hf to ensure the specified model name is correct."
f"Name {name} could not be associated with any of the supported models. Please check "
f"https://spacy.io/api/large-language-models#models-hf to ensure the specified model name is correct."
)
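For context on the lookup in this hunk: each Hugging Face wrapper class is expected to expose its accepted model names through a MODEL_NAMES attribute whose __args__ tuple holds the valid strings (i.e. a typing.Literal). A minimal, self-contained sketch of that dispatch pattern — the stub classes and name sets below are illustrative placeholders, not the actual spacy_llm wrappers:

from typing import Literal, Optional, Type


class DollyStub:
    # Placeholder wrapper class; names mirror those used in the tests in this commit.
    MODEL_NAMES = Literal["dolly-v2-3b", "dolly-v2-7b"]


class FalconStub:
    MODEL_NAMES = Literal["falcon-rw-1b"]


def resolve(name: str) -> Optional[Type]:
    """Return the first class whose MODEL_NAMES Literal contains `name`."""
    for model_cls in (DollyStub, FalconStub):
        model_names = getattr(model_cls, "MODEL_NAMES", None)
        if model_names and name in model_names.__args__:
            return model_cls
    return None  # the registry function raises the ValueError above instead


assert resolve("dolly-v2-3b") is DollyStub
assert resolve("dolly-the-sheep") is None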
2 changes: 1 addition & 1 deletion spacy_llm/models/rest/openai/registry.py
@@ -9,7 +9,7 @@
_DEFAULT_TEMPERATURE = 0.0


@registry.llm_models("spacy.OpenAI.v")
@registry.llm_models("spacy.OpenAI.v1")
def openai_v1(
name: str,
config: Dict[Any, Any] = SimpleFrozenDict(temperature=_DEFAULT_TEMPERATURE),
6 changes: 3 additions & 3 deletions spacy_llm/tests/models/test_dolly.py
@@ -9,7 +9,7 @@

_PIPE_CFG = {
"model": {
"@llm_models": "spacy.Dolly.v1",
"@llm_models": "spacy.HuggingFace.v1",
"name": "dolly-v2-3b",
},
"task": {"@llm_tasks": "spacy.NoOp.v1"},
@@ -32,7 +32,7 @@
@llm_tasks = "spacy.NoOp.v1"
[components.llm.model]
@llm_models = "spacy.Dolly.v1"
@llm_models = "spacy.HuggingFace.v1"
name = "dolly-v2-3b"
"""

@@ -66,6 +66,6 @@ def test_invalid_model():
orig_config = Config().from_str(_NLP_CONFIG)
config = copy.deepcopy(orig_config)
config["components"]["llm"]["model"]["name"] = "dolly-the-sheep"
with pytest.raises(ValueError, match="unexpected value; permitted"):
with pytest.raises(ValueError, match="could not be associated"):
spacy.util.load_model_from_config(config, auto_fill=True)
torch.cuda.empty_cache()
4 changes: 2 additions & 2 deletions spacy_llm/tests/models/test_falcon.py
@@ -9,7 +9,7 @@

_PIPE_CFG = {
"model": {
"@llm_models": "spacy.Falcon.v1",
"@llm_models": "spacy.HuggingFace.v1",
"name": "falcon-rw-1b",
},
"task": {"@llm_tasks": "spacy.NoOp.v1"},
@@ -32,7 +32,7 @@
@llm_tasks = "spacy.NoOp.v1"
[components.llm.model]
@llm_models = "spacy.Falcon.v1"
@llm_models = "spacy.HuggingFace.v1"
name = "falcon-rw-1b"
"""

12 changes: 6 additions & 6 deletions spacy_llm/tests/models/test_hf.py
@@ -18,14 +18,14 @@

@pytest.mark.gpu
@pytest.mark.skipif(not has_torch_cuda_gpu, reason="needs GPU & CUDA")
@pytest.mark.parametrize(
"model", (("spacy.Dolly.v1", "dolly-v2-3b"), ("spacy.Llama2.v1", "Llama-2-7b-hf"))
)
@pytest.mark.parametrize("model", ("dolly-v2-3b", "Llama-2-7b-hf"))
def test_device_config_conflict(model: Tuple[str, str]):
"""Test device configuration."""
nlp = spacy.blank("en")
model, name = model
cfg = {**_PIPE_CFG, **{"model": {"@llm_models": model, "name": name}}}
cfg = {
**_PIPE_CFG,
**{"model": {"@llm_models": "spacy.HuggingFace.v1", "name": model}},
}

# Set device only.
cfg["model"]["config_init"] = {"device": "cpu"} # type: ignore[index]
@@ -58,7 +58,7 @@ def test_torch_dtype():
nlp = spacy.blank("en")
cfg = {
**_PIPE_CFG,
**{"model": {"@llm_models": "spacy.Dolly.v1", "name": "dolly-v2-3b"}},
**{"model": {"@llm_models": "spacy.HuggingFace.v1", "name": "dolly-v2-3b"}},
}

# Should be converted to torch.float16.
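The consolidated handle still takes Hugging Face initialization arguments through config_init, as the device test above exercises. A pipe-config sketch in the tests' dict style — only the "device" key is taken from this diff, so treat the block as illustrative rather than a full schema:

cfg = {
    "model": {
        "@llm_models": "spacy.HuggingFace.v1",
        "name": "dolly-v2-3b",
        # Init-time kwargs forwarded to the underlying HF model, per the test above.
        "config_init": {"device": "cpu"},
    },
    "task": {"@llm_tasks": "spacy.NoOp.v1"},
}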
4 changes: 2 additions & 2 deletions spacy_llm/tests/models/test_llama2.py
@@ -9,7 +9,7 @@

_PIPE_CFG = {
"model": {
"@llm_models": "spacy.Llama2.v1",
"@llm_models": "spacy.HuggingFace.v1",
"name": "Llama-2-7b-hf",
},
"task": {"@llm_tasks": "spacy.NoOp.v1"},
@@ -32,7 +32,7 @@
@llm_tasks = "spacy.NoOp.v1"
[components.llm.model]
@llm_models = "spacy.Llama2.v1"
@llm_models = "spacy.HuggingFace.v1"
name = "Llama-2-7b-hf"
"""

4 changes: 2 additions & 2 deletions spacy_llm/tests/models/test_mistral.py
@@ -9,7 +9,7 @@

_PIPE_CFG = {
"model": {
"@llm_models": "spacy.Mistral.v1",
"@llm_models": "spacy.HuggingFace.v1",
"name": "Mistral-7B-v0.1",
},
"task": {"@llm_tasks": "spacy.NoOp.v1"},
@@ -31,7 +31,7 @@
@llm_tasks = "spacy.NoOp.v1"
[components.llm.model]
@llm_models = "spacy.Mistral.v1"
@llm_models = "spacy.HuggingFace.v1"
name = "Mistral-7B-v0.1"
"""

4 changes: 2 additions & 2 deletions spacy_llm/tests/models/test_openllama.py
@@ -9,7 +9,7 @@

_PIPE_CFG = {
"model": {
"@llm_models": "spacy.OpenLLaMA.v1",
"@llm_models": "spacy.HuggingFace.v1",
"name": "open_llama_3b",
},
"task": {"@llm_tasks": "spacy.NoOp.v1"},
@@ -32,7 +32,7 @@
@llm_tasks = "spacy.NoOp.v1"
[components.llm.model]
@llm_models = spacy.OpenLLaMA.v1
@llm_models = spacy.HuggingFace.v1
name = open_llama_3b
"""

7 changes: 4 additions & 3 deletions spacy_llm/tests/models/test_palm.py
@@ -3,6 +3,7 @@

from spacy_llm.models.rest.palm import palm_bison

from ...models.rest.palm.registry import google_v1
from ..compat import has_palm_key


@@ -11,7 +12,7 @@
@pytest.mark.parametrize("name", ("text-bison-001", "chat-bison-001"))
def test_palm_api_response_is_correct(name: str):
"""Check if we're getting the response from the correct structure"""
model = palm_bison(name=name)
model = google_v1(name=name)
prompt = "The number of stars in the universe is"
num_prompts = 3 # arbitrary number to check multiple inputs
responses = list(model([prompt] * num_prompts))
@@ -30,7 +31,7 @@ def test_palm_api_response_n_generations():
the very first output.
"""
candidate_count = 3
model = palm_bison(config={"candidate_count": candidate_count})
model = google_v1(config={"candidate_count": candidate_count})

prompt = "The number of stars in the universe is"
num_prompts = 3
@@ -57,4 +58,4 @@ def test_palm_error_unsupported_model():
"""Ensure graceful handling of error when model is not supported"""
incorrect_model = "x-gpt-3.5-turbo"
with pytest.raises(ValueError, match="Model 'x-gpt-3.5-turbo' is not supported"):
palm_bison(name=incorrect_model)
google_v1(name=incorrect_model)
6 changes: 3 additions & 3 deletions spacy_llm/tests/models/test_rest.py
@@ -12,7 +12,7 @@

PIPE_CFG = {
"model": {
"@llm_models": "spacy.GPT-3-5.v2",
"@llm_models": "spacy.OpenAI.v1",
},
"task": {"@llm_tasks": "spacy.TextCat.v1", "labels": "POSITIVE,NEGATIVE"},
}
@@ -53,12 +53,12 @@ def test_initialization():
def test_model_error_handling():
"""Test error handling for wrong model."""
nlp = spacy.blank("en")
with pytest.raises(ValueError, match="Could not find function 'spacy.gpt-3.5x.v1'"):
with pytest.raises(ValueError, match="is not available"):
nlp.add_pipe(
"llm",
config={
"task": {"@llm_tasks": "spacy.NoOp.v1"},
"model": {"@llm_models": "spacy.gpt-3.5x.v1"},
"model": {"@llm_models": "spacy.OpenAI.v1", "name": "GPT-3.5-x"},
},
)

4 changes: 2 additions & 2 deletions spacy_llm/tests/models/test_stablelm.py
@@ -9,7 +9,7 @@

_PIPE_CFG = {
"model": {
"@llm_models": "spacy.StableLM.v1",
"@llm_models": "spacy.HuggingFace.v1",
"name": "stablelm-base-alpha-3b",
},
"task": {"@llm_tasks": "spacy.NoOp.v1"},
@@ -31,7 +31,7 @@
@llm_tasks = "spacy.NoOp.v1"
[components.llm.model]
@llm_models = "spacy.StableLM.v1"
@llm_models = "spacy.HuggingFace.v1"
name = "stablelm-base-alpha-3b"
"""

9 changes: 6 additions & 3 deletions spacy_llm/tests/tasks/test_entity_linker.py
@@ -135,7 +135,8 @@ def zeroshot_cfg_string():
@llm_tasks = "spacy.EntityLinker.v1"
[components.llm.model]
@llm_models = "spacy.GPT-3-5.v1"
@llm_models = "spacy.OpenAI.v1"
name = "gpt-3.5-turbo"
config = {"temperature": 0}
[initialize]
@@ -179,7 +180,8 @@ def fewshot_cfg_string():
path = {str((Path(__file__).parent / "examples" / "entity_linker.yml"))}
[components.llm.model]
@llm_models = "spacy.GPT-3-5.v1"
@llm_models = "spacy.OpenAI.v1"
name = "gpt-3.5-turbo"
config = {{"temperature": 0}}
[initialize]
@@ -224,7 +226,8 @@ def ext_template_cfg_string():
path = {str((Path(__file__).parent / "templates" / "entity_linker.jinja2"))}
[components.llm.model]
@llm_models = "spacy.GPT-3-5.v1"
@llm_models = "spacy.OpenAI.v1"
name = "gpt-3.5-turbo"
config = {{"temperature": 0}}
[initialize]
9 changes: 6 additions & 3 deletions spacy_llm/tests/tasks/test_lemma.py
@@ -56,7 +56,8 @@ def zeroshot_cfg_string():
@llm_tasks = "spacy.Lemma.v1"
[components.llm.model]
@llm_models = "spacy.GPT-3-5.v2"
@llm_models = "spacy.OpenAI.v1"
name = "gpt-3.5-turbo"
"""


@@ -81,7 +82,8 @@ def fewshot_cfg_string():
path = {str((Path(__file__).parent / "examples" / "lemma.yml"))}
[components.llm.model]
@llm_models = "spacy.GPT-3-5.v2"
@llm_models = "spacy.OpenAI.v1"
name = "gpt-3.5-turbo"
"""


@@ -107,7 +109,8 @@ def ext_template_cfg_string():
path = {str((Path(__file__).parent / "templates" / "lemma.jinja2"))}
[components.llm.model]
@llm_models = "spacy.GPT-3-5.v2"
@llm_models = "spacy.OpenAI.v1"
name = "gpt-3.5-turbo"
"""


3 changes: 2 additions & 1 deletion spacy_llm/tests/tasks/test_ner.py
@@ -101,7 +101,8 @@ def fewshot_cfg_string_v3_lds():
@misc = "spacy.LowercaseNormalizer.v1"
[components.llm.model]
@llm_models = "spacy.GPT-3-5.v2"
@llm_models = "spacy.OpenAI.v1"
name = "gpt-3.5-turbo"
"""


3 changes: 2 additions & 1 deletion spacy_llm/tests/tasks/test_raw.py
@@ -53,7 +53,8 @@ def zeroshot_cfg_string():
@llm_tasks = "spacy.Raw.v1"
[components.llm.model]
@llm_models = "spacy.GPT-3-5.v3"
@llm_models = "spacy.OpenAI.v1"
name = "gpt-3.5-turbo"
"""


6 changes: 4 additions & 2 deletions spacy_llm/tests/tasks/test_rel.py
@@ -40,7 +40,8 @@ def zeroshot_cfg_string():
labels = "LivesIn,Visits"
[components.llm.model]
@llm_models = "spacy.GPT-3-5.v2"
@llm_models = "spacy.OpenAI.v1"
name = "gpt-3.5-turbo"
[initialize]
vectors = "en_core_web_md"
@@ -72,7 +73,8 @@ def fewshot_cfg_string():
path = {str(EXAMPLES_DIR / "rel.jsonl")}
[components.llm.model]
@llm_models = "spacy.GPT-3-5.v2"
@llm_models = "spacy.OpenAI.v1"
name = "gpt-3.5-turbo"
[initialize]
vectors = "en_core_web_md"
3 changes: 2 additions & 1 deletion spacy_llm/tests/tasks/test_sentiment.py
@@ -33,7 +33,8 @@ def zeroshot_cfg_string():
@llm_tasks = "spacy.Sentiment.v1"
[components.llm.model]
@llm_models = "spacy.GPT-3-5.v2"
@llm_models = "spacy.OpenAI.v1"
name = "gpt-3.5-turbo"
"""


6 changes: 4 additions & 2 deletions spacy_llm/tests/tasks/test_spancat.py
@@ -83,7 +83,8 @@ def fewshot_cfg_string():
@misc = "spacy.LowercaseNormalizer.v1"
[components.llm.model]
@llm_models = "spacy.GPT-3-5.v2"
@llm_models = "spacy.OpenAI.v1"
name = "gpt-3.5-turbo"
"""


@@ -118,7 +119,8 @@ def ext_template_cfg_string():
@misc = "spacy.LowercaseNormalizer.v1"
[components.llm.model]
@llm_models = "spacy.GPT-3-5.v2"
@llm_models = "spacy.OpenAI.v1"
name = "gpt-3.5-turbo"
"""

