diff --git a/.github/workflows/pylint.yml b/.github/workflows/pylint.yml
index f98ed37..132c312 100644
--- a/.github/workflows/pylint.yml
+++ b/.github/workflows/pylint.yml
@@ -1,6 +1,6 @@
 name: Pylint
 
-on: [ push ]
+on: [ pull_request ]
 
 jobs:
   build:
diff --git a/tests/test_local_llm.py b/tests/test_local_llm.py
index c3d9142..28bee0d 100644
--- a/tests/test_local_llm.py
+++ b/tests/test_local_llm.py
@@ -2,17 +2,28 @@
 
 
 class DummyModel:
-    def __call__(self, *_args, **_kwargs):
-        return {"choices": [{"text": "ok"}]}
+    """Simple callable model stub for local LLM tests."""
+    def __init__(self):
+        self.last_kwargs = {}
 
 
-def test_normalize_max_tokens():
-    assert local_llm._normalize_max_tokens(-1) == local_llm.LOCAL_LLM_MAX_TOKENS
-    assert local_llm._normalize_max_tokens("bad") == local_llm.LOCAL_LLM_MAX_TOKENS
-    assert local_llm._normalize_max_tokens(1_000_000) == local_llm.LOCAL_LLM_CONTEXT
+    def __call__(self, *_args, **kwargs):
+        self.last_kwargs = kwargs
+        return {"choices": [{"text": "ok"}]}
 
 
 def test_ask_local_llm_empty_prompt(monkeypatch):
-    monkeypatch.setattr(local_llm, "LLM_MODEL", DummyModel())
+    dummy_model = DummyModel()
+    monkeypatch.setattr(local_llm, "LLM_MODEL", dummy_model)
     result = local_llm.ask_local_llm(" ")
     assert result == "[Local LLM error: Empty prompt]"
+
+
+def test_ask_local_llm_normalizes_max_tokens(monkeypatch):
+    dummy_model = DummyModel()
+    monkeypatch.setattr(local_llm, "LLM_MODEL", dummy_model)
+
+    result = local_llm.ask_local_llm("build plan", max_tokens=-1)
+
+    assert result == "ok"
+    assert dummy_model.last_kwargs["max_tokens"] == local_llm.LOCAL_LLM_MAX_TOKENS
\ No newline at end of file