61 changes: 23 additions & 38 deletions .github/scripts/cla_check.py
@@ -8,6 +8,7 @@
import os
import re
import sys
+
import requests

# Configuration
@@ -85,11 +86,7 @@ def load_cla_signers() -> dict:
sys.exit(1)


-def is_signer(
-username: str | None,
-email: str,
-signers: dict
-) -> tuple[bool, str | None]:
+def is_signer(username: str | None, email: str, signers: dict) -> tuple[bool, str | None]:
"""
Check if a user has signed the CLA.
Returns (is_signed, signing_entity).
Expand Down Expand Up @@ -129,23 +126,15 @@ def is_signer(
return False, None


-def get_pr_authors(
-owner: str,
-repo: str,
-pr_number: int,
-token: str
-) -> list[dict]:
+def get_pr_authors(owner: str, repo: str, pr_number: int, token: str) -> list[dict]:
"""
Get all unique authors from PR commits.
Returns list of {username, email, name, source}.
"""
authors = {}

# Get PR commits
-commits = github_request(
-f"repos/{owner}/{repo}/pulls/{pr_number}/commits?per_page=100",
-token
-)
+commits = github_request(f"repos/{owner}/{repo}/pulls/{pr_number}/commits?per_page=100", token)

for commit in commits:
sha = commit["sha"]
@@ -167,7 +156,7 @@ def get_pr_authors(
"username": author_username,
"email": author_email,
"name": author_name,
"source": f"commit {sha[:7]}"
"source": f"commit {sha[:7]}",
}

# Committer (if different)
@@ -185,7 +174,7 @@ def get_pr_authors(
"username": committer_username,
"email": committer_email,
"name": committer_name,
"source": f"committer {sha[:7]}"
"source": f"committer {sha[:7]}",
}

# Co-authors from commit message
Expand All @@ -197,7 +186,7 @@ def get_pr_authors(
"username": None,
"email": co_email,
"name": co_name,
"source": f"co-author {sha[:7]}"
"source": f"co-author {sha[:7]}",
}

return list(authors.values())
@@ -209,7 +198,7 @@ def post_comment(
pr_number: int,
token: str,
missing_authors: list[dict],
-signed_authors: list[tuple[dict, str]]
+signed_authors: list[tuple[dict, str]],
) -> None:
"""Post or update CLA status comment on PR."""
# Build comment body
@@ -250,8 +239,7 @@ def post_comment(

# Check for existing CLA comment to update
comments = github_request(
f"repos/{owner}/{repo}/issues/{pr_number}/comments?per_page=100",
token
f"repos/{owner}/{repo}/issues/{pr_number}/comments?per_page=100", token
)

cla_comment_id = None
@@ -269,23 +257,17 @@ def post_comment(
requests.patch(
f"{GITHUB_API}/repos/{owner}/{repo}/issues/comments/{cla_comment_id}",
headers=headers,
json={"body": comment_body}
json={"body": comment_body},
)
else:
# Create new comment
github_post(
f"repos/{owner}/{repo}/issues/{pr_number}/comments",
token,
{"body": comment_body}
f"repos/{owner}/{repo}/issues/{pr_number}/comments", token, {"body": comment_body}
)


def post_success_comment(
-owner: str,
-repo: str,
-pr_number: int,
-token: str,
-signed_authors: list[tuple[dict, str]]
+owner: str, repo: str, pr_number: int, token: str, signed_authors: list[tuple[dict, str]]
) -> None:
"""Post success comment or update existing CLA comment."""
lines = ["## CLA Verification Passed\n\n"]
@@ -306,8 +288,7 @@ def post_success_comment(

# Check for existing CLA comment to update
comments = github_request(
f"repos/{owner}/{repo}/issues/{pr_number}/comments?per_page=100",
token
f"repos/{owner}/{repo}/issues/{pr_number}/comments?per_page=100", token
)

for comment in comments:
@@ -320,17 +301,15 @@ def post_success_comment(
requests.patch(
f"{GITHUB_API}/repos/{owner}/{repo}/issues/comments/{comment['id']}",
headers=headers,
json={"body": comment_body}
json={"body": comment_body},
)
return

# No existing comment - only post if there were multiple authors
# (single author PRs don't need a "you signed" comment)
if len(signed_authors) > 1:
github_post(
f"repos/{owner}/{repo}/issues/{pr_number}/comments",
token,
{"body": comment_body}
f"repos/{owner}/{repo}/issues/{pr_number}/comments", token, {"body": comment_body}
)


@@ -358,8 +337,14 @@ def main():

# Allowlist for bots
bot_patterns = [
"dependabot", "github-actions", "renovate", "codecov",
"sonarcloud", "coderabbitai", "sonarqubecloud", "noreply@github.com"
"dependabot",
"github-actions",
"renovate",
"codecov",
"sonarcloud",
"coderabbitai",
"sonarqubecloud",
"noreply@github.com",
]

for author in authors:
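
The loop that applies bot_patterns is collapsed in this view. As a rough sketch of how such an allowlist is typically consumed — assuming a plain substring match against the author's username and email; the helper name is hypothetical, not part of this PR:

def is_allowlisted_bot(author: dict, patterns: list[str]) -> bool:
    # Case-insensitive substring match against both identity fields.
    username = (author.get("username") or "").lower()
    email = (author.get("email") or "").lower()
    return any(p in username or p in email for p in patterns)

Under that assumption, dependabot[bot] commits and GitHub noreply addresses skip CLA verification without needing entries in the signers file.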
40 changes: 39 additions & 1 deletion cortex/ask.py
@@ -169,11 +169,49 @@ def _default_model(self) -> str:
elif self.provider == "claude":
return "claude-sonnet-4-20250514"
elif self.provider == "ollama":
return "llama3.2"
return self._get_ollama_model()
elif self.provider == "fake":
return "fake"
return "gpt-4"

+def _get_ollama_model(self) -> str:
+"""Determine which Ollama model to use.
+
+The model name is resolved using the following precedence:
+
+1. If the ``OLLAMA_MODEL`` environment variable is set, its value is
+returned.
+2. Otherwise, if ``~/.cortex/config.json`` exists and contains an
+``"ollama_model"`` key, that value is returned.
+3. If neither of the above sources provides a model name, the
+hard-coded default ``"llama3.2"`` is used.
+
+Any errors encountered while reading or parsing the configuration
+file are silently ignored, and the resolution continues to the next
+step in the precedence chain.
+"""
+# Try environment variable first
+env_model = os.environ.get("OLLAMA_MODEL")
+if env_model:
+return env_model
+
+# Try config file
+try:
+from pathlib import Path
+
+config_file = Path.home() / ".cortex" / "config.json"
+if config_file.exists():
+with open(config_file) as f:
+config = json.load(f)
+model = config.get("ollama_model")
+if model:
+return model
+except Exception:
+pass # Ignore errors reading config
+
+# Default to llama3.2
+return "llama3.2"

def _initialize_client(self):
if self.provider == "openai":
try:
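
The precedence chain documented above is easy to exercise; a minimal sketch, assuming AskHandler is importable as in this repo's tests (the model names are illustrative):

import os
from cortex.ask import AskHandler  # import path assumed from the file layout

# 1. The environment variable wins over everything.
os.environ["OLLAMA_MODEL"] = "mistral"
assert AskHandler(api_key="unused", provider="ollama").model == "mistral"
del os.environ["OLLAMA_MODEL"]

# 2. With no env var set, ~/.cortex/config.json is consulted; a file
#    containing {"ollama_model": "phi3"} resolves the model to "phi3".
# 3. With neither source present, the model falls back to "llama3.2".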
2 changes: 1 addition & 1 deletion cortex/dependency_check.py
@@ -43,7 +43,7 @@ def format_installation_instructions(missing: list[str]) -> str:
╰─────────────────────────────────────────────────────────────────╯

Cortex requires the following packages that are not installed:
-{', '.join(missing)}
+{", ".join(missing)}

To fix this, run ONE of the following:

34 changes: 29 additions & 5 deletions cortex/first_run_wizard.py
@@ -371,16 +371,40 @@ def _setup_ollama(self) -> StepResult:
print("\n✗ Failed to install Ollama")
return StepResult(success=True, data={"api_provider": "none"})

-# Pull a small model
-print("\nPulling llama3.2 model (this may take a few minutes)...")
+# Let user choose model or use default
+print("\nWhich Ollama model would you like to use?")
+print(" 1. llama3.2 (2GB) - Recommended for most users")
+print(" 2. llama3.2:1b (1.3GB) - Faster, less RAM")
+print(" 3. mistral (4GB) - Alternative quality model")
+print(" 4. phi3 (2.3GB) - Microsoft's efficient model")
+print(" 5. Custom (enter your own)")
+
+model_choices = {
+"1": "llama3.2",
+"2": "llama3.2:1b",
+"3": "mistral",
+"4": "phi3",
+}

+choice = self._prompt("\nEnter choice [1]: ", default="1")

+if choice == "5":
+model_name = self._prompt("Enter model name: ", default="llama3.2")
+else:
+model_name = model_choices.get(choice, "llama3.2")  # fall back to the default on unrecognized input instead of raising KeyError

+# Pull the selected model
+print(f"\nPulling {model_name} model (this may take a few minutes)...")
try:
subprocess.run(["ollama", "pull", "llama3.2"], check=True)
subprocess.run(["ollama", "pull", model_name], check=True)
print("\n✓ Model ready!")
except subprocess.CalledProcessError:
print("\n⚠ Could not pull model - you can do this later with: ollama pull llama3.2")
print(
f"\n⚠ Could not pull model - you can do this later with: ollama pull {model_name}"
)

self.config["api_provider"] = "ollama"
self.config["ollama_model"] = "llama3.2"
self.config["ollama_model"] = model_name

return StepResult(success=True, data={"api_provider": "ollama"})

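
Assuming the wizard later persists self.config to ~/.cortex/config.json (the write happens outside this hunk), a run that picks option 2 would leave a file like:

{"api_provider": "ollama", "ollama_model": "llama3.2:1b"}

which is exactly the "ollama_model" key that the new _get_ollama_model in cortex/ask.py reads at step 2 of its precedence chain.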
3 changes: 2 additions & 1 deletion cortex/llm/interpreter.py
@@ -112,7 +112,8 @@ def _initialize_client(self):

ollama_base_url = os.environ.get("OLLAMA_BASE_URL", "http://localhost:11434")
self.client = OpenAI(
api_key="ollama", base_url=f"{ollama_base_url}/v1" # Dummy key, not used
api_key="ollama",
base_url=f"{ollama_base_url}/v1", # Dummy key, not used
)
except ImportError:
raise ImportError("OpenAI package not installed. Run: pip install openai")
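
Since the client is just the OpenAI SDK pointed at Ollama's OpenAI-compatible /v1 endpoint, targeting a non-default host is a one-variable change; a sketch (the hostname is hypothetical):

import os

# Must be set before _initialize_client runs; no real key is needed
# because Ollama does not authenticate requests.
os.environ["OLLAMA_BASE_URL"] = "http://gpu-box.local:11434"
# The client will then call http://gpu-box.local:11434/v1 with the dummy key "ollama".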
25 changes: 24 additions & 1 deletion tests/test_ask.py
@@ -240,8 +240,31 @@ def test_default_model_claude(self):

def test_default_model_ollama(self):
"""Test default model for Ollama."""
-# Test with environment variable
+
+# Save and clear any existing OLLAMA_MODEL
+original_model = os.environ.get("OLLAMA_MODEL")
+
+# Test with custom env variable
+os.environ["OLLAMA_MODEL"] = "test-model"
handler = AskHandler(api_key="test", provider="ollama")
-self.assertEqual(handler.model, "llama3.2")
+self.assertEqual(handler.model, "test-model")

+# Clean up
+if original_model is not None:
+os.environ["OLLAMA_MODEL"] = original_model
+else:
+os.environ.pop("OLLAMA_MODEL", None)
+
+# Test deterministic default behavior when no env var or config file exists.
+# Point the home directory to a temporary location without ~/.cortex/config.json
+with (
+tempfile.TemporaryDirectory() as tmpdir,
+patch("os.path.expanduser", return_value=tmpdir),
+):
Comment on lines +261 to +264

⚠️ Potential issue | 🔴 Critical

Critical: Wrong patch target breaks test isolation.

Line 263 patches os.path.expanduser, but the implementation in cortex/ask.py uses Path.home() to locate the config file:

config_file = Path.home() / ".cortex" / "config.json"

Patching os.path.expanduser won't affect Path.home(), so the test will still read from the real ~/.cortex/config.json instead of the temporary directory, making the test non-deterministic and environment-dependent.

🔎 Proposed fix
+        from pathlib import Path
+        
         # Test deterministic default behavior when no env var or config file exists.
         # Point the home directory to a temporary location without ~/.cortex/config.json
         with (
             tempfile.TemporaryDirectory() as tmpdir,
-            patch("os.path.expanduser", return_value=tmpdir),
+            patch("pathlib.Path.home", return_value=Path(tmpdir)),
         ):
             handler2 = AskHandler(api_key="test", provider="ollama")
             # When no env var and no config file exist, AskHandler should use its built-in default.
             self.assertEqual(handler2.model, "llama3.2")
🤖 Prompt for AI Agents
In tests/test_ask.py around lines 261 to 264, the test currently patches
os.path.expanduser but cortex/ask.py uses Path.home(), so the test still reads
the real ~/.cortex/config.json; change the patch to target pathlib.Path.home
(e.g. patch("pathlib.Path.home", return_value=Path(tmpdir)) or
patch.object(Path, "home", return_value=Path(tmpdir))) and ensure tmpdir is
converted to a Path before returning so the code under test resolves the config
file inside the temporary directory.

+handler2 = AskHandler(api_key="test", provider="ollama")
+# When no env var and no config file exist, AskHandler should use its built-in default.
+self.assertEqual(handler2.model, "llama3.2")

def test_default_model_fake(self):
"""Test default model for fake provider."""
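
Separately from the patch-target problem flagged above, the manual save/restore of OLLAMA_MODEL leaks the variable if the first assertion fails. A sketch of the same check written with unittest.mock.patch.dict inside the test method, which restores the environment whether or not the assertion raises:

with patch.dict(os.environ, {"OLLAMA_MODEL": "test-model"}):
    handler = AskHandler(api_key="test", provider="ollama")
    self.assertEqual(handler.model, "test-model")
# os.environ is back to its prior state here, pass or fail.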
18 changes: 10 additions & 8 deletions tests/test_cli.py
@@ -59,14 +59,16 @@ def test_print_success(self, mock_stdout):
self.assertTrue(True)

@patch.dict(os.environ, {}, clear=True)
-def test_install_no_api_key(self):
-# When no API key is set, the CLI falls back to Ollama.
-# If Ollama is running, this should succeed. If not, it should fail.
-# We'll mock Ollama to be unavailable to test the failure case.
-with patch("cortex.llm.interpreter.CommandInterpreter.parse") as mock_parse:
-mock_parse.side_effect = RuntimeError("Ollama not available")
-result = self.cli.install("docker")
-self.assertEqual(result, 1)
+@patch("cortex.cli.CommandInterpreter")
+def test_install_no_api_key(self, mock_interpreter_class):
+# Should work with Ollama (no API key needed)
+mock_interpreter = Mock()
+mock_interpreter.parse.return_value = ["apt update", "apt install docker"]
+mock_interpreter_class.return_value = mock_interpreter
+
+result = self.cli.install("docker")
+# Should succeed with Ollama as fallback provider
+self.assertEqual(result, 0)

@patch.dict(os.environ, {"OPENAI_API_KEY": "sk-test-openai-key-123"}, clear=True)
@patch("cortex.cli.CommandInterpreter")
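
Note that the rewritten test patches cortex.cli.CommandInterpreter rather than cortex.llm.interpreter.CommandInterpreter: a name is mocked where it is looked up, not where it is defined. A minimal illustration, assuming cortex/cli.py imports the class into its own namespace:

from unittest.mock import patch

# Assumed import inside cortex/cli.py:
#   from cortex.llm.interpreter import CommandInterpreter
with patch("cortex.cli.CommandInterpreter"):
    ...  # code in cortex.cli that instantiates CommandInterpreter sees the mock
with patch("cortex.llm.interpreter.CommandInterpreter"):
    ...  # rebinds only the definition site; cortex.cli's reference is untouched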