Tech Stack Advisor - Code Viewer

← Back to File Tree

base.py

Language: python | Path: backend/src/agents/base.py | Lines: 121
"""Base agent class with shared functionality."""
from typing import Any, Protocol
from anthropic import Anthropic
from ..core.config import settings
from ..core.logging import get_logger, usage_tracker


class Tool(Protocol):
    """Structural protocol for agent tools.

    Any object exposing a ``name``, a ``description``, and an ``execute``
    method satisfies this protocol — no inheritance required. Tools are
    dispatched by ``name`` (see ``BaseAgent._execute_tool``) and their
    ``description`` is embedded into the agent's system prompt.
    """

    # Unique identifier used for dispatch in BaseAgent._execute_tool.
    name: str
    # Human-readable summary listed in the agent's system prompt.
    description: str

    def execute(self, **kwargs: Any) -> dict[str, Any]:
        """Execute the tool with given parameters.

        Args:
            **kwargs: Tool-specific keyword parameters.

        Returns:
            Tool execution results as a dictionary.
        """
        ...


class BaseAgent:
    """Base class for all specialized agents.

    Provides the shared plumbing every agent needs: an Anthropic client,
    a structured logger, system-prompt construction from the agent's
    tools, and helpers for calling the LLM (`_call_llm`) and dispatching
    tool executions by name (`_execute_tool`). Subclasses implement
    `analyze()`.
    """

    def __init__(self, name: str, role: str, tools: list[Tool] | None = None) -> None:
        """Initialize the agent.

        Args:
            name: Agent name (also used as the logger suffix).
            role: Agent role description (embedded in the system prompt).
            tools: Optional list of tools available to this agent.
        """
        self.name = name
        self.role = role
        # Defensive copy: a caller mutating its list afterwards must not
        # silently change this agent's toolset.
        self.tools = list(tools) if tools else []
        self.logger = get_logger(f"agent.{name}")
        self.client = Anthropic(api_key=settings.anthropic_api_key)

    def get_system_prompt(self) -> str:
        """Get the system prompt for this agent.

        Returns:
            System prompt string embedding the agent's role and a bullet
            list of its tools (the list is empty when there are no tools).
        """
        # Generator expression: no need to materialize a list for join().
        tools_desc = "\n".join(f"- {t.name}: {t.description}" for t in self.tools)
        return f"""You are a {self.role}.

Your available tools:
{tools_desc}

Provide concise, actionable recommendations based on the user's requirements.
Focus on practical solutions suitable for their scale and use case.
"""

    async def analyze(self, context: dict[str, Any]) -> dict[str, Any]:
        """Analyze the context and provide recommendations.

        Args:
            context: Context dictionary with user requirements

        Returns:
            Dictionary with recommendations

        Raises:
            NotImplementedError: Always; subclasses must override.
        """
        raise NotImplementedError("Subclasses must implement analyze()")

    def _call_llm(self, user_prompt: str, system_prompt: str | None = None, api_key: str | None = None) -> str:
        """Call the LLM with the given prompts.

        Args:
            user_prompt: User prompt
            system_prompt: Optional system prompt (defaults to agent's system prompt)
            api_key: Optional user-provided API key (bypasses shared budget)

        Returns:
            Concatenated text of all text blocks in the LLM response
            (empty string if the response contains no text blocks).
        """
        if system_prompt is None:
            system_prompt = self.get_system_prompt()

        # Use custom API key if provided, otherwise use shared client
        using_custom_key = api_key is not None
        client = Anthropic(api_key=api_key) if using_custom_key else self.client

        self.logger.info(
            "llm_call",
            agent=self.name,
            prompt_length=len(user_prompt),
            using_custom_key=using_custom_key,
        )

        response = client.messages.create(
            model=settings.model_name,
            max_tokens=settings.max_tokens,
            temperature=settings.model_temperature,
            system=system_prompt,
            messages=[{"role": "user", "content": user_prompt}],
        )

        # Only track usage if using shared budget (not custom API key)
        if not using_custom_key:
            usage_tracker.log_request(
                input_tokens=response.usage.input_tokens,
                output_tokens=response.usage.output_tokens,
                model=settings.model_name,
            )

        # Robust extraction: `response.content[0].text` raises on an empty
        # response or when the first block is not a text block (e.g. a
        # tool_use block). Collect every text block instead.
        return "".join(
            part.text for part in response.content if getattr(part, "type", "") == "text"
        )

    def _execute_tool(self, tool_name: str, **kwargs: Any) -> dict[str, Any]:
        """Execute a tool by name.

        Args:
            tool_name: Name of the tool to execute
            **kwargs: Tool parameters

        Returns:
            Tool execution results

        Raises:
            ValueError: If no tool with ``tool_name`` is registered.
        """
        for tool in self.tools:
            if tool.name == tool_name:
                # NOTE(review): kwargs are logged verbatim — confirm no
                # sensitive values are ever passed as tool parameters.
                self.logger.info("tool_execution", tool=tool_name, params=kwargs)
                return tool.execute(**kwargs)

        raise ValueError(f"Tool {tool_name} not found")