Tech Stack Advisor - Code Viewer

← Back to File Tree

conversation.py

Language: python | Path: backend/src/agents/conversation.py | Lines: 208
"""Conversation manager for intelligent multi-turn interactions."""
import json
from typing import Any
from .base import BaseAgent
from ..core.logging import get_logger

logger = get_logger(__name__)


class ConversationManager(BaseAgent):
    """Agent that manages conversational interactions to gather project requirements.

    Drives a multi-turn Q&A loop: it extracts structured requirements from
    free-form user messages via the LLM and decides the single most valuable
    follow-up question to ask next, until enough context exists to hand off
    to recommendation generation.
    """

    def __init__(self) -> None:
        """Initialize the conversation manager."""
        super().__init__(
            name="conversation_manager",
            role="conversational assistant that asks intelligent follow-up questions to understand project requirements",
            tools=[],
        )

    @staticmethod
    def _extract_json(response: str) -> dict[str, Any]:
        """Parse a JSON object out of a raw LLM response.

        LLMs frequently wrap JSON in markdown code fences (```json ... ```)
        or surround it with prose. First try the response verbatim; on
        failure, retry on the outermost ``{...}`` span, which covers the
        fenced/prefixed cases without needing a regex.

        Args:
            response: Raw text returned by the LLM.

        Returns:
            The decoded JSON object.

        Raises:
            json.JSONDecodeError: If no parseable JSON object is found.
        """
        text = response.strip()
        try:
            return json.loads(text)
        except json.JSONDecodeError:
            start = text.find("{")
            end = text.rfind("}")
            if start != -1 and end > start:
                # May still raise JSONDecodeError; callers handle the fallback.
                return json.loads(text[start:end + 1])
            raise

    async def start_conversation(self, initial_input: str, api_key: str | None = None) -> dict[str, Any]:
        """Start a new conversation based on initial user input.

        Args:
            initial_input: Initial user description
            api_key: Optional user-provided API key

        Returns:
            Dictionary with next question and extracted context
        """
        self.logger.info("conversation_start", initial_input=initial_input[:100])

        prompt = f"""You are a tech stack advisor having a conversation with a user to understand their project requirements.

Initial user input: "{initial_input}"

Your task is to:
1. Extract any information already provided
2. Generate the MOST IMPORTANT follow-up question to ask next with structured choices when applicable
3. Determine how complete the information is (0-100%)

Required information to gather:
- Application type (e.g., social media, e-commerce, analytics dashboard)
- Daily active users (DAU) or expected scale
- Key features/functionality
- Data sensitivity/compliance requirements (optional)
- Budget constraints (optional)
- Existing technology preferences (optional)

Respond with a JSON object:
{{
    "extracted_context": {{
        "app_type": "social media platform" or null,
        "dau": 100000 or null,
        "key_features": ["real-time messaging", "content feeds"] or null,
        "data_sensitivity": "high" or null,
        "compliance": ["gdpr", "hipaa"] or null,
        "budget_constraint": 5000 or null,
        "tech_preferences": ["python", "aws"] or null
    }},
    "next_question": "What is your expected number of daily active users?",
    "question_type": "choice" or "text",
    "options": ["Option 1", "Option 2", "Option 3", "None/Skip"] or null,
    "completion_percentage": 30,
    "reasoning": "Brief explanation of why this question is important"
}}

Guidelines for structured questions:
- Use question_type="choice" with options array for: DAU ranges, compliance frameworks, common features, tech preferences
- Use question_type="text" for: open-ended descriptions, custom requirements
- Always include "None" or "Skip" or "Not sure" as last option for choice questions
- Prioritize the most impactful information first (app type, scale, features)

Example choice questions:
- DAU: ["Less than 1,000", "1,000 - 10,000", "10,000 - 100,000", "100,000 - 1M", "Over 1 million", "Not sure yet"]
- Compliance: ["GDPR", "HIPAA", "PCI DSS", "SOC 2", "None needed", "Not sure"]
- Features for e-commerce: ["Product catalog", "Payment processing", "User reviews", "Inventory management", "Recommendations", "Other"]
- Tech preferences: ["AWS", "Azure", "GCP", "No preference"]

Completion is 100% when: app_type, dau, and key_features are known
"""

        response = self._call_llm(prompt, api_key=api_key)

        try:
            # Tolerant parse: handles fenced/prose-wrapped JSON as well.
            result = self._extract_json(response)
            self.logger.info(
                "conversation_question_generated",
                completion=result.get("completion_percentage", 0)
            )
            return result
        except json.JSONDecodeError:
            # Fallback if JSON parsing fails; mirror the full documented
            # schema (question_type/options included) so consumers never
            # hit missing keys on this path.
            self.logger.error("conversation_json_parse_error", response=response[:200])
            return {
                "extracted_context": {},
                "next_question": "Could you tell me more about what type of application you're building?",
                "question_type": "text",
                "options": None,
                "completion_percentage": 0,
                "reasoning": "Failed to parse response, asking basic question"
            }

    async def continue_conversation(
        self,
        conversation_history: list[dict[str, str]],
        user_response: str,
        current_context: dict[str, Any],
        api_key: str | None = None
    ) -> dict[str, Any]:
        """Continue the conversation with a new user response.

        Args:
            conversation_history: List of previous messages
            user_response: Latest user response
            current_context: Currently extracted context
            api_key: Optional user-provided API key

        Returns:
            Dictionary with next question and updated context, or ready_for_recommendation flag
        """
        self.logger.info("conversation_continue", response=user_response[:100])

        # Format conversation history
        history_text = "\n".join([
            f"{msg['role'].upper()}: {msg['content']}"
            for msg in conversation_history[-6:]  # Last 3 exchanges
        ])

        prompt = f"""You are a tech stack advisor having a conversation with a user to understand their project requirements.

Conversation history:
{history_text}
USER: {user_response}

Current extracted context:
{json.dumps(current_context, indent=2)}

Your task is to:
1. Update the extracted context based on the user's latest response
2. Determine if we have enough information to generate recommendations
3. If not, generate the MOST IMPORTANT follow-up question with structured choices when applicable

Required information for recommendations:
- Application type (REQUIRED)
- Daily active users / scale (REQUIRED)
- Key features/functionality (REQUIRED)

Optional but helpful:
- Data sensitivity/compliance requirements
- Budget constraints
- Existing technology preferences

Respond with a JSON object:
{{
    "extracted_context": {{
        "app_type": "e-commerce platform" or null,
        "dau": 50000 or null,
        "key_features": ["product catalog", "checkout", "recommendations"] or null,
        "data_sensitivity": "high" or null,
        "compliance": ["pci_dss"] or null,
        "budget_constraint": 3000 or null,
        "tech_preferences": ["python", "react"] or null
    }},
    "ready_for_recommendation": false,
    "next_question": "What key features will your e-commerce platform need?" or null,
    "question_type": "choice" or "text",
    "options": ["Option 1", "Option 2", "Option 3", "None/Skip"] or null,
    "completion_percentage": 60,
    "reasoning": "Brief explanation"
}}

Guidelines for structured questions:
- Use question_type="choice" with options array for: DAU ranges, compliance frameworks, common features, tech preferences
- Use question_type="text" for: open-ended descriptions, custom requirements
- Always include "None" or "Skip" or "Not sure" as last option for choice questions
- Update ALL fields in extracted_context, even if they were previously set
- Set ready_for_recommendation to true ONLY when app_type, dau, and key_features are all known
- If ready_for_recommendation is true, set next_question to null
- Ask only ONE question at a time
- Be conversational and acknowledge what the user just said
- Extract numbers from vague descriptions (e.g., "small startup" → 1000-10000 DAU, use midpoint)

Example choice questions:
- DAU: ["Less than 1,000", "1,000 - 10,000", "10,000 - 100,000", "100,000 - 1M", "Over 1 million", "Not sure yet"]
- Compliance: ["GDPR", "HIPAA", "PCI DSS", "SOC 2", "None needed", "Not sure"]
- Features for e-commerce: ["Product catalog", "Payment processing", "User reviews", "Inventory management", "Recommendations", "Other"]
- Tech preferences: ["AWS", "Azure", "GCP", "No preference"]
"""

        response = self._call_llm(prompt, api_key=api_key)

        try:
            # Tolerant parse: handles fenced/prose-wrapped JSON as well.
            result = self._extract_json(response)
            self.logger.info(
                "conversation_update",
                completion=result.get("completion_percentage", 0),
                ready=result.get("ready_for_recommendation", False)
            )
            return result
        except json.JSONDecodeError:
            # Fallback preserves previously extracted context so progress
            # is never lost; include question_type/options to match the
            # documented schema.
            self.logger.error("conversation_json_parse_error", response=response[:200])
            return {
                "extracted_context": current_context,
                "ready_for_recommendation": False,
                "next_question": "Could you provide more details about your project?",
                "question_type": "text",
                "options": None,
                "completion_percentage": 0,
                "reasoning": "Failed to parse response"
            }