Tech Stack Advisor - Code Viewer

← Back to File Tree

embeddings.py

Language: python | Path: backend/src/rag/embeddings.py | Lines: 77
"""Embeddings utility using sentence-transformers."""
from typing import List
from sentence_transformers import SentenceTransformer
from ..core.logging import get_logger

logger = get_logger(__name__)


class EmbeddingModel:
    """Wrapper around a sentence-transformers model for text embeddings.

    The model is loaded eagerly in ``__init__``; :meth:`embed_text` and
    :meth:`embed_batch` return plain Python ``list`` vectors suitable for
    JSON serialization or insertion into a vector store.
    """

    def __init__(self, model_name: str = "all-MiniLM-L6-v2") -> None:
        """Initialize the embedding model.

        Args:
            model_name: Name of the sentence-transformers model.
                Default: "all-MiniLM-L6-v2" (384 dims, fast, good quality)
                Alternatives:
                - "all-mpnet-base-v2" (768 dims, best quality, slower)
                - "paraphrase-MiniLM-L3-v2" (384 dims, fastest)
        """
        self.model_name = model_name
        logger.info("loading_embedding_model", model=model_name)
        # NOTE(review): SentenceTransformer may fetch weights on first use —
        # this constructor can be slow/network-bound; confirm deployment caching.
        self.model = SentenceTransformer(model_name)
        # Vector size produced by this model (e.g. 384 for the MiniLM variants).
        self.dimension = self.model.get_sentence_embedding_dimension()
        logger.info(
            "embedding_model_loaded",
            model=model_name,
            dimension=self.dimension,
        )

    def embed_text(self, text: str) -> list[float]:
        """Generate an embedding for a single text.

        Args:
            text: Text to embed

        Returns:
            Embedding vector as list of floats
        """
        embedding = self.model.encode(text, convert_to_numpy=True)
        return embedding.tolist()

    def embed_batch(self, texts: list[str], batch_size: int = 32) -> list[list[float]]:
        """Generate embeddings for multiple texts.

        Args:
            texts: List of texts to embed
            batch_size: Batch size for processing

        Returns:
            List of embedding vectors, one per input text, in input order
        """
        logger.info("embedding_batch", count=len(texts), batch_size=batch_size)
        embeddings = self.model.encode(
            texts,
            batch_size=batch_size,
            convert_to_numpy=True,
            # Show a progress bar only for large batches to keep logs quiet.
            show_progress_bar=len(texts) > 100,
        )
        return embeddings.tolist()


# Process-wide embedding model, created on first request (lazy singleton).
_embedding_model: EmbeddingModel | None = None


def get_embedding_model() -> EmbeddingModel:
    """Get the global embedding model instance.

    Lazily constructs the default :class:`EmbeddingModel` on the first call
    and returns the same instance on every subsequent call.

    Returns:
        Singleton embedding model instance
    """
    global _embedding_model
    if _embedding_model is not None:
        return _embedding_model
    _embedding_model = EmbeddingModel()
    return _embedding_model