Skip to content

API Reference

config_manager

Configuration manager for AI models.

ConfigurationManager

Manages model configurations stored in JSON.

Source code in cli/config_manager.py
class ConfigurationManager:
    """Manages model configurations stored in a JSON file.

    The file holds a dict of the shape::

        {"selected_model": <name or None>, "models": {<name>: {...}}}

    Args:
        file_path: Path to the JSON config file. Defaults to
            ``~/.config/ai_model_manager/models_config.json``.
    """

    def __init__(self, file_path: Optional[str] = None):
        if file_path is None:
            config_dir = os.path.join(os.path.expanduser("~"), ".config", "ai_model_manager")
            file_path = os.path.join(config_dir, "models_config.json")

        self.file_path = file_path

        # Ensure the parent directory exists for explicitly supplied paths
        # too; previously only the default path got os.makedirs, so the first
        # write to a custom path in a missing directory raised FileNotFoundError.
        os.makedirs(os.path.dirname(self.file_path) or ".", exist_ok=True)

        if not os.path.exists(self.file_path):
            self._create_default_config()

    def _create_default_config(self) -> None:
        """Create the configuration file from the bundled default template."""
        default_config_path = os.path.join(
            os.path.dirname(os.path.dirname(__file__)), "config", "default_config.json"
        )

        try:
            with open(default_config_path, "r") as f:
                default_config = json.load(f)
        except (FileNotFoundError, json.JSONDecodeError):
            # Fallback if the bundled JSON template is missing or invalid.
            default_config = {"selected_model": None, "models": {}}

        self._write_config(default_config)
        print(f"Configuration file created at: {self.file_path}")

    def _read_config(self) -> Dict[str, Any]:
        """Read the configuration file, returning an empty config on error."""
        try:
            with open(self.file_path, "r") as f:
                return json.load(f)
        except (FileNotFoundError, json.JSONDecodeError):
            # Deliberate best-effort: a missing/corrupt file behaves like an
            # empty configuration rather than crashing the CLI.
            return {"selected_model": None, "models": {}}

    def _write_config(self, data: Dict[str, Any]) -> None:
        """Serialize *data* to the configuration file (pretty-printed)."""
        with open(self.file_path, "w") as f:
            json.dump(data, f, indent=4)

    def get_model_config(self, model_name: str) -> Optional[Dict[str, Any]]:
        """Return the stored settings for *model_name*, or None if unknown."""
        config = self._read_config()
        return config.get("models", {}).get(model_name)

    def set_model_config(
        self,
        model_name: str,
        provider: str,
        api_key: str,
        temperature: float = 0.7,
        max_tokens: Optional[int] = None,
    ) -> None:
        """Create or overwrite the configuration entry for *model_name*.

        Args:
            model_name: Model identifier (e.g. "gemini-2.5-flash").
            provider: Provider key (e.g. "google", "openai", "anthropic").
            api_key: Credential for the provider's API.
            temperature: Sampling temperature stored with the model.
            max_tokens: Optional response-length cap.
        """
        config = self._read_config()

        config.setdefault("models", {})[model_name] = {
            "provider": provider,
            "api_key": api_key,
            "temperature": temperature,
            "max_tokens": max_tokens,
        }

        self._write_config(config)
        print(f"Model '{model_name}' configured successfully.")

    def remove_model(self, model_name: str) -> None:
        """Remove *model_name*, clearing the selection if it was selected.

        Raises:
            ValueError: if the model is not configured.
        """
        config = self._read_config()

        if model_name not in config.get("models", {}):
            raise ValueError(f"Model '{model_name}' not found in configuration.")

        del config["models"][model_name]

        # Clear selected model if it was the removed one
        if config.get("selected_model") == model_name:
            config["selected_model"] = None

        self._write_config(config)
        print(f"Model '{model_name}' removed successfully.")

    def get_selected_model(self) -> Optional[str]:
        """Return the currently selected model name, or None."""
        config = self._read_config()
        return config.get("selected_model")

    def set_selected_model(self, model_name: str) -> None:
        """Mark *model_name* as the active model after validating it.

        Raises:
            ValueError: if the model is unconfigured or has no API key.
        """
        config = self._read_config()
        model_config = config.get("models", {}).get(model_name)

        if not model_config:
            raise ValueError(f"Model '{model_name}' is not configured.")

        if not model_config.get("api_key"):
            raise ValueError(f"Model '{model_name}' has no API key configured.")

        config["selected_model"] = model_name
        self._write_config(config)
        print(f"Selected model: {model_name}")

    def list_models(self) -> Dict[str, Dict[str, Any]]:
        """Return a mapping of all configured model names to their settings."""
        config = self._read_config()
        return config.get("models", {})

get_model_config(model_name)

Get configuration for a specific model.

Source code in cli/config_manager.py
def get_model_config(self, model_name: str) -> Optional[Dict[str, Any]]:
    """Return the stored settings for *model_name*, or None if unknown."""
    models = self._read_config().get("models", {})
    return models.get(model_name)

get_selected_model()

Get the currently selected model name.

Source code in cli/config_manager.py
def get_selected_model(self) -> Optional[str]:
    """Return the name of the currently selected model, or None."""
    return self._read_config().get("selected_model")

list_models()

Get all configured models.

Source code in cli/config_manager.py
def list_models(self) -> Dict[str, Dict[str, Any]]:
    """Return a mapping of all configured model names to their settings."""
    return self._read_config().get("models", {})

remove_model(model_name)

Remove a model from configuration.

Source code in cli/config_manager.py
def remove_model(self, model_name: str) -> None:
    """Delete *model_name* from the config, clearing the selection if needed.

    Raises:
        ValueError: if the model is not configured.
    """
    config = self._read_config()
    models = config.get("models", {})

    if model_name not in models:
        raise ValueError(f"Model '{model_name}' not found in configuration.")

    models.pop(model_name)

    # The removed model must not remain selected.
    if config.get("selected_model") == model_name:
        config["selected_model"] = None

    self._write_config(config)
    print(f"Model '{model_name}' removed successfully.")

set_model_config(model_name, provider, api_key, temperature=0.7, max_tokens=None)

Set or update configuration for a model.

Source code in cli/config_manager.py
def set_model_config(
    self,
    model_name: str,
    provider: str,
    api_key: str,
    temperature: float = 0.7,
    max_tokens: Optional[int] = None,
) -> None:
    """Create or overwrite the stored configuration for *model_name*."""
    config = self._read_config()

    entry = {
        "provider": provider,
        "api_key": api_key,
        "temperature": temperature,
        "max_tokens": max_tokens,
    }
    # setdefault covers a config file that has no "models" section yet.
    config.setdefault("models", {})[model_name] = entry

    self._write_config(config)
    print(f"Model '{model_name}' configured successfully.")

set_selected_model(model_name)

Set the active model.

Source code in cli/config_manager.py
def set_selected_model(self, model_name: str) -> None:
    """Mark *model_name* as the active model after validating it.

    Raises:
        ValueError: if the model is unconfigured or has no API key.
    """
    config = self._read_config()
    entry = config.get("models", {}).get(model_name)

    if not entry:
        raise ValueError(f"Model '{model_name}' is not configured.")
    if not entry.get("api_key"):
        raise ValueError(f"Model '{model_name}' has no API key configured.")

    config["selected_model"] = model_name
    self._write_config(config)
    print(f"Selected model: {model_name}")

llm

LLM interaction module.

generate_response(prompt, llm_service, history)

Generate and display AI response.

Source code in cli/llm.py
def generate_response(prompt: str, llm_service: LLMService, history: ChatHistory):
    """Generate an AI reply for *prompt* and render it to the terminal."""
    system_instruction = (
        "System prompt: Give response in short and MD format, "
        "if asked for commands then give commands and don't explain too much"
    )

    # Record the user turn with the steering instruction appended.
    history.append("user", f"{prompt}\n{system_instruction}")

    # Send the whole conversation so far as context.
    response = llm_service.generate(history.get_prompt())

    # Persist the reply, then render it as Markdown.
    history.append("assistant", response or "")
    prettify_llm_output(response)

llm_service

Service for LLM generation and orchestration.

LLMService

Service for generating LLM responses.

Source code in cli/llm_service.py
class LLMService:
    """Service for generating LLM responses."""

    def __init__(self, config_manager: Optional[ConfigurationManager] = None):
        """
        Initialize LLM service.

        Args:
            config_manager: Configuration manager instance
        """
        self.config_manager = config_manager or ConfigurationManager()
        # Cache of instantiated providers, keyed by model name.
        # NOTE(review): entries are never invalidated, so a config change made
        # after first use keeps serving the old provider within this process —
        # confirm this is acceptable for the CLI's lifetime.
        self._provider_cache: dict[str, ILLMProvider] = {}

    def _get_provider(self, model_name: str) -> ILLMProvider:
        """Get or create a provider instance for the model.

        Raises:
            ValueError: if the model is unconfigured or has no API key.
        """
        if model_name in self._provider_cache:
            return self._provider_cache[model_name]

        # Get model configuration
        model_config_dict = self.config_manager.get_model_config(model_name)
        if not model_config_dict:
            raise ValueError(f"Model '{model_name}' is not configured.")

        if not model_config_dict.get("api_key"):
            raise ValueError(f"Model '{model_name}' has no API key configured.")

        # Create ModelConfig object
        model_config = ModelConfig(
            name=model_name,
            provider=model_config_dict["provider"],
            api_key=model_config_dict["api_key"],
            temperature=model_config_dict.get("temperature", 0.7),
            max_tokens=model_config_dict.get("max_tokens"),
        )

        # Create provider using factory
        provider = LLMProviderFactory.create(model_config)

        # Cache the provider
        self._provider_cache[model_name] = provider

        return provider

    def generate(
        self,
        prompt: str,
        model_name: Optional[str] = None,
        show_spinner: bool = True,
        **kwargs,
    ) -> str:
        """Generate response from LLM.

        Args:
            prompt: Text prompt sent to the model.
            model_name: Model to use; defaults to the selected model, then to
                "gemini-2.5-flash" when nothing is selected.
            show_spinner: Animate a terminal spinner while waiting.
            **kwargs: Forwarded to the provider's generate().
        """
        if model_name is None:
            model_name = self.config_manager.get_selected_model()
            if not model_name:
                # Fall back to default
                model_name = "gemini-2.5-flash"

        # Get provider
        provider = self._get_provider(model_name)

        # Generate with optional spinner
        if show_spinner:
            # Run the spinner on a background thread until generation ends.
            stop_spinner = threading.Event()
            spinner_thread = threading.Thread(target=spin_loader, args=(stop_spinner,))
            spinner_thread.start()

            try:
                response = provider.generate(prompt, **kwargs)
            finally:
                # Always stop and join the spinner, even if generation raised,
                # so its last frame is erased before we return.
                stop_spinner.set()
                spinner_thread.join()
        else:
            response = provider.generate(prompt, **kwargs)

        return response

__init__(config_manager=None)

Initialize LLM service.

Parameters:

`config_manager` (`Optional[ConfigurationManager]`, default: `None`) — Configuration manager instance; when omitted, a default `ConfigurationManager` is constructed.
Source code in cli/llm_service.py
def __init__(self, config_manager: Optional[ConfigurationManager] = None):
    """
    Initialize LLM service.

    Args:
        config_manager: Configuration manager instance; a fresh
            ConfigurationManager is created when omitted.
    """
    self.config_manager = config_manager or ConfigurationManager()
    # Providers are cached per model name to avoid re-creating clients.
    self._provider_cache: dict[str, ILLMProvider] = {}

generate(prompt, model_name=None, show_spinner=True, **kwargs)

Generate response from LLM.

Source code in cli/llm_service.py
def generate(
    self,
    prompt: str,
    model_name: Optional[str] = None,
    show_spinner: bool = True,
    **kwargs,
) -> str:
    """Generate response from LLM.

    Args:
        prompt: Text prompt sent to the model.
        model_name: Model to use; defaults to the selected model, then to
            "gemini-2.5-flash" when nothing is selected.
        show_spinner: Animate a terminal spinner while waiting.
        **kwargs: Forwarded to the provider's generate().
    """
    if model_name is None:
        model_name = self.config_manager.get_selected_model()
        if not model_name:
            # Fall back to default
            model_name = "gemini-2.5-flash"

    # Get provider
    provider = self._get_provider(model_name)

    # Generate with optional spinner
    if show_spinner:
        # Run the spinner on a background thread until generation ends.
        stop_spinner = threading.Event()
        spinner_thread = threading.Thread(target=spin_loader, args=(stop_spinner,))
        spinner_thread.start()

        try:
            response = provider.generate(prompt, **kwargs)
        finally:
            # Always stop and join the spinner, even on error.
            stop_spinner.set()
            spinner_thread.join()
    else:
        response = provider.generate(prompt, **kwargs)

    return response

models

Models package for LLM abstraction layer. Provides interfaces and implementations for different LLM providers.

ILLMProvider

Bases: ABC

Interface for LLM providers.

Source code in cli/models/base.py
class ILLMProvider(ABC):
    """Abstract contract implemented by every concrete LLM provider."""

    @abstractmethod
    def generate(self, prompt: str, **kwargs) -> str:
        """Produce a completion for *prompt*."""

    @abstractmethod
    def get_provider_name(self) -> str:
        """Return a human-readable provider name."""

    @abstractmethod
    def validate_config(self) -> bool:
        """Check the model configuration, raising on problems."""

generate(prompt, **kwargs) abstractmethod

Generate a response from the LLM.

Source code in cli/models/base.py
@abstractmethod
def generate(self, prompt: str, **kwargs) -> str:
    """Generate a response from the LLM.

    Args:
        prompt: Text prompt to send to the model.
    """
    pass

get_provider_name() abstractmethod

Get the name of the provider.

Source code in cli/models/base.py
@abstractmethod
def get_provider_name(self) -> str:
    """Get the human-readable name of the provider."""
    pass

validate_config() abstractmethod

Validate the model configuration.

Source code in cli/models/base.py
@abstractmethod
def validate_config(self) -> bool:
    """Validate the model configuration; implementations raise on problems."""
    pass

LLMProviderFactory

Factory for creating LLM provider instances.

Source code in cli/models/factory.py
class LLMProviderFactory:
    """Factory mapping provider names (or model-name hints) to providers."""

    # Explicit provider-name aliases.
    _providers: Dict[str, Type[ILLMProvider]] = {
        "google": GeminiProvider,
        "gemini": GeminiProvider,
        "openai": OpenAIProvider,
        "anthropic": AnthropicProvider,
        "claude": AnthropicProvider,
    }

    # Substrings of model names used to guess the provider.
    _model_patterns = {
        "gemini": "google",
        "gpt": "openai",
        "claude": "anthropic",
    }

    @classmethod
    def create(cls, config: ModelConfig) -> ILLMProvider:
        """Instantiate the provider for *config*, inferring it when unknown.

        Raises:
            ValueError: when neither the provider key nor the model name
                matches a supported provider.
        """
        provider_cls = cls._providers.get(config.provider.lower())

        if provider_cls is None:
            inferred = cls._infer_provider_from_model(config.name)
            if inferred:
                provider_cls = cls._providers[inferred]

        if provider_cls is not None:
            return provider_cls(config)

        raise ValueError(
            f"Unsupported provider: {config.provider}. "
            f"Available: {', '.join(cls.get_supported_providers())}"
        )

    @classmethod
    def _infer_provider_from_model(cls, model_name: str) -> str | None:
        """Guess the provider key from a substring of *model_name*."""
        lowered = model_name.lower()
        return next(
            (provider for pattern, provider in cls._model_patterns.items() if pattern in lowered),
            None,
        )

    @classmethod
    def get_supported_providers(cls) -> list[str]:
        """Return the supported provider keys (unordered)."""
        return list(set(cls._providers))

create(config) classmethod

Create an LLM provider instance based on configuration.

Source code in cli/models/factory.py
@classmethod
def create(cls, config: ModelConfig) -> ILLMProvider:
    """Create an LLM provider instance based on configuration.

    Falls back to inferring the provider from the model name when the
    explicit provider key is not recognized.

    Raises:
        ValueError: when no provider matches.
    """
    provider_key = config.provider.lower()

    if provider_key in cls._providers:
        return cls._providers[provider_key](config)

    # Unknown key: try to guess from the model name (e.g. "gpt-4" -> openai).
    inferred_provider = cls._infer_provider_from_model(config.name)
    if inferred_provider:
        return cls._providers[inferred_provider](config)

    raise ValueError(
        f"Unsupported provider: {config.provider}. "
        f"Available: {', '.join(cls.get_supported_providers())}"
    )

get_supported_providers() classmethod

Get list of supported provider names.

Source code in cli/models/factory.py
@classmethod
def get_supported_providers(cls) -> list[str]:
    """Get list of supported provider names (deduplicated, unordered)."""
    return list(set(cls._providers.keys()))

ModelConfig dataclass

Configuration for an LLM model.

Source code in cli/models/base.py
@dataclass
class ModelConfig:
    """Configuration for an LLM model."""

    name: str  # model identifier, e.g. "gemini-2.5-flash"
    provider: str  # provider key, e.g. "google", "openai", "anthropic"
    api_key: str  # credential for the provider's API
    temperature: float = 0.7  # sampling temperature
    max_tokens: Optional[int] = None  # response token cap; None = provider default

anthropic_provider

Anthropic Claude provider implementation using LangChain.

AnthropicProvider

Bases: BaseLLMProvider

Anthropic Claude LLM provider.

Source code in cli/models/anthropic_provider.py
class AnthropicProvider(BaseLLMProvider):
    """LLM provider backed by Anthropic's Claude chat models."""

    def _create_llm(self) -> BaseChatModel:
        """Build the ChatAnthropic client from the stored configuration."""
        # Only pass max_tokens when a truthy cap is configured.
        extra = {"max_tokens": self.config.max_tokens} if self.config.max_tokens else {}
        return ChatAnthropic(
            model=self.config.name,
            anthropic_api_key=self.config.api_key,
            temperature=self.config.temperature,
            **extra,
        )

    def get_provider_name(self) -> str:
        """Return the display name of this provider."""
        return "Anthropic Claude"

base

Base interfaces and abstractions for LLM providers.

ILLMProvider

Bases: ABC

Interface for LLM providers.

Source code in cli/models/base.py
class ILLMProvider(ABC):
    """Interface every LLM provider implementation must satisfy."""

    @abstractmethod
    def generate(self, prompt: str, **kwargs) -> str:
        """Return the model's completion for *prompt*."""

    @abstractmethod
    def get_provider_name(self) -> str:
        """Return this provider's display name."""

    @abstractmethod
    def validate_config(self) -> bool:
        """Validate the model configuration."""
generate(prompt, **kwargs) abstractmethod

Generate a response from the LLM.

Source code in cli/models/base.py
@abstractmethod
def generate(self, prompt: str, **kwargs) -> str:
    """Generate a response from the LLM for the given prompt."""
    pass
get_provider_name() abstractmethod

Get the name of the provider.

Source code in cli/models/base.py
@abstractmethod
def get_provider_name(self) -> str:
    """Get the display name of the provider."""
    pass
validate_config() abstractmethod

Validate the model configuration.

Source code in cli/models/base.py
@abstractmethod
def validate_config(self) -> bool:
    """Validate the model configuration; implementations raise on problems."""
    pass

ModelConfig dataclass

Configuration for an LLM model.

Source code in cli/models/base.py
@dataclass
class ModelConfig:
    """Configuration for an LLM model."""

    name: str  # model identifier, e.g. "gpt-4"
    provider: str  # provider key understood by the factory
    api_key: str  # credential for the provider's API
    temperature: float = 0.7  # sampling temperature
    max_tokens: Optional[int] = None  # optional response token cap

base_provider

Base provider implementation with common functionality.

BaseLLMProvider

Bases: ILLMProvider

Base implementation for LLM providers.

Source code in cli/models/base_provider.py
class BaseLLMProvider(ILLMProvider):
    """Base implementation for LLM providers.

    Subclasses supply ``_create_llm`` (and ``get_provider_name``); this class
    handles config validation and the common generation path.
    """

    def __init__(self, config: ModelConfig):
        self.config = config
        self.validate_config()
        self.llm: BaseChatModel = self._create_llm()

    def _create_llm(self) -> BaseChatModel:
        """Create LLM instance. Must be implemented by subclasses."""
        raise NotImplementedError("Subclasses must implement _create_llm")

    def generate(self, prompt: str, **kwargs) -> str:
        """Generate response from LLM.

        A ``temperature`` keyword overrides the configured temperature for
        this call only. (Previously the override was written onto ``self.llm``
        and never reset, so it silently applied to every later call.)
        """
        if "temperature" in kwargs:
            previous_temperature = self.llm.temperature
            self.llm.temperature = kwargs["temperature"]
            try:
                response = self.llm.invoke(prompt)
            finally:
                # Restore so a one-off override does not leak into later calls.
                self.llm.temperature = previous_temperature
        else:
            response = self.llm.invoke(prompt)
        return response.content

    def validate_config(self) -> bool:
        """Validate configuration.

        Raises:
            ValueError: when the API key or model name is missing.
        """
        if not self.config.api_key:
            raise ValueError(f"API key is required for {self.get_provider_name()}")
        if not self.config.name:
            raise ValueError("Model name is required")
        return True
generate(prompt, **kwargs)

Generate response from LLM.

Source code in cli/models/base_provider.py
def generate(self, prompt: str, **kwargs) -> str:
    """Generate response from LLM.

    Accepts an optional ``temperature`` keyword override.
    """
    # NOTE(review): the override is written onto self.llm and never restored,
    # so it persists for all subsequent calls — confirm this is intended.
    if "temperature" in kwargs:
        self.llm.temperature = kwargs["temperature"]
    response = self.llm.invoke(prompt)
    return response.content
validate_config()

Validate configuration.

Source code in cli/models/base_provider.py
def validate_config(self) -> bool:
    """Confirm the configuration carries an API key and a model name.

    Raises:
        ValueError: when either is missing.
    """
    cfg = self.config
    if not cfg.api_key:
        raise ValueError(f"API key is required for {self.get_provider_name()}")
    if not cfg.name:
        raise ValueError("Model name is required")
    return True

factory

Factory for creating LLM provider instances.

LLMProviderFactory

Factory for creating LLM provider instances.

Source code in cli/models/factory.py
class LLMProviderFactory:
    """Factory that resolves a ModelConfig to a concrete provider class."""

    # Recognized provider keys and their implementations.
    _providers: Dict[str, Type[ILLMProvider]] = {
        "google": GeminiProvider,
        "gemini": GeminiProvider,
        "openai": OpenAIProvider,
        "anthropic": AnthropicProvider,
        "claude": AnthropicProvider,
    }

    # Model-name substrings used for provider inference.
    _model_patterns = {
        "gemini": "google",
        "gpt": "openai",
        "claude": "anthropic",
    }

    @classmethod
    def create(cls, config: ModelConfig) -> ILLMProvider:
        """Build a provider for *config*, inferring it from the model name
        when the explicit provider key is unknown.

        Raises:
            ValueError: when no provider matches.
        """
        key = config.provider.lower()
        if key not in cls._providers:
            key = cls._infer_provider_from_model(config.name)

        if key:
            return cls._providers[key](config)

        raise ValueError(
            f"Unsupported provider: {config.provider}. "
            f"Available: {', '.join(cls.get_supported_providers())}"
        )

    @classmethod
    def _infer_provider_from_model(cls, model_name: str) -> str | None:
        """Return the provider key hinted at by *model_name*, if any."""
        lowered = model_name.lower()
        for pattern, provider in cls._model_patterns.items():
            if pattern in lowered:
                return provider
        return None

    @classmethod
    def get_supported_providers(cls) -> list[str]:
        """Return the supported provider keys (unordered)."""
        return list(set(cls._providers))
create(config) classmethod

Create an LLM provider instance based on configuration.

Source code in cli/models/factory.py
@classmethod
def create(cls, config: ModelConfig) -> ILLMProvider:
    """Create an LLM provider instance based on configuration.

    Falls back to inferring the provider from the model name when the
    explicit provider key is not recognized.

    Raises:
        ValueError: when no provider matches.
    """
    provider_key = config.provider.lower()

    if provider_key in cls._providers:
        return cls._providers[provider_key](config)

    # Unknown key: guess from the model name (e.g. "claude-3" -> anthropic).
    inferred_provider = cls._infer_provider_from_model(config.name)
    if inferred_provider:
        return cls._providers[inferred_provider](config)

    raise ValueError(
        f"Unsupported provider: {config.provider}. "
        f"Available: {', '.join(cls.get_supported_providers())}"
    )
get_supported_providers() classmethod

Get list of supported provider names.

Source code in cli/models/factory.py
@classmethod
def get_supported_providers(cls) -> list[str]:
    """Get list of supported provider names (deduplicated, unordered)."""
    return list(set(cls._providers.keys()))

gemini_provider

Google Gemini provider implementation using LangChain.

GeminiProvider

Bases: BaseLLMProvider

Google Gemini LLM provider.

Source code in cli/models/gemini_provider.py
class GeminiProvider(BaseLLMProvider):
    """LLM provider backed by Google's Gemini chat models."""

    def _create_llm(self) -> BaseChatModel:
        """Build the ChatGoogleGenerativeAI client from the stored config."""
        # Gemini names its cap "max_output_tokens"; only pass it when set.
        extra = (
            {"max_output_tokens": self.config.max_tokens} if self.config.max_tokens else {}
        )
        return ChatGoogleGenerativeAI(
            model=self.config.name,
            google_api_key=self.config.api_key,
            temperature=self.config.temperature,
            **extra,
        )

    def get_provider_name(self) -> str:
        """Return the display name of this provider."""
        return "Google Gemini"

openai_provider

OpenAI provider implementation using LangChain.

OpenAIProvider

Bases: BaseLLMProvider

OpenAI LLM provider.

Source code in cli/models/openai_provider.py
class OpenAIProvider(BaseLLMProvider):
    """LLM provider backed by OpenAI chat models."""

    def _create_llm(self) -> BaseChatModel:
        """Build the ChatOpenAI client from the stored configuration."""
        # Only forward max_tokens when a truthy cap is configured.
        extra = {"max_tokens": self.config.max_tokens} if self.config.max_tokens else {}
        return ChatOpenAI(
            model=self.config.name,
            openai_api_key=self.config.api_key,
            temperature=self.config.temperature,
            **extra,
        )

    def get_provider_name(self) -> str:
        """Return the display name of this provider."""
        return "OpenAI"

prompt

CLI entry point for Vertex-CLI.

main()

Main CLI entry point.

Source code in cli/prompt.py
def main():
    """Main CLI entry point.

    Dispatch happens in two phases: a fast path that treats any leading
    token outside the known subcommands as a chat prompt, then a full
    argparse pass for the structured subcommands.
    """
    raw = sys.argv[1:]
    known_cmds = ["chat", "debug", "config", "list", "remove", "select"]

    # Initialize services
    config_manager = ConfigurationManager()
    llm_service = LLMService(config_manager)
    history = ChatHistory(HISTORY_FILE)

    # Setup shortcut: handle --setup before any parsing so it works alone.
    if raw and raw[0] == "--setup":
        config_manager._create_default_config()
        print("Default configuration created.")
        return

    # Default chat if no subcommand: "tex hello there" chats directly.
    if raw and raw[0] not in known_cmds:
        prompt_text = " ".join(raw)
        generate_response(prompt_text, llm_service, history)
        return

    # Subcommand parsing
    parser = argparse.ArgumentParser(
        prog="tex", description="CLI for interacting with multiple LLMs via LangChain"
    )
    parser.add_argument("--setup", action="store_true", help="Create default config")
    subparsers = parser.add_subparsers(dest="command")

    # chat
    chat_parser = subparsers.add_parser("chat", help="Send a prompt to the LLM")
    chat_parser.add_argument("text", nargs="+", help="Prompt text")

    # debug
    debug_parser = subparsers.add_parser("debug", help="Debug recent bash commands")
    debug_parser.add_argument(
        "-n",
        "--number",
        type=int,
        default=DEFAULT_BASH_HISTORY_COUNT,
        help="Number of recent commands",
    )
    debug_parser.add_argument("-p", "--prompt", type=str, help="Additional explanation prompt")

    # config
    config_parser = subparsers.add_parser(
        "config", help="Configure a model (usage: config <model> <api_key>)"
    )
    config_parser.add_argument("model", help="Model name (e.g., gemini-2.5-flash, gpt-4)")
    config_parser.add_argument("key", help="API key")
    config_parser.add_argument(
        "--provider", type=str, help="Provider name (auto-detected if not specified)"
    )
    config_parser.add_argument(
        "--temperature", type=float, default=0.7, help="Temperature (0.0-1.0)"
    )

    # list
    subparsers.add_parser("list", help="List configured models")

    # remove
    remove_parser = subparsers.add_parser("remove", help="Remove a configured model")
    remove_parser.add_argument("model", help="Model name")

    # select
    select_parser = subparsers.add_parser("select", help="Select active model")
    select_parser.add_argument("model", help="Model name")

    args = parser.parse_args(raw)

    if args.setup:
        config_manager._create_default_config()
        print("Default configuration created.")

    elif args.command == "chat":
        prompt_text = " ".join(args.text)
        generate_response(prompt_text, llm_service, history)

    elif args.command == "debug":
        # Feed the last N shell commands plus any extra context to the LLM.
        bash = get_bash_history(args.number)
        dprompt = f"{bash}{args.prompt or ''} "
        dprompt += "output what is wrong with the commands used and suggest correct ones"
        generate_response(dprompt, llm_service, history)

    elif args.command == "config":
        # Auto-detect provider if not specified
        # NOTE(review): this substring matching duplicates
        # LLMProviderFactory._model_patterns — keep the two in sync.
        provider = args.provider
        if not provider:
            model_lower = args.model.lower()
            if "gemini" in model_lower:
                provider = "google"
            elif "gpt" in model_lower or "openai" in model_lower:
                provider = "openai"
            elif "claude" in model_lower:
                provider = "anthropic"
            else:
                print(f"Could not auto-detect provider for '{args.model}'")
                print("Please specify provider with --provider")
                print("Available: google, openai, anthropic")
                return

        config_manager.set_model_config(
            model_name=args.model,
            provider=provider,
            api_key=args.key,
            temperature=args.temperature,
        )

    elif args.command == "list":
        print("\nConfigured models:")
        print("-" * 60)
        models = config_manager.list_models()
        selected = config_manager.get_selected_model()

        if not models:
            print("No models configured.")
        else:
            for name, config in models.items():
                selected_marker = " [SELECTED]" if name == selected else ""
                api_key_status = "✓" if config.get("api_key") else "✗"
                print(f"{name}{selected_marker}")
                print(f"  Provider: {config.get('provider', 'N/A')}")
                print(f"  API Key: {api_key_status}")
                print(f"  Temperature: {config.get('temperature', 0.7)}")
                print()

    elif args.command == "remove":
        try:
            config_manager.remove_model(args.model)
        except ValueError as e:
            print(f"Error: {e}")

    elif args.command == "select":
        try:
            config_manager.set_selected_model(args.model)
        except ValueError as e:
            print(f"Error: {e}")

    else:
        parser.print_help()

utils

install_requirements()

Install required dependencies.

Source code in cli/utils.py
def install_requirements():
    """Install required dependencies via pip.

    Best-effort: installs proceed even if one package fails, but failures
    are now reported at the end instead of pip's exit status being silently
    discarded.
    """
    dependencies = [
        "rich>=14.0.0",
        "langchain>=0.1.0",
        "langchain-google-genai>=1.0.0",
        "langchain-openai>=0.0.5",
        "langchain-anthropic>=0.1.0",
    ]
    failed = []
    for package in dependencies:
        # List argv (shell=False) avoids shell-quoting issues.
        result = subprocess.run([sys.executable, "-m", "pip", "install", package])
        if result.returncode != 0:
            failed.append(package)
    if failed:
        print(f"Failed to install: {', '.join(failed)}")

prettify_llm_output(response)

Prettify LLM output using Rich markdown.

Source code in cli/utils.py
def prettify_llm_output(response):
    """Prettify LLM output using Rich markdown.

    Args:
        response: Markdown text to render; ``None`` is tolerated (the LLM
            layer can yield it) and renders as empty output.
    """
    console = Console()
    # Guard against None: callers store `response or ""` in history but were
    # passing the raw value here, which crashed on .strip().
    md = Markdown((response or "").strip())
    console.print("\n", md, "\n")

spin_loader(stop_event)

Display a spinning loader.

Source code in cli/utils.py
def spin_loader(stop_event):
    """Animate a four-frame spinner on stdout until *stop_event* is set."""
    frames = itertools.cycle(["-", "/", "|", "\\"])
    while not stop_event.is_set():
        sys.stdout.write(next(frames))
        sys.stdout.flush()
        time.sleep(0.1)
        sys.stdout.write("\b")  # rewind over the frame just drawn
    # Blank out the final frame so the prompt stays clean.
    sys.stdout.write(" ")
    sys.stdout.flush()