API Reference

ai_model_manager

AIModelManager

A class to manage AI model configurations stored in a JSON file. Allows adding, removing, selecting models, and generating output via LLM APIs.

Source code in cli/ai_model_manager.py
class AIModelManager:
    """
    A class to manage AI model configurations stored in a JSON file.
    Allows adding, removing, selecting models, and generating output via LLM APIs.
    """

    def __init__(self, file_path=None):
        """
        Initialize the AIModelManager instance.
        Creates the default configuration file if it doesn't exist.

        Args:
            file_path (str, optional): Path to the JSON configuration file.
                If None, defaults to ~/.config/ai_model_manager/models_api.json.
        """
        if file_path is None:
            config_dir = os.path.join(
                os.path.expanduser("~"), ".config", "ai_model_manager"
            )
            os.makedirs(config_dir, exist_ok=True)
            file_path = os.path.join(config_dir, "models_api.json")

        self.file_path = file_path

        if not os.path.exists(self.file_path):
            self.create_default_file()

    def _read_json(self):
        """
        Internal method to read JSON configuration data from file.

        Returns:
            dict: Parsed JSON data or an empty dict if file not found.
        """
        try:
            with open(self.file_path, "r") as f:
                return json.load(f)
        except FileNotFoundError:
            return {}

    def _write_json(self, data):
        """
        Internal method to write JSON data to file.

        Args:
            data (dict): Data to write into the JSON file.
        """
        with open(self.file_path, "w") as f:
            json.dump(data, f, indent=4)

    def create_default_file(self):
        """
        Creates a configuration file with default model entries.
        Includes a pre-set API key for `gemini-1.5-flash`.
        """
        default_config = {
            "selected_model": None,
            "gemini-1.5-flash": "AIzaSyCSXtRAITXfGuarMHI1j-0QyKkoT9mUfz8",
            "gemini-1.5-interactive": None,
            "gemini-1.5-creative": None,
        }
        self._write_json(default_config)
        print(f"Config file created at: {self.file_path}")

    def load(self):
        """
        Loads the current configuration from the JSON file.

        Returns:
            dict: Configuration data including selected model and keys.
        """
        return self._read_json()

    def configure_model(self, model_name, api_key):
        """
        Adds or updates a model entry with a given API key.

        Args:
            model_name (str): The name of the model to configure.
            api_key (str): The API key associated with the model.
        """
        data = self._read_json()
        data[model_name] = api_key
        self._write_json(data)
        print("Model added successfully.")

    def remove_model(self, model_name):
        """
        Removes a model from the configuration.

        Args:
            model_name (str): The name of the model to remove.

        Raises:
            ValueError: If the model is not found in the configuration.
        """
        data = self._read_json()
        if model_name in data:
            del data[model_name]
            self._write_json(data)
            print(f"Model '{model_name}' removed successfully.")
        else:
            raise ValueError(f"Model '{model_name}' not found.")

    def get_api_key(self, model_name):
        """
        Retrieves the API key for a given model.

        Args:
            model_name (str): The name of the model.

        Returns:
            str: The API key associated with the model.

        Raises:
            ValueError: If the model is not available or lacks an API key.
        """
        data = self._read_json()
        if model_name in data and data[model_name]:
            return data[model_name]
        raise ValueError(f"Model '{model_name}' is not available or has no API key.")

    def list_models(self):
        """
        Prints the list of models and their corresponding API keys (if available).
        """
        data = self._read_json()
        if data:
            for model, key in data.items():
                print(f"{model}: {key}")
        else:
            print("No models found.")

    def select_model(self, model_name):
        """
        Sets the specified model as the default selected model.

        Args:
            model_name (str): The name of the model to select.

        Prints a message based on whether the selection was successful.
        """
        data = self._read_json()
        if model_name in data and data[model_name]:
            data["selected_model"] = model_name
            self._write_json(data)
            print(f"Selected model: {model_name}")
        else:
            print("No models found or no API key for that model.")

    def generate_output(self, model_name, prompt_by_user):
        """
        Generates LLM output using the specified model and user prompt.

        This method displays a spinner while waiting for a response from the model.

        Args:
            model_name (str): The name of the model to use.
            prompt_by_user (str): The prompt to send to the model.

        Returns:
            str: The model's output.
        """
        stop_spinner = threading.Event()
        spinner_thread = threading.Thread(target=spin_loader, args=(stop_spinner,))
        spinner_thread.start()

        output = gemini_api_output(model_name, prompt_by_user)

        stop_spinner.set()
        spinner_thread.join()

        return output
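
A minimal usage sketch (the key shown is a placeholder, not a real credential):

manager = AIModelManager()                                   # creates the default config file on first run
manager.configure_model("gemini-1.5-flash", "YOUR_API_KEY")  # add or update a model entry
manager.select_model("gemini-1.5-flash")                     # make it the default model
print(manager.generate_output("gemini-1.5-flash", "Explain the tar command briefly."))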

__init__(file_path=None)

Initialize the AIModelManager instance. Creates the default configuration file if it doesn't exist.

Parameters:

    file_path (str, optional): Path to the JSON configuration file.
        If None, defaults to ~/.config/ai_model_manager/models_api.json.
        Default: None.
Source code in cli/ai_model_manager.py
def __init__(self, file_path=None):
    """
    Initialize the AIModelManager instance.
    Creates the default configuration file if it doesn't exist.

    Args:
        file_path (str, optional): Path to the JSON configuration file.
            If None, defaults to ~/.config/ai_model_manager/models_api.json.
    """
    if file_path is None:
        config_dir = os.path.join(
            os.path.expanduser("~"), ".config", "ai_model_manager"
        )
        os.makedirs(config_dir, exist_ok=True)
        file_path = os.path.join(config_dir, "models_api.json")

    self.file_path = file_path

    if not os.path.exists(self.file_path):
        self.create_default_file()

configure_model(model_name, api_key)

Adds or updates a model entry with a given API key.

Parameters:

    model_name (str): The name of the model to configure.
    api_key (str): The API key associated with the model.
Source code in cli/ai_model_manager.py
def configure_model(self, model_name, api_key):
    """
    Adds or updates a model entry with a given API key.

    Args:
        model_name (str): The name of the model to configure.
        api_key (str): The API key associated with the model.
    """
    data = self._read_json()
    data[model_name] = api_key
    self._write_json(data)
    print("Model added successfully.")

create_default_file()

Creates a configuration file with default model entries. Includes a pre-set API key for gemini-1.5-flash.

Source code in cli/ai_model_manager.py
def create_default_file(self):
    """
    Creates a configuration file with default model entries.
    Includes a pre-set API key for `gemini-1.5-flash`.
    """
    default_config = {
        "selected_model": None,
        "gemini-1.5-flash": "AIzaSyCSXtRAITXfGuarMHI1j-0QyKkoT9mUfz8",
        "gemini-1.5-interactive": None,
        "gemini-1.5-creative": None,
    }
    self._write_json(default_config)
    print(f"Config file created at: {self.file_path}")

generate_output(model_name, prompt_by_user)

Generates LLM output using the specified model and user prompt.

This method displays a spinner while waiting for a response from the model.

Parameters:

    model_name (str): The name of the model to use.
    prompt_by_user (str): The prompt to send to the model.

Returns:

    str: The model's output.

Source code in cli/ai_model_manager.py
def generate_output(self, model_name, prompt_by_user):
    """
    Generates LLM output using the specified model and user prompt.

    This method displays a spinner while waiting for a response from the model.

    Args:
        model_name (str): The name of the model to use.
        prompt_by_user (str): The prompt to send to the model.

    Returns:
        str: The model's output.
    """
    stop_spinner = threading.Event()
    spinner_thread = threading.Thread(target=spin_loader, args=(stop_spinner,))
    spinner_thread.start()

    output = gemini_api_output(model_name, prompt_by_user)

    stop_spinner.set()
    spinner_thread.join()

    return output
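
For example, assuming an API key has already been configured for the model:

manager = AIModelManager()
answer = manager.generate_output("gemini-1.5-flash", "Summarise the common grep flags")
print(answer)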

get_api_key(model_name)

Retrieves the API key for a given model.

Parameters:

    model_name (str): The name of the model.

Returns:

    str: The API key associated with the model.

Raises:

    ValueError: If the model is not available or lacks an API key.

Source code in cli/ai_model_manager.py
def get_api_key(self, model_name):
    """
    Retrieves the API key for a given model.

    Args:
        model_name (str): The name of the model.

    Returns:
        str: The API key associated with the model.

    Raises:
        ValueError: If the model is not available or lacks an API key.
    """
    data = self._read_json()
    if model_name in data and data[model_name]:
        return data[model_name]
    raise ValueError(f"Model '{model_name}' is not available or has no API key.")
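
Because a missing or keyless model raises ValueError, callers typically guard the lookup; a short sketch:

manager = AIModelManager()
try:
    api_key = manager.get_api_key("gemini-1.5-creative")
except ValueError as err:
    print(err)  # e.g. "Model 'gemini-1.5-creative' is not available or has no API key."
    api_key = None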

list_models()

Prints the list of models and their corresponding API keys (if available).

Source code in cli/ai_model_manager.py
def list_models(self):
    """
    Prints the list of models and their corresponding API keys (if available).
    """
    data = self._read_json()
    if data:
        for model, key in data.items():
            print(f"{model}: {key}")
    else:
        print("No models found.")

load()

Loads the current configuration from the JSON file.

Returns:

    dict: Configuration data including the selected model and API keys.

Source code in cli/ai_model_manager.py
def load(self):
    """
    Loads the current configuration from the JSON file.

    Returns:
        dict: Configuration data including selected model and keys.
    """
    return self._read_json()

remove_model(model_name)

Removes a model from the configuration.

Parameters:

    model_name (str): The name of the model to remove.

Raises:

    ValueError: If the model is not found in the configuration.

Source code in cli/ai_model_manager.py
def remove_model(self, model_name):
    """
    Removes a model from the configuration.

    Args:
        model_name (str): The name of the model to remove.

    Raises:
        ValueError: If the model is not found in the configuration.
    """
    data = self._read_json()
    if model_name in data:
        del data[model_name]
        self._write_json(data)
        print(f"Model '{model_name}' removed successfully.")
    else:
        raise ValueError(f"Model '{model_name}' not found.")

select_model(model_name)

Sets the specified model as the default selected model.

Parameters:

    model_name (str): The name of the model to select.

Prints a message based on whether the selection was successful.

Source code in cli/ai_model_manager.py
def select_model(self, model_name):
    """
    Sets the specified model as the default selected model.

    Args:
        model_name (str): The name of the model to select.

    Prints a message based on whether the selection was successful.
    """
    data = self._read_json()
    if model_name in data and data[model_name]:
        data["selected_model"] = model_name
        self._write_json(data)
        print(f"Selected model: {model_name}")
    else:
        print("No models found or no API key for that model.")

llm

gemini_api_output(model_name, prompt_by_user)

Generate AI response using specified model.

Parameters:

    model_name (str): Name of the AI model to use.
    prompt_by_user (str): The user's input prompt.

Returns:

    str: Generated response text.

Source code in cli/llm.py
def gemini_api_output(model_name, prompt_by_user):
    """
    Generate AI response using specified model.

    Args:
        model_name (str): Name of the AI model to use.
        prompt_by_user (str): User's input prompt.

    Returns:
        str: Generated response text.
    """
    from cli.ai_model_manager import AIModelManager

    manager = AIModelManager()
    api_key = manager.get_api_key(model_name)

    client = genai.Client(api_key=api_key)
    response = client.models.generate_content(model=model_name, contents=prompt_by_user)

    return response.text
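
A direct call, assuming the model has already been configured with a valid key via AIModelManager:

from cli.llm import gemini_api_output

text = gemini_api_output("gemini-1.5-flash", "What does chmod 755 do?")
print(text)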

prettify_llm_output

prettify_llm_output(response)

Prettifies a language model response by stripping leading and trailing whitespace and code block markers, then printing it as Markdown to the console.

Parameters:

    response (str): The raw response from the language model.

Returns:

    None

Source code in cli/prettify_llm_output.py
def prettify_llm_output(response):
    """
    Prettifies a language model response by stripping leading and
    trailing whitespace and code block markers, then printing it as
    Markdown to the console.

    Args:
        response (str): The raw response from the language model.

    Returns:
        None
    """
    markdown_output = response.strip().strip("```")
    console = Console()
    md = Markdown(markdown_output)
    print()
    console.print(md)
    print()
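
For example, a fenced response is unwrapped and rendered with rich:

raw = "```\n# Title\n- first point\n- second point\n```"
prettify_llm_output(raw)  # strips the backtick fences and prints formatted Markdown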

prompt

debug_last_command_line_prompt(prompt_by_user, all_input_flags)

Analyzes and debugs the most recent shell commands using the LLM.

Parameters:

    prompt_by_user (str or None): Optional user prompt to append.
    all_input_flags (list): List of input flags provided in the CLI.
Source code in cli/prompt.py
def debug_last_command_line_prompt(prompt_by_user, all_input_flags):
    """
    Analyzes and debugs the most recent shell commands using the LLM.

    Args:
        prompt_by_user (str or None): Optional user prompt to append.
        all_input_flags (list): List of input flags provided in the CLI.
    """
    last_number_of_commands = (
        int(all_input_flags[2]) if len(all_input_flags) == 3 else 3
    )
    if prompt_by_user:
        prompt_by_vertex = (
            last_command_line_prompt(last_number_of_commands)
            + prompt_by_user
            + " basically output what is wrong with the commands used and suggest right ones"
        )
    else:
        prompt_by_vertex = (
            last_command_line_prompt(last_number_of_commands)
            + " output what is wrong with the commands used and suggest right ones, don’t explain about tex command"
        )
    print("Prompt by vertex:", prompt_by_vertex)
    print()
    prompt_for_llm(prompt_by_vertex)

handle_all_quries()

Main logic to handle prompt input, debugging, or flag-based commands from the CLI.

Source code in cli/prompt.py
def handle_all_quries():
    """
    Main logic to handle prompt input, debugging, or flag-based commands from the CLI.
    """
    prompt_by_user, all_input_flags = user_command_line_prompt()
    if prompt_by_user:
        prompt_for_llm(prompt_by_user)
    elif len(all_input_flags) > 1 and all_input_flags[1] == "debug":
        debug_last_command_line_prompt(prompt_by_user, all_input_flags)
    handle_input_flags(all_input_flags)

handle_input_flags(all_input_flags)

Handles input flags such as configuring, removing, selecting, or listing models.

Parameters:

    all_input_flags (list): List of input flags from the CLI.
Source code in cli/prompt.py
def handle_input_flags(all_input_flags):
    """
    Handles input flags such as configuring, removing, selecting, or listing models.

    Args:
        all_input_flags (list): List of input flags from the CLI.
    """
    if all_input_flags:
        if not all_input_flags[0] == "":
            print(
                "Prompt should be quoted in double quotes, and the flags must be spaced out"
            )

        for flag in all_input_flags:
            flags_list = flag.split(" ")
            if flag.startswith("config"):
                manager.configure_model(flags_list[1], flags_list[2])
                print(
                    f"Configured model: {flags_list[1]} with API key: {flags_list[2]}"
                )
            elif flag == "list":
                print("Listing all models:")
                manager.list_models()
            elif flag.startswith("remove"):
                print("Removing model:", flags_list[1])
                manager.remove_model(flags_list[1])
            elif flag.startswith("select"):
                manager.select_model(flags_list[1])
            elif flag == "help":
                print("Usage: python3 main.py <prompt>")
                print("Example: python3 main.py 'How are you?'")
                print("Flags are: --config <model_name> <api_key>, remove <model_name>")
                print()
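
Given the flag strings produced by user_command_line_prompt, a call could look like this (MY_KEY is a placeholder; manager is the module-level AIModelManager instance in cli/prompt.py):

handle_input_flags(["", "config gemini-1.5-flash MY_KEY", "list"])
# -> configures gemini-1.5-flash with MY_KEY, then prints all stored models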

last_command_line_prompt(last_number_of_commands)

Retrieves the last N commands from the user's bash history.

Parameters:

Name Type Description Default
last_number_of_commands int

Number of recent commands to retrieve.

required

Returns:

Name Type Description
str

The last N commands as a single string.

Source code in cli/prompt.py
def last_command_line_prompt(last_number_of_commands):
    """
    Retrieves the last N commands from the user's bash history.

    Args:
        last_number_of_commands (int): Number of recent commands to retrieve.

    Returns:
        str: The last N commands as a single string.
    """
    history_file = os.path.expanduser("~/.bash_history")
    with open(history_file, "r") as file:
        history_lines = file.readlines()
    last_commands = history_lines[-last_number_of_commands:]
    return "".join(last_commands)

main()

Entry point for the CLI tool. Initializes config file or processes CLI inputs.

Source code in cli/prompt.py
def main():
    """
    Entry point for the CLI tool. Initializes config file or processes CLI inputs.
    """
    if len(sys.argv) > 1 and sys.argv[1] == "--setup":
        manager.create_default_file()
    else:
        handle_all_quries()

prompt_for_llm(prompt_for_llm)

Sends a prompt to the selected LLM model and prints the response.

Parameters:

    prompt_for_llm (str): The prompt to send to the model.
Source code in cli/prompt.py
def prompt_for_llm(prompt_for_llm):
    """
    Sends a prompt to the selected LLM model and prints the response.

    Args:
        prompt_for_llm (str): The prompt to send to the model.
    """
    prompt_for_llm += " give response in short form, if asked for commands then give commands and dont explain too much"
    models_api_dict = manager.load()
    model_name = models_api_dict.get("selected_model") or "gemini-1.5-flash"  # .get() avoids a KeyError if the key is missing
    response = manager.generate_output(model_name, prompt_for_llm)
    prettify_llm_output(response)

user_command_line_prompt()

Parses command-line arguments to separate the user's prompt and any additional flags.

Returns:

    tuple: The user's prompt (str or None) and a list of input flags.

Source code in cli/prompt.py
def user_command_line_prompt():
    """
    Parses command-line arguments to separate the user's prompt and any additional flags.

    Returns:
        tuple: A tuple containing the user's prompt (str or None) and a list of input flags.
    """
    args = [x for x in sys.argv]

    if len(args) > 1 and not args[1].startswith("--"):
        prompt_by_user = args[1]
        entire_cmd_command = " ".join(args[2:])
    else:
        prompt_by_user = None
        entire_cmd_command = " ".join(args[1:])

    all_input_flags = entire_cmd_command.split("--")
    all_input_flags = [x.strip() for x in all_input_flags]

    return prompt_by_user, all_input_flags
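
A worked example of the "--" splitting, simulating the interpreter's argument list:

import sys

sys.argv = ["main.py", "How are you?", "--select", "gemini-1.5-flash", "--list"]
prompt, flags = user_command_line_prompt()
# prompt == "How are you?"
# flags  == ["", "select gemini-1.5-flash", "list"]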

utils

install_requirements()

Installs the required dependencies for the application.

Source code in cli/utils.py
def install_requirements():
    """
    Installs the required dependencies for the application.
    """

    dependencies = ["rich==14.0.0", "google-genai"]
    for package in dependencies:
        subprocess.run([sys.executable, "-m", "pip", "install", package])

spin_loader(stop_event)

Displays a spinning loader in the console until the stop_event is set.

Parameters:

    stop_event (threading.Event): An event object used to signal the loader to stop.
Source code in cli/utils.py
def spin_loader(stop_event):
    """
    Displays a spinning loader in the console until the stop_event is set.

    Args:
        stop_event (threading.Event): An event object used to signal the loader to stop.
    """
    spinner = itertools.cycle(["-", "/", "|", "\\"])
    while not stop_event.is_set():
        sys.stdout.write(next(spinner))
        sys.stdout.flush()
        time.sleep(0.1)
        sys.stdout.write("\b")
    sys.stdout.write(" ")
    sys.stdout.flush()
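
Standalone usage pairs the loader with a threading.Event; here time.sleep stands in for a real blocking operation:

import threading
import time

stop = threading.Event()
worker = threading.Thread(target=spin_loader, args=(stop,))
worker.start()
time.sleep(2)  # placeholder for blocking work
stop.set()
worker.join()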