diff --git a/docs/cli_reference.md b/docs/cli_reference.md index 20c06029..d3e403b7 100644 --- a/docs/cli_reference.md +++ b/docs/cli_reference.md @@ -1,252 +1,882 @@ # CLI Reference -You may use the `llama-stack-client` to query information about the distribution. +Welcome to the llama-stack-client CLI - a command-line interface for interacting with Llama Stack -#### `llama-stack-client` -```bash -$ llama-stack-client -h +``` +Usage: llama-stack-client [OPTIONS] COMMAND [ARGS]... +``` + +**Options** + +* **-h, --help**: Show this message and exit. [default: False] + +* **--version**: Show the version and exit. [default: False] + +* **--endpoint**: Llama Stack distribution endpoint [default: ] + +* **--api-key**: Llama Stack distribution API key [default: ] + +* **--config**: Path to config file + +**Commands** + +* **configure**: Configure Llama Stack Client CLI. + +* **datasets**: Manage datasets. + +* **eval**: Run evaluation tasks. + +* **eval_tasks**: Manage evaluation tasks. + +* **inference**: Inference (chat). -usage: llama-stack-client [-h] {models,memory_banks,shields} ... +* **inspect**: Inspect server configuration. -Welcome to the LlamaStackClient CLI +* **models**: Manage GenAI models. -options: - -h, --help show this help message and exit +* **post_training**: Post-training. + +* **providers**: Manage API providers. + +* **scoring_functions**: Manage scoring functions. + +* **shields**: Manage safety shield services. + +* **toolgroups**: Manage available tool groups. + +* **vector_dbs**: Manage vector databases. + + + +## configure + +Configure Llama Stack Client CLI. -subcommands: - {models,memory_banks,shields} ``` +Usage: llama-stack-client configure [OPTIONS] +``` + +**Options** + +* **-h, --help**: Show this message and exit. [default: False] + +* **--endpoint**: Llama Stack distribution endpoint [default: ] + +* **--api-key**: Llama Stack distribution API key [default: ] + + + +## datasets + +Manage datasets. -#### `llama-stack-client configure` -```bash -$ llama-stack-client configure -> Enter the host name of the Llama Stack distribution server: localhost -> Enter the port number of the Llama Stack distribution server: 5000 -Done! You can now use the Llama Stack Client CLI with endpoint http://localhost:5000 ``` +Usage: llama-stack-client datasets [OPTIONS] COMMAND [ARGS]... +``` + +**Options** + +* **-h, --help**: Show this message and exit. [default: False] +**Commands** -#### `llama-stack-client providers list` -```bash -$ llama-stack-client providers list +* **list**: Show available datasets on distribution... + +* **register**: Create a new dataset + + + +### list + +Show available datasets on distribution endpoint + +``` +Usage: llama-stack-client datasets list [OPTIONS] ``` + +**Options** + +* **-h, --help**: Show this message and exit. 
[default: False]
+
+
+
+### register
+
+Create a new dataset
+
+```
-+-----------+----------------+-----------------+
-| API       | Provider ID    | Provider Type   |
-+===========+================+=================+
-| scoring   | meta0          | meta-reference  |
-+-----------+----------------+-----------------+
-| datasetio | meta0          | meta-reference  |
-+-----------+----------------+-----------------+
-| inference | tgi0           | remote::tgi     |
-+-----------+----------------+-----------------+
-| memory    | meta-reference | meta-reference  |
-+-----------+----------------+-----------------+
-| agents    | meta-reference | meta-reference  |
-+-----------+----------------+-----------------+
-| telemetry | meta-reference | meta-reference  |
-+-----------+----------------+-----------------+
-| safety    | meta-reference | meta-reference  |
-+-----------+----------------+-----------------+
+Usage: llama-stack-client datasets register [OPTIONS]
+```
-#### `llama-stack-client models list`
-```bash
-$ llama-stack-client models list
-```
+
+**Options**
+
+* **-h, --help**: Show this message and exit. [default: False]
+
+* **--dataset-id**: ID of the dataset
+
+* **--provider-id**: Provider ID for the dataset
+
+* **--provider-dataset-id**: Provider's dataset ID
+
+* **--metadata**: Metadata of the dataset
+
+* **--url**: URL of the dataset
+
+* **--dataset-path**: Local file path to the dataset. If specified, upload dataset via URL
+
+* **--schema**: JSON schema of the dataset
+
+
+
+## eval
+
+Run evaluation tasks.
+
+```
+Usage: llama-stack-client eval [OPTIONS] COMMAND [ARGS]...
+```
+
+**Options**
+
+* **-h, --help**: Show this message and exit. [default: False]
+
+**Commands**
+
+* **run-benchmark**: Run an evaluation benchmark task
+
+* **run-scoring**: Run scoring from application datasets
+
+
+
+### run-benchmark
+
+Run an evaluation benchmark task
+
+```
-+----------------------+----------------------+---------------+----------------------------------------------------------+
-| identifier           | llama_model          | provider_id   | metadata                                                 |
-+======================+======================+===============+==========================================================+
-| Llama3.1-8B-Instruct | Llama3.1-8B-Instruct | tgi0          | {'huggingface_repo': 'meta-llama/Llama-3.1-8B-Instruct'} |
-+----------------------+----------------------+---------------+----------------------------------------------------------+
+Usage: llama-stack-client eval run-benchmark [OPTIONS] BENCHMARK_IDS...
+```
-#### `llama-stack-client models get`
-```bash
-$ llama-stack-client models get Llama3.1-8B-Instruct
-```
+
+**Options**
+
+* **-h, --help**: Show this message and exit. [default: False]
+
+* **--model-id**: Model ID to run the benchmark eval on
+
+* **--output-dir**: Path to the output directory where evaluation results will be saved
+
+* **--num-examples**: Number of examples to evaluate on, useful for debugging
+
+* **--temperature**: temperature in the sampling params to run generation [default: 0.0]
+
+* **--max-tokens**: max-tokens in the sampling params to run generation [default: 4096]
+
+* **--top-p**: top-p in the sampling params to run generation [default: 0.9]
+
+* **--repeat-penalty**: repeat-penalty in the sampling params to run generation [default: 1.0]
+
+* **--visualize**: Visualize evaluation results after completion [default: False]
+
+**Arguments**
+
+* **BENCHMARK_IDS**
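+
+For example, a benchmark run might look like the following (the benchmark and model IDs are illustrative and must match resources registered on your distribution; `--num-examples` keeps the run small for a quick check):
+
+```bash
+llama-stack-client eval run-benchmark meta-reference-mmlu \
+  --model-id Llama3.1-8B-Instruct \
+  --output-dir ./eval_results \
+  --num-examples 10 \
+  --visualize
+```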
+
+
+
+### run-scoring
+
+Run scoring from application datasets
+
+```
+Usage: llama-stack-client eval run-scoring [OPTIONS] SCORING_FUNCTION_IDS...
+```
+
+**Options**
+
+* **-h, --help**: Show this message and exit. [default: False]
+
+* **--dataset-id**: Pre-registered dataset_id to score (from llama-stack-client datasets list)
+
+* **--dataset-path**: Path to the dataset file to score
+
+* **--scoring-params-config**: Path to the scoring params config file in JSON format
+
+* **--num-examples**: Number of examples to evaluate on, useful for debugging
+
+* **--output-dir**: Path to the output directory where scoring results will be saved
+
+* **--visualize**: Visualize evaluation results after completion [default: False]
+
+**Arguments**
+
+* **SCORING_FUNCTION_IDS**
+
+
+
+## eval-tasks
+
+Manage evaluation tasks.
+
+```
-+----------------------+----------------------+----------------------------------------------------------+---------------+
-| identifier           | llama_model          | metadata                                                 | provider_id   |
-+======================+======================+==========================================================+===============+
-| Llama3.1-8B-Instruct | Llama3.1-8B-Instruct | {'huggingface_repo': 'meta-llama/Llama-3.1-8B-Instruct'} | tgi0          |
-+----------------------+----------------------+----------------------------------------------------------+---------------+
+Usage: llama-stack-client eval-tasks [OPTIONS] COMMAND [ARGS]...
+```
+
+**Options**
+
+* **-h, --help**: Show this message and exit. [default: False]
-```bash
-$ llama-stack-client models get Random-Model
-
-Model RandomModel is not found at distribution endpoint host:port. Please ensure endpoint is serving specified model.
-```
+
+**Commands**
+
+* **list**: Show available eval tasks on distribution...
+
+* **register**: Register a new eval task
+
+
+
+### list
+
+Show available eval tasks on distribution endpoint
+
+```
+Usage: llama-stack-client eval-tasks list [OPTIONS]
+```
+
+**Options**
+
+* **-h, --help**: Show this message and exit. [default: False]
-#### `llama-stack-client models register`
-```bash
-$ llama-stack-client models register <model-id> [--provider-id <provider-id>] [--provider-model-id <provider-model-id>] [--metadata <metadata>]
-```
-#### `llama-stack-client models update`
-```bash
-$ llama-stack-client models update <model-id> [--provider-id <provider-id>] [--provider-model-id <provider-model-id>] [--metadata <metadata>]
-```
-#### `llama-stack-client models delete`
-```bash
-$ llama-stack-client models delete <model-id>
-```
+
+
+
+### register
+
+Register a new eval task
+
+```
+Usage: llama-stack-client eval-tasks register [OPTIONS]
+```
+
+**Options**
+
+* **-h, --help**: Show this message and exit. [default: False]
+
+* **--eval-task-id**: ID of the eval task
+
+* **--dataset-id**: ID of the dataset to evaluate
+
+* **--scoring-functions**: Scoring functions to use for evaluation
+
+* **--provider-id**: Provider ID for the eval task
+
+* **--provider-eval-task-id**: Provider's eval task ID
+
+* **--metadata**: Metadata for the eval task in JSON format
+
+
+
+## inference
+
+Inference (chat).
+
+```
+Usage: llama-stack-client inference [OPTIONS] COMMAND [ARGS]...
+```
+
+**Options**
+
+* **-h, --help**: Show this message and exit. [default: False]
+
+**Commands**
+
+* **chat-completion**: Run a chat completion against the...
+
+
+
+### chat-completion
+
+Run a chat completion against the distribution endpoint
-#### `llama-stack-client vector_dbs list`
-```bash
-$ llama-stack-client vector_dbs list
-```
+
+```
+Usage: llama-stack-client inference chat-completion [OPTIONS]
+```
+
+**Options**
+
+* **-h, --help**: Show this message and exit. [default: False]
+
+* **--message**: Message
+
+* **--stream**: Streaming [default: False]
+
+* **--session**: Start a Chat Session [default: False]
+
+* **--model-id**: Model ID
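+
+For example, to send a single message (this assumes the distribution serves a default model; add `--model-id` to pick one explicitly, or `--session` for an interactive chat):
+
+```bash
+llama-stack-client inference chat-completion --message "hello, what model are you?"
+```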
+
+
+
+## inspect
+
+Inspect server configuration.
+
+```
+Usage: llama-stack-client inspect [OPTIONS] COMMAND [ARGS]...
+```
+
+**Options**
+
+* **-h, --help**: Show this message and exit. [default: False]
+
+**Commands**
+
+* **version**: Show the version of the distribution...
+
+
+
+### version
+
+Show the version of the distribution endpoint
+
+```
-+--------------+----------------+---------------------+---------------+------------------------+
-| identifier   | provider_id    | provider_resource_id| vector_db_type| params                 |
-+==============+================+=====================+===============+========================+
-| test_bank    | meta-reference | test_bank           | vector        | embedding_model: all-MiniLM-L6-v2
- embedding_dimension: 384|
-+--------------+----------------+---------------------+---------------+------------------------+
+Usage: llama-stack-client inspect version [OPTIONS]
+```
-#### `llama-stack-client vector_dbs register`
-```bash
-$ llama-stack-client vector_dbs register <vector-db-id> [--provider-id <provider-id>] [--provider-vector-db-id <provider-vector-db-id>] [--embedding-model <embedding-model>] [--embedding-dimension <embedding-dimension>]
-```
+
+**Options**
+
+* **-h, --help**: Show this message and exit. [default: False]
+
+
+
+## models
+
+Manage GenAI models.
+
+```
+Usage: llama-stack-client models [OPTIONS] COMMAND [ARGS]...
+```
-Options:
-- `--provider-id`: Optional. Provider ID for the vector db
-- `--provider-vector-db-id`: Optional. Provider's vector db ID
-- `--embedding-model`: Optional. Embedding model to use. Default: "all-MiniLM-L6-v2"
-- `--embedding-dimension`: Optional. Dimension of embeddings. Default: 384
+
+**Options**
+
+* **-h, --help**: Show this message and exit. [default: False]
+
+**Commands**
+
+* **get**: Show available llama models at distribution...
+
+* **list**: Show available llama models at distribution...
+
+* **register**: Register a new model at distribution endpoint
+
+* **unregister**: Unregister a model from distribution endpoint
+
+
+
+### get
+
+Show available llama models at distribution endpoint
-#### `llama-stack-client vector_dbs unregister`
-```bash
-$ llama-stack-client vector_dbs unregister <vector-db-id>
-```
+
+```
+Usage: llama-stack-client models get [OPTIONS] MODEL_ID
+```
+
+**Options**
+
+* **-h, --help**: Show this message and exit. [default: False]
+
+**Arguments**
+
+* **MODEL_ID**
+
+
+
+### list
+
+Show available llama models at distribution endpoint
-#### `llama-stack-client shields list`
-```bash
-$ llama-stack-client shields list
-```
+
+```
+Usage: llama-stack-client models list [OPTIONS]
+```
+
+**Options**
+
+* **-h, --help**: Show this message and exit. [default: False]
+
+
+
+### register
+
+Register a new model at distribution endpoint
+
+```
-+--------------+----------+----------------+-------------+
-| identifier   | params   | provider_id    | type        |
-+==============+==========+================+=============+
-| llama_guard  | {}       | meta-reference | llama_guard |
-+--------------+----------+----------------+-------------+
+Usage: llama-stack-client models register [OPTIONS] MODEL_ID
+```
-#### `llama-stack-client shields register`
-```bash
-$ llama-stack-client shields register --shield-id <shield-id> [--provider-id <provider-id>] [--provider-shield-id <provider-shield-id>] [--params <params>]
-```
+
+**Options**
+
+* **-h, --help**: Show this message and exit. [default: False]
+
+* **--provider-id**: Provider ID for the model
+
+* **--provider-model-id**: Provider's model ID
+
+* **--metadata**: JSON metadata for the model
+
+**Arguments**
+
+* **MODEL_ID**
+
+
+
+### unregister
+
+Unregister a model from distribution endpoint
+
+```
+Usage: llama-stack-client models unregister [OPTIONS] MODEL_ID
+```
-Options:
-- `--shield-id`: Required. ID of the shield
-- `--provider-id`: Optional. Provider ID for the shield
-- `--provider-shield-id`: Optional. Provider's shield ID
-- `--params`: Optional. JSON configuration parameters for the shield
+
+**Options**
+
+* **-h, --help**: Show this message and exit. [default: False]
+
+**Arguments**
+
+* **MODEL_ID**
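+
+For example (the model and provider IDs are illustrative; use values your distribution actually serves):
+
+```bash
+llama-stack-client models register Llama3.1-8B-Instruct --provider-id tgi0
+llama-stack-client models list
+```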
+
+
+
+## post-training
+
+Post-training.
-#### `llama-stack-client eval_tasks list`
-```bash
-$ llama-stack-client eval_tasks list
-```
+
+```
+Usage: llama-stack-client post-training [OPTIONS] COMMAND [ARGS]...
+```
+
+**Options**
+
+* **-h, --help**: Show this message and exit. [default: False]
+
+**Commands**
+
+* **artifacts**: Get the training artifacts of a specific post...
+
+* **cancel**: Cancel the training job
+
+* **list**: Show the list of available post-training jobs
-#### `llama-stack-client eval_tasks register`
-```bash
-$ llama-stack-client eval_tasks register --eval-task-id <eval-task-id> --dataset-id <dataset-id> --scoring-functions <scoring-fn-id> [<scoring-fn-id> ...] [--provider-id <provider-id>] [--provider-eval-task-id <provider-eval-task-id>] [--metadata <metadata>]
-```
+
+* **status**: Show the status of a specific post-training...
+
+* **supervised_fine_tune**: Kick off a supervised fine-tune job
+
+
+
+### artifacts
+
+Get the training artifacts of a specific post-training job
+
+```
+Usage: llama-stack-client post-training artifacts [OPTIONS]
+```
-Options:
-- `--eval-task-id`: Required. ID of the eval task
-- `--dataset-id`: Required. ID of the dataset to evaluate
-- `--scoring-functions`: Required. One or more scoring functions to use for evaluation
-- `--provider-id`: Optional. Provider ID for the eval task
-- `--provider-eval-task-id`: Optional. Provider's eval task ID
-- `--metadata`: Optional. Metadata for the eval task in JSON format
+
+**Options**
+
+* **-h, --help**: Show this message and exit. [default: False]
+
+* **--job-uuid**: Job UUID
+
+
+
+### cancel
+
+Cancel the training job
+
+```
+Usage: llama-stack-client post-training cancel [OPTIONS]
+```
+
+**Options**
+
+* **-h, --help**: Show this message and exit. [default: False]
+
+* **--job-uuid**: Job UUID
+
+
+
+### list
+
+Show the list of available post-training jobs
+
+```
+Usage: llama-stack-client post-training list [OPTIONS]
+```
+
+**Options**
+
+* **-h, --help**: Show this message and exit. [default: False]
+
+
+
+### status
+
+Show the status of a specific post-training job
+
+```
+Usage: llama-stack-client post-training status [OPTIONS]
+```
+
+**Options**
+
+* **-h, --help**: Show this message and exit. [default: False]
+
+* **--job-uuid**: Job UUID
+
+
+
+### supervised_fine_tune
+
+Kick off a supervised fine-tune job
+
+```
+Usage: llama-stack-client post-training supervised_fine_tune
+       [OPTIONS]
+```
+
+**Options**
+
+* **-h, --help**: Show this message and exit. [default: False]
+
+* **--job-uuid**: Job UUID
+
+* **--model**: Model ID
+
+* **--algorithm-config**: Algorithm Config
+
+* **--training-config**: Training Config
+
+* **--checkpoint-dir**: Checkpoint directory
+
+
+
+## providers
+
+Manage API providers.
-#### `llama-stack-client eval run-benchmark`
-```bash
-$ llama-stack-client eval run-benchmark <benchmark-id> [<benchmark-id> ...] --eval-task-config <config-path> --output-dir <output-dir> [--num-examples <num-examples>] [--visualize]
-```
+
+```
+Usage: llama-stack-client providers [OPTIONS] COMMAND [ARGS]...
+```
+
+**Options**
+
+* **-h, --help**: Show this message and exit. [default: False]
+
+**Commands**
+
+* **list**: Show available providers on distribution...
+
-Options:
-- `--eval-task-config`: Required. Path to the eval task config file in JSON format
-- `--output-dir`: Required. Path to the directory where evaluation results will be saved
-- `--num-examples`: Optional. Number of examples to evaluate (useful for debugging)
-- `--visualize`: Optional flag. If set, visualizes evaluation results after completion
-Example eval_benchmark_config.json:
-```json
-{
-  "type": "benchmark",
-  "eval_candidate": {
-    "type": "model",
-    "model": "Llama3.1-405B-Instruct",
-    "sampling_params": {
-      "strategy": "greedy",
-      "temperature": 0,
-      "top_p": 0.95,
-      "top_k": 0,
-      "max_tokens": 0,
-      "repetition_penalty": 1.0
-    }
-  }
-}
-```
+
+
+### list
+
+Show available providers on distribution endpoint
+
+```
+Usage: llama-stack-client providers list [OPTIONS]
+```
-#### `llama-stack-client eval run-scoring`
-```bash
-$ llama-stack-client eval run-scoring <scoring-fn-id> [<scoring-fn-id> ...] --eval-task-config <config-path> --output-dir <output-dir> [--num-examples <num-examples>] [--visualize]
-```
+
+**Options**
+
+* **-h, --help**: Show this message and exit. [default: False]
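+
+For example, to see every provider the connected distribution exposes:
+
+```bash
+llama-stack-client providers list
+```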
+
+
+
+## scoring-functions
+
+Manage scoring functions.
+
+```
+Usage: llama-stack-client scoring-functions [OPTIONS] COMMAND [ARGS]...
+```
-Options:
-- `--eval-task-config`: Required. Path to the eval task config file in JSON format
-- `--output-dir`: Required. Path to the directory where scoring results will be saved
-- `--num-examples`: Optional. Number of examples to evaluate (useful for debugging)
-- `--visualize`: Optional flag. If set, visualizes scoring results after completion
+
+**Options**
+
+* **-h, --help**: Show this message and exit. [default: False]
+
+**Commands**
+
+* **list**: Show available scoring functions on...
+
+* **register**: Register a new scoring function
-#### `llama-stack-client toolgroups list`
-```bash
-$ llama-stack-client toolgroups list
-```
+
+
+
+### list
+
+Show available scoring functions on distribution endpoint
+
+```
+Usage: llama-stack-client scoring-functions list [OPTIONS]
+```
+
+**Options**
+
+* **-h, --help**: Show this message and exit. [default: False]
+
+
+
+### register
+
+Register a new scoring function
+
+```
-+---------------------------+------------------+------+---------------+
-| identifier                | provider_id      | args | mcp_endpoint  |
-+===========================+==================+======+===============+
-| builtin::code_interpreter | code-interpreter | None | None          |
-+---------------------------+------------------+------+---------------+
-| builtin::rag              | rag-runtime      | None | None          |
-+---------------------------+------------------+------+---------------+
-| builtin::websearch        | tavily-search    | None | None          |
-+---------------------------+------------------+------+---------------+
+Usage: llama-stack-client scoring-functions register [OPTIONS]
+```
-#### `llama-stack-client toolgroups get`
-```bash
-$ llama-stack-client toolgroups get <toolgroup-id>
-```
-Shows detailed information about a specific toolgroup. If the toolgroup is not found, displays an error message.
+
+**Options**
+
+* **-h, --help**: Show this message and exit. [default: False]
+
+* **--scoring-fn-id**: ID of the scoring function
+
+* **--description**: Description of the scoring function
+
+* **--return-type**: Return type of the scoring function
+
+* **--provider-id**: Provider ID for the scoring function
+
+* **--provider-scoring-fn-id**: Provider's scoring function ID
+
+* **--params**: Parameters for the scoring function in JSON format
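+
+For example (all values are illustrative; the exact `--return-type` format expected depends on your distribution):
+
+```bash
+llama-stack-client scoring-functions register \
+  --scoring-fn-id my-custom-fn \
+  --description "My custom scoring function" \
+  --return-type '{"type": "string"}'
+```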
+
+
+
+## shields
+
+Manage safety shield services.
+
+```
+Usage: llama-stack-client shields [OPTIONS] COMMAND [ARGS]...
+```
+
+**Options**
+
+* **-h, --help**: Show this message and exit. [default: False]
+
+**Commands**
+
+* **list**: Show available safety shields on distribution...
+
+* **register**: Register a new safety shield
+
+
+
+### list
+
+Show available safety shields on distribution endpoint
-#### `llama-stack-client toolgroups register`
-```bash
-$ llama-stack-client toolgroups register <toolgroup-id> [--provider-id <provider-id>] [--provider-toolgroup-id <provider-toolgroup-id>] [--mcp-config <mcp-config>] [--args <args>]
-```
+
+```
+Usage: llama-stack-client shields list [OPTIONS]
+```
+
+**Options**
+
+* **-h, --help**: Show this message and exit. [default: False]
-Options:
-- `--provider-id`: Optional. Provider ID for the toolgroup
-- `--provider-toolgroup-id`: Optional. Provider's toolgroup ID
-- `--mcp-config`: Optional. JSON configuration for the MCP endpoint
-- `--args`: Optional. JSON arguments for the toolgroup
-#### `llama-stack-client toolgroups unregister`
-```bash
-$ llama-stack-client toolgroups unregister <toolgroup-id>
-```
+
+
+
+### register
+
+Register a new safety shield
+
+```
+Usage: llama-stack-client shields register [OPTIONS]
+```
+
+**Options**
+
+* **-h, --help**: Show this message and exit. [default: False]
+
+* **--shield-id**: ID of the shield
+
+* **--provider-id**: Provider ID for the shield
+
+* **--provider-shield-id**: Provider's shield ID
+
+* **--params**: JSON configuration parameters for the shield
+
+
+
+## toolgroups
+
+Manage available tool groups.
+
+```
+Usage: llama-stack-client toolgroups [OPTIONS] COMMAND [ARGS]...
+```
+
+**Options**
+
+* **-h, --help**: Show this message and exit. [default: False]
+
+**Commands**
+
+* **get**: Show available llama toolgroups at...
+
+* **list**: Show available llama toolgroups at...
+
+* **register**: Register a new toolgroup at distribution...
+
+* **unregister**: Unregister a toolgroup from distribution...
+
+
+
+### get
+
+Show available llama toolgroups at distribution endpoint
+
+```
+Usage: llama-stack-client toolgroups get [OPTIONS] TOOLGROUP_ID
+```
+
+**Options**
+
+* **-h, --help**: Show this message and exit. [default: False]
+
+**Arguments**
+
+* **TOOLGROUP_ID**
+
+
+
+### list
+
+Show available llama toolgroups at distribution endpoint
+
+```
+Usage: llama-stack-client toolgroups list [OPTIONS]
+```
+
+**Options**
+
+* **-h, --help**: Show this message and exit. [default: False]
+
+
+
+### register
+
+Register a new toolgroup at distribution endpoint
+
+```
+Usage: llama-stack-client toolgroups register [OPTIONS] TOOLGROUP_ID
+```
+
+**Options**
+
+* **-h, --help**: Show this message and exit. [default: False]
+
+* **--provider-id**: Provider ID for the toolgroup
+
+* **--provider-toolgroup-id**: Provider's toolgroup ID
+
+* **--mcp-config**: JSON mcp_config for the toolgroup
+
+* **--args**: JSON args for the toolgroup
+
+**Arguments**
+
+* **TOOLGROUP_ID**
+
+
+
+### unregister
+
+Unregister a toolgroup from distribution endpoint
+
+```
+Usage: llama-stack-client toolgroups unregister [OPTIONS] TOOLGROUP_ID
+```
+
+**Options**
+
+* **-h, --help**: Show this message and exit. [default: False]
+
+**Arguments**
+
+* **TOOLGROUP_ID**
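+
+For example, to inspect a built-in toolgroup (use `llama-stack-client toolgroups list` to find the IDs registered on your distribution):
+
+```bash
+llama-stack-client toolgroups get builtin::websearch
+```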
+
+
+
+## vector-dbs
+
+Manage vector databases.
+
+```
+Usage: llama-stack-client vector-dbs [OPTIONS] COMMAND [ARGS]...
+```
+
+**Options**
+
+* **-h, --help**: Show this message and exit. [default: False]
+
+**Commands**
+
+* **list**: Show available vector dbs on distribution...
+
+* **register**: Create a new vector db
+
+* **unregister**: Delete a vector db
+
+
+
+### list
+
+Show available vector dbs on distribution endpoint
+
+```
+Usage: llama-stack-client vector-dbs list [OPTIONS]
+```
+
+**Options**
+
+* **-h, --help**: Show this message and exit. [default: False]
+
+
+
+### register
+
+Create a new vector db
+
+```
+Usage: llama-stack-client vector-dbs register [OPTIONS] VECTOR_DB_ID
+```
+
+**Options**
+
+* **-h, --help**: Show this message and exit. [default: False]
+
+* **--provider-id**: Provider ID for the vector db
+
+* **--provider-vector-db-id**: Provider's vector db ID
+
+* **--embedding-model**: Embedding model (for vector type) [default: all-MiniLM-L6-v2]
+
+* **--embedding-dimension**: Embedding dimension (for vector type) [default: 384]
+
+**Arguments**
+
+* **VECTOR_DB_ID**
+
+
+
+### unregister
+
+Delete a vector db
+
+```
+Usage: llama-stack-client vector-dbs unregister [OPTIONS] VECTOR_DB_ID
+```
+
+**Options**
+
+* **-h, --help**: Show this message and exit. [default: False]
+
+**Arguments**
+
+* **VECTOR_DB_ID**
diff --git a/scripts/gen_cli_doc.py b/scripts/gen_cli_doc.py
new file mode 100644
index 00000000..859b88d2
--- /dev/null
+++ b/scripts/gen_cli_doc.py
@@ -0,0 +1,81 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+from pathlib import Path
+
+import click
+from llama_stack_client.lib.cli.llama_stack_client import llama_stack_client
+
+
+def generate_markdown_docs(command, parent=None, level=1):
+    """Generate markdown documentation for a click command."""
+    ctx = click.Context(command, info_name=command.name, parent=parent)
+
+    # Start with the command name as a header
+    prefix = "#" * level
+    if level == 1:
+        doc = [f"{prefix} CLI Reference\n"]
+    else:
+        doc = [f"{prefix} {command.name}\n"]
+
+    # Add command help docstring
+    if command.help:
+        doc.append(f"{command.help}\n")
+
+    # Add usage
+    doc.append(f"```\n{command.get_usage(ctx)}\n```\n")
+
+    # Add options if present
+    has_options = False
+    for param in command.get_params(ctx):
+        if isinstance(param, click.Option):
+            if not has_options:
+                doc.append("**Options**\n")
+                has_options = True
+            opts = ", ".join(param.opts)
+            help_text = param.help or ""
+            default = f" [default: {param.default}]" if param.default is not None else ""
+            doc.append(f"* **{opts}**: {help_text}{default}\n")
+
+    # Add arguments if present
+    has_arguments = False
+    for param in command.get_params(ctx):
+        if isinstance(param, click.Argument):
+            if not has_arguments:
+                doc.append("**Arguments**\n")
+                has_arguments = True
+            name = param.name.upper()
+            doc.append(f"* **{name}**\n")
+
+    # If this is a group with commands, add subcommands
+    if isinstance(command, click.Group):
+        doc.append("**Commands**\n")
+        for cmd_name in command.list_commands(ctx):
+            cmd = command.get_command(ctx, cmd_name)
+            cmd_help = cmd.get_short_help_str() if cmd else ""
+            doc.append(f"* **{cmd_name}**: {cmd_help}\n")
+
+        # Add detailed subcommand documentation
+        for cmd_name in command.list_commands(ctx):
+            cmd = command.get_command(ctx, cmd_name)
+            if cmd:
+                doc.append("\n")
+                doc.extend(generate_markdown_docs(cmd, ctx, level + 1))
+
+    return doc
+
+
+if __name__ == "__main__":
+    # Generate the docs
+    markdown_lines = generate_markdown_docs(llama_stack_client)
+    markdown = "\n".join(markdown_lines)
+
+    # Write to file
+    file_path = Path(__file__).parent.parent / "docs" / "cli_reference.md"
+    with open(file_path, "w") as f:
+        f.write(markdown)
+
+    print(f"Documentation generated in {file_path}")
diff --git a/src/llama_stack_client/lib/cli/llama_stack_client.py b/src/llama_stack_client/lib/cli/llama_stack_client.py
index d2b86528..54c46aaa
100644 --- a/src/llama_stack_client/lib/cli/llama_stack_client.py +++ b/src/llama_stack_client/lib/cli/llama_stack_client.py @@ -35,8 +35,8 @@ @click.option("--api-key", type=str, help="Llama Stack distribution API key", default="") @click.option("--config", type=str, help="Path to config file", default=None) @click.pass_context -def cli(ctx, endpoint: str, api_key: str, config: str | None): - """Welcome to the LlamaStackClient CLI""" +def llama_stack_client(ctx, endpoint: str, api_key: str, config: str | None): + """Welcome to the llama-stack-client CLI - a command-line interface for interacting with Llama Stack""" ctx.ensure_object(dict) # If no config provided, check default location @@ -80,23 +80,23 @@ def cli(ctx, endpoint: str, api_key: str, config: str | None): # Register all subcommands -cli.add_command(models, "models") -cli.add_command(vector_dbs, "vector_dbs") -cli.add_command(shields, "shields") -cli.add_command(eval_tasks, "eval_tasks") -cli.add_command(providers, "providers") -cli.add_command(datasets, "datasets") -cli.add_command(configure, "configure") -cli.add_command(scoring_functions, "scoring_functions") -cli.add_command(eval, "eval") -cli.add_command(inference, "inference") -cli.add_command(post_training, "post_training") -cli.add_command(inspect, "inspect") -cli.add_command(toolgroups, "toolgroups") +llama_stack_client.add_command(models, "models") +llama_stack_client.add_command(vector_dbs, "vector_dbs") +llama_stack_client.add_command(shields, "shields") +llama_stack_client.add_command(eval_tasks, "eval_tasks") +llama_stack_client.add_command(providers, "providers") +llama_stack_client.add_command(datasets, "datasets") +llama_stack_client.add_command(configure, "configure") +llama_stack_client.add_command(scoring_functions, "scoring_functions") +llama_stack_client.add_command(eval, "eval") +llama_stack_client.add_command(inference, "inference") +llama_stack_client.add_command(post_training, "post_training") +llama_stack_client.add_command(inspect, "inspect") +llama_stack_client.add_command(toolgroups, "toolgroups") def main(): - cli() + llama_stack_client() if __name__ == "__main__":