diff --git a/crates/goose/src/providers/factory.rs b/crates/goose/src/providers/factory.rs
index aa364ad3e992..e440d772e8fb 100644
--- a/crates/goose/src/providers/factory.rs
+++ b/crates/goose/src/providers/factory.rs
@@ -9,7 +9,6 @@ use super::{
     databricks::DatabricksProvider,
     gcpvertexai::GcpVertexAIProvider,
     gemini_cli::GeminiCliProvider,
-    githubcopilot::GithubCopilotProvider,
     google::GoogleProvider,
     groq::GroqProvider,
     lead_worker::LeadWorkerProvider,
@@ -48,7 +47,7 @@ pub fn providers() -> Vec<ProviderMetadata> {
        DatabricksProvider::metadata(),
        GcpVertexAIProvider::metadata(),
        GeminiCliProvider::metadata(),
-       GithubCopilotProvider::metadata(),
+       // GithubCopilotProvider::metadata(),
        GoogleProvider::metadata(),
        GroqProvider::metadata(),
        OllamaProvider::metadata(),
@@ -135,7 +134,7 @@ fn create_provider(name: &str, model: ModelConfig) -> Result<Arc<dyn Provider>> {
        "sagemaker_tgi" => Ok(Arc::new(SageMakerTgiProvider::from_env(model)?)),
        "venice" => Ok(Arc::new(VeniceProvider::from_env(model)?)),
        "snowflake" => Ok(Arc::new(SnowflakeProvider::from_env(model)?)),
-       "github_copilot" => Ok(Arc::new(GithubCopilotProvider::from_env(model)?)),
+       // "github_copilot" => Ok(Arc::new(GithubCopilotProvider::from_env(model)?)),
        "xai" => Ok(Arc::new(XaiProvider::from_env(model)?)),
        _ => Err(anyhow::anyhow!("Unknown provider: {}", name)),
    }
diff --git a/documentation/docs/getting-started/providers.md b/documentation/docs/getting-started/providers.md
index 7a0adf52bb43..a05838657cb6 100644
--- a/documentation/docs/getting-started/providers.md
+++ b/documentation/docs/getting-started/providers.md
@@ -26,7 +26,6 @@ Goose relies heavily on tool calling capabilities and currently works best with
 | [Databricks](https://www.databricks.com/) | Unified data analytics and AI platform for building and deploying models. | `DATABRICKS_HOST`, `DATABRICKS_TOKEN` |
 | [Gemini](https://ai.google.dev/gemini-api/docs) | Advanced LLMs by Google with multimodal capabilities (text, images). | `GOOGLE_API_KEY` |
 | [GCP Vertex AI](https://cloud.google.com/vertex-ai) | Google Cloud's Vertex AI platform, supporting Gemini and Claude models. **Credentials must be [configured in advance](https://cloud.google.com/vertex-ai/docs/authentication).** | `GCP_PROJECT_ID`, `GCP_LOCATION` and optional `GCP_MAX_RETRIES` (6), `GCP_INITIAL_RETRY_INTERVAL_MS` (5000), `GCP_BACKOFF_MULTIPLIER` (2.0), `GCP_MAX_RETRY_INTERVAL_MS` (320_000). |
-| [GitHub Copilot](https://docs.github.com/en/copilot/using-github-copilot/ai-models) | Access to GitHub Copilot's chat models including gpt-4o, o1, o3-mini, and Claude models. Uses device code authentication flow for secure access. | Uses GitHub device code authentication flow (no API key needed) |
 | [Groq](https://groq.com/) | High-performance inference hardware and tools for LLMs. | `GROQ_API_KEY` |
 | [Ollama](https://ollama.com/) | Local model runner supporting Qwen, Llama, DeepSeek, and other open-source models. **Because this provider runs locally, you must first [download and run a model](/docs/getting-started/providers#local-llms).** | `OLLAMA_HOST` |
 | [Ramalama](https://ramalama.ai/) | Local model using native [OCI](https://opencontainers.org/) container runtimes, [CNCF](https://www.cncf.io/) tools, and supporting models as OCI artifacts. The Ramalama API is compatible with Ollama and can be used with the Goose Ollama provider. Supports Qwen, Llama, DeepSeek, and other open-source models. **Because this provider runs locally, you must first [download and run a model](/docs/getting-started/providers#local-llms).** | `OLLAMA_HOST` |
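With the match arm commented out, `"github_copilot"` no longer matches in `create_provider` and falls through to the catch-all arm, so selecting the provider now returns an error instead of constructing it. Below is a minimal sketch of that resulting behavior; the `Provider` trait, `ModelConfig`, and `GoogleProvider` here are simplified stand-ins for the real goose types, not the actual implementations.

```rust
use std::sync::Arc;

use anyhow::Result;

// Simplified stand-ins so the sketch is self-contained (hypothetical,
// not the real goose definitions).
trait Provider {}
struct ModelConfig;

struct GoogleProvider;
impl Provider for GoogleProvider {}
impl GoogleProvider {
    fn from_env(_model: ModelConfig) -> Result<Self> {
        Ok(GoogleProvider)
    }
}

// Mirrors the shape of the factory after this change: the github_copilot
// arm is commented out, so that name hits the catch-all error arm.
fn create_provider(name: &str, model: ModelConfig) -> Result<Arc<dyn Provider>> {
    match name {
        "google" => Ok(Arc::new(GoogleProvider::from_env(model)?)),
        // "github_copilot" => ...   // disabled by this diff
        _ => Err(anyhow::anyhow!("Unknown provider: {}", name)),
    }
}

fn main() {
    // Providers that remain registered still construct normally.
    assert!(create_provider("google", ModelConfig).is_ok());

    // The disabled provider now surfaces the generic error.
    let err = create_provider("github_copilot", ModelConfig).unwrap_err();
    assert_eq!(err.to_string(), "Unknown provider: github_copilot");
}
```

Commenting out the arm (rather than deleting it) keeps the re-enable path obvious, but it does mean users with `github_copilot` configured will see "Unknown provider" rather than a message explaining the provider was disabled.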