|
1 | | -# LLM Configuration |
2 | | -# For local Ollama (default) |
3 | | -# I have found that llama3.1:8b and granite3.3:8b give the best results, |
4 | | -# with a slight edge to granite. |
5 | | -LLM_ENDPOINT=http://localhost:11434/v1 |
6 | | -MODEL_NAME=granite3.3:8b |
7 | | - |
8 | | -# For OpenAI |
| 1 | +# Sippy AI Agent Configuration |
| 2 | +# Copy this file to .env and configure for your setup |
| 3 | + |
| 4 | +# ============================================================================= |
| 5 | +# Model Configuration - Choose ONE of the following setups: |
| 6 | +# ============================================================================= |
| 7 | + |
| 8 | +# ----------------------------------------------------------------------------- |
| 9 | +# Option 1: Local Ollama |
| 10 | +# ----------------------------------------------------------------------------- |
| 11 | +# LLM_ENDPOINT=http://localhost:11434/v1 |
| 12 | +# MODEL_NAME=llama3.1:8b |
| 13 | + |
| 14 | +# Other popular Ollama models: |
| 15 | +# MODEL_NAME=llama3.2:latest |
| 16 | +# MODEL_NAME=mistral:latest |
| 17 | + |
| 18 | +# ----------------------------------------------------------------------------- |
| 19 | +# Option 2: OpenAI |
| 20 | +# ----------------------------------------------------------------------------- |
9 | 21 | # LLM_ENDPOINT=https://api.openai.com/v1 |
10 | | -# MODEL_NAME=gpt-4 |
11 | | -# OPENAI_API_KEY=your_openai_api_key_here |
| 22 | +# MODEL_NAME=gpt-4o |
| 23 | +# OPENAI_API_KEY=sk-your-openai-api-key-here |
12 | 24 |
|
13 | | -# For Google Gemini |
| 25 | +# Other OpenAI models: |
| 26 | +# MODEL_NAME=gpt-4o-mini |
| 27 | +# MODEL_NAME=gpt-4-turbo |
| 28 | +# MODEL_NAME=gpt-3.5-turbo |
| 29 | + |
| 30 | +# ----------------------------------------------------------------------------- |
| 31 | +# Option 3: Google Gemini via AI Studio API |
| 32 | +# ----------------------------------------------------------------------------- |
| 33 | +# MODEL_NAME=gemini-1.5-pro |
| 34 | +# GOOGLE_API_KEY=your-google-api-key-here |
| 35 | + |
| 36 | +# OR use service account credentials: |
14 | 37 | # MODEL_NAME=gemini-2.5-flash |
15 | | -# GOOGLE_API_KEY=your_google_api_key_here |
16 | | -# or GOOGLE_APPLICATION_CREDENTIALS=path_to_json_credentials |
| 38 | +# GOOGLE_APPLICATION_CREDENTIALS=/path/to/service-account-key.json |
| 39 | + |
| 40 | +# ----------------------------------------------------------------------------- |
| 41 | +# Option 4: Claude via Google Vertex AI (recommended for Claude models) |
| 42 | +# ----------------------------------------------------------------------------- |
| 43 | +# Using gcloud auth (recommended for local development): |
| 44 | +# MODEL_NAME=claude-sonnet-4-5 |
| 45 | +# GOOGLE_PROJECT_ID=your-gcp-project-id |
| 46 | +# GOOGLE_LOCATION=us-central1 |
17 | 47 |
|
18 | | -# Sippy API Configuration (for future use) |
19 | | -SIPPY_API_URL=https://sippy.dptools.openshift.org |
| 48 | +# OR using service account credentials: |
| 49 | +# MODEL_NAME=claude-sonnet-4-5 |
| 50 | +# GOOGLE_PROJECT_ID=your-gcp-project-id |
| 51 | +# GOOGLE_APPLICATION_CREDENTIALS=/path/to/service-account-key.json |
| 52 | +# GOOGLE_LOCATION=us-central1 |
20 | 53 |
|
21 | | -# Sippy database connection for accessing data |
22 | | -#SIPPY_READ_ONLY_DATABASE_DSN=postgresql://readonly_user:password@host:5432/sippy |
| 54 | +# ============================================================================= |
| 55 | +# Model Parameters |
| 56 | +# ============================================================================= |
| 57 | +TEMPERATURE=0.0 |
23 | 58 |
|
24 | | -# Jira Configuration (for known incident tracking) |
| 59 | +# Token budget for Claude's extended thinking feature (only used when --thinking is enabled) |
| 60 | +# EXTENDED_THINKING_BUDGET=10000 |
| 61 | + |
| 62 | +# ============================================================================= |
| 63 | +# Sippy Configuration |
| 64 | +# ============================================================================= |
| 65 | +# Sippy API URL (required for most tools to work) |
| 66 | +SIPPY_API_URL=https://sippy.dptools.openshift.org/api |
| 67 | + |
| 68 | +# Optional: Database access for advanced SQL queries (use read-only user!) |
| 69 | +# SIPPY_READ_ONLY_DATABASE_DSN=postgresql://readonly_user:password@host:5432/sippy |
| 70 | + |
| 71 | +# ============================================================================= |
| 72 | +# Jira Configuration (Optional - for incident tracking) |
| 73 | +# ============================================================================= |
25 | 74 | JIRA_URL=https://issues.redhat.com |
26 | 75 |
|
27 | | -# Specify the MCP configuration to use. |
28 | | -MCP_CONFIG_FILE=mcp_config.json |
| 76 | +# ============================================================================= |
| 77 | +# Agent Behavior |
| 78 | +# ============================================================================= |
| 79 | +# Maximum number of tool call iterations before stopping |
| 80 | +MAX_ITERATIONS=15 |
| 81 | + |
| 82 | +# Maximum execution time in seconds (default: 300 = 5 minutes) |
| 83 | +MAX_EXECUTION_TIME=300 |
| 84 | + |
| 85 | +# AI Persona (default, zorp, etc.) |
| 86 | +PERSONA=default |
| 87 | + |
| 88 | +# ============================================================================= |
| 89 | +# MCP (Model Context Protocol) Integration (Optional) |
| 90 | +# ============================================================================= |
| 91 | +# Path to MCP servers configuration file |
| 92 | +# MCP_CONFIG_FILE=mcp_config.json |