chore: improve formatting of .env.example for better readability #897

Merged Dec 14, 2024 (9 commits)
215 changes: 106 additions & 109 deletions in .env.example
Collaborator: Weird, GitHub is still showing the comments and options that were removed.

Collaborator: yes

@@ -1,39 +1,38 @@
# Discord Configuration
DISCORD_APPLICATION_ID=
DISCORD_API_TOKEN= # Bot token
DISCORD_VOICE_CHANNEL_ID= # The ID of the voice channel the bot should join (optional)
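# Example (illustrative placeholder values only, not part of the upstream file;
# Discord application and channel IDs are numeric snowflakes, and the bot token
# comes from the Discord Developer Portal):
# DISCORD_APPLICATION_ID=123456789012345678
# DISCORD_VOICE_CHANNEL_ID=123456789012345678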

# AI Model API Keys
OPENAI_API_KEY= # OpenAI API key, starting with sk-
SMALL_OPENAI_MODEL= # Default: gpt-4o-mini
MEDIUM_OPENAI_MODEL= # Default: gpt-4o
LARGE_OPENAI_MODEL= # Default: gpt-4o
EMBEDDING_OPENAI_MODEL= # Default: text-embedding-3-small
IMAGE_OPENAI_MODEL= # Default: dall-e-3
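# Example (optional; these values simply pin the documented defaults explicitly):
# SMALL_OPENAI_MODEL=gpt-4o-mini
# MEDIUM_OPENAI_MODEL=gpt-4o
# EMBEDDING_OPENAI_MODEL=text-embedding-3-small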

# Eternal AI's Decentralized Inference API
ETERNALAI_URL=
ETERNALAI_MODEL= # Default: "neuralmagic/Meta-Llama-3.1-405B-Instruct-quantized.w4a16"
ETERNALAI_API_KEY=

GROK_API_KEY= # GROK API Key
GROQ_API_KEY= # Starts with gsk_
OPENROUTER_API_KEY=
GOOGLE_GENERATIVE_AI_API_KEY= # Gemini API key

ALI_BAILIAN_API_KEY= # Ali Bailian API Key
NANOGPT_API_KEY= # NanoGPT API Key

HYPERBOLIC_API_KEY= # Hyperbolic API Key
HYPERBOLIC_MODEL=
IMAGE_HYPERBOLIC_MODEL= # Default: FLUX.1-dev
SMALL_HYPERBOLIC_MODEL= # Default: meta-llama/Llama-3.2-3B-Instruct
MEDIUM_HYPERBOLIC_MODEL= # Default: meta-llama/Meta-Llama-3.1-70B-Instruct
LARGE_HYPERBOLIC_MODEL= # Default: meta-llama/Meta-Llama-3.1-405B-Instruct

# Speech Synthesis
ELEVENLABS_XI_API_KEY= # API key from elevenlabs

# ElevenLabs Settings
ELEVENLABS_MODEL_ID=eleven_multilingual_v2
@@ -47,45 +46,47 @@ ELEVENLABS_OUTPUT_FORMAT=pcm_16000

# Twitter/X Configuration
TWITTER_DRY_RUN=false
TWITTER_USERNAME= # Account username
TWITTER_PASSWORD= # Account password
TWITTER_EMAIL= # Account email
TWITTER_2FA_SECRET=
TWITTER_COOKIES= # Account cookies
TWITTER_POLL_INTERVAL=120 # How often (in seconds) the bot should check for interactions
TWITTER_SEARCH_ENABLE=FALSE # Enable timeline search. WARNING: this greatly increases your chance of getting banned
TWITTER_TARGET_USERS= # Comma-separated list of Twitter usernames to interact with
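# Example (hypothetical values for illustration): poll for interactions every two
# minutes and only engage with the listed accounts.
# TWITTER_POLL_INTERVAL=120
# TWITTER_TARGET_USERS=alice,bob,charlie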

X_SERVER_URL=
XAI_API_KEY=
XAI_MODEL=

# Post Interval Settings (in minutes)
POST_INTERVAL_MIN= # Default: 90
POST_INTERVAL_MAX= # Default: 180
POST_IMMEDIATELY=
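# Example (illustrative; the interval values match the documented defaults, and
# POST_IMMEDIATELY is assumed to take a boolean):
# POST_INTERVAL_MIN=90
# POST_INTERVAL_MAX=180
# POST_IMMEDIATELY=true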

# Twitter Action Processing Configuration
ACTION_INTERVAL=300000 # Interval in milliseconds between action processing runs (default: 5 minutes)
ENABLE_ACTION_PROCESSING=false # Set to true to enable the action processing loop
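# Example (illustrative): 600000 ms = 10 minutes between action-processing runs.
# ACTION_INTERVAL=600000
# ENABLE_ACTION_PROCESSING=true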

# Feature Flags
IMAGE_GEN= # Set to TRUE to enable image generation
USE_OPENAI_EMBEDDING= # Set to TRUE for OpenAI/1536, leave blank for local
USE_OLLAMA_EMBEDDING= # Set to TRUE for OLLAMA/1024, leave blank for local
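# Example (illustrative; presumably only one embedding provider should be enabled
# at a time, since the vector dimensions differ: OpenAI 1536 vs. Ollama 1024):
# USE_OPENAI_EMBEDDING=TRUE
# USE_OLLAMA_EMBEDDING=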

# OpenRouter Models
OPENROUTER_MODEL= # Default: uses hermes 70b/405b
SMALL_OPENROUTER_MODEL=
MEDIUM_OPENROUTER_MODEL=
LARGE_OPENROUTER_MODEL=

# REDPILL Configuration
# https://docs.red-pill.ai/get-started/supported-models
REDPILL_API_KEY= # REDPILL API Key
REDPILL_MODEL=
SMALL_REDPILL_MODEL= # Default: gpt-4o-mini
MEDIUM_REDPILL_MODEL= # Default: gpt-4o
LARGE_REDPILL_MODEL= # Default: gpt-4o

# Grok Configuration
SMALL_GROK_MODEL= # Default: grok-2-1212
@@ -94,63 +95,60 @@ LARGE_GROK_MODEL= # Default: grok-2-1212
EMBEDDING_GROK_MODEL= # Default: grok-2-1212

# Ollama Configuration
OLLAMA_SERVER_URL= # Default: localhost:11434
OLLAMA_MODEL=
OLLAMA_EMBEDDING_MODEL= # Default: mxbai-embed-large
SMALL_OLLAMA_MODEL= # Default: llama3.2
MEDIUM_OLLAMA_MODEL= # Default: hermes3
LARGE_OLLAMA_MODEL= # Default: hermes3:70b
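# Example (illustrative; model names match the documented defaults, and the
# http:// scheme on the server URL is an assumption):
# OLLAMA_SERVER_URL=http://localhost:11434
# SMALL_OLLAMA_MODEL=llama3.2
# LARGE_OLLAMA_MODEL=hermes3:70b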

# Google Configuration
GOOGLE_MODEL=
SMALL_GOOGLE_MODEL= # Default: gemini-1.5-flash-latest
MEDIUM_GOOGLE_MODEL= # Default: gemini-1.5-flash-latest
LARGE_GOOGLE_MODEL= # Default: gemini-1.5-pro-latest
EMBEDDING_GOOGLE_MODEL= # Default: text-embedding-004

# Groq Configuration
SMALL_GROQ_MODEL= # Default: llama-3.1-8b-instant
MEDIUM_GROQ_MODEL= # Default: llama-3.3-70b-versatile
LARGE_GROQ_MODEL= # Default: llama-3.2-90b-vision-preview
EMBEDDING_GROQ_MODEL= # Default: llama-3.1-8b-instant

# LlamaLocal Configuration
LLAMALOCAL_PATH= # Default: "", i.e. the current directory in plugin-node/dist/, which gets destroyed and recreated on every build

# NanoGPT Configuration
SMALL_NANOGPT_MODEL= # Default: gpt-4o-mini
MEDIUM_NANOGPT_MODEL= # Default: gpt-4o
LARGE_NANOGPT_MODEL= # Default: gpt-4o

# Anthropic Configuration
ANTHROPIC_API_KEY= # For Claude
SMALL_ANTHROPIC_MODEL= # Default: claude-3-haiku-20240307
MEDIUM_ANTHROPIC_MODEL= # Default: claude-3-5-sonnet-20241022
LARGE_ANTHROPIC_MODEL= # Default: claude-3-5-sonnet-20241022

# Heurist Configuration
HEURIST_API_KEY= # Get from https://heurist.ai/dev-access
SMALL_HEURIST_MODEL= # Default: meta-llama/llama-3-70b-instruct
MEDIUM_HEURIST_MODEL= # Default: meta-llama/llama-3-70b-instruct
LARGE_HEURIST_MODEL= # Default: meta-llama/llama-3.1-405b-instruct
HEURIST_IMAGE_MODEL= # Default: PepeXL

# Gaianet Configuration
GAIANET_MODEL=
GAIANET_SERVER_URL=

SMALL_GAIANET_MODEL= # Default: llama3b
SMALL_GAIANET_SERVER_URL= # Default: https://llama3b.gaia.domains/v1
MEDIUM_GAIANET_MODEL= # Default: llama
MEDIUM_GAIANET_SERVER_URL= # Default: https://llama8b.gaia.domains/v1
LARGE_GAIANET_MODEL= # Default: qwen72b
LARGE_GAIANET_SERVER_URL= # Default: https://qwen72b.gaia.domains/v1

GAIANET_EMBEDDING_MODEL=
USE_GAIANET_EMBEDDING= # Set to TRUE for GAIANET/768, leave blank for local

# EVM
EVM_PRIVATE_KEY=
@@ -191,29 +189,28 @@ STARKNET_RPC_URL=
INTIFACE_WEBSOCKET_URL=ws://localhost:12345

# Farcaster Neynar Configuration
FARCASTER_FID= # The FID associated with the account you are sending casts from
FARCASTER_NEYNAR_API_KEY= # Neynar API key: https://neynar.com/
FARCASTER_NEYNAR_SIGNER_UUID= # Signer for the account you are sending casts from. Create a signer here: https://dev.neynar.com/app
FARCASTER_DRY_RUN=false # Set to true if you want to run the bot without actually publishing casts
FARCASTER_POLL_INTERVAL=120 # How often (in seconds) the bot should check for farcaster interactions (replies and mentions)

# Coinbase
COINBASE_COMMERCE_KEY= # From Coinbase developer portal
COINBASE_API_KEY= # From Coinbase developer portal
COINBASE_PRIVATE_KEY= # From Coinbase developer portal
COINBASE_GENERATED_WALLET_ID= # Not your address but the wallet ID from generating a wallet through the plugin
COINBASE_GENERATED_WALLET_HEX_SEED= # Not your address but the wallet hex seed from generating a wallet through the plugin and calling export
COINBASE_NOTIFICATION_URI= # For the webhook plugin: the URI you want webhooks sent to (for a dummy endpoint, use https://webhook.site)

# Conflux Configuration
CONFLUX_CORE_PRIVATE_KEY=
CONFLUX_CORE_SPACE_RPC_URL=
CONFLUX_ESPACE_PRIVATE_KEY=
CONFLUX_ESPACE_RPC_URL=
CONFLUX_MEME_CONTRACT_ADDRESS=

# ZeroG
ZEROG_INDEXER_RPC=
ZEROG_EVM_RPC=
ZEROG_PRIVATE_KEY=
@@ -225,43 +222,43 @@ ZEROG_FLOW_ADDRESS=
# - DOCKER: Uses simulator at host.docker.internal:8090 (for docker development)
# - PRODUCTION: No simulator, uses production endpoints
# Defaults to OFF if not specified
TEE_MODE=OFF # LOCAL | DOCKER | PRODUCTION
WALLET_SECRET_SALT= # ONLY define this if you want to use the TEE plugin; otherwise it will throw errors
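# Example (illustrative; the salt is a made-up placeholder): run against the local simulator.
# TEE_MODE=LOCAL
# WALLET_SECRET_SALT=replace-with-a-random-string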

# Galadriel Configuration
GALADRIEL_API_KEY=gal-* # Get from https://dashboard.galadriel.com/

# Venice Configuration
VENICE_API_KEY= # Generate from Venice settings
SMALL_VENICE_MODEL= # Default: llama-3.3-70b
MEDIUM_VENICE_MODEL= # Default: llama-3.3-70b
LARGE_VENICE_MODEL= # Default: llama-3.1-405b
IMAGE_VENICE_MODEL= # Default: fluently-xl

# fal.ai Configuration
FAL_API_KEY=
FAL_AI_LORA_PATH=

# WhatsApp Cloud API Configuration
WHATSAPP_ACCESS_TOKEN= # Permanent access token from Facebook Developer Console
WHATSAPP_PHONE_NUMBER_ID= # Phone number ID from WhatsApp Business API
WHATSAPP_BUSINESS_ACCOUNT_ID= # Business Account ID from Facebook Business Manager
WHATSAPP_WEBHOOK_VERIFY_TOKEN= # Custom string for webhook verification
WHATSAPP_API_VERSION=v17.0 # WhatsApp API version (default: v17.0)
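# Example (hypothetical placeholder values only):
# WHATSAPP_PHONE_NUMBER_ID=123456789012345
# WHATSAPP_WEBHOOK_VERIFY_TOKEN=my-custom-verify-token
# WHATSAPP_API_VERSION=v17.0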

# Flow Blockchain Configuration
FLOW_ADDRESS=
FLOW_PRIVATE_KEY= # Private key for SHA3-256 + P256 ECDSA
FLOW_NETWORK= # Default: mainnet
FLOW_ENDPOINT_URL= # Default: https://mainnet.onflow.org
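# Example (pins the documented defaults explicitly):
# FLOW_NETWORK=mainnet
# FLOW_ENDPOINT_URL=https://mainnet.onflow.org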

# ICP
INTERNET_COMPUTER_PRIVATE_KEY=
INTERNET_COMPUTER_ADDRESS=

# Aptos
APTOS_PRIVATE_KEY= # Aptos private key
APTOS_NETWORK= # Must be one of mainnet, testnet
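# Example (illustrative; per the note above, the network must be mainnet or testnet):
# APTOS_NETWORK=testnet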

# EchoChambers Configuration
ECHOCHAMBERS_API_URL=http://127.0.0.1:3333