use ollama openai compat
pluja committed Feb 11, 2024
1 parent 5f9ebd1 commit fccef84
Showing 5 changed files with 159 additions and 354 deletions.
3 changes: 3 additions & 0 deletions README.md
@@ -24,6 +24,9 @@
## Offline Usage with [Ollama](https://ollama.ai)

> [!IMPORTANT]
> You need Ollama v0.1.24 or greater.
1. Install Ollama from [here](https://ollama.ai/download) (or use [ollama's docker image](https://hub.docker.com/r/ollama/ollama)).
2. Download models using `ollama pull <model-name>`.
   - **Note**: Unless you have changed the default, you will need to pull the default model: `ollama pull dolphin-mistral:latest`
88 changes: 85 additions & 3 deletions llm/llm.go
@@ -1,5 +1,15 @@
package llm

import (
	"context"
	"encoding/json"
	"fmt"
	"log"

	openai "github.com/sashabaranov/go-openai"
	"pluja.dev/maestro/db"
)

var SystemPrompt = `As an expert Shell command interpreter, your directives are:
- Ensure optimal, direct solutions.
- Be aware of the user's environment.
@@ -18,10 +28,23 @@ Adhere to this JSON response structure:
"comment": "Concise explanation or context"
}
]
}`
}
\n\nOnly JSON replies allowed. No other formats are accepted. You are an API. Be consistent.
`

type Llm struct {
	Oai    bool
	Ollama Ollama
	Openai OpenAI
}

type Ollama struct {
	Endpoint string
	Model    string
}

type OpenAI struct {
	Gpt4 bool
}

type Response struct {
@@ -32,3 +55,62 @@ type Command struct {
	Command string `json:"command"`
	Comment string `json:"comment"`
}
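
To make the contract concrete, here is a minimal, self-contained sketch of parsing a reply in the shape the system prompt enforces. The Commands field on Response is an inference from the prompt's JSON schema, since the struct body is collapsed in this diff:

package main

import (
	"encoding/json"
	"fmt"
)

// Mirrors of llm.Command and (assumed) llm.Response, for illustration only.
type Command struct {
	Command string `json:"command"`
	Comment string `json:"comment"`
}

type Response struct {
	Commands []Command `json:"commands"` // assumed field name, per the prompt's schema
}

func main() {
	raw := `{"commands":[{"command":"du -sh *","comment":"Show the size of each item in the current directory"}]}`
	var r Response
	if err := json.Unmarshal([]byte(raw), &r); err != nil {
		panic(err)
	}
	fmt.Println(r.Commands[0].Command) // du -sh *
}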

func (llm Llm) Ask(text string) (Response, error) {
	client, model, err := llm.setupClientAndModel()
	if err != nil {
		return Response{}, err
	}

	ctx := context.Background()
	req := llm.prepareCompletionRequest(text, model)

	resp, err := client.CreateChatCompletion(ctx, req)
	if err != nil {
		log.Printf("Completion error: %v\n", err)
		return Response{}, err
	}
	if len(resp.Choices) == 0 {
		return Response{}, fmt.Errorf("empty completion: no choices returned")
	}

	var response Response
	if err := json.Unmarshal([]byte(resp.Choices[0].Message.Content), &response); err != nil {
		return Response{}, err
	}
	return response, nil
}

func (llm Llm) setupClientAndModel() (*openai.Client, string, error) {
	if llm.Oai {
		token, err := db.Badger.Get("oai-token")
		if err != nil {
			return nil, "", err
		}
		if token == "" {
			return nil, "", fmt.Errorf("OpenAI API token not set. Please run `maestro -set-token <token>` first")
		}
		client := openai.NewClient(token)
		model := openai.GPT3Dot5Turbo0125
		if llm.Openai.Gpt4 {
			model = openai.GPT4TurboPreview
		}
		return client, model, nil
	}

	// Ollama (v0.1.24+) exposes an OpenAI-compatible API under /v1, so the
	// same client works with a rewritten base URL and a dummy token.
	openaiConfig := openai.DefaultConfig("ollama")
	openaiConfig.BaseURL = fmt.Sprintf("%s/v1", llm.Ollama.Endpoint)
	client := openai.NewClientWithConfig(openaiConfig)
	return client, llm.Ollama.Model, nil
}
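
The Ollama branch above is the heart of this commit: since v0.1.24, Ollama serves an OpenAI-compatible API under /v1, so one go-openai client drives both backends. As a quick standalone connectivity check, a sketch assuming Ollama is running on its default port (the address and token are illustrative):

package main

import (
	"context"
	"fmt"
	"log"

	openai "github.com/sashabaranov/go-openai"
)

func main() {
	// "ollama" is a dummy token: Ollama ignores it, but go-openai requires one.
	cfg := openai.DefaultConfig("ollama")
	cfg.BaseURL = "http://localhost:11434/v1" // Ollama's default listen address (assumed here)

	client := openai.NewClientWithConfig(cfg)
	models, err := client.ListModels(context.Background())
	if err != nil {
		log.Fatalf("OpenAI-compatible endpoint unreachable: %v", err)
	}
	for _, m := range models.Models {
		fmt.Println(m.ID) // each locally pulled model, e.g. dolphin-mistral:latest
	}
}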

func (llm Llm) prepareCompletionRequest(text string, model string) openai.ChatCompletionRequest {
	return openai.ChatCompletionRequest{
		Model: model,
		Messages: []openai.ChatCompletionMessage{
			{Role: openai.ChatMessageRoleSystem, Content: SystemPrompt},
			{Role: openai.ChatMessageRoleUser, Content: text},
		},
		// JSON mode: constrains the model to emit a single JSON object.
		ResponseFormat: &openai.ChatCompletionResponseFormat{
			Type: openai.ChatCompletionResponseFormatTypeJSONObject,
		},
	}
}
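
Putting it together, a hypothetical caller might look like the sketch below. The endpoint and model values are illustrative, and resp.Commands again assumes the field name implied by the prompt's schema:

package main

import (
	"fmt"
	"log"

	"pluja.dev/maestro/llm"
)

func main() {
	// Oai defaults to false, so this takes the Ollama path; set Oai: true
	// (and Openai.Gpt4 for GPT-4) to use the hosted backend instead.
	local := llm.Llm{
		Ollama: llm.Ollama{
			Endpoint: "http://localhost:11434", // illustrative; Ollama's default
			Model:    "dolphin-mistral:latest",
		},
	}
	resp, err := local.Ask("find all files larger than 100MB under /var")
	if err != nil {
		log.Fatal(err)
	}
	// Commands is an assumed field; Response's body is collapsed in this diff.
	for _, c := range resp.Commands {
		fmt.Printf("%s  # %s\n", c.Command, c.Comment)
	}
}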
196 changes: 0 additions & 196 deletions llm/ollama.go

This file was deleted.

69 changes: 0 additions & 69 deletions llm/openai.go

This file was deleted.
