Ollama API support (#87)
* init

* Clean-up local llm settings
UdaraJay authored May 18, 2024
1 parent c0ad9c5 commit 2a1d0b3
Showing 21 changed files with 1,055 additions and 158 deletions.
10 changes: 6 additions & 4 deletions package.json

@@ -110,14 +110,16 @@
     "electron-debug": "^3.2.0",
     "electron-log": "^4.4.8",
     "electron-updater": "^5.3.0",
-    "framer-motion": "^10.12.18",
+    "framer-motion": "^11.2.4",
     "gray-matter": "^4.0.3",
     "lunr": "^2.3.9",
     "luxon": "^3.3.0",
     "openai": "^4.44.0",
-    "react": "^19.0.0-beta-04b058868c-20240508",
-    "react-dom": "^19.0.0-beta-04b058868c-20240508",
-    "react-router-dom": "^6.11.2",
+    "react": "^19.0.0-beta-26f2496093-20240514",
+    "react-dom": "^19.0.0-beta-26f2496093-20240514",
+    "react-markdown": "^9.0.1",
+    "react-router": "^6.23.1",
+    "react-router-dom": "^6.23.1",
     "react-textarea-autosize": "^8.5.3",
     "react-virtuoso": "^4.7.10"
   },
4 changes: 2 additions & 2 deletions release/app/package-lock.json

Some generated files are not rendered by default.

1 change: 1 addition & 0 deletions src/main/main.ts

@@ -7,6 +7,7 @@ import {
   net,
   Menu,
   nativeTheme,
+  session,
 } from 'electron';
 import MenuBuilder from './menu';
 import { resolveHtmlPath } from './util';
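
The only visible change here is the new `session` import; the hunk that uses it is collapsed. A common reason to pull in Electron's session API in a change like this is to adjust request headers so the renderer can reach a local Ollama server. A minimal sketch of that pattern, purely illustrative (the filter URL and handler are assumptions, not code from this commit):

import { app, session } from 'electron';

// Illustrative sketch: rewrite the Origin header on requests to a local
// Ollama server so it accepts calls coming from the Electron renderer.
app.whenReady().then(() => {
  const filter = { urls: ['http://localhost:11434/*'] };
  session.defaultSession.webRequest.onBeforeSendHeaders(
    filter,
    (details, callback) => {
      details.requestHeaders['Origin'] = 'http://localhost:11434';
      callback({ requestHeaders: details.requestHeaders });
    }
  );
});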
8 changes: 4 additions & 4 deletions src/renderer/App.scss

@@ -199,8 +199,8 @@
   --base-green: #128212;
   --base-red: rgb(255, 116, 85);
   --base-red-light: #3d2323;
-  --active: #a6b776;
-  --active-hover: #ecffb8;
+  --active: #b9df4e;
+  --active-hover: #d0ff50;
   --active-text: #251b24;
   --border: #5f4263;
   --bg: #251b24;

@@ -240,14 +240,14 @@
   --nav-height: 52px;
   --primary: #fff7e9;
   --secondary: #aea392;
-  --base: #ff9634;
+  --base: #ff8818;
   --base-text: hsl(34, 76%, 15%);
   --base-hover: #df7000;
   --base-yellow: #776b0e;
   --base-green: #128212;
   --base-red: rgb(255, 71, 71);
   --base-red-light: #3d2323;
-  --active: #ffa72b;
+  --active: #ff8a2b;
   --active-hover: rgb(255, 184, 85);
   --active-text: #2a2420;
   --border: #635342;
83 changes: 73 additions & 10 deletions src/renderer/context/AIContext.js

@@ -8,16 +8,34 @@ import {
 import OpenAI from 'openai';
 import { usePilesContext } from './PilesContext';
 
+const OLLAMA_URL = 'http://localhost:11434/v1';
+const OPENAI_URL = 'https://api.openai.com/v1';
+
 const defaultPrompt =
   'You are an AI within a journaling app. Your job is to help the user reflect on their thoughts in a thoughtful and kind manner. The user can never directly address you or directly respond to you. Try not to repeat what the user said, instead try to seed new ideas, encourage or debate. Keep your responses concise, but meaningful.';
 
 export const AIContext = createContext();
 
+const getBaseUrl = () => {
+  return localStorage.getItem('baseUrl') ?? OPENAI_URL;
+};
+
+const getOllamaStatus = () => {
+  return JSON.parse(localStorage.getItem('ollamaEnabled')) ?? false;
+};
+
+const getModel = () => {
+  return localStorage.getItem('model') ?? 'gpt-4o';
+};
+
 export const AIContextProvider = ({ children }) => {
   const { currentPile, updateCurrentPile } = usePilesContext();
   const [ai, setAi] = useState(null);
   const [prompt, setPrompt] = useState(defaultPrompt);
   const [memory, setMemory] = useState([]);
+  const [ollama, setOllama] = useState(getOllamaStatus() ?? false);
+  const [model, setModelState] = useState(getModel());
+  const [baseUrl, setBaseUrlState] = useState(getBaseUrl());
 
   // Sync AI settings from currentPile
   useEffect(() => {

@@ -27,28 +45,45 @@ export const AIContextProvider = ({ children }) => {
       setPrompt(currentPile.AIPrompt);
       setupAi();
     }
-  }, [currentPile]);
+  }, [currentPile, ollama, baseUrl]);
 
-  const setupAi = async () => {
+  const setupAi = useCallback(async () => {
     const key = await getKey();
 
     if (!key) return;
 
     const openaiInstance = new OpenAI({
-      baseURL: getBaseUrl(),
-      apiKey: key,
+      baseURL: baseUrl,
+      apiKey: ollama ? 'ollama' : key,
       dangerouslyAllowBrowser: true,
     });
 
     setAi(openaiInstance);
-  };
+  }, [ollama, baseUrl]);
 
-  const getBaseUrl = () => {
-    return localStorage.getItem('baseUrl') ?? 'https://api.openai.com/v1';
-  };
-
-  const setBaseUrl = async (baseUrl) => {
-    localStorage.setItem('baseUrl', baseUrl);
-    await setupAi();
-  };
+  const setBaseUrl = (baseUrl) => {
+    localStorage.setItem('baseUrl', baseUrl);
+    setBaseUrlState(baseUrl);
+  };
+
+  const setModel = async (model) => {
+    localStorage.setItem('model', model);
+    setModelState(model);
+  };
+
+  const toggleOllama = () => {
+    setOllama((prev) => {
+      if (!prev) {
+        localStorage.setItem('ollamaEnabled', true);
+        setModel('llama3');
+        setBaseUrl(OLLAMA_URL);
+      } else {
+        localStorage.setItem('ollamaEnabled', false);
+        setModel('gpt-4o');
+        setBaseUrl(OPENAI_URL);
+      }
+      return !prev;
+    });
+  };
 
   const getKey = (accountName) => {

@@ -70,16 +105,44 @@
     });
   };
 
+  const getResponse = useCallback(
+    async (stream = false, messages = [], callback = () => {}) => {
+      try {
+        const completion = await ai.chat.completions.create({
+          model: model,
+          max_tokens: 400,
+          messages: messages,
+          stream: stream,
+        });
+
+        if (stream) {
+          for await (const part of completion) {
+            const token = part.choices[0].delta.content;
+            callback(token);
+          }
+        } else {
+          callback(completion.choices[0].message.content);
+        }
+      } catch (error) {
+        console.error(error.message);
+      }
+    },
+    [ai, model]
+  );
+
   const AIContextValue = {
     ai,
-    getBaseUrl,
+    baseUrl,
     setBaseUrl,
     prompt,
     setPrompt,
     setKey,
     getKey,
     deleteKey,
     updateSettings,
+    ollama,
+    toggleOllama,
+    model,
+    setModel,
   };
 
   return (
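
The pattern this file relies on: Ollama serves an OpenAI-compatible API under /v1, so the same `openai` SDK client covers both providers; only the `baseURL` and a placeholder API key change when Ollama is enabled. A standalone sketch of that idea (not code from this commit; assumes a local Ollama server with the `llama3` model pulled):

import OpenAI from 'openai';

// Point the standard OpenAI SDK at a local Ollama server.
// Ollama ignores the key's value, but the SDK requires one.
const client = new OpenAI({
  baseURL: 'http://localhost:11434/v1',
  apiKey: 'ollama',
});

const completion = await client.chat.completions.create({
  model: 'llama3',
  messages: [{ role: 'user', content: 'Say hello in five words.' }],
});

console.log(completion.choices[0].message.content);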
15 changes: 15 additions & 0 deletions src/renderer/context/IndexContext.js

@@ -15,10 +15,12 @@ export const IndexContextProvider = ({ children }) => {
   const [filters, setFilters] = useState();
   const [searchOpen, setSearchOpen] = useState(false);
   const [index, setIndex] = useState(new Map());
+  const [latestThreads, setLatestThreads] = useState([]);
 
   useEffect(() => {
     if (currentPile) {
       loadIndex(getCurrentPilePath());
+      loadLatestThreads();
     }
   }, [currentPile]);
 

@@ -42,6 +44,7 @@ export const IndexContextProvider = ({ children }) => {
       .invoke('index-add', newEntryPath)
       .then((index) => {
         setIndex(index);
+        loadLatestThreads();
       });
     },
     [currentPile]

@@ -54,6 +57,7 @@ export const IndexContextProvider = ({ children }) => {
   const updateIndex = useCallback(async (filePath, data) => {
     window.electron.ipc.invoke('index-update', filePath, data).then((index) => {
       setIndex(index);
+      loadLatestThreads();
     });
   }, []);
 

@@ -71,6 +75,16 @@ export const IndexContextProvider = ({ children }) => {
     return window.electron.ipc.invoke('index-vector-search', query, topN);
   }, []);
 
+  const loadLatestThreads = useCallback(async (count = 25) => {
+    const items = await search('');
+    const latest = items.slice(0, count);
+
+    const entryFilePaths = latest.map((entry) => entry.ref);
+    const latestThreadsAsText = await getThreadsAsText(entryFilePaths);
+
+    setLatestThreads(latestThreadsAsText);
+  }, []);
+
   const indexContextValue = {
     index,
     refreshIndex,

@@ -82,6 +96,7 @@ export const IndexContextProvider = ({ children }) => {
     setSearchOpen,
     vectorSearch,
     getThreadsAsText,
+    latestThreads,
   };
 
   return (
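
Because `latestThreads` is refreshed on every index add/update and exposed as ready-made text, consumers can read it straight from the context. A hypothetical consumer (component name and usage assumed, not part of this commit):

import React from 'react';
import { useIndexContext } from 'renderer/context/IndexContext';

// Hypothetical component: shows the preloaded latest-threads text.
const LatestThreadsPreview = () => {
  const { latestThreads } = useIndexContext();
  return <pre>{latestThreads}</pre>;
};

export default LatestThreadsPreview;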
57 changes: 35 additions & 22 deletions src/renderer/hooks/useChat.js

@@ -1,28 +1,38 @@
-import { useState, useEffect, useCallback } from 'react';
+import { useState, useEffect, useCallback, useMemo } from 'react';
 import { useAIContext } from 'renderer/context/AIContext';
 import { useIndexContext } from 'renderer/context/IndexContext';
 
 const useChat = () => {
-  const { ai, prompt } = useAIContext();
-  const { vectorSearch, getThreadsAsText } = useIndexContext();
-  const STARTER = [
-    {
-      role: 'system',
-      content:
-        'You are a helpful assistant within a digital journaling app called Pile.',
-    },
-    {
-      role: 'system',
-      content:
-        'The user has provided a description of your personality:' + prompt,
-    },
-    {
-      role: 'system',
-      content: `You are about to start a conversation with the user, usually involving reflection or discussion about their thoughts in this journal. For each of their messages, the system will provide a list of relevant journal entries as context to you, be aware of it when you answer and use whatever is relevant and appropriate. You are a wise librarian of my thoughts, providing advice and counsel. You try to keep responses concise and get to the point quickly. You address the user as 'you', you don't need to know their name. You should engage with the user like you're a human. \nThe date and time is: ${new Date().toString()}. The user starts the conversation:`,
-    },
-  ];
-  const [messages, setMessages] = useState(STARTER);
+  const { ai, prompt, model } = useAIContext();
+  const { vectorSearch, getThreadsAsText, latestThreads } = useIndexContext();
+  const STARTER = useMemo(() => {
+    return [
+      {
+        role: 'system',
+        content:
+          'You are a helpful assistant within a digital journaling app called Pile.',
+      },
+      {
+        role: 'system',
+        content:
+          'The user has provided a description of your personality:' + prompt,
+      },
+      {
+        role: 'system',
+        content: `You are about to start a conversation with the user, usually involving reflection or discussion about their thoughts in this journal. For each of their messages, the system will provide a list of relevant journal entries as context to you, be aware of it when you answer and use whatever is relevant and appropriate. You are a wise librarian of my thoughts, providing advice and counsel. You try to keep responses concise and get to the point quickly. Plain-text responses only. You address the user as 'you', you don't need to know their name. You should engage with the user like you're a human. When you mention time, always do it relative to the current time– \nthe date and time at this moment is: ${new Date().toString()}.`,
+      },
+      {
+        role: 'system',
+        content: `Here are the 10 latest journal entries from the user: \n\n${latestThreads}`,
+      },
+      {
+        role: 'system',
+        content: `The user starts the conversation:`,
+      },
+    ];
+  }, [prompt, latestThreads]);
 
+  const [messages, setMessages] = useState(STARTER);
   const resetMessages = () => setMessages(STARTER);
 
   const addMessage = useCallback(

@@ -32,7 +42,10 @@ const useChat = () => {
         content: messsage,
       };
 
-      const relevantEntries = await vectorSearch(messsage, 50);
+      const lastSystemMessage = messages[messages.length - 1];
+      const augmentedMessages = `${lastSystemMessage.content} \n\n${messsage}`;
+
+      const relevantEntries = await vectorSearch(augmentedMessages, 50);
       const entryFilePaths = relevantEntries.map((entry) => entry.ref);
       const threadsAsText = await getThreadsAsText(entryFilePaths);
       const system = {

@@ -49,7 +62,7 @@
   const getAIResponse = useCallback(async (messages, callback = () => {}) => {
     setMessages(messages);
     const stream = await ai.chat.completions.create({
-      model: 'gpt-4-turbo',
+      model: model,
       max_tokens: 400,
       messages: messages,
       stream: true,
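
Taken together, the hook now implements a small retrieval-augmented loop: the user's message is combined with the most recent system context, that augmented query drives a vector search for up to 50 related entries, the matching threads are injected as a system message, and the reply is streamed token by token with the user-selected model. A hypothetical wiring of the hook (assumes it returns addMessage and getAIResponse, which the truncated hunks suggest but do not show):

import { useState } from 'react';
import useChat from 'renderer/hooks/useChat';

// Hypothetical consumer: append the user's text, then stream the
// assistant's reply into local state one token at a time.
const ChatPanel = () => {
  const { addMessage, getAIResponse } = useChat();
  const [reply, setReply] = useState('');

  const send = async (text) => {
    const messages = await addMessage(text);
    await getAIResponse(messages, (token) => {
      setReply((prev) => prev + (token ?? ''));
    });
  };

  return null; // render an input wired to send(), plus the streamed reply
};

export default ChatPanel;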
3 changes: 3 additions & 0 deletions src/renderer/icons/index.js

@@ -47,3 +47,6 @@ export * from './img/PersonIcon';
 export * from './img/HighlightIcon';
 export * from './img/FolderIcon';
 export * from './img/RelevantIcon';
+
+// logos
+export * from './logos/OllamaIcon';
33 changes: 33 additions & 0 deletions src/renderer/icons/logos/OllamaIcon.js

Some generated files are not rendered by default.

1 change: 1 addition & 0 deletions src/renderer/icons/logos/readme.md

@@ -0,0 +1 @@
+Any logos used are trademarks of their respective owners and are used here solely to represent the corresponding APIs or Models in the interface. This usage is for identification purposes only and does not imply endorsement by or from the respective owners.