From 53274575ea77be81191a1f55c301182ae4e03f80 Mon Sep 17 00:00:00 2001 From: extremeheat Date: Tue, 7 May 2024 21:10:09 -0400 Subject: [PATCH 01/12] Add browser interface + experimental studio --- .gitignore | 1 + package.json | 3 + src/CompletionService.js | 8 +- src/service/browser.js | 153 ++++++++ src/service/server.js | 76 ++++ src/service/studio/index.html | 692 ++++++++++++++++++++++++++++++++++ src/tools.js | 61 +-- src/tools/misc.js | 61 +++ src/tools/tokens.js | 13 + src/util.js | 67 +++- 10 files changed, 1070 insertions(+), 65 deletions(-) create mode 100644 src/service/browser.js create mode 100644 src/service/server.js create mode 100644 src/service/studio/index.html create mode 100644 src/tools/misc.js create mode 100644 src/tools/tokens.js diff --git a/.gitignore b/.gitignore index 5e15658..efe9c36 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,7 @@ node_modules package-lock.json playground +dist test/*.html src/tools/repos __* diff --git a/package.json b/package.json index 0296f56..5ead975 100644 --- a/package.json +++ b/package.json @@ -11,6 +11,7 @@ "test": "npm run mocha", "pretest": "npm run lint", "mocha": "mocha --bail test/*.test.js", + "buildWeb": "browserify src/service/browser.js -o dist/langxlang.js", "lint": "standard", "fix": "standard --fix" }, @@ -34,6 +35,8 @@ }, "homepage": "https://github.com/extremeheat/LXL#readme", "devDependencies": { + "basic-ipc": "^0.1.1", + "browserify": "^17.0.0", "langxlang": "file:.", "mocha": "^10.0.0", "standard": "^17.0.0" diff --git a/src/CompletionService.js b/src/CompletionService.js index 4dbbbbc..89151a0 100644 --- a/src/CompletionService.js +++ b/src/CompletionService.js @@ -41,7 +41,9 @@ class CompletionService { } if (this.geminiApiKey) { const geminiList = await gemini.listModels(this.geminiApiKey) - Object.assign(geminiModels, Object.fromEntries(geminiList.map((e) => ([e.name, e])))) + Object.assign(geminiModels, Object.fromEntries(geminiList + .filter((e) => e.name.startsWith('models/')) + .map((e) => ([e.name.replace('models/', ''), e])))) } return { openai: openaiModels, google: geminiModels } } @@ -107,6 +109,10 @@ class CompletionService { const msg = structuredClone(entry) if (msg.role === 'model') msg.role = 'assistant' if (msg.role === 'guidance') msg.role = 'assistant' + if (msg.text != null) { + delete msg.text + msg.content = entry.text + } return msg }).filter((msg) => msg.content), { diff --git a/src/service/browser.js b/src/service/browser.js new file mode 100644 index 0000000..1344269 --- /dev/null +++ b/src/service/browser.js @@ -0,0 +1,153 @@ +// Exports for the browser bundle +const { EventEmitter } = require('events') +const { getModelInfo } = require('../util') +const ipc = require('basic-ipc/browser') + +const mdp = require('../tools/mdp') +const stripping = require('../tools/stripping') +const tokenizer = require('../tools/tokens') +const misc = require('../tools/misc') + +function setValue (id, to) { + const el = document.getElementById(id) + if (el) el.value = to +} +function getValue (id, parser) { + const el = document.getElementById(id) + return el ? (parser ? 
parser(el.value) : el.value) : null +} +function setRangeBounds (id, min, max) { + const el = document.getElementById(id) + if (el) { + el.min = min + el.max = max + } +} + +class Session extends EventEmitter { + /** @type {import('basic-ipc').ClientEx} */ + client + + constructor (options) { + super() + this.serverAddress = options.serverAddress + } + + async connect () { + this.client = ipc.createClient({ ws: { url: this.serverAddress } }) + console.log('Client', this.client) + window.ipcClient = this.client + this.ready = this.client.waitForReady() + await this.ready + const response = await this.client.request('hello', {}) + this.setModelsList(response.models) + this.emit('ready') + } + + updateForModel (model) { + const info = getModelInfo(model) + this.setGenerationOptionToDefaults(info) + if (this.bindings?.generationOptions?.model) { + setValue(this.bindings.generationOptions.model, model) + } + } + + setModelsList (models) { + this.models = models + this.emit('modelsListUpdate', models) + // if (this.bindings.generationOptions.model) { + // const el = document.getElementById(this.bindings.generationOptions.model) + // const currentlySelected = el.value + // el.innerHTML = '' + // for (const model of models) { + // const option = document.createElement('option') + // option.value = JSON.stringify({ service: model.service, model: model.model }) + // option.textContent = model.displayName + // el.appendChild(option) + // } + // // re-select the previously selected model + // if (currentlySelected) { + // setValue(this.bindings.generationOptions.model, currentlySelected) + // } else { + // console.log('No model selected, setting to default', el.selectedIndex) + // el.selectedIndex = 0 + // window.el = el + // } + // } + } + + setGenerationOptionToDefaults (opts) { + if (opts.outputTokens != null) { + setRangeBounds('maxTokens', 1, opts.outputTokens) + setValue('maxTokens', opts.outputTokens) + } + } + + setGenerationOpt (key, value) { + if (key === 'model') { + // nop + } else if (this.bindings.generationOptions[key]) { + setValue(this.bindings.generationOptions[key], value) + setValue('text-' + this.bindings.generationOptions[key], value) + } + } + + _listenRadio (radioId, textId) { + const el = document.getElementById(radioId) + el.addEventListener('change', () => { + // update the accompanying text adjacent to the radio slider + setValue(textId, el.value) + }) + } + + bindForm (options) { + this.bindings = options + if (options.generationOptions) { + const opts = options.generationOptions + if (opts.temperature) this._listenRadio(opts.temperature, 'text-' + opts.temperature) + if (opts.maxTokens) this._listenRadio(opts.maxTokens, 'text-' + opts.maxTokens) + if (opts.topP) this._listenRadio(opts.topP, 'text-' + opts.topP) + if (opts.topK) this._listenRadio(opts.topK, 'text-' + opts.topK) + } + } + + getBoundedGenerationOptions () { + if (!this.bindings?.generationOptions) return {} + const opts = this.bindings.generationOptions + return { + maxTokens: getValue(opts.maxTokens, parseInt), + temperature: getValue(opts.temperature, parseFloat), + topP: getValue(opts.topP, parseFloat), + topK: getValue(opts.topK, parseFloat), + model: getValue(opts.model, JSON.parse) + } + } + + async sendChatCompletionRequest (messages, genOpts, chunkCb) { + await this.ready + const opts = { ...this.getBoundedGenerationOptions(), ...genOpts } + const response = await this.client.request('chatCompletion', { + service: opts.model.service, + model: opts.model.model, + messages, + generationOptions: opts + 
}, chunkCb, 1000 * 60 * 2) + return response + } +} + +function createSession (options) { + const session = new Session(options) + session.connect() + return session +} + +window.lxl = { + createSession, + tools: { + stripping, + tokenizer, + _segmentPromptByRoles: mdp.segmentByRoles, + ...misc + } +} diff --git a/src/service/server.js b/src/service/server.js new file mode 100644 index 0000000..793ab66 --- /dev/null +++ b/src/service/server.js @@ -0,0 +1,76 @@ +// @ts-check +const ipc = require('basic-ipc') +const { toTitleCase, getModelInfo } = require('../util') + +async function main (port, services) { + /** @type {import('basic-ipc').ServerEx} */ + const server = ipc.createServer({ + ws: { port } + }) + + server.on('listening', () => { + console.log('Listening on port', port) + }) + + const servingModels = [] + for (const serviceName in services) { + const service = services[serviceName] + const models = await service.listModels() + for (const author in models) { + for (const modelName in models[author]) { + try { + var modelInfo = getModelInfo(modelName) // eslint-disable-line no-var + } catch { + console.log('Skip', modelName, 'due to error') + continue + } + servingModels.push({ + service: serviceName, + author, + model: modelName, + displayName: [serviceName, toTitleCase(author), modelName].filter(e => !!e).join(': '), + details: modelInfo + ? { + maxOutputTokens: modelInfo.outputTokens || modelInfo.contextWindow, + maxInputTokens: modelInfo.inputTokens + } + : null + }) + } + } + // console.log('Service', serviceName, 'has models', models) + } + + server.on('join', function (client) { + client.receive('hello', (/** @type {Record} */ message, /** @type {import('basic-ipc').MessageCreator} */ resp) => { + resp.sendResponse({ + models: servingModels + }) + }) + + client.receive('chatCompletion', (req, resp) => { + const { service, model, messages } = req + const completionService = services[service || ''] + if (!completionService) { + resp.sendResponse({ error: `No service for ${service}` }) + return + } + completionService.requestChatCompletion(model, { messages }, (chunk) => { + resp.sendChunk(chunk) + }) + .then((result) => { + resp.sendResponse({ result }) + }) + .catch((err) => { + console.error('Error in chatCompletion', err) + resp.sendResponse({ error: err.message }) + }) + }) + }) +} + +const { CompletionService } = require('../CompletionService') + +const services = {} +services[''] = new CompletionService() +main(8091, services) diff --git a/src/service/studio/index.html b/src/service/studio/index.html new file mode 100644 index 0000000..b5d8292 --- /dev/null +++ b/src/service/studio/index.html @@ -0,0 +1,692 @@ + + + + + + + LXL Studio v1 + + + + + + + + + +

+    [Placeholder: the 692 added lines of this page, its HTML markup and inline scripts, were lost to tag stripping during extraction, leaving only the stray `+` diff markers around this notice. The recoverable text is the page title "LXL Studio v1" and the loading notice:]
+    Please wait while the connection to the LXL server is established...
+
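
How the pieces above fit together, as a usage sketch (illustrative, not part of the patch): src/service/server.js exposes a default CompletionService over a basic-ipc WebSocket on port 8091, "npm run buildWeb" bundles src/service/browser.js into dist/langxlang.js, and the studio page loads that bundle and drives the server through window.lxl. Assuming a server started with "node src/service/server.js" and an API key configured so at least one model is served, browser code along these lines should work; the names and response shape are taken from browser.js and server.js above:

// Usage sketch, assuming dist/langxlang.js is loaded in the page and a local
// LXL server is running (server.js listens on port 8091 by default).
const session = window.lxl.createSession({ serverAddress: 'ws://localhost:8091' })
session.on('modelsListUpdate', (models) => {
  console.log('Models served:', models.map((m) => m.displayName))
})
session.on('ready', async () => {
  const response = await session.sendChatCompletionRequest(
    [{ role: 'user', content: 'Hello!' }],
    // service '' selects the default CompletionService registered in server.js
    { model: { service: '', model: 'gpt-3.5-turbo' } },
    (chunk) => console.log('chunk', chunk.delta ?? chunk.content)
  )
  // server.js replies with { result } on success or { error } on failure
  console.log('Done:', response.result ?? response.error)
})
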
+ + + + + + + + + \ No newline at end of file diff --git a/src/tools.js b/src/tools.js index 71cf6a1..d94f7fb 100644 --- a/src/tools.js +++ b/src/tools.js @@ -5,70 +5,17 @@ const yaml = require('./tools/yaml') const stripping = require('./tools/stripping') const mdp = require('./tools/mdp') const md = require('./tools/md') - -function createTypeWriterEffectStream (to = process.stdout) { - // Instead of writing everything at once, we want a typewriter effect - // so we'll write one character at a time - let remainingToWrite = '' - const interval = setInterval(() => { - if (remainingToWrite.length > 0) { - process.stdout.write(remainingToWrite.slice(0, 2)) - remainingToWrite = remainingToWrite.slice(2) - } - }, 10) - - return function (chunk) { - if (chunk.done) { - // Immediately flush whatever is left - to.write(remainingToWrite) - to.write('\n') - clearInterval(interval) - } - remainingToWrite += chunk.content || chunk.delta - } -} - -function extractCodeblockFromMarkdown (md) { - const tokens = stripping.tokenizeMarkdown(stripping.normalizeLineEndings(md), {}) - return tokens.reduce((acc, token) => { - if (token[1] === 'code') { - acc.push({ - raw: token[0], - lang: token[2], - code: token[3] - }) - } - return acc - }, []) -} - -function extractJSFunctionCall (text, enclosing = '', closing) { - if (text.includes(enclosing)) { - let slice - const start = text.indexOf(enclosing) - if (closing) { - const end = text.indexOf(closing) - slice = text.slice(start, end + closing.length) - } else { - slice = text.slice(start) - } - const fnName = slice.slice(enclosing.length, slice.indexOf('(')) - const args = slice.slice(slice.indexOf('(') + 1, slice.lastIndexOf(')')) - const argsEncapsulated = '[' + args + ']' - const argsArray = JSON.parse(argsEncapsulated) - return { name: fnName, args: argsArray } - } -} +const tokenizer = require('./tools/tokens') +const misc = require('./tools/misc') module.exports = { makeVizForPrompt: viz.makeVizForPrompt, stripping, + tokenizer, collectFolderFiles: codebase.collectFolderFiles, collectGithubRepoFiles: codebase.collectGithubRepoFiles, concatFilesToMarkdown: codebase.concatFilesToMarkdown, - createTypeWriterEffectStream, - extractCodeblockFromMarkdown, - extractJSFunctionCall, + ...misc, wrapContent: mdp.wrapContentWithSufficientTokens, preMarkdown: mdp.preMarkdown, loadPrompt: mdp.loadPrompt, diff --git a/src/tools/misc.js b/src/tools/misc.js new file mode 100644 index 0000000..5210c3c --- /dev/null +++ b/src/tools/misc.js @@ -0,0 +1,61 @@ +const stripping = require('./stripping') + +function createTypeWriterEffectStream (to = process.stdout) { + // Instead of writing everything at once, we want a typewriter effect + // so we'll write one character at a time + let remainingToWrite = '' + const interval = setInterval(() => { + if (remainingToWrite.length > 0) { + to.write(remainingToWrite.slice(0, 2)) + remainingToWrite = remainingToWrite.slice(2) + } + }, 10) + + return function (chunk) { + if (chunk.done) { + // Immediately flush whatever is left + to.write(remainingToWrite) + to.write('\n') + clearInterval(interval) + } + remainingToWrite += chunk.content || chunk.delta + } +} + +function extractCodeblockFromMarkdown (md) { + const tokens = stripping.tokenizeMarkdown(stripping.normalizeLineEndings(md), {}) + return tokens.reduce((acc, token) => { + if (token[1] === 'code') { + acc.push({ + raw: token[0], + lang: token[2], + code: token[3] + }) + } + return acc + }, []) +} + +function extractJSFunctionCall (text, enclosing = '', closing) { + if 
(text.includes(enclosing)) { + let slice + const start = text.indexOf(enclosing) + if (closing) { + const end = text.indexOf(closing) + slice = text.slice(start, end + closing.length) + } else { + slice = text.slice(start) + } + const fnName = slice.slice(enclosing.length, slice.indexOf('(')) + const args = slice.slice(slice.indexOf('(') + 1, slice.lastIndexOf(')')) + const argsEncapsulated = '[' + args + ']' + const argsArray = JSON.parse(argsEncapsulated) + return { name: fnName, args: argsArray } + } +} + +module.exports = { + createTypeWriterEffectStream, + extractCodeblockFromMarkdown, + extractJSFunctionCall +} diff --git a/src/tools/tokens.js b/src/tools/tokens.js new file mode 100644 index 0000000..c8fb935 --- /dev/null +++ b/src/tools/tokens.js @@ -0,0 +1,13 @@ +const gpt4 = require('gpt-tokenizer/cjs/model/gpt-4') + +function tokenize (tokenizer, data) { + if (tokenizer === 'gpt-4') { + const encoded = gpt4.encode(data) + return { + length: encoded.length + } + } + throw new Error('Unknown tokenizer') +} + +module.exports = { tokenize } diff --git a/src/util.js b/src/util.js index fc109cb..b2e4faa 100644 --- a/src/util.js +++ b/src/util.js @@ -5,15 +5,68 @@ function cleanMessage (msg) { return msg.replace(/\r\n/g, '\n') } +function toTitleCase (str) { + return str.charAt(0).toUpperCase() + str.slice(1) +} + const knownModelInfo = { - 'gpt-3.5-turbo-16k': { author: 'openai', family: 'openai', displayName: 'GPT-3.5 Turbo 16k', safeId: 'gpt3_5turbo16k' }, - 'gpt-3.5-turbo': { author: 'openai', family: 'openai', displayName: 'GPT-3.5 Turbo', safeId: 'gpt3_5turbo' }, - 'gpt-4': { author: 'openai', family: 'openai', displayName: 'GPT-4', safeId: 'gpt4' }, - 'gpt-4-turbo-preview': { author: 'openai', family: 'openai', displayName: 'GPT-4 Turbo Preview', safeId: 'gpt4turbo' }, - 'gemini-1.0-pro': { author: 'google', family: 'gemini', displayName: 'Gemini 1.0 Pro', safeId: 'gemini1_0pro' }, + // OpenAI + 'gpt-3.5-turbo-16k': { + author: 'openai', + family: 'openai', + displayName: 'GPT-3.5 Turbo 16k', + safeId: 'gpt3_5turbo16k', + contextWindow: 16_000 + }, + 'gpt-3.5-turbo': { + author: 'openai', + family: 'openai', + displayName: 'GPT-3.5 Turbo', + safeId: 'gpt3_5turbo', + contextWindow: 16_000 + }, + 'gpt-4': { + author: 'openai', + family: 'openai', + displayName: 'GPT-4', + safeId: 'gpt4', + outputTokens: 4096 + }, + 'gpt-4-32k': { + author: 'openai', + family: 'openai', + displayName: 'GPT-4 32k', + safeId: 'gpt4_32k', + outputTokens: 32_000 + }, + 'gpt-4-turbo-preview': { + author: 'openai', + family: 'openai', + displayName: 'GPT-4 Turbo Preview', + safeId: 'gpt4turbo', + outputTokens: 4096 + }, + // Google / Gemini + 'gemini-1.0-pro': { + author: 'google', + family: 'gemini', + displayName: 'Gemini 1.0 Pro', + safeId: 'gemini1_0pro', + inputTokens: 30720, + outputTokens: 2048 + }, // Gemini 1.5 Pro has 2 requests per minute - 'gemini-1.5-pro': { author: 'google', family: 'gemini', displayName: 'Gemini 1.5 Pro', safeId: 'gemini1_5pro', rateLimit: 1000 * 30 } + 'gemini-1.5-pro': { + author: 'google', + family: 'gemini', + displayName: 'Gemini 1.5 Pro', + safeId: 'gemini1_5pro', + rateLimit: 1000 * 30, + inputTokens: 1_048_576, + outputTokens: 8192 + } } + knownModelInfo['gemini-1.5-pro-latest'] = knownModelInfo['gemini-1.5-pro'] const knownModels = Object.keys(knownModelInfo) @@ -68,4 +121,4 @@ function checkGuidance (messages, chunkCb) { return '' } -module.exports = { sleep, cleanMessage, getModelInfo, getRateLimit, checkDoesGoogleModelSupportInstructions, checkGuidance, 
knownModelInfo, knownModels }
+module.exports = { sleep, cleanMessage, toTitleCase, getModelInfo, getRateLimit, checkDoesGoogleModelSupportInstructions, checkGuidance, knownModelInfo, knownModels }

From 6a0a4b1683362958ef2fa4062f132bbed25d543f Mon Sep 17 00:00:00 2001
From: extremeheat
Date: Fri, 10 May 2024 03:37:42 -0400
Subject: [PATCH 02/12] Support multimodal image input in CompletionService

---
 src/CompletionService.js | 89 ++++++++++++++++++++++++++++++++++++++--
 src/backends/gemini.js   |  2 +
 src/backends/openai.js   |  9 +++-
 src/index.d.ts           | 10 ++++-
 src/util.js              |  5 +++
 test/api.js              | 25 +++++++++++
 6 files changed, 132 insertions(+), 8 deletions(-)

diff --git a/src/CompletionService.js b/src/CompletionService.js
index 89151a0..eb18a9c 100644
--- a/src/CompletionService.js
+++ b/src/CompletionService.js
@@ -113,6 +113,27 @@ class CompletionService {
         delete msg.text
         msg.content = entry.text
       }
+      if (typeof msg.content === 'object') {
+        const updated = []
+        for (const key in msg.content) {
+          const value = msg.content[key]
+          if (value.text) {
+            updated.push({ type: 'text', text: value.text })
+          } else if (value.imageURL) {
+            updated.push({ type: 'image_url', image_url: { url: value.imageURL, detail: value.imageDetail } })
+          } else if (value.imageB64) {
+            let dataURL = value.imageB64
+            if (!dataURL.startsWith('data:')) {
+              if (!value.mimeType) throw new Error('Missing accompanying `mimeType` for imageB64 that is not a data URL')
+              dataURL = `data:${value.mimeType};base64,${dataURL}`
+            }
+            updated.push({ type: 'image_url', image_url: { url: dataURL, detail: value.imageDetail } })
+          } else if (value.image_url) {
+            updated.push({ type: 'image_url', image_url: value.image_url })
+          }
+        }
+        msg.content = updated
+      }
       return msg
     }).filter((msg) => msg.content),
     {
@@ -136,26 +157,80 @@ class CompletionService {
         tool_calls: 'function'
       }[choice.finishReason] ?? 'unknown'
       const content = guidance ? guidance + choice.content : choice.content
-      return { type: choiceType, isTruncated: choice.finishReason === 'length', ...choice, content, text: content }
+      return {
+        type: choiceType,
+        isTruncated: choice.finishReason === 'length',
+        // ...choice,
+        content,
+        text: content
+      }
     })
   }

   async _requestChatCompleteGemini (model, messages, { maxTokens, stopSequences, temperature, topP, topK }, functions, chunkCb) {
+    // Google Gemini accepts neither remote image URLs nor data URLs, so we fetch
+    // remote images ourselves, build data URLs, then split those into inlineData parts
+    async function resolveImage (url) {
+      // fetch the URL contents to a data URL (node.js)
+      const req = await fetch(url)
+      const buffer = await req.arrayBuffer()
+      const dataURL = `data:${req.headers.get('content-type')};base64,${Buffer.from(buffer).toString('base64')}`
+      return dataURL
+    }
+
+    function splitDataURL (entry) {
+      // Gemini doesn't accept data URLs
+      const mimeType = entry.slice(5, entry.indexOf(';'))
+      const data = entry.slice(entry.indexOf(',') + 1)
+      return { inlineData: { mimeType, data } }
+    }
+
     if (!this.geminiApiKey) throw new Error('Gemini API key not set')
     // April 2024 - Only Gemini 1.5 supports instructions
     const supportsSystemInstruction = checkDoesGoogleModelSupportInstructions(model)
     const guidance = checkGuidance(messages, chunkCb)
+    const imagesForResolve = []
     const geminiMessages = messages.map((msg) => {
       const m = structuredClone(msg)
       if (msg.role === 'assistant') m.role = 'model'
       if (msg.role === 'system') m.role = supportsSystemInstruction ? 
'system' : 'user' if (msg.role === 'guidance') m.role = 'model' - if (msg.content != null) { + if (typeof msg.content === 'object') { + const updated = [] + for (const entry of msg.content) { + if (entry.text) { + updated.push({ text: entry.text }) + } else if (entry.imageURL) { + const val = { imageURL: entry.imageURL } + imagesForResolve.push(val) + updated.push(val) + } else if (entry.imageB64) { + if (entry.imageB64.startsWith('data:')) { + updated.push(splitDataURL(entry.imageB64)) + } else if (entry.mimeType) { + updated.push({ + inlineData: { + mimeType: entry.mimeType, + data: entry.imageB64 + } + }) + } + } + } + delete m.content + m.parts = updated + } else if (msg.content != null) { delete m.content m.parts = [{ text: msg.content }] } return m }).filter((msg) => msg.parts && (msg.parts.length > 0)) + + for (const entry of imagesForResolve) { + const dataURL = await resolveImage(entry.imageURL) + Object.assign(entry, splitDataURL(dataURL)) + delete entry.imageURL + } + const response = await gemini.generateChatCompletionEx(model, geminiMessages, { apiKey: this.geminiApiKey, functions, @@ -171,7 +246,13 @@ class CompletionService { const answer = response.text() chunkCb?.({ done: true, delta: '' }) const content = guidance ? guidance + answer : answer - const result = { type: 'text', content, text: content } + const result = { + type: 'text', + isTruncated: response.finishReason === 'MAX_TOKENS', + content, + safetyRatings: response.safetyRatings, + text: content + } return [result] } else if (response.functionCalls()) { const calls = response.functionCalls() @@ -184,7 +265,7 @@ class CompletionService { args: call.args } } - const result = { type: 'function', fnCalls } + const result = { type: 'function', fnCalls, safetyRatings: response.safetyRatings } return [result] } else { throw new Error('Unknown response from Gemini') diff --git a/src/backends/gemini.js b/src/backends/gemini.js index 43da004..be8caf4 100644 --- a/src/backends/gemini.js +++ b/src/backends/gemini.js @@ -89,6 +89,7 @@ async function generateChatCompletionIn (model, messages, options, chunkCb) { // Function response resultCandidates.push({ type: 'function', + finishReason: candidate.finishReason, fnCalls: candidate.content.functionCalls, raw: data, safetyRatings: candidate.safetyRatings @@ -97,6 +98,7 @@ async function generateChatCompletionIn (model, messages, options, chunkCb) { // Text response resultCandidates.push({ type: 'text', + finishReason: candidate.finishReason, text: () => candidate.content.parts.reduce((acc, part) => acc + part.text, ''), raw: data, safetyRatings: candidate.safetyRatings diff --git a/src/backends/openai.js b/src/backends/openai.js index 135fcfd..53e06d8 100644 --- a/src/backends/openai.js +++ b/src/backends/openai.js @@ -26,7 +26,12 @@ function createChunkProcessor (chunkCb, resultChoices) { } for (const choiceId in chunk.choices) { const choice = chunk.choices[choiceId] - const resultChoice = resultChoices[choiceId] ??= { content: '', fnCalls: [], finishReason: '', safetyRatings: {} } + const resultChoice = resultChoices[choiceId] ??= { + content: '', + fnCalls: [], + finishReason: '', + safetyRatings: {} + } if (choice.finish_reason) { resultChoice.finishReason = choice.finish_reason } @@ -93,7 +98,7 @@ function _sendApiRequest (apiKey, payload, chunkCb) { Authorization: 'Bearer ' + apiKey } } - debug('[OpenAI] /completions Payload', JSON.stringify(payload)) + console.debug('[OpenAI] /completions Payload', JSON.stringify(payload)) return new Promise((resolve, reject) => { const 
req = https.request(options, (res) => {
       if (res.statusCode !== 200) {
diff --git a/src/index.d.ts b/src/index.d.ts
index bff8280..f46a1e7 100644
--- a/src/index.d.ts
+++ b/src/index.d.ts
@@ -3,7 +3,13 @@ type CompletionResponse = { content: string, text: string }
 declare module 'langxlang' {
   type Model = 'gpt-3.5-turbo-16k' | 'gpt-3.5-turbo' | 'gpt-4' | 'gpt-4-turbo-preview' | 'gemini-1.0-pro' | 'gemini-1.5-pro-latest'
   type Role = 'system' | 'user' | 'assistant' | 'guidance'
-  type Message = { role: Role, content: string }
+  type MessagePart =
+    | { text: string }
+    | { imageURL: string, imageDetail? }
+    | { imageB64: string, mimeType?: string, imageDetail? }
+  type Message =
+    | { role: Role, content: string }
+    | { role: Role, content: MessagePart[] }
   type ChunkCb = ({ content: string }) => void

   type CompletionOptions = {
@@ -34,7 +40,7 @@ declare module 'langxlang' {
     listModels(): Promise<{ openai: Record, google: Record }>

     // Request a completion from the model with a system prompt and a single user prompt.
-    requestCompletion(model: Model, systemPrompt: string, userPrompt: string, _chunkCb?: ChunkCb, options?: CompletionOptions & {
+    requestCompletion(model: Model, systemPrompt: string, userPrompt: string | MessagePart[], _chunkCb?: ChunkCb, options?: CompletionOptions & {
       // If true, the response will be cached and returned from the cache if the same request is made again.
       enableCaching?: boolean
     }): Promise
diff --git a/src/util.js b/src/util.js
index b2e4faa..375e6e9 100644
--- a/src/util.js
+++ b/src/util.js
@@ -1,4 +1,9 @@
 function cleanMessage (msg) {
+  if (Array.isArray(msg)) {
+    return msg.map(m => {
+      if (m.text) { m.text = cleanMessage(m.text); return m } else return m
+    })
+  }
   if (!msg) return msg
   if (msg.constructor.name === 'PromptString') return msg
   // fix systemMessage \r\n to \n
diff --git a/test/api.js b/test/api.js
index be4bdad..4da343a 100644
--- a/test/api.js
+++ b/test/api.js
@@ -170,6 +170,27 @@ async function testOptions () {
   console.log('Gemini 1.0 Pro with maxTokens=100, temp=2', resultGemini)
 }

+const appleIcon64 = 
'data:image/webp;base64,UklGRuwNAABXRUJQVlA4TOANAAAv/8A/EOJQ27aNJO0/9r3CdVdFxATwoNnrZrFiAwqOwbrz0t+r1WXb8v+btuU8Sq9/V+7S47tO9BZ5hFzmQXKXt+A1z3/3G+bsNfdaY4z/uOqqf9s2rnZsW2vHatvu2LYOYszYTk5cZYy2Vsdo27aZVZJi23ZtRSz1FrlH3fVTDxNZBR7QkN5YLyih2EiSI0lWpUPFESlOh78p6+f8xxcvAZIk07Zicfhs27Zt27Zt237v27Zt27Zt/2fcMwFwa9tWrbTy933jfvePQwSRu5O5u7unWoC7Q4//nftgnAKwyHp6NVAEbVADIbXcFHphkNGBA0mSTOu2875t27Zt27Zt3MVhAABGLGiHTtm2+TZzbCLJlqLiq7hU/PomLpCCF0wQkZNhBQFUT4AvAalkApJZSObUyVFf5DRUcM+RbJNkY5Jxw6FkLyUr7uQQpSaZDwB8dnzJTkomXGaSeZ9mm0t2oMgkx+G0IVWy0AKTHAPJfgx7luxweXUyWcleDXuX7KvkUKUlOYxkl4YNT5NMsLDkzQGSLR82leyLZFRhdbKMYWPJ9ntZdzIDyUabSeZfVp3TWMnuDBtLtsPLWrK+YWPJzkiOQFlJjq1k040kuyo5Ql7U8nqUZA+GTSV7LJmkl7VkNcOmkn2TTMvLupOpSPavyWclx80LW7LNw6aSVXphS461ZHNNHtQ5HVRYkgMlOzVsKNnnTo6UF7bkeAwbSsZb4IM5u0HtG2sjtVHaD/o9BxygDU80aL5FMiDZhSaSbfF+60NC2ky0xWpr1LZK2zFtd7S90/Zb26Q2njau1TarbVzbD21vtd3VdkrbRm092vK0eU80DX14k2aTHNePNpDsp+TI9WN2w9q0tWVoW67twaXtFGub0/ZH21VtS7SlaNPSI8Jikh0eNvylT7k2fm0h2lZre62N1/Zd23ttmy7RI2lrSY7BRxtI9mjRHGKKHqktQtueS9tB3VnbBW3F2hRNJdmyYUPJQn0qtYH3aVv0yXawX6ntiLZIPaLM1MnE/xQku9Q5DU3BxBGxhraLbYzaPmpr0iZtJMnybcWzvPHsxmjL1Pa8jVXbf23z3m4iyW4rSHZj0dNgE22EtrSr2pi1jWob0L5F7SM5xh9VkCzKe1/84QNoC9T2qI1d23dt2doI60jWN6RL9qaTQ/WmTVfbYW28NoG/02Zkm04OcXYDyanzXrXxaWt5ZZvGV2qr14c3toxkNsPeJZvoZPK9aLPV9rBNp7aT2uQMI1lPA8n2+uTaN6WtS9t0m9LztFmY5VbvDvhTA8nCJtOmqe16m1htt19kFcnRkGy2N8n+dDJmEm3Rh7ZpPVbben1IxK0qObnD3iVb73OcaOSrjm3Tqu3bP92ukrOzwYI5aBP/XpvWY7Xtmmhybld5ffBBvUn2XzKRB9Cmoe1Jm1ZtXw+Z/fAfcMNKjv6wd8mOu7trs92mTaq2OW1rtEm4aSUnq4HkVLi7Nh9t/9qkjrR5uHElW9Wgk5m5r/HKNqU3aqvTh2i3rmT3e5Pst2Skr/HKNqEbaNumTcXNK5mwZKO9SXbMtYVqm2oTqu2KNic3sGTmw94la3untok2ndpeaktY/OEDuoUlS2pwp4/fOm9sk6nti7Z87Zt1G0vW25sA4G6ffp9sU/kVbVV6xO+5BwJo4McVZITZI8YeURBiBBg2QALoj2S7e/MhxP365hJxnrbyiSbk+QaK0QI/KIIB2Abn4BF8hF8wATzgaq0wDf/gCzyDS7Ad+iATHEB0qg7vzQnoPVFbv7ZLP4vvc9/SfrCeaXYZMIIcWAE3YGxcpxpm4DGshDiQs0BefQWS/esNAE53n+ib1WatrUDbBm1PHxTPmdoOafPV4z/kWZ51fQBoQiHshe+1/zAFV6EOdJ7cAAACAPw0ADh8zvfv/2CiSWtz1Vahbbu2Z7Mf4kWg7YO2rl09y1weGDCD7nfUwYX70AQG3f0IegFQxaYDwD6Ju4MalMHlO/x5eHf8/TN351+/cRPh3pppsi9aXLE2N0fbD23r3nn/hj3LjAxUPRfm6mDPwJP1GVeGbxIAaAoAqs4JaLCGFrg5qnOOfG8yrg9F05fG6tdYpLdkHW3JFmfc1eM1LXOvz/9xd/n55+hdv/xVfO+XN5/nGexgM4zXKOEvnIEOCN48gP0WALqCA0D0xS4PxFUCH2iAQ9fXKUx83WXtn0rHb/W1r/7k34Jn8LOhmJ3wVlZcsjfritmcb/C5ZPeMA24EB3ed9e/C/IRBu8KFcY14BkbhUfB/u5w+51Hwnw433Jueqf0N/W9ekx93mYeH/PlLVf+trv7RWvrqyfwMx/8mQ71Zf2/eO1j0DL6mA7339sJPbecje/X09K9NPwIxDJkZcIKL4xq9K+KhAYsEXmc1ergPIcEFGQEN2F1T6PQ6aIJEBD4X8dX6G7DIRXBpqIPRmkSnw0QTNEbwd5WCCtMwElyhLIAx3KqJdFoMNMFiaSbhFa5b+rogKFmvplIaNTSDy+Giwiz0MlTiAoKwo6ZT6jU0g8djo1Y4z8gmbT48qAmVBg3NEAoYqfAhjHXCAjbwtabU6bLQpEbBSYX/EJwsxmu6JtXTyNHcyRArdSVISRT4rVfTGvhcCDqDAjwSuB7xUudCZpIYVxirqfV2S0BBZkAAdyGmuZ0LCQkC831rej3VzHc8oKEPzS9MgX9yAgr/qCn2b3ecVoMgYkABv8BjUq/xzVua433BJDG4LFytiQ58r/zHA//h4E0zvRtIpgWWVYNvES4XpwSigWexunVCAvLwq5p8U8Y0GWEv8OBqdLgJVCogcmy1ChWJAH54W80+fVkaoLEaHlYlAVca/lhuBIYp2KmaHrYnIIzEQtuNQC8+qK/G/2F0i+1F/sN6O4JEbBBezQ/lscFh+90SBsXFKMCk/cZgFRdTWAsQBuP6TQl8qXv5CcQURnJUAnWlmCCuFuGLY4K1ZQBnIwqDdyuD08MIxgMq4zKoN8fzplqIUB0P9JcC7IwHTpYCvIymu+/w/aXwOUYsFpCB6VKop8QCtrUYITYWiC+HZWJ5cTn8MBZYVA5fiAV2lwPcjgUulgO8C8A4nvyOcpgOLhEHLoL35QBzXNBxnPaYcqggFgcuuV5BgHwcP5hbEIx6HGGoWpCgFQfDlAToxgF0SRwQxw/GBTE/krkFAepxALlpQYByHGGI0wsijGwcuPgxBfHBOBh0cEF0hz+OMBBel8PovDgcHpbDdICIZJ1yODiAIoFT5fA99sBIfl4OL/dIYU05XBsLDJfDg2M5pBxgZSyQWw47xXJdOWwdCziXAyTHckA5gE8sILVpMfwolrBLTJfC6EGxOLwohW/i0tE8rRTgjUcLG0rhUfFAeynAxnggvRR+Gg+4lwJkxAOqpQCu8TB804UAivFw/g7gSRnAb2DjcdhfBnDfI4a+MnhbTExaGUBbTC8rg4tiYld4bhG8ISb/UglsGqCjggMlcItHfUkJfCqu95UAlMf113EBgFtc7JIH2+9zb43L4bj
9XtO9PCAy6LQfbPLIIch+UBQbu1Jj84FlbADhufV2DHuY2BzWWe83Hv2K1tsvPkZvxni7xtcdBB9td3TYFY3PYYvtTvUEQrbtzkjB/LHpwCMFgM6x3DQjmAKHZZaDY55ECLPcJmkICG9qtzHopcGfZjd4c9vbDyARJ9rtKZ7IgO6M2cArFeyB8Nhqjwnwp8J3shps82T+x2oQnQ72oNVtdjQjnA6fZ7MHe0LB3mYQn5Lg4hMsBv9BJCW+vsX+6EkFU4sxoWn5/EfsdT3wpcW3thcs8cSGURiZC2xS41tY64ncH5AciLAWVHpyT4JPtoLxgFx6fH1bvc0TPH/GVL9Ikf/NUvBk1uVBSQoEWwqKPcnBQ5xjp4WHpslPtBMs9EQHV2yhlUagnSqHeVaCfZ7sMGorGYlxSpcfZqO9PeGMOcya6IUp810sBHcDOGnLW4iJ87QvsM8VYQ9OHDjOmCdMvCe+e34BOGodeBCGTJ2vPWOcp3v6Yadt4Hrw4AwwuitZZny25xCWWuZvnsXLFtplJTDLg7/YLj/0TAbY3ayykJHJhb9vbJRNPJu39iNY1Sa3dKHz4e852iIzl3tOodUisM6zyjBPtMf1Aam8+OVzzQHJnltYZI1dcGF2QGhjWywMo+T5ZdxGlhhDiucYhiwBu2adwSyd9HI7/CMg6XkG/W9aYcR4eK5XtMJ+nm12Aay1wYFdyHx52OWHuxb4OK6M5/z58CN/RwccPO+7rpS7MWR67s/N3VEHZc9hft4efJrn/+GX5WxvdgXcgmHY3+TrCpB0GwZXZJ1cbRxQdiv+4SN5+sfz3Y5hpOBOjuBdQMstyUjCtfycw2i6LT95YG5ueZBbExjYkJclP+j2DC76/Tgje+7hJn3fxbk4GgrdqmF0PjNYcy9+7pKXHXbY4/+2wu9Gg/W9l7ldg8usPxoYeHTUC0Gpe34hn3TWHhhGbtf9tp8ZlPG2y7ppX7bCQJyw33sY6I1xQRgVqHrHQMArCHTr7vEr+NWvuXDw7wHKp/jX4HLYpv1aD9o+6QZe9rG/6wc8/cDO3tcbH7bOTB++CQte6UZeFgo+MmPJ59Z5Sxhzzhd738Me8B6o+M2OUzL+3jIBGbd0wOQDvzm9lzF8+8xhUHs2iPvA4grcDOVrnQqf5/ZyNFyDruW7XtDNHYZmtMH7IkiFpEAchJ79H0YdVzAM4REymOFnVBiLV7/wIkj63z6MH2OAy+dLYAk=' +const bingImage = 'https://www.bing.com/th?id=OHR.CratersOfTheMoon_EN-US6516727783_1920x1080.jpg&w=1000' + +async function testRemoteImage (model = 'gpt-4-turbo') { + console.log('Image complete with model', model) + const [result] = await completionService.requestCompletion(model, '', [ + { text: "What's in this picture?" }, + { imageURL: bingImage } + ], toTerminal) + console.log('Image result', result) +} + +async function testImage (model = 'gemini-1.0-pro') { + console.log('Image complete with model', model) + const [result] = await completionService.requestCompletion(model, '', [ + { text: "What's in this image?" }, + { imageB64: appleIcon64 } + ], toTerminal) + console.log('Image result', result) +} + async function testBasic () { completionService.startLogging() await testListing() @@ -184,6 +205,10 @@ async function testBasic () { await testGeminiSessionWithFuncs('gemini-1.5-pro-latest') await testOpenAICaching() await testOptions() + await testImage('gemini-pro-vision') + await testImage('gpt-4-turbo') + await testRemoteImage('gemini-pro-vision') + await testRemoteImage('gpt-4-turbo') const log = completionService.stopLogging() const html = log.exportHTML() fs.writeFileSync('log.html', html) From 97399e841e22efa250c31c0b3aa6173527a3d2ed Mon Sep 17 00:00:00 2001 From: extremeheat Date: Fri, 10 May 2024 23:17:01 -0400 Subject: [PATCH 03/12] Update logging to support images, add chat+img test --- src/index.d.ts | 2 +- src/tools/logging.js | 7 ++- src/tools/loggingTemplate.html | 83 ++++++++++++++++++++++++++++------ test/api.js | 13 ++++++ 4 files changed, 90 insertions(+), 15 deletions(-) diff --git a/src/index.d.ts b/src/index.d.ts index f46a1e7..bf25f59 100644 --- a/src/index.d.ts +++ b/src/index.d.ts @@ -100,7 +100,7 @@ declare module 'langxlang' { constructor(completionService: SomeCompletionService, model: Model, systemPrompt?: string, options?: { functions?: Functions, generationOptions?: CompletionOptions }) // Send a message to the LLM and receive a response as return value. The chunkCallback // can be defined to listen to bits of the message stream as it's being written by the LLM. 
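    // For example, with patch 02's MessagePart shapes, a multimodal call might look
    // like this (sketch only; the session value and image URL are hypothetical):
    //   const res = await session.sendMessage([
    //     { text: "What's in this picture?" },
    //     { imageURL: 'https://example.com/photo.jpg' }
    //   ], (chunk) => process.stdout.write(chunk.delta ?? ''))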
- sendMessage(userMessage: string, chunkCallback?: ChunkCb, generationOptions?: CompletionOptions): Promise + sendMessage(userMessage: string | MessagePart[], chunkCallback?: ChunkCb, generationOptions?: CompletionOptions): Promise } type StripOptions = { diff --git a/src/tools/logging.js b/src/tools/logging.js index 3a3f0ae..266335c 100644 --- a/src/tools/logging.js +++ b/src/tools/logging.js @@ -12,7 +12,12 @@ function createHTML (log) { on: new Date(entry.date).toISOString(), role: 'user', model: entry.model, - content: entry.messages ? null : [entry.system, entry.user].join('\n'), + content: entry.messages + ? null + : [ + { text: entry.system || '' }, + { text: entry.user || '' } + ], messages: entry.messages, generationOptions: entry.generationOptions }) diff --git a/src/tools/loggingTemplate.html b/src/tools/loggingTemplate.html index d61e3d7..5ded96a 100644 --- a/src/tools/loggingTemplate.html +++ b/src/tools/loggingTemplate.html @@ -38,8 +38,10 @@

 [The template markup in this hunk was stripped during extraction; its recoverable visible text begins:]
 LXL Session Log —