diff --git a/packages/ai-openai/README.md b/packages/ai-openai/README.md
index 47c22bdb98deb..dc879794a087c 100644
--- a/packages/ai-openai/README.md
+++ b/packages/ai-openai/README.md
@@ -23,12 +23,12 @@ You can configure the end points via the `ai-features.openAiCustom.customOpenAiM
 ```ts
 {
-    model: string
-    url: string
-    id?: string
-    apiKey?: string | true
-    apiVersion?: string | true
-    supportsDeveloperMessage?: boolean
+    model: string,
+    url: string,
+    id?: string,
+    apiKey?: string | true,
+    apiVersion?: string | true,
+    developerMessageSettings?: 'user' | 'system' | 'developer' | 'mergeWithFollowingUserMessage' | 'skip',
     enableStreaming?: boolean
 }
 ```
 
@@ -37,7 +37,9 @@ You can configure the end points via the `ai-features.openAiCustom.customOpenAiM
 - `id` is an optional attribute which is used in the UI to refer to this configuration
 - `apiKey` is either the key to access the API served at the given URL or `true` to use the global OpenAI API key. If not given 'no-key' will be used.
 - `apiVersion` is either the api version to access the API served at the given URL in Azure or `true` to use the global OpenAI API version.
-- `supportsDeveloperMessage` is a flag that indicates whether the model supports the `developer` role or not. `true` by default.
+- `developerMessageSettings` controls how system messages are handled: `user`, `system`, and `developer` are used directly as the role of the message,
+  `mergeWithFollowingUserMessage` prefixes the following user message with the system message (or converts the system message to a user message if the next message is not a
+  user message), and `skip` removes the system message entirely. Defaults to `developer`.
 - `enableStreaming` is a flag that indicates whether the streaming API shall be used or not. `true` by default.
 
 ### Azure OpenAI
@@ -49,7 +51,7 @@ Requests to an OpenAI model hosted on Azure need an `apiVersion`. To configure a
 Note that if you don't configure an `apiVersion`, the default `OpenAI` object is used for initialization and a connection to an Azure hosted OpenAI model will fail.
 
-An OpenAI model version deployed on Azure might not support the `developer` role. In that case it is possible to configure whether the `developer` role is supported or not via the
-`supportsDeveloperMessage` option, which defaults to `true`.
+An OpenAI model version deployed on Azure might not support the `developer` role. In that case it is possible to configure how system messages are handled via the
+`developerMessageSettings` option, e.g. setting it to `system` or `user`.
 
 The following snippet shows a possible configuration to access an OpenAI model hosted on Azure. The `AZURE_OPENAI_API_BASE_URL` needs to be given without the `/chat/completions`
 path and without the `api-version` parameter, e.g. _`https://<instance>.openai.azure.com/openai/deployments/<deployment>`_
 
@@ -64,7 +66,7 @@ path and without the `api-version` parameter, e.g. _`https://<instance>.openai.
         "id": "azure-deployment",
         "apiKey": "<api-key>",
         "apiVersion": "<api-version>",
-        "supportsDeveloperMessage": false
+        "developerMessageSettings": "system"
     }
 ],
 "ai-features.agentSettings": {
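For illustration, a complete `customOpenAiModels` entry using the new option could look as follows; the endpoint, model name, and id are hypothetical placeholders, not a recommendation:

```ts
// Hypothetical value for the `ai-features.openAiCustom.customOpenAiModels` setting,
// matching the shape documented in the README above.
const customOpenAiModels = [
    {
        model: 'llama-3.3-70b',          // name under which the server exposes the model
        url: 'http://localhost:8000/v1', // OpenAI-compatible endpoint, e.g. served via vllm
        id: 'local-llama',               // optional identifier shown in the UI
        apiKey: true,                    // reuse the global OpenAI API key
        developerMessageSettings: 'mergeWithFollowingUserMessage', // model has no system/developer role
        enableStreaming: true
    }
];
```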
"id": "azure-deployment", "apiKey": "", "apiVersion": "", - "supportsDeveloperMessage": false + "developerMessageSettings": "system" } ], "ai-features.agentSettings": { diff --git a/packages/ai-openai/src/browser/openai-frontend-application-contribution.ts b/packages/ai-openai/src/browser/openai-frontend-application-contribution.ts index aa0dcb8d233e3..4d585c8c8fe39 100644 --- a/packages/ai-openai/src/browser/openai-frontend-application-contribution.ts +++ b/packages/ai-openai/src/browser/openai-frontend-application-contribution.ts @@ -92,7 +92,7 @@ export class OpenAiFrontendApplicationContribution implements FrontendApplicatio model.url === newModel.url && model.apiKey === newModel.apiKey && model.apiVersion === newModel.apiVersion && - model.supportsDeveloperMessage === newModel.supportsDeveloperMessage && + model.developerMessageSettings === newModel.developerMessageSettings && model.supportsStructuredOutput === newModel.supportsStructuredOutput && model.enableStreaming === newModel.enableStreaming)); @@ -117,7 +117,7 @@ export class OpenAiFrontendApplicationContribution implements FrontendApplicatio model: modelId, apiKey: true, apiVersion: true, - supportsDeveloperMessage: !openAIModelsNotSupportingDeveloperMessages.includes(modelId), + developerMessageSettings: openAIModelsNotSupportingDeveloperMessages.includes(modelId) ? 'user' : 'developer', enableStreaming: !openAIModelsWithDisabledStreaming.includes(modelId), supportsStructuredOutput: !openAIModelsWithoutStructuredOutput.includes(modelId), defaultRequestSettings: modelRequestSetting?.requestSettings @@ -143,7 +143,7 @@ export class OpenAiFrontendApplicationContribution implements FrontendApplicatio url: pref.url, apiKey: typeof pref.apiKey === 'string' || pref.apiKey === true ? pref.apiKey : undefined, apiVersion: typeof pref.apiVersion === 'string' || pref.apiVersion === true ? pref.apiVersion : undefined, - supportsDeveloperMessage: pref.supportsDeveloperMessage ?? true, + developerMessageSettings: pref.developerMessageSettings ?? 'developer', supportsStructuredOutput: pref.supportsStructuredOutput ?? true, enableStreaming: pref.enableStreaming ?? true, defaultRequestSettings: modelRequestSetting?.requestSettings diff --git a/packages/ai-openai/src/browser/openai-preferences.ts b/packages/ai-openai/src/browser/openai-preferences.ts index 0c29d6ac5ff84..1973b52c8b40a 100644 --- a/packages/ai-openai/src/browser/openai-preferences.ts +++ b/packages/ai-openai/src/browser/openai-preferences.ts @@ -45,23 +45,26 @@ on the machine running Theia. Use the environment variable `OPENAI_API_KEY` to s type: 'array', title: AI_CORE_PREFERENCES_TITLE, markdownDescription: nls.localize('theia/ai/openai/customEndpoints/mdDescription', - 'Integrate custom models compatible with the OpenAI API, for example via `vllm`. The required attributes are `model` and `url`.\n\ -\n\ -Optionally, you can\ -\n\ -- specify a unique `id` to identify the custom model in the UI. If none is given `model` will be used as `id`.\ -\n\ -- provide an `apiKey` to access the API served at the given url. Use `true` to indicate the use of the global OpenAI API key.\ -\n\ -- provide an `apiVersion` to access the API served at the given url in Azure. 
diff --git a/packages/ai-openai/src/browser/openai-preferences.ts b/packages/ai-openai/src/browser/openai-preferences.ts
index 0c29d6ac5ff84..1973b52c8b40a 100644
--- a/packages/ai-openai/src/browser/openai-preferences.ts
+++ b/packages/ai-openai/src/browser/openai-preferences.ts
@@ -45,23 +45,26 @@ on the machine running Theia. Use the environment variable `OPENAI_API_KEY` to s
             type: 'array',
             title: AI_CORE_PREFERENCES_TITLE,
             markdownDescription: nls.localize('theia/ai/openai/customEndpoints/mdDescription',
-                'Integrate custom models compatible with the OpenAI API, for example via `vllm`. The required attributes are `model` and `url`.\n\
-\n\
-Optionally, you can\
-\n\
-- specify a unique `id` to identify the custom model in the UI. If none is given `model` will be used as `id`.\
-\n\
-- provide an `apiKey` to access the API served at the given url. Use `true` to indicate the use of the global OpenAI API key.\
-\n\
-- provide an `apiVersion` to access the API served at the given url in Azure. Use `true` to indicate the use of the global OpenAI API version.\
-\n\
-- specify `supportsDeveloperMessage: false` to indicate that the developer role shall not be used.\
-\n\
-- specify `supportsStructuredOutput: false` to indicate that structured output shall not be used.\
-\n\
-- specify `enableStreaming: false` to indicate that streaming shall not be used.\n\
-\n\
-Refer to [our documentation](https://theia-ide.org/docs/user_ai/#openai-compatible-models-eg-via-vllm) for more information.'),
+                'Integrate custom models compatible with the OpenAI API, for example via `vllm`. The required attributes are `model` and `url`.\
+                \n\
+                Optionally, you can\
+                \n\
+                - specify a unique `id` to identify the custom model in the UI. If none is given `model` will be used as `id`.\
+                \n\
+                - provide an `apiKey` to access the API served at the given url. Use `true` to indicate the use of the global OpenAI API key.\
+                \n\
+                - provide an `apiVersion` to access the API served at the given url in Azure. Use `true` to indicate the use of the global OpenAI API version.\
+                \n\
+                - set `developerMessageSettings` to one of `user`, `system`, `developer`, `mergeWithFollowingUserMessage`, or `skip` to control how system messages are\
+                handled: `user`, `system`, and `developer` are used directly as the role, `mergeWithFollowingUserMessage` prefixes the following user message with the system\
+                message (or converts the system message to a user message if the next message is not a user message), and `skip` removes the system message entirely.\
+                Defaults to `developer`.\
+                \n\
+                - specify `supportsStructuredOutput: false` to indicate that structured output shall not be used.\
+                \n\
+                - specify `enableStreaming: false` to indicate that streaming shall not be used.\
+                \n\
+                Refer to [our documentation](https://theia-ide.org/docs/user_ai/#openai-compatible-models-eg-via-vllm) for more information.'),
             default: [],
             items: {
                 type: 'object',
@@ -88,10 +91,14 @@ Refer to [our documentation](https://theia-ide.org/docs/user_ai/#openai-compatib
                         title: nls.localize('theia/ai/openai/customEndpoints/apiVersion/title',
                             'Either the version to access the API served at the given url in Azure or `true` to use the global OpenAI API version'),
                     },
-                    supportsDeveloperMessage: {
-                        type: 'boolean',
-                        title: nls.localize('theia/ai/openai/customEndpoints/supportsDevMessage/title',
-                            'Indicates whether the model supports the `developer` role. `true` by default.'),
+                    developerMessageSettings: {
+                        type: 'string',
+                        enum: ['user', 'system', 'developer', 'mergeWithFollowingUserMessage', 'skip'],
+                        default: 'developer',
+                        title: nls.localize('theia/ai/openai/customEndpoints/developerMessageSettings/title',
+                            'Controls how system messages are handled: `user`, `system`, and `developer` are used directly as the role, `mergeWithFollowingUserMessage` prefixes\
+                            the following user message with the system message (or converts the system message to a user message if the next message is not a user message),\
+                            and `skip` removes the system message entirely. Defaults to `developer`.')
                     },
                     supportsStructuredOutput: {
                         type: 'boolean',
diff --git a/packages/ai-openai/src/common/openai-language-models-manager.ts b/packages/ai-openai/src/common/openai-language-models-manager.ts
index 6cc8d349c3705..255b0d0d2dff9 100644
--- a/packages/ai-openai/src/common/openai-language-models-manager.ts
+++ b/packages/ai-openai/src/common/openai-language-models-manager.ts
@@ -15,6 +15,7 @@
 // *****************************************************************************
 export const OPENAI_LANGUAGE_MODELS_MANAGER_PATH = '/services/open-ai/language-model-manager';
 export const OpenAiLanguageModelsManager = Symbol('OpenAiLanguageModelsManager');
+
 export interface OpenAiModelDescription {
     /**
      * The identifier of the model which will be shown in the UI.
@@ -41,9 +42,12 @@ export interface OpenAiModelDescription {
      */
    enableStreaming: boolean;
     /**
-     * Flag to configure whether the OpenAPI model supports the `developer` role. Default is `true`.
+     * Property to configure how system messages are handled. Setting this to 'user', 'system', or 'developer' uses that string as the role of the system message.
+     * 'mergeWithFollowingUserMessage' prefixes the following user message with the system message, or converts the system message to a user message if the following message
+     * is not a user message. 'skip' removes the system message altogether.
+     * Defaults to 'developer'.
      */
-    supportsDeveloperMessage: boolean;
+    developerMessageSettings?: 'user' | 'system' | 'developer' | 'mergeWithFollowingUserMessage' | 'skip';
     /**
      * Flag to configure whether the OpenAPI model supports structured output. Default is `true`.
      */
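As a concrete sketch, a description for an Azure deployment that rejects the `developer` role might look like this; all values are hypothetical placeholders, and the import path assumes the package's published `lib` layout:

```ts
import { OpenAiModelDescription } from '@theia/ai-openai/lib/common';

// Hypothetical description: the Azure-hosted deployment does not accept the
// `developer` role, so system messages are sent with the classic `system` role.
const azureModel: OpenAiModelDescription = {
    id: 'azure-deployment',
    model: 'gpt-4o',
    url: 'https://example.openai.azure.com/openai/deployments/example',
    apiKey: true,             // reuse the globally configured key
    apiVersion: '2024-06-01', // placeholder; use your deployment's version
    enableStreaming: true,
    developerMessageSettings: 'system',
    supportsStructuredOutput: true
};
```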
 */
diff --git a/packages/ai-openai/src/node/openai-backend-module.ts b/packages/ai-openai/src/node/openai-backend-module.ts
index 1850eddfe9582..5d5077f1f6053 100644
--- a/packages/ai-openai/src/node/openai-backend-module.ts
+++ b/packages/ai-openai/src/node/openai-backend-module.ts
@@ -19,6 +19,7 @@ import { OPENAI_LANGUAGE_MODELS_MANAGER_PATH, OpenAiLanguageModelsManager } from
 import { ConnectionHandler, RpcConnectionHandler } from '@theia/core';
 import { OpenAiLanguageModelsManagerImpl } from './openai-language-models-manager-impl';
 import { ConnectionContainerModule } from '@theia/core/lib/node/messaging/connection-container-module';
+import { OpenAiModelUtils } from './openai-language-model';
 
 export const OpenAiModelFactory = Symbol('OpenAiModelFactory');
 
@@ -32,5 +33,6 @@ const openAiConnectionModule = ConnectionContainerModule.create(({ bind, bindBac
 });
 
 export default new ContainerModule(bind => {
+    bind(OpenAiModelUtils).toSelf().inSingletonScope();
     bind(ConnectionContainerModule).toConstantValue(openAiConnectionModule);
 });
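Because `OpenAiModelUtils` is bound here as a plain singleton, an adopter can swap in its own message processing by rebinding it in a later-loaded backend module. A minimal sketch, assuming a hypothetical `CustomOpenAiModelUtils` subclass like the one sketched after the utility class below, and an import path following the published `lib` layout:

```ts
import { ContainerModule } from '@theia/core/shared/inversify';
import { OpenAiModelUtils } from '@theia/ai-openai/lib/node/openai-language-model';
// Hypothetical subclass provided by the adopter (see the sketch further below).
import { CustomOpenAiModelUtils } from './custom-openai-model-utils';

export default new ContainerModule((bind, unbind, isBound, rebind) => {
    bind(CustomOpenAiModelUtils).toSelf().inSingletonScope();
    // Route all OpenAiModelUtils lookups to the custom implementation.
    rebind(OpenAiModelUtils).toService(CustomOpenAiModelUtils);
});
```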
diff --git a/packages/ai-openai/src/node/openai-language-model.ts b/packages/ai-openai/src/node/openai-language-model.ts
index 4ff274656e07f..33aa9874ffd4a 100644
--- a/packages/ai-openai/src/node/openai-language-model.ts
+++ b/packages/ai-openai/src/node/openai-language-model.ts
@@ -23,6 +23,7 @@ import {
     LanguageModelTextResponse
 } from '@theia/ai-core';
 import { CancellationToken } from '@theia/core';
+import { injectable } from '@theia/core/shared/inversify';
 import { OpenAI, AzureOpenAI } from 'openai';
 import { ChatCompletionStream } from 'openai/lib/ChatCompletionStream';
 import { RunnableToolFunctionWithoutParse } from 'openai/lib/RunnableFunction';
@@ -31,6 +32,8 @@ import { StreamingAsyncIterator } from './openai-streaming-iterator';
 
 export const OpenAiModelIdentifier = Symbol('OpenAiModelIdentifier');
 
+export type DeveloperMessageSettings = 'user' | 'system' | 'developer' | 'mergeWithFollowingUserMessage' | 'skip';
+
 export class OpenAiModel implements LanguageModel {
 
     /**
@@ -39,7 +42,7 @@ export class OpenAiModel implements LanguageModel {
      * @param enableStreaming whether the streaming API shall be used
      * @param apiKey a function that returns the API key to use for this model, called on each request
      * @param apiVersion a function that returns the OpenAPI version to use for this model, called on each request
-     * @param supportsDeveloperMessage whether the model supports the `developer` role
+     * @param developerMessageSettings how to handle system messages
      * @param url the OpenAI API compatible endpoint where the model is hosted. If not provided the default OpenAI endpoint will be used.
      * @param defaultRequestSettings optional default settings for requests made using this model.
      */
@@ -49,10 +52,11 @@ export class OpenAiModel implements LanguageModel {
         public enableStreaming: boolean,
         public apiKey: () => string | undefined,
         public apiVersion: () => string | undefined,
-        public supportsDeveloperMessage: boolean,
         public supportsStructuredOutput: boolean,
         public url: string | undefined,
-        public defaultRequestSettings?: { [key: string]: unknown }
+        public openAiModelUtils: OpenAiModelUtils,
+        public developerMessageSettings: DeveloperMessageSettings = 'developer',
+        public defaultRequestSettings?: { [key: string]: unknown },
     ) { }
 
     protected getSettings(request: LanguageModelRequest): Record<string, unknown> {
@@ -78,13 +82,12 @@ export class OpenAiModel implements LanguageModel {
         if (cancellationToken?.isCancellationRequested) {
             return { text: '' };
         }
-
         let runner: ChatCompletionStream;
         const tools = this.createTools(request);
         if (tools) {
             runner = openai.beta.chat.completions.runTools({
                 model: this.model,
-                messages: request.messages.map(this.toOpenAIMessage.bind(this)),
+                messages: this.processMessages(request.messages),
                 stream: true,
                 tools: tools,
                 tool_choice: 'auto',
@@ -93,7 +96,7 @@ export class OpenAiModel implements LanguageModel {
         } else {
             runner = openai.beta.chat.completions.stream({
                 model: this.model,
-                messages: request.messages.map(this.toOpenAIMessage.bind(this)),
+                messages: this.processMessages(request.messages),
                 stream: true,
                 ...settings
             });
@@ -106,7 +109,7 @@ export class OpenAiModel implements LanguageModel {
         const settings = this.getSettings(request);
         const response = await openai.chat.completions.create({
             model: this.model,
-            messages: request.messages.map(this.toOpenAIMessage.bind(this)),
+            messages: this.processMessages(request.messages),
             ...settings
         });
 
@@ -117,24 +120,6 @@ export class OpenAiModel implements LanguageModel {
         };
     }
 
-    protected toOpenAIMessage(message: LanguageModelRequestMessage): ChatCompletionMessageParam {
-        return {
-            role: this.toOpenAiRole(message),
-            content: message.query || ''
-        };
-    }
-
-    protected toOpenAiRole(message: LanguageModelRequestMessage): 'developer' | 'user' | 'assistant' {
-        switch (message.actor) {
-            case 'system':
-                return this.supportsDeveloperMessage ? 'developer' : 'user';
-            case 'ai':
-                return 'assistant';
-            default:
-                return 'user';
-        }
-    }
-
     protected isNonStreamingModel(_model: string): boolean {
         return !this.enableStreaming;
     }
@@ -144,7 +129,7 @@ export class OpenAiModel implements LanguageModel {
         // TODO implement tool support for structured output (parse() seems to require different tool format)
         const result = await openai.beta.chat.completions.parse({
             model: this.model,
-            messages: request.messages.map(this.toOpenAIMessage.bind(this)),
+            messages: this.processMessages(request.messages),
             response_format: request.response_format,
             ...settings
         });
@@ -185,4 +170,92 @@ export class OpenAiModel implements LanguageModel {
             return new OpenAI({ apiKey: apiKey ?? 'no-key', baseURL: this.url });
         }
     }
+
+    protected processMessages(messages: LanguageModelRequestMessage[]): ChatCompletionMessageParam[] {
+        return this.openAiModelUtils.processMessages(messages, this.developerMessageSettings, this.model);
+    }
 }
+
+/**
+ * Utility class for processing messages for the OpenAI language model.
+ *
+ * Adopters can rebind this class to implement custom message processing behavior.
+ */
+@injectable()
+export class OpenAiModelUtils {
+
+    protected processSystemMessages(
+        messages: LanguageModelRequestMessage[],
+        developerMessageSettings: DeveloperMessageSettings
+    ): LanguageModelRequestMessage[] {
+        if (developerMessageSettings === 'skip') {
+            return messages.filter(message => message.actor !== 'system');
+        } else if (developerMessageSettings === 'mergeWithFollowingUserMessage') {
+            const updated = messages.slice();
+            for (let i = updated.length - 1; i >= 0; i--) {
+                if (updated[i].actor === 'system') {
+                    if (i + 1 < updated.length && updated[i + 1].actor === 'user') {
+                        // Merge system message with the next user message
+                        updated[i + 1] = {
+                            ...updated[i + 1],
+                            query: updated[i].query + '\n' + updated[i + 1].query
+                        };
+                        updated.splice(i, 1);
+                    } else {
+                        // The message directly after is not a user message (or none exists), so create a new user message right after
+                        updated.splice(i + 1, 0, { actor: 'user', type: 'text', query: updated[i].query });
+                        updated.splice(i, 1);
+                    }
+                }
+            }
+            return updated;
+        }
+        return messages;
+    }
+
+    protected toOpenAiRole(
+        message: LanguageModelRequestMessage,
+        developerMessageSettings: DeveloperMessageSettings
+    ): 'developer' | 'user' | 'assistant' | 'system' {
+        if (message.actor === 'system') {
+            if (developerMessageSettings === 'user' || developerMessageSettings === 'system' || developerMessageSettings === 'developer') {
+                return developerMessageSettings;
+            } else {
+                return 'developer';
+            }
+        } else if (message.actor === 'ai') {
+            return 'assistant';
+        }
+        return 'user';
+    }
+
+    protected toOpenAIMessage(
+        message: LanguageModelRequestMessage,
+        developerMessageSettings: DeveloperMessageSettings
+    ): ChatCompletionMessageParam {
+        return {
+            role: this.toOpenAiRole(message, developerMessageSettings),
+            content: message.query || ''
+        };
+    }
+
+    /**
+     * Processes the provided list of messages by applying system message adjustments and converting
+     * them to the format expected by the OpenAI API.
+     *
+     * Adopters can rebind this processing to implement custom behavior.
+     *
+     * @param messages the list of messages to process.
+     * @param developerMessageSettings how system and developer messages are handled during processing.
+     * @param model the OpenAI model identifier. Currently not used, but allows subclasses to implement model-specific behavior.
+     * @returns an array of messages formatted for the OpenAI API.
+     */
+    processMessages(
+        messages: LanguageModelRequestMessage[],
+        developerMessageSettings: DeveloperMessageSettings,
+        model: string
+    ): ChatCompletionMessageParam[] {
+        const processed = this.processSystemMessages(messages, developerMessageSettings);
+        return processed.map(m => this.toOpenAIMessage(m, developerMessageSettings));
+    }
+}
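To illustrate the purpose of the `model` parameter above, a rebound subclass could apply model-specific handling. A sketch under the assumption that some model family rejects both the `system` and `developer` roles; the class name, the `o1-` prefix rule, and the import paths are hypothetical:

```ts
import { injectable } from '@theia/core/shared/inversify';
import { LanguageModelRequestMessage } from '@theia/ai-core';
import { ChatCompletionMessageParam } from 'openai/resources';
import { DeveloperMessageSettings, OpenAiModelUtils } from '@theia/ai-openai/lib/node/openai-language-model';

@injectable()
export class CustomOpenAiModelUtils extends OpenAiModelUtils {
    override processMessages(
        messages: LanguageModelRequestMessage[],
        developerMessageSettings: DeveloperMessageSettings,
        model: string
    ): ChatCompletionMessageParam[] {
        // Hypothetical rule: this model family accepts neither `system` nor
        // `developer` roles, so fold system messages into the user turn.
        const effective = model.startsWith('o1-') ? 'mergeWithFollowingUserMessage' : developerMessageSettings;
        return super.processMessages(messages, effective, model);
    }
}
```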
diff --git a/packages/ai-openai/src/node/openai-language-models-manager-impl.ts b/packages/ai-openai/src/node/openai-language-models-manager-impl.ts
index 347af39663949..0585aa36af941 100644
--- a/packages/ai-openai/src/node/openai-language-models-manager-impl.ts
+++ b/packages/ai-openai/src/node/openai-language-models-manager-impl.ts
@@ -16,12 +16,15 @@
 
 import { LanguageModelRegistry } from '@theia/ai-core';
 import { inject, injectable } from '@theia/core/shared/inversify';
-import { OpenAiModel } from './openai-language-model';
+import { OpenAiModel, OpenAiModelUtils } from './openai-language-model';
 import { OpenAiLanguageModelsManager, OpenAiModelDescription } from '../common';
 
 @injectable()
 export class OpenAiLanguageModelsManagerImpl implements OpenAiLanguageModelsManager {
+    @inject(OpenAiModelUtils)
+    protected readonly openAiModelUtils: OpenAiModelUtils;
+
     protected _apiKey: string | undefined;
     protected _apiVersion: string | undefined;
 
@@ -70,7 +73,7 @@ export class OpenAiLanguageModelsManagerImpl implements OpenAiLanguageModelsMana
                 model.url = modelDescription.url;
                 model.apiKey = apiKeyProvider;
                 model.apiVersion = apiVersionProvider;
-                model.supportsDeveloperMessage = modelDescription.supportsDeveloperMessage;
+                model.developerMessageSettings = modelDescription.developerMessageSettings || 'developer';
                 model.supportsStructuredOutput = modelDescription.supportsStructuredOutput;
                 model.defaultRequestSettings = modelDescription.defaultRequestSettings;
             } else {
@@ -81,9 +84,10 @@ export class OpenAiLanguageModelsManagerImpl implements OpenAiLanguageModelsMana
                     modelDescription.enableStreaming,
                     apiKeyProvider,
                     apiVersionProvider,
-                    modelDescription.supportsDeveloperMessage,
                     modelDescription.supportsStructuredOutput,
                     modelDescription.url,
+                    this.openAiModelUtils,
+                    modelDescription.developerMessageSettings,
                     modelDescription.defaultRequestSettings
                 )
             ]);
diff --git a/packages/ai-openai/src/node/openai-model-utils.spec.ts b/packages/ai-openai/src/node/openai-model-utils.spec.ts
new file mode 100644
index 0000000000000..3b6b14eb42d54
--- /dev/null
+++ b/packages/ai-openai/src/node/openai-model-utils.spec.ts
@@ -0,0 +1,164 @@
+// *****************************************************************************
+// Copyright (C) 2024 EclipseSource GmbH.
+//
+// This program and the accompanying materials are made available under the
+// terms of the Eclipse Public License v. 2.0 which is available at
+// http://www.eclipse.org/legal/epl-2.0.
+//
+// This Source Code may also be made available under the following Secondary
+// Licenses when the conditions for such availability set forth in the Eclipse
+// Public License v. 2.0 are satisfied: GNU General Public License, version 2
+// with the GNU Classpath Exception which is available at
+// https://www.gnu.org/software/classpath/license.html.
+//
+// SPDX-License-Identifier: EPL-2.0 OR GPL-2.0-only WITH Classpath-exception-2.0
+// *****************************************************************************
+
+const { expect } = require('chai');
+const { OpenAiModelUtils } = require('./openai-language-model');
+
+const utils = new OpenAiModelUtils();
+
+describe('OpenAiModelUtils - processMessages', () => {
+    describe("when developerMessageSettings is 'skip'", () => {
+        it('should remove all system messages', () => {
+            const messages = [
+                { actor: 'system', type: 'text', query: 'system message' },
+                { actor: 'user', type: 'text', query: 'user message' },
+                { actor: 'system', type: 'text', query: 'another system message' },
+            ];
+            const result = utils.processMessages(messages, 'skip');
+            expect(result).to.deep.equal([
+                { role: 'user', content: 'user message' }
+            ]);
+        });
+
+        it('should do nothing if there is no system message', () => {
+            const messages = [
+                { actor: 'user', type: 'text', query: 'user message' },
+                { actor: 'user', type: 'text', query: 'another user message' },
+                { actor: 'ai', type: 'text', query: 'ai message' }
+            ];
+            const result = utils.processMessages(messages, 'skip');
+            expect(result).to.deep.equal([
+                { role: 'user', content: 'user message' },
+                { role: 'user', content: 'another user message' },
+                { role: 'assistant', content: 'ai message' }
+            ]);
+        });
+    });
+
+    describe("when developerMessageSettings is 'mergeWithFollowingUserMessage'", () => {
+        it('should merge the system message with the next user message, assign role user, and remove the system message', () => {
+            const messages = [
+                { actor: 'system', type: 'text', query: 'system msg' },
+                { actor: 'user', type: 'text', query: 'user msg' },
+                { actor: 'ai', type: 'text', query: 'ai message' }
+            ];
+            const result = utils.processMessages(messages, 'mergeWithFollowingUserMessage');
+            expect(result).to.deep.equal([
+                { role: 'user', content: 'system msg\nuser msg' },
+                { role: 'assistant', content: 'ai message' }
+            ]);
+        });
+
+        it('should create a new user message if no user message exists, and remove the system message', () => {
+            const messages = [
+                { actor: 'system', type: 'text', query: 'system only msg' },
+                { actor: 'ai', type: 'text', query: 'ai message' }
+            ];
+            const result = utils.processMessages(messages, 'mergeWithFollowingUserMessage');
+            expect(result).to.deep.equal([
+                { role: 'user', content: 'system only msg' },
+                { role: 'assistant', content: 'ai message' }
+            ]);
+        });
+
+        it('should merge multiple system messages with the next user message', () => {
+            const messages = [
+                { actor: 'user', type: 'text', query: 'user message' },
+                { actor: 'system', type: 'text', query: 'system message' },
+                { actor: 'system', type: 'text', query: 'system message2' },
+                { actor: 'user', type: 'text', query: 'user message2' },
+                { actor: 'ai', type: 'text', query: 'ai message' }
+            ];
+            const result = utils.processMessages(messages, 'mergeWithFollowingUserMessage');
+            expect(result).to.deep.equal([
+                { role: 'user', content: 'user message' },
+                { role: 'user', content: 'system message\nsystem message2\nuser message2' },
+                { role: 'assistant', content: 'ai message' }
+            ]);
+        });
+
+        it('should create a new user message from several system messages if the next message is not a user message', () => {
+            const messages = [
+                { actor: 'user', type: 'text', query: 'user message' },
+                { actor: 'system', type: 'text', query: 'system message' },
+                { actor: 'system', type: 'text', query: 'system message2' },
+                { actor: 'ai', type: 'text', query: 'ai message' }
+            ];
+            const result = utils.processMessages(messages, 'mergeWithFollowingUserMessage');
+            expect(result).to.deep.equal([
+                { role: 'user', content: 'user message' },
+                { role: 'user', content: 'system message\nsystem message2' },
+                { role: 'assistant', content: 'ai message' }
+            ]);
+        });
+    });
+
+    describe('when no special merging or skipping is needed', () => {
+        it('should leave messages unchanged in ordering and assign roles based on developerMessageSettings', () => {
+            const messages = [
+                { actor: 'user', type: 'text', query: 'user message' },
+                { actor: 'system', type: 'text', query: 'system message' },
+                { actor: 'ai', type: 'text', query: 'ai message' }
+            ];
+            // Using a developerMessageSettings that is not merge/skip, e.g., 'developer'
+            const result = utils.processMessages(messages, 'developer');
+            expect(result).to.deep.equal([
+                { role: 'user', content: 'user message' },
+                { role: 'developer', content: 'system message' },
+                { role: 'assistant', content: 'ai message' }
+            ]);
+        });
+    });
+
+    describe('role assignment for system messages when developerMessageSettings is one of the role strings', () => {
+        it('should assign role as specified for a system message when developerMessageSettings is "user"', () => {
+            const messages = [
+                { actor: 'system', type: 'text', query: 'system msg' },
+                { actor: 'ai', type: 'text', query: 'ai msg' }
+            ];
+            // Since the first message is system and developerMessageSettings is not merge/skip, ordering is not adjusted
+            const result = utils.processMessages(messages, 'user');
+            expect(result).to.deep.equal([
+                { role: 'user', content: 'system msg' },
+                { role: 'assistant', content: 'ai msg' }
+            ]);
+        });
+
+        it('should assign role as specified for a system message when developerMessageSettings is "system"', () => {
+            const messages = [
+                { actor: 'system', type: 'text', query: 'system msg' },
+                { actor: 'ai', type: 'text', query: 'ai msg' }
+            ];
+            const result = utils.processMessages(messages, 'system');
+            expect(result).to.deep.equal([
+                { role: 'system', content: 'system msg' },
+                { role: 'assistant', content: 'ai msg' }
+            ]);
+        });
+
+        it('should assign role as specified for a system message when developerMessageSettings is "developer"', () => {
+            const messages = [
+                { actor: 'system', type: 'text', query: 'system msg' },
+                { actor: 'user', type: 'text', query: 'user msg' },
+                { actor: 'ai', type: 'text', query: 'ai msg' }
+            ];
+            const result = utils.processMessages(messages, 'developer');
+            expect(result).to.deep.equal([
+                { role: 'developer', content: 'system msg' },
+                { role: 'user', content: 'user msg' },
+                { role: 'assistant', content: 'ai msg' }
+            ]);
+        });
+    });
+});