Skip to content

Commit

Permalink
refactor: Speed up Config fetching and Setup Config Groundwork 👷🚧 (da…
Browse files Browse the repository at this point in the history
…nny-avila#1297)

* refactor: move endpoint services to own directory

* refactor: make endpoint config handling more concise, separate logic, and cache the result for subsequent serving

* refactor: ModelController gets same treatment as EndpointController, draft OverrideController

* wip: flesh out override controller more to return real value

* refactor: client/api changes in anticipation of override
  • Loading branch information
danny-avila authored and shortpoet committed Dec 30, 2023
1 parent 843bd2a commit a6d197c
Show file tree
Hide file tree
Showing 27 changed files with 405 additions and 138 deletions.
8 changes: 7 additions & 1 deletion api/cache/getLogStores.js
Original file line number Diff line number Diff line change
@@ -1,7 +1,8 @@
const Keyv = require('keyv');
const keyvMongo = require('./keyvMongo');
const keyvRedis = require('./keyvRedis');
const { math, isEnabled } = require('../server/utils');
const { CacheKeys } = require('~/common/enums');
const { math, isEnabled } = require('~/server/utils');
const { logFile, violationFile } = require('./keyvFiles');
const { BAN_DURATION, USE_REDIS } = process.env ?? {};

Expand All @@ -17,7 +18,12 @@ const pending_req = isEnabled(USE_REDIS)
? new Keyv({ store: keyvRedis })
: new Keyv({ namespace: 'pending_req' });

const config = isEnabled(USE_REDIS)
? new Keyv({ store: keyvRedis })
: new Keyv({ namespace: CacheKeys.CONFIG });

const namespaces = {
config,
pending_req,
ban: new Keyv({ store: keyvMongo, namespace: 'bans', ttl: duration }),
general: new Keyv({ store: logFile, namespace: 'violations' }),
Expand Down
15 changes: 15 additions & 0 deletions api/common/enums.js
Original file line number Diff line number Diff line change
@@ -0,0 +1,15 @@
/**
 * @typedef {Object} CacheKeys
 * @property {'config'} CONFIG - Key for the config cache.
 * @property {'modelsConfig'} MODELS_CONFIG - Key for the model config cache.
 * @property {'defaultConfig'} DEFAULT_CONFIG - Key for the default config cache.
 * @property {'overrideConfig'} OVERRIDE_CONFIG - Key for the override config cache.
 */
// Frozen so cache-key constants cannot be mutated by consumers.
const CacheKeys = Object.freeze({
  CONFIG: 'config',
  MODELS_CONFIG: 'modelsConfig',
  DEFAULT_CONFIG: 'defaultConfig',
  OVERRIDE_CONFIG: 'overrideConfig',
});

module.exports = { CacheKeys };
100 changes: 11 additions & 89 deletions api/server/controllers/EndpointController.js
Original file line number Diff line number Diff line change
@@ -1,95 +1,17 @@
const { EModelEndpoint } = require('~/server/routes/endpoints/schemas');
const { availableTools } = require('~/app/clients/tools');
const { addOpenAPISpecs } = require('~/app/clients/tools/util/addOpenAPISpecs');
const {
openAIApiKey,
azureOpenAIApiKey,
useAzurePlugins,
userProvidedOpenAI,
palmKey,
openAI,
// assistant,
azureOpenAI,
bingAI,
chatGPTBrowser,
anthropic,
} = require('~/server/services/EndpointService').config;
const { getLogStores } = require('~/cache');
const { CacheKeys } = require('~/common/enums');
const { loadDefaultEndpointsConfig } = require('~/server/services/Config');

let i = 0;
async function endpointController(req, res) {
let key, palmUser;
try {
key = require('~/data/auth.json');
} catch (e) {
if (i === 0) {
i++;
}
const cache = getLogStores(CacheKeys.CONFIG);
const config = await cache.get(CacheKeys.DEFAULT_CONFIG);
if (config) {
res.send(config);
return;
}

if (palmKey === 'user_provided') {
palmUser = true;
if (i <= 1) {
i++;
}
}

const tools = await addOpenAPISpecs(availableTools);
function transformToolsToMap(tools) {
return tools.reduce((map, obj) => {
map[obj.pluginKey] = obj.name;
return map;
}, {});
}
const plugins = transformToolsToMap(tools);

const google = key || palmUser ? { userProvide: palmUser } : false;

const gptPlugins =
openAIApiKey || azureOpenAIApiKey
? {
plugins,
availableAgents: ['classic', 'functions'],
userProvide: userProvidedOpenAI,
azure: useAzurePlugins,
}
: false;

let enabledEndpoints = [
EModelEndpoint.openAI,
EModelEndpoint.azureOpenAI,
EModelEndpoint.google,
EModelEndpoint.bingAI,
EModelEndpoint.chatGPTBrowser,
EModelEndpoint.gptPlugins,
EModelEndpoint.anthropic,
];

const endpointsEnv = process.env.ENDPOINTS || '';
if (endpointsEnv) {
enabledEndpoints = endpointsEnv
.split(',')
.filter((endpoint) => endpoint?.trim())
.map((endpoint) => endpoint.trim());
}

const endpointConfig = {
[EModelEndpoint.openAI]: openAI,
[EModelEndpoint.azureOpenAI]: azureOpenAI,
[EModelEndpoint.google]: google,
[EModelEndpoint.bingAI]: bingAI,
[EModelEndpoint.chatGPTBrowser]: chatGPTBrowser,
[EModelEndpoint.gptPlugins]: gptPlugins,
[EModelEndpoint.anthropic]: anthropic,
};

const orderedAndFilteredEndpoints = enabledEndpoints.reduce((config, key, index) => {
if (endpointConfig[key]) {
config[key] = { ...(endpointConfig[key] ?? {}), order: index };
}
return config;
}, {});

res.send(JSON.stringify(orderedAndFilteredEndpoints));
const defaultConfig = await loadDefaultEndpointsConfig();
await cache.set(CacheKeys.DEFAULT_CONFIG, defaultConfig);
res.send(JSON.stringify(defaultConfig));
}

module.exports = endpointController;
42 changes: 12 additions & 30 deletions api/server/controllers/ModelController.js
Original file line number Diff line number Diff line change
@@ -1,35 +1,17 @@
const { EModelEndpoint } = require('../routes/endpoints/schemas');
const {
getOpenAIModels,
getChatGPTBrowserModels,
getAnthropicModels,
} = require('../services/ModelService');

const { useAzurePlugins } = require('../services/EndpointService').config;

const fitlerAssistantModels = (str) => {
return /gpt-4|gpt-3\\.5/i.test(str) && !/vision|instruct/i.test(str);
};
const { getLogStores } = require('~/cache');
const { CacheKeys } = require('~/common/enums');
const { loadDefaultModels } = require('~/server/services/Config');

async function modelController(req, res) {
const openAI = await getOpenAIModels();
const azureOpenAI = await getOpenAIModels({ azure: true });
const gptPlugins = await getOpenAIModels({ azure: useAzurePlugins, plugins: true });
const chatGPTBrowser = getChatGPTBrowserModels();
const anthropic = getAnthropicModels();

res.send(
JSON.stringify({
[EModelEndpoint.openAI]: openAI,
[EModelEndpoint.azureOpenAI]: azureOpenAI,
[EModelEndpoint.assistant]: openAI.filter(fitlerAssistantModels),
[EModelEndpoint.google]: ['chat-bison', 'text-bison', 'codechat-bison'],
[EModelEndpoint.bingAI]: ['BingAI', 'Sydney'],
[EModelEndpoint.chatGPTBrowser]: chatGPTBrowser,
[EModelEndpoint.gptPlugins]: gptPlugins,
[EModelEndpoint.anthropic]: anthropic,
}),
);
const cache = getLogStores(CacheKeys.CONFIG);
let modelConfig = await cache.get(CacheKeys.MODELS_CONFIG);
if (modelConfig) {
res.send(modelConfig);
return;
}
modelConfig = await loadDefaultModels();
await cache.set(CacheKeys.MODELS_CONFIG, modelConfig);
res.send(modelConfig);
}

module.exports = modelController;
27 changes: 27 additions & 0 deletions api/server/controllers/OverrideController.js
Original file line number Diff line number Diff line change
@@ -0,0 +1,27 @@
const { getLogStores } = require('~/cache');
const { CacheKeys } = require('~/common/enums');
const { loadOverrideConfig } = require('~/server/services/Config');

/**
 * Handles GET /config/override.
 *
 * Serves the override config from cache when available. A cached `false`
 * records that a previous load found no override, so the load is attempted
 * at most once. On a cache miss, loads the override config, primes the
 * default-endpoints and models caches from it when present, caches it, and
 * returns it.
 *
 * @param {object} req - Express request (unused).
 * @param {object} res - Express response.
 */
async function overrideController(req, res) {
  const cache = getLogStores(CacheKeys.CONFIG);
  const cached = await cache.get(CacheKeys.OVERRIDE_CONFIG);
  if (cached) {
    res.send(cached);
    return;
  } else if (cached === false) {
    // Previous load found no override; respond with the same JSON body as
    // the first-load path did (original sent a bare boolean here, which made
    // the response encoding inconsistent between cold and warm requests).
    res.send(JSON.stringify(false));
    return;
  }

  const overrideConfig = await loadOverrideConfig();
  if (!overrideConfig) {
    // Cache the negative result so we don't re-attempt the load per request.
    await cache.set(CacheKeys.OVERRIDE_CONFIG, false);
    res.send(JSON.stringify(false));
    return;
  }

  const { endpointsConfig, modelsConfig } = overrideConfig;
  if (endpointsConfig) {
    await cache.set(CacheKeys.DEFAULT_CONFIG, endpointsConfig);
  }
  if (modelsConfig) {
    await cache.set(CacheKeys.MODELS_CONFIG, modelsConfig);
  }
  await cache.set(CacheKeys.OVERRIDE_CONFIG, overrideConfig);
  res.send(JSON.stringify(overrideConfig));
}

module.exports = overrideController;
4 changes: 3 additions & 1 deletion api/server/routes/endpoints.js
Original file line number Diff line number Diff line change
@@ -1,7 +1,9 @@
const express = require('express');
const router = express.Router();
const endpointController = require('../controllers/EndpointController');
const endpointController = require('~/server/controllers/EndpointController');
const overrideController = require('~/server/controllers/OverrideController');

router.get('/', endpointController);
router.get('/config/override', overrideController);

module.exports = router;
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
const { EModelEndpoint } = require('../routes/endpoints/schemas');
const { EModelEndpoint } = require('~/server/routes/endpoints/schemas');

const {
OPENAI_API_KEY: openAIApiKey,
Expand Down
13 changes: 13 additions & 0 deletions api/server/services/Config/index.js
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
const { config } = require('./EndpointService');
const loadDefaultModels = require('./loadDefaultModels');
const loadOverrideConfig = require('./loadOverrideConfig');
const loadAsyncEndpoints = require('./loadAsyncEndpoints');
const loadDefaultEndpointsConfig = require('./loadDefaultEConfig');

module.exports = {
config,
loadDefaultModels,
loadOverrideConfig,
loadAsyncEndpoints,
loadDefaultEndpointsConfig,
};
51 changes: 51 additions & 0 deletions api/server/services/Config/loadAsyncEndpoints.js
Original file line number Diff line number Diff line change
@@ -0,0 +1,51 @@
const { availableTools } = require('~/app/clients/tools');
const { addOpenAPISpecs } = require('~/app/clients/tools/util/addOpenAPISpecs');
const { openAIApiKey, azureOpenAIApiKey, useAzurePlugins, userProvidedOpenAI, palmKey } =
require('./EndpointService').config;

/**
 * Load async endpoints and return a configuration object.
 *
 * @returns {Promise<{ google: (object|boolean), gptPlugins: (object|boolean) }>}
 *   `google` is `false` when neither a service key file nor a user-provided
 *   PaLM key is configured; `gptPlugins` is `false` when no OpenAI/Azure API
 *   key is configured.
 */
async function loadAsyncEndpoints() {
  let key;
  let palmUser;
  try {
    // Google service credentials file; its absence is expected and non-fatal.
    key = require('~/data/auth.json');
  } catch (e) {
    // No auth.json on disk — fall through with `key` undefined.
    // (Removed a leftover dead counter `i` that was incremented here but never read.)
  }

  if (palmKey === 'user_provided') {
    palmUser = true;
  }

  // All available tools, with OpenAPI-spec-based plugins resolved in.
  const tools = await addOpenAPISpecs(availableTools);
  // Map of pluginKey -> display name.
  const plugins = Object.fromEntries(tools.map((tool) => [tool.pluginKey, tool.name]));

  const google = key || palmUser ? { userProvide: palmUser } : false;

  const gptPlugins =
    openAIApiKey || azureOpenAIApiKey
      ? {
          plugins,
          availableAgents: ['classic', 'functions'],
          userProvide: userProvidedOpenAI,
          azure: useAzurePlugins,
        }
      : false;

  return { google, gptPlugins };
}

module.exports = loadAsyncEndpoints;
52 changes: 52 additions & 0 deletions api/server/services/Config/loadDefaultEConfig.js
Original file line number Diff line number Diff line change
@@ -0,0 +1,52 @@
const { EModelEndpoint } = require('~/server/routes/endpoints/schemas');
const loadAsyncEndpoints = require('./loadAsyncEndpoints');
const { config } = require('./EndpointService');

/**
 * Load async endpoints and return a configuration object
 * @function loadDefaultEndpointsConfig
 * @returns {Promise<Object.<string, EndpointWithOrder>>} An object whose keys
 *   are endpoint names and whose values contain the endpoint configuration
 *   plus an `order` reflecting the endpoint's position in the `ENDPOINTS`
 *   env var (or the default ordering when it is unset).
 */
async function loadDefaultEndpointsConfig() {
  const { google, gptPlugins } = await loadAsyncEndpoints();
  const { openAI, bingAI, anthropic, azureOpenAI, chatGPTBrowser } = config;

  // Default ordering, used when the ENDPOINTS env var is not set.
  let enabledEndpoints = [
    EModelEndpoint.openAI,
    EModelEndpoint.azureOpenAI,
    EModelEndpoint.google,
    EModelEndpoint.bingAI,
    EModelEndpoint.chatGPTBrowser,
    EModelEndpoint.gptPlugins,
    EModelEndpoint.anthropic,
  ];

  // ENDPOINTS is a comma-separated whitelist that also defines display order.
  const endpointsEnv = process.env.ENDPOINTS || '';
  if (endpointsEnv) {
    enabledEndpoints = endpointsEnv
      .split(',')
      .filter((endpoint) => endpoint?.trim())
      .map((endpoint) => endpoint.trim());
  }

  const endpointConfig = {
    [EModelEndpoint.openAI]: openAI,
    [EModelEndpoint.azureOpenAI]: azureOpenAI,
    [EModelEndpoint.google]: google,
    [EModelEndpoint.bingAI]: bingAI,
    [EModelEndpoint.chatGPTBrowser]: chatGPTBrowser,
    [EModelEndpoint.gptPlugins]: gptPlugins,
    [EModelEndpoint.anthropic]: anthropic,
  };

  // Keep only configured endpoints, tagging each with its display order.
  // Accumulator renamed from `config`, which shadowed the imported `config`;
  // the redundant `?? {}` after the truthiness check is also dropped.
  return enabledEndpoints.reduce((orderedConfig, endpoint, index) => {
    if (endpointConfig[endpoint]) {
      orderedConfig[endpoint] = { ...endpointConfig[endpoint], order: index };
    }
    return orderedConfig;
  }, {});
}

module.exports = loadDefaultEndpointsConfig;
32 changes: 32 additions & 0 deletions api/server/services/Config/loadDefaultModels.js
Original file line number Diff line number Diff line change
@@ -0,0 +1,32 @@
const {
getOpenAIModels,
getChatGPTBrowserModels,
getAnthropicModels,
} = require('~/server/services/ModelService');
const { EModelEndpoint } = require('~/server/routes/endpoints/schemas');
const { useAzurePlugins } = require('~/server/services/Config/EndpointService').config;

// Assistant-capable models: gpt-4 / gpt-3.5 chat models, excluding
// vision and instruct variants. Renamed from `fitlerAssistantModels` (typo);
// regex fixed: `\\.` matched a literal backslash followed by any character,
// so names like "gpt-3.5-turbo" never matched.
const filterAssistantModels = (str) => {
  return /gpt-4|gpt-3\.5/i.test(str) && !/vision|instruct/i.test(str);
};

/**
 * Builds the default models configuration, mapping each endpoint to its
 * available model list.
 *
 * @returns {Promise<Object.<string, string[]>>} endpoint name -> model names.
 */
async function loadDefaultModels() {
  // The browser/anthropic lists are synchronous; the three OpenAI-backed
  // fetches are independent, so run them in parallel instead of serially.
  const anthropic = getAnthropicModels();
  const chatGPTBrowser = getChatGPTBrowserModels();
  const [openAI, azureOpenAI, gptPlugins] = await Promise.all([
    getOpenAIModels(),
    getOpenAIModels({ azure: true }),
    getOpenAIModels({ azure: useAzurePlugins, plugins: true }),
  ]);

  return {
    [EModelEndpoint.openAI]: openAI,
    [EModelEndpoint.azureOpenAI]: azureOpenAI,
    [EModelEndpoint.assistant]: openAI.filter(filterAssistantModels),
    [EModelEndpoint.google]: ['chat-bison', 'text-bison', 'codechat-bison'],
    [EModelEndpoint.bingAI]: ['BingAI', 'Sydney'],
    [EModelEndpoint.chatGPTBrowser]: chatGPTBrowser,
    [EModelEndpoint.gptPlugins]: gptPlugins,
    [EModelEndpoint.anthropic]: anthropic,
  };
}

module.exports = loadDefaultModels;
6 changes: 6 additions & 0 deletions api/server/services/Config/loadOverrideConfig.js
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
/**
 * Placeholder for fetching a remote override config.
 * Not implemented yet: always resolves to `false`, which callers interpret
 * as "no override config available".
 */
async function loadOverrideConfig() {
  const overrideConfig = false;
  return overrideConfig;
}

module.exports = loadOverrideConfig;
Loading

0 comments on commit a6d197c

Please sign in to comment.