# [editor] Streaming Outputs (#798)

Add client-side handling for streaming outputs. We currently derive
whether the request should stream by checking settings. Not a huge fan
of this approach since:
- it couples the client with config runtime logic for resolving the 'stream' value
- it assumes the models actually use a 'stream' setting

This is sufficient for now, I think. Note that I do think it's nice to
allow the editor user to toggle between streaming and non-streaming, since it will
be useful for debugging their model parser.
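
For context, here is a minimal sketch of the two stream events the client handles. It mirrors the `RunPromptStreamEvent` union added in `EditorContainer.tsx`; the handler below is purely illustrative (the real editor dispatches `STREAM_OUTPUT_CHUNK` / `CONSOLIDATE_AICONFIG` reducer actions instead of logging):

```typescript
import type { AIConfig, Output } from "aiconfig";

// Mirrors the RunPromptStreamEvent union added in EditorContainer.tsx:
// partial output chunks while the model streams, then a final consolidated config.
type RunPromptStreamEvent =
  | { type: "output_chunk"; data: Output }
  | { type: "aiconfig"; data: AIConfig };

// Illustrative handler only; the real client updates editor state rather than logging.
function handleStreamEvent(event: RunPromptStreamEvent) {
  if (event.type === "output_chunk") {
    console.log("streamed output chunk", event.data);
  } else {
    console.log("run complete, consolidated aiconfig", event.data);
  }
}
```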

## Testing:
- Error handling: added `return HttpResponseWithAIConfig(message="No
AIConfig loaded", code=400, aiconfig=None).to_flask_format()` in /run
with 'stream' set to true, and verified the error is shown as a notification and
the run state is stopped on the client
- Non-streaming model (dall-e) runs correctly
- Streaming works for models with the 'stream' setting set to true/false (example settings below):


https://github.com/lastmile-ai/aiconfig/assets/5060851/5242f49f-0003-45a6-af19-1e29c611eca4
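
For debugging the stream/no-stream toggle mentioned above, here is a hypothetical prompt showing where the client looks for the `stream` flag. The model name and values are made up, and per-prompt settings take precedence over the global model settings (see `isStreamingSupported` in the diff below):

```typescript
import type { Prompt } from "aiconfig";

// Hypothetical prompt for illustration; only metadata.model.settings.stream
// matters for the client's stream / no-stream decision.
const chatPrompt: Prompt = {
  name: "chat_prompt",
  input: "Tell me a joke",
  metadata: {
    model: {
      name: "gpt-4", // assumed model name, purely illustrative
      settings: {
        stream: true, // flip to false to exercise the non-streaming /run path
      },
    },
  },
};
```
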
rholinshead authored Jan 6, 2024 · 2 parents e3590eb + 7613cf1 · commit 54de9cc
Showing 5 changed files with 132 additions and 14 deletions.
38 changes: 36 additions & 2 deletions python/src/aiconfig/editor/client/src/Editor.tsx
@@ -1,11 +1,19 @@
import EditorContainer, {
AIConfigCallbacks,
RunPromptStreamCallback,
} from "./components/EditorContainer";
import { Flex, Loader, MantineProvider, Image } from "@mantine/core";
import { AIConfig, InferenceSettings, JSONObject, Prompt } from "aiconfig";
import {
AIConfig,
InferenceSettings,
JSONObject,
Output,
Prompt,
} from "aiconfig";
import { useCallback, useEffect, useMemo, useState } from "react";
import { ufetch } from "ufetch";
import { ROUTE_TABLE } from "./utils/api";
import { streamingApi } from "./utils/oboeHelpers";

export default function Editor() {
const [aiconfig, setAiConfig] = useState<AIConfig | undefined>();
@@ -66,6 +74,30 @@ export default function Editor() {
});
}, []);

const runPromptStream = useCallback(
async (promptName: string, onStream: RunPromptStreamCallback) => {
await streamingApi(
{
url: ROUTE_TABLE.RUN_PROMPT,
method: "POST",
body: {
prompt_name: promptName,
stream: true,
},
},
"output_chunk",
(data) => {
onStream({ type: "output_chunk", data: data as Output });
},
"aiconfig",
(data) => {
onStream({ type: "aiconfig", data: data as AIConfig });
}
);
},
[]
);

const updatePrompt = useCallback(
async (promptName: string, promptData: Prompt) => {
return await ufetch.post(ROUTE_TABLE.UPDATE_PROMPT, {
@@ -124,6 +156,7 @@ export default function Editor() {
getModels,
getServerStatus,
runPrompt,
runPromptStream,
save,
setConfigDescription,
setConfigName,
@@ -137,6 +170,7 @@ export default function Editor() {
getModels,
getServerStatus,
runPrompt,
runPromptStream,
save,
setConfigDescription,
setConfigName,
@@ -168,7 +202,7 @@ export default function Editor() {
deg: 45,
},

globalStyles: (theme) => ({
globalStyles: () => ({
".editorBackground": {
background:
"radial-gradient(ellipse at top,#08122d,#030712),radial-gradient(ellipse at bottom,#030712,#030712)",
@@ -14,6 +14,7 @@ import {
AIConfig,
InferenceSettings,
JSONObject,
Output,
Prompt,
PromptInput,
} from "aiconfig";
@@ -36,6 +37,7 @@ import AddPromptButton from "./prompt/AddPromptButton";
import {
getDefaultNewPromptName,
getPrompt,
isStreamingSupported,
} from "../utils/aiconfigStateUtils";
import { debounce, uniqueId } from "lodash";
import PromptMenuButton from "./prompt/PromptMenuButton";
@@ -56,6 +58,18 @@ type Props = {
callbacks: AIConfigCallbacks;
};

type RunPromptStreamEvent =
| {
type: "output_chunk";
data: Output;
}
| {
type: "aiconfig";
data: AIConfig;
};

export type RunPromptStreamCallback = (event: RunPromptStreamEvent) => void;

export type AIConfigCallbacks = {
addPrompt: (
promptName: string,
@@ -66,6 +80,10 @@ export type AIConfigCallbacks = {
getModels: (search: string) => Promise<string[]>;
getServerStatus?: () => Promise<{ status: "OK" | "ERROR" }>;
runPrompt: (promptName: string) => Promise<{ aiconfig: AIConfig }>;
runPromptStream: (
promptName: string,
onStream: RunPromptStreamCallback
) => Promise<void>;
save: (aiconfig: AIConfig) => Promise<void>;
setConfigDescription: (description: string) => Promise<void>;
setConfigName: (name: string) => Promise<void>;
@@ -526,6 +544,8 @@ export default function EditorContainer({
);

const runPromptCallback = callbacks.runPrompt;
const runPromptStreamCallback = callbacks.runPromptStream;

const onRunPrompt = useCallback(
async (promptId: string) => {
const action: AIConfigReducerAction = {
@@ -540,14 +560,36 @@
if (!statePrompt) {
throw new Error(`Could not find prompt with id ${promptId}`);
}
const promptName = statePrompt.name;
const serverConfigRes = await runPromptCallback(promptName);

dispatch({
type: "CONSOLIDATE_AICONFIG",
action,
config: serverConfigRes.aiconfig,
});
const promptName = statePrompt.name;
const isStream = isStreamingSupported(statePrompt, stateRef.current);

if (isStream) {
await runPromptStreamCallback(promptName, (event) => {
if (event.type === "output_chunk") {
dispatch({
type: "STREAM_OUTPUT_CHUNK",
id: promptId,
output: event.data,
});
} else if (event.type === "aiconfig") {
dispatch({
type: "CONSOLIDATE_AICONFIG",
action,
config: event.data,
});
}
});
return;
} else {
const serverConfigRes = await runPromptCallback(promptName);

dispatch({
type: "CONSOLIDATE_AICONFIG",
action,
config: serverConfigRes.aiconfig,
});
}
} catch (err: unknown) {
const message = (err as RequestCallbackError).message ?? null;

@@ -567,7 +609,7 @@
});
}
},
[runPromptCallback]
[runPromptCallback, runPromptStreamCallback]
);

const setNameCallback = callbacks.setConfigName;
@@ -1,6 +1,6 @@
import { ClientAIConfig, ClientPrompt } from "../shared/types";
import { getPromptModelName } from "../utils/promptUtils";
import { AIConfig, JSONObject, PromptInput } from "aiconfig";
import { AIConfig, JSONObject, Output, PromptInput } from "aiconfig";

export type AIConfigReducerAction =
| MutateAIConfigAction
@@ -14,6 +14,7 @@ export type MutateAIConfigAction =
| RunPromptAction
| SetDescriptionAction
| SetNameAction
| StreamOutputChunkAction
| UpdatePromptInputAction
| UpdatePromptNameAction
| UpdatePromptModelAction
@@ -63,6 +64,12 @@ export type SetNameAction = {
name: string;
};

export type StreamOutputChunkAction = {
type: "STREAM_OUTPUT_CHUNK";
id: string;
output: Output;
};

export type UpdatePromptInputAction = {
type: "UPDATE_PROMPT_INPUT";
id: string;
@@ -255,6 +262,12 @@ export default function aiconfigReducer(
name: action.name,
};
}
case "STREAM_OUTPUT_CHUNK": {
return reduceReplacePrompt(dirtyState, action.id, (prompt) => ({
...prompt,
outputs: [action.output],
}));
}
case "UPDATE_PROMPT_INPUT": {
return reduceReplaceInput(dirtyState, action.id, () => action.input);
}
29 changes: 29 additions & 0 deletions python/src/aiconfig/editor/client/src/utils/aiconfigStateUtils.ts
@@ -1,5 +1,6 @@
import { AIConfig } from "aiconfig";
import { ClientAIConfig, ClientPrompt } from "../shared/types";
import { getPromptModelName } from "./promptUtils";

export function getDefaultNewPromptName(aiconfig: AIConfig): string {
const existingNames = aiconfig.prompts.map((prompt) => prompt.name);
@@ -16,3 +17,31 @@ export function getPrompt(
): ClientPrompt | undefined {
return aiconfig.prompts.find((prompt) => prompt._ui.id === id);
}

// TODO: This is pretty hacky. Streaming is actually part of AIConfig runtime and not necessarily part of model settings,
// let alone required to be defined by 'stream' boolean... Ideally we should treat everything as stream but this should work for now.
export function isStreamingSupported(
prompt: ClientPrompt,
config: ClientAIConfig
): boolean {
const promptModelSettings =
prompt.metadata?.model && typeof prompt.metadata.model !== "string"
? prompt.metadata.model?.settings
: undefined;
if (promptModelSettings) {
if (promptModelSettings?.stream === true) {
return true;
} else if (promptModelSettings?.stream === false) {
return false;
}
}

const promptModelName = getPromptModelName(prompt);
if (promptModelName) {
const globalModelSettings =
config.metadata?.models?.[promptModelName]?.settings;
return globalModelSettings?.stream === true;
}

return false;
}
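
A quick worked example of the precedence this helper applies: a prompt-level `stream` setting wins over the global model settings. The object literals below are simplified sketches cast to the client types; the model name and `_ui` id are made up, and the import paths assume the sketch lives alongside `aiconfigStateUtils.ts`:

```typescript
import { ClientAIConfig, ClientPrompt } from "../shared/types";
import { isStreamingSupported } from "./aiconfigStateUtils";

// Prompt-level settings pin stream to false...
const prompt = {
  name: "image_prompt",
  input: "A cat in a hat",
  metadata: { model: { name: "dall-e-3", settings: { stream: false } } },
  _ui: { id: "prompt-1" },
} as unknown as ClientPrompt;

// ...even though the global model settings request streaming.
const config = {
  metadata: { models: { "dall-e-3": { settings: { stream: true } } } },
  prompts: [prompt],
} as unknown as ClientAIConfig;

isStreamingSupported(prompt, config); // => false: the prompt-level setting wins
```
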
6 changes: 3 additions & 3 deletions python/src/aiconfig/editor/client/src/utils/oboeHelpers.ts
@@ -7,11 +7,11 @@ import oboe, { Options } from "oboe";
export async function streamingApi<T>(
headers: Options,
on: string = "*",
fn: (data: any) => void,
fn: (data: unknown) => void,
on2?: string,
fn2?: (data: any) => void,
fn2?: (data: unknown) => void,
on3?: string,
fn3?: (data: any) => void
fn3?: (data: unknown) => void
): Promise<T> {
return new Promise((resolve, reject) => {
if (fn2 && on2 && fn3 && on3) {
