diff --git a/.changeset/tame-wolves-obey.md b/.changeset/tame-wolves-obey.md
new file mode 100644
index 000000000..97a5c0236
--- /dev/null
+++ b/.changeset/tame-wolves-obey.md
@@ -0,0 +1,7 @@
+---
+"create-llama": patch
+"@llamaindex/server": patch
+"@create-llama/llama-index-server": patch
+---
+
+feat: show document artifact after generating report
diff --git a/packages/create-llama/templates/components/use-cases/python/deep_research/workflow.py b/packages/create-llama/templates/components/use-cases/python/deep_research/workflow.py
index 315a41d91..480410909 100644
--- a/packages/create-llama/templates/components/use-cases/python/deep_research/workflow.py
+++ b/packages/create-llama/templates/components/use-cases/python/deep_research/workflow.py
@@ -1,7 +1,7 @@
 import logging
 import os
 import uuid
-from typing import List, Literal, Optional
+from typing import List, Literal, Optional, AsyncGenerator
 
 from app.index import get_index
 from llama_index.core.base.llms.types import (
@@ -23,7 +23,18 @@
     Workflow,
     step,
 )
-from llama_index.server.api.models import ChatRequest, SourceNodesEvent, UIEvent
+from llama_index.server.api.models import (
+    ArtifactEvent,
+    ArtifactType,
+    ChatRequest,
+    SourceNodesEvent,
+    UIEvent,
+    Artifact,
+    DocumentArtifactData,
+    DocumentArtifactSource,
+)
+import time
+from llama_index.server.utils.stream import write_response_to_stream
 from pydantic import BaseModel, Field
 
 logger = logging.getLogger("uvicorn")
@@ -365,8 +376,31 @@ async def report(self, ctx: Context, ev: ReportEvent) -> StopEvent:
             user_request=self.user_request,
             stream=self.stream,
         )
+
+        final_response = await write_response_to_stream(res, ctx)
+
+        ctx.write_event_to_stream(
+            ArtifactEvent(
+                data=Artifact(
+                    type=ArtifactType.DOCUMENT,
+                    created_at=int(time.time()),
+                    data=DocumentArtifactData(
+                        title="DeepResearch Report",
+                        content=final_response,
+                        type="markdown",
+                        sources=[
+                            DocumentArtifactSource(
+                                id=node.id_,
+                            )
+                            for node in self.context_nodes
+                        ],
+                    ),
+                ),
+            )
+        )
+
         return StopEvent(
-            result=res,
+            result="",
         )
diff --git a/packages/create-llama/templates/components/use-cases/typescript/deep_research/src/app/workflow.ts b/packages/create-llama/templates/components/use-cases/typescript/deep_research/src/app/workflow.ts
index f825f9a87..bb200f2e1 100644
--- a/packages/create-llama/templates/components/use-cases/typescript/deep_research/src/app/workflow.ts
+++ b/packages/create-llama/templates/components/use-cases/typescript/deep_research/src/app/workflow.ts
@@ -1,4 +1,4 @@
-import { toSourceEvent } from "@llamaindex/server";
+import { artifactEvent, toSourceEvent } from "@llamaindex/server";
 import {
   agentStreamEvent,
   createStatefulMiddleware,
@@ -339,6 +339,26 @@ export function getWorkflow(index: VectorStoreIndex | LlamaCloudIndex) {
         }),
       );
     }
+
+    // Open the generated report in Canvas
+    sendEvent(
+      artifactEvent.with({
+        type: "artifact",
+        data: {
+          type: "document",
+          created_at: Date.now(),
+          data: {
+            title: "DeepResearch Report",
+            content: response,
+            type: "markdown",
+            sources: state.contextNodes.map((node) => ({
+              id: node.node.id_,
+            })),
+          },
+        },
+      }),
+    );
+
     return stopAgentEvent.with({
       result: response,
     });
diff --git a/packages/server/next/app/layout.tsx b/packages/server/next/app/layout.tsx
index c1d2c94c6..7220ef98d 100644
--- a/packages/server/next/app/layout.tsx
+++ b/packages/server/next/app/layout.tsx
@@ -1,6 +1,7 @@
 import type { Metadata } from "next";
 import { Inter } from "next/font/google";
 
+import "@llamaindex/chat-ui/styles/editor.css";
 import "@llamaindex/chat-ui/styles/markdown.css";
 import "@llamaindex/chat-ui/styles/pdf.css";
 import "./globals.css";
diff --git a/packages/server/project-config/eslint.config.mjs b/packages/server/project-config/eslint.config.mjs
index 4da0f732b..c0c72d494 100644
--- a/packages/server/project-config/eslint.config.mjs
+++ b/packages/server/project-config/eslint.config.mjs
@@ -18,6 +18,7 @@ const eslintConfig = [
       "react-hooks/exhaustive-deps": "off",
       "@next/next/no-img-element": "off",
       "@next/next/no-assign-module-variable": "off",
+      "@typescript-eslint/no-empty-object-type": "off",
     },
   },
   {
diff --git a/packages/server/src/utils/events.ts b/packages/server/src/utils/events.ts
index ebf528ebc..c8e8439e3 100644
--- a/packages/server/src/utils/events.ts
+++ b/packages/server/src/utils/events.ts
@@ -110,6 +110,7 @@ export type DocumentArtifactData = {
   title: string;
   content: string;
   type: string; // markdown, html,...
+  sources?: { id: string }[]; // sources that are used to render citation numbers in the document
 };
 
 export type CodeArtifact = Artifact & {
diff --git a/python/llama-index-server/llama_index/server/models/__init__.py b/python/llama-index-server/llama_index/server/models/__init__.py
index 2d7150bbd..04f0c689c 100644
--- a/python/llama-index-server/llama_index/server/models/__init__.py
+++ b/python/llama-index-server/llama_index/server/models/__init__.py
@@ -4,6 +4,7 @@
     ArtifactType,
     CodeArtifactData,
     DocumentArtifactData,
+    DocumentArtifactSource,
 )
 from llama_index.server.models.chat import ChatAPIMessage, ChatRequest
 from llama_index.server.models.hitl import HumanInputEvent, HumanResponseEvent
@@ -20,6 +21,7 @@
     "ArtifactEvent",
     "ArtifactType",
     "DocumentArtifactData",
+    "DocumentArtifactSource",
     "CodeArtifactData",
     "ChatAPIMessage",
     "ChatRequest",
diff --git a/python/llama-index-server/llama_index/server/models/artifacts.py b/python/llama-index-server/llama_index/server/models/artifacts.py
index ecc146928..226bdb9e8 100644
--- a/python/llama-index-server/llama_index/server/models/artifacts.py
+++ b/python/llama-index-server/llama_index/server/models/artifacts.py
@@ -1,6 +1,6 @@
 import logging
 from enum import Enum
-from typing import Literal, Optional, Union
+from typing import List, Literal, Optional, Union
 
 from llama_index.core.workflow.events import Event
 from llama_index.server.models.chat import ChatAPIMessage
@@ -21,10 +21,16 @@ class CodeArtifactData(BaseModel):
     language: str
 
 
+class DocumentArtifactSource(BaseModel):
+    id: str
+    # we can add more fields here
+
+
 class DocumentArtifactData(BaseModel):
     title: str
     content: str
     type: Literal["markdown", "html"]
+    sources: Optional[List[DocumentArtifactSource]] = None
 
 
 class Artifact(BaseModel):
diff --git a/python/llama-index-server/llama_index/server/utils/stream.py b/python/llama-index-server/llama_index/server/utils/stream.py
new file mode 100644
index 000000000..9e9010da3
--- /dev/null
+++ b/python/llama-index-server/llama_index/server/utils/stream.py
@@ -0,0 +1,45 @@
+from typing import AsyncGenerator, Union
+from llama_index.core.base.llms.types import (
+    CompletionResponse,
+    CompletionResponseAsyncGen,
+)
+from llama_index.core.workflow import Context
+from llama_index.core.agent.workflow.workflow_events import AgentStream
+
+
+async def write_response_to_stream(
+    res: Union[CompletionResponse, CompletionResponseAsyncGen],
+    ctx: Context,
+    current_agent_name: str = "assistant",
+) -> str:
+    """
+    Handle both streaming and non-streaming LLM responses.
+
+    Args:
+        res: The LLM response (either streaming or non-streaming)
+        ctx: The workflow context for writing events to stream
+        current_agent_name: The name of the current agent (default: "assistant")
+
+    Returns:
+        The final response text as a string
+    """
+    final_response = ""
+
+    if isinstance(res, AsyncGenerator):
+        # Handle streaming response (CompletionResponseAsyncGen)
+        async for chunk in res:
+            ctx.write_event_to_stream(
+                AgentStream(
+                    delta=chunk.delta or "",
+                    response=final_response,
+                    current_agent_name=current_agent_name,
+                    tool_calls=[],
+                    raw=chunk.raw or "",
+                )
+            )
+            final_response = chunk.text
+    else:
+        # Handle non-streaming response (CompletionResponse)
+        final_response = res.text
+
+    return final_response
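
For reference, a minimal usage sketch (not part of this diff) of how a custom workflow step could combine the new write_response_to_stream helper with the extended document artifact. The step name finish_report and the llm/prompt/nodes arguments are illustrative placeholders; the imports mirror the ones workflow.py uses above.

    import time

    from llama_index.core.workflow import Context, StopEvent
    from llama_index.server.api.models import (
        Artifact,
        ArtifactEvent,
        ArtifactType,
        DocumentArtifactData,
        DocumentArtifactSource,
    )
    from llama_index.server.utils.stream import write_response_to_stream


    async def finish_report(ctx: Context, llm, prompt: str, nodes) -> StopEvent:
        # Stream report tokens to the chat UI while accumulating the full text.
        res = await llm.astream_complete(prompt)
        final_response = await write_response_to_stream(res, ctx)

        # Emit the finished report as a document artifact so the UI can open it
        # in Canvas; source ids are used to render citation numbers.
        ctx.write_event_to_stream(
            ArtifactEvent(
                data=Artifact(
                    type=ArtifactType.DOCUMENT,
                    created_at=int(time.time()),
                    data=DocumentArtifactData(
                        title="DeepResearch Report",
                        content=final_response,
                        type="markdown",
                        sources=[DocumentArtifactSource(id=n.id_) for n in nodes],
                    ),
                ),
            )
        )
        # Return an empty result: the report is presented via the artifact instead.
        return StopEvent(result="")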