Merged
27 commits
0e6505c
feat: show document artifact after generating report
thucpn May 28, 2025
5a8f1a3
keep chat message content as it is
thucpn Jun 5, 2025
0d10af0
Merge branch 'main' into tp/show-document-artifact-after-generate-report
thucpn Jun 5, 2025
e4db821
use artifactEvent from server
thucpn Jun 5, 2025
2c033d8
Merge branch 'main' into tp/show-document-artifact-after-generate-report
thucpn Jun 5, 2025
1905f7e
add deep research example
thucpn Jun 5, 2025
f7326ca
bump chat-ui for new editor
thucpn Jun 5, 2025
78d5efb
import editor css
thucpn Jun 5, 2025
02f4922
hide warning for workflowEvent<{}>() in eject mode
thucpn Jun 5, 2025
4f2cdbd
fix format
thucpn Jun 5, 2025
bd1baa3
use CL for better testing
thucpn Jun 5, 2025
3e28276
generate artifact after streaming report in Python
thucpn Jun 5, 2025
b0b12b8
bump chat-ui to support citations
thucpn Jun 5, 2025
a471085
use isinstance to check stream
thucpn Jun 5, 2025
b345945
fix document editor spacing
thucpn Jun 5, 2025
38ac299
Create tame-wolves-obey.md
thucpn Jun 5, 2025
b97dc3e
add sources to document artifact
thucpn Jun 6, 2025
f521f12
add sources to document artifact in python
thucpn Jun 6, 2025
bd1294e
type cast
thucpn Jun 6, 2025
f6e6997
no need score
thucpn Jun 6, 2025
b682e55
fix lint
thucpn Jun 6, 2025
c244605
move handle stream logic to server
thucpn Jun 6, 2025
9f062df
refactor: use chunk.text and chunk.raw
thucpn Jun 6, 2025
ba415e0
bump chat-ui 0.5.6 to fix citations
thucpn Jun 6, 2025
2b4dbba
update changset
thucpn Jun 6, 2025
4ce3cf6
Merge branch 'main' into tp/show-document-artifact-after-generate-report
thucpn Jun 6, 2025
b1e59f6
fix lock
thucpn Jun 6, 2025
7 changes: 7 additions & 0 deletions .changeset/tame-wolves-obey.md
@@ -0,0 +1,7 @@
---
"create-llama": patch
"@llamaindex/server": patch
"@create-llama/llama-index-server": patch
---

feat: show document artifact after generating report
@@ -1,7 +1,7 @@
import logging
import os
import uuid
-from typing import List, Literal, Optional
+from typing import List, Literal, Optional, AsyncGenerator

from app.index import get_index
from llama_index.core.base.llms.types import (
@@ -23,7 +23,18 @@
Workflow,
step,
)
-from llama_index.server.api.models import ChatRequest, SourceNodesEvent, UIEvent
+from llama_index.server.api.models import (
+    ArtifactEvent,
+    ArtifactType,
+    ChatRequest,
+    SourceNodesEvent,
+    UIEvent,
+    Artifact,
+    DocumentArtifactData,
+    DocumentArtifactSource,
+)
+import time
+from llama_index.server.utils.stream import write_response_to_stream
from pydantic import BaseModel, Field

logger = logging.getLogger("uvicorn")
@@ -365,8 +376,31 @@ async def report(self, ctx: Context, ev: ReportEvent) -> StopEvent:
            user_request=self.user_request,
            stream=self.stream,
        )

+        final_response = await write_response_to_stream(res, ctx)

+        ctx.write_event_to_stream(
+            ArtifactEvent(
+                data=Artifact(
+                    type=ArtifactType.DOCUMENT,
+                    created_at=int(time.time()),
+                    data=DocumentArtifactData(
+                        title="DeepResearch Report",
+                        content=final_response,
+                        type="markdown",
+                        sources=[
+                            DocumentArtifactSource(
+                                id=node.id_,
+                            )
+                            for node in self.context_nodes
+                        ],
+                    ),
+                ),
+            )
+        )

        return StopEvent(
-            result=res,
+            result="",
        )


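Note: with this change the report is first streamed chunk-by-chunk via `write_response_to_stream`, then re-emitted in full as an `ArtifactEvent` so the UI can open it as a document artifact with citation sources; `StopEvent` now carries an empty result, presumably because the content has already been delivered through the stream and the artifact.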
@@ -1,4 +1,4 @@
-import { toSourceEvent } from "@llamaindex/server";
+import { artifactEvent, toSourceEvent } from "@llamaindex/server";
import {
agentStreamEvent,
createStatefulMiddleware,
@@ -339,6 +339,26 @@ export function getWorkflow(index: VectorStoreIndex | LlamaCloudIndex) {
        }),
      );
    }

+    // Open the generated report in Canvas
+    sendEvent(
+      artifactEvent.with({
+        type: "artifact",
+        data: {
+          type: "document",
+          created_at: Date.now(),
+          data: {
+            title: "DeepResearch Report",
+            content: response,
+            type: "markdown",
+            sources: state.contextNodes.map((node) => ({
+              id: node.node.id_,
+            })),
+          },
+        },
+      }),
+    );

    return stopAgentEvent.with({
      result: response,
    });
1 change: 1 addition & 0 deletions packages/server/next/app/layout.tsx
@@ -1,6 +1,7 @@
import type { Metadata } from "next";
import { Inter } from "next/font/google";

import "@llamaindex/chat-ui/styles/editor.css";
import "@llamaindex/chat-ui/styles/markdown.css";
import "@llamaindex/chat-ui/styles/pdf.css";
import "./globals.css";
1 change: 1 addition & 0 deletions packages/server/project-config/eslint.config.mjs
@@ -18,6 +18,7 @@ const eslintConfig = [
"react-hooks/exhaustive-deps": "off",
"@next/next/no-img-element": "off",
"@next/next/no-assign-module-variable": "off",
"@typescript-eslint/no-empty-object-type": "off",
},
},
{
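Note: disabling `@typescript-eslint/no-empty-object-type` here matches commit 02f4922, which hides the lint warning that `workflowEvent<{}>()` otherwise triggers in eject mode.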
1 change: 1 addition & 0 deletions packages/server/src/utils/events.ts
@@ -110,6 +110,7 @@ export type DocumentArtifactData = {
  title: string;
  content: string;
  type: string; // markdown, html,...
+  sources?: { id: string }[]; // sources that are used to render citation numbers in the document
};

export type CodeArtifact = Artifact<CodeArtifactData> & {
@@ -4,6 +4,7 @@
    ArtifactType,
    CodeArtifactData,
    DocumentArtifactData,
+    DocumentArtifactSource,
)
from llama_index.server.models.chat import ChatAPIMessage, ChatRequest
from llama_index.server.models.hitl import HumanInputEvent, HumanResponseEvent
@@ -20,6 +21,7 @@
"ArtifactEvent",
"ArtifactType",
"DocumentArtifactData",
"DocumentArtifactSource",
"CodeArtifactData",
"ChatAPIMessage",
"ChatRequest",
@@ -1,6 +1,6 @@
import logging
from enum import Enum
-from typing import Literal, Optional, Union
+from typing import List, Literal, Optional, Union

from llama_index.core.workflow.events import Event
from llama_index.server.models.chat import ChatAPIMessage
@@ -21,10 +21,16 @@ class CodeArtifactData(BaseModel):
    language: str


+class DocumentArtifactSource(BaseModel):
+    id: str
+    # we can add more fields here
+
+
class DocumentArtifactData(BaseModel):
    title: str
    content: str
    type: Literal["markdown", "html"]
+    sources: Optional[List[DocumentArtifactSource]] = None


class Artifact(BaseModel):
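For reviewers, a minimal sketch of how the new models compose, mirroring the workflow change above (the title, content, and source id are placeholder values):

import time

from llama_index.server.api.models import (
    Artifact,
    ArtifactEvent,
    ArtifactType,
    DocumentArtifactData,
    DocumentArtifactSource,
)

# Build a document artifact event carrying a report and its citation sources.
event = ArtifactEvent(
    data=Artifact(
        type=ArtifactType.DOCUMENT,
        created_at=int(time.time()),
        data=DocumentArtifactData(
            title="Example Report",
            content="# Findings\n...",
            type="markdown",
            sources=[DocumentArtifactSource(id="node-1")],
        ),
    )
)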
45 changes: 45 additions & 0 deletions python/llama-index-server/llama_index/server/utils/stream.py
@@ -0,0 +1,45 @@
from typing import AsyncGenerator, Union
from llama_index.core.base.llms.types import (
CompletionResponse,
CompletionResponseAsyncGen,
)
from llama_index.core.workflow import Context
from llama_index.core.agent.workflow.workflow_events import AgentStream


async def write_response_to_stream(
    res: Union[CompletionResponse, CompletionResponseAsyncGen],
    ctx: Context,
    current_agent_name: str = "assistant",
) -> str:
    """
    Handle both streaming and non-streaming LLM responses.

    Args:
        res: The LLM response (either streaming or non-streaming)
        ctx: The workflow context for writing events to stream
        current_agent_name: The name of the current agent (default: "assistant")

    Returns:
        The final response text as a string
    """
    final_response = ""

    if isinstance(res, AsyncGenerator):
        # Handle streaming response (CompletionResponseAsyncGen)
        async for chunk in res:
            ctx.write_event_to_stream(
                AgentStream(
                    delta=chunk.delta or "",
                    response=final_response,
                    current_agent_name=current_agent_name,
                    tool_calls=[],
                    raw=chunk.raw or "",
                )
            )
            final_response = chunk.text
    else:
        # Handle non-streaming response (CompletionResponse)
        final_response = res.text

    return final_response
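A hedged usage sketch (not part of the PR) showing how a workflow step might call this helper; the OpenAI LLM and the workflow class are assumed stand-ins for whatever the server actually configures:

from llama_index.core.workflow import Context, StartEvent, StopEvent, Workflow, step
from llama_index.llms.openai import OpenAI  # assumed LLM; any llama_index LLM works
from llama_index.server.utils.stream import write_response_to_stream


class ReportWorkflow(Workflow):
    @step
    async def report(self, ctx: Context, ev: StartEvent) -> StopEvent:
        llm = OpenAI(model="gpt-4o-mini")
        # astream_complete returns a CompletionResponseAsyncGen; the helper
        # forwards each delta to the client as AgentStream events and returns
        # the accumulated text. A plain acomplete() result works as well.
        res = await llm.astream_complete("Write a short report.")
        final_response = await write_response_to_stream(res, ctx)
        return StopEvent(result=final_response)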