diff --git a/py/examples/chatbot_events_suggestions.py b/py/examples/chatbot_events_suggestions.py
new file mode 100644
index 0000000000..03cf1fb145
--- /dev/null
+++ b/py/examples/chatbot_events_suggestions.py
@@ -0,0 +1,47 @@
+# Chatbot / Events / Suggestions
+# Use prompt suggestions to simplify user interaction.
+# #chatbot #events #suggestions
+# ---
+from h2o_wave import main, app, Q, ui, data
+
+first_suggestion = "I need more information about this."
+second_suggestion = "I have another problem."
+third_suggestion = "The information you provided is not correct."
+fourth_suggestion = "I got this, thank you!"
+
+@app('/demo')
+async def serve(q: Q):
+ if not q.client.initialized:
+ q.page['example'] = ui.chatbot_card(
+ box='1 1 5 5',
+ data=data(fields='content from_user', t='list', rows=[
+ ['Hi, my files are not loaded after plugging my USB in.', True],
+ ['Hi, I am glad I can assist you today! Have you tried turning your PC off and on again?', False]
+ ]),
+ name='chatbot',
+ events=['prompt_suggestion'],
+ prompt_suggestions=[
+ ui.chat_prompt_suggestion('sug1', label=first_suggestion),
+ ui.chat_prompt_suggestion('sug2', label=second_suggestion),
+ ui.chat_prompt_suggestion('sug3', label=third_suggestion),
+ ui.chat_prompt_suggestion('sug4', label=fourth_suggestion),
+ ],
+ disabled=True
+ )
+ q.client.initialized = True
+
+ # Handle prompt_suggestion event.
+ elif q.events.chatbot and q.events.chatbot.prompt_suggestion:
+ # Append user message based on the suggestion.
+ if q.events.chatbot.prompt_suggestion == 'sug1':
+ q.page['example'].data += [first_suggestion, True]
+ elif q.events.chatbot.prompt_suggestion == 'sug2':
+ q.page['example'].data += [second_suggestion, True]
+ elif q.events.chatbot.prompt_suggestion == 'sug3':
+ q.page['example'].data += [third_suggestion, True]
+ elif q.events.chatbot.prompt_suggestion == 'sug4':
+ q.page['example'].data += [fourth_suggestion, True]
+ # Append bot response.
+ q.page['example'].data += ['I am a fake chatbot. Sorry, I cannot help you.', False]
+
+ await q.page.save()
diff --git a/py/examples/tour.conf b/py/examples/tour.conf
index a0bfc3992b..557b090506 100644
--- a/py/examples/tour.conf
+++ b/py/examples/tour.conf
@@ -36,6 +36,7 @@ chatbot_stream.py
chatbot_events_stop.py
chatbot_events_scroll.py
chatbot_events_feedback.py
+chatbot_events_suggestions.py
form.py
form_visibility.py
text.py
diff --git a/py/h2o_lightwave/h2o_lightwave/types.py b/py/h2o_lightwave/h2o_lightwave/types.py
index a00021a7c3..4ce958186e 100644
--- a/py/h2o_lightwave/h2o_lightwave/types.py
+++ b/py/h2o_lightwave/h2o_lightwave/types.py
@@ -8296,6 +8296,7 @@ def __init__(
events: Optional[List[str]] = None,
generating: Optional[bool] = None,
prompt_suggestions: Optional[List[ChatPromptSuggestion]] = None,
+ disabled: Optional[bool] = None,
commands: Optional[List[Command]] = None,
):
_guard_scalar('ChatbotCard.box', box, (str,), False, False, False)
@@ -8304,6 +8305,7 @@ def __init__(
_guard_vector('ChatbotCard.events', events, (str,), False, True, False)
_guard_scalar('ChatbotCard.generating', generating, (bool,), False, True, False)
_guard_vector('ChatbotCard.prompt_suggestions', prompt_suggestions, (ChatPromptSuggestion,), False, True, False)
+ _guard_scalar('ChatbotCard.disabled', disabled, (bool,), False, True, False)
_guard_vector('ChatbotCard.commands', commands, (Command,), False, True, False)
self.box = box
"""A string indicating how to place this component on the page."""
@@ -8319,6 +8321,8 @@ def __init__(
"""True to show a button to stop the text generation. Defaults to False."""
self.prompt_suggestions = prompt_suggestions
"""Clickable prompt suggestions shown below the last response."""
+ self.disabled = disabled
+ """True if the user input should be disabled."""
self.commands = commands
"""Contextual menu commands for this component."""
@@ -8330,6 +8334,7 @@ def dump(self) -> Dict:
_guard_vector('ChatbotCard.events', self.events, (str,), False, True, False)
_guard_scalar('ChatbotCard.generating', self.generating, (bool,), False, True, False)
_guard_vector('ChatbotCard.prompt_suggestions', self.prompt_suggestions, (ChatPromptSuggestion,), False, True, False)
+ _guard_scalar('ChatbotCard.disabled', self.disabled, (bool,), False, True, False)
_guard_vector('ChatbotCard.commands', self.commands, (Command,), False, True, False)
return _dump(
view='chatbot',
@@ -8340,6 +8345,7 @@ def dump(self) -> Dict:
events=self.events,
generating=self.generating,
prompt_suggestions=None if self.prompt_suggestions is None else [__e.dump() for __e in self.prompt_suggestions],
+ disabled=self.disabled,
commands=None if self.commands is None else [__e.dump() for __e in self.commands],
)
@@ -8359,6 +8365,8 @@ def load(__d: Dict) -> 'ChatbotCard':
_guard_scalar('ChatbotCard.generating', __d_generating, (bool,), False, True, False)
__d_prompt_suggestions: Any = __d.get('prompt_suggestions')
_guard_vector('ChatbotCard.prompt_suggestions', __d_prompt_suggestions, (dict,), False, True, False)
+ __d_disabled: Any = __d.get('disabled')
+ _guard_scalar('ChatbotCard.disabled', __d_disabled, (bool,), False, True, False)
__d_commands: Any = __d.get('commands')
_guard_vector('ChatbotCard.commands', __d_commands, (dict,), False, True, False)
box: str = __d_box
@@ -8368,6 +8376,7 @@ def load(__d: Dict) -> 'ChatbotCard':
events: Optional[List[str]] = __d_events
generating: Optional[bool] = __d_generating
prompt_suggestions: Optional[List[ChatPromptSuggestion]] = None if __d_prompt_suggestions is None else [ChatPromptSuggestion.load(__e) for __e in __d_prompt_suggestions]
+ disabled: Optional[bool] = __d_disabled
commands: Optional[List[Command]] = None if __d_commands is None else [Command.load(__e) for __e in __d_commands]
return ChatbotCard(
box,
@@ -8377,6 +8386,7 @@ def load(__d: Dict) -> 'ChatbotCard':
events,
generating,
prompt_suggestions,
+ disabled,
commands,
)
diff --git a/py/h2o_lightwave/h2o_lightwave/ui.py b/py/h2o_lightwave/h2o_lightwave/ui.py
index a21320e6cd..e2f8ab6f14 100644
--- a/py/h2o_lightwave/h2o_lightwave/ui.py
+++ b/py/h2o_lightwave/h2o_lightwave/ui.py
@@ -2904,6 +2904,7 @@ def chatbot_card(
events: Optional[List[str]] = None,
generating: Optional[bool] = None,
prompt_suggestions: Optional[List[ChatPromptSuggestion]] = None,
+ disabled: Optional[bool] = None,
commands: Optional[List[Command]] = None,
) -> ChatbotCard:
"""Create a chatbot card to allow getting prompts from users and providing them with LLM generated answers.
@@ -2916,6 +2917,7 @@ def chatbot_card(
events: The events to capture on this chatbot. One of 'stop' | 'scroll_up' | 'feedback' | 'prompt_suggestion'.
generating: True to show a button to stop the text generation. Defaults to False.
prompt_suggestions: Clickable prompt suggestions shown below the last response.
+ disabled: True if the user input should be disabled.
commands: Contextual menu commands for this component.
Returns:
A `h2o_wave.types.ChatbotCard` instance.
@@ -2928,6 +2930,7 @@ def chatbot_card(
events,
generating,
prompt_suggestions,
+ disabled,
commands,
)
diff --git a/py/h2o_wave/h2o_wave/types.py b/py/h2o_wave/h2o_wave/types.py
index a00021a7c3..4ce958186e 100644
--- a/py/h2o_wave/h2o_wave/types.py
+++ b/py/h2o_wave/h2o_wave/types.py
@@ -8296,6 +8296,7 @@ def __init__(
events: Optional[List[str]] = None,
generating: Optional[bool] = None,
prompt_suggestions: Optional[List[ChatPromptSuggestion]] = None,
+ disabled: Optional[bool] = None,
commands: Optional[List[Command]] = None,
):
_guard_scalar('ChatbotCard.box', box, (str,), False, False, False)
@@ -8304,6 +8305,7 @@ def __init__(
_guard_vector('ChatbotCard.events', events, (str,), False, True, False)
_guard_scalar('ChatbotCard.generating', generating, (bool,), False, True, False)
_guard_vector('ChatbotCard.prompt_suggestions', prompt_suggestions, (ChatPromptSuggestion,), False, True, False)
+ _guard_scalar('ChatbotCard.disabled', disabled, (bool,), False, True, False)
_guard_vector('ChatbotCard.commands', commands, (Command,), False, True, False)
self.box = box
"""A string indicating how to place this component on the page."""
@@ -8319,6 +8321,8 @@ def __init__(
"""True to show a button to stop the text generation. Defaults to False."""
self.prompt_suggestions = prompt_suggestions
"""Clickable prompt suggestions shown below the last response."""
+ self.disabled = disabled
+ """True if the user input should be disabled."""
self.commands = commands
"""Contextual menu commands for this component."""
@@ -8330,6 +8334,7 @@ def dump(self) -> Dict:
_guard_vector('ChatbotCard.events', self.events, (str,), False, True, False)
_guard_scalar('ChatbotCard.generating', self.generating, (bool,), False, True, False)
_guard_vector('ChatbotCard.prompt_suggestions', self.prompt_suggestions, (ChatPromptSuggestion,), False, True, False)
+ _guard_scalar('ChatbotCard.disabled', self.disabled, (bool,), False, True, False)
_guard_vector('ChatbotCard.commands', self.commands, (Command,), False, True, False)
return _dump(
view='chatbot',
@@ -8340,6 +8345,7 @@ def dump(self) -> Dict:
events=self.events,
generating=self.generating,
prompt_suggestions=None if self.prompt_suggestions is None else [__e.dump() for __e in self.prompt_suggestions],
+ disabled=self.disabled,
commands=None if self.commands is None else [__e.dump() for __e in self.commands],
)
@@ -8359,6 +8365,8 @@ def load(__d: Dict) -> 'ChatbotCard':
_guard_scalar('ChatbotCard.generating', __d_generating, (bool,), False, True, False)
__d_prompt_suggestions: Any = __d.get('prompt_suggestions')
_guard_vector('ChatbotCard.prompt_suggestions', __d_prompt_suggestions, (dict,), False, True, False)
+ __d_disabled: Any = __d.get('disabled')
+ _guard_scalar('ChatbotCard.disabled', __d_disabled, (bool,), False, True, False)
__d_commands: Any = __d.get('commands')
_guard_vector('ChatbotCard.commands', __d_commands, (dict,), False, True, False)
box: str = __d_box
@@ -8368,6 +8376,7 @@ def load(__d: Dict) -> 'ChatbotCard':
events: Optional[List[str]] = __d_events
generating: Optional[bool] = __d_generating
prompt_suggestions: Optional[List[ChatPromptSuggestion]] = None if __d_prompt_suggestions is None else [ChatPromptSuggestion.load(__e) for __e in __d_prompt_suggestions]
+ disabled: Optional[bool] = __d_disabled
commands: Optional[List[Command]] = None if __d_commands is None else [Command.load(__e) for __e in __d_commands]
return ChatbotCard(
box,
@@ -8377,6 +8386,7 @@ def load(__d: Dict) -> 'ChatbotCard':
events,
generating,
prompt_suggestions,
+ disabled,
commands,
)
diff --git a/py/h2o_wave/h2o_wave/ui.py b/py/h2o_wave/h2o_wave/ui.py
index a21320e6cd..e2f8ab6f14 100644
--- a/py/h2o_wave/h2o_wave/ui.py
+++ b/py/h2o_wave/h2o_wave/ui.py
@@ -2904,6 +2904,7 @@ def chatbot_card(
events: Optional[List[str]] = None,
generating: Optional[bool] = None,
prompt_suggestions: Optional[List[ChatPromptSuggestion]] = None,
+ disabled: Optional[bool] = None,
commands: Optional[List[Command]] = None,
) -> ChatbotCard:
"""Create a chatbot card to allow getting prompts from users and providing them with LLM generated answers.
@@ -2916,6 +2917,7 @@ def chatbot_card(
events: The events to capture on this chatbot. One of 'stop' | 'scroll_up' | 'feedback' | 'prompt_suggestion'.
generating: True to show a button to stop the text generation. Defaults to False.
prompt_suggestions: Clickable prompt suggestions shown below the last response.
+ disabled: True if the user input should be disabled.
commands: Contextual menu commands for this component.
Returns:
A `h2o_wave.types.ChatbotCard` instance.
@@ -2928,6 +2930,7 @@ def chatbot_card(
events,
generating,
prompt_suggestions,
+ disabled,
commands,
)
diff --git a/r/R/ui.R b/r/R/ui.R
index 68a9df79ca..16555e6315 100644
--- a/r/R/ui.R
+++ b/r/R/ui.R
@@ -3366,6 +3366,7 @@ ui_chat_prompt_suggestion <- function(
#' @param events The events to capture on this chatbot. One of 'stop' | 'scroll_up' | 'feedback' | 'prompt_suggestion'.
#' @param generating True to show a button to stop the text generation. Defaults to False.
#' @param prompt_suggestions Clickable prompt suggestions shown below the last response.
+#' @param disabled True if the user input should be disabled.
#' @param commands Contextual menu commands for this component.
#' @return A ChatbotCard instance.
#' @export
@@ -3377,6 +3378,7 @@ ui_chatbot_card <- function(
events = NULL,
generating = NULL,
prompt_suggestions = NULL,
+ disabled = NULL,
commands = NULL) {
.guard_scalar("box", "character", box)
.guard_scalar("name", "character", name)
@@ -3385,6 +3387,7 @@ ui_chatbot_card <- function(
.guard_vector("events", "character", events)
.guard_scalar("generating", "logical", generating)
.guard_vector("prompt_suggestions", "WaveChatPromptSuggestion", prompt_suggestions)
+ .guard_scalar("disabled", "logical", disabled)
.guard_vector("commands", "WaveCommand", commands)
.o <- list(
box=box,
@@ -3394,6 +3397,7 @@ ui_chatbot_card <- function(
events=events,
generating=generating,
prompt_suggestions=prompt_suggestions,
+ disabled=disabled,
commands=commands,
view='chatbot')
class(.o) <- append(class(.o), c(.wave_obj, "WaveChatbotCard"))
diff --git a/tools/intellij-plugin/src/main/resources/templates/wave-components.xml b/tools/intellij-plugin/src/main/resources/templates/wave-components.xml
index f72a718546..25cc8ec24e 100644
--- a/tools/intellij-plugin/src/main/resources/templates/wave-components.xml
+++ b/tools/intellij-plugin/src/main/resources/templates/wave-components.xml
@@ -1136,11 +1136,12 @@
-
+
+
@@ -1148,12 +1149,13 @@
-
+
+
diff --git a/tools/vscode-extension/component-snippets.json b/tools/vscode-extension/component-snippets.json
index 88b865db1c..18f3b604b6 100644
--- a/tools/vscode-extension/component-snippets.json
+++ b/tools/vscode-extension/component-snippets.json
@@ -1073,14 +1073,14 @@
"Wave Full Chatbot": {
"prefix": "w_full_chatbot",
"body": [
- "ui.chatbot(name='$1', data=$2, placeholder='$3', generating=${4:False}, events=[\n\t\t$5\t\t\n], prev_items=[\n\t\t$6\t\t\n], prompt_suggestions=[\n\t\t$7\t\t\n]),$0"
+ "ui.chatbot(name='$1', data=$2, placeholder='$3', generating=${4:False}, disabled=${5:False}, events=[\n\t\t$6\t\t\n], prev_items=[\n\t\t$7\t\t\n], prompt_suggestions=[\n\t\t$8\t\t\n]),$0"
],
"description": "Create a full Wave Chatbot."
},
"Wave Full ChatbotCard": {
"prefix": "w_full_chatbot_card",
"body": [
- "ui.chatbot_card(box='$1', name='$2', data=$3, placeholder='$4', generating=${5:False}, events=[\n\t\t$6\t\t\n], prompt_suggestions=[\n\t\t$7\t\t\n], commands=[\n\t\t$8\t\t\n])$0"
+ "ui.chatbot_card(box='$1', name='$2', data=$3, placeholder='$4', generating=${5:False}, disabled=${6:False}, events=[\n\t\t$7\t\t\n], prompt_suggestions=[\n\t\t$8\t\t\n], commands=[\n\t\t$9\t\t\n])$0"
],
"description": "Create a full Wave ChatbotCard."
},
diff --git a/ui/src/chatbot.tsx b/ui/src/chatbot.tsx
index ac0ace61ca..30c80a8548 100644
--- a/ui/src/chatbot.tsx
+++ b/ui/src/chatbot.tsx
@@ -123,6 +123,8 @@ export interface Chatbot {
prev_items?: ChatbotMessage[]
/** Clickable prompt suggestions shown below the last response. */
prompt_suggestions?: ChatPromptSuggestion[]
+ /** True if the user input should be disabled. */
+ disabled?: B
}
const processData = (data: Rec) => unpack(data).map(({ content, from_user }) => ({ content, from_user }))
@@ -260,6 +262,7 @@ export const XChatbot = (props: Chatbot) => {
multiline
autoAdjustHeight
placeholder={props.placeholder || 'Type your message'}
+ disabled={props.disabled}
styles={{
root: { flexGrow: 1 },
fieldGroup: { minHeight: INPUT_HEIGHT },
@@ -302,6 +305,8 @@ interface State {
generating?: B
/** Clickable prompt suggestions shown below the last response. */
prompt_suggestions?: ChatPromptSuggestion[]
+ /** True if the user input should be disabled. */
+ disabled?: B
}
export const View = bond(({ name, state, changed }: Model) => {