@@ -442,8 +442,9 @@ async def generate_user_intent(
442442 result = self .llm_task_manager .parse_task_output (
443443 Task .GENERATE_USER_INTENT , output = result
444444 )
445+ text = result .text
445446
446- user_intent = get_first_nonempty_line (result )
447+ user_intent = get_first_nonempty_line (text )
447448 if user_intent is None :
448449 user_intent = "unknown message"
449450
@@ -527,9 +528,11 @@ async def generate_user_intent(
527528 prompt ,
528529 custom_callback_handlers = [streaming_handler_var .get ()],
529530 )
530- text = self .llm_task_manager .parse_task_output (
531+ result = self .llm_task_manager .parse_task_output (
531532 Task .GENERAL , output = text
532533 )
534+ text = result .text
535+ text = text .strip ()
533536 else :
534537 # Initialize the LLMCallInfo object
535538 llm_call_info_var .set (LLMCallInfo (task = Task .GENERAL .value ))
@@ -562,9 +565,10 @@ async def generate_user_intent(
562565 stop = ["User:" ],
563566 )
564567
565- text = self .llm_task_manager .parse_task_output (
568+ result = self .llm_task_manager .parse_task_output (
566569 Task .GENERAL , output = result
567570 )
571+ text = result .text
568572 text = text .strip ()
569573 if text .startswith ('"' ):
570574 text = text [1 :- 1 ]
@@ -646,10 +650,11 @@ async def generate_next_step(
646650 result = self .llm_task_manager .parse_task_output (
647651 Task .GENERATE_NEXT_STEPS , output = result
648652 )
653+ text = result .text
649654
650655 # If we don't have multi-step generation enabled, we only look at the first line.
651656 if not self .config .enable_multi_step_generation :
652- result = get_first_nonempty_line (result )
657+ result = get_first_nonempty_line (text )
653658
654659 if result and result .startswith ("bot " ):
655660 bot_intent = result [4 :]
@@ -687,7 +692,7 @@ async def generate_next_step(
687692 # Otherwise, we parse the output as a single flow.
688693 # If we have a parsing error, we try to reduce size of the flow, potentially
689694 # up to a single step.
690- lines = result .split ("\n " )
695+ lines = text .split ("\n " )
691696 while True :
692697 try :
693698 parse_colang_file ("dynamic.co" , content = "\n " .join (lines ))
@@ -896,10 +901,15 @@ async def generate_bot_message(
896901 llm , prompt , custom_callback_handlers = [streaming_handler ]
897902 )
898903
904+ # TODO(review): confirm whether stripping reasoning traces should live in
905+ # llm_call itself rather than in parse_task_output here.
906+
899907 result = self .llm_task_manager .parse_task_output (
900908 Task .GENERAL , output = result
901909 )
902910
911+ result = result .text
912+
903913 log .info (
904914 "--- :: LLM Bot Message Generation passthrough call took %.2f seconds" ,
905915 time () - t0 ,
@@ -963,6 +973,8 @@ async def generate_bot_message(
963973 Task .GENERATE_BOT_MESSAGE , output = result
964974 )
965975
976+ result = result .text
977+
966978 # TODO: catch openai.error.InvalidRequestError from exceeding max token length
967979
968980 result = get_multiline_response (result )
@@ -1055,10 +1067,11 @@ async def generate_value(
10551067 result = self .llm_task_manager .parse_task_output (
10561068 Task .GENERATE_VALUE , output = result
10571069 )
1070+ text = result .text
10581071
10591072 # We only use the first line for now
10601073 # TODO: support multi-line values?
1061- value = result .strip ().split ("\n " )[0 ]
1074+ value = text .strip ().split ("\n " )[0 ]
10621075
10631076 # Because of conventions from other languages, sometimes the LLM might add
10641077 # a ";" at the end of the line. We remove that
@@ -1266,22 +1279,23 @@ async def generate_intent_steps_message(
12661279 result = self .llm_task_manager .parse_task_output (
12671280 Task .GENERATE_INTENT_STEPS_MESSAGE , output = result
12681281 )
1282+ text = result .text
12691283
12701284 # TODO: Implement logic for generating more complex Colang next steps (multi-step),
12711285 # not just a single bot intent.
12721286
12731287 # Get the next 2 non-empty lines, these should contain:
12741288 # line 1 - user intent, line 2 - bot intent.
12751289 # Afterwards we have the bot message.
1276- next_three_lines = get_top_k_nonempty_lines (result , k = 2 )
1290+ next_three_lines = get_top_k_nonempty_lines (text , k = 2 )
12771291 user_intent = next_three_lines [0 ] if len (next_three_lines ) > 0 else None
12781292 bot_intent = next_three_lines [1 ] if len (next_three_lines ) > 1 else None
12791293 bot_message = None
12801294 if bot_intent :
1281- pos = result .find (bot_intent )
1295+ pos = text .find (bot_intent )
12821296 if pos != - 1 :
12831297 # The bot message could be multiline
1284- bot_message = result [pos + len (bot_intent ) :]
1298+ bot_message = text [pos + len (bot_intent ) :]
12851299 bot_message = get_multiline_response (bot_message )
12861300 bot_message = strip_quotes (bot_message )
12871301 # Quick hack for degenerated / empty bot messages
@@ -1348,7 +1362,8 @@ async def generate_intent_steps_message(
13481362 result = self .llm_task_manager .parse_task_output (
13491363 Task .GENERAL , output = result
13501364 )
1351- text = result .strip ()
1365+ text = result .text
1366+ text = text .strip ()
13521367 if text .startswith ('"' ):
13531368 text = text [1 :- 1 ]
13541369
0 commit comments