kshitijthakkar committed on
Commit 2454bbc · 1 Parent(s): 4e4722b

feat: Make agent reasoning and planning collapsible, keep final answer expanded


- Add 'title' metadata to reasoning sections to make them collapsible
- Add 'title' metadata to planning phase to make it collapsible
- Remove 'title' metadata from final answer to keep it always visible
- Improve UX by reducing visual clutter while maintaining transparency
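
For context, Gradio renders a gr.ChatMessage whose metadata contains a "title" key as a collapsible section, while a message without a title stays fully expanded. Below is a minimal, self-contained sketch of that behaviour; the echo handler and its text are illustrative only and not part of this repo.

import gradio as gr

def respond(message, history):
    """Illustrative handler: one collapsible reasoning message, one expanded answer."""
    messages = [
        gr.ChatMessage(
            role="assistant",
            content="Working through the request step by step...",
            # "title" makes Gradio fold this message into a collapsible section.
            metadata={"title": "πŸ’­ Reasoning", "status": "done"},
        )
    ]
    yield messages
    messages.append(
        gr.ChatMessage(
            role="assistant",
            # No "title" key, so the final answer stays visible by default.
            content=f"πŸ“œ **Final Answer:**\n\nEcho: {message}",
            metadata={"status": "done"},
        )
    )
    yield messages

demo = gr.ChatInterface(respond, type="messages")

if __name__ == "__main__":
    demo.launch()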

Files changed (1)
  1. screens/chat.py +20 -14
screens/chat.py CHANGED
@@ -92,12 +92,14 @@ def _process_action_step(step_log: ActionStep, skip_model_outputs: bool = False)
     if not skip_model_outputs:
         yield gr.ChatMessage(role="assistant", content=f"**{step_number}**", metadata={"status": "done"})
 
-    # First yield the thought/reasoning from the LLM
+    # First yield the thought/reasoning from the LLM (collapsed)
     if not skip_model_outputs and getattr(step_log, "model_output", ""):
         model_output = _clean_model_output(step_log.model_output)
-        # Format as thinking/reasoning
-        formatted_output = f"πŸ’­ **Reasoning:**\n{model_output}"
-        yield gr.ChatMessage(role="assistant", content=formatted_output, metadata={"status": "done"})
+        yield gr.ChatMessage(
+            role="assistant",
+            content=model_output,
+            metadata={"title": "πŸ’­ Reasoning", "status": "done"}
+        )
 
     # For tool calls, create a parent message
     if getattr(step_log, "tool_calls", []):
@@ -173,8 +175,12 @@ def _process_action_step(step_log: ActionStep, skip_model_outputs: bool = False)
 def _process_planning_step(step_log: PlanningStep, skip_model_outputs: bool = False):
     """Process a PlanningStep and yield appropriate gradio.ChatMessage objects."""
     if not skip_model_outputs:
-        yield gr.ChatMessage(role="assistant", content="🧠 **Planning Phase**", metadata={"status": "done"})
-        yield gr.ChatMessage(role="assistant", content=step_log.plan, metadata={"status": "done"})
+        # Show planning phase as collapsible section
+        yield gr.ChatMessage(
+            role="assistant",
+            content=step_log.plan,
+            metadata={"title": "🧠 Planning Phase", "status": "done"}
+        )
     yield gr.ChatMessage(
         role="assistant", content=get_step_footnote_content(step_log, "Planning Phase"), metadata={"status": "done"}
     )
@@ -201,32 +207,32 @@ def _process_final_answer_step(step_log: FinalAnswerStep):
         )
         return
 
-    # Process the final answer based on its type
+    # Process the final answer based on its type (NOT collapsed - visible by default)
     if isinstance(final_answer, AgentText):
         yield gr.ChatMessage(
             role="assistant",
-            content=final_answer.to_string(),
-            metadata={"status": "done", "title": "πŸ“œ Final Answer"},
+            content=f"πŸ“œ **Final Answer:**\n\n{final_answer.to_string()}",
+            metadata={"status": "done"},
         )
     elif isinstance(final_answer, AgentImage):
         # Handle image if needed
         yield gr.ChatMessage(
             role="assistant",
-            content=f"![Image]({final_answer.to_string()})",
-            metadata={"status": "done", "title": "🎨 Image Result"},
+            content=f"🎨 **Image Result:**\n\n![Image]({final_answer.to_string()})",
+            metadata={"status": "done"},
        )
     elif isinstance(final_answer, AgentAudio):
         yield gr.ChatMessage(
             role="assistant",
             content={"path": final_answer.to_string(), "mime_type": "audio/wav"},
-            metadata={"status": "done", "title": "πŸ”Š Audio Result"},
+            metadata={"status": "done"},
         )
     else:
         # Assume markdown content and render as-is
         yield gr.ChatMessage(
             role="assistant",
-            content=str(final_answer),
-            metadata={"status": "done", "title": "πŸ“œ Final Answer"},
+            content=f"πŸ“œ **Final Answer:**\n\n{str(final_answer)}",
+            metadata={"status": "done"},
         )
 
 
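
For orientation, the three processors touched here are typically driven by a loop over the agent's streamed steps. The following is a rough sketch only, assuming the usual smolagents streaming interface (agent.run(task, stream=True)) and the step classes already imported in screens/chat.py; the stream_agent_messages name is hypothetical and not part of this commit.

def stream_agent_messages(agent, task: str):
    """Hypothetical driver: dispatch each streamed step to the matching processor."""
    for step in agent.run(task, stream=True):
        if isinstance(step, PlanningStep):
            # Plan text arrives collapsed under "🧠 Planning Phase".
            yield from _process_planning_step(step)
        elif isinstance(step, ActionStep):
            # Reasoning is collapsed under "πŸ’­ Reasoning"; tool calls follow.
            yield from _process_action_step(step)
        elif isinstance(step, FinalAnswerStep):
            # Final answer is emitted without a title, so it stays expanded.
            yield from _process_final_answer_step(step)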