# Global variables to store persona history and refinement prompts
# NOTE(review): indentation appears to have been stripped from this chunk and
# the enclosing `with gr.Blocks()` / left-hand `gr.Column` context is not
# visible — confirm the real nesting against the full file.
gr.Markdown("# 🌲 **Tree Correspondents Generator**")
gr.Markdown("### 🌱 **Customize Tree**")
# Tree-customisation inputs, pre-filled from the module-level `parameters`
# dict (defined elsewhere in the file).
species_input = gr.Textbox(label="Species", value=parameters["core_info"]["species"])
age_input = gr.Textbox(label="Age", value=parameters["core_info"]["age"])
location_input = gr.Textbox(label="Location", value=parameters["core_info"]["location"])
temperament_input = gr.Textbox(label="Temperament", value=parameters["traits_inputs"]["temperament"])
voice_input = gr.Textbox(label="Voice", value=parameters["traits_inputs"]["voice"])
text_type_input = gr.Textbox(label="Text Type", value=parameters["text_type"])
start_button = gr.Button("Generate Persona")
# 🔹 RIGHT: Persona Display (ALWAYS VISIBLE)
with gr.Column(scale=2): # RIGHT: Persona Display
gr.Markdown("### 🌿 **Current Persona**")
persona_display = gr.Textbox(label="Persona", interactive=False, lines=10)
# 🔹 NEW: Persona Refinement Box (Appears after Persona Generation)
gr.Markdown("### ✏️ **Refine Persona**")
# Hidden until `initialize_chain` reveals them via gr.update(visible=True).
refinement_prompt_box = gr.Textbox(label="Edit Persona Refinement", interactive=True, visible=False, lines=6)
refine_button = gr.Button("Refine Persona", visible=False)
# 🔹 START CONVERSATION BUTTON (Hidden Initially)
start_conversation_btn = gr.Button("Start Conversation", visible=False)
# 🔹 BELOW: Conversation Interface (Initially Hidden)
with gr.Column(visible=False) as chat_interface:
chatbox = gr.Chatbot(label="Tree's Perspective Chat", elem_id="custom_chatbox")
prompt_box = gr.Textbox(label="Modify Prompt Before Sending", elem_id="custom_promptbox", interactive=True, lines=3)
next_step_btn = gr.Button("Next Step")
download_btn = gr.Button("📥 Download History", variant="secondary")
file_output = gr.File(label="Download your history here")
def respond(user_input, history):
    """Append a placeholder bot reply for *user_input* to the chat history.

    Args:
        user_input: The text the user submitted from the prompt box.
        history: Chatbot history as a list of (user, bot) message pairs.

    Returns:
        The extended history list, so Gradio can refresh the bound Chatbot.
    """
    # Bug fix: the original built the new history but never returned it, so
    # the handler returned None and `prompt_box.submit(respond, ...)` would
    # clear the Chatbot instead of updating it.
    history = history + [(user_input, f"Response to: {user_input}")]  # Append response
    return history
# Button action: When clicked, generate file and allow download
# NOTE(review): `save_history` is not defined in this chunk — presumably a
# helper that serialises the chat history to a file; verify against the
# full source.
download_btn.click(save_history, inputs=[chatbox], outputs=[file_output])
# Submitting the prompt box routes (prompt text, current history) through
# `respond` and writes the returned history back into the Chatbot component.
prompt_box.submit(respond, [prompt_box, chatbox], chatbox)
# ✅ Function to Start Chain (ONLY Executes First Two Steps)
def initialize_chain(species, age, location, temperament, voice, text_type):
"""Run the first two workflow steps and surface the generated persona.

NOTE(review): several interior lines of this function appear to be missing
from this extract — the dict that updates `parameters` with the user inputs
and an apparent execution-history entry are reduced to orphan fragments
below. Restore them from the full source before relying on this body.
"""
global parameters, step_index, user_inputs, execution_history, current_persona
# 🔹 Update parameters with user input
# NOTE(review): orphan dict fragment — the surrounding assignment/update of
# `parameters` is missing from this extract.
"temperament": temperament,
# 🔹 Run Only First Step (Prepares Data, No Output)
first_step = workflow_steps[0]
first_step_output = full_workflow.steps[0].invoke(parameters)
# 🔹 Run Second Step (Persona Generation)
second_step = workflow_steps[1]
persona_output = full_workflow.steps[1].invoke(first_step_output)
current_persona = persona_output["persona"] # Store it globally
# NOTE(review): orphan dict fragment — presumably part of an
# `execution_history.append({...})` whose surrounding lines are missing.
"parameters": parameters,
"base_persona": current_persona
print(f"🔍 Debug: Generated Persona → {current_persona}") # Debugging Output
step_index = 2 # Move to Third Chain Step (Refinement)
# 🔹 Get Third Step (Persona Refinement)
refinement_step = workflow_steps[step_index]
refinement_template = prompts[refinement_step["prompt"]]["template"]
#refinement_filled = refinement_template.format(persona=current_persona)
# Show the raw (unfilled) refinement template so the user can edit it.
refinement_display = refinement_template
return current_persona, gr.update(value=refinement_display, visible=True), gr.update(visible=True) # ✅ Updates Persona Box
# ✅ Function to Refine Persona
def refine_persona(user_input):
"""Invoke the persona-refinement workflow step with the user's edited prompt.

NOTE(review): interior lines appear to be missing from this extract — the
two history-entry key/value lines below are orphan dict fragments with no
surrounding append/assignment. Check the full source.
"""
global step_index, current_persona
# Use the user's edited prompt when provided; otherwise fall back to the
# stored one. NOTE(review): `prompts[step_index]` indexes by integer here,
# while other code indexes `prompts` by name — confirm which is intended.
refined_prompt = user_input if user_input else prompts[step_index]["prompt"]
#refined_persona = full_workflow.steps[2].invoke(workflow_steps[step_index]["output"])
refined_persona = full_workflow.steps[2].invoke({}, _prompt=refined_prompt)
current_persona = refined_persona["refined_persona"] # Update Persona Globally
# 🔹 Store the persona alongside the refinement prompt
# NOTE(review): orphan dict fragment — the surrounding statement that records
# this history entry is missing from the extract.
"refinement_prompt": refined_prompt, # Store the exact prompt used for refinement
"refined_persona": current_persona # Store the newly generated persona
step_index = 3 # Move to Next Step (Skip RAG if Needed)
step_index = define_step_index(step_index)
print(f"🔍 Debug: Updated Persona → {current_persona}") # Debugging Output
# 🔹 Fetch Next Step (First Actual Prompt)
first_actual_prompt_step = workflow_steps[step_index]
first_prompt_template = prompts[first_actual_prompt_step["prompt"]]["template"]
next_inputs = {key: generated_inputs.get(key, parameters.get(key, f"Missing_{key}")) for key in
first_actual_prompt_step["inputs"]}
first_filled_prompt = first_prompt_template.format(**next_inputs)
# The raw template (not the filled prompt) is what gets shown in the UI.
display_prompt = first_prompt_template
return current_persona, gr.update(visible=True), gr.update(value=display_prompt, visible=False), gr.update(visible=False)
def define_step_index(current_index):
    """Return the next usable step index, skipping over a "RAG" step.

    Args:
        current_index: Index into the module-level `workflow_steps` list.

    Returns:
        `current_index` unchanged for ordinary steps, or `current_index + 1`
        when the step at `current_index` is of type "RAG".
    """
    # Bug fix: the original referenced `index` without ever defining it
    # (NameError at runtime) and had no return statement, even though every
    # caller assigns the result (`step_index = define_step_index(step_index)`).
    # Reconstructed from the debug message and call sites; confirm the intended
    # RAG-skipping rule against the full source.
    step_type = workflow_steps[current_index]["type"]
    index = current_index
    if step_type == "RAG":
        index = current_index + 1
        print(f"------FOUND RAG at {index-1} ----- updated index: {index}")
    return index
# ✅ Function to Execute Steps (PAUSES Until User Clicks Next)
def process_step(user_input, history):
"""Execute the current workflow step and prepare the next prompt for display.

NOTE(review): this extract looks incomplete — `step_name`/`step_type` are
read from the pre-advance index but `step_index` is then re-derived before
the prompt lookup, and the two unconditional `next_filled_prompt = None`
assignments near the end suggest missing `else:` branch lines. Verify
against the full source.
"""
global step_index, current_persona
# 🔹 If All Steps Are Completed
if step_index >= len(workflow_steps):
return history + [("✅ Process Complete!", "")], None, current_persona
step_name = workflow_steps[step_index]["name"]
step_type = workflow_steps[step_index]["type"]
step_index= define_step_index(step_index)
# 🔹 Handle LLM Steps (User Can Modify Prompt)
prompt_template = prompts[workflow_steps[step_index]["prompt"]]["template"]
inputs = {key: generated_inputs.get(key, parameters.get(key, "")) for key in workflow_steps[step_index]["inputs"]}
filled_prompt = prompt_template.format(**inputs)
# 🔹 If User Modified the Input, Use That
final_prompt = user_input if user_input else filled_prompt
user_inputs[step_name] = final_prompt # Store user-modified prompt
response = full_workflow.steps[step_index].invoke({},_prompt=final_prompt)
# Unwrap the step's declared output key from the response dict.
response = response[f"{workflow_steps[step_index]['output']}"]
# 🔹 Prepare Next Step Prompt
if step_index < len(workflow_steps):
step_index = define_step_index(step_index)
next_step = workflow_steps[step_index]
if next_step["type"] == "LLM":
next_prompt_template = prompts[next_step["prompt"]]["template"]
next_inputs = {key: generated_inputs.get(key, parameters.get(key, f"Missing_{key}")) for key in next_step["inputs"]}
# The raw template is shown; the filled version is commented out here.
next_filled_prompt = next_prompt_template #next_prompt_template.format(**next_inputs)
# NOTE(review): the next two assignments are unconditional in this extract
# and would overwrite the value above — presumably they belonged to missing
# `else:` branches of the two `if` statements.
next_filled_prompt = None
next_filled_prompt = None # No more steps
return history + [(final_prompt, response)], next_filled_prompt, current_persona
# NOTE(review): the lines below are keyword-argument fragments of event-wiring
# calls whose opening lines are missing from this extract — presumably
# `start_button.click(initialize_chain,` for the first group and
# `refine_button.click(refine_persona,` for the second. Restore from the
# full source.
inputs=[species_input, age_input, location_input, temperament_input, voice_input, text_type_input],
#outputs=[persona_display, chatbox, prompt_box]
outputs=[persona_display,refinement_prompt_box,refine_button]
inputs=[refinement_prompt_box],
#outputs=[persona_display, prompt_box, chat_interface]
outputs=[persona_display, start_conversation_btn, prompt_box, chat_interface]
# Reveal the prompt box and chat interface, and hide the start button,
# once "Start Conversation" is clicked.
start_conversation_btn.click(
lambda: (gr.update(visible=True), gr.update(visible=True), gr.update(visible=False)),
outputs=[prompt_box, chat_interface, start_conversation_btn]
# NOTE(review): the closing parenthesis of the call above, and the
# `next_step_btn.click(process_step,` header for the two argument lines
# below, are missing from this extract.
inputs=[prompt_box, chatbox],
outputs=[chatbox, prompt_box, persona_display]