import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

# Load the model and tokenizer
model_name = "Lyte/Llama-3.2-3B-Overthinker"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype="auto", device_map="auto")
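
# Note: add_reasoning_prompt, add_thinking_prompt, add_answer_prompt, and the
# num_steps variable used below are not standard transformers arguments.
# apply_chat_template forwards extra keyword arguments to the model's Jinja chat
# template, so this assumes the Overthinker chat template defines these flags as
# well as the custom "reasoning" and "thinking" message roles.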
def generate_response_stream(prompt, max_tokens, temperature, top_p, repeat_penalty, num_steps=4):
    messages = [{"role": "user", "content": prompt}]

    # Phase 1: generate the high-level reasoning
    reasoning_template = tokenizer.apply_chat_template(messages, tokenize=False, add_reasoning_prompt=True)
    reasoning_inputs = tokenizer(reasoning_template, return_tensors="pt").to(model.device)
    reasoning_ids = model.generate(
        **reasoning_inputs,
        max_new_tokens=max_tokens // 3,  # split the token budget evenly across the three phases
        do_sample=True,  # enable sampling so the temperature/top_p sliders take effect
        temperature=temperature,
        top_p=top_p,
        repetition_penalty=repeat_penalty,
    )
    # Slice off the prompt tokens so only the newly generated text is decoded
    reasoning_output = tokenizer.decode(reasoning_ids[0, reasoning_inputs.input_ids.shape[1]:], skip_special_tokens=True)
    yield reasoning_output, "", ""
    # Phase 2: generate the step-by-step thinking and verifications
    messages.append({"role": "reasoning", "content": reasoning_output})
    thinking_template = tokenizer.apply_chat_template(messages, tokenize=False, add_thinking_prompt=True, num_steps=num_steps)
    thinking_inputs = tokenizer(thinking_template, return_tensors="pt").to(model.device)
    thinking_ids = model.generate(
        **thinking_inputs,
        max_new_tokens=max_tokens // 3,
        do_sample=True,
        temperature=temperature,
        top_p=top_p,
        repetition_penalty=repeat_penalty,
    )
    thinking_output = tokenizer.decode(thinking_ids[0, thinking_inputs.input_ids.shape[1]:], skip_special_tokens=True)
    yield reasoning_output, thinking_output, ""
    # Phase 3: generate the final answer
    messages.append({"role": "thinking", "content": thinking_output})
    answer_template = tokenizer.apply_chat_template(messages, tokenize=False, add_answer_prompt=True)
    answer_inputs = tokenizer(answer_template, return_tensors="pt").to(model.device)
    answer_ids = model.generate(
        **answer_inputs,
        max_new_tokens=max_tokens // 3,
        do_sample=True,
        temperature=temperature,
        top_p=top_p,
        repetition_penalty=repeat_penalty,
    )
    answer_output = tokenizer.decode(answer_ids[0, answer_inputs.input_ids.shape[1]:], skip_special_tokens=True)
    yield reasoning_output, thinking_output, answer_output
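
# Gradio treats generator functions as streaming event handlers: each yielded
# tuple updates the three output components in order, so the UI fills in the
# reasoning, thinking, and answer boxes as each phase completes.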
with gr.Blocks() as iface:
    gr.Markdown("# Llama-3.2-3B Overthinker (Customizable Steps). Please duplicate this Space and run it with a GPU if you can; a T4 is fine!")
    gr.Markdown("Generate responses using the Llama-3.2-3B Overthinker reasoning model.")
    with gr.Row():
        with gr.Column(scale=2):
            prompt = gr.Textbox(lines=5, label="Prompt")
            generate_button = gr.Button("Generate Response")
        with gr.Column(scale=1):
            max_tokens = gr.Slider(minimum=512, maximum=32768, value=8192, label="Max Number of Tokens")
            temperature = gr.Slider(minimum=0.1, maximum=2.0, value=0.8, label="Temperature")
            top_p = gr.Slider(minimum=0.01, maximum=0.99, value=0.95, label="Top P")
            repeat_penalty = gr.Slider(minimum=0.5, maximum=2.0, value=1.1, label="Repeat Penalty")
            num_steps = gr.Slider(minimum=1, maximum=10, value=4, step=1, label="Max Number of Steps")  # integer steps only

    reasoning_output = gr.Textbox(lines=5, label="Reasoning")
    with gr.Accordion("Thinking Process", open=False):
        thinking_output = gr.Textbox(lines=10, label="Step-by-Step Thinking")
    answer_output = gr.Textbox(lines=5, label="Final Answer")

    generate_button.click(
        fn=generate_response_stream,
        inputs=[prompt, max_tokens, temperature, top_p, repeat_penalty, num_steps],
        outputs=[reasoning_output, thinking_output, answer_output],
    )

iface.launch()
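
# To run locally (assuming this file is saved as app.py, with gradio, transformers,
# torch, and accelerate installed; device_map="auto" requires accelerate):
#   python app.py
# Each phase blocks until model.generate() returns, so a GPU (e.g. the T4 the
# title suggests) keeps the three passes tolerable.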