# import gradio as gr
# from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
# from peft import PeftModel, PeftConfig
# # Load tokenizer
# tokenizer = AutoTokenizer.from_pretrained(".")
# # Load base model with quantization
# bnb_config = BitsAndBytesConfig(load_in_4bit=True)
# base_model = AutoModelForCausalLM.from_pretrained(
#     "unsloth/Meta-Llama-3.1-8B-bnb-4bit",  # same base you fine-tuned
#     quantization_config=bnb_config,
#     device_map="auto"
# )
# # Load LoRA adapters
# model = PeftModel.from_pretrained(base_model, ".")
# # Create Gradio Interface
# def generate_response(prompt):
#     inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
#     outputs = model.generate(**inputs, max_new_tokens=200, do_sample=True, temperature=0.7)
#     return tokenizer.decode(outputs[0], skip_special_tokens=True)
# gr.Interface(
#     fn=generate_response,
#     inputs=gr.Textbox(label="Enter your instruction"),
#     outputs=gr.Textbox(label="Model response"),
#     title="LLaMA 3 - Fine-tuned Model"
# ).launch()
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM
from peft import PeftModel
# Load tokenizer
tokenizer = AutoTokenizer.from_pretrained(".")
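# Note (assumption, not in the original app): Llama tokenizers ship without a
# pad token; if batched inputs are ever needed, set
# tokenizer.pad_token = tokenizer.eos_token before padding.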
# Load base model (non-quantized)
base_model = AutoModelForCausalLM.from_pretrained(
"meta-llama/Meta-Llama-3-8B", # use standard non-quantized base model
device_map="auto"
)
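# Optional sketch (assumption, not part of the original app): on a smaller GPU
# the same checkpoint can be loaded in half precision to roughly halve the
# weight memory:
# import torch
# base_model = AutoModelForCausalLM.from_pretrained(
#     "meta-llama/Meta-Llama-3-8B", torch_dtype=torch.float16, device_map="auto"
# )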
# Load LoRA adapters
model = PeftModel.from_pretrained(base_model, ".")
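# Optional (assumption: the adapters are plain LoRA): merge them into the base
# weights for slightly faster inference, which PeftModel supports:
# model = model.merge_and_unload()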
# Create Gradio Interface
def generate_response(prompt):
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    outputs = model.generate(**inputs, max_new_tokens=200, do_sample=True, temperature=0.7)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
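# Quick sanity check outside the UI (hypothetical prompt):
# print(generate_response("Explain LoRA fine-tuning in one sentence."))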
gr.Interface(
    fn=generate_response,
    inputs=gr.Textbox(label="Enter your instruction"),
    outputs=gr.Textbox(label="Model response"),
    title="LLaMA 3 - Fine-tuned Model"
).launch()