import os
from huggingface_hub import hf_hub_download, list_repo_files
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
import gradio as gr
MODEL_NAME = "Orion-zhen/Qwen2.5-7B-Instruct-Uncensored"
MODEL_DIR = "./Qwen2.5-7B-Instruct-Uncensored"
# Step 1: List every file in the repo and download them
if not os.path.exists(MODEL_DIR):
    os.makedirs(MODEL_DIR, exist_ok=True)
    print("Downloading model with hf_hub_download...")
    # All files in the repository
    repo_files = list_repo_files(MODEL_NAME)
    for file_name in repo_files:
        print(f"Downloading: {file_name}")
        # Use local_dir (not cache_dir) so files land directly in MODEL_DIR;
        # cache_dir would store them in the hub cache layout, and
        # from_pretrained(MODEL_DIR) would not find them there.
        hf_hub_download(repo_id=MODEL_NAME, filename=file_name, local_dir=MODEL_DIR)
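# Simpler alternative (sketch, same huggingface_hub library): snapshot_download
# fetches the whole repository in one call instead of looping over files:
#
#     from huggingface_hub import snapshot_download
#     snapshot_download(repo_id=MODEL_NAME, local_dir=MODEL_DIR)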
# Step 2: Load tokenizer and model
tokenizer = AutoTokenizer.from_pretrained(MODEL_DIR)
model = AutoModelForCausalLM.from_pretrained(MODEL_DIR, device_map="auto")

# Step 3: Build the text-generation pipeline
text_generator = pipeline("text-generation", model=model, tokenizer=tokenizer)
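# Note: a 7B-class model needs roughly 28 GB of memory in float32 (about half
# in float16). If memory is tight, passing torch_dtype to from_pretrained is a
# common option (sketch; torch is already a transformers dependency):
#
#     import torch
#     model = AutoModelForCausalLM.from_pretrained(
#         MODEL_DIR, device_map="auto", torch_dtype=torch.float16
#     )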
# Step 4: Gradio UI
def generate_text(prompt):
    outputs = text_generator(prompt, max_new_tokens=200, do_sample=True, temperature=0.7)
    return outputs[0]['generated_text']
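# Tip: by default the text-generation pipeline echoes the prompt at the start
# of generated_text; passing return_full_text=False returns only the completion.
# For an instruct-tuned model like this one, formatting the prompt with the
# tokenizer's chat template usually gives better answers (sketch, using the
# standard transformers chat-template API):
#
#     messages = [{"role": "user", "content": prompt}]
#     templated = tokenizer.apply_chat_template(
#         messages, tokenize=False, add_generation_prompt=True
#     )
#     outputs = text_generator(templated, max_new_tokens=200, return_full_text=False)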
with gr.Blocks() as demo:
    gr.Markdown("# Orion-zhen/Qwen2.5-7B-Instruct-Uncensored Inference")
    prompt_input = gr.Textbox(label="Prompt", lines=3)
    output_text = gr.Textbox(label="Generated Text", lines=10)
    submit_btn = gr.Button("Generate")
    submit_btn.click(generate_text, inputs=prompt_input, outputs=output_text)

demo.launch()