import os
from huggingface_hub import hf_hub_download, list_repo_files
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
import gradio as gr

MODEL_NAME = "Orion-zhen/Qwen2.5-7B-Instruct-Uncensored"
MODEL_DIR = "./Qwen2.5-7B-Instruct-Uncensored"

# Step 1: List all repo files and download them
if not os.path.exists(MODEL_DIR):
    os.makedirs(MODEL_DIR, exist_ok=True)
    print("Downloading model with hf_hub_download...")
    
    # List every file in the repository
    repo_files = list_repo_files(MODEL_NAME)
    
    for file_name in repo_files:
        print(f"Downloading: {file_name}")
        # Use local_dir (not cache_dir) so the files land directly in MODEL_DIR,
        # which is what from_pretrained(MODEL_DIR) expects below.
        hf_hub_download(repo_id=MODEL_NAME, filename=file_name, local_dir=MODEL_DIR)
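
# Alternative sketch: huggingface_hub's snapshot_download can fetch the whole
# repository in one call instead of looping over individual files:
#   from huggingface_hub import snapshot_download
#   snapshot_download(repo_id=MODEL_NAME, local_dir=MODEL_DIR)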

# Step 2: Load tokenizer and model
tokenizer = AutoTokenizer.from_pretrained(MODEL_DIR)
model = AutoModelForCausalLM.from_pretrained(MODEL_DIR, device_map="auto")
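
# Note: a 7B model in float32 needs roughly 28 GB of memory; if a GPU is
# available, passing torch_dtype="auto" (or torch.bfloat16, with `import torch`)
# to from_pretrained roughly halves that. Sketch:
#   model = AutoModelForCausalLM.from_pretrained(MODEL_DIR, device_map="auto",
#                                                torch_dtype="auto")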

# Step 3: Build the text-generation pipeline
text_generator = pipeline("text-generation", model=model, tokenizer=tokenizer)

# Step 4: Gradio UI
def generate_text(prompt):
    outputs = text_generator(prompt, max_new_tokens=200, do_sample=True, temperature=0.7)
    return outputs[0]['generated_text']
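
# Note: Qwen2.5 is an instruct-tuned model; responses are usually better when
# the prompt is wrapped with the tokenizer's chat template (sketch, assuming
# the repo ships one, as Qwen2.5 checkpoints normally do):
#   messages = [{"role": "user", "content": prompt}]
#   chat_prompt = tokenizer.apply_chat_template(messages, tokenize=False,
#                                               add_generation_prompt=True)
#   outputs = text_generator(chat_prompt, max_new_tokens=200, do_sample=True,
#                            temperature=0.7)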

with gr.Blocks() as demo:
    gr.Markdown("# Orion-zhen/Qwen2.5-7B-Instruct-Uncensored Inference")
    prompt_input = gr.Textbox(label="Prompt", lines=3)
    output_text = gr.Textbox(label="Generated Text", lines=10)
    submit_btn = gr.Button("Generate")
    
    submit_btn.click(generate_text, inputs=prompt_input, outputs=output_text)

demo.launch()
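
# To get a public URL (e.g. when running on Colab), launch with share=True:
#   demo.launch(share=True)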