#!/usr/bin/env python3
"""
🌟 PQN.AI - Persian Quantum Neural AI
Example usage script for the model
"""
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
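
# Note: device_map="auto" requires the `accelerate` package in addition to
# `transformers` and `torch` (pip install transformers torch accelerate).
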
def load_model():
    """Load the PQN.AI model."""
    print("🚀 Loading PQN.AI model...")
    model_name = "iman-noroozi/pqn-ai"
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(
        model_name,
        torch_dtype=torch.float16,
        device_map="auto",
    )
    print("✅ Model loaded successfully!")
    return tokenizer, model
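
# Hedged sketch: if GPU memory is tight, the model can likely also be loaded
# in 4-bit via bitsandbytes quantization. This assumes the optional
# `bitsandbytes` package is installed; it is not part of the original example:
#
#     from transformers import BitsAndBytesConfig
#     model = AutoModelForCausalLM.from_pretrained(
#         model_name,
#         quantization_config=BitsAndBytesConfig(load_in_4bit=True),
#         device_map="auto",
#     )
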
def generate_response(tokenizer, model, prompt, max_length=512, temperature=0.7):
    """Generate a response with the PQN.AI model."""
    # Generation settings
    generation_config = {
        "max_length": max_length,
        "temperature": temperature,
        "top_p": 0.9,
        "top_k": 50,
        "repetition_penalty": 1.1,
        "do_sample": True,
        "pad_token_id": tokenizer.eos_token_id,
        "eos_token_id": tokenizer.eos_token_id,
    }
    # Encode the input and move it to the model's device
    # (needed because device_map="auto" may place the model on GPU)
    inputs = tokenizer.encode(prompt, return_tensors="pt").to(model.device)
    # Generate the response
    with torch.no_grad():
        outputs = model.generate(inputs, **generation_config)
    # Decode the response
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    # Strip the echoed prompt from the start of the response
    if response.startswith(prompt):
        response = response[len(prompt):].strip()
    return response
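
# Note: "max_length" bounds prompt plus generated tokens together. To bound
# only the newly generated tokens, `generate` also accepts "max_new_tokens",
# e.g. use "max_new_tokens": 256 in generation_config instead of "max_length".
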
def main():
    """Main entry point."""
    print("🌟 PQN.AI - Persian Quantum Neural AI")
    print("=" * 50)
    # Load the model
    tokenizer, model = load_model()
    # Test prompts (kept in Persian on purpose; the model targets Persian)
    test_prompts = [
        "سلام! تو کی هستی؟",  # "Hi! Who are you?"
        "یک اسکریپت پایتون بنویس که لیست فایلها را نمایش دهد",  # "Write a Python script that lists files"
        "حل کن: 2x + 5 = 15",  # "Solve: 2x + 5 = 15"
        "به فارسی توضیح بده Transformer چطور کار میکند",  # "Explain in Persian how a Transformer works"
        "What is quantum computing?",
        "کوانتوم چطور کار میکند؟",  # "How does quantum computing work?"
    ]
    print("\n🧪 Running test prompts...")
    print("=" * 50)
    for i, prompt in enumerate(test_prompts, 1):
        print(f"\n📝 Test {i}: {prompt}")
        print("-" * 30)
        try:
            response = generate_response(tokenizer, model, prompt)
            print(f"🤖 Response: {response}")
        except Exception as e:
            print(f"❌ Error: {e}")
    print("\n" + "=" * 50)
    print("✅ Testing completed!")


if __name__ == "__main__":
    main()