"""
🌟 PQN.AI - Persian Quantum Neural AI

Example usage script for the model.
"""
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

def load_model():
    """Load the PQN.AI model and tokenizer."""
    print("🚀 Loading PQN.AI model...")

    model_name = "iman-noroozi/pqn-ai"
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(
        model_name,
        torch_dtype=torch.float16,  # half precision to reduce memory use
        device_map="auto",          # place weights on available GPU(s)/CPU automatically
    )

    print("✅ Model loaded successfully!")
    return tokenizer, model

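# Optional: a minimal sketch of loading the same checkpoint in 4-bit precision
# for GPUs with limited VRAM. This helper is NOT part of the original script;
# it assumes the `bitsandbytes` package is installed and a CUDA GPU is present.
def load_model_4bit():
    """Load PQN.AI with 4-bit quantization (sketch; requires bitsandbytes)."""
    from transformers import BitsAndBytesConfig

    quant_config = BitsAndBytesConfig(
        load_in_4bit=True,
        bnb_4bit_compute_dtype=torch.float16,
    )
    model_name = "iman-noroozi/pqn-ai"
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(
        model_name,
        quantization_config=quant_config,
        device_map="auto",
    )
    return tokenizer, model
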
def generate_response(tokenizer, model, prompt, max_length=512, temperature=0.7):
    """Generate a response from the PQN.AI model for a given prompt."""
    generation_config = {
        "max_length": max_length,  # total length, including the prompt tokens
        "temperature": temperature,
        "top_p": 0.9,
        "top_k": 50,
        "repetition_penalty": 1.1,
        "do_sample": True,
        "pad_token_id": tokenizer.eos_token_id,
        "eos_token_id": tokenizer.eos_token_id,
    }

    # Tokenize (with attention mask) and move inputs to the model's device.
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)

    with torch.no_grad():
        outputs = model.generate(**inputs, **generation_config)

    # Decode only the newly generated tokens, skipping the echoed prompt.
    prompt_length = inputs["input_ids"].shape[-1]
    response = tokenizer.decode(outputs[0][prompt_length:], skip_special_tokens=True)

    return response.strip()

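# Optional: a minimal sketch of streaming tokens to stdout as they are
# generated, using transformers' built-in TextStreamer. This helper is NOT
# part of the original script.
def generate_streaming(tokenizer, model, prompt, max_new_tokens=256):
    """Stream a response token-by-token (sketch using TextStreamer)."""
    from transformers import TextStreamer

    streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    with torch.no_grad():
        model.generate(
            **inputs,
            max_new_tokens=max_new_tokens,
            do_sample=True,
            temperature=0.7,
            streamer=streamer,  # prints tokens as they arrive
        )
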
def main():
    """Run a small suite of test prompts through the model."""
    print("🌟 PQN.AI - Persian Quantum Neural AI")
    print("=" * 50)

    tokenizer, model = load_model()

    # Test prompts, mostly in Persian: a greeting, a request for a Python
    # script that lists files, a simple equation, an explanation of the
    # Transformer architecture, and two questions about quantum computing.
    test_prompts = [
        "سلام! تو کی هستی؟",
        "یک اسکریپت پایتون بنویس که لیست فایلها را نمایش دهد",
        "حل کن: 2x + 5 = 15",
        "به فارسی توضیح بده Transformer چطور کار میکند",
        "What is quantum computing?",
        "کوانتوم چطور کار میکند؟",
    ]

    print("\n🧪 Running test prompts...")
    print("=" * 50)

    for i, prompt in enumerate(test_prompts, 1):
        print(f"\n📝 Test {i}: {prompt}")
        print("-" * 30)

        try:
            response = generate_response(tokenizer, model, prompt)
            print(f"🤖 Response: {response}")
        except Exception as e:
            print(f"❌ Error: {e}")

    print("\n" + "=" * 50)
    print("✅ Testing completed!")

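# Alternative: the same checkpoint can also be driven through the high-level
# `pipeline` API instead of calling generate() directly. A minimal sketch,
# not part of the original script:
#
#     from transformers import pipeline
#     generator = pipeline(
#         "text-generation",
#         model="iman-noroozi/pqn-ai",
#         torch_dtype=torch.float16,
#         device_map="auto",
#     )
#     print(generator("What is quantum computing?", max_new_tokens=128)[0]["generated_text"])
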
if __name__ == "__main__":
    main()