Spaces: Running on Zero
Update app.py
app.py CHANGED

@@ -7,10 +7,13 @@ from transformers import AutoModelForCausalLM, AutoTokenizer
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 
 def load_model():
-    model_id = "nanochat-students/chat-d20"
 
-
-
+    model_id="karpathy/nanochat-d32"
+    revision="refs/pr/1"
+
+    tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=False, revision=revision)
+    model = AutoModelForCausalLM.from_pretrained(model_id, trust_remote_code=False, dtype=torch.bfloat16, revision=revision).to(device)
+
     model.eval()
 
     return tokenizer, model
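For context, the rest of app.py would typically consume load_model() roughly as sketched below. This is a hedged illustration rather than the Space's actual code: the generate_reply helper, the sampling settings, and the assumption that the refs/pr/1 revision ships a chat template for the tokenizer are not taken from the diff.

import torch

# `load_model` and `device` are the ones defined in app.py above.
tokenizer, model = load_model()

def generate_reply(user_message, max_new_tokens=256):
    # Hypothetical helper; assumes the tokenizer at refs/pr/1 provides a chat template.
    messages = [{"role": "user", "content": user_message}]
    input_ids = tokenizer.apply_chat_template(
        messages, add_generation_prompt=True, return_tensors="pt"
    ).to(device)
    with torch.inference_mode():
        output_ids = model.generate(
            input_ids,
            max_new_tokens=max_new_tokens,
            do_sample=True,
            temperature=0.8,
            top_p=0.95,
        )
    # Decode only the newly generated continuation, not the prompt tokens.
    return tokenizer.decode(output_ids[0, input_ids.shape[-1]:], skip_special_tokens=True)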