SameerJugno committed on
Commit 7fcc9f3 · verified · 1 Parent(s): b3fc8d7

Update app.py

Files changed (1)
  1. app.py +60 -30
app.py CHANGED
@@ -1,30 +1,60 @@
- import gradio as gr
- from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
- from peft import PeftModel, PeftConfig
-
- # Load tokenizer
- tokenizer = AutoTokenizer.from_pretrained(".")
-
- # Load base model with quantization
- bnb_config = BitsAndBytesConfig(load_in_4bit=True)
- base_model = AutoModelForCausalLM.from_pretrained(
-     "unsloth/Meta-Llama-3.1-8B-bnb-4bit",  # same base you fine-tuned
-     quantization_config=bnb_config,
-     device_map="auto"
- )
-
- # Load LoRA adapters
- model = PeftModel.from_pretrained(base_model, ".")
-
- # Create Gradio Interface
- def generate_response(prompt):
-     inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
-     outputs = model.generate(**inputs, max_new_tokens=200, do_sample=True, temperature=0.7)
-     return tokenizer.decode(outputs[0], skip_special_tokens=True)
-
- gr.Interface(
-     fn=generate_response,
-     inputs=gr.Textbox(label="Enter your instruction"),
-     outputs=gr.Textbox(label="Model response"),
-     title="LLaMA 3 - Fine-tuned Model"
- ).launch()
+ # import gradio as gr
+ # from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
+ # from peft import PeftModel, PeftConfig
+
+ # # Load tokenizer
+ # tokenizer = AutoTokenizer.from_pretrained(".")
+
+ # # Load base model with quantization
+ # bnb_config = BitsAndBytesConfig(load_in_4bit=True)
+ # base_model = AutoModelForCausalLM.from_pretrained(
+ #     "unsloth/Meta-Llama-3.1-8B-bnb-4bit",  # same base you fine-tuned
+ #     quantization_config=bnb_config,
+ #     device_map="auto"
+ # )
+
+ # # Load LoRA adapters
+ # model = PeftModel.from_pretrained(base_model, ".")
+
+ # # Create Gradio Interface
+ # def generate_response(prompt):
+ #     inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
+ #     outputs = model.generate(**inputs, max_new_tokens=200, do_sample=True, temperature=0.7)
+ #     return tokenizer.decode(outputs[0], skip_special_tokens=True)
+
+ # gr.Interface(
+ #     fn=generate_response,
+ #     inputs=gr.Textbox(label="Enter your instruction"),
+ #     outputs=gr.Textbox(label="Model response"),
+ #     title="LLaMA 3 - Fine-tuned Model"
+ # ).launch()
+
+
+ import gradio as gr
+ from transformers import AutoTokenizer, AutoModelForCausalLM
+ from peft import PeftModel
+
+ # Load tokenizer
+ tokenizer = AutoTokenizer.from_pretrained(".")
+
+ # Load base model normally (no 4-bit quantization)
+ base_model = AutoModelForCausalLM.from_pretrained(
+     "unsloth/Meta-Llama-3.1-8B-bnb-4bit",  # same base you fine-tuned
+     device_map="auto"
+ )
+
+ # Load LoRA adapters
+ model = PeftModel.from_pretrained(base_model, ".")
+
+ # Create Gradio Interface
+ def generate_response(prompt):
+     inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
+     outputs = model.generate(**inputs, max_new_tokens=200, do_sample=True, temperature=0.7)
+     return tokenizer.decode(outputs[0], skip_special_tokens=True)
+
+ gr.Interface(
+     fn=generate_response,
+     inputs=gr.Textbox(label="Enter your instruction"),
+     outputs=gr.Textbox(label="Model response"),
+     title="LLaMA 3 - Fine-tuned Model"
+ ).launch()
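
Note: for a causal LM, model.generate returns the prompt tokens followed by the newly generated tokens, so the decoded string above echoes the user's instruction back in the response. A minimal sketch of a variant that strips the prompt before decoding (illustrative only, not part of this commit; the helper name is hypothetical):

def generate_response_completion_only(prompt):
    # Tokenize the prompt and move tensors to the model's device
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    outputs = model.generate(**inputs, max_new_tokens=200, do_sample=True, temperature=0.7)
    # outputs[0] holds prompt tokens + new tokens; keep only the new ones
    new_tokens = outputs[0][inputs["input_ids"].shape[1]:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True)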