import os
import json
import datetime
import requests
from email.utils import parseaddr

import gradio as gr
import pandas as pd
import numpy as np

from datasets import load_dataset, VerificationMode
from apscheduler.schedulers.background import BackgroundScheduler
from huggingface_hub import HfApi
from sentence_transformers import SentenceTransformer
from transformers import AutoTokenizer, AutoModelForCausalLM

# InfoStrings
#from scorer import question_scorer
from content import format_error, format_warning, format_log, TITLE, INTRODUCTION_TEXT, DATA_TEXT, SUBMISSION_TEXT, CITATION_BUTTON_LABEL, CITATION_BUTTON_TEXT, model_hyperlink

TOKEN = os.environ.get("TOKEN", None)

OWNER="Blanca"
DATA_DATASET = f"{OWNER}/CQs-Gen_test_embeddings"
INTERNAL_DATA_DATASET = f"{OWNER}/CQs-Gen_test_embeddings"
SUBMISSION_DATASET = f"{OWNER}/submissions_internal"
#SUBMISSION_DATASET_PUBLIC = f"{OWNER}/submissions_public"
#CONTACT_DATASET = f"{OWNER}/contact_info" # TODO: I should reactivate this
RESULTS_DATASET = f"{OWNER}/results_public"
LEADERBOARD_PATH = "HiTZ/Critical_Questions_Leaderboard"
METRIC = 'similarity'
api = HfApi()

if METRIC == 'similarity':
    similarity_model = SentenceTransformer("stsb-mpnet-base-v2") 

if METRIC == 'gemma': # WARNING: this path cannot run on this Space because no GPU is available
    # Named gemma_model so it is not shadowed by the `model` argument of add_new_eval below
    gemma_model = AutoModelForCausalLM.from_pretrained('google/gemma-3-12b-it', device_map="auto", attn_implementation='eager')
    tokenizer = AutoTokenizer.from_pretrained('google/gemma-3-12b-it')

YEAR_VERSION = "2025"
#ref_scores_len = {"test": 32}

os.makedirs("scored", exist_ok=True)

# Should be False on spaces and True outside
LOCAL_DEBUG = False #not (os.environ.get("system") == "spaces")

# Display the results
eval_results = {}

eval_results['test'] = load_dataset(
    RESULTS_DATASET,
    YEAR_VERSION,
    split="test",
    token=TOKEN,
    download_mode="force_redownload",
    verification_mode=VerificationMode.NO_CHECKS,
    trust_remote_code=True,
)

# TODO: I should reactivate saving contact infos
#contact_infos = load_dataset(CONTACT_DATASET, YEAR_VERSION, token=TOKEN, download_mode="force_redownload", verification_mode=VerificationMode.NO_CHECKS, trust_remote_code=True)
def get_dataframe_from_results(eval_results, split):
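    """Convert a results split into a display-ready DataFrame, renamed and sorted by score."""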
    local_df = eval_results[split]
    local_df = local_df.map(lambda row: {"model": model_hyperlink(row["url"], row["model"])})
    local_df = local_df.remove_columns(["system_prompt", "url"])
    local_df = local_df.rename_column("model", "Submission name")
    local_df = local_df.rename_column("model_family", "Model family")
    local_df = local_df.rename_column("organisation", "Authors")
    local_df = local_df.rename_column("score", "Score (%)")
    local_df = local_df.rename_column("NAE", "NAE (%)")
    local_df = local_df.rename_column("date", "Submission date")
    df = pd.DataFrame(local_df)
    df = df.sort_values(by=["Score (%)"], ascending=False)
    return df


eval_dataframe_test = get_dataframe_from_results(eval_results=eval_results, split="test")

# Gold answers
gold_results = {}
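# Each gold row pairs an intervention_id with its reference CQs ('cqs'), each carrying a precomputed 'embedding' and a usefulness 'label'.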
gold_dataset = load_dataset(INTERNAL_DATA_DATASET, "test", token=TOKEN, trust_remote_code=True)['test']

def restart_space():
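    """Restart the Space so the leaderboard reloads the latest results."""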
    api.restart_space(repo_id=LEADERBOARD_PATH, token=TOKEN)

TYPES = ["markdown", "number", "number", "number", "number", "str", "str", "str"]


def run_model(model, tokenizer, prompt):
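    """Generate a judgement from the Gemma comparison model for the given prompt (GPU-only path)."""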
    chat = [{"role": "user", "content": prompt}] 
    chat_formated = tokenizer.apply_chat_template(chat, tokenize=False, add_generation_prompt=True)
    #print(chat_formated, flush=True)
    inputs = tokenizer(chat_formated, return_tensors="pt")

    inputs = inputs.to('cuda')

    generated_ids = model.generate(**inputs, max_new_tokens=512) 

    out = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]

    try:
        output = out.split('model\n')[1].replace('\n', '')
    except IndexError:
        # Fall back to the raw decoded text when the expected 'model\n' marker is missing
        print('EVAL ERROR: ' + out, flush=True)
        output = out

    output = output.strip()

    return output

def get_prompts(cq, references):
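    """Build the LLM-as-judge prompt asking which reference question (if any) matches the generated CQ."""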
    return {
        'compare': f"""You will be given a set of reference questions, each with an identifying ID, and a newly generated question. Your task is to determine if any of the reference questions are asking for the same information as the new question.
        
Here is the set of reference questions with their IDs:

<reference_questions>
{references}
</reference_questions>

Here is the newly generated question:

<new_question>
{cq}
</new_question>

Compare the new question to each of the reference questions. Look for questions that are asking for the same information, even if they are worded differently. Consider the core meaning and intent of each question, not just the exact wording.

If you find a reference question that is asking for the same information as the new question, output only the ID of that reference question.

If none of the reference questions are asking for the same information as the new question, output exactly 'Similar reference not found.' (without quotes).

Your final output should consist of only one of the following:
1. The ID of the most similar reference question
2. The exact phrase 'Similar reference not found.'

Do not include any explanation, reasoning, or additional text in your output."""}

def call_start():
    return format_log("We are starting your evaluation. This can take a few minutes.")

def add_new_eval(
    model: str,
    model_family: str,
    system_prompt: str,
    url: str,
    path_to_file: str,
    organisation: str,
    mail: str,
    profile: gr.OAuthProfile, 
):
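    """Validate, score, and record a new leaderboard submission.

    Steps: basic account/email/duplicate checks, upload of the raw file,
    scoring of the first three CQs per intervention against the gold
    references, upload of the scored file, and push of the aggregate
    entry to the results dataset.
    """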
    # Reject accounts created less than 2 months ago
    user_data = requests.get(f"https://huggingface.co/api/users/{profile.username}/overview")
    creation_date = json.loads(user_data.content)["createdAt"]
    if datetime.datetime.now() - datetime.datetime.strptime(creation_date, '%Y-%m-%dT%H:%M:%S.%fZ') < datetime.timedelta(days=60):
        return format_error("This account is not authorized to submit on this leaderboard.")
        
    # TODO: I should reactivate this check
    #contact_infos = load_dataset(CONTACT_DATASET, YEAR_VERSION, token=TOKEN, download_mode="force_redownload", verification_mode=VerificationMode.NO_CHECKS, trust_remote_code=True)
    #user_submission_dates = sorted(row["date"] for row in contact_infos[val_or_test] if row["username"] == profile.username)
    #if len(user_submission_dates) > 0 and user_submission_dates[-1] == datetime.datetime.today().strftime('%Y-%m-%d'):
    #    return format_error("You already submitted once today, please try again tomorrow.")

    val_or_test = "test"
    is_validation = False
    # Very basic email parsing
    _, parsed_mail = parseaddr(mail)
    if not "@" in parsed_mail:
        return format_warning("Please provide a valid email adress.")


    # Check whether this model/organisation combination has already been submitted; if so, return a warning
    if model.lower() in set([m.lower() for m in eval_results[val_or_test]["model"]]) and organisation.lower() in set([o.lower() for o in eval_results[val_or_test]["organisation"]]):
        return format_warning("This model has been already submitted.")
    
    if path_to_file is None:
        return format_warning("Please attach a file.")

    # SAVE UNSCORED SUBMISSION
    if LOCAL_DEBUG:
        print("mock uploaded submission")
    else:
        api.upload_file(
            repo_id=SUBMISSION_DATASET, 
            path_or_fileobj=path_to_file.name, 
            path_in_repo=f"{organisation}/{model}/{YEAR_VERSION}_{val_or_test}_raw_{datetime.datetime.today()}.json",
            repo_type="dataset", 
            token=TOKEN
        )

    # SAVE CONTACT
    contact_info = {
        "model": model,
        "model_family": model_family,
        "url": url,
        "organisation": organisation,
        "username": profile.username,
        "mail": mail,
        "date": datetime.datetime.today().strftime('%Y-%m-%d')
    }

    # TODO: reactivate this
    #contact_infos[val_or_test]= contact_infos[val_or_test].add_item(contact_info)
    #if LOCAL_DEBUG:
    #    print("mock uploaded contact info")
    #else:
    #    contact_infos.push_to_hub(CONTACT_DATASET, config_name = YEAR_VERSION, token=TOKEN)

    # SCORE SUBMISSION
    file_path = path_to_file.name        
    scores = 0
    num_questions = 0
    task_ids = []

    call_start()
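    # Scoring: each intervention gets up to 1 point (1/3 per "Useful" CQ among its first three);
    # NAE counts generated CQs with no sufficiently similar reference.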

    with open(f"scored/{organisation}_{model}.jsonl", "w") as scored_file: 
        with open(file_path, 'r') as f:
            data = json.load(f)
            scores = []
            nae = 0
            num_cqs = 0
            for id_to_eval, line in data.items(): # data to evaluate
                intervention_score = 0
                
                for indx, intervention_id in enumerate(gold_dataset['intervention_id']): # references
                    if id_to_eval == intervention_id:
                        references = gold_dataset['cqs']
                        reference_embeddings = [row['embedding'] for row in references[indx]]
                        # TODO: here upload the embedding that I have saved, so they can be used in similarity evaluation
                        
                        #print(reference_set, flush=True)
                        if not isinstance(line['cqs'], list) or len(line['cqs']) < 3: # make sure 'cqs' is a list with at least 3 cqs
                            num_cqs += 3
                            #return format_warning("Make sure that there are at least 3 questions per intervention, or check that the format is right.")
                            continue
                        for cq in line['cqs'][:3]: # here only take the first 3 cqs
                            if not isinstance(cq, dict):
                                num_cqs += 1
                                continue
                            cq_text = cq['cq']
                            
                            if METRIC == 'similarity':
                                sentence_embedding = similarity_model.encode(cq_text)
                                #reference_embedding = similarity_model.encode(reference_set) # TODO: use the precomputed embeddings directly, do not recalculate each time
                                sims = similarity_model.similarity(sentence_embedding, reference_embeddings).tolist()[0]
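                                # keep the reference CQ whose precomputed embedding is most similar to the generated question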
    
                                winner = np.argmax(sims)
                                # make sure the similarity of the winning reference sentence is at least 0.65
                                if sims[winner] > 0.65:
                                    label = references[indx][winner]['label']
                                else:
                                    label = 'not_able_to_evaluate'

                            if METRIC == 'gemma':
                                # NOTE: assumes each gold reference row stores its question text under 'cq'
                                reference_set = [f"{i}: {row['cq']}" for i, row in enumerate(references[indx])]
                                prompts = get_prompts(cq_text, '\n'.join(reference_set))
                                winner = run_model(gemma_model, tokenizer, prompts['compare'])
                                try: # make sure the output is the index of a reference cq
                                    if winner.strip() != 'Similar reference not found.':
                                        label = references[indx][int(winner)]['label']
                                    else:
                                        label = 'not_able_to_evaluate'
                                        print(winner, flush=True)
                                except (IndexError, ValueError):
                                    label = 'evaluation_issue'
                                    print(winner, flush=True)

                            #print(label, flush=True)
                            num_cqs += 1
                            if label == 'Useful':
                                intervention_score += 1/3
                            if label == 'not_able_to_evaluate':
                                nae += 1

                #print(id_to_eval, intervention_score, flush=True)
                scores.append(intervention_score)

                scored_file.write(
                    json.dumps({
                        "id": intervention_id,
                        #"model_answer": answer,
                        "score": intervention_score
                    }) + "\n"
                )

                task_ids.append(id_to_eval)
                #scores += score
                #num_questions += 1
                #break

                
                #return format_error(score)
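            # Aggregate: overall score is the mean intervention score (in %); NAE is the share of CQs that could not be matched.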
            if num_cqs == 0 or len(scores) == 0:
                return format_error("No evaluable questions were found in your submission. Please check the file format.")
            nae_score = round(nae / num_cqs * 100, 1)
            score = round(sum(scores) / len(scores) * 100, 3)
            #print(score, flush=True)


            
    #print(task_ids, flush=True)

    # Check if there's any duplicate in the submission
    if len(task_ids) != len(set(task_ids)):
        return format_error("There are duplicates in your submission. Please check your file and resubmit it.")
        
    # SAVE SCORED SUBMISSION
    if LOCAL_DEBUG:
        print("mock uploaded scored submission")
    else:
        api.upload_file(
            repo_id=SUBMISSION_DATASET, 
            path_or_fileobj=f"scored/{organisation}_{model}.jsonl",
            path_in_repo=f"{organisation}/{model}/{YEAR_VERSION}_{val_or_test}_scored_{datetime.datetime.today()}.jsonl", 
            repo_type="dataset", 
            token=TOKEN
        )


    # SAVE TO LEADERBOARD DATA
    eval_entry = {
        "model": model,
        "model_family": model_family,
        "system_prompt": system_prompt,
        "url": url,
        "organisation": organisation,
        "score": score,  #s / ref_scores_len,#[val_or_test],
        "NAE": nae_score,
        "date": datetime.datetime.today().strftime('%Y-%m-%d')
    }
    
    #TODO: if I find potential errors, I should check them here and maybe suggest that they open a discussion

    # TODO: I should reactivate this
    # Testing for duplicates - to see if we want to add something like it as it would allow people to try to see the content of other submissions
    #eval_entry_no_date = {k: v for k, v in eval_entry if k != "date"}
    #columns_no_date = [c for c in eval_results[val_or_test].column_names if c != "date"]
    #if eval_entry_no_date in eval_results[val_or_test].select_columns(columns_no_date):
    #    return format_error(f"Your submission is an exact duplicate from an existing submission.")

    eval_results[val_or_test] = eval_results[val_or_test].add_item(eval_entry)
    print(eval_results)
    if LOCAL_DEBUG:
        print("mock uploaded results to lb")
    else:
        eval_results[val_or_test].push_to_hub(RESULTS_DATASET, config_name = YEAR_VERSION, token=TOKEN)


    return format_log(f"Submission {model} submitted by {organisation} successfully.\nPlease refresh the leaderboard to see your score displayed.")


def refresh():
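    """Re-download the results dataset and rebuild the leaderboard table."""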
    eval_results['test'] = load_dataset(
        RESULTS_DATASET,
        YEAR_VERSION,
        split="test",
        token=TOKEN,
        download_mode="force_redownload",
        verification_mode=VerificationMode.NO_CHECKS,
        trust_remote_code=True,
    )
    eval_dataframe_test = get_dataframe_from_results(eval_results={"test": eval_results['test']}, split="test")
    return eval_dataframe_test



def upload_file(files):
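    """Return the local paths of the uploaded files."""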
    file_paths = [file.name for file in files]
    return file_paths


demo = gr.Blocks()
with demo:
    gr.HTML(TITLE)

    gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")

    #with gr.Row():
    #    with gr.Column(scale=1, min_width=0):
    #        pass
    #    gr.Image(
    #            value="examples.png", 
    #            label="Example",
    #            interactive=False,    
    #            show_label=False,
    #            show_download_button=False, 
    #            show_share_button=False
    #    )
    #    with gr.Column(scale=1, min_width=0):
    #        pass

    gr.Markdown(DATA_TEXT, elem_classes="markdown-text")

    with gr.Tab("Results: Test"):
        leaderboard_table_test = gr.components.Dataframe(
            value=eval_dataframe_test, datatype=TYPES, interactive=False,
            column_widths=["20%"] 
        )

    refresh_button = gr.Button("Refresh")
    refresh_button.click(refresh, inputs=[], outputs=[leaderboard_table_test])

    

    with gr.Accordion(""):
        with gr.Row():
            gr.Markdown(SUBMISSION_TEXT, elem_classes="markdown-text")
        with gr.Row():
            with gr.Column():
                level_of_test = gr.Radio(["test"], value="test", label="Split")
                model_name_textbox = gr.Textbox(label="Submission name")
                model_family_textbox = gr.Textbox(label="Model family")
                system_prompt_textbox = gr.Textbox(label="System prompt example")
                url_textbox = gr.Textbox(label="Url to submission information")
            with gr.Column():
                organisation = gr.Textbox(label="Team name")
                mail = gr.Textbox(label="Contact email (will be stored privately, & used if there is an issue with your submission)")
                file_output = gr.File()


        with gr.Row():
            gr.LoginButton()
            submit_button = gr.Button("Submit Eval")
        status = gr.Label(label="Status")
        submission_result = gr.Markdown()
        submit_button.click(
                    fn=lambda: "⏳ Submitting...", 
                    inputs=None, 
                    outputs=status,
                    ).then(
                    add_new_eval,
                    [
                        model_name_textbox,
                        model_family_textbox,
                        system_prompt_textbox,
                        url_textbox,
                        file_output,
                        organisation,
                        mail
                    ],
                    submission_result,
        )

    with gr.Row():
        with gr.Accordion("📙 Citation", open=True):
            citation_button = gr.Textbox(
                    value=CITATION_BUTTON_TEXT,
                    label=CITATION_BUTTON_LABEL,
                    elem_id="citation-button",
                    lines=8,           
                    max_lines=10,      
                    show_copy_button=True,  
                )


scheduler = BackgroundScheduler()
scheduler.add_job(restart_space, "interval", seconds=3600)
scheduler.start()
demo.launch(debug=True)