Modalities: Text
Formats: json
Languages: Hindi
Libraries: Datasets, pandas
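
The files are plain JSON, so they can be read either with the Datasets library or with pandas. A minimal loading sketch (the filename "doc2dial.json" is an assumption based on the dataset names used in the script below, not something stated on this page):

from datasets import load_dataset
import pandas as pd

# Load one subset directly from its JSON file ("doc2dial.json" is hypothetical).
ds = load_dataset("json", data_files={"test": "doc2dial.json"})["test"]
print(ds[0])

# Or inspect the same file with pandas (assumes a JSON array of records).
df = pd.read_json("doc2dial.json")
print(df.head())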
#!/usr/bin/env python3
"""
This script provides a unified interface to:
1. Run inference for all datasets using HuggingFace models
2. Evaluate all predictions and generate scores
"""

import os
import argparse
import subprocess
from typing import List, Optional
import pandas as pd

ALL_DATASETS = [
    'doc2dial', 'quac', 'qrecc', 'inscit',
    'hybridial', 
    'doqa_cooking', 'doqa_travel', 'doqa_movies', 
    'convfinqa'
]

def run_inference_for_dataset(
    model_id: str,
    dataset: str,
    data_folder: str,
    output_folder: str,
    device: str = 'cuda',
    num_ctx: int = 5,
    max_tokens: int = 64,
    expected_samples: int = 500,
    limit: Optional[int] = None
) -> bool:
    """
    Run inference for a single dataset
    
    Args:
        model_id: Model identifier or path
        dataset: Dataset name
        data_folder: Path to data folder
        output_folder: Path to output folder
        device: Device to run on (cuda/cpu)
        num_ctx: Number of contexts
        max_tokens: Maximum number of tokens to generate
        expected_samples: Expected number of samples
        limit: Limit number of samples to process
        
    Returns:
        bool: True if successful, False otherwise
    """
    print(f"\n{'='*80}")
    print(f"Running inference for dataset: {dataset}")
    print(f"{'='*80}\n")
    
    cmd = [
        'python', 'run_generation_hf.py',
        '--model-id', model_id,
        '--data-folder', data_folder,
        '--output-folder', output_folder,
        '--eval-dataset', dataset,
        '--device', device,
        '--num-ctx', str(num_ctx),
        '--max-tokens', str(max_tokens),
        '--expected-samples', str(expected_samples)
    ]
    
    if limit is not None:
        cmd.extend(['--limit', str(limit)])
    
    try:
        subprocess.run(cmd, check=True, capture_output=False, text=True)
        print(f"✓ Inference completed for {dataset}")
        return True
    except subprocess.CalledProcessError as e:
        print(f"✗ Error running inference for {dataset}: {e}")
        return False
    except Exception as e:
        print(f"✗ Unexpected error for {dataset}: {e}")
        return False


def run_inference_for_all_datasets(
    model_id: str,
    datasets: List[str],
    data_folder: str,
    output_folder: str,
    device: str = 'cuda',
    num_ctx: int = 5,
    max_tokens: int = 64,
    expected_samples: int = 500,
    limit: Optional[int] = None
) -> dict:
    """
    Run inference for all specified datasets
    
    Args:
        model_id: Model identifier or path
        datasets: List of dataset names
        data_folder: Path to data folder
        output_folder: Path to output folder
        device: Device to run on (cuda/cpu)
        num_ctx: Number of contexts
        max_tokens: Maximum number of tokens to generate
        expected_samples: Expected number of samples
        limit: Limit number of samples to process
        
    Returns:
        dict: Dictionary mapping dataset names to success status
    """
    print(f"\n{'#'*80}")
    print(f"# Running Inference for Model: {model_id}")
    print(f"# Total Datasets: {len(datasets)}")
    print(f"{'#'*80}\n")
    
    results = {}
    for dataset in datasets:
        success = run_inference_for_dataset(
            model_id=model_id,
            dataset=dataset,
            data_folder=data_folder,
            output_folder=output_folder,
            device=device,
            num_ctx=num_ctx,
            max_tokens=max_tokens,
            expected_samples=expected_samples,
            limit=limit
        )
        results[dataset] = success
    
    # Print summary
    print(f"\n{'='*80}")
    print("Inference Summary:")
    print(f"{'='*80}")
    successful = sum(1 for v in results.values() if v)
    print(f"✓ Successful: {successful}/{len(datasets)}")
    print(f"✗ Failed: {len(datasets) - successful}/{len(datasets)}")
    
    if successful < len(datasets):
        print("\nFailed datasets:")
        for dataset, success in results.items():
            if not success:
                print(f"  - {dataset}")
    
    return results


def run_evaluation(
    results_dir: str,
    data_path: str,
    datasets: List[str],
    output_csv: Optional[str] = None
) -> pd.DataFrame:
    """
    Run evaluation for all models and datasets
    
    Args:
        results_dir: Directory containing model results
        data_path: Path to ground truth data
        datasets: List of dataset names to evaluate
        output_csv: Path to output CSV file
        
    Returns:
        pd.DataFrame: Evaluation results
    """
    print(f"\n{'#'*80}")
    print(f"# Running Evaluation")
    print(f"# Results Directory: {results_dir}")
    print(f"# Data Path: {data_path}")
    print(f"{'#'*80}\n")
    
    cmd = [
        'python', 'get_scores.py',
        '--results-dir', results_dir,
        '--data-path', data_path,
        '--datasets'
    ] + datasets
    
    if output_csv:
        cmd.extend(['--output-csv', output_csv])
    
    try:
        subprocess.run(cmd, check=True, capture_output=False, text=True)
        print("\n✓ Evaluation completed successfully")
        
        # Load and return the results
        if output_csv:
            csv_path = output_csv
        else:
            csv_path = os.path.join(results_dir, 'scores.csv')
        
        if os.path.exists(csv_path):
            df = pd.read_csv(csv_path)
            return df
        else:
            print(f"Warning: Output CSV not found at {csv_path}")
            return pd.DataFrame()
            
    except subprocess.CalledProcessError as e:
        print(f"✗ Error running evaluation: {e}")
        return pd.DataFrame()
    except Exception as e:
        print(f"✗ Unexpected error during evaluation: {e}")
        return pd.DataFrame()

def run_full_pipeline(
    model_id: str,
    data_folder: str,
    output_folder: str,
    datasets: List[str] = ALL_DATASETS,
    device: str = 'cuda',
    num_ctx: int = 5,
    max_tokens: int = 64,
    expected_samples: int = 500,
    limit: Optional[int] = None,
    skip_inference: bool = False,
    skip_evaluation: bool = False,
    output_csv: Optional[str] = None
) -> pd.DataFrame:
    """
    Run the complete pipeline: inference + evaluation
    
    Args:
        model_id: Model identifier or path
        data_folder: Path to data folder
        output_folder: Path to output folder
        datasets: List of dataset names
        device: Device to run on (cuda/cpu)
        num_ctx: Number of contexts
        max_tokens: Maximum number of tokens to generate
        expected_samples: Expected number of samples
        limit: Limit number of samples to process
        skip_inference: Skip inference step
        skip_evaluation: Skip evaluation step
        output_csv: Path to output CSV file
        
    Returns:
        pd.DataFrame: Evaluation results
    """
    print(f"\n{'#'*80}")
    print(f"# ChatRAG-Hi Full Evaluation Pipeline")
    print(f"{'#'*80}\n")
    print(f"Model: {model_id}")
    print(f"Datasets: {', '.join(datasets)}")
    print(f"Device: {device}")
    print(f"Skip Inference: {skip_inference}")
    print(f"Skip Evaluation: {skip_evaluation}")
    
    # Step 1: Run inference
    if not skip_inference:
        inference_results = run_inference_for_all_datasets(
            model_id=model_id,
            datasets=datasets,
            data_folder=data_folder,
            output_folder=output_folder,
            device=device,
            num_ctx=num_ctx,
            max_tokens=max_tokens,
            expected_samples=expected_samples,
            limit=limit
        )
    else:
        print("\n⊘ Skipping inference step")
    
    # Step 2: Run evaluation
    if not skip_evaluation:
        eval_results = run_evaluation(
            results_dir=output_folder,
            data_path=data_folder,
            datasets=datasets,
            output_csv=output_csv
        )
        return eval_results
    else:
        print("\n⊘ Skipping evaluation step")
        return pd.DataFrame()

def get_args():
    """Parse command line arguments"""
    parser = argparse.ArgumentParser(
        description="Comprehensive wrapper for ChatRAG-Hi inference and evaluation"
    )
    
    # Pipeline control
    parser.add_argument('--mode', type=str, choices=['inference', 'evaluation', 'full'],
                        default='full',
                        help='Pipeline mode: inference only, evaluation only, or full pipeline')
    
    # Model configuration
    parser.add_argument('--model-id', type=str, required=True,
                        help='Model identifier or path')
    
    # Data paths
    parser.add_argument('--data-folder', type=str, required=True,
                        help='Path to data folder containing ground truth JSON files')
    parser.add_argument('--output-folder', type=str, required=True,
                        help='Path to output folder for predictions and scores')
    
    # Dataset selection
    parser.add_argument('--datasets', type=str, nargs='+',
                        default=ALL_DATASETS,
                        help='List of datasets to process')
    parser.add_argument('--all-datasets', action='store_true',
                        help='Process all available datasets')
    
    # Inference parameters
    parser.add_argument('--device', type=str, default='cuda',
                        help='Device to run on: cpu or cuda')
    parser.add_argument('--num-ctx', type=int, default=5,
                        help='Number of contexts')
    parser.add_argument('--max-tokens', type=int, default=64,
                        help='Maximum number of tokens to generate')
    parser.add_argument('--expected-samples', type=int, default=500,
                        help='Expected number of samples per dataset')
    parser.add_argument('--limit', type=int, default=None,
                        help='Limit number of samples to process (for testing)')
    
    # Output options
    parser.add_argument('--output-csv', type=str, default=None,
                        help='Path to output CSV file for scores')
    
    args = parser.parse_args()
    
    # Use all datasets if specified
    if args.all_datasets:
        args.datasets = ALL_DATASETS
    
    return args


def main():
    """Main entry point"""
    args = get_args()
    
    # Create output directory if it doesn't exist
    os.makedirs(args.output_folder, exist_ok=True)
    
    # Determine what to skip based on mode
    skip_inference = (args.mode == 'evaluation')
    skip_evaluation = (args.mode == 'inference')
    
    # Run the pipeline
    results = run_full_pipeline(
        model_id=args.model_id,
        data_folder=args.data_folder,
        output_folder=args.output_folder,
        datasets=args.datasets,
        device=args.device,
        num_ctx=args.num_ctx,
        max_tokens=args.max_tokens,
        expected_samples=args.expected_samples,
        limit=args.limit,
        skip_inference=skip_inference,
        skip_evaluation=skip_evaluation,
        output_csv=args.output_csv
    )
    
    if not results.empty and args.mode != 'inference':
        print(f"\n{'='*80}")
        print("Final Evaluation Results:")
        print(f"{'='*80}\n")
        print(results.to_string(index=False))
        print(f"\n{'='*80}\n")


if __name__ == "__main__":
    main()