#!/usr/bin/env python3
"""
Decrypt MMSearch-Plus dataset after loading from HuggingFace Hub.
This module provides two main functions:
1. decrypt_dataset(): Decrypt an already-loaded Dataset object
2. decrypt_mmsearch_plus(): Load from path and decrypt in one step
Example usage with loaded dataset:
from datasets import load_dataset
from decrypt_after_load import decrypt_dataset
# Load encrypted dataset
encrypted_ds = load_dataset("Cie1/MMSearch-Plus", split='train')
# Decrypt it
decrypted_ds = decrypt_dataset(encrypted_ds, canary="MMSearch-Plus")
Example usage with path:
from decrypt_after_load import decrypt_mmsearch_plus
# Load and decrypt in one step
decrypted_ds = decrypt_mmsearch_plus(
dataset_path="Cie1/MMSearch-Plus",
canary="MMSearch-Plus"
)
"""
import base64
import hashlib
import argparse
import io
from pathlib import Path
from datasets import load_dataset, load_from_disk, Dataset
from PIL import Image
from typing import Dict, Any
import os
import multiprocessing
def derive_key(password: str, length: int) -> bytes:
    """Derive a keystream of exactly *length* bytes from *password*.

    The SHA-256 digest of the password is tiled (repeated and then
    truncated) until the requested length is reached.
    """
    digest = hashlib.sha256(password.encode()).digest()
    repeats, remainder = divmod(length, len(digest))
    return digest * repeats + digest[:remainder]
def decrypt_image(ciphertext_b64: str, password: str) -> Image.Image:
    """Decrypt a base64-encoded, XOR-encrypted image back into a PIL Image.

    Returns None for empty input or when decoding/decryption fails.
    """
    if not ciphertext_b64:
        return None
    try:
        cipher_bytes = base64.b64decode(ciphertext_b64)
        keystream = derive_key(password, len(cipher_bytes))
        plain = bytes(c ^ k for c, k in zip(cipher_bytes, keystream))
        # Re-open the decrypted raw bytes as an image
        return Image.open(io.BytesIO(plain))
    except Exception as e:
        print(f"[Warning] Image decryption failed: {e}")
        return None
def decrypt_text(ciphertext_b64: str, password: str) -> str:
    """Decrypt base64-encoded XOR ciphertext back to a UTF-8 string.

    Falsy input is returned unchanged; on any failure the original
    ciphertext is returned so callers never lose data.
    """
    if not ciphertext_b64:
        return ciphertext_b64
    try:
        cipher_bytes = base64.b64decode(ciphertext_b64)
        keystream = derive_key(password, len(cipher_bytes))
        plain = bytes(c ^ k for c, k in zip(cipher_bytes, keystream))
        return plain.decode('utf-8')
    except Exception as e:
        print(f"[Warning] Decryption failed: {e}")
        return ciphertext_b64  # Return original if decryption fails
def decrypt_sample(sample: Dict[str, Any], canary: str) -> Dict[str, Any]:
    """Return a copy of *sample* with its encrypted fields decrypted using *canary*."""
    result = dict(sample)
    # Scalar text fields (must match what was encrypted)
    for key in ('question', 'video_url', 'arxiv_id'):
        if sample.get(key):
            result[key] = decrypt_text(sample[key], canary)
    # 'answer' is a list of strings; decrypt each non-empty entry
    if sample.get('answer'):
        result['answer'] = [
            decrypt_text(ans, canary) if ans else ans
            for ans in sample['answer']
        ]
    # Images are NOT encrypted in the current version, so no image decryption needed.
    # If your dataset has encrypted images (base64 strings), uncomment below:
    # for key in ('img_1', 'img_2', 'img_3', 'img_4', 'img_5'):
    #     if isinstance(sample.get(key), str):
    #         result[key] = decrypt_image(sample[key], canary)
    return result
def decrypt_dataset(
    encrypted_dataset: Dataset,
    canary: str,
    output_path: str = None,
    num_proc: int = None,
    batch_size: int = 1000,
) -> Dataset:
    """
    Decrypt an already-loaded dataset object.

    Args:
        encrypted_dataset: Already loaded Dataset object to decrypt
        canary: Canary string used for encryption
        output_path: Path to save decrypted dataset (optional)
        num_proc: Number of processes for parallel decryption (defaults to CPU count - 1)
        batch_size: Batch size for Dataset.map

    Returns:
        The decrypted Dataset.

    Raises:
        TypeError: If encrypted_dataset is not a datasets.Dataset.
    """
    if not isinstance(encrypted_dataset, Dataset):
        raise TypeError(f"Expected Dataset object, got {type(encrypted_dataset)}")
    if num_proc is None:
        # Leave 1 core free so your machine stays responsive
        num_proc = max(1, multiprocessing.cpu_count() - 1)

    # NOTE(fix): the original status messages contained mis-encoded emoji that
    # split a string literal across lines (a syntax error); they are restored
    # here as plain-ASCII prefixes.
    print(f"[info] Dataset contains {len(encrypted_dataset)} samples")
    print(f"[info] Features: {list(encrypted_dataset.features.keys())}")
    print(f"[info] Using canary string: {canary}")
    print(f"[info] Using {num_proc} processes, batch_size={batch_size}")

    # Vectorized batch decryption (column-wise)
    def decrypt_batch(batch):
        decrypted_batch = dict(batch)  # shallow copy
        for field in ('question', 'video_url', 'arxiv_id'):
            if field in batch:
                decrypted_batch[field] = [
                    decrypt_text(x, canary) if x else x
                    for x in batch[field]
                ]
        # 'answer' column is list[list[str]]; decrypt each non-empty entry
        if 'answer' in batch:
            decrypted_batch['answer'] = [
                [decrypt_text(a, canary) if a else a for a in answers]
                if answers else answers
                for answers in batch['answer']
            ]
        # Images are kept as-is (not encrypted)
        return decrypted_batch

    print("[info] Decrypting dataset with multiprocessing...")
    decrypted_dataset = encrypted_dataset.map(
        decrypt_batch,
        batched=True,
        batch_size=batch_size,
        num_proc=num_proc,
        desc="Decrypting samples",
    )
    print("[done] Decryption completed!")
    print(f"[done] Decrypted {len(decrypted_dataset)} samples")
    print("[done] Text fields decrypted: question, answer, video_url, arxiv_id")
    print("[done] Images: kept as-is (not encrypted in current version)")
    print("[done] Metadata preserved: category, difficulty, subtask, etc.")
    if output_path:
        print(f"[save] Saving decrypted dataset to: {output_path}")
        decrypted_dataset.save_to_disk(output_path)
        print("[save] Saved successfully!")
    return decrypted_dataset
def decrypt_mmsearch_plus(
    dataset_path: str,
    canary: str,
    output_path: str = None,
    from_hub: bool = False,
    num_proc: int = None,
    batch_size: int = 1000,
):
    """
    Load the encrypted MMSearch-Plus dataset and decrypt it in one step.

    Args:
        dataset_path: Local dataset directory or HuggingFace Hub repo ID
        canary: Canary string used for encryption
        output_path: Optional path where the decrypted dataset is saved
        from_hub: Force loading from the Hub (auto-detected when False)
        num_proc: Number of worker processes for parallel decryption
        batch_size: Batch size for Dataset.map
    """
    # Auto-detect a Hub repo ID: contains "/" and no such local path exists
    from_hub = from_hub or ("/" in dataset_path and not Path(dataset_path).exists())

    if from_hub:
        print(f"π Loading encrypted dataset from HuggingFace Hub: {dataset_path}")
        ds = load_dataset(dataset_path, split='train')
    else:
        print(f"π Loading encrypted dataset from local path: {dataset_path}")
        if not Path(dataset_path).exists():
            raise ValueError(f"Dataset path does not exist: {dataset_path}")
        ds = load_from_disk(dataset_path)

    return decrypt_dataset(
        ds,
        canary,
        output_path=output_path,
        num_proc=num_proc,
        batch_size=batch_size,
    )
def main():
    """CLI entry point: parse arguments, resolve the canary, and decrypt."""
    parser = argparse.ArgumentParser(
        description="Decrypt MMSearch-Plus dataset after loading from HuggingFace Hub or local path.",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  # From HuggingFace Hub
  python decrypt_after_load.py --dataset-path username/mmsearch-plus-encrypted --canary "MMSearch-Plus" --output ./decrypted

  # From local directory
  python decrypt_after_load.py --dataset-path ./mmsearch_plus_encrypted --canary "MMSearch-Plus" --output ./decrypted

  # Using environment variable for canary
  export MMSEARCH_PLUS="your-canary-string"
  python decrypt_after_load.py --dataset-path username/mmsearch-plus-encrypted --output ./decrypted
"""
    )
    parser.add_argument(
        "--dataset-path",
        required=True,
        help="Path to encrypted dataset (local directory or HuggingFace Hub repo ID)",
    )
    parser.add_argument(
        "--canary",
        help="Canary string used for encryption (or set MMSEARCH_PLUS environment variable)",
    )
    parser.add_argument(
        "--output",
        help="Path to save the decrypted dataset (optional, defaults to not saving)",
    )
    parser.add_argument(
        "--from-hub",
        action="store_true",
        help="Force loading from HuggingFace Hub (auto-detected by default)",
    )
    args = parser.parse_args()

    # The canary may come from the CLI flag or the environment
    canary = args.canary or os.environ.get("MMSEARCH_PLUS")
    if not canary:
        raise ValueError(
            "Canary string is required for decryption. Either provide --canary argument "
            "or set the MMSEARCH_PLUS environment variable.\n"
            "Example: export MMSEARCH_PLUS='your-canary-string'"
        )

    # Confirm before clobbering an existing output directory
    if args.output and Path(args.output).exists():
        response = input(f"Output path {Path(args.output)} already exists. Overwrite? (y/N): ")
        if response.lower() != 'y':
            print("Aborted.")
            return

    decrypt_mmsearch_plus(
        dataset_path=args.dataset_path,
        canary=canary,
        output_path=args.output,
        from_hub=args.from_hub,
    )
# Run the CLI only when executed as a script (not on import).
if __name__ == "__main__":
    main()