AbstractPhil committed
Commit bf7f823 · verified · 1 Parent(s): c1383cb

Create preparer.py

Files changed (1)
  1. preparer.py +511 -0
preparer.py ADDED
@@ -0,0 +1,511 @@
# =================================================================================== #
# ImageNet CLIP Feature Extraction - Download-First Strategy
# Author: AbstractPhil
#
# Description: Should sufficiently handle preparing ImageNet from a repo of choice.
# Formatted for Colab - uses userdata to set HF_TOKEN via userdata.get('HF_TOKEN').
# Should run as-is without hassle, but it's a little time consuming.
#
# License: MIT
# =================================================================================== #

import os, json, datetime, time
from pathlib import Path
from typing import Dict, List, Union, Optional, Generator
import torch
import torch.nn.functional as F
from datasets import Dataset, DatasetDict, Features, Value, Sequence
from transformers import CLIPModel
from huggingface_hub import HfApi, HfFolder, create_repo
from google.colab import userdata

# Set your HF_TOKEN here.
HF_TOKEN = userdata.get('HF_TOKEN')  # or pull it from os.environ / wherever you keep it
os.environ["HF_TOKEN"] = HF_TOKEN

import torchvision.transforms.functional as TF
from torch.utils.data import DataLoader

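# Note (illustrative, not part of the original commit): the token handling above is
# Colab-specific; outside Colab the `google.colab` import will fail. A common
# fallback pattern, shown here only as a sketch, is to try the Colab secret first
# and fall back to an environment variable:
#
#     try:
#         from google.colab import userdata
#         HF_TOKEN = userdata.get('HF_TOKEN')
#     except ImportError:
#         HF_TOKEN = os.environ.get("HF_TOKEN", "")
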
# Configuration for ImageNet-scale processing
CONFIG = {
    "device": "cuda" if torch.cuda.is_available() else "cpu",
    "batch_size": 256,              # A100 can handle much larger batches
    "generator_chunk_size": 5000,   # Process and yield in chunks
    "prefetch_factor": 16,          # DataLoader prefetch
    "persistent_workers": True,     # Keep workers alive
    "num_workers": 2,               # Parallel data loading

    "image_size": 224,
    "vector_dim": 768,
    "normalize_on_gpu": True,
    "clip_mean": (0.48145466, 0.4578275, 0.40821073),
    "clip_std": (0.26862954, 0.26130258, 0.27577711),

    # Memory management for ImageNet scale
    "max_memory_gb": 64,               # Adjust based on available RAM
    "memory_cleanup_interval": 10000,  # Clean memory every N images

    # Output configuration
    "upload_to_hub": False,  # Set to True if you wish to upload to your repo
    "repo_id": "",           # e.g. "AbstractPhil/imagenet-clip-features" - change this to your HF repo; you can't upload to mine.
    "generator_version": "2.0.0",  # Must be x.y.z format

    # Download-first strategy (optimized for multiple models)
    "download_first": True,           # Download entire dataset before processing
    "cache_dir": "./imagenet_cache",  # Where to cache downloaded data
    "keep_dataset_in_memory": False,  # False to save RAM
}

# Extended list of CLIP models to process
CLIP_MODELS = [
    # OpenAI CLIP models
    # {"repo_id": "openai/clip-vit-base-patch32", "short_name": "clip_vit_b32", "dim": 512},
    # {"repo_id": "openai/clip-vit-base-patch16", "short_name": "clip_vit_b16", "dim": 512},

    # {"repo_id": "laion/CLIP-ViT-B-32-laion2B-s34B-b79K", "short_name": "clip_vit_laion_b32", "dim": 512},
    # {"repo_id": "openai/clip-vit-large-patch14", "short_name": "clip_vit_l14", "dim": 768},
    # {"repo_id": "openai/clip-vit-large-patch14-336", "short_name": "clip_vit_l14_336", "dim": 768},

    # LAION CLIP models (if you want to add them)
    {"repo_id": "laion/CLIP-ViT-H-14-laion2B-s32B-b79K", "short_name": "clip_vit_laion_h14", "dim": 1024},
    # {"repo_id": "laion/CLIP-ViT-g-14-laion2B-s12B-b42K", "short_name": "clip_vit_laion_g14", "dim": 1024},
    # {"repo_id": "laion/CLIP-ViT-bigG-14-laion2B-39B-b160k", "short_name": "clip_vit_laion_bigg14", "dim": 1280},

    # You can add more models here
]

TARGET_SPLITS = ["train", "validation", "test"]

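# Illustrative helper (not part of the original commit): a rough back-of-envelope
# estimate of the raw size of the extracted features, assuming float32 vectors of
# the "dim" listed per model in CLIP_MODELS. Handy when picking max_shard_size and
# checking free disk space before a run; it ignores id/label/metadata overhead.
def estimate_feature_storage_gb(num_images: int, vector_dim: int) -> float:
    """Approximate storage for num_images feature vectors of vector_dim float32 values."""
    bytes_per_row = vector_dim * 4  # 4 bytes per float32 feature value
    return num_images * bytes_per_row / (1024 ** 3)

# Example: ~1.28M train images at dim 1024 -> roughly 5 GB of raw feature data.
#   print(f"{estimate_feature_storage_gb(1_281_167, 1024):.1f} GB")
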
class ImageNetClipFeatureExtractor:
    """
    Production-ready CLIP feature extractor optimized for processing multiple models.
    Uses a download-first strategy for maximum throughput.
    """

    def __init__(self, config: dict):
        self.cfg = config
        self.device = torch.device(config["device"])
        self._setup_preprocessing()
        self.hf_token = os.environ.get("HF_TOKEN") or userdata.get('HF_TOKEN')
        self.datasets_cache = {}  # Cache loaded datasets

    def _setup_preprocessing(self):
        self._mean = torch.tensor(self.cfg["clip_mean"]).view(1, 3, 1, 1)
        self._std = torch.tensor(self.cfg["clip_std"]).view(1, 3, 1, 1)

    def _download_datasets(self):
        """
        Pre-download all datasets once before processing any models.
        This is called once and the datasets are reused for all models.
        """
        from datasets import load_dataset

        print("=" * 60)
        print("📥 DOWNLOADING IMAGENET DATASET")
        print("=" * 60)

        for split in TARGET_SPLITS:
            if split not in self.datasets_cache:
                print(f"\n[⏬] Downloading {split} split to {self.cfg['cache_dir']}...")
                start_time = time.time()

                dataset = load_dataset(
                    "benjamin-paine/imagenet-1k-256x256",
                    split=split,
                    cache_dir=self.cfg["cache_dir"],
                    keep_in_memory=self.cfg["keep_dataset_in_memory"],
                    num_proc=None  # Single-process download
                )

                download_time = time.time() - start_time
                print(f"[✅] Downloaded {len(dataset)} {split} images in {download_time/60:.1f} minutes")
                if download_time > 0:
                    print(f"[📊] Download speed: {len(dataset)/download_time:.1f} images/sec")

                self.datasets_cache[split] = dataset

        print("\n[✅] All datasets downloaded and cached!")
        print("=" * 60)

    def _gpu_preprocess(self, images: torch.Tensor) -> torch.Tensor:
        """Memory-efficient GPU preprocessing."""
        if images.dtype != torch.float32:
            images = images.float()

        # Handle both 0-1 and 0-255 ranges
        if images.max() > 1.5:
            images = images / 255.0

        # Resize if needed
        if images.shape[-1] != self.cfg["image_size"]:
            images = F.interpolate(
                images,
                size=(self.cfg["image_size"], self.cfg["image_size"]),
                mode="bilinear",
                align_corners=False
            )

        # Normalize
        if self.cfg["normalize_on_gpu"]:
            mean = self._mean.to(images.device, dtype=images.dtype)
            std = self._std.to(images.device, dtype=images.dtype)
            images = (images - mean) / std

        return images

    def _collate_fn(self, batch):
        """Custom collate function for DataLoader."""
        import hashlib
        images = []
        labels = []
        image_ids = []

        for item in batch:
            image = item['image']
            if image.mode != 'RGB':
                image = image.convert('RGB')

            # Convert to tensor [3, H, W]
            image_tensor = TF.to_tensor(image)

            # Generate SHA256 hash of the image
            image_bytes = image.tobytes()
            sha256_hash = hashlib.sha256(image_bytes).hexdigest()

            images.append(image_tensor)
            labels.append(item.get('label', -1))
            image_ids.append(sha256_hash)

        return {
            'images': torch.stack(images),
            'labels': labels,
            'image_ids': image_ids
        }

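    # Illustrative helper (not part of the original commit): the SHA256-of-pixels id
    # built in _collate_fn is what ties feature rows for the same image together
    # across different CLIP models. This standalone sketch reproduces that id for a
    # single PIL image, assuming the same convert-to-RGB step used above.
    @staticmethod
    def image_sha256(image) -> str:
        """Return the SHA256 hex digest of an RGB-converted PIL image's raw bytes."""
        import hashlib
        if image.mode != 'RGB':
            image = image.convert('RGB')
        return hashlib.sha256(image.tobytes()).hexdigest()
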
    def _imagenet_generator_optimized(self, split: str, model_id: str) -> Generator[Dict, None, None]:
        """
        Optimized generator using pre-downloaded data and DataLoader for parallel loading.
        """
        # Use cached dataset
        dataset = self.datasets_cache[split]

        # Create DataLoader for efficient parallel loading
        dataloader = DataLoader(
            dataset,
            batch_size=self.cfg["batch_size"],
            shuffle=False,  # Keep order for reproducibility
            num_workers=self.cfg["num_workers"],
            prefetch_factor=self.cfg["prefetch_factor"],
            persistent_workers=self.cfg["persistent_workers"],
            collate_fn=self._collate_fn,
            pin_memory=True  # Faster GPU transfer
        )

        # Load CLIP model
        print(f"\n[🤖] Loading {model_id}")
        model = CLIPModel.from_pretrained(model_id).to(self.device)
        model.eval()

        # Setup for chunked processing
        chunk_buffer = []
        timestamp = datetime.datetime.now(datetime.timezone.utc)
        images_processed = 0
        start_time = time.time()
        last_print_time = start_time
        print_interval = 10  # Print progress every 10 seconds

        try:
            with torch.no_grad():
                for batch_idx, batch in enumerate(dataloader):
                    # Move batch to GPU
                    image_batch = batch['images'].to(self.device, non_blocking=True)
                    labels = batch['labels']
                    image_ids = batch['image_ids']

                    # Preprocess on GPU
                    image_batch = self._gpu_preprocess(image_batch)

                    # Extract features
                    features = model.get_image_features(pixel_values=image_batch)
                    features = features / features.norm(dim=-1, keepdim=True)

                    # Create records
                    for img_id, label, feature_vec in zip(image_ids, labels, features):
                        chunk_buffer.append({
                            "image_id": img_id,  # Now using SHA256 hash
                            "label": int(label),
                            "clip_model": model_id,
                            "clip_features": feature_vec.detach().cpu().float().numpy().tolist(),
                            "vector_dim": features.shape[-1],
                            "timestamp": timestamp,
                        })

                    images_processed += len(image_ids)

                    # Print progress at regular time intervals
                    current_time = time.time()
                    if current_time - last_print_time >= print_interval:
                        elapsed = current_time - start_time
                        speed = images_processed / elapsed
                        eta = (len(dataset) - images_processed) / speed
                        print(f"[⚡] Progress: {images_processed}/{len(dataset)} "
                              f"({100*images_processed/len(dataset):.1f}%) | "
                              f"Speed: {speed:.1f} img/sec | "
                              f"ETA: {eta/60:.1f} min")
                        last_print_time = current_time

                    # Yield chunk when it reaches configured size
                    if len(chunk_buffer) >= self.cfg["generator_chunk_size"]:
                        elapsed = time.time() - start_time
                        speed = images_processed / elapsed
                        print(f"[📦] Yielding chunk of {len(chunk_buffer)} features | "
                              f"Progress: {images_processed}/{len(dataset)} "
                              f"({100*images_processed/len(dataset):.1f}%)")
                        yield from chunk_buffer
                        chunk_buffer = []

                    # Memory cleanup at configured interval
                    if images_processed % self.cfg["memory_cleanup_interval"] == 0:
                        torch.cuda.empty_cache()

            # Yield remaining chunk buffer
            if chunk_buffer:
                print(f"[📦] Final chunk of {len(chunk_buffer)} features")
                yield from chunk_buffer

            # Final stats
            total_time = time.time() - start_time
            print(f"\n[✅] Processed {images_processed} images in {total_time/60:.1f} minutes")
            print(f"[📊] Average speed: {images_processed/total_time:.1f} images/sec")

        finally:
            del model
            torch.cuda.empty_cache()

    def extract_and_upload(self, model_config: dict, split: str = "train"):
        """
        Extract features using the optimized generator and upload to HuggingFace.
        Returns the dataset if the upload fails, so it can be retried.
        """
        model_id = model_config["repo_id"]
        short_name = model_config["short_name"]

        print("\n" + "=" * 60)
        print(f"⚙️ PROCESSING: {short_name} - {split}")
        print("=" * 60)

        # Define dataset features
        features = Features({
            "image_id": Value("string"),
            "label": Value("int32"),
            "clip_model": Value("string"),
            "clip_features": Sequence(Value("float32")),
            "vector_dim": Value("int32"),
            "timestamp": Value("timestamp[ns]"),
        })

        # Suppress the "Generating split" progress bar
        import sys
        import io
        old_stderr = sys.stderr
        sys.stderr = io.StringIO()

        try:
            # Create dataset from generator
            dataset = Dataset.from_generator(
                lambda: self._imagenet_generator_optimized(split, model_id),
                features=features,
                writer_batch_size=self.cfg["generator_chunk_size"],
                split=split
            )
        finally:
            # Always restore stderr, even if generation fails
            sys.stderr = old_stderr

        # Add metadata
        dataset.info.description = f"CLIP features for ImageNet-1k 256x256 {split} using {model_id}"
        dataset.info.version = self.cfg["generator_version"]

        # Save to disk before upload (safety backup)
        temp_path = f"./temp_dataset_{short_name}_{split}"
        print(f"[💾] Saving dataset to {temp_path} for safety...")
        dataset.save_to_disk(temp_path)

        split_name = f"{short_name}_{split}"

        # Skip the upload entirely if it isn't configured
        if not self.cfg["upload_to_hub"] or not self.cfg["repo_id"]:
            print(f"[💡] upload_to_hub is disabled - {split_name} kept on disk at {temp_path}")
            return None

        # Upload to HuggingFace
        print(f"\n[📤] Uploading {split_name} to {self.cfg['repo_id']}")
        try:
            dataset.push_to_hub(
                self.cfg["repo_id"],
                split=split_name,
                token=self.hf_token,
                commit_message=f"Add {split_name} CLIP features",
                max_shard_size="500MB"
            )
            print(f"[✅] Successfully uploaded {split_name}")

            # Clean up temp file on success
            import shutil
            shutil.rmtree(temp_path, ignore_errors=True)
            return None

        except Exception as e:
            print(f"[❌] Upload failed for {split_name}: {e}")
            print(f"[💡] Dataset saved at {temp_path} - you can retry the upload with:")
            print(f"    from datasets import load_from_disk")
            print(f"    dataset = load_from_disk('{temp_path}')")
            print(f"    dataset.push_to_hub('{self.cfg['repo_id']}', split='{split_name}', ...)")
            return dataset  # Return dataset for potential retry

    def extract_all_models(self, models_to_process=None):
        """
        Extract features for all models and splits.

        Args:
            models_to_process: List of model configs to process (default: all)
        """
        # Ensure the repo exists if we intend to upload
        if self.hf_token and self.cfg["upload_to_hub"] and self.cfg["repo_id"]:
            try:
                create_repo(self.cfg["repo_id"], repo_type="dataset", exist_ok=True, token=self.hf_token)
                print(f"[✅] Repository ready: {self.cfg['repo_id']}")
            except Exception as e:
                print(f"[⚠️] Repo creation warning: {e}")

        # Download all data first (once for all models)
        self._download_datasets()

        # Process specified models or all
        models = models_to_process or CLIP_MODELS
        total_combinations = len(models) * len(TARGET_SPLITS)

        print("\n" + "=" * 60)
        print(f"📋 PROCESSING PLAN: {len(models)} models × {len(TARGET_SPLITS)} splits = {total_combinations} tasks")
        print("=" * 60)

        # Keep track of failed uploads for retry
        failed_uploads = []

        for i, model_config in enumerate(models, 1):
            print(f"\n[{i}/{len(models)}] Model: {model_config['short_name']}")

            for split in TARGET_SPLITS:
                try:
                    dataset = self.extract_and_upload(model_config, split)
                    if dataset is not None:
                        # Upload failed but we still have the dataset
                        failed_uploads.append({
                            'model': model_config['short_name'],
                            'split': split,
                            'dataset': dataset,
                            'path': f"./temp_dataset_{model_config['short_name']}_{split}"
                        })
                except Exception as e:
                    print(f"[❌] Failed {model_config['short_name']} {split}: {e}")
                    continue

            # Cleanup between models
            torch.cuda.empty_cache()

        print("\n" + "=" * 60)
        if failed_uploads:
            print(f"⚠️ PROCESSING COMPLETE WITH {len(failed_uploads)} FAILED UPLOADS")
            print("\nFailed uploads saved to disk:")
            for failure in failed_uploads:
                print(f"  - {failure['model']}_{failure['split']}: {failure['path']}")
            print("\nYou can retry these uploads after fixing the issue.")
        else:
            print("🎉 ALL PROCESSING COMPLETE!")
        print("=" * 60)

        return failed_uploads  # Return list of failed uploads for retry


# ============================================================
# Utility Functions
# ============================================================

def estimate_processing_time(num_models=len(CLIP_MODELS)):
    """
    Estimate total processing time for all models.
    """
    print("=" * 60)
    print("⏱️ TIME ESTIMATES")
    print("=" * 60)

    # Dataset sizes
    train_size = 1_281_167
    val_size = 50_000
    total_images = train_size + val_size

    # Time estimates
    download_time_min = 60  # minutes
    download_time_max = 120

    # Processing speeds (images/sec)
    speed_min = 800
    speed_max = 1200

    print(f"\n📊 Dataset sizes:")
    print(f"  - Train: {train_size:,} images")
    print(f"  - Validation: {val_size:,} images")
    print(f"  - Total per model: {total_images:,} images")

    print(f"\n⏬ Download time (one-time):")
    print(f"  - Estimated: {download_time_min}-{download_time_max} minutes")

    print(f"\n🚀 Processing speed:")
    print(f"  - Expected: {speed_min}-{speed_max} images/sec")

    # Per model
    time_per_model_min = total_images / speed_max / 60
    time_per_model_max = total_images / speed_min / 60

    print(f"\n⏱️ Per model:")
    print(f"  - Processing time: {time_per_model_min:.1f}-{time_per_model_max:.1f} minutes")

    # Total
    total_min = download_time_min + (num_models * time_per_model_min)
    total_max = download_time_max + (num_models * time_per_model_max)

    print(f"\n🎯 Total for {num_models} models:")
    print(f"  - Total time: {total_min:.1f}-{total_max:.1f} minutes")
    print(f"  - Or: {total_min/60:.1f}-{total_max/60:.1f} hours")

    print("\n💡 Tips:")
    print("  - Processing is GPU-bound, so better GPUs = faster")
    print("  - A100/H100 can use batch_size=1024+ for more speed")
    print("  - Multiple GPUs can process different models in parallel")
    print("=" * 60)

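# Illustrative helper (not part of the original commit): a minimal sketch of the
# retry path that extract_and_upload prints when an upload fails. It assumes the
# dataset was written by save_to_disk at `temp_path` and that `split_name` follows
# the "{short_name}_{split}" convention used above.
def retry_upload_from_disk(temp_path: str, repo_id: str, split_name: str, token: Optional[str] = None):
    """Reload a saved feature dataset from disk and push it to the Hub again."""
    from datasets import load_from_disk

    dataset = load_from_disk(temp_path)
    dataset.push_to_hub(
        repo_id,
        split=split_name,
        token=token or os.environ.get("HF_TOKEN"),
        commit_message=f"Add {split_name} CLIP features (retry)",
        max_shard_size="500MB"
    )
    print(f"[✅] Retried upload of {split_name} to {repo_id}")
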

# ============================================================
# Main Execution
# ============================================================
"""
Main execution for multi-model ImageNet CLIP feature extraction.
"""
# Show time estimates
estimate_processing_time()

# Confirm settings
print(f"\n🔧 Current configuration:")
print(f"  - Batch size: {CONFIG['batch_size']}")
print(f"  - Chunk size: {CONFIG['generator_chunk_size']}")
print(f"  - Workers: {CONFIG['num_workers']}")
print(f"  - Models to process: {len(CLIP_MODELS)}")

# Option to process a subset of models.
# For testing, you might want to start with just one:
#   test_models = CLIP_MODELS[:1]  # Just the first model
#   extractor.extract_all_models(models_to_process=test_models)

# Run extraction
extractor = ImageNetClipFeatureExtractor(CONFIG)
extractor.extract_all_models()  # Process all models
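
# Illustrative usage sketch (not part of the original commit): once a split such as
# "clip_vit_laion_h14_train" has been pushed, the features can be pulled back and
# stacked into a tensor for downstream training. The repo id in the example call is
# a placeholder; the split name follows this script's "{short_name}_{split}" convention.
def load_features_as_tensor(repo_id: str, split_name: str) -> torch.Tensor:
    """Load one uploaded feature split and return clip_features as a [N, vector_dim] tensor."""
    from datasets import load_dataset

    ds = load_dataset(repo_id, split=split_name)
    # Each row stores its feature vector as a list of float32 values
    return torch.tensor(ds["clip_features"], dtype=torch.float32)

# Example (hypothetical repo id):
#   feats = load_features_as_tensor("your-username/imagenet-clip-features",
#                                   "clip_vit_laion_h14_train")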