#!/usr/bin/env python3
"""
Test the clean model
"""
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch
def test_clean_model(
    model_dir: str = '.',
    text: str = "I am extremely happy and joyful!",
    joy_index: int = 17,
    threshold: float = 0.8,
    max_length: int = 256,
) -> bool:
    """Smoke-test a sequence-classification checkpoint on one example.

    Loads a tokenizer/model pair from *model_dir*, runs *text* through it,
    applies a sigmoid to the logits (multi-label style scoring — assumes the
    checkpoint was trained with independent per-class probabilities; TODO
    confirm against the training setup), and reports success when the
    highest class probability exceeds *threshold*.

    Args:
        model_dir: Directory containing the saved model and tokenizer
            (defaults to the current directory, as in the original script).
        text: Input sentence to classify.
        joy_index: Index of the "joy" class in the model's label space
            — presumably matches the checkpoint's id2label mapping; verify.
        threshold: Minimum max-probability considered a passing result.
        max_length: Truncation length for tokenization.

    Returns:
        True if the max class probability exceeds *threshold*, else False.
    """
    print("🧪 Testing CLEAN model locally...")
    tokenizer = AutoTokenizer.from_pretrained(model_dir)
    model = AutoModelForSequenceClassification.from_pretrained(model_dir)
    model.eval()  # disable dropout etc. for deterministic inference
    inputs = tokenizer(text, return_tensors='pt', truncation=True, max_length=max_length)
    with torch.no_grad():  # no gradients needed for a forward-only check
        outputs = model(**inputs)
    # squeeze(0) drops the batch dimension -> 1-D tensor of class probabilities
    probabilities = torch.sigmoid(outputs.logits).squeeze(0)
    print(f"Text: {text}")
    print(f"Max probability: {probabilities.max().item():.4f}")
    print(f"Joy probability: {probabilities[joy_index].item():.4f}")
    if probabilities.max().item() > threshold:
        print("✅ CLEAN MODEL WORKS PERFECTLY!")
        return True
    print("❌ Clean model still has issues")
    return False
| if __name__ == "__main__": | |
| test_clean_model() | |