prensofslavs committed on
Commit
fdf2275
·
verified ·
1 Parent(s): caf8be0

Update ui/dream_interface.py

Browse files
Files changed (1) hide show
  1. ui/dream_interface.py +268 -72
ui/dream_interface.py CHANGED
@@ -1,88 +1,284 @@
1
  import os
2
  import sys
3
- import argparse
4
- import gradio as gr
5
  import logging
 
 
 
6
 
7
  # Set up logging
8
  logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
9
  logger = logging.getLogger(__name__)
10
 
11
- # Add the current directory to the path
12
- current_dir = os.path.dirname(os.path.abspath(__file__))
13
- sys.path.append(current_dir)
14
-
15
def main():
    """Entry point: parse CLI flags, build the dream interface, and launch it."""
    parser = argparse.ArgumentParser(description="Freudian Dream Analyzer Interface")
    parser.add_argument("--share", action="store_true",
                        help="Create a shareable link")
    parser.add_argument("--debug", action="store_true",
                        help="Run in debug mode")
    parser.add_argument("--hf_space", action="store_true",
                        help="Running in Hugging Face Space")
    args = parser.parse_args()

    # Auto-detect a Hugging Face Space even when the flag was not passed.
    if not args.hf_space and os.path.exists("/home/user"):
        logger.info("Detected Hugging Face Space environment")
        args.hf_space = True

    # Import the interface factory, falling back to the ui/ directory on failure.
    try:
        from ui.dream_interface import create_dream_interface
        logger.info("Successfully imported create_dream_interface")
    except ImportError as e:
        logger.error(f"Error importing create_dream_interface: {e}")
        ui_dir = os.path.join(current_dir, "ui")
        if not os.path.exists(ui_dir):
            raise
        sys.path.append(ui_dir)
        try:
            from dream_interface import create_dream_interface
            logger.info("Successfully imported create_dream_interface from ui directory")
        except ImportError as e2:
            logger.error(f"Error importing create_dream_interface from ui directory: {e2}")
            raise

    # The new interface ignores this path and loads a CPU-compatible model itself.
    model_path = "dummy_path"
    logger.info("Using dummy model path for CPU-compatible model")

    try:
        logger.info("Creating interface...")
        interface = create_dream_interface(model_path)
        logger.info("Interface created successfully")

        # Shared launch settings; debug mode layers its extra flag on top.
        launch_kwargs = dict(
            server_name="0.0.0.0",
            server_port=7860,
            share=args.share,
            show_error=True,
        )
        if args.debug:
            logger.info("Launching in debug mode...")
            interface.launch(debug=True, **launch_kwargs)
        else:
            logger.info("Launching in normal mode...")
            interface.launch(**launch_kwargs)
    except Exception as e:
        logger.error(f"Error creating or launching interface: {e}")
        raise


if __name__ == "__main__":
    try:
        logger.info("Starting Freudian Dream Analyzer application")
        main()
    except Exception as exc:
        logger.error(f"Application crashed: {exc}")
        raise
 
1
  import os
2
  import sys
3
+ import json
4
+ import torch
5
  import logging
6
+ import gradio as gr
7
+ from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
8
+ from datetime import datetime
9
 
10
  # Set up logging
11
  logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
12
  logger = logging.getLogger(__name__)
13
 
14
class DreamAnalyzer:
    """Analyze dreams from a Freudian perspective with a small causal LM.

    NOTE(review): the supplied ``model_path`` is effectively ignored for
    loading — a CPU-friendly Hub model (``microsoft/phi-2``) is always used.
    The path is only consulted for failure diagnostics.
    """

    def __init__(self, model_path):
        # Kept only for diagnostics in _load_model; loading uses a fixed Hub id.
        self.model_path = model_path
        self.pipe, self.model_format = self._load_model()

    def _load_model(self):
        """Build the text-generation pipeline.

        Returns:
            tuple: (transformers text-generation pipeline, str model-format tag).

        Raises:
            Exception: re-raised after logging diagnostics on any load failure.
        """
        logger.info(f"Loading model from {self.model_path}...")

        # Distinguish a local directory from a Hub id/URL; only used for the
        # diagnostic directory listing in the except branch below.
        is_local_path = os.path.exists(self.model_path) if not self.model_path.startswith("http") else False

        try:
            cuda_available = torch.cuda.is_available()
            logger.info(f"CUDA available: {cuda_available}")

            # Use a pre-defined Hub model small enough to run on CPU.
            logger.info("Loading CPU-compatible model from Hugging Face Hub...")
            model_id = "microsoft/phi-2"

            tokenizer = AutoTokenizer.from_pretrained(model_id)
            model = AutoModelForCausalLM.from_pretrained(
                model_id,
                torch_dtype=torch.float32,  # float32 for CPU inference
                device_map="auto"
            )

            logger.info(f"Special tokens: {tokenizer.special_tokens_map}")
            model_format = "generic"  # default prompt format for most models

            text_generation_pipeline = pipeline(
                "text-generation",
                model=model,
                tokenizer=tokenizer,
                max_new_tokens=512,  # reduced for faster inference
                do_sample=True,
                temperature=0.7,
                top_p=0.9,
                repetition_penalty=1.2,
            )
            logger.info("Text generation pipeline created successfully")

        except Exception as e:
            logger.error(f"Error loading model: {e}")
            # Extra debug info: list the model directory when it is local.
            logger.error("Directory contents of model path:")
            if is_local_path:
                try:
                    logger.error(os.listdir(self.model_path))
                except Exception as dir_e:
                    logger.error(f"Could not list directory: {dir_e}")
            raise

        return text_generation_pipeline, model_format

    def analyze_dream(self, dream_text):
        """Return an HTML-formatted Freudian analysis of ``dream_text``.

        Empty/whitespace input yields a prompt-the-user string; inference
        failures are reported as HTML error messages rather than raised.
        """
        if not dream_text or not dream_text.strip():
            logger.warning("Empty dream text submitted")
            return "Please enter a dream to analyze."

        logger.info(f"Analyzing dream: {dream_text[:100]}...")  # Log just the first 100 chars

        try:
            # Create a prompt based on the model format
            prompt = f"You are a Freudian dream analyst. Analyze this dream from a Freudian perspective:\n\n{dream_text}\n\nProvide an analysis that includes manifest content, latent content, and psychological symbolism."

            logger.info("Submitting prompt to model...")
            try:
                # Generate the analysis
                result = self.pipe(prompt, return_full_text=False, max_new_tokens=512)
                logger.info("Model returned a response")

                if not result or len(result) == 0:
                    logger.error("Empty result from model")
                    return "<h3>Analysis Error</h3><p>The model returned an empty response. Please try again with a different dream description.</p>"

                analysis_text = result[0]["generated_text"]
                logger.info(f"Analysis text length: {len(analysis_text)}")
                logger.info(f"First 100 chars of analysis: {analysis_text[:100]}")

                formatted_analysis = self._format_analysis(analysis_text)
                logger.info("Successfully formatted the analysis")
                return formatted_analysis

            except Exception as e:
                logger.error(f"Error during model inference: {e}")
                return f"<h3>Analysis Error</h3><p>Error during dream analysis: {str(e)}</p><p>Please try again with a shorter dream description or contact support.</p>"

        except Exception as e:
            # Catches failures outside inference (e.g. prompt construction).
            logger.error(f"Error analyzing dream: {e}")
            return f"<h3>Error Analyzing Dream</h3><p>There was a problem analyzing your dream: {str(e)}</p><p>Please try again or contact support if the issue persists.</p>"

    def _format_analysis(self, analysis_text):
        """Wrap the raw model output in simple HTML; never raises."""
        try:
            logger.info("Formatting analysis...")
            # (Removed an unused section-heading table and an unreachable
            # emptiness check: the string below always starts with "<h3>".)
            formatted = "<h3>Freudian Dream Analysis</h3>"
            formatted += f"<p>{analysis_text}</p>"
            logger.info("Successfully formatted analysis")
            return formatted
        except Exception as e:
            logger.error(f"Error in _format_analysis: {e}")
            # Return a minimally wrapped version if formatting fails.
            return f"<h3>Dream Analysis</h3><p>{analysis_text}</p>"

    def record_feedback(self, dream_text, analysis, rating, improvement_suggestion, feedback_dir="feedback_data"):
        """Persist one feedback entry as its own JSON file and append it to
        the running ``all_feedback.json`` master list in ``feedback_dir``.

        Returns a user-facing status string; never raises.
        """
        try:
            os.makedirs(feedback_dir, exist_ok=True)

            feedback = {
                "timestamp": datetime.now().isoformat(),
                "dream_text": dream_text,
                "model_analysis": analysis,
                "rating": rating,
                "improvement_suggestion": improvement_suggestion
            }

            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            feedback_file = os.path.join(feedback_dir, f"feedback_{timestamp}.json")

            with open(feedback_file, "w", encoding="utf-8") as f:
                json.dump(feedback, f, indent=2)

            master_file = os.path.join(feedback_dir, "all_feedback.json")

            if os.path.exists(master_file):
                with open(master_file, "r", encoding="utf-8") as f:
                    try:
                        all_feedback = json.load(f)
                    except json.JSONDecodeError:
                        # Corrupt master file: start fresh rather than crash.
                        all_feedback = []
            else:
                all_feedback = []

            all_feedback.append(feedback)

            with open(master_file, "w", encoding="utf-8") as f:
                json.dump(all_feedback, f, indent=2)

            return "Thank you for your feedback! It will help improve future versions of the dream analyzer."
        except Exception as e:
            logger.error(f"Error recording feedback: {e}")
            return f"Error recording feedback: {str(e)}"
176
+
177
# This is the function that needs to be exported
def create_dream_interface(model_path):
    """Assemble and return the Gradio Blocks UI for the dream analyzer.

    The analyzer is built once and captured by the event callbacks.
    Any error hit while constructing the UI is logged and re-raised.
    """
    try:
        logger.info(f"Creating dream interface with model path: {model_path}")
        dream_analyzer = DreamAnalyzer(model_path)

        def on_analyze(dream_text):
            # Surface analyzer failures as HTML instead of a raw traceback.
            logger.info(f"Analyze button clicked with dream text: {dream_text[:50]}...")
            try:
                analysis = dream_analyzer.analyze_dream(dream_text)
                logger.info("Analysis complete, returning result")
                return analysis
            except Exception as e:
                logger.error(f"Error in analyze_dream_fn: {e}")
                return f"<h3>Error</h3><p>An error occurred while analyzing your dream: {str(e)}</p>"

        def on_feedback(dream_text, analysis, rating, suggestion):
            # Forward to the analyzer, degrading to an error string on failure.
            try:
                return dream_analyzer.record_feedback(dream_text, analysis, rating, suggestion)
            except Exception as e:
                logger.error(f"Error in submit_feedback: {e}")
                return f"Error submitting feedback: {str(e)}"

        with gr.Blocks(title="Freudian Dream Analyzer", theme=gr.themes.Soft()) as interface:
            gr.Markdown("# Freudian Dream Analyzer")
            gr.Markdown("""
            This tool analyzes dreams from a Freudian psychoanalytic perspective.
            Enter the description of a dream, and the model will provide an interpretation
            based on Freudian theory of manifest content, latent content, and symbolic meanings.

            *Note: Dream analysis is subjective and this tool provides one possible interpretation
            based on Freudian psychoanalysis.*
            """)

            # --- Input section ---
            with gr.Row():
                with gr.Column(scale=2):
                    dream_input = gr.Textbox(
                        lines=10,
                        placeholder="Enter your dream here...",
                        label="Dream Description"
                    )
                    analyze_btn = gr.Button("Analyze Dream", variant="primary")

            # --- Output section ---
            with gr.Row():
                with gr.Column(scale=2):
                    analysis_output = gr.HTML(
                        label="Freudian Analysis",
                        elem_id="analysis_output"
                    )

            # --- Feedback section ---
            with gr.Row():
                with gr.Column():
                    gr.Markdown("### Help improve the Dream Analyzer")
                    feedback_rating = gr.Slider(
                        minimum=1,
                        maximum=5,
                        step=1,
                        value=3,
                        label="Rate the quality of this analysis (1-5)"
                    )
                    feedback_text = gr.Textbox(
                        lines=3,
                        placeholder="Suggestions for improvement (optional)",
                        label="Feedback"
                    )
                    feedback_btn = gr.Button("Submit Feedback")
                    feedback_output = gr.Textbox(label="")

            # Canned dreams for quick manual testing.
            example_dreams = [
                ["I was flying over a vast ocean, feeling both exhilarated and afraid. Suddenly I started falling, but before hitting the water I woke up."],
                ["I was in my childhood home, but the rooms were bigger and had doors I had never seen before. I opened one door and found a room full of old toys."],
                ["I was being chased through a dark forest by a figure I couldn't see clearly. I kept running but my legs felt heavy and I couldn't move fast enough."],
                ["I was taking an important exam but realized I hadn't studied at all. The questions made no sense and the clock was ticking loudly. Everyone else seemed to know the answers."],
                ["I was at a party with friends when I suddenly realized I wasn't wearing any clothes. Everyone was talking normally as if nothing was wrong, but I felt deeply embarrassed."]
            ]

            gr.Examples(
                examples=example_dreams,
                inputs=dream_input
            )

            # Show a progress notice immediately, then queue the real analysis.
            analyze_btn.click(
                fn=lambda x: "Analyzing dream... please wait.",
                inputs=dream_input,
                outputs=analysis_output,
                queue=False  # run immediately without queuing
            ).then(
                fn=on_analyze,
                inputs=dream_input,
                outputs=analysis_output,
                queue=True  # use the queue for the actual analysis
            )

            feedback_btn.click(
                fn=on_feedback,
                inputs=[dream_input, analysis_output, feedback_rating, feedback_text],
                outputs=feedback_output
            )

        return interface
    except Exception as e:
        logger.error(f"Error creating dream interface: {e}")
        raise