#!/usr/bin/env bash
# 🤖 FIX VOICE GPU PROCESSING 🚀

echo "🚀 FIXING GPU VOICE PROCESSING..."

# Set CUDA paths
export CUDA_HOME=/usr/local/cuda
export PATH=$CUDA_HOME/bin:$PATH
export LD_LIBRARY_PATH=$CUDA_HOME/lib64:$LD_LIBRARY_PATH

# Fix cuDNN path
export LD_LIBRARY_PATH=/usr/lib/x86_64-linux-gnu:$LD_LIBRARY_PATH
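
# Hedged sanity check (assumes the NVIDIA driver and CUDA toolkit are installed at the
# paths exported above; skip or adjust if your layout differs).
command -v nvidia-smi >/dev/null 2>&1 && nvidia-smi --query-gpu=name,driver_version --format=csv,noheader \
  || echo "⚠ nvidia-smi not found - is the NVIDIA driver installed?"
command -v nvcc >/dev/null 2>&1 && nvcc --version | tail -n 1 \
  || echo "⚠ nvcc not found under $CUDA_HOME/bin"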

# Install ctranslate2 with CUDA support
echo "📦 Installing CTranslate2 with CUDA..."
pip install --upgrade --force-reinstall ctranslate2 --no-cache-dir
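
# Hedged check that the freshly installed CTranslate2 wheel can actually see the GPU.
# get_cuda_device_count() is part of the ctranslate2 Python API; a result of 0 means the
# wheel was built without CUDA or the driver/toolkit is not visible from this environment.
python3 -c "import ctranslate2; print('CUDA devices visible to CTranslate2:', ctranslate2.get_cuda_device_count())"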

# Install faster-whisper with proper dependencies
echo "📦 Installing Faster-Whisper for GPU..."
pip install --upgrade --force-reinstall "faster-whisper>=1.0.0" --no-cache-dir
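
# Hedged check of the installed faster-whisper version (uses importlib.metadata so the
# package itself is not imported at this point).
python3 -c "from importlib.metadata import version; print('faster-whisper', version('faster-whisper'))"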
| echo "β GPU voice processing fixed!" | |
| echo "" | |
| echo "Now updating golem_flask_server.py for GPU..." | |
| # Fix the ASR initialization in golem_flask_server.py | |
| python3 - <<'EOF' | |
import os

# __file__ is not defined when the code arrives on stdin, so take the directory from
# the SCRIPT_DIR variable exported by the surrounding shell script.
script_dir = os.environ.get("SCRIPT_DIR", os.getcwd())
file_path = os.path.join(script_dir, "home", "chezy", "golem_flask_server.py")

# Read and fix the file
with open(file_path, 'r') as f:
    lines = f.readlines()

# Fix the ASR initialization
for i, line in enumerate(lines):
    # Preserve the original indentation of any line we rewrite
    indent = line[:len(line) - len(line.lstrip())]
    # Force GPU for CT2
    if 'os.environ.setdefault("CT2_USE_CUDA"' in line:
        lines[i] = indent + 'os.environ.setdefault("CT2_USE_CUDA", "1")  # FORCE GPU\n'
        print(f"✅ Fixed line {i+1}: Forced GPU for CT2")
    # Use int8_float16 for RTX 3050
    elif '"FASTER_WHISPER_COMPUTE_TYPE"' in line and 'getenv' in line:
        lines[i] = indent + 'compute_type = os.getenv("FASTER_WHISPER_COMPUTE_TYPE", "int8_float16")  # RTX 3050 optimized\n'
        print(f"✅ Fixed line {i+1}: Set compute type for RTX 3050")
    # Force device to cuda in WhisperModel (assumes the whole call sits on one line)
    elif '_faster_whisper_model = WhisperModel(' in line:
        # Only touch the line if it does not already pass a device parameter
        if 'device=' not in line:
            # Drop the closing ')' and append the device argument
            lines[i] = line.rstrip()[:-1] + ', device="cuda")\n'
            print(f"✅ Fixed line {i+1}: Added device='cuda' to WhisperModel")

# Write back
with open(file_path, 'w') as f:
    f.writelines(lines)

print("✅ golem_flask_server.py updated for GPU!")
EOF
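
# Hedged follow-up check (path assumed to match the patch script above): confirm the
# patched settings are now present in golem_flask_server.py.
grep -n 'CT2_USE_CUDA\|FASTER_WHISPER_COMPUTE_TYPE\|device="cuda"' \
  "$SCRIPT_DIR/home/chezy/golem_flask_server.py" || echo "⚠ Expected patched lines not found"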
| echo "" | |
| echo "π― TESTING GPU VOICE..." | |
| python3 - <<'EOF' | |
import os

os.environ["CT2_USE_CUDA"] = "1"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"

try:
    from faster_whisper import WhisperModel
    print("✅ Faster-Whisper imported successfully!")

    # Try to load model on GPU with int8_float16
    model = WhisperModel(
        "Systran/faster-distil-whisper-large-v3",
        device="cuda",
        compute_type="int8_float16",
    )
    print("✅ Whisper model loaded on GPU!")
    print("🎉 GPU VOICE PROCESSING READY!")
except Exception as e:
    print(f"❌ Error: {e}")
    print("\nTrying fallback to float16...")
    try:
        model = WhisperModel(
            "Systran/faster-distil-whisper-large-v3",
            device="cuda",
            compute_type="float16",
        )
        print("✅ Whisper model loaded on GPU with float16!")
    except Exception as e2:
        print(f"❌ Float16 also failed: {e2}")
EOF
| echo "" | |
| echo "β DONE! Restart the server with:" | |
| SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" | |
| ROOT_DIR="$(dirname "$SCRIPT_DIR")" | |
| echo " cd $ROOT_DIR && ./start_consciousness_ecosystem.sh" | |