| "use client"; | |
| import { Button } from '@/components/ui/button'; | |
| import { ScrollArea } from '@/components/ui/scroll-area'; | |
| import { Textarea } from '@/components/ui/textarea'; | |
| import { Bot, Paperclip, SendHorizonal, MessageSquarePlus } from 'lucide-react'; | |
| import React, { useEffect, useRef, useState } from 'react'; | |
| import { ChatMessage, LoadingMessage, ImageLoadingMessage } from './chat-message'; | |
| import { useIsMobile } from '@/hooks/use-mobile'; | |
| import type { Message } from '@/app/page'; | |
| type ChatPanelProps = { | |
| messages: Message[]; | |
| onSendMessage: (input: string, temperature: number, file: File | null) => Promise<void>; | |
| isLoading: boolean; | |
| isChatSelected: boolean; | |
| onNewChat: () => void; | |
| onConsciousnessDimensionSelect: (dimension: string) => void; | |
| selectedConsciousnessDimension: string; | |
| temperature: number; | |
| isWelcomeMode?: boolean; | |
| consciousnessColor?: string; | |
| onImageGeneratingChange?: (isGenerating: boolean) => void; | |
| isImageGenerating?: boolean; | |
| onOpenEditor?: (b64: string, prompt?: string) => void; | |
| imageProgress?: number; | |
| imageElapsed?: number; | |
| imageStatus?: string; | |
| }; | |
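
// Minimal usage sketch (hypothetical parent wiring; only the required props are
// shown, and "integral" is a placeholder dimension value, not one defined here):
// <ChatPanel
//   messages={messages}
//   onSendMessage={async (input, temperature, file) => { /* ... */ }}
//   isLoading={false}
//   isChatSelected={true}
//   onNewChat={() => {}}
//   onConsciousnessDimensionSelect={(dimension) => {}}
//   selectedConsciousnessDimension="integral"
//   temperature={0.7}
// />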
export function ChatPanel({
  messages,
  onSendMessage,
  isLoading,
  isChatSelected,
  onNewChat,
  onConsciousnessDimensionSelect,
  selectedConsciousnessDimension,
  temperature,
  isWelcomeMode = false,
  consciousnessColor = 'text-blue-600',
  onImageGeneratingChange,
  isImageGenerating,
  onOpenEditor,
  imageProgress = 0,
  imageElapsed = 0,
  imageStatus = 'starting',
}: ChatPanelProps) {
  const isMobile = useIsMobile();
  const [input, setInput] = useState('');
  const [file, setFile] = useState<File | null>(null);
  const [imageMode, setImageMode] = useState<boolean>(false);
  const [safeMode, setSafeMode] = useState<boolean>(true);
  const scrollAreaRef = useRef<HTMLDivElement>(null);
  const messagesEndRef = useRef<HTMLDivElement>(null);
  const textareaRef = useRef<HTMLTextAreaElement>(null);
  const fileInputRef = useRef<HTMLInputElement>(null);

  // Compute the backend/proxy base. Always use the same-origin Next API proxy in
  // the browser to avoid CORS/mixed-origin issues (e.g., when the dev host is
  // 127.0.0.2). Fall back to the direct backend only during SSR, where window is
  // not available.
  function getApiUrl(path: string): string {
    const p = path.startsWith('/api') ? path : `/api${path}`;
    if (typeof window !== 'undefined') {
      return p;
    }
    const direct = process.env.NEXT_PUBLIC_BACKEND_URL || '';
    return direct ? `${direct}${path}` : p;
  }
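
  // Example (from the logic above): in the browser, getApiUrl('/asr/transcribe')
  // returns '/api/asr/transcribe'; during SSR with NEXT_PUBLIC_BACKEND_URL set to
  // e.g. 'http://localhost:5000' it returns 'http://localhost:5000/asr/transcribe'.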
  // Compute the backend base, with a runtime fallback when the env var is missing
  function getBackendBase(): string {
    const fromEnv = process.env.NEXT_PUBLIC_BACKEND_URL || '';
    if (fromEnv) return fromEnv;
    if (typeof window !== 'undefined') {
      if (window.location.hostname === 'localhost' || window.location.hostname === '127.0.0.1') {
        return 'http://localhost:5000';
      }
    }
    return '';
  }
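
  // Note: this helper appears unused within this component; it returns an absolute
  // origin (e.g. 'http://localhost:5000' in local dev) for callers that cannot go
  // through the same-origin /api proxy, and '' when no backend is known.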

  // Speech mode state
  const [speechMode, setSpeechMode] = useState<boolean>(false);
  const [isRecording, setIsRecording] = useState<boolean>(false);
  // Mirror of isRecording for use inside audio callbacks: a ref avoids the
  // stale-closure problem where onaudioprocess would only ever see the
  // pre-click state value (false) and never capture any audio.
  const isRecordingRef = useRef<boolean>(false);
  const mediaStreamRef = useRef<MediaStream | null>(null);
  const audioCtxRef = useRef<AudioContext | null>(null);
  const sourceRef = useRef<MediaStreamAudioSourceNode | null>(null);
  const processorRef = useRef<ScriptProcessorNode | null>(null);
  const analyserRef = useRef<AnalyserNode | null>(null);
  const pcmChunksRef = useRef<Float32Array[]>([]);
  const mediaRecorderRef = useRef<MediaRecorder | null>(null);
  const recChunksRef = useRef<BlobPart[]>([]);
  const levelsRef = useRef<number[]>([0, 0, 0, 0, 0]);
  const [, forceRender] = useState(0); // force rerender for animation
  const lastActiveRef = useRef<number>(Date.now());
  const startedAtRef = useRef<number>(Date.now());
  const SILENCE_MS = 1500;
  const MIN_CAPTURE_MS = 800;
  const SILENCE_THRESHOLD = 0.008; // deliberately low so quiet speech is not cut off
  // Track the last-spoken assistant message to avoid repeated TTS
  const lastSpokenIdRef = useRef<string | null>(null);
  const ttsAudioRef = useRef<HTMLAudioElement | null>(null);

  // Auto-scroll to bottom when messages change
  useEffect(() => {
    const scrollToBottom = () => {
      if (messagesEndRef.current) {
        messagesEndRef.current.scrollIntoView({ behavior: 'smooth' });
      }
    };
    // Scroll immediately
    scrollToBottom();
    // Also scroll after a short delay to handle any dynamic content loading
    const timeoutId = setTimeout(scrollToBottom, 100);
    return () => clearTimeout(timeoutId);
  }, [messages, isLoading]);

  // Focus the textarea when not loading
  useEffect(() => {
    if (!isLoading && textareaRef.current) {
      textareaRef.current.focus();
    }
  }, [isLoading]);

  // Animated waveform (5 bars) + silence detection
  useEffect(() => {
    let rafId: number;
    const tick = () => {
      if (isRecording && analyserRef.current) {
        const analyser = analyserRef.current;
        const arr = new Uint8Array(analyser.fftSize);
        analyser.getByteTimeDomainData(arr);
        // RMS amplitude
        let sum = 0;
        for (let i = 0; i < arr.length; i++) {
          const v = (arr[i] - 128) / 128;
          sum += v * v;
        }
        const rms = Math.sqrt(sum / arr.length);
        const level = Math.min(1, rms * 4);
        const prev = levelsRef.current.slice();
        prev.shift();
        prev.push(level);
        levelsRef.current = prev;
        forceRender((x) => x + 1);
        // Silence detection
        if (rms > SILENCE_THRESHOLD) {
          lastActiveRef.current = Date.now();
        } else if (
          Date.now() - lastActiveRef.current > SILENCE_MS &&
          Date.now() - startedAtRef.current > MIN_CAPTURE_MS
        ) {
          // Auto-stop after 1.5 s of silence
          stopRecording();
        }
      }
      rafId = requestAnimationFrame(tick);
    };
    rafId = requestAnimationFrame(tick);
    return () => cancelAnimationFrame(rafId);
  }, [isRecording]);
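
  // Level math, for reference: getByteTimeDomainData yields unsigned bytes centered
  // at 128, so (b - 128) / 128 maps each sample into [-1, 1). The RMS of that signal
  // is ~0 for silence; multiplying by 4 and clamping to 1 lifts typical speech into
  // the visible bar range. The x4 gain is a tuning choice, not a derived constant.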

  const startRecording = async () => {
    try {
      const stream = await navigator.mediaDevices.getUserMedia({
        audio: {
          echoCancellation: true,
          noiseSuppression: true,
          autoGainControl: true,
          channelCount: 1,
        },
      });
      mediaStreamRef.current = stream;
      const audioCtx = new (window.AudioContext || (window as any).webkitAudioContext)();
      audioCtxRef.current = audioCtx;
      if (audioCtx.state === 'suspended') {
        try { await audioCtx.resume(); } catch {}
      }
      const source = audioCtx.createMediaStreamSource(stream);
      sourceRef.current = source;
      const analyser = audioCtx.createAnalyser();
      analyser.fftSize = 1024;
      analyserRef.current = analyser;
      // MediaRecorder fallback (WebM/Opus)
      try {
        const mr = new MediaRecorder(stream, { mimeType: 'audio/webm;codecs=opus' });
        mediaRecorderRef.current = mr;
        recChunksRef.current = [];
        mr.ondataavailable = (ev) => {
          if (ev.data && ev.data.size > 0) recChunksRef.current.push(ev.data);
        };
        mr.start(250); // gather in 250 ms chunks
      } catch {}
      // Sink to keep processing running while staying silent
      const silentGain = audioCtx.createGain();
      silentGain.gain.value = 0.0;
      pcmChunksRef.current = [];
      const now = Date.now();
      lastActiveRef.current = now;
      startedAtRef.current = now;
      const processor = audioCtx.createScriptProcessor(4096, 1, 1);
      processorRef.current = processor;
      processor.onaudioprocess = (e: AudioProcessingEvent) => {
        // Read the ref, not the state: this closure would otherwise capture the
        // pre-click isRecording value and silently drop every buffer.
        if (!isRecordingRef.current) return;
        const input = e.inputBuffer.getChannelData(0);
        // Copy the buffer since input is a live view
        const buf = new Float32Array(input.length);
        buf.set(input);
        pcmChunksRef.current.push(buf);
      };
      // Connect the graph: source -> analyser (for levels) and -> processor (for capture)
      source.connect(analyser);
      source.connect(processor);
      processor.connect(silentGain);
      silentGain.connect(audioCtx.destination);
      isRecordingRef.current = true;
      setIsRecording(true);
    } catch (err) {
      console.error('Mic access failed:', err);
      setSpeechMode(false);
    }
  };
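
  // Note: ScriptProcessorNode is deprecated in favor of AudioWorkletNode; it is
  // kept here because it needs no separate worklet module. A production rewrite
  // would move the capture into an AudioWorkletProcessor.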

  const stopRecording = async () => {
    // The ref guard also dedupes repeated auto-stop calls from the animation
    // loop that can fire before React re-renders with isRecording = false.
    if (!isRecordingRef.current) return;
    isRecordingRef.current = false;
    setIsRecording(false);
    // Stop tracks
    mediaStreamRef.current?.getTracks().forEach((t) => t.stop());
    mediaStreamRef.current = null;
    // Capture the sample rate before the context is closed and the ref cleared
    const sampleRate = audioCtxRef.current?.sampleRate || 48000;
    // Disconnect the audio graph
    try {
      processorRef.current?.disconnect();
      analyserRef.current?.disconnect();
      sourceRef.current?.disconnect();
      if (mediaRecorderRef.current && mediaRecorderRef.current.state !== 'inactive') {
        mediaRecorderRef.current.stop();
      }
      await audioCtxRef.current?.close();
    } catch {}
    processorRef.current = null;
    analyserRef.current = null;
    sourceRef.current = null;
    audioCtxRef.current = null;
    // Build a mono WAV from the Float32 chunks at the native sample rate
    const floatData = concatFloat32(pcmChunksRef.current);
    let wavBlob: Blob;
    if (!floatData || floatData.length === 0) {
      // Fallback: decode MediaRecorder chunks to PCM via AudioContext
      try {
        const webmBlob = new Blob(recChunksRef.current, { type: 'audio/webm' });
        recChunksRef.current = [];
        const arr = await webmBlob.arrayBuffer();
        const decodeCtx = new (window.AudioContext || (window as any).webkitAudioContext)();
        const audioBuf = await decodeCtx.decodeAudioData(arr);
        const ch0 = audioBuf.getChannelData(0);
        const copy = new Float32Array(ch0.length);
        copy.set(ch0);
        wavBlob = encodeWAV(copy, 1, audioBuf.sampleRate || sampleRate);
        try { await decodeCtx.close(); } catch {}
      } catch (e) {
        console.warn('No audio captured (empty buffer) - aborting ASR request');
        return;
      }
    } else {
      wavBlob = encodeWAV(floatData, 1, sampleRate);
    }
    pcmChunksRef.current = [];
    mediaRecorderRef.current = null;
    // Send to ASR backend
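    // Assumed request/response contract for the ASR endpoint, inferred from the
    // handling below rather than from API docs:
    //   POST /api/asr/transcribe  body:  { audio_base64, language, vad, beam_size }
    //   reply: { success: boolean, text?: string, segments?: { text?: string }[], error?: string }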
    const base64 = await blobToBase64(wavBlob);
    try {
      const res = await fetch(getApiUrl('/asr/transcribe'), {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify({ audio_base64: base64, language: 'en', vad: false, beam_size: 5 }),
      });
      const ct = res.headers.get('content-type') || '';
      const payload = ct.includes('application/json')
        ? await res.json()
        : { success: false, error: `${res.status} ${await res.text()}` };
      if (payload?.success) {
        const primary = (payload?.text ?? '').toString();
        const fallback = Array.isArray(payload?.segments)
          ? payload.segments.map((s: any) => s?.text || '').join(' ')
          : '';
        const text = (primary || fallback).trim();
        if (text.length > 0) {
          setInput(text);
          // Auto-send when in speech mode
          if (speechMode) {
            const messageToSend = imageMode
              ? `[[IMAGE_MODE]] ${text}${safeMode ? ' [[SAFE_MODE=ON]]' : ' [[SAFE_MODE=OFF]]'}`
              : text;
            await onSendMessage(messageToSend, temperature, file);
            setInput('');
            setFile(null);
            if (fileInputRef.current) fileInputRef.current.value = '';
          }
        } else {
          console.warn('ASR returned empty text', payload);
        }
      } else {
        console.warn('ASR failed:', payload?.error || `status ${res.status}`);
      }
    } catch (e) {
      console.error('ASR request failed:', e);
    }
  };

  const toggleSpeech = async () => {
    const next = !speechMode;
    setSpeechMode(next);
    if (next) {
      await startRecording();
    } else if (isRecording) {
      await stopRecording();
    }
  };

  // Auto TTS of assistant replies in speech mode
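  // Assumed request/response contract for the TTS endpoint, inferred from the
  // handling below rather than from API docs:
  //   POST /api/tts/synthesize  body:  { text }
  //   reply: { success: boolean, audio_base64_wav?: string, error?: string }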
  useEffect(() => {
    if (!speechMode) return;
    if (!messages || messages.length === 0) return;
    const last = messages[messages.length - 1];
    if (last.role !== 'assistant' || !last.content?.trim()) return;
    // Compose a simple id from the message count and content length
    const msgId = `${messages.length}-${last.content.length}`;
    if (lastSpokenIdRef.current === msgId) return;
    lastSpokenIdRef.current = msgId;
    const runTts = async () => {
      try {
        const res = await fetch(getApiUrl('/tts/synthesize'), {
          method: 'POST',
          headers: { 'Content-Type': 'application/json' },
          body: JSON.stringify({ text: last.content }),
        });
        const ct = res.headers.get('content-type') || '';
        const payload = ct.includes('application/json')
          ? await res.json()
          : { success: false, error: `${res.status} ${await res.text()}` };
        if (payload?.success && payload?.audio_base64_wav) {
          const audio = new Audio(`data:audio/wav;base64,${payload.audio_base64_wav}`);
          ttsAudioRef.current?.pause();
          ttsAudioRef.current = audio;
          audio.play().catch(() => {}); // autoplay may be blocked; fail silently
        } else {
          console.warn('TTS failed:', payload?.error || `status ${res.status}`);
        }
      } catch (e) {
        console.error('TTS request failed:', e);
      }
    };
    runTts();
  }, [messages, speechMode]);

  // Helpers: concat, WAV encode, base64
  function concatFloat32(chunks: Float32Array[]): Float32Array {
    let length = 0;
    for (const c of chunks) length += c.length;
    const out = new Float32Array(length);
    let o = 0;
    for (const c of chunks) {
      out.set(c, o);
      o += c.length;
    }
    return out;
  }

  function encodeWAV(samples: Float32Array, numChannels: number, sampleRate: number): Blob {
    // Convert float to 16-bit PCM
    const pcm = new Int16Array(samples.length);
    for (let i = 0; i < samples.length; i++) {
      const s = Math.max(-1, Math.min(1, samples[i]));
      pcm[i] = s < 0 ? s * 0x8000 : s * 0x7fff;
    }
    const byteRate = (sampleRate * numChannels * 16) / 8;
    const blockAlign = (numChannels * 16) / 8;
    const buffer = new ArrayBuffer(44 + pcm.byteLength);
    const view = new DataView(buffer);
    let offset = 0;
    // RIFF header
    writeString(view, offset, 'RIFF'); offset += 4;
    view.setUint32(offset, 36 + pcm.byteLength, true); offset += 4;
    writeString(view, offset, 'WAVE'); offset += 4;
    writeString(view, offset, 'fmt '); offset += 4;
    view.setUint32(offset, 16, true); offset += 4; // fmt chunk size
    view.setUint16(offset, 1, true); offset += 2; // PCM
    view.setUint16(offset, numChannels, true); offset += 2;
    view.setUint32(offset, sampleRate, true); offset += 4;
    view.setUint32(offset, byteRate, true); offset += 4;
    view.setUint16(offset, blockAlign, true); offset += 2;
    view.setUint16(offset, 16, true); offset += 2;
    writeString(view, offset, 'data'); offset += 4;
    view.setUint32(offset, pcm.byteLength, true); offset += 4;
    // PCM data
    const pcmView = new DataView(buffer, 44);
    for (let i = 0; i < pcm.length; i++) {
      pcmView.setInt16(i * 2, pcm[i], true);
    }
    return new Blob([buffer], { type: 'audio/wav' });
  }
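
  // Sanity check on the layout above: one second of mono 48 kHz audio is
  // 48,000 samples x 2 bytes = 96,000 bytes of PCM, so the resulting blob is
  // 44 (header) + 96,000 = 96,044 bytes.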

  function writeString(view: DataView, offset: number, str: string) {
    for (let i = 0; i < str.length; i++) view.setUint8(offset + i, str.charCodeAt(i));
  }

  function blobToBase64(blob: Blob): Promise<string> {
    return new Promise((resolve) => {
      const reader = new FileReader();
      reader.onloadend = () => {
        const res = (reader.result as string) || '';
        const base64 = res.split(',')[1] || '';
        resolve(base64);
      };
      reader.readAsDataURL(blob);
    });
  }
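
  // readAsDataURL yields e.g. 'data:audio/wav;base64,UklGR...'; the split above
  // drops the 'data:...;base64,' prefix so only the raw base64 payload is sent.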

  const handleFileChange = (e: React.ChangeEvent<HTMLInputElement>) => {
    const selectedFile = e.target.files?.[0];
    if (selectedFile) {
      setFile(selectedFile);
    }
  };

  const handleSubmit = async (e: React.FormEvent<HTMLFormElement>) => {
    e.preventDefault();
    if (!input.trim() || isLoading) return;
    // Only attach IMAGE_MODE when the toggle is ON. No auto intent here.
    const messageToSend = imageMode
      ? `[[IMAGE_MODE]] ${input}${safeMode ? ' [[SAFE_MODE=ON]]' : ' [[SAFE_MODE=OFF]]'}`
      : input;
    // Notify the parent when image generation starts and ends
    if (imageMode) onImageGeneratingChange?.(true);
    await onSendMessage(messageToSend, temperature, file);
    if (imageMode) onImageGeneratingChange?.(false);
    setInput('');
    setFile(null);
    if (fileInputRef.current) {
      fileInputRef.current.value = '';
    }
  };

  const handleKeyDown = (e: React.KeyboardEvent<HTMLTextAreaElement>) => {
    if (e.key === 'Enter' && !e.shiftKey) {
      e.preventDefault();
      handleSubmit(e as any);
    }
  };

  const removeFile = () => {
    setFile(null);
    if (fileInputRef.current) {
      fileInputRef.current.value = '';
    }
  };

  if (isWelcomeMode) {
    // Welcome mode - just show the input form
    return (
      <div className="space-y-4">
        {/* File attachment display */}
        {file && (
          <div className="flex items-center gap-2 p-2 bg-muted rounded-md">
            <Paperclip className="h-4 w-4" />
            <span className="text-sm flex-1 truncate">{file.name}</span>
            <Button
              size="sm"
              variant="ghost"
              onClick={removeFile}
              className="h-6 w-6 p-0"
            >
              ×
            </Button>
          </div>
        )}
        {/* Input form */}
        <form onSubmit={handleSubmit} className="space-y-2">
          <div className="relative flex gap-2 items-stretch">
            {/* Mic/Wave button (opposite side of send) */}
            <Button
              type="button"
              variant={speechMode ? 'default' : 'outline'}
              size="sm"
              onClick={toggleSpeech}
              disabled={isLoading}
              className={`shrink-0 h-auto min-h-[60px] ${speechMode ? 'bg-blue-600 text-white' : ''}`}
              title={speechMode ? (isRecording ? 'Stop & Transcribe' : 'Speech Mode On') : 'Enable Speech Mode'}
            >
              {speechMode ? (
                <div className="flex items-center gap-2">
                  <span className="text-xs">{isRecording ? 'Listening' : 'Ready'}</span>
                  <div className="flex items-end gap-[2px]">
                    {levelsRef.current.map((lvl, i) => (
                      <div key={i} className="w-[3px] bg-current" style={{ height: `${Math.max(8, 8 + lvl * 20)}px`, transition: 'height 80ms linear' }} />
                    ))}
                  </div>
                </div>
              ) : (
                <span className="text-xs">Speech</span>
              )}
            </Button>
            <input
              ref={fileInputRef}
              type="file"
              onChange={handleFileChange}
              className="hidden"
              accept=".txt,.md,.json,.py,.js,.ts,.tsx,.jsx,.csv,.xml,.html,.css"
            />
            <Button
              type="button"
              variant="outline"
              size="sm"
              onClick={() => fileInputRef.current?.click()}
              disabled={isLoading}
              className="shrink-0 h-auto min-h-[60px]"
            >
              <Paperclip className="h-4 w-4" />
            </Button>
            <div className="relative flex-1">
              <Textarea
                ref={textareaRef}
                value={input}
                onChange={(e) => setInput(e.target.value)}
                onKeyDown={handleKeyDown}
                placeholder="Type your message or use Speech..."
                className="min-h-[60px] max-h-[200px] resize-none pr-20"
                disabled={isLoading}
              />
              {/* Generate Image bullet toggle (restored) */}
              <button
                type="button"
                aria-pressed={imageMode}
                onClick={() => setImageMode(v => !v)}
                className={`absolute bottom-2 right-12 h-8 w-8 rounded-full border ${imageMode ? 'bg-emerald-500 text-white border-emerald-600' : 'bg-transparent text-foreground/80 border-border'} shadow-sm backdrop-blur-md`}
                title="Generate Image"
              >
                •
              </button>
              <Button
                type="submit"
                size="sm"
                className="absolute bottom-2 right-2 h-8 w-8 p-0"
                disabled={!input.trim() || isLoading}
              >
                <SendHorizonal className="h-4 w-4" />
              </Button>
            </div>
          </div>
          <div className="flex justify-center">
            <div className="text-xs text-muted-foreground">
              Consciousness: <span className="font-medium capitalize">{selectedConsciousnessDimension}</span>
            </div>
          </div>
        </form>
      </div>
    );
  }

  // Regular chat mode - full height with a fixed input at the bottom
  return (
    <div className={`h-full flex flex-col ${isMobile ? 'mobile-chat-container' : ''}`}>
      {/* Chat messages area - takes the remaining space */}
      <div className="flex-1 min-h-0 overflow-hidden">
        <ScrollArea ref={scrollAreaRef} className={`h-full auto-scroll ${isMobile ? 'mobile-scroll-fix' : ''}`}>
          <div className="flex flex-col gap-4 p-4 pb-8">
            {messages.length === 0 && (
              <div className="flex flex-col items-center justify-center h-full text-center py-12">
                <Bot className="h-12 w-12 text-muted-foreground mb-4" />
                <h3 className="text-lg font-medium mb-2">Welcome to Aether AI™</h3>
                <p className="text-muted-foreground max-w-md mb-4">
                  Start a conversation with our 5D consciousness AI. Use the settings buttons above to configure your experience.
                </p>
                {!isChatSelected && (
                  <Button onClick={onNewChat} variant="default">
                    <MessageSquarePlus className="h-4 w-4 mr-2" />
                    Start New Chat
                  </Button>
                )}
              </div>
            )}
            {messages.map((message, index) => (
              <ChatMessage
                key={index}
                message={message}
                onConsciousnessDimensionSelect={onConsciousnessDimensionSelect}
                selectedConsciousnessDimension={selectedConsciousnessDimension}
                onOpenEditor={onOpenEditor}
              />
            ))}
            {isLoading && !isImageGenerating && <LoadingMessage />}
            {isImageGenerating && <ImageLoadingMessage progress={imageProgress} elapsedSeconds={imageElapsed} status={imageStatus} />}
            {/* Invisible element to scroll to */}
            <div ref={messagesEndRef} />
          </div>
        </ScrollArea>
      </div>
      {/* Chat input area */}
      <div className={`p-4 flex-shrink-0 ${isMobile ? 'mobile-input-area' : ''}`}>
        {/* File attachment display */}
        {file && (
          <div className="flex items-center gap-2 p-2 mb-2 bg-muted rounded-md mx-auto max-w-2xl">
            <Paperclip className="h-4 w-4" />
            <span className="text-sm flex-1 truncate">{file.name}</span>
            <Button
              size="sm"
              variant="ghost"
              onClick={removeFile}
              className="h-6 w-6 p-0"
            >
              ×
            </Button>
          </div>
        )}
        {/* Input form */}
        <form onSubmit={handleSubmit} className="max-w-2xl mx-auto">
          <div className="relative flex gap-2 items-stretch">
            {/* Mic/Wave button (opposite side of send) */}
            <Button
              type="button"
              variant={speechMode ? 'default' : 'outline'}
              size="sm"
              onClick={toggleSpeech}
              disabled={isLoading}
              className={`shrink-0 h-auto min-h-[60px] ${speechMode ? 'bg-blue-600 text-white' : ''}`}
              title={speechMode ? (isRecording ? 'Stop & Transcribe' : 'Speech Mode On') : 'Enable Speech Mode'}
            >
              {speechMode ? (
                <div className="flex items-center gap-2">
                  <span className="text-xs">{isRecording ? 'Listening' : 'Ready'}</span>
                  <div className="flex items-end gap-[2px]">
                    {levelsRef.current.map((lvl, i) => (
                      <div key={i} className="w-[3px] bg-current" style={{ height: `${Math.max(8, 8 + lvl * 20)}px`, transition: 'height 80ms linear' }} />
                    ))}
                  </div>
                </div>
              ) : (
                <span className="text-xs">Speech</span>
              )}
            </Button>
            <input
              ref={fileInputRef}
              type="file"
              onChange={handleFileChange}
              className="hidden"
              accept=".txt,.md,.json,.py,.js,.ts,.tsx,.jsx,.csv,.xml,.html,.css"
            />
            <Button
              type="button"
              variant="outline"
              size="sm"
              onClick={() => fileInputRef.current?.click()}
              disabled={isLoading}
              className="shrink-0 h-auto min-h-[60px]"
            >
              <Paperclip className="h-4 w-4" />
            </Button>
            <div className="relative flex-1">
              <Textarea
                ref={textareaRef}
                value={input}
                onChange={(e) => setInput(e.target.value)}
                onKeyDown={handleKeyDown}
                placeholder={speechMode ? (isRecording ? 'Listening...' : 'Tap Speech to speak, or type here...') : 'Type your message...'}
                className="min-h-[60px] max-h-[200px] resize-none pr-20"
                disabled={isLoading}
              />
              {/* Generate Image bullet toggle (restored) */}
              <button
                type="button"
                aria-pressed={imageMode}
                onClick={() => setImageMode(v => !v)}
                className={`absolute bottom-2 right-12 h-8 w-8 rounded-full border ${imageMode ? 'bg-emerald-500 text-white border-emerald-600' : 'bg-transparent text-foreground/80 border-border'} shadow-sm backdrop-blur-md`}
                title="Generate Image"
              >
                •
              </button>
              <Button
                type="submit"
                size="sm"
                className="absolute bottom-2 right-2 h-8 w-8 p-0"
                disabled={!input.trim() || isLoading}
              >
                <SendHorizonal className="h-4 w-4" />
              </Button>
            </div>
          </div>
          {/* Status row with the Image Mode indicator and Safe toggle */}
          <div className="flex justify-between items-center mt-2">
            <div className="text-xs text-muted-foreground">
              Consciousness: <span className={`font-medium capitalize ${consciousnessColor}`}>{selectedConsciousnessDimension}</span>
            </div>
            <div className="text-xs text-muted-foreground flex items-center gap-4">
              <div className="flex items-center gap-1">
                <span>Image mode:</span>
                <span className={`font-medium ${imageMode ? 'text-emerald-500' : 'text-muted-foreground'}`}>{imageMode ? 'On' : 'Off'}</span>
              </div>
              {imageMode && (
                <div className="flex items-center gap-2">
                  <span>Safe:</span>
                  <button
                    type="button"
                    aria-pressed={safeMode}
                    onClick={() => setSafeMode(v => !v)}
                    className={`relative inline-flex h-5 w-10 items-center rounded-full transition-colors duration-200 ${safeMode ? 'bg-blue-600' : 'bg-gray-500/40'}`}
                    title="Toggle safety"
                  >
                    <span className={`inline-block h-4 w-4 transform rounded-full bg-white transition-transform duration-200 ${safeMode ? 'translate-x-5' : 'translate-x-1'}`} />
                  </button>
                </div>
              )}
            </div>
          </div>
        </form>
      </div>
    </div>
  );
}