Press n or j to go to the next uncovered block, b, p or k for the previous block.
| 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 | 1x 21x 21x 21x 21x 21x 6x 6x 1x 1x 5x 5x 5x 5x 3x 3x 6x 2x 2x 2x 2x 2x 2x 2x 2x 2x 1x 1x 2x 2x 2x 5x 5x 21x 3x 3x 1x 2x 2x 2x 21x 1x 1x | // src/pages/VoiceLabPage.tsx
import React, { useState } from 'react';
import { generateSpeechFromText, startVoiceSession } from '../services/aiApiClient';
import { logger } from '../services/logger.client';
import { notifyError } from '../services/notificationService';
import { LoadingSpinner } from '../components/LoadingSpinner';
import { SpeakerWaveIcon } from '../components/icons/SpeakerWaveIcon';
import { MicrophoneIcon } from '../components/icons/MicrophoneIcon';
/**
 * Admin-facing lab page for exercising voice AI features.
 *
 * Two sections:
 *  1. Text-to-speech: sends free text to `generateSpeechFromText`, expects a
 *     base64-encoded MPEG payload in the JSON response body, and plays it via
 *     an in-memory `Audio` element (kept in state so it can be replayed).
 *  2. Real-time voice session: a deliberate stub — `startVoiceSession` throws
 *     until the backend WebSocket proxy exists; the button surfaces that error.
 */
export const VoiceLabPage: React.FC = () => {
  const [textToSpeak, setTextToSpeak] = useState(
    'Hello! This is a test of the text-to-speech generation.',
  );
  const [isGeneratingSpeech, setIsGeneratingSpeech] = useState(false);
  // Last successfully generated audio element; presence toggles the Replay button.
  const [audioPlayer, setAudioPlayer] = useState<HTMLAudioElement | null>(null);
  // Debug log for rendering
  logger.debug({ hasAudioPlayer: !!audioPlayer }, '[VoiceLabPage] Render');

  /**
   * Generates speech for the current textarea contents and plays it.
   * Shows a spinner while in flight; surfaces failures via `notifyError`.
   */
  const handleGenerateSpeech = async () => {
    logger.debug('[VoiceLabPage] handleGenerateSpeech triggered');
    if (!textToSpeak.trim()) {
      notifyError('Please enter some text to generate speech.');
      return;
    }
    setIsGeneratingSpeech(true);
    try {
      logger.debug('[VoiceLabPage] Calling generateSpeechFromText');
      const response = await generateSpeechFromText(textToSpeak);
      // The endpoint is expected to return a bare base64 string as the JSON body.
      const base64Audio: unknown = await response.json();
      logger.debug(
        { audioLength: typeof base64Audio === 'string' ? base64Audio.length : undefined },
        '[VoiceLabPage] Response JSON received',
      );
      // Narrow before use: anything other than a non-empty string would produce
      // a broken data: URI, so route it to the same "no audio" error path.
      if (typeof base64Audio === 'string' && base64Audio) {
        const audioSrc = `data:audio/mpeg;base64,${base64Audio}`;
        logger.debug('[VoiceLabPage] Creating new Audio()');
        const audio = new Audio(audioSrc);
        logger.debug('[VoiceLabPage] Audio created');
        logger.debug('[VoiceLabPage] Calling setAudioPlayer');
        setAudioPlayer(audio);
        logger.debug('[VoiceLabPage] Calling audio.play()');
        await audio.play();
        logger.debug('[VoiceLabPage] audio.play() resolved');
      } else {
        logger.warn('[VoiceLabPage] base64Audio was falsy');
        notifyError('The AI did not return any audio data.');
      }
    } catch (error) {
      logger.error({ err: error }, '[VoiceLabPage] Failed to generate speech');
      const errorMessage = error instanceof Error ? error.message : 'An unknown error occurred.';
      notifyError(`Speech generation failed: ${errorMessage}`);
    } finally {
      logger.debug('[VoiceLabPage] finally block - setting isGeneratingSpeech false');
      setIsGeneratingSpeech(false);
    }
  };

  /**
   * Attempts to open a real-time voice session. `startVoiceSession` is
   * currently a stub that throws; the catch block reports that to the user.
   */
  const handleStartVoiceSession = () => {
    try {
      // This function is currently a stub and will throw an error.
      // This is the placeholder for the future real-time voice implementation.
      startVoiceSession({
        onmessage: (message) => {
          // Pass the payload as a merge object — this logger follows the
          // `(mergeObject, message)` convention used throughout this file;
          // a trailing positional arg would be dropped from the log output.
          logger.info({ message }, 'Received voice session message');
        },
      });
    } catch (error) {
      const errorMessage = error instanceof Error ? error.message : 'An unknown error occurred.';
      logger.error({ err: error }, 'Failed to start voice session:');
      notifyError(`Could not start voice session: ${errorMessage}`);
    }
  };

  /** Replays the last generated audio; play() can reject (e.g. autoplay policy). */
  const handleReplay = () => {
    if (!audioPlayer) return;
    void audioPlayer.play().catch((err: unknown) => {
      logger.error({ err }, '[VoiceLabPage] Replay failed');
      const errorMessage = err instanceof Error ? err.message : 'An unknown error occurred.';
      notifyError(`Replay failed: ${errorMessage}`);
    });
  };

  return (
    <div className="p-4 sm:p-6 lg:p-8 bg-gray-50 dark:bg-gray-900 min-h-screen">
      <div className="max-w-4xl mx-auto">
        <h1 className="text-3xl font-bold text-gray-900 dark:text-white mb-4">Admin Voice Lab</h1>
        <p className="text-gray-600 dark:text-gray-400 mb-8">
          This page is for testing and developing voice-related AI features.
        </p>
        {/* Text-to-Speech Section */}
        <div className="bg-white dark:bg-gray-800 p-6 rounded-lg shadow-md mb-8">
          <h2 className="text-xl font-semibold mb-4 flex items-center">
            <SpeakerWaveIcon className="w-6 h-6 mr-3 text-brand-primary" />
            Text-to-Speech Generation
          </h2>
          <div className="space-y-4">
            <div>
              <label
                htmlFor="text-to-speak"
                className="block text-sm font-medium text-gray-700 dark:text-gray-300 mb-1"
              >
                Text to Generate
              </label>
              <textarea
                id="text-to-speak"
                rows={3}
                className="w-full p-2 border border-gray-300 dark:border-gray-600 rounded-md dark:bg-gray-700"
                value={textToSpeak}
                onChange={(e) => setTextToSpeak(e.target.value)}
              />
            </div>
            <div className="flex items-center space-x-4">
              <button
                onClick={handleGenerateSpeech}
                disabled={isGeneratingSpeech}
                className="bg-brand-secondary hover:bg-brand-dark text-white font-bold py-2 px-4 rounded-lg flex items-center justify-center disabled:bg-gray-400"
              >
                {isGeneratingSpeech ? <LoadingSpinner /> : 'Generate & Play'}
              </button>
              {audioPlayer && (
                <button
                  data-testid="replay-button"
                  onClick={handleReplay}
                  className="bg-gray-200 dark:bg-gray-700 hover:bg-gray-300 dark:hover:bg-gray-600 font-bold py-2 px-4 rounded-lg"
                >
                  Replay
                </button>
              )}
            </div>
          </div>
        </div>
        {/* Real-time Voice Session Section */}
        <div className="bg-white dark:bg-gray-800 p-6 rounded-lg shadow-md border-2 border-dashed border-yellow-400 dark:border-yellow-500">
          <h2 className="text-xl font-semibold mb-4 flex items-center">
            <MicrophoneIcon className="w-6 h-6 mr-3 text-yellow-500" />
            Real-time Voice Session (Future Feature)
          </h2>
          <p className="text-gray-600 dark:text-gray-400 mb-4">
            This feature will enable real-time, two-way voice conversations. It requires a backend
            WebSocket proxy to be implemented.
          </p>
          <button
            onClick={handleStartVoiceSession}
            className="bg-yellow-500 hover:bg-yellow-600 text-white font-bold py-2 px-4 rounded-lg"
          >
            Attempt to Start Session (Will Error)
          </button>
        </div>
      </div>
    </div>
  );
};
|