UI components for voice interactions, including indicators, controls, and visualizations. The VoiceIndicator component provides visual feedback for voice states, with smooth animations powered by Motion for React.
import { VoiceIndicator } from '@cedar/voice';
import { useCedarStore } from '@cedar/core';
// Example usage: subscribe to the store's voice slice and pass it to
// VoiceIndicator so it can render the current voice state.
function MyVoiceApp() {
  const voice = useCedarStore((state) => state.voice);
  return (
    <div>
      <VoiceIndicator voiceState={voice} />
      {/* Your other components */}
    </div>
  );
}
// Listening animation - pulsing bars
// Three thin red bars stretch vertically out of phase (0.1s stagger per bar)
// to suggest live audio input; the loop repeats as long as it is rendered.
{
  [0, 1, 2].map((i) => (
    <motion.div
      key={i}
      className='w-1 h-3 bg-red-500 rounded-full'
      animate={{
        scaleY: [1, 1.5, 1],
      }}
      transition={{
        duration: 0.5,
        repeat: Infinity,
        delay: i * 0.1,
      }}
    />
  ));
}
// Speaking animation - scaling effect
// A translucent green dot grows and shrinks on a 0.8s cycle while output
// audio is playing.
<motion.div
  className='w-4 h-4'
  animate={{
    scale: [1, 1.2, 1],
  }}
  transition={{
    duration: 0.8,
    repeat: Infinity,
  }}>
  <div className='w-full h-full bg-green-500 rounded-full opacity-30' />
</motion.div>;
import React from 'react';
import { motion } from 'motion/react';
import { Mic, MicOff, Loader2 } from 'lucide-react';
import { useCedarStore } from '@cedar/core';
// Presentation-only props for VoiceButton; all behavior comes from the store.
interface VoiceButtonProps {
  size?: 'small' | 'medium' | 'large'; // diameter preset (default 'medium')
  variant?: 'primary' | 'secondary' | 'outline'; // color scheme (default 'primary')
  className?: string; // extra classes appended to the button
}
/**
 * Microphone toggle button.
 *
 * On click: requests microphone permission while the store reports 'prompt',
 * then toggles listening once permission is granted. The button is disabled
 * when permission is denied or voice is unsupported.
 *
 * Fix from review: the icon-only button had no accessible name; it now
 * exposes a state-aware `aria-label` and reflects listening via
 * `aria-pressed` (matching what the test suite queries for).
 */
export const VoiceButton: React.FC<VoiceButtonProps> = ({
  size = 'medium',
  variant = 'primary',
  className = '',
}) => {
  const voice = useCedarStore((state) => state.voice);

  const handleClick = async () => {
    if (voice.voicePermissionStatus === 'prompt') {
      await voice.requestVoicePermission();
    }
    // NOTE(review): `voice` is the render-time snapshot, so immediately
    // after a grant this check may still see 'prompt' until the next render.
    // Confirm whether the store exposes fresh state (e.g. getState()) so the
    // first click can also start listening.
    if (voice.voicePermissionStatus === 'granted') {
      voice.toggleVoice();
    }
  };

  // Tailwind presets keyed by props.
  const sizeClasses = {
    small: 'w-8 h-8',
    medium: 'w-12 h-12',
    large: 'w-16 h-16',
  };
  const variantClasses = {
    primary: 'bg-blue-500 hover:bg-blue-600 text-white',
    secondary: 'bg-gray-500 hover:bg-gray-600 text-white',
    outline: 'border-2 border-blue-500 text-blue-500 hover:bg-blue-50',
  };

  // Inert when permission can never be granted from here.
  const isDisabled =
    voice.voicePermissionStatus === 'denied' ||
    voice.voicePermissionStatus === 'not-supported';

  // Accessible name describes the action the click will perform (or why the
  // button is unavailable).
  const ariaLabel = voice.isListening
    ? 'Stop listening'
    : voice.voicePermissionStatus === 'denied'
    ? 'Microphone access denied'
    : voice.voicePermissionStatus === 'not-supported'
    ? 'Voice not supported'
    : 'Start listening';

  return (
    <motion.button
      onClick={handleClick}
      disabled={isDisabled}
      aria-label={ariaLabel}
      aria-pressed={voice.isListening}
      className={`
        ${sizeClasses[size]}
        ${variantClasses[variant]}
        ${className}
        rounded-full flex items-center justify-center
        transition-colors duration-200
        disabled:opacity-50 disabled:cursor-not-allowed
        focus:outline-none focus:ring-2 focus:ring-blue-500 focus:ring-offset-2
      `}
      whileHover={!isDisabled ? { scale: 1.05 } : {}}
      whileTap={!isDisabled ? { scale: 0.95 } : {}}
      animate={{
        // Turn red while actively listening; otherwise fall back to the
        // variant's background class.
        backgroundColor: voice.isListening ? '#ef4444' : undefined,
      }}
      style={{ willChange: 'transform' }}>
      {voice.voicePermissionStatus === 'prompt' && (
        <Loader2 className='w-5 h-5 animate-spin' />
      )}
      {voice.voicePermissionStatus === 'granted' && (
        <>
          {voice.isListening ? (
            <motion.div
              initial={{ scale: 0 }}
              animate={{ scale: 1 }}
              transition={{ type: 'spring', stiffness: 500, damping: 30 }}>
              <MicOff className='w-5 h-5' />
            </motion.div>
          ) : (
            <motion.div
              initial={{ scale: 0 }}
              animate={{ scale: 1 }}
              transition={{ type: 'spring', stiffness: 500, damping: 30 }}>
              <Mic className='w-5 h-5' />
            </motion.div>
          )}
        </>
      )}
      {voice.voicePermissionStatus === 'denied' && (
        <MicOff className='w-5 h-5 opacity-50' />
      )}
    </motion.button>
  );
};
import React, { useEffect, useRef, useState } from 'react';
import { motion } from 'motion/react';
import { useCedarStore } from '@cedar/core';
// Presentation props for VoiceWaveform.
interface VoiceWaveformProps {
  barCount?: number; // number of frequency bars to render (default 20)
  height?: number; // visualizer height in pixels (default 40)
  color?: string; // bar fill color (default blue-500)
  className?: string; // extra classes on the container
}
/**
 * Real-time frequency-bar visualizer for the active microphone stream.
 *
 * While listening, taps the store's AudioContext/MediaStream through an
 * AnalyserNode and samples the FFT into `barCount` normalized amplitudes on
 * each animation frame. Bars flatten to minimum height when idle.
 *
 * Fixes from review: the MediaStreamSource/Analyser pair created on every
 * listen session was never disconnected (audio-graph leak across sessions),
 * and the rAF loop kept polling a stale `voice.isListening` closure; both
 * are now handled by the effect cleanup.
 */
export const VoiceWaveform: React.FC<VoiceWaveformProps> = ({
  barCount = 20,
  height = 40,
  color = '#3b82f6',
  className = '',
}) => {
  const voice = useCedarStore((state) => state.voice);
  const [audioData, setAudioData] = useState<number[]>([]);
  const analyserRef = useRef<AnalyserNode | null>(null);
  const animationRef = useRef<number>();

  useEffect(() => {
    // Effect-scoped so cleanup can disconnect exactly what this run created.
    let source: MediaStreamAudioSourceNode | null = null;
    let cancelled = false;

    if (voice.isListening && voice.audioContext && voice.audioStream) {
      // Build the analysis chain: mic stream -> source -> analyser.
      const analyser = voice.audioContext.createAnalyser();
      analyser.fftSize = 256;
      source = voice.audioContext.createMediaStreamSource(voice.audioStream);
      source.connect(analyser);

      const bufferLength = analyser.frequencyBinCount;
      const dataArray = new Uint8Array(bufferLength);
      analyserRef.current = analyser;

      const updateWaveform = () => {
        // Stop scheduling frames once this effect has been cleaned up.
        if (cancelled) return;
        analyser.getByteFrequencyData(dataArray);

        // Downsample the FFT bins to one amplitude per bar, normalized 0-1.
        const step = Math.floor(bufferLength / barCount);
        const samples: number[] = [];
        for (let i = 0; i < barCount; i++) {
          samples.push((dataArray[i * step] || 0) / 255);
        }
        setAudioData(samples);
        animationRef.current = requestAnimationFrame(updateWaveform);
      };
      updateWaveform();
    } else {
      // Flatten all bars when not listening.
      setAudioData(new Array(barCount).fill(0));
    }

    return () => {
      cancelled = true;
      if (animationRef.current) {
        cancelAnimationFrame(animationRef.current);
      }
      // Detach this session's nodes from the audio graph.
      source?.disconnect();
      analyserRef.current = null;
    };
  }, [voice.isListening, voice.audioContext, voice.audioStream, barCount]);

  return (
    <div
      className={`flex items-end justify-center gap-1 ${className}`}
      style={{ height: `${height}px` }}>
      {Array.from({ length: barCount }).map((_, index) => {
        const amplitude = audioData[index] || 0;
        // Keep a 2px floor so idle bars remain visible.
        const barHeight = Math.max(2, amplitude * height);
        return (
          <motion.div
            key={index}
            className='w-1 rounded-full'
            style={{
              backgroundColor: color,
              willChange: 'height',
            }}
            animate={{
              height: barHeight,
            }}
            transition={{
              duration: 0.1,
              ease: 'easeOut',
            }}
          />
        );
      })}
    </div>
  );
};
import React from 'react';
import { motion, AnimatePresence } from 'motion/react';
import {
Mic,
MicOff,
Volume2,
VolumeX,
Wifi,
WifiOff,
AlertCircle,
CheckCircle,
} from 'lucide-react';
import { useCedarStore } from '@cedar/core';
/**
 * Read-only status dashboard for the voice subsystem: microphone permission,
 * service connection, current activity (listening / speaking / idle), the
 * active voice settings, and any error reported by the store.
 */
export const VoiceStatusPanel: React.FC = () => {
  const voice = useCedarStore((state) => state.voice);

  // Map the permission state onto an { icon, color, text } descriptor.
  const getPermissionStatus = () => {
    switch (voice.voicePermissionStatus) {
      case 'granted':
        return {
          icon: CheckCircle,
          color: 'text-green-500',
          text: 'Microphone access granted',
        };
      case 'denied':
        return {
          icon: MicOff,
          color: 'text-red-500',
          text: 'Microphone access denied',
        };
      case 'not-supported':
        return {
          icon: AlertCircle,
          color: 'text-orange-500',
          text: 'Voice not supported',
        };
      default:
        return {
          icon: Mic,
          color: 'text-gray-500',
          text: 'Microphone permission needed',
        };
    }
  };

  const getConnectionStatus = () => {
    // Assuming WebSocket connection status is available
    // NOTE(review): "connected" is inferred from having an endpoint and no
    // error — confirm whether the store exposes a real connection flag.
    const isConnected = voice.voiceEndpoint && !voice.voiceError;
    return {
      icon: isConnected ? Wifi : WifiOff,
      color: isConnected ? 'text-green-500' : 'text-red-500',
      text: isConnected
        ? 'Connected to voice service'
        : 'Disconnected from voice service',
    };
  };

  const permissionStatus = getPermissionStatus();
  const connectionStatus = getConnectionStatus();

  return (
    <motion.div
      initial={{ opacity: 0, y: 20 }}
      animate={{ opacity: 1, y: 0 }}
      className='bg-white rounded-lg shadow-lg p-6 max-w-md mx-auto'>
      <h3 className='text-lg font-semibold mb-4'>Voice Status</h3>
      {/* Permission Status */}
      <div className='flex items-center gap-3 mb-4'>
        <permissionStatus.icon
          className={`w-5 h-5 ${permissionStatus.color}`}
        />
        <span className='text-sm'>{permissionStatus.text}</span>
      </div>
      {/* Connection Status */}
      <div className='flex items-center gap-3 mb-4'>
        <connectionStatus.icon
          className={`w-5 h-5 ${connectionStatus.color}`}
        />
        <span className='text-sm'>{connectionStatus.text}</span>
      </div>
      {/* Current State */}
      <div className='flex items-center gap-3 mb-4'>
        {voice.isListening && (
          <>
            <Mic className='w-5 h-5 text-red-500' />
            <span className='text-sm'>Listening...</span>
            {/* Staggered pulsing bars while capturing audio */}
            <motion.div
              className='ml-auto flex gap-1'
              initial={{ opacity: 0 }}
              animate={{ opacity: 1 }}>
              {[0, 1, 2].map((i) => (
                <motion.div
                  key={i}
                  className='w-1 h-3 bg-red-500 rounded-full'
                  animate={{
                    scaleY: [1, 1.5, 1],
                  }}
                  transition={{
                    duration: 0.5,
                    repeat: Infinity,
                    delay: i * 0.1,
                  }}
                />
              ))}
            </motion.div>
          </>
        )}
        {voice.isSpeaking && (
          <>
            <Volume2 className='w-5 h-5 text-green-500' />
            <span className='text-sm'>Speaking...</span>
            {/* Soft scaling dot while output audio plays */}
            <motion.div
              className='ml-auto w-4 h-4'
              animate={{
                scale: [1, 1.2, 1],
              }}
              transition={{
                duration: 0.8,
                repeat: Infinity,
              }}>
              <div className='w-full h-full bg-green-500 rounded-full opacity-30' />
            </motion.div>
          </>
        )}
        {!voice.isListening && !voice.isSpeaking && (
          <>
            <VolumeX className='w-5 h-5 text-gray-400' />
            <span className='text-sm text-gray-500'>Idle</span>
          </>
        )}
      </div>
      {/* Voice Settings */}
      <div className='border-t pt-4'>
        <h4 className='text-sm font-medium mb-2'>Settings</h4>
        <div className='text-xs text-gray-600 space-y-1'>
          <div>Language: {voice.voiceSettings.language}</div>
          <div>Voice: {voice.voiceSettings.voiceId || 'Default'}</div>
          <div>Rate: {voice.voiceSettings.rate || 1.0}x</div>
          <div>
            Auto-add to messages:{' '}
            {voice.voiceSettings.autoAddToMessages ? 'Yes' : 'No'}
          </div>
        </div>
      </div>
      {/* Error Display */}
      <AnimatePresence>
        {voice.voiceError && (
          <motion.div
            initial={{ opacity: 0, height: 0 }}
            animate={{ opacity: 1, height: 'auto' }}
            exit={{ opacity: 0, height: 0 }}
            className='mt-4 p-3 bg-red-50 border border-red-200 rounded-md'>
            <div className='flex items-center gap-2'>
              <AlertCircle className='w-4 h-4 text-red-500' />
              <span className='text-sm text-red-700'>{voice.voiceError}</span>
            </div>
          </motion.div>
        )}
      </AnimatePresence>
    </motion.div>
  );
};
import React from 'react';
import { motion } from 'motion/react';
import { Settings, Volume2, Mic } from 'lucide-react';
import { useCedarStore } from '@cedar/core';
/**
 * Settings panel for the voice subsystem. Reads current values from the
 * store's voice slice and writes changes back via
 * voice.updateVoiceSettings(partial).
 *
 * Fix from review: numeric defaults now use `??` instead of `||`, so a
 * legitimate volume of 0 (muted) is displayed and preserved rather than
 * being clobbered to the 1.0 default (`0 || 1.0` evaluates to 1.0).
 */
export const VoiceSettings: React.FC = () => {
  const voice = useCedarStore((state) => state.voice);

  // Built-in TTS voice presets.
  const voiceOptions = [
    { value: 'alloy', label: 'Alloy' },
    { value: 'echo', label: 'Echo' },
    { value: 'fable', label: 'Fable' },
    { value: 'onyx', label: 'Onyx' },
    { value: 'nova', label: 'Nova' },
    { value: 'shimmer', label: 'Shimmer' },
  ];

  // Supported recognition/synthesis locales.
  const languageOptions = [
    { value: 'en-US', label: 'English (US)' },
    { value: 'en-GB', label: 'English (UK)' },
    { value: 'es-ES', label: 'Spanish' },
    { value: 'fr-FR', label: 'French' },
    { value: 'de-DE', label: 'German' },
    { value: 'it-IT', label: 'Italian' },
    { value: 'pt-BR', label: 'Portuguese (Brazil)' },
    { value: 'ja-JP', label: 'Japanese' },
    { value: 'ko-KR', label: 'Korean' },
    { value: 'zh-CN', label: 'Chinese (Simplified)' },
  ];

  // Defaults apply only when the setting is null/undefined — not when it is 0.
  const rate = voice.voiceSettings.rate ?? 1.0;
  const volume = voice.voiceSettings.volume ?? 1.0;

  return (
    <motion.div
      initial={{ opacity: 0, scale: 0.95 }}
      animate={{ opacity: 1, scale: 1 }}
      className='bg-white rounded-lg shadow-lg p-6 max-w-md mx-auto'>
      <div className='flex items-center gap-2 mb-6'>
        <Settings className='w-5 h-5' />
        <h3 className='text-lg font-semibold'>Voice Settings</h3>
      </div>
      <div className='space-y-6'>
        {/* Language Selection */}
        <div>
          <label className='block text-sm font-medium text-gray-700 mb-2'>
            <Mic className='w-4 h-4 inline mr-1' />
            Language
          </label>
          <select
            value={voice.voiceSettings.language}
            onChange={(e) =>
              voice.updateVoiceSettings({ language: e.target.value })
            }
            className='w-full p-2 border border-gray-300 rounded-md focus:ring-2 focus:ring-blue-500 focus:border-transparent'>
            {languageOptions.map((option) => (
              <option key={option.value} value={option.value}>
                {option.label}
              </option>
            ))}
          </select>
        </div>
        {/* Voice Selection */}
        <div>
          <label className='block text-sm font-medium text-gray-700 mb-2'>
            <Volume2 className='w-4 h-4 inline mr-1' />
            Voice
          </label>
          <select
            value={voice.voiceSettings.voiceId || 'alloy'}
            onChange={(e) =>
              voice.updateVoiceSettings({ voiceId: e.target.value })
            }
            className='w-full p-2 border border-gray-300 rounded-md focus:ring-2 focus:ring-blue-500 focus:border-transparent'>
            {voiceOptions.map((option) => (
              <option key={option.value} value={option.value}>
                {option.label}
              </option>
            ))}
          </select>
        </div>
        {/* Speech Rate */}
        <div>
          <label className='block text-sm font-medium text-gray-700 mb-2'>
            Speech Rate: {rate}x
          </label>
          <input
            type='range'
            min='0.5'
            max='2.0'
            step='0.1'
            value={rate}
            onChange={(e) =>
              voice.updateVoiceSettings({ rate: parseFloat(e.target.value) })
            }
            className='w-full h-2 bg-gray-200 rounded-lg appearance-none cursor-pointer'
          />
          <div className='flex justify-between text-xs text-gray-500 mt-1'>
            <span>0.5x</span>
            <span>1.0x</span>
            <span>2.0x</span>
          </div>
        </div>
        {/* Volume */}
        <div>
          <label className='block text-sm font-medium text-gray-700 mb-2'>
            Volume: {Math.round(volume * 100)}%
          </label>
          <input
            type='range'
            min='0'
            max='1'
            step='0.1'
            value={volume}
            onChange={(e) =>
              voice.updateVoiceSettings({ volume: parseFloat(e.target.value) })
            }
            className='w-full h-2 bg-gray-200 rounded-lg appearance-none cursor-pointer'
          />
        </div>
        {/* Auto-add to Messages */}
        <div className='flex items-center justify-between'>
          <label className='text-sm font-medium text-gray-700'>
            Auto-add to Messages
          </label>
          <motion.button
            onClick={() =>
              voice.updateVoiceSettings({
                autoAddToMessages: !voice.voiceSettings.autoAddToMessages,
              })
            }
            className={`
              relative inline-flex h-6 w-11 items-center rounded-full transition-colors
              ${
                voice.voiceSettings.autoAddToMessages
                  ? 'bg-blue-600'
                  : 'bg-gray-200'
              }
            `}
            whileTap={{ scale: 0.95 }}>
            {/* Knob slides right (x: 24) when enabled, left (x: 4) when off */}
            <motion.span
              className='inline-block h-4 w-4 transform rounded-full bg-white shadow-lg'
              animate={{
                x: voice.voiceSettings.autoAddToMessages ? 24 : 4,
              }}
              transition={{ type: 'spring', stiffness: 500, damping: 30 }}
            />
          </motion.button>
        </div>
        {/* Browser TTS */}
        <div className='flex items-center justify-between'>
          <label className='text-sm font-medium text-gray-700'>
            Use Browser TTS
          </label>
          <motion.button
            onClick={() =>
              voice.updateVoiceSettings({
                useBrowserTTS: !voice.voiceSettings.useBrowserTTS,
              })
            }
            className={`
              relative inline-flex h-6 w-11 items-center rounded-full transition-colors
              ${
                voice.voiceSettings.useBrowserTTS
                  ? 'bg-blue-600'
                  : 'bg-gray-200'
              }
            `}
            whileTap={{ scale: 0.95 }}>
            <motion.span
              className='inline-block h-4 w-4 transform rounded-full bg-white shadow-lg'
              animate={{
                x: voice.voiceSettings.useBrowserTTS ? 24 : 4,
              }}
              transition={{ type: 'spring', stiffness: 500, damping: 30 }}
            />
          </motion.button>
        </div>
      </div>
    </motion.div>
  );
};
import React, { useState } from 'react';
import { motion, AnimatePresence } from 'motion/react';
import { Settings } from 'lucide-react';
import {
VoiceButton,
VoiceWaveform,
VoiceStatusPanel,
VoiceSettings,
} from './VoiceComponents';
/**
 * Top-level layout composing the voice UI: the main microphone button, the
 * live waveform, a status panel, and a collapsible settings panel.
 */
export const VoiceInterface: React.FC = () => {
  const [settingsOpen, setSettingsOpen] = useState(false);

  // Functional update avoids depending on the captured state value.
  const toggleSettings = () => setSettingsOpen((open) => !open);

  return (
    <div className='voice-interface'>
      {/* Main Voice Controls */}
      <div className='flex flex-col items-center gap-6 p-6'>
        {/* Voice Button */}
        <VoiceButton size='large' />
        {/* Waveform Visualizer */}
        <VoiceWaveform className='w-64' />
        {/* Settings Toggle */}
        <motion.button
          onClick={toggleSettings}
          className='flex items-center gap-2 text-gray-600 hover:text-gray-800 transition-colors'
          whileHover={{ scale: 1.05 }}
          whileTap={{ scale: 0.95 }}>
          <Settings className='w-4 h-4' />
          <span className='text-sm'>Settings</span>
        </motion.button>
      </div>
      {/* Status Panel */}
      <div className='mb-6'>
        <VoiceStatusPanel />
      </div>
      {/* Settings Panel — height-animated collapse/expand */}
      <AnimatePresence>
        {settingsOpen && (
          <motion.div
            initial={{ opacity: 0, height: 0 }}
            animate={{ opacity: 1, height: 'auto' }}
            exit={{ opacity: 0, height: 0 }}
            className='overflow-hidden'>
            <VoiceSettings />
          </motion.div>
        )}
      </AnimatePresence>
    </div>
  );
};
/* Voice button states */
.voice-button {
  @apply transition-all duration-200 ease-out;
}
/* Slight grow on hover for affordance */
.voice-button:hover {
  @apply transform scale-105;
}
/* Red glow plus opacity pulse while capturing audio */
.voice-button.listening {
  @apply bg-red-500 shadow-lg shadow-red-500/25;
  animation: pulse 2s cubic-bezier(0.4, 0, 0.6, 1) infinite;
}
/* Green glow while output audio is playing */
.voice-button.speaking {
  @apply bg-green-500 shadow-lg shadow-green-500/25;
}
/* Voice indicator animations */
.voice-indicator {
  @apply flex items-center gap-2 justify-center;
}
.voice-indicator .listening-bars {
  @apply flex gap-1;
}
/* CSS-only counterpart of the Motion speaking-pulse animation */
.voice-indicator .speaking-pulse {
  @apply w-4 h-4 bg-green-500 rounded-full opacity-30;
  animation: speaking-pulse 0.8s ease-in-out infinite;
}
/* Waveform visualizer */
.voice-waveform {
  @apply flex items-end justify-center gap-1;
}
.voice-waveform .bar {
  @apply w-1 rounded-full transition-all duration-100 ease-out;
  min-height: 2px; /* keep bars visible at zero amplitude */
}
/* Keyframes */
/* Gentle opacity pulse used by .voice-button.listening */
@keyframes pulse {
  0%,
  100% {
    opacity: 1;
  }
  50% {
    opacity: 0.8;
  }
}
/* Scale pulse used by .speaking-pulse */
@keyframes speaking-pulse {
  0%,
  100% {
    transform: scale(1);
  }
  50% {
    transform: scale(1.2);
  }
}
// Enhanced VoiceButton with accessibility
/**
 * Accessible variant of VoiceButton: exposes a state-aware aria-label and
 * reflects the listening state via aria-pressed.
 *
 * Fixes from review: the original referenced an undefined `handleClick`
 * (ReferenceError on activation) and spread non-DOM props (`size`,
 * `variant`) straight onto the element; it also set redundant
 * role='button'/tabIndex on a native button.
 */
export const AccessibleVoiceButton: React.FC<VoiceButtonProps> = ({
  className = '',
}) => {
  const voice = useCedarStore((state) => state.voice);

  // Same click contract as VoiceButton: request permission first, then toggle.
  const handleClick = async () => {
    if (voice.voicePermissionStatus === 'prompt') {
      await voice.requestVoicePermission();
    }
    if (voice.voicePermissionStatus === 'granted') {
      voice.toggleVoice();
    }
  };

  // Describe the action the button will perform (or why it is unavailable).
  const getAriaLabel = () => {
    if (voice.isListening) return 'Stop listening';
    if (voice.voicePermissionStatus === 'denied')
      return 'Microphone access denied';
    if (voice.voicePermissionStatus === 'not-supported')
      return 'Voice not supported';
    return 'Start listening';
  };

  return (
    // A native <button> already has role 'button', is focusable, and fires
    // click on Enter/Space — no explicit role/tabIndex/onKeyDown needed.
    <motion.button
      className={className}
      onClick={handleClick}
      aria-label={getAriaLabel()}
      aria-pressed={voice.isListening}>
      {/* Button content */}
    </motion.button>
  );
};
// Respect user's motion preferences (OS-level accessibility setting).
// Guard the window access so this module is safe to evaluate during SSR,
// where `window` is undefined and matchMedia would throw.
const shouldReduceMotion =
  typeof window !== 'undefined' &&
  window.matchMedia('(prefers-reduced-motion: reduce)').matches;

// Skip the looping scale animation entirely when reduced motion is requested.
const animationProps = shouldReduceMotion
  ? {}
  : {
      animate: { scale: [1, 1.2, 1] },
      transition: { duration: 0.8, repeat: Infinity },
    };

<motion.div {...animationProps}>{/* Content */}</motion.div>;
import { render, screen, fireEvent, waitFor } from '@testing-library/react';
import { VoiceButton } from './VoiceButton';
import { useCedarStore } from '@cedar/core';
// Mock the store
// Mock the store. The real hook is called with a selector —
// useCedarStore((state) => state.voice) — so the mock must apply that
// selector to a fake state tree. The original returned the voice slice
// regardless of the selector, which only worked by accident.
jest.mock('@cedar/core', () => ({
  useCedarStore: jest.fn(),
}));

describe('VoiceButton', () => {
  const mockVoice = {
    isListening: false,
    voicePermissionStatus: 'granted',
    toggleVoice: jest.fn(),
    requestVoicePermission: jest.fn(),
  };

  // Install the mocked store state, optionally overriding voice fields.
  const setVoiceState = (overrides: Partial<typeof mockVoice> = {}) => {
    const voice = { ...mockVoice, ...overrides };
    (useCedarStore as jest.Mock).mockImplementation(
      (selector?: (state: { voice: typeof voice }) => unknown) =>
        selector ? selector({ voice }) : { voice }
    );
  };

  beforeEach(() => {
    // Reset call counts so assertions don't leak across test cases.
    jest.clearAllMocks();
    setVoiceState();
  });

  it('renders correctly', () => {
    render(<VoiceButton />);
    expect(screen.getByRole('button')).toBeInTheDocument();
  });

  it('calls toggleVoice when clicked', async () => {
    render(<VoiceButton />);
    fireEvent.click(screen.getByRole('button'));
    await waitFor(() => {
      expect(mockVoice.toggleVoice).toHaveBeenCalled();
    });
  });

  it('shows correct state when listening', () => {
    // NOTE(review): relies on VoiceButton exposing aria-label
    // 'Stop listening' while active — confirm the component sets it.
    setVoiceState({ isListening: true });
    render(<VoiceButton />);
    expect(screen.getByLabelText('Stop listening')).toBeInTheDocument();
  });
});