Skip to content

Instantly share code, notes, and snippets.

@pirate
Created December 24, 2024 09:11
Show Gist options
  • Save pirate/cfb797b6a8451b83b1d107362280e461 to your computer and use it in GitHub Desktop.
A very simple dumb v0 app that lets you chat with your cat (listens for cat meows using browser microphone and then lets you type meows back).
'use client'
import { useState, useEffect, useRef } from 'react'
import { Button } from "@/components/ui/button"
import { Input } from "@/components/ui/input"
import { Card, CardContent, CardHeader, CardTitle } from "@/components/ui/card"
/** One entry in the chat transcript: who spoke, and the text of the bubble. */
interface Message {
  sender: 'human' | 'cat'
  content: string
}
export default function Component() {
// Chat transcript and the human's in-progress input text.
const [messages, setMessages] = useState<Message[]>([])
const [input, setInput] = useState('')
// Turn state: the cat and the human alternate; timeLeft counts down each turn from 10s.
const [isCatTurn, setIsCatTurn] = useState(true)
const [timeLeft, setTimeLeft] = useState(10)
// Browser speech/audio objects live in refs so they persist across re-renders.
const recognitionRef = useRef<SpeechRecognition | null>(null)
const synthRef = useRef<SpeechSynthesis | null>(null)
// Canvas + Web Audio analyser pipeline used to draw the live mic waveform.
const canvasRef = useRef<HTMLCanvasElement>(null)
const audioContextRef = useRef<AudioContext | null>(null)
const analyserRef = useRef<AnalyserNode | null>(null)
const dataArrayRef = useRef<Uint8Array | null>(null)
// Handle of the current requestAnimationFrame, so it can be cancelled on unmount.
const animationFrameRef = useRef<number | null>(null)
// Timestamp of the last auto-detected meow — used to rate-limit detection.
const lastMeowTimeRef = useRef<number>(0)
// Mount-time setup: speech recognition, speech synthesis, and the
// microphone -> analyser audio graph used by the waveform/meow detector.
useEffect(() => {
  if (typeof window !== 'undefined') {
    // SpeechRecognition is vendor-prefixed in Chrome and entirely absent in
    // some browsers (e.g. Firefox). The original called
    // `new (window.SpeechRecognition || window.webkitSpeechRecognition)()`
    // unconditionally, which throws when neither exists and aborts the rest
    // of this effect (including the audio setup below). Guard it instead.
    const SpeechRecognitionImpl = window.SpeechRecognition || window.webkitSpeechRecognition
    if (SpeechRecognitionImpl) {
      recognitionRef.current = new SpeechRecognitionImpl()
      recognitionRef.current.lang = 'en-US'
      recognitionRef.current.continuous = true
      recognitionRef.current.interimResults = false
    }
    synthRef.current = window.speechSynthesis
    // Set up audio context and analyser (time-domain data for the waveform).
    audioContextRef.current = new (window.AudioContext || window.webkitAudioContext)()
    analyserRef.current = audioContextRef.current.createAnalyser()
    analyserRef.current.fftSize = 2048
    dataArrayRef.current = new Uint8Array(analyserRef.current.frequencyBinCount)
    // Request microphone access and feed the stream into the analyser.
    navigator.mediaDevices.getUserMedia({ audio: true })
      .then(stream => {
        const source = audioContextRef.current!.createMediaStreamSource(stream)
        source.connect(analyserRef.current!)
      })
      .catch(err => console.error('Error accessing microphone:', err))
  }
  // Unmount: stop listening, halt the draw loop, release the audio device.
  return () => {
    recognitionRef.current?.stop()
    if (animationFrameRef.current) {
      cancelAnimationFrame(animationFrameRef.current)
    }
    audioContextRef.current?.close()
  }
}, [])
// One-second countdown: when the timer hits 1 it flips whose turn it is
// and resets to 10; otherwise it just ticks down.
useEffect(() => {
  const ticker = setInterval(() => {
    setTimeLeft((remaining) => {
      if (remaining !== 1) return remaining - 1
      setIsCatTurn((turn) => !turn)
      return 10
    })
  }, 1000)
  return () => clearInterval(ticker)
}, [])
useEffect(() => {
if (isCatTurn && recognitionRef.current) {
recognitionRef.current.start()
recognitionRef.current.onresult = (event) => {
const transcript = event.results[event.results.length - 1][0].transcript
addMessage('cat', transcript)
recognitionRef.current?.stop()
}
} else if (recognitionRef.current) {
recognitionRef.current.stop()
}
}, [isCatTurn])
// Per-frame loop: draw the mic's time-domain waveform onto the canvas and
// auto-detect loud moments as meows.
useEffect(() => {
  const drawWaveform = () => {
    // If the canvas or audio pipeline isn't ready yet, keep polling on the
    // next frame. The original returned WITHOUT rescheduling, so a single
    // not-ready first call silently killed the animation loop forever.
    if (!canvasRef.current || !analyserRef.current || !dataArrayRef.current) {
      animationFrameRef.current = requestAnimationFrame(drawWaveform)
      return
    }
    const canvas = canvasRef.current
    const canvasCtx = canvas.getContext('2d')
    if (!canvasCtx) return
    const WIDTH = canvas.width
    const HEIGHT = canvas.height
    analyserRef.current.getByteTimeDomainData(dataArrayRef.current)
    // Crude meow detector: any sufficiently loud frame counts as one "Meow!",
    // rate-limited to at most one per second.
    const volumeLevel = getVolumeLevel(dataArrayRef.current)
    if (volumeLevel > 0.05 && Date.now() - lastMeowTimeRef.current > 1000) {
      addMessage('cat', 'Meow!')
      lastMeowTimeRef.current = Date.now()
    }
    // Paint background, then trace the waveform left to right.
    canvasCtx.fillStyle = 'rgb(200, 200, 200)'
    canvasCtx.fillRect(0, 0, WIDTH, HEIGHT)
    canvasCtx.lineWidth = 2
    canvasCtx.strokeStyle = 'rgb(0, 0, 0)'
    canvasCtx.beginPath()
    const sliceWidth = WIDTH * 1.0 / dataArrayRef.current.length
    let x = 0
    for (let i = 0; i < dataArrayRef.current.length; i++) {
      const v = dataArrayRef.current[i] / 128.0 // byte samples center on 128 = silence
      const y = v * HEIGHT / 2
      if (i === 0) {
        canvasCtx.moveTo(x, y)
      } else {
        canvasCtx.lineTo(x, y)
      }
      x += sliceWidth
    }
    canvasCtx.lineTo(canvas.width, canvas.height / 2)
    canvasCtx.stroke()
    animationFrameRef.current = requestAnimationFrame(drawWaveform)
  }
  drawWaveform()
  return () => {
    if (animationFrameRef.current) {
      cancelAnimationFrame(animationFrameRef.current)
    }
  }
}, [])
// Mean absolute deviation of the byte samples from the 128 midpoint,
// normalised into [0, 1): 0 is silence, larger is louder.
const getVolumeLevel = (dataArray: Uint8Array) => {
  const total = dataArray.reduce((acc, sample) => acc + Math.abs(sample - 128), 0)
  return total / dataArray.length / 128
}
// Append one bubble to the chat transcript.
const addMessage = (sender: 'human' | 'cat', content: string) => {
  setMessages((history) => history.concat({ sender, content }))
}
// Submit the human's typed message: record it, "speak" it as meows, clear the field.
// Blank/whitespace-only input is ignored.
const handleSubmit = (e: React.FormEvent) => {
  e.preventDefault()
  if (!input.trim()) return
  addMessage('human', input)
  playMeow(input)
  setInput('')
}
// Speak the human's message in cat: each run of letters becomes one "meow".
// The original pattern /[a-z]/gi replaced every single LETTER with "meow",
// so typing "meow" was spoken as "meowmeowmeowmeow"; /[a-z]+/gi replaces
// whole words, which is the evident intent.
const playMeow = (text: string) => {
  if (synthRef.current) {
    const utterance = new SpeechSynthesisUtterance(text.replace(/[a-z]+/gi, 'meow'))
    utterance.pitch = 2 // high pitch + slow rate for a squeaky cat voice
    utterance.rate = 0.8
    synthRef.current.speak(utterance)
  }
}
// UI: chat history, turn/countdown banner, message form, and live mic waveform.
return (
<Card className="w-full max-w-md mx-auto">
<CardHeader>
<CardTitle>Cat Chat</CardTitle>
</CardHeader>
<CardContent>
<div className="space-y-4">
{/* Scrollable transcript: human bubbles right-aligned blue, cat bubbles green. */}
<div className="h-64 overflow-y-auto space-y-2 p-2 border rounded">
{messages.map((message, index) => (
<div
key={index}
className={`p-2 rounded-lg ${
message.sender === 'human' ? 'bg-blue-100 ml-auto' : 'bg-green-100'
} max-w-[80%] ${message.sender === 'human' ? 'text-right' : 'text-left'}`}
>
{message.content}
</div>
))}
</div>
{/* Whose turn it is, with seconds remaining in the turn. */}
<div className="text-center font-bold">
{isCatTurn ? "Cat's turn" : "Human's turn"} - {timeLeft}s left
</div>
{/* Input is disabled while the cat's turn is being listened for. */}
<form onSubmit={handleSubmit} className="flex gap-2">
<Input
type="text"
value={input}
onChange={(e) => setInput(e.target.value)}
placeholder={isCatTurn ? "Listening for meows..." : "Type your message..."}
disabled={isCatTurn}
/>
<Button type="submit" disabled={isCatTurn}>
Send
</Button>
</form>
{/* Live microphone waveform drawn by the requestAnimationFrame loop. */}
<div className="mt-4">
<canvas ref={canvasRef} width="320" height="60" className="w-full"></canvas>
</div>
</div>
</CardContent>
</Card>
)
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment