Last active
February 7, 2025 09:42
The `useSpeechRecognition` hook is a reusable and generic React Hook that leverages the Web Speech API for real-time speech-to-text transcription in React applications.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
const { text, isListening, start, stop } = useSpeechRecognition({
  lang: "hi-IN", // Use Hindi as the language
  continuous: true,
  interimResults: true,
});
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
import { useState, useRef, useEffect } from "react";

/**
 * React hook wrapping the Web Speech API (SpeechRecognition) for
 * real-time speech-to-text transcription.
 *
 * @param {Object}  [options]
 * @param {string}  [options.lang="en-US"]        BCP-47 language tag for recognition.
 * @param {boolean} [options.continuous=true]     Keep listening after each phrase.
 * @param {boolean} [options.interimResults=true] Emit partial (interim) results.
 * @returns {{ text: string, isListening: boolean, start: () => void, stop: () => void }}
 */
const useSpeechRecognition = (options = {}) => {
  const {
    lang = "en-US", // Default language
    continuous = true,
    interimResults = true,
  } = options;

  const [text, setText] = useState("");
  const [isListening, setIsListening] = useState(false);
  const recognitionRef = useRef(null);

  useEffect(() => {
    // Feature-detect both the standard and the WebKit-prefixed constructor.
    const SpeechRecognitionCtor =
      window.SpeechRecognition || window.webkitSpeechRecognition;
    if (!SpeechRecognitionCtor) {
      console.warn("Web Speech API is not supported in this browser.");
      return;
    }

    const recognition = new SpeechRecognitionCtor();
    recognition.lang = lang;
    recognition.continuous = continuous;
    recognition.interimResults = interimResults;

    // Capture speech results.
    recognition.onresult = (event) => {
      // BUG FIX: rebuild the transcript from index 0, not event.resultIndex.
      // resultIndex points at the first *changed* result; starting there and
      // then replacing the whole text (continuous mode) silently dropped
      // everything finalized before that index.
      let transcript = "";
      for (let i = 0; i < event.results.length; i++) {
        transcript += event.results[i][0].transcript;
      }
      setText((prev) => (continuous ? transcript.trim() : `${prev} ${transcript.trim()}`));
    };

    recognition.onerror = (event) => {
      console.error("Speech recognition error:", event.error);
    };

    // BUG FIX: recognition can end on its own (silence timeout, network
    // error, end of a non-continuous phrase). Without syncing state here,
    // isListening stayed true and start() became a permanent no-op.
    recognition.onend = () => {
      setIsListening(false);
    };

    recognitionRef.current = recognition;

    return () => {
      // Detach handlers before tearing down so late events can't call
      // setState on an unmounted component; abort() discards pending
      // results, unlike stop() which may still deliver onresult.
      recognition.onresult = null;
      recognition.onerror = null;
      recognition.onend = null;
      recognition.abort();
      recognitionRef.current = null;
    };
  }, [lang, continuous, interimResults]);

  // Start speech recognition. Guarded with try/catch because the engine
  // throws InvalidStateError if start() is called while already running.
  const start = () => {
    if (recognitionRef.current && !isListening) {
      try {
        recognitionRef.current.start();
        setIsListening(true);
      } catch (err) {
        console.error("Failed to start speech recognition:", err);
      }
    }
  };

  // Stop speech recognition (finalizes any pending result via onresult).
  const stop = () => {
    if (recognitionRef.current && isListening) {
      recognitionRef.current.stop();
      setIsListening(false);
    }
  };

  return {
    text,
    isListening,
    start,
    stop,
  };
};

export default useSpeechRecognition;
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment