matt HOFFNER committed
Commit · 862ccf9
1 Parent(s): eab5884

add isComplete value to hook for easier access to autosending messages once complete

Files changed:
- app/hooks/useTranscriber.ts  +7 -0
- app/input.tsx  +32 -6
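The goal, per the commit message, is to let a consumer auto-send a message as soon as transcription finishes by watching a single flag. A minimal sketch of that consumer pattern, assuming the hook shape introduced in the diffs below plus the `output` field already referenced in app/input.tsx; the `sendMessage` callback and import path are illustrative, not part of the commit:

import { useEffect } from "react";
import { useTranscriber } from "./hooks/useTranscriber";

// Illustrative only: auto-send the finished transcript.
// `sendMessage` is an assumed callback supplied by the surrounding app.
export function useAutoSendTranscript(sendMessage: (text: string) => void) {
    const transcriber = useTranscriber();

    useEffect(() => {
        // isComplete is reset to false when a new transcription starts
        // and set to true once the transcriber reports completion.
        if (transcriber.isComplete && transcriber.output) {
            sendMessage(transcriber.output.text);
        }
    }, [transcriber.isComplete, transcriber.output, sendMessage]);

    return transcriber;
}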
app/hooks/useTranscriber.ts  CHANGED

@@ -29,6 +29,7 @@ interface TranscriberCompleteData {
 }
 
 export interface TranscriberData {
+    isComplete?: boolean;
     isBusy: boolean;
     text: string;
     chunks: { text: string; timestamp: [number, number | null] }[];
@@ -37,6 +38,7 @@ export interface TranscriberData {
 export interface Transcriber {
     onInputChange: () => void;
     isBusy: boolean;
+    isComplete: boolean;
     isModelLoading: boolean;
     progressItems: ProgressItem[];
     start: (audioData: AudioBuffer | undefined) => void;
@@ -58,6 +60,7 @@ export function useTranscriber(): Transcriber {
         undefined,
     );
     const [isBusy, setIsBusy] = useState(false);
+    const [isComplete, setIsComplete] = useState(false);
     const [isModelLoading, setIsModelLoading] = useState(false);
 
     const [progressItems, setProgressItems] = useState<ProgressItem[]>([]);
@@ -99,6 +102,7 @@ export function useTranscriber(): Transcriber {
                     chunks: completeMessage.data.chunks,
                 });
                 setIsBusy(false);
+                setIsComplete(true);
                 break;
 
             case "initiate":
@@ -149,6 +153,7 @@ export function useTranscriber(): Transcriber {
         if (audioData) {
             setTranscript(undefined);
             setIsBusy(true);
+            setIsComplete(false);
 
             let audio;
             if (audioData.numberOfChannels === 2) {
@@ -184,6 +189,7 @@ export function useTranscriber(): Transcriber {
     return {
         onInputChange,
         isBusy,
+        isComplete,
         isModelLoading,
         progressItems,
         start: postRequest,
@@ -202,6 +208,7 @@ export function useTranscriber(): Transcriber {
     }, [
         onInputChange,
         isBusy,
+        isComplete,
         isModelLoading,
         progressItems,
         postRequest,
app/input.tsx  CHANGED

@@ -50,10 +50,13 @@ const VoiceInputForm: React.FC<VoiceInputFormProps> = ({ handleSubmit, input, se
     const [recognizedText, setRecognizedText] = useState('');
     const transcriber = useTranscriber();
 
+    const onFormSubmit = (e: React.FormEvent<HTMLFormElement>) => {
+        e.preventDefault();
+        handleSubmit(input); // Assuming handleSubmit now takes the input as an argument
+    };
+
     const startListening = useCallback((audioData: any) => {
-
-            transcriber.start(audioData);
-        }
+        transcriber.start(audioData);
     }, [transcriber]);
 
     useEffect(() => {
@@ -62,6 +65,30 @@ const VoiceInputForm: React.FC<VoiceInputFormProps> = ({ handleSubmit, input, se
         }
     }, [transcriber.output, transcriber.isBusy]);
 
+    const handleTranscriptionComplete = () => {
+        // Create a synthetic event object
+        const syntheticEvent = {
+            preventDefault: () => {},
+            target: {
+                // Mimic the structure of your form's event.target here
+                elements: {
+                    // Assuming the form has an input field named 'input'
+                    input: {
+                        value: recognizedText
+                    }
+                }
+            }
+        };
+
+        handleSubmit(syntheticEvent);
+    };
+
+    useEffect(() => {
+        if (transcriber.isComplete) {
+            handleTranscriptionComplete();
+        }
+    }, [transcriber.isComplete]);
+
     useEffect(() => {
         if (recognizedText) {
             setInput(recognizedText);
@@ -114,8 +141,6 @@ const VoiceInputForm: React.FC<VoiceInputFormProps> = ({ handleSubmit, input, se
 
         let startTime = Date.now();
 
-        vad.start();
-
         try {
             if (!streamRef.current) {
                 streamRef.current = await navigator.mediaDevices.getUserMedia({
@@ -177,6 +202,7 @@ const VoiceInputForm: React.FC<VoiceInputFormProps> = ({ handleSubmit, input, se
     }, [recording]);
 
     const handleToggleRecording = () => {
+        vad.start();
         if (recording) {
             stopRecording();
         } else {
@@ -201,7 +227,7 @@ const VoiceInputForm: React.FC<VoiceInputFormProps> = ({ handleSubmit, input, se
             ))}
           </div>
         )}
-        <form onSubmit={
+        <form onSubmit={onFormSubmit} className={styles.form}>
           <input
             type="text"
            value={input}
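After this change, handleSubmit is invoked with two different shapes: the raw input string in onFormSubmit and a synthetic form-event object in handleTranscriptionComplete. The commit does not show VoiceInputFormProps, so the following is only an assumed sketch of a prop signature that would tolerate both call sites, limited to the fields visible in the diff:

// Hypothetical typing, not part of the commit.
type SubmitPayload =
    | string
    | {
          preventDefault: () => void;
          target: { elements: { input: { value: string } } };
      };

interface VoiceInputFormProps {
    input: string;
    setInput: (value: string) => void;
    // Must accept either the plain text from onFormSubmit or the synthetic
    // event built in handleTranscriptionComplete.
    handleSubmit: (payload: SubmitPayload) => void;
}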