path: root/src/client/util/DictationManager.ts
Diffstat (limited to 'src/client/util/DictationManager.ts')
-rw-r--r--  src/client/util/DictationManager.ts  39
1 file changed, 21 insertions(+), 18 deletions(-)
diff --git a/src/client/util/DictationManager.ts b/src/client/util/DictationManager.ts
index bc9fe813f..a0e1413b6 100644
--- a/src/client/util/DictationManager.ts
+++ b/src/client/util/DictationManager.ts
@@ -1,7 +1,5 @@
/* eslint-disable no-use-before-define */
import * as interpreter from 'words-to-numbers';
-// @ts-ignore bcz: how are you supposed to include these definitions since dom-speech-recognition isn't a module?
-import type {} from '@types/dom-speech-recognition';
import { ClientUtils } from '../../ClientUtils';
import { Doc, Opt } from '../../fields/Doc';
import { DocData } from '../../fields/DocSymbols';
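The removed @ts-ignore import had been pulling in @types/dom-speech-recognition as if it were a module. As a sketch, these are the two conventional ways to load such an ambient typings package instead (assuming the project still depends on @types/dom-speech-recognition; the diff does not show which one is used):

    // Option 1: a triple-slash directive at the top of DictationManager.ts.
    /// <reference types="dom-speech-recognition" />

    // Option 2: list the package in tsconfig.json under compilerOptions.types, e.g.
    //   "types": ["dom-speech-recognition"]
    // so its globals (SpeechRecognition, SpeechRecognitionErrorEvent, ...) are visible project-wide.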
@@ -33,17 +31,19 @@ import { UndoManager } from './UndoManager';
* In addition to compile-time default commands, you can invoke DictationManager.Commands.Register(Independent|Dependent)
* to add new commands as classes or components are constructed.
*/
+
export namespace DictationManager {
/**
* Some type maneuvering to access Webkit's built-in
* speech recognizer.
*/
+
namespace CORE {
export interface IWindow extends Window {
- webkitSpeechRecognition: any;
+ webkitSpeechRecognition: { new (): SpeechRecognition };
}
}
- const { webkitSpeechRecognition }: CORE.IWindow = window as any as CORE.IWindow;
+ const { webkitSpeechRecognition }: CORE.IWindow = window as unknown as CORE.IWindow;
export const placeholder = 'Listening...';
export namespace Controls {
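With webkitSpeechRecognition declared as a SpeechRecognition constructor rather than any, instantiation is now type-checked. A minimal sketch of how the narrowed type is consumed (startListening and its body are illustrative, not taken from this file):

    const { webkitSpeechRecognition } = window as unknown as CORE.IWindow;

    function startListening(onResult: (transcript: string) => void): SpeechRecognition | undefined {
        if (!webkitSpeechRecognition) return undefined;    // recognizer unavailable in this browser
        const recognizer = new webkitSpeechRecognition();  // checked against the SpeechRecognition interface
        recognizer.interimResults = true;
        recognizer.onresult = e => onResult(e.results[0][0].transcript);
        recognizer.start();
        return recognizer;
    }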
@@ -74,7 +74,7 @@ export namespace DictationManager {
// eslint-disable-next-line new-cap
const recognizer: Opt<SpeechRecognition> = webkitSpeechRecognition ? new webkitSpeechRecognition() : undefined;
- export type InterimResultHandler = (results: string) => any;
+ export type InterimResultHandler = (results: string) => void;
export type ContinuityArgs = { indefinite: boolean } | false;
export type DelimiterArgs = { inter: string; intra: string };
export type ListeningUIStatus = { interim: boolean } | false;
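InterimResultHandler now returns void, making it explicit that the recognizer ignores whatever the callback produces. A hypothetical handler conforming to the tightened signature:

    // Surfaces each interim transcript in the overlay; its return value is discarded.
    const showInterim: InterimResultHandler = results => {
        DictationOverlay.Instance.dictatedPhrase = results;
    };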
@@ -117,11 +117,11 @@ export namespace DictationManager {
}
options?.tryExecute && (await DictationManager.Commands.execute(results));
}
- } catch (e: any) {
+ } catch (e) {
console.log(e);
if (overlay) {
DictationOverlay.Instance.isListening = false;
- DictationOverlay.Instance.dictatedPhrase = results = `dictation error: ${'error' in e ? e.error : 'unknown error'}`;
+ DictationOverlay.Instance.dictatedPhrase = results = `dictation error: ${(e as { error: string }).error || 'unknown error'}`;
DictationOverlay.Instance.dictationSuccess = false;
}
} finally {
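Dropping the e: any annotation means the caught value is typed unknown when useUnknownInCatchVariables is enabled (any otherwise), so the code asserts the { error: string } shape it expects from the recognizer. A slightly more defensive sketch of the same narrowing, covering the case where the thrown value has no error field at all:

    } catch (e) {
        const message = typeof e === 'object' && e !== null && 'error' in e
            ? String((e as { error: unknown }).error)
            : 'unknown error';
        DictationOverlay.Instance.dictatedPhrase = results = `dictation error: ${message}`;
    }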
@@ -156,11 +156,11 @@ export namespace DictationManager {
recognizer.start();
return new Promise<string>(resolve => {
- recognizer.onerror = (e: any) => {
+ recognizer.onerror = e => {
// e is SpeechRecognitionError but where is that defined?
if (!(indefinite && e.error === 'no-speech')) {
recognizer.stop();
- resolve(e);
+ resolve(e.message);
}
};
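For the question in the comment above: in the dom-speech-recognition typings this event is SpeechRecognitionErrorEvent, which carries both error (a code such as 'no-speech') and message. With those globals available, the handler can be annotated explicitly, roughly:

    recognizer.onerror = (e: SpeechRecognitionErrorEvent) => {
        // 'no-speech' is tolerated while listening indefinitely; any other error aborts.
        if (!(indefinite && e.error === 'no-speech')) {
            recognizer.stop();
            resolve(e.message);    // resolve with the human-readable description
        }
    };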
@@ -230,10 +230,10 @@ export namespace DictationManager {
export namespace Commands {
export const dictationFadeDuration = 2000;
- export type IndependentAction = (target: DocumentView) => any | Promise<any>;
+ export type IndependentAction = (target: DocumentView) => void | Promise<void>;
export type IndependentEntry = { action: IndependentAction; restrictTo?: DocumentType[] };
- export type DependentAction = (target: DocumentView, matches: RegExpExecArray) => any | Promise<any>;
+ export type DependentAction = (target: DocumentView, matches: RegExpExecArray) => void | Promise<void>;
export type DependentEntry = { expression: RegExp; action: DependentAction; restrictTo?: DocumentType[] };
export const RegisterIndependent = (key: string, value: IndependentEntry) => Independent.set(key, value);
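Since both action types now return void | Promise<void>, registered commands no longer leak meaningful return values to the dispatcher. A hypothetical registration against the new signatures (the command name, body, and restriction are illustrative only):

    DictationManager.Commands.RegisterIndependent('clear selection', {
        action: (target: DocumentView) => { /* synchronous work, returns nothing */ },
        restrictTo: [DocumentType.COL],
    });

    // A DependentEntry pairs a RegExp with an action that receives the capture groups:
    const goToPage: DictationManager.Commands.DependentEntry = {
        expression: /go to page (\d+)/,
        action: (target, matches) => { console.log('navigate to page', Number(matches[1])); },
    };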
@@ -295,7 +295,6 @@ export namespace DictationManager {
[DocumentType.COL, listSpec(Doc)],
[DocumentType.AUDIO, AudioField],
[DocumentType.IMG, ImageField],
- [DocumentType.IMPORT, listSpec(Doc)],
[DocumentType.RTF, 'string'],
]);
@@ -397,8 +396,8 @@ export namespace DictationManager {
];
}
export function recordAudioAnnotation(dataDoc: Doc, field: string, onRecording?: (stop: () => void) => void, onEnd?: () => void) {
- let gumStream: any;
- let recorder: any;
+ let gumStream: MediaStream | undefined;
+ let recorder: MediaRecorder | undefined;
navigator.mediaDevices.getUserMedia({ audio: true }).then(stream => {
let audioTextAnnos = Cast(dataDoc[field + '_audioAnnotations_text'], listSpec('string'), null);
if (audioTextAnnos) audioTextAnnos.push('');
@@ -415,8 +414,12 @@ export namespace DictationManager {
gumStream = stream;
recorder = new MediaRecorder(stream);
- recorder.ondataavailable = async (e: any) => {
- const [{ result }] = await Networking.UploadFilesToServer({ file: e.data });
+ recorder.ondataavailable = async (e: BlobEvent) => {
+ const file: Blob & { name?: string; lastModified?: number; webkitRelativePath?: string } = e.data;
+ file.name = '';
+ file.lastModified = 0;
+ file.webkitRelativePath = '';
+ const [{ result }] = await Networking.UploadFilesToServer({ file: file as Blob & { name: string; lastModified: number; webkitRelativePath: string } });
if (!(result instanceof Error)) {
const audioField = new AudioField(result.accessPaths.agnostic.client);
const audioAnnos = Cast(dataDoc[field + '_audioAnnotations'], listSpec(AudioField), null);
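The new handler works by grafting File-like fields onto the recorded Blob so that Networking.UploadFilesToServer will accept it. An alternative sketch that wraps the chunk in a real File instead of mutating the Blob, assuming the upload helper accepts any File (the file name here is made up):

    recorder.ondataavailable = async (e: BlobEvent) => {
        // A File already provides name, lastModified and (an empty) webkitRelativePath.
        const file = new File([e.data], 'audio-annotation.webm', { type: e.data.type, lastModified: Date.now() });
        const [{ result }] = await Networking.UploadFilesToServer({ file });
        if (!(result instanceof Error)) {
            // ...store result.accessPaths.agnostic.client in an AudioField, as above
        }
    };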
@@ -426,10 +429,10 @@ export namespace DictationManager {
};
recorder.start();
const stopFunc = () => {
- recorder.stop();
+ recorder?.stop();
DictationManager.Controls.stop(/* false */);
dataDoc.audioAnnoState = AudioAnnoState.stopped;
- gumStream.getAudioTracks()[0].stop();
+ gumStream?.getAudioTracks()[0].stop();
};
if (onRecording) onRecording(stopFunc);
else setTimeout(stopFunc, 5000);
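Because gumStream and recorder are only assigned inside the getUserMedia callback, they remain possibly undefined where stopFunc is defined, hence the optional chaining above. A hypothetical call site, assuming recordAudioAnnotation is exported from DictationManager.Commands and doc is some Doc:

    // Record into doc's 'caption' audio annotations and stop after three seconds;
    // without an onRecording callback, the five-second default timeout applies instead.
    DictationManager.Commands.recordAudioAnnotation(doc, 'caption',
        stop => setTimeout(stop, 3000),
        () => console.log('annotation recording finished'));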