import { FontAwesomeIcon } from '@fortawesome/react-fontawesome';
import { Tooltip } from '@mui/material';
import axios from 'axios';
import { IReactionDisposer, action, computed, makeObservable, observable, reaction, runInAction } from 'mobx';
import { observer } from 'mobx-react';
import * as React from 'react';
import ReactLoading from 'react-loading';
import { imageUrlToBase64, returnFalse, returnNone, returnTrue, returnZero, setupMoveUpEvents } from '../../../ClientUtils';
import { emptyFunction } from '../../../Utils';
import { Doc, Opt } from '../../../fields/Doc';
import { DocData } from '../../../fields/DocSymbols';
import { Id } from '../../../fields/FieldSymbols';
import { RichTextField } from '../../../fields/RichTextField';
import { DocCast, NumCast, RTFCast, StrCast, toList } from '../../../fields/Types';
import { nullAudio } from '../../../fields/URLField';
import { GPTCallType, gptAPICall, gptImageLabel } from '../../apis/gpt/GPT';
import { DocUtils, FollowLinkScript } from '../../documents/DocUtils';
import { CollectionViewType, DocumentType } from '../../documents/DocumentTypes';
import { Docs } from '../../documents/Documents';
import { DragManager } from '../../util/DragManager';
import { dropActionType } from '../../util/DropActionTypes';
import { SnappingManager } from '../../util/SnappingManager';
import { undoable } from '../../util/UndoManager';
import { ContextMenu } from '../ContextMenu';
import { ViewBoxAnnotatableComponent } from '../DocComponent';
import { PinDocView, PinProps } from '../PinFuncs';
import { StyleProp } from '../StyleProp';
import '../pdf/GPTPopup/GPTPopup.scss';
import './ComparisonBox.scss';
import { DocumentView } from './DocumentView';
import { FieldView, FieldViewProps } from './FieldView';
import { FormattedTextBox } from './formattedText/FormattedTextBox';
import { practiceMode } from '../collections/FlashcardPracticeUI';

const API_URL = 'https://api.unsplash.com/search/photos';

@observer
export class ComparisonBox extends ViewBoxAnnotatableComponent() {
    public static LayoutString(fieldKey: string) {
        return FieldView.LayoutString(ComparisonBox, fieldKey);
    }
    private SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
    private _closeRef = React.createRef<HTMLDivElement>();
    private _disposers: (DragManager.DragDropDisposer | undefined)[] = [undefined, undefined];
    private _reactDisposer: IReactionDisposer | undefined;

    constructor(props: FieldViewProps) {
        super(props);
        makeObservable(this);
    }

    @observable private _inputValue = '';
    @observable private _outputValue = '';
    @observable private _loading = false;
    @observable private _isEmpty = false;
    @observable private _childActive = false;
    @observable private _animating = '';
    @observable private _listening = false;
    @observable private _frontSide = false;
    @observable recognition = new this.SpeechRecognition();

    @computed get revealOpKey()   { return `_${this._props.fieldKey}_revealOp`; } // prettier-ignore
    @computed get clipHeightKey() { return `_${this._props.fieldKey}_clipHeight`; } // prettier-ignore
    @computed get clipWidthKey()  { return `_${this._props.fieldKey}_clipWidth`; } // prettier-ignore
    @computed get clipWidth()     { return NumCast(this.layoutDoc[this.clipWidthKey], 50); } // prettier-ignore
    @computed get clipHeight()    { return NumCast(this.layoutDoc[this.clipHeightKey], 200); } // prettier-ignore
    @computed get revealOp()      { return StrCast(this.layoutDoc[this.revealOpKey], StrCast(this._props.docViewPath().slice(-2)[0]?.Document.revealOp)); } // prettier-ignore
    set revealOp(value: string)   { this.layoutDoc[this.revealOpKey] = value; } // prettier-ignore

    // button that flips a flashcard between its front and back sides.
    // NOTE: the element structure and icon below are reconstructed (the original markup was lost);
    // the pointer handler, colors, and style are original.
    @computed get overlayAlternateIcon() {
        return (
            <Tooltip title={<div className="dash-tooltip">flip</div>}>
                <div
                    className="comparisonBox-flipButton"
                    onPointerDown={e =>
                        setupMoveUpEvents(e.target, e, returnFalse, emptyFunction, () => {
                            if (!this.revealOp || this.revealOp === 'flip') {
                                this.flipFlashcard();
                            }
                        })
                    }
                    style={{
                        background: this.revealOp === 'hover' ? 'gray' : this._frontSide ? 'white' : 'black',
                        color: this.revealOp === 'hover' ? 'black' : this._frontSide ? 'black' : 'white',
                        display: 'inline-block',
                    }}>
                    <FontAwesomeIcon icon="rotate" />
                </div>
            </Tooltip>
        );
    }

    _sideBtnWidth = 35;
    /**
     * How much the content of the view is being scaled based on its nesting and its fit-to-width settings
     */
    @computed get viewScaling() { return this.ScreenToLocalBoxXf().Scale; } // prettier-ignore
    /**
     * The maximum size a UI widget can be scaled so that it won't be bigger in screen pixels than its normal 35-pixel size.
     */
    @computed get maxWidgetSize() { return Math.min(this._sideBtnWidth * this.viewScaling, 0.25 * Math.min(NumCast(this.Document.width), NumCast(this.Document.height))); } // prettier-ignore
    /**
     * How much to reactively scale a UI element so that it is as big as it can be (up to its normal 35-pixel size) without being too big for the Doc content
     */
    @computed get uiBtnScaling() { return Math.max(this.maxWidgetSize / this._sideBtnWidth, 1) * Math.min(1, this.viewScaling) * (this._props.NativeDimScaling?.() ?? 1); } // prettier-ignore

    // floating menu of flashcard buttons (flip, ask GPT, create a stack).
    // NOTE: container markup and icons are reconstructed; the conditions, tooltip texts, and handlers are original,
    // and the stack button is assumed to trigger gptFlashcardPile based on its tooltip.
    @computed get flashcardMenu() {
        return SnappingManager.HideDecorations ? null : (
            <div className="comparisonBox-menu" style={{ transform: `scale(${this.uiBtnScaling})` }}>
                {this.revealOp === 'hover' || !this._props.isSelected() ? null : this.overlayAlternateIcon}
                {!this._props.isSelected() ? null : (
                    <>
                        {!this._frontSide ? null : (
                            <Tooltip
                                title={
                                    <div className="dash-tooltip">
                                        {!this._frontSide ? 'Flip to front side to use GPT' : 'Ask GPT to create an answer on the back side of the flashcard based on your question on the front'}
                                    </div>
                                }>
                                <div className="comparisonBox-gptButton" onPointerDown={() => (this._frontSide ? this.findImageTags() : null)}>
                                    <FontAwesomeIcon icon="lightbulb" />
                                </div>
                            </Tooltip>
                        )}
                        {DocCast(this.Document.embedContainer)?.type_collection !== CollectionViewType.Freeform ? null : (
                            <Tooltip title={<div className="dash-tooltip">Create new flashcard stack based on text</div>}>
                                <div className="comparisonBox-stackButton" onPointerDown={() => this.gptFlashcardPile()}>
                                    <FontAwesomeIcon icon="layer-group" />
                                </div>
                            </Tooltip>
                        )}
                    </>
                )}
            </div>
        );
    }

    @action handleInputChange = (e: React.ChangeEvent<HTMLTextAreaElement>) => {
        this._inputValue = e.target.value;
    };
    @action activateContent = () => {
        this._childActive = true;
    };
    @action handleRenderClick = () => {
        this._frontSide = !this._frontSide;
    };
    @action handleRenderGPTClick = () => {
        const phonTrans = DocCast(this.Document.audio) ? DocCast(this.Document.audio).phoneticTranscription : undefined;
        if (phonTrans) {
            this._inputValue = StrCast(phonTrans);
            this.askGPTPhonemes(this._inputValue);
        } else if (this._inputValue) this.askGPT(GPTCallType.QUIZ);
        this._frontSide = false;
        this._outputValue = '';
    };

    onPointerMove = ({ movementX }: PointerEvent) => {
        const width = movementX * this.ScreenToLocalBoxXf().Scale + (this.clipWidth / 100) * this._props.PanelWidth();
        if (width && width > 5 && width < this._props.PanelWidth()) {
            this.layoutDoc[this.clipWidthKey] = (width * 100) / this._props.PanelWidth();
        }
        return false;
    };
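    // A worked example of the clip math in onPointerMove above (values hypothetical):
    // with a 400px panel, the slider at 50%, and a 10px pointer movement at scale 1,
    //   width     = 10 * 1 + (50 / 100) * 400 = 210px
    //   clipWidth = 210 * 100 / 400           = 52.5%
    // so the stored clip position stays resolution-independent (a percentage, not pixels).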
    componentDidMount() {
        this._props.setContentViewBox?.(this);
        this._reactDisposer = reaction(
            () => this._props.isSelected(), // when this reaction should update
            selected => !selected && (this._childActive = false) // what it should update to
        );
    }
    componentWillUnmount() {
        this._reactDisposer?.();
    }

    protected createDropTarget = (ele: HTMLDivElement | null, fieldKey: string, disposerId: number) => {
        this._disposers[disposerId]?.();
        if (ele) {
            this._disposers[disposerId] = DragManager.MakeDropTarget(ele, (e, dropEvent) => this.internalDrop(e, dropEvent, fieldKey), this.layoutDoc);
        }
    };

    private internalDrop = undoable((e: Event, dropEvent: DragManager.DropEvent, fieldKey: string) => {
        if (dropEvent.complete.docDragData) {
            const { droppedDocuments } = dropEvent.complete.docDragData;
            const added = dropEvent.complete.docDragData.moveDocument?.(droppedDocuments, this.Document, (doc: Doc | Doc[]) => this.addDoc(toList(doc).lastElement(), fieldKey));
            Doc.SetContainer(droppedDocuments.lastElement(), this.dataDoc);
            !added && e.preventDefault();
            e.stopPropagation(); // prevent parent Doc from registering new position so that it snaps back into place
            // this.childActive = false;
            return added;
        }
        return undefined;
    }, 'internal drop');

    getAnchor = (addAsAnnotation: boolean, pinProps?: PinProps) => {
        const anchor = Docs.Create.ConfigDocument({
            title: 'CompareAnchor:' + this.Document.title,
            // set presentation timing properties for restoring view
            presentation_transition: 1000,
            annotationOn: this.Document,
        });
        if (anchor) {
            if (!addAsAnnotation) anchor.backgroundColor = 'transparent';
            /* addAsAnnotation && */ this.addDocument(anchor);
            PinDocView(anchor, { pinDocLayout: pinProps?.pinDocLayout, pinData: { ...(pinProps?.pinData ?? {}), clippable: true } }, this.Document);
            return anchor;
        }
        return this.Document;
    };

    clearDoc = undoable((fieldKey: string) => {
        this.dataDoc[fieldKey] = undefined;
    }, 'clear doc');

    moveDoc = (doc: Doc, addDocument: (document: Doc | Doc[]) => boolean, which: string) => this.remDoc(doc, which) && addDocument(doc);

    addDoc = (doc: Doc, which: string) => {
        if (this.dataDoc[which] && !this._isEmpty) return false;
        this.dataDoc[which] = doc;
        return true;
    };

    remDoc = (doc: Doc, which: string) => {
        if (this.dataDoc[which] === doc) {
            this._isEmpty = true;
            this.dataDoc[which] = undefined;
            return true;
        }
        return false;
    };

    closeDown = (e: React.PointerEvent, which: string) => {
        setupMoveUpEvents(
            this,
            e,
            moveEv => {
                const de = new DragManager.DocumentDragData([DocCast(this.dataDoc[which])], dropActionType.move);
                de.moveDocument = (doc: Doc | Doc[], targetCollection: Doc | undefined, addDocument: (doc: Doc | Doc[]) => boolean): boolean => addDocument(doc);
                de.canEmbed = true;
                DragManager.StartDocumentDrag([this._closeRef.current!], de, moveEv.clientX, moveEv.clientY);
                return true;
            },
            emptyFunction,
            () => this.clearDoc(which)
        );
    };

    docStyleProvider = (doc: Opt<Doc>, props: Opt<FieldViewProps>, property: string) => {
        switch (property) {
            case StyleProp.PointerEvents: return 'none';
            default: return this._props.styleProvider?.(doc, props, property);
        } // prettier-ignore
    };

    moveDoc1 = (docs: Doc | Doc[], targetCol: Doc | undefined, addDoc: (doc: Doc | Doc[]) => boolean) => toList(docs).reduce((res, doc: Doc) => res && this.moveDoc(doc, addDoc, this.fieldKey + '_1'), true);
    moveDoc2 = (docs: Doc | Doc[], targetCol: Doc | undefined, addDoc: (doc: Doc | Doc[]) => boolean) => toList(docs).reduce((res, doc: Doc) => res && this.moveDoc(doc, addDoc, this.fieldKey + '_2'), true);
    remDoc1 = (docs: Doc | Doc[]) => toList(docs).reduce((res, doc) => res && this.remDoc(doc, this.fieldKey + '_1'), true);
    remDoc2 = (docs: Doc | Doc[]) => toList(docs).reduce((res, doc) => res && this.remDoc(doc, this.fieldKey + '_2'), true);

    registerSliding = (e: React.PointerEvent, targetWidth: number) => {
        if (e.button !== 2) {
            setupMoveUpEvents(
                this,
                e,
                this.onPointerMove,
                emptyFunction,
                action((moveEv, doubleTap) => {
                    if (doubleTap) {
                        this._childActive = true;
                        if (!this.dataDoc[this.fieldKey + '_1'] && !this.dataDoc[this.fieldKey]) this.dataDoc[this.fieldKey + '_1'] = DocUtils.copyDragFactory(Doc.UserDoc().emptyNote as Doc);
                        if (!this.dataDoc[this.fieldKey + '_2'] && !this.dataDoc[this.fieldKey + '_alternate']) this.dataDoc[this.fieldKey + '_2'] = DocUtils.copyDragFactory(Doc.UserDoc().emptyNote as Doc);
                    }
                }),
                false,
                undefined,
                action(() => {
                    if (this._childActive) return;
                    this._animating = 'all 200ms'; // on click, animate slider movement to the targetWidth
                    this.layoutDoc[this.clipWidthKey] = (targetWidth * 100) / this._props.PanelWidth();
                    setTimeout(
                        action(() => {
                            this._animating = '';
                        }),
                        200
                    );
                })
            );
        }
    };

    /**
     * Set up the speech-to-text tool.
     */
    setListening = () => {
        if (this.SpeechRecognition) {
            this.recognition.continuous = true;
            this.recognition.interimResults = true;
            this.recognition.lang = 'en-US';
            this.recognition.onresult = this.handleResult.bind(this);
        }
        ContextMenu.Instance.setLangIndex(0);
    };
    startListening = () => {
        this.recognition.start();
        this._listening = true;
    };
    stopListening = () => {
        this.recognition.stop();
        this._listening = false;
    };
    setLanguage = (language: string, ind: number) => {
        this.recognition.lang = language;
        ContextMenu.Instance.setLangIndex(ind);
    };
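    // e.g., a hypothetical caller: setLanguage('fr-FR', 2) points the recognizer at French and
    // highlights the third context-menu entry, after which convertAbr() below returns 'French'.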
    /**
     * Determine which language the speech-to-text tool is in.
     * @returns the full name of the recognizer's current language
     */
    convertAbr = () => {
        switch (this.recognition.lang) {
            case 'en-US': return 'English'; //prettier-ignore
            case 'es-ES': return 'Spanish'; //prettier-ignore
            case 'fr-FR': return 'French'; //prettier-ignore
            case 'it-IT': return 'Italian'; //prettier-ignore
            case 'zh-CN': return 'Mandarin Chinese'; //prettier-ignore
            case 'ja': return 'Japanese'; //prettier-ignore
            default: return 'Korean'; //prettier-ignore
        }
    };

    openContextMenu = (x: number, y: number, evalu: boolean) => {
        ContextMenu.Instance.clearItems();
        ContextMenu.Instance.addItem({ description: 'English', event: () => this.setLanguage('en-US', 0), icon: 'question' }); //prettier-ignore
        ContextMenu.Instance.addItem({ description: 'Spanish', event: () => this.setLanguage('es-ES', 1), icon: 'question' }); //prettier-ignore
        ContextMenu.Instance.addItem({ description: 'French', event: () => this.setLanguage('fr-FR', 2), icon: 'question' }); //prettier-ignore
        ContextMenu.Instance.addItem({ description: 'Italian', event: () => this.setLanguage('it-IT', 3), icon: 'question' }); //prettier-ignore
        if (!evalu) ContextMenu.Instance.addItem({ description: 'Mandarin Chinese', event: () => this.setLanguage('zh-CN', 4), icon: 'question' }); //prettier-ignore
        ContextMenu.Instance.addItem({ description: 'Japanese', event: () => this.setLanguage('ja', 5), icon: 'question' }); //prettier-ignore
        ContextMenu.Instance.addItem({ description: 'Korean', event: () => this.setLanguage('ko', 6), icon: 'question' }); //prettier-ignore
        ContextMenu.Instance.displayMenu(x, y);
    };

    /**
     * Creates an AudioBox to record a user's audio.
     */
    evaluatePronunciation = () => {
        const newAudio = Docs.Create.AudioDocument(nullAudio, { _width: 200, _height: 100 });
        this.Document.audio = newAudio[DocData];
        this._props.DocumentView?.()._props.addDocument?.(newAudio);
    };

    /**
     * Gets the transcription of an audio recording by sending the recording to the backend.
     */
    pushInfo = async () => {
        const audio = {
            file: DocCast(this.Document.audio)[DocData].url,
        };
        const response = await axios.post('http://localhost:105/recognize/', audio, {
            headers: {
                'Content-Type': 'application/json',
            },
        });
        this.Document.phoneticTranscription = response.data.transcription;
    };

    /**
     * Extracts the id of a YouTube video from its url.
     * @param url the video url
     * @returns the 11-character video id, or null if none is found
     */
    getYouTubeVideoId = (url: string) => {
        const regExp = /^.*(youtu.be\/|v\/|u\/\w\/|embed\/|watch\?v=|&v=|\?v=)([^#&?]*).*/;
        const match = url.match(regExp);
        return match && match[2].length === 11 ? match[2] : null;
    };
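    // e.g. getYouTubeVideoId('https://www.youtube.com/watch?v=dQw4w9WgXcQ') === 'dQw4w9WgXcQ'
    //      getYouTubeVideoId('https://youtu.be/dQw4w9WgXcQ')                === 'dQw4w9WgXcQ'
    //      getYouTubeVideoId('not a video url')                             === null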
    /**
     * Gets the transcript of a YouTube video by sending the video url to the backend.
     * @returns transcription of the YouTube recording
     */
    youtubeUpload = async () => {
        const audio = {
            file: this.getYouTubeVideoId(StrCast(RTFCast(DocCast(this.dataDoc[this.fieldKey + '_1']).text)?.Text)),
        };
        const response = await axios.post('http://localhost:105/youtube/', audio, {
            headers: {
                'Content-Type': 'application/json',
            },
        });
        return response.data.transcription;
    };

    createFlashcardPile(collectionArr: Doc[], gpt: boolean) {
        const newCol = Docs.Create.CarouselDocument(collectionArr, {
            _width: NumCast(this.layoutDoc['_' + this._props.fieldKey + '_width'], 250) + 50,
            _height: NumCast(this.layoutDoc['_' + this._props.fieldKey + '_width'], 200) + 50,
            _layout_fitWidth: false,
            _layout_autoHeight: true,
            _xMargin: 5,
            _yMargin: 5,
        });
        newCol.x = this.layoutDoc.x;
        newCol.y = NumCast(this.layoutDoc.y) + 50;
        newCol.type_collection = CollectionViewType.Carousel as string;
        for (let i = 0; i < collectionArr.length; i++) {
            DocCast(collectionArr[i])[DocData].embedContainer = newCol;
        }
        if (gpt) {
            this._props.DocumentView?.()._props.addDocument?.(newCol);
            this._props.removeDocument?.(this.Document);
        } else {
            this._props.addDocument?.(newCol);
            this._props.removeDocument?.(this.Document);
            this.Document.embedContainer = newCol;
        }
    }

    /**
     * Transfers the content of flashcards into a flashcard pile.
     */
    gptFlashcardPile = async () => {
        const text = await this.askGPT(GPTCallType.STACK);
        const senArr = text?.split('Question: ') ?? [];
        const collectionArr: Doc[] = [];
        for (let i = 1; i < senArr.length; i++) {
            const newDoc = Docs.Create.ComparisonDocument(senArr[i], { _layout_isFlashcard: true, _width: 300, _height: 300 });
            if (senArr[i].includes('Keyword: ')) {
                const question = StrCast(senArr[i]).split('Keyword: ');
                const questionTxt = question[0].includes('Answer: ') ? question[0].split('Answer: ')[0] : question[0];
                const answerTxt = question[0].includes('Answer: ') ? question[0].split('Answer: ')[1] : question[1];
                this.fetchImages(question[1]).then(img => {
                    const rtfield = new RichTextField(
                        JSON.stringify({
                            // this is a RichText json that has the question text placed above a related image doc
                            doc: {
                                type: 'doc',
                                content: [
                                    {
                                        type: 'paragraph',
                                        attrs: { align: null, color: null, id: null, indent: null, inset: null, lineSpacing: null, paddingBottom: null, paddingTop: null },
                                        content: [
                                            { type: 'text', text: questionTxt },
                                            img ? { type: 'dashDoc', attrs: { width: '200px', height: '200px', title: 'dashDoc', float: 'unset', hidden: false, docId: img[Id] } } : {},
                                        ],
                                    },
                                ],
                            },
                            selection: { type: 'text', anchor: 2, head: 2 },
                        }),
                        questionTxt
                    );
                    newDoc[DocData][this.fieldKey + '_1'] = Docs.Create.TextDocument(questionTxt, { text: rtfield });
                    newDoc[DocData][this.fieldKey + '_0'] = Docs.Create.TextDocument(answerTxt);
                });
            }
            collectionArr.push(newDoc);
        }
        this.createFlashcardPile(collectionArr, true);
    };

    /**
     * Calls GPT for each flashcard type.
     */
    askGPT = async (callType: GPTCallType): Promise<string | undefined> => {
        const questionText = 'Question: ' + StrCast(RTFCast(DocCast(this.dataDoc[this.fieldKey + '_1']).text)?.Text);
        // const rubricText = ' Rubric: ' + StrCast(RTFCast(DocCast(this.dataDoc[this.fieldKey + '_0']).text)?.Text);
        // const queryText = questionText + ' UserAnswer: ' + this._inputValue + '. ' + rubricText;
        this._loading = true;
        if (callType === GPTCallType.CHATCARD) {
            if (StrCast(RTFCast(DocCast(this.dataDoc[this.fieldKey + '_1']).text)?.Text) === '') {
                this._loading = false;
                return;
            }
        }
        try {
            const res = await gptAPICall(questionText, GPTCallType.FLASHCARD);
            runInAction(() => {
                if (!res) {
                    console.error('GPT call failed');
                    return;
                }
                if (callType === GPTCallType.CHATCARD) {
                    DocCast(this.dataDoc[this.props.fieldKey + '_0'])[DocData].text = res;
                } else if (callType === GPTCallType.QUIZ) {
                    this._frontSide = true;
                    this._outputValue = res.replace(/UserAnswer/g, "user's answer").replace(/Rubric/g, 'rubric');
                } else if (callType === GPTCallType.FLASHCARD) {
                    this._loading = false;
                    return res;
                }
                this._loading = false;
            });
            return res;
        } catch (err) {
            console.error('GPT call failed', err);
        }
        this._loading = false;
    };

    layoutWidth = () => NumCast(this.layoutDoc.width, 200);
    layoutHeight = () => NumCast(this.layoutDoc.height, 200);

    findImageTags = async () => {
        const c = this.DocumentView?.().ContentDiv?.getElementsByTagName('img');
        if (c?.length === 0) this.askGPT(GPTCallType.CHATCARD);
        if (c) {
            this._loading = true;
            for (const i of c) {
                if (i.className !== 'ProseMirror-separator') this.getImageDesc(i.src);
            }
            this._loading = false;
        }
    };

    /**
     * Ask GPT for advice on how to improve speech by comparing the phonetic transcription of
     * a user's audio recording with the phonetic transcription of their intended sentence.
     * @param phonemes the phonetic transcription of the user's recording
     */
    askGPTPhonemes = async (phonemes: string) => {
        const sentence = StrCast(RTFCast(DocCast(this.dataDoc[this.fieldKey + '_1']).text)?.Text);
        const phon6 = 'huː ɑɹ juː tədeɪ'; // hard-coded sample transcription used by the English prompt
        const phon4 = 'kamo estas hɔi'; // hard-coded sample transcription used by the Spanish prompt
        const promptEng =
            'Consider all possible phonetic transcriptions of the intended sentence "' +
            sentence +
            '" that is standard in American speech without showing the user. Compare each word in the following phonemes with those phonetic transcriptions without displaying anything to the user: "' +
            phon6 +
            '". Steps to do this: Align the words with each word in the intended sentence by combining the phonemes to get a pronunciation that resembles the word in order. Do not describe phonetic corrections with the phonetic alphabet - describe it by providing other examples of how it should sound. Note if a word or sound is missing, including missing vowels and consonants. If there is an additional word that does not match with the provided sentence, say so. For each word, if any letters mismatch and would sound weird in American speech and they are not allophones of the same phoneme and they are far away from each other on the IPA vowel chart and that pronunciation is not normal for the meaning of the word, note this difference and explain how it is supposed to sound. Only note the difference if they are not allophones of the same phoneme and if they are far away on the vowel chart. The goal is to be understood, not sound like a native speaker. Just so you know, "i" sounds like "ee" as in "bee", not "ih" as in "lick". Interpret "ɹ" as the same as "r". Interpret "ʌ" as the same as "ə". If "ɚ", "ɔː", and "ɔ" are options for pronunciation, do not choose "ɚ". Ignore differences with colons. Ignore redundant letters and words and sounds and the splitting of words; do not mention this since there could be repeated words in the sentence. Provide a response like this: "Let\'s work on improving the pronunciation of "coffee." You said "ceeffee," which is close, but we need to adjust the vowel sound. In American English, "coffee" is pronounced /ˈkɔːfi/, with a long "aw" sound. Try saying "kah-fee." Your intonation is good, but try putting a bit more stress on "like" in the sentence "I would like a coffee with milk." This will make your speech sound more natural. Keep practicing, and let\'s try saying the whole sentence again!"';
        const promptSpa =
            'Consider all possible phonetic transcriptions of the intended sentence "' +
            'como estás hoy' +
            '" that is standard in Spanish speech without showing the user. Compare each word in the following phonemes with those phonetic transcriptions without displaying anything to the user: "' +
            phon4 +
            '". Steps to do this: Align the words with each word in the intended sentence by combining the phonemes to get a pronunciation that resembles the word in order. Do not describe phonetic corrections with the phonetic alphabet - describe it by providing other examples of how it should sound. Note if a word or sound is missing, including missing vowels and consonants. If there is an additional word that does not match with the provided sentence, say so. For each word, if any letters mismatch and would sound weird in Spanish speech and they are not allophones of the same phoneme and they are far away from each other on the IPA vowel chart and that pronunciation is not normal for the meaning of the word, note this difference and explain how it is supposed to sound. Only note the difference if they are not allophones of the same phoneme and if they are far away on the vowel chart; say good job if it would be understood by a native Spanish speaker. Just so you know, "i" sounds like "ee" as in "bee", not "ih" as in "lick". Interpret "ɹ" as the same as "r". Interpret "ʌ" as the same as "ə". Do not make "θ" and "f" interchangeable. Do not make "n" and "ɲ" interchangeable. Do not make "e" and "i" interchangeable. If "ɚ", "ɔː", and "ɔ" are options for pronunciation, do not choose "ɚ". Ignore differences with colons. Ignore redundant letters and words and sounds and the splitting of words; do not mention this since there could be repeated words in the sentence. Identify "ɔi" sounds like "oy". Ignore accents and do not say anything to the user about this.';
        const promptAll =
            'Consider all possible phonetic transcriptions of the intended sentence "' +
            sentence +
            '" that is standard in ' +
            this.convertAbr() +
            ' speech without showing the user. Compare each word in the following phonemes with those phonetic transcriptions without displaying anything to the user: "' +
            phonemes +
            '". Steps to do this: Align the words with each word in the intended sentence by combining the phonemes to get a pronunciation that resembles the word in order. Do not describe phonetic corrections with the phonetic alphabet - describe it by providing other examples of how it should sound. Note if a word or sound is missing, including missing vowels and consonants. If there is an additional word that does not match with the provided sentence, say so. For each word, if any letters mismatch and would sound weird in ' +
            this.convertAbr() +
            ' speech and they are not allophones of the same phoneme and they are far away from each other on the IPA vowel chart and that pronunciation is not normal for the meaning of the word, note this difference and explain how it is supposed to sound. Just so you know, "i" sounds like "ee" as in "bee", not "ih" as in "lick". Interpret "ɹ" as the same as "r". Interpret "ʌ" as the same as "ə". Do not make "θ" and "f" interchangeable. Do not make "n" and "ɲ" interchangeable. Do not make "e" and "i" interchangeable. If "ɚ", "ɔː", and "ɔ" are options for pronunciation, do not choose "ɚ". Ignore differences with colons. Ignore redundant letters and words and sounds and the splitting of words; do not mention this since there could be repeated words in the sentence. Provide a response like this: "Let\'s work on improving the pronunciation of "coffee." You said "cawffee," which is close, but we need to adjust the vowel sound. In American English, "coffee" is pronounced /ˈkɔːfi/, with a long "aw" sound. Try saying "kah-fee." Your intonation is good, but try putting a bit more stress on "like" in the sentence "I would like a coffee with milk." This will make your speech sound more natural. Keep practicing, and let\'s try saying the whole sentence again!"';
        switch (this.recognition.lang) {
            case 'en-US':
                this._outputValue = await gptAPICall(promptEng, GPTCallType.PRONUNCIATION);
                break;
            case 'es-ES':
                this._outputValue = await gptAPICall(promptSpa, GPTCallType.PRONUNCIATION);
                break;
            default:
                this._outputValue = await gptAPICall(promptAll, GPTCallType.PRONUNCIATION);
                break;
        }
    };

    /**
     * Display a user's speech-to-text result.
     * @param e the SpeechRecognition result event
     */
    handleResult = (e: SpeechRecognitionEvent) => {
        let finalTranscript = '';
        for (let i = e.resultIndex; i < e.results.length; i++) {
            const transcript = e.results[i][0].transcript;
            if (e.results[i].isFinal) {
                finalTranscript += transcript;
            }
        }
        this._inputValue += finalTranscript;
    };

    /**
     * Gets an image from the Unsplash API that will be placed inside a generated flashcard.
     * @param selection the search query
     * @returns an Image Document for the first search result
     */
    fetchImages = async (selection: string) => {
        try {
            const { data } = await axios.get(`${API_URL}?query=${selection}&page=1&per_page=${1}&client_id=Q4zruu6k6lum2kExiGhLNBJIgXDxD6NNj0SRHH_XXU0`);
            const imageSnapshot = Docs.Create.ImageDocument(data.results[0].urls.small, {
                _nativeWidth: Doc.NativeWidth(this.layoutDoc),
                _nativeHeight: Doc.NativeHeight(this.layoutDoc),
                x: NumCast(this.layoutDoc.x),
                y: NumCast(this.layoutDoc.y),
                onClick: FollowLinkScript(),
                _width: 150,
                _height: 150,
                title: '--snapshot' + NumCast(this.layoutDoc._layout_currentTimecode) + ' image-',
            });
            imageSnapshot.x = this.layoutDoc.x;
            imageSnapshot.y = this.layoutDoc.y;
            return imageSnapshot;
        } catch (error) {
            console.log(error);
        }
    };

    getImageDesc = async (u: string) => {
        try {
            const hrefBase64 = await imageUrlToBase64(u);
            const response = await gptImageLabel(hrefBase64, 'Answer the following question as a short flashcard response. Do not include a label. ' + (this.dataDoc.text as RichTextField)?.Text);
            DocCast(this.dataDoc[this.props.fieldKey + '_0'])[DocData].text = response;
        } catch (error) {
            console.log('Error', error);
        }
    };

    @action flipFlashcard = () => {
        this._frontSide = !this._frontSide;
    };

    hoverFlip = (side: boolean) => {
        if (this.revealOp === 'hover') this._frontSide = side;
    };

    testForTextFields = (whichSlot: string) => {
        const slotData = Doc.Get(this.dataDoc, whichSlot, true);
        const slotHasText = slotData instanceof RichTextField || typeof slotData === 'string';
        const subjectText = RTFCast(this.Document[this.fieldKey])?.Text.trim();
        const altText = RTFCast(this.Document[this.fieldKey + '_alternate'])?.Text.trim();
        // prettier-ignore
        const layoutTemplateString =
            slotHasText             ? FormattedTextBox.LayoutString(whichSlot) :
            whichSlot.endsWith('1') ? (subjectText !== undefined ? FormattedTextBox.LayoutString(this.fieldKey) : undefined) :
            altText !== undefined   ? FormattedTextBox.LayoutString(this.fieldKey + '_alternate') : undefined;
        // A bit hacky to try out the concept of using GPT to fill in flashcards.
        // If the second slot doesn't have anything in it, but the fieldKey slot has text (e.g., this.text is a string)
        // and the fieldKey + "_alternate" slot has text that includes a GPT query (indicated by (( && )) ) that is parameterized (optionally) by the fieldKey text (this) or other metadata (this.<field>).
        // E.g., this.text_alternate is
        //    "((Provide a one sentence definition for (this) that doesn't use any word in (this.excludeWords) ))"
        // where (this) is replaced by the text in the fieldKey slot and this.excludeWords is replaced by the contents of the excludeWords field.
        // The GPT call will put the "answer" in the second slot of the comparison (e.g., text_2)
        if (whichSlot.endsWith('2') && !layoutTemplateString?.includes(whichSlot)) {
            const queryText = altText?.replace('(this)', subjectText);
            // TODO: this should be done in Doc.setField but it doesn't know about the fieldKey ...
            if (queryText?.match(/\(\(.*\)\)/)) {
                Doc.SetField(this.Document, whichSlot, ':=' + queryText, false); // make the second slot be a computed field on the data doc that calls ChatGPT
            }
        }
        return layoutTemplateString;
    };

    childActiveFunc = () => this._childActive;
    contentScreenToLocalXf = () => this._props.ScreenToLocalTransform().scale(this._props.NativeDimScaling?.() || 1);
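    // Illustrative sketch of the GPT-template flow above (field names hypothetical):
    // given a doc whose `text` is "photosynthesis" and whose `text_alternate` is
    //   "((Provide a one sentence definition for (this) that doesn't use any word in (this.excludeWords) ))"
    // testForTextFields('text_2') stores
    //   text_2 := "((Provide a one sentence definition for photosynthesis that ... ))"
    // as a computed field, so dereferencing text_2 triggers the GPT call that fills in the answer side.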
    render() {
        // NOTE: the JSX markup in render() was damaged; the element structure below is a reconstruction
        // built around the original handlers, conditions, styles, and comments.
        const clearButton = (which: string) => (
            <Tooltip title={<div className="dash-tooltip">remove</div>}>
                <div
                    className="comparisonBox-clearButton"
                    ref={this._closeRef}
                    onPointerDown={e => this.closeDown(e, which)} // prevent triggering slider movement in registerSliding
                >
                    <FontAwesomeIcon icon="xmark" />
                </div>
            </Tooltip>
        );
        const displayDoc = (whichSlot: string) => {
            const whichDoc = DocCast(this.dataDoc[whichSlot]);
            const targetDoc = DocCast(whichDoc?.annotationOn, whichDoc);
            const layoutString = targetDoc ? '' : this.testForTextFields(whichSlot);
            return targetDoc || layoutString ? (
                <>
                    <DocumentView
                        {...this._props}
                        Document={targetDoc ?? this.Document}
                        LayoutTemplateString={layoutString}
                        styleProvider={this.docStyleProvider}
                        moveDocument={whichSlot.endsWith('1') ? this.moveDoc1 : this.moveDoc2}
                        removeDocument={whichSlot.endsWith('1') ? this.remDoc1 : this.remDoc2}
                        isContentActive={this.childActiveFunc}
                        ScreenToLocalTransform={this.contentScreenToLocalXf}
                        PanelWidth={this.layoutWidth}
                        PanelHeight={this.layoutHeight}
                    />
                    {!this.Document._layout_isFlashcard ? clearButton(whichSlot) : null}
                </>
            ) : (
                // empty slot: acts as a drop-target placeholder
                <div className="comparisonBox-placeholder" />
            );
        };
        const displayBox = (which: string, index: number, cover: number) => (
            <div
                className="comparisonBox-displayBox"
                key={which}
                style={{ transition: this._animating }}
                onPointerDown={e => {
                    this.registerSliding(e, cover);
                    this.Document._layout_isFlashcard && this.activateContent();
                }}
                ref={ele => this.createDropTarget(ele, which, index)}>
                {!this._isEmpty ? displayDoc(which) : null}
            </div>
        );

        if (this.Document._layout_isFlashcard) {
            const side = this._frontSide ? 1 : 0;
            const dataSplit = StrCast(this.dataDoc.data).includes('Keyword: ') ? StrCast(this.dataDoc.data).split('Keyword: ') : StrCast(this.dataDoc.data).split('Answer: ');
            const textCreator = (which: number, title: string, text: string) => {
                const newDoc = Docs.Create.TextDocument(text, {
                    title,
                    // _layout_autoHeight: true,
                    _layout_centered: true,
                    text_align: 'center',
                    _layout_fitWidth: true,
                });
                this.addDoc(newDoc, this.fieldKey + '_' + which);
                return newDoc;
            };
            // add a text box to each side when the comparison box is first created
            if (!this.dataDoc[this.fieldKey + '_0'] && !this._isEmpty) {
                textCreator(0, 'answer', dataSplit[1]);
            }
            if (!this.dataDoc[this.fieldKey + '_1'] && !this._isEmpty) {
                const question = textCreator(1, 'question', dataSplit[0] || 'hint: Enter a topic, select this document and click the stack button to have GPT create a deck of cards');
                Doc.SelectOnLoad = dataSplit[0] ? undefined : question;
            }
            if (DocCast(this.Document.embedContainer).practiceMode === practiceMode.QUIZ) {
                const text = StrCast(RTFCast(DocCast(this.dataDoc[this.fieldKey + '_1']).text)?.Text);
                return (
                    <div className="comparisonBox-quiz">
                        <div className="comparisonBox-quizQuestion">{text}</div>
                        {text ? null : <div className="comparisonBox-quizHint">Return to all flashcards and add text to both sides.</div>}
                        <textarea className="comparisonBox-quizInput" value={this._inputValue} onChange={this.handleInputChange} />
                        <div className="comparisonBox-quizOutput">{this._outputValue}</div>
                        {this._loading ? <ReactLoading type="spin" color="gray" height={30} width={30} /> : null}
                        <div className="comparisonBox-quizButton" onPointerDown={() => (this._listening ? this.stopListening() : this.startListening())}>
                            <FontAwesomeIcon icon="microphone" />
                        </div>
                        <div className="comparisonBox-quizButton" onPointerDown={this.handleRenderGPTClick}>
                            <FontAwesomeIcon icon="paper-plane" />
                        </div>
                        <div className="comparisonBox-langButton" onPointerDown={e => this.openContextMenu(e.clientX, e.clientY, false)}>
                            <FontAwesomeIcon icon="language" />
                        </div>
                        <div className="comparisonBox-langButton" onPointerDown={e => this.openContextMenu(e.clientX, e.clientY, true)} style={{ left: '50px', zIndex: '100' }}>
                            <FontAwesomeIcon icon="headphones" />
                        </div>
                    </div>
                );
            }
            // render a normal flashcard when not a QuizCard
            return (
                <div className="comparisonBox" onMouseEnter={() => this.hoverFlip(true)} onMouseLeave={() => this.hoverFlip(false)}>
                    {displayBox(`${this.fieldKey}_${side === 0 ? 1 : 0}`, side, this._props.PanelWidth() - 3)}
                    {this._loading ? <ReactLoading type="spin" color="gray" height={30} width={30} /> : null}
                    {this.flashcardMenu}
                </div>
            );
        }
        // render a comparison box that compares items side by side
        return (
            <div className="comparisonBox">
                <div className="comparisonBox-afterBox">
                    {displayBox(`${this.fieldKey}_2`, 1, this._props.PanelWidth() - 3)}
                </div>
                <div className="comparisonBox-beforeBox" style={{ width: `${this.clipWidth}%` }}>
                    {displayBox(`${this.fieldKey}_1`, 0, 0)}
                </div>
                <div
                    className="comparisonBox-slideBar"
                    style={{
                        left: `${this.clipWidth}%`,
                        // left-hand side of this cursor test is reconstructed: show a resize cursor when the slider is pinned near the right edge
                        cursor: this.clipWidth / 100 > (this._props.PanelWidth() - 5) / this._props.PanelWidth() ? 'w-resize' : undefined,
                    }}
                    onPointerDown={e => !this._isAnyChildContentActive && this.registerSliding(e, this._props.PanelWidth() / 2)} /* if clicked, return slide-bar to center */
                />
            </div>
        );
    }
}

Docs.Prototypes.TemplateMap.set(DocumentType.COMPARISON, {
    layout: { view: ComparisonBox, dataField: 'data' },
    options: {
        acl: '',
        backgroundColor: 'gray',
        dropAction: dropActionType.move,
        waitForDoubleClickToClick: 'always',
        _layout_reflowHorizontal: true,
        _layout_reflowVertical: true,
        _layout_nativeDimEditable: true,
        systemIcon: 'BsLayoutSplit',
    },
});
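// A minimal sketch of how this view is wired up elsewhere (call sites hypothetical):
// the TemplateMap entry above makes DocumentType.COMPARISON documents render with ComparisonBox
// bound to their 'data' field, and a host view embeds it via its layout string, e.g.:
//
//   const layout = ComparisonBox.LayoutString('data'); // "<ComparisonBox ... fieldKey='data'/>"-style layout string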