aboutsummaryrefslogtreecommitdiff
path: root/src/client/views/nodes/audio/AudioWaveform.tsx
diff options
context:
space:
mode:
Diffstat (limited to 'src/client/views/nodes/audio/AudioWaveform.tsx')
-rw-r--r--src/client/views/nodes/audio/AudioWaveform.tsx127
1 files changed, 127 insertions, 0 deletions
diff --git a/src/client/views/nodes/audio/AudioWaveform.tsx b/src/client/views/nodes/audio/AudioWaveform.tsx
new file mode 100644
index 000000000..1b1a85800
--- /dev/null
+++ b/src/client/views/nodes/audio/AudioWaveform.tsx
@@ -0,0 +1,127 @@
+import axios from 'axios';
+import { computed, IReactionDisposer, makeObservable, observable, reaction } from 'mobx';
+import { observer } from 'mobx-react';
+import * as React from 'react';
+import { Doc, NumListCast } from '../../../../fields/Doc';
+import { List } from '../../../../fields/List';
+import { listSpec } from '../../../../fields/Schema';
+import { Cast } from '../../../../fields/Types';
+import { copyProps, numberRange } from '../../../../Utils';
+import { Colors } from './../../global/globalEnums';
+import './AudioWaveform.scss';
+import { WaveCanvas } from './WaveCanvas';
+
+/**
+ * AudioWaveform
+ *
+ * Used in CollectionStackedTimeline to render a canvas with a visual of an audio waveform for AudioBox and VideoBox documents.
+ * Uses react-audio-waveform package.
+ * Bins the audio data into audioBuckets, which are passed to the package to render the lines.
+ * Calculates new buckets each time a new zoom factor or new set of trim bounds is created and stores it in a field on the layout doc with a title indicating the bounds and zoom for that list (see audioBucketField)
+ */
+
export interface AudioWaveformProps {
    duration: number; // length of media clip
    rawDuration: number; // length of underlying media data
    mediaPath: string; // URL of the audio file; fetched with axios and decoded via Web Audio
    layoutDoc: Doc; // document the computed audio-bucket lists are cached on (see audioBucketField)
    clipStart: number; // trim start of the clip — assumed seconds, same unit as rawDuration; TODO confirm against callers
    clipEnd: number; // trim end of the clip (same unit as clipStart)
    zoomFactor: number; // timeline zoom; scales the number of waveform buckets (NUMBER_OF_BUCKETS * zoomFactor)
    PanelHeight: number; // panel height; canvas height is this (min 50) * devicePixelRatio
    PanelWidth: number; // panel width; canvas width is this * devicePixelRatio
    fieldKey: string; // prefix used to build the bucket-cache field names on layoutDoc
    progress?: number; // playback progress passed to WaveCanvas; defaults to 1 when undefined
}
+
+@observer
+export class AudioWaveform extends React.Component<AudioWaveformProps> {
+ public static NUMBER_OF_BUCKETS = 100; // number of buckets data is divided into to draw waveform lines
+ _disposer: IReactionDisposer | undefined;
+ _prevProps: React.PropsWithChildren<AudioWaveformProps>;
+ @observable _props: React.PropsWithChildren<AudioWaveformProps>;
+ constructor(props: React.PropsWithChildren<AudioWaveformProps>) {
+ super(props);
+ this._props = this._prevProps = props;
+ makeObservable(this);
+ }
+
+ componentDidUpdate() {
+ copyProps(this);
+ }
+
+ get waveHeight() {
+ return Math.max(50, this._props.PanelHeight);
+ }
+
+ get clipStart() {
+ return this._props.clipStart;
+ }
+ get clipEnd() {
+ return this._props.clipEnd;
+ }
+ get zoomFactor() {
+ return this._props.zoomFactor;
+ }
+
+ @computed get audioBuckets() {
+ return NumListCast(this._props.layoutDoc[this.audioBucketField(this.clipStart, this.clipEnd, this.zoomFactor)]);
+ }
+
+ audioBucketField = (start: number, end: number, zoomFactor: number) => this._props.fieldKey + '_audioBuckets/' + '/' + start.toFixed(2).replace('.', '_') + '/' + end.toFixed(2).replace('.', '_') + '/' + zoomFactor * 10;
+
+ componentWillUnmount() {
+ this._disposer?.();
+ }
+
+ componentDidMount() {
+ this._disposer = reaction(
+ () => ({ clipStart: this.clipStart, clipEnd: this.clipEnd, fieldKey: this.audioBucketField(this.clipStart, this.clipEnd, this.zoomFactor), zoomFactor: this._props.zoomFactor }),
+ ({ clipStart, clipEnd, fieldKey, zoomFactor }) => {
+ if (!this._props.layoutDoc[fieldKey] && this._props.layoutDoc.layout_fieldKey != 'layout_icon') {
+ // setting these values here serves as a "lock" to prevent multiple attempts to create the waveform at nerly the same time.
+ const waveform = Cast(this._props.layoutDoc[this.audioBucketField(0, this._props.rawDuration, 1)], listSpec('number'));
+ this._props.layoutDoc[fieldKey] = waveform && new List<number>(waveform.slice((clipStart / this._props.rawDuration) * waveform.length, (clipEnd / this._props.rawDuration) * waveform.length));
+ setTimeout(() => this.createWaveformBuckets(fieldKey, clipStart, clipEnd, zoomFactor));
+ }
+ },
+ { fireImmediately: true }
+ );
+ }
+
+ // decodes the audio file into peaks for generating the waveform
+ createWaveformBuckets = (fieldKey: string, clipStart: number, clipEnd: number, zoomFactor: number) => {
+ axios({ url: this._props.mediaPath, responseType: 'arraybuffer' }).then(response =>
+ new window.AudioContext().decodeAudioData(response.data, buffer => {
+ const rawDecodedAudioData = buffer.getChannelData(0);
+ const startInd = clipStart / this._props.rawDuration;
+ const endInd = clipEnd / this._props.rawDuration;
+ const decodedAudioData = rawDecodedAudioData.slice(Math.floor(startInd * rawDecodedAudioData.length), Math.floor(endInd * rawDecodedAudioData.length));
+ const numBuckets = Math.floor(AudioWaveform.NUMBER_OF_BUCKETS * zoomFactor);
+
+ const bucketDataSize = Math.floor(decodedAudioData.length / numBuckets);
+ const brange = Array.from(Array(bucketDataSize));
+ const bucketList = numberRange(numBuckets).map((i: number) => brange.reduce((p, x, j) => Math.abs(Math.max(p, decodedAudioData[i * bucketDataSize + j])), 0) / 2);
+ this._props.layoutDoc[fieldKey] = new List<number>(bucketList);
+ })
+ );
+ };
+
+ render() {
+ return (
+ <div className="audioWaveform">
+ <WaveCanvas
+ color={Colors.LIGHT_GRAY}
+ progressColor={Colors.MEDIUM_BLUE_ALT}
+ progress={this._props.progress ?? 1}
+ barWidth={200 / this.audioBuckets.length}
+ //gradientColors={this.props.gradientColors}
+ peaks={this.audioBuckets}
+ width={(this._props.PanelWidth ?? 0) * window.devicePixelRatio}
+ height={this.waveHeight * window.devicePixelRatio}
+ pixelRatio={window.devicePixelRatio}
+ />
+ </div>
+ );
+ }
+}