1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
|
import axios from 'axios';
import { computed, IReactionDisposer, makeObservable, reaction } from 'mobx';
import { observer } from 'mobx-react';
import * as React from 'react';
import { Doc, NumListCast } from '../../../../fields/Doc';
import { List } from '../../../../fields/List';
import { listSpec } from '../../../../fields/Schema';
import { Cast } from '../../../../fields/Types';
import { numberRange } from '../../../../Utils';
import { Colors } from '../../global/globalEnums';
import { ObservableReactComponent } from '../../ObservableReactComponent';
import './AudioWaveform.scss';
import { WaveCanvas } from './WaveCanvas';
/**
* AudioWaveform
*
* Used in CollectionStackedTimeline to render a canvas with a visual of an audio waveform for AudioBox and VideoBox documents.
* Bins the audio data into audioBuckets, which are passed to the WaveCanvas component to render the lines.
* Calculates new buckets each time a new zoom factor or new set of trim bounds is created and stores it in a field on the layout doc with a title indicating the bounds and zoom for that list (see audioBucketField)
*/
/**
 * Props for {@link AudioWaveform}.
 */
export interface AudioWaveformProps {
    duration: number; // length of media clip
    rawDuration: number; // length of underlying media data
    mediaPath: string; // URL of the audio file that is fetched and decoded to build the waveform
    layoutDoc: Doc; // layout document on which computed audio-bucket lists are cached
    clipStart: number; // start of the current trim bounds (same units as duration — presumably seconds; TODO confirm)
    clipEnd: number; // end of the current trim bounds
    zoomFactor: number; // timeline zoom; scales the number of waveform buckets computed
    PanelHeight: number; // available height for the waveform canvas, in CSS pixels
    PanelWidth: number; // available width for the waveform canvas, in CSS pixels
    fieldKey: string; // prefix for the layout-doc field keys that store bucket lists
    progress?: number; // playback progress fraction forwarded to WaveCanvas (defaults to 1)
}
@observer
export class AudioWaveform extends ObservableReactComponent<AudioWaveformProps> {
    /** Base number of buckets the decoded audio is divided into to draw waveform lines (scaled by zoomFactor). */
    public static NUMBER_OF_BUCKETS = 100;
    _disposer: IReactionDisposer | undefined; // disposes the bucket-computing reaction on unmount

    constructor(props: AudioWaveformProps) {
        super(props);
        makeObservable(this);
    }

    // Height of the rendered waveform in CSS pixels; clamped to a 50px minimum so the wave stays visible.
    get waveHeight() {
        return Math.max(50, this._props.PanelHeight);
    }
    get clipStart() {
        return this._props.clipStart;
    }
    get clipEnd() {
        return this._props.clipEnd;
    }
    get zoomFactor() {
        return this._props.zoomFactor;
    }

    // Cached bucket list for the current trim bounds / zoom, read from the layout doc.
    @computed get audioBuckets() {
        return NumListCast(this._props.layoutDoc[this.audioBucketField(this.clipStart, this.clipEnd, this.zoomFactor)]);
    }

    // Field key on the layout doc storing the bucket list for a given (start, end, zoomFactor) combination.
    // '.' in the bounds is replaced with '_' — presumably because '.' is not legal inside field keys; TODO confirm.
    audioBucketField = (start: number, end: number, zoomFactor: number) =>
        this._props.fieldKey + '_audioBuckets//' + start.toFixed(2).replace('.', '_') + '/' + end.toFixed(2).replace('.', '_') + '/' + zoomFactor * 10;

    componentWillUnmount() {
        this._disposer?.();
    }

    componentDidMount() {
        this._disposer = reaction(
            () => ({ clipStart: this.clipStart, clipEnd: this.clipEnd, fieldKey: this.audioBucketField(this.clipStart, this.clipEnd, this.zoomFactor), zoomFactor: this._props.zoomFactor }),
            ({ clipStart, clipEnd, fieldKey, zoomFactor }) => {
                if (!this._props.layoutDoc[fieldKey] && this._props.layoutDoc.layout_fieldKey !== 'layout_icon') {
                    // Setting the field here serves as a "lock" to prevent multiple attempts to create the waveform at nearly the same time.
                    // Seed it with a slice of the full-duration waveform (if one was already computed) as a quick
                    // approximation until createWaveformBuckets replaces it with an exact computation.
                    const waveform = Cast(this._props.layoutDoc[this.audioBucketField(0, this._props.rawDuration, 1)], listSpec('number'));
                    this._props.layoutDoc[fieldKey] = waveform && new List<number>(waveform.slice((clipStart / this._props.rawDuration) * waveform.length, (clipEnd / this._props.rawDuration) * waveform.length));
                    setTimeout(() => this.createWaveformBuckets(fieldKey, clipStart, clipEnd, zoomFactor));
                }
            },
            { fireImmediately: true }
        );
    }

    // Fetches and decodes the audio file, bins its samples into peak-amplitude buckets for the waveform,
    // and stores the resulting list on the layout doc under fieldKey.
    createWaveformBuckets = (fieldKey: string, clipStart: number, clipEnd: number, zoomFactor: number) => {
        axios({ url: this._props.mediaPath, responseType: 'arraybuffer' })
            .then(response => {
                const audioContext = new window.AudioContext();
                audioContext.decodeAudioData(
                    response.data,
                    buffer => {
                        const rawDecodedAudioData = buffer.getChannelData(0);
                        // Fractions of the underlying media covered by the current trim bounds.
                        const startInd = clipStart / this._props.rawDuration;
                        const endInd = clipEnd / this._props.rawDuration;
                        const decodedAudioData = rawDecodedAudioData.slice(Math.floor(startInd * rawDecodedAudioData.length), Math.floor(endInd * rawDecodedAudioData.length));
                        // At least one bucket even for tiny zoom factors, so the division below can never be by zero.
                        const numBuckets = Math.max(1, Math.floor(AudioWaveform.NUMBER_OF_BUCKETS * zoomFactor));
                        const bucketDataSize = Math.floor(decodedAudioData.length / numBuckets);
                        // Peak amplitude per bucket. abs() must be applied to each SAMPLE before maxing:
                        // the previous Math.abs(Math.max(p, sample)) form discarded negative-going peaks
                        // (max(0, negative) is 0), so buckets dominated by negative excursions drew as silence.
                        const bucketList = numberRange(numBuckets).map((i: number) => {
                            let peak = 0;
                            for (let j = 0; j < bucketDataSize; j++) {
                                peak = Math.max(peak, Math.abs(decodedAudioData[i * bucketDataSize + j]));
                            }
                            return peak / 2; // halved — presumably to leave vertical headroom in the canvas; TODO confirm
                        });
                        this._props.layoutDoc[fieldKey] = new List<number>(bucketList);
                        audioContext.close(); // release decoder/hardware resources; browsers cap live AudioContexts
                    },
                    error => {
                        audioContext.close();
                        console.error('AudioWaveform: failed to decode audio data', error);
                    }
                );
            })
            .catch(error => console.error('AudioWaveform: failed to fetch media', error));
    };

    render() {
        return (
            <div className="audioWaveform">
                <WaveCanvas
                    color={Colors.LIGHT_GRAY}
                    progressColor={Colors.MEDIUM_BLUE_ALT}
                    progress={this._props.progress ?? 1}
                    // guard: before buckets are computed the list is empty and 200/0 would be Infinity
                    barWidth={this.audioBuckets.length ? 200 / this.audioBuckets.length : 0}
                    peaks={this.audioBuckets}
                    width={(this._props.PanelWidth ?? 0) * window.devicePixelRatio}
                    height={this.waveHeight * window.devicePixelRatio}
                    pixelRatio={window.devicePixelRatio}
                />
            </div>
        );
    }
}
|