Optimize the performance overhead of pitch shifting

pull/1402/head
lyswhut 2023-06-04 12:53:46 +08:00
parent 2a2fdc57bd
commit b7fd32067f
4 changed files with 181 additions and 101 deletions


@@ -1,6 +1,6 @@
### Added
- Added experimental audio-effect settings: a 10-band equalizer, several built-in ambient reverb presets, pitch (key) shifting, and 3D surround sound (testing shows that pitch shifting may cause unexpected CPU usage; once it has been adjusted, fully disabling it requires resetting it to 1.00x and restarting the app)
- Added experimental audio-effect settings: a 10-band equalizer, several built-in ambient reverb presets, pitch (key) shifting, and 3D surround sound (pitch shifting has to process audio data in real time, which adds extra CPU usage)
- Added a pitch-compensation option to the playback-rate settings panel: after changing the playback rate you can choose whether pitch compensation is applied (enabled by default); see the sketch below
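In Chromium-based builds the pitch-compensation switch most likely maps onto the standard `HTMLMediaElement.preservesPitch` flag; a minimal sketch of such a toggle, assuming that mapping (`applyPlaybackRate` and its parameters are illustrative, not the project's code):

```ts
// Sketch: change the playback rate with an optional pitch-compensation toggle.
// `audio` is any HTMLAudioElement; preservesPitch is the standard media-element flag.
function applyPlaybackRate(audio: HTMLAudioElement, rate: number, pitchCompensation: boolean) {
  audio.preservesPitch = pitchCompensation // true (the default here) keeps the original pitch
  audio.playbackRate = rate                // e.g. 1.25 plays 25% faster
}
```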
### Fixed


@@ -219,27 +219,33 @@ export const startPanner = () => {
}, pannerInfo.speed * 10)
}
const loadPitchShifterNode = () => {
pitchShifterNodeLoadStatus = 'loading'
initAdvancedAudioFeatures()
// source -> analyser -> biquadFilter -> audioWorklet(pitch shifter) -> [(convolver & convolverSource)->convolverDynamicsCompressor] -> panner -> gain
void audioContext.audioWorklet.addModule(new URL(
/* webpackChunkName: 'pitch_shifter.audioWorklet' */
'./pitch-shifter/phase-vocoder.js',
import.meta.url,
)).then(() => {
console.log('pitch shifter audio worklet loaded')
pitchShifterNode = new AudioWorkletNode(audioContext, 'phase-vocoder-processor')
let pitchFactorParam = pitchShifterNode.parameters.get('pitchFactor')
if (!pitchFactorParam) return
pitchShifterNodePitchFactor = pitchFactorParam
pitchShifterNodeLoadStatus = 'unconnect'
if (pitchShifterNodeTempValue == 1) return
connectPitchShifterNode()
})
let isConnected = true
const connectNode = () => {
if (isConnected) return
console.log('connect Node')
analyser?.connect(biquads.get(`hz${freqs[0]}`) as BiquadFilterNode)
isConnected = true
if (pitchShifterNodeTempValue == 1 && pitchShifterNodeLoadStatus == 'connected') {
disconnectPitchShifterNode()
}
}
const disconnectNode = () => {
if (!isConnected) return
console.log('disconnect Node')
analyser?.disconnect()
isConnected = false
if (pitchShifterNodeTempValue == 1 && pitchShifterNodeLoadStatus == 'connected') {
disconnectPitchShifterNode()
}
}
const connectPitchShifterNode = () => {
console.log('connect Pitch Shifter Node')
audio!.addEventListener('playing', connectNode)
audio!.addEventListener('pause', disconnectNode)
audio!.addEventListener('waiting', disconnectNode)
audio!.addEventListener('emptied', disconnectNode)
if (audio!.paused) disconnectNode()
const lastBiquadFilter = (biquads.get(`hz${freqs.at(-1) as Freqs}`) as BiquadFilterNode)
lastBiquadFilter.disconnect()
lastBiquadFilter.connect(pitchShifterNode)
@@ -252,20 +258,45 @@ const connectPitchShifterNode = () => {
pitchShifterNodeLoadStatus = 'connected'
pitchShifterNodePitchFactor.value = pitchShifterNodeTempValue
}
// const disconnectPitchShifterNode = () => {
// const lastBiquadFilter = (biquads.get(`hz${freqs.at(-1) as Freqs}`) as BiquadFilterNode)
// lastBiquadFilter.disconnect()
// lastBiquadFilter.connect(convolver)
// lastBiquadFilter.connect(convolverSourceGainNode)
// pitchShifterNodeLoadStatus = 'unconnect'
// }
const disconnectPitchShifterNode = () => {
console.log('disconnect Pitch Shifter Node')
const lastBiquadFilter = (biquads.get(`hz${freqs.at(-1) as Freqs}`) as BiquadFilterNode)
lastBiquadFilter.disconnect()
lastBiquadFilter.connect(convolver)
lastBiquadFilter.connect(convolverSourceGainNode)
pitchShifterNodeLoadStatus = 'unconnect'
audio!.removeEventListener('playing', connectNode)
audio!.removeEventListener('pause', disconnectNode)
audio!.removeEventListener('waiting', disconnectNode)
audio!.removeEventListener('emptied', disconnectNode)
connectNode()
}
const loadPitchShifterNode = () => {
pitchShifterNodeLoadStatus = 'loading'
initAdvancedAudioFeatures()
// source -> analyser -> biquadFilter -> audioWorklet(pitch shifter) -> [(convolver & convolverSource)->convolverDynamicsCompressor] -> panner -> gain
void audioContext.audioWorklet.addModule(new URL(
/* webpackChunkName: 'pitch_shifter.audioWorklet' */
'./pitch-shifter/phase-vocoder.js',
import.meta.url,
)).then(() => {
console.log('pitch shifter audio worklet loaded')
// https://github.com/olvb/phaze/issues/26#issuecomment-1574629971
pitchShifterNode = new AudioWorkletNode(audioContext, 'phase-vocoder-processor', { outputChannelCount: [2] })
let pitchFactorParam = pitchShifterNode.parameters.get('pitchFactor')
if (!pitchFactorParam) return
pitchShifterNodePitchFactor = pitchFactorParam
pitchShifterNodeLoadStatus = 'unconnect'
if (pitchShifterNodeTempValue == 1) return
connectPitchShifterNode()
})
}
export const setPitchShifter = (val: number) => {
// console.log('setPitchShifter', val)
pitchShifterNodeTempValue = val
// if (val == 1 && pitchShifterNodeLoadStatus == 'connected') {
// disconnectPitchShifterNode()
// return
// }
switch (pitchShifterNodeLoadStatus) {
case 'loading':
break
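Taken together, the player-side changes above gate the effect chain on the `<audio>` element's playback state: `connectNode()`/`disconnectNode()` wire and unwire the analyser on `playing`/`pause`/`waiting`/`emptied`, so the pitch-shifter worklet stops receiving (and processing) audio whenever playback is idle. A self-contained sketch of that pattern, using illustrative names (`audioEl`, `sourceNode`, `workletNode`, `destination` are not the project's identifiers):

```ts
// Sketch: keep an expensive AudioWorklet connected only while the <audio> element is playing.
function gateWorklet(
  audioEl: HTMLAudioElement,
  sourceNode: AudioNode,
  workletNode: AudioWorkletNode,
  destination: AudioNode,
) {
  let connected = false

  const connect = () => {
    if (connected) return
    sourceNode.connect(workletNode)
    workletNode.connect(destination)
    connected = true
  }
  const disconnect = () => {
    if (!connected) return
    sourceNode.disconnect(workletNode)
    workletNode.disconnect(destination)
    connected = false
  }

  // Only feed the worklet while the element is actually producing sound.
  audioEl.addEventListener('playing', connect)
  audioEl.addEventListener('pause', disconnect)
  audioEl.addEventListener('waiting', disconnect)
  audioEl.addEventListener('emptied', disconnect)
  if (!audioEl.paused) connect()

  // Teardown, mirroring what disconnectPitchShifterNode() does above.
  return () => {
    audioEl.removeEventListener('playing', connect)
    audioEl.removeEventListener('pause', disconnect)
    audioEl.removeEventListener('waiting', disconnect)
    audioEl.removeEventListener('emptied', disconnect)
    disconnect()
  }
}
```

Disconnecting at the analyser starves the worklet's input, which the worklet-side changes in the next file detect (fewer than two input channels) and use to skip the FFT work.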


@@ -1,13 +1,18 @@
/* eslint-disable no-var */
const WEBAUDIO_BLOCK_SIZE = 128
/** Overlap-Add Node */
class OLAProcessor extends globalThis.AudioWorkletProcessor {
constructor(options) {
super(options)
this.keepReturnTrue = true
this.processNow = false
this.nbInputs = options.numberOfInputs
this.nbOutputs = options.numberOfOutputs
this.paused = true
this.blockSize = options.processorOptions.blockSize
// TODO for now, the only support hop size is the size of a web audio block
@@ -15,46 +20,56 @@ class OLAProcessor extends globalThis.AudioWorkletProcessor {
this.nbOverlaps = this.blockSize / this.hopSize
this.lastSilencedHopCount = 0
this.nbOverlaps2x = this.nbOverlaps * 2
this.fakeEmptyInputs = [new Array(2).fill(new Float32Array(WEBAUDIO_BLOCK_SIZE))]
// pre-allocate input buffers (will be reallocated if needed)
this.inputBuffers = new Array(this.nbInputs)
this.inputBuffersHead = new Array(this.nbInputs)
this.inputBuffersToSend = new Array(this.nbInputs)
// default to 1 channel per input until we know more
for (let i = 0; i < this.nbInputs; i++) {
this.allocateInputChannels(i, 1)
// assume 2 channels per input
for (var i = 0; i < this.nbInputs; i++) {
this.allocateInputChannels(i, 2)
}
// pre-allocate input buffers (will be reallocated if needed)
this.outputBuffers = new Array(this.nbOutputs)
this.outputBuffersToRetrieve = new Array(this.nbOutputs)
// default to 1 channel per output until we know more
for (let i = 0; i < this.nbOutputs; i++) {
this.allocateOutputChannels(i, 1)
// assume 2 channels per output
for (i = 0; i < this.nbOutputs; i++) {
this.allocateOutputChannels(i, 2)
}
this.port.onmessage = (e) => this.keepReturnTrue = false
}
/** Handles dynamic reallocation of input/output channels buffer
(channel numbers may vary during lifecycle) **/
reallocateChannelsIfNeeded(inputs, outputs) {
for (let i = 0; i < this.nbInputs; i++) {
reallocateChannelsIfNeeded(inputs, outputs, force) {
for (var i = 0; i < this.nbInputs; i++) {
let nbChannels = inputs[i].length
if (nbChannels != this.inputBuffers[i].length) {
if (force || (nbChannels != this.inputBuffers[i].length)) {
this.allocateInputChannels(i, nbChannels)
// console.log("reallocateChannelsIfNeeded");
}
}
for (let i = 0; i < this.nbOutputs; i++) {
for (i = 0; i < this.nbOutputs; i++) {
let nbChannels = outputs[i].length
if (nbChannels != this.outputBuffers[i].length) {
if (force || (nbChannels != this.outputBuffers[i].length)) {
this.allocateOutputChannels(i, nbChannels)
// console.log("reallocateChannelsIfNeeded");
}
}
}
allocateInputChannels(inputIndex, nbChannels) {
// allocate input buffers
// console.log("allocateInputChannels");
this.inputBuffers[inputIndex] = new Array(nbChannels)
for (let i = 0; i < nbChannels; i++) {
for (var i = 0; i < nbChannels; i++) {
this.inputBuffers[inputIndex][i] = new Float32Array(this.blockSize + WEBAUDIO_BLOCK_SIZE)
this.inputBuffers[inputIndex][i].fill(0)
}
@@ -63,7 +78,7 @@ class OLAProcessor extends globalThis.AudioWorkletProcessor {
// (cannot directly send a pointer/subarray because input may be modified)
this.inputBuffersHead[inputIndex] = new Array(nbChannels)
this.inputBuffersToSend[inputIndex] = new Array(nbChannels)
for (let i = 0; i < nbChannels; i++) {
for (i = 0; i < nbChannels; i++) {
this.inputBuffersHead[inputIndex][i] = this.inputBuffers[inputIndex][i].subarray(0, this.blockSize)
this.inputBuffersToSend[inputIndex][i] = new Float32Array(this.blockSize)
}
@@ -72,47 +87,40 @@ class OLAProcessor extends globalThis.AudioWorkletProcessor {
allocateOutputChannels(outputIndex, nbChannels) {
// allocate output buffers
this.outputBuffers[outputIndex] = new Array(nbChannels)
for (let i = 0; i < nbChannels; i++) {
for (var i = 0; i < nbChannels; i++) {
this.outputBuffers[outputIndex][i] = new Float32Array(this.blockSize)
this.outputBuffers[outputIndex][i].fill(0)
}
// allocate output buffers to retrieve
// (cannot send a pointer/subarray because new output has to be add to exising output)
// (cannot send a pointer/subarray because new output has to be add to existing output)
this.outputBuffersToRetrieve[outputIndex] = new Array(nbChannels)
for (let i = 0; i < nbChannels; i++) {
for (i = 0; i < nbChannels; i++) {
this.outputBuffersToRetrieve[outputIndex][i] = new Float32Array(this.blockSize)
this.outputBuffersToRetrieve[outputIndex][i].fill(0)
}
}
checkForNotSilence(value) {
return value !== 0
}
/** Read next web audio block to input buffers **/
readInputs(inputs) {
// when playback is paused, we may stop receiving new samples
// if (inputs[0].length && inputs[0][0].length == 0) {
if (!inputs[0].length || !inputs[0][0].length || inputs[0][0][0] == 0) {
for (let i = 0; i < this.nbInputs; i++) {
for (let j = 0; j < this.inputBuffers[i].length; j++) {
this.inputBuffers[i][j].fill(0, this.blockSize)
}
}
return
}
/* if (inputs[0].length && inputs[0][0].length == 0) {
for (var i = 0; i < this.nbInputs; i++) {
for (var j = 0; j < this.inputBuffers[i].length; j++) {
this.inputBuffers[i][j].fill(0, this.blockSize);
}
}
return;
} */
for (let i = 0; i < this.nbInputs; i++) {
for (let j = 0; j < this.inputBuffers[i].length; j++) {
let webAudioBlock = inputs[i][j]
this.inputBuffers[i][j].set(webAudioBlock, this.blockSize)
}
}
}
/** Write next web audio block from output buffers **/
writeOutputs(outputs) {
for (let i = 0; i < this.nbInputs; i++) {
for (let j = 0; j < this.inputBuffers[i].length; j++) {
let webAudioBlock = this.outputBuffers[i][j].subarray(0, WEBAUDIO_BLOCK_SIZE)
outputs[i][j].set(webAudioBlock)
this.inputBuffers[i][j]?.set(webAudioBlock, this.blockSize)
}
}
}
@@ -126,16 +134,6 @@ class OLAProcessor extends globalThis.AudioWorkletProcessor {
}
}
/** Shift left content of output buffers to receive new web audio block **/
shiftOutputBuffers() {
for (let i = 0; i < this.nbOutputs; i++) {
for (let j = 0; j < this.outputBuffers[i].length; j++) {
this.outputBuffers[i][j].copyWithin(0, WEBAUDIO_BLOCK_SIZE)
this.outputBuffers[i][j].subarray(this.blockSize - WEBAUDIO_BLOCK_SIZE).fill(0)
}
}
}
/** Copy contents of input buffers to buffer actually sent to process **/
prepareInputBuffersToSend() {
for (let i = 0; i < this.nbInputs; i++) {
@@ -156,24 +154,71 @@ class OLAProcessor extends globalThis.AudioWorkletProcessor {
}
}
/** Write next web audio block from output buffers **/
writeOutputs(outputs) {
for (let i = 0; i < this.nbInputs; i++) {
for (let j = 0; j < this.inputBuffers[i].length; j++) {
let webAudioBlock = this.outputBuffers[i][j].subarray(0, WEBAUDIO_BLOCK_SIZE)
outputs[i][j]?.set(webAudioBlock)
}
}
}
/** Shift left content of output buffers to receive new web audio block **/
shiftOutputBuffers() {
for (let i = 0; i < this.nbOutputs; i++) {
for (let j = 0; j < this.outputBuffers[i].length; j++) {
this.outputBuffers[i][j].copyWithin(0, WEBAUDIO_BLOCK_SIZE)
this.outputBuffers[i][j].subarray(this.blockSize - WEBAUDIO_BLOCK_SIZE).fill(0)
}
}
}
process(inputs, outputs, params) {
// if (!inputs[0].length || !inputs[0][0].length || inputs[0][0][0] == 0) return true
// this.reallocateChannelsIfNeeded(inputs, outputs)
this.readInputs(inputs)
this.shiftInputBuffers()
this.prepareInputBuffersToSend()
this.processOLA(this.inputBuffersToSend, this.outputBuffersToRetrieve, params)
this.handleOutputBuffersToRetrieve()
this.writeOutputs(outputs)
this.shiftOutputBuffers()
return true
// console.log(inputs[0].length ? "active" : "inactive");
// this.reallocateChannelsIfNeeded(inputs, outputs);
// if (inputs[0][0].some(this.checkForNotSilence) || inputs[0][1].some(this.checkForNotSilence))
// console.log(inputs[0].length)
if (inputs[0].length < 2) {
// DUE TO CHROME BUG/INCONSISTENCY, WHEN INACTIVE SILENT NODE IS CONNECTED, inputs[0] IS EITHER EMPTY OR CONTAINS 1 CHANNEL OF SILENT AUDIO DATA, REQUIRES SPECIAL HANDLING
// if (inputs[0][0].some(this.checkForNotSilence)) console.warn("single channel not silence exception!");
if (this.lastSilencedHopCount < this.nbOverlaps2x) {
// ALLOW nbOverlaps2x BLOCKS OF SILENCE TO COME THROUGH TO ACCOMODATE LATENCY TAIL
this.lastSilencedHopCount++
inputs = this.fakeEmptyInputs
this.processNow = true
} else {
// console.warn("skipping processing");
if (this.lastSilencedHopCount === this.nbOverlaps2x) {
this.lastSilencedHopCount++
this.reallocateChannelsIfNeeded(this.fakeEmptyInputs, outputs, true)
// console.warn("reallocateChannels");
}
this.processNow = false // ENABLES SKIPPING UNNEEDED PROCESSING OF SILENT INPUT
}
} else {
if (this.lastSilencedHopCount) {
this.lastSilencedHopCount = 0
// this.reallocateChannelsIfNeeded(inputs, outputs, true);
// console.warn("reallocateChannels");
}
this.processNow = true
}
if (this.processNow) {
this.readInputs(inputs)
this.shiftInputBuffers()
this.prepareInputBuffersToSend()
this.processOLA(this.inputBuffersToSend, this.outputBuffersToRetrieve, params)
this.handleOutputBuffersToRetrieve()
this.writeOutputs(outputs)
this.shiftOutputBuffers()
}
return this.keepReturnTrue
}
processOLA(inputs, outputs, params) {
console.assert(false, 'Not overriden')
}
/* processOLA(inputs, outputs, params) {
console.assert(false, "Not overriden");
} */
}
export default OLAProcessor
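The patched `process()` above avoids running the overlap-add/FFT pipeline while the node is being fed silence: when `inputs[0]` arrives with fewer than two channels (which, per the comment, is how Chrome represents an inactive upstream node), it lets `nbOverlaps * 2` more hops through to flush the latency tail and then skips processing, and it keeps returning `this.keepReturnTrue`, which a port message flips to `false` so the node can be released. A stripped-down sketch of that gating idea, with illustrative names and a plain pass-through in place of the real DSP:

```ts
// Minimal ambient declarations for the AudioWorklet global scope (provided by the browser).
declare const AudioWorkletProcessor: new () => { readonly port: MessagePort }
declare function registerProcessor(name: string, ctor: new () => unknown): void

class SilenceGatedProcessor extends AudioWorkletProcessor {
  private alive = true          // flipped by a port message so the browser can drop the node
  private silentHops = 0        // consecutive silent render quanta seen so far
  private readonly tailHops = 8 // illustrative stand-in for nbOverlaps * 2 (latency tail)

  constructor() {
    super()
    this.port.onmessage = () => { this.alive = false }
  }

  process(inputs: Float32Array[][], outputs: Float32Array[][]): boolean {
    const input = inputs[0]
    const inactive = input.length < 2 // inactive/silent upstream shows up as 0 or 1 channels

    if (inactive) {
      // Once the tail is flushed, skip the DSP entirely; outputs stay zeroed for free.
      if (this.silentHops >= this.tailHops) return this.alive
      this.silentHops++
    } else {
      this.silentHops = 0
    }

    // The expensive overlap-add / FFT work would live here; this sketch just passes audio through.
    for (let ch = 0; ch < outputs[0].length; ch++) {
      if (input[ch]) outputs[0][ch].set(input[ch])
    }
    return this.alive
  }
}

registerProcessor('silence-gated-processor', SilenceGatedProcessor)
```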


@@ -1,9 +1,10 @@
// https://github.com/olvb/phaze/issues/26#issuecomment-1573938170
// https://github.com/olvb/phaze
import FFT from './fft'
import OLAProcessor from './ola-processor'
const BUFFERED_BLOCK_SIZE = 4096
const DEFAULT_BUFFERED_BLOCK_SIZE = 4096
function genHannWindow(length) {
let win = new Float32Array(length)
@@ -18,13 +19,17 @@ class PhaseVocoderProcessor extends OLAProcessor {
return [{
name: 'pitchFactor',
defaultValue: 1.0,
}]
automationRate: 'k-rate',
}, /* ,
{
name: 'pitchCents',
defaultValue: 0.0,
automationRate: 'k-rate'
} */]
}
constructor(options) {
options.processorOptions = {
blockSize: BUFFERED_BLOCK_SIZE,
}
(options.processorOptions ??= {}).blockSize ??= DEFAULT_BUFFERED_BLOCK_SIZE
super(options)
this.fftSize = this.blockSize
@@ -43,9 +48,8 @@ class PhaseVocoderProcessor extends OLAProcessor {
}
processOLA(inputs, outputs, parameters) {
// no automation, take last value
// const pitchFactor = parameters.pitchFactor[parameters.pitchFactor.length - 1]
const pitchFactor = parameters.pitchFactor[0]
// k-rate automation, param arrays only have single value
const pitchFactor = parameters.pitchFactor[0]/* || Math.pow(2, (parameters.pitchCents[0]/12)) */
for (let i = 0; i < this.nbInputs; i++) {
for (let j = 0; j < inputs[i].length; j++) {
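Once this module is registered, the main thread drives `pitchFactor` as a k-rate AudioParam, as the player code earlier in this diff does. A sketch of that wiring, with `ctx`, `input`, `output` and `createPitchShifter` as placeholders rather than the project's identifiers:

```ts
// Sketch: create the phase-vocoder node and drive its k-rate pitchFactor from the main thread.
async function createPitchShifter(ctx: AudioContext, input: AudioNode, output: AudioNode) {
  await ctx.audioWorklet.addModule(new URL('./pitch-shifter/phase-vocoder.js', import.meta.url))

  // Two output channels are forced to match the processor's stereo buffers
  // (see the phaze issue linked in the player code).
  const node = new AudioWorkletNode(ctx, 'phase-vocoder-processor', { outputChannelCount: [2] })
  input.connect(node)
  node.connect(output)

  const pitchFactor = node.parameters.get('pitchFactor')
  return {
    node,
    // factor 1.0 = original pitch; a shift of n semitones corresponds to 2 ** (n / 12)
    setPitch(factor: number) {
      if (pitchFactor) pitchFactor.value = factor
    },
  }
}
```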