Skip to content
Draft
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
51 changes: 38 additions & 13 deletions apps/common-app/src/examples/Oscillator/Oscillator.tsx
Original file line number Diff line number Diff line change
@@ -1,12 +1,12 @@
import React, { useRef, useState, useEffect, FC } from 'react';
import { StyleSheet, Text, View, Pressable } from 'react-native';
import { StyleSheet, Text, View, Pressable, Image } from 'react-native';
import {
AudioContext,
GainNode,
OscillatorNode,
StereoPannerNode,
} from 'react-native-audio-api';
import type { OscillatorType } from 'react-native-audio-api';
import type { OscillatorType, AudioBuffer, ConvolverNode } from 'react-native-audio-api';

import { Container, Slider, Spacer, Button } from '../../components';
import { layout, colors } from '../../styles';
Expand All @@ -31,6 +31,35 @@ const Oscillator: FC = () => {
const oscillatorRef = useRef<OscillatorNode | null>(null);
const gainRef = useRef<GainNode | null>(null);
const panRef = useRef<StereoPannerNode | null>(null);
const convolverRef = useRef<ConvolverNode | null>(null);

useEffect(() => {
const fetchImpulseResponse = async () => {
if (!audioContextRef.current) {
audioContextRef.current = new AudioContext();
}

const length = audioContextRef.current.sampleRate * 2; // 2 seconds
const impulse = audioContextRef.current.createBuffer(2, length, audioContextRef.current.sampleRate);

for (let channel = 0; channel < impulse.numberOfChannels; channel++) {
const channelData = impulse.getChannelData(channel);
for (let i = 0; i < length; i++) {
// Exponentially decay the impulse
channelData[i] = (Math.random() * 2 - 1) * Math.pow(1 - i / length, 2);
}
}

convolverRef.current = audioContextRef.current?.createConvolver();
convolverRef.current.buffer = impulse;
}

fetchImpulseResponse();

return () => {
audioContextRef.current?.close();
};
}, []);

const setup = () => {
if (!audioContextRef.current) {
Expand All @@ -50,7 +79,12 @@ const Oscillator: FC = () => {

oscillatorRef.current.connect(gainRef.current);
gainRef.current.connect(panRef.current);
panRef.current.connect(audioContextRef.current.destination);
if (convolverRef.current) {
panRef.current.connect(convolverRef.current);
convolverRef.current.connect(audioContextRef.current.destination);
} else {
panRef.current.connect(audioContextRef.current.destination);
}
};

const handleGainChange = (newValue: number) => {
Expand Down Expand Up @@ -91,6 +125,7 @@ const Oscillator: FC = () => {
} else {
setup();
oscillatorRef.current?.start(0);
oscillatorRef.current?.stop(audioContextRef.current?.currentTime!! + 2);
}

setIsPlaying((prev) => !prev);
Expand All @@ -103,16 +138,6 @@ const Oscillator: FC = () => {
}
};

useEffect(() => {
if (!audioContextRef.current) {
audioContextRef.current = new AudioContext();
}

return () => {
audioContextRef.current?.close();
};
}, []);

return (
<Container centered>
<Button onPress={handlePlayPause} title={isPlaying ? 'Pause' : 'Play'} />
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@
#include <audioapi/HostObjects/PeriodicWaveHostObject.h>
#include <audioapi/HostObjects/StereoPannerNodeHostObject.h>
#include <audioapi/HostObjects/AnalyserNodeHostObject.h>
#include <audioapi/HostObjects/ConvolverNodeHostObject.h>

#include <jsi/jsi.h>
#include <memory>
Expand Down Expand Up @@ -50,6 +51,7 @@ class BaseAudioContextHostObject : public JsiHostObject {
JSI_EXPORT_FUNCTION(BaseAudioContextHostObject, createBuffer),
JSI_EXPORT_FUNCTION(BaseAudioContextHostObject, createPeriodicWave),
JSI_EXPORT_FUNCTION(BaseAudioContextHostObject, createAnalyser),
JSI_EXPORT_FUNCTION(BaseAudioContextHostObject, createConvolver),
JSI_EXPORT_FUNCTION(BaseAudioContextHostObject, decodeAudioData),
JSI_EXPORT_FUNCTION(BaseAudioContextHostObject, decodeAudioDataSource),
JSI_EXPORT_FUNCTION(BaseAudioContextHostObject, decodePCMAudioDataInBase64));
Expand Down Expand Up @@ -167,6 +169,13 @@ JSI_HOST_FUNCTION(createBufferQueueSource) {
return jsi::Object::createFromHostObject(runtime, analyserHostObject);
}

// context.createConvolver(): constructs a native ConvolverNode and wraps
// it in a host object so JS can use it like a Web Audio ConvolverNode.
JSI_HOST_FUNCTION(createConvolver) {
  auto node = context_->createConvolver();
  return jsi::Object::createFromHostObject(
      runtime, std::make_shared<ConvolverNodeHostObject>(node));
}

JSI_HOST_FUNCTION(decodeAudioDataSource) {
auto sourcePath = args[0].getString(runtime).utf8(runtime);

Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,56 @@
#pragma once

#include <audioapi/HostObjects/AudioNodeHostObject.h>
#include <audioapi/core/effects/ConvolverNode.h>
#include <audioapi/HostObjects/AudioBufferHostObject.h>
#include <iostream>

#include <memory>
#include <vector>

namespace audioapi {
using namespace facebook;

// JSI bridge for ConvolverNode: exposes the `normalize` and `buffer`
// properties (get + set) to JavaScript.
class ConvolverNodeHostObject : public AudioNodeHostObject {
 public:
  explicit ConvolverNodeHostObject(const std::shared_ptr<ConvolverNode> &node)
      : AudioNodeHostObject(node) {
    addGetters(
        JSI_EXPORT_PROPERTY_GETTER(ConvolverNodeHostObject, normalize),
        JSI_EXPORT_PROPERTY_GETTER(ConvolverNodeHostObject, buffer));
    addSetters(
        JSI_EXPORT_PROPERTY_SETTER(ConvolverNodeHostObject, normalize),
        JSI_EXPORT_PROPERTY_SETTER(ConvolverNodeHostObject, buffer));
  }

  // normalize: whether the node scales output by the impulse response's
  // normalization factor.
  JSI_PROPERTY_GETTER(normalize) {
    auto convolverNode = std::static_pointer_cast<ConvolverNode>(node_);
    return {convolverNode->getNormalize_()};
  }

  // buffer: the impulse-response AudioBuffer, or JS null when unset.
  JSI_PROPERTY_GETTER(buffer) {
    auto convolverNode = std::static_pointer_cast<ConvolverNode>(node_);
    auto buffer = convolverNode->getBuffer();
    if (!buffer) {
      // ConvolverNode starts with a null buffer; surface that as JS null
      // instead of wrapping a null shared_ptr in a host object.
      return jsi::Value::null();
    }
    auto bufferHostObject = std::make_shared<AudioBufferHostObject>(buffer);
    return jsi::Object::createFromHostObject(runtime, bufferHostObject);
  }

  JSI_PROPERTY_SETTER(normalize) {
    auto convolverNode = std::static_pointer_cast<ConvolverNode>(node_);
    convolverNode->setNormalize(value.getBool());
  }

  // Assigning null clears the impulse response; otherwise unwrap the
  // AudioBufferHostObject and hand its buffer to the node.
  JSI_PROPERTY_SETTER(buffer) {
    auto convolverNode = std::static_pointer_cast<ConvolverNode>(node_);
    if (value.isNull()) {
      convolverNode->setBuffer(nullptr);
      return;
    }

    auto bufferHostObject =
        value.getObject(runtime).asHostObject<AudioBufferHostObject>(runtime);
    convolverNode->setBuffer(bufferHostObject->audioBuffer_);
  }
};
} // namespace audioapi
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@
#include <audioapi/core/analysis/AnalyserNode.h>
#include <audioapi/core/destinations/AudioDestinationNode.h>
#include <audioapi/core/effects/BiquadFilterNode.h>
#include <audioapi/core/effects/ConvolverNode.h>
#include <audioapi/core/effects/CustomProcessorNode.h>
#include <audioapi/core/effects/GainNode.h>
#include <audioapi/core/effects/StereoPannerNode.h>
Expand All @@ -15,6 +16,7 @@
#include <audioapi/utils/AudioArray.h>
#include <audioapi/utils/AudioBus.h>
#include <audioapi/utils/CircularAudioArray.h>
#include <iostream>

namespace audioapi {

Expand Down Expand Up @@ -117,6 +119,12 @@ std::shared_ptr<AnalyserNode> BaseAudioContext::createAnalyser() {
return analyser;
}

// Creates a new ConvolverNode owned by this context and registers it with
// the node manager so it participates in audio-graph processing.
std::shared_ptr<ConvolverNode> BaseAudioContext::createConvolver() {
  auto convolver = std::make_shared<ConvolverNode>(this);
  // The node manager keeps the node processed as part of the graph.
  nodeManager_->addProcessingNode(convolver);
  return convolver;
}

std::shared_ptr<AudioBuffer> BaseAudioContext::decodeAudioDataSource(
const std::string &path) {
auto audioBus = audioDecoder_->decodeWithFilePath(path);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -29,6 +29,7 @@ class AudioBufferQueueSourceNode;
class AudioDecoder;
class AnalyserNode;
class AudioEventHandlerRegistry;
class ConvolverNode;

class BaseAudioContext {
public:
Expand All @@ -55,6 +56,7 @@ class BaseAudioContext {
bool disableNormalization,
int length);
std::shared_ptr<AnalyserNode> createAnalyser();
std::shared_ptr<ConvolverNode> createConvolver();

std::shared_ptr<AudioBuffer> decodeAudioDataSource(const std::string &path);
std::shared_ptr<AudioBuffer> decodeAudioData(const void *data, size_t size);
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,95 @@
#include <audioapi/core/BaseAudioContext.h>
#include <audioapi/core/effects/ConvolverNode.h>
#include <audioapi/core/sources/AudioBuffer.h>
#include <audioapi/dsp/AudioUtils.h>
#include <audioapi/dsp/FFT.h>
#include <audioapi/utils/AudioArray.h>
#include <iostream>

namespace audioapi {
// A new ConvolverNode starts with normalization enabled and no impulse
// response: both the buffer and the convolution engine stay null until
// setBuffer() is called.
ConvolverNode::ConvolverNode(BaseAudioContext *context) : AudioNode(context) {
  normalize_ = true;
  buffer_ = nullptr;
  convolver_ = nullptr;
  isInitialized_ = true;
}

// Returns whether impulse-response normalization is enabled
// (true by default; see setNormalize()).
bool ConvolverNode::getNormalize_() const {
  return normalize_;
}

// Returns the currently assigned impulse-response buffer. May be a null
// shared_ptr when no buffer has been set yet (see the constructor).
const std::shared_ptr<AudioBuffer> &ConvolverNode::getBuffer() const {
  return buffer_;
}

// Enables/disables impulse-response normalization. Turning it on with a
// buffer present recomputes the scale; turning it off pins the scale to 1.
void ConvolverNode::setNormalize(bool normalize) {
  const bool changed = (normalize_ != normalize);
  normalize_ = normalize;
  if (changed && normalize && buffer_) {
    calculateNormalizationScale();
  }
  if (!normalize) {
    scaleFactor_ = 1.0f;
  }
}

void ConvolverNode::setBuffer(const std::shared_ptr<AudioBuffer> &buffer) {
if (buffer_ != buffer) {
buffer_ = buffer;
if (normalize_)
calculateNormalizationScale();
convolver_ = std::make_shared<Convolver>();
auto audioArray = AudioArray(buffer->getLength());
memcpy(
audioArray.getData(), buffer->getChannelData(0), buffer->getLength());
convolver_->init(128, audioArray, audioArray.getSize());
}
}

// Convolves channel 0 of the bus with the impulse response (in place),
// applies the normalization scale factor, and mirrors the mono result to
// all remaining channels.
void ConvolverNode::processNode(
    const std::shared_ptr<AudioBus> &processingBus,
    int framesToProcess) {
  // The constructor leaves convolver_ null until setBuffer() runs, so a
  // node processed before a buffer is assigned must pass audio through
  // untouched instead of dereferencing a null engine.
  if (!convolver_) {
    return;
  }

  auto channelZero = processingBus->getChannel(0);
  convolver_->process(*channelZero, *channelZero, framesToProcess);

  // scaleFactor_ is 1.0 when normalization is disabled.
  for (int i = 0; i < framesToProcess; i++) {
    channelZero->getData()[i] *= scaleFactor_;
  }

  // Copy the convolved channel to every other output channel; the loop
  // naturally no-ops for mono buses.
  for (int channel = 1; channel < processingBus->getNumberOfChannels();
       ++channel) {
    processingBus->getChannel(channel)->copy(processingBus->getChannel(0));
  }
}

// Computes scaleFactor_ from the impulse response's RMS power so that
// responses of different energies produce comparable output levels
// (the Web Audio API normalization-scale algorithm).
void ConvolverNode::calculateNormalizationScale() {
  // Defensive: a null or degenerate buffer would otherwise crash or
  // divide by zero below.
  if (!buffer_) {
    scaleFactor_ = 1.0f;
    return;
  }

  int numberOfChannels = buffer_->getNumberOfChannels();
  int length = buffer_->getLength();
  if (numberOfChannels == 0 || length == 0) {
    scaleFactor_ = 1.0f;
    return;
  }

  // Total energy across all channels of the impulse response.
  float power = 0;
  for (int channel = 0; channel < numberOfChannels; ++channel) {
    float channelPower = 0;
    auto channelData = buffer_->getChannelData(channel);
    for (int i = 0; i < length; ++i) {
      float sample = channelData[i];
      channelPower += sample * sample;
    }
    power += channelPower;
  }

  // RMS over all samples. std::sqrtf is not guaranteed by <cmath>; the
  // std::sqrt float overload is.
  power = std::sqrt(power / (numberOfChannels * length));
  if (power < MinPower) {
    // Clamp near-silent responses so the scale factor stays bounded.
    power = MinPower;
  }
  scaleFactor_ = 1 / power;
  scaleFactor_ *= GainCalibration;
  scaleFactor_ *= GainCalibrationSampleRate / buffer_->getSampleRate();

  // Heuristic for 4-channel ("true stereo") responses, which sum two
  // convolutions per output channel.
  if (numberOfChannels == 4)
    scaleFactor_ *= 0.5;
}
} // namespace audioapi
Original file line number Diff line number Diff line change
@@ -0,0 +1,38 @@
#pragma once

#include <audioapi/core/AudioNode.h>
#include <audioapi/core/AudioParam.h>
#include <audioapi/dsp/Convolver.h>

#include <memory>
#include <vector>

namespace audioapi {

class AudioBus;
class AudioBuffer;

// Audio-graph node that convolves its input with an impulse-response
// AudioBuffer, optionally normalizing output level to the buffer's energy.
class ConvolverNode : public AudioNode {
 public:
  explicit ConvolverNode(BaseAudioContext *context);

  // Trailing underscore kept for source compatibility with existing callers.
  [[nodiscard]] bool getNormalize_() const;
  // Current impulse response; may be a null shared_ptr when unset.
  [[nodiscard]] const std::shared_ptr<AudioBuffer> &getBuffer() const;
  void setNormalize(bool normalize);
  void setBuffer(const std::shared_ptr<AudioBuffer> &buffer);

 protected:
  // Convolves the bus with the assigned impulse response in place.
  void processNode(const std::shared_ptr<AudioBus>& processingBus, int framesToProcess) override;

 private:
  void calculateNormalizationScale();

  bool normalize_ = true;
  std::shared_ptr<AudioBuffer> buffer_;   // impulse response (nullable)
  std::shared_ptr<Convolver> convolver_;  // engine, built in setBuffer()
  float scaleFactor_ = 1.0f;              // applied during processNode()

  // Calibration constants are per-type, not per-instance: static constexpr
  // instead of mutable members, with explicit float literals so the
  // 0.00125 double literal is not silently narrowed.
  static constexpr float GainCalibration = 0.00125f;
  static constexpr float GainCalibrationSampleRate = 44100.0f;
  static constexpr float MinPower = 0.000125f;
};

} // namespace audioapi
Original file line number Diff line number Diff line change
Expand Up @@ -30,4 +30,4 @@ float linearToDecibels(float value) {
// Converts a decibel value back to linear gain: 10^(dB / 20).
float decibelsToLinear(float value) {
  const float exponent = value / 20;
  return powf(10, exponent);
}
} // namespace audioapi::dsp
} // namespace audioapi::dsp
Loading