Skip to content

Commit ea268f4

Browse files
authored
feat: custom worklet processor node (#704)
* feat: made context hold multiple worklet runtimes * chore: renamed worklet runner * feat: implemented worklet source node * chore: some leftover * docs: updated documentation * docs: nitpicks * feat: implemented worklet processing node * fix: fixed tests * fix: replaced magic numbers * fix: requested changes * fix: fixed newer tests
1 parent c7f242d commit ea268f4

40 files changed

+1030
-97
lines changed

apps/common-app/src/examples/Worklets/Worklets.tsx

Lines changed: 38 additions & 33 deletions
Original file line numberDiff line numberDiff line change
@@ -3,9 +3,9 @@ import { Text, View, StyleSheet } from 'react-native';
33
import {
44
AudioContext,
55
AudioManager,
6-
AudioRecorder,
7-
RecorderAdapterNode,
8-
WorkletNode
6+
WorkletNode,
7+
WorkletProcessingNode,
8+
WorkletSourceNode
99
} from 'react-native-audio-api';
1010
import { Container, Button } from "../../components";
1111
import { Extrapolation, useSharedValue } from "react-native-reanimated";
@@ -16,13 +16,12 @@ import Animated, {
1616
} from "react-native-reanimated";
1717
import { colors } from "../../styles";
1818

19-
2019
function Worklets() {
2120
const SAMPLE_RATE = 44100;
22-
const recorderRef = useRef<AudioRecorder | null>(null);
2321
const aCtxRef = useRef<AudioContext | null>(null);
24-
const recorderAdapterRef = useRef<RecorderAdapterNode | null>(null);
2522
const workletNodeRef = useRef<WorkletNode | null>(null);
23+
const workletProcessingNodeRef = useRef<WorkletProcessingNode | null>(null);
24+
const workletSourceNodeRef = useRef<WorkletSourceNode | null>(null);
2625

2726
const bar0 = useSharedValue(0);
2827
const bar1 = useSharedValue(0);
@@ -36,65 +35,71 @@ function Worklets() {
3635
iosMode: 'spokenAudio',
3736
iosOptions: ['defaultToSpeaker', 'allowBluetoothA2DP'],
3837
});
39-
40-
AudioManager.requestRecordingPermissions();
41-
recorderRef.current = new AudioRecorder({
42-
sampleRate: SAMPLE_RATE,
43-
bufferLengthInSamples: 1024,
44-
});
4538
}, []);
4639

4740
const start = () => {
48-
if (!recorderRef.current) {
49-
console.error("Recorder is not initialized");
50-
return;
41+
42+
const processingWorklet = (inputAudioData: Array<Float32Array>, outputAudioData: Array<Float32Array>, framesToProcess: number, currentTime: number) => {
43+
'worklet';
44+
const gain = 0.5;
45+
for (let channel = 0; channel < inputAudioData.length; channel++) {
46+
const inputChannelData = inputAudioData[channel];
47+
const outputChannelData = outputAudioData[channel];
48+
for (let i = 0; i < framesToProcess; i++) {
49+
outputChannelData[i] = inputChannelData[i] * gain;
50+
}
51+
}
52+
};
53+
54+
const sourceWorklet = (audioData: Array<Float32Array>, framesToProcess: number, currentTime: number, startOffset: number) => {
55+
'worklet';
56+
for (let i = 0; i < audioData.length; i++) {
57+
for (let j = 0; j < framesToProcess; j++) {
58+
audioData[i][j] = Math.sin((currentTime + startOffset + j));
59+
}
60+
}
5161
}
5262

5363
const worklet = (audioData: Array<Float32Array>, inputChannelCount: number) => {
5464
'worklet';
5565

56-
5766
// Calculates RMS amplitude
5867
let sum = 0;
5968
for (let i = 0; i < audioData.length; i++) {
6069
sum += audioData[0][i] * audioData[0][i];
6170
}
6271
const rms = Math.sqrt(sum / audioData[0].length);
63-
const scaledAmplitude = Math.min(rms * 1000, 1);
72+
const scaledAmplitude = Math.min(rms * 10, 1);
6473

65-
console.log(`RMS: ${rms}, Scaled: ${scaledAmplitude}`);
74+
// console.log(`RMS: ${rms}, Scaled: ${scaledAmplitude}`);
6675

6776
bar0.value = withSpring(bar1.value, { damping: 20, stiffness: 150 });
6877
bar1.value = withSpring(bar2.value, { damping: 20, stiffness: 150 });
6978
bar3.value = withSpring(bar2.value, { damping: 20, stiffness: 150 });
7079
bar4.value = withSpring(bar3.value, { damping: 20, stiffness: 150 });
71-
bar2.value = withSpring(scaledAmplitude, { damping: 20, stiffness: 200 });
80+
bar2.value = withSpring(scaledAmplitude, { damping: 20, stiffness: 150 });
7281
};
7382

7483
aCtxRef.current = new AudioContext({ sampleRate: SAMPLE_RATE });
75-
recorderAdapterRef.current = aCtxRef.current.createRecorderAdapter();
76-
workletNodeRef.current = aCtxRef.current.createWorkletNode(worklet, 512, 1);
77-
recorderAdapterRef.current.connect(workletNodeRef.current);
84+
workletSourceNodeRef.current = aCtxRef.current.createWorkletSourceNode(sourceWorklet, 'AudioRuntime');
85+
workletNodeRef.current = aCtxRef.current.createWorkletNode(worklet, 256, 1, 'UIRuntime');
86+
workletProcessingNodeRef.current = aCtxRef.current.createWorkletProcessingNode(processingWorklet, 'AudioRuntime');
87+
88+
// Connect nodes
89+
workletSourceNodeRef.current.connect(workletProcessingNodeRef.current);
90+
workletProcessingNodeRef.current.connect(workletNodeRef.current);
91+
workletSourceNodeRef.current.connect(workletNodeRef.current);
7892
workletNodeRef.current.connect(aCtxRef.current.destination);
7993

80-
recorderRef.current.connect(recorderAdapterRef.current);
81-
recorderRef.current.start();
82-
console.log("Recording started");
83-
94+
workletSourceNodeRef.current.start();
8495
if (aCtxRef.current.state === 'suspended') {
8596
aCtxRef.current.resume();
8697
}
8798
}
8899

89100
const stop = () => {
90-
if (!recorderRef.current) {
91-
console.error("Recorder is not initialized");
92-
return;
93-
}
94-
recorderRef.current.stop();
95-
recorderAdapterRef.current = null;
96-
aCtxRef.current = null;
97101
console.log("Recording stopped");
102+
workletSourceNodeRef.current?.stop();
98103
bar0.value = withSpring(0, { damping: 20, stiffness: 100 });
99104
bar1.value = withSpring(0, { damping: 20, stiffness: 100 });
100105
bar2.value = withSpring(0, { damping: 20, stiffness: 100 });

packages/audiodocs/docs/core/base-audio-context.mdx

Lines changed: 35 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -67,6 +67,7 @@ Creates [`WorkletNode`](/docs/worklets/worklet-node).
6767
| `worklet` | `(Array<Float32Array>, number) => void` | The worklet to be executed. |
6868
| `bufferLength` | `number` | The size of the buffer that will be passed to the worklet on each call. |
6969
| `inputChannelCount` | `number` | The number of channels that the node expects as input (it will get min(expected, provided)). |
70+
| `workletRuntime` | `AudioWorkletRuntime` | The kind of runtime to use for the worklet. See [worklet runtimes](/docs/worklets/worklets-introduction#what-kind-of-worklets-are-used-in-react-native-audio-api) for details. |
7071

7172
#### Errors
7273

@@ -78,6 +79,40 @@ Creates [`WorkletNode`](/docs/worklets/worklet-node).
7879

7980
#### Returns `WorkletNode`.
8081

82+
### `createWorkletSourceNode` <MobileOnly />
83+
84+
Creates [`WorkletSourceNode`](/docs/worklets/worklet-source-node).
85+
86+
| Parameters | Type | Description |
87+
| :---: | :---: | :---- |
88+
| `worklet` | `(Array<Float32Array>, number, number, number) => void` | The worklet to be executed. |
89+
| `workletRuntime` | `AudioWorkletRuntime` | The kind of runtime to use for the worklet. See [worklet runtimes](/docs/worklets/worklets-introduction#what-kind-of-worklets-are-used-in-react-native-audio-api) for details. |
90+
91+
#### Errors
92+
93+
| Error type | Description |
94+
| :---: | :---- |
95+
| `Error` | `react-native-worklet` is not found as dependency. |
96+
97+
#### Returns `WorkletSourceNode`.
98+
99+
### `createWorkletProcessingNode` <MobileOnly />
100+
101+
Creates [`WorkletProcessingNode`](/docs/worklets/worklet-processing-node).
102+
103+
| Parameters | Type | Description |
104+
| :---: | :---: | :---- |
105+
| `worklet` | `(Array<Float32Array>, Array<Float32Array>, number, number) => void` | The worklet to be executed. |
106+
| `workletRuntime` | `AudioWorkletRuntime` | The kind of runtime to use for the worklet. See [worklet runtimes](/docs/worklets/worklets-introduction#what-kind-of-worklets-are-used-in-react-native-audio-api) for details. |
107+
108+
#### Errors
109+
110+
| Error type | Description |
111+
| :---: | :---- |
112+
| `Error` | `react-native-worklet` is not found as dependency. |
113+
114+
#### Returns `WorkletProcessingNode`.
115+
81116
### `createBuffer`
82117

83118
Creates [`AudioBuffer`](/docs/sources/audio-buffer).

packages/audiodocs/docs/worklets/introduction.mdx

Lines changed: 20 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -26,9 +26,27 @@ Simply put, a worklet is a piece of code that can be executed on a runtime diffe
2626

2727
## What kind of worklets are used in react-native-audio-api?
2828

29-
All of our worklets are currently executed on the UI runtime provided by the `RNWorklets` library.
29+
We support two types of worklet runtimes, each optimized for different use cases:
3030

31-
This allows the use of Reanimated utilities and features inside the worklets. Our main goal is to enable seamless integration with the UI - for example, creating animations from audio data.
31+
### UIRuntime
32+
Worklets executed on the UI runtime provided by the `RNWorklets` library. This allows the use of Reanimated utilities and features inside the worklets. The main goal is to enable seamless integration with the UI - for example, creating animations from audio data.
33+
34+
**Use UIRuntime when:**
35+
- You need to update UI elements from audio data
36+
- Creating visualizations or animations based on audio
37+
- Integrating with Reanimated shared values
38+
- Performance is less critical than UI responsiveness
39+
40+
### AudioRuntime
41+
Worklets executed on the audio rendering thread for maximum performance and minimal latency. This runtime is optimized for real-time audio processing where timing is critical.
42+
43+
**Use AudioRuntime when:**
44+
- Performance and low latency are crucial
45+
- Processing audio in real-time without dropouts
46+
- Generating audio with precise timing
47+
- Audio processing doesn't need to interact with UI
48+
49+
You can specify the runtime type when creating worklet nodes using the `workletRuntime` parameter.
3250

3351
## How to use worklets in react-native-audio-api mindfully?
3452

packages/audiodocs/docs/worklets/worklet-node.mdx

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -13,13 +13,13 @@ This node lets you execute a worklet on the UI thread. bufferLength specifies th
1313

1414
## Constructor
1515

16-
[`BaseAudioContext.createWorkletNode(worklet, bufferLength, inputChannelCount)`](/docs/core/base-audio-context#createworkletnode-)
16+
[`BaseAudioContext.createWorkletNode(worklet, bufferLength, inputChannelCount, workletRuntime)`](/docs/core/base-audio-context#createworkletnode-)
1717

1818
## Example
1919
```tsx
2020
import { AudioContext, AudioRecorder } from 'react-native-audio-api';
2121

22-
/// This example shows how we can use a WorkletNode to process microphone audio data in real-time.
22+
// This example shows how we can use a WorkletNode to process microphone audio data in real-time.
2323
function App() {
2424
const recorder = new AudioRecorder({
2525
sampleRate: 16000,
@@ -34,7 +34,7 @@ function App() {
3434
// !IMPORTANT: here you can only read audio data; any modifications will not be reflected in the audio output of this node
3535
// !VERY IMPORTANT: please read the Known Issue section below
3636
};
37-
const workletNode = audioContext.createWorkletNode(worklet, 1024, 2);
37+
const workletNode = audioContext.createWorkletNode(worklet, 1024, 2, 'UIRuntime');
3838
const adapterNode = audioContext.createRecorderAdapter();
3939

4040
adapterNode.connect(workletNode);
@@ -45,13 +45,13 @@ function App() {
4545
```
4646

4747
## Properties
48-
It has no own properties but inherits from `AudioNode`.
48+
It has no own properties but inherits from [`AudioNode`](/docs/core/audio-node).
4949

5050
## Methods
51-
It has no own methods but inherits from `AudioNode`.
51+
It has no own methods but inherits from [`AudioNode`](/docs/core/audio-node).
5252

5353
## Known Issue
54-
It might happen that the worklet side effect is not visible on the UI. For example you have some animated style which depends on some shared value modified in the worklet.
54+
It might happen that the worklet side effect is not visible on the UI (when you are using the UIRuntime kind). For example, you have some animated style which depends on some shared value modified in the worklet.
5555
This happens because the microtask queue is not always flushed properly, so updates made to shared values inside the worklet may not propagate to the UI immediately.
5656

5757
To workaround this issue just add this line at the end of your worklet callback function:
Lines changed: 143 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,143 @@
1+
---
2+
sidebar_position: 4
3+
---
4+
5+
import { ReadOnly } from '@site/src/components/Badges';
6+
7+
# WorkletProcessingNode
8+
9+
The `WorkletProcessingNode` interface represents a node in the audio processing graph that can process audio using a worklet function. Unlike [`WorkletNode`](/docs/worklets/worklet-node) which only provides read-only access to audio data, `WorkletProcessingNode` allows you to modify the audio signal by providing both input and output buffers.
10+
11+
This node lets you execute a worklet that receives input audio data and produces output audio data, making it perfect for creating custom audio effects, filters, and processors. The worklet processes the exact number of frames provided by the audio system in each call.
12+
13+
For more information about worklets, see our [Introduction to worklets](/docs/worklets/worklets-introduction).
14+
15+
## Constructor
16+
17+
[`BaseAudioContext.createWorkletProcessingNode(worklet, workletRuntime)`](/docs/core/base-audio-context#createworkletprocessingnode-)
18+
19+
## Example
20+
21+
```tsx
22+
import { AudioContext, AudioRecorder } from 'react-native-audio-api';
23+
24+
// This example shows how to create a simple gain effect using WorkletProcessingNode
25+
function App() {
26+
const recorder = new AudioRecorder({
27+
sampleRate: 16000,
28+
bufferLengthInSamples: 16000,
29+
});
30+
31+
const audioContext = new AudioContext({ sampleRate: 16000 });
32+
33+
// Create a simple gain worklet that multiplies the input by a gain value
34+
const gainWorklet = (
35+
inputData: Array<Float32Array>,
36+
outputData: Array<Float32Array>,
37+
framesToProcess: number,
38+
currentTime: number
39+
) => {
40+
'worklet';
41+
const gain = 0.5; // 50% volume
42+
43+
for (let ch = 0; ch < inputData.length; ch++) {
44+
const input = inputData[ch];
45+
const output = outputData[ch];
46+
47+
for (let i = 0; i < framesToProcess; i++) {
48+
output[i] = input[i] * gain;
49+
}
50+
}
51+
};
52+
53+
const workletProcessingNode = audioContext.createWorkletProcessingNode(
54+
gainWorklet,
55+
'AudioRuntime'
56+
);
57+
const adapterNode = audioContext.createRecorderAdapter();
58+
59+
adapterNode.connect(workletProcessingNode);
60+
workletProcessingNode.connect(audioContext.destination);
61+
recorder.connect(adapterNode);
62+
recorder.start();
63+
}
64+
}
65+
```
66+
67+
## Worklet Parameters Explanation
68+
69+
The worklet function receives four parameters:
70+
71+
### `inputData: Array<Float32Array>`
72+
A two-dimensional array where:
73+
- First dimension represents the audio channel (0 = left, 1 = right for stereo)
74+
- Second dimension contains the input audio samples for that channel
75+
- You should **read** from these buffers to get the input audio data
76+
- The length of each `Float32Array` equals the `framesToProcess` parameter
77+
78+
### `outputData: Array<Float32Array>`
79+
A two-dimensional array where:
80+
- First dimension represents the audio channel (0 = left, 1 = right for stereo)
81+
- Second dimension contains the output audio samples for that channel
82+
- You must **write** to these buffers to produce the processed audio output
83+
- The length of each `Float32Array` equals the `framesToProcess` parameter
84+
85+
### `framesToProcess: number`
86+
The number of audio samples to process in this call. This determines how many samples you need to process in each channel's buffer. This value will be at most 128.
87+
88+
### `currentTime: number`
89+
The current audio context time in seconds when this worklet call begins. This represents the absolute time since the audio context was created.
90+
91+
## Audio Processing Pattern
92+
93+
A typical WorkletProcessingNode worklet follows this pattern:
94+
95+
```tsx
96+
const audioProcessor = (
97+
inputData: Array<Float32Array>,
98+
outputData: Array<Float32Array>,
99+
framesToProcess: number,
100+
currentTime: number
101+
) => {
102+
'worklet';
103+
104+
for (let channel = 0; channel < inputData.length; channel++) {
105+
const input = inputData[channel];
106+
const output = outputData[channel];
107+
108+
for (let sample = 0; sample < framesToProcess; sample++) {
109+
// Process each sample
110+
// Read from: input[sample]
111+
// Write to: output[sample]
112+
output[sample] = processAudioSample(input[sample]);
113+
}
114+
}
115+
};
116+
```
117+
118+
## Properties
119+
120+
It has no own properties but inherits from [`AudioNode`](/docs/core/audio-node).
121+
122+
## Methods
123+
124+
It has no own methods but inherits from [`AudioNode`](/docs/core/audio-node).
125+
126+
## Performance Considerations
127+
128+
Since `WorkletProcessingNode` processes audio in real-time, performance is critical:
129+
130+
- Keep worklet functions lightweight and efficient
131+
- Avoid complex calculations that could cause audio dropouts
132+
- Process samples in-place when possible
133+
- Consider using lookup tables for expensive operations
134+
- Use `AudioRuntime` for better performance, `UIRuntime` for UI integration
135+
- Test on target devices to ensure smooth audio processing
136+
137+
## Use Cases
138+
139+
- **Audio Effects**: Reverb, delay, distortion, filters
140+
- **Audio Processing**: Compression, limiting, normalization
141+
- **Real-time Filters**: EQ, high-pass, low-pass, band-pass filters
142+
- **Custom Algorithms**: Noise reduction, pitch shifting, spectral processing
143+
- **Signal Analysis**: Feature extraction while passing audio through

0 commit comments

Comments
 (0)