Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 2 additions & 1 deletion apps/common-app/src/App.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -170,7 +170,8 @@ const App: FC = () => {
headerTintColor: colors.white,
headerBackTitle: 'Back',
headerBackAccessibilityLabel: 'Go back',
}}>
}}
>
<Stack.Screen
name="MainTabs"
component={MainTabsScreen}
Expand Down
172 changes: 172 additions & 0 deletions apps/common-app/src/examples/LFCHooks/LFCHooks.tsx
Original file line number Diff line number Diff line change
@@ -0,0 +1,172 @@
import React, { useEffect, useMemo, useRef, useState } from 'react';
import { StyleSheet, Text, View } from 'react-native';
import {
AudioBuffer,
AudioBufferSourceNode,
AudioContext,
AudioContextProvider,
useAudioContext,
} from 'react-native-audio-api';

import { Button, Container, Spacer } from '../../components';

type ComponentType = 'global' | 'provider' | 'customProvider';

const filePath =
'https://software-mansion.github.io/react-native-audio-api/audio/voice/example-voice-01.mp3';

const ContextUserComponent: React.FC = () => {
const audioContext = useAudioContext();
const [sampleBuffer, setSampleBuffer] = useState<AudioBuffer | null>(null);
const [sourceNode, setSourceNode] = useState<AudioBufferSourceNode | null>(
null
);
const sourceNodeRef = useRef<AudioBufferSourceNode | null>(null);

useEffect(() => {
sourceNodeRef.current = sourceNode;
}, [sourceNode]);

const onToggleState = async () => {
if (audioContext.state === 'suspended') {
await audioContext.resume();
} else {
await audioContext.suspend();
}
};

useEffect(() => {
async function prepareSample() {
const buffer = await audioContext.decodeAudioData(filePath);
setSampleBuffer(buffer);
}

prepareSample();
}, [audioContext]);

const onPlaySample = () => {
if (!sampleBuffer) {
return;
}

const node = audioContext.createBufferSource();
node.buffer = sampleBuffer;
node.connect(audioContext.destination);
node.start();

setSourceNode(node);
};

const onStopSample = () => {
if (!sourceNode) {
return;
}

sourceNode.stop();
setSourceNode(null);
};

useEffect(() => {
return () => {
if (sourceNodeRef.current) {
sourceNodeRef.current.stop();
}
};
}, []);

return (
<>
<Text style={style.label}>state: {audioContext.state}</Text>
<Spacer.Vertical size={20} />
<Button title="Toggle state" onPress={onToggleState} />
{!!sampleBuffer && (
<>
<Spacer.Vertical size={20} />
<Text style={style.label}>
Sample buffer loaded with {sampleBuffer.length} frames
</Text>
<Spacer.Vertical size={20} />
{!sourceNode ? (
<Button title="Play sample" onPress={onPlaySample} />
) : (
<Button title="Stop sample" onPress={onStopSample} />
)}
</>
)}
</>
);
};

const ComponentWithProvider: React.FC = () => {
return (
<AudioContextProvider options={{ sampleRate: 16000 }}>
<ContextUserComponent />
</AudioContextProvider>
);
};

const ComponentWithCustomProvider: React.FC = () => {
const context = useMemo(() => new AudioContext({ sampleRate: 8000 }), []);

return (
<AudioContextProvider context={context}>
<ContextUserComponent />
</AudioContextProvider>
);
};

const LFCHooks: React.FC = () => {
const [type, setType] = useState<ComponentType | null>(null);

const component = useMemo(() => {
if (!type) {
return null;
}

switch (type) {
case 'global':
return <ContextUserComponent />;
case 'provider':
return <ComponentWithProvider />;
case 'customProvider':
return <ComponentWithCustomProvider />;
}
}, [type]);

return (
<Container>
<View style={style.buttons}>
{!!type && <Button title="Reset" onPress={() => setType(null)} />}
<Spacer.Vertical size={20} />
<Button
title="Use global audio context"
onPress={() => setType('global')}
/>
<Spacer.Vertical size={20} />
<Button
title="Use audio context from provider"
onPress={() => setType('provider')}
/>
<Spacer.Vertical size={20} />
<Button
title="Use audio context from custom provider"
onPress={() => setType('customProvider')}
/>
</View>
<Spacer.Vertical size={20} />
{component}
</Container>
);
};

export default LFCHooks;

const style = StyleSheet.create({
buttons: {
flexDirection: 'column',
alignItems: 'stretch',
},
label: {
color: 'white',
},
});
1 change: 1 addition & 0 deletions apps/common-app/src/examples/LFCHooks/index.tsx
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
export { default } from './LFCHooks';
8 changes: 8 additions & 0 deletions apps/common-app/src/examples/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@ import { icons } from 'lucide-react-native';
import AudioFile from './AudioFile';
import AudioVisualizer from './AudioVisualizer';
import DrumMachine from './DrumMachine';
import LFCHooks from './LFCHooks';
import Metronome from './Metronome';
import OfflineRendering from './OfflineRendering';
import Oscillator from './Oscillator';
Expand All @@ -26,6 +27,7 @@ type NavigationParamList = {
Record: undefined;
Worklets: undefined;
Streamer: undefined;
LFCHooks: undefined;
};

export type ExampleKey = keyof NavigationParamList;
Expand Down Expand Up @@ -110,4 +112,10 @@ export const Examples: Example[] = [
Icon: icons.Radio,
screen: Streaming,
},
{
key: 'LFCHooks',
title: 'LFC Hooks',
Icon: icons.Wifi,
screen: LFCHooks,
},
] as const;
2 changes: 1 addition & 1 deletion apps/fabric-example/ios/Podfile.lock
Original file line number Diff line number Diff line change
Expand Up @@ -3321,7 +3321,7 @@ SPEC CHECKSUMS:
FBLazyVector: 309703e71d3f2f1ed7dc7889d58309c9d77a95a4
fmt: a40bb5bd0294ea969aaaba240a927bd33d878cdd
glog: 5683914934d5b6e4240e497e0f4a3b42d1854183
hermes-engine: f93b5009d8ccd9429fe2a772351980df8a22a413
hermes-engine: 42d6f09ee6ede2feb220e2fb772e8bebb42ca403
RCT-Folly: 846fda9475e61ec7bcbf8a3fe81edfcaeb090669
RCTDeprecation: a41bbdd9af30bf2e5715796b313e44ec43eefff1
RCTRequired: 7be34aabb0b77c3cefe644528df0fa0afad4e4d0
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -25,4 +25,5 @@ enum class AudioEvent {
POSITION_CHANGED,
BUFFER_ENDED,
RECORDER_ERROR,
CONTEXT_STATE_CHANGED,
}
Original file line number Diff line number Diff line change
Expand Up @@ -44,6 +44,8 @@ BaseAudioContextHostObject::BaseAudioContextHostObject(
JSI_EXPORT_PROPERTY_GETTER(BaseAudioContextHostObject, state),
JSI_EXPORT_PROPERTY_GETTER(BaseAudioContextHostObject, sampleRate),
JSI_EXPORT_PROPERTY_GETTER(BaseAudioContextHostObject, currentTime));

addSetters(JSI_EXPORT_PROPERTY_SETTER(BaseAudioContextHostObject, onStateChanged));

addFunctions(
JSI_EXPORT_FUNCTION(BaseAudioContextHostObject, createWorkletSourceNode),
Expand Down Expand Up @@ -89,6 +91,10 @@ JSI_PROPERTY_GETTER_IMPL(BaseAudioContextHostObject, currentTime) {
return {context_->getCurrentTime()};
}

JSI_PROPERTY_SETTER_IMPL(BaseAudioContextHostObject, onStateChanged) {
context_->setOnStateChangedCallbackId(std::stoull(value.getString(runtime).utf8(runtime)));
}

JSI_HOST_FUNCTION_IMPL(BaseAudioContextHostObject, createWorkletSourceNode) {
#if RN_AUDIO_API_ENABLE_WORKLETS
auto shareableWorklet =
Expand Down Expand Up @@ -312,4 +318,5 @@ JSI_HOST_FUNCTION_IMPL(BaseAudioContextHostObject, createWaveShaper) {
std::make_shared<WaveShaperNodeHostObject>(context_, waveShaperOptions);
return jsi::Object::createFromHostObject(runtime, waveShaperHostObject);
}

} // namespace audioapi
Original file line number Diff line number Diff line change
Expand Up @@ -29,6 +29,8 @@ class BaseAudioContextHostObject : public JsiHostObject {
JSI_PROPERTY_GETTER_DECL(sampleRate);
JSI_PROPERTY_GETTER_DECL(currentTime);

JSI_PROPERTY_SETTER_DECL(onStateChanged);

JSI_HOST_FUNCTION_DECL(createWorkletSourceNode);
JSI_HOST_FUNCTION_DECL(createWorkletNode);
JSI_HOST_FUNCTION_DECL(createWorkletProcessingNode);
Expand All @@ -55,4 +57,5 @@ class BaseAudioContextHostObject : public JsiHostObject {

std::shared_ptr<AudioDestinationNodeHostObject> destination_;
};

} // namespace audioapi
Original file line number Diff line number Diff line change
Expand Up @@ -171,6 +171,8 @@ namespace audioapi::js_enum_parser {
return AudioEvent::BUFFER_ENDED;
if (event == "recorderError")
return AudioEvent::RECORDER_ERROR;
if (event == "contextStateChanged")
return AudioEvent::CONTEXT_STATE_CHANGED;

throw std::invalid_argument("Unknown audio event: " + event);
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@ AudioContext::AudioContext(
const std::shared_ptr<IAudioEventHandlerRegistry> &audioEventHandlerRegistry,
const RuntimeRegistry &runtimeRegistry)
: BaseAudioContext(sampleRate, audioEventHandlerRegistry, runtimeRegistry),
isInitialized_(false) {}
isInitialized_(false) { }

AudioContext::~AudioContext() {
if (getState() != ContextState::CLOSED) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -78,6 +78,7 @@ std::shared_ptr<AudioDestinationNode> BaseAudioContext::getDestination() const {

void BaseAudioContext::setState(audioapi::ContextState state) {
state_.store(state, std::memory_order_release);
sendOnStateChangedEvent();
}

std::shared_ptr<WorkletSourceNode> BaseAudioContext::createWorkletSourceNode(
Expand Down Expand Up @@ -261,4 +262,21 @@ const RuntimeRegistry &BaseAudioContext::getRuntimeRegistry() const {
return runtimeRegistry_;
}

void BaseAudioContext::setOnStateChangedCallbackId(uint64_t callbackId) {
auto oldCallbackId = onStateChangedCallbackId_.exchange(callbackId, std::memory_order_acq_rel);

if (oldCallbackId != 0) {
audioEventHandlerRegistry_->unregisterHandler(AudioEvent::CONTEXT_STATE_CHANGED, oldCallbackId);
}
}

void BaseAudioContext::sendOnStateChangedEvent() {
auto onStateChangedCallbackId = onStateChangedCallbackId_.load(std::memory_order_acquire);

if (onStateChangedCallbackId != 0) {
audioEventHandlerRegistry_->invokeHandlerWithEventBody(
AudioEvent::CONTEXT_STATE_CHANGED, onStateChangedCallbackId, {});
}
}

} // namespace audioapi
Original file line number Diff line number Diff line change
Expand Up @@ -113,10 +113,13 @@ class BaseAudioContext : public std::enable_shared_from_this<BaseAudioContext> {

virtual void initialize();

void setOnStateChangedCallbackId(uint64_t callbackId);

protected:
std::shared_ptr<AudioDestinationNode> destination_;

private:
std::atomic<uint64_t> onStateChangedCallbackId_ = 0; // 0 means no callback
std::atomic<ContextState> state_;
std::atomic<float> sampleRate_;
std::shared_ptr<AudioGraphManager> graphManager_;
Expand All @@ -129,6 +132,7 @@ class BaseAudioContext : public std::enable_shared_from_this<BaseAudioContext> {
std::shared_ptr<PeriodicWave> cachedTriangleWave_ = nullptr;

[[nodiscard]] virtual bool isDriverRunning() const = 0;
void sendOnStateChangedEvent();
};

} // namespace audioapi
Original file line number Diff line number Diff line change
Expand Up @@ -26,5 +26,7 @@ enum class AudioEvent {
POSITION_CHANGED,
BUFFER_ENDED,
RECORDER_ERROR,
CONTEXT_STATE_CHANGED,
};

} // namespace audioapi
2 changes: 2 additions & 0 deletions packages/react-native-audio-api/src/core/AudioContext.ts
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,8 @@ import BaseAudioContext from './BaseAudioContext';

export default class AudioContext extends BaseAudioContext {
constructor(options?: AudioContextOptions) {
console.log('Creating AudioContext with options:', options);

if (
options &&
options.sampleRate &&
Expand Down
Loading