diff --git a/CHANGELOG.md b/CHANGELOG.md
index 2c134d8661..ace274b22e 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -151,6 +151,7 @@ Breaking changes in this release:
 - 👷🏻 Added `npm run build-browser` script for building test harness package only, in PR [#5667](https://github.com/microsoft/BotFramework-WebChat/pull/5667), by [@compulim](https://github.com/compulim)
 - Added pull-based capabilities system for dynamically discovering adapter capabilities at runtime, in PR [#5679](https://github.com/microsoft/BotFramework-WebChat/pull/5679), by [@pranavjoshi001](https://github.com/pranavjoshi001)
 - Added Speech-to-Speech (S2S) support for real-time voice conversations, in PR [#5654](https://github.com/microsoft/BotFramework-WebChat/pull/5654), by [@pranavjoshi](https://github.com/pranavjoshi001)
+- Added core mute/unmute functionality for speech-to-speech via `useRecorder` hook (silent chunks keep server connection alive), in PR [#5688](https://github.com/microsoft/BotFramework-WebChat/pull/5688), by [@pranavjoshi](https://github.com/pranavjoshi001)

 ### Changed

diff --git a/packages/api/src/boot/hook.ts b/packages/api/src/boot/hook.ts
index dda4464d10..3f88e1f5f6 100644
--- a/packages/api/src/boot/hook.ts
+++ b/packages/api/src/boot/hook.ts
@@ -39,6 +39,7 @@ export {
   useMarkActivityAsSpoken,
   useMarkActivityKeyAsRead,
   useMarkAllAsAcknowledged,
+  useMuteVoice,
   useNotifications,
   usePerformCardAction,
   usePonyfill,
@@ -74,6 +75,7 @@ export {
   useTrackException,
   useTrackTiming,
   useUIState,
+  useUnmuteVoice,
   useUserID,
   useUsername,
   useVoiceSelector,
diff --git a/packages/api/src/hooks/Composer.tsx b/packages/api/src/hooks/Composer.tsx
index 3b1794d86b..cd570166bb 100644
--- a/packages/api/src/hooks/Composer.tsx
+++ b/packages/api/src/hooks/Composer.tsx
@@ -13,6 +13,7 @@ import {
   dismissNotification,
   emitTypingIndicator,
   markActivity,
+  muteVoiceRecording,
   postActivity,
   sendEvent,
   sendFiles,
@@ -35,6 +36,7 @@ import {
   stopSpeakingActivity,
   stopVoiceRecording,
   submitSendBox,
+  unmuteVoiceRecording,
   type DirectLineJSBotConnection,
   type GlobalScopePonyfill,
   type OneOrMany,
@@ -381,6 +383,14 @@ const ComposerCore = ({
     dispatch(stopVoiceRecording());
   }, [dispatch, voiceHandlers]);

+  const muteVoice = useCallback(() => {
+    dispatch(muteVoiceRecording());
+  }, [dispatch]);
+
+  const unmuteVoice = useCallback(() => {
+    dispatch(unmuteVoiceRecording());
+  }, [dispatch]);
+
   const patchedLocalizedStrings = useMemo(
     () => mergeStringsOverrides(getAllLocalizedStrings()[normalizeLanguage(locale)], locale, overrideLocalizedStrings),
     [locale, overrideLocalizedStrings]
@@ -563,6 +573,7 @@ const ComposerCore = ({
       language: locale,
       localizedGlobalizeState: [localizedGlobalize],
       localizedStrings: patchedLocalizedStrings,
+      muteVoice,
       onTelemetry,
       renderMarkdown,
       scrollToEndButtonRenderer,
@@ -575,6 +586,7 @@ const ComposerCore = ({
       trackDimension,
       typingIndicatorRenderer: patchedTypingIndicatorRenderer,
       uiState,
+      unmuteVoice,
       userID,
       username
     }),
@@ -585,6 +597,7 @@ const ComposerCore = ({
      hoistedDispatchers,
      locale,
      localizedGlobalize,
+     muteVoice,
      onTelemetry,
      patchedActivityStatusRenderer,
      patchedAttachmentForScreenReaderRenderer,
@@ -604,6 +617,7 @@ const ComposerCore = ({
      telemetryDimensionsRef,
      trackDimension,
      uiState,
+     unmuteVoice,
      userID,
      username
    ]
diff --git a/packages/api/src/hooks/index.ts b/packages/api/src/hooks/index.ts
index f1fb41c3ce..7d5cb38fde 100644
--- a/packages/api/src/hooks/index.ts
+++ b/packages/api/src/hooks/index.ts
@@ -37,6 +37,7 @@ import useLocalizer from './useLocalizer';
 import useMarkActivityAsSpoken from './useMarkActivityAsSpoken';
 import useMarkActivityKeyAsRead from './useMarkActivityKeyAsRead';
 import useMarkAllAsAcknowledged from './useMarkAllAsAcknowledged';
+import useMuteVoice from './useMuteVoice';
 import useNotifications from './useNotifications';
 import usePerformCardAction from './usePerformCardAction';
 import usePonyfill from './usePonyfill';
@@ -71,6 +72,7 @@ import useTrackEvent from './useTrackEvent';
 import useTrackException from './useTrackException';
 import useTrackTiming from './useTrackTiming';
 import useUIState from './useUIState';
+import useUnmuteVoice from './useUnmuteVoice';
 import useUserID from './useUserID';
 import useUsername from './useUsername';
 import useVoiceSelector from './useVoiceSelector';
@@ -119,6 +121,7 @@ export {
   useMarkActivityAsSpoken,
   useMarkActivityKeyAsRead,
   useMarkAllAsAcknowledged,
+  useMuteVoice,
   useNotifications,
   usePerformCardAction,
   usePonyfill,
@@ -153,6 +156,7 @@ export {
   useTrackException,
   useTrackTiming,
   useUIState,
+  useUnmuteVoice,
   useUserID,
   useUsername,
   useVoiceSelector,
diff --git a/packages/api/src/hooks/internal/WebChatAPIContext.ts b/packages/api/src/hooks/internal/WebChatAPIContext.ts
index a0bc434b51..7fb55082db 100644
--- a/packages/api/src/hooks/internal/WebChatAPIContext.ts
+++ b/packages/api/src/hooks/internal/WebChatAPIContext.ts
@@ -44,6 +44,7 @@ export type WebChatAPIContextType = {
   localizedGlobalizeState?: PrecompiledGlobalize[];
   localizedStrings?: { [language: string]: LocalizedStrings };
   markActivity?: (activity: { id: string }, name: string, value?: any) => void;
+  muteVoice?: () => void;
   onCardAction?: PerformCardAction;
   onTelemetry?: (event: TelemetryMeasurementEvent) => void;
   postActivity?: (activity: WebChatActivity) => Observable<string>;
@@ -81,6 +82,7 @@ export type WebChatAPIContextType = {
   trackDimension?: (name: string, data: any) => void;
   typingIndicatorRenderer?: any; // TODO
   uiState: 'blueprint' | 'disabled' | undefined;
+  unmuteVoice?: () => void;
   userID?: string;
   username?: string;
 };
diff --git a/packages/api/src/hooks/useMuteVoice.ts b/packages/api/src/hooks/useMuteVoice.ts
new file mode 100644
index 0000000000..38f92e313f
--- /dev/null
+++ b/packages/api/src/hooks/useMuteVoice.ts
@@ -0,0 +1,9 @@
+import useWebChatAPIContext from './internal/useWebChatAPIContext';
+
+/**
+ * Hook to mute voice mode (stops microphone input but keeps connection alive with silent chunks).
+ * The session remains active and can be unmuted to resume listening.
+ */
+export default function useMuteVoice(): () => void {
+  return useWebChatAPIContext().muteVoice;
+}
diff --git a/packages/api/src/hooks/useUnmuteVoice.ts b/packages/api/src/hooks/useUnmuteVoice.ts
new file mode 100644
index 0000000000..57d8d5f1ad
--- /dev/null
+++ b/packages/api/src/hooks/useUnmuteVoice.ts
@@ -0,0 +1,9 @@
+import useWebChatAPIContext from './internal/useWebChatAPIContext';
+
+/**
+ * Hook to unmute voice mode (resumes microphone input after muting).
+ * This reactivates speech-to-speech listening.
+ */
+export default function useUnmuteVoice(): () => void {
+  return useWebChatAPIContext().unmuteVoice;
+}
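
The two hooks above are thin wrappers that surface the `muteVoice`/`unmuteVoice` dispatchers wired up in `Composer.tsx`. A minimal consumer could look like the sketch below — `MuteToggle` is a hypothetical component, not part of this PR; it pairs the new hooks with the existing `useVoiceState`:

```tsx
import React, { useCallback } from 'react';
import { useMuteVoice, useUnmuteVoice, useVoiceState } from 'botframework-webchat-api/hook';

const MuteToggle = () => {
  const muteVoice = useMuteVoice();
  const unmuteVoice = useUnmuteVoice();
  const [voiceState] = useVoiceState();

  const muted = voiceState === 'muted';

  // muteVoice() dispatches WEB_CHAT/VOICE_MUTE_RECORDING and unmuteVoice()
  // dispatches WEB_CHAT/VOICE_UNMUTE_RECORDING; VoiceRecorderBridge reacts to
  // the resulting voiceState change (see below).
  const handleClick = useCallback(
    () => (muted ? unmuteVoice() : muteVoice()),
    [muted, muteVoice, unmuteVoice]
  );

  return (
    <button disabled={voiceState === 'idle'} onClick={handleClick} type="button">
      {muted ? 'Unmute' : 'Mute'}
    </button>
  );
};

export default MuteToggle;
```
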
diff --git a/packages/api/src/providers/SpeechToSpeech/private/VoiceRecorderBridge.tsx b/packages/api/src/providers/SpeechToSpeech/private/VoiceRecorderBridge.tsx
index ea00dbe81d..757f9e5940 100644
--- a/packages/api/src/providers/SpeechToSpeech/private/VoiceRecorderBridge.tsx
+++ b/packages/api/src/providers/SpeechToSpeech/private/VoiceRecorderBridge.tsx
@@ -11,6 +11,7 @@ export function VoiceRecorderBridge(): null {
   const [voiceState] = useVoiceState();
   const postVoiceActivity = usePostVoiceActivity();

+  const muted = voiceState === 'muted';
   // Derive recording state from voiceState - recording is active when not idle
   const recording = voiceState !== 'idle';

@@ -29,7 +30,13 @@ export function VoiceRecorderBridge(): null {
     [postVoiceActivity]
   );

-  const { record } = useRecorder(handleAudioChunk);
+  const { record, mute } = useRecorder(handleAudioChunk);
+
+  useEffect(() => {
+    if (muted) {
+      return mute();
+    }
+  }, [mute, muted]);

   useEffect(() => {
     if (recording) {
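
Note the shape that makes the effect above work: `mute()` returns the matching unmute function, so returning it from the effect registers unmuting as the React cleanup — when `muted` flips back to `false` (or the bridge unmounts), React unmutes automatically. A self-contained sketch of the same pattern (`useMuteEffect` is hypothetical, for illustration only):

```tsx
import { useEffect } from 'react';

// Shape of what useRecorder returns (see useRecorder.ts below); each operation
// returns its own undo function.
type UseRecorderResult = {
  record: () => () => void; // start recording; returns stopRecording
  mute: () => () => void; // mute; returns the matching unmute
};

// Hypothetical standalone version of the effect in VoiceRecorderBridge above.
function useMuteEffect(muted: boolean, { mute }: UseRecorderResult) {
  useEffect(() => {
    if (muted) {
      // The returned unmute doubles as the effect cleanup: React runs it when
      // `muted` flips back to false, or when the component unmounts.
      return mute();
    }
  }, [mute, muted]);
}

export default useMuteEffect;
```
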
diff --git a/packages/api/src/providers/SpeechToSpeech/private/useRecorder.spec.tsx b/packages/api/src/providers/SpeechToSpeech/private/useRecorder.spec.tsx
index 6bb47cfa14..2b65f9195b 100644
--- a/packages/api/src/providers/SpeechToSpeech/private/useRecorder.spec.tsx
+++ b/packages/api/src/providers/SpeechToSpeech/private/useRecorder.spec.tsx
@@ -36,13 +36,16 @@ const mockWorkletNode = {
   port: mockWorkletPort
 };

+const mockSourceNode = {
+  connect: jest.fn(),
+  disconnect: jest.fn()
+};
+
 const mockAudioContext = {
   audioWorklet: {
     addModule: jest.fn().mockResolvedValue(undefined)
   },
-  createMediaStreamSource: jest.fn(() => ({
-    connect: jest.fn()
-  })),
+  createMediaStreamSource: jest.fn(() => mockSourceNode),
   destination: {},
   resume: jest.fn().mockResolvedValue(undefined),
   state: 'running'
@@ -218,4 +221,74 @@ describe('useRecorder', () => {
       });
     });
   });
+
+  test('should return mute function', () => {
+    render();
+    expect(typeof hookData?.mute).toBe('function');
+  });
+
+  test('should send MUTE command and stop media stream when mute is called', async () => {
+    render();
+
+    // Start recording first
+    act(() => {
+      hookData?.record();
+    });
+
+    await waitFor(() => {
+      expect(mockWorkletPort.postMessage).toHaveBeenCalledWith({ command: 'START' });
+    });
+
+    // Clear mocks to isolate mute behavior
+    mockWorkletPort.postMessage.mockClear();
+    mockTrack.stop.mockClear();
+    mockSourceNode.disconnect.mockClear();
+
+    // Call mute
+    act(() => {
+      hookData?.mute();
+    });
+
+    // Should send MUTE command to worklet
+    expect(mockWorkletPort.postMessage).toHaveBeenCalledWith({ command: 'MUTE' });
+    // Should stop media stream tracks (mic indicator OFF)
+    expect(mockTrack.stop).toHaveBeenCalledTimes(1);
+    // Should disconnect source node
+    expect(mockSourceNode.disconnect).toHaveBeenCalledTimes(1);
+  });
+
+  test('should return unmute function from mute() that sends UNMUTE and restarts media stream', async () => {
+    render();
+
+    // Start recording first
+    act(() => {
+      hookData?.record();
+    });
+
+    await waitFor(() => {
+      expect(mockWorkletPort.postMessage).toHaveBeenCalledWith({ command: 'START' });
+    });
+
+    // Call mute and get unmute function
+    let unmute: (() => void) | undefined;
+    act(() => {
+      unmute = hookData?.mute();
+    });
+
+    // Clear mocks to isolate unmute behavior
+    mockWorkletPort.postMessage.mockClear();
+    mockMediaDevices.getUserMedia.mockClear();
+
+    // Call unmute
+    act(() => {
+      unmute?.();
+    });
+
+    // Should send UNMUTE command to worklet
+    expect(mockWorkletPort.postMessage).toHaveBeenCalledWith({ command: 'UNMUTE' });
+    // Should restart media stream
+    await waitFor(() => {
+      expect(mockMediaDevices.getUserMedia).toHaveBeenCalledTimes(1);
+    });
+  });
 });
diff --git a/packages/api/src/providers/SpeechToSpeech/private/useRecorder.ts b/packages/api/src/providers/SpeechToSpeech/private/useRecorder.ts
index 05ed029003..f67e07d241 100644
--- a/packages/api/src/providers/SpeechToSpeech/private/useRecorder.ts
+++ b/packages/api/src/providers/SpeechToSpeech/private/useRecorder.ts
@@ -8,9 +8,11 @@ declare class AudioWorkletProcessor {
   buffer: number[];
   bufferSize: number;
   constructor(options?: AudioWorkletNodeOptions);
+  muted: boolean;
   process(inputs: Float32Array[][], outputs: Float32Array[][], parameters: Record<string, Float32Array>): boolean;
   readonly port: MessagePort;
   recording: boolean;
+  silentFrame: Float32Array;
 }

 declare function registerProcessor(name: string, processorCtor: typeof AudioWorkletProcessor): void;
@@ -20,12 +22,16 @@ declare function registerProcessor(name: string, processorCtor: typeof AudioWork
  * without any TypeScript annotations that could be transformed by the compiler.
  */
 const audioProcessorCode = `(${function () {
+  const RENDER_QUANTUM = 128;
+
   class AudioRecorderProcessor extends AudioWorkletProcessor {
     constructor(options: AudioWorkletNodeOptions) {
       super();
       this.buffer = [];
      this.bufferSize = options.processorOptions.bufferSize;
+      this.muted = false;
      this.recording = false;
+      this.silentFrame = new Float32Array(RENDER_QUANTUM); // Pre-allocated zeros

       this.port.onmessage = e => {
         if (e.data.command === 'START') {
@@ -33,13 +39,20 @@ const audioProcessorCode = `(${function () {
         } else if (e.data.command === 'STOP') {
           this.recording = false;
           this.buffer = [];
+        } else if (e.data.command === 'MUTE') {
+          this.muted = true;
+        } else if (e.data.command === 'UNMUTE') {
+          this.muted = false;
         }
       };
     }

     process(inputs: Float32Array[][]) {
-      if (inputs[0] && inputs[0].length && this.recording) {
-        this.buffer.push(...inputs[0][0]);
+      if (this.recording) {
+        // Use real audio when not muted, otherwise silenced chunk to keep connection alive (all zeros).
+        const audioData = !this.muted && inputs[0] && inputs[0].length ? inputs[0][0] : this.silentFrame;
+        this.buffer.push(...audioData);
+
         while (this.buffer.length >= this.bufferSize) {
           const chunk = this.buffer.splice(0, this.bufferSize);
           this.port.postMessage({ eventType: 'audio', audioData: new Float32Array(chunk) });
@@ -62,6 +75,7 @@ const MS_IN_SECOND = 1000;
 export function useRecorder(onAudioChunk: (base64: string, timestamp: string) => void) {
   const [{ Date }] = usePonyfill();
   const audioCtxRef = useRef<AudioContext | undefined>(undefined);
+  const sourceRef = useRef<MediaStreamAudioSourceNode | undefined>(undefined);
   const streamRef = useRef<MediaStream | undefined>(undefined);
   const voiceConfiguration = useCapabilities(caps => caps.voiceConfiguration);
   const workletRef = useRef<AudioWorkletNode | undefined>(undefined);
@@ -69,17 +83,44 @@ export function useRecorder(onAudioChunk: (base64: string, timestamp: string) =>
   const chunkIntervalMs = voiceConfiguration?.chunkIntervalMs ?? DEFAULT_CHUNK_SIZE_IN_MS;
   const sampleRate = voiceConfiguration?.sampleRate ?? DEFAULT_SAMPLE_RATE;

+  const stopMediaStream = useCallback(() => {
+    if (streamRef.current) {
+      streamRef.current.getTracks().forEach(track => track.stop());
+      streamRef.current = undefined;
+    }
+  }, [streamRef]);
+
+  // Acquire MediaStream and connect source to worklet
+  const acquireAndConnectMediaStream = useCallback(async () => {
+    const audioCtx = audioCtxRef.current;
+    if (!audioCtx) {
+      return;
+    }
+
+    const stream = await navigator.mediaDevices.getUserMedia({
+      audio: {
+        channelCount: 1,
+        echoCancellation: true,
+        sampleRate
+      }
+    });
+    streamRef.current = stream;
+
+    const source = audioCtx.createMediaStreamSource(stream);
+    if (workletRef.current) {
+      source.connect(workletRef.current);
+    }
+    sourceRef.current = source;
+  }, [audioCtxRef, sampleRate, sourceRef, streamRef, workletRef]);
+
   const stopRecording = useCallback(() => {
     if (workletRef.current) {
       workletRef.current.port.postMessage({ command: 'STOP' });
       workletRef.current.disconnect();
       workletRef.current = undefined;
     }
-    if (streamRef.current) {
-      streamRef.current.getTracks().forEach(track => track.stop());
-      streamRef.current = undefined;
-    }
-  }, [streamRef, workletRef]);
+    stopMediaStream();
+  }, [stopMediaStream, workletRef]);

   const initAudio = useCallback(async () => {
     if (audioCtxRef.current) {
@@ -103,15 +144,7 @@ export function useRecorder(onAudioChunk: (base64: string, timestamp: string) =>
     if (audioCtx.state === 'suspended') {
       await audioCtx.resume();
     }
-    const stream = await navigator.mediaDevices.getUserMedia({
-      audio: {
-        channelCount: 1,
-        echoCancellation: true,
-        sampleRate
-      }
-    });
-    streamRef.current = stream;
-    const source = audioCtx.createMediaStreamSource(stream);
+
     const worklet = new AudioWorkletNode(audioCtx, 'audio-recorder', {
       processorOptions: {
         bufferSize: (sampleRate * chunkIntervalMs) / MS_IN_SECOND
@@ -131,16 +164,57 @@ export function useRecorder(onAudioChunk: (base64: string, timestamp: string) =>
       }
     };

-    source.connect(worklet);
     worklet.connect(audioCtx.destination);
-    worklet.port.postMessage({ command: 'START' });
     workletRef.current = worklet;
-  }, [audioCtxRef, chunkIntervalMs, Date, initAudio, onAudioChunk, sampleRate]);
+
+    await acquireAndConnectMediaStream();
+
+    worklet.port.postMessage({ command: 'START' });
+  }, [
+    Date,
+    acquireAndConnectMediaStream,
+    audioCtxRef,
+    chunkIntervalMs,
+    initAudio,
+    onAudioChunk,
+    sampleRate,
+    workletRef
+  ]);
+
+  const muteRecording = useCallback(() => {
+    // Stop MediaStream (mic indicator OFF) and disconnect source
+    stopMediaStream();
+
+    if (sourceRef.current) {
+      sourceRef.current.disconnect();
+      sourceRef.current = undefined;
+    }
+
+    // Tell worklet to output silence
+    if (workletRef.current) {
+      workletRef.current.port.postMessage({ command: 'MUTE' });
+    }
+
+    // Return unmute function
+    return () => {
+      if (!audioCtxRef.current || !workletRef.current) {
+        return;
+      }
+
+      // Tell worklet to use real audio
+      workletRef.current.port.postMessage({ command: 'UNMUTE' });
+
+      // Restart MediaStream and reconnect source (fire and forget)
+      acquireAndConnectMediaStream();
+    };
+  }, [acquireAndConnectMediaStream, audioCtxRef, sourceRef, stopMediaStream, workletRef]);

   const record = useCallback(() => {
     startRecording();
     return stopRecording;
   }, [startRecording, stopRecording]);

-  return useMemo(() => ({ record }), [record]);
+  const mute = useCallback(() => muteRecording(), [muteRecording]);
+
+  return useMemo(() => ({ record, mute }), [record, mute]);
 }
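
While muted, the worklet stays connected to `audioCtx.destination`, so `process()` keeps running once per 128-sample render quantum and keeps filling the buffer from the pre-allocated `silentFrame` — chunks therefore continue flowing to the server at the live-audio cadence, while stopping the `MediaStream` tracks releases the microphone (the browser's mic indicator goes off). A back-of-the-envelope sketch of that cadence — the 16 kHz / 100 ms figures below are illustrative assumptions, not the actual `DEFAULT_SAMPLE_RATE` / `DEFAULT_CHUNK_SIZE_IN_MS` values defined elsewhere in this file:

```ts
// Illustrative values only — the real DEFAULT_SAMPLE_RATE and
// DEFAULT_CHUNK_SIZE_IN_MS constants live in useRecorder.ts.
const sampleRate = 16_000; // samples per second (assumed)
const chunkIntervalMs = 100; // one chunk every 100 ms (assumed)
const MS_IN_SECOND = 1_000;
const RENDER_QUANTUM = 128; // samples per AudioWorklet process() call

// Mirrors the bufferSize passed as processorOptions in startRecording():
const bufferSize = (sampleRate * chunkIntervalMs) / MS_IN_SECOND; // 1600 samples

// Muted or not, the worklet posts one chunk per bufferSize samples buffered:
// 1600 / 128 = 12.5 render quanta, i.e. a chunk roughly every 100 ms.
console.log(bufferSize / RENDER_QUANTUM); // 12.5
```
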
diff --git a/packages/bundle/src/boot/actual/hook/minimal.ts b/packages/bundle/src/boot/actual/hook/minimal.ts
index b09c196177..354d3b56e3 100644
--- a/packages/bundle/src/boot/actual/hook/minimal.ts
+++ b/packages/bundle/src/boot/actual/hook/minimal.ts
@@ -44,6 +44,7 @@ export {
   useMarkAllAsAcknowledged,
   useMicrophoneButtonClick,
   useMicrophoneButtonDisabled,
+  useMuteVoice,
   useNotifications,
   useObserveScrollPosition,
   useObserveTranscriptFocus,
@@ -94,6 +95,7 @@ export {
   useTransformHTMLContent,
   useTypingIndicatorVisible,
   useUIState,
+  useUnmuteVoice,
   useUserID,
   useUsername,
   useVoiceSelector,
diff --git a/packages/component/src/boot/hook.ts b/packages/component/src/boot/hook.ts
index 426948db69..d585aadd2b 100644
--- a/packages/component/src/boot/hook.ts
+++ b/packages/component/src/boot/hook.ts
@@ -39,6 +39,7 @@ export {
   useMarkActivityAsSpoken,
   useMarkActivityKeyAsRead,
   useMarkAllAsAcknowledged,
+  useMuteVoice,
   useNotifications,
   usePerformCardAction,
   usePonyfill,
@@ -75,7 +76,8 @@ export {
   useUserID,
   useUsername,
   useVoiceSelector,
-  useVoiceState
+  useVoiceState,
+  useUnmuteVoice
 } from 'botframework-webchat-api/hook';

 // #region Overrides
diff --git a/packages/core/src/actions/muteVoiceRecording.ts b/packages/core/src/actions/muteVoiceRecording.ts
new file mode 100644
index 0000000000..4a3f65c82e
--- /dev/null
+++ b/packages/core/src/actions/muteVoiceRecording.ts
@@ -0,0 +1,17 @@
+const VOICE_MUTE_RECORDING = 'WEB_CHAT/VOICE_MUTE_RECORDING' as const;
+
+type VoiceMuteRecordingAction = {
+  type: typeof VOICE_MUTE_RECORDING;
+};
+
+function muteVoiceRecording(): VoiceMuteRecordingAction {
+  return {
+    type: VOICE_MUTE_RECORDING
+  };
+}
+
+export default muteVoiceRecording;
+
+export { VOICE_MUTE_RECORDING };
+
+export type { VoiceMuteRecordingAction };
diff --git a/packages/core/src/actions/setVoiceState.ts b/packages/core/src/actions/setVoiceState.ts
index 53fc12b7c2..70feef25c3 100644
--- a/packages/core/src/actions/setVoiceState.ts
+++ b/packages/core/src/actions/setVoiceState.ts
@@ -1,6 +1,6 @@
 const VOICE_SET_STATE = 'WEB_CHAT/VOICE_SET_STATE' as const;

-type VoiceState = 'idle' | 'listening' | 'user_speaking' | 'processing' | 'bot_speaking';
+type VoiceState = 'idle' | 'listening' | 'muted' | 'user_speaking' | 'processing' | 'bot_speaking';

 type VoiceSetStateAction = {
   type: typeof VOICE_SET_STATE;
diff --git a/packages/core/src/actions/unmuteVoiceRecording.ts b/packages/core/src/actions/unmuteVoiceRecording.ts
new file mode 100644
index 0000000000..c4cbc74a46
--- /dev/null
+++ b/packages/core/src/actions/unmuteVoiceRecording.ts
@@ -0,0 +1,17 @@
+const VOICE_UNMUTE_RECORDING = 'WEB_CHAT/VOICE_UNMUTE_RECORDING' as const;
+
+type VoiceUnmuteRecordingAction = {
+  type: typeof VOICE_UNMUTE_RECORDING;
+};
+
+function unmuteVoiceRecording(): VoiceUnmuteRecordingAction {
+  return {
+    type: VOICE_UNMUTE_RECORDING
+  };
+}
+
+export default unmuteVoiceRecording;
+
+export { VOICE_UNMUTE_RECORDING };
+
+export type { VoiceUnmuteRecordingAction };
diff --git a/packages/core/src/index.ts b/packages/core/src/index.ts
index 27fde76d63..c29a003704 100644
--- a/packages/core/src/index.ts
+++ b/packages/core/src/index.ts
@@ -3,7 +3,10 @@ import disconnect from './actions/disconnect';
 import dismissNotification from './actions/dismissNotification';
 import emitTypingIndicator from './actions/emitTypingIndicator';
 import markActivity from './actions/markActivity';
+import muteVoiceRecording from './actions/muteVoiceRecording';
 import postActivity from './actions/postActivity';
+import postVoiceActivity from './actions/postVoiceActivity';
+import registerVoiceHandler from './actions/registerVoiceHandler';
 import sendEvent from './actions/sendEvent';
 import sendFiles from './actions/sendFiles';
 import sendMessage from './actions/sendMessage';
@@ -17,11 +20,16 @@ import setSendBox from './actions/setSendBox';
 import setSendBoxAttachments from './actions/setSendBoxAttachments';
 import setSendTimeout from './actions/setSendTimeout';
 import setSendTypingIndicator from './actions/setSendTypingIndicator';
+import setVoiceState from './actions/setVoiceState';
 import startDictate from './actions/startDictate';
 import startSpeakingActivity from './actions/startSpeakingActivity';
+import startVoiceRecording from './actions/startVoiceRecording';
 import stopDictate from './actions/stopDictate';
 import stopSpeakingActivity from './actions/stopSpeakingActivity';
+import stopVoiceRecording from './actions/stopVoiceRecording';
 import submitSendBox from './actions/submitSendBox';
+import unmuteVoiceRecording from './actions/unmuteVoiceRecording';
+import unregisterVoiceHandler from './actions/unregisterVoiceHandler';
 import * as ActivityClientState from './constants/ActivityClientState';
 import * as DictateState from './constants/DictateState';
 import createStore, {
@@ -44,12 +52,6 @@ import isVoiceActivity from './utils/voiceActivity/isVoiceActivity';
 import isVoiceTranscriptActivity from './utils/voiceActivity/isVoiceTranscriptActivity';
 import getVoiceActivityRole from './utils/voiceActivity/getVoiceActivityRole';
 import getVoiceActivityText from './utils/voiceActivity/getVoiceActivityText';
-import startVoiceRecording from './actions/startVoiceRecording';
-import stopVoiceRecording from './actions/stopVoiceRecording';
-import setVoiceState from './actions/setVoiceState';
-import registerVoiceHandler from './actions/registerVoiceHandler';
-import unregisterVoiceHandler from './actions/unregisterVoiceHandler';
-import postVoiceActivity from './actions/postVoiceActivity';
 import type { VoiceState } from './actions/setVoiceState';
 import type { VoiceHandler } from './actions/registerVoiceHandler';

@@ -116,6 +118,7 @@ export {
   isVoiceActivity,
   isVoiceTranscriptActivity,
   markActivity,
+  muteVoiceRecording,
   onErrorResumeNext,
   parseAction,
   parseClaim,
@@ -127,7 +130,6 @@ export {
   postActivity,
   postVoiceActivity,
   registerVoiceHandler,
-  unregisterVoiceHandler,
   sendEvent,
   sendFiles,
   sendMessage,
@@ -149,7 +151,9 @@ export {
   stopDictate,
   stopSpeakingActivity,
   stopVoiceRecording,
-  submitSendBox
+  submitSendBox,
+  unmuteVoiceRecording,
+  unregisterVoiceHandler
 };

 export type {
diff --git a/packages/core/src/reducers/voiceActivity.ts b/packages/core/src/reducers/voiceActivity.ts
index d7f6953e49..c65565c112 100644
--- a/packages/core/src/reducers/voiceActivity.ts
+++ b/packages/core/src/reducers/voiceActivity.ts
@@ -1,20 +1,26 @@
+import { VOICE_MUTE_RECORDING } from '../actions/muteVoiceRecording';
 import { VOICE_REGISTER_HANDLER } from '../actions/registerVoiceHandler';
 import { VOICE_SET_STATE } from '../actions/setVoiceState';
 import { VOICE_START_RECORDING } from '../actions/startVoiceRecording';
 import { VOICE_STOP_RECORDING } from '../actions/stopVoiceRecording';
+import { VOICE_UNMUTE_RECORDING } from '../actions/unmuteVoiceRecording';
 import { VOICE_UNREGISTER_HANDLER } from '../actions/unregisterVoiceHandler';

+import type { VoiceMuteRecordingAction } from '../actions/muteVoiceRecording';
 import type { VoiceHandler, VoiceRegisterHandlerAction } from '../actions/registerVoiceHandler';
 import type { VoiceSetStateAction, VoiceState } from '../actions/setVoiceState';
 import type { VoiceStartRecordingAction } from '../actions/startVoiceRecording';
 import type { VoiceStopRecordingAction } from '../actions/stopVoiceRecording';
+import type { VoiceUnmuteRecordingAction } from '../actions/unmuteVoiceRecording';
 import type { VoiceUnregisterHandlerAction } from '../actions/unregisterVoiceHandler';

 type VoiceActivityActions =
+  | VoiceMuteRecordingAction
   | VoiceRegisterHandlerAction
   | VoiceSetStateAction
   | VoiceStartRecordingAction
   | VoiceStopRecordingAction
+  | VoiceUnmuteRecordingAction
   | VoiceUnregisterHandlerAction;

 interface VoiceActivityState {
@@ -32,6 +38,12 @@ export default function voiceActivity(
   action: VoiceActivityActions
 ): VoiceActivityState {
   switch (action.type) {
+    case VOICE_MUTE_RECORDING:
+      return {
+        ...state,
+        voiceState: 'muted'
+      };
+
     case VOICE_REGISTER_HANDLER: {
       const newHandlers = new Map(state.voiceHandlers);
       newHandlers.set(action.payload.id, action.payload.voiceHandler);
@@ -72,6 +84,16 @@ export default function voiceActivity(
         voiceState: 'idle'
       };

+    case VOICE_UNMUTE_RECORDING:
+      if (state.voiceState !== 'muted') {
+        console.warn(`botframework-webchat: Should not transit from "${state.voiceState}" to "listening"`);
+      }
+
+      return {
+        ...state,
+        voiceState: 'listening'
+      };
+
     default:
       return state;
   }
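
Taken together, the reducer cases above form a small state machine: `VOICE_MUTE_RECORDING` moves the session to `'muted'`, and `VOICE_UNMUTE_RECORDING` moves it back to `'listening'`, warning when the previous state was not `'muted'`. A sketch that drives the reducer directly — the relative imports target modules internal to `packages/core/src` and are for illustration only; `voiceHandlers` is seeded empty and any other fields of `VoiceActivityState` are omitted:

```ts
import voiceActivity from './reducers/voiceActivity'; // internal module, illustration only
import muteVoiceRecording from './actions/muteVoiceRecording';
import unmuteVoiceRecording from './actions/unmuteVoiceRecording';

const listening = { voiceHandlers: new Map(), voiceState: 'listening' as const };

// 'listening' -> 'muted': VoiceRecorderBridge sees the change, calls mute(), and
// the worklet starts posting silent chunks while the microphone is released.
const muted = voiceActivity(listening, muteVoiceRecording());
console.log(muted.voiceState); // 'muted'

// 'muted' -> 'listening': the microphone is reacquired and real audio resumes.
const resumed = voiceActivity(muted, unmuteVoiceRecording());
console.log(resumed.voiceState); // 'listening'

// Unmuting from any other state still transitions, but warns:
// botframework-webchat: Should not transit from "idle" to "listening"
voiceActivity({ ...resumed, voiceState: 'idle' }, unmuteVoiceRecording());
```
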