diff --git a/.gitignore b/.gitignore
index 112e91394..3a5180aca 100644
--- a/.gitignore
+++ b/.gitignore
@@ -2,6 +2,7 @@
**/.vs/
+.vscode/
build/
config.tests/*/.qmake.stash
config.tests/*/Makefile
diff --git a/.gitmodules b/.gitmodules
index 0b50efae7..bb2dc8d2f 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -1,6 +1,6 @@
[submodule "moonlight-common-c/moonlight-common-c"]
path = moonlight-common-c/moonlight-common-c
- url = https://github.com/moonlight-stream/moonlight-common-c.git
+ url = https://github.com/andygrundman/moonlight-common-c.git
[submodule "qmdnsengine/qmdnsengine"]
path = qmdnsengine/qmdnsengine
url = https://github.com/cgutman/qmdnsengine.git
diff --git a/app/Info.plist b/app/Info.plist
index 88e6a502e..b949024cc 100644
--- a/app/Info.plist
+++ b/app/Info.plist
@@ -31,6 +31,8 @@
<key>GCSupportsControllerUserInteraction</key>
<true/>
+ <key>LSApplicationCategoryType</key>
+ <string>public.app-category.games</string>
<key>NSAppTransportSecurity</key>
<dict>
<key>NSAllowsArbitraryLoads</key>
diff --git a/app/app.pro b/app/app.pro
index cbdb9fbba..2dc7ea95c 100644
--- a/app/app.pro
+++ b/app/app.pro
@@ -165,10 +165,19 @@ macx {
CONFIG += discord-rpc
}
- LIBS += -lobjc -framework VideoToolbox -framework AVFoundation -framework CoreVideo -framework CoreGraphics -framework CoreMedia -framework AppKit -framework Metal -framework QuartzCore
-
- # For libsoundio
- LIBS += -framework CoreAudio -framework AudioUnit
+ LIBS += -lobjc \
+ -framework Accelerate \
+ -framework AppKit \
+ -framework AudioToolbox \
+ -framework AudioUnit \
+ -framework AVFoundation \
+ -framework CoreAudio \
+ -framework CoreVideo \
+ -framework CoreGraphics \
+ -framework CoreMedia \
+ -framework Metal \
+ -framework QuartzCore \
+ -framework VideoToolbox
CONFIG += ffmpeg soundio
}
@@ -201,6 +210,7 @@ SOURCES += \
streaming/input/reltouch.cpp \
streaming/session.cpp \
streaming/audio/audio.cpp \
+ streaming/audio/renderers/renderer.cpp \
streaming/audio/renderers/sdlaud.cpp \
gui/computermodel.cpp \
gui/appmodel.cpp \
@@ -403,14 +413,23 @@ win32:!winrt {
streaming/video/ffmpeg-renderers/pacer/dxvsyncsource.h
}
macx {
- message(VideoToolbox renderer selected)
+ message(CoreAudio + VideoToolbox renderers selected)
+
+ DEFINES += HAVE_COREAUDIO
SOURCES += \
+ streaming/audio/renderers/coreaudio/au_spatial_renderer.mm \
+ streaming/audio/renderers/coreaudio/coreaudio.cpp \
+ streaming/audio/renderers/coreaudio/TPCircularBuffer.c \
streaming/video/ffmpeg-renderers/vt_base.mm \
streaming/video/ffmpeg-renderers/vt_avsamplelayer.mm \
streaming/video/ffmpeg-renderers/vt_metal.mm
HEADERS += \
+ streaming/audio/renderers/coreaudio/au_spatial_renderer.h \
+ streaming/audio/renderers/coreaudio/coreaudio.h \
+ streaming/audio/renderers/coreaudio/coreaudio_helpers.h \
+ streaming/audio/renderers/coreaudio/TPCircularBuffer.h \
streaming/video/ffmpeg-renderers/vt.h
}
soundio {
diff --git a/app/deploy/macos/spatial-audio.entitlements b/app/deploy/macos/spatial-audio.entitlements
new file mode 100644
index 000000000..76fc88f8b
--- /dev/null
+++ b/app/deploy/macos/spatial-audio.entitlements
@@ -0,0 +1,12 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+    <key>com.apple.security.app-sandbox</key>
+    <true/>
+    <key>com.apple.developer.spatial-audio.profile-access</key>
+    <true/>
+    <key>com.apple.developer.coremotion.head-pose</key>
+    <true/>
+</dict>
+</plist>
diff --git a/app/gui/SettingsView.qml b/app/gui/SettingsView.qml
index 30b9ed787..974d6bda2 100644
--- a/app/gui/SettingsView.qml
+++ b/app/gui/SettingsView.qml
@@ -881,6 +881,78 @@ Flickable {
}
}
+ Label {
+ width: parent.width
+ id: resSpatialAudioTitle
+ text: qsTr("Spatial audio")
+ font.pointSize: 12
+ wrapMode: Text.Wrap
+ visible: Qt.platform.os == "osx"
+ }
+
+ Row {
+ spacing: 5
+ width: parent.width
+ visible: Qt.platform.os == "osx"
+
+ AutoResizingComboBox {
+ // ignore setting the index at first, and actually set it when the component is loaded
+ Component.onCompleted: {
+ var saved_sac = StreamingPreferences.spatialAudioConfig
+ currentIndex = 0
+ for (var i = 0; i < spatialAudioListModel.count; i++) {
+ var el_audio = spatialAudioListModel.get(i).val;
+ if (saved_sac === el_audio) {
+ currentIndex = i
+ break
+ }
+ }
+ activated(currentIndex)
+ }
+
+ id: spatialAudioComboBox
+ enabled: StreamingPreferences.audioConfig != StreamingPreferences.AC_STEREO
+ textRole: "text"
+ model: ListModel {
+ id: spatialAudioListModel
+ ListElement {
+ text: qsTr("Enabled")
+ val: StreamingPreferences.SAC_AUTO
+ }
+ ListElement {
+ text: qsTr("Disabled")
+ val: StreamingPreferences.SAC_DISABLED
+ }
+ }
+
+ // ::onActivated must be used, as it only listens for when the index is changed by a human
+ onActivated : {
+ StreamingPreferences.spatialAudioConfig = spatialAudioListModel.get(currentIndex).val
+ }
+
+ ToolTip.delay: 1000
+ ToolTip.timeout: 5000
+ ToolTip.visible: hovered
+ ToolTip.text: qsTr("Spatial audio will be used when using any type of headphones, built-in Macbook speakers, and 2-channel USB devices.")
+ }
+
+ CheckBox {
+ id: spatialHeadTracking
+ enabled: StreamingPreferences.audioConfig != StreamingPreferences.AC_STEREO && StreamingPreferences.spatialAudioConfig != StreamingPreferences.SAC_DISABLED
+ width: parent.width
+ text: qsTr("Enable head-tracking")
+ font.pointSize: 12
+ checked: StreamingPreferences.spatialHeadTracking
+ onCheckedChanged: {
+ StreamingPreferences.spatialHeadTracking = checked
+ }
+
+ ToolTip.delay: 1000
+ ToolTip.timeout: 5000
+ ToolTip.visible: hovered
+ ToolTip.text: qsTr("Requires supported Apple or Beats headphones")
+ }
+ }
CheckBox {
id: audioPcCheck
@@ -1176,7 +1248,7 @@ Flickable {
ListElement {
text: qsTr("Maximized")
val: StreamingPreferences.UI_MAXIMIZED
- }
+ }
ListElement {
text: qsTr("Fullscreen")
val: StreamingPreferences.UI_FULLSCREEN
diff --git a/app/settings/streamingpreferences.cpp b/app/settings/streamingpreferences.cpp
index bb34f6269..9655d4799 100644
--- a/app/settings/streamingpreferences.cpp
+++ b/app/settings/streamingpreferences.cpp
@@ -19,9 +19,11 @@
#define SER_FULLSCREEN "fullscreen"
#define SER_VSYNC "vsync"
#define SER_GAMEOPTS "gameopts"
+#define SER_HEADTRACKING "headtracking"
#define SER_HOSTAUDIO "hostaudio"
#define SER_MULTICONT "multicontroller"
#define SER_AUDIOCFG "audiocfg"
+#define SER_SPATIALAUDIOCFG "spatialaudiocfg"
#define SER_VIDEOCFG "videocfg"
#define SER_HDR "hdr"
#define SER_YUV444 "yuv444"
@@ -124,6 +126,7 @@ void StreamingPreferences::reload()
unlockBitrate = settings.value(SER_UNLOCK_BITRATE, false).toBool();
enableVsync = settings.value(SER_VSYNC, true).toBool();
gameOptimizations = settings.value(SER_GAMEOPTS, true).toBool();
+ spatialHeadTracking = settings.value(SER_HEADTRACKING, false).toBool();
playAudioOnHost = settings.value(SER_HOSTAUDIO, false).toBool();
multiController = settings.value(SER_MULTICONT, true).toBool();
enableMdns = settings.value(SER_MDNS, true).toBool();
@@ -148,6 +151,8 @@ void StreamingPreferences::reload()
static_cast<int>(CaptureSysKeysMode::CSK_OFF)).toInt());
audioConfig = static_cast<AudioConfig>(settings.value(SER_AUDIOCFG,
static_cast<int>(AudioConfig::AC_STEREO)).toInt());
+ spatialAudioConfig = static_cast<SpatialAudioConfig>(settings.value(SER_SPATIALAUDIOCFG,
+ static_cast<int>(SpatialAudioConfig::SAC_AUTO)).toInt());
videoCodecConfig = static_cast<VideoCodecConfig>(settings.value(SER_VIDEOCFG,
static_cast<int>(VideoCodecConfig::VCC_AUTO)).toInt());
videoDecoderSelection = static_cast<VideoDecoderSelection>(settings.value(SER_VIDEODEC,
@@ -314,6 +319,7 @@ void StreamingPreferences::save()
settings.setValue(SER_UNLOCK_BITRATE, unlockBitrate);
settings.setValue(SER_VSYNC, enableVsync);
settings.setValue(SER_GAMEOPTS, gameOptimizations);
+ settings.setValue(SER_HEADTRACKING, spatialHeadTracking);
settings.setValue(SER_HOSTAUDIO, playAudioOnHost);
settings.setValue(SER_MULTICONT, multiController);
settings.setValue(SER_MDNS, enableMdns);
@@ -328,6 +334,7 @@ void StreamingPreferences::save()
settings.setValue(SER_DETECTNETBLOCKING, detectNetworkBlocking);
settings.setValue(SER_SHOWPERFOVERLAY, showPerformanceOverlay);
settings.setValue(SER_AUDIOCFG, static_cast<int>(audioConfig));
+ settings.setValue(SER_SPATIALAUDIOCFG, static_cast<int>(spatialAudioConfig));
settings.setValue(SER_HDR, enableHdr);
settings.setValue(SER_YUV444, enableYUV444);
settings.setValue(SER_VIDEOCFG, static_cast<int>(videoCodecConfig));
diff --git a/app/settings/streamingpreferences.h b/app/settings/streamingpreferences.h
index 3ca216fff..b2adf75ad 100644
--- a/app/settings/streamingpreferences.h
+++ b/app/settings/streamingpreferences.h
@@ -26,6 +26,13 @@ class StreamingPreferences : public QObject
};
Q_ENUM(AudioConfig)
+ enum SpatialAudioConfig
+ {
+ SAC_AUTO,
+ SAC_DISABLED
+ };
+ Q_ENUM(SpatialAudioConfig)
+
enum VideoCodecConfig
{
VCC_AUTO,
@@ -112,6 +119,7 @@ class StreamingPreferences : public QObject
Q_PROPERTY(bool unlockBitrate MEMBER unlockBitrate NOTIFY unlockBitrateChanged)
Q_PROPERTY(bool enableVsync MEMBER enableVsync NOTIFY enableVsyncChanged)
Q_PROPERTY(bool gameOptimizations MEMBER gameOptimizations NOTIFY gameOptimizationsChanged)
+ Q_PROPERTY(bool spatialHeadTracking MEMBER spatialHeadTracking NOTIFY spatialHeadTrackingChanged)
Q_PROPERTY(bool playAudioOnHost MEMBER playAudioOnHost NOTIFY playAudioOnHostChanged)
Q_PROPERTY(bool multiController MEMBER multiController NOTIFY multiControllerChanged)
Q_PROPERTY(bool enableMdns MEMBER enableMdns NOTIFY enableMdnsChanged)
@@ -125,6 +133,7 @@ class StreamingPreferences : public QObject
Q_PROPERTY(bool detectNetworkBlocking MEMBER detectNetworkBlocking NOTIFY detectNetworkBlockingChanged)
Q_PROPERTY(bool showPerformanceOverlay MEMBER showPerformanceOverlay NOTIFY showPerformanceOverlayChanged)
Q_PROPERTY(AudioConfig audioConfig MEMBER audioConfig NOTIFY audioConfigChanged)
+ Q_PROPERTY(SpatialAudioConfig spatialAudioConfig MEMBER spatialAudioConfig NOTIFY spatialAudioConfigChanged)
Q_PROPERTY(VideoCodecConfig videoCodecConfig MEMBER videoCodecConfig NOTIFY videoCodecConfigChanged)
Q_PROPERTY(bool enableHdr MEMBER enableHdr NOTIFY enableHdrChanged)
Q_PROPERTY(bool enableYUV444 MEMBER enableYUV444 NOTIFY enableYUV444Changed)
@@ -151,6 +160,7 @@ class StreamingPreferences : public QObject
bool unlockBitrate;
bool enableVsync;
bool gameOptimizations;
+ bool spatialHeadTracking;
bool playAudioOnHost;
bool multiController;
bool enableMdns;
@@ -171,6 +181,7 @@ class StreamingPreferences : public QObject
bool keepAwake;
int packetSize;
AudioConfig audioConfig;
+ SpatialAudioConfig spatialAudioConfig;
VideoCodecConfig videoCodecConfig;
bool enableHdr;
bool enableYUV444;
@@ -187,6 +198,7 @@ class StreamingPreferences : public QObject
void unlockBitrateChanged();
void enableVsyncChanged();
void gameOptimizationsChanged();
+ void spatialHeadTrackingChanged();
void playAudioOnHostChanged();
void multiControllerChanged();
void unsupportedFpsChanged();
@@ -195,6 +207,7 @@ class StreamingPreferences : public QObject
void absoluteMouseModeChanged();
void absoluteTouchModeChanged();
void audioConfigChanged();
+ void spatialAudioConfigChanged();
void videoCodecConfigChanged();
void enableHdrChanged();
void enableYUV444Changed();
diff --git a/app/streaming/audio/audio.cpp b/app/streaming/audio/audio.cpp
index cb60d6dbe..9f9695f9c 100644
--- a/app/streaming/audio/audio.cpp
+++ b/app/streaming/audio/audio.cpp
@@ -9,6 +9,10 @@
#include "renderers/slaud.h"
#endif
+#ifdef HAVE_COREAUDIO
+#include "renderers/coreaudio/coreaudio.h"
+#endif
+
#include "renderers/sdl.h"
#include <Limelight.h>
@@ -29,6 +33,12 @@ IAudioRenderer* Session::createAudioRenderer(const POPUS_MULTISTREAM_CONFIGURATI
TRY_INIT_RENDERER(SdlAudioRenderer, opusConfig)
return nullptr;
}
+#ifdef HAVE_COREAUDIO
+ else if (mlAudio == "coreaudio") {
+ TRY_INIT_RENDERER(CoreAudioRenderer, opusConfig)
+ return nullptr;
+ }
+#endif
#ifdef HAVE_SOUNDIO
else if (mlAudio == "libsoundio") {
TRY_INIT_RENDERER(SoundIoAudioRenderer, opusConfig)
@@ -55,6 +65,11 @@ IAudioRenderer* Session::createAudioRenderer(const POPUS_MULTISTREAM_CONFIGURATI
TRY_INIT_RENDERER(SLAudioRenderer, opusConfig)
#endif
+#ifdef HAVE_COREAUDIO
+ // Native renderer for macOS/iOS/tvOS, supports spatial audio
+ TRY_INIT_RENDERER(CoreAudioRenderer, opusConfig)
+#endif
+
// Default to SDL and use libsoundio as a fallback
TRY_INIT_RENDERER(SdlAudioRenderer, opusConfig)
#ifdef HAVE_SOUNDIO
@@ -157,6 +172,8 @@ int Session::arInit(int /* audioConfiguration */,
void Session::arCleanup()
{
+ if (s_ActiveSession->m_AudioRenderer != nullptr) {
+ s_ActiveSession->m_AudioRenderer->logGlobalAudioStats();
+ }
+
delete s_ActiveSession->m_AudioRenderer;
s_ActiveSession->m_AudioRenderer = nullptr;
@@ -205,6 +222,8 @@ void Session::arDecodeAndPlaySample(char* sampleData, int sampleLength)
}
if (s_ActiveSession->m_AudioRenderer != nullptr) {
+ uint64_t startTimeUs = LiGetMicroseconds();
+
int sampleSize = s_ActiveSession->m_AudioRenderer->getAudioBufferSampleSize();
int frameSize = sampleSize * s_ActiveSession->m_ActiveAudioConfig.channelCount;
int desiredBufferSize = frameSize * s_ActiveSession->m_ActiveAudioConfig.samplesPerFrame;
@@ -239,7 +258,29 @@ void Session::arDecodeAndPlaySample(char* sampleData, int sampleLength)
desiredBufferSize = 0;
}
- if (!s_ActiveSession->m_AudioRenderer->submitAudio(desiredBufferSize)) {
+ // used to display the raw audio bitrate
+ s_ActiveSession->m_AudioRenderer->statsAddOpusBytesReceived(sampleLength);
+
+ // Once a second, grab stats from the last two windows for display (if the overlay is enabled), then shift to the next stats window
+ if (LiGetMicroseconds() > s_ActiveSession->m_AudioRenderer->getActiveWndAudioStats().measurementStartUs + 1000000) {
+ if (s_ActiveSession->getOverlayManager().isOverlayEnabled(Overlay::OverlayDebugAudio)) {
+ AUDIO_STATS lastTwoWndAudioStats = {};
+ s_ActiveSession->m_AudioRenderer->snapshotAudioStats(lastTwoWndAudioStats);
+
+ s_ActiveSession->m_AudioRenderer->stringifyAudioStats(lastTwoWndAudioStats,
+ s_ActiveSession->getOverlayManager().getOverlayText(Overlay::OverlayDebugAudio),
+ s_ActiveSession->getOverlayManager().getOverlayMaxTextLength());
+ s_ActiveSession->getOverlayManager().setOverlayTextUpdated(Overlay::OverlayDebugAudio);
+ }
+
+ s_ActiveSession->m_AudioRenderer->flipAudioStatsWindows();
+ }
+
+ if (s_ActiveSession->m_AudioRenderer->submitAudio(desiredBufferSize)) {
+ // keep stats on how long the audio pipeline took to execute
+ s_ActiveSession->m_AudioRenderer->statsTrackDecodeTime(startTimeUs);
+ }
+ else {
SDL_LogWarn(SDL_LOG_CATEGORY_APPLICATION,
"Reinitializing audio renderer after failure");
diff --git a/app/streaming/audio/renderers/coreaudio/AllocatedAudioBufferList.h b/app/streaming/audio/renderers/coreaudio/AllocatedAudioBufferList.h
new file mode 100644
index 000000000..241388dc0
--- /dev/null
+++ b/app/streaming/audio/renderers/coreaudio/AllocatedAudioBufferList.h
@@ -0,0 +1,57 @@
+/*
+Copyright © 2024 Apple Inc.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the Software without restriction, including without limitation
+the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
+and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
+WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+#pragma once
+
+#include <AudioToolbox/AudioToolbox.h>
+
+class AllocatedAudioBufferList
+{
+public:
+ AllocatedAudioBufferList(UInt32 channelCount, uint16_t bufferSize)
+ {
+ mBufferList = static_cast<AudioBufferList *>(malloc(sizeof(AudioBufferList) + (sizeof(AudioBuffer) * channelCount)));
+ mBufferList->mNumberBuffers = channelCount;
+ for (UInt32 c = 0; c < channelCount; ++c) {
+ mBufferList->mBuffers[c].mNumberChannels = 1;
+ mBufferList->mBuffers[c].mDataByteSize = bufferSize * sizeof(float);
+ mBufferList->mBuffers[c].mData = malloc(sizeof(float) * bufferSize);
+ }
+ }
+
+ AllocatedAudioBufferList(const AllocatedAudioBufferList&) = delete;
+
+ AllocatedAudioBufferList& operator=(const AllocatedAudioBufferList&) = delete;
+
+ ~AllocatedAudioBufferList()
+ {
+ if (mBufferList == nullptr) { return; }
+
+ for (UInt32 i = 0; i < mBufferList->mNumberBuffers; ++i) {
+ free(mBufferList->mBuffers[i].mData);
+ }
+ free(mBufferList);
+ mBufferList = nullptr;
+ }
+
+ AudioBufferList * _Nonnull get()
+ {
+ return mBufferList;
+ }
+
+private:
+ AudioBufferList * _Nonnull mBufferList = { nullptr };
+};
diff --git a/app/streaming/audio/renderers/coreaudio/README.coreaudio b/app/streaming/audio/renderers/coreaudio/README.coreaudio
new file mode 100644
index 000000000..0dfad191f
--- /dev/null
+++ b/app/streaming/audio/renderers/coreaudio/README.coreaudio
@@ -0,0 +1,60 @@
+Moonlight CoreAudio supports 2 modes:
+
+1. A normal passthrough mode where decoded PCM from the Opus stream is passed directly to the output Audio Unit. This mode
+is used when the incoming stream is stereo or when the local output device is already multichannel, e.g. when outputting over HDMI.
+
+2. Spatial Mixer mode. This mode is used for 5.1 and 7.1 channel streams, when the output device supports spatial audio. This usually means
+the system knows that headphones or the built-in MacBook speakers are in use. Apple uses a specially tuned profile to enable
+a spatial effect from their laptop speakers.
+
+There are a lot of knobs available in the mixer to describe how the rendering should be done, but I have hardcoded what seem
+to be Apple's recommended defaults. For example, I can find zero documentation on what the different SpatializationAlgorithm types do,
+but UseOutputType is apparently the right choice, picking the best algorithm for the target device (a minimal call sketch follows the list).
+
+kSpatializationAlgorithm_EqualPowerPanning
+kSpatializationAlgorithm_HRTF
+kSpatializationAlgorithm_SoundField
+kSpatializationAlgorithm_SphericalHead
+kSpatializationAlgorithm_StereoPassThrough
+kSpatializationAlgorithm_VectorBasedPanning
+kSpatializationAlgorithm_HRTFHQ
+kSpatializationAlgorithm_UseOutputType
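+
+These values are applied via kAudioUnitProperty_SpatializationAlgorithm. A minimal sketch of how the default is set (the mixer variable is illustrative):
+
+    UInt32 alg = kSpatializationAlgorithm_UseOutputType;
+    AudioUnitSetProperty(mixer, kAudioUnitProperty_SpatializationAlgorithm,
+                         kAudioUnitScope_Input, 0, &alg, sizeof(alg));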
+
+The CoreAudio renderer was inspired by an example app in Apple's Audio Toolbox documentation:
+
+https://developer.apple.com/documentation/audiotoolbox/generating_spatial_audio_from_a_multichannel_audio_stream
+
+In theory, any number of channels with any layout can be processed by SpatialMixer, with 7.1.4 Atmos as Apple's example,
+in the form of a 12-channel WAV file. Interestingly, raw multichannel WAV files get automatically spatialized when played
+with QuickTime on macOS.
+
+The design and program flow of the example app are overly complex, even though it only uses 2 AudioUnits: one in stereo for final output
+and one that is a SpatialMixer. Perhaps they really wanted to show off mixing SwiftUI with advanced Obj-C++ using closures/lambdas.
+
+I've left in some sections of the code that are platform-specific (iOS needs to use different audio APIs). This will
+hopefully make it easier to port this to moonlight-ios.
+
+Apple example:
+
+AudioFileReader->pullAudioBlock() <- N channel local WAV file (the example has a few 7.1.4 samples)
+ rendering->mInputBlock()
+ AudioUnitRender(mAUSM)
+ mAUSM->process()
+ Kernel->process()
+ 2-channel binaural out <- OutputAU
+
+CoreAudioRenderer:
+
+A thread-safe ring buffer is used: on one end is the Opus decoder, which decodes 5 ms Opus packets into 32-bit float PCM.
+The reader is one of two AURenderCallback functions that are called by CoreAudio in a pull model.
+
+renderCallbackDirect is the simple case: simply copy the PCM into the buffers being given to us by CoreAudio.
+This mode is able to pass the interleaved PCM unchanged to the OS.
+
+In Spatial mode, renderCallbackSpatial uses an intermediate SpatialMixer, which it asks for 2-channel binaural PCM
+using m_SpatialAU.process(). m_SpatialAU is our AUSpatialRenderer class that contains a lot of setup and one callback.
+The process() method calls AudioUnitRender(), which has CoreAudio call inputCallback asking for, say, 8 channels of PCM
+data. This is copied out of the ring buffer, where it is stored interleaved (one sample from each channel grouped into a frame),
+and must be transformed to non-interleaved format in 8 separate buffers. After this, the mixer does its processing and
+process() returns. Back in renderCallbackSpatial, the final 2-channel version is delivered to the output.
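+
+A rough sketch of the spatial path, simplified from the real callback (names as described above):
+
+    static OSStatus renderCallbackSpatial(void *inRefCon, AudioUnitRenderActionFlags *ioActionFlags,
+                                          const AudioTimeStamp *inTimestamp, UInt32 inBusNumber,
+                                          UInt32 inNumberFrames, AudioBufferList *ioData)
+    {
+        CoreAudioRenderer *me = (CoreAudioRenderer *)inRefCon;
+        // The SpatialMixer pulls N-channel PCM from the ring buffer via inputCallback,
+        // spatializes it, and writes the 2-channel binaural result into ioData
+        return me->m_SpatialAU.process(ioData, ioActionFlags, inTimestamp, inNumberFrames);
+    }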
diff --git a/app/streaming/audio/renderers/coreaudio/TODO b/app/streaming/audio/renderers/coreaudio/TODO
new file mode 100644
index 000000000..b00255dfb
--- /dev/null
+++ b/app/streaming/audio/renderers/coreaudio/TODO
@@ -0,0 +1,64 @@
+CoreAudioRenderer TODO
+----------------------
+Test the lower-quality surround modes that have coupled streams: are they broken on headphones or when spatialized?
+From https://people.xiph.org/~xiphmont/demo/opus/demo3.shtml
+
+ Surround masking takes advantage of cross-channel masking between free-field loudspeakers. Obviously, we can't do that for stereo, as stereo is often listened to on headphones or nearfield monitors, but for surround encodings played on typical surround placements with listeners placed well within the soundfield, there's considerable savings to be had by assuming freefield masking. We only need to make slight modifications to ensure that the encode still sounds good when downmixed to stereo for playback on non-surround systems.
+
+Refactor into more logical/cleaner C++ classes.
+Refactor audio stats code and implement for other backends.
+
+New options for audio:
+Packet size: 2.5/5/10/20
+Surround channel mode (coupled/mono)
+Bitrate: 96/128/256/510 (256k max per channel for multichannel)
+VBR: yes/no
+RTP FEC (audio)
+
+2-line overlay stats version:
+
+Line 1: 1280x800 @ 60 FPS, HEVC 10-bit HDR, 15.0 Mbps (+20% FEC)
+Line 2: Latency: 1ms ± 1ms, Decode: 6.67ms, Render: 3.78ms, Host: 1.00ms, Loss: 0%
+
+Line 1: 7.1 Opus @ 48 kHz, 512 kbps (+33% FEC), spatial audio (AirPods)
+Line 2: Latency: 12.9ms, Decode: 0.06ms, Packet Loss: 0%
+
+Understand Game Mode
+--------------------
+Watch for Game Mode in/out events; this currently seems to be undocumented, but there is plenty of log activity when it happens:
+
+macOS log enabling Game Mode:
+
+02:47:39.879865-0400 Policy default com.apple.gamepolicyd gamepolicyd Game mode enabled.
+02:47:39.883277-0400 Server.Core default com.apple.bluetooth bluetoothd SystemSettingsModel::systemMonitor game console mode changed:1
+02:47:39.883805-0400 Server.HID default com.apple.bluetooth bluetoothd adjustHIDSniffInterval: numAudioDevices(2), isStereoSCO(0), isStreaming(1), numGameControllers(0), numOfLEADevices(0)
+02:47:39.884598-0400 Server.Audio default com.apple.bluetooth bluetoothd Low Latency Game console mode update, new mode: enabled, fConsoleGameModeOn: enabled, fGameModeOn: disabled, fCombinedGameOn: disabled->enabled
+02:47:39.885883-0400 General default com.apple.modelmanager gamepolicyd Acquiring assertion: DaemonRep(policy: "StandardGameMode", description: "com.apple.gamepolicyd", timestamp: 749371659.885592, id: 54C06E74-1D53-4569-B723-D9111CCC60C2, acquirerPID: 665)
+02:47:39.884661-0400 Server.Audio default com.apple.bluetooth bluetoothd Dynamic Latency Trigger fKeyboardOn =0, fVoiceOverOn=0, fGameModeOn=0, fGarageBandOn=0, fSpatialVideoOn=0, fSpatialMusicOn=0, aggregated = 0, fScreenOn =1, fExpanseOn =0, fAudioInputAggregateOn =0, fConsoleGameModeOn=1
+02:47:39.892354-0400 ttl default com.apple.runningboard runningboardd Acquiring assertion targeting [anon(501):94115] from originator [osservice:160] with description
+ ]>
+02:47:39.899548-0400 Server.Audio default com.apple.bluetooth bluetoothd Dynamic Latency Trigger fKeyboardOn =0, fVoiceOverOn=0, fGameModeOn=0, fGarageBandOn=0, fSpatialVideoOn=0, fSpatialMusicOn=0, fScreenOn =1, fExpanseOn =0, fAudioInputAggregateOn=0, fConsoleGameModeOn=1
+02:47:39.899576-0400 Server.Audio default com.apple.bluetooth bluetoothd Low Latency Game console mode change, notify BTHAL low Latency Game: 1, HID: 1
+02:47:39.899650-0400 Server.Audio default com.apple.bluetooth bluetoothd Dynamic Latency Trigger fKeyboardOn =0, fVoiceOverOn=0, fGameModeOn=0, fGarageBandOn=0, fSpatialVideoOn=0, fSpatialMusicOn=0, fScreenOn =1, fExpanseOn =0, fAudioInputAggregateOn=0, fConsoleGameModeOn=1
+02:47:39.900522-0400 Server.Audio default com.apple.bluetooth bluetoothd Low Latency Game console mode change, notify BTHAL low Latency Game: 1, HID: 1
+02:47:39.921763-0400 ttl default com.apple.runningboard runningboardd Acquiring assertion targeting [osservice:1490] from originator [osservice:665] with description
+ ]>
+02:47:39.928395-0400 gamemode default com.apple.powerlog PerfPowerServices Sent game mode notification to submodules: 1
+
+
+macOS log when coming out of Game Mode:
+
+Policy default com.apple.gamepolicyd gamepolicyd Game mode disabled.
+Server.Core default com.apple.bluetooth bluetoothd SystemSettingsModel::systemMonitor game console mode changed:0
+Server.HID default com.apple.bluetooth bluetoothd adjustHIDSniffInterval: numAudioDevices(2), isStereoSCO(0), isStreaming(1), numGameControllers(0), numOfLEADevices(0)
+Server.Audio default com.apple.bluetooth bluetoothd Low Latency Game console mode update, new mode: disabled, fConsoleGameModeOn: disabled, fGameModeOn: disabled, fCombinedGameOn: enabled->disabled
+Server.Audio default com.apple.bluetooth bluetoothd Dynamic Latency Trigger fKeyboardOn =0, fVoiceOverOn=0, fGameModeOn=0, fGarageBandOn=0, fSpatialVideoOn=0, fSpatialMusicOn=0, aggregated = 0, fScreenOn =1, fExpanseOn =0, fAudioInputAggregateOn =0, fConsoleGameModeOn=0
+Server.Audio default com.apple.bluetooth bluetoothd Dynamic Latency Trigger fKeyboardOn =0, fVoiceOverOn=0, fGameModeOn=0, fGarageBandOn=0, fSpatialVideoOn=0, fSpatialMusicOn=0, fScreenOn =1, fExpanseOn =0, fAudioInputAggregateOn=0, fConsoleGameModeOn=0
+Server.Audio default com.apple.bluetooth bluetoothd Low Latency Game console mode change, notify BTHAL low Latency Game: 0, HID: 1
+Server.Audio default com.apple.bluetooth bluetoothd Dynamic Latency Trigger fKeyboardOn =0, fVoiceOverOn=0, fGameModeOn=0, fGarageBandOn=0, fSpatialVideoOn=0, fSpatialMusicOn=0, fScreenOn =1, fExpanseOn =0, fAudioInputAggregateOn=0, fConsoleGameModeOn=0
+Server.Audio default com.apple.bluetooth bluetoothd Low Latency Game console mode change, notify BTHAL low Latency Game: 0, HID: 1
+gamemode default com.apple.powerlog PerfPowerServices Sent game mode notification to submodules: 0
+monitor default com.apple.runningboard gamepolicyd Received state update for 181 (osservice, running-NotVisible
+Server.Audio default com.apple.bluetooth bluetoothd Low Latency Game a2dpDynamicLatencyTransitionCompleted new latencymode: 6
diff --git a/app/streaming/audio/renderers/coreaudio/TPCircularBuffer.c b/app/streaming/audio/renderers/coreaudio/TPCircularBuffer.c
new file mode 100644
index 000000000..7d87a63f0
--- /dev/null
+++ b/app/streaming/audio/renderers/coreaudio/TPCircularBuffer.c
@@ -0,0 +1,149 @@
+//
+// TPCircularBuffer.c
+// Circular/Ring buffer implementation
+//
+// https://github.com/michaeltyson/TPCircularBuffer
+//
+// Created by Michael Tyson on 10/12/2011.
+//
+// Copyright (C) 2012-2013 A Tasty Pixel
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+//
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+//
+// 3. This notice may not be removed or altered from any source distribution.
+//
+
+#include "TPCircularBuffer.h"
+#include <mach/mach.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#define reportResult(result,operation) (_reportResult((result),(operation),strrchr(__FILE__, '/')+1,__LINE__))
+static inline bool _reportResult(kern_return_t result, const char *operation, const char* file, int line) {
+ if ( result != ERR_SUCCESS ) {
+ printf("%s:%d: %s: %s\n", file, line, operation, mach_error_string(result));
+ return false;
+ }
+ return true;
+}
+
+bool _TPCircularBufferInit(TPCircularBuffer *buffer, uint32_t length, size_t structSize) {
+
+ assert(length > 0);
+
+ if ( structSize != sizeof(TPCircularBuffer) ) {
+ fprintf(stderr, "TPCircularBuffer: Header version mismatch. Check for old versions of TPCircularBuffer in your project\n");
+ abort();
+ }
+
+ // Keep trying until we get our buffer, needed to handle race conditions
+ int retries = 3;
+ while ( true ) {
+
+ buffer->length = (uint32_t)round_page(length); // We need whole page sizes
+
+ // Temporarily allocate twice the length, so we have the contiguous address space to
+ // support a second instance of the buffer directly after
+ vm_address_t bufferAddress;
+ kern_return_t result = vm_allocate(mach_task_self(),
+ &bufferAddress,
+ buffer->length * 2,
+ VM_FLAGS_ANYWHERE); // allocate anywhere it'll fit
+ if ( result != ERR_SUCCESS ) {
+ if ( retries-- == 0 ) {
+ reportResult(result, "Buffer allocation");
+ return false;
+ }
+ // Try again if we fail
+ continue;
+ }
+
+ // Now replace the second half of the allocation with a virtual copy of the first half. Deallocate the second half...
+ result = vm_deallocate(mach_task_self(),
+ bufferAddress + buffer->length,
+ buffer->length);
+ if ( result != ERR_SUCCESS ) {
+ if ( retries-- == 0 ) {
+ reportResult(result, "Buffer deallocation");
+ return false;
+ }
+ // If this fails somehow, deallocate the whole region and try again
+ vm_deallocate(mach_task_self(), bufferAddress, buffer->length);
+ continue;
+ }
+
+ // Re-map the buffer to the address space immediately after the buffer
+ vm_address_t virtualAddress = bufferAddress + buffer->length;
+ vm_prot_t cur_prot, max_prot;
+ result = vm_remap(mach_task_self(),
+ &virtualAddress, // mirror target
+ buffer->length, // size of mirror
+ 0, // auto alignment
+ 0, // force remapping to virtualAddress
+ mach_task_self(), // same task
+ bufferAddress, // mirror source
+ 0, // MAP READ-WRITE, NOT COPY
+ &cur_prot, // unused protection struct
+ &max_prot, // unused protection struct
+ VM_INHERIT_DEFAULT);
+ if ( result != ERR_SUCCESS ) {
+ if ( retries-- == 0 ) {
+ reportResult(result, "Remap buffer memory");
+ return false;
+ }
+ // If this remap failed, we hit a race condition, so deallocate and try again
+ vm_deallocate(mach_task_self(), bufferAddress, buffer->length);
+ continue;
+ }
+
+ if ( virtualAddress != bufferAddress+buffer->length ) {
+ // If the memory is not contiguous, clean up both allocated buffers and try again
+ if ( retries-- == 0 ) {
+ printf("Couldn't map buffer memory to end of buffer\n");
+ return false;
+ }
+
+ vm_deallocate(mach_task_self(), virtualAddress, buffer->length);
+ vm_deallocate(mach_task_self(), bufferAddress, buffer->length);
+ continue;
+ }
+
+ buffer->buffer = (void*)bufferAddress;
+ buffer->fillCount = 0;
+ buffer->head = buffer->tail = 0;
+ buffer->atomic = true;
+
+ return true;
+ }
+ return false;
+}
+
+void TPCircularBufferCleanup(TPCircularBuffer *buffer) {
+ vm_deallocate(mach_task_self(), (vm_address_t)buffer->buffer, buffer->length * 2);
+ memset(buffer, 0, sizeof(TPCircularBuffer));
+}
+
+void TPCircularBufferClear(TPCircularBuffer *buffer) {
+ uint32_t fillCount;
+ if ( TPCircularBufferTail(buffer, &fillCount) ) {
+ TPCircularBufferConsume(buffer, fillCount);
+ }
+}
+
+void TPCircularBufferSetAtomic(TPCircularBuffer *buffer, bool atomic) {
+ buffer->atomic = atomic;
+}
diff --git a/app/streaming/audio/renderers/coreaudio/TPCircularBuffer.h b/app/streaming/audio/renderers/coreaudio/TPCircularBuffer.h
new file mode 100644
index 000000000..95994995f
--- /dev/null
+++ b/app/streaming/audio/renderers/coreaudio/TPCircularBuffer.h
@@ -0,0 +1,243 @@
+//
+// TPCircularBuffer.h
+// Circular/Ring buffer implementation
+//
+// https://github.com/michaeltyson/TPCircularBuffer
+//
+// Created by Michael Tyson on 10/12/2011.
+//
+//
+// This implementation makes use of a virtual memory mapping technique that inserts a virtual copy
+// of the buffer memory directly after the buffer's end, negating the need for any buffer wrap-around
+// logic. Clients can simply use the returned memory address as if it were contiguous space.
+//
+// The implementation is thread-safe in the case of a single producer and single consumer.
+//
+// Virtual memory technique originally proposed by Philip Howard (http://vrb.slashusr.org/), and
+// adapted to Darwin by Kurt Revis (http://www.snoize.com,
+// http://www.snoize.com/Code/PlayBufferedSoundFile.tar.gz)
+//
+//
+// Copyright (C) 2012-2013 A Tasty Pixel
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+//
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+//
+// 3. This notice may not be removed or altered from any source distribution.
+//
+
+#ifndef TPCircularBuffer_h
+#define TPCircularBuffer_h
+
+#include <stdbool.h>
+#include <string.h>
+#include <assert.h>
+
+#ifdef __cplusplus
+ extern "C++" {
+ #include <atomic>
+ typedef std::atomic_int atomicInt;
+ #define atomicFetchAdd(a,b) std::atomic_fetch_add(a,b)
+ }
+#else
+ #include <stdatomic.h>
+ typedef atomic_int atomicInt;
+ #define atomicFetchAdd(a,b) atomic_fetch_add(a,b)
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef struct {
+ void *buffer;
+ uint32_t length;
+ uint32_t tail;
+ uint32_t head;
+ volatile atomicInt fillCount;
+ bool atomic;
+} TPCircularBuffer;
+
+/*!
+ * Initialise buffer
+ *
+ * Note that the length is advisory only: Because of the way the
+ * memory mirroring technique works, the true buffer length will
+ * be multiples of the device page size (e.g. 4096 bytes)
+ *
+ * If you intend to use the AudioBufferList utilities, you should
+ * always allocate a bit more space than you need for pure audio
+ * data, so there's room for the metadata. How much extra is required
+ * depends on how many AudioBufferList structures are used, which is
+ * a function of how many audio frames each buffer holds. A good rule
+ * of thumb is to add 15%, or at least another 2048 bytes or so.
+ *
+ * @param buffer Circular buffer
+ * @param length Length of buffer
+ */
+#define TPCircularBufferInit(buffer, length) \
+ _TPCircularBufferInit(buffer, length, sizeof(*buffer))
+bool _TPCircularBufferInit(TPCircularBuffer *buffer, uint32_t length, size_t structSize);
+
+/*!
+ * Cleanup buffer
+ *
+ * Releases buffer resources.
+ */
+void TPCircularBufferCleanup(TPCircularBuffer *buffer);
+
+/*!
+ * Clear buffer
+ *
+ * Resets buffer to original, empty state.
+ *
+ * This is safe for use by consumer while producer is accessing
+ * buffer.
+ */
+void TPCircularBufferClear(TPCircularBuffer *buffer);
+
+/*!
+ * Set the atomicity
+ *
+ * If you set the atomicity to false using this method, the buffer will
+ * not use atomic operations. This can be used to give the compiler a little
+ * more optimisation opportunities when the buffer is only used on one thread.
+ *
+ * Important note: Only set this to false if you know what you're doing!
+ *
+ * The default value is true (the buffer will use atomic operations)
+ *
+ * @param buffer Circular buffer
+ * @param atomic Whether the buffer is atomic (default true)
+ */
+void TPCircularBufferSetAtomic(TPCircularBuffer *buffer, bool atomic);
+
+// Reading (consuming)
+
+/*!
+ * Access end of buffer
+ *
+ * This gives you a pointer to the end of the buffer, ready
+ * for reading, and the number of available bytes to read.
+ *
+ * @param buffer Circular buffer
+ * @param availableBytes On output, the number of bytes ready for reading
+ * @return Pointer to the first bytes ready for reading, or NULL if buffer is empty
+ */
+static __inline__ __attribute__((always_inline)) void* TPCircularBufferTail(TPCircularBuffer *buffer, uint32_t* availableBytes) {
+ *availableBytes = buffer->fillCount;
+ if ( *availableBytes == 0 ) return NULL;
+ return (void*)((char*)buffer->buffer + buffer->tail);
+}
+
+/*!
+ * Consume bytes in buffer
+ *
+ * This frees up the just-read bytes, ready for writing again.
+ *
+ * @param buffer Circular buffer
+ * @param amount Number of bytes to consume
+ */
+static __inline__ __attribute__((always_inline)) void TPCircularBufferConsume(TPCircularBuffer *buffer, uint32_t amount) {
+ buffer->tail = (buffer->tail + amount) % buffer->length;
+ if ( buffer->atomic ) {
+ atomicFetchAdd(&buffer->fillCount, -(int)amount);
+ } else {
+ buffer->fillCount -= amount;
+ }
+ assert(buffer->fillCount >= 0);
+}
+
+/*!
+ * Access front of buffer
+ *
+ * This gives you a pointer to the front of the buffer, ready
+ * for writing, and the number of available bytes to write.
+ *
+ * @param buffer Circular buffer
+ * @param availableBytes On output, the number of bytes ready for writing
+ * @return Pointer to the first bytes ready for writing, or NULL if buffer is full
+ */
+static __inline__ __attribute__((always_inline)) void* TPCircularBufferHead(TPCircularBuffer *buffer, uint32_t* availableBytes) {
+ *availableBytes = (buffer->length - buffer->fillCount);
+ if ( *availableBytes == 0 ) return NULL;
+ return (void*)((char*)buffer->buffer + buffer->head);
+}
+
+// Writing (producing)
+
+/*!
+ * Produce bytes in buffer
+ *
+ * This marks the given section of the buffer ready for reading.
+ *
+ * @param buffer Circular buffer
+ * @param amount Number of bytes to produce
+ */
+static __inline__ __attribute__((always_inline)) void TPCircularBufferProduce(TPCircularBuffer *buffer, uint32_t amount) {
+ buffer->head = (buffer->head + amount) % buffer->length;
+ if ( buffer->atomic ) {
+ atomicFetchAdd(&buffer->fillCount, (int)amount);
+ } else {
+ buffer->fillCount += amount;
+ }
+ assert((uint32_t)buffer->fillCount <= buffer->length);
+}
+
+/*!
+ * Helper routine to copy bytes to buffer
+ *
+ * This copies the given bytes to the buffer, and marks them ready for reading.
+ *
+ * @param buffer Circular buffer
+ * @param src Source buffer
+ * @param len Number of bytes in source buffer
+ * @return true if bytes copied, false if there was insufficient space
+ */
+static __inline__ __attribute__((always_inline)) bool TPCircularBufferProduceBytes(TPCircularBuffer *buffer, const void* src, uint32_t len) {
+ uint32_t space;
+ void *ptr = TPCircularBufferHead(buffer, &space);
+ if ( space < len ) return false;
+ memcpy(ptr, src, len);
+ TPCircularBufferProduce(buffer, len);
+ return true;
+}
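+
+/*!
+ * Example usage (single producer, single consumer), a minimal sketch:
+ *
+ *     TPCircularBuffer buf;
+ *     TPCircularBufferInit(&buf, 16384);        // length is rounded up to whole pages
+ *
+ *     float pcm[240] = {0};                     // producer thread
+ *     TPCircularBufferProduceBytes(&buf, pcm, sizeof(pcm));
+ *
+ *     uint32_t avail;                           // consumer thread
+ *     float *tail = (float *)TPCircularBufferTail(&buf, &avail);
+ *     if (tail) {
+ *         // ... read avail bytes from tail ...
+ *         TPCircularBufferConsume(&buf, avail);
+ *     }
+ *
+ *     TPCircularBufferCleanup(&buf);
+ */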
+
+/*!
+ * Deprecated method
+ */
+static __inline__ __attribute__((always_inline)) __deprecated_msg("use TPCircularBufferSetAtomic(false) and TPCircularBufferConsume instead")
+void TPCircularBufferConsumeNoBarrier(TPCircularBuffer *buffer, uint32_t amount) {
+ buffer->tail = (buffer->tail + amount) % buffer->length;
+ buffer->fillCount -= amount;
+ assert(buffer->fillCount >= 0);
+}
+
+/*!
+ * Deprecated method
+ */
+static __inline__ __attribute__((always_inline)) __deprecated_msg("use TPCircularBufferSetAtomic(false) and TPCircularBufferProduce instead")
+void TPCircularBufferProduceNoBarrier(TPCircularBuffer *buffer, uint32_t amount) {
+ buffer->head = (buffer->head + amount) % buffer->length;
+ buffer->fillCount += amount;
+ assert((uint32_t)buffer->fillCount <= buffer->length);
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/app/streaming/audio/renderers/coreaudio/au_spatial_renderer.h b/app/streaming/audio/renderers/coreaudio/au_spatial_renderer.h
new file mode 100644
index 000000000..c1e18d0ad
--- /dev/null
+++ b/app/streaming/audio/renderers/coreaudio/au_spatial_renderer.h
@@ -0,0 +1,41 @@
+#pragma once
+
+#include "TPCircularBuffer.h"
+
+#include <AudioToolbox/AudioToolbox.h>
+#include <AudioUnit/AudioUnit.h>
+
+#include <stdint.h>
+
+typedef void (^SimpleBlock)();
+
+class AUSpatialRenderer
+{
+public:
+ AUSpatialRenderer();
+ ~AUSpatialRenderer();
+
+ double getAudioUnitLatency();
+ void setRingBufferPtr(const TPCircularBuffer* __nonnull buffer);
+ void setStatsTrackRenderBlock(SimpleBlock _Nonnull);
+ bool setup(AUSpatialMixerOutputType outputType, float sampleRate, int inChannelCount);
+ OSStatus setStreamFormatAndACL(float inSampleRate, AudioChannelLayoutTag inLayoutTag, AudioUnitScope inScope, AudioUnitElement inElement);
+ OSStatus setOutputType(AUSpatialMixerOutputType outputType);
+ OSStatus process(AudioBufferList* __nullable outputABL, AudioUnitRenderActionFlags* __nonnull ioActionFlags, const AudioTimeStamp* __nullable inTimestamp, float inNumberFrames);
+
+ friend OSStatus inputCallback(void * _Nonnull,
+ AudioUnitRenderActionFlags *_Nullable,
+ const AudioTimeStamp * _Nullable,
+ uint32_t, uint32_t,
+ AudioBufferList * _Nonnull);
+
+ uint32_t m_HeadTracking;
+ uint32_t m_PersonalizedHRTF;
+
+private:
+ AudioUnit _Nonnull m_Mixer;
+ const TPCircularBuffer* _Nonnull m_RingBufferPtr; // pointer to RingBuffer in the outer CoreAudioRenderer
+ SimpleBlock _Nonnull m_StatsTrackRenderBlock;
+
+ double m_AudioUnitLatency;
+};
diff --git a/app/streaming/audio/renderers/coreaudio/au_spatial_renderer.mm b/app/streaming/audio/renderers/coreaudio/au_spatial_renderer.mm
new file mode 100644
index 000000000..ae23c5026
--- /dev/null
+++ b/app/streaming/audio/renderers/coreaudio/au_spatial_renderer.mm
@@ -0,0 +1,358 @@
+#import "au_spatial_renderer.h"
+#import "coreaudio_helpers.h"
+#import "AllocatedAudioBufferList.h"
+#include "settings/streamingpreferences.h"
+
+#import <AVFoundation/AVFoundation.h>
+#import <Accelerate/Accelerate.h>
+#import <AudioToolbox/AudioToolbox.h>
+
+AUSpatialRenderer::AUSpatialRenderer()
+ : m_HeadTracking(0),
+ m_PersonalizedHRTF(0),
+ m_AudioUnitLatency(0.0)
+{
+ DEBUG_TRACE("AUSpatialRenderer construct");
+
+ AudioComponentDescription desc = {kAudioUnitType_Mixer,
+ kAudioUnitSubType_SpatialMixer,
+ kAudioUnitManufacturer_Apple,
+ 0,
+ 0};
+ AudioComponent comp = AudioComponentFindNext(NULL, &desc);
+ assert(comp);
+
+ OSStatus status = AudioComponentInstanceNew(comp, &m_Mixer);
+ if (status != noErr) {
+ CA_LogError(status, "Failed to create Spatial Mixer");
+ assert(status == noErr);
+ }
+}
+
+AUSpatialRenderer::~AUSpatialRenderer()
+{
+ DEBUG_TRACE("AUSpatialRenderer destruct");
+
+ if (m_Mixer) {
+ AudioComponentInstanceDispose(m_Mixer);
+ }
+}
+
+double AUSpatialRenderer::getAudioUnitLatency()
+{
+ return m_AudioUnitLatency;
+}
+
+void AUSpatialRenderer::setRingBufferPtr(const TPCircularBuffer *buffer)
+{
+ m_RingBufferPtr = buffer;
+}
+
+OSStatus AUSpatialRenderer::setStreamFormatAndACL(float inSampleRate,
+ AudioChannelLayoutTag inLayoutTag,
+ AudioUnitScope inScope,
+ AudioUnitElement inElement)
+{
+ AVAudioChannelLayout* layout = [AVAudioChannelLayout layoutWithLayoutTag:inLayoutTag];
+ AVAudioFormat *format = [[AVAudioFormat alloc] initWithCommonFormat:AVAudioPCMFormatFloat32
+ sampleRate:inSampleRate
+ interleaved:NO
+ channelLayout:layout];
+
+ const AudioStreamBasicDescription* asbd = [format streamDescription];
+ if (inScope == kAudioUnitScope_Input) {
+ CA_PrintASBD("CoreAudioRenderer spatial mixer input AudioStreamBasicDescription:", asbd);
+ } else {
+ CA_PrintASBD("CoreAudioRenderer spatial mixer output AudioStreamBasicDescription:", asbd);
+ }
+ OSStatus status = AudioUnitSetProperty(m_Mixer, kAudioUnitProperty_StreamFormat, inScope, inElement, asbd, sizeof(AudioStreamBasicDescription));
+ if (status != noErr) {
+ CA_LogError(status, "Failed to set AUSpatialRenderer StreamFormat scope=%d", inScope);
+ return status;
+ }
+
+ const AudioChannelLayout* outLayout = [layout layout];
+ status = AudioUnitSetProperty(m_Mixer, kAudioUnitProperty_AudioChannelLayout, inScope, inElement, outLayout, sizeof(AudioChannelLayout));
+ if (status != noErr) {
+ CA_LogError(status, "Failed to set AUSpatialRenderer AudioChannelLayout scope=%d, layout=%d", inScope, outLayout);
+ return status;
+ }
+
+ return noErr;
+}
+
+OSStatus AUSpatialRenderer::setOutputType(AUSpatialMixerOutputType outputType)
+{
+ return AudioUnitSetProperty(m_Mixer, kAudioUnitProperty_SpatialMixerOutputType, kAudioUnitScope_Global, 0, &outputType, sizeof(outputType));
+}
+
+// realtime method
+OSStatus inputCallback(void *inRefCon,
+ AudioUnitRenderActionFlags *ioActionFlags,
+ const AudioTimeStamp * /*inTimestamp*/,
+ uint32_t /*inBusNumber*/,
+ uint32_t inNumberFrames,
+ AudioBufferList *ioData)
+{
+ AUSpatialRenderer *me = (AUSpatialRenderer *)inRefCon;
+
+ // Clear the buffer
+ for (uint32_t i = 0; i < ioData->mNumberBuffers; i++) {
+ // faster version of memset(ioData->mBuffers[i].mData, 0, inNumberFrames * sizeof(float));
+ vDSP_vclr((float *)ioData->mBuffers[i].mData, 1, inNumberFrames);
+ }
+
+ // Pull audio from playthrough buffer
+ uint32_t availableBytes;
+ float *ringBuffer = (float *)TPCircularBufferTail((TPCircularBuffer *)me->m_RingBufferPtr, &availableBytes);
+
+ // Total size of interleaved PCM for all channels
+ uint32_t channelCount = ioData->mNumberBuffers;
+ uint32_t wantedBytes = channelCount * inNumberFrames * 4;
+
+ if (availableBytes < wantedBytes) {
+ // not enough data for all channels, note we are sending back a zeroed-out buffer
+ // This underrun is not always a problem, so it's not included in stats currently
+ // XXX this ioActionFlags with silence flag seems to get lost, instead of returning via
+ // AudioUnitRender() <- process() <- renderSpatialCallback()
+ *ioActionFlags |= kAudioUnitRenderAction_OutputIsSilence;
+ return noErr;
+ }
+
+ // de-interleave ringBuffer PCM data into per-channel buffers
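+ // vDSP_vsadd with a zero scalar acts as a strided copy: read stride = channelCount, write stride = 1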
+ const float zero = 0.0f;
+ for (uint32_t channel = 0; channel < channelCount; channel++) {
+ float *channelBuffer = (float *)ioData->mBuffers[channel].mData;
+ vDSP_vsadd(ringBuffer + channel, channelCount, &zero, channelBuffer, 1, inNumberFrames);
+ }
+
+ TPCircularBufferConsume((TPCircularBuffer *)me->m_RingBufferPtr, wantedBytes);
+
+ // The Apple example included this but it doesn't seem to do anything?
+ // (*ioActionFlags) = kAudioOfflineUnitRenderAction_Complete;
+
+ return noErr;
+}
+
+bool AUSpatialRenderer::setup(AUSpatialMixerOutputType outputType, float sampleRate, int inChannelCount)
+{
+ // Set the number of input elements (buses).
+ uint32_t numInputs = 1;
+ OSStatus status = AudioUnitSetProperty(m_Mixer, kAudioUnitProperty_ElementCount, kAudioUnitScope_Input, 0, &numInputs, sizeof(numInputs));
+ if (status != noErr) {
+ CA_LogError(status, "Failed to set AUSpatialRenderer numInputs to 1");
+ return false;
+ }
+
+ // Set up the output stream format and channel layout for stereo.
+ status = setStreamFormatAndACL(sampleRate, kAudioChannelLayoutTag_Stereo, kAudioUnitScope_Output, 0);
+ if (status != noErr) {
+ CA_LogError(status, "Failed to set AUSpatialRenderer output stream format to stereo");
+ return false;
+ }
+
+ // Set up the input stream format as multichannel with 5.1 or 7.1 channel layout
+ // If it's ever possible to access a 12-channel Atmos bitstream from Windows, it should work here too
+ AudioChannelLayoutTag layout;
+ switch (inChannelCount) {
+ case 2:
+ layout = kAudioChannelLayoutTag_Stereo;
+ break;
+ case 6:
+ // Back in the DVD era I remember 5.1 meant side surrounds (WAVE_5_1_A), but at some point it became back surrounds?
+ // layout = kAudioChannelLayoutTag_WAVE_5_1_A; // L R C LFE Ls Rs
+ layout = kAudioChannelLayoutTag_WAVE_5_1_B; // L R C LFE Rls Rrs
+ break;
+ case 8:
+ layout = kAudioChannelLayoutTag_WAVE_7_1; // L R C LFE Rls Rrs Ls Rs
+ break;
+ case 12:
+ layout = kAudioChannelLayoutTag_Atmos_7_1_4; // L R C LFE Ls Rs Rls Rrs Vhl Vhr Ltr Rtr
+ break;
+ default:
+ CA_LogError(-1, "Unsupported number of channels for spatial audio mixer: %d", inChannelCount);
+ return false;
+ }
+
+ // XXX Allow user to override channel layout
+
+ status = setStreamFormatAndACL(sampleRate, layout, kAudioUnitScope_Input, 0);
+ if (status != noErr) {
+ CA_LogError(status, "Failed to set AUSpatialRenderer input stream format to %d channels", inChannelCount);
+ return false;
+ }
+
+ // Apple docs say: Use kSpatializationAlgorithm_UseOutputType with appropriate kAudioUnitProperty_SpatialMixerOutputType
+ // for highest-quality spatial rendering across different hardware.
+ uint32_t renderingAlgorithm = kSpatializationAlgorithm_UseOutputType;
+ DEBUG_TRACE("AUSpatialRenderer kAudioUnitProperty_SpatializationAlgorithm set to UseOutputType (%d)", renderingAlgorithm);
+ status = AudioUnitSetProperty(m_Mixer, kAudioUnitProperty_SpatializationAlgorithm, kAudioUnitScope_Input, 0, &renderingAlgorithm, sizeof(renderingAlgorithm));
+ if (status != noErr) {
+ CA_LogError(status, "Failed to set AUSpatialRenderer spatialization algorithm");
+ return false;
+ }
+
+ // Set the source mode. AmbienceBed causes the input channels to be spatialized around the listener as far-field sources.
+ uint32_t sourceMode = kSpatialMixerSourceMode_AmbienceBed;
+ DEBUG_TRACE("AUSpatialRenderer kAudioUnitProperty_SpatialMixerSourceMode set to AmbienceBed (%d)", sourceMode);
+ status = AudioUnitSetProperty(m_Mixer, kAudioUnitProperty_SpatialMixerSourceMode, kAudioUnitScope_Input, 0, &sourceMode, sizeof(sourceMode));
+ if (status != noErr) {
+ CA_LogError(status, "Failed to set AUSpatialRenderer source mode");
+ return false;
+ }
+
+ // Set up the output type to adapt the rendering depending on the physical output.
+ // The unit renders binaural for headphones, Apple-proprietary for built-in
+ // speakers, or multichannel for external speakers.
+ DEBUG_TRACE("AUSpatialRenderer setOutputType %d", outputType);
+ status = setOutputType(outputType);
+ if (status != noErr) {
+ CA_LogError(status, "Failed to set AUSpatialRenderer output type");
+ return false;
+ }
+
+#if TARGET_OS_OSX
+ if (@available(macOS 13.0, *))
+#elif TARGET_OS_IOS
+ if (@available(iOS 18.0, *))
+#elif TARGET_OS_TV
+ if (@available(tvOS 18.0, *))
+#endif
+ {
+ if (outputType == kSpatialMixerOutputType_Headphones) {
+ // XXX: Both of these might require the builder to have a paid Apple developer account due to the use of entitlements.
+
+ // For devices that support it, enable head-tracking.
+ // Apps that use low-latency head-tracking in iOS/tvOS need to set
+ // the audio session category to ambient or run in Game Mode.
+ // Head tracking requires the entitlement com.apple.developer.coremotion.head-pose.
+
+ // XXX Head-tracking may cause audio glitches. It's off by default.
+ StreamingPreferences *prefs = StreamingPreferences::get();
+ if (prefs->spatialHeadTracking) {
+ uint32_t ht = 1;
+ status = AudioUnitSetProperty(m_Mixer, kAudioUnitProperty_SpatialMixerEnableHeadTracking, kAudioUnitScope_Global, 0, &ht, sizeof(uint32_t));
+ if (status != noErr) {
+ CA_LogError(status, "Failed to enable head tracking");
+ }
+ else {
+ DEBUG_TRACE("AUSpatialRenderer enabled head-tracking");
+ m_HeadTracking = 1;
+ }
+ }
+
+ // For devices that support it, enable personalized head-related transfer function (HRTF).
+ // HRTF requires the entitlement com.apple.developer.spatial-audio.profile-access.
+ // https://developer.apple.com/documentation/bundleresources/entitlements/com_apple_developer_spatial-audio_profile-access
+ // This is an opportunistic API, so if personalized HRTF isn't available, the
+ // system falls back to generic HRTF.
+ uint32_t hrtf = kSpatialMixerPersonalizedHRTFMode_Auto;
+ status = AudioUnitSetProperty(m_Mixer, kAudioUnitProperty_SpatialMixerPersonalizedHRTFMode, kAudioUnitScope_Global, 0, &hrtf, sizeof(uint32_t));
+ if (status != noErr) {
+ CA_LogError(status, "Failed to enable personalized spatial audio");
+ }
+ else {
+ DEBUG_TRACE("AUSpatialRenderer set personalized HRTF mode to auto");
+ }
+ }
+ }
+
+#if TARGET_OS_IOS
+ if (@available(iOS 18.0, *))
+#elif TARGET_OS_TV
+ if (@available(tvOS 18.0, *))
+#endif
+ {
+ // Set a factory preset to use with media playback on an Apple device.
+ // This can override previously set properties. Check the available
+ // presets by using `auval` command. For example, `auval -v aumx 3dem appl`
+ // may list the following presets:
+ //
+ // ID: 0 Name: Built-In Speaker Media Playback
+ // ID: 1 Name: Headphone Media Playback Default
+ // ID: 2 Name: Headphone Media Playback Movie
+ AUPreset preset {
+ outputType == kSpatialMixerOutputType_BuiltInSpeakers ? 0 : 1,
+ NULL
+ };
+ status = AudioUnitSetProperty(m_Mixer, kAudioUnitProperty_PresentPreset, kAudioUnitScope_Global, 0, &preset, sizeof(AUPreset));
+ if (status != noErr) {
+ CA_LogError(status, "Failed to set AUSpatialRenderer factory preset");
+ return false;
+ }
+ }
+
+
+ // Set the maximum frames we can process per callback (must match size of m_SpatialBuffer)
+ uint32_t mfps = 4096;
+ status = AudioUnitSetProperty(m_Mixer, kAudioUnitProperty_MaximumFramesPerSlice, kAudioUnitScope_Global, 0, &mfps, sizeof(mfps));
+ if (status != noErr) {
+ CA_LogError(status, "Failed to set AUSpatialRenderer max frame size");
+ return false;
+ }
+
+ // Set up the input callback that pulls n channels of PCM from the ringBuffer
+ AURenderCallbackStruct callbackStruct;
+ callbackStruct.inputProc = inputCallback;
+ callbackStruct.inputProcRefCon = this;
+ DEBUG_TRACE("AUSpatialRenderer set input callback");
+ status = AudioUnitSetProperty(m_Mixer, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Input, 0, &callbackStruct, sizeof(callbackStruct));
+ if (status != noErr) {
+ CA_LogError(status, "Failed to set AUSpatialRenderer input callback");
+ return false;
+ }
+
+ // We're ready!
+ DEBUG_TRACE("AUSpatialRenderer initialize");
+ status = AudioUnitInitialize(m_Mixer);
+ if (status != noErr) {
+ CA_LogError(status, "Failed to initialize AUSpatialRenderer");
+ return false;
+ }
+
+#if TARGET_OS_OSX
+ // you can set HRTF in 13 but only check the status in 14
+ if (@available(macOS 14.0, *))
+#elif TARGET_OS_IOS
+ if (@available(iOS 18.0, *))
+#elif TARGET_OS_TV
+ if (@available(tvOS 18.0, *))
+#endif
+ {
+ // After initialize, we can check if personalized HRTF is actually being used
+ if (outputType == kSpatialMixerOutputType_Headphones) {
+ m_PersonalizedHRTF = 0;
+ uint32_t size = sizeof(m_PersonalizedHRTF);
+ status = AudioUnitGetProperty(m_Mixer, kAudioUnitProperty_SpatialMixerAnyInputIsUsingPersonalizedHRTF, kAudioUnitScope_Global, 0, &m_PersonalizedHRTF, &size);
+ if (status != noErr) {
+ CA_LogError(status, "Failed to get AUSpatialRenderer personalized HRTF status");
+ }
+ else {
+ DEBUG_TRACE("AUSpatialRenderer actual personalized HRTF status: %s", m_PersonalizedHRTF ? "enabled" : "disabled");
+ }
+ }
+ }
+
+ // Get the internal AudioUnit latency (processing time from input to output)
+ {
+ m_AudioUnitLatency = 0.0;
+ uint32_t size = sizeof(m_AudioUnitLatency);
+ status = AudioUnitGetProperty(m_Mixer, kAudioUnitProperty_Latency, kAudioUnitScope_Global, 0, &m_AudioUnitLatency, &size);
+ if (status != noErr) {
+ CA_LogError(status, "Failed to get SpatialAU AudioUnit latency");
+ return false;
+ }
+ DEBUG_TRACE("CoreAudioRenderer SpatialAU AudioUnit latency: %0.2f ms", m_AudioUnitLatency * 1000.0);
+ }
+
+ return true;
+}
+
+// realtime method
+OSStatus AUSpatialRenderer::process(AudioBufferList* __nullable outputABL,
+ AudioUnitRenderActionFlags *ioActionFlags,
+ const AudioTimeStamp* __nullable inTimestamp,
+ float inNumberFrames)
+{
+ return AudioUnitRender(m_Mixer, ioActionFlags, inTimestamp, 0, inNumberFrames, outputABL);
+}
diff --git a/app/streaming/audio/renderers/coreaudio/coreaudio.cpp b/app/streaming/audio/renderers/coreaudio/coreaudio.cpp
new file mode 100644
index 000000000..16ced8509
--- /dev/null
+++ b/app/streaming/audio/renderers/coreaudio/coreaudio.cpp
@@ -0,0 +1,779 @@
+#include "coreaudio.h"
+#include "coreaudio_helpers.h"
+#include "settings/streamingpreferences.h"
+
+#if TARGET_OS_OSX
+#include <CoreAudio/CoreAudio.h>
+#endif
+
+#include <Limelight.h>
+#include <dispatch/dispatch.h>
+#include <stdexcept>
+#include <stdio.h>
+
+#define kRingBufferMaxSeconds 0.030
+
+CoreAudioRenderer::CoreAudioRenderer()
+ : m_SpatialBuffer(2, 4096)
+{
+ DEBUG_TRACE("CoreAudioRenderer construct");
+
+ AudioComponentDescription description;
+ description.componentType = kAudioUnitType_Output;
+#if TARGET_OS_IPHONE
+ description.componentSubType = kAudioUnitSubType_RemoteIO;
+#elif TARGET_OS_OSX
+ description.componentSubType = kAudioUnitSubType_HALOutput;
+#endif
+ description.componentManufacturer = kAudioUnitManufacturer_Apple;
+ description.componentFlags = 0;
+ description.componentFlagsMask = 0;
+
+ AudioComponent comp = AudioComponentFindNext(NULL, &description);
+ if (!comp) {
+ return;
+ }
+
+ OSStatus status = AudioComponentInstanceNew(comp, &m_OutputAU);
+ if (status != noErr) {
+ CA_LogError(status, "Failed to create an instance of HALOutput or RemoteIO");
+ throw std::runtime_error("Failed to create an instance of HALOutput or RemoteIO");
+ }
+}
+
+CoreAudioRenderer::~CoreAudioRenderer()
+{
+ DEBUG_TRACE("CoreAudioRenderer destruct");
+ cleanup();
+}
+
+void CoreAudioRenderer::stop()
+{
+ DEBUG_TRACE("CoreAudioRenderer stop");
+ if (m_OutputAU != nullptr) {
+ AudioOutputUnitStop(m_OutputAU);
+ }
+}
+
+void CoreAudioRenderer::cleanup()
+{
+ DEBUG_TRACE("CoreAudioRenderer cleanup");
+ stop();
+
+ if (m_OutputAU != nullptr) {
+ AudioUnitUninitialize(m_OutputAU);
+ AudioComponentInstanceDispose(m_OutputAU);
+ m_OutputAU = nullptr;
+
+ // Must be destroyed after the stream is stopped
+ TPCircularBufferCleanup(&m_RingBuffer);
+ }
+
+ if (m_OutputDeviceID) {
+ deinitListeners();
+ m_OutputDeviceID = 0;
+ }
+
+ if (m_OutputDeviceName) {
+ free(m_OutputDeviceName);
+ }
+}
+
+int CoreAudioRenderer::getCapabilities()
+{
+ // CAPABILITY_DIRECT_SUBMIT feels worse than decoding in a separate thread
+ return CAPABILITY_SUPPORTS_ARBITRARY_AUDIO_DURATION;
+}
+
+IAudioRenderer::AudioFormat CoreAudioRenderer::getAudioBufferFormat()
+{
+ return AudioFormat::Float32NE;
+}
+
+void CoreAudioRenderer::statsIncDeviceOverload()
+{
+ m_ActiveWndAudioStats.totalGlitches++;
+}
+
+// realtime method
+void CoreAudioRenderer::statsTrackRender(uint64_t startTimeUs, const AudioTimeStamp *inTimestamp, uint32_t inNumberFrames)
+{
+ // check for lost packets because we weren't called in time, possibly due to system overload
+ if (m_LastSampleTime && (inTimestamp->mFlags & kAudioTimeStampSampleTimeValid)) {
+ double expectedSampleTime = m_LastSampleTime + m_LastNumFrames;
+ if (expectedSampleTime != inTimestamp->mSampleTime) {
+ uint32_t lostFrames = (uint32_t)(inTimestamp->mSampleTime - expectedSampleTime);
+ double lostDuration = (double)lostFrames / 48000.0; // the stream is always 48 kHz
+
+ m_ActiveWndAudioStats.totalGlitches++;
+
+#ifdef COREAUDIO_DEBUG
+ uint64_t hostTime = inTimestamp->mHostTime; // capture by value; the timestamp pointer isn't valid once this callback returns
+ dispatch_async(dispatch_get_main_queue(), ^{
+ DEBUG_TRACE("[%llu] Error: lost/dropped audio frames: %u (%.02fms)", hostTime, lostFrames, lostDuration * 1000.0);
+ });
+#endif
+ }
+ }
+
+ m_LastSampleTime = inTimestamp->mSampleTime;
+ m_LastNumFrames = inNumberFrames;
+
+ // add this to our decoderTime
+ uint64_t decodeTimeUs = LiGetMicroseconds() - startTimeUs;
+ m_ActiveWndAudioStats.decodeDurationUs += decodeTimeUs;
+
+ // We now have decodeDurationUs covering 2 time periods:
+ // 1. Filling the queue: Opus decoding plus write to circular buffer (statsTrackDecodeTime)
+ // 2. Emptying the queue: from start of AudioUnit callback in either direct or spatial mode (statsTrackRender)
+ // Although it's referred to as render time, the time is just added to decodeDurationUs
+}
+
+int CoreAudioRenderer::stringifyAudioStats(AUDIO_STATS& stats, char *output, int length)
+{
+ // let parent class provide generic stats first
+ int offset = IAudioRenderer::stringifyAudioStats(stats, output, length);
+ if (offset < 0) {
+ return -1;
+ }
+
+ int ret = snprintf(
+ &output[offset],
+ length - offset,
+ "Output device: %s @ %.1f kHz, %u-channel\n"
+ "Render mode: %s %s %s %s\n"
+ "Latency: %0.1f ms (network %d ms, buffers %.1f ms, hardware: %.1f ms)\n",
+
+ // "Output device: %s @ %.1f kHz, %u-channel\n"
+ m_OutputDeviceName,
+ m_OutputASBD.mSampleRate / 1000.0,
+ m_OutputASBD.mChannelsPerFrame,
+
+ // "Render mode: %s %s %s %s\n"
+ m_Spatial ? (m_SpatialAU.m_PersonalizedHRTF ? "personalized spatial audio" : "spatial audio") : "passthrough",
+ m_Spatial && m_SpatialAU.m_HeadTracking ? "with head-tracking for" : "for",
+ !strcmp(m_OutputTransportType, "blue") ? "Bluetooth"
+ : !strcmp(m_OutputTransportType, "bltn") ? "built-in"
+ : !strcmp(m_OutputTransportType, "usb ") ? "USB"
+ : !strcmp(m_OutputTransportType, "hdmi") ? "HDMI"
+ : !strcmp(m_OutputTransportType, "airp") ? "AirPlay"
+ : m_OutputTransportType,
+ !strcmp(m_OutputDataSource , "hdpn") ? "headphones"
+ : !strcmp(m_OutputDataSource, "ispk") ? "internal speakers"
+ : !strcmp(m_OutputDataSource, "espk") ? "external speakers"
+ : m_OutputDataSource,
+
+ // "Latency: %0.1fms (network %dms, buffers %.1fms, hardware: %.1fms)\n"
+ (double)stats.lastRtt + (m_TotalSoftwareLatency + m_OutputHardwareLatency) * 1000.0,
+ stats.lastRtt,
+ m_TotalSoftwareLatency * 1000.0,
+ m_OutputHardwareLatency * 1000.0
+ );
+ if (ret < 0 || ret >= length - offset) {
+ SDL_assert(false);
+ return -1;
+ }
+
+ return offset + ret;
+}
+
+// realtime method
+OSStatus renderCallbackDirect(void *inRefCon,
+ AudioUnitRenderActionFlags *ioActionFlags,
+ const AudioTimeStamp *inTimestamp,
+ uint32_t /*inBusNumber*/,
+ uint32_t inNumberFrames,
+ AudioBufferList *ioData)
+{
+ uint64_t start = LiGetMicroseconds();
+
+ CoreAudioRenderer *me = (CoreAudioRenderer *)inRefCon;
+ int bytesToCopy = ioData->mBuffers[0].mDataByteSize;
+ float *targetBuffer = (float *)ioData->mBuffers[0].mData;
+
+ // Pull audio from playthrough buffer
+ uint32_t availableBytes;
+ float *buffer = (float *)TPCircularBufferTail(&me->m_RingBuffer, &availableBytes);
+
+ if ((int)availableBytes < bytesToCopy) {
+ // write silence if not enough buffered data is available
+ // faster version of memset(targetBuffer, 0, bytesToCopy); note vDSP lengths are in floats, not bytes
+ vDSP_vclr(targetBuffer, 1, bytesToCopy / sizeof(float));
+ *ioActionFlags |= kAudioUnitRenderAction_OutputIsSilence;
+ } else {
+ // we have at least bytesToCopy available, so copy exactly one callback's worth
+ // faster version of memcpy(targetBuffer, buffer, bytesToCopy);
+ vDSP_mmov(buffer, targetBuffer, 1, bytesToCopy / sizeof(float), 1, 1);
+ TPCircularBufferConsume(&me->m_RingBuffer, bytesToCopy);
+ }
+
+ me->statsTrackRender(start, inTimestamp, inNumberFrames);
+
+ return noErr;
+}
+
+// realtime method
+OSStatus renderCallbackSpatial(void *inRefCon,
+ AudioUnitRenderActionFlags *ioActionFlags,
+ const AudioTimeStamp *inTimestamp,
+ uint32_t /*inBusNumber*/,
+ uint32_t inNumberFrames,
+ AudioBufferList *ioData)
+{
+ uint64_t start = LiGetMicroseconds();
+ CoreAudioRenderer *me = (CoreAudioRenderer *)inRefCon;
+ AudioBufferList *spatialBuffer = me->m_SpatialBuffer.get();
+
+ // Set the byte size on each (mono, non-interleaved) buffer of the spatial output list.
+ for (uint32_t i = 0; i < spatialBuffer->mNumberBuffers; i++) {
+ spatialBuffer->mBuffers[i].mDataByteSize = inNumberFrames * sizeof(float);
+ }
+
+ // Process the input frames with the audio unit spatial mixer.
+ me->m_SpatialAU.process(spatialBuffer, ioActionFlags, inTimestamp, inNumberFrames);
+
+ // Copy the temporary buffer to the output.
+ for (uint32_t i = 0; i < spatialBuffer->mNumberBuffers; i++) {
+ // faster version of memcpy(ioData->mBuffers[i].mData, spatialBuffer->mBuffers[i].mData, inNumberFrames * sizeof(float));
+ vDSP_mmov((const float *)spatialBuffer->mBuffers[i].mData, (float *)ioData->mBuffers[i].mData, 1, inNumberFrames, 1, 1); // length in floats, not bytes
+ }
+
+ me->statsTrackRender(start, inTimestamp, inNumberFrames);
+
+ return noErr;
+}
+
+bool CoreAudioRenderer::prepareForPlayback(const OPUS_MULTISTREAM_CONFIGURATION* opusConfig)
+{
+ OSStatus status = noErr;
+ m_opusConfig = opusConfig;
+
+ // Request that the OS size our device buffer close to the Opus packet duration
+ m_AudioPacketDuration = (double)opusConfig->samplesPerFrame / opusConfig->sampleRate;
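+ // e.g. 240 samples per frame at 48 kHz -> 0.005 s (5 ms)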
+
+ if (!initAudioUnit()) {
+ DEBUG_TRACE("initAudioUnit failed");
+ return false;
+ }
+
+ if (!initRingBuffer()) {
+ DEBUG_TRACE("initRingBuffer failed");
+ return false;
+ }
+
+ if (!initListeners()) {
+ DEBUG_TRACE("initListeners failed");
+ return false;
+ }
+
+ m_Spatial = false;
+ AUSpatialMixerOutputType outputType = getSpatialMixerOutputType();
+
+ DEBUG_TRACE("CoreAudioRenderer getSpatialMixerOutputType = %d", outputType);
+
+ if (opusConfig->channelCount > 2) {
+ if (outputType != kSpatialMixerOutputType_ExternalSpeakers) {
+ m_Spatial = true;
+ }
+ }
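+ // e.g. 5.1 on AirPods -> spatial; 5.1 over HDMI -> passthrough; stereo always uses passthrough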
+
+ StreamingPreferences *prefs = StreamingPreferences::get();
+ if (prefs->spatialAudioConfig == StreamingPreferences::SAC_DISABLED) {
+ // User has disabled spatial audio
+ DEBUG_TRACE("CoreAudioRenderer user has disabled spatial audio");
+ m_Spatial = false;
+ }
+
+ // indicate the format our callback will provide samples in
+ // If necessary, the OS takes care of resampling (but not downmixing)
+ AudioStreamBasicDescription streamDesc;
+ memset(&streamDesc, 0, sizeof(AudioStreamBasicDescription));
+ streamDesc.mSampleRate = m_opusConfig->sampleRate;
+ streamDesc.mFormatID = kAudioFormatLinearPCM;
+ streamDesc.mFormatFlags = kAudioFormatFlagIsFloat | kAudioFormatFlagsNativeEndian | kAudioFormatFlagIsPacked;
+ streamDesc.mFramesPerPacket = 1;
+ streamDesc.mChannelsPerFrame = (uint32_t)opusConfig->channelCount;
+ streamDesc.mBitsPerChannel = 32;
+ streamDesc.mBytesPerPacket = 4 * opusConfig->channelCount;
+ streamDesc.mBytesPerFrame = streamDesc.mBytesPerPacket;
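+ // e.g. 5.1: 6 channels x 4-byte float samples = 24 bytes per interleaved frame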
+
+ if (m_Spatial) {
+ // render audio for binaural headphones or built-in laptop speakers
+ setCallback(renderCallbackSpatial);
+
+ // this callback is non-interleaved
+ streamDesc.mFormatFlags |= kAudioFormatFlagIsNonInterleaved;
+ streamDesc.mBytesPerPacket = 4;
+ streamDesc.mBytesPerFrame = 4;
+
+ m_SpatialOutputType = outputType;
+
+ SDL_LogInfo(SDL_LOG_CATEGORY_APPLICATION, "CoreAudioRenderer is using spatial audio output");
+
+ if (!m_SpatialAU.setup(outputType, opusConfig->sampleRate, opusConfig->channelCount)) {
+ DEBUG_TRACE("m_SpatialAU.setup failed");
+ return false;
+ }
+
+ m_TotalSoftwareLatency += m_SpatialAU.getAudioUnitLatency();
+ } else {
+ // direct passthrough of all channels for stereo and HDMI
+ setCallback(renderCallbackDirect);
+
+ SDL_LogInfo(SDL_LOG_CATEGORY_APPLICATION, "CoreAudioRenderer is using passthrough mode");
+ }
+
+ status = AudioUnitSetProperty(m_OutputAU, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &streamDesc, sizeof(streamDesc));
+ if (status != noErr) {
+ CA_LogError(status, "Failed to set output stream format");
+ return false;
+ }
+
+ DEBUG_TRACE("CoreAudioRenderer start");
+ status = AudioOutputUnitStart(m_OutputAU);
+ if (status != noErr) {
+ CA_LogError(status, "Failed to start output audio unit");
+ return false;
+ }
+
+ return true;
+}
+
+bool CoreAudioRenderer::initAudioUnit()
+{
+ // Initialize the audio unit interface to begin configuring it.
+ OSStatus status = AudioUnitInitialize(m_OutputAU);
+ if (status != noErr) {
+ CA_LogError(status, "Failed to initialize the output audio unit");
+ return false;
+ }
+
+ /* macOS:
+ * disable OutputAU input IO
+ * enable OutputAU output IO
+ * get system default output AudioDeviceID (todo: allow user to choose specific device from list)
+ * set OutputAU to AudioDeviceID
+ * get device's AudioStreamBasicDescription (format, bit depth, samplerate, etc)
+ * get device name
+ * get output buffer frame size
+ * get output buffer min/max
+ * set output buffer frame size
+ */
+
+#if TARGET_OS_OSX
+ constexpr AudioUnitElement outputElement{0};
+ constexpr AudioUnitElement inputElement{1};
+
+ {
+ uint32_t enableIO = 0;
+ status = AudioUnitSetProperty(m_OutputAU, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input, inputElement, &enableIO, sizeof(enableIO));
+ if (status != noErr) {
+ CA_LogError(status, "Failed to disable the input on AUHAL");
+ return false;
+ }
+
+ enableIO = 1;
+ status = AudioUnitSetProperty(m_OutputAU, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Output, outputElement, &enableIO, sizeof(enableIO));
+ if (status != noErr) {
+ CA_LogError(status, "Failed to enable the output on AUHAL");
+ return false;
+ }
+ }
+
+ {
+ uint32_t size = sizeof(AudioDeviceID);
+ AudioObjectPropertyAddress addr{kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMain};
+ status = AudioObjectGetPropertyData(AudioObjectID(kAudioObjectSystemObject), &addr, 0, nil, &size, &m_OutputDeviceID); // no qualifier data
+ if (status != noErr) {
+ CA_LogError(status, "Failed to get the default output device");
+ return false;
+ }
+ }
+
+ {
+ CFStringRef name;
+ uint32_t nameSize = sizeof(CFStringRef);
+ AudioObjectPropertyAddress addr{kAudioObjectPropertyName, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMain};
+ status = AudioObjectGetPropertyData(m_OutputDeviceID, &addr, 0, nil, &nameSize, &name);
+ if (status != noErr) {
+ CA_LogError(status, "Failed to get name of output device");
+ return false;
+ }
+ setOutputDeviceName(name);
+ CFRelease(name);
+ DEBUG_TRACE("CoreAudioRenderer default output device ID: %d, name: %s", m_OutputDeviceID, m_OutputDeviceName);
+ }
+
+ {
+ // Set the current device to the default output device.
+ // This should be done only after I/O is enabled on the output audio unit.
+ status = AudioUnitSetProperty(m_OutputAU, kAudioOutputUnitProperty_CurrentDevice, kAudioUnitScope_Global, outputElement, &m_OutputDeviceID, sizeof(AudioDeviceID));
+ if (status != noErr) {
+ CA_LogError(status, "Failed to set the default output device");
+ return false;
+ }
+ }
+
+ {
+ uint32_t streamFormatSize = sizeof(AudioStreamBasicDescription);
+ AudioObjectPropertyAddress addr{kAudioDevicePropertyStreamFormat, kAudioDevicePropertyScopeOutput, kAudioObjectPropertyElementMain};
+ status = AudioObjectGetPropertyData(m_OutputDeviceID, &addr, 0, nil, &streamFormatSize, &m_OutputASBD);
+ if (status != noErr) {
+ CA_LogError(status, "Failed to get output device AudioStreamBasicDescription");
+ return false;
+ }
+ CA_PrintASBD("CoreAudioRenderer output format:", &m_OutputASBD);
+ }
+
+ // Buffer:
+ // The goal here is to set the system buffer to our desired value, which is currently in m_AudioPacketDuration.
+ // First we get the current value and the range of allowed values, then set our value and query the actual result.
+ // We also query the hardware latency (e.g. Bluetooth delay for AirPods), but only to report it in the stats overlay
+
+ {
+ uint32_t bufferFrameSize = 0;
+ uint32_t size = sizeof(uint32_t);
+ AudioObjectPropertyAddress addr{kAudioDevicePropertyBufferFrameSize, kAudioObjectPropertyScopeOutput, kAudioObjectPropertyElementMain};
+ status = AudioObjectGetPropertyData(m_OutputDeviceID, &addr, 0, nil, &size, &bufferFrameSize);
+ if (status != noErr) {
+ CA_LogError(status, "Failed to get the output device buffer frame size");
+ return false;
+ }
+ DEBUG_TRACE("CoreAudioRenderer output current BufferFrameSize %d", bufferFrameSize);
+ }
+
+ {
+ AudioValueRange avr;
+ uint32_t size = sizeof(AudioValueRange);
+ AudioObjectPropertyAddress addr{kAudioDevicePropertyBufferFrameSizeRange, kAudioObjectPropertyScopeOutput, kAudioObjectPropertyElementMain};
+ status = AudioObjectGetPropertyData(m_OutputDeviceID, &addr, 0, nil, &size, &avr);
+ if (status != noErr) {
+ CA_LogError(status, "Failed to get the output device buffer frame size range");
+ return false;
+ }
+ m_OutputSoftwareLatencyMin = avr.mMinimum / m_OutputASBD.mSampleRate;
+ m_OutputSoftwareLatencyMax = avr.mMaximum / m_OutputASBD.mSampleRate;
+ DEBUG_TRACE("CoreAudioRenderer output BufferFrameSizeRange: %.0f - %.0f", avr.mMinimum, avr.mMaximum);
+ }
+
+ // The latency values we have access to are:
+ // kAudioDevicePropertyBufferFrameSize our requested buffer as close to Opus packet size as possible
+ // + kAudioDevicePropertySafetyOffset an additional CoreAudio buffer
+ // + kAudioUnitProperty_Latency processing latency of OutputAU (+ SpatialAU in spatial mode)
+ // = total software latency
+ // kAudioDevicePropertyLatency = hardware latency
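+ //
+ // An illustrative (not measured) example: a 240-frame buffer at 48 kHz (5 ms)
+ // + a 0.5 ms safety offset + 1 ms of AudioUnit processing = ~6.5 ms software
+ // latency, while Bluetooth headphones may add 100+ ms of hardware latency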
+
+ {
+ double desiredBufferFrameSize = m_AudioPacketDuration;
+ desiredBufferFrameSize = qMax(qMin(desiredBufferFrameSize, m_OutputSoftwareLatencyMax), m_OutputSoftwareLatencyMin);
+ uint32_t bufferFrameSize = (uint32_t)(desiredBufferFrameSize * m_OutputASBD.mSampleRate);
+ AudioObjectPropertyAddress addrSet{kAudioDevicePropertyBufferFrameSize, kAudioObjectPropertyScopeOutput, kAudioObjectPropertyElementMain};
+ status = AudioObjectSetPropertyData(m_OutputDeviceID, &addrSet, 0, NULL, sizeof(uint32_t), &bufferFrameSize);
+ if (status != noErr) {
+ CA_LogError(status, "Failed to set the output device buffer frame size");
+ return false;
+ }
+ DEBUG_TRACE("CoreAudioRenderer output requested BufferFrameSize of %d (%0.3f ms)", bufferFrameSize, desiredBufferFrameSize * 1000.0);
+
+ // see what we got
+ uint32_t size = sizeof(uint32_t);
+ AudioObjectPropertyAddress addrGet{kAudioDevicePropertyBufferFrameSize, kAudioObjectPropertyScopeOutput, kAudioObjectPropertyElementMain};
+ status = AudioObjectGetPropertyData(m_OutputDeviceID, &addrGet, 0, nil, &size, &m_BufferFrameSize);
+ if (status != noErr) {
+ CA_LogError(status, "Failed to get the output device buffer frame size");
+ return false;
+ }
+ double bufferFrameLatency = (double)m_BufferFrameSize / m_OutputASBD.mSampleRate;
+ m_TotalSoftwareLatency += bufferFrameLatency;
+ m_TotalSoftwareLatency += 0.0025; // Opus has 2.5ms of initial delay
+ DEBUG_TRACE("CoreAudioRenderer output now has actual BufferFrameSize of %d (%0.3f ms)", m_BufferFrameSize, bufferFrameLatency * 1000.0);
+ }
+
+ {
+ double audioUnitLatency = 0.0;
+ uint32_t size = sizeof(audioUnitLatency);
+ status = AudioUnitGetProperty(m_OutputAU, kAudioUnitProperty_Latency, kAudioUnitScope_Global, 0, &audioUnitLatency, &size);
+ if (status != noErr) {
+ CA_LogError(status, "Failed to get OutputAU AudioUnit latency");
+ return false;
+ }
+ m_TotalSoftwareLatency += audioUnitLatency;
+ DEBUG_TRACE("CoreAudioRenderer OutputAU AudioUnit latency: %0.2f ms", audioUnitLatency * 1000.0);
+ }
+
+ {
+ uint32_t safetyOffsetLatency = 0;
+ uint32_t size = sizeof(safetyOffsetLatency);
+ AudioObjectPropertyAddress addrGet{kAudioDevicePropertySafetyOffset, kAudioDevicePropertyScopeOutput, kAudioObjectPropertyElementMain};
+ status = AudioObjectGetPropertyData(m_OutputDeviceID, &addrGet, 0, nil, &size, &safetyOffsetLatency);
+ if (status != noErr) {
+ CA_LogError(status, "Failed to get safety offset latency");
+ return false;
+ }
+ m_TotalSoftwareLatency += (double)safetyOffsetLatency / m_OutputASBD.mSampleRate;
+ DEBUG_TRACE("CoreAudioRenderer OutputAU safety latency: %0.2f ms", ((double)safetyOffsetLatency / m_OutputASBD.mSampleRate) * 1000.0);
+ }
+
+ {
+ uint32_t latencyFrames;
+ uint32_t size = sizeof(uint32_t);
+ AudioObjectPropertyAddress addr{kAudioDevicePropertyLatency, kAudioObjectPropertyScopeOutput, kAudioObjectPropertyElementMain};
+ status = AudioObjectGetPropertyData(m_OutputDeviceID, &addr, 0, nil, &size, &latencyFrames);
+ if (status != noErr) {
+ CA_LogError(status, "Failed to get the output device hardware latency");
+ return false;
+ }
+ m_OutputHardwareLatency = (double)latencyFrames / m_OutputASBD.mSampleRate;
+ DEBUG_TRACE("CoreAudioRenderer output hardware latency: %d (%0.2f ms)", latencyFrames, m_OutputHardwareLatency * 1000.0);
+ }
+#endif
+
+ return true;
+}
+
+bool CoreAudioRenderer::initRingBuffer()
+{
+ // Always buffer at least 2 packets, up to 30ms worth of packets
+ int packetsToBuffer = qMax(2, (int)ceil(kRingBufferMaxSeconds / m_AudioPacketDuration));
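+ // e.g. with 5 ms packets: qMax(2, ceil(0.030 / 0.005)) = 6 packets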
+
+ bool ok = TPCircularBufferInit(&m_RingBuffer,
+ sizeof(float) *
+ m_opusConfig->channelCount *
+ m_opusConfig->samplesPerFrame *
+ packetsToBuffer);
+ if (!ok) return false;
+
+ // Spatial mixer code needs to be able to read from the ring buffer
+ m_SpatialAU.setRingBufferPtr(&m_RingBuffer);
+
+ // real length will be larger than requested due to memory page alignment
+ m_BufferSize = m_RingBuffer.length;
+ DEBUG_TRACE("CoreAudioRenderer ring buffer init, %d packets (%d bytes)", packetsToBuffer, m_BufferSize);
+
+ return true;
+}
+
+OSStatus onDeviceOverload(AudioObjectID /*inObjectID*/,
+ UInt32 /*inNumberAddresses*/,
+ const AudioObjectPropertyAddress * /*inAddresses*/,
+ void *inClientData)
+{
+ CoreAudioRenderer *me = (CoreAudioRenderer *)inClientData;
+ SDL_LogWarn(SDL_LOG_CATEGORY_APPLICATION, "CoreAudioRenderer output device overload");
+ me->statsIncDeviceOverload();
+ return noErr;
+}
+
+OSStatus onAudioNeedsReinit(AudioObjectID /*inObjectID*/,
+ UInt32 /*inNumberAddresses*/,
+ const AudioObjectPropertyAddress * /*inAddresses*/,
+ void *inClientData)
+{
+ CoreAudioRenderer *me = (CoreAudioRenderer *)inClientData;
+ SDL_LogWarn(SDL_LOG_CATEGORY_APPLICATION, "CoreAudioRenderer output device had a change, will reinit");
+ me->m_needsReinit = true;
+ return noErr;
+}
+
+bool CoreAudioRenderer::initListeners()
+{
+ // events we care about on our output device
+
+ AudioObjectPropertyAddress addr{kAudioDeviceProcessorOverload, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMain};
+ OSStatus status = AudioObjectAddPropertyListener(m_OutputDeviceID, &addr, onDeviceOverload, this);
+ if (status != noErr) {
+ CA_LogError(status, "Failed to add listener for kAudioDeviceProcessorOverload");
+ return false;
+ }
+
+ addr.mSelector = kAudioDevicePropertyDeviceHasChanged;
+ status = AudioObjectAddPropertyListener(m_OutputDeviceID, &addr, onAudioNeedsReinit, this);
+ if (status != noErr) {
+ CA_LogError(status, "Failed to add listener for kAudioDevicePropertyDeviceHasChanged");
+ return false;
+ }
+
+ // non-device-specific listeners
+ addr.mSelector = kAudioHardwarePropertyServiceRestarted;
+ status = AudioObjectAddPropertyListener(kAudioObjectSystemObject, &addr, onAudioNeedsReinit, this);
+ if (status != noErr) {
+ CA_LogError(status, "Failed to add listener for kAudioHardwarePropertyServiceRestarted");
+ return false;
+ }
+
+ addr.mSelector = kAudioHardwarePropertyDefaultOutputDevice;
+ status = AudioObjectAddPropertyListener(kAudioObjectSystemObject, &addr, onAudioNeedsReinit, this);
+ if (status != noErr) {
+ CA_LogError(status, "Failed to add listener for kAudioDevicePropertyIOStoppedAbnormally");
+ return false;
+ }
+
+ return true;
+}
+
+void CoreAudioRenderer::deinitListeners()
+{
+ AudioObjectPropertyAddress addr{kAudioDeviceProcessorOverload, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMain};
+ AudioObjectRemovePropertyListener(m_OutputDeviceID, &addr, onDeviceOverload, this);
+
+ addr.mSelector = kAudioDevicePropertyDeviceHasChanged;
+ AudioObjectRemovePropertyListener(m_OutputDeviceID, &addr, onAudioNeedsReinit, this);
+
+ addr.mSelector = kAudioHardwarePropertyServiceRestarted;
+ AudioObjectRemovePropertyListener(kAudioObjectSystemObject, &addr, onAudioNeedsReinit, this);
+
+ addr.mSelector = kAudioHardwarePropertyDefaultOutputDevice;
+ AudioObjectRemovePropertyListener(kAudioObjectSystemObject, &addr, onAudioNeedsReinit, this);
+}
+
+bool CoreAudioRenderer::setCallback(AURenderCallback callback)
+{
+ AURenderCallbackStruct callbackStruct;
+ callbackStruct.inputProc = callback;
+ callbackStruct.inputProcRefCon = this;
+
+ // render callbacks are set on the input scope of the output unit
+ OSStatus status = AudioUnitSetProperty(m_OutputAU, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Input, 0, &callbackStruct, sizeof(callbackStruct));
+ if (status != noErr) {
+ CA_LogError(status, "Failed to set output render callback");
+ return false;
+ }
+
+ return true;
+}
+
+void* CoreAudioRenderer::getAudioBuffer(int* size)
+{
+ // We must always write a full frame of audio. If we don't,
+ // the reader will get out of sync with the writer and our
+ // channels will get all mixed up. To ensure this is always
+ // the case, round our bytes free down to the next multiple
+ // of our frame size.
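+ // e.g. 5.1 audio: bytesPerFrame = 6 * 4 = 24, so 1000 free bytes rounds down to 984.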
+ uint32_t bytesFree;
+ void *ptr = TPCircularBufferHead(&m_RingBuffer, &bytesFree);
+ int bytesPerFrame = m_opusConfig->channelCount * sizeof(float);
+ *size = qMin(*size, (int)(bytesFree / bytesPerFrame) * bytesPerFrame);
+
+ m_BufferFilledBytes = m_RingBuffer.length - bytesFree;
+
+ return ptr;
+}
+
+bool CoreAudioRenderer::submitAudio(int bytesWritten)
+{
+ // We'll be fully recreated after any changes to the audio device, default output, etc.
+ if (m_needsReinit) {
+ return false;
+ }
+
+ if (bytesWritten == 0) {
+ // Nothing to do
+ return true;
+ }
+
+ // drop packet if we've fallen behind Moonlight's queue by at least 30 ms
+ if (LiGetPendingAudioDuration() > 30) {
+ m_ActiveWndAudioStats.totalGlitches++;
+ m_ActiveWndAudioStats.droppedOverload++;
+ return true;
+ }
+
+ // Advance the write pointer
+ TPCircularBufferProduce(&m_RingBuffer, bytesWritten);
+
+ return true;
+}
+
+AUSpatialMixerOutputType CoreAudioRenderer::getSpatialMixerOutputType()
+{
+#if TARGET_OS_OSX
+ // Determine what we're playing through: check the transport type first,
+ // then fall back to the device's data source for built-in outputs
+ uint32_t dataSource{};
+ uint32_t size = sizeof(dataSource);
+
+ AudioObjectPropertyAddress addTransType{kAudioDevicePropertyTransportType, kAudioObjectPropertyScopeOutput, kAudioObjectPropertyElementMain};
+ OSStatus status = AudioObjectGetPropertyData(m_OutputDeviceID, &addTransType, 0, nullptr, &size, &dataSource);
+ if (status != noErr) {
+ CA_LogError(status, "Failed to get the transport type of output device");
+ return kSpatialMixerOutputType_ExternalSpeakers;
+ }
+
+ CA_FourCC(dataSource, m_OutputTransportType);
+ DEBUG_TRACE("CoreAudioRenderer output transport type %s", m_OutputTransportType);
+
+ if (dataSource == kAudioDeviceTransportTypeHDMI) {
+ dataSource = kIOAudioOutputPortSubTypeExternalSpeaker;
+ } else if (dataSource == kAudioDeviceTransportTypeBluetooth || dataSource == kAudioDeviceTransportTypeUSB) {
+ dataSource = kIOAudioOutputPortSubTypeHeadphones;
+ } else {
+ AudioObjectPropertyAddress theAddress{kAudioDevicePropertyDataSource, kAudioDevicePropertyScopeOutput, kAudioObjectPropertyElementMain};
+
+ status = AudioObjectGetPropertyData(m_OutputDeviceID, &theAddress, 0, nullptr, &size, &dataSource);
+ if (status != noErr) {
+ CA_LogError(status, "Couldn't determine default audio device type, defaulting to ExternalSpeakers");
+ return kSpatialMixerOutputType_ExternalSpeakers;
+ }
+ }
+
+ CA_FourCC(dataSource, m_OutputDataSource);
+ DEBUG_TRACE("CoreAudioRenderer output data source %s", m_OutputDataSource);
+
+ switch (dataSource) {
+ case kIOAudioOutputPortSubTypeInternalSpeaker:
+ return kSpatialMixerOutputType_BuiltInSpeakers;
+
+ case kIOAudioOutputPortSubTypeHeadphones:
+ return kSpatialMixerOutputType_Headphones;
+
+ case kIOAudioOutputPortSubTypeExternalSpeaker:
+ return kSpatialMixerOutputType_ExternalSpeakers;
+
+ default:
+ return kSpatialMixerOutputType_Headphones;
+ }
+#else
+ AVAudioSession *audioSession = [AVAudioSession sharedInstance];
+
+ if ([audioSession.currentRoute.outputs count] != 1) {
+ return kSpatialMixerOutputType_ExternalSpeakers;
+ } else {
+ NSString* pType = audioSession.currentRoute.outputs.firstObject.portType;
+ if ([pType isEqualToString:AVAudioSessionPortHeadphones] || [pType isEqualToString:AVAudioSessionPortBluetoothA2DP] || [pType isEqualToString:AVAudioSessionPortBluetoothLE] || [pType isEqualToString:AVAudioSessionPortBluetoothHFP]) {
+ return kSpatialMixerOutputType_Headphones;
+ } else if ([pType isEqualToString:AVAudioSessionPortBuiltInSpeaker]) {
+ return kSpatialMixerOutputType_BuiltInSpeakers;
+ } else {
+ return kSpatialMixerOutputType_ExternalSpeakers;
+ }
+ }
+#endif
+}
+
+static void replace_fancy_quote(char *str)
+{
+ char *pos;
+ while ((pos = strstr(str, "\xe2\x80\x99")) != NULL) {
+ *pos = '\'';
+ memmove(pos + 1, pos + 3, strlen(pos + 3) + 1);
+ }
+}
+
+void CoreAudioRenderer::setOutputDeviceName(const CFStringRef cfstr)
+{
+ if (cfstr) {
+ CFIndex size = CFStringGetMaximumSizeForEncoding(CFStringGetLength(cfstr), kCFStringEncodingUTF8) + 1;
+ char *buffer = (char *)malloc(size);
+ CFStringGetCString(cfstr, buffer, size, kCFStringEncodingUTF8);
+
+ // it's very likely we'll get a name like "Andy’s AirPods Pro"
+ // with a UTF8 quote, and our overlay font is only ASCII
+ replace_fancy_quote(buffer);
+
+ if (m_OutputDeviceName) {
+ free(m_OutputDeviceName);
+ }
+
+ m_OutputDeviceName = buffer;
+ }
+}
diff --git a/app/streaming/audio/renderers/coreaudio/coreaudio.h b/app/streaming/audio/renderers/coreaudio/coreaudio.h
new file mode 100644
index 000000000..9e5c66678
--- /dev/null
+++ b/app/streaming/audio/renderers/coreaudio/coreaudio.h
@@ -0,0 +1,77 @@
+#pragma once
+
+#include "../renderer.h"
+#include "au_spatial_renderer.h"
+#include "AllocatedAudioBufferList.h"
+#include "TPCircularBuffer.h"
+
+#include <AudioToolbox/AudioToolbox.h> // AudioUnit, AURenderCallback, AUSpatialMixerOutputType
+#include <CoreAudio/CoreAudio.h>       // AudioDeviceID, AudioObjectPropertyAddress
+
+class CoreAudioRenderer : public IAudioRenderer
+{
+public:
+ CoreAudioRenderer();
+ ~CoreAudioRenderer();
+
+ bool prepareForPlayback(const OPUS_MULTISTREAM_CONFIGURATION* opusConfig);
+ virtual void* getAudioBuffer(int* size);
+ virtual bool submitAudio(int bytesWritten);
+ virtual int getCapabilities();
+ virtual AudioFormat getAudioBufferFormat();
+ const char * getRendererName() { return "CoreAudio"; }
+ virtual int stringifyAudioStats(AUDIO_STATS &stats, char* output, int length);
+
+ friend OSStatus renderCallbackDirect(void *, AudioUnitRenderActionFlags *, const AudioTimeStamp *, uint32_t, uint32_t, AudioBufferList *);
+ friend OSStatus renderCallbackSpatial(void *, AudioUnitRenderActionFlags *, const AudioTimeStamp *, uint32_t, uint32_t, AudioBufferList *);
+ friend OSStatus onDeviceOverload(AudioObjectID, UInt32, const AudioObjectPropertyAddress *, void *);
+ friend OSStatus onAudioNeedsReinit(AudioObjectID, UInt32, const AudioObjectPropertyAddress *, void *);
+
+private:
+ bool initAudioUnit();
+ bool initRingBuffer();
+ bool initListeners();
+ void deinitListeners();
+ bool setCallback(AURenderCallback);
+ void stop();
+ void cleanup();
+ AUSpatialMixerOutputType getSpatialMixerOutputType();
+ void setOutputDeviceName(CFStringRef);
+
+ AudioUnit m_OutputAU{nullptr};
+ AUSpatialRenderer m_SpatialAU;
+
+ // output device metadata
+ AudioDeviceID m_OutputDeviceID{0};
+ AudioStreamBasicDescription m_OutputASBD{};
+ char *m_OutputDeviceName{nullptr};
+ char m_OutputTransportType[5]{};
+ char m_OutputDataSource[5]{};
+
+ // buffers
+ TPCircularBuffer m_RingBuffer{};
+ AllocatedAudioBufferList m_SpatialBuffer;
+ double m_AudioPacketDuration{0.0};
+ uint32_t m_BufferFrameSize{0};
+
+ // latency
+ double m_OutputHardwareLatency{0.0};
+ double m_TotalSoftwareLatency{0.0};
+ double m_OutputSoftwareLatencyMin{0.0};
+ double m_OutputSoftwareLatencyMax{0.0};
+
+ // internal device state
+ bool m_needsReinit{false};
+ bool m_Spatial{false};
+ uint32_t m_SpatialOutputType{0};
+ uint64_t m_LastDebugOutputTime{0};
+
+ // stats
+ double m_LastSampleTime{0.0};
+ uint32_t m_LastNumFrames{0};
+ uint32_t m_BufferSize{0};
+ uint32_t m_BufferFilledBytes{0};
+ void statsIncDeviceOverload();
+ void statsTrackRender(uint64_t, const AudioTimeStamp *, uint32_t);
+};
diff --git a/app/streaming/audio/renderers/coreaudio/coreaudio_helpers.h b/app/streaming/audio/renderers/coreaudio/coreaudio_helpers.h
new file mode 100644
index 000000000..022543bbf
--- /dev/null
+++ b/app/streaming/audio/renderers/coreaudio/coreaudio_helpers.h
@@ -0,0 +1,100 @@
+#pragma once
+
+#include <CoreAudio/CoreAudioTypes.h>      // OSStatus, AudioStreamBasicDescription
+#include <CoreFoundation/CoreFoundation.h> // CFSwapInt32HostToBig
+
+#include <SDL.h>      // SDL_LogInfo / SDL_LogError
+#include <ctype.h>    // isprint
+#include <inttypes.h> // PRIu32
+
+#ifndef NDEBUG
+#define COREAUDIO_DEBUG
+# define DEBUG_TRACE(...) SDL_LogInfo(SDL_LOG_CATEGORY_APPLICATION, __VA_ARGS__)
+#else
+# define DEBUG_TRACE(...)
+#endif
+
+static void CA_LogError(OSStatus error, const char *fmt, ...)
+{
+ char errorString[20];
+
+ // See if it appears to be a 4-char-code
+ *(uint32_t *)(errorString + 1) = CFSwapInt32HostToBig(error);
+ if (isprint(errorString[1]) && isprint(errorString[2]) &&
+ isprint(errorString[3]) && isprint(errorString[4])) {
+ errorString[0] = errorString[5] = '\'';
+ errorString[6] = '\0';
+ }
+ else {
+ // No, format it as an integer
+ snprintf(errorString, sizeof(errorString), "%d", (int)error);
+ }
+
+ char logBuffer[1024];
+ va_list args;
+ va_start(args, fmt);
+ vsnprintf(logBuffer, sizeof(logBuffer), fmt, args);
+ va_end(args);
+
+ SDL_LogError(SDL_LOG_CATEGORY_APPLICATION, "CoreAudio Error: %s (%s)\n", logBuffer, errorString);
+}
+
+static void CA_FourCC(uint32_t value, char *outFormatIDStr)
+{
+ uint32_t formatID = CFSwapInt32HostToBig(value);
+ bcopy(&formatID, outFormatIDStr, 4);
+ outFormatIDStr[4] = '\0';
+}
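+// e.g. CA_FourCC(kAudioDeviceTransportTypeBluetooth, buf) yields "blue"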
+
+// based on mpv ca_print_asbd()
+static void CA_PrintASBD(const char *description, const AudioStreamBasicDescription *asbd)
+{
+ char formatIDStr[5];
+ CA_FourCC(asbd->mFormatID, formatIDStr);
+
+ uint32_t flags = asbd->mFormatFlags;
+ DEBUG_TRACE(
+ "%s %7.1fHz %" PRIu32 "bit %s "
+ "[%" PRIu32 "bpp][%" PRIu32 "fpp]"
+ "[%" PRIu32 "bpf][%" PRIu32 "ch] "
+ "%s %s %s%s%s%s\n",
+ description, asbd->mSampleRate, asbd->mBitsPerChannel, formatIDStr,
+ asbd->mBytesPerPacket, asbd->mFramesPerPacket,
+ asbd->mBytesPerFrame, asbd->mChannelsPerFrame,
+ (flags & kAudioFormatFlagIsFloat) ? "float" : "int",
+ (flags & kAudioFormatFlagIsBigEndian) ? "BE" : "LE",
+ (flags & kAudioFormatFlagIsFloat) ? ""
+ : ((flags & kAudioFormatFlagIsSignedInteger) ? "S" : "U"),
+ (flags & kAudioFormatFlagIsPacked) ? " packed" : "",
+ (flags & kAudioFormatFlagIsAlignedHigh) ? " aligned" : "",
+ (flags & kAudioFormatFlagIsNonInterleaved) ? " non-interleaved" : " interleaved");
+}
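+// e.g. stereo float output prints: "48000.0Hz 32bit lpcm [8bpp][1fpp][8bpf][2ch] float LE packed interleaved"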
+
+// classic hex dump
+static void CA_HexDump(const float *buffer, size_t length)
+{
+ const uint8_t *bytePtr = (const uint8_t *)buffer;
+ size_t bytesToPrint = length * sizeof(float);
+
+ // Print 32 bytes per line
+ for (size_t i = 0; i < bytesToPrint; i += 32) {
+ printf("%08lx ", (unsigned long)(bytePtr + i));
+
+ // Print the hex values (32 bytes)
+ for (size_t j = 0; j < 32 && (i + j) < bytesToPrint; ++j) {
+ printf("%02x ", bytePtr[i + j]);
+ if (j == 15) printf(" ");
+ }
+
+ printf(" |");
+
+ for (size_t j = 0; j < 32 && (i + j) < bytesToPrint; ++j) {
+ uint8_t byte = bytePtr[i + j];
+ if (byte >= 32 && byte <= 126)
+ printf("%c", byte);
+ else
+ printf(".");
+ }
+
+ printf("|\n");
+ }
+}
diff --git a/app/streaming/audio/renderers/renderer.cpp b/app/streaming/audio/renderers/renderer.cpp
new file mode 100644
index 000000000..09c52daec
--- /dev/null
+++ b/app/streaming/audio/renderers/renderer.cpp
@@ -0,0 +1,158 @@
+#include "renderer.h"
+
+#include <QtGlobal> // Q_UNREACHABLE
+
+IAudioRenderer::IAudioRenderer()
+{
+ SDL_zero(m_ActiveWndAudioStats);
+ SDL_zero(m_LastWndAudioStats);
+ SDL_zero(m_GlobalAudioStats);
+
+ m_ActiveWndAudioStats.measurementStartUs = LiGetMicroseconds();
+}
+
+int IAudioRenderer::getAudioBufferSampleSize()
+{
+ switch (getAudioBufferFormat()) {
+ case IAudioRenderer::AudioFormat::Sint16NE:
+ return sizeof(short);
+ case IAudioRenderer::AudioFormat::Float32NE:
+ return sizeof(float);
+ default:
+ Q_UNREACHABLE();
+ }
+}
+
+void IAudioRenderer::addAudioStats(AUDIO_STATS& src, AUDIO_STATS& dst)
+{
+ dst.opusBytesReceived += src.opusBytesReceived;
+ dst.decodedPackets += src.decodedPackets;
+ dst.renderedPackets += src.renderedPackets;
+ dst.droppedNetwork += src.droppedNetwork;
+ dst.droppedOverload += src.droppedOverload;
+ dst.decodeDurationUs += src.decodeDurationUs;
+
+ if (!LiGetEstimatedRttInfo(&dst.lastRtt, NULL)) {
+ dst.lastRtt = 0;
+ }
+ else {
+ // Our logic to determine if RTT is valid depends on us never
+ // getting an RTT of 0. ENet currently ensures RTTs are >= 1.
+ SDL_assert(dst.lastRtt > 0);
+ }
+
+ // Initialize the measurement start point if this is the first audio stats window
+ if (!dst.measurementStartUs) {
+ dst.measurementStartUs = src.measurementStartUs;
+ }
+
+ // The following code assumes the global measure was already started first
+ SDL_assert(dst.measurementStartUs <= src.measurementStartUs);
+
+ double timeDiffSecs = (double)(LiGetMicroseconds() - dst.measurementStartUs) / 1000000.0;
+ dst.opusKbitsPerSec = (double)(dst.opusBytesReceived * 8) / 1000.0 / timeDiffSecs;
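+ // e.g. 24000 bytes received over 2.0 s -> 96.0 kbps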
+}
+
+void IAudioRenderer::flipAudioStatsWindows()
+{
+ // Called once a second, adds stats to the running global total,
+ // copies the active window to the last window, and initializes
+ // a fresh active window.
+
+ // Accumulate these values into the global stats
+ addAudioStats(m_ActiveWndAudioStats, m_GlobalAudioStats);
+
+ // Move this window into the last window slot and clear it for next window
+ SDL_memcpy(&m_LastWndAudioStats, &m_ActiveWndAudioStats, sizeof(m_ActiveWndAudioStats));
+ SDL_zero(m_ActiveWndAudioStats);
+ m_ActiveWndAudioStats.measurementStartUs = LiGetMicroseconds();
+}
+
+void IAudioRenderer::logGlobalAudioStats()
+{
+ if (m_GlobalAudioStats.decodedPackets > 0) {
+ char audioStatsStr[1024];
+ stringifyAudioStats(m_GlobalAudioStats, audioStatsStr, sizeof(audioStatsStr));
+
+ SDL_LogInfo(SDL_LOG_CATEGORY_APPLICATION,
+ "\nCurrent session audio stats\n---------------------------\n%s",
+ audioStatsStr);
+ }
+}
+
+void IAudioRenderer::snapshotAudioStats(AUDIO_STATS &snapshot)
+{
+ addAudioStats(m_LastWndAudioStats, snapshot);
+ addAudioStats(m_ActiveWndAudioStats, snapshot);
+}
+
+void IAudioRenderer::statsAddOpusBytesReceived(int size)
+{
+ m_ActiveWndAudioStats.opusBytesReceived += size;
+
+ if (size) {
+ m_ActiveWndAudioStats.decodedPackets++;
+ }
+ else {
+ // if called with size=0 it indicates a packet that is presumed lost by the network
+ m_ActiveWndAudioStats.droppedNetwork++;
+ }
+}
+
+void IAudioRenderer::statsTrackDecodeTime(uint64_t startTimeUs)
+{
+ uint64_t decodeTimeUs = LiGetMicroseconds() - startTimeUs;
+ m_ActiveWndAudioStats.decodeDurationUs += decodeTimeUs;
+}
+
+// Provide audio stats common to all renderer backends. Child classes can then add additional lines
+// at the returned offset length into output.
+int IAudioRenderer::stringifyAudioStats(AUDIO_STATS& stats, char *output, int length)
+{
+ int offset = 0;
+
+ // Start with an empty string
+ output[offset] = 0;
+
+ double opusFrameSize = (double)m_opusConfig->samplesPerFrame / 48.0; // frame duration in ms (stream is always 48 kHz)
+ const RTP_AUDIO_STATS* rtpAudioStats = LiGetRTPAudioStats();
+ uint32_t fecTotal = rtpAudioStats->packetCountAudio + rtpAudioStats->packetCountFec;
+ double fecOverhead = fecTotal ? (double)rtpAudioStats->packetCountFec / fecTotal : 0.0;
+
+ int ret = snprintf(
+ &output[offset],
+ length - offset,
+ "Audio stream: %s-channel Opus low-delay @ 48 kHz (%s)\n"
+ "Bitrate: %.1f kbps, +%.0f%% forward error-correction\n"
+ "Opus config: %s, frame size: %.1f ms\n"
+ "Packet loss from network: %.2f%%, loss from CPU overload: %.2f%%\n"
+ "Average decoding time: %0.2f ms\n",
+
+ // "Audio stream: %s-channel Opus low-delay @ 48 kHz (%s)\n"
+ m_opusConfig->channelCount == 6 ? "5.1" : m_opusConfig->channelCount == 8 ? "7.1" : "2",
+ getRendererName(),
+
+ // "Bitrate: %.1f %s, +%.0f%% forward error-correction\n"
+ stats.opusKbitsPerSec,
+ fecOverhead * 100.0,
+
+ // "Opus config: %s, frame size: %.1fms\n"
+ // Work out if we're getting high or low quality from Sunshine. coupled surround is designed for physical speakers
+ ((m_opusConfig->channelCount == 2 && stats.opusKbitsPerSec > 128) || !m_opusConfig->coupledStreams)
+ ? "high quality (LAN)" // 512k stereo coupled, 1.5mbps 5.1 uncoupled, 2mbps 7.1 uncoupled
+ : "normal quality", // 96k stereo coupled, 256k 5.1 coupled, 450k 7.1 coupled
+ opusFrameSize,
+
+ // "Packet loss from network: %.2f%%, loss from CPU overload: %.2f%%\n"
+ stats.decodedPackets ? ((double)stats.droppedNetwork / stats.decodedPackets) * 100.0 : 0.0,
+ stats.decodedPackets ? ((double)stats.droppedOverload / stats.decodedPackets) * 100.0 : 0.0,
+
+ // "Average decoding time: %0.2f ms\n"
+ (double)(stats.decodeDurationUs / 1000.0) / stats.decodedPackets
+ );
+ if (ret < 0 || ret >= length - offset) {
+ SDL_assert(false);
+ return -1;
+ }
+
+ return offset + ret;
+}
diff --git a/app/streaming/audio/renderers/renderer.h b/app/streaming/audio/renderers/renderer.h
index acda076fd..2379c7859 100644
--- a/app/streaming/audio/renderers/renderer.h
+++ b/app/streaming/audio/renderers/renderer.h
@@ -2,14 +2,37 @@
#include <Limelight.h>
#include <SDL.h>
+#include <stdint.h>
+
+typedef struct _AUDIO_STATS {
+ uint32_t opusBytesReceived;
+ uint32_t decodedPackets; // total packets decoded (exceeds renderedPackets when packets are dropped due to overload)
+ uint32_t renderedPackets; // total audio packets rendered (only for certain backends)
+
+ uint32_t droppedNetwork; // total packets lost to the network
+ uint32_t droppedOverload; // total times we dropped a packet due to being unable to run in time
+ uint32_t totalGlitches; // total times the audio was interrupted
+
+ uint64_t decodeDurationUs; // cumulative decode+render time, microseconds
+ uint64_t decodeDurationUsMax; // slowest decode+render time, microseconds
+ uint32_t lastRtt; // network latency from enet, milliseconds
+ uint64_t measurementStartUs; // timestamp stats were started, microseconds
+ double opusKbitsPerSec; // current Opus bitrate in kbps, not including FEC overhead
+} AUDIO_STATS, *PAUDIO_STATS;
class IAudioRenderer
{
public:
+ IAudioRenderer();
+
virtual ~IAudioRenderer() {}
virtual bool prepareForPlayback(const OPUS_MULTISTREAM_CONFIGURATION* opusConfig) = 0;
+ virtual void setOpusConfig(const OPUS_MULTISTREAM_CONFIGURATION* opusConfig) {
+ m_opusConfig = opusConfig;
+ }
+
virtual void* getAudioBuffer(int* size) = 0;
// Return false if an unrecoverable error has occurred and the renderer must be reinitialized
@@ -33,14 +56,28 @@ class IAudioRenderer
};
virtual AudioFormat getAudioBufferFormat() = 0;
- int getAudioBufferSampleSize() {
- switch (getAudioBufferFormat()) {
- case IAudioRenderer::AudioFormat::Sint16NE:
- return sizeof(short);
- case IAudioRenderer::AudioFormat::Float32NE:
- return sizeof(float);
- default:
- Q_UNREACHABLE();
- }
+ virtual int getAudioBufferSampleSize();
+
+ AUDIO_STATS & getActiveWndAudioStats() {
+ return m_ActiveWndAudioStats;
}
+
+ virtual const char * getRendererName() { return "IAudioRenderer"; };
+
+ // generic stats handling for all child classes
+ virtual void addAudioStats(AUDIO_STATS &, AUDIO_STATS &);
+ virtual void flipAudioStatsWindows();
+ virtual void logGlobalAudioStats();
+ virtual void snapshotAudioStats(AUDIO_STATS &);
+ virtual void statsAddOpusBytesReceived(int);
+ virtual void statsTrackDecodeTime(uint64_t);
+ virtual int stringifyAudioStats(AUDIO_STATS &, char *, int);
+
+protected:
+ AUDIO_STATS m_ActiveWndAudioStats;
+ AUDIO_STATS m_LastWndAudioStats;
+ AUDIO_STATS m_GlobalAudioStats;
+
+ // input stream metadata
+ const OPUS_MULTISTREAM_CONFIGURATION* m_opusConfig;
};
diff --git a/app/streaming/audio/renderers/sdl.h b/app/streaming/audio/renderers/sdl.h
index 44d555517..0e667617d 100644
--- a/app/streaming/audio/renderers/sdl.h
+++ b/app/streaming/audio/renderers/sdl.h
@@ -20,8 +20,11 @@ class SdlAudioRenderer : public IAudioRenderer
virtual AudioFormat getAudioBufferFormat();
+ const char * getRendererName() { return m_Name; }
+
private:
SDL_AudioDeviceID m_AudioDevice;
void* m_AudioBuffer;
int m_FrameSize;
+ char m_Name[24];
};
diff --git a/app/streaming/audio/renderers/sdlaud.cpp b/app/streaming/audio/renderers/sdlaud.cpp
index 9653ca973..58b47c5be 100644
--- a/app/streaming/audio/renderers/sdlaud.cpp
+++ b/app/streaming/audio/renderers/sdlaud.cpp
@@ -5,7 +5,8 @@
SdlAudioRenderer::SdlAudioRenderer()
: m_AudioDevice(0),
- m_AudioBuffer(nullptr)
+ m_AudioBuffer(nullptr),
+ m_Name("SDL")
{
SDL_assert(!SDL_WasInit(SDL_INIT_AUDIO));
@@ -59,6 +60,8 @@ bool SdlAudioRenderer::prepareForPlayback(const OPUS_MULTISTREAM_CONFIGURATION*
return false;
}
+ setOpusConfig(opusConfig);
+
SDL_LogInfo(SDL_LOG_CATEGORY_APPLICATION,
"Desired audio buffer: %u samples (%u bytes)",
want.samples,
@@ -69,9 +72,10 @@ bool SdlAudioRenderer::prepareForPlayback(const OPUS_MULTISTREAM_CONFIGURATION*
have.samples,
have.size);
+ const char *driver = SDL_GetCurrentAudioDriver();
+ snprintf(m_Name, sizeof(m_Name), "SDL/%s", driver);
SDL_LogInfo(SDL_LOG_CATEGORY_APPLICATION,
- "SDL audio driver: %s",
- SDL_GetCurrentAudioDriver());
+ "SDL audio driver: %s", driver);
// Start playback
SDL_PauseAudioDevice(m_AudioDevice, 0);
@@ -110,6 +114,8 @@ bool SdlAudioRenderer::submitAudio(int bytesWritten)
// Don't queue if there's already more than 30 ms of audio data waiting
// in Moonlight's audio queue.
if (LiGetPendingAudioDuration() > 30) {
+ m_ActiveWndAudioStats.totalGlitches++;
+ m_ActiveWndAudioStats.droppedOverload++;
return true;
}
diff --git a/app/streaming/audio/renderers/slaud.cpp b/app/streaming/audio/renderers/slaud.cpp
index 3a6e0304a..7167b96dd 100644
--- a/app/streaming/audio/renderers/slaud.cpp
+++ b/app/streaming/audio/renderers/slaud.cpp
@@ -19,6 +19,8 @@ bool SLAudioRenderer::prepareForPlayback(const OPUS_MULTISTREAM_CONFIGURATION* o
return false;
}
+ setOpusConfig(opusConfig);
+
// This number is pretty conservative (especially for surround), but
// it's hard to avoid since we get crushed by CPU limitations.
m_MaxQueuedAudioMs = 40 * opusConfig->channelCount / 2;
@@ -109,6 +111,8 @@ bool SLAudioRenderer::submitAudio(int bytesWritten)
SDL_LogInfo(SDL_LOG_CATEGORY_APPLICATION,
"Too many queued audio frames: %d",
LiGetPendingAudioFrames());
+ m_ActiveWndAudioStats.totalGlitches++;
+ m_ActiveWndAudioStats.droppedOverload++;
}
return true;
diff --git a/app/streaming/audio/renderers/slaud.h b/app/streaming/audio/renderers/slaud.h
index 679fb7636..71b9e2950 100644
--- a/app/streaming/audio/renderers/slaud.h
+++ b/app/streaming/audio/renderers/slaud.h
@@ -20,6 +20,8 @@ class SLAudioRenderer : public IAudioRenderer
virtual AudioFormat getAudioBufferFormat();
+ const char * getRendererName() { return "Steam Link"; }
+
virtual void remapChannels(POPUS_MULTISTREAM_CONFIGURATION opusConfig);
private:
diff --git a/app/streaming/audio/renderers/soundioaudiorenderer.cpp b/app/streaming/audio/renderers/soundioaudiorenderer.cpp
index 495a50aec..d0d4555b0 100644
--- a/app/streaming/audio/renderers/soundioaudiorenderer.cpp
+++ b/app/streaming/audio/renderers/soundioaudiorenderer.cpp
@@ -12,7 +12,8 @@ SoundIoAudioRenderer::SoundIoAudioRenderer()
m_RingBuffer(nullptr),
m_AudioPacketDuration(0),
m_Latency(0),
- m_Errored(false)
+ m_Errored(false),
+ m_Name("libsoundio")
{
}
@@ -109,6 +110,8 @@ bool SoundIoAudioRenderer::prepareForPlayback(const OPUS_MULTISTREAM_CONFIGURATI
return false;
}
+ setOpusConfig(opusConfig);
+
m_SoundIo->app_name = "Moonlight";
m_SoundIo->userdata = this;
m_SoundIo->on_backend_disconnect = sioBackendDisconnect;
@@ -123,7 +126,7 @@ bool SoundIoAudioRenderer::prepareForPlayback(const OPUS_MULTISTREAM_CONFIGURATI
}
SDL_LogInfo(SDL_LOG_CATEGORY_APPLICATION,
- "Audio backend: %s",
+ "Audio backend: soundio using %s",
soundio_backend_name(m_SoundIo->current_backend));
// Don't continue if we could only open the dummy backend
diff --git a/app/streaming/audio/renderers/soundioaudiorenderer.h b/app/streaming/audio/renderers/soundioaudiorenderer.h
index 9aff2bc2a..b2b84bfb0 100644
--- a/app/streaming/audio/renderers/soundioaudiorenderer.h
+++ b/app/streaming/audio/renderers/soundioaudiorenderer.h
@@ -21,6 +21,14 @@ class SoundIoAudioRenderer : public IAudioRenderer
virtual AudioFormat getAudioBufferFormat();
+ const char * getRendererName() {
+ if (m_SoundIo != nullptr) {
+ const char *backend = soundio_backend_name(m_SoundIo->current_backend);
+ snprintf(m_Name, sizeof(m_Name), "libsoundio/%s", backend);
+ }
+ return m_Name;
+ }
+
private:
int scoreChannelLayout(const struct SoundIoChannelLayout* layout, const OPUS_MULTISTREAM_CONFIGURATION* opusConfig);
@@ -41,4 +49,5 @@ class SoundIoAudioRenderer : public IAudioRenderer
double m_AudioPacketDuration;
double m_Latency;
bool m_Errored;
+ char m_Name[24];
};
diff --git a/app/streaming/input/gamepad.cpp b/app/streaming/input/gamepad.cpp
index 131ec1931..e25ca11d9 100644
--- a/app/streaming/input/gamepad.cpp
+++ b/app/streaming/input/gamepad.cpp
@@ -388,6 +388,8 @@ void SdlInputHandler::handleControllerButtonEvent(SDL_ControllerButtonEvent* eve
// Toggle the stats overlay
Session::get()->getOverlayManager().setOverlayState(Overlay::OverlayDebug,
!Session::get()->getOverlayManager().isOverlayEnabled(Overlay::OverlayDebug));
+ Session::get()->getOverlayManager().setOverlayState(Overlay::OverlayDebugAudio,
+ !Session::get()->getOverlayManager().isOverlayEnabled(Overlay::OverlayDebugAudio));
// Clear buttons down on this gamepad
LiSendMultiControllerEvent(state->index, m_GamepadMask,
diff --git a/app/streaming/input/keyboard.cpp b/app/streaming/input/keyboard.cpp
index a501cdb67..2fcd44bf2 100644
--- a/app/streaming/input/keyboard.cpp
+++ b/app/streaming/input/keyboard.cpp
@@ -56,6 +56,8 @@ void SdlInputHandler::performSpecialKeyCombo(KeyCombo combo)
// Toggle the stats overlay
Session::get()->getOverlayManager().setOverlayState(Overlay::OverlayDebug,
!Session::get()->getOverlayManager().isOverlayEnabled(Overlay::OverlayDebug));
+ Session::get()->getOverlayManager().setOverlayState(Overlay::OverlayDebugAudio,
+ !Session::get()->getOverlayManager().isOverlayEnabled(Overlay::OverlayDebugAudio));
break;
case KeyComboToggleMouseMode:
diff --git a/app/streaming/session.cpp b/app/streaming/session.cpp
index b6f1a0251..a9fa42039 100644
--- a/app/streaming/session.cpp
+++ b/app/streaming/session.cpp
@@ -1968,6 +1968,7 @@ void Session::execInternal()
// Toggle the stats overlay if requested by the user
m_OverlayManager.setOverlayState(Overlay::OverlayDebug, m_Preferences->showPerformanceOverlay);
+ m_OverlayManager.setOverlayState(Overlay::OverlayDebugAudio, m_Preferences->showPerformanceOverlay);
// Hijack this thread to be the SDL main thread. We have to do this
// because we want to suspend all Qt processing until the stream is over.
diff --git a/app/streaming/streamutils.cpp b/app/streaming/streamutils.cpp
index 2aa5a51d0..f65d680f9 100644
--- a/app/streaming/streamutils.cpp
+++ b/app/streaming/streamutils.cpp
@@ -208,7 +208,7 @@ bool StreamUtils::getNativeDesktopMode(int displayIndex, SDL_DisplayMode* mode,
CGDirectDisplayID displayIds[MAX_DISPLAYS];
uint32_t displayCount = 0;
CGGetActiveDisplayList(MAX_DISPLAYS, displayIds, &displayCount);
- if (displayIndex >= displayCount) {
+ if (displayIndex >= (int)displayCount) {
return false;
}
diff --git a/app/streaming/video/decoder.h b/app/streaming/video/decoder.h
index 24708d828..3e2856f8d 100644
--- a/app/streaming/video/decoder.h
+++ b/app/streaming/video/decoder.h
@@ -9,27 +9,29 @@
#define MAX_SLICES 4
typedef struct _VIDEO_STATS {
+ uint64_t receivedVideoBytes;
uint32_t receivedFrames;
uint32_t decodedFrames;
uint32_t renderedFrames;
uint32_t totalFrames;
uint32_t networkDroppedFrames;
uint32_t pacerDroppedFrames;
- uint16_t minHostProcessingLatency;
- uint16_t maxHostProcessingLatency;
- uint32_t totalHostProcessingLatency;
- uint32_t framesWithHostProcessingLatency;
- uint32_t totalReassemblyTime;
- uint32_t totalDecodeTime;
- uint32_t totalPacerTime;
- uint32_t totalRenderTime;
- uint32_t lastRtt;
- uint32_t lastRttVariance;
- float totalFps;
- float receivedFps;
- float decodedFps;
- float renderedFps;
- uint32_t measurementStartTimestamp;
+ uint16_t minHostProcessingLatency; // low-res from RTP
+ uint16_t maxHostProcessingLatency; // low-res from RTP
+ uint32_t totalHostProcessingLatency; // low-res from RTP
+ uint32_t framesWithHostProcessingLatency; // low-res from RTP
+ uint64_t totalReassemblyTimeUs; // high-res (1us)
+ uint64_t totalDecodeTimeUs; // high-res from moonlight-common-c (1us)
+ uint64_t totalPacerTimeUs; // high-res (1us)
+ uint64_t totalRenderTimeUs; // high-res (1us)
+ uint32_t lastRtt; // low-res from enet (1ms)
+ uint32_t lastRttVariance; // low-res from enet (1ms)
+ double totalFps; // high-res
+ double receivedFps; // high-res
+ double decodedFps; // high-res
+ double renderedFps; // high-res
+ double videoMegabitsPerSec; // current video bitrate in Mbps, not including FEC overhead
+ uint64_t measurementStartUs; // microseconds
} VIDEO_STATS, *PVIDEO_STATS;
typedef struct _DECODER_PARAMETERS {
diff --git a/app/streaming/video/ffmpeg-renderers/d3d11va.cpp b/app/streaming/video/ffmpeg-renderers/d3d11va.cpp
index 201eb7acc..d8b8bc8ff 100644
--- a/app/streaming/video/ffmpeg-renderers/d3d11va.cpp
+++ b/app/streaming/video/ffmpeg-renderers/d3d11va.cpp
@@ -967,6 +967,11 @@ void D3D11VARenderer::notifyOverlayUpdated(Overlay::OverlayType type)
renderRect.x = 0;
renderRect.y = m_DisplayHeight - newSurface->h;
}
+ else if (type == Overlay::OverlayDebugAudio) {
+ // Top right
+ renderRect.x = m_DisplayWidth - newSurface->w;
+ renderRect.y = m_DisplayHeight - newSurface->h;
+ }
renderRect.w = newSurface->w;
renderRect.h = newSurface->h;
diff --git a/app/streaming/video/ffmpeg-renderers/dxva2.cpp b/app/streaming/video/ffmpeg-renderers/dxva2.cpp
index a3e538b02..91fd1504f 100644
--- a/app/streaming/video/ffmpeg-renderers/dxva2.cpp
+++ b/app/streaming/video/ffmpeg-renderers/dxva2.cpp
@@ -866,6 +866,11 @@ void DXVA2Renderer::notifyOverlayUpdated(Overlay::OverlayType type)
renderRect.x = 0;
renderRect.y = 0;
}
+ else if (type == Overlay::OverlayDebugAudio) {
+ // Top right
+ renderRect.x = m_DisplayWidth - newSurface->w;
+ renderRect.y = 0;
+ }
renderRect.w = newSurface->w;
renderRect.h = newSurface->h;
diff --git a/app/streaming/video/ffmpeg-renderers/eglvid.cpp b/app/streaming/video/ffmpeg-renderers/eglvid.cpp
index 27f86c106..6ee809f73 100644
--- a/app/streaming/video/ffmpeg-renderers/eglvid.cpp
+++ b/app/streaming/video/ffmpeg-renderers/eglvid.cpp
@@ -241,6 +241,11 @@ void EGLRenderer::renderOverlay(Overlay::OverlayType type, int viewportWidth, in
// Top left
overlayRect.x = 0;
overlayRect.y = viewportHeight - newSurface->h;
+ }
+ else if (type == Overlay::OverlayDebugAudio) {
+ // Top right
+ overlayRect.x = viewportWidth - newSurface->w;
+ overlayRect.y = viewportHeight - newSurface->h;
} else {
SDL_assert(false);
}
diff --git a/app/streaming/video/ffmpeg-renderers/pacer/pacer.cpp b/app/streaming/video/ffmpeg-renderers/pacer/pacer.cpp
index 2686c5495..30fd566fe 100644
--- a/app/streaming/video/ffmpeg-renderers/pacer/pacer.cpp
+++ b/app/streaming/video/ffmpeg-renderers/pacer/pacer.cpp
@@ -333,14 +333,14 @@ void Pacer::signalVsync()
void Pacer::renderFrame(AVFrame* frame)
{
// Count time spent in Pacer's queues
- Uint32 beforeRender = SDL_GetTicks();
- m_VideoStats->totalPacerTime += beforeRender - frame->pkt_dts;
+ uint64_t beforeRender = LiGetMicroseconds();
+ m_VideoStats->totalPacerTimeUs += (beforeRender - (uint64_t)frame->pkt_dts);
// Render it
m_VsyncRenderer->renderFrame(frame);
- Uint32 afterRender = SDL_GetTicks();
+ uint64_t afterRender = LiGetMicroseconds();
- m_VideoStats->totalRenderTime += afterRender - beforeRender;
+ m_VideoStats->totaldecodeTimeUs += (afterRender - beforeRender);
m_VideoStats->renderedFrames++;
av_frame_free(&frame);
diff --git a/app/streaming/video/ffmpeg-renderers/plvk.cpp b/app/streaming/video/ffmpeg-renderers/plvk.cpp
index 4546be7c5..4b6bec76d 100644
--- a/app/streaming/video/ffmpeg-renderers/plvk.cpp
+++ b/app/streaming/video/ffmpeg-renderers/plvk.cpp
@@ -817,6 +817,11 @@ void PlVkRenderer::renderFrame(AVFrame *frame)
overlayParts[i].dst.x0 = 0;
overlayParts[i].dst.y0 = 0;
}
+ else if (i == Overlay::OverlayDebugAudio) {
+ // Top right
+ overlayParts[i].dst.x0 = SDL_max(0, targetFrame.crop.x1 - overlayParts[i].src.x1);
+ overlayParts[i].dst.y0 = 0;
+ }
overlayParts[i].dst.x1 = overlayParts[i].dst.x0 + overlayParts[i].src.x1;
overlayParts[i].dst.y1 = overlayParts[i].dst.y0 + overlayParts[i].src.y1;
diff --git a/app/streaming/video/ffmpeg-renderers/sdlvid.cpp b/app/streaming/video/ffmpeg-renderers/sdlvid.cpp
index 1467ce52b..95d0cf1f8 100644
--- a/app/streaming/video/ffmpeg-renderers/sdlvid.cpp
+++ b/app/streaming/video/ffmpeg-renderers/sdlvid.cpp
@@ -229,10 +229,11 @@ void SdlRenderer::renderOverlay(Overlay::OverlayType type)
SDL_DestroyTexture(m_OverlayTextures[type]);
}
+ SDL_Rect viewportRect;
+ SDL_RenderGetViewport(m_Renderer, &viewportRect);
+
if (type == Overlay::OverlayStatusUpdate) {
// Bottom Left
- SDL_Rect viewportRect;
- SDL_RenderGetViewport(m_Renderer, &viewportRect);
m_OverlayRects[type].x = 0;
m_OverlayRects[type].y = viewportRect.h - newSurface->h;
}
@@ -241,6 +242,11 @@ void SdlRenderer::renderOverlay(Overlay::OverlayType type)
m_OverlayRects[type].x = 0;
m_OverlayRects[type].y = 0;
}
+ else if (type == Overlay::OverlayDebugAudio) {
+ // Top right
+ m_OverlayRects[type].x = viewportRect.w - newSurface->w;
+ m_OverlayRects[type].y = 0;
+ }
m_OverlayRects[type].w = newSurface->w;
m_OverlayRects[type].h = newSurface->h;
diff --git a/app/streaming/video/ffmpeg-renderers/vaapi.cpp b/app/streaming/video/ffmpeg-renderers/vaapi.cpp
index d8f66074a..43bb6ac9d 100644
--- a/app/streaming/video/ffmpeg-renderers/vaapi.cpp
+++ b/app/streaming/video/ffmpeg-renderers/vaapi.cpp
@@ -722,6 +722,11 @@ void VAAPIRenderer::notifyOverlayUpdated(Overlay::OverlayType type)
overlayRect.x = 0;
overlayRect.y = 0;
}
+ else if (type == Overlay::OverlayDebugAudio) {
+ // Top right
+ overlayRect.x = -newSurface->w;
+ overlayRect.y = 0;
+ }
overlayRect.w = newSurface->w;
overlayRect.h = newSurface->h;
diff --git a/app/streaming/video/ffmpeg-renderers/vdpau.cpp b/app/streaming/video/ffmpeg-renderers/vdpau.cpp
index 99244dcfc..67657e52e 100644
--- a/app/streaming/video/ffmpeg-renderers/vdpau.cpp
+++ b/app/streaming/video/ffmpeg-renderers/vdpau.cpp
@@ -435,6 +435,11 @@ void VDPAURenderer::notifyOverlayUpdated(Overlay::OverlayType type)
overlayRect.x0 = 0;
overlayRect.y0 = 0;
}
+ else if (type == Overlay::OverlayDebugAudio) {
+ // Top right
+ overlayRect.x0 = m_DisplayWidth - newSurface->w;
+ overlayRect.y0 = 0;
+ }
overlayRect.x1 = overlayRect.x0 + newSurface->w;
overlayRect.y1 = overlayRect.y0 + newSurface->h;
diff --git a/app/streaming/video/ffmpeg-renderers/vt_avsamplelayer.mm b/app/streaming/video/ffmpeg-renderers/vt_avsamplelayer.mm
index 245845276..3c53c267c 100644
--- a/app/streaming/video/ffmpeg-renderers/vt_avsamplelayer.mm
+++ b/app/streaming/video/ffmpeg-renderers/vt_avsamplelayer.mm
@@ -497,6 +497,9 @@ void updateOverlayOnMainThread(Overlay::OverlayType type)
case Overlay::OverlayDebug:
[m_OverlayTextFields[type] setAlignment:NSTextAlignmentLeft];
break;
+ case Overlay::OverlayDebugAudio:
+ [m_OverlayTextFields[type] setAlignment:NSTextAlignmentRight];
+ break;
case Overlay::OverlayStatusUpdate:
[m_OverlayTextFields[type] setAlignment:NSTextAlignmentRight];
break;
diff --git a/app/streaming/video/ffmpeg-renderers/vt_metal.mm b/app/streaming/video/ffmpeg-renderers/vt_metal.mm
index 61f15a7f9..14e2b520a 100644
--- a/app/streaming/video/ffmpeg-renderers/vt_metal.mm
+++ b/app/streaming/video/ffmpeg-renderers/vt_metal.mm
@@ -603,6 +603,11 @@ virtual void renderFrame(AVFrame* frame) override
renderRect.x = 0;
renderRect.y = m_LastDrawableHeight - overlayTexture.height;
}
+ else if (i == Overlay::OverlayDebugAudio) {
+ // Top right
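+ // The drawable's Y origin is at the bottom here (note the top-left case above),
+ // so "top" means y = drawableHeight - textureHeight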
+ renderRect.x = m_LastDrawableWidth - overlayTexture.width;
+ renderRect.y = m_LastDrawableHeight - overlayTexture.height;
+ }
renderRect.w = overlayTexture.width;
renderRect.h = overlayTexture.height;
diff --git a/app/streaming/video/ffmpeg.cpp b/app/streaming/video/ffmpeg.cpp
index c17487e90..9d91f848a 100644
--- a/app/streaming/video/ffmpeg.cpp
+++ b/app/streaming/video/ffmpeg.cpp
@@ -660,16 +660,17 @@ bool FFmpegVideoDecoder::completeInitialization(const AVCodec* decoder, enum AVP
void FFmpegVideoDecoder::addVideoStats(VIDEO_STATS& src, VIDEO_STATS& dst)
{
+ dst.receivedVideoBytes += src.receivedVideoBytes;
dst.receivedFrames += src.receivedFrames;
dst.decodedFrames += src.decodedFrames;
dst.renderedFrames += src.renderedFrames;
dst.totalFrames += src.totalFrames;
dst.networkDroppedFrames += src.networkDroppedFrames;
dst.pacerDroppedFrames += src.pacerDroppedFrames;
- dst.totalReassemblyTime += src.totalReassemblyTime;
- dst.totalDecodeTime += src.totalDecodeTime;
- dst.totalPacerTime += src.totalPacerTime;
- dst.totalRenderTime += src.totalRenderTime;
+ dst.totalReassemblyTimeUs += src.totalReassemblyTimeUs;
+ dst.totalDecodeTimeUs += src.totalDecodeTimeUs;
+ dst.totalPacerTimeUs += src.totalPacerTimeUs;
+ dst.totalRenderTimeUs += src.totalRenderTimeUs;
if (dst.minHostProcessingLatency == 0) {
dst.minHostProcessingLatency = src.minHostProcessingLatency;
@@ -691,20 +692,20 @@ void FFmpegVideoDecoder::addVideoStats(VIDEO_STATS& src, VIDEO_STATS& dst)
SDL_assert(dst.lastRtt > 0);
}
- Uint32 now = SDL_GetTicks();
-
// Initialize the measurement start point if this is the first video stat window
- if (!dst.measurementStartTimestamp) {
- dst.measurementStartTimestamp = src.measurementStartTimestamp;
+ if (!dst.measurementStartUs) {
+ dst.measurementStartUs = src.measurementStartUs;
}
// The following code assumes the global measure was already started first
- SDL_assert(dst.measurementStartTimestamp <= src.measurementStartTimestamp);
-
- dst.totalFps = (float)dst.totalFrames / ((float)(now - dst.measurementStartTimestamp) / 1000);
- dst.receivedFps = (float)dst.receivedFrames / ((float)(now - dst.measurementStartTimestamp) / 1000);
- dst.decodedFps = (float)dst.decodedFrames / ((float)(now - dst.measurementStartTimestamp) / 1000);
- dst.renderedFps = (float)dst.renderedFrames / ((float)(now - dst.measurementStartTimestamp) / 1000);
+ SDL_assert(dst.measurementStartUs <= src.measurementStartUs);
+
+ double timeDiffSecs = (double)(LiGetMicroseconds() - dst.measurementStartUs) / 1000000.0;
+ dst.totalFps = (double)dst.totalFrames / timeDiffSecs;
+ dst.receivedFps = (double)dst.receivedFrames / timeDiffSecs;
+ dst.decodedFps = (double)dst.decodedFrames / timeDiffSecs;
+ dst.renderedFps = (double)dst.renderedFrames / timeDiffSecs;
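+ // Bytes -> bits (*8), bits -> megabits (/1e6), then divide by the elapsed window in seconds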
+ dst.videoMegabitsPerSec = (double)(dst.receivedVideoBytes * 8) / 1000000.0 / timeDiffSecs;
}
void FFmpegVideoDecoder::stringifyVideoStats(VIDEO_STATS& stats, char* output, int length)
@@ -786,13 +787,21 @@ void FFmpegVideoDecoder::stringifyVideoStats(VIDEO_STATS& stats, char* output, i
if (stats.receivedFps > 0) {
if (m_VideoDecoderCtx != nullptr) {
+ const RTP_VIDEO_STATS* rtpVideoStats = LiGetRTPVideoStats();
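+ // Overhead ratio: FEC parity packets as a fraction of all video + FEC packets received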
+ float fecOverhead = (float)rtpVideoStats->packetCountFec / (float)(rtpVideoStats->packetCountVideo + rtpVideoStats->packetCountFec);
+ bool useKb = stats.videoMegabitsPerSec < 1;
+
ret = snprintf(&output[offset],
length - offset,
- "Video stream: %dx%d %.2f FPS (Codec: %s)\n",
+ "Video stream: %dx%d %.2f FPS (Codec: %s)\n"
+ "Bitrate: %.1f %s, +%.0f%% forward error-correction\n",
m_VideoDecoderCtx->width,
m_VideoDecoderCtx->height,
stats.totalFps,
- codecString);
+ codecString,
+ useKb ? stats.videoMegabitsPerSec * 1000 : stats.videoMegabitsPerSec,
+ useKb ? "kbps" : "Mbps",
+ fecOverhead * 100.0);
if (ret < 0 || ret >= length - offset) {
SDL_assert(false);
return;
@@ -803,12 +812,8 @@ void FFmpegVideoDecoder::stringifyVideoStats(VIDEO_STATS& stats, char* output, i
ret = snprintf(&output[offset],
length - offset,
- "Incoming frame rate from network: %.2f FPS\n"
- "Decoding frame rate: %.2f FPS\n"
- "Rendering frame rate: %.2f FPS\n",
- stats.receivedFps,
- stats.decodedFps,
- stats.renderedFps);
+ "FPS incoming/decoding/rendering: %.2f/%.2f/%.2f\n",
+ stats.receivedFps, stats.decodedFps, stats.renderedFps);
if (ret < 0 || ret >= length - offset) {
SDL_assert(false);
return;
@@ -853,9 +858,9 @@ void FFmpegVideoDecoder::stringifyVideoStats(VIDEO_STATS& stats, char* output, i
(float)stats.networkDroppedFrames / stats.totalFrames * 100,
(float)stats.pacerDroppedFrames / stats.decodedFrames * 100,
rttString,
- (float)stats.totalDecodeTime / stats.decodedFrames,
- (float)stats.totalPacerTime / stats.renderedFrames,
- (float)stats.totalRenderTime / stats.renderedFrames);
+ (stats.totalDecodeTimeUs / 1000.0) / stats.decodedFrames,
+ (stats.totalPacerTimeUs / 1000.0) / stats.renderedFrames,
+ (stats.totalRenderTimeUs / 1000.0) / stats.renderedFrames);
if (ret < 0 || ret >= length - offset) {
SDL_assert(false);
return;
@@ -872,10 +877,8 @@ void FFmpegVideoDecoder::logVideoStats(VIDEO_STATS& stats, const char* title)
stringifyVideoStats(stats, videoStatsStr, sizeof(videoStatsStr));
SDL_LogInfo(SDL_LOG_CATEGORY_APPLICATION,
- "%s", title);
- SDL_LogInfo(SDL_LOG_CATEGORY_APPLICATION,
- "----------------------------------------------------------\n%s",
- videoStatsStr);
+ "\n%s\n------------------\n%s",
+ title, videoStatsStr);
}
}
@@ -1685,7 +1688,7 @@ void FFmpegVideoDecoder::decoderThreadProc()
av_log_set_level(AV_LOG_INFO);
// Capture a frame timestamp to measuring pacing delay
- frame->pkt_dts = SDL_GetTicks();
+ frame->pkt_dts = LiGetMicroseconds();
if (!m_FrameInfoQueue.isEmpty()) {
// Data buffers in the DU are not valid here!
@@ -1694,7 +1697,7 @@ void FFmpegVideoDecoder::decoderThreadProc()
// Count time in avcodec_send_packet() and avcodec_receive_frame()
// as time spent decoding. Also count time spent in the decode unit
// queue because that's directly caused by decoder latency.
- m_ActiveWndVideoStats.totalDecodeTime += LiGetMillis() - du.enqueueTimeMs;
+ m_ActiveWndVideoStats.totalDecodeTimeUs += (LiGetMicroseconds() - du.enqueueTimeUs);
// Store the presentation time
frame->pts = du.presentationTimeMs;
@@ -1770,18 +1773,19 @@ int FFmpegVideoDecoder::submitDecodeUnit(PDECODE_UNIT du)
}
if (!m_LastFrameNumber) {
- m_ActiveWndVideoStats.measurementStartTimestamp = SDL_GetTicks();
+ m_ActiveWndVideoStats.measurementStartUs = LiGetMicroseconds();
m_LastFrameNumber = du->frameNumber;
}
else {
// Any frame number greater than m_LastFrameNumber + 1 represents a dropped frame
m_ActiveWndVideoStats.networkDroppedFrames += du->frameNumber - (m_LastFrameNumber + 1);
m_ActiveWndVideoStats.totalFrames += du->frameNumber - (m_LastFrameNumber + 1);
+ m_ActiveWndVideoStats.receivedVideoBytes += (uint64_t)du->fullLength;
m_LastFrameNumber = du->frameNumber;
}
// Flip stats windows roughly every second
- if (SDL_TICKS_PASSED(SDL_GetTicks(), m_ActiveWndVideoStats.measurementStartTimestamp + 1000)) {
+ if (LiGetMicroseconds() > m_ActiveWndVideoStats.measurementStartUs + 1000000) {
// Update overlay stats if it's enabled
if (Session::get()->getOverlayManager().isOverlayEnabled(Overlay::OverlayDebug)) {
VIDEO_STATS lastTwoWndStats = {};
@@ -1800,7 +1804,7 @@ int FFmpegVideoDecoder::submitDecodeUnit(PDECODE_UNIT du)
// Move this window into the last window slot and clear it for next window
SDL_memcpy(&m_LastWndVideoStats, &m_ActiveWndVideoStats, sizeof(m_ActiveWndVideoStats));
SDL_zero(m_ActiveWndVideoStats);
- m_ActiveWndVideoStats.measurementStartTimestamp = SDL_GetTicks();
+ m_ActiveWndVideoStats.measurementStartUs = LiGetMicroseconds();
}
if (du->frameHostProcessingLatency != 0) {
@@ -1843,7 +1847,7 @@ int FFmpegVideoDecoder::submitDecodeUnit(PDECODE_UNIT du)
m_Pkt->flags = 0;
}
- m_ActiveWndVideoStats.totalReassemblyTime += du->enqueueTimeMs - du->receiveTimeMs;
+ m_ActiveWndVideoStats.totalReassemblyTimeUs += (du->enqueueTimeUs - du->receiveTimeUs);
err = avcodec_send_packet(m_VideoDecoderCtx, m_Pkt);
if (err < 0) {
diff --git a/app/streaming/video/overlaymanager.cpp b/app/streaming/video/overlaymanager.cpp
index 168e52330..cdfd547a1 100644
--- a/app/streaming/video/overlaymanager.cpp
+++ b/app/streaming/video/overlaymanager.cpp
@@ -12,6 +12,9 @@ OverlayManager::OverlayManager() :
m_Overlays[OverlayType::OverlayDebug].color = {0xD0, 0xD0, 0x00, 0xFF};
m_Overlays[OverlayType::OverlayDebug].fontSize = 20;
+ m_Overlays[OverlayType::OverlayDebugAudio].color = {0x00, 0xD0, 0xD0, 0xFF};
+ m_Overlays[OverlayType::OverlayDebugAudio].fontSize = 20;
+
m_Overlays[OverlayType::OverlayStatusUpdate].color = {0xCC, 0x00, 0x00, 0xFF};
m_Overlays[OverlayType::OverlayStatusUpdate].fontSize = 36;
diff --git a/app/streaming/video/overlaymanager.h b/app/streaming/video/overlaymanager.h
index 59c808b92..129560791 100644
--- a/app/streaming/video/overlaymanager.h
+++ b/app/streaming/video/overlaymanager.h
@@ -9,6 +9,7 @@ namespace Overlay {
enum OverlayType {
OverlayDebug,
+ OverlayDebugAudio,
OverlayStatusUpdate,
OverlayMax
};
@@ -46,7 +47,7 @@ class OverlayManager
bool enabled;
int fontSize;
SDL_Color color;
- char text[512];
+ char text[1024]; // enlarged to fit the longer debug stats text
TTF_Font* font;
SDL_Surface* surface;
diff --git a/moonlight-common-c/moonlight-common-c b/moonlight-common-c/moonlight-common-c
index 8599b6042..583754fc6 160000
--- a/moonlight-common-c/moonlight-common-c
+++ b/moonlight-common-c/moonlight-common-c
@@ -1 +1 @@
-Subproject commit 8599b6042a4ba27749b0f94134dd614b4328a9bc
+Subproject commit 583754fc686fea5ba0537c78dd5a647c788b138c
diff --git a/scripts/generate-dmg.sh b/scripts/generate-dmg.sh
index 12cacf972..14b709616 100755
--- a/scripts/generate-dmg.sh
+++ b/scripts/generate-dmg.sh
@@ -60,7 +60,12 @@ find $BUILD_FOLDER/app/Moonlight.app/ -name '*.dSYM' | xargs rm -rf
if [ "$SIGNING_IDENTITY" != "" ]; then
echo Signing app bundle
- codesign --force --deep --options runtime --timestamp --sign "$SIGNING_IDENTITY" $BUILD_FOLDER/app/Moonlight.app || fail "Signing failed!"
+ codesign --force --deep --options runtime --timestamp \
+ --entitlements $SOURCE_ROOT/app/deploy/macos/spatial-audio.entitlements \
+ --sign "$SIGNING_IDENTITY" \
+ $BUILD_FOLDER/app/Moonlight.app || fail "Signing failed!"
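+ # Dump the embedded entitlements so build logs show exactly what was signed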
+ echo "App signature:"
+ codesign -d --entitlements - -vvv $BUILD_FOLDER/app/Moonlight.app
fi
echo Creating DMG
@@ -84,4 +89,4 @@ if [ "$NOTARY_KEYCHAIN_PROFILE" != "" ]; then
fi
mv $INSTALLER_FOLDER/Moonlight\ $VERSION.dmg $INSTALLER_FOLDER/Moonlight-$VERSION.dmg
-echo Build successful
\ No newline at end of file
+echo Build successful