Mirror of https://github.com/Redot-Engine/redot-engine.git (synced 2025-12-06 07:17:42 -05:00)
Merge pull request #943 from Spartan322/4.3.1-cherry-pick/bugs-audio
[4.3] Cherry-picks for the 4.3 (4.3.1) branch - 1st audio bugs batch
@@ -252,7 +252,7 @@ OSStatus AudioDriverCoreAudio::input_callback(void *inRefCon,
 }

 void AudioDriverCoreAudio::start() {
-	if (!active) {
+	if (!active && audio_unit != nullptr) {
 		OSStatus result = AudioOutputUnitStart(audio_unit);
 		if (result != noErr) {
 			ERR_PRINT("AudioOutputUnitStart failed, code: " + itos(result));
@@ -125,6 +125,8 @@ const IID IID_IAudioCaptureClient = __uuidof(IAudioCaptureClient);

 static bool default_output_device_changed = false;
 static bool default_input_device_changed = false;
+static int output_reinit_countdown = 0;
+static int input_reinit_countdown = 0;

 // Silence warning due to a COM API weirdness (GH-35194).
 #if defined(__GNUC__) && !defined(__clang__)
@@ -136,6 +138,8 @@ class CMMNotificationClient : public IMMNotificationClient {
 	LONG _cRef = 1;

 public:
+	ComPtr<IMMDeviceEnumerator> enumerator = nullptr;
+
 	CMMNotificationClient() {}
 	virtual ~CMMNotificationClient() {}

@@ -201,6 +205,9 @@ public:
 static CMMNotificationClient notif_client;

 Error AudioDriverWASAPI::audio_device_init(AudioDeviceWASAPI *p_device, bool p_input, bool p_reinit, bool p_no_audio_client_3) {
+	// This function can be called recursively, so clean up before starting:
+	audio_device_finish(p_device);
+
 	WAVEFORMATEX *pwfex;
 	ComPtr<IMMDeviceEnumerator> enumerator = nullptr;
 	ComPtr<IMMDevice> output_device = nullptr;
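The added audio_device_finish() call makes audio_device_init() safe to re-enter: every call first tears the device down to a known-empty state, so a retried or recursive init cannot leak the previous attempt's resources. A minimal sketch of the pattern with illustrative names (not engine code):

    #include <cstdlib>

    // Sketch: an init that always cleans up first is safe to call again
    // (or recursively) after a partial failure.
    struct Device {
        void *client = nullptr;
    };

    void device_finish(Device *d) {
        free(d->client); // free(nullptr) is a no-op, so this is always safe.
        d->client = nullptr;
    }

    int device_init(Device *d) {
        device_finish(d); // Start from a clean slate on every call.
        d->client = malloc(64);
        return d->client != nullptr ? 0 : -1;
    }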
@@ -227,21 +234,25 @@ Error AudioDriverWASAPI::audio_device_init(AudioDeviceWASAPI *p_device, bool p_i
 			ComPtr<IMMDevice> tmp_device = nullptr;

 			hr = devices->Item(i, &tmp_device);
-			ERR_BREAK(hr != S_OK);
+			ERR_BREAK_MSG(hr != S_OK, "Cannot get devices item.");

 			ComPtr<IPropertyStore> props = nullptr;
 			hr = tmp_device->OpenPropertyStore(STGM_READ, &props);
-			ERR_BREAK(hr != S_OK);
+			ERR_BREAK_MSG(hr != S_OK, "Cannot open property store.");

 			PROPVARIANT propvar;
 			PropVariantInit(&propvar);

 			hr = props->GetValue(PKEY_Device_FriendlyNameGodot, &propvar);
-			ERR_BREAK(hr != S_OK);
+			ERR_BREAK_MSG(hr != S_OK, "Cannot get value.");

 			if (p_device->device_name == String(propvar.pwszVal)) {
 				hr = tmp_device->GetId(&strId);
-				ERR_BREAK(hr != S_OK);
+				if (unlikely(hr != S_OK)) {
+					PropVariantClear(&propvar);
+					ERR_PRINT("Cannot get device ID string.");
+					break;
+				}

 				found = true;
 			}
@@ -272,9 +283,14 @@ Error AudioDriverWASAPI::audio_device_init(AudioDeviceWASAPI *p_device, bool p_i
 		ERR_FAIL_COND_V(hr != S_OK, ERR_CANT_OPEN);
 	}

+	if (notif_client.enumerator != nullptr) {
+		notif_client.enumerator->UnregisterEndpointNotificationCallback(&notif_client);
+		notif_client.enumerator = nullptr;
+	}
 	hr = enumerator->RegisterEndpointNotificationCallback(&notif_client);

-	if (hr != S_OK) {
+	if (hr == S_OK) {
+		notif_client.enumerator = enumerator;
+	} else {
 		ERR_PRINT("WASAPI: RegisterEndpointNotificationCallback error");
 	}
@@ -319,6 +335,7 @@ Error AudioDriverWASAPI::audio_device_init(AudioDeviceWASAPI *p_device, bool p_i

 	hr = p_device->audio_client->GetMixFormat(&pwfex);
 	ERR_FAIL_COND_V(hr != S_OK, ERR_CANT_OPEN);
+	// From this point onward, CoTaskMemFree(pwfex) must be called before returning or pwfex will leak!

 	print_verbose("WASAPI: wFormatTag = " + itos(pwfex->wFormatTag));
 	print_verbose("WASAPI: nChannels = " + itos(pwfex->nChannels));
@@ -342,6 +359,7 @@ Error AudioDriverWASAPI::audio_device_init(AudioDeviceWASAPI *p_device, bool p_i
 			print_verbose("WASAPI: closest->cbSize = " + itos(closest->cbSize));

 			WARN_PRINT("WASAPI: Using closest match instead");
+			CoTaskMemFree(pwfex);
 			pwfex = closest;
 		}
 	}
@@ -361,11 +379,13 @@ Error AudioDriverWASAPI::audio_device_init(AudioDeviceWASAPI *p_device, bool p_i
 			p_device->format_tag = WAVE_FORMAT_IEEE_FLOAT;
 		} else {
 			ERR_PRINT("WASAPI: Format not supported");
+			CoTaskMemFree(pwfex);
 			ERR_FAIL_V(ERR_CANT_OPEN);
 		}
 	} else {
 		if (p_device->format_tag != WAVE_FORMAT_PCM && p_device->format_tag != WAVE_FORMAT_IEEE_FLOAT) {
 			ERR_PRINT("WASAPI: Format not supported");
+			CoTaskMemFree(pwfex);
 			ERR_FAIL_V(ERR_CANT_OPEN);
 		}
 	}
@@ -378,10 +398,28 @@ Error AudioDriverWASAPI::audio_device_init(AudioDeviceWASAPI *p_device, bool p_i
 		pwfex->nAvgBytesPerSec = pwfex->nSamplesPerSec * pwfex->nChannels * (pwfex->wBitsPerSample / 8);
 	}

 	hr = p_device->audio_client->Initialize(AUDCLNT_SHAREMODE_SHARED, streamflags, p_input ? REFTIMES_PER_SEC : 0, 0, pwfex, nullptr);
-	ERR_FAIL_COND_V_MSG(hr != S_OK, ERR_CANT_OPEN, "WASAPI: Initialize failed with error 0x" + String::num_uint64(hr, 16) + ".");
+	if (p_reinit) {
+		// In case we're trying to re-initialize the device, prevent throwing this error on the console,
+		// otherwise if there is currently no device available this will spam the console.
+		if (hr != S_OK) {
+			print_verbose("WASAPI: Initialize failed with error 0x" + String::num_uint64(hr, 16) + ".");
+			CoTaskMemFree(pwfex);
+			return ERR_CANT_OPEN;
+		}
+	} else {
+		if (unlikely(hr != S_OK)) {
+			CoTaskMemFree(pwfex);
+			ERR_FAIL_V_MSG(ERR_CANT_OPEN, "WASAPI: Initialize failed with error 0x" + String::num_uint64(hr, 16) + ".");
+		}
+	}

 	UINT32 max_frames;
 	hr = p_device->audio_client->GetBufferSize(&max_frames);
-	ERR_FAIL_COND_V(hr != S_OK, ERR_CANT_OPEN);
+	if (unlikely(hr != S_OK)) {
+		CoTaskMemFree(pwfex);
+		ERR_FAIL_V(ERR_CANT_OPEN);
+	}

 	// Due to WASAPI Shared Mode we have no control of the buffer size
 	if (!p_input) {
@@ -455,7 +493,10 @@ Error AudioDriverWASAPI::audio_device_init(AudioDeviceWASAPI *p_device, bool p_i
 	} else {
 		hr = p_device->audio_client->GetService(IID_IAudioRenderClient, (void **)&p_device->render_client);
 	}
-	ERR_FAIL_COND_V(hr != S_OK, ERR_CANT_OPEN);
+	if (unlikely(hr != S_OK)) {
+		CoTaskMemFree(pwfex);
+		ERR_FAIL_V(ERR_CANT_OPEN);
+	}

 	// Free memory
 	CoTaskMemFree(pwfex);
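Most of the WASAPI edits above serve one invariant, spelled out by the new comment after GetMixFormat(): once pwfex is allocated, CoTaskMemFree(pwfex) must run on every exit path. The commit does this by hand at each early return; a scope guard is a common alternative that states the invariant once. A Windows-only sketch, not what the engine does:

    #include <combaseapi.h>

    // Sketch: release a CoTaskMemAlloc'd buffer when the scope exits, so every
    // early return frees the mix format without repeating the call.
    struct CoTaskMemGuard {
        void *ptr = nullptr;
        explicit CoTaskMemGuard(void *p_ptr) : ptr(p_ptr) {}
        ~CoTaskMemGuard() { CoTaskMemFree(ptr); } // Safe on nullptr.
        CoTaskMemGuard(const CoTaskMemGuard &) = delete;
        CoTaskMemGuard &operator=(const CoTaskMemGuard &) = delete;
    };

Declared right after GetMixFormat() succeeds, such a guard would replace the dozen manual CoTaskMemFree(pwfex) calls scattered through the error paths above.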
@@ -466,6 +507,11 @@ Error AudioDriverWASAPI::audio_device_init(AudioDeviceWASAPI *p_device, bool p_i
 Error AudioDriverWASAPI::init_output_device(bool p_reinit) {
 	Error err = audio_device_init(&audio_output, false, p_reinit);
 	if (err != OK) {
+		// We've tried to init the device, but have failed. Time to clean up.
+		Error finish_err = finish_output_device();
+		if (finish_err != OK) {
+			ERR_PRINT("WASAPI: finish_output_device error after failed output audio_device_init");
+		}
 		return err;
 	}

@@ -506,6 +552,11 @@ Error AudioDriverWASAPI::init_output_device(bool p_reinit) {
 Error AudioDriverWASAPI::init_input_device(bool p_reinit) {
 	Error err = audio_device_init(&audio_input, true, p_reinit);
 	if (err != OK) {
+		// We've tried to init the device, but have failed. Time to clean up.
+		Error finish_err = finish_input_device();
+		if (finish_err != OK) {
+			ERR_PRINT("WASAPI: finish_input_device error after failed input audio_device_init");
+		}
 		return err;
 	}

@@ -828,9 +879,15 @@ void AudioDriverWASAPI::thread_func(void *p_udata) {
 		}

 		if (!ad->audio_output.audio_client) {
-			Error err = ad->init_output_device(true);
-			if (err == OK) {
-				ad->start();
+			if (output_reinit_countdown < 1) {
+				Error err = ad->init_output_device(true);
+				if (err == OK) {
+					ad->start();
+				} else {
+					output_reinit_countdown = 1000;
+				}
+			} else {
+				output_reinit_countdown--;
 			}

 			avail_frames = 0;
@@ -901,9 +958,15 @@ void AudioDriverWASAPI::thread_func(void *p_udata) {
 			}

 			if (!ad->audio_input.audio_client) {
-				Error err = ad->init_input_device(true);
-				if (err == OK) {
-					ad->input_start();
+				if (input_reinit_countdown < 1) {
+					Error err = ad->init_input_device(true);
+					if (err == OK) {
+						ad->input_start();
+					} else {
+						input_reinit_countdown = 1000;
+					}
+				} else {
+					input_reinit_countdown--;
 				}
 			}
 		}
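The countdown introduced above keeps the audio thread from hammering device re-initialization: after a failed attempt the driver waits 1000 loop iterations before trying again, and the countdown only decrements while the device is still missing. The shape of the pattern in isolation, with illustrative names:

    // Sketch: throttle retries of an expensive operation on a tight loop.
    constexpr int RETRY_INTERVAL = 1000;
    static int reinit_countdown = 0;

    bool try_reinit() {
        return false; // Stub: pretend the device is still missing.
    }

    void on_device_missing() {
        if (reinit_countdown < 1) {
            if (!try_reinit()) {
                reinit_countdown = RETRY_INTERVAL; // Back off before the next attempt.
            }
        } else {
            reinit_countdown--;
        }
    }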
@@ -113,7 +113,15 @@ Error ResourceImporterWAV::import(const String &p_source_file, const String &p_s
 	}

 	/* GET FILESIZE */
-	file->get_32(); // filesize
+
+	// The file size in header is 8 bytes less than the actual size.
+	// See https://docs.fileformat.com/audio/wav/
+	const int FILE_SIZE_HEADER_OFFSET = 8;
+	uint32_t file_size_header = file->get_32() + FILE_SIZE_HEADER_OFFSET;
+	uint64_t file_size = file->get_length();
+	if (file_size != file_size_header) {
+		WARN_PRINT(vformat("File size %d is %s than the expected size %d. (%s)", file_size, file_size > file_size_header ? "larger" : "smaller", file_size_header, p_source_file));
+	}

 	/* CHECK WAVE */

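The new size check relies on the RIFF layout: the 32-bit size field at offset 4 counts everything after itself, i.e. the whole file minus the 4-byte "RIFF" tag and the 4-byte size field, hence the +8. A worked check with made-up numbers:

    #include <cstdint>
    #include <cstdio>

    int main() {
        // A well-formed 1044-byte WAV stores 1036 in its RIFF size field.
        const uint32_t size_field = 1036;
        const uint32_t expected_file_size = size_field + 8; // Tag + size field.
        printf("expected: %u bytes\n", expected_file_size); // Prints 1044.
        return 0;
    }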
@@ -199,7 +207,12 @@ Error ResourceImporterWAV::import(const String &p_source_file, const String &p_s
 				break;
 			}

+			uint64_t remaining_bytes = file_size - file_pos;
 			frames = chunksize;
+			if (remaining_bytes < chunksize) {
+				WARN_PRINT(vformat("Data chunk size is smaller than expected. Proceeding with actual data size. (%s)", p_source_file));
+				frames = remaining_bytes;
+			}

 			if (format_channels == 0) {
 				ERR_FAIL_COND_V(format_channels == 0, ERR_INVALID_DATA);
@@ -486,8 +486,6 @@ void AudioStreamInteractive::_bind_methods() {
 	ClassDB::bind_method(D_METHOD("set_clip_auto_advance_next_clip", "clip_index", "auto_advance_next_clip"), &AudioStreamInteractive::set_clip_auto_advance_next_clip);
 	ClassDB::bind_method(D_METHOD("get_clip_auto_advance_next_clip", "clip_index"), &AudioStreamInteractive::get_clip_auto_advance_next_clip);

-	ADD_PROPERTY(PropertyInfo(Variant::INT, "initial_clip", PROPERTY_HINT_ENUM, "", PROPERTY_USAGE_DEFAULT), "set_initial_clip", "get_initial_clip");
-
 	ADD_PROPERTY(PropertyInfo(Variant::INT, "clip_count", PROPERTY_HINT_RANGE, "1," + itos(MAX_CLIPS), PROPERTY_USAGE_DEFAULT | PROPERTY_USAGE_ARRAY, "Clips,clip_,page_size=999,unfoldable,numbered,swap_method=_inspector_array_swap_clip,add_button_text=" + String(RTR("Add Clip"))), "set_clip_count", "get_clip_count");
 	for (int i = 0; i < MAX_CLIPS; i++) {
 		ADD_PROPERTYI(PropertyInfo(Variant::STRING_NAME, "clip_" + itos(i) + "/name", PROPERTY_HINT_NONE, "", PROPERTY_USAGE_DEFAULT | PROPERTY_USAGE_INTERNAL), "set_clip_name", "get_clip_name", i);
@@ -496,6 +494,9 @@ void AudioStreamInteractive::_bind_methods() {
 		ADD_PROPERTYI(PropertyInfo(Variant::INT, "clip_" + itos(i) + "/next_clip", PROPERTY_HINT_ENUM, "", PROPERTY_USAGE_DEFAULT | PROPERTY_USAGE_INTERNAL), "set_clip_auto_advance_next_clip", "get_clip_auto_advance_next_clip", i);
 	}

+	// Needs to be registered after `clip_*` properties, as it depends on them.
+	ADD_PROPERTY(PropertyInfo(Variant::INT, "initial_clip", PROPERTY_HINT_ENUM, "", PROPERTY_USAGE_DEFAULT), "set_initial_clip", "get_initial_clip");
+
 	// TRANSITIONS

 	ClassDB::bind_method(D_METHOD("add_transition", "from_clip", "to_clip", "from_time", "to_time", "fade_mode", "fade_beats", "use_filler_clip", "filler_clip", "hold_previous"), &AudioStreamInteractive::add_transition, DEFVAL(false), DEFVAL(-1), DEFVAL(false));
@@ -870,7 +870,10 @@ class Bus {
 	 * @returns {void}
 	 */
 	static move(fromIndex, toIndex) {
-		const movedBus = GodotAudio.Bus.getBus(fromIndex);
+		const movedBus = GodotAudio.Bus.getBusOrNull(fromIndex);
+		if (movedBus == null) {
+			return;
+		}
 		const buses = GodotAudio.buses.filter((_, i) => i !== fromIndex);
 		// Inserts at index.
 		buses.splice(toIndex - 1, 0, movedBus);
@@ -1426,7 +1429,10 @@ const _GodotAudio = {
 	 * @returns {void}
 	 */
 	remove_sample_bus: function (index) {
-		const bus = GodotAudio.Bus.getBus(index);
+		const bus = GodotAudio.Bus.getBusOrNull(index);
+		if (bus == null) {
+			return;
+		}
 		bus.clear();
 	},

@@ -1456,8 +1462,17 @@ const _GodotAudio = {
 	 * @returns {void}
 	 */
 	set_sample_bus_send: function (busIndex, sendIndex) {
-		const bus = GodotAudio.Bus.getBus(busIndex);
-		bus.setSend(GodotAudio.Bus.getBus(sendIndex));
+		const bus = GodotAudio.Bus.getBusOrNull(busIndex);
+		if (bus == null) {
+			// Cannot send from an invalid bus.
+			return;
+		}
+		let targetBus = GodotAudio.Bus.getBusOrNull(sendIndex);
+		if (targetBus == null) {
+			// Send to master.
+			targetBus = GodotAudio.Bus.getBus(0);
+		}
+		bus.setSend(targetBus);
 	},

 	/**
@@ -1467,7 +1482,10 @@ const _GodotAudio = {
 	 * @returns {void}
 	 */
 	set_sample_bus_volume_db: function (busIndex, volumeDb) {
-		const bus = GodotAudio.Bus.getBus(busIndex);
+		const bus = GodotAudio.Bus.getBusOrNull(busIndex);
+		if (bus == null) {
+			return;
+		}
 		bus.setVolumeDb(volumeDb);
 	},

@@ -1478,7 +1496,10 @@ const _GodotAudio = {
 	 * @returns {void}
 	 */
 	set_sample_bus_solo: function (busIndex, enable) {
-		const bus = GodotAudio.Bus.getBus(busIndex);
+		const bus = GodotAudio.Bus.getBusOrNull(busIndex);
+		if (bus == null) {
+			return;
+		}
 		bus.solo(enable);
 	},

@@ -1489,7 +1510,10 @@ const _GodotAudio = {
 	 * @returns {void}
 	 */
 	set_sample_bus_mute: function (busIndex, enable) {
-		const bus = GodotAudio.Bus.getBus(busIndex);
+		const bus = GodotAudio.Bus.getBusOrNull(busIndex);
+		if (bus == null) {
+			return;
+		}
 		bus.mute(enable);
 	},
 },
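Every JavaScript change above is the same guard: getBusOrNull() yields null for a bus index that no longer exists, turning each setter into a no-op instead of a fault, and set_sample_bus_send() additionally falls back to the master bus when the send target is gone. The pattern, sketched in C++ for consistency with the other examples here (all names illustrative):

    #include <vector>

    struct Bus {
        int send = 0;
    };

    static std::vector<Bus> buses(4);

    // Sketch: return nullptr instead of faulting on an out-of-range index.
    Bus *get_bus_or_null(int p_index) {
        if (p_index < 0 || p_index >= (int)buses.size()) {
            return nullptr;
        }
        return &buses[p_index];
    }

    void set_send(int p_bus, int p_target) {
        Bus *bus = get_bus_or_null(p_bus);
        if (bus == nullptr) {
            return; // Cannot send from an invalid bus.
        }
        // Fall back to the master bus (index 0) when the target is invalid.
        bus->send = get_bus_or_null(p_target) != nullptr ? p_target : 0;
    }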
@@ -125,10 +125,8 @@ void AudioStreamPlaybackWAV::do_resample(const Depth *p_src, AudioFrame *p_dst,
 			int16_t nibble, diff, step;

 			p_ima_adpcm[i].last_nibble++;
-			const uint8_t *src_ptr = (const uint8_t *)base->data;
-			src_ptr += AudioStreamWAV::DATA_PAD;

-			uint8_t nbb = src_ptr[(p_ima_adpcm[i].last_nibble >> 1) * (is_stereo ? 2 : 1) + i];
+			uint8_t nbb = p_src[(p_ima_adpcm[i].last_nibble >> 1) * (is_stereo ? 2 : 1) + i];
 			nibble = (p_ima_adpcm[i].last_nibble & 1) ? (nbb >> 4) : (nbb & 0xF);
 			step = _ima_adpcm_step_table[p_ima_adpcm[i].step_index];
@@ -181,17 +179,16 @@ void AudioStreamPlaybackWAV::do_resample(const Depth *p_src, AudioFrame *p_dst,
 		if (pos != p_qoa->cache_pos) { // Prevents triple decoding on lower mix rates.
 			for (int i = 0; i < 2; i++) {
 				// Sign operations prevent triple decoding on backward loops, maxing prevents pop.
-				uint32_t interp_pos = MIN(pos + (i * sign) + (sign < 0), p_qoa->desc->samples - 1);
+				uint32_t interp_pos = MIN(pos + (i * sign) + (sign < 0), p_qoa->desc.samples - 1);
 				uint32_t new_data_ofs = 8 + interp_pos / QOA_FRAME_LEN * p_qoa->frame_len;

 				if (p_qoa->data_ofs != new_data_ofs) {
 					p_qoa->data_ofs = new_data_ofs;
-					const uint8_t *src_ptr = (const uint8_t *)base->data;
-					src_ptr += p_qoa->data_ofs + AudioStreamWAV::DATA_PAD;
-					qoa_decode_frame(src_ptr, p_qoa->frame_len, p_qoa->desc, p_qoa->dec, &p_qoa->dec_len);
+					const uint8_t *ofs_src = (uint8_t *)p_src + p_qoa->data_ofs;
+					qoa_decode_frame(ofs_src, p_qoa->frame_len, &p_qoa->desc, p_qoa->dec.ptr(), &p_qoa->dec_len);
 				}

-				uint32_t dec_idx = (interp_pos % QOA_FRAME_LEN) * p_qoa->desc->channels;
+				uint32_t dec_idx = (interp_pos % QOA_FRAME_LEN) * p_qoa->desc.channels;

 				if ((sign > 0 && i == 0) || (sign < 0 && i == 1)) {
 					final = p_qoa->dec[dec_idx];
@@ -269,7 +266,7 @@ void AudioStreamPlaybackWAV::do_resample(const Depth *p_src, AudioFrame *p_dst,
 }

 int AudioStreamPlaybackWAV::mix(AudioFrame *p_buffer, float p_rate_scale, int p_frames) {
-	if (!base->data || !active) {
+	if (base->data.is_empty() || !active) {
 		for (int i = 0; i < p_frames; i++) {
 			p_buffer[i] = AudioFrame(0, 0);
 		}
@@ -288,7 +285,7 @@ int AudioStreamPlaybackWAV::mix(AudioFrame *p_buffer, float p_rate_scale, int p_
 			len *= 2;
 			break;
 		case AudioStreamWAV::FORMAT_QOA:
-			len = qoa.desc->samples * qoa.desc->channels;
+			len = qoa.desc.samples * qoa.desc.channels;
 			break;
 	}

@@ -326,8 +323,7 @@ int AudioStreamPlaybackWAV::mix(AudioFrame *p_buffer, float p_rate_scale, int p_

 	/* audio data */

-	uint8_t *dataptr = (uint8_t *)base->data;
-	const void *data = dataptr + AudioStreamWAV::DATA_PAD;
+	const uint8_t *data = base->data.ptr() + AudioStreamWAV::DATA_PAD;
 	AudioFrame *dst_buff = p_buffer;

 	if (format == AudioStreamWAV::FORMAT_IMA_ADPCM) {
@@ -488,15 +484,7 @@ void AudioStreamPlaybackWAV::set_sample_playback(const Ref<AudioSamplePlayback>

 AudioStreamPlaybackWAV::AudioStreamPlaybackWAV() {}

-AudioStreamPlaybackWAV::~AudioStreamPlaybackWAV() {
-	if (qoa.desc) {
-		memfree(qoa.desc);
-	}
-
-	if (qoa.dec) {
-		memfree(qoa.dec);
-	}
-}
+AudioStreamPlaybackWAV::~AudioStreamPlaybackWAV() {}

 /////////////////////

@@ -562,9 +550,10 @@ double AudioStreamWAV::get_length() const {
 			len *= 2;
 			break;
 		case AudioStreamWAV::FORMAT_QOA:
-			qoa_desc desc = { 0, 0, 0, { { { 0 }, { 0 } } } };
-			qoa_decode_header((uint8_t *)data + DATA_PAD, data_bytes, &desc);
+			qoa_desc desc = {};
+			qoa_decode_header(data.ptr() + DATA_PAD, data_bytes, &desc);
 			len = desc.samples * desc.channels;
 			break;
 	}

 	if (stereo) {
@@ -580,22 +569,16 @@ bool AudioStreamWAV::is_monophonic() const {

 void AudioStreamWAV::set_data(const Vector<uint8_t> &p_data) {
 	AudioServer::get_singleton()->lock();
-	if (data) {
-		memfree(data);
-		data = nullptr;
-		data_bytes = 0;
-	}

-	int datalen = p_data.size();
-	if (datalen) {
-		const uint8_t *r = p_data.ptr();
-		int alloc_len = datalen + DATA_PAD * 2;
-		data = memalloc(alloc_len); //alloc with some padding for interpolation
-		memset(data, 0, alloc_len);
-		uint8_t *dataptr = (uint8_t *)data;
-		memcpy(dataptr + DATA_PAD, r, datalen);
-		data_bytes = datalen;
-	}
+	int src_data_len = p_data.size();
+
+	data.clear();
+
+	int alloc_len = src_data_len + DATA_PAD * 2;
+	data.resize(alloc_len);
+	memset(data.ptr(), 0, alloc_len);
+	memcpy(data.ptr() + DATA_PAD, p_data.ptr(), src_data_len);
+	data_bytes = src_data_len;

 	AudioServer::get_singleton()->unlock();
 }
@@ -603,13 +586,9 @@ void AudioStreamWAV::set_data(const Vector<uint8_t> &p_data) {
 Vector<uint8_t> AudioStreamWAV::get_data() const {
 	Vector<uint8_t> pv;

-	if (data) {
+	if (data_bytes) {
 		pv.resize(data_bytes);
-		{
-			uint8_t *w = pv.ptrw();
-			uint8_t *dataptr = (uint8_t *)data;
-			memcpy(w, dataptr + DATA_PAD, data_bytes);
-		}
+		memcpy(pv.ptrw(), data.ptr() + DATA_PAD, data_bytes);
 	}

 	return pv;
@@ -647,7 +626,7 @@ Error AudioStreamWAV::save_to_wav(const String &p_path) {
 	}

 	String file_path = p_path;
-	if (!(file_path.substr(file_path.length() - 4, 4) == ".wav")) {
+	if (file_path.substr(file_path.length() - 4, 4).to_lower() != ".wav") {
 		file_path += ".wav";
 	}

@@ -701,13 +680,12 @@ Ref<AudioStreamPlayback> AudioStreamWAV::instantiate_playback() {
 	sample->base = Ref<AudioStreamWAV>(this);

 	if (format == AudioStreamWAV::FORMAT_QOA) {
-		sample->qoa.desc = (qoa_desc *)memalloc(sizeof(qoa_desc));
-		uint32_t ffp = qoa_decode_header((uint8_t *)data + DATA_PAD, data_bytes, sample->qoa.desc);
+		uint32_t ffp = qoa_decode_header(data.ptr() + DATA_PAD, data_bytes, &sample->qoa.desc);
 		ERR_FAIL_COND_V(ffp != 8, Ref<AudioStreamPlaybackWAV>());
-		sample->qoa.frame_len = qoa_max_frame_size(sample->qoa.desc);
-		int samples_len = (sample->qoa.desc->samples > QOA_FRAME_LEN ? QOA_FRAME_LEN : sample->qoa.desc->samples);
-		int alloc_len = sample->qoa.desc->channels * samples_len * sizeof(int16_t);
-		sample->qoa.dec = (int16_t *)memalloc(alloc_len);
+		sample->qoa.frame_len = qoa_max_frame_size(&sample->qoa.desc);
+		int samples_len = (sample->qoa.desc.samples > QOA_FRAME_LEN ? QOA_FRAME_LEN : sample->qoa.desc.samples);
+		int dec_len = sample->qoa.desc.channels * samples_len;
+		sample->qoa.dec.resize(dec_len);
 	}

 	return sample;
@@ -789,10 +767,4 @@ void AudioStreamWAV::_bind_methods() {

 AudioStreamWAV::AudioStreamWAV() {}

-AudioStreamWAV::~AudioStreamWAV() {
-	if (data) {
-		memfree(data);
-		data = nullptr;
-		data_bytes = 0;
-	}
-}
+AudioStreamWAV::~AudioStreamWAV() {}
@@ -61,10 +61,10 @@ class AudioStreamPlaybackWAV : public AudioStreamPlayback {
 	} ima_adpcm[2];

 	struct QOA_State {
-		qoa_desc *desc = nullptr;
+		qoa_desc desc = {};
 		uint32_t data_ofs = 0;
 		uint32_t frame_len = 0;
-		int16_t *dec = nullptr;
+		LocalVector<int16_t> dec;
 		uint32_t dec_len = 0;
 		int64_t cache_pos = -1;
 		int16_t cache[2] = { 0, 0 };
@@ -139,7 +139,7 @@ private:
 	int loop_begin = 0;
 	int loop_end = 0;
 	int mix_rate = 44100;
-	void *data = nullptr;
+	LocalVector<uint8_t> data;
 	uint32_t data_bytes = 0;

 protected:
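These two header changes are what let both destructors in the .cpp diff above collapse to {}: qoa_desc becomes a plain value member and the buffers become LocalVector, so nothing needs a manual memfree() any more. The before/after ownership in miniature, with std::vector standing in for LocalVector and new/delete for the engine's memalloc/memfree:

    #include <cstdint>
    #include <vector>

    struct qoa_desc_t { // Stand-in for the real qoa_desc.
        uint32_t channels = 0;
        uint32_t samplerate = 0;
        uint32_t samples = 0;
    };

    // Before: raw owning pointers forced a hand-written destructor.
    struct QOAStateOld {
        qoa_desc_t *desc = nullptr;
        int16_t *dec = nullptr;
        ~QOAStateOld() {
            delete desc;
            delete[] dec;
        }
    };

    // After: value semantics; the implicitly generated destructor is correct.
    struct QOAStateNew {
        qoa_desc_t desc = {};
        std::vector<int16_t> dec;
    };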
@@ -288,6 +288,11 @@ void SMBPitchShift::smbFft(float *fftBuffer, long fftFrameSize, long sign)
 /* clang-format on */

 void AudioEffectPitchShiftInstance::process(const AudioFrame *p_src_frames, AudioFrame *p_dst_frames, int p_frame_count) {
+	// Avoid distortion by skipping processing if pitch_scale is 1.0.
+	if (Math::is_equal_approx(base->pitch_scale, 1.0f)) {
+		return;
+	}
+
 	float sample_rate = AudioServer::get_singleton()->get_mix_rate();

 	float *in_l = (float *)p_src_frames;
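Note the guard compares with Math::is_equal_approx rather than ==, since a pitch scale that round-tripped through the inspector or serialization may differ from 1.0 by a few ULPs. A tolerant comparison in the spirit of that helper (simplified; Godot's version also scales the tolerance with the magnitude of the operands):

    #include <cmath>

    // Sketch: absolute-epsilon float comparison.
    bool is_equal_approx(float p_a, float p_b, float p_epsilon = 1e-5f) {
        return std::fabs(p_a - p_b) < p_epsilon;
    }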
@@ -373,10 +373,14 @@ void AudioServer::_mix_step() {
 			bus->soloed = false;
 		}
 	}
+	// This is legacy code from 3.x that allows video players and other audio sources that do not implement AudioStreamPlayback to output audio.
 	for (CallbackItem *ci : mix_callback_list) {
 		ci->callback(ci->userdata);
 	}

+	// Main mixing loop for audio streams.
+	// The basic idea here is to copy the samples returned by the AudioStreamPlayback's mix function into the audio buffers,
+	// while always maintaining a lookahead buffer of size LOOKAHEAD_BUFFER_SIZE to allow fade-outs for sudden stoppages.
 	for (AudioStreamPlaybackListNode *playback : playback_list) {
 		// Paused streams are no-ops. Don't even mix audio from the stream playback.
 		if (playback->state.load() == AudioStreamPlaybackListNode::PAUSED) {
@@ -387,22 +391,26 @@ void AudioServer::_mix_step() {
 			continue;
 		}

+		// If `fading_out` is true, we're in the process of fading out the stream playback.
+		// TODO: Currently this sets the volume of the stream to 0 which creates a linear interpolation between its previous volume and silence.
+		// A more punchy option for fading out could be to just use the lookahead buffer.
 		bool fading_out = playback->state.load() == AudioStreamPlaybackListNode::FADE_OUT_TO_DELETION || playback->state.load() == AudioStreamPlaybackListNode::FADE_OUT_TO_PAUSE;

 		AudioFrame *buf = mix_buffer.ptrw();

-		// Copy the lookeahead buffer into the mix buffer.
+		// Copy the old contents of the lookahead buffer into the beginning of the mix buffer.
 		for (int i = 0; i < LOOKAHEAD_BUFFER_SIZE; i++) {
 			buf[i] = playback->lookahead[i];
 		}

-		// Mix the audio stream
+		// Mix the audio stream.
 		unsigned int mixed_frames = playback->stream_playback->mix(&buf[LOOKAHEAD_BUFFER_SIZE], playback->pitch_scale.get(), buffer_size);

 		if (tag_used_audio_streams && playback->stream_playback->is_playing()) {
 			playback->stream_playback->tag_used_streams();
 		}

+		// Check to see if the stream has run out of samples.
 		if (mixed_frames != buffer_size) {
 			// We know we have at least the size of our lookahead buffer for fade-out purposes.
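The scheme those comments describe, in code form: every step mixes buffer_size fresh frames after the frames banked last time, forwards the first buffer_size frames, and banks the tail again, so a stream that stops abruptly still has LOOKAHEAD_BUFFER_SIZE frames available to fade over. A toy single-channel version (sizes and names illustrative):

    #include <cstring>

    constexpr int BUFFER_SIZE = 512;
    constexpr int LOOKAHEAD = 32;

    static float lookahead[LOOKAHEAD] = {};
    static float mix_buffer[LOOKAHEAD + BUFFER_SIZE] = {};

    // Stub stream: silence. A real stream writes p_frames samples into p_dst.
    void stream_mix(float *p_dst, int p_frames) {
        memset(p_dst, 0, p_frames * sizeof(float));
    }

    void mix_step() {
        // Start from the frames banked by the previous step.
        memcpy(mix_buffer, lookahead, sizeof(lookahead));
        // Mix a full buffer of fresh frames after them.
        stream_mix(&mix_buffer[LOOKAHEAD], BUFFER_SIZE);
        // Bank the tail: if the stream stops cold next step,
        // these frames are what the fade-out is applied to.
        memcpy(lookahead, &mix_buffer[BUFFER_SIZE], sizeof(lookahead));
        // mix_buffer[0..BUFFER_SIZE) now goes onward to the buses.
    }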
@@ -418,42 +426,52 @@ void AudioServer::_mix_step() {
 				new_state = AudioStreamPlaybackListNode::AWAITING_DELETION;
 				playback->state.store(new_state);
 			} else {
-				// Move the last little bit of what we just mixed into our lookahead buffer.
+				// Move the last little bit of what we just mixed into our lookahead buffer for the next call to _mix_step.
 				for (int i = 0; i < LOOKAHEAD_BUFFER_SIZE; i++) {
 					playback->lookahead[i] = buf[buffer_size + i];
 				}
 			}

-			AudioStreamPlaybackBusDetails *ptr = playback->bus_details.load();
-			ERR_FAIL_NULL(ptr);
-			// By putting null into the bus details pointers, we're taking ownership of their memory for the duration of this mix.
-			AudioStreamPlaybackBusDetails bus_details = *ptr;
+			// Get the bus details for this playback. This contains information about which buses the playback is assigned to and the volume of the playback on each bus.
+			AudioStreamPlaybackBusDetails *bus_details_ptr = playback->bus_details.load();
+			ERR_FAIL_NULL(bus_details_ptr);
+			// Make a copy of the bus details so we can modify it without worrying about other threads.
+			AudioStreamPlaybackBusDetails bus_details = *bus_details_ptr;

 			// Mix to any active buses.
 			for (int idx = 0; idx < MAX_BUSES_PER_PLAYBACK; idx++) {
 				if (!bus_details.bus_active[idx]) {
 					continue;
 				}
+				// This is the AudioServer-internal index of the bus we're mixing to in this step of the loop. Not to be confused with `idx` which is an index into `AudioStreamPlaybackBusDetails` member var arrays.
 				int bus_idx = thread_find_bus_index(bus_details.bus[idx]);

+				// It's important to know whether or not this bus was active in the previous mix step of this stream. If it was, we need to perform volume interpolation to avoid pops.
 				int prev_bus_idx = -1;
 				for (int search_idx = 0; search_idx < MAX_BUSES_PER_PLAYBACK; search_idx++) {
 					if (!playback->prev_bus_details->bus_active[search_idx]) {
 						continue;
 					}
+					// If the StringNames of the buses match, we've found the previous bus index. This indicates that this playback mixed to `prev_bus_details->bus[prev_bus_index]` in the previous mix step, which gives us a way to look up the playback's previous volume.
 					if (playback->prev_bus_details->bus[search_idx].hash() == bus_details.bus[idx].hash()) {
 						prev_bus_idx = search_idx;
 						break;
 					}
 				}

+				// It's now time to mix to the bus. We do this by going through each channel of the bus and mixing to it.
+				// The channels correspond to output channels of the audio device, e.g. stereo or 5.1. To reduce needless nesting, this is done with a helper method named `_mix_step_for_channel`.
 				for (int channel_idx = 0; channel_idx < channel_count; channel_idx++) {
 					AudioFrame *channel_buf = thread_get_channel_mix_buffer(bus_idx, channel_idx);
+					// TODO: This `fading_out` check could be replaced with an exponential fadeout of the samples from the lookahead buffer for more punchy results.
 					if (fading_out) {
 						bus_details.volume[idx][channel_idx] = AudioFrame(0, 0);
 					}
 					AudioFrame channel_vol = bus_details.volume[idx][channel_idx];

-					AudioFrame prev_channel_vol = AudioFrame(0, 0);
+					// If this bus was not active in the previous mix step, we want to start playback at the full volume to avoid crushing transients.
+					AudioFrame prev_channel_vol = channel_vol;
+					// If this bus was active in the previous mix step, we need to interpolate between the previous volume and the current volume to avoid pops. Set `prev_channel_volume` accordingly.
 					if (prev_bus_idx != -1) {
 						prev_channel_vol = playback->prev_bus_details->volume[prev_bus_idx][channel_idx];
 					}
@@ -482,7 +500,7 @@ void AudioServer::_mix_step() {
 			for (int channel_idx = 0; channel_idx < channel_count; channel_idx++) {
 				AudioFrame *channel_buf = thread_get_channel_mix_buffer(bus_idx, channel_idx);
 				AudioFrame prev_channel_vol = playback->prev_bus_details->volume[idx][channel_idx];
-				// Fade out to silence
+				// Fade out to silence. This could be replaced with an exponential fadeout of the samples from the lookahead buffer for more punchy results.
 				_mix_step_for_channel(channel_buf, buf, prev_channel_vol, AudioFrame(0, 0), playback->attenuation_filter_cutoff_hz.get(), playback->highshelf_gain.get(), &playback->filter_process[channel_idx * 2], &playback->filter_process[channel_idx * 2 + 1]);
 			}
 		}
@@ -503,15 +521,12 @@ void AudioServer::_mix_step() {
 		switch (playback->state.load()) {
 			case AudioStreamPlaybackListNode::AWAITING_DELETION:
 			case AudioStreamPlaybackListNode::FADE_OUT_TO_DELETION:
+				// Remove the playback from the list.
 				_delete_stream_playback_list_node(playback);
 				break;
 			case AudioStreamPlaybackListNode::FADE_OUT_TO_PAUSE: {
-				AudioStreamPlaybackListNode::PlaybackState old_state, new_state;
-				do {
-					old_state = playback->state.load();
-					new_state = AudioStreamPlaybackListNode::PAUSED;
-				} while (!playback->state.compare_exchange_strong(/* expected= */ old_state, new_state));
+				// Pause the stream.
+				playback->state.store(AudioStreamPlaybackListNode::PAUSED);
 			} break;
 			case AudioStreamPlaybackListNode::PLAYING:
 			case AudioStreamPlaybackListNode::PAUSED:
@@ -520,13 +535,13 @@ void AudioServer::_mix_step() {
 		}
 	}

+	// Now that all of the buses have their audio sources mixed into them, we can process the effects and bus sends.
 	for (int i = buses.size() - 1; i >= 0; i--) {
-		//go bus by bus
 		Bus *bus = buses[i];

 		for (int k = 0; k < bus->channels.size(); k++) {
 			if (bus->channels[k].active && !bus->channels[k].used) {
-				//buffer was not used, but it's still active, so it must be cleaned
+				// Buffer was not used, but it's still active, so it must be cleaned.
 				AudioFrame *buf = bus->channels.write[k].buffer.ptrw();

 				for (uint32_t j = 0; j < buffer_size; j++) {
@@ -535,7 +550,7 @@ void AudioServer::_mix_step() {
 			}
 		}

-		//process effects
+		// Process effects.
 		if (!bus->bypass) {
 			for (int j = 0; j < bus->effects.size(); j++) {
 				if (!bus->effects[j].enabled) {
@@ -553,7 +568,7 @@ void AudioServer::_mix_step() {
 					bus->channels.write[k].effect_instances.write[j]->process(bus->channels[k].buffer.ptr(), temp_buffer.write[k].ptrw(), buffer_size);
 				}

-				//swap buffers, so internal buffer always has the right data
+				// Swap buffers, so internal buffer always has the right data.
 				for (int k = 0; k < bus->channels.size(); k++) {
 					if (!(buses[i]->channels[k].active || bus->channels[k].effect_instances[j]->process_silence())) {
 						continue;
@@ -567,17 +582,17 @@ void AudioServer::_mix_step() {
 			}
 		}

-		//process send
+		// Process send.

 		Bus *send = nullptr;

 		if (i > 0) {
-			//everything has a send save for master bus
+			// Everything has a send except for the master bus.
 			if (!bus_map.has(bus->send)) {
 				send = buses[0];
 			} else {
 				send = bus_map[bus->send];
-				if (send->index_cache >= bus->index_cache) { //invalid, send to master
+				if (send->index_cache >= bus->index_cache) { // Invalid, send to master.
 					send = buses[0];
 				}
 			}
@@ -605,7 +620,7 @@ void AudioServer::_mix_step() {
 			}
 		}

-		//apply volume and compute peak
+		// Apply volume and compute peak.
 		for (uint32_t j = 0; j < buffer_size; j++) {
 			buf[j] *= volume;

@@ -622,7 +637,7 @@ void AudioServer::_mix_step() {
 			bus->channels.write[k].peak_volume = AudioFrame(Math::linear_to_db(peak.left + AUDIO_PEAK_OFFSET), Math::linear_to_db(peak.right + AUDIO_PEAK_OFFSET));

 			if (!bus->channels[k].used) {
-				//see if any audio is contained, because channel was not used
+				// See if any audio is contained, because channel was not used.

 				if (MAX(peak.right, peak.left) > Math::db_to_linear(channel_disable_threshold_db)) {
 					bus->channels.write[k].last_mix_with_audio = mix_frames;
@@ -633,7 +648,7 @@ void AudioServer::_mix_step() {
 			}

 			if (send) {
-				//if not master bus, send
+				// If not master bus, send.
 				AudioFrame *target_buf = thread_get_channel_mix_buffer(send->index_cache, k);

 				for (uint32_t j = 0; j < buffer_size; j++) {
@@ -648,6 +663,7 @@
 }

 void AudioServer::_mix_step_for_channel(AudioFrame *p_out_buf, AudioFrame *p_source_buf, AudioFrame p_vol_start, AudioFrame p_vol_final, float p_attenuation_filter_cutoff_hz, float p_highshelf_gain, AudioFilterSW::Processor *p_processor_l, AudioFilterSW::Processor *p_processor_r) {
+	// TODO: In the future it could be nice to replace all of these hardcoded effects with something a bit cleaner and more flexible, but for now this is what we do to support 3D audio players.
 	if (p_highshelf_gain != 0) {
 		AudioFilterSW filter;
 		filter.set_mode(AudioFilterSW::HIGHSHELF);
@@ -667,7 +683,7 @@ void AudioServer::_mix_step_for_channel(AudioFrame *p_out_buf, AudioFrame *p_sou
 		p_processor_r->update_coeffs(buffer_size);

 		for (unsigned int frame_idx = 0; frame_idx < buffer_size; frame_idx++) {
-			// Make this buffer size invariant if buffer_size ever becomes a project setting.
+			// TODO: Make lerp speed buffer-size-invariant if buffer_size ever becomes a project setting to avoid very small buffer sizes causing pops due to too-fast lerps.
 			float lerp_param = (float)frame_idx / buffer_size;
 			AudioFrame vol = p_vol_final * lerp_param + (1 - lerp_param) * p_vol_start;
 			AudioFrame mixed = vol * p_source_buf[frame_idx];
@@ -678,7 +694,7 @@ void AudioServer::_mix_step_for_channel(AudioFrame *p_out_buf, AudioFrame *p_sou

 	} else {
 		for (unsigned int frame_idx = 0; frame_idx < buffer_size; frame_idx++) {
-			// Make this buffer size invariant if buffer_size ever becomes a project setting.
+			// TODO: Make lerp speed buffer-size-invariant if buffer_size ever becomes a project setting to avoid very small buffer sizes causing pops due to too-fast lerps.
 			float lerp_param = (float)frame_idx / buffer_size;
 			p_out_buf[frame_idx] += (p_vol_final * lerp_param + (1 - lerp_param) * p_vol_start) * p_source_buf[frame_idx];
 		}
@@ -703,6 +719,7 @@ void AudioServer::_delete_stream_playback(Ref<AudioStreamPlayback> p_playback) {
 }

 void AudioServer::_delete_stream_playback_list_node(AudioStreamPlaybackListNode *p_playback_node) {
+	// Remove the playback from the list, registering a destructor to be run on the main thread.
 	playback_list.erase(p_playback_node, [](AudioStreamPlaybackListNode *p) {
 		delete p->prev_bus_details;
 		delete p->bus_details.load();
@@ -1467,7 +1484,9 @@ void AudioServer::init_channels_and_buffers() {
 void AudioServer::init() {
 	channel_disable_threshold_db = GLOBAL_DEF_RST("audio/buses/channel_disable_threshold_db", -60.0);
 	channel_disable_frames = float(GLOBAL_DEF_RST(PropertyInfo(Variant::FLOAT, "audio/buses/channel_disable_time", PROPERTY_HINT_RANGE, "0,5,0.01,or_greater"), 2.0)) * get_mix_rate();
-	buffer_size = 512; //hardcoded for now
+	// TODO: Buffer size is hardcoded for now. This would be really nice to have as a project setting because currently it limits audio latency to an absolute minimum of 11ms with default mix rate, but there's some additional work required to make that happen. See TODOs in `_mix_step_for_channel`.
+	// When this becomes a project setting, it should be specified in milliseconds rather than raw sample count, because 512 samples at 192khz is shorter than it is at 48khz, for example.
+	buffer_size = 512;

 	init_channels_and_buffers();

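The "11ms" in the new comment is just the hardcoded buffer divided by the default mix rate, and the 192khz remark follows from the same arithmetic:

    #include <cstdio>

    int main() {
        const int buffer_frames = 512;
        const int mix_rates[] = { 44100, 48000, 192000 };
        for (int rate : mix_rates) {
            // Latency of one buffer, in milliseconds.
            printf("%6d Hz -> %.1f ms\n", rate, 1000.0 * buffer_frames / rate);
        }
        return 0;
    }
    // Output: 44100 Hz -> 11.6 ms, 48000 Hz -> 10.7 ms, 192000 Hz -> 2.7 ms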
@@ -272,6 +272,14 @@ private:
 	};

 	struct AudioStreamPlaybackListNode {
+		// The state machine for audio stream playbacks is as follows:
+		// 1. The playback is created and added to the playback list in the playing state.
+		// 2. The playback is (maybe) paused, and the state is set to FADE_OUT_TO_PAUSE.
+		// 2.1. The playback is mixed after being paused, and the audio server thread atomically sets the state to PAUSED after performing a brief fade-out.
+		// 3. The playback is (maybe) deleted, and the state is set to FADE_OUT_TO_DELETION.
+		// 3.1. The playback is mixed after being deleted, and the audio server thread atomically sets the state to AWAITING_DELETION after performing a brief fade-out.
+		// NOTE: The playback is not deallocated at this time because allocation and deallocation are not realtime-safe.
+		// 4. The playback is removed and deallocated on the main thread using the SafeList maybe_cleanup method.
 		enum PlaybackState {
 			PAUSED = 0, // Paused. Keep this stream playback around though so it can be restarted.
 			PLAYING = 1, // Playing. Fading may still be necessary if volume changes!
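A compressed model of the lifecycle those comments document, using a plain atomic the way the server does (state names from the comment; everything else is illustrative):

    #include <atomic>

    enum PlaybackState {
        PAUSED,
        PLAYING,
        FADE_OUT_TO_PAUSE,
        FADE_OUT_TO_DELETION,
        AWAITING_DELETION,
    };

    static std::atomic<PlaybackState> state{ PLAYING };

    // Game thread: only requests transitions.
    void request_pause() { state.store(FADE_OUT_TO_PAUSE); }
    void request_delete() { state.store(FADE_OUT_TO_DELETION); }

    // Audio thread, after mixing the brief fade-out: completes them.
    void finish_fade() {
        switch (state.load()) {
            case FADE_OUT_TO_PAUSE:
                state.store(PAUSED); // Kept around so it can be restarted.
                break;
            case FADE_OUT_TO_DELETION:
                state.store(AWAITING_DELETION); // Freed later on the main thread.
                break;
            default:
                break;
        }
    }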