aboutsummaryrefslogtreecommitdiff
path: root/src/audio.c
diff options
context:
space:
mode:
Diffstat (limited to 'src/audio.c')
-rw-r--r--src/audio.c647
1 files changed, 329 insertions, 318 deletions
diff --git a/src/audio.c b/src/audio.c
index d397b064..3ee1fe81 100644
--- a/src/audio.c
+++ b/src/audio.c
@@ -11,32 +11,40 @@
* - Manage raw audio context
*
* CONFIGURATION:
-*
+*
* #define AUDIO_STANDALONE
* Define to use the module as standalone library (independently of raylib).
* Required types and functions are defined in the same module.
*
+* #define USE_OPENAL_BACKEND
+* Use OpenAL Soft audio backend
+*
* #define SUPPORT_FILEFORMAT_WAV
* #define SUPPORT_FILEFORMAT_OGG
* #define SUPPORT_FILEFORMAT_XM
* #define SUPPORT_FILEFORMAT_MOD
* #define SUPPORT_FILEFORMAT_FLAC
-* Selected desired fileformats to be supported for loading. Some of those formats are
+* Selected desired fileformats to be supported for loading. Some of those formats are
* supported by default, to remove support, just comment unrequired #define in this module
*
-* LIMITATIONS:
+* LIMITATIONS (only OpenAL Soft):
* Only up to two channels supported: MONO and STEREO (for additional channels, use AL_EXT_MCFORMATS)
* Only the following sample sizes supported: 8bit PCM, 16bit PCM, 32-bit float PCM (using AL_EXT_FLOAT32)
*
* DEPENDENCIES:
-* OpenAL Soft - Audio device management (http://kcat.strangesoft.net/openal.html)
+* mini_al - Audio device/context management (https://github.com/dr-soft/mini_al)
* stb_vorbis - OGG audio files loading (http://www.nothings.org/stb_vorbis/)
* jar_xm - XM module file loading
* jar_mod - MOD audio file loading
* dr_flac - FLAC audio file loading
*
+* *OpenAL Soft - Audio device management, still used on HTML5 and OSX platforms
+*
* CONTRIBUTORS:
-* Joshua Reisenauer (github: @kd7tck):
+* David Reid (github: @mackron) (Nov. 2017):
+* - Complete port to mini_al library
+*
+* Joshua Reisenauer (github: @kd7tck) (2015):
* - XM audio module support (jar_xm)
* - MOD audio module support (jar_mod)
* - Mixing channels support
@@ -45,7 +53,7 @@
*
* LICENSE: zlib/libpng
*
-* Copyright (c) 2014-2017 Ramon Santamaria (@raysan5)
+* Copyright (c) 2014-2018 Ramon Santamaria (@raysan5)
*
* This software is provided "as-is", without any express or implied warranty. In no event
* will the authors be held liable for any damages arising from the use of this software.
@@ -64,16 +72,10 @@
*
**********************************************************************************************/
-// Default configuration flags (supported features)
-//-------------------------------------------------
-#define SUPPORT_FILEFORMAT_WAV
-#define SUPPORT_FILEFORMAT_OGG
-#define SUPPORT_FILEFORMAT_XM
-#define SUPPORT_FILEFORMAT_MOD
-//-------------------------------------------------
+#include "config.h"
-#ifndef USE_MINI_AL
-#define USE_MINI_AL 1 // Set to 1 to use mini_al; 0 to use OpenAL.
+#if !defined(USE_OPENAL_BACKEND)
+ #define USE_MINI_AL 1 // Set to 1 to use mini_al; 0 to use OpenAL.
#endif
#if defined(AUDIO_STANDALONE)
@@ -86,7 +88,7 @@
#include "external/mini_al.h" // Implemented in mini_al.c. Cannot implement this here because it conflicts with Win32 APIs such as CloseWindow(), etc.
-#if !defined(USE_MINI_AL) || USE_MINI_AL == 0
+#if !defined(USE_MINI_AL) || (USE_MINI_AL == 0)
#if defined(__APPLE__)
#include "OpenAL/al.h" // OpenAL basic header
#include "OpenAL/alc.h" // OpenAL context header (like OpenGL, OpenAL requires a context to work)
@@ -153,7 +155,12 @@
// Types and Structures Definition
//----------------------------------------------------------------------------------
-typedef enum { MUSIC_AUDIO_OGG = 0, MUSIC_AUDIO_FLAC, MUSIC_MODULE_XM, MUSIC_MODULE_MOD } MusicContextType;
+typedef enum {
+ MUSIC_AUDIO_OGG = 0,
+ MUSIC_AUDIO_FLAC,
+ MUSIC_MODULE_XM,
+ MUSIC_MODULE_MOD
+} MusicContextType;
// Music type (file streaming from memory)
typedef struct MusicData {
@@ -179,7 +186,13 @@ typedef struct MusicData {
} MusicData;
#if defined(AUDIO_STANDALONE)
-typedef enum { LOG_INFO = 0, LOG_ERROR, LOG_WARNING, LOG_DEBUG, LOG_OTHER } TraceLogType;
+typedef enum {
+ LOG_INFO = 0,
+ LOG_ERROR,
+ LOG_WARNING,
+ LOG_DEBUG,
+ LOG_OTHER
+} TraceLogType;
#endif
//----------------------------------------------------------------------------------
@@ -206,105 +219,78 @@ void TraceLog(int msgType, const char *text, ...); // Show trace lo
#endif
//----------------------------------------------------------------------------------
-// Module Functions Definition - Audio Device initialization and Closing
+// mini_al AudioBuffer Functionality
//----------------------------------------------------------------------------------
#if USE_MINI_AL
+
#define DEVICE_FORMAT mal_format_f32
#define DEVICE_CHANNELS 2
#define DEVICE_SAMPLE_RATE 44100
typedef enum { AUDIO_BUFFER_USAGE_STATIC = 0, AUDIO_BUFFER_USAGE_STREAM } AudioBufferUsage;
+// Audio buffer structure
+// NOTE: Slightly different logic is used when feeding data to the playback device depending on whether or not data is streamed
typedef struct AudioBuffer AudioBuffer;
-struct AudioBuffer
-{
- mal_dsp dsp; // For format conversion.
+struct AudioBuffer {
+ mal_dsp dsp; // Required for format conversion
float volume;
float pitch;
bool playing;
bool paused;
- bool looping; // Always true for AudioStreams.
- AudioBufferUsage usage; // Slightly different logic is used when feeding data to the playback device depending on whether or not data is streamed.
+ bool looping; // Always true for AudioStreams
+ int usage; // AudioBufferUsage type
bool isSubBufferProcessed[2];
unsigned int frameCursorPos;
unsigned int bufferSizeInFrames;
- AudioBuffer* next;
- AudioBuffer* prev;
+ AudioBuffer *next;
+ AudioBuffer *prev;
unsigned char buffer[1];
};
-void StopAudioBuffer(AudioBuffer* audioBuffer);
-
-
+// mini_al global variables
static mal_context context;
static mal_device device;
-static mal_bool32 isAudioInitialized = MAL_FALSE;
-static float masterVolume = 1;
static mal_mutex audioLock;
-static AudioBuffer* firstAudioBuffer = NULL; // Audio buffers are tracked in a linked list.
-static AudioBuffer* lastAudioBuffer = NULL;
-
-static void TrackAudioBuffer(AudioBuffer* audioBuffer)
-{
- mal_mutex_lock(&audioLock);
- {
- if (firstAudioBuffer == NULL) {
- firstAudioBuffer = audioBuffer;
- } else {
- lastAudioBuffer->next = audioBuffer;
- audioBuffer->prev = lastAudioBuffer;
- }
-
- lastAudioBuffer = audioBuffer;
- }
- mal_mutex_unlock(&audioLock);
-}
-
-static void UntrackAudioBuffer(AudioBuffer* audioBuffer)
-{
- mal_mutex_lock(&audioLock);
- {
- if (audioBuffer->prev == NULL) {
- firstAudioBuffer = audioBuffer->next;
- } else {
- audioBuffer->prev->next = audioBuffer->next;
- }
-
- if (audioBuffer->next == NULL) {
- lastAudioBuffer = audioBuffer->prev;
- } else {
- audioBuffer->next->prev = audioBuffer->prev;
- }
-
- audioBuffer->prev = NULL;
- audioBuffer->next = NULL;
- }
- mal_mutex_unlock(&audioLock);
-}
-
-static void OnLog_MAL(mal_context* pContext, mal_device* pDevice, const char* message)
+static bool isAudioInitialized = MAL_FALSE;
+static float masterVolume = 1.0f;
+
+// Audio buffers are tracked in a linked list
+static AudioBuffer *firstAudioBuffer = NULL;
+static AudioBuffer *lastAudioBuffer = NULL;
+
+// mini_al functions declaration
+static void OnLog(mal_context *pContext, mal_device *pDevice, const char *message);
+static mal_uint32 OnSendAudioDataToDevice(mal_device *pDevice, mal_uint32 frameCount, void *pFramesOut);
+static mal_uint32 OnAudioBufferDSPRead(mal_dsp *pDSP, mal_uint32 frameCount, void *pFramesOut, void *pUserData);
+static void MixAudioFrames(float *framesOut, const float *framesIn, mal_uint32 frameCount, float localVolume);
+
+// AudioBuffer management functions declaration
+// NOTE: Those functions are not exposed by raylib... for the moment
+AudioBuffer *CreateAudioBuffer(mal_format format, mal_uint32 channels, mal_uint32 sampleRate, mal_uint32 bufferSizeInFrames, AudioBufferUsage usage);
+void DeleteAudioBuffer(AudioBuffer *audioBuffer);
+bool IsAudioBufferPlaying(AudioBuffer *audioBuffer);
+void PlayAudioBuffer(AudioBuffer *audioBuffer);
+void StopAudioBuffer(AudioBuffer *audioBuffer);
+void PauseAudioBuffer(AudioBuffer *audioBuffer);
+void ResumeAudioBuffer(AudioBuffer *audioBuffer);
+void SetAudioBufferVolume(AudioBuffer *audioBuffer, float volume);
+void SetAudioBufferPitch(AudioBuffer *audioBuffer, float pitch);
+void TrackAudioBuffer(AudioBuffer *audioBuffer);
+void UntrackAudioBuffer(AudioBuffer *audioBuffer);
+
+
+// Log callback function
+static void OnLog(mal_context *pContext, mal_device *pDevice, const char *message)
{
(void)pContext;
(void)pDevice;
- TraceLog(LOG_ERROR, message); // All log messages from mini_al are errors.
-}
-
-// This is the main mixing function. Mixing is pretty simple in this project - it's just an accumulation.
-//
-// framesOut is both an input and an output. It will be initially filled with zeros outside of this function.
-static void MixFrames(float* framesOut, const float* framesIn, mal_uint32 frameCount, float localVolume)
-{
- for (mal_uint32 iFrame = 0; iFrame < frameCount; ++iFrame) {
- for (mal_uint32 iChannel = 0; iChannel < device.channels; ++iChannel) {
- float* frameOut = framesOut + (iFrame * device.channels);
- const float* frameIn = framesIn + (iFrame * device.channels);
-
- frameOut[iChannel] += frameIn[iChannel] * masterVolume * localVolume;
- }
- }
+
+ TraceLog(LOG_ERROR, message); // All log messages from mini_al are errors
}
-static mal_uint32 OnSendAudioDataToDevice(mal_device* pDevice, mal_uint32 frameCount, void* pFramesOut)
+// Sending audio data to device callback function
+static mal_uint32 OnSendAudioDataToDevice(mal_device *pDevice, mal_uint32 frameCount, void *pFramesOut)
{
// This is where all of the mixing takes place.
(void)pDevice;
@@ -316,80 +302,194 @@ static mal_uint32 OnSendAudioDataToDevice(mal_device* pDevice, mal_uint32 frameC
// want to consider how you might want to avoid this.
mal_mutex_lock(&audioLock);
{
- for (AudioBuffer* audioBuffer = firstAudioBuffer; audioBuffer != NULL; audioBuffer = audioBuffer->next)
+ for (AudioBuffer *audioBuffer = firstAudioBuffer; audioBuffer != NULL; audioBuffer = audioBuffer->next)
{
// Ignore stopped or paused sounds.
- if (!audioBuffer->playing || audioBuffer->paused) {
- continue;
- }
+ if (!audioBuffer->playing || audioBuffer->paused) continue;
mal_uint32 framesRead = 0;
- for (;;) {
- if (framesRead > frameCount) {
+ for (;;)
+ {
+ if (framesRead > frameCount)
+ {
TraceLog(LOG_DEBUG, "Mixed too many frames from audio buffer");
break;
}
- if (framesRead == frameCount) {
- break;
- }
+
+ if (framesRead == frameCount) break;
// Just read as much data as we can from the stream.
mal_uint32 framesToRead = (frameCount - framesRead);
- while (framesToRead > 0) {
+ while (framesToRead > 0)
+ {
float tempBuffer[1024]; // 512 frames for stereo.
mal_uint32 framesToReadRightNow = framesToRead;
- if (framesToReadRightNow > sizeof(tempBuffer)/sizeof(tempBuffer[0])/DEVICE_CHANNELS) {
+ if (framesToReadRightNow > sizeof(tempBuffer)/sizeof(tempBuffer[0])/DEVICE_CHANNELS)
+ {
framesToReadRightNow = sizeof(tempBuffer)/sizeof(tempBuffer[0])/DEVICE_CHANNELS;
}
// If we're not looping, we need to make sure we flush the internal buffers of the DSP pipeline to ensure we get the
// last few samples.
- mal_bool32 flushDSP = !audioBuffer->looping;
+ bool flushDSP = !audioBuffer->looping;
mal_uint32 framesJustRead = mal_dsp_read_frames_ex(&audioBuffer->dsp, framesToReadRightNow, tempBuffer, flushDSP);
- if (framesJustRead > 0) {
- float* framesOut = (float*)pFramesOut + (framesRead * device.channels);
- float* framesIn = tempBuffer;
- MixFrames(framesOut, framesIn, framesJustRead, audioBuffer->volume);
+ if (framesJustRead > 0)
+ {
+ float *framesOut = (float *)pFramesOut + (framesRead*device.channels);
+ float *framesIn = tempBuffer;
+ MixAudioFrames(framesOut, framesIn, framesJustRead, audioBuffer->volume);
framesToRead -= framesJustRead;
framesRead += framesJustRead;
}
// If we weren't able to read all the frames we requested, break.
- if (framesJustRead < framesToReadRightNow) {
- if (!audioBuffer->looping) {
+ if (framesJustRead < framesToReadRightNow)
+ {
+ if (!audioBuffer->looping)
+ {
StopAudioBuffer(audioBuffer);
break;
- } else {
- // Should never get here, but just for safety, move the cursor position back to the start and continue the loop.
+ }
+ else
+ {
+ // Should never get here, but just for safety,
+ // move the cursor position back to the start and continue the loop.
audioBuffer->frameCursorPos = 0;
continue;
}
}
}
- // If for some reason we weren't able to read every frame we'll need to break from the loop. Not doing this could
- // theoretically put us into an infinite loop.
- if (framesToRead > 0) {
- break;
- }
+ // If for some reason we weren't able to read every frame we'll need to break from the loop.
+ // Not doing this could theoretically put us into an infinite loop.
+ if (framesToRead > 0) break;
}
}
}
+
mal_mutex_unlock(&audioLock);
return frameCount; // We always output the same number of frames that were originally requested.
}
+
+// DSP read from audio buffer callback function
+static mal_uint32 OnAudioBufferDSPRead(mal_dsp *pDSP, mal_uint32 frameCount, void *pFramesOut, void *pUserData)
+{
+ AudioBuffer *audioBuffer = (AudioBuffer *)pUserData;
+
+ mal_uint32 subBufferSizeInFrames = audioBuffer->bufferSizeInFrames/2;
+ mal_uint32 currentSubBufferIndex = audioBuffer->frameCursorPos/subBufferSizeInFrames;
+
+ if (currentSubBufferIndex > 1)
+ {
+ TraceLog(LOG_DEBUG, "Frame cursor position moved too far forward in audio stream");
+ return 0;
+ }
+
+ // Another thread can update the processed state of buffers so we just take a copy here to try and avoid potential synchronization problems.
+ bool isSubBufferProcessed[2];
+ isSubBufferProcessed[0] = audioBuffer->isSubBufferProcessed[0];
+ isSubBufferProcessed[1] = audioBuffer->isSubBufferProcessed[1];
+
+ mal_uint32 frameSizeInBytes = mal_get_sample_size_in_bytes(audioBuffer->dsp.config.formatIn)*audioBuffer->dsp.config.channelsIn;
+
+ // Fill out every frame until we find a buffer that's marked as processed. Then fill the remainder with 0.
+ mal_uint32 framesRead = 0;
+ for (;;)
+ {
+ // We break from this loop differently depending on the buffer's usage. For static buffers, we simply fill as much data as we can. For
+ // streaming buffers we only fill the halves of the buffer that are processed. Unprocessed halves must keep their audio data intact.
+ if (audioBuffer->usage == AUDIO_BUFFER_USAGE_STATIC)
+ {
+ if (framesRead >= frameCount) break;
+ }
+ else
+ {
+ if (isSubBufferProcessed[currentSubBufferIndex]) break;
+ }
+
+ mal_uint32 totalFramesRemaining = (frameCount - framesRead);
+ if (totalFramesRemaining == 0) break;
+
+ mal_uint32 framesRemainingInOutputBuffer;
+ if (audioBuffer->usage == AUDIO_BUFFER_USAGE_STATIC)
+ {
+ framesRemainingInOutputBuffer = audioBuffer->bufferSizeInFrames - audioBuffer->frameCursorPos;
+ }
+ else
+ {
+ mal_uint32 firstFrameIndexOfThisSubBuffer = subBufferSizeInFrames * currentSubBufferIndex;
+ framesRemainingInOutputBuffer = subBufferSizeInFrames - (audioBuffer->frameCursorPos - firstFrameIndexOfThisSubBuffer);
+ }
+
+ mal_uint32 framesToRead = totalFramesRemaining;
+ if (framesToRead > framesRemainingInOutputBuffer) framesToRead = framesRemainingInOutputBuffer;
+
+ memcpy((unsigned char *)pFramesOut + (framesRead*frameSizeInBytes), audioBuffer->buffer + (audioBuffer->frameCursorPos*frameSizeInBytes), framesToRead*frameSizeInBytes);
+ audioBuffer->frameCursorPos = (audioBuffer->frameCursorPos + framesToRead) % audioBuffer->bufferSizeInFrames;
+ framesRead += framesToRead;
+
+ // If we've read to the end of the buffer, mark it as processed.
+ if (framesToRead == framesRemainingInOutputBuffer)
+ {
+ audioBuffer->isSubBufferProcessed[currentSubBufferIndex] = true;
+ isSubBufferProcessed[currentSubBufferIndex] = true;
+
+ currentSubBufferIndex = (currentSubBufferIndex + 1)%2;
+
+ // We need to break from this loop if we're not looping.
+ if (!audioBuffer->looping)
+ {
+ StopAudioBuffer(audioBuffer);
+ break;
+ }
+ }
+ }
+
+ // Zero-fill excess.
+ mal_uint32 totalFramesRemaining = (frameCount - framesRead);
+ if (totalFramesRemaining > 0)
+ {
+ memset((unsigned char*)pFramesOut + (framesRead*frameSizeInBytes), 0, totalFramesRemaining*frameSizeInBytes);
+
+ // For static buffers we can fill the remaining frames with silence for safety, but we don't want
+ // to report those frames as "read". The reason for this is that the caller uses the return value
+ // to know whether or not a non-looping sound has finished playback.
+ if (audioBuffer->usage != AUDIO_BUFFER_USAGE_STATIC) framesRead += totalFramesRemaining;
+ }
+
+ return framesRead;
+}
+
+// This is the main mixing function. Mixing is pretty simple in this project - it's just an accumulation.
+// NOTE: framesOut is both an input and an output. It will be initially filled with zeros outside of this function.
+static void MixAudioFrames(float *framesOut, const float *framesIn, mal_uint32 frameCount, float localVolume)
+{
+ for (mal_uint32 iFrame = 0; iFrame < frameCount; ++iFrame)
+ {
+ for (mal_uint32 iChannel = 0; iChannel < device.channels; ++iChannel)
+ {
+ float *frameOut = framesOut + (iFrame*device.channels);
+ const float *frameIn = framesIn + (iFrame*device.channels);
+
+ frameOut[iChannel] += frameIn[iChannel]*masterVolume*localVolume;
+ }
+ }
+}
#endif
+//----------------------------------------------------------------------------------
+// Module Functions Definition - Audio Device initialization and Closing
+//----------------------------------------------------------------------------------
// Initialize audio device
void InitAudioDevice(void)
{
#if USE_MINI_AL
// Context.
- mal_context_config contextConfig = mal_context_config_init(OnLog_MAL);
+ mal_context_config contextConfig = mal_context_config_init(OnLog);
mal_result result = mal_context_init(NULL, 0, &contextConfig, &context);
if (result != MAL_SUCCESS)
{
@@ -400,12 +500,6 @@ void InitAudioDevice(void)
// Device. Using the default device. Format is floating point because it simplifies mixing.
mal_device_config deviceConfig = mal_device_config_init(DEVICE_FORMAT, DEVICE_CHANNELS, DEVICE_SAMPLE_RATE, NULL, OnSendAudioDataToDevice);
- // Special case for PLATFORM_RPI.
-//#if defined(PLATFORM_RPI)
-// deviceConfig.alsa.noMMap = MAL_TRUE;
-// deviceConfig.bufferSizeInFrames = 2048;
-//#endif
-
result = mal_device_init(&context, mal_device_type_playback, NULL, &deviceConfig, NULL, &device);
if (result != MAL_SUCCESS)
{
@@ -471,11 +565,8 @@ void InitAudioDevice(void)
alListenerf(AL_GAIN, 1.0f);
- if (alIsExtensionPresent("AL_EXT_float32")) {
- TraceLog(LOG_INFO, "AL_EXT_float32 supported");
- } else {
- TraceLog(LOG_INFO, "AL_EXT_float32 not supported");
- }
+ if (alIsExtensionPresent("AL_EXT_float32")) TraceLog(LOG_INFO, "[EXTENSION] AL_EXT_float32 supported");
+ else TraceLog(LOG_INFO, "[EXTENSION] AL_EXT_float32 not supported");
}
}
#endif
@@ -485,7 +576,8 @@ void InitAudioDevice(void)
void CloseAudioDevice(void)
{
#if USE_MINI_AL
- if (!isAudioInitialized) {
+ if (!isAudioInitialized)
+ {
TraceLog(LOG_WARNING, "Could not close audio device because it is not currently initialized");
return;
}
@@ -535,110 +627,20 @@ void SetMasterVolume(float volume)
else if (volume > 1.0f) volume = 1.0f;
#if USE_MINI_AL
- masterVolume = 1;
+ masterVolume = volume;
#else
alListenerf(AL_GAIN, volume);
#endif
}
-
//----------------------------------------------------------------------------------
-// Audio Buffer
+// Module Functions Definition - Audio Buffer management
//----------------------------------------------------------------------------------
#if USE_MINI_AL
-static mal_uint32 AudioBuffer_OnDSPRead(mal_dsp* pDSP, mal_uint32 frameCount, void* pFramesOut, void* pUserData)
+// Create a new audio buffer. Initially filled with silence
+AudioBuffer *CreateAudioBuffer(mal_format format, mal_uint32 channels, mal_uint32 sampleRate, mal_uint32 bufferSizeInFrames, AudioBufferUsage usage)
{
- AudioBuffer* audioBuffer = (AudioBuffer*)pUserData;
-
- mal_uint32 subBufferSizeInFrames = audioBuffer->bufferSizeInFrames / 2;
- mal_uint32 currentSubBufferIndex = audioBuffer->frameCursorPos / subBufferSizeInFrames;
- if (currentSubBufferIndex > 1) {
- TraceLog(LOG_DEBUG, "Frame cursor position moved too far forward in audio stream");
- return 0;
- }
-
- // Another thread can update the processed state of buffers so we just take a copy here to try and avoid potential synchronization problems.
- bool isSubBufferProcessed[2];
- isSubBufferProcessed[0] = audioBuffer->isSubBufferProcessed[0];
- isSubBufferProcessed[1] = audioBuffer->isSubBufferProcessed[1];
-
- mal_uint32 frameSizeInBytes = mal_get_sample_size_in_bytes(audioBuffer->dsp.config.formatIn) * audioBuffer->dsp.config.channelsIn;
-
- // Fill out every frame until we find a buffer that's marked as processed. Then fill the remainder with 0.
- mal_uint32 framesRead = 0;
- for (;;)
- {
- // We break from this loop differently depending on the buffer's usage. For static buffers, we simply fill as much data as we can. For
- // streaming buffers we only fill the halves of the buffer that are processed. Unprocessed halves must keep their audio data in-tact.
- if (audioBuffer->usage == AUDIO_BUFFER_USAGE_STATIC) {
- if (framesRead >= frameCount) {
- break;
- }
- } else {
- if (isSubBufferProcessed[currentSubBufferIndex]) {
- break;
- }
- }
-
- mal_uint32 totalFramesRemaining = (frameCount - framesRead);
- if (totalFramesRemaining == 0) {
- break;
- }
-
- mal_uint32 framesRemainingInOutputBuffer;
- if (audioBuffer->usage == AUDIO_BUFFER_USAGE_STATIC) {
- framesRemainingInOutputBuffer = audioBuffer->bufferSizeInFrames - audioBuffer->frameCursorPos;
- } else {
- mal_uint32 firstFrameIndexOfThisSubBuffer = subBufferSizeInFrames * currentSubBufferIndex;
- framesRemainingInOutputBuffer = subBufferSizeInFrames - (audioBuffer->frameCursorPos - firstFrameIndexOfThisSubBuffer);
- }
-
-
-
- mal_uint32 framesToRead = totalFramesRemaining;
- if (framesToRead > framesRemainingInOutputBuffer) {
- framesToRead = framesRemainingInOutputBuffer;
- }
-
- memcpy((unsigned char*)pFramesOut + (framesRead*frameSizeInBytes), audioBuffer->buffer + (audioBuffer->frameCursorPos*frameSizeInBytes), framesToRead*frameSizeInBytes);
- audioBuffer->frameCursorPos = (audioBuffer->frameCursorPos + framesToRead) % audioBuffer->bufferSizeInFrames;
- framesRead += framesToRead;
-
- // If we've read to the end of the buffer, mark it as processed.
- if (framesToRead == framesRemainingInOutputBuffer) {
- audioBuffer->isSubBufferProcessed[currentSubBufferIndex] = true;
- isSubBufferProcessed[currentSubBufferIndex] = true;
-
- currentSubBufferIndex = (currentSubBufferIndex + 1) % 2;
-
- // We need to break from this loop if we're not looping.
- if (!audioBuffer->looping) {
- StopAudioBuffer(audioBuffer);
- break;
- }
- }
- }
-
- // Zero-fill excess.
- mal_uint32 totalFramesRemaining = (frameCount - framesRead);
- if (totalFramesRemaining > 0) {
- memset((unsigned char*)pFramesOut + (framesRead*frameSizeInBytes), 0, totalFramesRemaining*frameSizeInBytes);
-
- // For static buffers we can fill the remaining frames with silence for safety, but we don't want
- // to report those frames as "read". The reason for this is that the caller uses the return value
- // to know whether or not a non-looping sound has finished playback.
- if (audioBuffer->usage != AUDIO_BUFFER_USAGE_STATIC) {
- framesRead += totalFramesRemaining;
- }
- }
-
- return framesRead;
-}
-
-// Create a new audio buffer. Initially filled with silence.
-AudioBuffer* CreateAudioBuffer(mal_format format, mal_uint32 channels, mal_uint32 sampleRate, mal_uint32 bufferSizeInFrames, AudioBufferUsage usage)
-{
- AudioBuffer* audioBuffer = (AudioBuffer*)calloc(sizeof(*audioBuffer) + (bufferSizeInFrames*channels*mal_get_sample_size_in_bytes(format)), 1);
+ AudioBuffer *audioBuffer = (AudioBuffer *)calloc(sizeof(*audioBuffer) + (bufferSizeInFrames*channels*mal_get_sample_size_in_bytes(format)), 1);
if (audioBuffer == NULL)
{
TraceLog(LOG_ERROR, "CreateAudioBuffer() : Failed to allocate memory for audio buffer");
@@ -654,8 +656,9 @@ AudioBuffer* CreateAudioBuffer(mal_format format, mal_uint32 channels, mal_uint3
dspConfig.channelsOut = DEVICE_CHANNELS;
dspConfig.sampleRateIn = sampleRate;
dspConfig.sampleRateOut = DEVICE_SAMPLE_RATE;
- mal_result resultMAL = mal_dsp_init(&dspConfig, AudioBuffer_OnDSPRead, audioBuffer, &audioBuffer->dsp);
- if (resultMAL != MAL_SUCCESS) {
+ mal_result resultMAL = mal_dsp_init(&dspConfig, OnAudioBufferDSPRead, audioBuffer, &audioBuffer->dsp);
+ if (resultMAL != MAL_SUCCESS)
+ {
TraceLog(LOG_ERROR, "LoadSoundFromWave() : Failed to create data conversion pipeline");
free(audioBuffer);
return NULL;
@@ -679,8 +682,8 @@ AudioBuffer* CreateAudioBuffer(mal_format format, mal_uint32 channels, mal_uint3
return audioBuffer;
}
-// Delete an audio buffer.
-void DeleteAudioBuffer(AudioBuffer* audioBuffer)
+// Delete an audio buffer
+void DeleteAudioBuffer(AudioBuffer *audioBuffer)
{
if (audioBuffer == NULL)
{
@@ -692,8 +695,8 @@ void DeleteAudioBuffer(AudioBuffer* audioBuffer)
free(audioBuffer);
}
-// Check if an audio buffer is playing.
-bool IsAudioBufferPlaying(AudioBuffer* audioBuffer)
+// Check if an audio buffer is playing
+bool IsAudioBufferPlaying(AudioBuffer *audioBuffer)
{
if (audioBuffer == NULL)
{
@@ -704,11 +707,10 @@ bool IsAudioBufferPlaying(AudioBuffer* audioBuffer)
return audioBuffer->playing && !audioBuffer->paused;
}
-// Play an audio buffer.
-//
-// This will restart the buffer from the start. Use PauseAudioBuffer() and ResumeAudioBuffer() if the playback position
-// should be maintained.
-void PlayAudioBuffer(AudioBuffer* audioBuffer)
+// Play an audio buffer
+// NOTE: Buffer is restarted to the start.
+// Use PauseAudioBuffer() and ResumeAudioBuffer() if the playback position should be maintained.
+void PlayAudioBuffer(AudioBuffer *audioBuffer)
{
if (audioBuffer == NULL)
{
@@ -721,8 +723,8 @@ void PlayAudioBuffer(AudioBuffer* audioBuffer)
audioBuffer->frameCursorPos = 0;
}
-// Stop an audio buffer.
-void StopAudioBuffer(AudioBuffer* audioBuffer)
+// Stop an audio buffer
+void StopAudioBuffer(AudioBuffer *audioBuffer)
{
if (audioBuffer == NULL)
{
@@ -731,10 +733,7 @@ void StopAudioBuffer(AudioBuffer* audioBuffer)
}
// Don't do anything if the audio buffer is already stopped.
- if (!IsAudioBufferPlaying(audioBuffer))
- {
- return;
- }
+ if (!IsAudioBufferPlaying(audioBuffer)) return;
audioBuffer->playing = false;
audioBuffer->paused = false;
@@ -743,8 +742,8 @@ void StopAudioBuffer(AudioBuffer* audioBuffer)
audioBuffer->isSubBufferProcessed[1] = true;
}
-// Pause an audio buffer.
-void PauseAudioBuffer(AudioBuffer* audioBuffer)
+// Pause an audio buffer
+void PauseAudioBuffer(AudioBuffer *audioBuffer)
{
if (audioBuffer == NULL)
{
@@ -755,8 +754,8 @@ void PauseAudioBuffer(AudioBuffer* audioBuffer)
audioBuffer->paused = true;
}
-// Resume an audio buffer.
-void ResumeAudioBuffer(AudioBuffer* audioBuffer)
+// Resume an audio buffer
+void ResumeAudioBuffer(AudioBuffer *audioBuffer)
{
if (audioBuffer == NULL)
{
@@ -767,8 +766,8 @@ void ResumeAudioBuffer(AudioBuffer* audioBuffer)
audioBuffer->paused = false;
}
-// Set volume for an audio buffer.
-void SetAudioBufferVolume(AudioBuffer* audioBuffer, float volume)
+// Set volume for an audio buffer
+void SetAudioBufferVolume(AudioBuffer *audioBuffer, float volume)
{
if (audioBuffer == NULL)
{
@@ -779,8 +778,8 @@ void SetAudioBufferVolume(AudioBuffer* audioBuffer, float volume)
audioBuffer->volume = volume;
}
-// Set pitch for an audio buffer.
-void SetAudioBufferPitch(AudioBuffer* audioBuffer, float pitch)
+// Set pitch for an audio buffer
+void SetAudioBufferPitch(AudioBuffer *audioBuffer, float pitch)
{
if (audioBuffer == NULL)
{
@@ -795,6 +794,44 @@ void SetAudioBufferPitch(AudioBuffer* audioBuffer, float pitch)
mal_uint32 newOutputSampleRate = (mal_uint32)((((float)audioBuffer->dsp.config.sampleRateOut / (float)audioBuffer->dsp.config.sampleRateIn) / pitch) * audioBuffer->dsp.config.sampleRateIn);
mal_dsp_set_output_sample_rate(&audioBuffer->dsp, newOutputSampleRate);
}
+
+// Track audio buffer to linked list next position
+void TrackAudioBuffer(AudioBuffer *audioBuffer)
+{
+ mal_mutex_lock(&audioLock);
+
+ {
+ if (firstAudioBuffer == NULL) firstAudioBuffer = audioBuffer;
+ else
+ {
+ lastAudioBuffer->next = audioBuffer;
+ audioBuffer->prev = lastAudioBuffer;
+ }
+
+ lastAudioBuffer = audioBuffer;
+ }
+
+ mal_mutex_unlock(&audioLock);
+}
+
+// Untrack audio buffer from linked list
+void UntrackAudioBuffer(AudioBuffer *audioBuffer)
+{
+ mal_mutex_lock(&audioLock);
+
+ {
+ if (audioBuffer->prev == NULL) firstAudioBuffer = audioBuffer->next;
+ else audioBuffer->prev->next = audioBuffer->next;
+
+ if (audioBuffer->next == NULL) lastAudioBuffer = audioBuffer->prev;
+ else audioBuffer->next->prev = audioBuffer->prev;
+
+ audioBuffer->prev = NULL;
+ audioBuffer->next = NULL;
+ }
+
+ mal_mutex_unlock(&audioLock);
+}
#endif
//----------------------------------------------------------------------------------
@@ -813,19 +850,6 @@ Wave LoadWave(const char *fileName)
#if defined(SUPPORT_FILEFORMAT_FLAC)
else if (IsFileExtension(fileName, ".flac")) wave = LoadFLAC(fileName);
#endif
-#if !defined(AUDIO_STANDALONE)
- else if (IsFileExtension(fileName, ".rres"))
- {
- RRES rres = LoadResource(fileName, 0);
-
- // NOTE: Parameters for RRES_TYPE_WAVE are: sampleCount, sampleRate, sampleSize, channels
-
- if (rres[0].type == RRES_TYPE_WAVE) wave = LoadWaveEx(rres[0].data, rres[0].param1, rres[0].param2, rres[0].param3, rres[0].param4);
- else TraceLog(LOG_WARNING, "[%s] Resource file does not contain wave data", fileName);
-
- UnloadResource(rres);
- }
-#endif
else TraceLog(LOG_WARNING, "[%s] Audio fileformat not supported, it can't be loaded", fileName);
return wave;
@@ -884,23 +908,13 @@ Sound LoadSoundFromWave(Wave wave)
mal_uint32 frameCountIn = wave.sampleCount; // Is wave->sampleCount actually the frame count? That terminology needs to change, if so.
mal_uint32 frameCount = mal_convert_frames(NULL, DEVICE_FORMAT, DEVICE_CHANNELS, DEVICE_SAMPLE_RATE, NULL, formatIn, wave.channels, wave.sampleRate, frameCountIn);
- if (frameCount == 0) {
- TraceLog(LOG_ERROR, "LoadSoundFromWave() : Failed to get frame count for format conversion");
- }
-
+ if (frameCount == 0) TraceLog(LOG_WARNING, "LoadSoundFromWave() : Failed to get frame count for format conversion");
AudioBuffer* audioBuffer = CreateAudioBuffer(DEVICE_FORMAT, DEVICE_CHANNELS, DEVICE_SAMPLE_RATE, frameCount, AUDIO_BUFFER_USAGE_STATIC);
- if (audioBuffer == NULL)
- {
- TraceLog(LOG_ERROR, "LoadSoundFromWave() : Failed to create audio buffer");
- }
-
+ if (audioBuffer == NULL) TraceLog(LOG_WARNING, "LoadSoundFromWave() : Failed to create audio buffer");
frameCount = mal_convert_frames(audioBuffer->buffer, audioBuffer->dsp.config.formatIn, audioBuffer->dsp.config.channelsIn, audioBuffer->dsp.config.sampleRateIn, wave.data, formatIn, wave.channels, wave.sampleRate, frameCountIn);
- if (frameCount == 0)
- {
- TraceLog(LOG_ERROR, "LoadSoundFromWave() : Format conversion failed");
- }
+ if (frameCount == 0) TraceLog(LOG_WARNING, "LoadSoundFromWave() : Format conversion failed");
sound.audioBuffer = audioBuffer;
#else
@@ -975,7 +989,7 @@ void UnloadWave(Wave wave)
void UnloadSound(Sound sound)
{
#if USE_MINI_AL
- DeleteAudioBuffer((AudioBuffer*)sound.audioBuffer);
+ DeleteAudioBuffer((AudioBuffer *)sound.audioBuffer);
#else
alSourceStop(sound.source);
@@ -991,7 +1005,7 @@ void UnloadSound(Sound sound)
void UpdateSound(Sound sound, const void *data, int samplesCount)
{
#if USE_MINI_AL
- AudioBuffer* audioBuffer = (AudioBuffer*)sound.audioBuffer;
+ AudioBuffer *audioBuffer = (AudioBuffer *)sound.audioBuffer;
if (audioBuffer == NULL)
{
TraceLog(LOG_ERROR, "UpdateSound() : Invalid sound - no audio buffer");
@@ -1031,7 +1045,7 @@ void UpdateSound(Sound sound, const void *data, int samplesCount)
void PlaySound(Sound sound)
{
#if USE_MINI_AL
- PlayAudioBuffer((AudioBuffer*)sound.audioBuffer);
+ PlayAudioBuffer((AudioBuffer *)sound.audioBuffer);
#else
alSourcePlay(sound.source); // Play the sound
#endif
@@ -1056,7 +1070,7 @@ void PlaySound(Sound sound)
void PauseSound(Sound sound)
{
#if USE_MINI_AL
- PauseAudioBuffer((AudioBuffer*)sound.audioBuffer);
+ PauseAudioBuffer((AudioBuffer *)sound.audioBuffer);
#else
alSourcePause(sound.source);
#endif
@@ -1066,7 +1080,7 @@ void PauseSound(Sound sound)
void ResumeSound(Sound sound)
{
#if USE_MINI_AL
- ResumeAudioBuffer((AudioBuffer*)sound.audioBuffer);
+ ResumeAudioBuffer((AudioBuffer *)sound.audioBuffer);
#else
ALenum state;
@@ -1080,7 +1094,7 @@ void ResumeSound(Sound sound)
void StopSound(Sound sound)
{
#if USE_MINI_AL
- StopAudioBuffer((AudioBuffer*)sound.audioBuffer);
+ StopAudioBuffer((AudioBuffer *)sound.audioBuffer);
#else
alSourceStop(sound.source);
#endif
@@ -1090,7 +1104,7 @@ void StopSound(Sound sound)
bool IsSoundPlaying(Sound sound)
{
#if USE_MINI_AL
- return IsAudioBufferPlaying((AudioBuffer*)sound.audioBuffer);
+ return IsAudioBufferPlaying((AudioBuffer *)sound.audioBuffer);
#else
bool playing = false;
ALint state;
@@ -1106,7 +1120,7 @@ bool IsSoundPlaying(Sound sound)
void SetSoundVolume(Sound sound, float volume)
{
#if USE_MINI_AL
- SetAudioBufferVolume((AudioBuffer*)sound.audioBuffer, volume);
+ SetAudioBufferVolume((AudioBuffer *)sound.audioBuffer, volume);
#else
alSourcef(sound.source, AL_GAIN, volume);
#endif
@@ -1116,7 +1130,7 @@ void SetSoundVolume(Sound sound, float volume)
void SetSoundPitch(Sound sound, float pitch)
{
#if USE_MINI_AL
- SetAudioBufferPitch((AudioBuffer*)sound.audioBuffer, pitch);
+ SetAudioBufferPitch((AudioBuffer *)sound.audioBuffer, pitch);
#else
alSourcef(sound.source, AL_PITCH, pitch);
#endif
@@ -1125,21 +1139,24 @@ void SetSoundPitch(Sound sound, float pitch)
// Convert wave data to desired format
void WaveFormat(Wave *wave, int sampleRate, int sampleSize, int channels)
{
+#if USE_MINI_AL
mal_format formatIn = ((wave->sampleSize == 8) ? mal_format_u8 : ((wave->sampleSize == 16) ? mal_format_s16 : mal_format_f32));
mal_format formatOut = (( sampleSize == 8) ? mal_format_u8 : (( sampleSize == 16) ? mal_format_s16 : mal_format_f32));
mal_uint32 frameCountIn = wave->sampleCount; // Is wave->sampleCount actually the frame count? That terminology needs to change, if so.
mal_uint32 frameCount = mal_convert_frames(NULL, formatOut, channels, sampleRate, NULL, formatIn, wave->channels, wave->sampleRate, frameCountIn);
- if (frameCount == 0) {
+ if (frameCount == 0)
+ {
TraceLog(LOG_ERROR, "WaveFormat() : Failed to get frame count for format conversion.");
return;
}
- void* data = malloc(frameCount * channels * (sampleSize/8));
+ void *data = malloc(frameCount*channels*(sampleSize/8));
frameCount = mal_convert_frames(data, formatOut, channels, sampleRate, wave->data, formatIn, wave->channels, wave->sampleRate, frameCountIn);
- if (frameCount == 0) {
+ if (frameCount == 0)
+ {
TraceLog(LOG_ERROR, "WaveFormat() : Format conversion failed.");
return;
}
@@ -1151,7 +1168,7 @@ void WaveFormat(Wave *wave, int sampleRate, int sampleSize, int channels)
free(wave->data);
wave->data = data;
-#if 0
+#else
// Format sample rate
// NOTE: Only supported 22050 <--> 44100
if (wave->sampleRate != sampleRate)
@@ -1414,7 +1431,7 @@ void UnloadMusicStream(Music music)
void PlayMusicStream(Music music)
{
#if USE_MINI_AL
- AudioBuffer* audioBuffer = (AudioBuffer*)music->stream.audioBuffer;
+ AudioBuffer *audioBuffer = (AudioBuffer *)music->stream.audioBuffer;
if (audioBuffer == NULL)
{
TraceLog(LOG_ERROR, "PlayMusicStream() : No audio buffer");
@@ -1426,9 +1443,9 @@ void PlayMusicStream(Music music)
// // just make sure to play again on window restore
// if (IsMusicPlaying(music)) PlayMusicStream(music);
mal_uint32 frameCursorPos = audioBuffer->frameCursorPos;
- {
- PlayAudioStream(music->stream); // <-- This resets the cursor position.
- }
+
+ PlayAudioStream(music->stream); // <-- This resets the cursor position.
+
audioBuffer->frameCursorPos = frameCursorPos;
#else
alSourcePlay(music->stream.source);
@@ -1512,7 +1529,7 @@ void UpdateMusicStream(Music music)
#if USE_MINI_AL
bool streamEnding = false;
- unsigned int subBufferSizeInFrames = ((AudioBuffer*)music->stream.audioBuffer)->bufferSizeInFrames / 2;
+ unsigned int subBufferSizeInFrames = ((AudioBuffer *)music->stream.audioBuffer)->bufferSizeInFrames/2;
// NOTE: Using dynamic allocation because it could require more than 16KB
void *pcm = calloc(subBufferSizeInFrames*music->stream.sampleSize/8*music->stream.channels, 1);
@@ -1530,7 +1547,7 @@ void UpdateMusicStream(Music music)
case MUSIC_AUDIO_OGG:
{
// NOTE: Returns the number of samples to process (be careful! we ask for number of shorts!)
- int numSamplesOgg = stb_vorbis_get_samples_short_interleaved(music->ctxOgg, music->stream.channels, (short *)pcm, samplesCount*music->stream.channels);
+ stb_vorbis_get_samples_short_interleaved(music->ctxOgg, music->stream.channels, (short *)pcm, samplesCount*music->stream.channels);
} break;
#if defined(SUPPORT_FILEFORMAT_FLAC)
@@ -1576,10 +1593,7 @@ void UpdateMusicStream(Music music)
}
else
{
- if (music->loopCount == -1)
- {
- PlayMusicStream(music);
- }
+ if (music->loopCount == -1) PlayMusicStream(music);
}
}
else
@@ -1766,11 +1780,9 @@ AudioStream InitAudioStream(unsigned int sampleRate, unsigned int sampleSize, un
// The size of a streaming buffer must be at least double the size of a period.
unsigned int periodSize = device.bufferSizeInFrames / device.periods;
unsigned int subBufferSize = AUDIO_BUFFER_SIZE;
- if (subBufferSize < periodSize) {
- subBufferSize = periodSize;
- }
+ if (subBufferSize < periodSize) subBufferSize = periodSize;
- AudioBuffer* audioBuffer = CreateAudioBuffer(formatIn, stream.channels, stream.sampleRate, subBufferSize*2, AUDIO_BUFFER_USAGE_STREAM);
+ AudioBuffer *audioBuffer = CreateAudioBuffer(formatIn, stream.channels, stream.sampleRate, subBufferSize*2, AUDIO_BUFFER_USAGE_STREAM);
if (audioBuffer == NULL)
{
TraceLog(LOG_ERROR, "InitAudioStream() : Failed to create audio buffer");
@@ -1835,7 +1847,7 @@ AudioStream InitAudioStream(unsigned int sampleRate, unsigned int sampleSize, un
void CloseAudioStream(AudioStream stream)
{
#if USE_MINI_AL
- DeleteAudioBuffer((AudioBuffer*)stream.audioBuffer);
+ DeleteAudioBuffer((AudioBuffer *)stream.audioBuffer);
#else
// Stop playing channel
alSourceStop(stream.source);
@@ -1866,7 +1878,7 @@ void CloseAudioStream(AudioStream stream)
void UpdateAudioStream(AudioStream stream, const void *data, int samplesCount)
{
#if USE_MINI_AL
- AudioBuffer* audioBuffer = (AudioBuffer*)stream.audioBuffer;
+ AudioBuffer *audioBuffer = (AudioBuffer *)stream.audioBuffer;
if (audioBuffer == NULL)
{
TraceLog(LOG_ERROR, "UpdateAudioStream() : No audio buffer");
@@ -1889,23 +1901,22 @@ void UpdateAudioStream(AudioStream stream, const void *data, int samplesCount)
}
mal_uint32 subBufferSizeInFrames = audioBuffer->bufferSizeInFrames/2;
- unsigned char *subBuffer = audioBuffer->buffer + ((subBufferSizeInFrames * stream.channels * (stream.sampleSize/8)) * subBufferToUpdate);
+ unsigned char *subBuffer = audioBuffer->buffer + ((subBufferSizeInFrames*stream.channels*(stream.sampleSize/8))*subBufferToUpdate);
// Does this API expect a whole buffer to be updated in one go? Assuming so, but if not will need to change this logic.
if (subBufferSizeInFrames >= (mal_uint32)samplesCount)
{
mal_uint32 framesToWrite = subBufferSizeInFrames;
- if (framesToWrite > (mal_uint32)samplesCount) {
- framesToWrite = (mal_uint32)samplesCount;
- }
+ if (framesToWrite > (mal_uint32)samplesCount) framesToWrite = (mal_uint32)samplesCount;
- mal_uint32 bytesToWrite = framesToWrite * stream.channels * (stream.sampleSize/8);
+ mal_uint32 bytesToWrite = framesToWrite*stream.channels*(stream.sampleSize/8);
memcpy(subBuffer, data, bytesToWrite);
// Any leftover frames should be filled with zeros.
mal_uint32 leftoverFrameCount = subBufferSizeInFrames - framesToWrite;
- if (leftoverFrameCount > 0) {
- memset(subBuffer + bytesToWrite, 0, leftoverFrameCount * stream.channels * (stream.sampleSize/8));
+ if (leftoverFrameCount > 0)
+ {
+ memset(subBuffer + bytesToWrite, 0, leftoverFrameCount*stream.channels*(stream.sampleSize/8));
}
audioBuffer->isSubBufferProcessed[subBufferToUpdate] = false;
@@ -1939,7 +1950,7 @@ void UpdateAudioStream(AudioStream stream, const void *data, int samplesCount)
bool IsAudioBufferProcessed(AudioStream stream)
{
#if USE_MINI_AL
- AudioBuffer* audioBuffer = (AudioBuffer*)stream.audioBuffer;
+ AudioBuffer *audioBuffer = (AudioBuffer *)stream.audioBuffer;
if (audioBuffer == NULL)
{
TraceLog(LOG_ERROR, "IsAudioBufferProcessed() : No audio buffer");
@@ -1961,7 +1972,7 @@ bool IsAudioBufferProcessed(AudioStream stream)
void PlayAudioStream(AudioStream stream)
{
#if USE_MINI_AL
- PlayAudioBuffer((AudioBuffer*)stream.audioBuffer);
+ PlayAudioBuffer((AudioBuffer *)stream.audioBuffer);
#else
alSourcePlay(stream.source);
#endif
@@ -1971,7 +1982,7 @@ void PlayAudioStream(AudioStream stream)
void PauseAudioStream(AudioStream stream)
{
#if USE_MINI_AL
- PauseAudioBuffer((AudioBuffer*)stream.audioBuffer);
+ PauseAudioBuffer((AudioBuffer *)stream.audioBuffer);
#else
alSourcePause(stream.source);
#endif
@@ -1981,7 +1992,7 @@ void PauseAudioStream(AudioStream stream)
void ResumeAudioStream(AudioStream stream)
{
#if USE_MINI_AL
- ResumeAudioBuffer((AudioBuffer*)stream.audioBuffer);
+ ResumeAudioBuffer((AudioBuffer *)stream.audioBuffer);
#else
ALenum state;
alGetSourcei(stream.source, AL_SOURCE_STATE, &state);
@@ -1994,7 +2005,7 @@ void ResumeAudioStream(AudioStream stream)
bool IsAudioStreamPlaying(AudioStream stream)
{
#if USE_MINI_AL
- return IsAudioBufferPlaying((AudioBuffer*)stream.audioBuffer);
+ return IsAudioBufferPlaying((AudioBuffer *)stream.audioBuffer);
#else
bool playing = false;
ALint state;
@@ -2011,7 +2022,7 @@ bool IsAudioStreamPlaying(AudioStream stream)
void StopAudioStream(AudioStream stream)
{
#if USE_MINI_AL
- StopAudioBuffer((AudioBuffer*)stream.audioBuffer);
+ StopAudioBuffer((AudioBuffer *)stream.audioBuffer);
#else
alSourceStop(stream.source);
#endif
@@ -2020,7 +2031,7 @@ void StopAudioStream(AudioStream stream)
void SetAudioStreamVolume(AudioStream stream, float volume)
{
#if USE_MINI_AL
- SetAudioBufferVolume((AudioBuffer*)stream.audioBuffer, volume);
+ SetAudioBufferVolume((AudioBuffer *)stream.audioBuffer, volume);
#else
alSourcef(stream.source, AL_GAIN, volume);
#endif
@@ -2029,7 +2040,7 @@ void SetAudioStreamVolume(AudioStream stream, float volume)
void SetAudioStreamPitch(AudioStream stream, float pitch)
{
#if USE_MINI_AL
- SetAudioBufferPitch((AudioBuffer*)stream.audioBuffer, pitch);
+ SetAudioBufferPitch((AudioBuffer *)stream.audioBuffer, pitch);
#else
alSourcef(stream.source, AL_PITCH, pitch);
#endif