first commit
This commit is contained in:
23
thirdparty/miniaudio/examples/build/README.md
vendored
Normal file
23
thirdparty/miniaudio/examples/build/README.md
vendored
Normal file
@@ -0,0 +1,23 @@
|
||||
Examples
|
||||
--------
|
||||
gcc ../simple_playback.c -o bin/simple_playback -ldl -lm -lpthread
|
||||
gcc ../simple_playback.c -o bin/simple_playback -ldl -lm -lpthread -Wall -Wextra -Wpedantic -std=c89
|
||||
|
||||
Emscripten
|
||||
----------
|
||||
On Windows, you need to move into the build directory and run emsdk_env.bat from a command prompt using an absolute
|
||||
path like "C:\emsdk\emsdk_env.bat". Note that PowerShell doesn't work for me for some reason. Examples:
|
||||
|
||||
emcc ../simple_playback_sine.c -o bin/simple_playback_sine.html
|
||||
emcc ../simple_playback_sine.c -o bin/simple_playback_sine.html -s WASM=0 -Wall -Wextra
|
||||
|
||||
To compile with support for Audio Worklets:
|
||||
|
||||
emcc ../simple_playback_sine.c -o bin/simple_playback_sine.html -DMA_ENABLE_AUDIO_WORKLETS -sAUDIO_WORKLET=1 -sWASM_WORKERS=1 -sASYNCIFY
|
||||
|
||||
If you output WASM it may not work when running the web page locally. To test you can run with something
|
||||
like this:
|
||||
|
||||
emrun ./bin/simple_playback_sine.html
|
||||
|
||||
If you want to see stdout on the command line when running from emrun, add `--emrun` to your emcc command.
|
||||
740
thirdparty/miniaudio/examples/custom_backend.c
vendored
Normal file
740
thirdparty/miniaudio/examples/custom_backend.c
vendored
Normal file
@@ -0,0 +1,740 @@
|
||||
/*
|
||||
This example shows how a custom backend can be implemented.
|
||||
|
||||
This implements a full-featured SDL2 backend. It's intentionally built using the same paradigms as the built-in backends in order to make
|
||||
it suitable as a solid basis for a custom implementation. The SDL2 backend can be disabled with MA_NO_SDL, exactly like the built-in
|
||||
backends. It supports both runtime and compile-time linking and respects the MA_NO_RUNTIME_LINKING option. It also works on Emscripten
|
||||
which requires the `-s USE_SDL=2` option.
|
||||
|
||||
There may be times where you want to support more than one custom backend. This example has been designed to make it easy to plug-in extra
|
||||
custom backends without needing to modify any of the base miniaudio initialization code. A custom context structure is declared called
|
||||
`ma_context_ex`. The first member of this structure is a `ma_context` object which allows it to be cast between the two. The same is done
|
||||
for devices, which is called `ma_device_ex`. In these structures there is a section for each custom backend, which in this example is just
|
||||
SDL. These are only enabled at compile time if `MA_SUPPORT_SDL` is defined, which it always is in this example (you may want to have some
|
||||
logic which more intelligently enables or disables SDL support).
|
||||
|
||||
To use a custom backend, at a minimum you must set the `custom.onContextInit()` callback in the context config. You do not need to set the
|
||||
other callbacks, but if you don't, you must set them in the implementation of the `onContextInit()` callback which is done via an output
|
||||
parameter. This is the approach taken by this example because it's the simplest way to support multiple custom backends. The idea is that
|
||||
the `onContextInit()` callback is set to a generic "loader", which then calls out to a backend-specific implementation which then sets the
|
||||
remaining callbacks if it is successfully initialized.
|
||||
|
||||
Custom backends are identified with the `ma_backend_custom` backend type. For the purpose of demonstration, this example only uses the
|
||||
`ma_backend_custom` backend type because otherwise the built-in backends would always get chosen first and none of the code for the custom
|
||||
backends would actually get hit. By default, the `ma_backend_custom` backend is the lowest priority backend, except for `ma_backend_null`.
|
||||
*/
|
||||
#define MINIAUDIO_IMPLEMENTATION
|
||||
#include "../miniaudio.h"
|
||||
|
||||
#ifdef __EMSCRIPTEN__
|
||||
#include <emscripten.h>
|
||||
|
||||
void main_loop__em()
|
||||
{
|
||||
}
|
||||
#endif
|
||||
|
||||
/* Support SDL on everything. */
|
||||
#define MA_SUPPORT_SDL
|
||||
|
||||
/*
|
||||
Only enable SDL if it hasn't been explicitly disabled (MA_NO_SDL) or enabled (MA_ENABLE_SDL with
|
||||
MA_ENABLE_ONLY_SPECIFIC_BACKENDS) and it's supported at compile time (MA_SUPPORT_SDL).
|
||||
*/
|
||||
#if defined(MA_SUPPORT_SDL) && !defined(MA_NO_SDL) && (!defined(MA_ENABLE_ONLY_SPECIFIC_BACKENDS) || defined(MA_ENABLE_SDL))
|
||||
#define MA_HAS_SDL
|
||||
#endif
|
||||
|
||||
|
||||
/*
Custom context structure. The first member is a ma_context so that pointers can be freely
cast between ma_context* and ma_context_ex* — miniaudio only ever sees the ma_context part.
Each custom backend gets its own sub-struct; only SDL is present in this example, compiled
in when MA_SUPPORT_SDL is defined.
*/
typedef struct
{
    ma_context context; /* Make this the first member so we can cast between ma_context and ma_context_ex. */
#if defined(MA_SUPPORT_SDL)
    struct
    {
        ma_handle hSDL; /* A handle to the SDL2 shared object. We dynamically load function pointers at runtime so we can avoid linking. */
        ma_proc SDL_InitSubSystem;
        ma_proc SDL_QuitSubSystem;
        ma_proc SDL_GetNumAudioDevices;
        ma_proc SDL_GetAudioDeviceName;
        ma_proc SDL_CloseAudioDevice;
        ma_proc SDL_OpenAudioDevice;
        ma_proc SDL_PauseAudioDevice;
    } sdl;
#endif
} ma_context_ex;
|
||||
|
||||
/*
Custom device structure, mirroring ma_context_ex: the ma_device member comes first so the
two pointer types can be cast back and forth. Stores the SDL device IDs for each direction;
only the ID(s) matching the device type are ever assigned.
*/
typedef struct
{
    ma_device device; /* Make this the first member so we can cast between ma_device and ma_device_ex. */
#if defined(MA_SUPPORT_SDL)
    struct
    {
        int deviceIDPlayback;
        int deviceIDCapture;
    } sdl;
#endif
} ma_device_ex;
|
||||
|
||||
|
||||
|
||||
#if defined(MA_HAS_SDL)
|
||||
/* SDL headers are necessary if using compile-time linking. */
|
||||
#ifdef MA_NO_RUNTIME_LINKING
|
||||
#ifdef __has_include
|
||||
#ifdef MA_EMSCRIPTEN
|
||||
#if !__has_include(<SDL/SDL_audio.h>)
|
||||
#undef MA_HAS_SDL
|
||||
#endif
|
||||
#else
|
||||
#if !__has_include(<SDL2/SDL_audio.h>)
|
||||
#undef MA_HAS_SDL
|
||||
#endif
|
||||
#endif
|
||||
#endif
|
||||
#endif
|
||||
#endif
|
||||
|
||||
|
||||
#if defined(MA_HAS_SDL)
|
||||
#define MA_SDL_INIT_AUDIO 0x00000010
|
||||
#define MA_AUDIO_U8 0x0008
|
||||
#define MA_AUDIO_S16 0x8010
|
||||
#define MA_AUDIO_S32 0x8020
|
||||
#define MA_AUDIO_F32 0x8120
|
||||
#define MA_SDL_AUDIO_ALLOW_FREQUENCY_CHANGE 0x00000001
|
||||
#define MA_SDL_AUDIO_ALLOW_FORMAT_CHANGE 0x00000002
|
||||
#define MA_SDL_AUDIO_ALLOW_CHANNELS_CHANGE 0x00000004
|
||||
#define MA_SDL_AUDIO_ALLOW_ANY_CHANGE (MA_SDL_AUDIO_ALLOW_FREQUENCY_CHANGE | MA_SDL_AUDIO_ALLOW_FORMAT_CHANGE | MA_SDL_AUDIO_ALLOW_CHANNELS_CHANGE)
|
||||
|
||||
/* If we are linking at compile time we'll just #include SDL.h. Otherwise we can just redeclare some stuff to avoid the need for development packages to be installed. */
|
||||
#ifdef MA_NO_RUNTIME_LINKING
|
||||
#define SDL_MAIN_HANDLED
|
||||
#ifdef MA_EMSCRIPTEN
|
||||
#include <SDL/SDL.h>
|
||||
#else
|
||||
#include <SDL2/SDL.h>
|
||||
#endif
|
||||
|
||||
typedef SDL_AudioCallback MA_SDL_AudioCallback;
|
||||
typedef SDL_AudioSpec MA_SDL_AudioSpec;
|
||||
typedef SDL_AudioFormat MA_SDL_AudioFormat;
|
||||
typedef SDL_AudioDeviceID MA_SDL_AudioDeviceID;
|
||||
#else
|
||||
typedef void (* MA_SDL_AudioCallback)(void* userdata, ma_uint8* stream, int len);
|
||||
typedef ma_uint16 MA_SDL_AudioFormat;
|
||||
typedef ma_uint32 MA_SDL_AudioDeviceID;
|
||||
|
||||
typedef struct MA_SDL_AudioSpec
|
||||
{
|
||||
int freq;
|
||||
MA_SDL_AudioFormat format;
|
||||
ma_uint8 channels;
|
||||
ma_uint8 silence;
|
||||
ma_uint16 samples;
|
||||
ma_uint16 padding;
|
||||
ma_uint32 size;
|
||||
MA_SDL_AudioCallback callback;
|
||||
void* userdata;
|
||||
} MA_SDL_AudioSpec;
|
||||
#endif
|
||||
|
||||
typedef int (* MA_PFN_SDL_InitSubSystem)(ma_uint32 flags);
|
||||
typedef void (* MA_PFN_SDL_QuitSubSystem)(ma_uint32 flags);
|
||||
typedef int (* MA_PFN_SDL_GetNumAudioDevices)(int iscapture);
|
||||
typedef const char* (* MA_PFN_SDL_GetAudioDeviceName)(int index, int iscapture);
|
||||
typedef void (* MA_PFN_SDL_CloseAudioDevice)(MA_SDL_AudioDeviceID dev);
|
||||
typedef MA_SDL_AudioDeviceID (* MA_PFN_SDL_OpenAudioDevice)(const char* device, int iscapture, const MA_SDL_AudioSpec* desired, MA_SDL_AudioSpec* obtained, int allowed_changes);
|
||||
typedef void (* MA_PFN_SDL_PauseAudioDevice)(MA_SDL_AudioDeviceID dev, int pause_on);
|
||||
|
||||
/*
Maps a miniaudio sample format to the corresponding SDL audio format constant.

Returns 0 for ma_format_unknown or any format with no SDL equivalent. Note that
ma_format_s24 has no exact SDL counterpart, so it maps to the closest match, MA_AUDIO_S32.
*/
MA_SDL_AudioFormat ma_format_to_sdl(ma_format format)
{
    if (format == ma_format_u8)  { return MA_AUDIO_U8;  }
    if (format == ma_format_s16) { return MA_AUDIO_S16; }
    if (format == ma_format_s24) { return MA_AUDIO_S32; }   /* Closest match. */
    if (format == ma_format_s32) { return MA_AUDIO_S32; }
    if (format == ma_format_f32) { return MA_AUDIO_F32; }

    return 0;   /* ma_format_unknown and anything unrecognized. */
}
|
||||
|
||||
/*
Maps an SDL audio format constant back to the corresponding miniaudio sample format.
Returns ma_format_unknown for any SDL format miniaudio has no equivalent for.
*/
ma_format ma_format_from_sdl(MA_SDL_AudioFormat format)
{
    if (format == MA_AUDIO_U8)  { return ma_format_u8;  }
    if (format == MA_AUDIO_S16) { return ma_format_s16; }
    if (format == MA_AUDIO_S32) { return ma_format_s32; }
    if (format == MA_AUDIO_F32) { return ma_format_f32; }

    return ma_format_unknown;
}
|
||||
|
||||
static ma_result ma_context_enumerate_devices__sdl(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData)
|
||||
{
|
||||
ma_context_ex* pContextEx = (ma_context_ex*)pContext;
|
||||
ma_bool32 isTerminated = MA_FALSE;
|
||||
ma_bool32 cbResult;
|
||||
int iDevice;
|
||||
|
||||
MA_ASSERT(pContext != NULL);
|
||||
MA_ASSERT(callback != NULL);
|
||||
|
||||
/* Playback */
|
||||
if (!isTerminated) {
|
||||
int deviceCount = ((MA_PFN_SDL_GetNumAudioDevices)pContextEx->sdl.SDL_GetNumAudioDevices)(0);
|
||||
for (iDevice = 0; iDevice < deviceCount; ++iDevice) {
|
||||
ma_device_info deviceInfo;
|
||||
MA_ZERO_OBJECT(&deviceInfo);
|
||||
|
||||
deviceInfo.id.custom.i = iDevice;
|
||||
ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), ((MA_PFN_SDL_GetAudioDeviceName)pContextEx->sdl.SDL_GetAudioDeviceName)(iDevice, 0), (size_t)-1);
|
||||
|
||||
if (iDevice == 0) {
|
||||
deviceInfo.isDefault = MA_TRUE;
|
||||
}
|
||||
|
||||
cbResult = callback(pContext, ma_device_type_playback, &deviceInfo, pUserData);
|
||||
if (cbResult == MA_FALSE) {
|
||||
isTerminated = MA_TRUE;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* Capture */
|
||||
if (!isTerminated) {
|
||||
int deviceCount = ((MA_PFN_SDL_GetNumAudioDevices)pContextEx->sdl.SDL_GetNumAudioDevices)(1);
|
||||
for (iDevice = 0; iDevice < deviceCount; ++iDevice) {
|
||||
ma_device_info deviceInfo;
|
||||
MA_ZERO_OBJECT(&deviceInfo);
|
||||
|
||||
deviceInfo.id.custom.i = iDevice;
|
||||
ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), ((MA_PFN_SDL_GetAudioDeviceName)pContextEx->sdl.SDL_GetAudioDeviceName)(iDevice, 1), (size_t)-1);
|
||||
|
||||
if (iDevice == 0) {
|
||||
deviceInfo.isDefault = MA_TRUE;
|
||||
}
|
||||
|
||||
cbResult = callback(pContext, ma_device_type_capture, &deviceInfo, pUserData);
|
||||
if (cbResult == MA_FALSE) {
|
||||
isTerminated = MA_TRUE;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return MA_SUCCESS;
|
||||
}
|
||||
|
||||
/*
Retrieves detailed information (name, default flag, native data formats) for a device.
When pDeviceID is NULL, the default device of the given type is described. SDL does not
report a default device, so index 0 is treated as the default throughout.
*/
static ma_result ma_context_get_device_info__sdl(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_device_info* pDeviceInfo)
{
    ma_context_ex* pContextEx = (ma_context_ex*)pContext;

#if !defined(__EMSCRIPTEN__)
    MA_SDL_AudioSpec desiredSpec;
    MA_SDL_AudioSpec obtainedSpec;
    MA_SDL_AudioDeviceID tempDeviceID;
    const char* pDeviceName;
#endif

    MA_ASSERT(pContext != NULL);

    /* Fill out the ID and name. NULL device ID means the default device of the requested type. */
    if (pDeviceID == NULL) {
        if (deviceType == ma_device_type_playback) {
            pDeviceInfo->id.custom.i = 0;
            ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_PLAYBACK_DEVICE_NAME, (size_t)-1);
        } else {
            pDeviceInfo->id.custom.i = 0;
            ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_CAPTURE_DEVICE_NAME, (size_t)-1);
        }
    } else {
        pDeviceInfo->id.custom.i = pDeviceID->custom.i;
        ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), ((MA_PFN_SDL_GetAudioDeviceName)pContextEx->sdl.SDL_GetAudioDeviceName)(pDeviceID->custom.i, (deviceType == ma_device_type_playback) ? 0 : 1), (size_t)-1);
    }

    /* Index 0 doubles as the default device (see enumeration). */
    if (pDeviceInfo->id.custom.i == 0) {
        pDeviceInfo->isDefault = MA_TRUE;
    }

    /*
    To get an accurate idea on the backend's native format we need to open the device. Not ideal, but it's the only way. An
    alternative to this is to report all channel counts, sample rates and formats, but that doesn't offer a good representation
    of the device's _actual_ ideal format.

    Note: With Emscripten, it looks like non-zero values need to be specified for desiredSpec. Whatever is specified in
    desiredSpec will be used by SDL since it just does its own format conversion internally. Therefore, from what
    I can tell, there's no real way to know the device's actual format which means I'm just going to fall back to the full
    range of channels and sample rates on Emscripten builds.
    */
#if defined(__EMSCRIPTEN__)
    /* Good practice to prioritize the best format first so that the application can use the first data format as their chosen one if desired. */
    pDeviceInfo->nativeDataFormatCount = 3;
    pDeviceInfo->nativeDataFormats[0].format = ma_format_s16;
    pDeviceInfo->nativeDataFormats[0].channels = 0; /* All channel counts supported. */
    pDeviceInfo->nativeDataFormats[0].sampleRate = 0; /* All sample rates supported. */
    pDeviceInfo->nativeDataFormats[0].flags = 0;
    pDeviceInfo->nativeDataFormats[1].format = ma_format_s32;
    pDeviceInfo->nativeDataFormats[1].channels = 0; /* All channel counts supported. */
    pDeviceInfo->nativeDataFormats[1].sampleRate = 0; /* All sample rates supported. */
    pDeviceInfo->nativeDataFormats[1].flags = 0;
    pDeviceInfo->nativeDataFormats[2].format = ma_format_u8;
    pDeviceInfo->nativeDataFormats[2].channels = 0; /* All channel counts supported. */
    pDeviceInfo->nativeDataFormats[2].sampleRate = 0; /* All sample rates supported. */
    pDeviceInfo->nativeDataFormats[2].flags = 0;
#else
    /* A zeroed desired spec lets SDL pick everything, so obtainedSpec reflects the device's preference. */
    MA_ZERO_MEMORY(&desiredSpec, sizeof(desiredSpec));

    pDeviceName = NULL;
    if (pDeviceID != NULL) {
        pDeviceName = ((MA_PFN_SDL_GetAudioDeviceName)pContextEx->sdl.SDL_GetAudioDeviceName)(pDeviceID->custom.i, (deviceType == ma_device_type_playback) ? 0 : 1);
    }

    /* Open the device temporarily just to learn what SDL considers its native format, then close it straight away. */
    tempDeviceID = ((MA_PFN_SDL_OpenAudioDevice)pContextEx->sdl.SDL_OpenAudioDevice)(pDeviceName, (deviceType == ma_device_type_playback) ? 0 : 1, &desiredSpec, &obtainedSpec, MA_SDL_AUDIO_ALLOW_ANY_CHANGE);
    if (tempDeviceID == 0) {
        ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "Failed to open SDL device.");
        return MA_FAILED_TO_OPEN_BACKEND_DEVICE;
    }

    ((MA_PFN_SDL_CloseAudioDevice)pContextEx->sdl.SDL_CloseAudioDevice)(tempDeviceID);

    /* Only reporting a single native data format. It'll be whatever SDL decides is the best. */
    pDeviceInfo->nativeDataFormatCount = 1;
    pDeviceInfo->nativeDataFormats[0].format = ma_format_from_sdl(obtainedSpec.format);
    pDeviceInfo->nativeDataFormats[0].channels = obtainedSpec.channels;
    pDeviceInfo->nativeDataFormats[0].sampleRate = obtainedSpec.freq;
    pDeviceInfo->nativeDataFormats[0].flags = 0;

    /* If miniaudio does not support the format, just use f32 as the native format (SDL will do the necessary conversions for us). */
    if (pDeviceInfo->nativeDataFormats[0].format == ma_format_unknown) {
        pDeviceInfo->nativeDataFormats[0].format = ma_format_f32;
    }
#endif  /* __EMSCRIPTEN__ */

    return MA_SUCCESS;
}
|
||||
|
||||
|
||||
/*
SDL capture callback. SDL hands us a buffer of captured audio in bytes; convert that to
a frame count using the device's internal capture format and forward it to miniaudio.
*/
void ma_audio_callback_capture__sdl(void* pUserData, ma_uint8* pBuffer, int bufferSizeInBytes)
{
    ma_device_ex* pDeviceEx = (ma_device_ex*)pUserData;
    ma_uint32 bytesPerFrame;
    ma_uint32 frameCount;

    MA_ASSERT(pDeviceEx != NULL);

    bytesPerFrame = ma_get_bytes_per_frame(pDeviceEx->device.capture.internalFormat, pDeviceEx->device.capture.internalChannels);
    frameCount    = (ma_uint32)bufferSizeInBytes / bytesPerFrame;

    /* Capture: the SDL buffer is the input side; there is no output buffer. */
    ma_device_handle_backend_data_callback((ma_device*)pDeviceEx, NULL, pBuffer, frameCount);
}
|
||||
|
||||
/*
SDL playback callback. SDL asks us to fill a buffer measured in bytes; convert that to
a frame count using the device's internal playback format and let miniaudio render into it.
*/
void ma_audio_callback_playback__sdl(void* pUserData, ma_uint8* pBuffer, int bufferSizeInBytes)
{
    ma_device_ex* pDeviceEx = (ma_device_ex*)pUserData;
    ma_uint32 bytesPerFrame;
    ma_uint32 frameCount;

    MA_ASSERT(pDeviceEx != NULL);

    bytesPerFrame = ma_get_bytes_per_frame(pDeviceEx->device.playback.internalFormat, pDeviceEx->device.playback.internalChannels);
    frameCount    = (ma_uint32)bufferSizeInBytes / bytesPerFrame;

    /* Playback: the SDL buffer is the output side; there is no input buffer. */
    ma_device_handle_backend_data_callback((ma_device*)pDeviceEx, pBuffer, NULL, frameCount);
}
|
||||
|
||||
/*
Opens a single SDL audio device for one direction (playback or capture, chosen via
pConfig->deviceType) and updates pDescriptor with the settings SDL actually gave us.
On success the SDL device ID is stored in the appropriate pDeviceEx->sdl member.
Returns MA_FAILED_TO_OPEN_BACKEND_DEVICE if SDL refuses to open the device.
*/
static ma_result ma_device_init_internal__sdl(ma_device_ex* pDeviceEx, const ma_device_config* pConfig, ma_device_descriptor* pDescriptor)
{
    ma_context_ex* pContextEx = (ma_context_ex*)pDeviceEx->device.pContext;
    MA_SDL_AudioSpec desiredSpec;
    MA_SDL_AudioSpec obtainedSpec;
    const char* pDeviceName;
    int deviceID;

    MA_ASSERT(pDeviceEx != NULL);
    MA_ASSERT(pDescriptor != NULL);

    /*
    SDL is a little bit awkward with specifying the buffer size. You need to specify the size of the buffer in frames, but since we may
    have requested a period size in milliseconds we'll need to convert, which depends on the sample rate. But there's a possibility that
    the sample rate is just set to 0, which indicates that the native sample rate should be used. There's no practical way to calculate this
    that I can think of right now so I'm just using MA_DEFAULT_SAMPLE_RATE.
    */
    if (pDescriptor->sampleRate == 0) {
        pDescriptor->sampleRate = MA_DEFAULT_SAMPLE_RATE;
    }

    /*
    When determining the period size, you need to take defaults into account. This is how the size of the period should be determined.

        1) If periodSizeInFrames is not 0, use periodSizeInFrames; else
        2) If periodSizeInMilliseconds is not 0, use periodSizeInMilliseconds; else
        3) If both periodSizeInFrames and periodSizeInMilliseconds is 0, use the backend's default. If the backend does not allow a default
           buffer size, use a default value of MA_DEFAULT_PERIOD_SIZE_IN_MILLISECONDS_LOW_LATENCY or
           MA_DEFAULT_PERIOD_SIZE_IN_MILLISECONDS_CONSERVATIVE depending on the value of pConfig->performanceProfile.

    Note that options 2 and 3 require knowledge of the sample rate in order to convert it to a frame count. You should try to keep the
    calculation of the period size as accurate as possible, but sometimes it's just not practical so just use whatever you can.

    A helper function called ma_calculate_buffer_size_in_frames_from_descriptor() is available to do all of this for you which is what
    we'll be using here.
    */
    pDescriptor->periodSizeInFrames = ma_calculate_buffer_size_in_frames_from_descriptor(pDescriptor, pDescriptor->sampleRate, pConfig->performanceProfile);

    /* SDL wants the buffer size to be a power of 2 for some reason. Clamp at 32768 to keep it within ma_uint16 range for desiredSpec.samples. */
    if (pDescriptor->periodSizeInFrames > 32768) {
        pDescriptor->periodSizeInFrames = 32768;
    } else {
        pDescriptor->periodSizeInFrames = ma_next_power_of_2(pDescriptor->periodSizeInFrames);
    }


    /* We now have enough information to set up the device. */
    MA_ZERO_OBJECT(&desiredSpec);
    desiredSpec.freq = (int)pDescriptor->sampleRate;
    desiredSpec.format = ma_format_to_sdl(pDescriptor->format);
    desiredSpec.channels = (ma_uint8)pDescriptor->channels;
    desiredSpec.samples = (ma_uint16)pDescriptor->periodSizeInFrames;
    desiredSpec.callback = (pConfig->deviceType == ma_device_type_capture) ? ma_audio_callback_capture__sdl : ma_audio_callback_playback__sdl;
    desiredSpec.userdata = pDeviceEx;

    /* We'll fall back to f32 if we don't have an appropriate mapping between SDL and miniaudio (ma_format_to_sdl() returns 0). */
    if (desiredSpec.format == 0) {
        desiredSpec.format = MA_AUDIO_F32;
    }

    /* NULL device name tells SDL to open the default device. */
    pDeviceName = NULL;
    if (pDescriptor->pDeviceID != NULL) {
        pDeviceName = ((MA_PFN_SDL_GetAudioDeviceName)pContextEx->sdl.SDL_GetAudioDeviceName)(pDescriptor->pDeviceID->custom.i, (pConfig->deviceType == ma_device_type_playback) ? 0 : 1);
    }

    deviceID = ((MA_PFN_SDL_OpenAudioDevice)pContextEx->sdl.SDL_OpenAudioDevice)(pDeviceName, (pConfig->deviceType == ma_device_type_playback) ? 0 : 1, &desiredSpec, &obtainedSpec, MA_SDL_AUDIO_ALLOW_ANY_CHANGE);
    if (deviceID == 0) {
        ma_log_postf(ma_device_get_log((ma_device*)pDeviceEx), MA_LOG_LEVEL_ERROR, "Failed to open SDL2 device.");
        return MA_FAILED_TO_OPEN_BACKEND_DEVICE;
    }

    /* Remember the SDL device ID for the direction we just opened so start/stop/uninit can find it. */
    if (pConfig->deviceType == ma_device_type_playback) {
        pDeviceEx->sdl.deviceIDPlayback = deviceID;
    } else {
        pDeviceEx->sdl.deviceIDCapture = deviceID;
    }

    /* The descriptor needs to be updated with our actual settings. MA_SDL_AUDIO_ALLOW_ANY_CHANGE means SDL may have changed any of them. */
    pDescriptor->format = ma_format_from_sdl(obtainedSpec.format);
    pDescriptor->channels = obtainedSpec.channels;
    pDescriptor->sampleRate = (ma_uint32)obtainedSpec.freq;
    ma_channel_map_init_standard(ma_standard_channel_map_default, pDescriptor->channelMap, ma_countof(pDescriptor->channelMap), pDescriptor->channels);
    pDescriptor->periodSizeInFrames = obtainedSpec.samples;
    pDescriptor->periodCount = 1; /* SDL doesn't use the notion of period counts, so just set to 1. */

    return MA_SUCCESS;
}
|
||||
|
||||
/*
Initializes the device. Opens the capture side first (when requested) followed by the
playback side, so a duplex device can unwind the capture side if playback fails.
SDL has no loopback support, so ma_device_type_loopback is rejected outright.
*/
static ma_result ma_device_init__sdl(ma_device* pDevice, const ma_device_config* pConfig, ma_device_descriptor* pDescriptorPlayback, ma_device_descriptor* pDescriptorCapture)
{
    ma_device_ex* pDeviceEx = (ma_device_ex*)pDevice;
    ma_context_ex* pContextEx = (ma_context_ex*)pDevice->pContext;
    ma_bool32 wantsCapture;
    ma_bool32 wantsPlayback;
    ma_result result;

    MA_ASSERT(pDevice != NULL);

    /* SDL does not support loopback mode, so must return MA_DEVICE_TYPE_NOT_SUPPORTED if it's requested. */
    if (pConfig->deviceType == ma_device_type_loopback) {
        return MA_DEVICE_TYPE_NOT_SUPPORTED;
    }

    wantsCapture  = (pConfig->deviceType == ma_device_type_capture  || pConfig->deviceType == ma_device_type_duplex);
    wantsPlayback = (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex);

    if (wantsCapture) {
        result = ma_device_init_internal__sdl(pDeviceEx, pConfig, pDescriptorCapture);
        if (result != MA_SUCCESS) {
            return result;
        }
    }

    if (wantsPlayback) {
        result = ma_device_init_internal__sdl(pDeviceEx, pConfig, pDescriptorPlayback);
        if (result != MA_SUCCESS) {
            /* In duplex mode the capture side is already open — don't leak it. */
            if (pConfig->deviceType == ma_device_type_duplex) {
                ((MA_PFN_SDL_CloseAudioDevice)pContextEx->sdl.SDL_CloseAudioDevice)(pDeviceEx->sdl.deviceIDCapture);
            }
            return result;
        }
    }

    return MA_SUCCESS;
}
|
||||
|
||||
/*
Uninitializes the device by closing the SDL device(s) that were opened in
ma_device_init_internal__sdl(). Both sides are closed for a duplex device.

BUG FIX: the original closed `deviceIDCapture` in the playback branch as well, which
leaked the playback device and double-closed the capture device in duplex mode. The
playback branch must close `deviceIDPlayback`, matching ma_device_start__sdl() and
ma_device_stop__sdl().
*/
static ma_result ma_device_uninit__sdl(ma_device* pDevice)
{
    ma_device_ex* pDeviceEx = (ma_device_ex*)pDevice;
    ma_context_ex* pContextEx = (ma_context_ex*)pDevice->pContext;

    MA_ASSERT(pDevice != NULL);

    if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
        ((MA_PFN_SDL_CloseAudioDevice)pContextEx->sdl.SDL_CloseAudioDevice)(pDeviceEx->sdl.deviceIDCapture);
    }

    if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
        ((MA_PFN_SDL_CloseAudioDevice)pContextEx->sdl.SDL_CloseAudioDevice)(pDeviceEx->sdl.deviceIDPlayback);
    }

    return MA_SUCCESS;
}
|
||||
|
||||
/*
Starts the device. SDL devices begin in a paused state, so "starting" is simply
unpausing (pause_on = 0) each SDL device that belongs to this miniaudio device.
*/
static ma_result ma_device_start__sdl(ma_device* pDevice)
{
    ma_device_ex* pDeviceEx = (ma_device_ex*)pDevice;
    ma_context_ex* pContextEx = (ma_context_ex*)pDevice->pContext;
    MA_PFN_SDL_PauseAudioDevice pfnPauseAudioDevice;

    MA_ASSERT(pDevice != NULL);

    pfnPauseAudioDevice = (MA_PFN_SDL_PauseAudioDevice)pContextEx->sdl.SDL_PauseAudioDevice;

    if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
        pfnPauseAudioDevice(pDeviceEx->sdl.deviceIDCapture, 0);
    }

    if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
        pfnPauseAudioDevice(pDeviceEx->sdl.deviceIDPlayback, 0);
    }

    return MA_SUCCESS;
}
|
||||
|
||||
/*
Stops the device by pausing (pause_on = 1) each SDL device that belongs to this
miniaudio device — the mirror image of ma_device_start__sdl().
*/
static ma_result ma_device_stop__sdl(ma_device* pDevice)
{
    ma_device_ex* pDeviceEx = (ma_device_ex*)pDevice;
    ma_context_ex* pContextEx = (ma_context_ex*)pDevice->pContext;
    MA_PFN_SDL_PauseAudioDevice pfnPauseAudioDevice;

    MA_ASSERT(pDevice != NULL);

    pfnPauseAudioDevice = (MA_PFN_SDL_PauseAudioDevice)pContextEx->sdl.SDL_PauseAudioDevice;

    if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
        pfnPauseAudioDevice(pDeviceEx->sdl.deviceIDCapture, 1);
    }

    if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
        pfnPauseAudioDevice(pDeviceEx->sdl.deviceIDPlayback, 1);
    }

    return MA_SUCCESS;
}
|
||||
|
||||
/*
Uninitializes the SDL backend: shuts down SDL's audio subsystem and then releases the
handle to the SDL shared object that was opened in ma_context_init__sdl().
*/
static ma_result ma_context_uninit__sdl(ma_context* pContext)
{
    ma_context_ex* pContextEx = (ma_context_ex*)pContext;

    MA_ASSERT(pContext != NULL);

    ((MA_PFN_SDL_QuitSubSystem)pContextEx->sdl.SDL_QuitSubSystem)(MA_SDL_INIT_AUDIO);

    /* Close the handle to the SDL shared object last. NOTE(review): with MA_NO_RUNTIME_LINKING, hSDL was never opened — assumed NULL-safe; confirm ma_dlclose() semantics. */
    ma_dlclose(ma_context_get_log(pContext), pContextEx->sdl.hSDL);
    pContextEx->sdl.hSDL = NULL;

    return MA_SUCCESS;
}
|
||||
|
||||
/*
Initializes the SDL backend. With runtime linking, loads the SDL2 shared object and
resolves the needed function pointers; with MA_NO_RUNTIME_LINKING the addresses come
straight from the linked SDL headers. It then initializes SDL's audio subsystem and
fills out pCallbacks so miniaudio can drive this backend from here on.

Returns MA_NO_BACKEND if SDL2 can't be loaded (miniaudio then tries the next backend),
or MA_ERROR if SDL_InitSubSystem() fails.
*/
static ma_result ma_context_init__sdl(ma_context* pContext, const ma_context_config* pConfig, ma_backend_callbacks* pCallbacks)
{
    ma_context_ex* pContextEx = (ma_context_ex*)pContext;
    int resultSDL;

#ifndef MA_NO_RUNTIME_LINKING
    /* We'll use a list of possible shared object names for easier extensibility. */
    size_t iName;
    const char* pSDLNames[] = {
#if defined(_WIN32)
        "SDL2.dll"
#elif defined(__APPLE__)
        "SDL2.framework/SDL2"
#else
        "libSDL2-2.0.so.0"
#endif
    };

    MA_ASSERT(pContext != NULL);

    (void)pConfig;

    /* Check if we have SDL2 installed somewhere. If not it's not usable and we need to abort. */
    for (iName = 0; iName < ma_countof(pSDLNames); iName += 1) {
        pContextEx->sdl.hSDL = ma_dlopen(ma_context_get_log(pContext), pSDLNames[iName]);
        if (pContextEx->sdl.hSDL != NULL) {
            break;
        }
    }

    if (pContextEx->sdl.hSDL == NULL) {
        return MA_NO_BACKEND; /* SDL2 could not be loaded. */
    }

    /* Now that we have the handle to the shared object we can go ahead and load some function pointers. */
    pContextEx->sdl.SDL_InitSubSystem = ma_dlsym(ma_context_get_log(pContext), pContextEx->sdl.hSDL, "SDL_InitSubSystem");
    pContextEx->sdl.SDL_QuitSubSystem = ma_dlsym(ma_context_get_log(pContext), pContextEx->sdl.hSDL, "SDL_QuitSubSystem");
    pContextEx->sdl.SDL_GetNumAudioDevices = ma_dlsym(ma_context_get_log(pContext), pContextEx->sdl.hSDL, "SDL_GetNumAudioDevices");
    pContextEx->sdl.SDL_GetAudioDeviceName = ma_dlsym(ma_context_get_log(pContext), pContextEx->sdl.hSDL, "SDL_GetAudioDeviceName");
    pContextEx->sdl.SDL_CloseAudioDevice = ma_dlsym(ma_context_get_log(pContext), pContextEx->sdl.hSDL, "SDL_CloseAudioDevice");
    pContextEx->sdl.SDL_OpenAudioDevice = ma_dlsym(ma_context_get_log(pContext), pContextEx->sdl.hSDL, "SDL_OpenAudioDevice");
    pContextEx->sdl.SDL_PauseAudioDevice = ma_dlsym(ma_context_get_log(pContext), pContextEx->sdl.hSDL, "SDL_PauseAudioDevice");
#else
    /* Compile-time linking: take the addresses directly from the SDL headers. */
    pContextEx->sdl.SDL_InitSubSystem = (ma_proc)SDL_InitSubSystem;
    pContextEx->sdl.SDL_QuitSubSystem = (ma_proc)SDL_QuitSubSystem;
    pContextEx->sdl.SDL_GetNumAudioDevices = (ma_proc)SDL_GetNumAudioDevices;
    pContextEx->sdl.SDL_GetAudioDeviceName = (ma_proc)SDL_GetAudioDeviceName;
    pContextEx->sdl.SDL_CloseAudioDevice = (ma_proc)SDL_CloseAudioDevice;
    pContextEx->sdl.SDL_OpenAudioDevice = (ma_proc)SDL_OpenAudioDevice;
    pContextEx->sdl.SDL_PauseAudioDevice = (ma_proc)SDL_PauseAudioDevice;
#endif  /* MA_NO_RUNTIME_LINKING */

    resultSDL = ((MA_PFN_SDL_InitSubSystem)pContextEx->sdl.SDL_InitSubSystem)(MA_SDL_INIT_AUDIO);
    if (resultSDL != 0) {
        ma_dlclose(ma_context_get_log(pContext), pContextEx->sdl.hSDL);
        return MA_ERROR;
    }

    /*
    The last step is to make sure the callbacks are set properly in `pCallbacks`. Internally, miniaudio will copy these callbacks into the
    context object and then use them from then on for calling into our custom backend.
    */
    pCallbacks->onContextInit = ma_context_init__sdl;
    pCallbacks->onContextUninit = ma_context_uninit__sdl;
    pCallbacks->onContextEnumerateDevices = ma_context_enumerate_devices__sdl;
    pCallbacks->onContextGetDeviceInfo = ma_context_get_device_info__sdl;
    pCallbacks->onDeviceInit = ma_device_init__sdl;
    pCallbacks->onDeviceUninit = ma_device_uninit__sdl;
    pCallbacks->onDeviceStart = ma_device_start__sdl;
    pCallbacks->onDeviceStop = ma_device_stop__sdl;

    return MA_SUCCESS;
}
|
||||
#endif /* MA_HAS_SDL */
|
||||
|
||||
|
||||
/*
|
||||
This is our custom backend "loader". All this does is attempts to initialize our custom backends in the order they are listed. The first
|
||||
one to successfully initialize is the one that's chosen. In this example we're just listing them statically, but you can use whatever logic
|
||||
you want to handle backend selection.
|
||||
|
||||
This is used as the onContextInit() callback in the context config.
|
||||
*/
|
||||
/*
Generic loader for custom backends, used as the onContextInit() callback. Tries each
custom backend in turn and keeps the first one that initializes successfully; that
backend fills out the remaining callbacks itself. Returns the last failure code
(MA_NO_BACKEND when no custom backend is available) so miniaudio can skip us.
*/
static ma_result ma_context_init__custom_loader(ma_context* pContext, const ma_context_config* pConfig, ma_backend_callbacks* pCallbacks)
{
    ma_result result = MA_NO_BACKEND;

    /* Keep the compiler quiet about unused parameters when every custom backend is compiled out. */
    (void)pContext;
    (void)pCallbacks;

    /* SDL. */
#if !defined(MA_NO_SDL)
    if (result != MA_SUCCESS) {
        result = ma_context_init__sdl(pContext, pConfig, pCallbacks);
    }
#endif

    /* ... plug in any other custom backends here ... */

    return result;
}
|
||||
|
||||
|
||||
/*
|
||||
Main program starts here.
|
||||
*/
|
||||
#define DEVICE_FORMAT ma_format_f32
|
||||
#define DEVICE_CHANNELS 2
|
||||
#define DEVICE_SAMPLE_RATE 48000
|
||||
|
||||
/*
Data callback for the demo device. In playback mode it renders the sine wave stored in
pDevice->pUserData; in duplex mode it echoes the captured input straight to the output.
*/
void data_callback(ma_device* pDevice, void* pOutput, const void* pInput, ma_uint32 frameCount)
{
    MA_ASSERT(pDevice->playback.channels == DEVICE_CHANNELS);

    if (pDevice->type == ma_device_type_playback) {
        ma_waveform* pSineWave = (ma_waveform*)pDevice->pUserData;
        MA_ASSERT(pSineWave != NULL);

        ma_waveform_read_pcm_frames(pSineWave, pOutput, frameCount, NULL);
    } else if (pDevice->type == ma_device_type_duplex) {
        /* Pass-through: capture and playback sides share the same format in this example. */
        ma_copy_pcm_frames(pOutput, pInput, frameCount, pDevice->playback.format, pDevice->playback.channels);
    }
}
|
||||
|
||||
int main(int argc, char** argv)
|
||||
{
|
||||
ma_result result;
|
||||
ma_context_config contextConfig;
|
||||
ma_context_ex context;
|
||||
ma_device_config deviceConfig;
|
||||
ma_device_ex device;
|
||||
ma_waveform_config sineWaveConfig;
|
||||
ma_waveform sineWave;
|
||||
|
||||
/*
|
||||
We're just using ma_backend_custom in this example for demonstration purposes, but a more realistic use case would probably want to include
|
||||
other backends as well for robustness.
|
||||
*/
|
||||
ma_backend backends[] = {
|
||||
ma_backend_custom
|
||||
};
|
||||
|
||||
/*
|
||||
To implement a custom backend you need to implement the callbacks in the "custom" member of the context config. The only mandatory
|
||||
callback required at this point is the onContextInit() callback. If you do not set the other callbacks, you must set them in
|
||||
onContextInit() by setting them on the `pCallbacks` parameter.
|
||||
|
||||
The way we're doing it in this example enables us to easily plug in multiple custom backends. What we do is set the onContextInit()
|
||||
callback to a generic "loader" function (ma_context_init__custom_loader() in this example), which then calls out to backend-specific
|
||||
context initialization routines, one of which will be for SDL. That way, if for example we wanted to add support for another backend,
|
||||
we don't need to touch this part of the code. Instead we add logic to ma_context_init__custom_loader() to choose the most appropriate
|
||||
custom backend. That will then fill out the other callbacks appropriately.
|
||||
*/
|
||||
contextConfig = ma_context_config_init();
|
||||
contextConfig.custom.onContextInit = ma_context_init__custom_loader;
|
||||
|
||||
result = ma_context_init(backends, sizeof(backends)/sizeof(backends[0]), &contextConfig, (ma_context*)&context);
|
||||
if (result != MA_SUCCESS) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
/* In playback mode we're just going to play a sine wave. */
|
||||
sineWaveConfig = ma_waveform_config_init(DEVICE_FORMAT, DEVICE_CHANNELS, DEVICE_SAMPLE_RATE, ma_waveform_type_sine, 0.2, 220);
|
||||
ma_waveform_init(&sineWaveConfig, &sineWave);
|
||||
|
||||
/* The device is created exactly as per normal. */
|
||||
deviceConfig = ma_device_config_init(ma_device_type_playback);
|
||||
deviceConfig.playback.format = DEVICE_FORMAT;
|
||||
deviceConfig.playback.channels = DEVICE_CHANNELS;
|
||||
deviceConfig.capture.format = DEVICE_FORMAT;
|
||||
deviceConfig.capture.channels = DEVICE_CHANNELS;
|
||||
deviceConfig.sampleRate = DEVICE_SAMPLE_RATE;
|
||||
deviceConfig.dataCallback = data_callback;
|
||||
deviceConfig.pUserData = &sineWave;
|
||||
|
||||
result = ma_device_init((ma_context*)&context, &deviceConfig, (ma_device*)&device);
|
||||
if (result != MA_SUCCESS) {
|
||||
ma_context_uninit((ma_context*)&context);
|
||||
return -1;
|
||||
}
|
||||
|
||||
|
||||
printf("Device Name: %s\n", ((ma_device*)&device)->playback.name);
|
||||
|
||||
if (ma_device_start((ma_device*)&device) != MA_SUCCESS) {
|
||||
ma_device_uninit((ma_device*)&device);
|
||||
ma_context_uninit((ma_context*)&context);
|
||||
return -5;
|
||||
}
|
||||
|
||||
#ifdef __EMSCRIPTEN__
|
||||
emscripten_set_main_loop(main_loop__em, 0, 1);
|
||||
#else
|
||||
printf("Press Enter to quit...\n");
|
||||
getchar();
|
||||
#endif
|
||||
|
||||
ma_device_uninit((ma_device*)&device);
|
||||
ma_context_uninit((ma_context*)&context);
|
||||
|
||||
(void)argc;
|
||||
(void)argv;
|
||||
|
||||
return 0;
|
||||
}
|
||||
270
thirdparty/miniaudio/examples/custom_decoder.c
vendored
Normal file
270
thirdparty/miniaudio/examples/custom_decoder.c
vendored
Normal file
@@ -0,0 +1,270 @@
|
||||
/*
|
||||
Demonstrates how to implement a custom decoder.
|
||||
|
||||
This example implements two custom decoders:
|
||||
|
||||
* Vorbis via libvorbis
|
||||
* Opus via libopus
|
||||
|
||||
A custom decoder must implement a data source. In this example, the libvorbis data source is called
|
||||
`ma_libvorbis` and the Opus data source is called `ma_libopus`. These two objects are compatible
|
||||
with the `ma_data_source` APIs and can be taken straight from this example and used in real code.
|
||||
|
||||
The custom decoding data sources (`ma_libvorbis` and `ma_libopus` in this example) are connected to
|
||||
the decoder via the decoder config (`ma_decoder_config`). You need to implement a vtable for each
|
||||
of your custom decoders. See `ma_decoding_backend_vtable` for the functions you need to implement.
|
||||
The `onInitFile`, `onInitFileW` and `onInitMemory` functions are optional.
|
||||
*/
|
||||
#define MA_NO_VORBIS /* Disable the built-in Vorbis decoder to ensure the libvorbis decoder is picked. */
|
||||
#define MA_NO_OPUS /* Disable the (not yet implemented) built-in Opus decoder to ensure the libopus decoder is picked. */
|
||||
#define MINIAUDIO_IMPLEMENTATION
|
||||
#include "../miniaudio.h"
|
||||
#include "../extras/miniaudio_libvorbis.h"
|
||||
#include "../extras/miniaudio_libopus.h"
|
||||
|
||||
#include <stdio.h>
|
||||
|
||||
/*
Stream-based init for the libvorbis decoding backend. Allocates the backend object on the heap;
ownership is transferred to the caller through ppBackend on success.
*/
static ma_result ma_decoding_backend_init__libvorbis(void* pUserData, ma_read_proc onRead, ma_seek_proc onSeek, ma_tell_proc onTell, void* pReadSeekTellUserData, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_data_source** ppBackend)
{
    ma_libvorbis* pDecoder;
    ma_result initResult;

    (void)pUserData;    /* Not used by this backend. */

    pDecoder = (ma_libvorbis*)ma_malloc(sizeof(*pDecoder), pAllocationCallbacks);
    if (pDecoder == NULL) {
        return MA_OUT_OF_MEMORY;
    }

    initResult = ma_libvorbis_init(onRead, onSeek, onTell, pReadSeekTellUserData, pConfig, pAllocationCallbacks, pDecoder);
    if (initResult == MA_SUCCESS) {
        *ppBackend = pDecoder;
    } else {
        ma_free(pDecoder, pAllocationCallbacks);    /* Init failed; don't leak the allocation. */
    }

    return initResult;
}
|
||||
|
||||
/*
File-path init for the libvorbis decoding backend. Same ownership contract as the stream-based
init: the heap-allocated backend is returned through ppBackend on success.
*/
static ma_result ma_decoding_backend_init_file__libvorbis(void* pUserData, const char* pFilePath, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_data_source** ppBackend)
{
    ma_libvorbis* pDecoder;
    ma_result initResult;

    (void)pUserData;    /* Not used by this backend. */

    pDecoder = (ma_libvorbis*)ma_malloc(sizeof(*pDecoder), pAllocationCallbacks);
    if (pDecoder == NULL) {
        return MA_OUT_OF_MEMORY;
    }

    initResult = ma_libvorbis_init_file(pFilePath, pConfig, pAllocationCallbacks, pDecoder);
    if (initResult == MA_SUCCESS) {
        *ppBackend = pDecoder;
    } else {
        ma_free(pDecoder, pAllocationCallbacks);    /* Init failed; don't leak the allocation. */
    }

    return initResult;
}
|
||||
|
||||
/* Tears down a libvorbis backend and releases the memory allocated by the init routines. */
static void ma_decoding_backend_uninit__libvorbis(void* pUserData, ma_data_source* pBackend, const ma_allocation_callbacks* pAllocationCallbacks)
{
    ma_libvorbis* pDecoder = (ma_libvorbis*)pBackend;

    (void)pUserData;    /* Not used by this backend. */

    ma_libvorbis_uninit(pDecoder, pAllocationCallbacks);
    ma_free(pDecoder, pAllocationCallbacks);
}
|
||||
|
||||
/* Retrieves only the channel map; format/channels/rate outputs are deliberately passed as NULL. */
static ma_result ma_decoding_backend_get_channel_map__libvorbis(void* pUserData, ma_data_source* pBackend, ma_channel* pChannelMap, size_t channelMapCap)
{
    (void)pUserData;    /* Not used by this backend. */

    return ma_libvorbis_get_data_format((ma_libvorbis*)pBackend, NULL, NULL, NULL, pChannelMap, channelMapCap);
}
|
||||
|
||||
/* Vtable hooking the libvorbis backend into the decoder. Unset entries fall back to onInit(). */
static ma_decoding_backend_vtable g_ma_decoding_backend_vtable_libvorbis =
{
    ma_decoding_backend_init__libvorbis,        /* onInit() */
    ma_decoding_backend_init_file__libvorbis,   /* onInitFile() */
    NULL, /* onInitFileW() */
    NULL, /* onInitMemory() */
    ma_decoding_backend_uninit__libvorbis       /* onUninit() */
};
|
||||
|
||||
|
||||
|
||||
/*
Stream-based init for the libopus decoding backend. Allocates the backend object on the heap;
ownership is transferred to the caller through ppBackend on success.
*/
static ma_result ma_decoding_backend_init__libopus(void* pUserData, ma_read_proc onRead, ma_seek_proc onSeek, ma_tell_proc onTell, void* pReadSeekTellUserData, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_data_source** ppBackend)
{
    ma_libopus* pDecoder;
    ma_result initResult;

    (void)pUserData;    /* Not used by this backend. */

    pDecoder = (ma_libopus*)ma_malloc(sizeof(*pDecoder), pAllocationCallbacks);
    if (pDecoder == NULL) {
        return MA_OUT_OF_MEMORY;
    }

    initResult = ma_libopus_init(onRead, onSeek, onTell, pReadSeekTellUserData, pConfig, pAllocationCallbacks, pDecoder);
    if (initResult == MA_SUCCESS) {
        *ppBackend = pDecoder;
    } else {
        ma_free(pDecoder, pAllocationCallbacks);    /* Init failed; don't leak the allocation. */
    }

    return initResult;
}
|
||||
|
||||
/*
File-path init for the libopus decoding backend. Same ownership contract as the stream-based
init: the heap-allocated backend is returned through ppBackend on success.
*/
static ma_result ma_decoding_backend_init_file__libopus(void* pUserData, const char* pFilePath, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_data_source** ppBackend)
{
    ma_libopus* pDecoder;
    ma_result initResult;

    (void)pUserData;    /* Not used by this backend. */

    pDecoder = (ma_libopus*)ma_malloc(sizeof(*pDecoder), pAllocationCallbacks);
    if (pDecoder == NULL) {
        return MA_OUT_OF_MEMORY;
    }

    initResult = ma_libopus_init_file(pFilePath, pConfig, pAllocationCallbacks, pDecoder);
    if (initResult == MA_SUCCESS) {
        *ppBackend = pDecoder;
    } else {
        ma_free(pDecoder, pAllocationCallbacks);    /* Init failed; don't leak the allocation. */
    }

    return initResult;
}
|
||||
|
||||
/* Tears down a libopus backend and releases the memory allocated by the init routines. */
static void ma_decoding_backend_uninit__libopus(void* pUserData, ma_data_source* pBackend, const ma_allocation_callbacks* pAllocationCallbacks)
{
    ma_libopus* pDecoder = (ma_libopus*)pBackend;

    (void)pUserData;    /* Not used by this backend. */

    ma_libopus_uninit(pDecoder, pAllocationCallbacks);
    ma_free(pDecoder, pAllocationCallbacks);
}
|
||||
|
||||
/* Retrieves only the channel map; format/channels/rate outputs are deliberately passed as NULL. */
static ma_result ma_decoding_backend_get_channel_map__libopus(void* pUserData, ma_data_source* pBackend, ma_channel* pChannelMap, size_t channelMapCap)
{
    (void)pUserData;    /* Not used by this backend. */

    return ma_libopus_get_data_format((ma_libopus*)pBackend, NULL, NULL, NULL, pChannelMap, channelMapCap);
}
|
||||
|
||||
/* Vtable hooking the libopus backend into the decoder. Unset entries fall back to onInit(). */
static ma_decoding_backend_vtable g_ma_decoding_backend_vtable_libopus =
{
    ma_decoding_backend_init__libopus,          /* onInit() */
    ma_decoding_backend_init_file__libopus,     /* onInitFile() */
    NULL, /* onInitFileW() */
    NULL, /* onInitMemory() */
    ma_decoding_backend_uninit__libopus         /* onUninit() */
};
|
||||
|
||||
|
||||
|
||||
|
||||
/*
Playback data callback. Pulls frames from the data source stored in pUserData (the decoder set up
in main()) straight into the output buffer.
*/
void data_callback(ma_device* pDevice, void* pOutput, const void* pInput, ma_uint32 frameCount)
{
    ma_data_source* pSource = (ma_data_source*)pDevice->pUserData;

    (void)pInput;   /* Playback-only device; no capture data. */

    if (pSource == NULL) {
        return;
    }

    ma_data_source_read_pcm_frames(pSource, pOutput, frameCount, NULL);
}
|
||||
|
||||
/*
Entry point. Decodes the file given on the command line using the custom backends and plays it,
looping, until Enter is pressed.
*/
int main(int argc, char** argv)
{
    ma_result result;
    ma_decoder_config decoderConfig;
    ma_decoder decoder;
    ma_device_config deviceConfig;
    ma_device device;
    ma_format format;
    ma_uint32 channels;
    ma_uint32 sampleRate;

    /*
    Add your custom backend vtables here. The order in the array defines the order of priority. The
    vtables will be passed in via the decoder config.
    */
    ma_decoding_backend_vtable* pCustomBackendVTables[] =
    {
        &g_ma_decoding_backend_vtable_libvorbis,
        &g_ma_decoding_backend_vtable_libopus
    };


    if (argc < 2) {
        printf("No input file.\n");
        return -1;
    }


    /* Initialize the decoder. */
    decoderConfig = ma_decoder_config_init_default();
    decoderConfig.pCustomBackendUserData = NULL;  /* Passed to the pUserData parameter of each vtable function. Unused here because the backends allocate their own objects in their init routines. */
    decoderConfig.ppCustomBackendVTables = pCustomBackendVTables;
    decoderConfig.customBackendCount = sizeof(pCustomBackendVTables) / sizeof(pCustomBackendVTables[0]);

    result = ma_decoder_init_file(argv[1], &decoderConfig, &decoder);
    if (result != MA_SUCCESS) {
        printf("Failed to initialize decoder.");
        return -1;
    }

    /* Loop forever; ma_decoder is a data source so the generic data source API applies. */
    ma_data_source_set_looping(&decoder, MA_TRUE);


    /* Initialize the device using the decoder's native output format to avoid conversion. */
    result = ma_data_source_get_data_format(&decoder, &format, &channels, &sampleRate, NULL, 0);
    if (result != MA_SUCCESS) {
        printf("Failed to retrieve decoder data format.");
        ma_decoder_uninit(&decoder);
        return -1;
    }

    deviceConfig = ma_device_config_init(ma_device_type_playback);
    deviceConfig.playback.format = format;
    deviceConfig.playback.channels = channels;
    deviceConfig.sampleRate = sampleRate;
    deviceConfig.dataCallback = data_callback;
    deviceConfig.pUserData = &decoder;  /* The data callback reads from this decoder. */

    if (ma_device_init(NULL, &deviceConfig, &device) != MA_SUCCESS) {
        printf("Failed to open playback device.\n");
        ma_decoder_uninit(&decoder);
        return -1;
    }

    if (ma_device_start(&device) != MA_SUCCESS) {
        printf("Failed to start playback device.\n");
        ma_device_uninit(&device);
        ma_decoder_uninit(&decoder);
        return -1;
    }

    printf("Press Enter to quit...");
    getchar();

    /* Device first so the data callback stops before the decoder is torn down. */
    ma_device_uninit(&device);
    ma_decoder_uninit(&decoder);

    return 0;
}
|
||||
230
thirdparty/miniaudio/examples/custom_decoder_engine.c
vendored
Normal file
230
thirdparty/miniaudio/examples/custom_decoder_engine.c
vendored
Normal file
@@ -0,0 +1,230 @@
|
||||
/*
|
||||
Demonstrates how to implement a custom decoder and use it with the high level API.
|
||||
|
||||
This is the same as the custom_decoder example, only it's used with the high level engine API
|
||||
rather than the low level decoding API. You can use this to add support for Opus to your games, for
|
||||
example (via libopus).
|
||||
*/
|
||||
#define MA_NO_VORBIS /* Disable the built-in Vorbis decoder to ensure the libvorbis decoder is picked. */
|
||||
#define MA_NO_OPUS /* Disable the (not yet implemented) built-in Opus decoder to ensure the libopus decoder is picked. */
|
||||
#define MINIAUDIO_IMPLEMENTATION
|
||||
#include "../miniaudio.h"
|
||||
#include "../extras/miniaudio_libvorbis.h"
|
||||
#include "../extras/miniaudio_libopus.h"
|
||||
|
||||
#include <stdio.h>
|
||||
|
||||
/*
Stream-based init for the libvorbis decoding backend. The backend object is heap-allocated and
handed to the caller through ppBackend on success.
*/
static ma_result ma_decoding_backend_init__libvorbis(void* pUserData, ma_read_proc onRead, ma_seek_proc onSeek, ma_tell_proc onTell, void* pReadSeekTellUserData, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_data_source** ppBackend)
{
    ma_libvorbis* pDecoder;
    ma_result initResult;

    (void)pUserData;    /* Not used by this backend. */

    pDecoder = (ma_libvorbis*)ma_malloc(sizeof(*pDecoder), pAllocationCallbacks);
    if (pDecoder == NULL) {
        return MA_OUT_OF_MEMORY;
    }

    initResult = ma_libvorbis_init(onRead, onSeek, onTell, pReadSeekTellUserData, pConfig, pAllocationCallbacks, pDecoder);
    if (initResult == MA_SUCCESS) {
        *ppBackend = pDecoder;
    } else {
        ma_free(pDecoder, pAllocationCallbacks);    /* Init failed; don't leak the allocation. */
    }

    return initResult;
}
|
||||
|
||||
/*
File-path init for the libvorbis decoding backend. Same ownership contract as the stream-based
init: the heap-allocated backend is returned through ppBackend on success.
*/
static ma_result ma_decoding_backend_init_file__libvorbis(void* pUserData, const char* pFilePath, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_data_source** ppBackend)
{
    ma_libvorbis* pDecoder;
    ma_result initResult;

    (void)pUserData;    /* Not used by this backend. */

    pDecoder = (ma_libvorbis*)ma_malloc(sizeof(*pDecoder), pAllocationCallbacks);
    if (pDecoder == NULL) {
        return MA_OUT_OF_MEMORY;
    }

    initResult = ma_libvorbis_init_file(pFilePath, pConfig, pAllocationCallbacks, pDecoder);
    if (initResult == MA_SUCCESS) {
        *ppBackend = pDecoder;
    } else {
        ma_free(pDecoder, pAllocationCallbacks);    /* Init failed; don't leak the allocation. */
    }

    return initResult;
}
|
||||
|
||||
/* Tears down a libvorbis backend and releases the memory allocated by the init routines. */
static void ma_decoding_backend_uninit__libvorbis(void* pUserData, ma_data_source* pBackend, const ma_allocation_callbacks* pAllocationCallbacks)
{
    ma_libvorbis* pDecoder = (ma_libvorbis*)pBackend;

    (void)pUserData;    /* Not used by this backend. */

    ma_libvorbis_uninit(pDecoder, pAllocationCallbacks);
    ma_free(pDecoder, pAllocationCallbacks);
}
|
||||
|
||||
/* Retrieves only the channel map; format/channels/rate outputs are deliberately passed as NULL. */
static ma_result ma_decoding_backend_get_channel_map__libvorbis(void* pUserData, ma_data_source* pBackend, ma_channel* pChannelMap, size_t channelMapCap)
{
    (void)pUserData;    /* Not used by this backend. */

    return ma_libvorbis_get_data_format((ma_libvorbis*)pBackend, NULL, NULL, NULL, pChannelMap, channelMapCap);
}
|
||||
|
||||
/* Vtable hooking the libvorbis backend into the resource manager. Unset entries fall back to onInit(). */
static ma_decoding_backend_vtable g_ma_decoding_backend_vtable_libvorbis =
{
    ma_decoding_backend_init__libvorbis,        /* onInit() */
    ma_decoding_backend_init_file__libvorbis,   /* onInitFile() */
    NULL, /* onInitFileW() */
    NULL, /* onInitMemory() */
    ma_decoding_backend_uninit__libvorbis       /* onUninit() */
};
|
||||
|
||||
|
||||
|
||||
/*
Stream-based init for the libopus decoding backend. The backend object is heap-allocated and
handed to the caller through ppBackend on success.
*/
static ma_result ma_decoding_backend_init__libopus(void* pUserData, ma_read_proc onRead, ma_seek_proc onSeek, ma_tell_proc onTell, void* pReadSeekTellUserData, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_data_source** ppBackend)
{
    ma_libopus* pDecoder;
    ma_result initResult;

    (void)pUserData;    /* Not used by this backend. */

    pDecoder = (ma_libopus*)ma_malloc(sizeof(*pDecoder), pAllocationCallbacks);
    if (pDecoder == NULL) {
        return MA_OUT_OF_MEMORY;
    }

    initResult = ma_libopus_init(onRead, onSeek, onTell, pReadSeekTellUserData, pConfig, pAllocationCallbacks, pDecoder);
    if (initResult == MA_SUCCESS) {
        *ppBackend = pDecoder;
    } else {
        ma_free(pDecoder, pAllocationCallbacks);    /* Init failed; don't leak the allocation. */
    }

    return initResult;
}
|
||||
|
||||
/*
File-path init for the libopus decoding backend. Same ownership contract as the stream-based
init: the heap-allocated backend is returned through ppBackend on success.
*/
static ma_result ma_decoding_backend_init_file__libopus(void* pUserData, const char* pFilePath, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_data_source** ppBackend)
{
    ma_libopus* pDecoder;
    ma_result initResult;

    (void)pUserData;    /* Not used by this backend. */

    pDecoder = (ma_libopus*)ma_malloc(sizeof(*pDecoder), pAllocationCallbacks);
    if (pDecoder == NULL) {
        return MA_OUT_OF_MEMORY;
    }

    initResult = ma_libopus_init_file(pFilePath, pConfig, pAllocationCallbacks, pDecoder);
    if (initResult == MA_SUCCESS) {
        *ppBackend = pDecoder;
    } else {
        ma_free(pDecoder, pAllocationCallbacks);    /* Init failed; don't leak the allocation. */
    }

    return initResult;
}
|
||||
|
||||
/* Tears down a libopus backend and releases the memory allocated by the init routines. */
static void ma_decoding_backend_uninit__libopus(void* pUserData, ma_data_source* pBackend, const ma_allocation_callbacks* pAllocationCallbacks)
{
    ma_libopus* pDecoder = (ma_libopus*)pBackend;

    (void)pUserData;    /* Not used by this backend. */

    ma_libopus_uninit(pDecoder, pAllocationCallbacks);
    ma_free(pDecoder, pAllocationCallbacks);
}
|
||||
|
||||
/* Retrieves only the channel map; format/channels/rate outputs are deliberately passed as NULL. */
static ma_result ma_decoding_backend_get_channel_map__libopus(void* pUserData, ma_data_source* pBackend, ma_channel* pChannelMap, size_t channelMapCap)
{
    (void)pUserData;    /* Not used by this backend. */

    return ma_libopus_get_data_format((ma_libopus*)pBackend, NULL, NULL, NULL, pChannelMap, channelMapCap);
}
|
||||
|
||||
/* Vtable hooking the libopus backend into the resource manager. Unset entries fall back to onInit(). */
static ma_decoding_backend_vtable g_ma_decoding_backend_vtable_libopus =
{
    ma_decoding_backend_init__libopus,          /* onInit() */
    ma_decoding_backend_init_file__libopus,     /* onInitFile() */
    NULL, /* onInitFileW() */
    NULL, /* onInitMemory() */
    ma_decoding_backend_uninit__libopus         /* onUninit() */
};
|
||||
|
||||
|
||||
|
||||
int main(int argc, char** argv)
|
||||
{
|
||||
ma_result result;
|
||||
ma_resource_manager_config resourceManagerConfig;
|
||||
ma_resource_manager resourceManager;
|
||||
ma_engine_config engineConfig;
|
||||
ma_engine engine;
|
||||
|
||||
/*
|
||||
Add your custom backend vtables here. The order in the array defines the order of priority. The
|
||||
vtables will be passed in to the resource manager config.
|
||||
*/
|
||||
ma_decoding_backend_vtable* pCustomBackendVTables[] =
|
||||
{
|
||||
&g_ma_decoding_backend_vtable_libvorbis,
|
||||
&g_ma_decoding_backend_vtable_libopus
|
||||
};
|
||||
|
||||
|
||||
if (argc < 2) {
|
||||
printf("No input file.\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
|
||||
/* Using custom decoding backends requires a resource manager. */
|
||||
resourceManagerConfig = ma_resource_manager_config_init();
|
||||
resourceManagerConfig.ppCustomDecodingBackendVTables = pCustomBackendVTables;
|
||||
resourceManagerConfig.customDecodingBackendCount = sizeof(pCustomBackendVTables) / sizeof(pCustomBackendVTables[0]);
|
||||
resourceManagerConfig.pCustomDecodingBackendUserData = NULL; /* <-- This will be passed in to the pUserData parameter of each function in the decoding backend vtables. */
|
||||
|
||||
result = ma_resource_manager_init(&resourceManagerConfig, &resourceManager);
|
||||
if (result != MA_SUCCESS) {
|
||||
printf("Failed to initialize resource manager.");
|
||||
return -1;
|
||||
}
|
||||
|
||||
|
||||
/* Once we have a resource manager we can create the engine. */
|
||||
engineConfig = ma_engine_config_init();
|
||||
engineConfig.pResourceManager = &resourceManager;
|
||||
|
||||
result = ma_engine_init(&engineConfig, &engine);
|
||||
if (result != MA_SUCCESS) {
|
||||
printf("Failed to initialize engine.");
|
||||
return -1;
|
||||
}
|
||||
|
||||
|
||||
/* Now we can play our sound. */
|
||||
result = ma_engine_play_sound(&engine, argv[1], NULL);
|
||||
if (result != MA_SUCCESS) {
|
||||
printf("Failed to play sound.");
|
||||
return -1;
|
||||
}
|
||||
|
||||
|
||||
printf("Press Enter to quit...");
|
||||
getchar();
|
||||
|
||||
return 0;
|
||||
}
|
||||
159
thirdparty/miniaudio/examples/data_source_chaining.c
vendored
Normal file
159
thirdparty/miniaudio/examples/data_source_chaining.c
vendored
Normal file
@@ -0,0 +1,159 @@
|
||||
/*
|
||||
Demonstrates one way to chain together a number of data sources so they play back seamlessly
|
||||
without gaps.
|
||||
|
||||
This example uses the chaining system built into the `ma_data_source` API. It will take every sound
|
||||
passed onto the command line in order, and then loop back and start again. When looping a chain of
|
||||
data sources, you need only link the last data source back to the first one.
|
||||
|
||||
To play a chain of data sources, you first need to set up your chain. To set the data source that
|
||||
should be played after another, you have two options:
|
||||
|
||||
* Set a pointer to a specific data source
|
||||
* Set a callback that will fire when the next data source needs to be retrieved
|
||||
|
||||
The first option is good for simple scenarios. The second option is useful if you need to perform
|
||||
some action when the end of a sound is reached. This example will be using both.
|
||||
|
||||
When reading data from a chain, you always read from the head data source. Internally miniaudio
|
||||
will track a pointer to the data source in the chain that is currently playing. If you don't
|
||||
consistently read from the head data source this state will become inconsistent and things won't
|
||||
work correctly. When using a chain, this pointer needs to be reset if you need to play the
|
||||
chain again from the start:
|
||||
|
||||
```c
|
||||
ma_data_source_set_current(&headDataSource, &headDataSource);
|
||||
ma_data_source_seek_to_pcm_frame(&headDataSource, 0);
|
||||
```
|
||||
|
||||
The code above is setting the "current" data source in the chain to the head data source, thereby
|
||||
starting the chain from the start again. It is also seeking the head data source back to the start
|
||||
so that playback starts from the start as expected. You do not need to seek non-head items back to
|
||||
the start as miniaudio will do that for you internally.
|
||||
*/
|
||||
#define MA_EXPERIMENTAL__DATA_LOOPING_AND_CHAINING
|
||||
#define MINIAUDIO_IMPLEMENTATION
|
||||
#include "../miniaudio.h"
|
||||
|
||||
#include <stdio.h>
|
||||
|
||||
/*
|
||||
For simplicity, this example requires the device to use floating point samples.
|
||||
*/
|
||||
#define SAMPLE_FORMAT ma_format_f32
|
||||
#define CHANNEL_COUNT 2
|
||||
#define SAMPLE_RATE 48000
|
||||
|
||||
ma_uint32 g_decoderCount;
|
||||
ma_decoder* g_pDecoders;
|
||||
|
||||
static ma_data_source* next_callback_tail(ma_data_source* pDataSource)
{
    MA_ASSERT(g_decoderCount > 0); /* <-- We check for this in main() so should never happen. */

    /*
    The callback receives the data source that just finished, but we don't need it here. Silence
    the unused parameter warning which would otherwise fire (always, and unconditionally when
    asserts are compiled out).
    */
    (void)pDataSource;

    /*
    This will be fired when the last item in the chain has reached the end. In this example we want
    to loop back to the start, so we need only return a pointer back to the head.
    */
    return &g_pDecoders[0];
}
|
||||
|
||||
static void data_callback(ma_device* pDevice, void* pOutput, const void* pInput, ma_uint32 frameCount)
{
    /*
    We can just read from the first decoder and miniaudio will resolve the chain for us. Always
    read from the head data source (g_pDecoders[0]) so miniaudio's internal "current" pointer
    stays consistent. Looping of the whole chain is achieved by the tail's next-callback pointing
    back at the head (see next_callback_tail()), not by looping an individual data source.
    */
    ma_data_source_read_pcm_frames(&g_pDecoders[0], pOutput, frameCount, NULL);

    /* Unused in this example. */
    (void)pDevice;
    (void)pInput;
}
|
||||
|
||||
int main(int argc, char** argv)
|
||||
{
|
||||
ma_result result = MA_SUCCESS;
|
||||
ma_uint32 iDecoder;
|
||||
ma_decoder_config decoderConfig;
|
||||
ma_device_config deviceConfig;
|
||||
ma_device device;
|
||||
|
||||
if (argc < 2) {
|
||||
printf("No input files.\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
g_decoderCount = argc-1;
|
||||
g_pDecoders = (ma_decoder*)malloc(sizeof(*g_pDecoders) * g_decoderCount);
|
||||
|
||||
/* In this example, all decoders need to have the same output format. */
|
||||
decoderConfig = ma_decoder_config_init(SAMPLE_FORMAT, CHANNEL_COUNT, SAMPLE_RATE);
|
||||
for (iDecoder = 0; iDecoder < g_decoderCount; ++iDecoder) {
|
||||
result = ma_decoder_init_file(argv[1+iDecoder], &decoderConfig, &g_pDecoders[iDecoder]);
|
||||
if (result != MA_SUCCESS) {
|
||||
ma_uint32 iDecoder2;
|
||||
for (iDecoder2 = 0; iDecoder2 < iDecoder; ++iDecoder2) {
|
||||
ma_decoder_uninit(&g_pDecoders[iDecoder2]);
|
||||
}
|
||||
free(g_pDecoders);
|
||||
|
||||
printf("Failed to load %s.\n", argv[1+iDecoder]);
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
We're going to set up our decoders to run one after the other, but then have the last one loop back
|
||||
to the first one. For demonstration purposes we're going to use the callback method for the last
|
||||
data source.
|
||||
*/
|
||||
for (iDecoder = 0; iDecoder < g_decoderCount-1; iDecoder += 1) {
|
||||
ma_data_source_set_next(&g_pDecoders[iDecoder], &g_pDecoders[iDecoder+1]);
|
||||
}
|
||||
|
||||
/*
|
||||
For the last data source we'll loop back to the start, but for demonstration purposes we'll use a
|
||||
callback to determine the next data source in the chain.
|
||||
*/
|
||||
ma_data_source_set_next_callback(&g_pDecoders[g_decoderCount-1], next_callback_tail);
|
||||
|
||||
|
||||
/*
|
||||
The data source chain has been established so now we can get the device up and running so we
|
||||
can listen to it.
|
||||
*/
|
||||
deviceConfig = ma_device_config_init(ma_device_type_playback);
|
||||
deviceConfig.playback.format = SAMPLE_FORMAT;
|
||||
deviceConfig.playback.channels = CHANNEL_COUNT;
|
||||
deviceConfig.sampleRate = SAMPLE_RATE;
|
||||
deviceConfig.dataCallback = data_callback;
|
||||
deviceConfig.pUserData = NULL;
|
||||
|
||||
if (ma_device_init(NULL, &deviceConfig, &device) != MA_SUCCESS) {
|
||||
printf("Failed to open playback device.\n");
|
||||
result = -1;
|
||||
goto done_decoders;
|
||||
}
|
||||
|
||||
if (ma_device_start(&device) != MA_SUCCESS) {
|
||||
printf("Failed to start playback device.\n");
|
||||
result = -1;
|
||||
goto done;
|
||||
}
|
||||
|
||||
printf("Press Enter to quit...");
|
||||
getchar();
|
||||
|
||||
done:
|
||||
ma_device_uninit(&device);
|
||||
|
||||
done_decoders:
|
||||
for (iDecoder = 0; iDecoder < g_decoderCount; ++iDecoder) {
|
||||
ma_decoder_uninit(&g_pDecoders[iDecoder]);
|
||||
}
|
||||
free(g_pDecoders);
|
||||
|
||||
return 0;
|
||||
}
|
||||
148
thirdparty/miniaudio/examples/duplex_effect.c
vendored
Normal file
148
thirdparty/miniaudio/examples/duplex_effect.c
vendored
Normal file
@@ -0,0 +1,148 @@
|
||||
/*
|
||||
Demonstrates how to apply an effect to a duplex stream using the node graph system.
|
||||
|
||||
This example applies a vocoder effect to the input stream before outputting it. A custom node
|
||||
called `ma_vocoder_node` is used to achieve the effect which can be found in the extras folder in
|
||||
the miniaudio repository. The vocoder node uses https://github.com/blastbay/voclib to achieve the
|
||||
effect.
|
||||
*/
|
||||
#define MINIAUDIO_IMPLEMENTATION
|
||||
#include "../miniaudio.h"
|
||||
#include "../extras/nodes/ma_vocoder_node/ma_vocoder_node.c"
|
||||
|
||||
#include <stdio.h>
|
||||
|
||||
/* Fix: the DEVICE_FORMAT macro body contained a stray ';' which expanded into every use site. */
#define DEVICE_FORMAT   ma_format_f32  /* Must always be f32 for this example because the node graph system only works with this. */
#define DEVICE_CHANNELS 1              /* For this example, always set to 1. */
|
||||
|
||||
static ma_waveform g_sourceData; /* The underlying data source of the source node. */
|
||||
static ma_audio_buffer_ref g_exciteData; /* The underlying data source of the excite node. */
|
||||
static ma_data_source_node g_sourceNode; /* A data source node containing the source data we'll be sending through to the vocoder. This will be routed into the first bus of the vocoder node. */
|
||||
static ma_data_source_node g_exciteNode; /* A data source node containing the excite data we'll be sending through to the vocoder. This will be routed into the second bus of the vocoder node. */
|
||||
static ma_vocoder_node g_vocoderNode; /* The vocoder node. */
|
||||
static ma_node_graph g_nodeGraph;
|
||||
|
||||
/*
Duplex data callback: feeds the captured input into the node graph (as the vocoder's excite
signal) and pulls the processed result straight into the output buffer.
*/
void data_callback(ma_device* pDevice, void* pOutput, const void* pInput, ma_uint32 frameCount)
{
    MA_ASSERT(pDevice->capture.format == pDevice->playback.format);
    MA_ASSERT(pDevice->capture.channels == pDevice->playback.channels);

    /*
    The node graph system is a pulling style of API. At the lowest level of the chain will be a
    node acting as a data source for the purpose of delivering the initial audio data. In our case,
    the data source is our `pInput` buffer. We need to update the underlying data source so that it
    reads data from `pInput`.
    */
    ma_audio_buffer_ref_set_data(&g_exciteData, pInput, frameCount);

    /* With the source buffer configured we can now read directly from the node graph. */
    ma_node_graph_read_pcm_frames(&g_nodeGraph, pOutput, frameCount, NULL);
}
|
||||
|
||||
int main(int argc, char** argv)
|
||||
{
|
||||
ma_result result;
|
||||
ma_device_config deviceConfig;
|
||||
ma_device device;
|
||||
ma_node_graph_config nodeGraphConfig;
|
||||
ma_vocoder_node_config vocoderNodeConfig;
|
||||
ma_data_source_node_config sourceNodeConfig;
|
||||
ma_data_source_node_config exciteNodeConfig;
|
||||
ma_waveform_config waveformConfig;
|
||||
|
||||
deviceConfig = ma_device_config_init(ma_device_type_duplex);
|
||||
deviceConfig.capture.pDeviceID = NULL;
|
||||
deviceConfig.capture.format = DEVICE_FORMAT;
|
||||
deviceConfig.capture.channels = DEVICE_CHANNELS;
|
||||
deviceConfig.capture.shareMode = ma_share_mode_shared;
|
||||
deviceConfig.playback.pDeviceID = NULL;
|
||||
deviceConfig.playback.format = DEVICE_FORMAT;
|
||||
deviceConfig.playback.channels = DEVICE_CHANNELS;
|
||||
deviceConfig.dataCallback = data_callback;
|
||||
result = ma_device_init(NULL, &deviceConfig, &device);
|
||||
if (result != MA_SUCCESS) {
|
||||
return result;
|
||||
}
|
||||
|
||||
|
||||
/* Now we can setup our node graph. */
|
||||
nodeGraphConfig = ma_node_graph_config_init(device.capture.channels);
|
||||
|
||||
result = ma_node_graph_init(&nodeGraphConfig, NULL, &g_nodeGraph);
|
||||
if (result != MA_SUCCESS) {
|
||||
printf("Failed to initialize node graph.");
|
||||
goto done0;
|
||||
}
|
||||
|
||||
|
||||
/* Vocoder. Attached straight to the endpoint. */
|
||||
vocoderNodeConfig = ma_vocoder_node_config_init(device.capture.channels, device.sampleRate);
|
||||
|
||||
result = ma_vocoder_node_init(&g_nodeGraph, &vocoderNodeConfig, NULL, &g_vocoderNode);
|
||||
if (result != MA_SUCCESS) {
|
||||
printf("Failed to initialize vocoder node.");
|
||||
goto done1;
|
||||
}
|
||||
|
||||
ma_node_attach_output_bus(&g_vocoderNode, 0, ma_node_graph_get_endpoint(&g_nodeGraph), 0);
|
||||
|
||||
/* Amplify the volume of the vocoder output because in my testing it is a bit quiet. */
|
||||
ma_node_set_output_bus_volume(&g_vocoderNode, 0, 4);
|
||||
|
||||
|
||||
/* Source/carrier. Attached to input bus 0 of the vocoder node. */
|
||||
waveformConfig = ma_waveform_config_init(device.capture.format, device.capture.channels, device.sampleRate, ma_waveform_type_sawtooth, 1.0, 50);
|
||||
|
||||
result = ma_waveform_init(&waveformConfig, &g_sourceData);
|
||||
if (result != MA_SUCCESS) {
|
||||
printf("Failed to initialize waveform for excite node.");
|
||||
goto done3;
|
||||
}
|
||||
|
||||
sourceNodeConfig = ma_data_source_node_config_init(&g_sourceData);
|
||||
|
||||
result = ma_data_source_node_init(&g_nodeGraph, &sourceNodeConfig, NULL, &g_sourceNode);
|
||||
if (result != MA_SUCCESS) {
|
||||
printf("Failed to initialize excite node.");
|
||||
goto done3;
|
||||
}
|
||||
|
||||
ma_node_attach_output_bus(&g_sourceNode, 0, &g_vocoderNode, 0);
|
||||
|
||||
|
||||
/* Excite/modulator. Attached to input bus 1 of the vocoder node. */
|
||||
result = ma_audio_buffer_ref_init(device.capture.format, device.capture.channels, NULL, 0, &g_exciteData);
|
||||
if (result != MA_SUCCESS) {
|
||||
printf("Failed to initialize audio buffer for source.");
|
||||
goto done2;
|
||||
}
|
||||
|
||||
exciteNodeConfig = ma_data_source_node_config_init(&g_exciteData);
|
||||
|
||||
result = ma_data_source_node_init(&g_nodeGraph, &exciteNodeConfig, NULL, &g_exciteNode);
|
||||
if (result != MA_SUCCESS) {
|
||||
printf("Failed to initialize source node.");
|
||||
goto done2;
|
||||
}
|
||||
|
||||
ma_node_attach_output_bus(&g_exciteNode, 0, &g_vocoderNode, 1);
|
||||
|
||||
|
||||
ma_device_start(&device);
|
||||
|
||||
printf("Press Enter to quit...\n");
|
||||
getchar();
|
||||
|
||||
/* It's important that we stop the device first or else we'll uninitialize the graph from under the device. */
|
||||
ma_device_stop(&device);
|
||||
|
||||
/*done4:*/ ma_data_source_node_uninit(&g_exciteNode, NULL);
|
||||
done3: ma_data_source_node_uninit(&g_sourceNode, NULL);
|
||||
done2: ma_vocoder_node_uninit(&g_vocoderNode, NULL);
|
||||
done1: ma_node_graph_uninit(&g_nodeGraph, NULL);
|
||||
done0: ma_device_uninit(&device);
|
||||
|
||||
(void)argc;
|
||||
(void)argv;
|
||||
return 0;
|
||||
}
|
||||
251
thirdparty/miniaudio/examples/engine_advanced.c
vendored
Normal file
251
thirdparty/miniaudio/examples/engine_advanced.c
vendored
Normal file
@@ -0,0 +1,251 @@
|
||||
/*
|
||||
This example demonstrates some of the advanced features of the high level engine API.
|
||||
|
||||
The following features are demonstrated:
|
||||
|
||||
* Initialization of the engine from a pre-initialized device.
|
||||
* Self-managed resource managers.
|
||||
* Multiple engines with a shared resource manager.
|
||||
* Creation and management of `ma_sound` objects.
|
||||
|
||||
This example will play the sound that's passed in on the command line.
|
||||
|
||||
Using a shared resource manager, as we do in this example, is useful when you want to use
multiple engines so that you can output to multiple playback devices simultaneously. An example
might be a local co-op multiplayer game where each player has their own headphones.
|
||||
*/
|
||||
#define MINIAUDIO_IMPLEMENTATION
|
||||
#include "../miniaudio.h"
|
||||
|
||||
#define MAX_DEVICES 2
|
||||
#define MAX_SOUNDS 32
|
||||
|
||||
void data_callback(ma_device* pDevice, void* pOutput, const void* pInput, ma_uint32 frameCount)
{
    /*
    Because the device is self-managed we must read from the engine directly. The engine was
    stored in the device's user data at configuration time. Doing it this way also gives you a
    hook for performing your own processing on top of the engine's standard output.
    */
    ma_engine* pEngine = (ma_engine*)pDevice->pUserData;

    (void)pInput;   /* Playback only; capture side unused. */

    ma_engine_read_pcm_frames(pEngine, pOutput, frameCount, NULL);
}
|
||||
|
||||
int main(int argc, char** argv)
|
||||
{
|
||||
ma_result result;
|
||||
ma_context context;
|
||||
ma_resource_manager_config resourceManagerConfig;
|
||||
ma_resource_manager resourceManager;
|
||||
ma_engine engines[MAX_DEVICES];
|
||||
ma_device devices[MAX_DEVICES];
|
||||
ma_uint32 engineCount = 0;
|
||||
ma_uint32 iEngine;
|
||||
ma_device_info* pPlaybackDeviceInfos;
|
||||
ma_uint32 playbackDeviceCount;
|
||||
ma_uint32 iAvailableDevice;
|
||||
ma_uint32 iChosenDevice;
|
||||
ma_sound sounds[MAX_SOUNDS];
|
||||
ma_uint32 soundCount;
|
||||
ma_uint32 iSound;
|
||||
|
||||
if (argc < 2) {
|
||||
printf("No input file.");
|
||||
return -1;
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
We are going to be initializing multiple engines. In order to save on memory usage we can use a self managed
|
||||
resource manager so we can share a single resource manager across multiple engines.
|
||||
*/
|
||||
resourceManagerConfig = ma_resource_manager_config_init();
|
||||
resourceManagerConfig.decodedFormat = ma_format_f32; /* ma_format_f32 should almost always be used as that's what the engine (and most everything else) uses for mixing. */
|
||||
resourceManagerConfig.decodedChannels = 0; /* Setting the channel count to 0 will cause sounds to use their native channel count. */
|
||||
resourceManagerConfig.decodedSampleRate = 48000; /* Using a consistent sample rate is useful for avoiding expensive resampling in the audio thread. This will result in resampling being performed by the loading thread(s). */
|
||||
|
||||
result = ma_resource_manager_init(&resourceManagerConfig, &resourceManager);
|
||||
if (result != MA_SUCCESS) {
|
||||
printf("Failed to initialize resource manager.");
|
||||
return -1;
|
||||
}
|
||||
|
||||
|
||||
/* We're going to want a context so we can enumerate our playback devices. */
|
||||
result = ma_context_init(NULL, 0, NULL, &context);
|
||||
if (result != MA_SUCCESS) {
|
||||
printf("Failed to initialize context.");
|
||||
return -1;
|
||||
}
|
||||
|
||||
/*
|
||||
Now that we have a context we will want to enumerate over each device so we can display them to the user and give
|
||||
them a chance to select the output devices they want to use.
|
||||
*/
|
||||
result = ma_context_get_devices(&context, &pPlaybackDeviceInfos, &playbackDeviceCount, NULL, NULL);
|
||||
if (result != MA_SUCCESS) {
|
||||
printf("Failed to enumerate playback devices.");
|
||||
ma_context_uninit(&context);
|
||||
return -1;
|
||||
}
|
||||
|
||||
|
||||
/* We have our devices, so now we want to get the user to select the devices they want to output to. */
|
||||
engineCount = 0;
|
||||
|
||||
for (iChosenDevice = 0; iChosenDevice < MAX_DEVICES; iChosenDevice += 1) {
|
||||
int c = 0;
|
||||
for (;;) {
|
||||
printf("Select playback device %d ([%d - %d], Q to quit):\n", iChosenDevice+1, 0, ma_min((int)playbackDeviceCount, 9));
|
||||
|
||||
for (iAvailableDevice = 0; iAvailableDevice < playbackDeviceCount; iAvailableDevice += 1) {
|
||||
printf(" %d: %s\n", iAvailableDevice, pPlaybackDeviceInfos[iAvailableDevice].name);
|
||||
}
|
||||
|
||||
for (;;) {
|
||||
c = getchar();
|
||||
if (c != '\n') {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (c == 'q' || c == 'Q') {
|
||||
return 0; /* User aborted. */
|
||||
}
|
||||
|
||||
if (c >= '0' && c <= '9') {
|
||||
c -= '0';
|
||||
|
||||
if (c < (int)playbackDeviceCount) {
|
||||
ma_device_config deviceConfig;
|
||||
ma_engine_config engineConfig;
|
||||
|
||||
/*
|
||||
Create the device first before the engine. We'll specify the device in the engine's config. This is optional. When a device is
|
||||
not pre-initialized the engine will create one for you internally. The device does not need to be started here - the engine will
|
||||
do that for us in `ma_engine_start()`. The device's format is derived from the resource manager, but can be whatever you want.
|
||||
It's useful to keep the format consistent with the resource manager to avoid data conversions costs in the audio callback. In
|
||||
this example we're using the resource manager's sample format and sample rate, but leaving the channel count set to the device's
|
||||
native channels. You can use whatever format/channels/rate you like.
|
||||
*/
|
||||
deviceConfig = ma_device_config_init(ma_device_type_playback);
|
||||
deviceConfig.playback.pDeviceID = &pPlaybackDeviceInfos[c].id;
|
||||
deviceConfig.playback.format = resourceManager.config.decodedFormat;
|
||||
deviceConfig.playback.channels = 0;
|
||||
deviceConfig.sampleRate = resourceManager.config.decodedSampleRate;
|
||||
deviceConfig.dataCallback = data_callback;
|
||||
deviceConfig.pUserData = &engines[engineCount];
|
||||
|
||||
result = ma_device_init(&context, &deviceConfig, &devices[engineCount]);
|
||||
if (result != MA_SUCCESS) {
|
||||
printf("Failed to initialize device for %s.\n", pPlaybackDeviceInfos[c].name);
|
||||
return -1;
|
||||
}
|
||||
|
||||
/* Now that we have the device we can initialize the engine. The device is passed into the engine's config. */
|
||||
engineConfig = ma_engine_config_init();
|
||||
engineConfig.pDevice = &devices[engineCount];
|
||||
engineConfig.pResourceManager = &resourceManager;
|
||||
engineConfig.noAutoStart = MA_TRUE; /* Don't start the engine by default - we'll do that manually below. */
|
||||
|
||||
result = ma_engine_init(&engineConfig, &engines[engineCount]);
|
||||
if (result != MA_SUCCESS) {
|
||||
printf("Failed to initialize engine for %s.\n", pPlaybackDeviceInfos[c].name);
|
||||
ma_device_uninit(&devices[engineCount]);
|
||||
return -1;
|
||||
}
|
||||
|
||||
engineCount += 1;
|
||||
break;
|
||||
} else {
|
||||
printf("Invalid device number.\n");
|
||||
}
|
||||
} else {
|
||||
printf("Invalid device number.\n");
|
||||
}
|
||||
}
|
||||
|
||||
printf("Device %d: %s\n", iChosenDevice+1, pPlaybackDeviceInfos[c].name);
|
||||
}
|
||||
|
||||
|
||||
/* We should now have our engine's initialized. We can now start them. */
|
||||
for (iEngine = 0; iEngine < engineCount; iEngine += 1) {
|
||||
result = ma_engine_start(&engines[iEngine]);
|
||||
if (result != MA_SUCCESS) {
|
||||
printf("WARNING: Failed to start engine %d.\n", iEngine);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
At this point our engine's are running and outputting nothing but silence. To get them playing something we'll need
|
||||
some sounds. In this example we're just using one sound per engine, but you can create as many as you like. Since
|
||||
we're using a shared resource manager, the sound data will only be loaded once. This is how you would implement
|
||||
multiple listeners.
|
||||
*/
|
||||
soundCount = 0;
|
||||
for (iEngine = 0; iEngine < engineCount; iEngine += 1) {
|
||||
/* Just one sound per engine in this example. We're going to be loading this asynchronously. */
|
||||
result = ma_sound_init_from_file(&engines[iEngine], argv[1], MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_DECODE | MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_ASYNC | MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_STREAM, NULL, NULL, &sounds[iEngine]);
|
||||
if (result != MA_SUCCESS) {
|
||||
printf("WARNING: Failed to load sound \"%s\"", argv[1]);
|
||||
break;
|
||||
}
|
||||
|
||||
/*
|
||||
The sound can be started as soon as ma_sound_init_from_file() returns, even for sounds that are initialized
|
||||
with MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_ASYNC. The sound will start playing while it's being loaded. Note that if the
|
||||
asynchronous loading process cannot keep up with the rate at which you try reading you'll end up glitching.
|
||||
If this is an issue, you need to not load sounds asynchronously.
|
||||
*/
|
||||
result = ma_sound_start(&sounds[iEngine]);
|
||||
if (result != MA_SUCCESS) {
|
||||
printf("WARNING: Failed to start sound.");
|
||||
}
|
||||
|
||||
soundCount += 1;
|
||||
}
|
||||
|
||||
|
||||
printf("Press Enter to quit...");
|
||||
getchar();
|
||||
|
||||
for (;;) {
|
||||
int c = getchar();
|
||||
if (c == '\n') {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
/* Teardown. */
|
||||
|
||||
/* The application owns the `ma_sound` object which means you're responsible for uninitializing them. */
|
||||
for (iSound = 0; iSound < soundCount; iSound += 1) {
|
||||
ma_sound_uninit(&sounds[iSound]);
|
||||
}
|
||||
|
||||
/* We can now uninitialize each engine. */
|
||||
for (iEngine = 0; iEngine < engineCount; iEngine += 1) {
|
||||
ma_engine_uninit(&engines[iEngine]);
|
||||
|
||||
/*
|
||||
The engine has been uninitialized so now lets uninitialize the device. Do this first to ensure we don't
|
||||
uninitialize the resource manager from under the device while the data callback is running.
|
||||
*/
|
||||
ma_device_uninit(&devices[iEngine]);
|
||||
}
|
||||
|
||||
/* The context can only be uninitialized after the devices. */
|
||||
ma_context_uninit(&context);
|
||||
|
||||
/*
|
||||
Do the resource manager last. This way we can guarantee the data callbacks of each device aren't trying to access
|
||||
and data managed by the resource manager.
|
||||
*/
|
||||
ma_resource_manager_uninit(&resourceManager);
|
||||
|
||||
return 0;
|
||||
}
|
||||
104
thirdparty/miniaudio/examples/engine_effects.c
vendored
Normal file
104
thirdparty/miniaudio/examples/engine_effects.c
vendored
Normal file
@@ -0,0 +1,104 @@
|
||||
/*
|
||||
Demonstrates how to apply an effect to sounds using the high level engine API.
|
||||
|
||||
This example will load a file from the command line and apply an echo/delay effect to it. It will
|
||||
show you how to manage `ma_sound` objects and how to insert an effect into the graph.
|
||||
|
||||
The `ma_engine` object is a node graph and is compatible with the `ma_node_graph` API. The
|
||||
`ma_sound` object is a node within the node graph and is compatible with the `ma_node` API. This means
|
||||
that applying an effect is as simple as inserting an effect node into the graph and plugging in the
|
||||
sound's output into the effect's input. See the Node Graph example for how to use the node graph.
|
||||
|
||||
This example is playing only a single sound at a time which means only a single `ma_sound` object
|
||||
is being used. If you want to play multiple sounds at the same time, even if they're for the same
|
||||
sound file, you need multiple `ma_sound` objects.
|
||||
*/
|
||||
#define MINIAUDIO_IMPLEMENTATION
|
||||
#include "../miniaudio.h"
|
||||
|
||||
#define DELAY_IN_SECONDS 0.2f   /* Time between echoes. Converted to frames when configuring the delay node. */
#define DECAY            0.25f  /* Volume falloff for each echo. */

static ma_engine g_engine;          /* The engine driving playback; also the node graph the delay node is inserted into. */
static ma_sound g_sound;            /* This example will play only a single sound at once, so we only need one `ma_sound` object. */
static ma_delay_node g_delayNode;   /* The echo effect is achieved using a delay node. */
|
||||
|
||||
int main(int argc, char** argv)
|
||||
{
|
||||
ma_result result;
|
||||
|
||||
if (argc < 2) {
|
||||
printf("No input file.");
|
||||
return -1;
|
||||
}
|
||||
|
||||
/* The engine needs to be initialized first. */
|
||||
result = ma_engine_init(NULL, &g_engine);
|
||||
if (result != MA_SUCCESS) {
|
||||
printf("Failed to initialize audio engine.");
|
||||
return -1;
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
We'll build our graph starting from the end so initialize the delay node now. The output of
|
||||
this node will be connected straight to the output. You could also attach it to a sound group
|
||||
or any other node that accepts an input.
|
||||
|
||||
Creating a node requires a pointer to the node graph that owns it. The engine itself is a node
|
||||
graph. In the code below we can get a pointer to the node graph with `ma_engine_get_node_graph()`
|
||||
or we could simple cast the engine to a ma_node_graph* like so:
|
||||
|
||||
(ma_node_graph*)&g_engine
|
||||
|
||||
The endpoint of the graph can be retrieved with `ma_engine_get_endpoint()`.
|
||||
*/
|
||||
{
|
||||
ma_delay_node_config delayNodeConfig;
|
||||
ma_uint32 channels;
|
||||
ma_uint32 sampleRate;
|
||||
|
||||
channels = ma_engine_get_channels(&g_engine);
|
||||
sampleRate = ma_engine_get_sample_rate(&g_engine);
|
||||
|
||||
delayNodeConfig = ma_delay_node_config_init(channels, sampleRate, (ma_uint32)(sampleRate * DELAY_IN_SECONDS), DECAY);
|
||||
|
||||
result = ma_delay_node_init(ma_engine_get_node_graph(&g_engine), &delayNodeConfig, NULL, &g_delayNode);
|
||||
if (result != MA_SUCCESS) {
|
||||
printf("Failed to initialize delay node.");
|
||||
return -1;
|
||||
}
|
||||
|
||||
/* Connect the output of the delay node to the input of the endpoint. */
|
||||
ma_node_attach_output_bus(&g_delayNode, 0, ma_engine_get_endpoint(&g_engine), 0);
|
||||
}
|
||||
|
||||
|
||||
/* Now we can load the sound and connect it to the delay node. */
|
||||
{
|
||||
result = ma_sound_init_from_file(&g_engine, argv[1], 0, NULL, NULL, &g_sound);
|
||||
if (result != MA_SUCCESS) {
|
||||
printf("Failed to initialize sound \"%s\".", argv[1]);
|
||||
return -1;
|
||||
}
|
||||
|
||||
/* Connect the output of the sound to the input of the effect. */
|
||||
ma_node_attach_output_bus(&g_sound, 0, &g_delayNode, 0);
|
||||
|
||||
/*
|
||||
Start the sound after it's applied to the sound. Otherwise there could be a scenario where
|
||||
the very first part of it is read before the attachment to the effect is made.
|
||||
*/
|
||||
ma_sound_start(&g_sound);
|
||||
}
|
||||
|
||||
|
||||
printf("Press Enter to quit...");
|
||||
getchar();
|
||||
|
||||
ma_sound_uninit(&g_sound);
|
||||
ma_delay_node_uninit(&g_delayNode, NULL);
|
||||
ma_engine_uninit(&g_engine);
|
||||
|
||||
return 0;
|
||||
}
|
||||
35
thirdparty/miniaudio/examples/engine_hello_world.c
vendored
Normal file
35
thirdparty/miniaudio/examples/engine_hello_world.c
vendored
Normal file
@@ -0,0 +1,35 @@
|
||||
/*
|
||||
This example demonstrates how to initialize an audio engine and play a sound.
|
||||
|
||||
This will play the sound specified on the command line.
|
||||
*/
|
||||
#define MINIAUDIO_IMPLEMENTATION
|
||||
#include "../miniaudio.h"
|
||||
|
||||
#include <stdio.h>
|
||||
|
||||
int main(int argc, char** argv)
|
||||
{
|
||||
ma_result result;
|
||||
ma_engine engine;
|
||||
|
||||
if (argc < 2) {
|
||||
printf("No input file.");
|
||||
return -1;
|
||||
}
|
||||
|
||||
result = ma_engine_init(NULL, &engine);
|
||||
if (result != MA_SUCCESS) {
|
||||
printf("Failed to initialize audio engine.");
|
||||
return -1;
|
||||
}
|
||||
|
||||
ma_engine_play_sound(&engine, argv[1], NULL);
|
||||
|
||||
printf("Press Enter to quit...");
|
||||
getchar();
|
||||
|
||||
ma_engine_uninit(&engine);
|
||||
|
||||
return 0;
|
||||
}
|
||||
134
thirdparty/miniaudio/examples/engine_sdl.c
vendored
Normal file
134
thirdparty/miniaudio/examples/engine_sdl.c
vendored
Normal file
@@ -0,0 +1,134 @@
|
||||
/*
|
||||
Shows how to use the high level engine API with SDL.
|
||||
|
||||
By default, miniaudio's engine API will initialize a device internally for audio output. You can
|
||||
instead use the engine independently of a device. To show this off, this example will use SDL for
|
||||
audio output instead of miniaudio.
|
||||
|
||||
This example will load the sound specified on the command line and rotate it around the listener's
|
||||
head.
|
||||
*/
|
||||
#define MA_NO_DEVICE_IO /* <-- Disables the `ma_device` API. We don't need that in this example since SDL will be doing that part for us. */
|
||||
#define MINIAUDIO_IMPLEMENTATION
|
||||
#include "../miniaudio.h"
|
||||
|
||||
#define SDL_MAIN_HANDLED
|
||||
#include <SDL.h> /* Change this to your include location. Might be <SDL2/SDL.h>. */
|
||||
|
||||
#define CHANNELS    2               /* Must be stereo for this example. */
#define SAMPLE_RATE 48000           /* Shared by both the engine and the SDL device so no resampling is needed. */

static ma_engine g_engine;  /* The engine we read from inside the SDL audio callback. Initialized with noDevice. */
static ma_sound g_sound;    /* This example will play only a single sound at once, so we only need one `ma_sound` object. */
|
||||
|
||||
/*
SDL audio callback. Fills SDL's output buffer by reading directly from the miniaudio engine.
The engine was initialized as f32, so the byte count is converted to frames using the engine's
channel count before reading.

FIX over the original: the unused `pUserData` parameter is now explicitly voided to silence
unused-parameter warnings (-Wextra).
*/
void data_callback(void* pUserData, ma_uint8* pBuffer, int bufferSizeInBytes)
{
    ma_uint32 bufferSizeInFrames;

    (void)pUserData;    /* Unused. The engine is accessed via the global instead. */

    /* Reading is just a matter of reading straight from the engine. */
    bufferSizeInFrames = (ma_uint32)bufferSizeInBytes / ma_get_bytes_per_frame(ma_format_f32, ma_engine_get_channels(&g_engine));
    ma_engine_read_pcm_frames(&g_engine, pBuffer, bufferSizeInFrames, NULL);
}
|
||||
|
||||
int main(int argc, char** argv)
|
||||
{
|
||||
ma_result result;
|
||||
ma_engine_config engineConfig;
|
||||
SDL_AudioSpec desiredSpec;
|
||||
SDL_AudioSpec obtainedSpec;
|
||||
SDL_AudioDeviceID deviceID;
|
||||
|
||||
if (argc < 2) {
|
||||
printf("No input file.");
|
||||
return -1;
|
||||
}
|
||||
|
||||
/*
|
||||
We'll initialize the engine first for the purpose of the example, but since the engine and SDL
|
||||
are independent of each other you can initialize them in any order. You need only make sure the
|
||||
channel count and sample rates are consistent between the two.
|
||||
|
||||
When initializing the engine it's important to make sure we don't initialize a device
|
||||
internally because we want SDL to be dealing with that for us instead.
|
||||
*/
|
||||
engineConfig = ma_engine_config_init();
|
||||
engineConfig.noDevice = MA_TRUE; /* <-- Make sure this is set so that no device is created (we'll deal with that ourselves). */
|
||||
engineConfig.channels = CHANNELS;
|
||||
engineConfig.sampleRate = SAMPLE_RATE;
|
||||
|
||||
result = ma_engine_init(&engineConfig, &g_engine);
|
||||
if (result != MA_SUCCESS) {
|
||||
printf("Failed to initialize audio engine.");
|
||||
return -1;
|
||||
}
|
||||
|
||||
/* Now load our sound. */
|
||||
result = ma_sound_init_from_file(&g_engine, argv[1], 0, NULL, NULL, &g_sound);
|
||||
if (result != MA_SUCCESS) {
|
||||
printf("Failed to initialize sound.");
|
||||
return -1;
|
||||
}
|
||||
|
||||
/* Loop the sound so we can continuously hear it. */
|
||||
ma_sound_set_looping(&g_sound, MA_TRUE);
|
||||
|
||||
/*
|
||||
The sound will not be started by default, so start it now. We won't hear anything until the SDL
|
||||
audio device has been opened and started.
|
||||
*/
|
||||
ma_sound_start(&g_sound);
|
||||
|
||||
|
||||
/*
|
||||
Now that we have the engine and sound we can initialize SDL. This could have also been done
|
||||
first before the engine and sound.
|
||||
*/
|
||||
if (SDL_InitSubSystem(SDL_INIT_AUDIO) != 0) {
|
||||
printf("Failed to initialize SDL sub-system.");
|
||||
return -1;
|
||||
}
|
||||
|
||||
MA_ZERO_OBJECT(&desiredSpec);
|
||||
desiredSpec.freq = ma_engine_get_sample_rate(&g_engine);
|
||||
desiredSpec.format = AUDIO_F32;
|
||||
desiredSpec.channels = ma_engine_get_channels(&g_engine);
|
||||
desiredSpec.samples = 512;
|
||||
desiredSpec.callback = data_callback;
|
||||
desiredSpec.userdata = NULL;
|
||||
|
||||
deviceID = SDL_OpenAudioDevice(NULL, 0, &desiredSpec, &obtainedSpec, SDL_AUDIO_ALLOW_ANY_CHANGE);
|
||||
if (deviceID == 0) {
|
||||
printf("Failed to open SDL audio device.");
|
||||
return -1;
|
||||
}
|
||||
|
||||
/* Start playback. */
|
||||
SDL_PauseAudioDevice(deviceID, 0);
|
||||
|
||||
#if 1
|
||||
{
|
||||
/* We'll move the sound around the listener which we'll leave at the origin. */
|
||||
float stepAngle = 0.002f;
|
||||
float angle = 0;
|
||||
float distance = 2;
|
||||
|
||||
for (;;) {
|
||||
double x = ma_cosd(angle) - ma_sind(angle);
|
||||
double y = ma_sind(angle) + ma_cosd(angle);
|
||||
|
||||
ma_sound_set_position(&g_sound, (float)x * distance, 0, (float)y * distance);
|
||||
|
||||
angle += stepAngle;
|
||||
ma_sleep(1);
|
||||
}
|
||||
}
|
||||
#else
|
||||
printf("Press Enter to quit...");
|
||||
getchar();
|
||||
#endif
|
||||
|
||||
ma_sound_uninit(&g_sound);
|
||||
ma_engine_uninit(&g_engine);
|
||||
SDL_CloseAudioDevice(deviceID);
|
||||
SDL_QuitSubSystem(SDL_INIT_AUDIO);
|
||||
|
||||
return 0;
|
||||
}
|
||||
432
thirdparty/miniaudio/examples/engine_steamaudio.c
vendored
Normal file
432
thirdparty/miniaudio/examples/engine_steamaudio.c
vendored
Normal file
@@ -0,0 +1,432 @@
|
||||
/*
|
||||
Demonstrates integration of Steam Audio with miniaudio's engine API.
|
||||
|
||||
In this example a HRTF effect from Steam Audio will be applied. To do this a custom node will be
|
||||
implemented which uses Steam Audio's IPLBinauralEffect and IPLHRTF objects.
|
||||
|
||||
By implementing this as a node, it can be plugged into any position within the graph. The output
|
||||
channel count of this node is always stereo.
|
||||
|
||||
Steam Audio requires fixed sized processing, the size of which must be specified at initialization
|
||||
time of the IPLBinauralEffect and IPLHRTF objects. This creates a problem because the node graph
|
||||
will at times need to break down processing into smaller chunks for its internal processing. The
|
||||
node graph internally will read into a temporary buffer which is then mixed into the final output
|
||||
buffer. This temporary buffer is allocated on the stack and is a fixed size. However, variability
|
||||
comes into play because the channel count of the node is variable. It's not safe to just blindly
|
||||
process the effect with the frame count specified in miniaudio's node processing callback. Doing so
|
||||
results in glitching. To work around this, this example is just setting the update size to a known
|
||||
value that works (256). If it's set to something too big it'll exceed miniaudio's processing size
|
||||
used by the node graph. Alternatively you could use some kind of intermediary cache which
|
||||
accumulates input data until enough is available and then do the processing. Ideally, Steam Audio
|
||||
would support variable sized updates which would avoid this whole mess entirely.
|
||||
*/
|
||||
#define MINIAUDIO_IMPLEMENTATION
|
||||
#include "../miniaudio.h"
|
||||
|
||||
#include <phonon.h> /* Steam Audio */
|
||||
#include <stdint.h> /* Required for uint32_t which is used by STEAMAUDIO_VERSION. That dependency needs to be removed from Steam Audio - use IPLuint32 or "unsigned int" instead! */
|
||||
|
||||
#define FORMAT ma_format_f32 /* Must be floating point. */
|
||||
#define CHANNELS 2 /* Must be stereo for this example. */
|
||||
#define SAMPLE_RATE 48000
|
||||
|
||||
|
||||
/* Translates a Steam Audio status code into the closest miniaudio result code. */
static ma_result ma_result_from_IPLerror(IPLerror error)
{
    if (error == IPL_STATUS_SUCCESS) {
        return MA_SUCCESS;
    }

    if (error == IPL_STATUS_OUTOFMEMORY) {
        return MA_OUT_OF_MEMORY;
    }

    /* IPL_STATUS_INITIALIZATION, IPL_STATUS_FAILURE and anything unrecognized map to a generic error. */
    return MA_ERROR;
}
|
||||
|
||||
|
||||
/* Configuration for the binaural node. Initialize with ma_steamaudio_binaural_node_config_init(). */
typedef struct
{
    ma_node_config nodeConfig;          /* Base node configuration. */
    ma_uint32 channelsIn;               /* Input channel count. Must be 1 or 2; output is always stereo. */
    IPLAudioSettings iplAudioSettings;  /* Steam Audio settings; frameSize must be non-zero. */
    IPLContext iplContext;              /* The Steam Audio context used to create the effect. */
    IPLHRTF iplHRTF;                    /* There is one HRTF object to many binaural effect objects. */
} ma_steamaudio_binaural_node_config;

MA_API ma_steamaudio_binaural_node_config ma_steamaudio_binaural_node_config_init(ma_uint32 channelsIn, IPLAudioSettings iplAudioSettings, IPLContext iplContext, IPLHRTF iplHRTF);
|
||||
|
||||
|
||||
/* Node state for the Steam Audio binaural (HRTF) effect. Treat as opaque after initialization. */
typedef struct
{
    ma_node_base baseNode;          /* Must be the first member so this can be cast to/from ma_node*. */
    IPLAudioSettings iplAudioSettings;
    IPLContext iplContext;
    IPLHRTF iplHRTF;
    IPLBinauralEffect iplEffect;    /* The per-node binaural effect instance. */
    ma_vec3f direction;             /* Direction of the sound relative to the listener; read by the processing callback. */
    float* ppBuffersIn[2];          /* Each buffer is an offset of _pHeap. */
    float* ppBuffersOut[2];         /* Each buffer is an offset of _pHeap. */
    void* _pHeap;                   /* Backing allocation for the deinterleaved buffers above. */
} ma_steamaudio_binaural_node;

MA_API ma_result ma_steamaudio_binaural_node_init(ma_node_graph* pNodeGraph, const ma_steamaudio_binaural_node_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_steamaudio_binaural_node* pBinauralNode);
MA_API void ma_steamaudio_binaural_node_uninit(ma_steamaudio_binaural_node* pBinauralNode, const ma_allocation_callbacks* pAllocationCallbacks);
MA_API ma_result ma_steamaudio_binaural_node_set_direction(ma_steamaudio_binaural_node* pBinauralNode, float x, float y, float z);
|
||||
|
||||
|
||||
/* Builds a config with the caller's Steam Audio objects and a default base node config. */
MA_API ma_steamaudio_binaural_node_config ma_steamaudio_binaural_node_config_init(ma_uint32 channelsIn, IPLAudioSettings iplAudioSettings, IPLContext iplContext, IPLHRTF iplHRTF)
{
    ma_steamaudio_binaural_node_config nodeConfig;

    /* Start from all-zero so any field we don't set explicitly has a well-defined value. */
    MA_ZERO_OBJECT(&nodeConfig);

    nodeConfig.nodeConfig       = ma_node_config_init();
    nodeConfig.channelsIn       = channelsIn;
    nodeConfig.iplAudioSettings = iplAudioSettings;
    nodeConfig.iplContext       = iplContext;
    nodeConfig.iplHRTF          = iplHRTF;

    return nodeConfig;
}
|
||||
|
||||
|
||||
/*
Node processing callback. Applies Steam Audio's binaural effect to the input, producing stereo
output. Steam Audio processes fixed-size chunks (iplAudioSettings.frameSize), so the requested
frame count is processed in a loop of at-most-frameSize chunks. Input is deinterleaved into the
node's scratch buffers (mono input skips the copy), and the effect's deinterleaved output is
interleaved straight into the output buffer.
*/
static void ma_steamaudio_binaural_node_process_pcm_frames(ma_node* pNode, const float** ppFramesIn, ma_uint32* pFrameCountIn, float** ppFramesOut, ma_uint32* pFrameCountOut)
{
    ma_steamaudio_binaural_node* pBinauralNode = (ma_steamaudio_binaural_node*)pNode;
    IPLBinauralEffectParams binauralParams;
    IPLAudioBuffer inputBufferDesc;
    IPLAudioBuffer outputBufferDesc;
    ma_uint32 totalFramesToProcess = *pFrameCountOut;
    ma_uint32 totalFramesProcessed = 0;

    /* Snapshot the node's current direction and HRTF into the effect parameters. */
    binauralParams.direction.x = pBinauralNode->direction.x;
    binauralParams.direction.y = pBinauralNode->direction.y;
    binauralParams.direction.z = pBinauralNode->direction.z;
    binauralParams.interpolation = IPL_HRTFINTERPOLATION_NEAREST;
    binauralParams.spatialBlend = 1.0f;     /* Fully spatialized. */
    binauralParams.hrtf = pBinauralNode->iplHRTF;

    inputBufferDesc.numChannels = (IPLint32)ma_node_get_input_channels(pNode, 0);

    /* We'll run this in a loop just in case our deinterleaved buffers are too small. */
    outputBufferDesc.numSamples = pBinauralNode->iplAudioSettings.frameSize;
    outputBufferDesc.numChannels = 2;   /* Binaural output is always stereo. */
    outputBufferDesc.data = pBinauralNode->ppBuffersOut;

    while (totalFramesProcessed < totalFramesToProcess) {
        /* Clamp each chunk to Steam Audio's fixed processing size. */
        ma_uint32 framesToProcessThisIteration = totalFramesToProcess - totalFramesProcessed;
        if (framesToProcessThisIteration > (ma_uint32)pBinauralNode->iplAudioSettings.frameSize) {
            framesToProcessThisIteration = (ma_uint32)pBinauralNode->iplAudioSettings.frameSize;
        }

        if (inputBufferDesc.numChannels == 1) {
            /* Fast path. No need for deinterleaving since it's a mono stream. */
            pBinauralNode->ppBuffersIn[0] = (float*)ma_offset_pcm_frames_const_ptr_f32(ppFramesIn[0], totalFramesProcessed, 1);
        } else {
            /* Slow path. Need to deinterleave the input data. */
            ma_deinterleave_pcm_frames(ma_format_f32, inputBufferDesc.numChannels, framesToProcessThisIteration, ma_offset_pcm_frames_const_ptr_f32(ppFramesIn[0], totalFramesProcessed, inputBufferDesc.numChannels), pBinauralNode->ppBuffersIn);
        }

        inputBufferDesc.data = pBinauralNode->ppBuffersIn;
        inputBufferDesc.numSamples = (IPLint32)framesToProcessThisIteration;

        /* Apply the effect. */
        iplBinauralEffectApply(pBinauralNode->iplEffect, &binauralParams, &inputBufferDesc, &outputBufferDesc);

        /* Interleave straight into the output buffer. */
        ma_interleave_pcm_frames(ma_format_f32, 2, framesToProcessThisIteration, pBinauralNode->ppBuffersOut, ma_offset_pcm_frames_ptr_f32(ppFramesOut[0], totalFramesProcessed, 2));

        /* Advance. */
        totalFramesProcessed += framesToProcessThisIteration;
    }

    (void)pFrameCountIn;    /* Unused. */
}
|
||||
|
||||
static ma_node_vtable g_ma_steamaudio_binaural_node_vtable =
|
||||
{
|
||||
ma_steamaudio_binaural_node_process_pcm_frames,
|
||||
NULL,
|
||||
1, /* 1 input channel. */
|
||||
1, /* 1 output channel. */
|
||||
0
|
||||
};
|
||||
|
||||
MA_API ma_result ma_steamaudio_binaural_node_init(ma_node_graph* pNodeGraph, const ma_steamaudio_binaural_node_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_steamaudio_binaural_node* pBinauralNode)
|
||||
{
|
||||
ma_result result;
|
||||
ma_node_config baseConfig;
|
||||
ma_uint32 channelsIn;
|
||||
ma_uint32 channelsOut;
|
||||
IPLBinauralEffectSettings iplBinauralEffectSettings;
|
||||
size_t heapSizeInBytes;
|
||||
|
||||
if (pBinauralNode == NULL) {
|
||||
return MA_INVALID_ARGS;
|
||||
}
|
||||
|
||||
MA_ZERO_OBJECT(pBinauralNode);
|
||||
|
||||
if (pConfig == NULL || pConfig->iplAudioSettings.frameSize == 0 || pConfig->iplContext == NULL || pConfig->iplHRTF == NULL) {
|
||||
return MA_INVALID_ARGS;
|
||||
}
|
||||
|
||||
/* Steam Audio only supports mono and stereo input. */
|
||||
if (pConfig->channelsIn < 1 || pConfig->channelsIn > 2) {
|
||||
return MA_INVALID_ARGS;
|
||||
}
|
||||
|
||||
channelsIn = pConfig->channelsIn;
|
||||
channelsOut = 2; /* Always stereo output. */
|
||||
|
||||
baseConfig = ma_node_config_init();
|
||||
baseConfig.vtable = &g_ma_steamaudio_binaural_node_vtable;
|
||||
baseConfig.pInputChannels = &channelsIn;
|
||||
baseConfig.pOutputChannels = &channelsOut;
|
||||
result = ma_node_init(pNodeGraph, &baseConfig, pAllocationCallbacks, &pBinauralNode->baseNode);
|
||||
if (result != MA_SUCCESS) {
|
||||
return result;
|
||||
}
|
||||
|
||||
pBinauralNode->iplAudioSettings = pConfig->iplAudioSettings;
|
||||
pBinauralNode->iplContext = pConfig->iplContext;
|
||||
pBinauralNode->iplHRTF = pConfig->iplHRTF;
|
||||
|
||||
MA_ZERO_OBJECT(&iplBinauralEffectSettings);
|
||||
iplBinauralEffectSettings.hrtf = pBinauralNode->iplHRTF;
|
||||
|
||||
result = ma_result_from_IPLerror(iplBinauralEffectCreate(pBinauralNode->iplContext, &pBinauralNode->iplAudioSettings, &iplBinauralEffectSettings, &pBinauralNode->iplEffect));
|
||||
if (result != MA_SUCCESS) {
|
||||
ma_node_uninit(&pBinauralNode->baseNode, pAllocationCallbacks);
|
||||
return result;
|
||||
}
|
||||
|
||||
heapSizeInBytes = 0;
|
||||
|
||||
/*
|
||||
Unfortunately Steam Audio uses deinterleaved buffers for everything so we'll need to use some
|
||||
intermediary buffers. We'll allocate one big buffer on the heap and then use offsets. We'll
|
||||
use the frame size from the IPLAudioSettings structure as a basis for the size of the buffer.
|
||||
*/
|
||||
heapSizeInBytes += sizeof(float) * channelsOut * pBinauralNode->iplAudioSettings.frameSize; /* Output buffer. */
|
||||
heapSizeInBytes += sizeof(float) * channelsIn * pBinauralNode->iplAudioSettings.frameSize; /* Input buffer. */
|
||||
|
||||
pBinauralNode->_pHeap = ma_malloc(heapSizeInBytes, pAllocationCallbacks);
|
||||
if (pBinauralNode->_pHeap == NULL) {
|
||||
iplBinauralEffectRelease(&pBinauralNode->iplEffect);
|
||||
ma_node_uninit(&pBinauralNode->baseNode, pAllocationCallbacks);
|
||||
return MA_OUT_OF_MEMORY;
|
||||
}
|
||||
|
||||
pBinauralNode->ppBuffersOut[0] = (float*)pBinauralNode->_pHeap;
|
||||
pBinauralNode->ppBuffersOut[1] = (float*)ma_offset_ptr(pBinauralNode->_pHeap, sizeof(float) * pBinauralNode->iplAudioSettings.frameSize);
|
||||
|
||||
{
|
||||
ma_uint32 iChannelIn;
|
||||
for (iChannelIn = 0; iChannelIn < channelsIn; iChannelIn += 1) {
|
||||
pBinauralNode->ppBuffersIn[iChannelIn] = (float*)ma_offset_ptr(pBinauralNode->_pHeap, sizeof(float) * pBinauralNode->iplAudioSettings.frameSize * (channelsOut + iChannelIn));
|
||||
}
|
||||
}
|
||||
|
||||
return MA_SUCCESS;
|
||||
}
|
||||
|
||||
MA_API void ma_steamaudio_binaural_node_uninit(ma_steamaudio_binaural_node* pBinauralNode, const ma_allocation_callbacks* pAllocationCallbacks)
|
||||
{
|
||||
if (pBinauralNode == NULL) {
|
||||
return;
|
||||
}
|
||||
|
||||
/* The base node is always uninitialized first. */
|
||||
ma_node_uninit(&pBinauralNode->baseNode, pAllocationCallbacks);
|
||||
|
||||
/*
|
||||
The Steam Audio objects are deleted after the base node. This ensures the base node is removed from the graph
|
||||
first to ensure these objects aren't getting used by the audio thread.
|
||||
*/
|
||||
iplBinauralEffectRelease(&pBinauralNode->iplEffect);
|
||||
ma_free(pBinauralNode->_pHeap, pAllocationCallbacks);
|
||||
}
|
||||
|
||||
MA_API ma_result ma_steamaudio_binaural_node_set_direction(ma_steamaudio_binaural_node* pBinauralNode, float x, float y, float z)
|
||||
{
|
||||
if (pBinauralNode == NULL) {
|
||||
return MA_INVALID_ARGS;
|
||||
}
|
||||
|
||||
pBinauralNode->direction.x = x;
|
||||
pBinauralNode->direction.y = y;
|
||||
pBinauralNode->direction.z = z;
|
||||
|
||||
return MA_SUCCESS;
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
static ma_engine g_engine;
static ma_sound g_sound;                           /* This example will play only a single sound at once, so we only need one `ma_sound` object. */
static ma_steamaudio_binaural_node g_binauralNode; /* The node that applies the Steam Audio binaural (HRTF) effect. */
|
||||
int main(int argc, char** argv)
|
||||
{
|
||||
ma_result result;
|
||||
ma_engine_config engineConfig;
|
||||
IPLAudioSettings iplAudioSettings;
|
||||
IPLContextSettings iplContextSettings;
|
||||
IPLContext iplContext;
|
||||
IPLHRTFSettings iplHRTFSettings;
|
||||
IPLHRTF iplHRTF;
|
||||
|
||||
if (argc < 2) {
|
||||
printf("No input file.");
|
||||
return -1;
|
||||
}
|
||||
|
||||
/* The engine needs to be initialized first. */
|
||||
engineConfig = ma_engine_config_init();
|
||||
engineConfig.channels = CHANNELS;
|
||||
engineConfig.sampleRate = SAMPLE_RATE;
|
||||
engineConfig.periodSizeInFrames = 256;
|
||||
|
||||
result = ma_engine_init(&engineConfig, &g_engine);
|
||||
if (result != MA_SUCCESS) {
|
||||
printf("Failed to initialize audio engine.");
|
||||
return -1;
|
||||
}
|
||||
|
||||
/*
|
||||
Now that we have the engine we can initialize the Steam Audio objects.
|
||||
*/
|
||||
MA_ZERO_OBJECT(&iplAudioSettings);
|
||||
iplAudioSettings.samplingRate = ma_engine_get_sample_rate(&g_engine);
|
||||
|
||||
/*
|
||||
If there's any Steam Audio developers reading this, why is the frame size needed? This needs to
|
||||
be documented. If this is for some kind of buffer management with FFT or something, then this
|
||||
need not be exposed to the public API. There should be no need for the public API to require a
|
||||
fixed sized update.
|
||||
*/
|
||||
iplAudioSettings.frameSize = engineConfig.periodSizeInFrames;
|
||||
|
||||
|
||||
/* IPLContext */
|
||||
MA_ZERO_OBJECT(&iplContextSettings);
|
||||
iplContextSettings.version = STEAMAUDIO_VERSION;
|
||||
|
||||
result = ma_result_from_IPLerror(iplContextCreate(&iplContextSettings, &iplContext));
|
||||
if (result != MA_SUCCESS) {
|
||||
ma_engine_uninit(&g_engine);
|
||||
return result;
|
||||
}
|
||||
|
||||
|
||||
/* IPLHRTF */
|
||||
MA_ZERO_OBJECT(&iplHRTFSettings);
|
||||
iplHRTFSettings.type = IPL_HRTFTYPE_DEFAULT;
|
||||
|
||||
result = ma_result_from_IPLerror(iplHRTFCreate(iplContext, &iplAudioSettings, &iplHRTFSettings, &iplHRTF));
|
||||
if (result != MA_SUCCESS) {
|
||||
iplContextRelease(&iplContext);
|
||||
ma_engine_uninit(&g_engine);
|
||||
return result;
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
The binaural node will need to know the input channel count of the sound so we'll need to load
|
||||
the sound first. We'll initialize this such that it'll be initially detached from the graph.
|
||||
It will be attached to the graph after the binaural node is initialized.
|
||||
*/
|
||||
{
|
||||
ma_sound_config soundConfig;
|
||||
|
||||
soundConfig = ma_sound_config_init();
|
||||
soundConfig.pFilePath = argv[1];
|
||||
soundConfig.flags = MA_SOUND_FLAG_NO_DEFAULT_ATTACHMENT; /* We'll attach this to the graph later. */
|
||||
|
||||
result = ma_sound_init_ex(&g_engine, &soundConfig, &g_sound);
|
||||
if (result != MA_SUCCESS) {
|
||||
return result;
|
||||
}
|
||||
|
||||
/* We'll let the Steam Audio binaural effect do the directional attenuation for us. */
|
||||
ma_sound_set_directional_attenuation_factor(&g_sound, 0);
|
||||
|
||||
/* Loop the sound so we can get a continuous sound. */
|
||||
ma_sound_set_looping(&g_sound, MA_TRUE);
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
We'll build our graph starting from the end so initialize the binaural node now. The output of
|
||||
this node will be connected straight to the output. You could also attach it to a sound group
|
||||
or any other node that accepts an input.
|
||||
|
||||
Creating a node requires a pointer to the node graph that owns it. The engine itself is a node
|
||||
graph. In the code below we can get a pointer to the node graph with `ma_engine_get_node_graph()`
|
||||
or we could simple cast the engine to a ma_node_graph* like so:
|
||||
|
||||
(ma_node_graph*)&g_engine
|
||||
|
||||
The endpoint of the graph can be retrieved with `ma_engine_get_endpoint()`.
|
||||
*/
|
||||
{
|
||||
ma_steamaudio_binaural_node_config binauralNodeConfig;
|
||||
|
||||
/*
|
||||
For this example we're just using the engine's channel count, but a more optimal solution
|
||||
might be to set this to mono if the source data is also mono.
|
||||
*/
|
||||
binauralNodeConfig = ma_steamaudio_binaural_node_config_init(CHANNELS, iplAudioSettings, iplContext, iplHRTF);
|
||||
|
||||
result = ma_steamaudio_binaural_node_init(ma_engine_get_node_graph(&g_engine), &binauralNodeConfig, NULL, &g_binauralNode);
|
||||
if (result != MA_SUCCESS) {
|
||||
printf("Failed to initialize binaural node.");
|
||||
return -1;
|
||||
}
|
||||
|
||||
/* Connect the output of the delay node to the input of the endpoint. */
|
||||
ma_node_attach_output_bus(&g_binauralNode, 0, ma_engine_get_endpoint(&g_engine), 0);
|
||||
}
|
||||
|
||||
|
||||
/* We can now wire up the sound to the binaural node and start it. */
|
||||
ma_node_attach_output_bus(&g_sound, 0, &g_binauralNode, 0);
|
||||
ma_sound_start(&g_sound);
|
||||
|
||||
#if 1
|
||||
{
|
||||
/*
|
||||
We'll move the sound around the listener which we'll leave at the origin. We'll then get
|
||||
the direction to the listener and update the binaural node appropriately.
|
||||
*/
|
||||
float stepAngle = 0.002f;
|
||||
float angle = 0;
|
||||
float distance = 2;
|
||||
|
||||
for (;;) {
|
||||
double x = ma_cosd(angle) - ma_sind(angle);
|
||||
double y = ma_sind(angle) + ma_cosd(angle);
|
||||
ma_vec3f direction;
|
||||
|
||||
ma_sound_set_position(&g_sound, (float)x * distance, 0, (float)y * distance);
|
||||
direction = ma_sound_get_direction_to_listener(&g_sound);
|
||||
|
||||
/* Update the direction of the sound. */
|
||||
ma_steamaudio_binaural_node_set_direction(&g_binauralNode, direction.x, direction.y, direction.z);
|
||||
angle += stepAngle;
|
||||
|
||||
ma_sleep(1);
|
||||
}
|
||||
}
|
||||
#else
|
||||
printf("Press Enter to quit...");
|
||||
getchar();
|
||||
#endif
|
||||
|
||||
ma_sound_uninit(&g_sound);
|
||||
ma_steamaudio_binaural_node_uninit(&g_binauralNode, NULL);
|
||||
ma_engine_uninit(&g_engine);
|
||||
|
||||
return 0;
|
||||
}
|
||||
148
thirdparty/miniaudio/examples/hilo_interop.c
vendored
Normal file
148
thirdparty/miniaudio/examples/hilo_interop.c
vendored
Normal file
@@ -0,0 +1,148 @@
|
||||
/*
|
||||
Demonstrates interop between the high-level and the low-level API.
|
||||
|
||||
In this example we are using `ma_device` (the low-level API) to capture data from the microphone
|
||||
which we then play back through the engine as a sound. We use a ring buffer to act as the data
|
||||
source for the sound.
|
||||
|
||||
This is just a very basic example to show the general idea on how this might be achieved. In
|
||||
this example a ring buffer is being used as the intermediary data source, but you can use anything
|
||||
that works best for your situation. So long as the data is captured from the microphone, and then
|
||||
delivered to the sound (via a data source), you should be good to go.
|
||||
|
||||
A more robust example would probably not want to use a ring buffer directly as the data source.
|
||||
Instead you would probably want to do a custom data source that handles underruns and overruns of
|
||||
the ring buffer and deals with desyncs between capture and playback. In the future this example
|
||||
may be updated to make use of a more advanced data source that handles all of this.
|
||||
*/
|
||||
#define MINIAUDIO_IMPLEMENTATION
|
||||
#include "../miniaudio.h"
|
||||
|
||||
static ma_pcm_rb rb;     /* Ring buffer connecting the capture callback (writer) to the sound (reader). */
static ma_device device; /* The capture device. */
static ma_engine engine; /* The playback engine. */
static ma_sound sound;   /* The sound will be the playback of the capture side. */
|
||||
/*
Capture-side data callback. Pushes the captured frames into the global ring buffer so the
playback side (the sound) can read them. If the ring buffer fills up, the remaining captured
frames for this callback are dropped.

Fix over the original: `pFramesOut` is now explicitly marked unused. This is a capture-only
device so there is no output buffer, and the unused parameter warned under the
-Wall -Wextra builds used for these examples.
*/
void capture_data_callback(ma_device* pDevice, void* pFramesOut, const void* pFramesIn, ma_uint32 frameCount)
{
    ma_result result;
    ma_uint32 framesWritten;

    /* We need to write to the ring buffer. Need to do this in a loop. */
    framesWritten = 0;
    while (framesWritten < frameCount) {
        void* pMappedBuffer;
        ma_uint32 framesToWrite = frameCount - framesWritten;

        result = ma_pcm_rb_acquire_write(&rb, &framesToWrite, &pMappedBuffer);
        if (result != MA_SUCCESS) {
            break;
        }

        if (framesToWrite == 0) {
            break;  /* Ring buffer is full. Drop the rest of this period's data. */
        }

        /* Copy the data from the capture buffer to the ring buffer. */
        ma_copy_pcm_frames(pMappedBuffer, ma_offset_pcm_frames_const_ptr_f32(pFramesIn, framesWritten, pDevice->capture.channels), framesToWrite, pDevice->capture.format, pDevice->capture.channels);

        result = ma_pcm_rb_commit_write(&rb, framesToWrite);
        if (result != MA_SUCCESS) {
            break;
        }

        framesWritten += framesToWrite;
    }

    (void)pFramesOut;   /* Unused. Capture-only devices have no output buffer. */
}
|
||||
int main(int argc, char** argv)
|
||||
{
|
||||
ma_result result;
|
||||
ma_device_config deviceConfig;
|
||||
|
||||
/*
|
||||
The first thing we'll do is set up the capture side. There are two parts to this. The first is
|
||||
the device itself, and the other is the ring buffer. It doesn't matter what order we initialize
|
||||
these in, so long as the ring buffer is created before the device is started so that the
|
||||
callback can be guaranteed to have a valid destination. We'll initialize the device first, and
|
||||
then use the format, channels and sample rate to initialize the ring buffer.
|
||||
|
||||
It's important that the sample format of the device is set to f32 because that's what the engine
|
||||
uses internally.
|
||||
*/
|
||||
|
||||
/* Initialize the capture device. */
|
||||
deviceConfig = ma_device_config_init(ma_device_type_capture);
|
||||
deviceConfig.capture.format = ma_format_f32;
|
||||
deviceConfig.dataCallback = capture_data_callback;
|
||||
|
||||
result = ma_device_init(NULL, &deviceConfig, &device);
|
||||
if (result != MA_SUCCESS) {
|
||||
printf("Failed to initialize capture device.");
|
||||
return -1;
|
||||
}
|
||||
|
||||
/* Initialize the ring buffer. */
|
||||
result = ma_pcm_rb_init(device.capture.format, device.capture.channels, device.capture.internalPeriodSizeInFrames * 5, NULL, NULL, &rb);
|
||||
if (result != MA_SUCCESS) {
|
||||
printf("Failed to initialize the ring buffer.");
|
||||
return -1;
|
||||
}
|
||||
|
||||
/*
|
||||
Ring buffers don't require a sample rate for their normal operation, but we can associate it
|
||||
with a sample rate. We'll want to do this so the engine can resample if necessary.
|
||||
*/
|
||||
ma_pcm_rb_set_sample_rate(&rb, device.sampleRate);
|
||||
|
||||
|
||||
|
||||
/*
|
||||
At this point the capture side is set up and we can now set up the playback side. Here we are
|
||||
using `ma_engine` and linking the captured data to a sound so it can be manipulated just like
|
||||
any other sound in the world.
|
||||
|
||||
Note that we have not yet started the capture device. Since the captured data is tied to a
|
||||
sound, we'll link the starting and stopping of the capture device to the starting and stopping
|
||||
of the sound.
|
||||
*/
|
||||
|
||||
/* We'll get the engine up and running before we start the capture device. */
|
||||
result = ma_engine_init(NULL, &engine);
|
||||
if (result != MA_SUCCESS) {
|
||||
printf("Failed to initialize the engine.");
|
||||
return -1;
|
||||
}
|
||||
|
||||
/*
|
||||
We can now create our sound. This is created from a data source, which in this example is a
|
||||
ring buffer. The capture side will be writing data into the ring buffer, whereas the sound
|
||||
will be reading from it.
|
||||
*/
|
||||
result = ma_sound_init_from_data_source(&engine, &rb, 0, NULL, &sound);
|
||||
if (result != MA_SUCCESS) {
|
||||
printf("Failed to initialize the sound.");
|
||||
return -1;
|
||||
}
|
||||
|
||||
/* Make sure the sound is set to looping or else it'll stop if the ring buffer runs out of data. */
|
||||
ma_sound_set_looping(&sound, MA_TRUE);
|
||||
|
||||
/* Link the starting of the device and sound together. */
|
||||
ma_device_start(&device);
|
||||
ma_sound_start(&sound);
|
||||
|
||||
|
||||
printf("Press Enter to quit...\n");
|
||||
getchar();
|
||||
|
||||
ma_sound_uninit(&sound);
|
||||
ma_engine_uninit(&engine);
|
||||
ma_device_uninit(&device);
|
||||
ma_pcm_rb_uninit(&rb);
|
||||
|
||||
|
||||
(void)argc;
|
||||
(void)argv;
|
||||
return 0;
|
||||
}
|
||||
250
thirdparty/miniaudio/examples/node_graph.c
vendored
Normal file
250
thirdparty/miniaudio/examples/node_graph.c
vendored
Normal file
@@ -0,0 +1,250 @@
|
||||
/*
|
||||
This example shows how to use the node graph system.
|
||||
|
||||
The node graph system can be used for doing complex mixing and effect processing. The idea is that
|
||||
you have a number of nodes that are connected to each other to form a graph. At the end of the
|
||||
graph is an endpoint which all nodes eventually connect to.
|
||||
|
||||
A node is used to do some kind of processing on zero or more input streams and produce one or more
|
||||
output streams. Each node can have a number of inputs and outputs. Each of these is called a bus in
|
||||
miniaudio. Some nodes, particularly data source nodes, have no inputs and instead generate their
|
||||
outputs dynamically. All nodes will have at least one output or else it'll be disconnected from the
|
||||
graph and will never get processed. Each output bus of a node will be connected to an input bus of
|
||||
another node, but they don't all need to connect to the same input node. For example, a splitter
|
||||
node has 1 input bus and 2 output buses and is used to duplicate a signal. You could then branch
|
||||
off and have one output bus connected to one input node and the other connected to a different
|
||||
input node, and then have two different effects process for each of the duplicated branches.
|
||||
|
||||
Any number of output buses can be connected to an input bus in which case the output buses will be
|
||||
mixed before processing by the input node. This is how you would achieve the mixing part of the
|
||||
node graph.
|
||||
|
||||
This example will be using the following node graph set up:
|
||||
|
||||
```
|
||||
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Data flows left to right >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
|
||||
|
||||
+---------------+ +-----------------+
|
||||
| Data Source 1 =----+ +----------+ +----= Low Pass Filter =----+
|
||||
+---------------+ | | =----+ +-----------------+ | +----------+
|
||||
+----= Splitter | +----= ENDPOINT |
|
||||
+---------------+ | | =----+ +-----------------+ | +----------+
|
||||
| Data Source 2 =----+ +----------+ +----= Echo / Delay =----+
|
||||
+---------------+ +-----------------+
|
||||
```
|
||||
|
||||
This does not represent a realistic real-world scenario, but it demonstrates how to make use of
|
||||
mixing, multiple outputs and multiple effects.
|
||||
|
||||
The data source nodes are connected to the input of the splitter. They'll be mixed before being
|
||||
processed by the splitter. The splitter has two output buses. In the graph above, one bus will be
|
||||
routed to a low pass filter, whereas the other bus will be routed to an echo effect. Then, the
|
||||
outputs of these two effects will be connected to the input bus of the endpoint. Because both of
|
||||
the outputs are connected to the same input bus, they'll be mixed at that point.
|
||||
|
||||
The two data sources at the start of the graph have no inputs. They'll instead generate their
|
||||
output by reading from a data source. The data source in this case will be one `ma_decoder` for
|
||||
each input file specified on the command line.
|
||||
|
||||
You can also control the volume of an output bus. In this example, we set the volumes of the low
|
||||
pass and echo effects so that one of them becomes more obvious than the other.
|
||||
|
||||
When you want to read from the graph, you simply call `ma_node_graph_read_pcm_frames()`.
|
||||
*/
|
||||
#define MINIAUDIO_IMPLEMENTATION
|
||||
#include "../miniaudio.h"
|
||||
|
||||
/* Data Format */
#define FORMAT              ma_format_f32   /* Must always be f32 because the node graph processes in f32. */
#define CHANNELS            2
#define SAMPLE_RATE         48000

/* Effect Properties */
#define LPF_BIAS            0.9f    /* Higher values means more bias towards the low pass filter (the low pass filter will be more audible). Lower values means more bias towards the echo. Must be between 0 and 1. */
#define LPF_CUTOFF_FACTOR   80      /* High values = more filter. The cutoff frequency is SAMPLE_RATE / LPF_CUTOFF_FACTOR. */
#define LPF_ORDER           8
#define DELAY_IN_SECONDS    0.2f
#define DECAY               0.5f    /* Volume falloff for each echo. */

/* One loaded sound: a decoder and the data source node that feeds it into the graph. */
typedef struct
{
    ma_data_source_node node;   /* If you make this the first member, you can pass a pointer to this struct into any `ma_node_*` API and it will "Just Work". */
    ma_decoder decoder;
} sound_node;
|
||||
static ma_node_graph g_nodeGraph;       /* The graph everything below is attached to. */
static ma_lpf_node g_lpfNode;           /* Low pass filter branch. */
static ma_delay_node g_delayNode;       /* Echo/delay branch. */
static ma_splitter_node g_splitterNode; /* Duplicates the mixed input into the two branches above. */
static sound_node* g_pSoundNodes;       /* One entry per input file that loaded successfully. */
static int g_soundNodeCount;            /* Number of valid entries in g_pSoundNodes. */
|
||||
/*
Playback device callback. Fills the device's output buffer straight from the node graph.
The device and the graph are configured with the same format/channel count, so no conversion
is required.
*/
void data_callback(ma_device* pDevice, void* pOutput, const void* pInput, ma_uint32 frameCount)
{
    (void)pInput;   /* Playback-only device; there is no input buffer. */

    /* The device must have been configured with the same channel count as the graph. */
    MA_ASSERT(pDevice->playback.channels == CHANNELS);

    /*
    Hearing the output of the node graph is as easy as reading straight into the output
    buffer. Formats match, so no conversion step is needed.
    */
    ma_node_graph_read_pcm_frames(&g_nodeGraph, pOutput, frameCount, NULL);
}
|
||||
int main(int argc, char** argv)
|
||||
{
|
||||
int iarg;
|
||||
ma_result result;
|
||||
|
||||
/* We'll set up our nodes starting from the end and working our way back to the start. We'll need to set up the graph first. */
|
||||
{
|
||||
ma_node_graph_config nodeGraphConfig = ma_node_graph_config_init(CHANNELS);
|
||||
|
||||
result = ma_node_graph_init(&nodeGraphConfig, NULL, &g_nodeGraph);
|
||||
if (result != MA_SUCCESS) {
|
||||
printf("ERROR: Failed to initialize node graph.");
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/* Low Pass Filter. */
|
||||
{
|
||||
ma_lpf_node_config lpfNodeConfig = ma_lpf_node_config_init(CHANNELS, SAMPLE_RATE, SAMPLE_RATE / LPF_CUTOFF_FACTOR, LPF_ORDER);
|
||||
|
||||
result = ma_lpf_node_init(&g_nodeGraph, &lpfNodeConfig, NULL, &g_lpfNode);
|
||||
if (result != MA_SUCCESS) {
|
||||
printf("ERROR: Failed to initialize low pass filter node.");
|
||||
return -1;
|
||||
}
|
||||
|
||||
/* Connect the output bus of the low pass filter node to the input bus of the endpoint. */
|
||||
ma_node_attach_output_bus(&g_lpfNode, 0, ma_node_graph_get_endpoint(&g_nodeGraph), 0);
|
||||
|
||||
/* Set the volume of the low pass filter to make it more of less impactful. */
|
||||
ma_node_set_output_bus_volume(&g_lpfNode, 0, LPF_BIAS);
|
||||
}
|
||||
|
||||
|
||||
/* Echo / Delay. */
|
||||
{
|
||||
ma_delay_node_config delayNodeConfig = ma_delay_node_config_init(CHANNELS, SAMPLE_RATE, (ma_uint32)(SAMPLE_RATE * DELAY_IN_SECONDS), DECAY);
|
||||
|
||||
result = ma_delay_node_init(&g_nodeGraph, &delayNodeConfig, NULL, &g_delayNode);
|
||||
if (result != MA_SUCCESS) {
|
||||
printf("ERROR: Failed to initialize delay node.");
|
||||
return -1;
|
||||
}
|
||||
|
||||
/* Connect the output bus of the delay node to the input bus of the endpoint. */
|
||||
ma_node_attach_output_bus(&g_delayNode, 0, ma_node_graph_get_endpoint(&g_nodeGraph), 0);
|
||||
|
||||
/* Set the volume of the delay filter to make it more of less impactful. */
|
||||
ma_node_set_output_bus_volume(&g_delayNode, 0, 1 - LPF_BIAS);
|
||||
}
|
||||
|
||||
|
||||
/* Splitter. */
|
||||
{
|
||||
ma_splitter_node_config splitterNodeConfig = ma_splitter_node_config_init(CHANNELS);
|
||||
|
||||
result = ma_splitter_node_init(&g_nodeGraph, &splitterNodeConfig, NULL, &g_splitterNode);
|
||||
if (result != MA_SUCCESS) {
|
||||
printf("ERROR: Failed to initialize splitter node.");
|
||||
return -1;
|
||||
}
|
||||
|
||||
/* Connect output bus 0 to the input bus of the low pass filter node, and output bus 1 to the input bus of the delay node. */
|
||||
ma_node_attach_output_bus(&g_splitterNode, 0, &g_lpfNode, 0);
|
||||
ma_node_attach_output_bus(&g_splitterNode, 1, &g_delayNode, 0);
|
||||
}
|
||||
|
||||
|
||||
/* Data sources. Ignore any that cannot be loaded. */
|
||||
g_pSoundNodes = (sound_node*)ma_malloc(sizeof(*g_pSoundNodes) * argc-1, NULL);
|
||||
if (g_pSoundNodes == NULL) {
|
||||
printf("Failed to allocate memory for sounds.");
|
||||
return -1;
|
||||
}
|
||||
|
||||
g_soundNodeCount = 0;
|
||||
for (iarg = 1; iarg < argc; iarg += 1) {
|
||||
ma_decoder_config decoderConfig = ma_decoder_config_init(FORMAT, CHANNELS, SAMPLE_RATE);
|
||||
|
||||
result = ma_decoder_init_file(argv[iarg], &decoderConfig, &g_pSoundNodes[g_soundNodeCount].decoder);
|
||||
if (result == MA_SUCCESS) {
|
||||
ma_data_source_node_config dataSourceNodeConfig = ma_data_source_node_config_init(&g_pSoundNodes[g_soundNodeCount].decoder);
|
||||
|
||||
result = ma_data_source_node_init(&g_nodeGraph, &dataSourceNodeConfig, NULL, &g_pSoundNodes[g_soundNodeCount].node);
|
||||
if (result == MA_SUCCESS) {
|
||||
/* The data source node has been created successfully. Attach it to the splitter. */
|
||||
ma_node_attach_output_bus(&g_pSoundNodes[g_soundNodeCount].node, 0, &g_splitterNode, 0);
|
||||
g_soundNodeCount += 1;
|
||||
} else {
|
||||
printf("WARNING: Failed to init data source node for sound \"%s\". Ignoring.", argv[iarg]);
|
||||
ma_decoder_uninit(&g_pSoundNodes[g_soundNodeCount].decoder);
|
||||
}
|
||||
} else {
|
||||
printf("WARNING: Failed to load sound \"%s\". Ignoring.", argv[iarg]);
|
||||
}
|
||||
}
|
||||
|
||||
/* Everything has been initialized successfully so now we can set up a playback device so we can listen to the result. */
|
||||
{
|
||||
ma_device_config deviceConfig;
|
||||
ma_device device;
|
||||
|
||||
deviceConfig = ma_device_config_init(ma_device_type_playback);
|
||||
deviceConfig.playback.format = FORMAT;
|
||||
deviceConfig.playback.channels = CHANNELS;
|
||||
deviceConfig.sampleRate = SAMPLE_RATE;
|
||||
deviceConfig.dataCallback = data_callback;
|
||||
deviceConfig.pUserData = NULL;
|
||||
|
||||
result = ma_device_init(NULL, &deviceConfig, &device);
|
||||
if (result != MA_SUCCESS) {
|
||||
printf("ERROR: Failed to initialize device.");
|
||||
goto cleanup_graph;
|
||||
}
|
||||
|
||||
result = ma_device_start(&device);
|
||||
if (result != MA_SUCCESS) {
|
||||
ma_device_uninit(&device);
|
||||
goto cleanup_graph;
|
||||
}
|
||||
|
||||
printf("Press Enter to quit...\n");
|
||||
getchar();
|
||||
|
||||
/* We're done. Clean up the device. */
|
||||
ma_device_uninit(&device);
|
||||
}
|
||||
|
||||
|
||||
cleanup_graph:
|
||||
{
|
||||
/* It's good practice to tear down the graph from the lowest level nodes first. */
|
||||
int iSound;
|
||||
|
||||
/* Sounds. */
|
||||
for (iSound = 0; iSound < g_soundNodeCount; iSound += 1) {
|
||||
ma_data_source_node_uninit(&g_pSoundNodes[iSound].node, NULL);
|
||||
ma_decoder_uninit(&g_pSoundNodes[iSound].decoder);
|
||||
}
|
||||
|
||||
/* Splitter. */
|
||||
ma_splitter_node_uninit(&g_splitterNode, NULL);
|
||||
|
||||
/* Echo / Delay */
|
||||
ma_delay_node_uninit(&g_delayNode, NULL);
|
||||
|
||||
/* Low Pass Filter */
|
||||
ma_lpf_node_uninit(&g_lpfNode, NULL);
|
||||
|
||||
/* Node Graph */
|
||||
ma_node_graph_uninit(&g_nodeGraph, NULL);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
151
thirdparty/miniaudio/examples/resource_manager.c
vendored
Normal file
151
thirdparty/miniaudio/examples/resource_manager.c
vendored
Normal file
@@ -0,0 +1,151 @@
|
||||
/*
|
||||
Demonstrates how you can use the resource manager to manage loaded sounds.
|
||||
|
||||
This example loads the first sound specified on the command line via the resource manager and then plays it using the
|
||||
low level API.
|
||||
|
||||
You can control whether or not you want to load the sound asynchronously and whether or not you want to store the data
|
||||
in-memory or stream it. When storing the sound in-memory you can also control whether or not it is decoded. To do this,
|
||||
specify a combination of the following options in `ma_resource_manager_data_source_init()`:
|
||||
|
||||
* MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_ASYNC - Load asynchronously.
|
||||
* MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_DECODE - Store the sound in-memory in uncompressed/decoded format.
|
||||
* MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_STREAM - Stream the sound from disk rather than storing entirely in memory. Useful for music.
|
||||
|
||||
The object returned by the resource manager is just a standard data source which means it can be plugged into any of
|
||||
`ma_data_source_*()` APIs just like any other data source and it should just work.
|
||||
|
||||
Internally, there's a background thread that's used to process jobs and enable asynchronicity. By default there is only
|
||||
a single job thread, but this can be configured in the resource manager config. You can also implement your own threads
|
||||
for processing jobs. That is more advanced, and beyond the scope of this example.
|
||||
|
||||
When you initialize a resource manager you can specify the sample format, channels and sample rate to use when reading
|
||||
data from the data source. This means the resource manager will ensure all sounds will have a standard format. When not
|
||||
set, each sound will have their own formats and you'll need to do the necessary data conversion yourself.
|
||||
*/
|
||||
#define MA_NO_ENGINE /* We're intentionally not using the ma_engine API here. */
|
||||
#define MINIAUDIO_IMPLEMENTATION
|
||||
#include "../miniaudio.h"
|
||||
|
||||
#ifdef __EMSCRIPTEN__
#include <emscripten.h>

/* Per-frame callback for the Emscripten main loop. pUserData is the ma_resource_manager. */
void main_loop__em(void* pUserData)
{
    ma_resource_manager* pResourceManager = (ma_resource_manager*)pUserData;
    MA_ASSERT(pResourceManager != NULL);

    /*
    The Emscripten build does not support threading which means we need to process jobs manually. If
    there are no jobs needing to be processed this will return immediately with MA_NO_DATA_AVAILABLE.
    */
    ma_resource_manager_process_next_job(pResourceManager);
}
#endif
|
||||
/*
Playback device callback. The device's pUserData points at the resource manager data source
set up in main(); frames are read straight from it into the output buffer.
*/
void data_callback(ma_device* pDevice, void* pOutput, const void* pInput, ma_uint32 frameCount)
{
    ma_data_source* pDataSource = (ma_data_source*)pDevice->pUserData;

    ma_data_source_read_pcm_frames(pDataSource, pOutput, frameCount, NULL);

    (void)pInput;   /* Playback-only device; no input buffer. */
}
|
||||
int main(int argc, char** argv)
|
||||
{
|
||||
ma_result result;
|
||||
ma_device_config deviceConfig;
|
||||
ma_device device;
|
||||
ma_resource_manager_config resourceManagerConfig;
|
||||
ma_resource_manager resourceManager;
|
||||
ma_resource_manager_data_source dataSource;
|
||||
|
||||
if (argc < 2) {
|
||||
printf("No input file.");
|
||||
return -1;
|
||||
}
|
||||
|
||||
|
||||
/* We'll initialize the device first. */
|
||||
deviceConfig = ma_device_config_init(ma_device_type_playback);
|
||||
deviceConfig.dataCallback = data_callback;
|
||||
deviceConfig.pUserData = &dataSource; /* <-- We'll be reading from this in the data callback. */
|
||||
|
||||
result = ma_device_init(NULL, &deviceConfig, &device);
|
||||
if (result != MA_SUCCESS) {
|
||||
printf("Failed to initialize device.");
|
||||
return -1;
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
We have the device so now we want to initialize the resource manager. We'll use the resource manager to load a
|
||||
sound based on the command line.
|
||||
*/
|
||||
resourceManagerConfig = ma_resource_manager_config_init();
|
||||
resourceManagerConfig.decodedFormat = device.playback.format;
|
||||
resourceManagerConfig.decodedChannels = device.playback.channels;
|
||||
resourceManagerConfig.decodedSampleRate = device.sampleRate;
|
||||
|
||||
/*
|
||||
We're not supporting threading with Emscripten so go ahead and disable threading. It's important
|
||||
that we set the appropriate flag and also the job thread count to 0.
|
||||
*/
|
||||
#ifdef __EMSCRIPTEN__
|
||||
resourceManagerConfig.flags |= MA_RESOURCE_MANAGER_FLAG_NO_THREADING;
|
||||
resourceManagerConfig.jobThreadCount = 0;
|
||||
#endif
|
||||
|
||||
result = ma_resource_manager_init(&resourceManagerConfig, &resourceManager);
|
||||
if (result != MA_SUCCESS) {
|
||||
ma_device_uninit(&device);
|
||||
printf("Failed to initialize the resource manager.");
|
||||
return -1;
|
||||
}
|
||||
|
||||
/* Now that we have a resource manager we can load a sound. */
|
||||
result = ma_resource_manager_data_source_init(
|
||||
&resourceManager,
|
||||
argv[1],
|
||||
MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_DECODE | MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_ASYNC | MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_STREAM,
|
||||
NULL, /* Async notification. */
|
||||
&dataSource);
|
||||
if (result != MA_SUCCESS) {
|
||||
printf("Failed to load sound \"%s\".", argv[1]);
|
||||
return -1;
|
||||
}
|
||||
|
||||
/* In this example we'll enable looping. */
|
||||
ma_data_source_set_looping(&dataSource, MA_TRUE);
|
||||
|
||||
|
||||
/* Now that we have a sound we can start the device. */
|
||||
result = ma_device_start(&device);
|
||||
if (result != MA_SUCCESS) {
|
||||
ma_device_uninit(&device);
|
||||
printf("Failed to start device.");
|
||||
return -1;
|
||||
}
|
||||
|
||||
#ifdef __EMSCRIPTEN__
|
||||
emscripten_set_main_loop_arg(main_loop__em, &resourceManager, 0, 1);
|
||||
#else
|
||||
printf("Press Enter to quit...\n");
|
||||
getchar();
|
||||
#endif
|
||||
|
||||
/* Teardown. */
|
||||
|
||||
/* Uninitialize the device first to ensure the data callback is stopped and doesn't try to access any data. */
|
||||
ma_device_uninit(&device);
|
||||
|
||||
/*
|
||||
Before uninitializing the resource manager we need to uninitialize every data source. The data source is owned by
|
||||
the caller which means you're responsible for uninitializing it.
|
||||
*/
|
||||
ma_resource_manager_data_source_uninit(&dataSource);
|
||||
|
||||
/* Uninitialize the resource manager after each data source. */
|
||||
ma_resource_manager_uninit(&resourceManager);
|
||||
|
||||
return 0;
|
||||
}
|
||||
329
thirdparty/miniaudio/examples/resource_manager_advanced.c
vendored
Normal file
329
thirdparty/miniaudio/examples/resource_manager_advanced.c
vendored
Normal file
@@ -0,0 +1,329 @@
|
||||
/*
|
||||
Demonstrates how you can use the resource manager to manage loaded sounds.
|
||||
|
||||
The resource manager can be used to create a data source whose resources are managed internally by miniaudio. The data
|
||||
sources can then be read just like any other data source such as decoders and audio buffers.
|
||||
|
||||
In this example we use the resource manager independently of the `ma_engine` API so that we can demonstrate how it can
|
||||
be used by itself without getting it confused with `ma_engine`.
|
||||
|
||||
The main feature of the resource manager is the ability to decode and stream audio data asynchronously. Asynchronicity
|
||||
is achieved with a job system. The resource manager will issue jobs which are processed by a configurable number of job
|
||||
threads. You can also implement your own custom job threads which this example also demonstrates.
|
||||
|
||||
In this example we show how you can create a data source, mix them with other data sources, configure the number of job
|
||||
threads to manage internally and how to implement your own custom job thread.
|
||||
*/
|
||||
#define MA_NO_ENGINE /* We're intentionally not using the ma_engine API here. */
|
||||
#define MINIAUDIO_IMPLEMENTATION
|
||||
#include "../miniaudio.h"
|
||||
|
||||
static ma_resource_manager_data_source g_dataSources[16];
|
||||
static ma_uint32 g_dataSourceCount;
|
||||
|
||||
|
||||
/*
|
||||
TODO: Consider putting these public functions in miniaudio.h. Will depend on ma_mix_pcm_frames_f32()
|
||||
being merged into miniaudio.h (it's currently in miniaudio_engine.h).
|
||||
*/
|
||||
/*
Reads PCM frames from a data source as f32, converting from the source's native
format when necessary.

This function is intended to be used when the format and channel count of the data source is
known beforehand. The idea is to avoid overhead due to redundant calls to ma_data_source_get_data_format().

On output, *pFramesRead (if non-NULL) receives the number of frames actually read.
*/
static ma_result ma_data_source_read_pcm_frames_f32_ex(ma_data_source* pDataSource, float* pFramesOut, ma_uint64 frameCount, ma_uint64* pFramesRead, ma_format dataSourceFormat, ma_uint32 dataSourceChannels)
{
    MA_ASSERT(pDataSource != NULL);

    if (dataSourceFormat == ma_format_f32) {
        /* Fast path. No conversion necessary. */
        return ma_data_source_read_pcm_frames(pDataSource, pFramesOut, frameCount, pFramesRead);
    } else {
        /* Slow path. Read into a temporary native-format buffer, then convert to f32. */
        ma_result result = MA_SUCCESS;
        ma_uint64 totalFramesRead;
        ma_uint8 temp[MA_DATA_CONVERTER_STACK_BUFFER_SIZE];
        ma_uint64 tempCapInFrames = sizeof(temp) / ma_get_bytes_per_frame(dataSourceFormat, dataSourceChannels);

        totalFramesRead = 0;
        while (totalFramesRead < frameCount) {
            ma_uint64 framesJustRead;
            ma_uint64 framesToRead = frameCount - totalFramesRead;
            if (framesToRead > tempCapInFrames) {
                framesToRead = tempCapInFrames;
            }

            /*
            Fix: read into `temp` (the source's native format), not `pFramesOut`.
            The original read into pFramesOut and then converted uninitialized
            data from `temp` into the output buffer.
            */
            result = ma_data_source_read_pcm_frames(pDataSource, temp, framesToRead, &framesJustRead);

            ma_convert_pcm_frames_format(ma_offset_pcm_frames_ptr_f32(pFramesOut, totalFramesRead, dataSourceChannels), ma_format_f32, temp, dataSourceFormat, framesJustRead, dataSourceChannels, ma_dither_mode_none);
            totalFramesRead += framesJustRead;

            if (result != MA_SUCCESS) {
                break;
            }
        }

        /* Fix: the original never reported the frame count back to the caller on this path. */
        if (pFramesRead != NULL) {
            *pFramesRead = totalFramesRead;
        }

        return MA_SUCCESS;
    }
}
|
||||
|
||||
/*
Convenience wrapper: queries the data source's format/channels, then defers to
ma_data_source_read_pcm_frames_f32_ex() to do the actual read/convert.
*/
MA_API ma_result ma_data_source_read_pcm_frames_f32(ma_data_source* pDataSource, float* pFramesOut, ma_uint64 frameCount, ma_uint64* pFramesRead)
{
    ma_format format;
    ma_uint32 channels;
    ma_result result = ma_data_source_get_data_format(pDataSource, &format, &channels, NULL, NULL, 0);

    if (result != MA_SUCCESS) {
        return result;  /* Could not determine the data format of the data source. */
    }

    return ma_data_source_read_pcm_frames_f32_ex(pDataSource, pFramesOut, frameCount, pFramesRead, format, channels);
}
|
||||
|
||||
/*
Reads up to frameCount frames from the data source (converted to f32) and mixes
them into pFramesOut, scaled by `volume`. Works in chunks through a stack
buffer so no heap allocation is required.

Returns MA_INVALID_ARGS when pDataSource is NULL; otherwise MA_SUCCESS. The
number of frames mixed is reported via pFramesRead when non-NULL.
*/
MA_API ma_result ma_data_source_read_pcm_frames_and_mix_f32(ma_data_source* pDataSource, float* pFramesOut, ma_uint64 frameCount, ma_uint64* pFramesRead, float volume)
{
    ma_result result;
    ma_format sourceFormat;
    ma_uint32 sourceChannels;
    ma_uint64 framesProcessed = 0;

    if (pFramesRead != NULL) {
        *pFramesRead = 0;
    }

    if (pDataSource == NULL) {
        return MA_INVALID_ARGS;
    }

    result = ma_data_source_get_data_format(pDataSource, &sourceFormat, &sourceChannels, NULL, NULL, 0);
    if (result != MA_SUCCESS) {
        return result;  /* Could not determine the data format of the data source. */
    }

    while (framesProcessed < frameCount) {
        float chunk[MA_DATA_CONVERTER_STACK_BUFFER_SIZE/sizeof(float)];
        ma_uint64 chunkCapInFrames = ma_countof(chunk) / sourceChannels;
        ma_uint64 framesJustRead;
        ma_uint64 framesRemaining = frameCount - framesProcessed;
        ma_uint64 framesThisIteration = (framesRemaining > chunkCapInFrames) ? chunkCapInFrames : framesRemaining;

        /* Read a chunk as f32, then accumulate it into the caller's buffer. */
        result = ma_data_source_read_pcm_frames_f32_ex(pDataSource, chunk, framesThisIteration, &framesJustRead, sourceFormat, sourceChannels);

        ma_mix_pcm_frames_f32(ma_offset_pcm_frames_ptr(pFramesOut, framesProcessed, ma_format_f32, sourceChannels), chunk, framesJustRead, sourceChannels, volume);
        framesProcessed += framesJustRead;

        if (result != MA_SUCCESS) {
            break;  /* End of stream or read error; report what we mixed so far. */
        }
    }

    if (pFramesRead != NULL) {
        *pFramesRead = framesProcessed;
    }

    return MA_SUCCESS;
}
|
||||
|
||||
|
||||
/*
Playback data callback: mixes every loaded data source into the output buffer.

Assumes the device's format is f32 and that miniaudio pre-silences the output
buffer (the default), so the first mix can safely accumulate.
*/
void data_callback(ma_device* pDevice, void* pOutput, const void* pInput, ma_uint32 frameCount)
{
    ma_uint32 i;

    (void)pInput;   /* Unused. */

    MA_ASSERT(pDevice->playback.format == ma_format_f32);

    /*
    If the device was configured with noPreSilencedOutputBuffer you would need
    to silence the buffer here, or copy (not mix) the first data source:

        ma_silence_pcm_frames(pOutput, frameCount, ma_format_f32, pDevice->playback.channels);
    */

    /* Layer each sound on top of the others at full volume. */
    for (i = 0; i < g_dataSourceCount; i += 1) {
        ma_data_source_read_pcm_frames_and_mix_f32(&g_dataSources[i], (float*)pOutput, frameCount, NULL, /* volume = */1);
    }
}
|
||||
|
||||
/*
Entry point for a self-managed resource manager job thread.

Repeatedly pulls jobs from the resource manager's queue and processes them,
exiting when the quit job is observed (either via MA_CANCELLED from
ma_resource_manager_next_job(), or by inspecting the job code directly).
*/
static ma_thread_result MA_THREADCALL custom_job_thread(void* pUserData)
{
    ma_resource_manager* pResourceManager = (ma_resource_manager*)pUserData;
    ma_result result;
    ma_resource_manager_job job;

    MA_ASSERT(pResourceManager != NULL);

    for (;;) {
        /*
        Retrieve a job from the queue. By default this call blocks until a job is
        available. If the resource manager was initialized with
        MA_RESOURCE_MANAGER_FLAG_NON_BLOCKING it instead returns MA_NO_DATA_AVAILABLE
        when the queue is empty.

        When the quit job (MA_RESOURCE_MANAGER_JOB_QUIT) is returned, the result is
        always MA_CANCELLED. If you don't check the return value (you should), you can
        check the job code for MA_RESOURCE_MANAGER_JOB_QUIT instead.
        */
        result = ma_resource_manager_next_job(pResourceManager, &job);
        if (result != MA_SUCCESS) {
            if (result == MA_CANCELLED) {
                printf("CUSTOM JOB THREAD TERMINATING VIA MA_CANCELLED... ");
            } else {
                printf("CUSTOM JOB THREAD ERROR: %s. TERMINATING... ", ma_result_description(result));
            }

            break;
        }

        /*
        Defensive exit on the quit job itself. The quit job stays in the queue so
        every job thread gets a chance to see it and terminate. In practice the
        MA_CANCELLED check above already catches this; this branch only matters if
        the return value is not being checked.
        */
        if (job.toc.breakup.code == MA_RESOURCE_MANAGER_JOB_QUIT) {
            printf("CUSTOM JOB THREAD TERMINATING VIA MA_RESOURCE_MANAGER_JOB_QUIT... ");
            break;
        }

        /* Do the actual work for this job. */
        printf("PROCESSING IN CUSTOM JOB THREAD: %d\n", job.toc.breakup.code);
        ma_resource_manager_process_job(pResourceManager, &job);
    }

    printf("TERMINATED\n");
    return (ma_thread_result)0;
}
|
||||
|
||||
int main(int argc, char** argv)
|
||||
{
|
||||
ma_result result;
|
||||
ma_device_config deviceConfig;
|
||||
ma_device device;
|
||||
ma_resource_manager_config resourceManagerConfig;
|
||||
ma_resource_manager resourceManager;
|
||||
ma_thread jobThread;
|
||||
int iFile;
|
||||
|
||||
deviceConfig = ma_device_config_init(ma_device_type_playback);
|
||||
deviceConfig.playback.format = ma_format_f32;
|
||||
deviceConfig.dataCallback = data_callback;
|
||||
deviceConfig.pUserData = NULL;
|
||||
|
||||
result = ma_device_init(NULL, &deviceConfig, &device);
|
||||
if (result != MA_SUCCESS) {
|
||||
printf("Failed to initialize device.");
|
||||
return -1;
|
||||
}
|
||||
|
||||
|
||||
/* We can start the device before loading any sounds. We'll just end up outputting silence. */
|
||||
result = ma_device_start(&device);
|
||||
if (result != MA_SUCCESS) {
|
||||
ma_device_uninit(&device);
|
||||
printf("Failed to start device.");
|
||||
return -1;
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
We have the device so now we want to initialize the resource manager. We'll use the resource manager to load some
|
||||
sounds based on the command line.
|
||||
*/
|
||||
resourceManagerConfig = ma_resource_manager_config_init();
|
||||
|
||||
/*
|
||||
We'll set a standard decoding format to save us to processing time at mixing time. If you're wanting to use
|
||||
spatialization with your decoded sounds, you may want to consider leaving this as 0 to ensure the file's native
|
||||
channel count is used so you can do proper spatialization.
|
||||
*/
|
||||
resourceManagerConfig.decodedFormat = device.playback.format;
|
||||
resourceManagerConfig.decodedChannels = device.playback.channels;
|
||||
resourceManagerConfig.decodedSampleRate = device.sampleRate;
|
||||
|
||||
/* The number of job threads to be managed internally. Set this to 0 if you want to self-manage your job threads */
|
||||
resourceManagerConfig.jobThreadCount = 4;
|
||||
|
||||
result = ma_resource_manager_init(&resourceManagerConfig, &resourceManager);
|
||||
if (result != MA_SUCCESS) {
|
||||
ma_device_uninit(&device);
|
||||
printf("Failed to initialize the resource manager.");
|
||||
return -1;
|
||||
}
|
||||
|
||||
/*
|
||||
Now that we have a resource manager we can set up our custom job thread. This is optional. Normally when doing
|
||||
self-managed job threads you would set the internal job thread count to zero. We're doing both internal and
|
||||
self-managed job threads in this example just for demonstration purposes.
|
||||
*/
|
||||
ma_thread_create(&jobThread, ma_thread_priority_default, 0, custom_job_thread, &resourceManager, NULL);
|
||||
|
||||
/* Create each data source from the resource manager. Note that the caller is the owner. */
|
||||
for (iFile = 0; iFile < ma_countof(g_dataSources) && iFile < argc-1; iFile += 1) {
|
||||
result = ma_resource_manager_data_source_init(
|
||||
&resourceManager,
|
||||
argv[iFile+1],
|
||||
MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_DECODE | MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_ASYNC /*| MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_STREAM*/,
|
||||
NULL, /* Async notification. */
|
||||
&g_dataSources[iFile]);
|
||||
|
||||
if (result != MA_SUCCESS) {
|
||||
break;
|
||||
}
|
||||
|
||||
/* Use looping in this example. */
|
||||
ma_data_source_set_looping(&g_dataSources[iFile], MA_TRUE);
|
||||
|
||||
g_dataSourceCount += 1;
|
||||
}
|
||||
|
||||
printf("Press Enter to quit...");
|
||||
getchar();
|
||||
|
||||
|
||||
/* Teardown. */
|
||||
|
||||
/*
|
||||
Uninitialize the device first to ensure the data callback is stopped and doesn't try to access
|
||||
any data.
|
||||
*/
|
||||
ma_device_uninit(&device);
|
||||
|
||||
/*
|
||||
Our data sources need to be explicitly uninitialized. ma_resource_manager_uninit() will not do
|
||||
it for us. This needs to be done before posting the quit event and uninitializing the resource
|
||||
manager or else we'll get stuck in a deadlock because ma_resource_manager_data_source_uninit()
|
||||
will be waiting for the job thread(s) to finish work, which will never happen because they were
|
||||
just terminated.
|
||||
*/
|
||||
for (iFile = 0; (size_t)iFile < g_dataSourceCount; iFile += 1) {
|
||||
ma_resource_manager_data_source_uninit(&g_dataSources[iFile]);
|
||||
}
|
||||
|
||||
/*
|
||||
Before uninitializing the resource manager we need to make sure a quit event has been posted to
|
||||
ensure we can get out of our custom thread. The call to ma_resource_manager_uninit() will also
|
||||
do this, but we need to call it explicitly so that our self-managed thread can exit naturally.
|
||||
You only need to post a quit job if you're using that as the exit indicator. You can instead
|
||||
use whatever variable you want to terminate your job thread, but since this example is using a
|
||||
quit job we need to post one. Note that you don't need to do this if you're not managing your
|
||||
own threads - ma_resource_manager_uninit() alone will suffice in that case.
|
||||
*/
|
||||
ma_resource_manager_post_job_quit(&resourceManager);
|
||||
ma_thread_wait(&jobThread); /* Wait for the custom job thread to finish so it doesn't try to access any data. */
|
||||
|
||||
/* Uninitialize the resource manager after each data source. */
|
||||
ma_resource_manager_uninit(&resourceManager);
|
||||
|
||||
return 0;
|
||||
}
|
||||
74
thirdparty/miniaudio/examples/simple_capture.c
vendored
Normal file
74
thirdparty/miniaudio/examples/simple_capture.c
vendored
Normal file
@@ -0,0 +1,74 @@
|
||||
/*
|
||||
Demonstrates how to capture data from a microphone using the low-level API.
|
||||
|
||||
This example simply captures data from your default microphone until you press Enter. The output is saved to the file
|
||||
specified on the command line.
|
||||
|
||||
Capturing works in a very similar way to playback. The only difference is the direction of data movement. Instead of
|
||||
the application sending data to the device, the device will send data to the application. This example just writes the
|
||||
data received by the microphone straight to a WAV file.
|
||||
*/
|
||||
#define MINIAUDIO_IMPLEMENTATION
|
||||
#include "../miniaudio.h"
|
||||
|
||||
#include <stdlib.h>
|
||||
#include <stdio.h>
|
||||
|
||||
/*
Capture data callback: writes the incoming microphone frames straight to the
WAV encoder stored in pDevice->pUserData.
*/
void data_callback(ma_device* pDevice, void* pOutput, const void* pInput, ma_uint32 frameCount)
{
    ma_encoder* pEncoder = (ma_encoder*)pDevice->pUserData;

    (void)pOutput;  /* Capture-only device; no playback buffer to fill. */

    MA_ASSERT(pEncoder != NULL);
    ma_encoder_write_pcm_frames(pEncoder, pInput, frameCount, NULL);
}
|
||||
|
||||
int main(int argc, char** argv)
|
||||
{
|
||||
ma_result result;
|
||||
ma_encoder_config encoderConfig;
|
||||
ma_encoder encoder;
|
||||
ma_device_config deviceConfig;
|
||||
ma_device device;
|
||||
|
||||
if (argc < 2) {
|
||||
printf("No output file.\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
encoderConfig = ma_encoder_config_init(ma_encoding_format_wav, ma_format_f32, 2, 44100);
|
||||
|
||||
if (ma_encoder_init_file(argv[1], &encoderConfig, &encoder) != MA_SUCCESS) {
|
||||
printf("Failed to initialize output file.\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
deviceConfig = ma_device_config_init(ma_device_type_capture);
|
||||
deviceConfig.capture.format = encoder.config.format;
|
||||
deviceConfig.capture.channels = encoder.config.channels;
|
||||
deviceConfig.sampleRate = encoder.config.sampleRate;
|
||||
deviceConfig.dataCallback = data_callback;
|
||||
deviceConfig.pUserData = &encoder;
|
||||
|
||||
result = ma_device_init(NULL, &deviceConfig, &device);
|
||||
if (result != MA_SUCCESS) {
|
||||
printf("Failed to initialize capture device.\n");
|
||||
return -2;
|
||||
}
|
||||
|
||||
result = ma_device_start(&device);
|
||||
if (result != MA_SUCCESS) {
|
||||
ma_device_uninit(&device);
|
||||
printf("Failed to start device.\n");
|
||||
return -3;
|
||||
}
|
||||
|
||||
printf("Press Enter to stop recording...\n");
|
||||
getchar();
|
||||
|
||||
ma_device_uninit(&device);
|
||||
ma_encoder_uninit(&encoder);
|
||||
|
||||
return 0;
|
||||
}
|
||||
71
thirdparty/miniaudio/examples/simple_duplex.c
vendored
Normal file
71
thirdparty/miniaudio/examples/simple_duplex.c
vendored
Normal file
@@ -0,0 +1,71 @@
|
||||
/*
|
||||
Demonstrates duplex mode which is where data is captured from a microphone and then output to a speaker device.
|
||||
|
||||
This example captures audio from the default microphone and then outputs it straight to the default playback device
|
||||
without any kind of modification. If you wanted to, you could also apply filters and effects to the input stream
|
||||
before outputting to the playback device.
|
||||
|
||||
Note that the microphone and playback device must run in lockstep. Any kind of timing deviation will result in audible
|
||||
glitching which the backend may not be able to recover from. For this reason, miniaudio forces you to use the same
|
||||
sample rate for both capture and playback. If internally the native sample rates differ, miniaudio will perform the
|
||||
sample rate conversion for you automatically.
|
||||
*/
|
||||
#define MINIAUDIO_IMPLEMENTATION
|
||||
#include "../miniaudio.h"
|
||||
|
||||
#include <stdio.h>
|
||||
|
||||
#ifdef __EMSCRIPTEN__
/*
Emscripten requires a main loop callback to keep the page alive; this example
does all its work in the audio callback so nothing is needed per frame.
*/
void main_loop__em()
{
}
#endif
|
||||
|
||||
/*
Duplex data callback: routes captured microphone frames straight to the
playback buffer. Capture and playback were configured with identical
format/channels, so a raw memory copy is sufficient.
*/
void data_callback(ma_device* pDevice, void* pOutput, const void* pInput, ma_uint32 frameCount)
{
    ma_uint32 bytesPerFrame;

    MA_ASSERT(pDevice->capture.format   == pDevice->playback.format);
    MA_ASSERT(pDevice->capture.channels == pDevice->playback.channels);

    bytesPerFrame = ma_get_bytes_per_frame(pDevice->capture.format, pDevice->capture.channels);
    MA_COPY_MEMORY(pOutput, pInput, frameCount * bytesPerFrame);
}
|
||||
|
||||
int main(int argc, char** argv)
|
||||
{
|
||||
ma_result result;
|
||||
ma_device_config deviceConfig;
|
||||
ma_device device;
|
||||
|
||||
deviceConfig = ma_device_config_init(ma_device_type_duplex);
|
||||
deviceConfig.capture.pDeviceID = NULL;
|
||||
deviceConfig.capture.format = ma_format_s16;
|
||||
deviceConfig.capture.channels = 2;
|
||||
deviceConfig.capture.shareMode = ma_share_mode_shared;
|
||||
deviceConfig.playback.pDeviceID = NULL;
|
||||
deviceConfig.playback.format = ma_format_s16;
|
||||
deviceConfig.playback.channels = 2;
|
||||
deviceConfig.dataCallback = data_callback;
|
||||
result = ma_device_init(NULL, &deviceConfig, &device);
|
||||
if (result != MA_SUCCESS) {
|
||||
return result;
|
||||
}
|
||||
|
||||
#ifdef __EMSCRIPTEN__
|
||||
getchar();
|
||||
#endif
|
||||
|
||||
ma_device_start(&device);
|
||||
|
||||
#ifdef __EMSCRIPTEN__
|
||||
emscripten_set_main_loop(main_loop__em, 0, 1);
|
||||
#else
|
||||
printf("Press Enter to quit...\n");
|
||||
getchar();
|
||||
#endif
|
||||
|
||||
ma_device_uninit(&device);
|
||||
|
||||
(void)argc;
|
||||
(void)argv;
|
||||
return 0;
|
||||
}
|
||||
54
thirdparty/miniaudio/examples/simple_enumeration.c
vendored
Normal file
54
thirdparty/miniaudio/examples/simple_enumeration.c
vendored
Normal file
@@ -0,0 +1,54 @@
|
||||
/*
|
||||
Demonstrates how to enumerate over devices.
|
||||
|
||||
Device enumeration requires a `ma_context` object which is initialized with `ma_context_init()`. Conceptually, the
|
||||
context sits above a device. You can have many devices to one context.
|
||||
|
||||
If you use device enumeration, you should explicitly specify the same context you used for enumeration in the call to
|
||||
`ma_device_init()` when you initialize your devices.
|
||||
*/
|
||||
#define MINIAUDIO_IMPLEMENTATION
|
||||
#include "../miniaudio.h"
|
||||
|
||||
#include <stdio.h>
|
||||
|
||||
int main(int argc, char** argv)
|
||||
{
|
||||
ma_result result;
|
||||
ma_context context;
|
||||
ma_device_info* pPlaybackDeviceInfos;
|
||||
ma_uint32 playbackDeviceCount;
|
||||
ma_device_info* pCaptureDeviceInfos;
|
||||
ma_uint32 captureDeviceCount;
|
||||
ma_uint32 iDevice;
|
||||
|
||||
if (ma_context_init(NULL, 0, NULL, &context) != MA_SUCCESS) {
|
||||
printf("Failed to initialize context.\n");
|
||||
return -2;
|
||||
}
|
||||
|
||||
result = ma_context_get_devices(&context, &pPlaybackDeviceInfos, &playbackDeviceCount, &pCaptureDeviceInfos, &captureDeviceCount);
|
||||
if (result != MA_SUCCESS) {
|
||||
printf("Failed to retrieve device information.\n");
|
||||
return -3;
|
||||
}
|
||||
|
||||
printf("Playback Devices\n");
|
||||
for (iDevice = 0; iDevice < playbackDeviceCount; ++iDevice) {
|
||||
printf(" %u: %s\n", iDevice, pPlaybackDeviceInfos[iDevice].name);
|
||||
}
|
||||
|
||||
printf("\n");
|
||||
|
||||
printf("Capture Devices\n");
|
||||
for (iDevice = 0; iDevice < captureDeviceCount; ++iDevice) {
|
||||
printf(" %u: %s\n", iDevice, pCaptureDeviceInfos[iDevice].name);
|
||||
}
|
||||
|
||||
|
||||
ma_context_uninit(&context);
|
||||
|
||||
(void)argc;
|
||||
(void)argv;
|
||||
return 0;
|
||||
}
|
||||
82
thirdparty/miniaudio/examples/simple_loopback.c
vendored
Normal file
82
thirdparty/miniaudio/examples/simple_loopback.c
vendored
Normal file
@@ -0,0 +1,82 @@
|
||||
/*
|
||||
Demonstrates how to implement loopback recording.
|
||||
|
||||
This example simply captures data from your default playback device until you press Enter. The output is saved to the
|
||||
file specified on the command line.
|
||||
|
||||
Loopback mode is when you record audio that is played from a given speaker. It is only supported on WASAPI, but can be
|
||||
used indirectly with PulseAudio by choosing the appropriate loopback device after enumeration.
|
||||
|
||||
To use loopback mode you just need to set the device type to ma_device_type_loopback and set the capture device config
|
||||
properties. The output buffer in the callback will be null whereas the input buffer will be valid.
|
||||
*/
|
||||
#define MINIAUDIO_IMPLEMENTATION
|
||||
#include "../miniaudio.h"
|
||||
|
||||
#include <stdlib.h>
|
||||
#include <stdio.h>
|
||||
|
||||
/*
Loopback data callback: in loopback mode the device delivers what is being
played on the speaker via pInput; we write it straight to the WAV encoder
stored in pDevice->pUserData. pOutput is unused (NULL in loopback mode).
*/
void data_callback(ma_device* pDevice, void* pOutput, const void* pInput, ma_uint32 frameCount)
{
    ma_encoder* pEncoder = (ma_encoder*)pDevice->pUserData;

    (void)pOutput;

    MA_ASSERT(pEncoder != NULL);
    ma_encoder_write_pcm_frames(pEncoder, pInput, frameCount, NULL);
}
|
||||
|
||||
int main(int argc, char** argv)
|
||||
{
|
||||
ma_result result;
|
||||
ma_encoder_config encoderConfig;
|
||||
ma_encoder encoder;
|
||||
ma_device_config deviceConfig;
|
||||
ma_device device;
|
||||
|
||||
/* Loopback mode is currently only supported on WASAPI. */
|
||||
ma_backend backends[] = {
|
||||
ma_backend_wasapi
|
||||
};
|
||||
|
||||
if (argc < 2) {
|
||||
printf("No output file.\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
encoderConfig = ma_encoder_config_init(ma_encoding_format_wav, ma_format_f32, 2, 44100);
|
||||
|
||||
if (ma_encoder_init_file(argv[1], &encoderConfig, &encoder) != MA_SUCCESS) {
|
||||
printf("Failed to initialize output file.\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
deviceConfig = ma_device_config_init(ma_device_type_loopback);
|
||||
deviceConfig.capture.pDeviceID = NULL; /* Use default device for this example. Set this to the ID of a _playback_ device if you want to capture from a specific device. */
|
||||
deviceConfig.capture.format = encoder.config.format;
|
||||
deviceConfig.capture.channels = encoder.config.channels;
|
||||
deviceConfig.sampleRate = encoder.config.sampleRate;
|
||||
deviceConfig.dataCallback = data_callback;
|
||||
deviceConfig.pUserData = &encoder;
|
||||
|
||||
result = ma_device_init_ex(backends, sizeof(backends)/sizeof(backends[0]), NULL, &deviceConfig, &device);
|
||||
if (result != MA_SUCCESS) {
|
||||
printf("Failed to initialize loopback device.\n");
|
||||
return -2;
|
||||
}
|
||||
|
||||
result = ma_device_start(&device);
|
||||
if (result != MA_SUCCESS) {
|
||||
ma_device_uninit(&device);
|
||||
printf("Failed to start device.\n");
|
||||
return -3;
|
||||
}
|
||||
|
||||
printf("Press Enter to stop recording...\n");
|
||||
getchar();
|
||||
|
||||
ma_device_uninit(&device);
|
||||
ma_encoder_uninit(&encoder);
|
||||
|
||||
return 0;
|
||||
}
|
||||
76
thirdparty/miniaudio/examples/simple_looping.c
vendored
Normal file
76
thirdparty/miniaudio/examples/simple_looping.c
vendored
Normal file
@@ -0,0 +1,76 @@
|
||||
/*
|
||||
Shows one way to handle looping of a sound.
|
||||
|
||||
This example uses a decoder as the data source. Decoders can be used with the `ma_data_source` API which, conveniently,
|
||||
supports looping via the `ma_data_source_read_pcm_frames()` API. To use it, all you need to do is pass a pointer to the
|
||||
decoder straight into `ma_data_source_read_pcm_frames()` and it will just work.
|
||||
*/
|
||||
#define MINIAUDIO_IMPLEMENTATION
|
||||
#include "../miniaudio.h"
|
||||
|
||||
#include <stdio.h>
|
||||
|
||||
/*
Playback data callback: reads decoded frames into the output buffer.

Looping was configured on the decoder with ma_data_source_set_looping(), so
ma_data_source_read_pcm_frames() wraps around automatically at end of stream.
*/
void data_callback(ma_device* pDevice, void* pOutput, const void* pInput, ma_uint32 frameCount)
{
    ma_decoder* pDecoder = (ma_decoder*)pDevice->pUserData;

    (void)pInput;   /* Playback-only device. */

    if (pDecoder == NULL) {
        return;     /* Nothing to read from; leave the (pre-silenced) buffer as-is. */
    }

    ma_data_source_read_pcm_frames(pDecoder, pOutput, frameCount, NULL);
}
|
||||
|
||||
int main(int argc, char** argv)
|
||||
{
|
||||
ma_result result;
|
||||
ma_decoder decoder;
|
||||
ma_device_config deviceConfig;
|
||||
ma_device device;
|
||||
|
||||
if (argc < 2) {
|
||||
printf("No input file.\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
result = ma_decoder_init_file(argv[1], NULL, &decoder);
|
||||
if (result != MA_SUCCESS) {
|
||||
return -2;
|
||||
}
|
||||
|
||||
/*
|
||||
A decoder is a data source which means we just use ma_data_source_set_looping() to set the
|
||||
looping state. We will read data using ma_data_source_read_pcm_frames() in the data callback.
|
||||
*/
|
||||
ma_data_source_set_looping(&decoder, MA_TRUE);
|
||||
|
||||
deviceConfig = ma_device_config_init(ma_device_type_playback);
|
||||
deviceConfig.playback.format = decoder.outputFormat;
|
||||
deviceConfig.playback.channels = decoder.outputChannels;
|
||||
deviceConfig.sampleRate = decoder.outputSampleRate;
|
||||
deviceConfig.dataCallback = data_callback;
|
||||
deviceConfig.pUserData = &decoder;
|
||||
|
||||
if (ma_device_init(NULL, &deviceConfig, &device) != MA_SUCCESS) {
|
||||
printf("Failed to open playback device.\n");
|
||||
ma_decoder_uninit(&decoder);
|
||||
return -3;
|
||||
}
|
||||
|
||||
if (ma_device_start(&device) != MA_SUCCESS) {
|
||||
printf("Failed to start playback device.\n");
|
||||
ma_device_uninit(&device);
|
||||
ma_decoder_uninit(&decoder);
|
||||
return -4;
|
||||
}
|
||||
|
||||
printf("Press Enter to quit...");
|
||||
getchar();
|
||||
|
||||
ma_device_uninit(&device);
|
||||
ma_decoder_uninit(&decoder);
|
||||
|
||||
return 0;
|
||||
}
|
||||
199
thirdparty/miniaudio/examples/simple_mixing.c
vendored
Normal file
199
thirdparty/miniaudio/examples/simple_mixing.c
vendored
Normal file
@@ -0,0 +1,199 @@
|
||||
/*
|
||||
Demonstrates one way to load multiple files and play them all back at the same time.
|
||||
|
||||
When mixing multiple sounds together, you should not create multiple devices. Instead you should create only a single
|
||||
device and then mix your sounds together which you can do by simply summing their samples together. The simplest way to
|
||||
do this is to use floating point samples and use miniaudio's built-in clipper to handle clipping for you. (Clipping
|
||||
is when samples are clamped to their minimum and maximum range, which for floating point is -1..1.)
|
||||
|
||||
```
|
||||
Usage: simple_mixing [input file 0] [input file 1] ... [input file n]
|
||||
Example: simple_mixing file1.wav file2.flac
|
||||
```
|
||||
*/
|
||||
#define MINIAUDIO_IMPLEMENTATION
|
||||
#include "../miniaudio.h"
|
||||
|
||||
#include <stdio.h>
|
||||
|
||||
/*
|
||||
For simplicity, this example requires the device to use floating point samples.
|
||||
*/
|
||||
/* Shared output format: every decoder and the playback device use this exact format. */
#define SAMPLE_FORMAT ma_format_f32
#define CHANNEL_COUNT 2
#define SAMPLE_RATE 48000

/* One decoder per input file, allocated in main(). g_pDecodersAtEnd[i] is set to
   MA_TRUE by the audio thread once decoder i has been fully drained. */
ma_uint32 g_decoderCount;
ma_decoder* g_pDecoders;
ma_bool32* g_pDecodersAtEnd;

ma_event g_stopEvent; /* <-- Signaled by the audio thread, waited on by the main thread. */
|
||||
|
||||
ma_bool32 are_all_decoders_at_end()
|
||||
{
|
||||
ma_uint32 iDecoder;
|
||||
for (iDecoder = 0; iDecoder < g_decoderCount; ++iDecoder) {
|
||||
if (g_pDecodersAtEnd[iDecoder] == MA_FALSE) {
|
||||
return MA_FALSE;
|
||||
}
|
||||
}
|
||||
|
||||
return MA_TRUE;
|
||||
}
|
||||
|
||||
/*
Decodes up to frameCount frames from pDecoder and accumulates (sums) the samples
into pOutputF32. No clipping is applied here. Decoding goes through a small
scratch buffer so arbitrarily large requests are handled in chunks.

Returns the number of frames actually mixed; a value smaller than frameCount
means the decoder reached the end (or failed).
*/
ma_uint32 read_and_mix_pcm_frames_f32(ma_decoder* pDecoder, float* pOutputF32, ma_uint32 frameCount)
{
    float scratch[4096];
    ma_uint32 scratchCapInFrames = ma_countof(scratch) / CHANNEL_COUNT;
    ma_uint32 framesDone = 0;

    while (framesDone < frameCount) {
        ma_result result;
        ma_uint64 framesJustRead;
        ma_uint64 s;
        ma_uint32 framesWanted = frameCount - framesDone;
        if (framesWanted > scratchCapInFrames) {
            framesWanted = scratchCapInFrames;
        }

        result = ma_decoder_read_pcm_frames(pDecoder, scratch, framesWanted, &framesJustRead);
        if (result != MA_SUCCESS || framesJustRead == 0) {
            break;
        }

        /* Mix: sample-wise accumulate the scratch buffer into the output. */
        for (s = 0; s < framesJustRead * CHANNEL_COUNT; ++s) {
            pOutputF32[framesDone * CHANNEL_COUNT + s] += scratch[s];
        }

        framesDone += (ma_uint32)framesJustRead;

        if (framesJustRead < (ma_uint32)framesWanted) {
            break; /* Reached EOF. */
        }
    }

    return framesDone;
}
|
||||
|
||||
/*
Audio-thread callback: mixes every still-active decoder into the output buffer,
then signals the main thread once all of them have finished. The device cannot
be stopped from inside its own callback, hence the event.
*/
void data_callback(ma_device* pDevice, void* pOutput, const void* pInput, ma_uint32 frameCount)
{
    float* pFramesOutF32 = (float*)pOutput;
    ma_uint32 i;

    MA_ASSERT(pDevice->playback.format == SAMPLE_FORMAT); /* <-- Important for this example. */

    for (i = 0; i < g_decoderCount; ++i) {
        if (g_pDecodersAtEnd[i] == MA_FALSE) {
            ma_uint32 framesMixed = read_and_mix_pcm_frames_f32(&g_pDecoders[i], pFramesOutF32, frameCount);
            if (framesMixed < frameCount) {
                g_pDecodersAtEnd[i] = MA_TRUE; /* Short read means this decoder hit EOF. */
            }
        }
    }

    if (are_all_decoders_at_end()) {
        ma_event_signal(&g_stopEvent);
    }

    (void)pInput;
}
|
||||
|
||||
int main(int argc, char** argv)
|
||||
{
|
||||
ma_result result;
|
||||
ma_decoder_config decoderConfig;
|
||||
ma_device_config deviceConfig;
|
||||
ma_device device;
|
||||
ma_uint32 iDecoder;
|
||||
|
||||
if (argc < 2) {
|
||||
printf("No input files.\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
g_decoderCount = argc-1;
|
||||
g_pDecoders = (ma_decoder*)malloc(sizeof(*g_pDecoders) * g_decoderCount);
|
||||
g_pDecodersAtEnd = (ma_bool32*) malloc(sizeof(*g_pDecodersAtEnd) * g_decoderCount);
|
||||
|
||||
/* In this example, all decoders need to have the same output format. */
|
||||
decoderConfig = ma_decoder_config_init(SAMPLE_FORMAT, CHANNEL_COUNT, SAMPLE_RATE);
|
||||
for (iDecoder = 0; iDecoder < g_decoderCount; ++iDecoder) {
|
||||
result = ma_decoder_init_file(argv[1+iDecoder], &decoderConfig, &g_pDecoders[iDecoder]);
|
||||
if (result != MA_SUCCESS) {
|
||||
ma_uint32 iDecoder2;
|
||||
for (iDecoder2 = 0; iDecoder2 < iDecoder; ++iDecoder2) {
|
||||
ma_decoder_uninit(&g_pDecoders[iDecoder2]);
|
||||
}
|
||||
free(g_pDecoders);
|
||||
free(g_pDecodersAtEnd);
|
||||
|
||||
printf("Failed to load %s.\n", argv[1+iDecoder]);
|
||||
return -3;
|
||||
}
|
||||
g_pDecodersAtEnd[iDecoder] = MA_FALSE;
|
||||
}
|
||||
|
||||
/* Create only a single device. The decoders will be mixed together in the callback. In this example the data format needs to be the same as the decoders. */
|
||||
deviceConfig = ma_device_config_init(ma_device_type_playback);
|
||||
deviceConfig.playback.format = SAMPLE_FORMAT;
|
||||
deviceConfig.playback.channels = CHANNEL_COUNT;
|
||||
deviceConfig.sampleRate = SAMPLE_RATE;
|
||||
deviceConfig.dataCallback = data_callback;
|
||||
deviceConfig.pUserData = NULL;
|
||||
|
||||
if (ma_device_init(NULL, &deviceConfig, &device) != MA_SUCCESS) {
|
||||
for (iDecoder = 0; iDecoder < g_decoderCount; ++iDecoder) {
|
||||
ma_decoder_uninit(&g_pDecoders[iDecoder]);
|
||||
}
|
||||
free(g_pDecoders);
|
||||
free(g_pDecodersAtEnd);
|
||||
|
||||
printf("Failed to open playback device.\n");
|
||||
return -3;
|
||||
}
|
||||
|
||||
/*
|
||||
We can't stop in the audio thread so we instead need to use an event. We wait on this thread in the main thread, and signal it in the audio thread. This
|
||||
needs to be done before starting the device. We need a context to initialize the event, which we can get from the device. Alternatively you can initialize
|
||||
a context separately, but we don't need to do that for this example.
|
||||
*/
|
||||
ma_event_init(&g_stopEvent);
|
||||
|
||||
/* Now we start playback and wait for the audio thread to tell us to stop. */
|
||||
if (ma_device_start(&device) != MA_SUCCESS) {
|
||||
ma_device_uninit(&device);
|
||||
for (iDecoder = 0; iDecoder < g_decoderCount; ++iDecoder) {
|
||||
ma_decoder_uninit(&g_pDecoders[iDecoder]);
|
||||
}
|
||||
free(g_pDecoders);
|
||||
free(g_pDecodersAtEnd);
|
||||
|
||||
printf("Failed to start playback device.\n");
|
||||
return -4;
|
||||
}
|
||||
|
||||
printf("Waiting for playback to complete...\n");
|
||||
ma_event_wait(&g_stopEvent);
|
||||
|
||||
/* Getting here means the audio thread has signaled that the device should be stopped. */
|
||||
ma_device_uninit(&device);
|
||||
|
||||
for (iDecoder = 0; iDecoder < g_decoderCount; ++iDecoder) {
|
||||
ma_decoder_uninit(&g_pDecoders[iDecoder]);
|
||||
}
|
||||
free(g_pDecoders);
|
||||
free(g_pDecodersAtEnd);
|
||||
|
||||
return 0;
|
||||
}
|
||||
75
thirdparty/miniaudio/examples/simple_playback.c
vendored
Normal file
75
thirdparty/miniaudio/examples/simple_playback.c
vendored
Normal file
@@ -0,0 +1,75 @@
|
||||
/*
|
||||
Demonstrates how to load a sound file and play it back using the low-level API.
|
||||
|
||||
The low-level API uses a callback to deliver audio between the application and miniaudio for playback or recording. When
|
||||
in playback mode, as in this example, the application sends raw audio data to miniaudio which is then played back through
|
||||
the default playback device as defined by the operating system.
|
||||
|
||||
This example uses the `ma_decoder` API to load a sound and play it back. The decoder is entirely decoupled from the
|
||||
device and can be used independently of it. This example only plays back a single sound file, but it's possible to play
|
||||
back multiple files by simply loading multiple decoders and mixing them (do not create multiple devices to do this). See
|
||||
the simple_mixing example for how best to do this.
|
||||
*/
|
||||
#define MINIAUDIO_IMPLEMENTATION
|
||||
#include "../miniaudio.h"
|
||||
|
||||
#include <stdio.h>
|
||||
|
||||
/*
Audio-thread callback: pulls decoded PCM frames straight from the decoder
attached via pUserData into the device's output buffer.
*/
void data_callback(ma_device* pDevice, void* pOutput, const void* pInput, ma_uint32 frameCount)
{
    ma_decoder* pDecoder = (ma_decoder*)pDevice->pUserData;

    if (pDecoder != NULL) {
        ma_decoder_read_pcm_frames(pDecoder, pOutput, frameCount, NULL);
    }

    (void)pInput;
}
|
||||
|
||||
int main(int argc, char** argv)
|
||||
{
|
||||
ma_result result;
|
||||
ma_decoder decoder;
|
||||
ma_device_config deviceConfig;
|
||||
ma_device device;
|
||||
|
||||
if (argc < 2) {
|
||||
printf("No input file.\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
result = ma_decoder_init_file(argv[1], NULL, &decoder);
|
||||
if (result != MA_SUCCESS) {
|
||||
printf("Could not load file: %s\n", argv[1]);
|
||||
return -2;
|
||||
}
|
||||
|
||||
deviceConfig = ma_device_config_init(ma_device_type_playback);
|
||||
deviceConfig.playback.format = decoder.outputFormat;
|
||||
deviceConfig.playback.channels = decoder.outputChannels;
|
||||
deviceConfig.sampleRate = decoder.outputSampleRate;
|
||||
deviceConfig.dataCallback = data_callback;
|
||||
deviceConfig.pUserData = &decoder;
|
||||
|
||||
if (ma_device_init(NULL, &deviceConfig, &device) != MA_SUCCESS) {
|
||||
printf("Failed to open playback device.\n");
|
||||
ma_decoder_uninit(&decoder);
|
||||
return -3;
|
||||
}
|
||||
|
||||
if (ma_device_start(&device) != MA_SUCCESS) {
|
||||
printf("Failed to start playback device.\n");
|
||||
ma_device_uninit(&device);
|
||||
ma_decoder_uninit(&decoder);
|
||||
return -4;
|
||||
}
|
||||
|
||||
printf("Press Enter to quit...");
|
||||
getchar();
|
||||
|
||||
ma_device_uninit(&device);
|
||||
ma_decoder_uninit(&decoder);
|
||||
|
||||
return 0;
|
||||
}
|
||||
91
thirdparty/miniaudio/examples/simple_playback_sine.c
vendored
Normal file
91
thirdparty/miniaudio/examples/simple_playback_sine.c
vendored
Normal file
@@ -0,0 +1,91 @@
|
||||
/*
|
||||
Demonstrates playback of a sine wave.
|
||||
|
||||
Since all this example is doing is playing back a sine wave, we can disable decoding (and encoding) which will slightly
|
||||
reduce the size of the executable. This is done with the `MA_NO_DECODING` and `MA_NO_ENCODING` options.
|
||||
|
||||
The generation of sine wave is achieved via the `ma_waveform` API. A waveform is a data source which means it can be
|
||||
seamlessly plugged into the `ma_data_source_*()` family of APIs as well.
|
||||
|
||||
A waveform is initialized using the standard config/init pattern used throughout all of miniaudio. Frames are read via
|
||||
the `ma_waveform_read_pcm_frames()` API.
|
||||
|
||||
This example works with Emscripten.
|
||||
*/
|
||||
#define MA_NO_DECODING
|
||||
#define MA_NO_ENCODING
|
||||
#define MINIAUDIO_IMPLEMENTATION
|
||||
#include "../miniaudio.h"
|
||||
|
||||
#include <stdio.h>
|
||||
|
||||
#ifdef __EMSCRIPTEN__
#include <emscripten.h>

/* Emscripten main-loop callback. Audio is driven entirely by the device's own
   data callback, so there is nothing to do per frame; this loop only keeps the
   runtime alive. */
void main_loop__em(void)   /* FIX: (void) — an empty parameter list is an obsolescent, unprototyped declaration in C. */
{
}
#endif
|
||||
|
||||
/* Requested device output format; the sine waveform is configured to match
   whatever the device actually ends up with (see main()). */
#define DEVICE_FORMAT ma_format_f32
#define DEVICE_CHANNELS 2
#define DEVICE_SAMPLE_RATE 48000
|
||||
|
||||
/*
Audio-thread callback: generates frameCount frames of the sine waveform
attached via pUserData directly into the device's output buffer.
*/
void data_callback(ma_device* pDevice, void* pOutput, const void* pInput, ma_uint32 frameCount)
{
    ma_waveform* pWaveform = (ma_waveform*)pDevice->pUserData;

    MA_ASSERT(pDevice->playback.channels == DEVICE_CHANNELS);
    MA_ASSERT(pWaveform != NULL);

    ma_waveform_read_pcm_frames(pWaveform, pOutput, frameCount, NULL);

    (void)pInput; /* Unused. */
}
|
||||
|
||||
int main(int argc, char** argv)
|
||||
{
|
||||
ma_waveform sineWave;
|
||||
ma_device_config deviceConfig;
|
||||
ma_device device;
|
||||
ma_waveform_config sineWaveConfig;
|
||||
|
||||
deviceConfig = ma_device_config_init(ma_device_type_playback);
|
||||
deviceConfig.playback.format = DEVICE_FORMAT;
|
||||
deviceConfig.playback.channels = DEVICE_CHANNELS;
|
||||
deviceConfig.sampleRate = DEVICE_SAMPLE_RATE;
|
||||
deviceConfig.dataCallback = data_callback;
|
||||
deviceConfig.pUserData = &sineWave;
|
||||
|
||||
if (ma_device_init(NULL, &deviceConfig, &device) != MA_SUCCESS) {
|
||||
printf("Failed to open playback device.\n");
|
||||
return -4;
|
||||
}
|
||||
|
||||
printf("Device Name: %s\n", device.playback.name);
|
||||
|
||||
sineWaveConfig = ma_waveform_config_init(device.playback.format, device.playback.channels, device.sampleRate, ma_waveform_type_sine, 0.2, 220);
|
||||
ma_waveform_init(&sineWaveConfig, &sineWave);
|
||||
|
||||
if (ma_device_start(&device) != MA_SUCCESS) {
|
||||
printf("Failed to start playback device.\n");
|
||||
ma_device_uninit(&device);
|
||||
return -5;
|
||||
}
|
||||
|
||||
#ifdef __EMSCRIPTEN__
|
||||
emscripten_set_main_loop(main_loop__em, 0, 1);
|
||||
#else
|
||||
printf("Press Enter to quit...\n");
|
||||
getchar();
|
||||
#endif
|
||||
|
||||
ma_device_uninit(&device);
|
||||
ma_waveform_uninit(&sineWave); /* Uninitialize the waveform after the device so we don't pull it from under the device while it's being reference in the data callback. */
|
||||
|
||||
(void)argc;
|
||||
(void)argv;
|
||||
return 0;
|
||||
}
|
||||
Reference in New Issue
Block a user