first commit
This commit is contained in:
77
thirdparty/miniaudio/extras/nodes/ma_channel_combiner_node/ma_channel_combiner_node.c
vendored
Normal file
77
thirdparty/miniaudio/extras/nodes/ma_channel_combiner_node/ma_channel_combiner_node.c
vendored
Normal file
@@ -0,0 +1,77 @@
|
||||
|
||||
#include "ma_channel_combiner_node.h"
|
||||
|
||||
/*
Builds a default combiner config. The per-bus channel counts are not stored here;
they are derived from `channels` inside ma_channel_combiner_node_init().
*/
MA_API ma_channel_combiner_node_config ma_channel_combiner_node_config_init(ma_uint32 channels)
{
    ma_channel_combiner_node_config cfg;

    MA_ZERO_OBJECT(&cfg);
    cfg.channels   = channels;
    cfg.nodeConfig = ma_node_config_init();    /* Input and output channels are applied at node init time. */

    return cfg;
}
|
||||
|
||||
|
||||
/*
Interleaves one frame-block's worth of mono input buses into the single output bus.
Input and output run at the same rate, so only the output frame count is consulted.
*/
static void ma_channel_combiner_node_process_pcm_frames(ma_node* pNode, const float** ppFramesIn, ma_uint32* pFrameCountIn, float** ppFramesOut, ma_uint32* pFrameCountOut)
{
    ma_channel_combiner_node* pCombiner = (ma_channel_combiner_node*)pNode;
    ma_uint32 outputChannelCount = ma_node_get_output_channels(pCombiner, 0);

    (void)pFrameCountIn;

    /* Each mono input bus becomes one channel of the interleaved f32 output stream. */
    ma_interleave_pcm_frames(ma_format_f32, outputChannelCount, *pFrameCountOut, (const void**)ppFramesIn, (void*)ppFramesOut[0]);
}
|
||||
|
||||
/*
Vtable shared by every combiner node instance. The input bus count depends on the
configured channel count, so it is declared unknown here and supplied per-instance
in ma_channel_combiner_node_init().
*/
static ma_node_vtable g_ma_channel_combiner_node_vtable =
{
    ma_channel_combiner_node_process_pcm_frames,
    NULL, /* NOTE(review): second vtable entry left unused — presumably the required-input-frame-count callback; confirm against ma_node_vtable. */
    MA_NODE_BUS_COUNT_UNKNOWN, /* Input bus count is determined by the channel count and is unknown until the node instance is initialized. */
    1, /* 1 output bus. */
    0 /* Default flags. */
};
|
||||
|
||||
/*
Initializes a combiner node: pConfig->channels mono input buses are merged into one
interleaved output bus. Returns MA_INVALID_ARGS on bad arguments, otherwise the
result of ma_node_init().
*/
MA_API ma_result ma_channel_combiner_node_init(ma_node_graph* pNodeGraph, const ma_channel_combiner_node_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_channel_combiner_node* pCombinerNode)
{
    ma_result result;
    ma_node_config baseConfig;
    ma_uint32 inputChannels[MA_MAX_NODE_BUS_COUNT];
    ma_uint32 outputChannels[1];
    ma_uint32 iChannel;

    if (pCombinerNode == NULL) {
        return MA_INVALID_ARGS;
    }

    MA_ZERO_OBJECT(pCombinerNode);

    if (pConfig == NULL) {
        return MA_INVALID_ARGS;
    }

    /*
    FIX: Without this check a channel count larger than MA_MAX_NODE_BUS_COUNT would
    overflow inputChannels[] in the loop below. The separator node performs the
    equivalent validation; the combiner was missing it.
    */
    if (pConfig->channels > MA_MAX_NODE_BUS_COUNT) {
        return MA_INVALID_ARGS; /* Channel count cannot exceed the maximum number of buses. */
    }

    /* All input channels are mono. */
    for (iChannel = 0; iChannel < pConfig->channels; iChannel += 1) {
        inputChannels[iChannel] = 1;
    }

    outputChannels[0] = pConfig->channels;

    baseConfig = pConfig->nodeConfig;
    baseConfig.vtable = &g_ma_channel_combiner_node_vtable;
    baseConfig.inputBusCount = pConfig->channels; /* The vtable has an unknown bus count, so must specify it here. */
    baseConfig.pInputChannels = inputChannels;
    baseConfig.pOutputChannels = outputChannels;

    result = ma_node_init(pNodeGraph, &baseConfig, pAllocationCallbacks, &pCombinerNode->baseNode);
    if (result != MA_SUCCESS) {
        return result;
    }

    return MA_SUCCESS;
}
|
||||
|
||||
/* Uninitializes a combiner node. Safe only after the node has been detached/stopped per normal node-graph rules. */
MA_API void ma_channel_combiner_node_uninit(ma_channel_combiner_node* pCombinerNode, const ma_allocation_callbacks* pAllocationCallbacks)
{
    /* The base node is always uninitialized first. */
    ma_node_uninit(pCombinerNode, pAllocationCallbacks);
}
|
||||
30
thirdparty/miniaudio/extras/nodes/ma_channel_combiner_node/ma_channel_combiner_node.h
vendored
Normal file
30
thirdparty/miniaudio/extras/nodes/ma_channel_combiner_node/ma_channel_combiner_node.h
vendored
Normal file
@@ -0,0 +1,30 @@
|
||||
/* Include ma_channel_combiner_node.h after miniaudio.h */
|
||||
#ifndef ma_channel_combiner_node_h
|
||||
#define ma_channel_combiner_node_h
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
/* Configuration for a combiner node. Initialize with ma_channel_combiner_node_config_init(). */
typedef struct
{
    ma_node_config nodeConfig;  /* Base node config; bus channel counts are filled in by ma_channel_combiner_node_init(). */
    ma_uint32 channels;         /* Number of mono input buses, and the channel count of the single output bus. */
} ma_channel_combiner_node_config;
|
||||
|
||||
MA_API ma_channel_combiner_node_config ma_channel_combiner_node_config_init(ma_uint32 channels);
|
||||
|
||||
|
||||
typedef struct
{
    ma_node_base baseNode;  /* Must be the first member so the struct can be cast to/from ma_node*. */
} ma_channel_combiner_node;
|
||||
|
||||
MA_API ma_result ma_channel_combiner_node_init(ma_node_graph* pNodeGraph, const ma_channel_combiner_node_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_channel_combiner_node* pSeparatorNode);
|
||||
MA_API void ma_channel_combiner_node_uninit(ma_channel_combiner_node* pSeparatorNode, const ma_allocation_callbacks* pAllocationCallbacks);
|
||||
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#endif  /* ma_channel_combiner_node_h */
|
||||
@@ -0,0 +1,2 @@
|
||||
/* The channel separator example also demonstrates how to use the combiner. */
|
||||
#include "../ma_channel_separator_node/ma_channel_separator_node_example.c"
|
||||
81
thirdparty/miniaudio/extras/nodes/ma_channel_separator_node/ma_channel_separator_node.c
vendored
Normal file
81
thirdparty/miniaudio/extras/nodes/ma_channel_separator_node/ma_channel_separator_node.c
vendored
Normal file
@@ -0,0 +1,81 @@
|
||||
|
||||
#include "ma_channel_separator_node.h"
|
||||
|
||||
/*
Builds a default separator config. Bus channel counts are derived from `channels`
inside ma_channel_separator_node_init().
*/
MA_API ma_channel_separator_node_config ma_channel_separator_node_config_init(ma_uint32 channels)
{
    ma_channel_separator_node_config cfg;

    MA_ZERO_OBJECT(&cfg);
    cfg.channels   = channels;
    cfg.nodeConfig = ma_node_config_init();    /* Input and output channels are applied at node init time. */

    return cfg;
}
|
||||
|
||||
|
||||
/*
Deinterleaves one frame-block's worth of the multi-channel input bus into the mono
output buses. Rates match 1:1, so only the output frame count is consulted.
*/
static void ma_channel_separator_node_process_pcm_frames(ma_node* pNode, const float** ppFramesIn, ma_uint32* pFrameCountIn, float** ppFramesOut, ma_uint32* pFrameCountOut)
{
    ma_channel_separator_node* pSeparator = (ma_channel_separator_node*)pNode;
    ma_uint32 inputChannelCount = ma_node_get_input_channels(pSeparator, 0);

    (void)pFrameCountIn;

    /* Each channel of the interleaved f32 input is routed to its own mono output bus. */
    ma_deinterleave_pcm_frames(ma_format_f32, inputChannelCount, *pFrameCountOut, (const void*)ppFramesIn[0], (void**)ppFramesOut);
}
|
||||
|
||||
/*
Vtable shared by every separator node instance. The output bus count depends on the
configured channel count, so it is declared unknown here and supplied per-instance
in ma_channel_separator_node_init().
*/
static ma_node_vtable g_ma_channel_separator_node_vtable =
{
    ma_channel_separator_node_process_pcm_frames,
    NULL, /* NOTE(review): second vtable entry left unused — presumably the required-input-frame-count callback; confirm against ma_node_vtable. */
    1, /* 1 input bus. */
    MA_NODE_BUS_COUNT_UNKNOWN, /* Output bus count is determined by the channel count and is unknown until the node instance is initialized. */
    0 /* Default flags. */
};
|
||||
|
||||
/*
Initializes a separator node: one input bus carrying pConfig->channels channels is
split into that many mono output buses. Returns MA_INVALID_ARGS on bad arguments,
otherwise the result of ma_node_init().
*/
MA_API ma_result ma_channel_separator_node_init(ma_node_graph* pNodeGraph, const ma_channel_separator_node_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_channel_separator_node* pSeparatorNode)
{
    ma_node_config busConfig;
    ma_uint32 inChannels[1];
    ma_uint32 outChannels[MA_MAX_NODE_BUS_COUNT];
    ma_uint32 iBus;

    if (pSeparatorNode == NULL) {
        return MA_INVALID_ARGS;
    }

    MA_ZERO_OBJECT(pSeparatorNode);

    if (pConfig == NULL) {
        return MA_INVALID_ARGS;
    }

    /* Each output bus carries exactly one channel, so the channel count is capped by the bus limit. */
    if (pConfig->channels > MA_MAX_NODE_BUS_COUNT) {
        return MA_INVALID_ARGS;
    }

    /* A single input bus carrying all channels... */
    inChannels[0] = pConfig->channels;

    /* ...fanned out to one mono bus per channel. */
    for (iBus = 0; iBus < pConfig->channels; iBus += 1) {
        outChannels[iBus] = 1;
    }

    busConfig = pConfig->nodeConfig;
    busConfig.vtable          = &g_ma_channel_separator_node_vtable;
    busConfig.outputBusCount  = pConfig->channels;  /* The vtable declares an unknown bus count, so it must be specified here. */
    busConfig.pInputChannels  = inChannels;
    busConfig.pOutputChannels = outChannels;

    return ma_node_init(pNodeGraph, &busConfig, pAllocationCallbacks, &pSeparatorNode->baseNode);
}
|
||||
|
||||
/* Uninitializes a separator node. Safe only after the node has been detached/stopped per normal node-graph rules. */
MA_API void ma_channel_separator_node_uninit(ma_channel_separator_node* pSeparatorNode, const ma_allocation_callbacks* pAllocationCallbacks)
{
    /* The base node is always uninitialized first. */
    ma_node_uninit(pSeparatorNode, pAllocationCallbacks);
}
|
||||
29
thirdparty/miniaudio/extras/nodes/ma_channel_separator_node/ma_channel_separator_node.h
vendored
Normal file
29
thirdparty/miniaudio/extras/nodes/ma_channel_separator_node/ma_channel_separator_node.h
vendored
Normal file
@@ -0,0 +1,29 @@
|
||||
/* Include ma_channel_separator_node.h after miniaudio.h */
|
||||
#ifndef ma_channel_separator_node_h
|
||||
#define ma_channel_separator_node_h
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
/* Configuration for a separator node. Initialize with ma_channel_separator_node_config_init(). */
typedef struct
{
    ma_node_config nodeConfig;  /* Base node config; bus channel counts are filled in by ma_channel_separator_node_init(). */
    ma_uint32 channels;         /* Channel count of the single input bus, and the number of mono output buses. */
} ma_channel_separator_node_config;
|
||||
|
||||
MA_API ma_channel_separator_node_config ma_channel_separator_node_config_init(ma_uint32 channels);
|
||||
|
||||
|
||||
typedef struct
{
    ma_node_base baseNode;  /* Must be the first member so the struct can be cast to/from ma_node*. */
} ma_channel_separator_node;
|
||||
|
||||
MA_API ma_result ma_channel_separator_node_init(ma_node_graph* pNodeGraph, const ma_channel_separator_node_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_channel_separator_node* pSeparatorNode);
|
||||
MA_API void ma_channel_separator_node_uninit(ma_channel_separator_node* pSeparatorNode, const ma_allocation_callbacks* pAllocationCallbacks);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#endif  /* ma_channel_separator_node_h */
|
||||
149
thirdparty/miniaudio/extras/nodes/ma_channel_separator_node/ma_channel_separator_node_example.c
vendored
Normal file
149
thirdparty/miniaudio/extras/nodes/ma_channel_separator_node/ma_channel_separator_node_example.c
vendored
Normal file
@@ -0,0 +1,149 @@
|
||||
#define MINIAUDIO_IMPLEMENTATION
|
||||
#include "../../../miniaudio.h"
|
||||
#include "ma_channel_separator_node.c"
|
||||
#include "../ma_channel_combiner_node/ma_channel_combiner_node.c"
|
||||
|
||||
#include <stdio.h>
|
||||
|
||||
#define DEVICE_FORMAT ma_format_f32 /* Must always be f32 for this example because the node graph system only works with this. */
|
||||
#define DEVICE_CHANNELS 0 /* The input file will determine the channel count. */
|
||||
#define DEVICE_SAMPLE_RATE 48000
|
||||
|
||||
/*
|
||||
In this example we're just separating out the channels with a `ma_channel_separator_node`, and then
|
||||
combining them back together with a `ma_channel_combiner_node` before playing them back.
|
||||
*/
|
||||
static ma_decoder g_decoder; /* The decoder that we'll read data from. */
|
||||
static ma_data_source_node g_dataSupplyNode; /* The node that will sit at the root level. Will be reading data from g_dataSupply. */
|
||||
static ma_channel_separator_node g_separatorNode; /* The separator node. */
|
||||
static ma_channel_combiner_node g_combinerNode; /* The combiner node. */
|
||||
static ma_node_graph g_nodeGraph; /* The main node graph that we'll be feeding data through. */
|
||||
|
||||
/* Playback callback: the device pulls its output straight from the node graph. */
void data_callback(ma_device* pDevice, void* pOutput, const void* pInput, ma_uint32 frameCount)
{
    (void)pDevice;
    (void)pInput;

    ma_node_graph_read_pcm_frames(&g_nodeGraph, pOutput, frameCount, NULL);
}
|
||||
|
||||
int main(int argc, char** argv)
|
||||
{
|
||||
ma_result result;
|
||||
ma_decoder_config decoderConfig;
|
||||
ma_device_config deviceConfig;
|
||||
ma_device device;
|
||||
ma_node_graph_config nodeGraphConfig;
|
||||
ma_channel_separator_node_config separatorNodeConfig;
|
||||
ma_channel_combiner_node_config combinerNodeConfig;
|
||||
ma_data_source_node_config dataSupplyNodeConfig;
|
||||
ma_uint32 iChannel;
|
||||
|
||||
if (argc < 1) {
|
||||
printf("No input file.\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
|
||||
/* Decoder. */
|
||||
decoderConfig = ma_decoder_config_init(DEVICE_FORMAT, 0, DEVICE_SAMPLE_RATE);
|
||||
|
||||
result = ma_decoder_init_file(argv[1], &decoderConfig, &g_decoder);
|
||||
if (result != MA_SUCCESS) {
|
||||
printf("Failed to load decoder.\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
|
||||
/* Device. */
|
||||
deviceConfig = ma_device_config_init(ma_device_type_playback);
|
||||
deviceConfig.playback.pDeviceID = NULL;
|
||||
deviceConfig.playback.format = g_decoder.outputFormat;
|
||||
deviceConfig.playback.channels = g_decoder.outputChannels;
|
||||
deviceConfig.sampleRate = g_decoder.outputSampleRate;
|
||||
deviceConfig.dataCallback = data_callback;
|
||||
result = ma_device_init(NULL, &deviceConfig, &device);
|
||||
if (result != MA_SUCCESS) {
|
||||
return result;
|
||||
}
|
||||
|
||||
|
||||
/* Node graph. */
|
||||
nodeGraphConfig = ma_node_graph_config_init(device.playback.channels);
|
||||
|
||||
result = ma_node_graph_init(&nodeGraphConfig, NULL, &g_nodeGraph);
|
||||
if (result != MA_SUCCESS) {
|
||||
printf("Failed to initialize node graph.");
|
||||
goto done0;
|
||||
}
|
||||
|
||||
|
||||
/* Combiner. Attached straight to the endpoint. Input will be the separator node. */
|
||||
combinerNodeConfig = ma_channel_combiner_node_config_init(device.playback.channels);
|
||||
|
||||
result = ma_channel_combiner_node_init(&g_nodeGraph, &combinerNodeConfig, NULL, &g_combinerNode);
|
||||
if (result != MA_SUCCESS) {
|
||||
printf("Failed to initialize channel combiner node.");
|
||||
goto done1;
|
||||
}
|
||||
|
||||
ma_node_attach_output_bus(&g_combinerNode, 0, ma_node_graph_get_endpoint(&g_nodeGraph), 0);
|
||||
|
||||
|
||||
/*
|
||||
Separator. Attached to the combiner. We need to attach each of the outputs of the
|
||||
separator to each of the inputs of the combiner.
|
||||
*/
|
||||
separatorNodeConfig = ma_channel_separator_node_config_init(device.playback.channels);
|
||||
|
||||
result = ma_channel_separator_node_init(&g_nodeGraph, &separatorNodeConfig, NULL, &g_separatorNode);
|
||||
if (result != MA_SUCCESS) {
|
||||
printf("Failed to initialize channel separator node.");
|
||||
goto done2;
|
||||
}
|
||||
|
||||
/* The separator and combiner must have the same number of output and input buses respectively. */
|
||||
MA_ASSERT(ma_node_get_output_bus_count(&g_separatorNode) == ma_node_get_input_bus_count(&g_combinerNode));
|
||||
|
||||
/* Each of the separator's outputs need to be attached to the corresponding input of the combiner. */
|
||||
for (iChannel = 0; iChannel < ma_node_get_output_bus_count(&g_separatorNode); iChannel += 1) {
|
||||
ma_node_attach_output_bus(&g_separatorNode, iChannel, &g_combinerNode, iChannel);
|
||||
}
|
||||
|
||||
|
||||
|
||||
/* Data supply. Attached to input bus 0 of the reverb node. */
|
||||
dataSupplyNodeConfig = ma_data_source_node_config_init(&g_decoder);
|
||||
|
||||
result = ma_data_source_node_init(&g_nodeGraph, &dataSupplyNodeConfig, NULL, &g_dataSupplyNode);
|
||||
if (result != MA_SUCCESS) {
|
||||
printf("Failed to initialize source node.");
|
||||
goto done3;
|
||||
}
|
||||
|
||||
ma_node_attach_output_bus(&g_dataSupplyNode, 0, &g_separatorNode, 0);
|
||||
|
||||
|
||||
|
||||
/* Now we just start the device and wait for the user to terminate the program. */
|
||||
ma_device_start(&device);
|
||||
|
||||
printf("Press Enter to quit...\n");
|
||||
getchar();
|
||||
|
||||
/* It's important that we stop the device first or else we'll uninitialize the graph from under the device. */
|
||||
ma_device_stop(&device);
|
||||
|
||||
|
||||
/*done4:*/ ma_data_source_node_uninit(&g_dataSupplyNode, NULL);
|
||||
done3: ma_channel_separator_node_uninit(&g_separatorNode, NULL);
|
||||
done2: ma_channel_combiner_node_uninit(&g_combinerNode, NULL);
|
||||
done1: ma_node_graph_uninit(&g_nodeGraph, NULL);
|
||||
done0: ma_device_uninit(&device);
|
||||
|
||||
(void)argc;
|
||||
(void)argv;
|
||||
|
||||
return 0;
|
||||
}
|
||||
116
thirdparty/miniaudio/extras/nodes/ma_delay_node/ma_delay_node_example.c
vendored
Normal file
116
thirdparty/miniaudio/extras/nodes/ma_delay_node/ma_delay_node_example.c
vendored
Normal file
@@ -0,0 +1,116 @@
|
||||
|
||||
#define MINIAUDIO_IMPLEMENTATION
|
||||
#include "../../../miniaudio.h"
|
||||
|
||||
#include <stdio.h>
|
||||
|
||||
#define DEVICE_FORMAT ma_format_f32 /* Must always be f32 for this example because the node graph system only works with this. */
|
||||
#define DEVICE_CHANNELS 2
|
||||
#define DEVICE_SAMPLE_RATE 48000
|
||||
|
||||
static ma_audio_buffer_ref g_dataSupply; /* The underlying data source of the source node. */
|
||||
static ma_data_source_node g_dataSupplyNode; /* The node that will sit at the root level. Will be reading data from g_dataSupply. */
|
||||
static ma_delay_node g_delayNode; /* The delay node. */
|
||||
static ma_node_graph g_nodeGraph; /* The main node graph that we'll be feeding data through. */
|
||||
|
||||
/* Duplex callback: feeds the captured input into the graph's root data source, then reads the processed output. */
void data_callback(ma_device* pDevice, void* pOutput, const void* pInput, ma_uint32 frameCount)
{
    MA_ASSERT(pDevice->capture.format == pDevice->playback.format && pDevice->capture.format == ma_format_f32);
    MA_ASSERT(pDevice->capture.channels == pDevice->playback.channels);

    /*
    The node graph is a pulling-style API: the node at the bottom of the chain acts as the
    data source. Point that source at this callback's capture buffer so the graph consumes
    the freshly captured frames.
    */
    ma_audio_buffer_ref_set_data(&g_dataSupply, pInput, frameCount);

    /* The graph can now be read from directly. */
    ma_node_graph_read_pcm_frames(&g_nodeGraph, pOutput, frameCount, NULL);
}
|
||||
|
||||
/*
Entry point: duplex device -> audio buffer ref (capture data) -> data source node -> delay -> endpoint.
Resources are released in reverse order via goto-based cleanup.
*/
int main(int argc, char** argv)
{
    ma_result result;
    ma_device_config deviceConfig;
    ma_device device;
    ma_node_graph_config nodeGraphConfig;
    ma_delay_node_config delayNodeConfig;
    ma_data_source_node_config dataSupplyNodeConfig;

    /* Duplex device: capture feeds the graph, playback drains it. */
    deviceConfig = ma_device_config_init(ma_device_type_duplex);
    deviceConfig.capture.pDeviceID = NULL;
    deviceConfig.capture.format = DEVICE_FORMAT;
    deviceConfig.capture.channels = DEVICE_CHANNELS;
    deviceConfig.capture.shareMode = ma_share_mode_shared;
    deviceConfig.playback.pDeviceID = NULL;
    deviceConfig.playback.format = DEVICE_FORMAT;
    deviceConfig.playback.channels = DEVICE_CHANNELS;
    deviceConfig.dataCallback = data_callback;
    result = ma_device_init(NULL, &deviceConfig, &device);
    if (result != MA_SUCCESS) {
        return result;
    }


    /* Node graph. */
    nodeGraphConfig = ma_node_graph_config_init(device.capture.channels);

    result = ma_node_graph_init(&nodeGraphConfig, NULL, &g_nodeGraph);
    if (result != MA_SUCCESS) {
        printf("Failed to initialize node graph.");
        goto done0;
    }


    /* Delay. Attached straight to the endpoint. 100ms delay, 0.5 decay. */
    delayNodeConfig = ma_delay_node_config_init(device.capture.channels, device.sampleRate, (100 * device.sampleRate) / 1000, 0.5f);

    result = ma_delay_node_init(&g_nodeGraph, &delayNodeConfig, NULL, &g_delayNode);
    if (result != MA_SUCCESS) {
        printf("Failed to initialize delay node.");
        goto done1;
    }

    ma_node_attach_output_bus(&g_delayNode, 0, ma_node_graph_get_endpoint(&g_nodeGraph), 0);


    /* Data supply. Attached to input bus 0 of the delay node. The buffer is re-pointed at each capture block in data_callback(). */
    result = ma_audio_buffer_ref_init(device.capture.format, device.capture.channels, NULL, 0, &g_dataSupply);
    if (result != MA_SUCCESS) {
        printf("Failed to initialize audio buffer for source.");
        goto done2;
    }

    dataSupplyNodeConfig = ma_data_source_node_config_init(&g_dataSupply);

    result = ma_data_source_node_init(&g_nodeGraph, &dataSupplyNodeConfig, NULL, &g_dataSupplyNode);
    if (result != MA_SUCCESS) {
        printf("Failed to initialize source node.");
        goto done2;
    }

    ma_node_attach_output_bus(&g_dataSupplyNode, 0, &g_delayNode, 0);




    ma_device_start(&device);

    printf("Press Enter to quit...\n");
    getchar();

    /* It's important that we stop the device first or else we'll uninitialize the graph from under the device. */
    ma_device_stop(&device);

    /*done3:*/ ma_data_source_node_uninit(&g_dataSupplyNode, NULL);
done2: ma_delay_node_uninit(&g_delayNode, NULL);
done1: ma_node_graph_uninit(&g_nodeGraph, NULL);
done0: ma_device_uninit(&device);

    (void)argc;
    (void)argv;

    return 0;
}
|
||||
102
thirdparty/miniaudio/extras/nodes/ma_ltrim_node/ma_ltrim_node.c
vendored
Normal file
102
thirdparty/miniaudio/extras/nodes/ma_ltrim_node/ma_ltrim_node.c
vendored
Normal file
@@ -0,0 +1,102 @@
|
||||
|
||||
#include "ma_ltrim_node.h"
|
||||
|
||||
/*
Builds a default left-trim config. `threshold` is the absolute sample amplitude above
which audio is considered to have started.
*/
MA_API ma_ltrim_node_config ma_ltrim_node_config_init(ma_uint32 channels, float threshold)
{
    ma_ltrim_node_config cfg;

    MA_ZERO_OBJECT(&cfg);
    cfg.channels   = channels;
    cfg.threshold  = threshold;
    cfg.nodeConfig = ma_node_config_init();    /* Input and output channels are applied in ma_ltrim_node_init(). */

    return cfg;
}
|
||||
|
||||
|
||||
/*
Trims leading "silence": input frames are consumed without producing output until a
sample on any channel exceeds the threshold; from then on frames pass straight through.
Because input can be consumed faster than output is produced, the vtable sets
MA_NODE_FLAG_DIFFERENT_PROCESSING_RATES and both frame counts are written back.
*/
static void ma_ltrim_node_process_pcm_frames(ma_node* pNode, const float** ppFramesIn, ma_uint32* pFrameCountIn, float** ppFramesOut, ma_uint32* pFrameCountOut)
{
    ma_ltrim_node* pTrimNode = (ma_ltrim_node*)pNode;
    ma_uint32 framesProcessedIn = 0;
    ma_uint32 framesProcessedOut = 0;
    ma_uint32 channelCount = ma_node_get_input_channels(pNode, 0);

    /*
    If we haven't yet found the start, skip over every input sample until we find a frame outside
    of the threshold.
    */
    if (pTrimNode->foundStart == MA_FALSE) {
        while (framesProcessedIn < *pFrameCountIn) {
            ma_uint32 iChannel = 0;
            /* A frame counts as the start if ANY of its channels exceeds the threshold in magnitude. */
            for (iChannel = 0; iChannel < channelCount; iChannel += 1) {
                float sample = ppFramesIn[0][framesProcessedIn*channelCount + iChannel];
                if (sample < -pTrimNode->threshold || sample > pTrimNode->threshold) {
                    pTrimNode->foundStart = MA_TRUE;
                    break;
                }
            }

            if (pTrimNode->foundStart) {
                break; /* The start has been found. Get out of this loop and finish off processing. */
            } else {
                framesProcessedIn += 1;
            }
        }
    }

    /* If there's anything left, just copy it over. */
    framesProcessedOut = ma_min(*pFrameCountOut, *pFrameCountIn - framesProcessedIn);
    ma_copy_pcm_frames(ppFramesOut[0], &ppFramesIn[0][framesProcessedIn], framesProcessedOut, ma_format_f32, channelCount);

    framesProcessedIn += framesProcessedOut;

    /* We always consume the skipped frames, but may have produced only partial (or no) output. */
    *pFrameCountIn = framesProcessedIn;
    *pFrameCountOut = framesProcessedOut;
}
|
||||
|
||||
/* Vtable shared by every ltrim node instance. */
static ma_node_vtable g_ma_ltrim_node_vtable =
{
    ma_ltrim_node_process_pcm_frames,
    NULL, /* NOTE(review): second vtable entry left unused — presumably the required-input-frame-count callback; confirm against ma_node_vtable. */
    1, /* 1 input bus. */
    1, /* 1 output bus. */
    MA_NODE_FLAG_DIFFERENT_PROCESSING_RATES /* While trimming, input is consumed without producing output. */
};
|
||||
|
||||
/*
Initializes a left-trim node. Audio passes through unmodified once a sample exceeding
the threshold has been seen; everything before that point is discarded.
*/
MA_API ma_result ma_ltrim_node_init(ma_node_graph* pNodeGraph, const ma_ltrim_node_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_ltrim_node* pTrimNode)
{
    ma_result result;
    ma_node_config baseConfig;

    if (pTrimNode == NULL) {
        return MA_INVALID_ARGS;
    }

    MA_ZERO_OBJECT(pTrimNode);

    if (pConfig == NULL) {
        return MA_INVALID_ARGS;
    }

    pTrimNode->threshold = pConfig->threshold;
    pTrimNode->foundStart = MA_FALSE;   /* Nothing has been passed through yet. */

    baseConfig = pConfig->nodeConfig;
    baseConfig.vtable = &g_ma_ltrim_node_vtable;
    /* One input bus and one output bus, both with the configured channel count.
       NOTE(review): these point into the caller's config — presumably ma_node_init copies the values; confirm. */
    baseConfig.pInputChannels = &pConfig->channels;
    baseConfig.pOutputChannels = &pConfig->channels;

    result = ma_node_init(pNodeGraph, &baseConfig, pAllocationCallbacks, &pTrimNode->baseNode);
    if (result != MA_SUCCESS) {
        return result;
    }

    return MA_SUCCESS;
}
|
||||
|
||||
/* Uninitializes an ltrim node. Safe only after the node has been detached/stopped per normal node-graph rules. */
MA_API void ma_ltrim_node_uninit(ma_ltrim_node* pTrimNode, const ma_allocation_callbacks* pAllocationCallbacks)
{
    /* The base node is always uninitialized first. */
    ma_node_uninit(pTrimNode, pAllocationCallbacks);
}
|
||||
35
thirdparty/miniaudio/extras/nodes/ma_ltrim_node/ma_ltrim_node.h
vendored
Normal file
35
thirdparty/miniaudio/extras/nodes/ma_ltrim_node/ma_ltrim_node.h
vendored
Normal file
@@ -0,0 +1,35 @@
|
||||
/* Include ma_ltrim_node.h after miniaudio.h */
|
||||
#ifndef ma_ltrim_node_h
|
||||
#define ma_ltrim_node_h
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
/*
|
||||
The trim node has one input and one output.
|
||||
*/
|
||||
typedef struct
{
    ma_node_config nodeConfig;  /* Base node config; bus channel counts are filled in by ma_ltrim_node_init(). */
    ma_uint32 channels;         /* Channel count of the single input/output bus. */
    float threshold;            /* Absolute amplitude above which audio is considered started. */
} ma_ltrim_node_config;
|
||||
|
||||
MA_API ma_ltrim_node_config ma_ltrim_node_config_init(ma_uint32 channels, float threshold);
|
||||
|
||||
|
||||
typedef struct
{
    ma_node_base baseNode;  /* Must be the first member so the struct can be cast to/from ma_node*. */
    float threshold;        /* Copied from the config at init time. */
    ma_bool32 foundStart;   /* MA_TRUE once a sample beyond the threshold has been seen; frames pass through from then on. */
} ma_ltrim_node;
|
||||
|
||||
MA_API ma_result ma_ltrim_node_init(ma_node_graph* pNodeGraph, const ma_ltrim_node_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_ltrim_node* pTrimNode);
|
||||
MA_API void ma_ltrim_node_uninit(ma_ltrim_node* pTrimNode, const ma_allocation_callbacks* pAllocationCallbacks);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#endif /* ma_ltrim_node_h */
|
||||
115
thirdparty/miniaudio/extras/nodes/ma_ltrim_node/ma_ltrim_node_example.c
vendored
Normal file
115
thirdparty/miniaudio/extras/nodes/ma_ltrim_node/ma_ltrim_node_example.c
vendored
Normal file
@@ -0,0 +1,115 @@
|
||||
#define MINIAUDIO_IMPLEMENTATION
|
||||
#include "../../../miniaudio.h"
|
||||
#include "ma_ltrim_node.c"
|
||||
|
||||
#include <stdio.h>
|
||||
|
||||
#define DEVICE_FORMAT ma_format_f32 /* Must always be f32 for this example because the node graph system only works with this. */
|
||||
#define DEVICE_CHANNELS 0 /* The input file will determine the channel count. */
|
||||
#define DEVICE_SAMPLE_RATE 0 /* The input file will determine the sample rate. */
|
||||
|
||||
static ma_decoder g_decoder; /* The decoder that we'll read data from. */
|
||||
static ma_data_source_node g_dataSupplyNode; /* The node that will sit at the root level. Will be reading data from g_dataSupply. */
|
||||
static ma_ltrim_node g_trimNode; /* The trim node. */
|
||||
static ma_node_graph g_nodeGraph; /* The main node graph that we'll be feeding data through. */
|
||||
|
||||
/* Playback callback: the device pulls its output straight from the node graph. */
void data_callback(ma_device* pDevice, void* pOutput, const void* pInput, ma_uint32 frameCount)
{
    (void)pDevice;
    (void)pInput;

    ma_node_graph_read_pcm_frames(&g_nodeGraph, pOutput, frameCount, NULL);
}
|
||||
|
||||
int main(int argc, char** argv)
|
||||
{
|
||||
ma_result result;
|
||||
ma_decoder_config decoderConfig;
|
||||
ma_device_config deviceConfig;
|
||||
ma_device device;
|
||||
ma_node_graph_config nodeGraphConfig;
|
||||
ma_ltrim_node_config trimNodeConfig;
|
||||
ma_data_source_node_config dataSupplyNodeConfig;
|
||||
|
||||
if (argc < 1) {
|
||||
printf("No input file.\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
|
||||
/* Decoder. */
|
||||
decoderConfig = ma_decoder_config_init(DEVICE_FORMAT, DEVICE_CHANNELS, DEVICE_SAMPLE_RATE);
|
||||
|
||||
result = ma_decoder_init_file(argv[1], &decoderConfig, &g_decoder);
|
||||
if (result != MA_SUCCESS) {
|
||||
printf("Failed to load decoder.\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
|
||||
/* Device. */
|
||||
deviceConfig = ma_device_config_init(ma_device_type_playback);
|
||||
deviceConfig.playback.pDeviceID = NULL;
|
||||
deviceConfig.playback.format = g_decoder.outputFormat;
|
||||
deviceConfig.playback.channels = g_decoder.outputChannels;
|
||||
deviceConfig.sampleRate = g_decoder.outputSampleRate;
|
||||
deviceConfig.dataCallback = data_callback;
|
||||
result = ma_device_init(NULL, &deviceConfig, &device);
|
||||
if (result != MA_SUCCESS) {
|
||||
return result;
|
||||
}
|
||||
|
||||
|
||||
/* Node graph. */
|
||||
nodeGraphConfig = ma_node_graph_config_init(device.playback.channels);
|
||||
|
||||
result = ma_node_graph_init(&nodeGraphConfig, NULL, &g_nodeGraph);
|
||||
if (result != MA_SUCCESS) {
|
||||
printf("Failed to initialize node graph.");
|
||||
goto done0;
|
||||
}
|
||||
|
||||
|
||||
/* Trimmer. Attached straight to the endpoint. Input will be the data source node. */
|
||||
trimNodeConfig = ma_ltrim_node_config_init(device.playback.channels, 0);
|
||||
|
||||
result = ma_ltrim_node_init(&g_nodeGraph, &trimNodeConfig, NULL, &g_trimNode);
|
||||
if (result != MA_SUCCESS) {
|
||||
printf("Failed to initialize ltrim node.");
|
||||
goto done1;
|
||||
}
|
||||
|
||||
ma_node_attach_output_bus(&g_trimNode, 0, ma_node_graph_get_endpoint(&g_nodeGraph), 0);
|
||||
|
||||
|
||||
/* Data supply. */
|
||||
dataSupplyNodeConfig = ma_data_source_node_config_init(&g_decoder);
|
||||
|
||||
result = ma_data_source_node_init(&g_nodeGraph, &dataSupplyNodeConfig, NULL, &g_dataSupplyNode);
|
||||
if (result != MA_SUCCESS) {
|
||||
printf("Failed to initialize data source node.");
|
||||
goto done2;
|
||||
}
|
||||
|
||||
ma_node_attach_output_bus(&g_dataSupplyNode, 0, &g_trimNode, 0);
|
||||
|
||||
|
||||
|
||||
/* Now we just start the device and wait for the user to terminate the program. */
|
||||
ma_device_start(&device);
|
||||
|
||||
printf("Press Enter to quit...\n");
|
||||
getchar();
|
||||
|
||||
/* It's important that we stop the device first or else we'll uninitialize the graph from under the device. */
|
||||
ma_device_stop(&device);
|
||||
|
||||
|
||||
/*done3:*/ ma_data_source_node_uninit(&g_dataSupplyNode, NULL);
|
||||
done2: ma_ltrim_node_uninit(&g_trimNode, NULL);
|
||||
done1: ma_node_graph_uninit(&g_nodeGraph, NULL);
|
||||
done0: ma_device_uninit(&device);
|
||||
|
||||
return 0;
|
||||
}
|
||||
78
thirdparty/miniaudio/extras/nodes/ma_reverb_node/ma_reverb_node.c
vendored
Normal file
78
thirdparty/miniaudio/extras/nodes/ma_reverb_node/ma_reverb_node.c
vendored
Normal file
@@ -0,0 +1,78 @@
|
||||
|
||||
#define VERBLIB_IMPLEMENTATION
|
||||
#include "ma_reverb_node.h"
|
||||
|
||||
/*
Builds a reverb config populated with verblib's default room parameters. Callers can
override any field before passing the config to ma_reverb_node_init().
*/
MA_API ma_reverb_node_config ma_reverb_node_config_init(ma_uint32 channels, ma_uint32 sampleRate)
{
    ma_reverb_node_config cfg;

    MA_ZERO_OBJECT(&cfg);
    cfg.nodeConfig = ma_node_config_init();    /* Input and output channels are applied in ma_reverb_node_init(). */
    cfg.channels   = channels;
    cfg.sampleRate = sampleRate;

    /* Seed every reverb parameter with verblib's defaults. */
    cfg.roomSize   = verblib_initialroom;
    cfg.damping    = verblib_initialdamp;
    cfg.width      = verblib_initialwidth;
    cfg.wetVolume  = verblib_initialwet;
    cfg.dryVolume  = verblib_initialdry;
    cfg.mode       = verblib_initialmode;

    return cfg;
}
|
||||
|
||||
|
||||
/* Runs verblib over one block of f32 frames. Rates are 1:1, so only the output count is used. */
static void ma_reverb_node_process_pcm_frames(ma_node* pNode, const float** ppFramesIn, ma_uint32* pFrameCountIn, float** ppFramesOut, ma_uint32* pFrameCountOut)
{
    ma_reverb_node* pReverbNode = (ma_reverb_node*)pNode;

    (void)pFrameCountIn;

    verblib_process(&pReverbNode->reverb, ppFramesIn[0], ppFramesOut[0], *pFrameCountOut);
}
|
||||
|
||||
static ma_node_vtable g_ma_reverb_node_vtable =
{
    ma_reverb_node_process_pcm_frames,
    NULL,
    1, /* 1 input bus. */
    1, /* 1 output bus. */
    MA_NODE_FLAG_CONTINUOUS_PROCESSING /* Reverb requires continuous processing to ensure the tail gets processed. */
};
|
||||
|
||||
/*
Initializes a reverb node and attaches it to the given node graph.

pConfig must have been initialized with ma_reverb_node_config_init(). Returns
MA_INVALID_ARGS if either pointer is NULL or if verblib rejects the sample
rate / channel count (verblib supports 1 or 2 channels at 22050 Hz or above).
*/
MA_API ma_result ma_reverb_node_init(ma_node_graph* pNodeGraph, const ma_reverb_node_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_reverb_node* pReverbNode)
{
    ma_result result;
    ma_node_config baseConfig;

    if (pReverbNode == NULL) {
        return MA_INVALID_ARGS;
    }

    MA_ZERO_OBJECT(pReverbNode);

    if (pConfig == NULL) {
        return MA_INVALID_ARGS;
    }

    if (verblib_initialize(&pReverbNode->reverb, (unsigned long)pConfig->sampleRate, (unsigned int)pConfig->channels) == 0) {
        return MA_INVALID_ARGS;
    }

    /*
    Apply the tuning parameters from the config. Previously these fields were
    silently ignored and the verblib defaults were always used; since
    ma_reverb_node_config_init() fills them with those same defaults this is
    backward compatible, but now callers can actually customize the reverb.
    */
    verblib_set_room_size(&pReverbNode->reverb, pConfig->roomSize);
    verblib_set_damping(&pReverbNode->reverb, pConfig->damping);
    verblib_set_width(&pReverbNode->reverb, pConfig->width);
    verblib_set_wet(&pReverbNode->reverb, pConfig->wetVolume);
    verblib_set_dry(&pReverbNode->reverb, pConfig->dryVolume);
    verblib_set_mode(&pReverbNode->reverb, pConfig->mode);

    /* The node itself. Input and output buses both use the configured channel count. */
    baseConfig = pConfig->nodeConfig;
    baseConfig.vtable          = &g_ma_reverb_node_vtable;
    baseConfig.pInputChannels  = &pConfig->channels;
    baseConfig.pOutputChannels = &pConfig->channels;

    result = ma_node_init(pNodeGraph, &baseConfig, pAllocationCallbacks, &pReverbNode->baseNode);
    if (result != MA_SUCCESS) {
        return result;  /* verblib holds no resources, so nothing to clean up. */
    }

    return MA_SUCCESS;
}
|
||||
|
||||
/*
Uninitializes a reverb node. Safe to call with NULL (no-op). verblib itself
performs no allocation, so only the base node needs tearing down.
*/
MA_API void ma_reverb_node_uninit(ma_reverb_node* pReverbNode, const ma_allocation_callbacks* pAllocationCallbacks)
{
    if (pReverbNode == NULL) {
        return;
    }

    /* The base node is always uninitialized first. */
    ma_node_uninit(&pReverbNode->baseNode, pAllocationCallbacks);
}
|
||||
42
thirdparty/miniaudio/extras/nodes/ma_reverb_node/ma_reverb_node.h
vendored
Normal file
42
thirdparty/miniaudio/extras/nodes/ma_reverb_node/ma_reverb_node.h
vendored
Normal file
@@ -0,0 +1,42 @@
|
||||
/* Include ma_reverb_node.h after miniaudio.h */
#ifndef ma_reverb_node_h
#define ma_reverb_node_h

#include "verblib.h"

#ifdef __cplusplus
extern "C" {
#endif

/*
The reverb node has one input and one output.
*/
typedef struct
{
    ma_node_config nodeConfig;
    ma_uint32 channels;     /* The number of channels of the source, which will be the same as the output. Must be 1 or 2. */
    ma_uint32 sampleRate;   /* Must be at least 22050, per verblib's restrictions. */
    float roomSize;         /* 0..1. See verblib_set_room_size(). */
    float damping;          /* 0..1. See verblib_set_damping(). */
    float width;            /* 0..1. Stereo width of the reverb. See verblib_set_width(). */
    float wetVolume;        /* 0..1. See verblib_set_wet(). */
    float dryVolume;        /* 0..1. See verblib_set_dry(). */
    float mode;             /* Values >= 0.5 mean frozen. See verblib_set_mode(). */
} ma_reverb_node_config;

/* Initializes a config with verblib's default tuning values. */
MA_API ma_reverb_node_config ma_reverb_node_config_init(ma_uint32 channels, ma_uint32 sampleRate);


typedef struct
{
    ma_node_base baseNode;  /* Must be the first member so the struct can be cast to ma_node*. */
    verblib reverb;         /* The underlying reverb state. Allocation-free. */
} ma_reverb_node;

MA_API ma_result ma_reverb_node_init(ma_node_graph* pNodeGraph, const ma_reverb_node_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_reverb_node* pReverbNode);
MA_API void ma_reverb_node_uninit(ma_reverb_node* pReverbNode, const ma_allocation_callbacks* pAllocationCallbacks);

#ifdef __cplusplus
}
#endif
#endif  /* ma_reverb_node_h */
|
||||
118
thirdparty/miniaudio/extras/nodes/ma_reverb_node/ma_reverb_node_example.c
vendored
Normal file
118
thirdparty/miniaudio/extras/nodes/ma_reverb_node/ma_reverb_node_example.c
vendored
Normal file
@@ -0,0 +1,118 @@
|
||||
#define MINIAUDIO_IMPLEMENTATION
|
||||
#include "../../../miniaudio.h"
|
||||
#include "ma_reverb_node.c"
|
||||
|
||||
#include <stdio.h>
|
||||
|
||||
#define DEVICE_FORMAT ma_format_f32 /* Must always be f32 for this example because the node graph system only works with this. */
|
||||
#define DEVICE_CHANNELS 1 /* For this example, always set to 1. */
|
||||
#define DEVICE_SAMPLE_RATE 48000 /* Cannot be less than 22050 for this example. */
|
||||
|
||||
static ma_audio_buffer_ref g_dataSupply; /* The underlying data source of the source node. */
|
||||
static ma_data_source_node g_dataSupplyNode; /* The node that will sit at the root level. Will be reading data from g_dataSupply. */
|
||||
static ma_reverb_node g_reverbNode; /* The reverb node. */
|
||||
static ma_node_graph g_nodeGraph; /* The main node graph that we'll be feeding data through. */
|
||||
|
||||
/*
Duplex device callback: feeds the captured input into the node graph's source
node and pulls the reverberated result straight into the playback buffer.
*/
void data_callback(ma_device* pDevice, void* pOutput, const void* pInput, ma_uint32 frameCount)
{
    MA_ASSERT(pDevice->capture.format == pDevice->playback.format && pDevice->capture.format == ma_format_f32);
    MA_ASSERT(pDevice->capture.channels == pDevice->playback.channels);

    /*
    The node graph system is a pulling style of API. At the lowest level of the chain will be a
    node acting as a data source for the purpose of delivering the initial audio data. In our case,
    the data source is our `pInput` buffer. We need to update the underlying data source so that it
    reads data from `pInput`.
    */
    ma_audio_buffer_ref_set_data(&g_dataSupply, pInput, frameCount);

    /* With the source buffer configured we can now read directly from the node graph. */
    ma_node_graph_read_pcm_frames(&g_nodeGraph, pOutput, frameCount, NULL);
}
|
||||
|
||||
/*
Example entry point: sets up a full-duplex device, builds the node graph
(data supply -> reverb -> endpoint), runs until Enter is pressed, then tears
everything down in reverse order via goto-style cleanup labels.
*/
int main(int argc, char** argv)
{
    ma_result result;
    ma_device_config deviceConfig;
    ma_device device;
    ma_node_graph_config nodeGraphConfig;
    ma_reverb_node_config reverbNodeConfig;
    ma_data_source_node_config dataSupplyNodeConfig;

    /* Duplex device: capture feeds the graph, playback consumes its output. */
    deviceConfig = ma_device_config_init(ma_device_type_duplex);
    deviceConfig.capture.pDeviceID  = NULL;
    deviceConfig.capture.format     = DEVICE_FORMAT;
    deviceConfig.capture.channels   = DEVICE_CHANNELS;
    deviceConfig.capture.shareMode  = ma_share_mode_shared;
    deviceConfig.playback.pDeviceID = NULL;
    deviceConfig.playback.format    = DEVICE_FORMAT;
    deviceConfig.playback.channels  = DEVICE_CHANNELS;
    deviceConfig.sampleRate         = DEVICE_SAMPLE_RATE;
    deviceConfig.dataCallback       = data_callback;
    result = ma_device_init(NULL, &deviceConfig, &device);
    if (result != MA_SUCCESS) {
        return result;
    }


    /* Node graph. */
    nodeGraphConfig = ma_node_graph_config_init(device.capture.channels);

    result = ma_node_graph_init(&nodeGraphConfig, NULL, &g_nodeGraph);
    if (result != MA_SUCCESS) {
        printf("Failed to initialize node graph.");
        goto done0;
    }


    /* Reverb. Attached straight to the endpoint. */
    reverbNodeConfig = ma_reverb_node_config_init(device.capture.channels, device.sampleRate);

    result = ma_reverb_node_init(&g_nodeGraph, &reverbNodeConfig, NULL, &g_reverbNode);
    if (result != MA_SUCCESS) {
        printf("Failed to initialize reverb node.");
        goto done1;
    }

    ma_node_attach_output_bus(&g_reverbNode, 0, ma_node_graph_get_endpoint(&g_nodeGraph), 0);


    /* Data supply. Attached to input bus 0 of the reverb node. */
    /* The buffer ref has no data yet; data_callback() points it at pInput each cycle. */
    result = ma_audio_buffer_ref_init(device.capture.format, device.capture.channels, NULL, 0, &g_dataSupply);
    if (result != MA_SUCCESS) {
        printf("Failed to initialize audio buffer for source.");
        goto done2;
    }

    dataSupplyNodeConfig = ma_data_source_node_config_init(&g_dataSupply);

    /* NOTE(review): if this fails, g_dataSupply is never uninitialized — confirm whether ma_audio_buffer_ref requires cleanup. */
    result = ma_data_source_node_init(&g_nodeGraph, &dataSupplyNodeConfig, NULL, &g_dataSupplyNode);
    if (result != MA_SUCCESS) {
        printf("Failed to initialize source node.");
        goto done2;
    }

    ma_node_attach_output_bus(&g_dataSupplyNode, 0, &g_reverbNode, 0);



    /* Now we just start the device and wait for the user to terminate the program. */
    ma_device_start(&device);

    printf("Press Enter to quit...\n");
    getchar();

    /* It's important that we stop the device first or else we'll uninitialize the graph from under the device. */
    ma_device_stop(&device);


    /* Teardown is the reverse of construction; each label unwinds one step further. */
    /*done3:*/ ma_data_source_node_uninit(&g_dataSupplyNode, NULL);
done2: ma_reverb_node_uninit(&g_reverbNode, NULL);
done1: ma_node_graph_uninit(&g_nodeGraph, NULL);
done0: ma_device_uninit(&device);

    (void)argc;
    (void)argv;

    return 0;
}
|
||||
743
thirdparty/miniaudio/extras/nodes/ma_reverb_node/verblib.h
vendored
Normal file
743
thirdparty/miniaudio/extras/nodes/ma_reverb_node/verblib.h
vendored
Normal file
@@ -0,0 +1,743 @@
|
||||
/* Reverb Library
|
||||
* Verblib version 0.5 - 2022-10-25
|
||||
*
|
||||
* Philip Bennefall - philip@blastbay.com
|
||||
*
|
||||
* See the end of this file for licensing terms.
|
||||
* This reverb is based on Freeverb, a public domain reverb written by Jezar at Dreampoint.
|
||||
*
|
||||
* IMPORTANT: The reverb currently only works with 1 or 2 channels, at sample rates of 22050 HZ and above.
|
||||
* These restrictions may be lifted in a future version.
|
||||
*
|
||||
* USAGE
|
||||
*
|
||||
* This is a single-file library. To use it, do something like the following in one .c file.
|
||||
* #define VERBLIB_IMPLEMENTATION
|
||||
* #include "verblib.h"
|
||||
*
|
||||
* You can then #include this file in other parts of the program as you would with any other header file.
|
||||
*/
|
||||
|
||||
#ifndef VERBLIB_H
|
||||
#define VERBLIB_H
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
/* COMPILE-TIME OPTIONS */
|
||||
|
||||
/* The maximum sample rate that should be supported, specified as a multiple of 44100. */
|
||||
#ifndef verblib_max_sample_rate_multiplier
|
||||
#define verblib_max_sample_rate_multiplier 4
|
||||
#endif
|
||||
|
||||
/* The silence threshold which is used when calculating decay time. */
|
||||
#ifndef verblib_silence_threshold
|
||||
#define verblib_silence_threshold 80.0 /* In dB (absolute). */
|
||||
#endif
|
||||
|
||||
/* PUBLIC API */
|
||||
|
||||
typedef struct verblib verblib;
|
||||
|
||||
/* Initialize a verblib structure.
|
||||
*
|
||||
* Call this function to initialize the verblib structure.
|
||||
* Returns nonzero (true) on success or 0 (false) on failure.
|
||||
* The function will only fail if one or more of the parameters are invalid.
|
||||
*/
|
||||
int verblib_initialize ( verblib* verb, unsigned long sample_rate, unsigned int channels );
|
||||
|
||||
/* Run the reverb.
|
||||
*
|
||||
* Call this function continuously to generate your output.
|
||||
* output_buffer may be the same pointer as input_buffer if in place processing is desired.
|
||||
* frames specifies the number of sample frames that should be processed.
|
||||
*/
|
||||
void verblib_process ( verblib* verb, const float* input_buffer, float* output_buffer, unsigned long frames );
|
||||
|
||||
/* Set the size of the room, between 0.0 and 1.0. */
|
||||
void verblib_set_room_size ( verblib* verb, float value );
|
||||
|
||||
/* Get the size of the room. */
|
||||
float verblib_get_room_size ( const verblib* verb );
|
||||
|
||||
/* Set the amount of damping, between 0.0 and 1.0. */
|
||||
void verblib_set_damping ( verblib* verb, float value );
|
||||
|
||||
/* Get the amount of damping. */
|
||||
float verblib_get_damping ( const verblib* verb );
|
||||
|
||||
/* Set the stereo width of the reverb, between 0.0 and 1.0. */
|
||||
void verblib_set_width ( verblib* verb, float value );
|
||||
|
||||
/* Get the stereo width of the reverb. */
|
||||
float verblib_get_width ( const verblib* verb );
|
||||
|
||||
/* Set the volume of the wet signal, between 0.0 and 1.0. */
|
||||
void verblib_set_wet ( verblib* verb, float value );
|
||||
|
||||
/* Get the volume of the wet signal. */
|
||||
float verblib_get_wet ( const verblib* verb );
|
||||
|
||||
/* Set the volume of the dry signal, between 0.0 and 1.0. */
|
||||
void verblib_set_dry ( verblib* verb, float value );
|
||||
|
||||
/* Get the volume of the dry signal. */
|
||||
float verblib_get_dry ( const verblib* verb );
|
||||
|
||||
/* Set the stereo width of the input signal sent to the reverb, 0.0 or greater.
|
||||
* Values less than 1.0 narrow the signal, 1.0 sends the input signal unmodified, values greater than 1.0 widen the signal.
|
||||
*/
|
||||
void verblib_set_input_width ( verblib* verb, float value );
|
||||
|
||||
/* Get the stereo width of the input signal sent to the reverb. */
|
||||
float verblib_get_input_width ( const verblib* verb );
|
||||
|
||||
/* Set the mode of the reverb, where values below 0.5 mean normal and values above mean frozen. */
|
||||
void verblib_set_mode ( verblib* verb, float value );
|
||||
|
||||
/* Get the mode of the reverb. */
|
||||
float verblib_get_mode ( const verblib* verb );
|
||||
|
||||
/* Get the decay time in sample frames based on the current room size setting. */
|
||||
/* If freeze mode is active, the decay time is infinite and this function returns 0. */
|
||||
unsigned long verblib_get_decay_time_in_frames ( const verblib* verb );
|
||||
|
||||
/* INTERNAL STRUCTURES */
|
||||
|
||||
/* Allpass filter */
|
||||
typedef struct verblib_allpass verblib_allpass;
struct verblib_allpass
{
    float* buffer;   /* Delay line. Not owned; points into one of the bufallpass* arrays in struct verblib. */
    float feedback;  /* Feedback coefficient. Assigned by verblib_initialize(), not by verblib_allpass_initialize(). */
    int bufsize;     /* Number of valid frames in buffer (scaled for the sample rate). */
    int bufidx;      /* Current read/write position; wraps at bufsize. */
};
|
||||
|
||||
/* Comb filter */
|
||||
typedef struct verblib_comb verblib_comb;
struct verblib_comb
{
    float* buffer;      /* Delay line. Not owned; points into one of the bufcomb* arrays in struct verblib. */
    float feedback;     /* Feedback gain; set from the room size in verblib_update(). */
    float filterstore;  /* One-sample state of the lowpass filter in the feedback path. */
    float damp1;        /* Lowpass weight applied to filterstore (the history). */
    float damp2;        /* Lowpass weight applied to the new sample; damp1 + damp2 == 1. */
    int bufsize;        /* Number of valid frames in buffer (scaled for the sample rate). */
    int bufidx;         /* Current read/write position; wraps at bufsize. */
};
|
||||
|
||||
/* Reverb model tuning values */
|
||||
#define verblib_numcombs 8
|
||||
#define verblib_numallpasses 4
|
||||
#define verblib_muted 0.0f
|
||||
#define verblib_fixedgain 0.015f
|
||||
#define verblib_scalewet 3.0f
|
||||
#define verblib_scaledry 2.0f
|
||||
#define verblib_scaledamp 0.8f
|
||||
#define verblib_scaleroom 0.28f
|
||||
#define verblib_offsetroom 0.7f
|
||||
#define verblib_initialroom 0.5f
|
||||
#define verblib_initialdamp 0.25f
|
||||
#define verblib_initialwet 1.0f/verblib_scalewet
|
||||
#define verblib_initialdry 0.0f
|
||||
#define verblib_initialwidth 1.0f
|
||||
#define verblib_initialinputwidth 0.0f
|
||||
#define verblib_initialmode 0.0f
|
||||
#define verblib_freezemode 0.5f
|
||||
#define verblib_stereospread 23
|
||||
|
||||
/*
 * These values assume a 44.1kHz sample rate, but will be scaled appropriately.
 * The values were obtained by listening tests.
 */
|
||||
#define verblib_combtuningL1 1116
|
||||
#define verblib_combtuningR1 (1116+verblib_stereospread)
|
||||
#define verblib_combtuningL2 1188
|
||||
#define verblib_combtuningR2 (1188+verblib_stereospread)
|
||||
#define verblib_combtuningL3 1277
|
||||
#define verblib_combtuningR3 (1277+verblib_stereospread)
|
||||
#define verblib_combtuningL4 1356
|
||||
#define verblib_combtuningR4 (1356+verblib_stereospread)
|
||||
#define verblib_combtuningL5 1422
|
||||
#define verblib_combtuningR5 (1422+verblib_stereospread)
|
||||
#define verblib_combtuningL6 1491
|
||||
#define verblib_combtuningR6 (1491+verblib_stereospread)
|
||||
#define verblib_combtuningL7 1557
|
||||
#define verblib_combtuningR7 (1557+verblib_stereospread)
|
||||
#define verblib_combtuningL8 1617
|
||||
#define verblib_combtuningR8 (1617+verblib_stereospread)
|
||||
#define verblib_allpasstuningL1 556
|
||||
#define verblib_allpasstuningR1 (556+verblib_stereospread)
|
||||
#define verblib_allpasstuningL2 441
|
||||
#define verblib_allpasstuningR2 (441+verblib_stereospread)
|
||||
#define verblib_allpasstuningL3 341
|
||||
#define verblib_allpasstuningR3 (341+verblib_stereospread)
|
||||
#define verblib_allpasstuningL4 225
|
||||
#define verblib_allpasstuningR4 (225+verblib_stereospread)
|
||||
|
||||
/* The main reverb structure. This is the structure that you will create an instance of when using the reverb. */
|
||||
struct verblib
{
    unsigned int channels;      /* 1 or 2; validated by verblib_initialize(). */
    float gain;                 /* Input gain: verblib_fixedgain normally, verblib_muted when frozen. */
    float roomsize, roomsize1;  /* roomsize is the stored (pre-scaled) value; roomsize1 is the effective value after freeze-mode logic. */
    float damp, damp1;          /* Same pattern: damp is stored, damp1 is effective. */
    float wet, wet1, wet2;      /* wet is stored; wet1/wet2 are the derived stereo mix weights (see verblib_update()). */
    float dry;                  /* Dry signal gain (pre-scaled by verblib_scaledry). */
    float width;                /* Stereo width of the reverb output. */
    float input_width;          /* Stereo width applied to the input signal (0 sums to mono). */
    float mode;                 /* >= verblib_freezemode means frozen. */

    /*
     * The following are all declared inline
     * to remove the need for dynamic allocation.
     */

    /* Comb filters */
    verblib_comb combL[verblib_numcombs];
    verblib_comb combR[verblib_numcombs];

    /* Allpass filters */
    verblib_allpass allpassL[verblib_numallpasses];
    verblib_allpass allpassR[verblib_numallpasses];

    /* Buffers for the combs. Sized for the worst-case sample rate; only the scaled prefix is used. */
    float bufcombL1[verblib_combtuningL1* verblib_max_sample_rate_multiplier];
    float bufcombR1[verblib_combtuningR1* verblib_max_sample_rate_multiplier];
    float bufcombL2[verblib_combtuningL2* verblib_max_sample_rate_multiplier];
    float bufcombR2[verblib_combtuningR2* verblib_max_sample_rate_multiplier];
    float bufcombL3[verblib_combtuningL3* verblib_max_sample_rate_multiplier];
    float bufcombR3[verblib_combtuningR3* verblib_max_sample_rate_multiplier];
    float bufcombL4[verblib_combtuningL4* verblib_max_sample_rate_multiplier];
    float bufcombR4[verblib_combtuningR4* verblib_max_sample_rate_multiplier];
    float bufcombL5[verblib_combtuningL5* verblib_max_sample_rate_multiplier];
    float bufcombR5[verblib_combtuningR5* verblib_max_sample_rate_multiplier];
    float bufcombL6[verblib_combtuningL6* verblib_max_sample_rate_multiplier];
    float bufcombR6[verblib_combtuningR6* verblib_max_sample_rate_multiplier];
    float bufcombL7[verblib_combtuningL7* verblib_max_sample_rate_multiplier];
    float bufcombR7[verblib_combtuningR7* verblib_max_sample_rate_multiplier];
    float bufcombL8[verblib_combtuningL8* verblib_max_sample_rate_multiplier];
    float bufcombR8[verblib_combtuningR8* verblib_max_sample_rate_multiplier];

    /* Buffers for the allpasses */
    float bufallpassL1[verblib_allpasstuningL1* verblib_max_sample_rate_multiplier];
    float bufallpassR1[verblib_allpasstuningR1* verblib_max_sample_rate_multiplier];
    float bufallpassL2[verblib_allpasstuningL2* verblib_max_sample_rate_multiplier];
    float bufallpassR2[verblib_allpasstuningR2* verblib_max_sample_rate_multiplier];
    float bufallpassL3[verblib_allpasstuningL3* verblib_max_sample_rate_multiplier];
    float bufallpassR3[verblib_allpasstuningR3* verblib_max_sample_rate_multiplier];
    float bufallpassL4[verblib_allpasstuningL4* verblib_max_sample_rate_multiplier];
    float bufallpassR4[verblib_allpasstuningR4* verblib_max_sample_rate_multiplier];
};
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif /* VERBLIB_H */
|
||||
|
||||
/* IMPLEMENTATION */
|
||||
|
||||
#ifdef VERBLIB_IMPLEMENTATION
|
||||
|
||||
#include <stddef.h>
|
||||
#include <math.h>
|
||||
|
||||
#ifdef _MSC_VER
|
||||
#define VERBLIB_INLINE __forceinline
|
||||
#else
|
||||
#ifdef __GNUC__
|
||||
#define VERBLIB_INLINE inline __attribute__((always_inline))
|
||||
#else
|
||||
#define VERBLIB_INLINE inline
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#define verblib_max(x, y) (((x) > (y)) ? (x) : (y))

/*
 * Adds and subtracts a small offset to flush denormal float values back into
 * the normal range, avoiding the heavy performance penalty denormals incur on
 * some CPUs. Wrapped in do/while(0) so the macro expands to a single statement
 * (safe inside an unbraced if/else), and the argument is parenthesized to
 * avoid operator-precedence surprises (previously it was two bare statements
 * with an unparenthesized argument).
 */
#define undenormalise(sample) do { (sample) += 1.0f; (sample) -= 1.0f; } while (0)
|
||||
|
||||
/* Allpass filter */
|
||||
/* Attaches a caller-provided delay buffer to an allpass filter and resets its
 * position. The feedback coefficient is deliberately left untouched here; it
 * is assigned separately by verblib_initialize(). */
static void verblib_allpass_initialize ( verblib_allpass* allpass, float* buf, int size )
{
    allpass->bufidx  = 0;
    allpass->bufsize = size;
    allpass->buffer  = buf;
}
|
||||
|
||||
/* Runs one sample through a Schroeder allpass filter and returns the result. */
static VERBLIB_INLINE float verblib_allpass_process ( verblib_allpass* allpass, float input )
{
    float output;
    float bufout;

    /* Read the delayed sample and flush any denormal before using it. */
    bufout = allpass->buffer[allpass->bufidx];
    undenormalise ( bufout );

    output = -input + bufout;
    /* Write the new sample (with feedback) back into the delay line. */
    allpass->buffer[allpass->bufidx] = input + ( bufout * allpass->feedback );

    /* Advance the circular buffer position. */
    if ( ++allpass->bufidx >= allpass->bufsize )
    {
        allpass->bufidx = 0;
    }

    return output;
}
|
||||
|
||||
/* Clears the allpass delay line so no stale samples remain. */
static void verblib_allpass_mute ( verblib_allpass* allpass )
{
    float* p   = allpass->buffer;
    float* end = p + allpass->bufsize;

    while ( p < end )
    {
        *p++ = 0.0f;
    }
}
|
||||
|
||||
/* Comb filter */
|
||||
/* Attaches a caller-provided delay buffer to a comb filter and resets its
 * position and lowpass state. The feedback and damping coefficients are set
 * later via verblib_update()/verblib_comb_set_damp(). */
static void verblib_comb_initialize ( verblib_comb* comb, float* buf, int size )
{
    comb->bufidx      = 0;
    comb->filterstore = 0.0f;
    comb->bufsize     = size;
    comb->buffer      = buf;
}
|
||||
|
||||
/* Clears the comb delay line so no stale samples remain. */
static void verblib_comb_mute ( verblib_comb* comb )
{
    float* p   = comb->buffer;
    float* end = p + comb->bufsize;

    while ( p < end )
    {
        *p++ = 0.0f;
    }
}
|
||||
|
||||
/* Sets the damping of a comb filter. damp1 weights the lowpass history and
 * damp2 weights the incoming sample; the two always sum to 1. */
static void verblib_comb_set_damp ( verblib_comb* comb, float val )
{
    comb->damp2 = 1.0f - val;
    comb->damp1 = val;
}
|
||||
|
||||
/* Runs one sample through a lowpass-feedback comb filter and returns the
 * delayed output. */
static VERBLIB_INLINE float verblib_comb_process ( verblib_comb* comb, float input )
{
    float output;

    /* The output is simply the delayed sample; flush denormals before use. */
    output = comb->buffer[comb->bufidx];
    undenormalise ( output );

    /* One-pole lowpass in the feedback path: damp1 + damp2 == 1. */
    comb->filterstore = ( output * comb->damp2 ) + ( comb->filterstore * comb->damp1 );
    undenormalise ( comb->filterstore );

    /* Write the new sample plus filtered feedback into the delay line. */
    comb->buffer[comb->bufidx] = input + ( comb->filterstore * comb->feedback );

    /* Advance the circular buffer position. */
    if ( ++comb->bufidx >= comb->bufsize )
    {
        comb->bufidx = 0;
    }

    return output;
}
|
||||
|
||||
static void verblib_update ( verblib* verb )
{
    /* Recalculate internal values after parameter change. */

    int i;

    /* Derive the stereo mix weights from the wet volume and width. */
    verb->wet1 = verb->wet * ( verb->width / 2.0f + 0.5f );
    verb->wet2 = verb->wet * ( ( 1.0f - verb->width ) / 2.0f );

    if ( verb->mode >= verblib_freezemode )
    {
        /* Freeze: full feedback, no damping, and the input is muted so the
           current tail recirculates forever. */
        verb->roomsize1 = 1.0f;
        verb->damp1 = 0.0f;
        verb->gain = verblib_muted;
    }
    else
    {
        verb->roomsize1 = verb->roomsize;
        verb->damp1 = verb->damp;
        verb->gain = verblib_fixedgain;
    }

    /* Push the effective feedback/damping values into every comb filter. */
    for ( i = 0; i < verblib_numcombs; i++ )
    {
        verb->combL[i].feedback = verb->roomsize1;
        verb->combR[i].feedback = verb->roomsize1;
        verblib_comb_set_damp ( &verb->combL[i], verb->damp1 );
        verblib_comb_set_damp ( &verb->combR[i], verb->damp1 );
    }

}
|
||||
|
||||
/* Clears every delay line in the reverb. Skipped when frozen, because in
 * freeze mode the recirculating tail is intentionally kept alive. */
static void verblib_mute ( verblib* verb )
{
    int filter_index;

    if ( verblib_get_mode ( verb ) >= verblib_freezemode )
    {
        return;
    }

    for ( filter_index = 0; filter_index < verblib_numallpasses; filter_index++ )
    {
        verblib_allpass_mute ( &verb->allpassL[filter_index] );
        verblib_allpass_mute ( &verb->allpassR[filter_index] );
    }

    for ( filter_index = 0; filter_index < verblib_numcombs; filter_index++ )
    {
        verblib_comb_mute ( &verb->combL[filter_index] );
        verblib_comb_mute ( &verb->combR[filter_index] );
    }
}
|
||||
|
||||
/* Scales a buffer length tuned for 44100 Hz to the actual sample rate,
 * clamping the result to at least one frame. */
static int verblib_get_verblib_scaled_buffer_size ( unsigned long sample_rate, unsigned long value )
{
    long double scaled = ( ( long double ) sample_rate ) / 44100.0;
    scaled *= ( long double ) value;

    if ( scaled < 1.0 )
    {
        return 1;
    }
    return ( int ) scaled;
}
|
||||
|
||||
/* Initializes the reverb: validates the configuration, wires each comb and
 * allpass filter to its statically-allocated buffer (scaled for the sample
 * rate), applies default parameters and clears the delay lines.
 * Returns 1 on success, 0 on invalid arguments. */
int verblib_initialize ( verblib* verb, unsigned long sample_rate, unsigned int channels )
{
    int i;

    if ( channels != 1 && channels != 2 )
    {
        return 0; /* Currently supports only 1 or 2 channels. */
    }
    if ( sample_rate < 22050 )
    {
        return 0; /* The minimum supported sample rate is 22050 HZ. */
    }
    else if ( sample_rate > 44100 * verblib_max_sample_rate_multiplier )
    {
        return 0; /* The sample rate is too high. */
    }

    verb->channels = channels;

    /* Tie the components to their buffers. */
    verblib_comb_initialize ( &verb->combL[0], verb->bufcombL1, verblib_get_verblib_scaled_buffer_size ( sample_rate, verblib_combtuningL1 ) );
    verblib_comb_initialize ( &verb->combR[0], verb->bufcombR1, verblib_get_verblib_scaled_buffer_size ( sample_rate, verblib_combtuningR1 ) );
    verblib_comb_initialize ( &verb->combL[1], verb->bufcombL2, verblib_get_verblib_scaled_buffer_size ( sample_rate, verblib_combtuningL2 ) );
    verblib_comb_initialize ( &verb->combR[1], verb->bufcombR2, verblib_get_verblib_scaled_buffer_size ( sample_rate, verblib_combtuningR2 ) );
    verblib_comb_initialize ( &verb->combL[2], verb->bufcombL3, verblib_get_verblib_scaled_buffer_size ( sample_rate, verblib_combtuningL3 ) );
    verblib_comb_initialize ( &verb->combR[2], verb->bufcombR3, verblib_get_verblib_scaled_buffer_size ( sample_rate, verblib_combtuningR3 ) );
    verblib_comb_initialize ( &verb->combL[3], verb->bufcombL4, verblib_get_verblib_scaled_buffer_size ( sample_rate, verblib_combtuningL4 ) );
    verblib_comb_initialize ( &verb->combR[3], verb->bufcombR4, verblib_get_verblib_scaled_buffer_size ( sample_rate, verblib_combtuningR4 ) );
    verblib_comb_initialize ( &verb->combL[4], verb->bufcombL5, verblib_get_verblib_scaled_buffer_size ( sample_rate, verblib_combtuningL5 ) );
    verblib_comb_initialize ( &verb->combR[4], verb->bufcombR5, verblib_get_verblib_scaled_buffer_size ( sample_rate, verblib_combtuningR5 ) );
    verblib_comb_initialize ( &verb->combL[5], verb->bufcombL6, verblib_get_verblib_scaled_buffer_size ( sample_rate, verblib_combtuningL6 ) );
    verblib_comb_initialize ( &verb->combR[5], verb->bufcombR6, verblib_get_verblib_scaled_buffer_size ( sample_rate, verblib_combtuningR6 ) );
    verblib_comb_initialize ( &verb->combL[6], verb->bufcombL7, verblib_get_verblib_scaled_buffer_size ( sample_rate, verblib_combtuningL7 ) );
    verblib_comb_initialize ( &verb->combR[6], verb->bufcombR7, verblib_get_verblib_scaled_buffer_size ( sample_rate, verblib_combtuningR7 ) );
    verblib_comb_initialize ( &verb->combL[7], verb->bufcombL8, verblib_get_verblib_scaled_buffer_size ( sample_rate, verblib_combtuningL8 ) );
    verblib_comb_initialize ( &verb->combR[7], verb->bufcombR8, verblib_get_verblib_scaled_buffer_size ( sample_rate, verblib_combtuningR8 ) );

    verblib_allpass_initialize ( &verb->allpassL[0], verb->bufallpassL1, verblib_get_verblib_scaled_buffer_size ( sample_rate, verblib_allpasstuningL1 ) );
    verblib_allpass_initialize ( &verb->allpassR[0], verb->bufallpassR1, verblib_get_verblib_scaled_buffer_size ( sample_rate, verblib_allpasstuningR1 ) );
    verblib_allpass_initialize ( &verb->allpassL[1], verb->bufallpassL2, verblib_get_verblib_scaled_buffer_size ( sample_rate, verblib_allpasstuningL2 ) );
    verblib_allpass_initialize ( &verb->allpassR[1], verb->bufallpassR2, verblib_get_verblib_scaled_buffer_size ( sample_rate, verblib_allpasstuningR2 ) );
    verblib_allpass_initialize ( &verb->allpassL[2], verb->bufallpassL3, verblib_get_verblib_scaled_buffer_size ( sample_rate, verblib_allpasstuningL3 ) );
    verblib_allpass_initialize ( &verb->allpassR[2], verb->bufallpassR3, verblib_get_verblib_scaled_buffer_size ( sample_rate, verblib_allpasstuningR3 ) );
    verblib_allpass_initialize ( &verb->allpassL[3], verb->bufallpassL4, verblib_get_verblib_scaled_buffer_size ( sample_rate, verblib_allpasstuningL4 ) );
    verblib_allpass_initialize ( &verb->allpassR[3], verb->bufallpassR4, verblib_get_verblib_scaled_buffer_size ( sample_rate, verblib_allpasstuningR4 ) );

    /* Set default values. */
    for ( i = 0; i < verblib_numallpasses; i++ )
    {
        verb->allpassL[i].feedback = 0.5f;
        verb->allpassR[i].feedback = 0.5f;
    }

    verblib_set_wet ( verb, verblib_initialwet );
    verblib_set_room_size ( verb, verblib_initialroom );
    verblib_set_dry ( verb, verblib_initialdry );
    verblib_set_damping ( verb, verblib_initialdamp );
    verblib_set_width ( verb, verblib_initialwidth );
    verblib_set_input_width ( verb, verblib_initialinputwidth );
    verblib_set_mode ( verb, verblib_initialmode );

    /* The buffers will be full of rubbish - so we MUST mute them. */
    verblib_mute ( verb );

    return 1;
}
|
||||
|
||||
/* Runs `frames` sample frames through the reverb. output_buffer may alias
 * input_buffer for in-place processing. Three paths: mono, stereo with
 * mid/side input widening, and stereo summed to mono. */
void verblib_process ( verblib* verb, const float* input_buffer, float* output_buffer, unsigned long frames )
{
    int i;
    float outL, outR, input;

    if ( verb->channels == 1 )
    {
        while ( frames-- > 0 )
        {
            outL = 0.0f;
            /* The 2x factor mirrors the stereo path, where two channels are summed. */
            input = ( input_buffer[0] * 2.0f ) * verb->gain;

            /* Accumulate comb filters in parallel. */
            for ( i = 0; i < verblib_numcombs; i++ )
            {
                outL += verblib_comb_process ( &verb->combL[i], input );
            }

            /* Feed through allpasses in series. */
            for ( i = 0; i < verblib_numallpasses; i++ )
            {
                outL = verblib_allpass_process ( &verb->allpassL[i], outL );
            }

            /* Calculate output REPLACING anything already there. */
            output_buffer[0] = outL * verb->wet1 + input_buffer[0] * verb->dry;

            /* Increment sample pointers. */
            ++input_buffer;
            ++output_buffer;
        }
    }
    else if ( verb->channels == 2 )
    {
        if ( verb->input_width > 0.0f ) /* Stereo input is widened or narrowed. */
        {

            /*
             * The stereo mid/side code is derived from:
             * https://www.musicdsp.org/en/latest/Effects/256-stereo-width-control-obtained-via-transfromation-matrix.html
             * The description of the code on the above page says:
             *
             * This work is hereby placed in the public domain for all purposes, including
             * use in commercial applications.
             */

            /* These coefficients are loop-invariant, so compute them once. */
            const float tmp = 1 / verblib_max ( 1 + verb->input_width, 2 );
            const float coef_mid = 1 * tmp;
            const float coef_side = verb->input_width * tmp;
            while ( frames-- > 0 )
            {
                /* Convert L/R to mid/side, apply the width, convert back. */
                const float mid = ( input_buffer[0] + input_buffer[1] ) * coef_mid;
                const float side = ( input_buffer[1] - input_buffer[0] ) * coef_side;
                const float input_left = ( mid - side ) * ( verb->gain * 2.0f );
                const float input_right = ( mid + side ) * ( verb->gain * 2.0f );

                outL = outR = 0.0f;

                /* Accumulate comb filters in parallel. */
                for ( i = 0; i < verblib_numcombs; i++ )
                {
                    outL += verblib_comb_process ( &verb->combL[i], input_left );
                    outR += verblib_comb_process ( &verb->combR[i], input_right );
                }

                /* Feed through allpasses in series. */
                for ( i = 0; i < verblib_numallpasses; i++ )
                {
                    outL = verblib_allpass_process ( &verb->allpassL[i], outL );
                    outR = verblib_allpass_process ( &verb->allpassR[i], outR );
                }

                /* Calculate output REPLACING anything already there.
                   wet1/wet2 cross-mix the two channels to control output width. */
                output_buffer[0] = outL * verb->wet1 + outR * verb->wet2 + input_buffer[0] * verb->dry;
                output_buffer[1] = outR * verb->wet1 + outL * verb->wet2 + input_buffer[1] * verb->dry;

                /* Increment sample pointers. */
                input_buffer += 2;
                output_buffer += 2;
            }
        }
        else /* Stereo input is summed to mono. */
        {
            while ( frames-- > 0 )
            {
                outL = outR = 0.0f;
                input = ( input_buffer[0] + input_buffer[1] ) * verb->gain;

                /* Accumulate comb filters in parallel. */
                for ( i = 0; i < verblib_numcombs; i++ )
                {
                    outL += verblib_comb_process ( &verb->combL[i], input );
                    outR += verblib_comb_process ( &verb->combR[i], input );
                }

                /* Feed through allpasses in series. */
                for ( i = 0; i < verblib_numallpasses; i++ )
                {
                    outL = verblib_allpass_process ( &verb->allpassL[i], outL );
                    outR = verblib_allpass_process ( &verb->allpassR[i], outR );
                }

                /* Calculate output REPLACING anything already there. */
                output_buffer[0] = outL * verb->wet1 + outR * verb->wet2 + input_buffer[0] * verb->dry;
                output_buffer[1] = outR * verb->wet1 + outL * verb->wet2 + input_buffer[1] * verb->dry;

                /* Increment sample pointers. */
                input_buffer += 2;
                output_buffer += 2;
            }
        }
    }
}
|
||||
|
||||
void verblib_set_room_size ( verblib* verb, float value )
|
||||
{
|
||||
verb->roomsize = ( value * verblib_scaleroom ) + verblib_offsetroom;
|
||||
verblib_update ( verb );
|
||||
}
|
||||
|
||||
float verblib_get_room_size ( const verblib* verb )
|
||||
{
|
||||
return ( verb->roomsize - verblib_offsetroom ) / verblib_scaleroom;
|
||||
}
|
||||
|
||||
void verblib_set_damping ( verblib* verb, float value )
|
||||
{
|
||||
verb->damp = value * verblib_scaledamp;
|
||||
verblib_update ( verb );
|
||||
}
|
||||
|
||||
float verblib_get_damping ( const verblib* verb )
|
||||
{
|
||||
return verb->damp / verblib_scaledamp;
|
||||
}
|
||||
|
||||
void verblib_set_wet ( verblib* verb, float value )
|
||||
{
|
||||
verb->wet = value * verblib_scalewet;
|
||||
verblib_update ( verb );
|
||||
}
|
||||
|
||||
float verblib_get_wet ( const verblib* verb )
|
||||
{
|
||||
return verb->wet / verblib_scalewet;
|
||||
}
|
||||
|
||||
void verblib_set_dry ( verblib* verb, float value )
|
||||
{
|
||||
verb->dry = value * verblib_scaledry;
|
||||
}
|
||||
|
||||
float verblib_get_dry ( const verblib* verb )
|
||||
{
|
||||
return verb->dry / verblib_scaledry;
|
||||
}
|
||||
|
||||
void verblib_set_width ( verblib* verb, float value )
|
||||
{
|
||||
verb->width = value;
|
||||
verblib_update ( verb );
|
||||
}
|
||||
|
||||
float verblib_get_width ( const verblib* verb )
|
||||
{
|
||||
return verb->width;
|
||||
}
|
||||
|
||||
void verblib_set_input_width ( verblib* verb, float value )
|
||||
{
|
||||
verb->input_width = value;
|
||||
}
|
||||
|
||||
float verblib_get_input_width ( const verblib* verb )
|
||||
{
|
||||
return verb->input_width;
|
||||
}
|
||||
|
||||
void verblib_set_mode ( verblib* verb, float value )
|
||||
{
|
||||
verb->mode = value;
|
||||
verblib_update ( verb );
|
||||
}
|
||||
|
||||
float verblib_get_mode ( const verblib* verb )
|
||||
{
|
||||
if ( verb->mode >= verblib_freezemode )
|
||||
{
|
||||
return 1.0f;
|
||||
}
|
||||
return 0.0f;
|
||||
}
|
||||
|
||||
unsigned long verblib_get_decay_time_in_frames ( const verblib* verb )
|
||||
{
|
||||
double decay;
|
||||
|
||||
if ( verb->mode >= verblib_freezemode )
|
||||
{
|
||||
return 0; /* Freeze mode creates an infinite decay. */
|
||||
}
|
||||
|
||||
decay = verblib_silence_threshold / fabs ( -20.0 * log ( 1.0 / verb->roomsize1 ) );
|
||||
decay *= ( double ) ( verb->combR[7].bufsize * 2 );
|
||||
return ( unsigned long ) decay;
|
||||
}
|
||||
|
||||
#endif /* VERBLIB_IMPLEMENTATION */
|
||||
|
||||
/* REVISION HISTORY
|
||||
*
|
||||
* Version 0.5 - 2022-10-25
|
||||
* Added two functions called verblib_set_input_width and verblib_get_input_width.
|
||||
*
|
||||
* Version 0.4 - 2021-01-23
|
||||
* Added a function called verblib_get_decay_time_in_frames.
|
||||
*
|
||||
* Version 0.3 - 2021-01-18
|
||||
* Added support for sample rates of 22050 and above.
|
||||
*
|
||||
* Version 0.2 - 2021-01-17
|
||||
* Added support for processing mono audio.
|
||||
*
|
||||
* Version 0.1 - 2021-01-17
|
||||
* Initial release.
|
||||
*/
|
||||
|
||||
/* LICENSE
|
||||
|
||||
This software is available under 2 licenses -- choose whichever you prefer.
|
||||
------------------------------------------------------------------------------
|
||||
ALTERNATIVE A - MIT No Attribution License
|
||||
Copyright (c) 2022 Philip Bennefall
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||
this software and associated documentation files (the "Software"), to deal in
|
||||
the Software without restriction, including without limitation the rights to
|
||||
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
|
||||
of the Software, and to permit persons to whom the Software is furnished to do
|
||||
so.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
------------------------------------------------------------------------------
|
||||
ALTERNATIVE B - Public Domain (www.unlicense.org)
|
||||
This is free and unencumbered software released into the public domain.
|
||||
Anyone is free to copy, modify, publish, use, compile, sell, or distribute this
|
||||
software, either in source code form or as a compiled binary, for any purpose,
|
||||
commercial or non-commercial, and by any means.
|
||||
|
||||
In jurisdictions that recognize copyright laws, the author or authors of this
|
||||
software dedicate any and all copyright interest in the software to the public
|
||||
domain. We make this dedication for the benefit of the public at large and to
|
||||
the detriment of our heirs and successors. We intend this dedication to be an
|
||||
overt act of relinquishment in perpetuity of all present and future rights to
|
||||
this software under copyright law.
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
||||
ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
|
||||
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
------------------------------------------------------------------------------
|
||||
*/
|
||||
80
thirdparty/miniaudio/extras/nodes/ma_vocoder_node/ma_vocoder_node.c
vendored
Normal file
80
thirdparty/miniaudio/extras/nodes/ma_vocoder_node/ma_vocoder_node.c
vendored
Normal file
@@ -0,0 +1,80 @@
|
||||
|
||||
#define VOCLIB_IMPLEMENTATION
|
||||
#include "ma_vocoder_node.h"
|
||||
|
||||
MA_API ma_vocoder_node_config ma_vocoder_node_config_init(ma_uint32 channels, ma_uint32 sampleRate)
|
||||
{
|
||||
ma_vocoder_node_config config;
|
||||
|
||||
MA_ZERO_OBJECT(&config);
|
||||
config.nodeConfig = ma_node_config_init(); /* Input and output channels will be set in ma_vocoder_node_init(). */
|
||||
config.channels = channels;
|
||||
config.sampleRate = sampleRate;
|
||||
config.bands = 16;
|
||||
config.filtersPerBand = 6;
|
||||
|
||||
return config;
|
||||
}
|
||||
|
||||
|
||||
static void ma_vocoder_node_process_pcm_frames(ma_node* pNode, const float** ppFramesIn, ma_uint32* pFrameCountIn, float** ppFramesOut, ma_uint32* pFrameCountOut)
|
||||
{
|
||||
ma_vocoder_node* pVocoderNode = (ma_vocoder_node*)pNode;
|
||||
|
||||
(void)pFrameCountIn;
|
||||
|
||||
voclib_process(&pVocoderNode->voclib, ppFramesIn[0], ppFramesIn[1], ppFramesOut[0], *pFrameCountOut);
|
||||
}
|
||||
|
||||
static ma_node_vtable g_ma_vocoder_node_vtable =
|
||||
{
|
||||
ma_vocoder_node_process_pcm_frames,
|
||||
NULL,
|
||||
2, /* 2 input channels. */
|
||||
1, /* 1 output channel. */
|
||||
0
|
||||
};
|
||||
|
||||
MA_API ma_result ma_vocoder_node_init(ma_node_graph* pNodeGraph, const ma_vocoder_node_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_vocoder_node* pVocoderNode)
|
||||
{
|
||||
ma_result result;
|
||||
ma_node_config baseConfig;
|
||||
ma_uint32 inputChannels[2];
|
||||
ma_uint32 outputChannels[1];
|
||||
|
||||
if (pVocoderNode == NULL) {
|
||||
return MA_INVALID_ARGS;
|
||||
}
|
||||
|
||||
MA_ZERO_OBJECT(pVocoderNode);
|
||||
|
||||
if (pConfig == NULL) {
|
||||
return MA_INVALID_ARGS;
|
||||
}
|
||||
|
||||
if (voclib_initialize(&pVocoderNode->voclib, (unsigned char)pConfig->bands, (unsigned char)pConfig->filtersPerBand, (unsigned int)pConfig->sampleRate, (unsigned char)pConfig->channels) == 0) {
|
||||
return MA_INVALID_ARGS;
|
||||
}
|
||||
|
||||
inputChannels [0] = pConfig->channels; /* Source/carrier. */
|
||||
inputChannels [1] = 1; /* Excite/modulator. Must always be single channel. */
|
||||
outputChannels[0] = pConfig->channels; /* Output channels is always the same as the source/carrier. */
|
||||
|
||||
baseConfig = pConfig->nodeConfig;
|
||||
baseConfig.vtable = &g_ma_vocoder_node_vtable;
|
||||
baseConfig.pInputChannels = inputChannels;
|
||||
baseConfig.pOutputChannels = outputChannels;
|
||||
|
||||
result = ma_node_init(pNodeGraph, &baseConfig, pAllocationCallbacks, &pVocoderNode->baseNode);
|
||||
if (result != MA_SUCCESS) {
|
||||
return result;
|
||||
}
|
||||
|
||||
return MA_SUCCESS;
|
||||
}
|
||||
|
||||
MA_API void ma_vocoder_node_uninit(ma_vocoder_node* pVocoderNode, const ma_allocation_callbacks* pAllocationCallbacks)
|
||||
{
|
||||
/* The base node must always be initialized first. */
|
||||
ma_node_uninit(pVocoderNode, pAllocationCallbacks);
|
||||
}
|
||||
45
thirdparty/miniaudio/extras/nodes/ma_vocoder_node/ma_vocoder_node.h
vendored
Normal file
45
thirdparty/miniaudio/extras/nodes/ma_vocoder_node/ma_vocoder_node.h
vendored
Normal file
@@ -0,0 +1,45 @@
|
||||
/* Include ma_vocoder_node.h after miniaudio.h */
|
||||
#ifndef ma_vocoder_node_h
|
||||
#define ma_vocoder_node_h
|
||||
|
||||
#include "voclib.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
|
||||
/*
|
||||
The vocoder node has two inputs and one output. Inputs:
|
||||
|
||||
Input Bus 0: The source/carrier stream.
|
||||
Input Bus 1: The excite/modulator stream.
|
||||
|
||||
The source (input bus 0) and output must have the same channel count, and is restricted to 1 or 2.
|
||||
The excite (input bus 1) is restricted to 1 channel.
|
||||
*/
|
||||
typedef struct
|
||||
{
|
||||
ma_node_config nodeConfig;
|
||||
ma_uint32 channels; /* The number of channels of the source, which will be the same as the output. Must be 1 or 2. The excite bus must always have one channel. */
|
||||
ma_uint32 sampleRate;
|
||||
ma_uint32 bands; /* Defaults to 16. */
|
||||
ma_uint32 filtersPerBand; /* Defaults to 6. */
|
||||
} ma_vocoder_node_config;
|
||||
|
||||
MA_API ma_vocoder_node_config ma_vocoder_node_config_init(ma_uint32 channels, ma_uint32 sampleRate);
|
||||
|
||||
|
||||
typedef struct
|
||||
{
|
||||
ma_node_base baseNode;
|
||||
voclib_instance voclib;
|
||||
} ma_vocoder_node;
|
||||
|
||||
MA_API ma_result ma_vocoder_node_init(ma_node_graph* pNodeGraph, const ma_vocoder_node_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_vocoder_node* pVocoderNode);
|
||||
MA_API void ma_vocoder_node_uninit(ma_vocoder_node* pVocoderNode, const ma_allocation_callbacks* pAllocationCallbacks);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#endif /* ma_vocoder_node_h */
|
||||
148
thirdparty/miniaudio/extras/nodes/ma_vocoder_node/ma_vocoder_node_example.c
vendored
Normal file
148
thirdparty/miniaudio/extras/nodes/ma_vocoder_node/ma_vocoder_node_example.c
vendored
Normal file
@@ -0,0 +1,148 @@
|
||||
/*
|
||||
Demonstrates how to apply an effect to a duplex stream using the node graph system.
|
||||
|
||||
This example applies a vocoder effect to the input stream before outputting it. A custom node
|
||||
called `ma_vocoder_node` is used to achieve the effect which can be found in the extras folder in
|
||||
the miniaudio repository. The vocoder node uses https://github.com/blastbay/voclib to achieve the
|
||||
effect.
|
||||
*/
|
||||
#define MINIAUDIO_IMPLEMENTATION
|
||||
#include "../../../miniaudio.h"
|
||||
#include "ma_vocoder_node.c"
|
||||
|
||||
#include <stdio.h>
|
||||
|
||||
#define DEVICE_FORMAT ma_format_f32 /* Must always be f32 for this example because the node graph system only works with this. */
|
||||
#define DEVICE_CHANNELS 1 /* For this example, always set to 1. */
|
||||
|
||||
static ma_waveform g_sourceData; /* The underlying data source of the excite node. */
|
||||
static ma_audio_buffer_ref g_exciteData; /* The underlying data source of the source node. */
|
||||
static ma_data_source_node g_sourceNode; /* A data source node containing the source data we'll be sending through to the vocoder. This will be routed into the first bus of the vocoder node. */
|
||||
static ma_data_source_node g_exciteNode; /* A data source node containing the excite data we'll be sending through to the vocoder. This will be routed into the second bus of the vocoder node. */
|
||||
static ma_vocoder_node g_vocoderNode; /* The vocoder node. */
|
||||
static ma_node_graph g_nodeGraph;
|
||||
|
||||
void data_callback(ma_device* pDevice, void* pOutput, const void* pInput, ma_uint32 frameCount)
|
||||
{
|
||||
MA_ASSERT(pDevice->capture.format == pDevice->playback.format);
|
||||
MA_ASSERT(pDevice->capture.channels == pDevice->playback.channels);
|
||||
|
||||
/*
|
||||
The node graph system is a pulling style of API. At the lowest level of the chain will be a
|
||||
node acting as a data source for the purpose of delivering the initial audio data. In our case,
|
||||
the data source is our `pInput` buffer. We need to update the underlying data source so that it
|
||||
read data from `pInput`.
|
||||
*/
|
||||
ma_audio_buffer_ref_set_data(&g_exciteData, pInput, frameCount);
|
||||
|
||||
/* With the source buffer configured we can now read directly from the node graph. */
|
||||
ma_node_graph_read_pcm_frames(&g_nodeGraph, pOutput, frameCount, NULL);
|
||||
}
|
||||
|
||||
int main(int argc, char** argv)
|
||||
{
|
||||
ma_result result;
|
||||
ma_device_config deviceConfig;
|
||||
ma_device device;
|
||||
ma_node_graph_config nodeGraphConfig;
|
||||
ma_vocoder_node_config vocoderNodeConfig;
|
||||
ma_data_source_node_config sourceNodeConfig;
|
||||
ma_data_source_node_config exciteNodeConfig;
|
||||
ma_waveform_config waveformConfig;
|
||||
|
||||
deviceConfig = ma_device_config_init(ma_device_type_duplex);
|
||||
deviceConfig.capture.pDeviceID = NULL;
|
||||
deviceConfig.capture.format = DEVICE_FORMAT;
|
||||
deviceConfig.capture.channels = DEVICE_CHANNELS;
|
||||
deviceConfig.capture.shareMode = ma_share_mode_shared;
|
||||
deviceConfig.playback.pDeviceID = NULL;
|
||||
deviceConfig.playback.format = DEVICE_FORMAT;
|
||||
deviceConfig.playback.channels = DEVICE_CHANNELS;
|
||||
deviceConfig.dataCallback = data_callback;
|
||||
result = ma_device_init(NULL, &deviceConfig, &device);
|
||||
if (result != MA_SUCCESS) {
|
||||
return result;
|
||||
}
|
||||
|
||||
|
||||
/* Now we can setup our node graph. */
|
||||
nodeGraphConfig = ma_node_graph_config_init(device.capture.channels);
|
||||
|
||||
result = ma_node_graph_init(&nodeGraphConfig, NULL, &g_nodeGraph);
|
||||
if (result != MA_SUCCESS) {
|
||||
printf("Failed to initialize node graph.");
|
||||
goto done0;
|
||||
}
|
||||
|
||||
|
||||
/* Vocoder. Attached straight to the endpoint. */
|
||||
vocoderNodeConfig = ma_vocoder_node_config_init(device.capture.channels, device.sampleRate);
|
||||
|
||||
result = ma_vocoder_node_init(&g_nodeGraph, &vocoderNodeConfig, NULL, &g_vocoderNode);
|
||||
if (result != MA_SUCCESS) {
|
||||
printf("Failed to initialize vocoder node.");
|
||||
goto done1;
|
||||
}
|
||||
|
||||
ma_node_attach_output_bus(&g_vocoderNode, 0, ma_node_graph_get_endpoint(&g_nodeGraph), 0);
|
||||
|
||||
/* Amplify the volume of the vocoder output because in my testing it is a bit quiet. */
|
||||
ma_node_set_output_bus_volume(&g_vocoderNode, 0, 4);
|
||||
|
||||
|
||||
/* Source/carrier. Attached to input bus 0 of the vocoder node. */
|
||||
waveformConfig = ma_waveform_config_init(device.capture.format, device.capture.channels, device.sampleRate, ma_waveform_type_sawtooth, 1.0, 50);
|
||||
|
||||
result = ma_waveform_init(&waveformConfig, &g_sourceData);
|
||||
if (result != MA_SUCCESS) {
|
||||
printf("Failed to initialize waveform for excite node.");
|
||||
goto done3;
|
||||
}
|
||||
|
||||
sourceNodeConfig = ma_data_source_node_config_init(&g_sourceData);
|
||||
|
||||
result = ma_data_source_node_init(&g_nodeGraph, &sourceNodeConfig, NULL, &g_sourceNode);
|
||||
if (result != MA_SUCCESS) {
|
||||
printf("Failed to initialize excite node.");
|
||||
goto done3;
|
||||
}
|
||||
|
||||
ma_node_attach_output_bus(&g_sourceNode, 0, &g_vocoderNode, 0);
|
||||
|
||||
|
||||
/* Excite/modulator. Attached to input bus 1 of the vocoder node. */
|
||||
result = ma_audio_buffer_ref_init(device.capture.format, device.capture.channels, NULL, 0, &g_exciteData);
|
||||
if (result != MA_SUCCESS) {
|
||||
printf("Failed to initialize audio buffer for source.");
|
||||
goto done2;
|
||||
}
|
||||
|
||||
exciteNodeConfig = ma_data_source_node_config_init(&g_exciteData);
|
||||
|
||||
result = ma_data_source_node_init(&g_nodeGraph, &exciteNodeConfig, NULL, &g_exciteNode);
|
||||
if (result != MA_SUCCESS) {
|
||||
printf("Failed to initialize source node.");
|
||||
goto done2;
|
||||
}
|
||||
|
||||
ma_node_attach_output_bus(&g_exciteNode, 0, &g_vocoderNode, 1);
|
||||
|
||||
|
||||
ma_device_start(&device);
|
||||
|
||||
printf("Press Enter to quit...\n");
|
||||
getchar();
|
||||
|
||||
/* It's important that we stop the device first or else we'll uninitialize the graph from under the device. */
|
||||
ma_device_stop(&device);
|
||||
|
||||
/*done4:*/ ma_data_source_node_uninit(&g_exciteNode, NULL);
|
||||
done3: ma_data_source_node_uninit(&g_sourceNode, NULL);
|
||||
done2: ma_vocoder_node_uninit(&g_vocoderNode, NULL);
|
||||
done1: ma_node_graph_uninit(&g_nodeGraph, NULL);
|
||||
done0: ma_device_uninit(&device);
|
||||
|
||||
(void)argc;
|
||||
(void)argv;
|
||||
return 0;
|
||||
}
|
||||
672
thirdparty/miniaudio/extras/nodes/ma_vocoder_node/voclib.h
vendored
Normal file
672
thirdparty/miniaudio/extras/nodes/ma_vocoder_node/voclib.h
vendored
Normal file
@@ -0,0 +1,672 @@
|
||||
/* Vocoder Library
|
||||
* Voclib version 1.1 - 2019-02-16
|
||||
*
|
||||
* Philip Bennefall - philip@blastbay.com
|
||||
*
|
||||
* See the end of this file for licensing terms.
|
||||
* The filter implementation was derived from public domain code found on musicdsp.org (see the section called "Filters" for more details).
|
||||
*
|
||||
* USAGE
|
||||
*
|
||||
* This is a single-file library. To use it, do something like the following in one .c file.
|
||||
* #define VOCLIB_IMPLEMENTATION
|
||||
* #include "voclib.h"
|
||||
*
|
||||
* You can then #include this file in other parts of the program as you would with any other header file.
|
||||
*/
|
||||
|
||||
#ifndef VOCLIB_H
|
||||
#define VOCLIB_H
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
/* COMPILE-TIME OPTIONS */
|
||||
|
||||
/* The maximum number of bands that the vocoder can be initialized with (lower this number to save memory). */
|
||||
#define VOCLIB_MAX_BANDS 96
|
||||
|
||||
/* The maximum number of filters per vocoder band (lower this number to save memory). */
|
||||
#define VOCLIB_MAX_FILTERS_PER_BAND 8
|
||||
|
||||
/* PUBLIC API */
|
||||
|
||||
typedef struct voclib_instance voclib_instance;
|
||||
|
||||
/* Initialize a voclib_instance structure.
|
||||
*
|
||||
* Call this function to initialize the voclib_instance structure.
|
||||
* bands is the number of bands that the vocoder should use; recommended values are between 12 and 64.
|
||||
* bands must be between 4 and VOCLIB_MAX_BANDS (inclusive).
|
||||
* filters_per_band determines the steapness with which the filterbank divides the signal; a value of 6 is recommended.
|
||||
* filters_per_band must be between 1 and VOCLIB_MAX_FILTERS_PER_BAND (inclusive).
|
||||
* sample_rate is the number of samples per second in hertz, and should be between 8000 and 192000 (inclusive).
|
||||
* carrier_channels is the number of channels that the carrier has, and should be between 1 and 2 (inclusive).
|
||||
* Note: The modulator must always have only one channel.
|
||||
* Returns nonzero (true) on success or 0 (false) on failure.
|
||||
* The function will only fail if one or more of the parameters are invalid.
|
||||
*/
|
||||
int voclib_initialize ( voclib_instance* instance, unsigned char bands, unsigned char filters_per_band, unsigned int sample_rate, unsigned char carrier_channels );
|
||||
|
||||
/* Run the vocoder.
|
||||
*
|
||||
* Call this function continuously to generate your output.
|
||||
* carrier_buffer and modulator_buffer should contain the carrier and modulator signals respectively.
|
||||
* The modulator must always have one channel.
|
||||
* If the carrier has two channels, the samples in carrier_buffer must be interleaved.
|
||||
* output_buffer will be filled with the result, and must be able to hold as many channels as the carrier.
|
||||
* If the carrier has two channels, the output buffer will be filled with interleaved samples.
|
||||
* output_buffer may be the same pointer as either carrier_buffer or modulator_buffer as long as it can hold the same number of channels as the carrier.
|
||||
* The processing is performed in place.
|
||||
* frames specifies the number of sample frames that should be processed.
|
||||
* Returns nonzero (true) on success or 0 (false) on failure.
|
||||
* The function will only fail if one or more of the parameters are invalid.
|
||||
*/
|
||||
int voclib_process ( voclib_instance* instance, const float* carrier_buffer, const float* modulator_buffer, float* output_buffer, unsigned int frames );
|
||||
|
||||
/* Reset the vocoder sample history.
|
||||
*
|
||||
* In order to run smoothly, the vocoder needs to store a few recent samples internally.
|
||||
* This function resets that internal history. This should only be done if you are processing a new stream.
|
||||
* Resetting the history in the middle of a stream will cause clicks.
|
||||
*/
|
||||
void voclib_reset_history ( voclib_instance* instance );
|
||||
|
||||
/* Set the reaction time of the vocoder in seconds.
|
||||
*
|
||||
* The reaction time is the time it takes for the vocoder to respond to a volume change in the modulator.
|
||||
* A value of 0.03 (AKA 30 milliseconds) is recommended for intelligible speech.
|
||||
* Values lower than about 0.02 will make the output sound raspy and unpleasant.
|
||||
* Values above 0.2 or so will make the speech hard to understand, but can be used for special effects.
|
||||
* The value must be between 0.002 and 2.0 (inclusive).
|
||||
* Returns nonzero (true) on success or 0 (false) on failure.
|
||||
* The function will only fail if the parameter is invalid.
|
||||
*/
|
||||
int voclib_set_reaction_time ( voclib_instance* instance, float reaction_time );
|
||||
|
||||
/* Get the current reaction time of the vocoder in seconds. */
|
||||
float voclib_get_reaction_time ( const voclib_instance* instance );
|
||||
|
||||
/* Set the formant shift of the vocoder in octaves.
|
||||
*
|
||||
* Formant shifting changes the size of the speaker's head.
|
||||
* A value of 1.0 leaves the head size unmodified.
|
||||
* Values lower than 1.0 make the head larger, and values above 1.0 make it smaller.
|
||||
* The value must be between 0.25 and 4.0 (inclusive).
|
||||
* Returns nonzero (true) on success or 0 (false) on failure.
|
||||
* The function will only fail if the parameter is invalid.
|
||||
*/
|
||||
int voclib_set_formant_shift ( voclib_instance* instance, float formant_shift );
|
||||
|
||||
/* Get the current formant shift of the vocoder in octaves. */
|
||||
float voclib_get_formant_shift ( const voclib_instance* instance );
|
||||
|
||||
/* INTERNAL STRUCTURES */
|
||||
|
||||
/* this holds the data required to update samples thru a filter. */
|
||||
typedef struct
|
||||
{
|
||||
float a0, a1, a2, a3, a4;
|
||||
float x1, x2, y1, y2;
|
||||
} voclib_biquad;
|
||||
|
||||
/* Stores the state required for our envelope follower. */
|
||||
typedef struct
|
||||
{
|
||||
float coef;
|
||||
float history[4];
|
||||
} voclib_envelope;
|
||||
|
||||
/* Holds a set of filters required for one vocoder band. */
|
||||
typedef struct
|
||||
{
|
||||
voclib_biquad filters[VOCLIB_MAX_FILTERS_PER_BAND];
|
||||
} voclib_band;
|
||||
|
||||
/* The main instance structure. This is the structure that you will create an instance of when using the vocoder. */
|
||||
struct voclib_instance
|
||||
{
|
||||
voclib_band analysis_bands[VOCLIB_MAX_BANDS]; /* The filterbank used for analysis (these are applied to the modulator). */
|
||||
voclib_envelope analysis_envelopes[VOCLIB_MAX_BANDS]; /* The envelopes used to smooth the analysis bands. */
|
||||
voclib_band synthesis_bands[VOCLIB_MAX_BANDS * 2]; /* The filterbank used for synthesis (these are applied to the carrier). The second half of the array is only used for stereo carriers. */
|
||||
float reaction_time; /* In seconds. Higher values make the vocoder respond more slowly to changes in the modulator. */
|
||||
float formant_shift; /* In octaves. 1.0 is unchanged. */
|
||||
unsigned int sample_rate; /* In hertz. */
|
||||
unsigned char bands;
|
||||
unsigned char filters_per_band;
|
||||
unsigned char carrier_channels;
|
||||
};
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#endif /* VOCLIB_H */
|
||||
|
||||
/* IMPLEMENTATION */
|
||||
|
||||
#ifdef VOCLIB_IMPLEMENTATION
|
||||
|
||||
#include <math.h>
|
||||
#include <assert.h>
|
||||
|
||||
#ifdef _MSC_VER
|
||||
#define VOCLIB_INLINE __forceinline
|
||||
#else
|
||||
#ifdef __GNUC__
|
||||
#define VOCLIB_INLINE inline __attribute__((always_inline))
|
||||
#else
|
||||
#define VOCLIB_INLINE inline
|
||||
#endif
|
||||
#endif
|
||||
|
||||
/* Filters
|
||||
*
|
||||
* The filter code below was derived from http://www.musicdsp.org/files/biquad.c. The comment at the top of biquad.c file reads:
|
||||
*
|
||||
* Simple implementation of Biquad filters -- Tom St Denis
|
||||
*
|
||||
* Based on the work
|
||||
|
||||
Cookbook formulae for audio EQ biquad filter coefficients
|
||||
---------------------------------------------------------
|
||||
by Robert Bristow-Johnson, pbjrbj@viconet.com a.k.a. robert@audioheads.com
|
||||
|
||||
* Available on the web at
|
||||
|
||||
http://www.smartelectronix.com/musicdsp/text/filters005.txt
|
||||
|
||||
* Enjoy.
|
||||
*
|
||||
* This work is hereby placed in the public domain for all purposes, whether
|
||||
* commercial, free [as in speech] or educational, etc. Use the code and please
|
||||
* give me credit if you wish.
|
||||
*
|
||||
* Tom St Denis -- http://tomstdenis.home.dhs.org
|
||||
*/
|
||||
|
||||
#ifndef VOCLIB_M_LN2
|
||||
#define VOCLIB_M_LN2 0.69314718055994530942
|
||||
#endif
|
||||
|
||||
#ifndef VOCLIB_M_PI
|
||||
#define VOCLIB_M_PI 3.14159265358979323846
|
||||
#endif
|
||||
|
||||
/* Computes a BiQuad filter on a sample. */
|
||||
static VOCLIB_INLINE float voclib_BiQuad ( float sample, voclib_biquad* b )
|
||||
{
|
||||
float result;
|
||||
|
||||
/* compute the result. */
|
||||
result = b->a0 * sample + b->a1 * b->x1 + b->a2 * b->x2 -
|
||||
b->a3 * b->y1 - b->a4 * b->y2;
|
||||
|
||||
/* shift x1 to x2, sample to x1. */
|
||||
b->x2 = b->x1;
|
||||
b->x1 = sample;
|
||||
|
||||
/* shift y1 to y2, result to y1. */
|
||||
b->y2 = b->y1;
|
||||
b->y1 = result;
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
/* filter types. */
|
||||
enum
|
||||
{
|
||||
VOCLIB_LPF, /* low pass filter */
|
||||
VOCLIB_HPF, /* High pass filter */
|
||||
VOCLIB_BPF, /* band pass filter */
|
||||
VOCLIB_NOTCH, /* Notch Filter */
|
||||
VOCLIB_PEQ, /* Peaking band EQ filter */
|
||||
VOCLIB_LSH, /* Low shelf filter */
|
||||
VOCLIB_HSH /* High shelf filter */
|
||||
};
|
||||
|
||||
/* sets up a BiQuad Filter. */
|
||||
static void voclib_BiQuad_new ( voclib_biquad* b, int type, float dbGain, /* gain of filter */
|
||||
float freq, /* center frequency */
|
||||
float srate, /* sampling rate */
|
||||
float bandwidth ) /* bandwidth in octaves */
|
||||
{
|
||||
float A, omega, sn, cs, alpha, beta;
|
||||
float a0, a1, a2, b0, b1, b2;
|
||||
|
||||
/* setup variables. */
|
||||
A = ( float ) pow ( 10, dbGain / 40.0f );
|
||||
omega = ( float ) ( 2.0 * VOCLIB_M_PI * freq / srate );
|
||||
sn = ( float ) sin ( omega );
|
||||
cs = ( float ) cos ( omega );
|
||||
alpha = sn * ( float ) sinh ( VOCLIB_M_LN2 / 2 * bandwidth * omega / sn );
|
||||
beta = ( float ) sqrt ( A + A );
|
||||
|
||||
switch ( type )
|
||||
{
|
||||
case VOCLIB_LPF:
|
||||
b0 = ( 1 - cs ) / 2;
|
||||
b1 = 1 - cs;
|
||||
b2 = ( 1 - cs ) / 2;
|
||||
a0 = 1 + alpha;
|
||||
a1 = -2 * cs;
|
||||
a2 = 1 - alpha;
|
||||
break;
|
||||
case VOCLIB_HPF:
|
||||
b0 = ( 1 + cs ) / 2;
|
||||
b1 = - ( 1 + cs );
|
||||
b2 = ( 1 + cs ) / 2;
|
||||
a0 = 1 + alpha;
|
||||
a1 = -2 * cs;
|
||||
a2 = 1 - alpha;
|
||||
break;
|
||||
case VOCLIB_BPF:
|
||||
b0 = alpha;
|
||||
b1 = 0;
|
||||
b2 = -alpha;
|
||||
a0 = 1 + alpha;
|
||||
a1 = -2 * cs;
|
||||
a2 = 1 - alpha;
|
||||
break;
|
||||
case VOCLIB_NOTCH:
|
||||
b0 = 1;
|
||||
b1 = -2 * cs;
|
||||
b2 = 1;
|
||||
a0 = 1 + alpha;
|
||||
a1 = -2 * cs;
|
||||
a2 = 1 - alpha;
|
||||
break;
|
||||
case VOCLIB_PEQ:
|
||||
b0 = 1 + ( alpha * A );
|
||||
b1 = -2 * cs;
|
||||
b2 = 1 - ( alpha * A );
|
||||
a0 = 1 + ( alpha / A );
|
||||
a1 = -2 * cs;
|
||||
a2 = 1 - ( alpha / A );
|
||||
break;
|
||||
case VOCLIB_LSH:
|
||||
b0 = A * ( ( A + 1 ) - ( A - 1 ) * cs + beta * sn );
|
||||
b1 = 2 * A * ( ( A - 1 ) - ( A + 1 ) * cs );
|
||||
b2 = A * ( ( A + 1 ) - ( A - 1 ) * cs - beta * sn );
|
||||
a0 = ( A + 1 ) + ( A - 1 ) * cs + beta * sn;
|
||||
a1 = -2 * ( ( A - 1 ) + ( A + 1 ) * cs );
|
||||
a2 = ( A + 1 ) + ( A - 1 ) * cs - beta * sn;
|
||||
break;
|
||||
case VOCLIB_HSH:
|
||||
b0 = A * ( ( A + 1 ) + ( A - 1 ) * cs + beta * sn );
|
||||
b1 = -2 * A * ( ( A - 1 ) + ( A + 1 ) * cs );
|
||||
b2 = A * ( ( A + 1 ) + ( A - 1 ) * cs - beta * sn );
|
||||
a0 = ( A + 1 ) - ( A - 1 ) * cs + beta * sn;
|
||||
a1 = 2 * ( ( A - 1 ) - ( A + 1 ) * cs );
|
||||
a2 = ( A + 1 ) - ( A - 1 ) * cs - beta * sn;
|
||||
break;
|
||||
default:
|
||||
assert ( 0 ); /* Misuse. */
|
||||
return;
|
||||
}
|
||||
|
||||
/* precompute the coefficients. */
|
||||
b->a0 = b0 / a0;
|
||||
b->a1 = b1 / a0;
|
||||
b->a2 = b2 / a0;
|
||||
b->a3 = a1 / a0;
|
||||
b->a4 = a2 / a0;
|
||||
}
|
||||
|
||||
/* Reset the filter history. */
|
||||
static void voclib_BiQuad_reset ( voclib_biquad* b )
|
||||
{
|
||||
b->x1 = b->x2 = 0.0f;
|
||||
b->y1 = b->y2 = 0.0f;
|
||||
}
|
||||
|
||||
/* Envelope follower. */
|
||||
|
||||
static void voclib_envelope_configure ( voclib_envelope* envelope, double time_in_seconds, double sample_rate )
|
||||
{
|
||||
envelope->coef = ( float ) ( pow ( 0.01, 1.0 / ( time_in_seconds * sample_rate ) ) );
|
||||
}
|
||||
|
||||
/* Reset the envelope history. */
|
||||
static void voclib_envelope_reset ( voclib_envelope* envelope )
|
||||
{
|
||||
envelope->history[0] = 0.0f;
|
||||
envelope->history[1] = 0.0f;
|
||||
envelope->history[2] = 0.0f;
|
||||
envelope->history[3] = 0.0f;
|
||||
}
|
||||
|
||||
static VOCLIB_INLINE float voclib_envelope_tick ( voclib_envelope* envelope, float sample )
|
||||
{
|
||||
const float coef = envelope->coef;
|
||||
envelope->history[0] = ( float ) ( ( 1.0f - coef ) * fabs ( sample ) ) + ( coef * envelope->history[0] );
|
||||
envelope->history[1] = ( ( 1.0f - coef ) * envelope->history[0] ) + ( coef * envelope->history[1] );
|
||||
envelope->history[2] = ( ( 1.0f - coef ) * envelope->history[1] ) + ( coef * envelope->history[2] );
|
||||
envelope->history[3] = ( ( 1.0f - coef ) * envelope->history[2] ) + ( coef * envelope->history[3] );
|
||||
return envelope->history[3];
|
||||
}
|
||||
|
||||
/* Initialize the vocoder filterbank. */
|
||||
static void voclib_initialize_filterbank ( voclib_instance* instance, int carrier_only )
|
||||
{
|
||||
unsigned char i;
|
||||
double step;
|
||||
double lastfreq = 0.0;
|
||||
double minfreq = 80.0;
|
||||
double maxfreq = instance->sample_rate;
|
||||
if ( maxfreq > 12000.0 )
|
||||
{
|
||||
maxfreq = 12000.0;
|
||||
}
|
||||
step = pow ( ( maxfreq / minfreq ), ( 1.0 / instance->bands ) );
|
||||
|
||||
for ( i = 0; i < instance->bands; ++i )
|
||||
{
|
||||
unsigned char i2;
|
||||
double bandwidth, nextfreq;
|
||||
double priorfreq = lastfreq;
|
||||
if ( lastfreq > 0.0 )
|
||||
{
|
||||
lastfreq *= step;
|
||||
}
|
||||
else
|
||||
{
|
||||
lastfreq = minfreq;
|
||||
}
|
||||
nextfreq = lastfreq * step;
|
||||
bandwidth = ( nextfreq - priorfreq ) / lastfreq;
|
||||
|
||||
if ( !carrier_only )
|
||||
{
|
||||
voclib_BiQuad_new ( &instance->analysis_bands[i].filters[0], VOCLIB_BPF, 0.0f, ( float ) lastfreq, ( float ) instance->sample_rate, ( float ) bandwidth );
|
||||
for ( i2 = 1; i2 < instance->filters_per_band; ++i2 )
|
||||
{
|
||||
instance->analysis_bands[i].filters[i2].a0 = instance->analysis_bands[i].filters[0].a0;
|
||||
instance->analysis_bands[i].filters[i2].a1 = instance->analysis_bands[i].filters[0].a1;
|
||||
instance->analysis_bands[i].filters[i2].a2 = instance->analysis_bands[i].filters[0].a2;
|
||||
instance->analysis_bands[i].filters[i2].a3 = instance->analysis_bands[i].filters[0].a3;
|
||||
instance->analysis_bands[i].filters[i2].a4 = instance->analysis_bands[i].filters[0].a4;
|
||||
}
|
||||
}
|
||||
|
||||
if ( instance->formant_shift != 1.0f )
|
||||
{
|
||||
voclib_BiQuad_new ( &instance->synthesis_bands[i].filters[0], VOCLIB_BPF, 0.0f, ( float ) ( lastfreq * instance->formant_shift ), ( float ) instance->sample_rate, ( float ) bandwidth );
|
||||
}
|
||||
else
|
||||
{
|
||||
instance->synthesis_bands[i].filters[0].a0 = instance->analysis_bands[i].filters[0].a0;
|
||||
instance->synthesis_bands[i].filters[0].a1 = instance->analysis_bands[i].filters[0].a1;
|
||||
instance->synthesis_bands[i].filters[0].a2 = instance->analysis_bands[i].filters[0].a2;
|
||||
instance->synthesis_bands[i].filters[0].a3 = instance->analysis_bands[i].filters[0].a3;
|
||||
instance->synthesis_bands[i].filters[0].a4 = instance->analysis_bands[i].filters[0].a4;
|
||||
}
|
||||
|
||||
instance->synthesis_bands[i + VOCLIB_MAX_BANDS].filters[0].a0 = instance->synthesis_bands[i].filters[0].a0;
|
||||
instance->synthesis_bands[i + VOCLIB_MAX_BANDS].filters[0].a1 = instance->synthesis_bands[i].filters[0].a1;
|
||||
instance->synthesis_bands[i + VOCLIB_MAX_BANDS].filters[0].a2 = instance->synthesis_bands[i].filters[0].a2;
|
||||
instance->synthesis_bands[i + VOCLIB_MAX_BANDS].filters[0].a3 = instance->synthesis_bands[i].filters[0].a3;
|
||||
instance->synthesis_bands[i + VOCLIB_MAX_BANDS].filters[0].a4 = instance->synthesis_bands[i].filters[0].a4;
|
||||
|
||||
for ( i2 = 1; i2 < instance->filters_per_band; ++i2 )
|
||||
{
|
||||
instance->synthesis_bands[i].filters[i2].a0 = instance->synthesis_bands[i].filters[0].a0;
|
||||
instance->synthesis_bands[i].filters[i2].a1 = instance->synthesis_bands[i].filters[0].a1;
|
||||
instance->synthesis_bands[i].filters[i2].a2 = instance->synthesis_bands[i].filters[0].a2;
|
||||
instance->synthesis_bands[i].filters[i2].a3 = instance->synthesis_bands[i].filters[0].a3;
|
||||
instance->synthesis_bands[i].filters[i2].a4 = instance->synthesis_bands[i].filters[0].a4;
|
||||
|
||||
instance->synthesis_bands[i + VOCLIB_MAX_BANDS].filters[i2].a0 = instance->synthesis_bands[i].filters[0].a0;
|
||||
instance->synthesis_bands[i + VOCLIB_MAX_BANDS].filters[i2].a1 = instance->synthesis_bands[i].filters[0].a1;
|
||||
instance->synthesis_bands[i + VOCLIB_MAX_BANDS].filters[i2].a2 = instance->synthesis_bands[i].filters[0].a2;
|
||||
instance->synthesis_bands[i + VOCLIB_MAX_BANDS].filters[i2].a3 = instance->synthesis_bands[i].filters[0].a3;
|
||||
instance->synthesis_bands[i + VOCLIB_MAX_BANDS].filters[i2].a4 = instance->synthesis_bands[i].filters[0].a4;
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
/* Initialize the vocoder envelopes. */
|
||||
static void voclib_initialize_envelopes ( voclib_instance* instance )
|
||||
{
|
||||
unsigned char i;
|
||||
|
||||
voclib_envelope_configure ( &instance->analysis_envelopes[0], instance->reaction_time, ( double ) instance->sample_rate );
|
||||
for ( i = 1; i < instance->bands; ++i )
|
||||
{
|
||||
instance->analysis_envelopes[i].coef = instance->analysis_envelopes[0].coef;
|
||||
}
|
||||
}
|
||||
|
||||
/* Initialize a vocoder instance.
 *
 * bands            - number of filter bands, 4 .. VOCLIB_MAX_BANDS.
 * filters_per_band - cascaded filters per band, 1 .. VOCLIB_MAX_FILTERS_PER_BAND.
 * sample_rate      - 8000 .. 192000 Hz.
 * carrier_channels - 1 (mono) or 2 (stereo carrier).
 *
 * Returns 1 on success, 0 if any argument is invalid. */
int voclib_initialize ( voclib_instance* instance, unsigned char bands, unsigned char filters_per_band, unsigned int sample_rate, unsigned char carrier_channels )
{
    if ( !instance )
    {
        return 0;
    }

    /* Validate every parameter range before touching the instance. */
    if ( bands < 4 || bands > VOCLIB_MAX_BANDS
         || filters_per_band < 1 || filters_per_band > VOCLIB_MAX_FILTERS_PER_BAND
         || sample_rate < 8000 || sample_rate > 192000
         || carrier_channels < 1 || carrier_channels > 2 )
    {
        return 0;
    }

    /* Defaults: 30 ms reaction time, no formant shift. */
    instance->reaction_time = 0.03f;
    instance->formant_shift = 1.0f;
    instance->sample_rate = sample_rate;
    instance->bands = bands;
    instance->filters_per_band = filters_per_band;
    instance->carrier_channels = carrier_channels;

    voclib_reset_history ( instance );
    voclib_initialize_filterbank ( instance, 0 );
    voclib_initialize_envelopes ( instance );

    return 1;
}
|
||||
|
||||
/* Clear all filter and envelope state so processing starts from silence. */
void voclib_reset_history ( voclib_instance* instance )
{
    unsigned char band;
    unsigned char filter;

    for ( band = 0; band < instance->bands; ++band )
    {
        for ( filter = 0; filter < instance->filters_per_band; ++filter )
        {
            voclib_BiQuad_reset ( &instance->analysis_bands[band].filters[filter] );
            voclib_BiQuad_reset ( &instance->synthesis_bands[band].filters[filter] );
            /* Right-channel synthesis filters live at band + VOCLIB_MAX_BANDS. */
            voclib_BiQuad_reset ( &instance->synthesis_bands[band + VOCLIB_MAX_BANDS].filters[filter] );
        }
        voclib_envelope_reset ( &instance->analysis_envelopes[band] );
    }
}
|
||||
|
||||
/* Run the vocoder over 'frames' sample frames.
 *
 * carrier_buffer   - carrier signal; mono, or interleaved stereo when the
 *                    instance was initialized with carrier_channels == 2.
 * modulator_buffer - modulator signal; always mono.
 * output_buffer    - receives the result; same channel layout as the carrier.
 * frames           - number of sample frames to process; must be nonzero.
 *
 * Per frame and per band: the modulator is band-passed through the band's
 * filter cascade, its amplitude is tracked by the band's envelope follower,
 * and that envelope scales the band-passed carrier, which is accumulated
 * into the output. Filters are stateful, so call order matters.
 *
 * Returns 1 on success, 0 if any buffer is NULL or frames is 0. */
int voclib_process ( voclib_instance* instance, const float* carrier_buffer, const float* modulator_buffer, float* output_buffer, unsigned int frames )
{
    unsigned int i;
    const unsigned char bands = instance->bands;
    const unsigned char filters_per_band = instance->filters_per_band;

    if ( !carrier_buffer )
    {
        return 0;
    }
    if ( !modulator_buffer )
    {
        return 0;
    }
    if ( !output_buffer )
    {
        return 0;
    }
    if ( frames == 0 )
    {
        return 0;
    }

    if ( instance->carrier_channels == 2 )
    {

        /* The carrier has two channels and the modulator has 1. */
        /* 'i' steps over interleaved stereo samples; the mono modulator advances one sample per frame. */
        for ( i = 0; i < frames * 2; i += 2, ++modulator_buffer )
        {
            unsigned char i2;
            float out_left = 0.0f;
            float out_right = 0.0f;

            /* Run the bands in parallel and accumulate the output. */
            for ( i2 = 0; i2 < bands; ++i2 )
            {
                unsigned char i3;
                /* First filter of each cascade; right channel uses the mirrored
                 * filter set at i2 + VOCLIB_MAX_BANDS. */
                float analysis_band = voclib_BiQuad ( *modulator_buffer, &instance->analysis_bands[i2].filters[0] );
                float synthesis_band_left = voclib_BiQuad ( carrier_buffer[i], &instance->synthesis_bands[i2].filters[0] );
                float synthesis_band_right = voclib_BiQuad ( carrier_buffer[i + 1], &instance->synthesis_bands[i2 + VOCLIB_MAX_BANDS].filters[0] );

                /* Run the remaining filters in the cascade to sharpen the band. */
                for ( i3 = 1; i3 < filters_per_band; ++i3 )
                {
                    analysis_band = voclib_BiQuad ( analysis_band, &instance->analysis_bands[i2].filters[i3] );
                    synthesis_band_left = voclib_BiQuad ( synthesis_band_left, &instance->synthesis_bands[i2].filters[i3] );
                    synthesis_band_right = voclib_BiQuad ( synthesis_band_right, &instance->synthesis_bands[i2 + VOCLIB_MAX_BANDS].filters[i3] );
                }
                /* Envelope of the modulator band scales the carrier bands. */
                analysis_band = voclib_envelope_tick ( &instance->analysis_envelopes[i2], analysis_band );
                out_left += synthesis_band_left * analysis_band;
                out_right += synthesis_band_right * analysis_band;
            }
            output_buffer[i] = out_left;
            output_buffer[i + 1] = out_right;
        }

    }
    else
    {

        /* Both the carrier and the modulator have a single channel. */
        for ( i = 0; i < frames; ++i )
        {
            unsigned char i2;
            float out = 0.0f;

            /* Run the bands in parallel and accumulate the output. */
            for ( i2 = 0; i2 < bands; ++i2 )
            {
                unsigned char i3;
                float analysis_band = voclib_BiQuad ( modulator_buffer[i], &instance->analysis_bands[i2].filters[0] );
                float synthesis_band = voclib_BiQuad ( carrier_buffer[i], &instance->synthesis_bands[i2].filters[0] );

                /* Run the remaining filters in the cascade to sharpen the band. */
                for ( i3 = 1; i3 < filters_per_band; ++i3 )
                {
                    analysis_band = voclib_BiQuad ( analysis_band, &instance->analysis_bands[i2].filters[i3] );
                    synthesis_band = voclib_BiQuad ( synthesis_band, &instance->synthesis_bands[i2].filters[i3] );
                }
                /* Envelope of the modulator band scales the carrier band. */
                analysis_band = voclib_envelope_tick ( &instance->analysis_envelopes[i2], analysis_band );
                out += synthesis_band * analysis_band;
            }
            output_buffer[i] = out;
        }
    }

    return 1;
}
|
||||
|
||||
/* Set the envelope reaction time, in seconds.
 * Valid range is [0.002, 2.0]. Returns 1 on success; returns 0 without
 * modifying the instance when the value is out of range or NaN. */
int voclib_set_reaction_time ( voclib_instance* instance, float reaction_time )
{
    /* Negated conjunctive range check so NaN is rejected as well: the
     * original 'x < lo || x > hi' form is false for NaN and let it through,
     * which would poison the envelope coefficients. */
    if ( ! ( reaction_time >= 0.002f && reaction_time <= 2.0f ) )
    {
        return 0;
    }

    instance->reaction_time = reaction_time;
    voclib_initialize_envelopes ( instance );
    return 1;
}
|
||||
|
||||
/* Get the current envelope reaction time, in seconds. */
float voclib_get_reaction_time ( const voclib_instance* instance )
{
    return instance->reaction_time;
}
|
||||
|
||||
/* Set the formant shift factor and rebuild the carrier (synthesis) filters.
 * Valid range is [0.25, 4.0]; 1.0 means no shift. Returns 1 on success;
 * returns 0 without modifying the instance when the value is out of range
 * or NaN. */
int voclib_set_formant_shift ( voclib_instance* instance, float formant_shift )
{
    /* Negated conjunctive range check so NaN is rejected as well: the
     * original 'x < lo || x > hi' form is false for NaN and let it through,
     * which would poison the synthesis filter coefficients. */
    if ( ! ( formant_shift >= 0.25f && formant_shift <= 4.0f ) )
    {
        return 0;
    }

    instance->formant_shift = formant_shift;
    /* carrier_only = 1: only the synthesis filters depend on the shift. */
    voclib_initialize_filterbank ( instance, 1 );
    return 1;
}
|
||||
|
||||
/* Get the current formant shift factor (1.0 means no shift). */
float voclib_get_formant_shift ( const voclib_instance* instance )
{
    return instance->formant_shift;
}
|
||||
|
||||
#endif /* VOCLIB_IMPLEMENTATION */
|
||||
|
||||
/* REVISION HISTORY
|
||||
*
|
||||
* Version 1.1 - 2019-02-16
|
||||
* Breaking change: Introduced a new argument to voclib_initialize called carrier_channels. This allows the vocoder to output stereo natively.
|
||||
* Better assignment of band frequencies when using lower sample rates.
|
||||
* The shell now automatically normalizes the output file to match the peak amplitude in the carrier.
|
||||
* Fixed a memory corruption bug in the shell which would occur in response to an error condition.
|
||||
*
|
||||
* Version 1.0 - 2019-01-27
|
||||
* Initial release.
|
||||
*/
|
||||
|
||||
/* LICENSE
|
||||
|
||||
This software is available under 2 licenses -- choose whichever you prefer.
|
||||
------------------------------------------------------------------------------
|
||||
ALTERNATIVE A - MIT No Attribution License
|
||||
Copyright (c) 2019 Philip Bennefall
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||
this software and associated documentation files (the "Software"), to deal in
|
||||
the Software without restriction, including without limitation the rights to
|
||||
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
|
||||
of the Software, and to permit persons to whom the Software is furnished to do
|
||||
so.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
------------------------------------------------------------------------------
|
||||
ALTERNATIVE B - Public Domain (www.unlicense.org)
|
||||
This is free and unencumbered software released into the public domain.
|
||||
Anyone is free to copy, modify, publish, use, compile, sell, or distribute this
|
||||
software, either in source code form or as a compiled binary, for any purpose,
|
||||
commercial or non-commercial, and by any means.
|
||||
|
||||
In jurisdictions that recognize copyright laws, the author or authors of this
|
||||
software dedicate any and all copyright interest in the software to the public
|
||||
domain. We make this dedication for the benefit of the public at large and to
|
||||
the detriment of our heirs and successors. We intend this dedication to be an
|
||||
overt act of relinquishment in perpetuity of all present and future rights to
|
||||
this software under copyright law.
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
||||
ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
|
||||
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
------------------------------------------------------------------------------
|
||||
*/
|
||||
Reference in New Issue
Block a user