Wwise Audio Input 插件
Unreal Integration 允许通过 Wwise Audio Input 插件向 Wwise 提供音频输入。请参阅 Wwise SDK 文档中的 Audio Input Source Plug-in(音频输入源插件)章节。为了向 Wwise 提供音频输入,必须从 AkAudioInputComponent
继承类。
AkAudioInputComponent
AkAudioInputComponent
由 AkComponent 派生。它是一个专门用于向 Wwise 提供音频输入的 AkComponent
。为此,需要实现两个关键函数:
virtual bool FillSamplesBuffer(uint32 NumChannels, uint32 NumSamples, float** BufferToFill);
virtual void GetChannelConfig(AkAudioFormat& AudioFormat);
另外还有一个 Blueprint 函数:Post Associated Audio Input Event。它会以此组件作为游戏对象来源,将组件的 AkAudioEvent 连同关联的 AudioSamples 回调和 AudioFormat 回调一并发给 Wwise。
自定义音频输入行为
为了实现自定义音频输入行为,可编写由 AkAudioInputComponent 派生的自定义类。下面的 UAkVoiceInputComponent.h 和 UAkVoiceInputComponent.cpp 示例展示了类如何获取话筒输入并将其发送给 Wwise 声音引擎。
为了在 C++ Unreal 工程中使用这些文件,需要进行一些初始设置。首先,必须链接 AkAudio 和 Unreal Voice 模块。比如,在工程的 Build.cs 文件中将其添加到 PublicDependencyModuleNames:
// Build.cs module rules: links the Wwise "AkAudio" module and Unreal's "Voice"
// module so the voice-input component below can compile and capture microphone data.
public class MyModule : ModuleRules
{
public MyModule(ReadOnlyTargetRules Target) : base(Target)
{
// "AkAudio" provides AkAudioInputComponent; "Voice" provides FVoiceModule/IVoiceCapture.
PublicDependencyModuleNames.AddRange(new string[] { "Core", "CoreUObject", "Engine", "InputCore", "AkAudio", "Voice" });
}
}
在完成上述初始设置后,即可添加下例中的类,通过获取话筒输入并将其发送给 Wwise 来实现自定义音频输入行为。
注意,此处的代码仅用于为读者提供简单的示例。请勿将其用在发售的游戏中!
AkVoiceInputComponent.h:
#pragma once
#include "CoreMinimal.h"
#include "AkAudioInputComponent.h"
#include "Voice.h"
#include "AkVoiceInputComponent.generated.h"
// Example component that captures microphone audio through Unreal's Voice
// module and forwards it to the Wwise sound engine via AkAudioInputComponent.
// NOTE(review): demonstration code from the Wwise documentation; the docs
// explicitly warn it is not intended for use in shipped games.
UCLASS(ClassGroup = Audiokinetic, BlueprintType, hidecategories = (Transform, Rendering, Mobility, LOD, Component, Activation), meta = (BlueprintSpawnableComponent))
class WWISEDEMOGAME_API UAkVoiceInputComponent : public UAkAudioInputComponent
{
GENERATED_BODY()
UAkVoiceInputComponent(const class FObjectInitializer& ObjectInitializer);
// Per-frame capture: drains the voice device and appends to CollectedRawVoiceData.
virtual void TickComponent(float DeltaTime, enum ELevelTick TickType, FActorComponentTickFunction *ThisTickFunction) override;
protected:
// Stops and shuts down the voice capture device when the game object is unregistered.
virtual void PostUnregisterGameObject() override;
// Audio-input callback: converts buffered 16-bit PCM bytes to floats for Wwise.
virtual bool FillSamplesBuffer(uint32 NumChannels, uint32 NumSamples, float** BufferToFill) override;
// Reports the capture format (16 kHz mono) and initializes/starts the capture device.
virtual void GetChannelConfig(AkAudioFormat& AudioFormat) override;
// Interface to the platform microphone capture device (may be invalid).
TSharedPtr<IVoiceCapture> VoiceCapture;
// Staging buffer for bytes read from the device each tick.
TArray<uint8> IncomingRawVoiceData;
// Accumulated raw PCM bytes awaiting consumption by FillSamplesBuffer.
TArray<uint8> CollectedRawVoiceData;
// Set while FillSamplesBuffer reads CollectedRawVoiceData; TickComponent
// spin-waits on it before appending. NOTE(review): minimal synchronization only.
FThreadSafeBool bIsReadingVoiceData = false;
};
AkVoiceInputComponent.cpp:
#include "AkVoiceInputComponent.h"
/**
 * Constructs the component and acquires a voice-capture interface from
 * Unreal's Voice module. The accumulation buffer starts out empty.
 */
UAkVoiceInputComponent::UAkVoiceInputComponent(const class FObjectInitializer& ObjectInitializer) :
	UAkAudioInputComponent(ObjectInitializer)
{
	// Obtain the platform voice-capture device; may be invalid on unsupported platforms.
	VoiceCapture = FVoiceModule::Get().CreateVoiceCapture();
	// Begin with no accumulated microphone data.
	CollectedRawVoiceData.Reset();
}
void UAkVoiceInputComponent::TickComponent(float DeltaTime, enum ELevelTick TickType, FActorComponentTickFunction *ThisTickFunction)
{
Super::TickComponent(DeltaTime, TickType, ThisTickFunction);
if (!VoiceCapture.IsValid())
{
return;
}
uint32 NumAvailableVoiceCaptureBytes = 0;
EVoiceCaptureState::Type CaptureState = VoiceCapture->GetCaptureState(NumAvailableVoiceCaptureBytes);
if (CaptureState == EVoiceCaptureState::Ok && NumAvailableVoiceCaptureBytes > 0)
{
uint32 NumVoiceCaptureBytesReturned = 0;
IncomingRawVoiceData.Reset((int32)NumAvailableVoiceCaptureBytes);
IncomingRawVoiceData.AddDefaulted(NumAvailableVoiceCaptureBytes);
uint64 SampleCounter = 0;
VoiceCapture->GetVoiceData(IncomingRawVoiceData.GetData(), NumAvailableVoiceCaptureBytes, NumVoiceCaptureBytesReturned, SampleCounter);
if (NumVoiceCaptureBytesReturned > 0)
{
while (bIsReadingVoiceData) {}
CollectedRawVoiceData.Append(IncomingRawVoiceData);
}
}
}
/**
 * Called by the Wwise sound engine to pull NumSamples frames of audio per
 * channel. Converts the accumulated 16-bit little-endian PCM bytes from the
 * microphone into normalized floats, zero-padding the tail when fewer
 * samples are buffered than requested, then discards the consumed bytes.
 *
 * Fix vs. original sample: removed four locals that were computed but never
 * read (NumRequiredBytesPerChannel, NumRequiredBytes, RawChannelIndex,
 * RawSampleIndex); behavior is unchanged.
 *
 * @param NumChannels  Number of output channels (mono in this sample's config).
 * @param NumSamples   Number of samples to write per channel.
 * @param BufferToFill Per-channel destination buffers.
 * @return false if no capture device exists, true otherwise.
 */
bool UAkVoiceInputComponent::FillSamplesBuffer(uint32 NumChannels, uint32 NumSamples, float** BufferToFill)
{
	if (!VoiceCapture.IsValid())
	{
		return false;
	}
	// Captured microphone data is 16-bit PCM.
	const uint8 NumBytesPerSample = 2;
	// Signal the game thread (TickComponent) not to append while we read.
	bIsReadingVoiceData = true;
	const int32 NumSamplesAvailable = CollectedRawVoiceData.Num() / NumBytesPerSample;
	// Number of requested samples we cannot satisfy and must fill with silence.
	const uint32 BufferSlack = (uint32)FMath::Max(0, (int32)(NumSamples * NumChannels) - NumSamplesAvailable);
	for (uint32 c = 0; c < NumChannels; ++c)
	{
		for (uint32 s = 0; s < NumSamples; ++s)
		{
			if (s >= (NumSamples - BufferSlack) / NumChannels)
			{
				// Not enough captured data: pad the tail with silence.
				BufferToFill[c][s] = 0.0f;
			}
			else
			{
				// Reassemble the little-endian int16 sample and normalize to [-1, 1].
				// NOTE(review): the byte index ignores the channel offset, so every
				// channel would read the same data - fine for the mono configuration
				// this sample uses (see GetChannelConfig), wrong for multichannel.
				const uint32 RawSampleDataMSBIndex = s * 2 + 1;
				const uint32 RawSampleDataLSBIndex = s * 2;
				const int16 VoiceSample = (int16)((CollectedRawVoiceData[RawSampleDataMSBIndex] << 8) | CollectedRawVoiceData[RawSampleDataLSBIndex]);
				BufferToFill[c][s] = VoiceSample / (float)INT16_MAX;
			}
		}
	}
	// Drop the consumed bytes; zero-padded positions consumed nothing.
	const int32 NumBytesRead = (NumSamples - BufferSlack) * NumBytesPerSample;
	CollectedRawVoiceData.RemoveAt(0, NumBytesRead);
	bIsReadingVoiceData = false;
	return true;
}
/**
 * Reports the audio format this component delivers to Wwise (16 kHz mono)
 * and, when a capture device is available, initializes it with that same
 * format and starts capturing.
 *
 * @param AudioFormat Filled in with the sample rate and channel configuration.
 */
void UAkVoiceInputComponent::GetChannelConfig(AkAudioFormat& AudioFormat)
{
	// This sample captures and delivers 16 kHz mono audio.
	const int VoiceSampleRate = 16000;
	AudioFormat.uSampleRate = VoiceSampleRate;
	AudioFormat.channelConfig.SetStandard(AK_SPEAKER_SETUP_MONO);

	if (!VoiceCapture.IsValid())
	{
		return;
	}

	// Initialize the capture device with the format we just reported.
	const bool bInitOk = VoiceCapture->Init(FString(""), AudioFormat.uSampleRate, AudioFormat.channelConfig.uNumChannels);
	if (!bInitOk)
	{
		UE_LOG(LogTemp, Error, TEXT("Failed to initialize device for voice input!"));
		return;
	}
	VoiceCapture->Start();
}
/**
 * Cleanup hook invoked after this game object is unregistered from Wwise.
 * Stops and shuts down the voice-capture device if one was created.
 */
void UAkVoiceInputComponent::PostUnregisterGameObject()
{
	Super::PostUnregisterGameObject();

	if (!VoiceCapture.IsValid())
	{
		return;
	}
	VoiceCapture->Stop();
	VoiceCapture->Shutdown();
}
在将此类添加到 Unreal 工程后,可创建带有 AkVoiceInputComponent
的自定义 Blueprint 类,并调用 Blueprint 函数 Post Associated Audio Input Event(包含在 AkAudioInputComponent
基类中),以便将话筒数据发送给 Wwise。下图展示了自定义 Blueprint 类的部分 Blueprint(基于 Actor
,并包含名为 AkVoiceInput
的 AkVoiceInputComponent
)。