Partial voice implementation on audio renderer

gdkchan 2018-07-01 17:33:35 -03:00
commit d77c77f3b9
22 changed files with 759 additions and 59 deletions


@@ -0,0 +1,91 @@
namespace Ryujinx.Audio.Adpcm
{
public static class AdpcmDecoder
{
private const int SamplesPerFrame = 14;
private const int BytesPerFrame = 8;
public static short[] Decode(byte[] Buffer, AdpcmDecoderContext Context)
{
int Samples = GetSamplesCountFromSize(Buffer.Length);
short[] Pcm = new short[Samples * 2];
short History0 = Context.History0;
short History1 = Context.History1;
int InputOffset = 0;
int OutputOffset = 0;
while (InputOffset < Buffer.Length)
{
byte Header = Buffer[InputOffset++];
int Scale = 0x800 << (Header & 0xf);
int CoeffIndex = Header >> 4;
short Coeff0 = Context.Coefficients[CoeffIndex * 2 + 0];
short Coeff1 = Context.Coefficients[CoeffIndex * 2 + 1];
int FrameSamples = SamplesPerFrame;
if (FrameSamples > Samples)
{
FrameSamples = Samples;
}
int Value = 0;
for (int SampleIndex = 0; SampleIndex < FrameSamples; SampleIndex++)
{
int Sample;
if ((SampleIndex & 1) == 0)
{
Value = Buffer[InputOffset++];
Sample = (Value << 24) >> 28;
}
else
{
Sample = (Value << 28) >> 28;
}
int Prediction = Coeff0 * History0 + Coeff1 * History1;
Sample = (Sample * Scale + Prediction + 0x400) >> 11;
short SaturatedSample = DspUtils.Saturate(Sample);
History1 = History0;
History0 = SaturatedSample;
Pcm[OutputOffset++] = SaturatedSample;
Pcm[OutputOffset++] = SaturatedSample;
}
Samples -= FrameSamples;
}
Context.History0 = History0;
Context.History1 = History1;
return Pcm;
}
public static long GetSizeFromSamplesCount(int SamplesCount)
{
int Frames = SamplesCount / SamplesPerFrame;
return Frames * BytesPerFrame;
}
public static int GetSamplesCountFromSize(long Size)
{
int Frames = (int)(Size / BytesPerFrame);
return Frames * SamplesPerFrame;
}
}
}
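
For illustration, a minimal sketch of how a caller could drive this decoder, assuming the types above are referenced; the coefficient table and frame bytes are placeholder values, not data from this commit. Each 8-byte frame carries one header byte (low nibble: scale exponent, high nibble: coefficient pair index) followed by 7 bytes holding 14 signed 4-bit samples, and the output is stereo-interleaved 16-bit PCM.

    using Ryujinx.Audio.Adpcm;

    public static class AdpcmDecodeExample
    {
        public static void Run()
        {
            AdpcmDecoderContext Context = new AdpcmDecoderContext();

            //16 placeholder coefficients (8 predictor pairs); real values come from the game.
            Context.Coefficients = new short[16];

            //A single 8-byte frame of placeholder data: header byte + 7 data bytes (14 nibbles).
            byte[] Frame = new byte[8];

            short[] Pcm = AdpcmDecoder.Decode(Frame, Context);

            //14 decoded samples, duplicated to stereo, give 28 output shorts.
            System.Console.WriteLine(Pcm.Length);
        }
    }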


@@ -0,0 +1,10 @@
namespace Ryujinx.Audio.Adpcm
{
public class AdpcmDecoderContext
{
public short[] Coefficients;
public short History0;
public short History1;
}
}

Ryujinx.Audio/DspUtils.cs (new file, 16 lines)

@@ -0,0 +1,16 @@
namespace Ryujinx.Audio.Adpcm
{
public static class DspUtils
{
public static short Saturate(int Value)
{
if (Value > short.MaxValue)
Value = short.MaxValue;
if (Value < short.MinValue)
Value = short.MinValue;
return (short)Value;
}
}
}
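
For illustration, the clamp above is what keeps the 32-bit mix accumulator from wrapping when it is narrowed back to 16-bit PCM; a few sample values (not from the commit):

    using Ryujinx.Audio.Adpcm;

    public static class SaturateExample
    {
        public static void Run()
        {
            System.Console.WriteLine(DspUtils.Saturate(40000));  //32767, clamped to short.MaxValue
            System.Console.WriteLine(DspUtils.Saturate(-40000)); //-32768, clamped to short.MinValue
            System.Console.WriteLine(DspUtils.Saturate(1234));   //1234, already in range
        }
    }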


@@ -14,7 +14,7 @@ namespace Ryujinx.Audio
long[] GetReleasedBuffers(int Track, int MaxCount);
-void AppendBuffer(int Track, long Tag, byte[] Buffer);
void AppendBuffer<T>(int Track, long Tag, T[] Buffer) where T : struct;
void Start(int Track);
void Stop(int Track);
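
The interface change above lets callers queue sample arrays of any element type (the renderer passes short[] mix buffers directly), which means the backend has to convert the element count into a byte size itself. A minimal sketch of that calculation, with a hypothetical helper name (GetByteSize is not part of this commit):

    using System.Runtime.InteropServices;

    public static class BufferSizeExample
    {
        //Hypothetical helper: byte size of a sample array, as a sound backend would need it.
        public static int GetByteSize<T>(T[] Buffer) where T : struct
        {
            return Buffer.Length * Marshal.SizeOf<T>();
        }

        public static void Run()
        {
            short[] Samples = new short[770 * 2]; //one stereo mix buffer from the renderer

            System.Console.WriteLine(GetByteSize(Samples)); //3080 bytes, 2 bytes per short
        }
    }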


@@ -3,6 +3,7 @@ using OpenTK.Audio.OpenAL;
using System;
using System.Collections.Concurrent;
using System.Collections.Generic;
using System.Runtime.InteropServices;
using System.Threading;
namespace Ryujinx.Audio.OpenAL
@@ -309,13 +310,15 @@ namespace Ryujinx.Audio.OpenAL
return null;
}
-public void AppendBuffer(int Track, long Tag, byte[] Buffer)
public void AppendBuffer<T>(int Track, long Tag, T[] Buffer) where T : struct
{
if (Tracks.TryGetValue(Track, out Track Td))
{
int BufferId = Td.AppendBuffer(Tag);
-AL.BufferData(BufferId, Td.Format, Buffer, Buffer.Length, Td.SampleRate);
int Size = Buffer.Length * Marshal.SizeOf<T>();
AL.BufferData<T>(BufferId, Td.Format, Buffer, Size, Td.SampleRate);
AL.SourceQueueBuffer(Td.SourceId, BufferId);
@@ -366,7 +369,5 @@ namespace Ryujinx.Audio.OpenAL
return PlaybackState.Stopped;
}
}
}


@@ -0,0 +1,11 @@
using System.Runtime.InteropServices;
namespace Ryujinx.HLE.OsHle.Services.Aud
{
[StructLayout(LayoutKind.Sequential, Size = 0x10, Pack = 4)]
struct BehaviorIn
{
public long Unknown0;
public long Unknown8;
}
}


@@ -0,0 +1,14 @@
using System.Runtime.InteropServices;
namespace Ryujinx.HLE.OsHle.Services.Aud
{
[StructLayout(LayoutKind.Sequential, Size = 0xc, Pack = 2)]
struct BiquadFilter
{
public short B0;
public short B1;
public short B2;
public short A1;
public short A2;
}
}


@@ -1,7 +1,10 @@
using ChocolArm64.Memory;
using Ryujinx.Audio;
using Ryujinx.Audio.Adpcm;
using Ryujinx.HLE.Logging;
using Ryujinx.HLE.OsHle.Handles;
using Ryujinx.HLE.OsHle.Ipc;
using Ryujinx.HLE.OsHle.Utilities;
using System;
using System.Collections.Generic;
using System.Runtime.InteropServices;
@@ -10,15 +13,36 @@ namespace Ryujinx.HLE.OsHle.Services.Aud
{
class IAudioRenderer : IpcService, IDisposable
{
private const int DeviceChannelsCount = 2;
//This is the number of samples that are going to be appended
//each time that RequestUpdateAudioRenderer is called. Ideally,
//this value should be neither too small (to avoid the player
//starving due to running out of samples) nor too large (to avoid
//high latency).
//Additionally, because ADPCM has 14 samples per frame, this value
//needs to be a multiple of 14.
private const int MixBufferSamplesCount = 770;
private Dictionary<int, ServiceProcessRequest> m_Commands;
public override IReadOnlyDictionary<int, ServiceProcessRequest> Commands => m_Commands;
private KEvent UpdateEvent;
private AMemory Memory;
private IAalOutput AudioOut;
private AudioRendererParameter Params;
-public IAudioRenderer(AudioRendererParameter Params)
private int Track;
private MemoryPoolContext[] MemoryPools;
private VoiceContext[] Voices;
public IAudioRenderer(AMemory Memory, IAalOutput AudioOut, AudioRendererParameter Params)
{
m_Commands = new Dictionary<int, ServiceProcessRequest>()
{
@@ -30,7 +54,43 @@ namespace Ryujinx.HLE.OsHle.Services.Aud
UpdateEvent = new KEvent();
this.Memory = Memory;
this.AudioOut = AudioOut;
this.Params = Params;
Track = AudioOut.OpenTrack(48000, 2, AudioCallback, out _);
MemoryPools = CreateArray<MemoryPoolContext>(Params.EffectCount + Params.VoiceCount * 4);
Voices = CreateArray<VoiceContext>(Params.VoiceCount);
InitializeAudioOut();
}
private void AudioCallback()
{
UpdateEvent.WaitEvent.Set();
}
private static T[] CreateArray<T>(int Size) where T : new()
{
T[] Output = new T[Size];
for (int Index = 0; Index < Size; Index++)
{
Output[Index] = new T();
}
return Output;
}
private void InitializeAudioOut()
{
AppendMixedBuffer(0);
AppendMixedBuffer(1);
AppendMixedBuffer(2);
AudioOut.Start(Track);
}
public long RequestUpdateAudioRenderer(ServiceCtx Context)
@@ -42,58 +102,96 @@
long InputPosition = Context.Request.SendBuff[0].Position;
-UpdateDataHeader InputDataHeader = AMemoryHelper.Read<UpdateDataHeader>(Context.Memory, InputPosition);
-UpdateDataHeader OutputDataHeader = new UpdateDataHeader();
StructReader Reader = new StructReader(Context.Memory, InputPosition);
StructWriter Writer = new StructWriter(Context.Memory, OutputPosition);
UpdateDataHeader InputHeader = Reader.Read<UpdateDataHeader>();
Reader.Read<BehaviorIn>(InputHeader.BehaviorSize);
MemoryPoolIn[] MemoryPoolsIn = Reader.Read<MemoryPoolIn>(InputHeader.MemoryPoolsSize);
for (int Index = 0; Index < MemoryPoolsIn.Length; Index++)
{
MemoryPoolIn MemoryPool = MemoryPoolsIn[Index];
if (MemoryPool.State == MemoryPoolState.RequestAttach)
{
MemoryPools[Index].OutStatus.State = MemoryPoolState.Attached;
}
else if (MemoryPool.State == MemoryPoolState.RequestDetach)
{
MemoryPools[Index].OutStatus.State = MemoryPoolState.Detached;
}
}
Reader.Read<VoiceChannelResourceIn>(InputHeader.VoiceResourceSize);
VoiceIn[] VoicesIn = Reader.Read<VoiceIn>(InputHeader.VoicesSize);
for (int Index = 0; Index < VoicesIn.Length; Index++)
{
VoiceIn Voice = VoicesIn[Index];
Voices[Index].SetAcquireState(Voice.Acquired != 0);
if (Voice.Acquired == 0)
{
continue;
}
if (Voice.FirstUpdate != 0)
{
Voices[Index].AdpcmCtx = GetAdpcmDecoderContext(
Voice.AdpcmCoeffsPosition,
Voice.AdpcmCoeffsSize);
Voices[Index].SampleFormat = Voice.SampleFormat;
Voices[Index].ChannelsCount = Voice.ChannelsCount;
Voices[Index].BufferIndex = Voice.BaseWaveBufferIndex;
}
Voices[Index].WaveBuffers[0] = Voice.WaveBuffer0;
Voices[Index].WaveBuffers[1] = Voice.WaveBuffer1;
Voices[Index].WaveBuffers[2] = Voice.WaveBuffer2;
Voices[Index].WaveBuffers[3] = Voice.WaveBuffer3;
Voices[Index].Volume = Voice.Volume;
Voices[Index].PlayState = Voice.PlayState;
}
UpdateAudio();
UpdateDataHeader OutputHeader = new UpdateDataHeader();
int UpdateHeaderSize = Marshal.SizeOf<UpdateDataHeader>();
-OutputDataHeader.Revision = Params.Revision;
-OutputDataHeader.BehaviorSize = 0xb0;
-OutputDataHeader.MemoryPoolsSize = (Params.EffectCount + Params.VoiceCount * 4) * 0x10;
-OutputDataHeader.VoicesSize = Params.VoiceCount * 0x10;
-OutputDataHeader.EffectsSize = Params.EffectCount * 0x10;
-OutputDataHeader.SinksSize = Params.SinkCount * 0x20;
-OutputDataHeader.PerformanceManagerSize = 0x10;
-OutputDataHeader.TotalSize = UpdateHeaderSize +
-OutputDataHeader.BehaviorSize +
-OutputDataHeader.MemoryPoolsSize +
-OutputDataHeader.VoicesSize +
-OutputDataHeader.EffectsSize +
-OutputDataHeader.SinksSize +
-OutputDataHeader.PerformanceManagerSize;
-AMemoryHelper.Write(Context.Memory, OutputPosition, OutputDataHeader);
OutputHeader.Revision = Params.Revision;
OutputHeader.BehaviorSize = 0xb0;
OutputHeader.MemoryPoolsSize = (Params.EffectCount + Params.VoiceCount * 4) * 0x10;
OutputHeader.VoicesSize = Params.VoiceCount * 0x10;
OutputHeader.EffectsSize = Params.EffectCount * 0x10;
OutputHeader.SinksSize = Params.SinkCount * 0x20;
OutputHeader.PerformanceManagerSize = 0x10;
OutputHeader.TotalSize = UpdateHeaderSize +
OutputHeader.BehaviorSize +
OutputHeader.MemoryPoolsSize +
OutputHeader.VoicesSize +
OutputHeader.EffectsSize +
OutputHeader.SinksSize +
OutputHeader.PerformanceManagerSize;
Writer.Write(OutputHeader);
-int InMemoryPoolOffset = UpdateHeaderSize + InputDataHeader.BehaviorSize;
-int OutMemoryPoolOffset = UpdateHeaderSize;
-for (int Offset = 0; Offset < OutputDataHeader.MemoryPoolsSize; Offset += 0x10, InMemoryPoolOffset += 0x20)
-{
-MemoryPoolState PoolState = (MemoryPoolState)Context.Memory.ReadInt32(InputPosition + InMemoryPoolOffset + 0x10);
-//TODO: Figure out what the other values does.
-if (PoolState == MemoryPoolState.RequestAttach)
-{
-Context.Memory.WriteInt32(OutputPosition + OutMemoryPoolOffset + Offset, (int)MemoryPoolState.Attached);
-}
-else if (PoolState == MemoryPoolState.RequestDetach)
-{
-Context.Memory.WriteInt32(OutputPosition + OutMemoryPoolOffset + Offset, (int)MemoryPoolState.Detached);
-}
-}
-int OutVoicesOffset = OutMemoryPoolOffset + OutputDataHeader.MemoryPoolsSize;
-for (int Offset = 0; Offset < OutputDataHeader.VoicesSize; Offset += 0x10)
-{
-Context.Memory.WriteInt32(OutputPosition + OutVoicesOffset + Offset + 8, (int)VoicePlaybackState.Finished);
-}
-//TODO: We shouldn't be signaling this here.
-UpdateEvent.WaitEvent.Set();
foreach (MemoryPoolContext MemoryPool in MemoryPools)
{
Writer.Write(MemoryPool.OutStatus);
}
foreach (VoiceContext Voice in Voices)
{
Writer.Write(Voice.OutStatus);
}
return 0;
}
@@ -120,6 +218,85 @@
return 0;
}
private AdpcmDecoderContext GetAdpcmDecoderContext(long Position, long Size)
{
if (Size == 0)
{
return null;
}
AdpcmDecoderContext Context = new AdpcmDecoderContext();
Context.Coefficients = new short[Size >> 1];
for (int Offset = 0; Offset < Size; Offset += 2)
{
Context.Coefficients[Offset >> 1] = Memory.ReadInt16(Position + Offset);
}
return Context;
}
private void UpdateAudio()
{
long[] Released = AudioOut.GetReleasedBuffers(Track, 2);
for (int Index = 0; Index < Released.Length; Index++)
{
AppendMixedBuffer(Released[Index]);
}
}
private void AppendMixedBuffer(long Tag)
{
int[] MixBuffer = new int[MixBufferSamplesCount * DeviceChannelsCount];
foreach (VoiceContext Voice in Voices)
{
if (!Voice.Playing)
{
continue;
}
int OutOffset = 0;
int PendingSamples = MixBufferSamplesCount;
while (PendingSamples > 0)
{
short[] Samples = Voice.GetBufferData(Memory, PendingSamples, out int ReturnedSamples);
if (ReturnedSamples == 0)
{
break;
}
PendingSamples -= ReturnedSamples;
for (int Offset = 0; Offset < Samples.Length; Offset++)
{
int Sample = (int)(Samples[Offset] * Voice.Volume);
MixBuffer[OutOffset++] += Sample;
}
}
}
AudioOut.AppendBuffer(Track, Tag, GetFinalBuffer(MixBuffer));
}
private static short[] GetFinalBuffer(int[] Buffer)
{
short[] Output = new short[Buffer.Length];
for (int Offset = 0; Offset < Buffer.Length; Offset++)
{
Output[Offset] = DspUtils.Saturate(Buffer[Offset]);
}
return Output;
}
public void Dispose()
{
Dispose(true);
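
Some arithmetic behind the MixBufferSamplesCount = 770 choice above, as far as it can be read from this commit (the track is opened at 48000 Hz and InitializeAudioOut queues three buffers): 770 is 55 * 14, so every update holds whole ADPCM frames, and each update covers roughly 16 ms of audio, about 48 ms across the three queued buffers. A small sketch of the numbers:

    public static class MixBufferMathExample
    {
        public static void Run()
        {
            const int SampleRate            = 48000; //track sample rate used by the renderer
            const int MixBufferSamplesCount = 770;   //samples appended per update
            const int AdpcmSamplesPerFrame  = 14;
            const int QueuedBufferCount     = 3;     //buffers appended by InitializeAudioOut

            System.Console.WriteLine(MixBufferSamplesCount % AdpcmSamplesPerFrame);                    //0, whole frames only
            System.Console.WriteLine(1000.0 * MixBufferSamplesCount / SampleRate);                     //~16.04 ms per update
            System.Console.WriteLine(1000.0 * MixBufferSamplesCount * QueuedBufferCount / SampleRate); //~48.13 ms queued
        }
    }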


@@ -1,7 +1,7 @@
using Ryujinx.Audio;
using Ryujinx.HLE.Logging;
using Ryujinx.HLE.OsHle.Ipc;
using System.Collections.Generic;
-using System.Runtime.InteropServices;
namespace Ryujinx.HLE.OsHle.Services.Aud
{
@@ -28,7 +28,7 @@ namespace Ryujinx.HLE.OsHle.Services.Aud
public long OpenAudioRenderer(ServiceCtx Context)
{
-//Same buffer as GetAudioRendererWorkBufferSize is receive here.
IAalOutput AudioOut = Context.Ns.AudioOut;
AudioRendererParameter Params = new AudioRendererParameter();
@@ -46,7 +46,7 @@ namespace Ryujinx.HLE.OsHle.Services.Aud
Params.Unknown2C = Context.RequestData.ReadInt32();
Params.Revision = Context.RequestData.ReadInt32();
-MakeObject(Context, new IAudioRenderer(Params));
MakeObject(Context, new IAudioRenderer(Context.Memory, AudioOut, Params));
return 0;
}


@@ -0,0 +1,12 @@
namespace Ryujinx.HLE.OsHle.Services.Aud
{
class MemoryPoolContext
{
public MemoryPoolOut OutStatus;
public MemoryPoolContext()
{
OutStatus.State = MemoryPoolState.Detached;
}
}
}


@@ -0,0 +1,14 @@
using System.Runtime.InteropServices;
namespace Ryujinx.HLE.OsHle.Services.Aud
{
[StructLayout(LayoutKind.Sequential, Size = 0x20, Pack = 4)]
struct MemoryPoolIn
{
public long Address;
public long Size;
public MemoryPoolState State;
public int Unknown14;
public long Unknown18;
}
}


@@ -0,0 +1,12 @@
using System.Runtime.InteropServices;
namespace Ryujinx.HLE.OsHle.Services.Aud
{
[StructLayout(LayoutKind.Sequential, Size = 0x10, Pack = 4)]
struct MemoryPoolOut
{
public MemoryPoolState State;
public int Unknown14;
public long Unknown18;
}
}


@@ -0,0 +1,9 @@
namespace Ryujinx.HLE.OsHle.Services.Aud
{
enum PlayState : byte
{
Playing = 0,
Stopped = 1,
Paused = 2
}
}


@@ -0,0 +1,13 @@
namespace Ryujinx.HLE.OsHle.Services.Aud
{
enum SampleFormat : byte
{
Invalid = 0,
PcmInt8 = 1,
PcmInt16 = 2,
PcmInt24 = 3,
PcmInt32 = 4,
PcmFloat = 5,
Adpcm = 6
}
}


@@ -0,0 +1,10 @@
using System.Runtime.InteropServices;
namespace Ryujinx.HLE.OsHle.Services.Aud
{
[StructLayout(LayoutKind.Sequential, Size = 0x70, Pack = 1)]
struct VoiceChannelResourceIn
{
//???
}
}


@@ -0,0 +1,168 @@
using ChocolArm64.Memory;
using Ryujinx.Audio.Adpcm;
using System;
namespace Ryujinx.HLE.OsHle.Services.Aud
{
class VoiceContext
{
private bool Acquired;
public int ChannelsCount;
public int BufferIndex;
public long Offset;
public float Volume;
public PlayState PlayState;
public SampleFormat SampleFormat;
public AdpcmDecoderContext AdpcmCtx;
public WaveBuffer[] WaveBuffers;
public VoiceOut OutStatus;
public bool Playing => Acquired && PlayState == PlayState.Playing;
public VoiceContext()
{
WaveBuffers = new WaveBuffer[4];
}
public void SetAcquireState(bool NewState)
{
if (Acquired && !NewState)
{
//Release.
Reset();
}
Acquired = NewState;
}
private void Reset()
{
BufferIndex = 0;
Offset = 0;
OutStatus.PlayedSamplesCount = 0;
OutStatus.PlayedWaveBuffersCount = 0;
OutStatus.VoiceDropsCount = 0;
}
public short[] GetBufferData(AMemory Memory, int MaxSamples, out int Samples)
{
WaveBuffer Wb = WaveBuffers[BufferIndex];
long Position = Wb.Position + Offset;
long MaxSize = Wb.Size - Offset;
long Size = GetSizeFromSamplesCount(MaxSamples);
if (Size > MaxSize)
{
Size = MaxSize;
}
Samples = GetSamplesCountFromSize(Size);
OutStatus.PlayedSamplesCount += Samples;
Offset += Size;
if (Offset == Wb.Size)
{
Offset = 0;
if (Wb.Looping == 0)
{
BufferIndex = (BufferIndex + 1) & 3;
}
OutStatus.PlayedWaveBuffersCount++;
}
return Decode(Memory.ReadBytes(Position, Size));
}
private long GetSizeFromSamplesCount(int SamplesCount)
{
if (SampleFormat == SampleFormat.PcmInt16)
{
return SamplesCount * sizeof(short) * ChannelsCount;
}
else if (SampleFormat == SampleFormat.Adpcm)
{
return AdpcmDecoder.GetSizeFromSamplesCount(SamplesCount);
}
else
{
throw new InvalidOperationException();
}
}
private int GetSamplesCountFromSize(long Size)
{
if (SampleFormat == SampleFormat.PcmInt16)
{
return (int)(Size / (sizeof(short) * ChannelsCount));
}
else if (SampleFormat == SampleFormat.Adpcm)
{
return AdpcmDecoder.GetSamplesCountFromSize(Size);
}
else
{
throw new InvalidOperationException();
}
}
private short[] Decode(byte[] Buffer)
{
if (SampleFormat == SampleFormat.PcmInt16)
{
int Samples = GetSamplesCountFromSize(Buffer.Length);
short[] Output = new short[Samples * 2];
if (ChannelsCount == 1)
{
//Duplicate samples to convert the mono stream to stereo.
for (int Offset = 0; Offset < Buffer.Length; Offset += 2)
{
short Sample = GetShort(Buffer, Offset);
Output[Offset + 0] = Sample;
Output[Offset + 1] = Sample;
}
}
else
{
for (int Offset = 0; Offset < Buffer.Length; Offset += 2)
{
Output[Offset >> 1] = GetShort(Buffer, Offset);
}
}
return Output;
}
else if (SampleFormat == SampleFormat.Adpcm)
{
return AdpcmDecoder.Decode(Buffer, AdpcmCtx);
}
else
{
throw new InvalidOperationException();
}
}
private static short GetShort(byte[] Buffer, int Offset)
{
return (short)((Buffer[Offset + 0] << 0) |
(Buffer[Offset + 1] << 8));
}
}
}
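
For illustration, the mono case in Decode above duplicates every sample into both output channels; a standalone sketch of that interleaving with placeholder sample values:

    public static class MonoToStereoExample
    {
        //Same idea as the 1-channel PcmInt16 path in VoiceContext.Decode:
        //each mono sample is written to both the left and the right slot.
        public static short[] DuplicateToStereo(short[] Mono)
        {
            short[] Stereo = new short[Mono.Length * 2];

            for (int Index = 0; Index < Mono.Length; Index++)
            {
                Stereo[Index * 2 + 0] = Mono[Index]; //left
                Stereo[Index * 2 + 1] = Mono[Index]; //right
            }

            return Stereo;
        }

        public static void Run()
        {
            short[] Stereo = DuplicateToStereo(new short[] { 100, -200, 300 }); //placeholder samples

            System.Console.WriteLine(string.Join(", ", Stereo)); //100, 100, -200, -200, 300, 300
        }
    }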


@@ -0,0 +1,49 @@
using System.Runtime.InteropServices;
namespace Ryujinx.HLE.OsHle.Services.Aud
{
[StructLayout(LayoutKind.Sequential, Size = 0x170, Pack = 1)]
struct VoiceIn
{
public int VoiceSlot;
public int NodeId;
public byte FirstUpdate;
public byte Acquired;
public PlayState PlayState;
public SampleFormat SampleFormat;
public int SampleRate;
public int Priority;
public int Unknown14;
public int ChannelsCount;
public float Pitch;
public float Volume;
public BiquadFilter BiquadFilter0;
public BiquadFilter BiquadFilter1;
public int AppendedWaveBuffersCount;
public int BaseWaveBufferIndex;
public int Unknown44;
public long AdpcmCoeffsPosition;
public long AdpcmCoeffsSize;
public int VoiceDestination;
public int Padding;
public WaveBuffer WaveBuffer0;
public WaveBuffer WaveBuffer1;
public WaveBuffer WaveBuffer2;
public WaveBuffer WaveBuffer3;
}
}


@@ -0,0 +1,12 @@
using System.Runtime.InteropServices;
namespace Ryujinx.HLE.OsHle.Services.Aud
{
[StructLayout(LayoutKind.Sequential, Size = 0x10, Pack = 4)]
struct VoiceOut
{
public long PlayedSamplesCount;
public int PlayedWaveBuffersCount;
public int VoiceDropsCount; //?
}
}


@@ -1,9 +0,0 @@
namespace Ryujinx.HLE.OsHle.Services.Aud
{
enum VoicePlaybackState : int
{
Playing = 0,
Finished = 1,
Paused = 2
}
}


@@ -0,0 +1,20 @@
using System.Runtime.InteropServices;
namespace Ryujinx.HLE.OsHle.Services.Aud
{
[StructLayout(LayoutKind.Sequential, Size = 0x38, Pack = 1)]
struct WaveBuffer
{
public long Position;
public long Size;
public int FirstSampleOffset;
public int LastSampleOffset;
public byte Looping;
public byte LastBuffer;
public short Unknown1A;
public int Unknown1C;
public long AdpcmLoopContextPosition;
public long AdpcmLoopContextSize;
public long Unknown30;
}
}


@@ -0,0 +1,45 @@
using ChocolArm64.Memory;
using System.Runtime.InteropServices;
namespace Ryujinx.HLE.OsHle.Utilities
{
class StructReader
{
private AMemory Memory;
public long Position { get; private set; }
public StructReader(AMemory Memory, long Position)
{
this.Memory = Memory;
this.Position = Position;
}
public T Read<T>() where T : struct
{
T Value = AMemoryHelper.Read<T>(Memory, Position);
Position += Marshal.SizeOf<T>();
return Value;
}
public T[] Read<T>(int Size) where T : struct
{
int StructSize = Marshal.SizeOf<T>();
int Count = Size / StructSize;
T[] Output = new T[Count];
for (int Index = 0; Index < Count; Index++)
{
Output[Index] = AMemoryHelper.Read<T>(Memory, Position);
Position += StructSize;
}
return Output;
}
}
}


@@ -0,0 +1,25 @@
using ChocolArm64.Memory;
using System.Runtime.InteropServices;
namespace Ryujinx.HLE.OsHle.Utilities
{
class StructWriter
{
private AMemory Memory;
public long Position { get; private set; }
public StructWriter(AMemory Memory, long Position)
{
this.Memory = Memory;
this.Position = Position;
}
public void Write<T>(T Value) where T : struct
{
AMemoryHelper.Write(Memory, Position, Value);
Position += Marshal.SizeOf<T>();
}
}
}
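
For illustration, a minimal sketch of the read-then-write pattern these two helpers give RequestUpdateAudioRenderer: read a header, read an input array by its advertised byte size, then write output structs back sequentially. ExampleHeader, ExampleEntry and CopyEntries are hypothetical names used only for this sketch; just AMemory, StructReader and StructWriter come from the code above.

    using ChocolArm64.Memory;
    using Ryujinx.HLE.OsHle.Utilities;

    //Hypothetical guest-memory layout: a header followed by a block of 16-byte entries.
    struct ExampleHeader
    {
        public int  EntriesSize; //total byte size of the entries that follow
        public int  Unknown4;
        public long Unknown8;
    }

    struct ExampleEntry
    {
        public long Value0;
        public long Value8;
    }

    static class StructIoExample
    {
        //Reads the blob at InputPosition and echoes its entries back at OutputPosition.
        public static void CopyEntries(AMemory Memory, long InputPosition, long OutputPosition)
        {
            StructReader Reader = new StructReader(Memory, InputPosition);
            StructWriter Writer = new StructWriter(Memory, OutputPosition);

            ExampleHeader Header = Reader.Read<ExampleHeader>();

            //The sized overload consumes Header.EntriesSize bytes and splits them into structs.
            ExampleEntry[] Entries = Reader.Read<ExampleEntry>(Header.EntriesSize);

            foreach (ExampleEntry Entry in Entries)
            {
                Writer.Write(Entry); //each Write advances the output position by the struct size
            }
        }
    }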