Version
menu_open
link
Wwise SDK 2019.2.15
AkSimdAvx.h
Go to the documentation of this file.
1 /*******************************************************************************
2 The content of this file includes portions of the AUDIOKINETIC Wwise Technology
3 released in source code form as part of the SDK installer package.
4 
5 Commercial License Usage
6 
7 Licensees holding valid commercial licenses to the AUDIOKINETIC Wwise Technology
8 may use this file in accordance with the end user license agreement provided
9 with the software or, alternatively, in accordance with the terms contained in a
10 written agreement between you and Audiokinetic Inc.
11 
12 Apache License Usage
13 
14 Alternatively, this file may be used under the Apache License, Version 2.0 (the
15 "Apache License"); you may not use this file except in compliance with the
16 Apache License. You may obtain a copy of the Apache License at
17 http://www.apache.org/licenses/LICENSE-2.0.
18 
19 Unless required by applicable law or agreed to in writing, software distributed
20 under the Apache License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
21 OR CONDITIONS OF ANY KIND, either express or implied. See the Apache License for
22 the specific language governing permissions and limitations under the License.
23 
24  Version: <VERSION> Build: <BUILDNUMBER>
25  Copyright (c) <COPYRIGHTYEAR> Audiokinetic Inc.
26 *******************************************************************************/
27 
28 // AkSimdAvx.h
29 
30 /// \file
31 /// AKSIMD - AVX implementation
32 
33 #ifndef _AK_SIMD_AVX_H_
34 #define _AK_SIMD_AVX_H_
35 
38 
39 #if defined(AKSIMD_AVX_SUPPORTED)
40 
41 #include <immintrin.h>
42 
43 ////////////////////////////////////////////////////////////////////////
44 /// @name AKSIMD types
45 //@{
46 
typedef float AKSIMD_F32; ///< 32-bit float
typedef __m256 AKSIMD_V8F32; ///< Vector of 8 32-bit floats
typedef AKSIMD_V8F32 AKSIMD_V8COND; ///< Vector of 8 comparison results (all-ones or all-zeros per lane)
typedef AKSIMD_V8F32 AKSIMD_V8FCOND; ///< Vector of 8 comparison results (float flavor of AKSIMD_V8COND)
51 
52 //@}
53 ////////////////////////////////////////////////////////////////////////
54 
55 
56 ////////////////////////////////////////////////////////////////////////
57 /// @name AKSIMD loading / setting
58 //@{
59 
60 /// Loads eight single-precision floating-point values from memory.
61 /// The address does not need to be 32-byte aligned (see _mm_loadu_ps).
62 /// On every modern x86 processor this performs the same as an aligned load.
63 #define AKSIMD_LOAD_V8F32( __addr__ ) _mm256_loadu_ps( (AkReal32*)(__addr__) )
64 
65 /// Loads a single single-precision, floating-point value, copying it into
66 /// all eight words (see _mm_load1_ps, _mm_load_ps1)
67 #define AKSIMD_LOAD1_V8F32( __scalar__ ) _mm256_broadcast_ss( &(__scalar__) )
68 
69 /// Sets the eight single-precision, floating-point values to in_value (see
70 /// _mm_set1_ps, _mm_set_ps1)
71 #define AKSIMD_SET_V8F32( __scalar__ ) _mm256_set1_ps( (__scalar__) )
72 
73 /// Populates the full vector with the 8 floating point values provided
74 #define AKSIMD_SETV_V8F32( _h, _g, _f, _e, _d, _c, _b, _a ) _mm256_set_ps( (_h), (_g), (_f), (_e), (_d), (_c), (_b), (_a) )
75 
76 /// Sets the eight single-precision, floating-point values to zero (see
77 /// _mm_setzero_ps)
78 #define AKSIMD_SETZERO_V8F32() _mm256_setzero_ps()
79 
80 /// Loads a single-precision, floating-point value into the low word
81 /// and clears the upper seven words.
82 /// r0 := *p; r1...r7 := 0.0 (see _mm_load_ss)
83 #define AKSIMD_LOAD_SS_V8F32( __addr__ ) _mm256_zextps128_ps256(_mm_load_ss( (__addr__) ))
84 
85 /// Loads the two m128i's provided into the output m256i a
86 /// Note that this should be utilized instead of, e.g. adding & utilizing a macro "AKSIMD_INSERT_V8I32(m, i, idx)"
87 /// Because there is no direct corresponding instruction for an insert into 256. You should load into 128s
88 /// and use that. Some compilers do not handle _mm256_insert_epi32 (etc) well, or even include them
89 #define AKSIMD_SET_V2F128( m1, m2) _mm256_setr_m128(m1, m2)
90 
91 #define AKSIMD_INSERT_V2F128( a, m128, idx) _mm256_insertf128_ps(a, m128, idx)
92 
93 //@}
94 ////////////////////////////////////////////////////////////////////////
95 
96 
97 ////////////////////////////////////////////////////////////////////////
98 /// @name AKSIMD storing
99 //@{
100 
/// Stores eight single-precision, floating-point values.
/// The address does not need to be 32-byte aligned (see _mm256_storeu_ps).
/// On every modern x86 processor this performs the same as an aligned store.
#define AKSIMD_STORE_V8F32( __addr__, __vec__ ) _mm256_storeu_ps( (AkReal32*)(__addr__), (__vec__) )

/// Stores the lower single-precision, floating-point value.
/// *p := a0 (see _mm_store_ss; only the low 128-bit lane is touched)
#define AKSIMD_STORE1_V8F32( __addr__, __vec__ ) _mm_store_ss( (AkReal32*)(__addr__), _mm256_castps256_ps128( (__vec__) ) )
109 
110 //@}
111 ////////////////////////////////////////////////////////////////////////
112 
113 ////////////////////////////////////////////////////////////////////////
114 /// @name AKSIMD shuffling
115 //@{
116 
/// Selects eight specific single-precision, floating-point values from
/// a and b, based on the mask i within 128-bit lanes (see _mm256_shuffle_ps)
/// This means that the AKSIMD_SHUFFLE operand still picks 1 of 4 32b components
/// inside of each of the 2 128b lanes.
// Usage: AKSIMD_SHUFFLE_V8F32( vec1, vec2, AKSIMD_SHUFFLE( z, y, x, w ) )
#define AKSIMD_SHUFFLE_V8F32( a, b, i ) _mm256_shuffle_ps( a, b, i )

/// For each 128b lane, Swap the 2 lower floats together and the 2 higher floats together. ( h g f e d c b a -> g h e f c d a b )
#define AKSIMD_SHUFFLE_V8_BADC( __a__ ) AKSIMD_SHUFFLE_V8F32( (__a__), (__a__), AKSIMD_SHUFFLE(2,3,0,1))

/// For each 128b lane, Swap the 2 lower floats with the 2 higher floats. ( h g f e d c b a -> f e h g b a d c )
#define AKSIMD_SHUFFLE_V8_CDAB( __a__ ) AKSIMD_SHUFFLE_V8F32( (__a__), (__a__), AKSIMD_SHUFFLE(1,0,3,2))

/// For each 128b lane, barrel-shift all floats by one. ( h g f e d c b a -> e h g f a d c b )
#define AKSIMD_SHUFFLE_V8_BCDA( __a__ ) AKSIMD_SHUFFLE_V8F32( (__a__), (__a__), AKSIMD_SHUFFLE(0,3,2,1))

/// For each 128b lane, duplicates the odd items into the even items ( h g f e d c b a -> h h f f d d b b )
#define AKSIMD_DUP_V8_ODD(__vv) AKSIMD_SHUFFLE_V8F32(__vv, __vv, AKSIMD_SHUFFLE(3,3,1,1))

/// For each 128b lane, duplicates the even items into the odd items ( h g f e d c b a -> g g e e c c a a )
#define AKSIMD_DUP_V8_EVEN(__vv) AKSIMD_SHUFFLE_V8F32(__vv, __vv, AKSIMD_SHUFFLE(2,2,0,0))

// Macro for selection parameter for AKSIMD_PERMUTE_2X128_V8F32()
// l1 selects the source of the high output lane, l0 of the low output lane
// (0/1 = lanes of a, 2/3 = lanes of b, per _mm256_permute2f128_ps encoding).
#define AKSIMD_PERMUTE128( l1, l0 ) (((l1) << 4) | (l0))

/// For each 128b lane, select one of the four input 128b lanes across a and b,
/// based on the mask i. AKSIMD_SHUFFLE can still be directly used as a control
#define AKSIMD_PERMUTE_2X128_V8F32( a, b, i ) _mm256_permute2f128_ps(a, b, i)

/// Selects the lower of each of the 128b lanes in a and b to be the result ( B A ), ( D C ) -> ( C A )
#define AKSIMD_DEINTERLEAVELANES_LO_V8F32( a, b ) AKSIMD_PERMUTE_2X128_V8F32(a, b, AKSIMD_PERMUTE128(2, 0))

/// Selects the higher of each of the 128b lanes in a and b to be the result ( B A ), ( D C ) -> ( D B )
#define AKSIMD_DEINTERLEAVELANES_HI_V8F32( a, b ) AKSIMD_PERMUTE_2X128_V8F32(a, b, AKSIMD_PERMUTE128(3, 1))
151 
152 
153 //@}
154 ////////////////////////////////////////////////////////////////////////
155 
156 
157 ////////////////////////////////////////////////////////////////////////
158 /// @name AKSIMD arithmetic
159 //@{
160 
/// Subtracts the eight single-precision, floating-point values of
/// a and b (a - b) (see _mm256_sub_ps)
#define AKSIMD_SUB_V8F32( a, b ) _mm256_sub_ps( a, b )

/// Subtracts the lower single-precision, floating-point values of a and b.
/// The upper seven single-precision, floating-point values are passed through from a.
/// r0 := a0 - b0 ; r1...r7 := a1...a7 (see _mm_sub_ss)
/// The integer lane mask keeps only b0; it must be reinterpreted through
/// _mm256_castsi256_ps because _mm256_and_ps takes __m256 operands.
#define AKSIMD_SUB_SS_V8F32( a, b ) _mm256_sub_ps( a, _mm256_and_ps( b, _mm256_castsi256_ps( _mm256_setr_epi32( -1, 0, 0, 0, 0, 0, 0, 0 ) ) ) )

/// Adds the eight single-precision, floating-point values of
/// a and b (see _mm256_add_ps)
#define AKSIMD_ADD_V8F32( a, b ) _mm256_add_ps( a, b )

/// Adds the lower single-precision, floating-point values of a and b; the
/// upper seven single-precision, floating-point values are passed through from a.
/// r0 := a0 + b0; r1...r7 := a1...a7 (see _mm_add_ss)
/// Same cast requirement as AKSIMD_SUB_SS_V8F32 above.
#define AKSIMD_ADD_SS_V8F32( a, b ) _mm256_add_ps( a, _mm256_and_ps( b, _mm256_castsi256_ps( _mm256_setr_epi32( -1, 0, 0, 0, 0, 0, 0, 0 ) ) ) )

/// Multiplies the eight single-precision, floating-point values
/// of a and b (see _mm256_mul_ps)
#define AKSIMD_MUL_V8F32( a, b ) _mm256_mul_ps( a, b )

/// Divides the eight single-precision, floating-point values of a by b (see _mm256_div_ps)
#define AKSIMD_DIV_V8F32( a, b ) _mm256_div_ps( a, b )

/// Multiplies the lower single-precision, floating-point values of
/// a and b; the upper seven single-precision, floating-point values
/// are passed through from a.
/// r0 := a0 * b0; r1...r7 := a1...a7 (see _mm_mul_ss)
/// The blend mask 0xfe replaces b1...b7 with 1.0f so those lanes multiply through unchanged.
#define AKSIMD_MUL_SS_V8F32( a, b ) _mm256_mul_ps( a, _mm256_blend_ps(b, _mm256_set1_ps(1.0f), 0xfe ) )

/// Vector multiply-add operation. r := (a * b) + c
#define AKSIMD_MADD_V8F32( __a__, __b__, __c__ ) _mm256_add_ps( _mm256_mul_ps( (__a__), (__b__) ), (__c__) )
/// Vector multiply-subtract operation. r := (a * b) - c
#define AKSIMD_MSUB_V8F32( __a__, __b__, __c__ ) _mm256_sub_ps( _mm256_mul_ps( (__a__), (__b__) ), (__c__) )

/// Scalar multiply-add operation on the lowest lane; upper seven lanes are passed through from a.
#define AKSIMD_MADD_SS_V8F32( __a__, __b__, __c__ ) AKSIMD_ADD_SS_V8F32( AKSIMD_MUL_SS_V8F32( (__a__), (__b__) ), (__c__) )

/// Computes the minima of the eight single-precision, floating-point
/// values of a and b (see _mm256_min_ps)
#define AKSIMD_MIN_V8F32( a, b ) _mm256_min_ps( a, b )

/// Computes the maximums of the eight single-precision, floating-point
/// values of a and b (see _mm256_max_ps)
#define AKSIMD_MAX_V8F32( a, b ) _mm256_max_ps( a, b )

/// Computes the absolute value (clears the sign bit of each lane)
#define AKSIMD_ABS_V8F32( a ) _mm256_andnot_ps(_mm256_set1_ps(-0.f), a)

/// Changes the sign (flips the sign bit of each lane)
#define AKSIMD_NEG_V8F32( __a__ ) _mm256_xor_ps(_mm256_set1_ps(-0.f), __a__)

/// Vector square root (see _mm256_sqrt_ps)
#define AKSIMD_SQRT_V8F32( __a__ ) _mm256_sqrt_ps( (__a__) )

/// Vector reciprocal square root approximation 1/sqrt(a), or equivalently, sqrt(1/a)
#define AKSIMD_RSQRT_V8F32( __a__ ) _mm256_rsqrt_ps( (__a__) )

/// Vector reciprocal approximation (see _mm256_rcp_ps)
#define AKSIMD_RECIP_V8F32( __a__ ) _mm256_rcp_ps( (__a__) )

/// Vector ceil
#define AKSIMD_CEIL_V8F32( __a__ ) _mm256_ceil_ps( (__a__) )

/// Bitwise logic on the raw float lanes
#define AKSIMD_XOR_V8F32( a, b ) _mm256_xor_ps(a,b)
#define AKSIMD_OR_V8F32( a, b ) _mm256_or_ps(a,b)
#define AKSIMD_AND_V8F32( a, b) _mm256_and_ps(a,b)
#define AKSIMD_NOT_V8F32( a ) _mm256_xor_ps(a,_mm256_castsi256_ps(_mm256_set1_epi32(~0)))
228 
229 /// horizontal add across the entire vector - vVec will be updated to contain the sum of every input element of vVec
230 /// \akwarning
231 /// Don't expect this to be very efficient.
232 /// \endakwarning
233 static AkForceInline AKSIMD_V8F32 AKSIMD_HORIZONTALADD_V8F32(AKSIMD_V8F32 vVec)
234 {
235  __m256 vHaddAb = _mm256_hadd_ps(vVec, vVec);
236  __m256 vHaddAbcd = _mm256_hadd_ps(vHaddAb, vHaddAb);
237  __m256 vHaddEfgh = _mm256_permute2f128_ps(vHaddAbcd, vHaddAbcd, 0x01);
238  __m256 vHaddAll = _mm256_add_ps(vHaddAbcd, vHaddEfgh);
239  return vHaddAll;
240 }
241 
/// Cross-platform SIMD multiplication of 8 complex data elements with interleaved real and imaginary parts.
/// Layout: each 128-bit lane holds two (real, imag) pairs, real in the even slot.
/// cIn1 lanes are denoted (b, a) = (imag, real); cIn2 lanes are (d, c) = (imag, real).
static AkForceInline AKSIMD_V8F32 AKSIMD_COMPLEXMUL_V8F32(const AKSIMD_V8F32 cIn1, const AKSIMD_V8F32 cIn2)
{
 __m256 real1Ext = _mm256_moveldup_ps(cIn1); // reals extended (a3, a3, a2, a2, a1, a1, a0, a0)
 __m256 in2Shuf = _mm256_shuffle_ps(cIn2, cIn2, 0xB1); // shuf multiplicand, real/imag swapped (c3, d3, c2, d2, c1, d1, c0, d0)
 __m256 imag1Ext = _mm256_movehdup_ps(cIn1); // multiplier imag (b3, b3, b2, b2, b1, b1, b0, b0)
 __m256 temp = _mm256_mul_ps(imag1Ext, in2Shuf); // temp (b3c3, b3d3, b2c2, b2d2, b1c1, b1d1, b0c0, b0d0)
 __m256 mul = _mm256_mul_ps(real1Ext, cIn2); // (a3d3, a3c3, a2d2, a2c2, a1d1, a1c1, a0d0, a0c0)
 __m256 out = _mm256_addsub_ps(mul, temp); // addsub alternates -,+ : final (a3d3+b3c3, a3c3-b3d3, a2d2+b2c2, a2c2-b2d2, a1d1+b1c1, a1c1-b1d1, a0d0+b0c0, a0c0-b0d0)
 return out;
}
253 
254 //@}
255 ////////////////////////////////////////////////////////////////////////
256 
257 
258 ////////////////////////////////////////////////////////////////////////
259 /// @name AKSIMD packing / unpacking
260 //@{
261 
262 /// Selects and interleaves the lower two single-precision, floating-point
263 /// values from each 128-bit lane in a and b (see _mm_unpacklo_ps)
264 /// i.e. r0 := a0, r1 := b0, r2 := a1, r3 := b1, r4 := a4, r5 := b4, r6 := a5, r7 := b5
265 #define AKSIMD_UNPACKLO_V8F32( a, b ) _mm256_unpacklo_ps( a, b )
266 
267 /// Selects and interleaves the upper two single-precision, floating-point
268 /// values from each 128-bit lane a and b (see _mm_unpackhi_ps)
269 /// i.e. r0 := a2, r1 := b2, r2 := a3, r3 := b3, r4 := a6, r5 := b6, r6 := a7, r7 := b7
270 #define AKSIMD_UNPACKHI_V8F32( a, b ) _mm256_unpackhi_ps( a, b )
271 
272 //@}
273 ////////////////////////////////////////////////////////////////////////
274 
275 ////////////////////////////////////////////////////////////////////////
276 /// @name AKSIMD vector comparison
277 /// Apart from AKSIMD_SEL_GTEQ_V8F32, these implementations are limited to a few platforms.
278 //@{
279 
/// Control-mask type produced by the comparison operations below
#define AKSIMD_CMP_CTRLMASKV8 __m256

/// Vector "<=" operation (see _mm256_cmp_ps with _CMP_LE_OS)
#define AKSIMD_LTEQ_V8F32( __a__, __b__ ) _mm256_cmp_ps( (__a__), (__b__), _CMP_LE_OS )

/// Vector "<" operation (see _mm256_cmp_ps with _CMP_LT_OS)
#define AKSIMD_LT_V8F32( __a__, __b__ ) _mm256_cmp_ps( (__a__), (__b__), _CMP_LT_OS )

/// Vector ">=" operation (see _mm256_cmp_ps with _CMP_GE_OS)
#define AKSIMD_GTEQ_V8F32( __a__, __b__ ) _mm256_cmp_ps( (__a__), (__b__), _CMP_GE_OS )

/// Vector ">" operation (see _mm256_cmp_ps with _CMP_GT_OS)
#define AKSIMD_GT_V8F32( __a__, __b__ ) _mm256_cmp_ps( (__a__), (__b__), _CMP_GT_OS )

/// Vector "==" operation (see _mm256_cmp_ps with _CMP_EQ_OS)
#define AKSIMD_EQ_V8F32( __a__, __b__ ) _mm256_cmp_ps( (__a__), (__b__), _CMP_EQ_OS )
294 
295 /// Return a when control mask is 0, return b when control mask is non zero, control mask is in c and usually provided by above comparison operations
296 static AkForceInline AKSIMD_V8F32 AKSIMD_VSEL_V8F32( AKSIMD_V8F32 vA, AKSIMD_V8F32 vB, AKSIMD_V8F32 vMask )
297 {
298  return _mm256_blendv_ps(vA, vB, vMask);
299 }
300 
// (cond1 >= cond2) ? b : a.
#define AKSIMD_SEL_GTEQ_V8F32( __a__, __b__, __cond1__, __cond2__ ) AKSIMD_VSEL_V8F32( __a__, __b__, AKSIMD_GTEQ_V8F32( __cond1__, __cond2__ ) )

// a >= 0 ? b : c ... Written, like, you know, the normal C++ operator syntax.
#define AKSIMD_SEL_GTEZ_V8F32( __a__, __b__, __c__ ) AKSIMD_VSEL_V8F32( (__c__), (__b__), AKSIMD_GTEQ_V8F32( __a__, _mm256_set1_ps(0) ) )

// Broadcasts element idx (0-3) within each 128-bit lane; built on the per-lane
// AKSIMD_SHUFFLE_V8F32, so the two lanes splat their own element independently.
#define AKSIMD_SPLAT_V8F32(var, idx) AKSIMD_SHUFFLE_V8F32(var,var, AKSIMD_SHUFFLE(idx,idx,idx,idx))

// Extracts the sign bit of each lane into an 8-bit integer mask (see _mm256_movemask_ps)
#define AKSIMD_MASK_V8F32( __a__ ) _mm256_movemask_ps( __a__ )

// returns true if every bit of the provided vector is zero
#define AKSIMD_TESTZERO_V8I32( __a__ ) (_mm256_testz_si256( __a__, __a__ ) != 0)
#define AKSIMD_TESTZERO_V8F32( __a__ ) AKSIMD_TESTZERO_V8I32(_mm256_castps_si256(__a__))

// returns true if every bit of the provided vector is one
#define AKSIMD_TESTONES_V8I32(__a__) (_mm256_testc_si256(__a__, _mm256_set1_epi32(~0)) != 0)
#define AKSIMD_TESTONES_V8F32( __a__) AKSIMD_TESTONES_V8I32(_mm256_castps_si256(__a__))
318 
319 //@}
320 ////////////////////////////////////////////////////////////////////////
321 
typedef __m256i AKSIMD_V8I32; ///< Vector of 8 32-bit signed integers

typedef AKSIMD_V8I32 AKSIMD_V8ICOND; ///< Vector of 8 integer comparison results

/// Loads a 256-bit integer value (see _mm256_loadu_si256)
/// On every modern x86 processor this performs the same as an aligned load.
#define AKSIMD_LOAD_V8I32( __addr__ ) _mm256_loadu_si256( (__addr__) )

/// Sets the eight 32-bit integer values to zero (see _mm256_setzero_si256)
#define AKSIMD_SETZERO_V8I32() _mm256_setzero_si256()

/// Broadcasts the provided 32-bit scalar value to all eight lanes of the vector
/// (see _mm256_set1_epi32)
#define AKSIMD_SET_V8I32( __scalar__ ) _mm256_set1_epi32( (__scalar__) )

/// Populates the full vector with the 8 values provided
/// Note the argument order: _a ends up in the lowest lane (see _mm256_set_epi32).
#define AKSIMD_SETV_V8I32( _h, _g, _f, _e, _d, _c, _b, _a ) _mm256_set_epi32( (_h), (_g), (_f), (_e), (_d), (_c), (_b), (_a) )

/// Loads the two m128i's provided into the output m256i a
/// Note that this should be utilized instead of, e.g. adding & utilizing a macro "AKSIMD_INSERT_V8I32(m, i, idx)"
/// Because there is no direct corresponding instruction for an insert into 256. You should load into 128s
/// and use that. Some compilers do not handle _mm256_insert_epi32 (etc) well, or even include them
#define AKSIMD_SET_V2I128(m1, m2) _mm256_setr_m128i(m1, m2)

/// Stores eight 32-bit integer values.
/// The address does not need to be 32-byte aligned (see _mm256_storeu_si256).
/// On every modern x86 processor this performs the same as an aligned store.
#define AKSIMD_STORE_V8I32( __addr__, __vec__ ) _mm256_storeu_si256( (__addr__), (__vec__) )
349 
350 ////////////////////////////////////////////////////////////////////////
351 /// @name AKSIMD conversion
352 //@{
353 
/// Converts the eight signed 32-bit integer values of a to single-precision,
/// floating-point values (see _mm256_cvtepi32_ps)
#define AKSIMD_CONVERT_V8I32_TO_V8F32( __vec__ ) _mm256_cvtepi32_ps( (__vec__) )

/// Converts the eight single-precision, floating-point values of a to signed
/// 32-bit integer values by rounding (see _mm256_cvtps_epi32)
#define AKSIMD_CONVERT_V8F32_TO_V8I32( __vec__ ) _mm256_cvtps_epi32( (__vec__) )

/// Converts the eight single-precision, floating-point values of a to signed
/// 32-bit integer values by truncating (see _mm256_cvttps_epi32)
#define AKSIMD_TRUNCATE_V8F32_TO_V8I32( __vec__ ) _mm256_cvttps_epi32( (__vec__) )

/// Converts the eight half-precision floating-point values of vec to
/// eight full-precision floating-point values (see _mm256_cvtph_ps)
/// WARNING: Using this requires F16C support, which is not guaranteed on AVX
#define AKSIMD_CONVERT_V8F16_TO_V8F32( __vec__ ) _mm256_cvtph_ps( (__vec__) )

/// Converts the eight single-precision, floating-point values of vec to
/// eight half-precision floating-point values, rounding to nearest (see _mm256_cvtps_ph)
/// WARNING: Using this requires F16C support, which is not guaranteed on AVX
#define AKSIMD_CONVERT_V8F32_TO_V8F16( __vec__ ) _mm256_cvtps_ph(__vec__, (_MM_FROUND_TO_NEAREST_INT ) )
375 
376 //@}
377 ////////////////////////////////////////////////////////////////////////
378 #endif //_AK_SIMD_AVX_H_
379 #endif
float32_t AKSIMD_F32
32-bit float
Definition: AkSimd.h:71
#define AkForceInline
Definition: AkTypes.h:62

Was this page helpful?

Need Support?

Questions? Problems? Need more info? Contact us, and we can help!

Visit our Support page

Tell us about your project. We're here to help.

Register your project and we'll help you get started with no strings attached!

Get started with Wwise