Merge branch 'dev41' into unicode
starg2 committed Apr 3, 2021
2 parents b9bdbcf + 0624fe2 commit d0cdb06
Showing 4 changed files with 86 additions and 44 deletions.
3 changes: 3 additions & 0 deletions CMakeLists.txt
@@ -194,6 +194,9 @@ if(MSVC)
add_compile_options(/QxCORE-AVX512)
else()
add_compile_options(/arch:AVX512)
+if("${CMAKE_C_COMPILER_ID}" MATCHES "Clang")
+add_definitions(-D__PRFCHWINTRIN_H) # Workaround for conflicting _m_prefetchw() definitions
+endif()
endif()
elseif("${TIM41_X86_SIMD_LEVEL}" STREQUAL "AVX2")
if("${CMAKE_C_COMPILER_ID}" MATCHES "Intel")
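Note on the CMakeLists.txt change: pre-defining __PRFCHWINTRIN_H claims the include guard of clang's prfchwintrin.h, so that header's body is skipped and only MSVC's declaration of _m_prefetchw() is seen under clang-cl. A minimal C sketch of the mechanism; the prototypes in the comments are assumptions recalled from the known clang-cl clash, not quoted from either header:

/* Pre-claiming the guard (the -D__PRFCHWINTRIN_H above) keeps clang's
 * definition of _m_prefetchw() out of the translation unit:
 *   MSVC <intrin.h> declares (assumed):     void _m_prefetchw(volatile const void *);
 *   clang prfchwintrin.h defines (assumed): void _m_prefetchw(void *);
 * The two prototypes conflict when both headers get pulled in. */
#define __PRFCHWINTRIN_H   /* same effect as the -D flag */
#include <intrin.h>

int main(void)
{
    int x = 0;
    _m_prefetchw(&x);      /* prefetch with intent to write */
    return 0;
}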
119 changes: 79 additions & 40 deletions timidity/filter.c
@@ -4115,41 +4115,41 @@ static void sample_filter_LPF_BW_batch(int batch_size, FILTER_T **dcs, FILTER_T
__m512d vdb0123_37 = _mm512_permutex2var_pd(vdb01_1357, _mm512_set_epi64(15, 14, 7, 6, 11, 10, 3, 2), vdb23_1357);

_mm256_storeu_pd(&dbs[i][0], _mm512_castpd512_pd256(vdb0123_04));
-_mm_storel_pd(&dbs[i][4], _mm512_castpd512_pd128(vdb4));
+dbs[i][4] = MM512_EXTRACT_F64(vdb4, 0);

if (i + 1 < batch_size) {
_mm256_storeu_pd(&dbs[i + 1][0], _mm512_castpd512_pd256(vdb0123_15));
-_mm_storeh_pd(&dbs[i + 1][4], _mm512_castpd512_pd128(vdb4));
+dbs[i + 1][4] = MM512_EXTRACT_F64(vdb4, 1);
}

if (i + 2 < batch_size) {
_mm256_storeu_pd(&dbs[i + 2][0], _mm512_castpd512_pd256(vdb0123_26));
-_mm_storel_pd(&dbs[i + 2][4], _mm256_extractf128_pd(_mm512_castpd512_pd256(vdb4), 1));
+dbs[i + 2][4] = MM512_EXTRACT_F64(vdb4, 2);
}

if (i + 3 < batch_size) {
_mm256_storeu_pd(&dbs[i + 3][0], _mm512_castpd512_pd256(vdb0123_37));
-_mm_storeh_pd(&dbs[i + 3][4], _mm256_extractf128_pd(_mm512_castpd512_pd256(vdb4), 1));
+dbs[i + 3][4] = MM512_EXTRACT_F64(vdb4, 3);
}

if (i + 4 < batch_size) {
_mm256_storeu_pd(&dbs[i + 4][0], _mm512_extractf64x4_pd(vdb0123_04, 1));
-_mm_storel_pd(&dbs[i + 4][4], _mm512_extractf64x2_pd(vdb4, 2));
+dbs[i + 4][4] = MM512_EXTRACT_F64(vdb4, 4);
}

if (i + 5 < batch_size) {
_mm256_storeu_pd(&dbs[i + 5][0], _mm512_extractf64x4_pd(vdb0123_15, 1));
-_mm_storeh_pd(&dbs[i + 5][4], _mm512_extractf64x2_pd(vdb4, 2));
+dbs[i + 5][4] = MM512_EXTRACT_F64(vdb4, 5);
}

if (i + 6 < batch_size) {
_mm256_storeu_pd(&dbs[i + 6][0], _mm512_extractf64x4_pd(vdb0123_26, 1));
-_mm_storel_pd(&dbs[i + 6][4], _mm512_extractf64x2_pd(vdb4, 3));
+dbs[i + 6][4] = MM512_EXTRACT_F64(vdb4, 6);
}

if (i + 7 < batch_size) {
_mm256_storeu_pd(&dbs[i + 7][0], _mm512_extractf64x4_pd(vdb0123_37, 1));
-_mm_storeh_pd(&dbs[i + 7][4], _mm512_extractf64x2_pd(vdb4, 3));
+dbs[i + 7][4] = MM512_EXTRACT_F64(vdb4, 7);
}
}
}
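The eight tail stores above replace _mm_storel_pd/_mm_storeh_pd on extracted 128-bit lanes with scalar assignments through MM512_EXTRACT_F64 from timidity/optcode.h. A self-contained check that both forms read the same lane (hypothetical values; build with AVX-512 F and DQ enabled):

#include <immintrin.h>
#include <stdio.h>

/* MM512_EXTRACT_F64 as defined in timidity/optcode.h (non-MSVC branch):
   select the 128-bit pair holding the lane, move the lane to slot 0,
   then read it out as a scalar. */
#define MM512_EXTRACT_F64(reg, idx) \
    _mm_cvtsd_f64(_mm_permute_pd(_mm512_extractf64x2_pd((reg), (idx) >> 1), (idx) & 1))

int main(void)
{
    __m512d v = _mm512_set_pd(7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, 0.0);
    double a, b;

    _mm_storeh_pd(&a, _mm512_extractf64x2_pd(v, 2));  /* old form, lane 5 */
    b = MM512_EXTRACT_F64(v, 5);                      /* new form, lane 5 */

    printf("%f %f\n", a, b);  /* expected: 5.000000 5.000000 */
    return 0;
}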
@@ -4232,7 +4241,16 @@ static void sample_filter_LPF_BW_batch(int batch_size, FILTER_T **dcs, FILTER_T
vsps[3] = _mm256_unpackhi_pd(vsp23_02, vsp23_13);

for (int k = 0; k < 4; k++) {
+#if USE_X86_EXT_INTRIN >= 9
__m256d vmask = _mm256_castsi256_pd(_mm256_cvtepi32_epi64(_mm_cmplt_epi32(_mm_set1_epi32(j + k), vcounts)));
+#else
+__m128i vmask32 = _mm_cmplt_epi32(_mm_set1_epi32(j + k), vcounts);
+__m256d vmask = _mm256_insertf128_pd(
+_mm256_castpd128_pd256(_mm_castsi128_pd(_mm_unpacklo_epi32(vmask32, vmask32))),
+_mm_castsi128_pd(_mm_unpackhi_epi32(vmask32, vmask32)),
+1
+);
+#endif

vdb0 = _mm256_blendv_pd(vdb0, vsps[k], vmask);
vdb2 = _mm256_blendv_pd(vdb2, MM256_FMA_PD(vdc0, vdb0, MM256_FMA4_PD(vdc1, vdb1, vdc2, vdb2, vdc3, vdb3, vdc4, vdb4)), vmask);
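The new #if keeps the AVX2 _mm256_cvtepi32_epi64 sign extension only when USE_X86_EXT_INTRIN >= 9; on plain AVX the 256-bit lane mask is assembled from two SSE2 unpacks instead. Because a compare result is all-ones or all-zeros per lane, duplicating each 32-bit lane is bit-identical to sign extension. A minimal sketch verifying the fallback, assuming an AVX-only build:

#include <immintrin.h>
#include <stdio.h>

/* AVX1 fallback from the diff: widen four 32-bit compare masks into a
   256-bit double mask without any AVX2 intrinsic. */
static __m256d widen_mask_avx1(__m128i vmask32)
{
    return _mm256_insertf128_pd(
        _mm256_castpd128_pd256(_mm_castsi128_pd(_mm_unpacklo_epi32(vmask32, vmask32))),
        _mm_castsi128_pd(_mm_unpackhi_epi32(vmask32, vmask32)),
        1);
}

int main(void)
{
    /* lanes, low to high: 2<1 no, 2<9 yes, 2<0 no, 2<5 yes */
    __m128i cmp = _mm_cmplt_epi32(_mm_set1_epi32(2), _mm_set_epi32(5, 0, 9, 1));
    double out[4];
    _mm256_storeu_pd(out, widen_mask_avx1(cmp));
    for (int i = 0; i < 4; i++)  /* an all-ones mask reads back as NaN, which is != 0.0 */
        printf("lane %d: %s\n", i, out[i] != 0.0 ? "set" : "clear");
    /* expected: clear, set, clear, set */
    return 0;
}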
@@ -4281,21 +4290,21 @@ static void sample_filter_LPF_BW_batch(int batch_size, FILTER_T **dcs, FILTER_T
vdb0123_3 = _mm256_permute2f128_pd(vdb01_13, vdb23_13, (3 << 4) | 1);

_mm256_storeu_pd(&dbs[i][0], vdb0123_0);
-_mm_storel_pd(&dbs[i][4], _mm256_castpd256_pd128(vdb4));
+dbs[i][4] = MM256_EXTRACT_F64(vdb4, 0);

if (i + 1 < batch_size) {
_mm256_storeu_pd(&dbs[i + 1][0], vdb0123_1);
-_mm_storeh_pd(&dbs[i + 1][4], _mm256_castpd256_pd128(vdb4));
+dbs[i + 1][4] = MM256_EXTRACT_F64(vdb4, 1);
}

if (i + 2 < batch_size) {
_mm256_storeu_pd(&dbs[i + 2][0], vdb0123_2);
-_mm_storel_pd(&dbs[i + 2][4], _mm256_extractf128_pd(vdb4, 1));
+dbs[i + 2][4] = MM256_EXTRACT_F64(vdb4, 2);
}

if (i + 3 < batch_size) {
_mm256_storeu_pd(&dbs[i + 3][0], vdb0123_3);
-_mm_storeh_pd(&dbs[i + 3][4], _mm256_extractf128_pd(vdb4, 1));
+dbs[i + 3][4] = MM256_EXTRACT_F64(vdb4, 3);
}
}
}
@@ -4308,11 +4317,14 @@ static void sample_filter_LPF_BW_batch(int batch_size, FILTER_T **dcs, FILTER_T
if (i >= batch_size)
break;

+int32 count0 = counts[i];
+int32 count1 = i + 1 < batch_size ? counts[i + 1] : 0;
+
__m128i vcounts = _mm_set_epi32(
0,
0,
-i + 1 < batch_size ? counts[i + 1] : 0,
-counts[i]
+count1,
+count0
);

__m128d vdb01_0 = _mm_loadu_pd(&dbs[i][0]);
@@ -4343,7 +4355,7 @@ static void sample_filter_LPF_BW_batch(int batch_size, FILTER_T **dcs, FILTER_T
dcs[i][4]
);

-int32 count_max = _mm_cvtsi128_si32(_mm_max_epi32(vcounts, _mm_shuffle_epi32(vcounts, 1)));
+int32 count_max = count0 < count1 ? count1 : count0;

for (int32 j = 0; j < count_max; j += 2) {
__m128d vsp01_0 = j < counts[i] ? _mm_loadu_pd(&sps[i][j]) : _mm_setzero_pd();
@@ -4354,7 +4366,8 @@ static void sample_filter_LPF_BW_batch(int batch_size, FILTER_T **dcs, FILTER_T
vsps[1] = _mm_unpackhi_pd(vsp01_0, vsp01_1);

for (int k = 0; k < 2; k++) {
-__m128d vmask = _mm_castsi128_pd(_mm_cvtepi32_epi64(_mm_cmplt_epi32(_mm_set1_epi32(j + k), vcounts)));
+__m128i vmask32 = _mm_cmplt_epi32(_mm_set1_epi32(j + k), vcounts);
+__m128d vmask = _mm_castsi128_pd(_mm_unpacklo_epi32(vmask32, vmask32));

vdb0 = MM_BLENDV_PD(vdb0, vsps[k], vmask);
vdb2 = MM_BLENDV_PD(vdb2, MM_FMA5_PD(vdc0, vdb0, vdc1, vdb1, vdc2, vdb2, vdc3, vdb3, vdc4, vdb4), vmask);
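Two SSE4.1-only intrinsics disappear from this two-voice tail: _mm_max_epi32 gives way to a scalar compare for count_max, and _mm_cvtepi32_epi64 gives way to an SSE2 unpack for the lane mask (duplicating an all-ones/all-zeros 32-bit compare result fills the 64-bit lane exactly as sign extension would). A minimal SSE2-only self-check of the unpack trick, with hypothetical inputs:

#include <emmintrin.h>  /* SSE2 only */
#include <stdio.h>

/* SSE2 mask widening from the diff: duplicate each 32-bit compare result
   so it fills a 64-bit lane. */
static __m128d widen_mask_sse2(__m128i vmask32)
{
    return _mm_castsi128_pd(_mm_unpacklo_epi32(vmask32, vmask32));
}

int main(void)
{
    /* low lane: 3 < 1 is false; next lane: 3 < 9 is true */
    __m128i cmp = _mm_cmplt_epi32(_mm_set1_epi32(3), _mm_set_epi32(0, 0, 9, 1));
    unsigned long long out[2];
    _mm_storeu_si128((__m128i *)out, _mm_castpd_si128(widen_mask_sse2(cmp)));
    printf("%016llx %016llx\n", out[0], out[1]);
    /* expected: 0000000000000000 ffffffffffffffff */
    return 0;
}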
@@ -4386,12 +4399,12 @@ static void sample_filter_LPF_BW_batch(int batch_size, FILTER_T **dcs, FILTER_T

_mm_storeu_pd(&dbs[i][0], vdb01_0);
_mm_storeu_pd(&dbs[i][2], vdb23_0);
-_mm_storel_pd(&dbs[i][4], vdb4);
+dbs[i][4] = MM_EXTRACT_F64(vdb4, 0);

if (i + 1 < batch_size) {
_mm_storeu_pd(&dbs[i + 1][0], vdb01_1);
_mm_storeu_pd(&dbs[i + 1][2], vdb23_1);
-_mm_storeh_pd(&dbs[i + 1][4], vdb4);
+dbs[i + 1][4] = MM_EXTRACT_F64(vdb4, 1);
}
}
}
@@ -4566,42 +4579,42 @@ static void recalc_filter_LPF_BW_batch(int batch_size, FilterCoefficients **fcs)

if (imask & 1) {
_mm256_storeu_pd(&fcs[i]->dc[0], _mm512_castpd512_pd256(vdc0123_04));
-_mm_storel_pd(&fcs[i]->dc[4], _mm512_castpd512_pd128(vdc4));
+fcs[i]->dc[4] = MM512_EXTRACT_F64(vdc4, 0);
}

if (imask & (1 << 1)) {
_mm256_storeu_pd(&fcs[i + 1]->dc[0], _mm512_castpd512_pd256(vdc0123_15));
-_mm_storeh_pd(&fcs[i + 1]->dc[4], _mm512_castpd512_pd128(vdc4));
+fcs[i + 1]->dc[4] = MM512_EXTRACT_F64(vdc4, 1);
}

if (imask & (1 << 2)) {
_mm256_storeu_pd(&fcs[i + 2]->dc[0], _mm512_castpd512_pd256(vdc0123_26));
-_mm_storel_pd(&fcs[i + 2]->dc[4], _mm256_extractf128_pd(_mm512_castpd512_pd256(vdc4), 1));
+fcs[i + 2]->dc[4] = MM512_EXTRACT_F64(vdc4, 2);
}

if (imask & (1 << 3)) {
_mm256_storeu_pd(&fcs[i + 3]->dc[0], _mm512_castpd512_pd256(vdc0123_37));
-_mm_storeh_pd(&fcs[i + 3]->dc[4], _mm256_extractf128_pd(_mm512_castpd512_pd256(vdc4), 1));
+fcs[i + 3]->dc[4] = MM512_EXTRACT_F64(vdc4, 3);
}

if (imask & (1 << 4)) {
_mm256_storeu_pd(&fcs[i + 4]->dc[0], _mm512_extractf64x4_pd(vdc0123_04, 1));
-_mm_storel_pd(&fcs[i + 4]->dc[4], _mm512_extractf64x2_pd(vdc4, 2));
+fcs[i + 4]->dc[4] = MM512_EXTRACT_F64(vdc4, 4);
}

if (imask & (1 << 5)) {
_mm256_storeu_pd(&fcs[i + 5]->dc[0], _mm512_extractf64x4_pd(vdc0123_15, 1));
-_mm_storeh_pd(&fcs[i + 5]->dc[4], _mm512_extractf64x2_pd(vdc4, 2));
+fcs[i + 5]->dc[4] = MM512_EXTRACT_F64(vdc4, 5);
}

if (imask & (1 << 6)) {
_mm256_storeu_pd(&fcs[i + 6]->dc[0], _mm512_extractf64x4_pd(vdc0123_26, 1));
-_mm_storel_pd(&fcs[i + 6]->dc[4], _mm512_extractf64x2_pd(vdc4, 3));
+fcs[i + 6]->dc[4] = MM512_EXTRACT_F64(vdc4, 6);
}

if (imask & (1 << 7)) {
_mm256_storeu_pd(&fcs[i + 7]->dc[0], _mm512_extractf64x4_pd(vdc0123_37, 1));
-_mm_storeh_pd(&fcs[i + 7]->dc[4], _mm512_extractf64x2_pd(vdc4, 3));
+fcs[i + 7]->dc[4] = MM512_EXTRACT_F64(vdc4, 7);
}
}
}
@@ -4736,22 +4749,22 @@ static void recalc_filter_LPF_BW_batch(int batch_size, FilterCoefficients **fcs)

if (imask & 1) {
_mm256_storeu_pd(&fcs[i]->dc[0], vdc0123_0);
-_mm_storel_pd(&fcs[i]->dc[4], _mm256_castpd256_pd128(vdc4));
+fcs[i]->dc[4] = MM256_EXTRACT_F64(vdc4, 0);
}

if (imask & (1 << 1)) {
_mm256_storeu_pd(&fcs[i + 1]->dc[0], vdc0123_1);
-_mm_storeh_pd(&fcs[i + 1]->dc[4], _mm256_castpd256_pd128(vdc4));
+fcs[i + 1]->dc[4] = MM256_EXTRACT_F64(vdc4, 1);
}

if (imask & (1 << 2)) {
_mm256_storeu_pd(&fcs[i + 2]->dc[0], vdc0123_2);
-_mm_storel_pd(&fcs[i + 2]->dc[4], _mm256_extractf128_pd(vdc4, 1));
+fcs[i + 2]->dc[4] = MM256_EXTRACT_F64(vdc4, 2);
}

if (imask & (1 << 3)) {
_mm256_storeu_pd(&fcs[i + 3]->dc[0], vdc0123_3);
-_mm_storeh_pd(&fcs[i + 3]->dc[4], _mm256_extractf128_pd(vdc4, 1));
+fcs[i + 3]->dc[4] = MM256_EXTRACT_F64(vdc4, 3);
}
}
}
@@ -4862,13 +4875,13 @@ static void recalc_filter_LPF_BW_batch(int batch_size, FilterCoefficients **fcs)
if (imask & 1) {
_mm_storeu_pd(&fcs[i]->dc[0], vdc01_0);
_mm_storeu_pd(&fcs[i]->dc[2], vdc23_0);
-_mm_storel_pd(&fcs[i]->dc[4], vdc4);
+fcs[i]->dc[4] = MM_EXTRACT_F64(vdc4, 0);
}

if (imask & (1 << 1)) {
_mm_storeu_pd(&fcs[i + 1]->dc[0], vdc01_1);
_mm_storeu_pd(&fcs[i + 1]->dc[2], vdc23_1);
-_mm_storeh_pd(&fcs[i + 1]->dc[4], vdc4);
+fcs[i + 1]->dc[4] = MM_EXTRACT_F64(vdc4, 1);
}
}
}
@@ -5092,7 +5105,16 @@ static void sample_filter_LPF12_2_batch(int batch_size, FILTER_T **dcs, FILTER_T
vsps[3] = _mm256_unpackhi_pd(vsp23_02, vsp23_13);

for (int k = 0; k < 4; k++) {
+#if USE_X86_EXT_INTRIN >= 9
__m256d vmask = _mm256_castsi256_pd(_mm256_cvtepi32_epi64(_mm_cmplt_epi32(_mm_set1_epi32(j + k), vcounts)));
+#else
+__m128i vmask32 = _mm_cmplt_epi32(_mm_set1_epi32(j + k), vcounts);
+__m256d vmask = _mm256_insertf128_pd(
+_mm256_castpd128_pd256(_mm_castsi128_pd(_mm_unpacklo_epi32(vmask32, vmask32))),
+_mm_castsi128_pd(_mm_unpackhi_epi32(vmask32, vmask32)),
+1
+);
+#endif

vdb1 = _mm256_blendv_pd(vdb1, MM256_FMA_PD(_mm256_sub_pd(vsps[k], vdb0), vdc1, vdb1), vmask);
vdb0 = _mm256_blendv_pd(vdb0, _mm256_add_pd(vdb0, vdb1), vmask);
@@ -5147,11 +5169,14 @@ static void sample_filter_LPF12_2_batch(int batch_size, FILTER_T **dcs, FILTER_T
if (i >= batch_size)
break;

+int32 count0 = counts[i];
+int32 count1 = i + 1 < batch_size ? counts[i + 1] : 0;
+
__m128i vcounts = _mm_set_epi32(
0,
0,
-i + 1 < batch_size ? counts[i + 1] : 0,
-counts[i]
+count1,
+count0
);

__m128d vdb01_0 = _mm_loadu_pd(dbs[i]);
@@ -5166,7 +5191,7 @@ static void sample_filter_LPF12_2_batch(int batch_size, FILTER_T **dcs, FILTER_T
__m128d vdc0 = _mm_unpacklo_pd(vdc01_0, vdc01_1);
__m128d vdc1 = _mm_unpackhi_pd(vdc01_0, vdc01_1);

-int32 count_max = _mm_cvtsi128_si32(_mm_max_epi32(vcounts, _mm_shuffle_epi32(vcounts, 1)));
+int32 count_max = count0 < count1 ? count1 : count0;

for (int32 j = 0; j < count_max; j += 2) {
__m128d vsp01_0 = j < counts[i] ? _mm_loadu_pd(&sps[i][j]) : _mm_setzero_pd();
@@ -5177,7 +5202,8 @@ static void sample_filter_LPF12_2_batch(int batch_size, FILTER_T **dcs, FILTER_T
vsps[1] = _mm_unpackhi_pd(vsp01_0, vsp01_1);

for (int k = 0; k < 2; k++) {
-__m128d vmask = _mm_castsi128_pd(_mm_cvtepi32_epi64(_mm_cmplt_epi32(_mm_set1_epi32(j + k), vcounts)));
+__m128i vmask32 = _mm_cmplt_epi32(_mm_set1_epi32(j + k), vcounts);
+__m128d vmask = _mm_castsi128_pd(_mm_unpacklo_epi32(vmask32, vmask32));

vdb1 = MM_BLENDV_PD(vdb1, MM_FMA_PD(_mm_sub_pd(vsps[k], vdb0), vdc1, vdb1), vmask);
vdb0 = MM_BLENDV_PD(vdb0, _mm_add_pd(vdb0, vdb1), vmask);
@@ -5830,7 +5856,16 @@ static void sample_filter_HPF12_2_batch(int batch_size, FILTER_T **dcs, FILTER_T
vsps[3] = _mm256_unpackhi_pd(vsp23_02, vsp23_13);

for (int k = 0; k < 4; k++) {
+#if USE_X86_EXT_INTRIN >= 9
__m256d vmask = _mm256_castsi256_pd(_mm256_cvtepi32_epi64(_mm_cmplt_epi32(_mm_set1_epi32(j + k), vcounts)));
+#else
+__m128i vmask32 = _mm_cmplt_epi32(_mm_set1_epi32(j + k), vcounts);
+__m256d vmask = _mm256_insertf128_pd(
+_mm256_castpd128_pd256(_mm_castsi128_pd(_mm_unpacklo_epi32(vmask32, vmask32))),
+_mm_castsi128_pd(_mm_unpackhi_epi32(vmask32, vmask32)),
+1
+);
+#endif

vdb1 = _mm256_blendv_pd(vdb1, MM256_FMA_PD(_mm256_sub_pd(vsps[k], vdb0), vdc1, vdb1), vmask);
vdb0 = _mm256_blendv_pd(vdb0, _mm256_add_pd(vdb0, vdb1), vmask);
@@ -5885,11 +5920,14 @@ static void sample_filter_HPF12_2_batch(int batch_size, FILTER_T **dcs, FILTER_T
if (i >= batch_size)
break;

+int32 count0 = counts[i];
+int32 count1 = i + 1 < batch_size ? counts[i + 1] : 0;
+
__m128i vcounts = _mm_set_epi32(
0,
0,
-i + 1 < batch_size ? counts[i + 1] : 0,
-counts[i]
+count1,
+count0
);

__m128d vdb01_0 = _mm_loadu_pd(dbs[i]);
@@ -5904,7 +5942,7 @@ static void sample_filter_HPF12_2_batch(int batch_size, FILTER_T **dcs, FILTER_T
__m128d vdc0 = _mm_unpacklo_pd(vdc01_0, vdc01_1);
__m128d vdc1 = _mm_unpackhi_pd(vdc01_0, vdc01_1);

-int32 count_max = _mm_cvtsi128_si32(_mm_max_epi32(vcounts, _mm_shuffle_epi32(vcounts, 1)));
+int32 count_max = count0 < count1 ? count1 : count0;

for (int32 j = 0; j < count_max; j += 2) {
__m128d vsp01_0 = j < counts[i] ? _mm_loadu_pd(&sps[i][j]) : _mm_setzero_pd();
@@ -5915,7 +5953,8 @@ static void sample_filter_HPF12_2_batch(int batch_size, FILTER_T **dcs, FILTER_T
vsps[1] = _mm_unpackhi_pd(vsp01_0, vsp01_1);

for (int k = 0; k < 2; k++) {
-__m128d vmask = _mm_castsi128_pd(_mm_cvtepi32_epi64(_mm_cmplt_epi32(_mm_set1_epi32(j + k), vcounts)));
+__m128i vmask32 = _mm_cmplt_epi32(_mm_set1_epi32(j + k), vcounts);
+__m128d vmask = _mm_castsi128_pd(_mm_unpacklo_epi32(vmask32, vmask32));

vdb1 = MM_BLENDV_PD(vdb1, MM_FMA_PD(_mm_sub_pd(vsps[k], vdb0), vdc1, vdb1), vmask);
vdb0 = MM_BLENDV_PD(vdb0, _mm_add_pd(vdb0, vdb1), vmask);
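For reference, the per-sample recurrence that the masked LPF12_2/HPF12_2 loops vectorize across voices, reduced to scalar form (names follow the diff; the output write-back is an assumption, since the surrounding lines are not shown in this hunk):

/* Scalar reduction of the SIMD update
     vdb1 = FMA(sp - vdb0, vdc1, vdb1);  vdb0 += vdb1;
   applied per voice for count samples. */
static void lpf12_2_scalar(double dc1, double db[2], double *sp, int count)
{
    for (int j = 0; j < count; j++) {
        db[1] += (sp[j] - db[0]) * dc1;  /* one-pole step toward the input */
        db[0] += db[1];                  /* integrate */
        sp[j] = db[0];                   /* assumed write-back of the filtered sample */
    }
}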
4 changes: 2 additions & 2 deletions timidity/optcode.h
@@ -929,7 +929,7 @@ LSU : Unalignment (use loadu/storeu
#endif

#if (USE_X86_EXT_INTRIN >= 1)
-#if !defined(_MSC_VER) || defined(__clang__)
+#if !defined(_MSC_VER) || defined(__clang__) || defined(__INTEL_COMPILER)
#define MM_EXTRACT_F32(reg,idx) _mm_cvtss_f32(_mm_shuffle_ps(reg,reg,idx))
#define MM_EXTRACT_F64(reg,idx) _mm_cvtsd_f64(_mm_shuffle_pd(reg,reg,idx))
#define MM_EXTRACT_I32(reg,idx) _mm_cvtsi128_si32(_mm_shuffle_epi32(reg,idx))
@@ -943,7 +943,7 @@ LSU : Unalignment (use loadu/storeu
#define MM256_EXTRACT_I32(reg,idx) _mm256_extract_epi32(reg,idx)
#define MM512_EXTRACT_F32(reg,idx) _mm_cvtss_f32(_mm_permute_ps(_mm512_extractf32x4_ps(reg, idx >> 2), idx & 3))
#define MM512_EXTRACT_F64(reg,idx) _mm_cvtsd_f64(_mm_permute_pd(_mm512_extractf64x2_pd(reg, idx >> 1), idx & 1))
-#define MM512_EXTRACT_I32(reg,idx) _mm_cvtsi128_si32(_mm_bsrli_si128(_mm512_extracti32x4_epi32(reg, idx >> 2), (idx & 3) * 4))
+#define MM512_EXTRACT_I32(reg,idx) _mm_cvtsi128_si32(_mm_shuffle_epi32(_mm512_extracti32x4_epi32(reg, idx >> 2), idx & 3))
#else
#define MM_EXTRACT_F32(reg,idx) reg.m128_f32[idx]
#define MM_EXTRACT_F64(reg,idx) reg.m128d_f64[idx]
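The MM512_EXTRACT_I32 rewrite swaps a byte shift (_mm_bsrli_si128 by (idx & 3) * 4) for a lane shuffle; both move the requested element into slot 0, and the shuffle form mirrors the other EXTRACT macros. A quick check of the new definition (AVX-512F build assumed, hypothetical values):

#include <immintrin.h>
#include <stdio.h>

/* New MM512_EXTRACT_I32 from the diff: pick the 128-bit quarter, then
   shuffle the wanted 32-bit lane into position 0. */
#define MM512_EXTRACT_I32(reg, idx) \
    _mm_cvtsi128_si32(_mm_shuffle_epi32(_mm512_extracti32x4_epi32((reg), (idx) >> 2), (idx) & 3))

int main(void)
{
    __m512i v = _mm512_set_epi32(15, 14, 13, 12, 11, 10, 9, 8,
                                 7, 6, 5, 4, 3, 2, 1, 0);
    printf("%d %d %d\n", MM512_EXTRACT_I32(v, 0),
                         MM512_EXTRACT_I32(v, 6),
                         MM512_EXTRACT_I32(v, 15));
    /* expected: 0 6 15 */
    return 0;
}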
4 changes: 2 additions & 2 deletions timidity/playmidi.c
@@ -4709,7 +4709,7 @@ static void note_on_prescan(MidiEvent *ev)
// if(!channel[ch].portamento && channel[ch].portamento_control != -1)
{
int nv;
-int vlist[32];
+int vlist[MAX_VOICES];
Voice *vp;

nv = find_samples(ev, vlist);
@@ -4732,7 +4732,7 @@ static void note_on_prescan(MidiEvent *ev)
static void note_on(MidiEvent *e)
{
int i, nv, v, ch = e->channel, note = MIDI_EVENT_NOTE(e);
-int vlist[32];
+int vlist[MAX_VOICES];
int vid;
int32 random_delay = 0;
int porta_flg = 0;
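Both playmidi.c fixes grow vlist from a fixed 32 entries to MAX_VOICES, the worst case find_samples() can report, closing a potential stack-buffer overflow. A sketch of the failure mode; the MAX_VOICES value and the helper body are stand-ins, not timidity code:

#define MAX_VOICES 256  /* hypothetical value for the sketch */

/* Stand-in for find_samples(): may legitimately report up to MAX_VOICES
   voice indices, writing one entry per voice into vlist. */
static int find_samples_stub(int *vlist)
{
    int nv = 0;
    for (int v = 0; v < MAX_VOICES; v++)
        vlist[nv++] = v;
    return nv;
}

void note_on_sketch(void)
{
    int vlist[MAX_VOICES];  /* was: int vlist[32] -- overflowed whenever nv > 32 */
    int nv = find_samples_stub(vlist);
    (void)nv;
}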
