#ifdef OJPH_COMPILER_MSVC
  #define likely(x)   (x)
  #define unlikely(x) (x)
#else
  #define likely(x)   __builtin_expect((x), 1)
  #define unlikely(x) __builtin_expect((x), 0)
#endif
// vlc_init_tables, first pass: condense the source entries for the initial
// row of quads (cf. table0.h) into a 2048-entry lookup table
struct vlc_src_table { int c_q, rho, u_off, e_k, e_1, cwd, cwd_len; };
vlc_src_table tbl0[] = {
  // ... table entries elided ...
};
size_t tbl0_size = sizeof(tbl0) / sizeof(vlc_src_table);

si32 pattern_popcnt[16];
for (ui32 i = 0; i < 16; ++i)
  /* body elided: stores the number of set bits of i in pattern_popcnt[i] */;

vlc_src_table* src_tbl = tbl0;
// ... (tgt_tbl points at the table being filled)
size_t tbl_size = tbl0_size;
for (int i = 0; i < 2048; ++i)
{
  // index layout: c_q in bits 8.., rho in bits 4..7, emb pattern in bits 0..3
  int c_q = i >> 8, rho = (i >> 4) & 0xF, emb = i & 0xF;
  if (((emb & rho) != emb) || (rho == 0 && c_q == 0))
    continue;  // impossible combination; leave this entry alone

  vlc_src_table *best_entry = NULL;
  // ... (best_e_k is initialized in elided lines)
  for (size_t j = 0; j < tbl_size; ++j)
    if (src_tbl[j].c_q == c_q && src_tbl[j].rho == rho)
      if (src_tbl[j].u_off == 1)
        if ((emb & src_tbl[j].e_k) == src_tbl[j].e_1)
        {
          // prefer the entry whose e_k pattern covers the most bits
          int ones_count = pattern_popcnt[src_tbl[j].e_k];
          if (ones_count >= best_e_k)
          {
            best_entry = src_tbl + j;
            best_e_k = ones_count;
          }
        }
  // ...
  for (size_t j = 0; j < tbl_size; ++j)
    if (src_tbl[j].c_q == c_q && src_tbl[j].rho == rho)
      if (src_tbl[j].u_off == 0)
        best_entry = src_tbl + j;
  // ...
  tgt_tbl[i] = (ui16)((best_entry->cwd << 8) + (best_entry->cwd_len << 4)
                      /* + remaining low bits elided */);
}
// vlc_init_tables, second pass: same selection, now for the non-initial rows
// of quads (cf. table1.h)
vlc_src_table tbl1[] = {
  // ... table entries elided ...
};
size_t tbl1_size = sizeof(tbl1) / sizeof(vlc_src_table);
// ... (src_tbl and tgt_tbl are repointed at tbl1 and the second target table)
tbl_size = tbl1_size;
for (int i = 0; i < 2048; ++i)
{
  int c_q = i >> 8, rho = (i >> 4) & 0xF, emb = i & 0xF;
  if (((emb & rho) != emb) || (rho == 0 && c_q == 0))
    continue;

  vlc_src_table *best_entry = NULL;
  // ...
  for (size_t j = 0; j < tbl_size; ++j)
    if (src_tbl[j].c_q == c_q && src_tbl[j].rho == rho)
      if (src_tbl[j].u_off == 1)
        if ((emb & src_tbl[j].e_k) == src_tbl[j].e_1)
        {
          int ones_count = pattern_popcnt[src_tbl[j].e_k];
          if (ones_count >= best_e_k)
          {
            best_entry = src_tbl + j;
            best_e_k = ones_count;
          }
        }
  // ...
  for (size_t j = 0; j < tbl_size; ++j)
    if (src_tbl[j].c_q == c_q && src_tbl[j].rho == rho)
      if (src_tbl[j].u_off == 0)
        best_entry = src_tbl + j;
  // ...
  tgt_tbl[i] = (ui16)((best_entry->cwd << 8) + (best_entry->cwd_len << 4)
                      /* + remaining low bits elided */);
}
// ...
for (int i = 5; i < 33; ++i)
{
  // ... body elided ...
}
// MEL encoder state setup
melp->buf_size = buffer_size;
melp->remaining_bits = 8;
// ...

// mel_emit_bit: collect bits into tmp; after writing an 0xFF byte only
// 7 bits of the next byte are used (bit stuffing)
melp->tmp = (melp->tmp << 1) + v;
melp->remaining_bits--;
if (melp->remaining_bits == 0) {
  melp->buf[melp->pos++] = (ui8)melp->tmp;
  melp->remaining_bits = (melp->tmp == 0xFF ? 7 : 8);
  // ...
}

// mel_encode: adaptive run-length state machine; mel_exp[k] controls how many
// events a single emitted bit can represent
static const int mel_exp[13] = {0,0,0,1,1,1,2,2,2,3,3,4,5};
// ...
if (melp->run >= melp->threshold) {
  // ... (a complete run of the expected symbol)
  melp->k = ojph_min(12, melp->k + 1);
  melp->threshold = 1 << mel_exp[melp->k];
  // ...
}
// ... (otherwise the run is terminated and its length is emitted)
int t = mel_exp[melp->k];
// ...
melp->threshold = 1 << mel_exp[melp->k];
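Taken together, these fragments are the MEL side of the encoder: an adaptive run-length coder whose aggressiveness is controlled by k and mel_exp. The following scalar sketch illustrates the mechanism only; the names, the emit_bit callback and the exact update order are illustrative, not the library's code.

// Sketch only: adaptive MEL run-length coding of a binary event stream.
// emit_bit stands in for a bit packer such as mel_emit_bit above.
static void mel_push(int &run, int &k, int &threshold,
                     bool unlikely_event, void (*emit_bit)(int))
{
  static const int mel_exp[13] = {0,0,0,1,1,1,2,2,2,3,3,4,5};
  if (!unlikely_event) {            // common case: extend the current run
    if (++run >= threshold) {       // a complete run of 2^mel_exp[k] events
      emit_bit(1);                  // one bit encodes the whole run
      run = 0;
      k = k < 12 ? k + 1 : 12;      // grow the run length for the next round
      threshold = 1 << mel_exp[k];
    }
  } else {                          // rare event: terminate the partial run
    emit_bit(0);
    for (int t = mel_exp[k]; t > 0; )
      emit_bit((run >> --t) & 1);   // send the partial run length in t bits
    run = 0;
    k = k > 0 ? k - 1 : 0;          // shrink the run length
    threshold = 1 << mel_exp[k];
  }
}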
// vlc_init: the VLC stream grows backwards from the end of its buffer
vlcp->buf = data + buffer_size - 1;
// ...

// vlc_encode: flush completed bytes; the case where the low seven bits are
// all ones (0x7F) is handled separately because of bit stuffing
tmp = vlcp->tmp & 0x7F;
if (likely(tmp != 0x7F)) {
  tmp = vlcp->tmp & 0xFF;
  *(vlcp->buf - vlcp->pos) = tmp;
  // ...
}
// ... (the 0x7F case)
*(vlcp->buf - vlcp->pos) = tmp;
// ...
tmp = vlcp->tmp & 0xFF;
*(vlcp->buf - vlcp->pos) = tmp;
// ...
*(vlcp->buf - vlcp->pos) = 0x7f;
// terminate_mel_vlc: try to fuse the final (partial) MEL byte and the final
// (partial) VLC byte into one shared byte at the MEL/VLC boundary
melp->tmp = melp->tmp << melp->remaining_bits;
int mel_mask = (0xFF << melp->remaining_bits) & 0xFF;  // bits owned by MEL
int vlc_mask = 0xFF >> (8 - vlcp->used_bits);          // bits owned by VLC
if ((mel_mask | vlc_mask) == 0)
  return;  // nothing left to flush
// ...
if (melp->pos >= melp->buf_size)
  OJPH_ERROR(0x00020003, "mel encoder's buffer is full");
// ...
int fuse = melp->tmp | vlcp_tmp;
if ( ( ((fuse ^ melp->tmp) & mel_mask)
     | ((fuse ^ vlcp_tmp) & vlc_mask) ) == 0   // fusing corrupts neither stream
   && (fuse != 0xFF) && vlcp->pos > 1)
{
  melp->buf[melp->pos++] = (ui8)fuse;
  // ...
}
// ... otherwise the two terminating bytes are kept separate; on overflow:
OJPH_ERROR(0x00020004, "vlc encoder's buffer is full");
melp->buf[melp->pos++] = (ui8)melp->tmp;
*(vlcp->buf - vlcp->pos) = (ui8)vlcp_tmp;
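The fusing test can be checked in isolation. A small self-contained example with hypothetical byte values (not taken from the codec) shows why the masked XOR comparison guarantees that both streams read back their own bits from the shared byte:

#include <cassert>

int main() {
  // Hypothetical values, for illustration only (not from the codec).
  int remaining_bits = 3, used_bits = 2;
  int mel_tmp = 0x16 << remaining_bits;            // 5 MEL bits, left-aligned: 0xB0
  int vlc_tmp = 0x01;                              // 2 VLC bits, right-aligned
  int mel_mask = (0xFF << remaining_bits) & 0xFF;  // 0xF8: bits owned by MEL
  int vlc_mask = 0xFF >> (8 - used_bits);          // 0x03: bits owned by VLC
  int fuse = mel_tmp | vlc_tmp;
  // Each stream reads back exactly its own bits from the shared byte.
  assert((((fuse ^ mel_tmp) & mel_mask) | ((fuse ^ vlc_tmp) & vlc_mask)) == 0);
  assert(fuse != 0xFF);
  return 0;
}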
// MagSgn (magnitude-sign) encoder setup
msp->buf_size = buffer_size;
// ...

// ms_encode: append cwd_len bits of cwd, LSB first; after an 0xFF byte only
// 7 bits of the next byte are available (bit stuffing)
if (msp->pos >= msp->buf_size)
  OJPH_ERROR(0x00020005, "magnitude sign encoder's buffer is full");
int t = ojph_min(msp->max_bits - msp->used_bits, cwd_len);
msp->tmp |= ((ui32)(cwd & ((1U << t) - 1))) << msp->used_bits;
// ...
if (msp->used_bits >= msp->max_bits)
{
  msp->buf[msp->pos++] = (ui8)msp->tmp;
  msp->max_bits = (msp->tmp == 0xFF) ? 7 : 8;
  // ...
}

// ms_terminate: pad the last byte with ones and flush it, unless that would
// produce a trailing 0xFF
int t = msp->max_bits - msp->used_bits;
msp->tmp |= (0xFF & ((1U << t) - 1)) << msp->used_bits;
// ...
if (msp->tmp != 0xFF)
{
  if (msp->pos >= msp->buf_size)
    OJPH_ERROR(0x00020006, "magnitude sign encoder's buffer is full");
  msp->buf[msp->pos++] = (ui8)msp->tmp;
  // ...
}
else if (msp->max_bits == 7)
  // ...
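All three byte streams use the same stuffing rule: after an 0xFF byte, only seven bits of the next byte carry payload. A minimal sketch of an LSB-first packer with that rule, assuming illustrative names rather than the library's ms_struct, looks like this:

#include <algorithm>
#include <cstdint>
#include <vector>

struct bit_packer {                // illustrative stand-in, not ms_struct
  std::vector<uint8_t> out;
  uint32_t tmp = 0;                // bits collected for the current byte
  int used_bits = 0;
  int max_bits = 8;                // drops to 7 right after an 0xFF byte

  void push(uint32_t cwd, int cwd_len) {
    while (cwd_len > 0) {
      int t = std::min(max_bits - used_bits, cwd_len);
      tmp |= (cwd & ((1u << t) - 1)) << used_bits;   // LSB-first packing
      used_bits += t; cwd >>= t; cwd_len -= t;
      if (used_bits >= max_bits) {                   // byte is full: flush it
        out.push_back((uint8_t)tmp);
        max_bits = (tmp == 0xFF) ? 7 : 8;            // bit stuffing after 0xFF
        tmp = 0; used_bits = 0;
      }
    }
  }
};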
#define ZERO _mm256_setzero_si256()
#define ONE  _mm256_set1_epi32(1)

// avx2_lzcnt_epi32: per-lane leading-zero count via the float-exponent trick
v = _mm256_andnot_si256(_mm256_srli_epi32(v, 8), v);  // keep the conversion from rounding up
v = _mm256_castps_si256(_mm256_cvtepi32_ps(v));       // int -> float; exponent encodes floor(log2)
v = _mm256_srli_epi32(v, 23);                         // isolate the biased exponent
v = _mm256_subs_epu16(_mm256_set1_epi32(158), v);     // 158 - exp = number of leading zeros
v = _mm256_min_epi16(v, _mm256_set1_epi32(32));       // clamp the v == 0 case to 32

// avx2_cmpneq_epi32: per-lane "not equal" as the complement of cmpeq
return _mm256_xor_si256(_mm256_cmpeq_epi32(v, v2),
                        _mm256_set1_epi32((int32_t)0xffffffff));
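AVX2 has no per-lane leading-zero-count instruction, hence the float-exponent trick in avx2_lzcnt_epi32. A scalar model of one lane (a sketch for clarity, not library code) makes the constants 158 and 32 explicit:

#include <cstdint>
#include <cstring>

// Scalar model of one lane of avx2_lzcnt_epi32 (sketch only).
static uint32_t lzcnt32_via_float(uint32_t v) {
  v &= ~(v >> 8);                // keep the conversion from rounding upwards
  float f = (float)v;            // exponent field becomes floor(log2 v) + 127
  uint32_t bits;
  std::memcpy(&bits, &f, 4);
  uint32_t exp = bits >> 23;     // biased exponent (0 when v == 0)
  uint32_t lz = 158 - exp;       // 158 = 127 + 31, so lz = 31 - floor(log2 v)
  return lz > 32 ? 32 : lz;      // v == 0 maps to 32
}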
static void proc_pixel(__m256i *src_vec, ui32 p,
                       __m256i *eq_vec, __m256i *s_vec,
                       __m256i &rho_vec, __m256i &e_qmax_vec)
{
  // ... (local temporaries such as val_vec, _eq_vec, _s_vec, _rho_vec and
  //      val_notmask are declared in elided lines)
  for (ui32 i = 0; i < 4; ++i) {
    val_vec[i] = _mm256_add_epi32(src_vec[i], src_vec[i]);  // x + x == x << 1: drop the sign bit
    // ...
    val_vec[i] = _mm256_srli_epi32(val_vec[i], (int)p);     // shift down by the bit-plane offset p
    // ...
    val_vec[i] = _mm256_and_si256(val_vec[i], _mm256_set1_epi32((int)~1u));  // clear bit 0
    // ...
    val_vec[i] = _mm256_sub_epi32(val_vec[i], ONE);
    // ...
    _eq_vec[i] = _mm256_sub_epi32(_mm256_set1_epi32(32), _eq_vec[i]);  // exponent = 32 - lzcnt
    // ...
    val_vec[i] = _mm256_sub_epi32(val_vec[i], ONE);
    _s_vec[i] = _mm256_srli_epi32(src_vec[i], 31);           // sign bit
    _s_vec[i] = _mm256_add_epi32(_s_vec[i], val_vec[i]);
    // ...
    _eq_vec[i] = _mm256_and_si256(_eq_vec[i], val_notmask);  // keep data only for significant samples
    _s_vec[i] = _mm256_and_si256(_s_vec[i], val_notmask);
    val_vec[i] = _mm256_srli_epi32(val_notmask, 31);         // 0/1 significance flag
  }
  // ...

  // regroup the lanes so that eq_vec[k] / s_vec[k] / _rho_vec[k] hold sample k
  // of each of the eight quads
  const __m256i idx = _mm256_set_epi32(7, 5, 3, 1, 6, 4, 2, 0);
  // ...
  for (ui32 i = 0; i < 2; ++i) {
    tmp1 = _mm256_permutevar8x32_epi32(_eq_vec[0 + i], idx);
    tmp2 = _mm256_permutevar8x32_epi32(_eq_vec[2 + i], idx);
    eq_vec[0 + i] = _mm256_permute2x128_si256(tmp1, tmp2, (0 << 0) + (2 << 4));
    eq_vec[2 + i] = _mm256_permute2x128_si256(tmp1, tmp2, (1 << 0) + (3 << 4));
    // ...
    tmp1 = _mm256_permutevar8x32_epi32(_s_vec[0 + i], idx);
    tmp2 = _mm256_permutevar8x32_epi32(_s_vec[2 + i], idx);
    s_vec[0 + i] = _mm256_permute2x128_si256(tmp1, tmp2, (0 << 0) + (2 << 4));
    s_vec[2 + i] = _mm256_permute2x128_si256(tmp1, tmp2, (1 << 0) + (3 << 4));
    // ...
    tmp1 = _mm256_permutevar8x32_epi32(val_vec[0 + i], idx);
    tmp2 = _mm256_permutevar8x32_epi32(val_vec[2 + i], idx);
    _rho_vec[0 + i] = _mm256_permute2x128_si256(tmp1, tmp2, (0 << 0) + (2 << 4));
    _rho_vec[2 + i] = _mm256_permute2x128_si256(tmp1, tmp2, (1 << 0) + (3 << 4));
  }
  // ...

  // e_qmax is the per-quad maximum exponent; rho packs the four significance
  // flags of each quad into bits 0..3
  e_qmax_vec = _mm256_max_epi32(eq_vec[0], eq_vec[1]);
  e_qmax_vec = _mm256_max_epi32(e_qmax_vec, eq_vec[2]);
  e_qmax_vec = _mm256_max_epi32(e_qmax_vec, eq_vec[3]);
  _rho_vec[1] = _mm256_slli_epi32(_rho_vec[1], 1);
  _rho_vec[2] = _mm256_slli_epi32(_rho_vec[2], 2);
  _rho_vec[3] = _mm256_slli_epi32(_rho_vec[3], 3);
  rho_vec = _mm256_or_si256(_rho_vec[0], _rho_vec[1]);
  rho_vec = _mm256_or_si256(rho_vec, _rho_vec[2]);
  rho_vec = _mm256_or_si256(rho_vec, _rho_vec[3]);
}
// rotate_matrix: reorder four row registers of 32-bit lanes (a transpose-style shuffle)
__m256i tmp1 = _mm256_unpacklo_epi32(matrix[0], matrix[1]);
__m256i tmp2 = _mm256_unpacklo_epi32(matrix[2], matrix[3]);
__m256i tmp3 = _mm256_unpackhi_epi32(matrix[0], matrix[1]);
__m256i tmp4 = _mm256_unpackhi_epi32(matrix[2], matrix[3]);

matrix[0] = _mm256_unpacklo_epi64(tmp1, tmp2);
matrix[1] = _mm256_unpacklo_epi64(tmp3, tmp4);
matrix[2] = _mm256_unpackhi_epi64(tmp1, tmp2);
matrix[3] = _mm256_unpackhi_epi64(tmp3, tmp4);

tmp1 = _mm256_permute2x128_si256(matrix[0], matrix[2], 0x20);
matrix[2] = _mm256_permute2x128_si256(matrix[0], matrix[2], 0x31);
matrix[0] = tmp1;
// ...
tmp1 = _mm256_permute2x128_si256(matrix[1], matrix[3], 0x20);
matrix[3] = _mm256_permute2x128_si256(matrix[1], matrix[3], 0x31);
matrix[1] = tmp1;
// proc_ms_encode: for each sample of each quad, m_vec[k] is the number of
// magnitude bits to emit (u_q minus the corresponding EMB bit, only where the
// sample is significant); the masked codewords are then packed pairwise
static void proc_ms_encode(ms_struct *msp, __m256i &tuple_vec, __m256i &uq_vec,
                           __m256i &rho_vec, __m256i *s_vec)
{
  // ...
  auto tmp = _mm256_and_si256(tuple_vec, ONE);
  tmp = _mm256_sub_epi32(uq_vec, tmp);
  auto tmp1 = _mm256_and_si256(rho_vec, ONE);
  // ...
  m_vec[0] = _mm256_and_si256(mask, tmp);
  // ...
  tmp = _mm256_and_si256(tuple_vec, _mm256_set1_epi32(2));
  tmp = _mm256_srli_epi32(tmp, 1);
  tmp = _mm256_sub_epi32(uq_vec, tmp);
  tmp1 = _mm256_and_si256(rho_vec, _mm256_set1_epi32(2));
  // ...
  m_vec[1] = _mm256_and_si256(mask, tmp);
  // ...
  tmp = _mm256_and_si256(tuple_vec, _mm256_set1_epi32(4));
  tmp = _mm256_srli_epi32(tmp, 2);
  tmp = _mm256_sub_epi32(uq_vec, tmp);
  tmp1 = _mm256_and_si256(rho_vec, _mm256_set1_epi32(4));
  // ...
  m_vec[2] = _mm256_and_si256(mask, tmp);
  // ...
  tmp = _mm256_and_si256(tuple_vec, _mm256_set1_epi32(8));
  tmp = _mm256_srli_epi32(tmp, 3);
  tmp = _mm256_sub_epi32(uq_vec, tmp);
  tmp1 = _mm256_and_si256(rho_vec, _mm256_set1_epi32(8));
  // ...
  m_vec[3] = _mm256_and_si256(mask, tmp);
  // ...
  for (ui32 i = 0; i < 4; ++i) {
    // ...
    _mm256_storeu_si256((__m256i *)cwd_len, m_vec[i]);
    tmp = _mm256_sllv_epi32(ONE, m_vec[i]);
    tmp = _mm256_sub_epi32(tmp, ONE);        // mask of m low bits
    tmp = _mm256_and_si256(tmp, s_vec[i]);   // keep m bits of the magnitude-sign word
    _mm256_storeu_si256((__m256i*)cwd, tmp);
    // ...
    for (ui32 j = 0; j < 4; ++j) {
      // ...
      _cwd_len = cwd_len[idx];
      _cwd |= ((ui64)cwd[idx + 1]) << _cwd_len;   // concatenate two samples' codewords
      _cwd_len += cwd_len[idx + 1];
      // ... (the packed word is then emitted; elided)
    }
  }
}
// cal_eps_vec: per quad, set one bit per sample that attains the maximum
// exponent e_qmax, and keep the result only where u_q > 0
static __m256i cal_eps_vec(__m256i *eq_vec, __m256i &u_q_vec, __m256i &e_qmax_vec)
{
  auto u_q_mask = _mm256_cmpgt_epi32(u_q_vec, ZERO);

  auto mask = _mm256_cmpeq_epi32(eq_vec[0], e_qmax_vec);
  auto eps_vec = _mm256_srli_epi32(mask, 31);

  mask = _mm256_cmpeq_epi32(eq_vec[1], e_qmax_vec);
  auto tmp = _mm256_srli_epi32(mask, 31);
  tmp = _mm256_slli_epi32(tmp, 1);
  eps_vec = _mm256_or_si256(eps_vec, tmp);

  mask = _mm256_cmpeq_epi32(eq_vec[2], e_qmax_vec);
  tmp = _mm256_srli_epi32(mask, 31);
  tmp = _mm256_slli_epi32(tmp, 2);
  eps_vec = _mm256_or_si256(eps_vec, tmp);

  mask = _mm256_cmpeq_epi32(eq_vec[3], e_qmax_vec);
  tmp = _mm256_srli_epi32(mask, 31);
  tmp = _mm256_slli_epi32(tmp, 3);
  eps_vec = _mm256_or_si256(eps_vec, tmp);

  return _mm256_and_si256(u_q_mask, eps_vec);
}
// update_lep / update_lcxp: store this quad row's bottom-row exponents and
// significance bits so the next row pair can use them as its "row above"
// context; the prev_* values carry the lane that crosses a strip boundary
static void update_lep(ui32 x, __m256i &prev_e_val_vec,
                       __m256i *eq_vec, __m256i *e_val_vec,
                       const __m256i left_shift)
{
  // ...
  auto tmp = _mm256_permutevar8x32_epi32(eq_vec[3], left_shift);
  tmp = _mm256_insert_epi32(tmp,
    _mm_cvtsi128_si32(_mm256_castsi256_si128(prev_e_val_vec)), 0);
  prev_e_val_vec = _mm256_insert_epi32(ZERO,
    _mm256_extract_epi32(eq_vec[3], 7), 0);
  e_val_vec[x] = _mm256_max_epi32(eq_vec[1], tmp);
}

static void update_lcxp(ui32 x, __m256i &prev_cx_val_vec,
                        __m256i &rho_vec, __m256i *cx_val_vec,
                        const __m256i left_shift)
{
  // ...
  auto tmp = _mm256_permutevar8x32_epi32(rho_vec, left_shift);
  tmp = _mm256_insert_epi32(tmp,
    _mm_cvtsi128_si32(_mm256_castsi256_si128(prev_cx_val_vec)), 0);
  prev_cx_val_vec = _mm256_insert_epi32(ZERO,
    _mm256_extract_epi32(rho_vec, 7), 0);
  // ...
  tmp = _mm256_and_si256(tmp, _mm256_set1_epi32(8));
  tmp = _mm256_srli_epi32(tmp, 3);
  // ...
  auto tmp1 = _mm256_and_si256(rho_vec, _mm256_set1_epi32(2));
  tmp1 = _mm256_srli_epi32(tmp1, 1);
  cx_val_vec[x] = _mm256_or_si256(tmp, tmp1);
}
static __m256i cal_tuple(__m256i &cq_vec, __m256i &rho_vec,
                         __m256i &eps_vec, ui32 *vlc_tbl)
{
  // gather one table entry per quad; index = (c_q << 8) + (rho << 4) + eps
  auto tmp = _mm256_slli_epi32(cq_vec, 8);
  auto tmp1 = _mm256_slli_epi32(rho_vec, 4);
  tmp = _mm256_add_epi32(tmp, tmp1);
  tmp = _mm256_add_epi32(tmp, eps_vec);
  return _mm256_i32gather_epi32((const int *)vlc_tbl, tmp, 4);
}
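The gather index matches the table-construction loops above: c_q in bits 8 and up, rho in bits 4..7, and the eps pattern in bits 0..3. A scalar sketch of the lookup, consistent with the ">> 4" and "& 7" accesses in the VLC-encode fragments below (the actual table width and packing live in the library), could be:

#include <cstdint>

// Sketch only: scalar equivalent of the gather plus the unpacking used later.
static inline uint32_t lookup_tuple(const uint32_t *vlc_tbl,
                                    uint32_t c_q, uint32_t rho, uint32_t eps)
{
  uint32_t tuple = vlc_tbl[(c_q << 8) + (rho << 4) + eps];
  // After the encoder's later "tuple >>= 4", the codeword and its length are
  // read back as (tuple >> 4) and (tuple & 7), matching the cwd / cwd_len
  // packing seen in the table-building loops.
  return tuple;
}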
// proc_cq1: quad context for an initial row of quads (no row above is used)
static __m256i proc_cq1(ui32 x, __m256i *cx_val_vec, __m256i &rho_vec,
                        const __m256i right_shift)
{
  // ...
  auto tmp = _mm256_srli_epi32(rho_vec, 1);
  auto tmp1 = _mm256_and_si256(rho_vec, ONE);
  return _mm256_or_si256(tmp, tmp1);
}

// proc_cq2: quad context for non-initial rows, which also reads the stored
// row-above state in cx_val_vec
static __m256i proc_cq2(ui32 x, __m256i *cx_val_vec, __m256i &rho_vec,
                        const __m256i right_shift)
{
  // ...
  auto lcxp1_vec = _mm256_permutevar8x32_epi32(cx_val_vec[x], right_shift);
  auto tmp = _mm256_permutevar8x32_epi32(lcxp1_vec, right_shift);
  // ...
  tmp = _mm256_insert_epi64(tmp,
    _mm_cvtsi128_si64(_mm256_castsi256_si128(cx_val_vec[x + 1])), 3);
  tmp = _mm256_slli_epi32(tmp, 2);
  auto tmp1 = _mm256_insert_epi32(lcxp1_vec,
    _mm_cvtsi128_si32(_mm256_castsi256_si128(cx_val_vec[x + 1])), 7);
  tmp = _mm256_add_epi32(tmp1, tmp);
  // ...
  tmp1 = _mm256_and_si256(rho_vec, _mm256_set1_epi32(4));
  tmp1 = _mm256_srli_epi32(tmp1, 1);
  tmp = _mm256_or_si256(tmp, tmp1);
  // ...
  tmp1 = _mm256_and_si256(rho_vec, _mm256_set1_epi32(8));
  tmp1 = _mm256_srli_epi32(tmp1, 2);
  // ...
  return _mm256_or_si256(tmp, tmp1);
}
static void proc_mel_encode1(mel_struct *melp, __m256i &cq_vec, __m256i &rho_vec,
                             __m256i u_q_vec, ui32 ignore,
                             const __m256i right_shift)
{
  int32_t mel_need_encode[8];
  int32_t mel_need_encode2[8];
  // ... (mel_bit2[8] is declared in elided lines)
  _mm256_storeu_si256((__m256i *)mel_need_encode, _mm256_cmpeq_epi32(cq_vec, ZERO));
  // ...
  auto tmp = _mm256_permutevar8x32_epi32(u_q_vec, right_shift);
  auto tmp1 = _mm256_min_epi32(u_q_vec, tmp);
  _mm256_storeu_si256((__m256i*)mel_bit2,
    _mm256_srli_epi32(_mm256_cmpgt_epi32(tmp1, _mm256_set1_epi32(2)), 31));
  // ...
  auto need_encode2 = _mm256_cmpgt_epi32(u_q_vec, ZERO);
  _mm256_storeu_si256((__m256i*)mel_need_encode2,
    _mm256_and_si256(need_encode2, _mm256_cmpgt_epi32(tmp, ZERO)));
  // ...
  ui32 i_max = 8 - (ignore / 2);   // each lane is one quad (two columns), so skip ignore/2 quads
  // ...
  for (ui32 i = 0; i < i_max; i += 2) {
    if (mel_need_encode[i]) {
      // ...
    }
    // ...
    if (mel_need_encode[i + 1]) {
      // ...
    }
    // ...
    if (mel_need_encode2[i]) {
      // ...
    }
    // ...
  }
}

static void proc_mel_encode2(mel_struct *melp, __m256i &cq_vec, __m256i &rho_vec,
                             __m256i u_q_vec, ui32 ignore,
                             const __m256i right_shift)
{
  // ...
  int32_t mel_need_encode[8];
  // ...
  _mm256_storeu_si256((__m256i*)mel_need_encode, _mm256_cmpeq_epi32(cq_vec, ZERO));
  // ...
  ui32 i_max = 8 - (ignore / 2);
  // ...
  for (ui32 i = 0; i < i_max; ++i) {
    if (mel_need_encode[i]) {
      // ...
    }
    // ...
  }
}

typedef void (*fn_proc_mel_encode)(mel_struct *, __m256i &, __m256i &,
                                   __m256i, ui32, const __m256i);
static void proc_vlc_encode1(vlc_struct_avx2 *vlcp, ui32 *tuple, ui32 *u_q,
                             ui32 ignore)
{
  // ...
  ui32 i_max = 8 - (ignore / 2);
  // ...
  for (ui32 i = 0; i < i_max; i += 2) {
    // ...
    ui32 val = tuple[i + 0] >> 4;          // VLC codeword of the first quad
    int size = tuple[i + 0] & 7;           // and its length
    // ...
    val |= (tuple[i + 1] >> 4) << size;    // append the second quad's codeword
    size += tuple[i + 1] & 7;
    // ...
    if (u_q[i] > 2 && u_q[i + 1] > 2) {
      // ...
    }
    else if (u_q[i] > 2 && u_q[i + 1] > 0) {
      // ...
      val |= (u_q[i + 1] - 1) << size;
      // ...
    }
    // ... (remaining u_q cases and the final vlc_encode call are elided)
  }
}

static void proc_vlc_encode2(vlc_struct_avx2 *vlcp, ui32 *tuple, ui32 *u_q,
                             ui32 ignore)
{
  // ...
  ui32 i_max = 8 - (ignore / 2);
  // ...
  for (ui32 i = 0; i < i_max; i += 2) {
    // ...
    ui32 val = tuple[i + 0] >> 4;
    int size = tuple[i + 0] & 7;
    // ...
    val |= (tuple[i + 1] >> 4) << size;
    size += tuple[i + 1] & 7;
    // ... (u_q handling for this variant is elided)
  }
}
// ojph_encode_codeblock_avx2 (main entry): the code-block is processed in
// 16-column strips, two rows (one quad row) at a time
ui32 width = (_width + 15) & ~15u;          // block width rounded up to a multiple of 16
ui32 ignore = width - _width;               // padded columns that must not be coded
const int ms_size = (16384 * 16 + 14) / 15;
const int mel_vlc_size = 3072;
const int mel_size = 192;
const int vlc_size = mel_vlc_size - mel_size;
// ...
ui8 ms_buf[ms_size];
ui8 mel_vlc_buf[mel_vlc_size];
ui8 *mel_buf = mel_vlc_buf;
ui8 *vlc_buf = mel_vlc_buf + mel_size;
// ... (mel and vlc stream initialization elided)
ms_init(&ms, ms_size, ms_buf);
// ...
const ui32 p = 30 - missing_msbs;
// ...
// lane-rotation constants for _mm256_permutevar8x32_epi32
const __m256i right_shift = _mm256_set_epi32(0, 7, 6, 5, 4, 3, 2, 1);
const __m256i left_shift  = _mm256_set_epi32(6, 5, 4, 3, 2, 1, 0, 7);
ui32 n_loop = (width + 15) / 16;            // number of 16-column strips
// ...
__m256i e_val_vec[65];
for (ui32 i = 0; i < 65; ++i)
  e_val_vec[i] = ZERO;
// ...
__m256i prev_e_val_vec = ZERO;
// ...
__m256i cx_val_vec[65];
__m256i prev_cx_val_vec = ZERO;
for (ui32 y = 0; y < height; y += 2)
{
  // seed the sentinel entry one past the last strip with state carried over
  // from the previous row pair
  e_val_vec[n_loop] = prev_e_val_vec;
  // ...
  __m256i tmp = _mm256_and_si256(prev_cx_val_vec, _mm256_set1_epi32(8));
  cx_val_vec[n_loop] = _mm256_srli_epi32(tmp, 3);
  // ...
  prev_e_val_vec = ZERO;
  prev_cx_val_vec = ZERO;
  // ...
  ui32 *sp = buf + y * stride;
  // ...
  for (ui32 x = 0; x < n_loop; ++x) {
    // ... (__m256i src_vec[4] and other locals are declared in elided lines)
    // load 16 samples from each of the two rows; the last strip of a block
    // whose width is not a multiple of 16 goes through a zero-padded buffer
    if ((x == (n_loop - 1)) && (_width % 16)) {
      ui32 tmp_buf[16] = { 0 };
      memcpy(tmp_buf, sp, (_width % 16) * sizeof(ui32));
      src_vec[0] = _mm256_loadu_si256((__m256i*)(tmp_buf));
      src_vec[2] = _mm256_loadu_si256((__m256i*)(tmp_buf + 8));
      if (y + 1 < height) {
        memcpy(tmp_buf, sp + stride, (_width % 16) * sizeof(ui32));
        src_vec[1] = _mm256_loadu_si256((__m256i*)(tmp_buf));
        src_vec[3] = _mm256_loadu_si256((__m256i*)(tmp_buf + 8));
      }
      // ...
    }
    else {
      src_vec[0] = _mm256_loadu_si256((__m256i*)(sp));
      src_vec[2] = _mm256_loadu_si256((__m256i*)(sp + 8));
      // ...
      if (y + 1 < height) {
        src_vec[1] = _mm256_loadu_si256((__m256i*)(sp + stride));
        src_vec[3] = _mm256_loadu_si256((__m256i*)(sp + 8 + stride));
      }
      // ...
    }
    // ...
    __m256i rho_vec, e_qmax_vec;
    proc_pixel(src_vec, p, eq_vec, s_vec, rho_vec, e_qmax_vec);
    // ...
    // exponents of the row above (current and next strip), used to derive kappa
    tmp = _mm256_permutevar8x32_epi32(e_val_vec[x], right_shift);
    tmp = _mm256_insert_epi32(tmp,
      _mm_cvtsi128_si32(_mm256_castsi256_si128(e_val_vec[x + 1])), 7);
    // ...
    auto max_e_vec = _mm256_max_epi32(tmp, e_val_vec[x]);
    max_e_vec = _mm256_sub_epi32(max_e_vec, ONE);
    // ...
    // kappa = 1 when the quad has at most one significant sample,
    // otherwise max(max_e - 1, 1)
    tmp = _mm256_max_epi32(max_e_vec, ONE);
    __m256i tmp1 = _mm256_sub_epi32(rho_vec, ONE);
    tmp1 = _mm256_and_si256(rho_vec, tmp1);      // zero iff rho has at most one bit set
    // ...
    auto cmp = _mm256_cmpeq_epi32(tmp1, ZERO);
    auto kappa_vec1_ = _mm256_and_si256(cmp, ONE);
    auto kappa_vec2_ = _mm256_and_si256(
      _mm256_xor_si256(cmp, _mm256_set1_epi32((int32_t)0xffffffff)), tmp);
    const __m256i kappa_vec = _mm256_max_epi32(kappa_vec1_, kappa_vec2_);
    // ...
    // quad context from the stored row above and the quad to the left
    tmp = proc_cq(x, cx_val_vec, rho_vec, right_shift);
    // ...
    auto cq_vec = _mm256_permutevar8x32_epi32(tmp, left_shift);
    cq_vec = _mm256_insert_epi32(cq_vec, prev_cq, 0);
    prev_cq = (ui32)_mm256_extract_epi32(tmp, 7);
    // ...
    update_lep(x, prev_e_val_vec, eq_vec, e_val_vec, left_shift);
    update_lcxp(x, prev_cx_val_vec, rho_vec, cx_val_vec, left_shift);
    // ...
    auto uq_vec = _mm256_max_epi32(kappa_vec, e_qmax_vec);
    auto u_q_vec = _mm256_sub_epi32(uq_vec, kappa_vec);   // residual u_q = U_q - kappa
    // ...
    auto eps_vec = cal_eps_vec(eq_vec, u_q_vec, e_qmax_vec);
    __m256i tuple_vec = cal_tuple(cq_vec, rho_vec, eps_vec, vlc_tbl);
    ui32 _ignore = ((n_loop - 1) == x) ? ignore : 0;   // padding only affects the last strip
    // ...
    proc_mel_encode(&mel, cq_vec, rho_vec, u_q_vec, _ignore, right_shift);
    // ...
    tuple_vec = _mm256_srli_epi32(tuple_vec, 4);
    _mm256_storeu_si256((__m256i*)tuple, tuple_vec);
    _mm256_storeu_si256((__m256i*)u_q, u_q_vec);
    // ...
    proc_vlc_encode(&vlc, tuple, u_q, _ignore);
  }
  // ...
  // derive the initial quad context (prev_cq) for the next row pair from the
  // first strip's stored significance state
  tmp = _mm256_permutevar8x32_epi32(cx_val_vec[0], right_shift);
  tmp = _mm256_slli_epi32(tmp, 2);
  tmp = _mm256_add_epi32(tmp, cx_val_vec[0]);
  prev_cq = (ui32)_mm_cvtsi128_si32(_mm256_castsi256_si128(tmp));
}
// assemble the code-block byte stream:
//   [ MagSgn bytes | MEL bytes | VLC bytes (copied from the end, since the
//     VLC stream was written backwards) ]
lengths[0] = mel.pos + vlc.pos + ms.pos;
// ... (output storage obtained through the elastic allocator / get_buffer)
memcpy(coded->buf, ms.buf, ms.pos);
memcpy(coded->buf + ms.pos, mel.buf, mel.pos);
memcpy(coded->buf + ms.pos + mel.pos, vlc.buf - vlc.pos + 1, vlc.pos);
// ...
// the last two bytes carry the combined MEL+VLC length: its upper bits go in
// the last byte and its low 4 bits in the low nibble of the byte before it
ui32 num_bytes = mel.pos + vlc.pos;
coded->buf[lengths[0] - 1] = (ui8)(num_bytes >> 4);
coded->buf[lengths[0] - 2] = coded->buf[lengths[0] - 2] & 0xF0;
coded->buf[lengths[0] - 2] =
  (ui8)(coded->buf[lengths[0] - 2] | (num_bytes & 0xF));
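The length written into those last two bytes can be round-tripped in isolation; the following self-contained check uses hypothetical segment sizes:

#include <cassert>
#include <cstdint>

int main() {
  // Hypothetical segment sizes, for illustration only.
  uint32_t mel_pos = 37, vlc_pos = 180;
  uint32_t num_bytes = mel_pos + vlc_pos;
  uint8_t last_byte  = (uint8_t)(num_bytes >> 4);   // stored in the final byte
  uint8_t low_nibble = (uint8_t)(num_bytes & 0xF);  // low nibble of the byte before it
  uint32_t recovered = ((uint32_t)last_byte << 4) | low_nibble;
  assert(recovered == num_bytes);                   // a decoder can recover the length
  return 0;
}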
void get_buffer(ui32 needed_bytes, coded_lists *&p)
static bool uvlc_init_tables()
Initializes uvlc_tbl0 and uvlc_tbl1 tables.
static bool vlc_init_tables()
Initializes vlc_tbl0 and vlc_tbl1 tables, from table0.h and table1.h.
ui16 vlc_tbl0[1024]
vlc_tbl0 contains decoding information for initial row of quads
ui16 vlc_tbl1[1024]
vlc_tbl1 contains decoding information for non-initial row of quads
void(*)(vlc_struct_avx2 *, ui32 *, ui32 *, ui32) fn_proc_vlc_encode
static void ms_terminate(ms_struct *msp)
static int ulvc_cwd_suf_len[33]
void(*)(mel_struct *, __m256i &, __m256i &, __m256i, ui32, const __m256i) fn_proc_mel_encode
static __m256i proc_cq1(ui32 x, __m256i *cx_val_vec, __m256i &rho_vec, const __m256i right_shift)
static void vlc_encode(vlc_struct *vlcp, int cwd, int cwd_len)
static void proc_pixel(__m256i *src_vec, ui32 p, __m256i *eq_vec, __m256i *s_vec, __m256i &rho_vec, __m256i &e_qmax_vec)
static ui32 ulvc_cwd_suf[33]
static void proc_ms_encode(ms_struct *msp, __m256i &tuple_vec, __m256i &uq_vec, __m256i &rho_vec, __m256i *s_vec)
static void terminate_mel_vlc(mel_struct *melp, vlc_struct *vlcp)
static void update_lep(ui32 x, __m256i &prev_e_val_vec, __m256i *eq_vec, __m256i *e_val_vec, const __m256i left_shift)
void ojph_encode_codeblock_avx2(ui32 *buf, ui32 missing_msbs, ui32 num_passes, ui32 width, ui32 height, ui32 stride, ui32 *lengths, ojph::mem_elastic_allocator *elastic, ojph::coded_lists *&coded)
static __m256i proc_cq2(ui32 x, __m256i *cx_val_vec, __m256i &rho_vec, const __m256i right_shift)
static void mel_init(dec_mel_st *melp, ui8 *bbuf, int lcup, int scup)
Initiates a dec_mel_st structure for MEL decoding and reads some bytes in order to get the read addre...
static __m256i cal_eps_vec(__m256i *eq_vec, __m256i &u_q_vec, __m256i &e_qmax_vec)
static void rotate_matrix(__m256i *matrix)
static ui32 ulvc_cwd_pre[33]
static void ms_init(ms_struct *msp, ui32 buffer_size, ui8 *data)
static void ms_encode(ms_struct *msp, ui32 cwd, int cwd_len)
__m256i(*)(ui32, __m256i *, __m256i &, const __m256i) fn_proc_cq
static int ulvc_cwd_pre_len[33]
static void proc_mel_encode1(mel_struct *melp, __m256i &cq_vec, __m256i &rho_vec, __m256i u_q_vec, ui32 ignore, const __m256i right_shift)
static void proc_vlc_encode2(vlc_struct_avx2 *vlcp, ui32 *tuple, ui32 *u_q, ui32 ignore)
static void mel_encode(mel_struct *melp, bool bit)
static void mel_emit_bit(mel_struct *melp, int v)
static void update_lcxp(ui32 x, __m256i &prev_cx_val_vec, __m256i &rho_vec, __m256i *cx_val_vec, const __m256i left_shift)
__m256i avx2_cmpneq_epi32(__m256i v, __m256i v2)
__m256i avx2_lzcnt_epi32(__m256i v)
static bool tables_initialized
bool initialize_block_encoder_tables_avx2()
static void vlc_init(vlc_struct *vlcp, ui32 buffer_size, ui8 *data)
static __m256i cal_tuple(__m256i &cq_vec, __m256i &rho_vec, __m256i &eps_vec, ui32 *vlc_tbl)
static void proc_mel_encode2(mel_struct *melp, __m256i &cq_vec, __m256i &rho_vec, __m256i u_q_vec, ui32 ignore, const __m256i right_shift)
static void proc_vlc_encode1(vlc_struct_avx2 *vlcp, ui32 *tuple, ui32 *u_q, ui32 ignore)
static ui32 population_count(ui32 val)
#define OJPH_ERROR(t,...)
bool last_greater_than_8F