// Note: the table is strongly recommended to be aligned to sizeof(uint16_vec8_t).
// This is sizeof(uint16_t) * 8; some compilers may require 16-byte (128-bit)
// alignment when using SIMD128. -- 20181105 K.O
+void DLL_PREFIX PrepareBitTransTableUint16(_bit_trans_table_t *tbl, uint16_t on_val, uint16_t off_val)
+{
+ if(tbl == NULL) return;
+ for(uint16_t i = 0; i < 256; i++) {
+ uint16_t n = i;
+ for(int j = 0; j < 8; j++) {
+ tbl->plane_table[i].w[j] = ((n & 0x80) == 0) ? off_val : on_val;
+ n <<= 1;
+ }
+ }
+}
+
// Note: the table is strongly recommended to be aligned to sizeof(scrntype_vec8_t).
// This is sizeof(scrntype_t) * 8; some compilers may require 32-byte (256-bit) or
// 16-byte (128-bit) alignment when using SIMD256 or SIMD128. -- 20181105 K.O
+void DLL_PREFIX PrepareBitTransTableScrnType(_bit_trans_table_scrn_t *tbl, scrntype_t on_val, scrntype_t off_val)
+{
+ if(tbl == NULL) return;
+ for(uint16_t i = 0; i < 256; i++) {
+ uint16_t n = i;
+ for(int j = 0; j < 8; j++) {
+ tbl->plane_table[i].w[j] = ((n & 0x80) == 0) ? off_val : on_val;
+ n <<= 1;
+ }
+ }
+}
+
// Prepare reversed bit-order (LSB-first) table(s).
+void DLL_PREFIX PrepareReverseBitTransTableUint16(_bit_trans_table_t *tbl, uint16_t on_val, uint16_t off_val)
+{
+ if(tbl == NULL) return;
+ for(uint16_t i = 0; i < 256; i++) {
+ uint16_t n = i;
+ for(int j = 0; j < 8; j++) {
+ tbl->plane_table[i].w[j] = ((n & 0x01) == 0) ? off_val : on_val;
+ n >>= 1;
+ }
+ }
+}
+
+void DLL_PREFIX PrepareReverseBitTransTableScrnType(_bit_trans_table_scrn_t *tbl, scrntype_t on_val, scrntype_t off_val)
+{
+ if(tbl == NULL) return;
+ for(uint16_t i = 0; i < 256; i++) {
+ uint16_t n = i;
+ for(int j = 0; j < 8; j++) {
+ tbl->plane_table[i].w[j] = ((n & 0x01) == 0) ? off_val : on_val;
+ n >>= 1;
+ }
+ }
+}
+
+// With _bit_trans_table_scrn_t.
// Expand each source byte into 8 packed pixels, choosing per-pixel colors from
// on_color_table / off_color_table. Both color tables advance by 8 entries per
// source byte, i.e. they hold distinct per-pixel colors for the whole run.
// NOTE(review): the AND/ANDN selection assumes tbl->plane_table entries are
// all-ones/all-zeros per lane (prepared with on_val=~0, off_val=0) -- confirm
// against callers. src/dst/tbl are not NULL-checked here.
void DLL_PREFIX ConvertByteToPackedPixelByColorTable2(uint8_t *src, scrntype_t* dst, int bytes, _bit_trans_table_scrn_t *tbl, scrntype_t *on_color_table, scrntype_t* off_color_table)
{

	__DECL_ALIGNED(32) scrntype_vec8_t tmpd;
	__DECL_ALIGNED(32) scrntype_vec8_t tmpdd;
	__DECL_ALIGNED(32) scrntype_vec8_t colors;
	scrntype_vec8_t* vt = (scrntype_vec8_t*)__builtin_assume_aligned(&(tbl->plane_table[0]), sizeof(scrntype_vec8_t));

	uintptr_t disalign = (uintptr_t)dst;
	disalign = disalign & (sizeof(scrntype_vec8_t) - 1); // Is dst aligned to sizeof(scrntype_vec8_t) (128/256 bits)?
	if(disalign == 0) {
		// Aligned: store whole vectors at once.
		scrntype_vec8_t *vdst = (scrntype_vec8_t*)__builtin_assume_aligned(dst, sizeof(scrntype_vec8_t));
__DECL_VECTORIZED_LOOP
		for(int i = 0; i < bytes; i++) {
			tmpd.v = vt[src[i]].v;  // mask of "on" lanes
			tmpdd.v = ~tmpd.v;      // complementary mask of "off" lanes

__DECL_VECTORIZED_LOOP
			for(int j = 0; j < 8; j++) {
				colors.w[j] = on_color_table[j];
			}
			tmpd.v = tmpd.v & colors.v;
__DECL_VECTORIZED_LOOP
			for(int j = 0; j < 8; j++) {
				colors.w[j] = off_color_table[j];
			}
			tmpdd.v = tmpdd.v & colors.v;
			// Merge the two selections into the final pixel vector.
			vdst->v = (tmpd.v | tmpdd.v);
			off_color_table += 8;
			on_color_table += 8;
			vdst++;
		}
	} else {
		// Not aligned: compute in vectors but store lane-by-lane.
__DECL_VECTORIZED_LOOP
		for(int i = 0; i < bytes; i++) {
			tmpd.v = vt[src[i]].v;
			tmpdd.v = ~tmpd.v;

__DECL_VECTORIZED_LOOP
			for(int j = 0; j < 8; j++) {
				colors.w[j] = on_color_table[j];
			}
			tmpd.v = tmpd.v & colors.v;
__DECL_VECTORIZED_LOOP
			for(int j = 0; j < 8; j++) {
				colors.w[j] = off_color_table[j];
			}
			tmpdd.v = tmpdd.v & colors.v;
			tmpdd.v = tmpdd.v | tmpd.v;
__DECL_VECTORIZED_LOOP
			for(int j = 0; j < 8; j++) {
				dst[j] = tmpdd.w[j];
			}
			off_color_table += 8;
			on_color_table += 8;
			dst += 8;
		}
	}
}
+
+
// Convert uint8_t[] VRAM to a uint16_t[] mono pixel pattern.
// You must set the table to "ON_VALUE" : "OFF_VALUE" via PrepareBitTransTableUint16().
// -- 20181105 K.O
// Expand each VRAM byte into eight uint16 lanes via tbl (prepared by
// PrepareBitTransTableUint16()), then AND every lane with mask.
// dst receives bytes * 8 uint16 values. src/dst/tbl are not NULL-checked.
void DLL_PREFIX ConvertByteToSparceUint16(uint8_t *src, uint16_t* dst, int bytes, _bit_trans_table_t *tbl, uint16_t mask)
{

	__DECL_ALIGNED(16) uint16_vec8_t tmpd;
	uint16_vec8_t* vt = (uint16_vec8_t*)__builtin_assume_aligned(&(tbl->plane_table[0]), sizeof(uint16_vec8_t));

	__DECL_ALIGNED(16) uint16_vec8_t __masks;

	// Broadcast mask to all 8 lanes.
__DECL_VECTORIZED_LOOP
	for(int i = 0; i < 8; i++) {
		__masks.w[i] = mask;
	}
	uintptr_t disalign = (uintptr_t)dst;
	disalign = disalign & 0x0f; // Is dst aligned to 16 bytes (128 bits)?
	if(disalign == 0) {
		// Aligned: store whole vectors.
		uint16_vec8_t *vdst = (uint16_vec8_t*)__builtin_assume_aligned(dst, sizeof(uint16_vec8_t));
__DECL_VECTORIZED_LOOP
		for(int i = 0; i < bytes; i++) {
			tmpd.v = vt[src[i]].v;
			tmpd.v = tmpd.v & __masks.v;
			vdst->v = tmpd.v;
			vdst++;
		}
	} else {
		// Not aligned: store lane-by-lane.
__DECL_VECTORIZED_LOOP
		for(int i = 0; i < bytes; i++) {
			tmpd.v = vt[src[i]].v;
			tmpd.v = tmpd.v & __masks.v;
__DECL_VECTORIZED_LOOP
			for(int j = 0; j < 8; j++) {
				dst[j] = tmpd.w[j];
			}
			dst += 8;
		}
	}
}
+
// Convert uint8_t[] VRAM to a uint8_t[] mono pixel pattern (8 output bytes per
// source byte). You must set the table to "ON_VALUE" : "OFF_VALUE" via
// PrepareBitTransTableUint16(). -- 20181105 K.O
+void DLL_PREFIX ConvertByteToSparceUint8(uint8_t *src, uint16_t* dst, int bytes, _bit_trans_table_t *tbl, uint16_t mask)
+{
+
+ __DECL_ALIGNED(16) uint16_vec8_t tmpd;
+ uint16_vec8_t* vt = (uint16_vec8_t*)__builtin_assume_aligned(&(tbl->plane_table[0]), sizeof(uint16_vec8_t));
+
+ __DECL_ALIGNED(16) uint16_vec8_t __masks;
+ __DECL_ALIGNED(16) uint8_vec8_t tmpdd;
+
+__DECL_VECTORIZED_LOOP
+ for(int i = 0; i < 8; i++) {
+ __masks.w[i] = mask;
+ }
+ uintptr_t disalign = (uintptr_t)dst;
+ disalign = disalign & 0x07; //Is align by 128bits?
+ if(disalign == 0) {
+ // Yes.
+ uint8_vec8_t *vdst = (uint8_vec8_t*)__builtin_assume_aligned(dst, sizeof(uint8_vec8_t));
+__DECL_VECTORIZED_LOOP
+ for(int i = 0; i < bytes; i++) {
+ tmpd.v = vt[src[i]].v;
+ tmpd.v = tmpd.v & __masks.v;
+__DECL_VECTORIZED_LOOP
+ for(int j = 0; j < 8; j++) {
+ tmpdd.w[j] = (uint8_t)(tmpd.w[j]);
+ }
+ vdst->v = tmpdd.v;
+ vdst++;
+ }
+ } else {
+ // Sorry, not aligned.
+__DECL_VECTORIZED_LOOP
+ for(int i = 0; i < bytes; i++) {
+ tmpd.v = vt[src[i]].v;
+ tmpd.v = tmpd.v & __masks.v;
+__DECL_VECTORIZED_LOOP
+ for(int j = 0; j < 8; j++) {
+ dst[j] = (uint8_t)(tmpd.w[j]);
+ }
+ dst += 8;
+ }
+ }
+}
+
+
// Expand each source byte into 8 pixels: lane j gets on_color_table[j] when the
// table lane for that bit is non-zero, else off_color_table[j]. Both color
// tables advance by 8 entries per source byte. src/dst/tbl are not NULL-checked.
void DLL_PREFIX ConvertByteToPackedPixelByColorTable(uint8_t *src, scrntype_t* dst, int bytes, _bit_trans_table_t *tbl, scrntype_t *on_color_table, scrntype_t* off_color_table)
{

	__DECL_ALIGNED(16) uint16_vec8_t tmpd;
	__DECL_ALIGNED(32) scrntype_vec8_t tmpdd;
	uint16_vec8_t* vt = (uint16_vec8_t*)__builtin_assume_aligned(&(tbl->plane_table[0]), sizeof(uint16_vec8_t));

	uintptr_t disalign = (uintptr_t)dst;
	disalign = disalign & 0x0f; // Is dst aligned to 16 bytes (128 bits)?
	if(disalign == 0) {
		// Aligned: store whole pixel vectors.
		scrntype_vec8_t *vdst = (scrntype_vec8_t*)__builtin_assume_aligned(dst, sizeof(scrntype_vec8_t));
__DECL_VECTORIZED_LOOP
		for(int i = 0; i < bytes; i++) {
			tmpd.v = vt[src[i]].v;
__DECL_VECTORIZED_LOOP
			for(int j = 0; j < 8; j++) {
				tmpdd.w[j] = (tmpd.w[j] == 0) ? off_color_table[j] : on_color_table[j];
			}
			vdst->v = tmpdd.v;
			off_color_table += 8;
			on_color_table += 8;
			vdst++;
		}
	} else {
		// Not aligned: store pixel-by-pixel.
__DECL_VECTORIZED_LOOP
		for(int i = 0; i < bytes; i++) {
			tmpd.v = vt[src[i]].v;
__DECL_VECTORIZED_LOOP
			for(int j = 0; j < 8; j++) {
				dst[j] = (tmpd.w[j] == 0) ? off_color_table[j] : on_color_table[j];
			}
			off_color_table += 8;
			on_color_table += 8;
			dst += 8;
		}
	}
}
+
+
+void DLL_PREFIX Render8Colors_Line(_render_command_data_t *src, scrntype_t *dst, scrntype_t* dst2, bool scan_line)
+{
+ if(src == NULL) return;
+ if(dst == NULL) return;
+
+//__DECL_VECTORIZED_LOOP
+// for(int i = 0; i < 3; i++) {
+// if(src->bit_trans_table[i] == NULL) return;
+// if(src->data[i] == NULL) return;
+// }
+ scrntype_t dummy_palette[8]; // fallback
+ scrntype_t *palette = src->palette;
+
+ uint16_vec8_t *vpb = (uint16_vec8_t*)__builtin_assume_aligned(src->bit_trans_table[0], sizeof(uint16_vec8_t));
+ uint16_vec8_t *vpr = (uint16_vec8_t*)__builtin_assume_aligned(src->bit_trans_table[1], sizeof(uint16_vec8_t));
+ uint16_vec8_t *vpg = (uint16_vec8_t*)__builtin_assume_aligned(src->bit_trans_table[2], sizeof(uint16_vec8_t));
+
+ uint32_t x;
+ __DECL_ALIGNED(16) uint32_t offset[4] = {0};
+ __DECL_ALIGNED(16) uint32_t beginaddr[4] = {0};
+ uint32_t mask = src->addrmask;
+ uint32_t offsetmask = src->addrmask2;
+__DECL_VECTORIZED_LOOP
+ for(int i = 0; i < 3; i++) {
+ offset[i] = src->voffset[i];
+ }
+ if(palette == NULL) {
+__DECL_VECTORIZED_LOOP
+ for(int i = 0; i < 8; i++) {
+ dummy_palette[i] = RGB_COLOR(((i & 2) << 5) | 0x1f,
+ ((i & 4) << 5) | 0x1f,
+ ((i & 1) << 5) | 0x1f);
+ }
+ palette = dummy_palette;
+ }
+ uint8_t *bp = &(src->data[0][src->baseaddress[0]]);
+ uint8_t *rp = &(src->data[1][src->baseaddress[1]]);
+ uint8_t *gp = &(src->data[2][src->baseaddress[2]]);
+
+ uint8_t r, g, b;
+ int shift = src->shift;
+ const bool is_render[3] = { src->is_render[0], src->is_render[1], src->is_render[2] };
+ __DECL_ALIGNED(16) uint16_vec8_t tmpd;
+ __DECL_ALIGNED(32) scrntype_vec8_t tmp_dd;
+ scrntype_vec8_t* vdp = (scrntype_vec8_t*)__builtin_assume_aligned(dst, sizeof(scrntype_vec8_t));
+
+ x = src->begin_pos;
+ uint32_t n = x;
+ if(dst2 == NULL) {
+ __DECL_VECTORIZED_LOOP
+ for(uint32_t xx = 0; xx < src->render_width; xx++) {
+ b = (is_render[0]) ? bp[(offset[0] + n) & mask] : 0;
+ r = (is_render[1]) ? rp[(offset[1] + n) & mask] : 0;
+ g = (is_render[2]) ? gp[(offset[2] + n) & mask] : 0;
+ tmpd.v = vpb[b].v;
+ tmpd.v = tmpd.v | vpr[r].v;
+ tmpd.v = tmpd.v | vpg[g].v;
+ tmpd.v = tmpd.v >> shift;
+ n = (n + 1) & offsetmask;
+ __DECL_VECTORIZED_LOOP
+ for(int i = 0; i < 8; i++) {
+ tmp_dd.w[i] = palette[tmpd.w[i]];
+ }
+ vdp[xx].v = tmp_dd.v;
+ }
+ } else {
+#if defined(_RGB555) || defined(_RGBA565)
+ static const int shift_factor = 2;
+#else // 24bit
+ static const int shift_factor = 3;
+#endif
+ __DECL_ALIGNED(32) scrntype_vec8_t sline;
+ scrntype_vec8_t* vdp2 = (scrntype_vec8_t*)__builtin_assume_aligned(dst2, sizeof(scrntype_vec8_t));
+ __DECL_VECTORIZED_LOOP
+ for(int i = 0; i < 8; i++) {
+ sline.w[i] = (scrntype_t)RGBA_COLOR(31, 31, 31, 255);
+ }
+ __DECL_VECTORIZED_LOOP
+ for(uint32_t xx = 0; xx < src->render_width; xx++) {
+ b = (is_render[0]) ? bp[(offset[0] + n) & mask] : 0;
+ r = (is_render[1]) ? rp[(offset[1] + n) & mask] : 0;
+ g = (is_render[2]) ? gp[(offset[2] + n) & mask] : 0;
+ tmpd.v = vpb[b].v;
+ tmpd.v = tmpd.v | vpr[r].v;
+ tmpd.v = tmpd.v | vpg[g].v;
+ tmpd.v = tmpd.v >> shift;
+ n = (n + 1) & offsetmask;
+ __DECL_VECTORIZED_LOOP
+ for(int i = 0; i < 8; i++) {
+ tmp_dd.w[i] = palette[tmpd.w[i]];
+ }
+ vdp[xx].v = tmp_dd.v;
+ if(scan_line) {
+ tmp_dd.v = tmp_dd.v >> shift_factor;
+ tmp_dd.v = tmp_dd.v & sline.v;
+ }
+ vdp2[xx].v = tmp_dd.v;
+ }
+ }
+}
+
+void DLL_PREFIX Render16Colors_Line(_render_command_data_t *src, scrntype_t *dst, scrntype_t* dst2, bool scan_line)
+{
+ if(src == NULL) return;
+ if(dst == NULL) return;
+
+//__DECL_VECTORIZED_LOOP
+// for(int i = 0; i < 3; i++) {
+// if(src->bit_trans_table[i] == NULL) return;
+// if(src->data[i] == NULL) return;
+// }
+ scrntype_t dummy_palette[16]; // fallback
+ scrntype_t *palette = src->palette;
+
+ uint16_vec8_t *vpb = (uint16_vec8_t*)__builtin_assume_aligned(src->bit_trans_table[0], sizeof(uint16_vec8_t));
+ uint16_vec8_t *vpr = (uint16_vec8_t*)__builtin_assume_aligned(src->bit_trans_table[1], sizeof(uint16_vec8_t));
+ uint16_vec8_t *vpg = (uint16_vec8_t*)__builtin_assume_aligned(src->bit_trans_table[2], sizeof(uint16_vec8_t));
+ uint16_vec8_t *vpn = (uint16_vec8_t*)__builtin_assume_aligned(src->bit_trans_table[3], sizeof(uint16_vec8_t));
+
+ uint32_t x;
+ __DECL_ALIGNED(16) uint32_t offset[4];
+ __DECL_ALIGNED(16) uint32_t beginaddr[4];
+ uint32_t mask = src->addrmask;
+ uint32_t offsetmask = src->addrmask2;
+
+__DECL_VECTORIZED_LOOP
+ for(int i = 0; i < 4; i++) {
+ offset[i] = src->voffset[i];
+ }
+ if(palette == NULL) {
+__DECL_VECTORIZED_LOOP
+ for(int i = 0; i < 16; i++) {
+ dummy_palette[i] = RGB_COLOR((((i & 2) + (i & 8)) << 4) | 0x0f,
+ (((i & 4) + (i & 8)) << 4) | 0x0f,
+ (((i & 1) + (i & 8)) << 4) | 0x0f);
+ }
+ palette = dummy_palette;
+ }
+ uint8_t *bp = &(src->data[0][src->baseaddress[0]]);
+ uint8_t *rp = &(src->data[1][src->baseaddress[1]]);
+ uint8_t *gp = &(src->data[2][src->baseaddress[2]]);
+ uint8_t *np = &(src->data[3][src->baseaddress[3]]);
+
+ uint8_t r, g, b, n;
+ int shift = src->shift;
+ const bool is_render[4] = { src->is_render[0], src->is_render[1], src->is_render[2], src->is_render[3] };
+ __DECL_ALIGNED(16) uint16_vec8_t tmpd;
+ __DECL_ALIGNED(32) scrntype_vec8_t tmp_dd;
+ scrntype_vec8_t* vdp = (scrntype_vec8_t*)__builtin_assume_aligned(dst, sizeof(scrntype_vec8_t));
+
+ x = src->begin_pos;
+ uint32_t xn = x;
+ if(dst2 == NULL) {
+ __DECL_VECTORIZED_LOOP
+ for(uint32_t xx = 0; xx < src->render_width; xx++) {
+ b = (is_render[0]) ? bp[(offset[0] + xn) & mask] : 0;
+ r = (is_render[1]) ? rp[(offset[1] + xn) & mask] : 0;
+ g = (is_render[2]) ? gp[(offset[2] + xn) & mask] : 0;
+ n = (is_render[3]) ? np[(offset[3] + xn) & mask] : 0;
+ tmpd.v = vpb[b].v;
+ tmpd.v = tmpd.v | vpr[r].v;
+ tmpd.v = tmpd.v | vpg[g].v;
+ tmpd.v = tmpd.v | vpn[n].v;
+ tmpd.v = tmpd.v >> shift;
+ xn = (xn + 1) & offsetmask;
+ __DECL_VECTORIZED_LOOP
+ for(int i = 0; i < 8; i++) {
+ tmp_dd.w[i] = palette[tmpd.w[i]];
+ }
+ vdp[xx].v = tmp_dd.v;
+ }
+ } else {
+#if defined(_RGB555) || defined(_RGBA565)
+ static const int shift_factor = 2;
+#else // 24bit
+ static const int shift_factor = 3;
+#endif
+ __DECL_ALIGNED(32) scrntype_vec8_t sline;
+ scrntype_vec8_t* vdp2 = (scrntype_vec8_t*)__builtin_assume_aligned(dst2, sizeof(scrntype_vec8_t));
+ __DECL_VECTORIZED_LOOP
+ for(int i = 0; i < 8; i++) {
+ sline.w[i] = (scrntype_t)RGBA_COLOR(31, 31, 31, 255);
+ }
+ __DECL_VECTORIZED_LOOP
+ for(uint32_t xx = 0; xx < src->render_width; xx++) {
+ b = (is_render[0]) ? bp[(offset[0] + xn) & mask] : 0;
+ r = (is_render[1]) ? rp[(offset[1] + xn) & mask] : 0;
+ g = (is_render[2]) ? gp[(offset[2] + xn) & mask] : 0;
+ n = (is_render[3]) ? np[(offset[3] + xn) & mask] : 0;
+ tmpd.v = vpb[b].v;
+ tmpd.v = tmpd.v | vpr[r].v;
+ tmpd.v = tmpd.v | vpg[g].v;
+ tmpd.v = tmpd.v | vpn[n].v;
+ tmpd.v = tmpd.v >> shift;
+ xn = (xn + 1) & offsetmask;
+ __DECL_VECTORIZED_LOOP
+ for(int i = 0; i < 8; i++) {
+ tmp_dd.w[i] = palette[tmpd.w[i]];
+ }
+ vdp[xx].v = tmp_dd.v;
+ if(scan_line) {
+ tmp_dd.v = tmp_dd.v >> shift_factor;
+ tmp_dd.v = tmp_dd.v & sline.v;
+ }
+ vdp2[xx].v = tmp_dd.v;
+ }
+ }
+}
+
+// src->palette Must be 2^planes entries.
+void DLL_PREFIX Render2NColors_Line(_render_command_data_t *src, scrntype_t *dst, scrntype_t* dst2, bool scan_line, int planes)
+{
+ if(src == NULL) return;
+ if(dst == NULL) return;
+ if(src->palette == NULL) return;
+ if(planes <= 0) return;
+ if(planes >= 16) planes = 16;
+//__DECL_VECTORIZED_LOOP
+// for(int i = 0; i < 3; i++) {
+// if(src->bit_trans_table[i] == NULL) return;
+// if(src->data[i] == NULL) return;
+// }
+ scrntype_t *palette = src->palette;
+
+ uint16_vec8_t* vp[16];
+ for(int i = 0; i < planes; i++) {
+ vp[i] = (uint16_vec8_t*)__builtin_assume_aligned(src->bit_trans_table[i], sizeof(uint16_vec8_t));
+ }
+
+ uint32_t x;
+ __DECL_ALIGNED(16) uint32_t offset[16];
+ __DECL_ALIGNED(16) uint32_t beginaddr[16];
+ uint32_t mask = src->addrmask;
+ uint32_t offsetmask = src->addrmask2;
+__DECL_VECTORIZED_LOOP
+ for(int i = 0; i < planes; i++) {
+ offset[i] = src->voffset[i];
+ }
+ uint8_t *pp[16];
+ for(int i = 0; i < planes; i++) {
+ pp[i] = &(src->data[i][src->baseaddress[i]]);
+ }
+
+ uint8_t d[16];
+ int shift = src->shift;
+ const bool is_render[4] = { src->is_render[0], src->is_render[1], src->is_render[2], src->is_render[3] };
+ __DECL_ALIGNED(16) uint16_vec8_t tmpd;
+ __DECL_ALIGNED(32) scrntype_vec8_t tmp_dd;
+ scrntype_vec8_t* vdp = (scrntype_vec8_t*)__builtin_assume_aligned(dst, sizeof(scrntype_vec8_t));
+
+ x = src->begin_pos;
+ if(dst2 == NULL) {
+ uint32_t n = x;
+ __DECL_VECTORIZED_LOOP
+ for(uint32_t xx = 0; xx < src->render_width; xx++) {
+ d[0] = (is_render[0]) ? pp[0][(offset[0] + n) & mask] : 0;
+ tmpd.v = vp[0][d[0]].v;
+ __DECL_VECTORIZED_LOOP
+ for(int i = 1; i < planes; i++) {
+ d[i] = (is_render[i]) ? pp[i][(offset[i] + n) & mask] : 0;
+ tmpd.v = tmpd.v | vp[i][d[i]].v;
+ }
+ n = (n + 1) & offsetmask;
+ tmpd.v = tmpd.v >> shift;
+ __DECL_VECTORIZED_LOOP
+ for(int i = 0; i < 8; i++) {
+ tmp_dd.w[i] = palette[tmpd.w[i]];
+ }
+ vdp[xx].v = tmp_dd.v;
+ }
+ } else {
+#if defined(_RGB555) || defined(_RGBA565)
+ static const int shift_factor = 2;
+#else // 24bit
+ static const int shift_factor = 3;
+#endif
+ __DECL_ALIGNED(32) scrntype_vec8_t sline;
+ scrntype_vec8_t* vdp2 = (scrntype_vec8_t*)__builtin_assume_aligned(dst2, sizeof(scrntype_vec8_t));
+ __DECL_VECTORIZED_LOOP
+ for(int i = 0; i < 8; i++) {
+ sline.w[i] = (scrntype_t)RGBA_COLOR(31, 31, 31, 255);
+ }
+ uint32_t n = x;
+ __DECL_VECTORIZED_LOOP
+ for(uint32_t xx = 0; xx < src->render_width; xx++) {
+ d[0] = (is_render[0]) ? pp[0][(offset[0] + n) & mask] : 0;
+ tmpd.v = vp[0][d[0]].v;
+ __DECL_VECTORIZED_LOOP
+ for(int i = 1; i < planes; i++) {
+ d[i] = (is_render[i]) ? pp[i][(offset[i] + n) & mask] : 0;
+ tmpd.v = tmpd.v | vp[i][d[i]].v;
+ }
+ n = (n + 1) & offsetmask;
+ tmpd.v = tmpd.v >> shift;
+ __DECL_VECTORIZED_LOOP
+ for(int i = 0; i < 8; i++) {
+ tmp_dd.w[i] = palette[tmpd.w[i]];
+ }
+ vdp[xx].v = tmp_dd.v;
+ if(scan_line) {
+ tmp_dd.v = tmp_dd.v >> shift_factor;
+ tmp_dd.v = tmp_dd.v & sline.v;
+ }
+ vdp2[xx].v = tmp_dd.v;
+ }
+ }
+}
+
+void DLL_PREFIX Convert2NColorsToByte_Line(_render_command_data_t *src, uint8_t *dst, int planes)
+{
+ if(planes >= 8) planes = 8;
+ if(planes <= 0) return;
+
+ uint8_t* srcp[8];
+ __DECL_ALIGNED(32) uint32_t offset[8] = {0};
+ __DECL_ALIGNED(16) uint16_vec8_t dat;
+ uint16_vec8_t* bp[8] ;
+
+__DECL_VECTORIZED_LOOP
+ for(int i = 0; i < planes; i++) {
+ bp[i] = (uint16_vec8_t*)__builtin_assume_aligned(&(src->bit_trans_table[i]->plane_table[0]), sizeof(uint16_vec8_t));
+ srcp[i] = &(src->data[i][src->baseaddress[i]]);
+ }
+ uint32_t addrmask = src->addrmask;
+ uint32_t offsetmask = src->addrmask2;
+ int shift = src->shift;
+
+__DECL_VECTORIZED_LOOP
+ for(int i = 0; i < planes; i++) {
+ offset[i] = src->voffset[i];
+ }
+
+ uint32_t noffset = src->begin_pos & offsetmask;
+ uint8_t td[16];
+__DECL_VECTORIZED_LOOP
+ for(int x = 0; x < src->render_width; x++) {
+__DECL_VECTORIZED_LOOP
+ for(int i = 0; i < planes; i++) {
+ td[i] = srcp[i][(noffset + offset[i]) & addrmask];
+ }
+ noffset = (noffset + 1) & offsetmask;
+ dat.v = bp[0][td[0]].v;
+__DECL_VECTORIZED_LOOP
+ for(int i = 1; i < planes; i++) {
+ dat.v = dat.v | bp[i][td[i]].v;
+ }
+ dat.v = dat.v >> shift;
+__DECL_VECTORIZED_LOOP
+ for(int i = 0; i < 8; i++) {
+ dst[i] = (uint8_t)(dat.w[i]);
+ }
+ dst += 8;
+
+ }
+}
+
+void DLL_PREFIX Convert2NColorsToByte_LineZoom2(_render_command_data_t *src, uint8_t *dst, int planes)
+{
+ if(planes >= 8) planes = 8;
+ if(planes <= 0) return;
+
+ uint8_t* srcp[8];
+ __DECL_ALIGNED(32) uint32_t offset[8] = {0};
+ __DECL_ALIGNED(16) uint16_vec8_t dat;
+ uint16_vec8_t* bp[8] ;
+
+__DECL_VECTORIZED_LOOP
+ for(int i = 0; i < planes; i++) {
+ bp[i] = (uint16_vec8_t*)__builtin_assume_aligned(&(src->bit_trans_table[i]->plane_table[0]), sizeof(uint16_vec8_t));
+ srcp[i] = &(src->data[i][src->baseaddress[i]]);
+ }
+ uint32_t addrmask = src->addrmask;
+ uint32_t offsetmask = src->addrmask2;
+ int shift = src->shift;
+
+__DECL_VECTORIZED_LOOP
+ for(int i = 0; i < planes; i++) {
+ offset[i] = src->voffset[i];
+ }
+
+ uint32_t noffset = src->begin_pos & offsetmask;
+ uint8_t td[16];
+__DECL_VECTORIZED_LOOP
+ for(int x = 0; x < src->render_width; x++) {
+__DECL_VECTORIZED_LOOP
+ for(int i = 0; i < planes; i++) {
+ td[i] = srcp[i][(noffset + offset[i]) & addrmask];
+ }
+ noffset = (noffset + 1) & offsetmask;
+ dat.v = bp[0][td[0]].v;
+__DECL_VECTORIZED_LOOP
+ for(int i = 1; i < planes; i++) {
+ dat.v = dat.v | bp[i][td[i]].v;
+ }
+ dat.v = dat.v >> shift;
+__DECL_VECTORIZED_LOOP
+ for(int i = 0, j = 0; i < 16; i +=2, j++) {
+ dst[i] = (uint8_t)(dat.w[j]);
+ dst[i + 1] = (uint8_t)(dat.w[j]);
+ }
+ dst += 16;
+ }
+}
+
+void DLL_PREFIX Convert8ColorsToByte_Line(_render_command_data_t *src, uint8_t *dst)
+{
+ uint8_t *bp = &(src->data[0][src->baseaddress[0]]);
+ uint8_t *rp = &(src->data[1][src->baseaddress[1]]);
+ uint8_t *gp = &(src->data[2][src->baseaddress[2]]);
+ __DECL_ALIGNED(16) uint32_t offset[4] = {0};
+
+ __DECL_ALIGNED(16) uint16_vec8_t rdat;
+ __DECL_ALIGNED(16) uint16_vec8_t gdat;
+ __DECL_ALIGNED(16) uint16_vec8_t bdat;
+ __DECL_ALIGNED(16) uint16_vec8_t tmpd;
+
+ uint16_vec8_t* bpb = (uint16_vec8_t*)__builtin_assume_aligned(&(src->bit_trans_table[0]->plane_table[0]), sizeof(uint16_vec8_t));
+ uint16_vec8_t* bpr = (uint16_vec8_t*)__builtin_assume_aligned(&(src->bit_trans_table[1]->plane_table[0]), sizeof(uint16_vec8_t));
+ uint16_vec8_t* bpg = (uint16_vec8_t*)__builtin_assume_aligned(&(src->bit_trans_table[2]->plane_table[0]), sizeof(uint16_vec8_t));
+
+ uint32_t addrmask = src->addrmask;
+ uint32_t offsetmask = src->addrmask2;
+ int shift = src->shift;
+
+__DECL_VECTORIZED_LOOP
+ for(int i = 0; i < 3; i++) {
+ offset[i] = src->voffset[i];
+ }
+
+ uint32_t noffset = src->begin_pos & offsetmask;
+ uint8_t b, r, g;
+__DECL_VECTORIZED_LOOP
+ for(int x = 0; x < src->render_width; x++) {
+ b = bp[(noffset + offset[0]) & addrmask];
+ r = rp[(noffset + offset[1]) & addrmask];
+ g = gp[(noffset + offset[2]) & addrmask];
+
+ noffset = (noffset + 1) & offsetmask;
+
+ bdat.v = bpb[b].v;
+ rdat.v = bpr[r].v;
+ gdat.v = bpg[g].v;
+ tmpd.v = bdat.v;
+ tmpd.v = tmpd.v | rdat.v;
+ tmpd.v = tmpd.v | gdat.v;
+ tmpd.v = tmpd.v >> shift;
+
+__DECL_VECTORIZED_LOOP
+ for(int i = 0; i < 8; i++) {
+ dst[i] = (uint8_t)(tmpd.w[i]);
+ }
+ dst += 8;
+ }
+}
+
+